From d4f9b80c95816dd2c817bf65a2996d7dd6d83d11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 12:21:48 +0200 Subject: [PATCH 001/392] --- (#4239) updated-dependencies: - dependency-name: requests dependency-type: direct:production dependency-group: pip ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- bazel/notebook_requirements.in | 2 +- bazel/notebook_requirements.txt | 2 +- bazel/ortools_requirements.in | 2 +- bazel/ortools_requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index f473a794b3..3f35853168 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -3,7 +3,7 @@ absl-py==2.0.0 immutabledict==3.0.0 numpy==1.26.1 protobuf==4.25.3 -requests==2.31.0 +requests==2.32.0 scipy==1.11.3 # OR-Tools build dependencies diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index a8ba9039af..3620a22715 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -278,7 +278,7 @@ referencing==0.30.2 # jsonschema # jsonschema-specifications # jupyter-events -requests==2.31.0 +requests==2.32.0 # via # -r notebook_requirements.in # jupyterlab-server diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index e804b7daeb..a585bf75ae 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -3,7 +3,7 @@ absl-py==2.0.0 immutabledict==3.0.0 numpy==1.26.1 protobuf==4.25.3 -requests==2.31.0 +requests==2.32.0 scipy==1.11.3 # OR-Tools build dependencies diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index e9d026ba0d..5385b3a8eb 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -53,7 +53,7 @@ python-dateutil==2.8.2 # via pandas pytz==2022.7.1 # via pandas 
-requests==2.31.0 +requests==2.32.0 # via -r ortools_requirements.in scipy==1.11.3 # via -r ortools_requirements.in From 511bf047a7a0b977997a72e279b5ff2574345845 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 21 Mar 2024 11:34:50 +0100 Subject: [PATCH 002/392] backport cp-sat code from main --- examples/python/rcpsp_sat.py | 18 +- ortools/sat/BUILD.bazel | 2 + ortools/sat/clause.cc | 1 - ortools/sat/constraint_violation.cc | 35 ++ ortools/sat/constraint_violation.h | 20 + ortools/sat/cp_constraints.cc | 10 +- ortools/sat/cp_model.cc | 2 +- ortools/sat/cp_model.h | 2 +- ortools/sat/cp_model_expand.cc | 90 +++++ ortools/sat/cp_model_loader.cc | 22 +- ortools/sat/cp_model_mapping.h | 4 + ortools/sat/cp_model_presolve.cc | 1 - ortools/sat/cp_model_search.cc | 27 +- ortools/sat/cp_model_solver.cc | 7 +- ortools/sat/disjunctive.cc | 43 ++- ortools/sat/disjunctive.h | 16 +- ortools/sat/docs/integer_arithmetic.md | 4 +- ortools/sat/docs/model.md | 4 +- ortools/sat/drat_checker.cc | 2 +- ortools/sat/drat_checker.h | 2 +- ortools/sat/go/cp_model.go | 18 +- ortools/sat/go/cp_model_test.go | 180 ++++----- ortools/sat/go/domain.go | 6 +- ortools/sat/go/domain_test.go | 24 +- ortools/sat/integer.cc | 40 +- ortools/sat/integer.h | 4 +- ortools/sat/lb_tree_search.cc | 7 +- ortools/sat/lb_tree_search.h | 1 + ortools/sat/linear_constraint.h | 1 + ortools/sat/linear_programming_constraint.cc | 4 +- ortools/sat/linear_programming_constraint.h | 1 + ortools/sat/linear_propagation.cc | 275 +++++++++----- ortools/sat/linear_propagation.h | 15 +- ortools/sat/lp_utils.cc | 7 +- ortools/sat/optimization.cc | 13 +- ortools/sat/optimization.h | 7 +- ortools/sat/precedences.cc | 281 ++++++++------ ortools/sat/precedences.h | 83 +++-- ortools/sat/probing.cc | 2 +- ortools/sat/probing.h | 2 +- ortools/sat/python/cp_model.py | 346 +++++++++--------- ortools/sat/python/cp_model_helper.py | 8 +- ortools/sat/python/cp_model_helper_test.py | 5 +- ortools/sat/python/cp_model_test.py | 1 + 
ortools/sat/python/swig_helper_test.py | 2 + ortools/sat/rins.cc | 4 +- ortools/sat/routing_cuts.cc | 10 +- ortools/sat/samples/bin_packing_sat.py | 9 +- ortools/sat/samples/schedule_requests_sat.py | 4 +- .../samples/solution_hinting_sample_sat.go | 4 +- .../sat/samples/step_function_sample_sat.go | 2 +- ortools/sat/sat_parameters.proto | 15 +- 52 files changed, 1086 insertions(+), 607 deletions(-) diff --git a/examples/python/rcpsp_sat.py b/examples/python/rcpsp_sat.py index cddb3ff059..c4e66d62df 100644 --- a/examples/python/rcpsp_sat.py +++ b/examples/python/rcpsp_sat.py @@ -22,6 +22,7 @@ Data use in flags: """ import collections +import time from typing import Optional from absl import app @@ -50,9 +51,9 @@ _ADD_REDUNDANT_ENERGETIC_CONSTRAINTS = flags.DEFINE_bool( + " precedence graph.", ) _DELAY_TIME_LIMIT = flags.DEFINE_float( - "delay_time_limit", - 20.0, - "Time limit when computing min delay between tasks." + "pairwise_delay_total_time_limit", + 120.0, + "Total time limit when computing min delay between tasks." 
+ " A non-positive time limit disable min delays computation.", ) _PREEMPTIVE_LB_TIME_LIMIT = flags.DEFINE_float( @@ -601,21 +602,30 @@ def compute_delays_between_nodes( ): return delays, None, False + time_limit = _DELAY_TIME_LIMIT.value complete_problem_assignment = None num_optimal_delays = 0 num_delays_not_found = 0 optimal_found = True for start_task, end_task, active_tasks in task_intervals: + if time_limit <= 0: + optimal_found = False + print(f" - #timeout ({_DELAY_TIME_LIMIT.value}s) reached", flush=True) + break + + start_time = time.time() min_delay, feasible_delay, assignment = solve_rcpsp( problem, "", - f"num_search_workers:16,max_time_in_seconds:{_DELAY_TIME_LIMIT.value}", + f"num_search_workers:16,max_time_in_seconds:{time_limit}", set(active_tasks), start_task, end_task, [], delays, ) + time_limit -= time.time() - start_time + if min_delay != -1: delays[(start_task, end_task)] = min_delay, feasible_delay if start_task == 0 and end_task == len(problem.tasks) - 1: diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index bd6a42c395..2a03e3e838 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1090,6 +1090,7 @@ cc_library( "//ortools/graph", "//ortools/graph:topologicalsorter", "//ortools/util:bitset", + "//ortools/util:logging", "//ortools/util:strong_integers", "//ortools/util:time_limit", "@com_google_absl//absl/cleanup", @@ -1148,6 +1149,7 @@ cc_library( "//ortools/util:strong_integers", "//ortools/util:time_limit", "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/log", diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index a4047466bc..684676a894 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -1765,7 +1765,6 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( } // Remove clique (before extension) that are included in an 
extended one. - absl::flat_hash_set cannot_be_superset; detector.DetectInclusions([&](int subset, int superset) { const int subset_index = detector_clique_index[subset]; const int superset_index = detector_clique_index[superset]; diff --git a/ortools/sat/constraint_violation.cc b/ortools/sat/constraint_violation.cc index a990f4ba09..76aba599e3 100644 --- a/ortools/sat/constraint_violation.cc +++ b/ortools/sat/constraint_violation.cc @@ -1001,6 +1001,35 @@ int64_t CompiledIntDivConstraint::ComputeViolation( return std::abs(target_value - div_value); } +// ----- CompiledIntModConstraint ----- + +CompiledIntModConstraint::CompiledIntModConstraint( + const ConstraintProto& ct_proto) + : CompiledConstraint(ct_proto) {} + +int64_t CompiledIntModConstraint::ComputeViolation( + absl::Span solution) { + const int64_t target_value = + ExprValue(ct_proto().int_mod().target(), solution); + DCHECK_EQ(ct_proto().int_mod().exprs_size(), 2); + // Note: The violation computation assumes the modulo is constant. + const int64_t expr_value = ExprValue(ct_proto().int_mod().exprs(0), solution); + const int64_t mod_value = ExprValue(ct_proto().int_mod().exprs(1), solution); + const int64_t rhs = expr_value % mod_value; + if ((expr_value >= 0 && target_value >= 0) || + (expr_value <= 0 && target_value <= 0)) { + // Easy case. + return std::min({std::abs(target_value - rhs), + std::abs(target_value) + std::abs(mod_value - rhs), + std::abs(rhs) + std::abs(mod_value - target_value)}); + } else { + // Different signs. + // We use the sum of the absolute value to have a better gradiant. + // We could also use the min of target_move and the expr_move. 
+ return std::abs(target_value) + std::abs(expr_value); + } +} + // ----- CompiledAllDiffConstraint ----- CompiledAllDiffConstraint::CompiledAllDiffConstraint( @@ -1501,6 +1530,12 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) { constraints_.emplace_back(new CompiledIntDivConstraint(ct)); break; } + case ConstraintProto::ConstraintCase::kIntMod: { + DCHECK_EQ(ExprMin(ct.int_mod().exprs(1), cp_model_), + ExprMax(ct.int_mod().exprs(1), cp_model_)); + constraints_.emplace_back(new CompiledIntModConstraint(ct)); + break; + } case ConstraintProto::ConstraintCase::kLinear: { const Domain domain = ReadDomainFromProto(ct.linear()); const int ct_index = linear_evaluator_.NewConstraint(domain); diff --git a/ortools/sat/constraint_violation.h b/ortools/sat/constraint_violation.h index 1b108cd293..2599ffb376 100644 --- a/ortools/sat/constraint_violation.h +++ b/ortools/sat/constraint_violation.h @@ -490,6 +490,26 @@ class CompiledIntDivConstraint : public CompiledConstraint { int64_t ComputeViolation(absl::Span solution) override; }; +// The violation of an int_mod constraint is defined as follow: +// +// if target and expr0 have the same sign: +// min( +// abs(value(target) - (value(expr0) % value(expr1))), +// abs(value(target)) + abs((value(expr0) % value(expr1)) - value(expr1)), +// abs(value(expr0) % value(expr1)) + abs(value(target) - value(expr1)), +// ) +// +// if target and expr0 have different sign: +// abs(target) + abs(expr0) +// Note: the modulo (expr1) is always fixed. +class CompiledIntModConstraint : public CompiledConstraint { + public: + explicit CompiledIntModConstraint(const ConstraintProto& ct_proto); + ~CompiledIntModConstraint() override = default; + + int64_t ComputeViolation(absl::Span solution) override; +}; + // The violation of a all_diff is the number of unordered pairs of expressions // with the same value. 
class CompiledAllDiffConstraint : public CompiledConstraint { diff --git a/ortools/sat/cp_constraints.cc b/ortools/sat/cp_constraints.cc index 76256e8999..79f037a3be 100644 --- a/ortools/sat/cp_constraints.cc +++ b/ortools/sat/cp_constraints.cc @@ -121,6 +121,10 @@ bool GreaterThanAtLeastOneOfPropagator::Propagate() { literal_reason_.push_back(l.Negated()); } for (int i = 0; i < exprs_.size(); ++i) { + // If the level zero bounds is good enough, no reason needed. + if (integer_trail_->LevelZeroLowerBound(exprs_[i]) >= target_min) { + continue; + } if (trail_->Assignment().LiteralIsFalse(selectors_[i])) { literal_reason_.push_back(selectors_[i]); } else { @@ -139,7 +143,11 @@ void GreaterThanAtLeastOneOfPropagator::RegisterWith( const int id = watcher->Register(this); for (const Literal l : selectors_) watcher->WatchLiteral(l.Negated(), id); for (const Literal l : enforcements_) watcher->WatchLiteral(l, id); - for (const AffineExpression e : exprs_) watcher->WatchLowerBound(e, id); + for (const AffineExpression e : exprs_) { + if (!e.IsConstant()) { + watcher->WatchLowerBound(e, id); + } + } } } // namespace sat diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 02d53e4f2d..5c9da884f8 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -497,7 +497,7 @@ Constraint Constraint::WithName(absl::string_view name) { return *this; } -const std::string& Constraint::Name() const { return proto_->name(); } +absl::string_view Constraint::Name() const { return proto_->name(); } Constraint Constraint::OnlyEnforceIf(absl::Span literals) { for (const BoolVar& var : literals) { diff --git a/ortools/sat/cp_model.h b/ortools/sat/cp_model.h index 29c6cb1d39..70123343a0 100644 --- a/ortools/sat/cp_model.h +++ b/ortools/sat/cp_model.h @@ -556,7 +556,7 @@ class Constraint { Constraint WithName(absl::string_view name); /// Returns the name of the constraint (or the empty string if not set). 
- const std::string& Name() const; + absl::string_view Name() const; /// Returns the underlying protobuf object (useful for testing). const ConstraintProto& Proto() const { return *proto_; } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index bd640620ec..821eff9ca2 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -460,6 +460,90 @@ void ExpandInverse(ConstraintProto* ct, PresolveContext* context) { context->UpdateRuleStats("inverse: expanded"); } +void ExpandLinMaxWithTwoTerms(ConstraintProto* ct, PresolveContext* context) { + CHECK_EQ(ct->lin_max().exprs().size(), 2); + + // We will create 4 constraints for target = max(a, b). + // First. + // - target >= a. + // - target >= b. + for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { + LinearConstraintProto* lin = + context->working_model->add_constraints()->mutable_linear(); + lin->add_domain(0); + lin->add_domain(std::numeric_limits::max()); + AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); + AddLinearExpressionToLinearConstraint(expr, -1, lin); + } + + // And then, a new boolean b, and + // - b => target == a + // - not(b) => target == b + const int new_bool = context->NewBoolVar(); + bool first_loop = true; + for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { + ConstraintProto* new_ct = context->working_model->add_constraints(); + new_ct->add_enforcement_literal(first_loop ? 
new_bool + : NegatedRef(new_bool)); + first_loop = false; + + LinearConstraintProto* lin = new_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(0); + AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); + AddLinearExpressionToLinearConstraint(expr, -1, lin); + } + + ct->Clear(); + context->UpdateRuleStats("lin_max: expanded lin_max with two terms"); +} + +void ExpandGeneralLinMax(ConstraintProto* ct, PresolveContext* context) { + CHECK_GT(ct->lin_max().exprs().size(), 2); + + // We will create 2 * n constraints for target = max(a1, .., an). + // First. + // - target >= ai + for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { + LinearConstraintProto* lin = + context->working_model->add_constraints()->mutable_linear(); + lin->add_domain(0); + lin->add_domain(std::numeric_limits::max()); + AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); + AddLinearExpressionToLinearConstraint(expr, -1, lin); + } + + // And then, a new boolean bi, and + // - bi => target == ai + // With exactly_one(bi) + ConstraintProto* exo = context->working_model->add_constraints(); + + for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { + const int new_bool = context->NewBoolVar(); + exo->mutable_exactly_one()->add_literals(new_bool); + ConstraintProto* new_ct = context->working_model->add_constraints(); + new_ct->add_enforcement_literal(new_bool); + + LinearConstraintProto* lin = new_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(0); + AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); + AddLinearExpressionToLinearConstraint(expr, -1, lin); + } + + ct->Clear(); + context->UpdateRuleStats("lin_max: expanded lin_max"); +} + +void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { + if (ct->lin_max().exprs().size() < 2) return; + if (ct->lin_max().exprs().size() == 2) { + ExpandLinMaxWithTwoTerms(ct, context); + } else { + ExpandGeneralLinMax(ct, context); + } +} + // A[V] 
== V means for all i, V == i => A_i == i void ExpandElementWithTargetEqualIndex(ConstraintProto* ct, PresolveContext* context) { @@ -2227,6 +2311,12 @@ void ExpandCpModel(PresolveContext* context) { ExpandPositiveTable(ct, context); } break; + case ConstraintProto::kLinMax: + if (ct->lin_max().exprs().size() <= + context->params().max_lin_max_size_for_expansion()) { + ExpandLinMax(ct, context); + } + break; case ConstraintProto::kAllDiff: has_all_diffs = true; skip = true; diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index 32b72dd37f..09c9b13d7b 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1253,6 +1253,27 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { max_sum += std::max(term_a, term_b); } + // Load conditional precedences. + const SatParameters& params = *m->GetOrCreate(); + if (params.auto_detect_greater_than_at_least_one_of() && + ct.enforcement_literal().size() == 1 && vars.size() <= 2) { + // To avoid overflow in the code below, we tighten the bounds. + int64_t rhs_min = ct.linear().domain(0); + int64_t rhs_max = ct.linear().domain(ct.linear().domain().size() - 1); + rhs_min = std::max(rhs_min, min_sum.value()); + rhs_max = std::min(rhs_max, max_sum.value()); + + auto* detector = m->GetOrCreate(); + const Literal lit = mapping->Literal(ct.enforcement_literal(0)); + const Domain domain = ReadDomainFromProto(ct.linear()); + if (vars.size() == 1) { + detector->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); + } else if (vars.size() == 2) { + detector->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, rhs_min, + rhs_max); + } + } + // Load precedences. 
if (!HasEnforcementLiteral(ct)) { auto* precedences = m->GetOrCreate(); @@ -1311,7 +1332,6 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { } } - const SatParameters& params = *m->GetOrCreate(); const IntegerValue domain_size_limit( params.max_domain_size_when_encoding_eq_neq_constraints()); if (ct.linear().vars_size() == 2 && !integer_trail->IsFixed(vars[0]) && diff --git a/ortools/sat/cp_model_mapping.h b/ortools/sat/cp_model_mapping.h index 85acd1ec1e..c4dd9cb342 100644 --- a/ortools/sat/cp_model_mapping.h +++ b/ortools/sat/cp_model_mapping.h @@ -59,6 +59,10 @@ struct ObjectiveDefinition { double ScaleIntegerObjective(IntegerValue value) const { return (ToDouble(value) + offset) * scaling_factor; } + + double ScaleObjective(double value) const { + return (value + offset) * scaling_factor; + } }; // Holds the mapping between CpModel proto indices and the sat::model ones. diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 6222e1f8a1..3a461cc417 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -9877,7 +9877,6 @@ void CpModelPresolver::FindBigHorizontalLinearOverlap( int64_t num_blocks = 0; int64_t nz_reduction = 0; - absl::flat_hash_set processed; for (int i = 0; i < sorted_linear.size(); ++i) { const int c = sorted_linear[i]; if (c < 0) continue; diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index f045fe30b9..2d201bb642 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -547,18 +547,20 @@ absl::flat_hash_map GetNamedParameters( { SatParameters new_params = base_params; new_params.set_optimize_with_lb_tree_search(true); + // We do not want to change the objective_var lb from outside as it gives + // better result to only use locally derived reason in that algo. 
+ new_params.set_share_objective_bounds(false); + + new_params.set_linearization_level(0); + strategies["lb_tree_search_no_lp"] = new_params; + new_params.set_linearization_level(2); if (base_params.use_dual_scheduling_heuristics()) { AddDualSchedulingHeuristics(new_params); } - // We want to spend more time on the LP here. new_params.set_add_lp_constraints_lazily(false); new_params.set_root_lp_iterations(100'000); - - // We do not want to change the objective_var lb from outside as it gives - // better result to only use locally derived reason in that algo. - new_params.set_share_objective_bounds(false); strategies["lb_tree_search"] = new_params; } @@ -630,6 +632,21 @@ absl::flat_hash_map GetNamedParameters( strategies["fixed"] = new_params; } + // Inprocessing + { + SatParameters new_params = base_params; + new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH); + new_params.set_use_sat_inprocessing(false); + strategies["no_inprocessing"] = new_params; + + new_params.set_use_sat_inprocessing(true); + new_params.set_inprocessing_dtime_ratio(1.0); + strategies["max_inprocessing"] = new_params; + + new_params.set_linearization_level(0); + strategies["max_inprocessing_no_lp"] = new_params; + } + // Quick restart. { // TODO(user): Experiment with search_random_variable_pool_size. diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 6d2a3d0989..6eda7ced56 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1637,9 +1637,8 @@ void LoadCpModel(const CpModelProto& model_proto, Model* model) { // Note that we do that before we finish loading the problem (objective and // LP relaxation), because propagation will be faster at this point and it // should be enough for the purpose of this auto-detection. 
- if (model->Mutable() != nullptr && - parameters.auto_detect_greater_than_at_least_one_of()) { - model->Mutable() + if (parameters.auto_detect_greater_than_at_least_one_of()) { + model->GetOrCreate() ->AddGreaterThanAtLeastOneOfConstraints(model); if (!sat_solver->FinishPropagation()) return unsat(); } @@ -3469,7 +3468,7 @@ void SolveCpModelParallel(const CpModelProto& model_proto, subsolvers.push_back(std::make_unique( std::make_unique( helper, "packing_precedences_lns"), - params, helper, &shared)); + lns_params, helper, &shared)); subsolvers.push_back(std::make_unique( std::make_unique( helper, "packing_slice_lns"), diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 13ee11f025..07c34c86ed 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -14,7 +14,10 @@ #include "ortools/sat/disjunctive.h" #include +#include #include +#include +#include #include #include "absl/algorithm/container.h" @@ -136,9 +139,8 @@ void AddDisjunctive(const std::vector& intervals, if (params.use_precedences_in_disjunctive_constraint() && !params.use_combined_no_overlap()) { for (const bool time_direction : {true, false}) { - DisjunctivePrecedences* precedences = new DisjunctivePrecedences( - time_direction, helper, model->GetOrCreate(), - model->GetOrCreate()); + DisjunctivePrecedences* precedences = + new DisjunctivePrecedences(time_direction, helper, model); const int id = precedences->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 5); model->TakeOwnership(precedences); @@ -910,6 +912,14 @@ int DisjunctiveDetectablePrecedences::RegisterWith( return id; } +DisjunctivePrecedences::~DisjunctivePrecedences() { + if (!VLOG_IS_ON(1)) return; + if (shared_stats_ == nullptr) return; + std::vector> stats; + stats.push_back({"disj_precedences/num_propagations_", num_propagations_}); + shared_stats_->AddStats(stats); +} + bool DisjunctivePrecedences::Propagate() { if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; 
window_.clear(); @@ -1016,6 +1026,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // TODO(user): If var is actually a start-min of an interval, we // could push the end-min and check the interval consistency right away. + ++num_propagations_; if (!helper_->PushIntegerLiteral( IntegerLiteral::GreaterOrEqual(var, new_lb))) { return false; @@ -1354,6 +1365,7 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { // tasks in the tree actually), otherwise there will be no way to schedule // the critical_tasks inside their time window. while (theta_tree_.GetOptionalEnvelope() > non_gray_end_max) { + const IntegerValue end_min_with_gray = theta_tree_.GetOptionalEnvelope(); int critical_event_with_gray; int gray_event; IntegerValue available_energy; @@ -1405,13 +1417,13 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { use_energy_reason = false; window_end = helper_->EndMin(gray_task) - 1; } else { - window_end = non_gray_end_min + event_size_[gray_event] - 1; + window_end = end_min_with_gray - 1; } CHECK_GE(window_end, non_gray_end_max); // The non-gray part of the explanation as detailed above. helper_->ClearReason(); - bool one_before = false; + bool all_before = true; for (int event = first_event; event < window_size; event++) { const int task = window_[event].task_index; if (is_gray_[task]) continue; @@ -1422,22 +1434,21 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks( task, gray_task, /*add_reason_if_after=*/true); - if (dist >= 0) { - one_before = true; - } else { + if (dist < 0) { + all_before = false; helper_->AddEndMaxReason(task, window_end); } } // Add the reason for the gray_task (we don't need the end-max or - // presence reason). 
- if (one_before) { - helper_->AddSizeMinReason(gray_task); - } else if (use_energy_reason) { - helper_->AddEnergyAfterReason(gray_task, event_size_[gray_event], - window_[critical_event_with_gray].time); - } else { - helper_->AddEndMinReason(gray_task, helper_->EndMin(gray_task)); + // presence reason) needed for the precedences. + if (!all_before) { + if (use_energy_reason) { + helper_->AddEnergyAfterReason(gray_task, event_size_[gray_event], + first_start); + } else { + helper_->AddEndMinReason(gray_task, helper_->EndMin(gray_task)); + } } // If we detect precedences at level zero, lets add them to the diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 9ee6863723..7d9c6b53ab 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -141,7 +141,7 @@ class DisjunctiveOverloadChecker : public PropagatorInterface { int RegisterWith(GenericLiteralWatcher* watcher); private: - bool PropagateSubwindow(int relevat_size, IntegerValue global_window_end); + bool PropagateSubwindow(int relevant_size, IntegerValue global_window_end); SchedulingConstraintHelper* helper_; @@ -262,15 +262,16 @@ class DisjunctiveEdgeFinding : public PropagatorInterface { class DisjunctivePrecedences : public PropagatorInterface { public: DisjunctivePrecedences(bool time_direction, - SchedulingConstraintHelper* helper, - IntegerTrail* integer_trail, - PrecedencesPropagator* precedences) + SchedulingConstraintHelper* helper, Model* model) : time_direction_(time_direction), helper_(helper), - integer_trail_(integer_trail), - precedences_(precedences), + integer_trail_(model->GetOrCreate()), + precedences_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()), task_set_(helper->NumTasks()), task_to_arc_index_(helper->NumTasks()) {} + ~DisjunctivePrecedences() override; + bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); @@ -281,6 +282,9 @@ class DisjunctivePrecedences : public PropagatorInterface { SchedulingConstraintHelper* 
helper_; IntegerTrail* integer_trail_; PrecedencesPropagator* precedences_; + SharedStatistics* shared_stats_; + + int64_t num_propagations_ = 0; std::vector window_; std::vector index_to_end_vars_; diff --git a/ortools/sat/docs/integer_arithmetic.md b/ortools/sat/docs/integer_arithmetic.md index 93b6c4d5c5..efdf3884f6 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -37,7 +37,7 @@ non-contiguous domains. Here, the variable can be any of 1, 3, 4, or 6: 6}), "x");` - **C#**: `model.NewIntVarFromDomain(Domain.FromValues(new long[] {1, 3, 4, 6}), "x");` -- **Go**: `model.NewIntVarFromDomain(cpmodel.FromValues([]int64_t{1, 3, 4, 6})` +- **Go**: `model.NewIntVarFromDomain(cpmodel.FromValues([]int64{1, 3, 4, 6})` Variables can also be created using a list of intervals. Below, the variable created is constrained to be 1, 2, 4, 5, or 6: @@ -1157,7 +1157,7 @@ func stepFunctionSampleSat() error { // expr == 0 on [5, 6] U [8, 10] b0 := model.NewBoolVar() - d0 := cpmodel.FromValues([]int64_t{5, 6, 8, 9, 10}) + d0 := cpmodel.FromValues([]int64{5, 6, 8, 9, 10}) model.AddLinearConstraintForDomain(x, d0).OnlyEnforceIf(b0) model.AddEquality(expr, cpmodel.NewConstant(0)).OnlyEnforceIf(b0) diff --git a/ortools/sat/docs/model.md b/ortools/sat/docs/model.md index e8cd6d0e55..82615116b7 100644 --- a/ortools/sat/docs/model.md +++ b/ortools/sat/docs/model.md @@ -324,10 +324,10 @@ func solutionHintingSampleSat() error { model.AddNotEqual(x, y) - model.Maximize(cpmodel.NewLinearExpr().AddWeightedSum([]cpmodel.LinearArgument{x, y, z}, []int64_t{1, 2, 3})) + model.Maximize(cpmodel.NewLinearExpr().AddWeightedSum([]cpmodel.LinearArgument{x, y, z}, []int64{1, 2, 3})) // Solution hinting: x <- 1, y <- 2 - hint := &cpmodel.Hint{Ints: map[cpmodel.IntVar]int64_t{x: 7}} + hint := &cpmodel.Hint{Ints: map[cpmodel.IntVar]int64{x: 7}} model.SetHint(hint) m, err := model.Model() diff --git a/ortools/sat/drat_checker.cc b/ortools/sat/drat_checker.cc index 
caab5ef664..d1bd09736d 100644 --- a/ortools/sat/drat_checker.cc +++ b/ortools/sat/drat_checker.cc @@ -604,7 +604,7 @@ bool AddInferedAndDeletedClauses(const std::string& file_path, } bool PrintClauses(const std::string& file_path, SatFormat format, - const std::vector>& clauses, + absl::Span> clauses, int num_variables) { std::ofstream output_stream(file_path, std::ofstream::out); if (format == DIMACS) { diff --git a/ortools/sat/drat_checker.h b/ortools/sat/drat_checker.h index 11a5b11095..9069285ab4 100644 --- a/ortools/sat/drat_checker.h +++ b/ortools/sat/drat_checker.h @@ -336,7 +336,7 @@ enum SatFormat { // Prints the given clauses in the file at the given path, using the given file // format. Returns true iff the file was successfully written. bool PrintClauses(const std::string& file_path, SatFormat format, - const std::vector>& clauses, + absl::Span> clauses, int num_variables); } // namespace sat diff --git a/ortools/sat/go/cp_model.go b/ortools/sat/go/cp_model.go index a0c7feebee..fbd42d747b 100644 --- a/ortools/sat/go/cp_model.go +++ b/ortools/sat/go/cp_model.go @@ -108,7 +108,7 @@ func (l *LinearExpr) AddSum(las ...LinearArgument) *LinearExpr { // AddWeightedSum adds the linear arguments with the corresponding coefficients to the LinearExpr // and returns itself. 
-func (l *LinearExpr) AddWeightedSum(las []LinearArgument, coeffs []int64_t) *LinearExpr { +func (l *LinearExpr) AddWeightedSum(las []LinearArgument, coeffs []int64) *LinearExpr { if len(coeffs) != len(las) { log.Fatalf("las and coeffs must be the same length: %v != %v", len(las), len(coeffs)) } @@ -190,7 +190,7 @@ func (i IntVar) asLinearExpressionProto() *cmpb.LinearExpressionProto { linExprProto := &cmpb.LinearExpressionProto{} linExprProto.SetVars([]int32{int32_t(i.ind)}) - linExprProto.SetCoeffs([]int64_t{1}) + linExprProto.SetCoeffs([]int64{1}) return linExprProto } @@ -253,7 +253,7 @@ func (b BoolVar) asLinearExpressionProto() *cmpb.LinearExpressionProto { coeff = -1 offset = 1 } - linExprProto.SetCoeffs([]int64_t{coeff}) + linExprProto.SetCoeffs([]int64{coeff}) linExprProto.SetOffset(offset) return linExprProto @@ -484,7 +484,7 @@ func NewCpModelBuilder() *Builder { func (cp *Builder) NewIntVar(lb, ub int64_t) IntVar { intVar := IntVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64_t{lb, ub}}.Build() + pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{lb, ub}}.Build() cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) return intVar @@ -504,7 +504,7 @@ func (cp *Builder) NewIntVarFromDomain(d Domain) IntVar { func (cp *Builder) NewBoolVar() BoolVar { boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64_t{0, 1}}.Build() + pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{0, 1}}.Build() cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) return boolVar @@ -531,7 +531,7 @@ func (cp *Builder) TrueVar() BoolVar { } boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64_t{1, 1}}.Build() + pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{1, 1}}.Build() cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), 
pVar)) cp.constants[1] = boolVar.ind @@ -547,7 +547,7 @@ func (cp *Builder) FalseVar() BoolVar { } boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64_t{0, 0}}.Build() + pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{0, 0}}.Build() cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) cp.constants[0] = boolVar.ind @@ -762,7 +762,7 @@ func (cp *Builder) AddVariableElement(ind IntVar, vars []IntVar, target IntVar) } // AddElement adds the element constraint: values[ind] == target -func (cp *Builder) AddElement(ind IntVar, values []int64_t, target IntVar) Constraint { +func (cp *Builder) AddElement(ind IntVar, values []int64, target IntVar) Constraint { vars := make([]IntVar, len(values)) for i, v := range values { vars[i] = cp.NewConstant(v) @@ -982,7 +982,7 @@ func (cp *Builder) AddReservoirConstraint(min, max int64_t) ReservoirConstraint // // It returns an AutomatonConstraint that allows adding transition // incrementally after construction. 
-func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64_t, finalStates []int64_t) AutomatonConstraint { +func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64_t, finalStates []int64) AutomatonConstraint { var transitions []int32 for _, v := range transitionVars { cp.checkSameModelAndSetErrorf(v.cpb, "invalid parameter intVar %v added to the AutomatonConstraint %v", v.Index(), len(cp.cmpb.GetConstraints())) diff --git a/ortools/sat/go/cp_model_test.go b/ortools/sat/go/cp_model_test.go index 227d64c378..cb52343d4d 100644 --- a/ortools/sat/go/cp_model_test.go +++ b/ortools/sat/go/cp_model_test.go @@ -156,8 +156,8 @@ func TestVar_IntVarDomain(t *testing.T) { }, { name: "DomainWithMultipleIntervals", - intVar: model.NewIntVarFromDomain(FromValues([]int64_t{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8})), - want: FromValues([]int64_t{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}), + intVar: model.NewIntVarFromDomain(FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8})), + want: FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}), }, } @@ -187,7 +187,7 @@ func TestVar_IntegerVariableProto(t *testing.T) { return bv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: []int64_t{0, 1}, + Domain: []int64{0, 1}, }.Build(), }, { @@ -197,17 +197,17 @@ func TestVar_IntegerVariableProto(t *testing.T) { return iv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: []int64_t{-10, 10}, + Domain: []int64{-10, 10}, }.Build(), }, { name: "IntVarFromDomain", varIndex: func(model *Builder) VarIndex { - iv := model.NewIntVarFromDomain(FromValues([]int64_t{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8})) + iv := model.NewIntVarFromDomain(FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8})) return iv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: FromValues([]int64_t{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}).FlattenedIntervals(), + Domain: FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}).FlattenedIntervals(), }.Build(), }, { @@ 
-217,7 +217,7 @@ func TestVar_IntegerVariableProto(t *testing.T) { return cv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: []int64_t{10, 10}, + Domain: []int64{10, 10}, }.Build(), }, { @@ -227,7 +227,7 @@ func TestVar_IntegerVariableProto(t *testing.T) { return tv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: []int64_t{1, 1}, + Domain: []int64{1, 1}, }.Build(), }, { @@ -237,7 +237,7 @@ func TestVar_IntegerVariableProto(t *testing.T) { return fv.Index() }, want: cmpb.IntegerVariableProto_builder{ - Domain: []int64_t{0, 0}, + Domain: []int64{0, 0}, }.Build(), }, } @@ -267,7 +267,7 @@ func TestVar_EvaluateSolutionValue(t *testing.T) { model := NewCpModelBuilder() iv := model.NewIntVar(0, 10) response := cmpb.CpSolverResponse_builder{ - Solution: []int64_t{5}, + Solution: []int64{5}, }.Build() return iv.evaluateSolutionValue(response) }, @@ -279,7 +279,7 @@ func TestVar_EvaluateSolutionValue(t *testing.T) { model := NewCpModelBuilder() bv := model.NewBoolVar() response := cmpb.CpSolverResponse_builder{ - Solution: []int64_t{0}, + Solution: []int64{0}, }.Build() return bv.evaluateSolutionValue(response) }, @@ -291,7 +291,7 @@ func TestVar_EvaluateSolutionValue(t *testing.T) { model := NewCpModelBuilder() bv := model.NewBoolVar() response := cmpb.CpSolverResponse_builder{ - Solution: []int64_t{0}, + Solution: []int64{0}, }.Build() return bv.Not().evaluateSolutionValue(response) }, @@ -305,7 +305,7 @@ func TestVar_EvaluateSolutionValue(t *testing.T) { bv := model.NewBoolVar() le := NewLinearExpr().AddTerm(iv, 10).AddTerm(bv, 20).AddConstant(5) response := cmpb.CpSolverResponse_builder{ - Solution: []int64_t{5, 1}, + Solution: []int64{5, 1}, }.Build() return le.evaluateSolutionValue(response) }, @@ -429,7 +429,7 @@ func TestVar_AsLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, { @@ -439,7 +439,7 @@ func 
TestVar_AsLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, { @@ -449,7 +449,7 @@ func TestVar_AsLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, Offset: 1, }.Build(), }, @@ -460,7 +460,7 @@ func TestVar_AsLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv.Index()), int32_t(bv.Index())}, - Coeffs: []int64_t{10, 20}, + Coeffs: []int64{10, 20}, Offset: 5, }.Build(), }, @@ -497,7 +497,7 @@ func TestVar_AsNegatedLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), }, { @@ -507,7 +507,7 @@ func TestVar_AsNegatedLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), }, { @@ -517,7 +517,7 @@ func TestVar_AsNegatedLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, Offset: -1, }.Build(), }, @@ -528,7 +528,7 @@ func TestVar_AsNegatedLinearExpressionProto(t *testing.T) { }, want: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv.Index()), int32_t(bv.Index())}, - Coeffs: []int64_t{-10, -20}, + Coeffs: []int64{-10, -20}, Offset: -5, }.Build(), }, @@ -550,7 +550,7 @@ func TestLinearExpr(t *testing.T) { iv1 := model.NewIntVar(2, 8).WithName("iv1") iv2 := model.NewIntVar(1, 5).WithName("iv2") bv := model.NewBoolVar().WithName("bv1") - lin := NewLinearExpr().AddWeightedSum([]LinearArgument{iv1, bv}, []int64_t{10, 20}) + lin := NewLinearExpr().AddWeightedSum([]LinearArgument{iv1, bv}, []int64{10, 20}) testCases := 
[]struct { name string @@ -648,7 +648,7 @@ func TestLinearExpr(t *testing.T) { { name: "AddWeightedSumIntVar", buildExpr: func() *LinearExpr { - return NewLinearExpr().AddWeightedSum([]LinearArgument{iv1}, []int64_t{10}) + return NewLinearExpr().AddWeightedSum([]LinearArgument{iv1}, []int64{10}) }, want: &LinearExpr{ varCoeffs: []varCoeff{{ind: iv1.Index(), coeff: 10}}, @@ -658,7 +658,7 @@ func TestLinearExpr(t *testing.T) { { name: "AddWeightedSumBoolVar", buildExpr: func() *LinearExpr { - return NewLinearExpr().AddWeightedSum([]LinearArgument{bv}, []int64_t{-10}) + return NewLinearExpr().AddWeightedSum([]LinearArgument{bv}, []int64{-10}) }, want: &LinearExpr{ varCoeffs: []varCoeff{{ind: bv.Index(), coeff: -10}}, @@ -668,7 +668,7 @@ func TestLinearExpr(t *testing.T) { { name: "AddWeightedSumManyVars", buildExpr: func() *LinearExpr { - return NewLinearExpr().AddWeightedSum([]LinearArgument{iv1, iv2, bv, bv.Not()}, []int64_t{10, 20, 30, 40}) + return NewLinearExpr().AddWeightedSum([]LinearArgument{iv1, iv2, bv, bv.Not()}, []int64{10, 20, 30, 40}) }, want: &LinearExpr{ varCoeffs: []varCoeff{ @@ -683,7 +683,7 @@ func TestLinearExpr(t *testing.T) { { name: "AddWeightedSumLinearExpr", buildExpr: func() *LinearExpr { - return NewLinearExpr().AddWeightedSum([]LinearArgument{lin}, []int64_t{3}) + return NewLinearExpr().AddWeightedSum([]LinearArgument{lin}, []int64{3}) }, want: &LinearExpr{ varCoeffs: []varCoeff{ @@ -738,11 +738,11 @@ func TestIntervalVar(t *testing.T) { Start: cmpb.LinearExpressionProto_builder{Offset: 1}.Build(), Size: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), End: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }.Build(), }.Build(), @@ -759,12 +759,12 @@ func TestIntervalVar(t *testing.T) { Interval: cmpb.IntervalConstraintProto_builder{ Start: cmpb.LinearExpressionProto_builder{ Vars: 
[]int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Size: cmpb.LinearExpressionProto_builder{Offset: 5}.Build(), End: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, Offset: 5, }.Build(), }.Build(), @@ -783,11 +783,11 @@ func TestIntervalVar(t *testing.T) { Start: cmpb.LinearExpressionProto_builder{Offset: 1}.Build(), Size: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), End: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }.Build(), }.Build(), @@ -804,12 +804,12 @@ func TestIntervalVar(t *testing.T) { Interval: cmpb.IntervalConstraintProto_builder{ Start: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Size: cmpb.LinearExpressionProto_builder{Offset: 5}.Build(), End: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, Offset: 5, }.Build(), }.Build(), @@ -970,8 +970,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(bv1.Index())}, - Coeffs: []int64_t{1, 1}, - Domain: []int64_t{-5, -4, -2, -1, 6, 15}, + Coeffs: []int64{1, 1}, + Domain: []int64{-5, -4, -2, -1, 6, 15}, }.Build(), }.Build(), }, @@ -985,8 +985,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(bv1.Index())}, - Coeffs: []int64_t{1, 1}, - Domain: []int64_t{2, 6}, + Coeffs: []int64{1, 1}, + Domain: []int64{2, 6}, }.Build(), }.Build(), }, @@ -1000,8 +1000,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: 
cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{10, 10}, + Coeffs: []int64{1}, + Domain: []int64{10, 10}, }.Build(), }.Build(), }, @@ -1015,8 +1015,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{math.MinInt64, 10}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 10}, }.Build(), }.Build(), }, @@ -1030,8 +1030,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{math.MinInt64, 9}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9}, }.Build(), }.Build(), }, @@ -1045,8 +1045,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{10, math.MaxInt64}, + Coeffs: []int64{1}, + Domain: []int64{10, math.MaxInt64}, }.Build(), }.Build(), }, @@ -1060,8 +1060,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{11, math.MaxInt64}, + Coeffs: []int64{1}, + Domain: []int64{11, math.MaxInt64}, }.Build(), }.Build(), }, @@ -1075,8 +1075,8 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Linear: cmpb.LinearConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, - Domain: []int64_t{math.MinInt64, 9, 11, math.MaxInt64}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9, 11, math.MaxInt64}, }.Build(), }.Build(), }, @@ -1092,20 +1092,20 @@ 
func TestCpModelBuilder_Constraints(t *testing.T) { Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(bv2.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, Offset: 1, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{}, - Coeffs: []int64_t{}, + Coeffs: []int64{}, Offset: 10, }.Build(), }, @@ -1130,7 +1130,7 @@ func TestCpModelBuilder_Constraints(t *testing.T) { { name: "AddElement", constraint: func() *cmpb.ConstraintProto { - c := model.AddElement(iv1, []int64_t{10, 20}, iv4) + c := model.AddElement(iv1, []int64{10, 20}, iv4) m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, @@ -1170,16 +1170,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { LinMax: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv3.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), }, }.Build(), @@ -1196,16 +1196,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { LinMax: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv3.Index())}, - 
Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, }.Build(), @@ -1222,16 +1222,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { IntProd: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv3.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, }.Build(), @@ -1248,16 +1248,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { IntDiv: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv3.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, }.Build(), @@ -1274,16 +1274,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { LinMax: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{-1}, + Coeffs: []int64{-1}, }.Build(), }, }.Build(), @@ -1300,16 +1300,16 @@ func TestCpModelBuilder_Constraints(t *testing.T) { IntMod: cmpb.LinearArgumentProto_builder{ Target: cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, 
+ Coeffs: []int64{1}, }.Build(), Exprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv3.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, }.Build(), @@ -1388,7 +1388,7 @@ func TestCpModelBuilder_Constraints(t *testing.T) { want: cmpb.ConstraintProto_builder{ Table: cmpb.TableConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - Values: []int64_t{0, 2, 1, 3}, + Values: []int64{0, 2, 1, 3}, }.Build(), }.Build(), }, @@ -1407,7 +1407,7 @@ func TestCpModelBuilder_Constraints(t *testing.T) { TimeExprs: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{2}, + Coeffs: []int64{2}, }.Build(), }, LevelChanges: []*cmpb.LinearExpressionProto{ @@ -1422,7 +1422,7 @@ func TestCpModelBuilder_Constraints(t *testing.T) { { name: "AddAutomaton", constraint: func() *cmpb.ConstraintProto { - c := model.AddAutomaton([]IntVar{iv1, iv2}, 0, []int64_t{5, 10}) + c := model.AddAutomaton([]IntVar{iv1, iv2}, 0, []int64{5, 10}) c.AddTransition(0, 1, 10) c.AddTransition(2, 3, 15) m := mustModel(t, model) @@ -1432,10 +1432,10 @@ func TestCpModelBuilder_Constraints(t *testing.T) { Automaton: cmpb.AutomatonConstraintProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, StartingState: 0, - FinalStates: []int64_t{5, 10}, - TransitionTail: []int64_t{0, 2}, - TransitionHead: []int64_t{1, 3}, - TransitionLabel: []int64_t{10, 15}, + FinalStates: []int64{5, 10}, + TransitionTail: []int64{0, 2}, + TransitionHead: []int64{1, 3}, + TransitionLabel: []int64{10, 15}, }.Build(), }.Build(), }, @@ -1451,13 +1451,13 @@ func TestCpModelBuilder_Constraints(t *testing.T) { Cumulative: cmpb.CumulativeConstraintProto_builder{ Capacity: cmpb.LinearExpressionProto_builder{ Vars: 
[]int32{int32_t(iv1.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), Intervals: []int32{int32_t(interval1.Index())}, Demands: []*cmpb.LinearExpressionProto{ cmpb.LinearExpressionProto_builder{ Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64_t{1}, + Coeffs: []int64{1}, }.Build(), }, }.Build(), @@ -1486,7 +1486,7 @@ func TestCpModelBuilder_Minimize(t *testing.T) { m := mustModel(t, model) want := cmpb.CpObjectiveProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - Coeffs: []int64_t{3, 5}, + Coeffs: []int64{3, 5}, }.Build() got := m.GetObjective() @@ -1504,7 +1504,7 @@ func TestCpModelBuilder_Maximize(t *testing.T) { model.Maximize(NewLinearExpr().AddTerm(iv1, 3).AddTerm(iv2, 5).AddConstant(7)) want := cmpb.CpObjectiveProto_builder{ Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - Coeffs: []int64_t{-3, -5}, + Coeffs: []int64{-3, -5}, ScalingFactor: -1.0, Offset: -7, }.Build() @@ -1562,12 +1562,12 @@ func TestCpModelBuilder_ConstantVars(t *testing.T) { func TestCpModelBuilder_IndexValueSlices(t *testing.T) { indices := []int32{5, 1, 3} - values := []int64_t{10, 11, 8} + values := []int64{10, 11, 8} sort.Sort(indexValueSlices{indices, values}) wantIndices := []int32{1, 3, 5} - wantValues := []int64_t{11, 8, 10} + wantValues := []int64{11, 8, 10} if diff := cmp.Diff(wantIndices, indices); diff != "" { t.Errorf("Sort indexValueSlices return unexpected indices diff (-want+got): %v", diff) @@ -1584,7 +1584,7 @@ func TestCpModelBuilder_SetHint(t *testing.T) { bv1 := model.NewBoolVar() bv2 := model.NewBoolVar() hint := &Hint{ - Ints: map[IntVar]int64_t{iv: 7}, + Ints: map[IntVar]int64{iv: 7}, Bools: map[BoolVar]bool{bv2.Not(): false, bv1: true}, } model.SetHint(hint) @@ -1593,7 +1593,7 @@ func TestCpModelBuilder_SetHint(t *testing.T) { got := m.GetSolutionHint() want := cmpb.PartialVariableAssignment_builder{ Vars: []int32{int32_t(iv.Index()), int32_t(bv1.Index()), int32_t(bv2.Index())}, - Values: []int64_t{7, 1, 
1}, + Values: []int64{7, 1, 1}, }.Build() if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" { @@ -1608,7 +1608,7 @@ func TestCpModelBuilder_ClearHint(t *testing.T) { bv1 := model.NewBoolVar() bv2 := model.NewBoolVar() hint := &Hint{ - Ints: map[IntVar]int64_t{iv: 7}, + Ints: map[IntVar]int64{iv: 7}, Bools: map[BoolVar]bool{bv1: true, bv2.Not(): false}, } model.SetHint(hint) @@ -1759,7 +1759,7 @@ func TestCpModelBuilder_ErrorHandling(t *testing.T) { builder: func() *Builder { model1 := NewCpModelBuilder() model2 := NewCpModelBuilder() - model1.AddAutomaton([]IntVar{model2.NewIntVar(0, 10)}, 0, []int64_t{10}) + model1.AddAutomaton([]IntVar{model2.NewIntVar(0, 10)}, 0, []int64{10}) return model1 }, }, diff --git a/ortools/sat/go/domain.go b/ortools/sat/go/domain.go index f466f05df4..901e42614a 100644 --- a/ortools/sat/go/domain.go +++ b/ortools/sat/go/domain.go @@ -116,7 +116,7 @@ func NewDomain(left, right int64_t) Domain { // FromValues creates a new domain from `values`. `values` need not be // sorted and can repeat. -func FromValues(values []int64_t) Domain { +func FromValues(values []int64) Domain { var d Domain for _, v := range values { d.intervals = append(d.intervals, ClosedInterval{v, v}) @@ -138,7 +138,7 @@ func FromIntervals(intervals []ClosedInterval) Domain { // FromFlatIntervals creates a new domain from a flattened list of intervals. If there is an // interval where the start is greater than the end, the interval is considered empty. Returns // an error if the length of `values` is not even. -func FromFlatIntervals(values []int64_t) (Domain, error) { +func FromFlatIntervals(values []int64) (Domain, error) { if len(values) == 0 { return NewEmptyDomain(), nil } @@ -156,7 +156,7 @@ func FromFlatIntervals(values []int64_t) (Domain, error) { // FlattenedIntervals returns the flattened list of interval bounds of the domain. // For example, if Domain d is equal to `[0,2][5,5][9,10]` will return `[0,2,5,5,9,10]`. 
-func (d Domain) FlattenedIntervals() []int64_t { +func (d Domain) FlattenedIntervals() []int64 { var result []int64 for _, i := range d.intervals { result = append(result, i.Start, i.End) diff --git a/ortools/sat/go/domain_test.go b/ortools/sat/go/domain_test.go index 89bb4367d6..bfe0b13b97 100644 --- a/ortools/sat/go/domain_test.go +++ b/ortools/sat/go/domain_test.go @@ -71,23 +71,23 @@ func TestDomain_FromValues(t *testing.T) { want Domain }{ { - values: []int64_t{}, + values: []int64{}, want: Domain{}, }, { - values: []int64_t{4}, + values: []int64{4}, want: Domain{[]ClosedInterval{{4, 4}}}, }, { - values: []int64_t{1, 1, 3, 1, 2, 3, 2, 3}, + values: []int64{1, 1, 3, 1, 2, 3, 2, 3}, want: Domain{[]ClosedInterval{{1, 3}}}, }, { - values: []int64_t{1, 2, 3, 7, 8, -4}, + values: []int64{1, 2, 3, 7, 8, -4}, want: Domain{[]ClosedInterval{{-4, -4}, {1, 3}, {7, 8}}}, }, { - values: []int64_t{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}, + values: []int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}, want: Domain{[]ClosedInterval{{1, 6}, {8, 8}, {10, 12}, {15, 15}}}, }, } @@ -142,27 +142,27 @@ func TestDomain_FromFlatIntervals(t *testing.T) { wantError string }{ { - flatIntervals: []int64_t{}, + flatIntervals: []int64{}, wantDomain: Domain{}, }, { - flatIntervals: []int64_t{1}, + flatIntervals: []int64{1}, wantError: "must be a multiple of 2", }, { - flatIntervals: []int64_t{-1, 1, 3, 3, 5, 10}, + flatIntervals: []int64{-1, 1, 3, 3, 5, 10}, wantDomain: Domain{[]ClosedInterval{{-1, 1}, {3, 3}, {5, 10}}}, }, { - flatIntervals: []int64_t{3, 9, 6, 10}, + flatIntervals: []int64{3, 9, 6, 10}, wantDomain: Domain{[]ClosedInterval{{3, 10}}}, }, { - flatIntervals: []int64_t{3, 5, 5, 10}, + flatIntervals: []int64{3, 5, 5, 10}, wantDomain: Domain{[]ClosedInterval{{3, 10}}}, }, { - flatIntervals: []int64_t{5, 3, 4, -1}, + flatIntervals: []int64{5, 3, 4, -1}, wantDomain: Domain{}, }, } @@ -182,7 +182,7 @@ func TestDomain_FlattenedIntervals(t *testing.T) { d := Domain{[]ClosedInterval{{-1, 1}, {3, 
3}, {5, 10}}} got := d.FlattenedIntervals() - want := []int64_t{-1, 1, 3, 3, 5, 10} + want := []int64{-1, 1, 3, 3, 5, 10} if diff := cmp.Diff(want, got); diff != "" { t.Errorf("FlattenedIntervals() returned with unexpected diff (-want+got);\n%s", diff) } diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index bb0fe330fc..c1707af415 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -197,20 +197,29 @@ void IntegerEncoder::AddImplications( if (!add_implications_) return; DCHECK_EQ(it->second, associated_lit); - // Literal(after) => associated_lit - auto after_it = it; - ++after_it; - if (after_it != map.end()) { - sat_solver_->AddClauseDuringSearch( - {after_it->second.Negated(), associated_lit}); - } - - // associated_lit => Literal(before) + // Tricky: We compute the literal first because AddClauseDuringSearch() might + // propagate at level zero and mess up the map. + LiteralIndex before_index = kNoLiteralIndex; if (it != map.begin()) { auto before_it = it; --before_it; + before_index = before_it->second.Index(); + } + LiteralIndex after_index = kNoLiteralIndex; + { + auto after_it = it; + ++after_it; + if (after_it != map.end()) after_index = after_it->second.Index(); + } + + // Then we add the two implications. + if (after_index != kNoLiteralIndex) { sat_solver_->AddClauseDuringSearch( - {associated_lit.Negated(), before_it->second}); + {Literal(after_index).Negated(), associated_lit}); + } + if (before_index != kNoLiteralIndex) { + sat_solver_->AddClauseDuringSearch( + {associated_lit.Negated(), Literal(before_index)}); } } @@ -2123,7 +2132,8 @@ bool GenericLiteralWatcher::Propagate(Trail* trail) { // // TODO(user): The queue will not be emptied, but I am not sure the solver // will be left in an usable state. Fix if it become needed to resume - // the solve from the last time it was interrupted. + // the solve from the last time it was interrupted. In particular, we might + // want to call UpdateCallingNeeds()? 
if (test_limit > 100) { test_limit = 0; if (time_limit_->LimitReached()) break; @@ -2234,8 +2244,12 @@ bool GenericLiteralWatcher::Propagate(Trail* trail) { void GenericLiteralWatcher::Untrail(const Trail& trail, int trail_index) { if (propagation_trail_index_ <= trail_index) { // Nothing to do since we found a conflict before Propagate() was called. - CHECK_EQ(propagation_trail_index_, trail_index) - << " level " << trail.CurrentDecisionLevel(); + if (DEBUG_MODE) { + // The assumption is not always true if we are currently aborting. + if (time_limit_->LimitReached()) return; + CHECK_EQ(propagation_trail_index_, trail_index) + << " level " << trail.CurrentDecisionLevel(); + } return; } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index a4629893bd..42f0968268 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -300,9 +300,9 @@ struct AffineExpression { AffineExpression(IntegerVariable v) // NOLINT(runtime/explicit) : var(v), coeff(1) {} AffineExpression(IntegerVariable v, IntegerValue c) - : var(c > 0 ? v : NegationOf(v)), coeff(IntTypeAbs(c)) {} + : var(c >= 0 ? v : NegationOf(v)), coeff(IntTypeAbs(c)) {} AffineExpression(IntegerVariable v, IntegerValue c, IntegerValue cst) - : var(c > 0 ? v : NegationOf(v)), coeff(IntTypeAbs(c)), constant(cst) {} + : var(c >= 0 ? v : NegationOf(v)), coeff(IntTypeAbs(c)), constant(cst) {} // Returns the integer literal corresponding to expression >= value or // expression <= value. 
diff --git a/ortools/sat/lb_tree_search.cc b/ortools/sat/lb_tree_search.cc index 7d7e69db45..d7b127ddb9 100644 --- a/ortools/sat/lb_tree_search.cc +++ b/ortools/sat/lb_tree_search.cc @@ -47,7 +47,8 @@ namespace operations_research { namespace sat { LbTreeSearch::LbTreeSearch(Model* model) - : time_limit_(model->GetOrCreate()), + : name_(model->Name()), + time_limit_(model->GetOrCreate()), random_(model->GetOrCreate()), sat_solver_(model->GetOrCreate()), integer_encoder_(model->GetOrCreate()), @@ -298,8 +299,8 @@ SatSolver::Status LbTreeSearch::Search( const IntegerValue bound = nodes_[current_branch_[0]].MinObjective(); if (bound > current_objective_lb_) { shared_response_->UpdateInnerObjectiveBounds( - absl::StrCat("lb_tree_search (", SmallProgressString(), ") "), - bound, integer_trail_->LevelZeroUpperBound(objective_var_)); + absl::StrCat(name_, " (", SmallProgressString(), ") "), bound, + integer_trail_->LevelZeroUpperBound(objective_var_)); current_objective_lb_ = bound; if (VLOG_IS_ON(3)) DebugDisplayTree(current_branch_[0]); } diff --git a/ortools/sat/lb_tree_search.h b/ortools/sat/lb_tree_search.h index 8a72301aeb..60d7779b5b 100644 --- a/ortools/sat/lb_tree_search.h +++ b/ortools/sat/lb_tree_search.h @@ -138,6 +138,7 @@ class LbTreeSearch { std::string SmallProgressString() const; // Model singleton class used here. 
+ const std::string name_; TimeLimit* time_limit_; ModelRandomGenerator* random_; SatSolver* sat_solver_; diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index f385e2d670..8b343343cb 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -15,6 +15,7 @@ #define OR_TOOLS_SAT_LINEAR_CONSTRAINT_H_ #include +#include #include #include #include diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 42bb314e22..18a810cdc9 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -693,7 +693,9 @@ bool LinearProgrammingConstraint::SolveLp() { << " lvl:" << trail_->CurrentDecisionLevel() << " " << simplex_.GetProblemStatus() << " iter:" << simplex_.GetNumberOfIterations() - << " obj:" << simplex_.GetObjectiveValue(); + << " obj:" << simplex_.GetObjectiveValue() << " scaled:" + << objective_definition_->ScaleObjective( + simplex_.GetObjectiveValue()); if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) { lp_solution_is_set_ = true; diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index e3dd67785a..125d3ca856 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 87c5fc7f33..6d1e9a5151 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -23,6 +23,7 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/container/flat_hash_map.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" @@ -85,6 +86,20 @@ void CustomFifoQueue::Push(int id) { if (right_ == queue_.size()) right_ = 0; } +void CustomFifoQueue::FillAndSortTmpPositions(absl::Span elements) { + 
int index = 0; + const int capacity = queue_.size(); + for (const int id : elements) { + const int p = pos_[id]; + DCHECK_GE(p, 0); + DCHECK_EQ(queue_[p], id); + tmp_positions_[index++] = p >= left_ ? p : p + capacity; + } + std::sort(&tmp_positions_[0], &tmp_positions_[index]); + DCHECK(std::unique(&tmp_positions_[0], &tmp_positions_[index]) == + &tmp_positions_[index]); +} + void CustomFifoQueue::Reorder(absl::Span order) { if (order.size() <= 1) return; @@ -94,20 +109,12 @@ void CustomFifoQueue::Reorder(absl::Span order) { return ReorderDense(order); } - int index = 0; - for (const int id : order) { - const int p = pos_[id]; - DCHECK_GE(p, 0); - tmp_positions_[index++] = p >= left_ ? p : p + capacity; - } - std::sort(&tmp_positions_[0], &tmp_positions_[index]); - DCHECK(std::unique(&tmp_positions_[0], &tmp_positions_[index]) == - &tmp_positions_[index]); - - index = 0; - for (const int id : order) { - int p = tmp_positions_[index++]; + FillAndSortTmpPositions(order); + for (int i = 0; i < order.size(); ++i) { + int p = tmp_positions_[i]; if (p >= capacity) p -= capacity; + + const int id = order[i]; pos_[id] = p; queue_[p] = id; } @@ -144,20 +151,15 @@ void CustomFifoQueue::ReorderDense(absl::Span order) { DCHECK_EQ(order_index, order.size()); } +// TODO(user): combine this with reorder. +// This is slow, especially if we are dense. void CustomFifoQueue::SortByPos(absl::Span elements) { - std::sort(elements.begin(), elements.end(), - [this](const int id1, const int id2) { - const int p1 = pos_[id1]; - const int p2 = pos_[id2]; - if (p1 >= left_) { - if (p2 >= left_) return p1 < p2; - return true; - } else { - // p1 < left_. - if (p2 < left_) return p1 < p2; - return false; - } - }); + FillAndSortTmpPositions(elements); + const int capacity = queue_.size(); + for (int i = 0; i < elements.size(); ++i) { + const int p = tmp_positions_[i]; + elements[i] = queue_[p < capacity ? 
p : p - capacity]; + } } std::ostream& operator<<(std::ostream& os, const EnforcementStatus& e) { @@ -538,8 +540,12 @@ bool LinearPropagator::Propagate() { for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) { if (var >= var_to_constraint_ids_.size()) continue; SetPropagatedBy(var, -1); - AddWatchedToQueue(var); + AddWatchedToQueue(var, /*push_delayed_right_away=*/false); } + for (const int id : tmp_delayed_) { + AddToQueueIfNeeded(id); + } + tmp_delayed_.clear(); // We abort this propagator as soon as a Boolean is propagated, so that we // always finish the Boolean propagation first. This can happen when we push a @@ -549,13 +555,35 @@ bool LinearPropagator::Propagate() { // propagator might have pushed the same variable further. // // Empty FIFO queue. + // + // TODO(user): More than the propagation speed, I think it is important to + // have proper explanation, so if A pushes B, but later on the queue we have C + // that push A that push B again, that might be bad? We can try to avoid this + // even further, by organizing the queue in passes: + // - Scan all relevant constraints, remember who pushes but DO NOT push yet! + // - If no cycle, do not pushes constraint whose slack will changes due to + // other pushes. + // - consider the new constraint that need to be scanned and repeat. + // I think it is okay to scan twice the constraints that push something in + // order to get better explanation. We tend to diverge from the class shortest + // path algo in this regard. + // + // TODO(user): If we push the idea further, can we first compute the fix point + // without pushing anything, then compute a good order of constraints for the + // explanations? what is tricky is that we might need to "scan" more than once + // a constraint I think. ex: Y, Z, T >=0 + // - 2 * Y + Z + T <= 11 ==> Y <= 5, Z <= 11, T <= 11 (1) + // - Z + Y >= 6 ==> Z >= 1 + // - (1) again to push T <= 10 and reach the propagation fixed point. 
+ bool result = true; + num_terms_for_dtime_update_ = 0; const int saved_index = trail_->Index(); while (!propagation_queue_.empty()) { const int id = propagation_queue_.Pop(); in_queue_[id] = false; if (!PropagateOneConstraint(id)) { - modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables()); - return false; + result = false; + break; } if (trail_->Index() > saved_index) { @@ -565,8 +593,10 @@ bool LinearPropagator::Propagate() { } // Clean-up modified_vars_ to do as little as possible on the next call. + time_limit_->AdvanceDeterministicTime( + static_cast(num_terms_for_dtime_update_) * 1e-9); modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables()); - return true; + return result; } // Adds a new constraint to the propagator. @@ -603,6 +633,7 @@ bool LinearPropagator::AddConstraint( } id_to_propagation_count_.push_back(0); + id_propagated_something_.push_back(false); variables_buffer_.insert(variables_buffer_.end(), vars.begin(), vars.end()); coeffs_buffer_.insert(coeffs_buffer_.end(), coeffs.begin(), coeffs.end()); CanonicalizeConstraint(id); @@ -671,7 +702,12 @@ bool LinearPropagator::AddConstraint( } // Propagate this new constraint. - return PropagateOneConstraint(id); + // TODO(user): Do we want to do that? 
+ num_terms_for_dtime_update_ = 0; + const bool result = PropagateOneConstraint(id); + time_limit_->AdvanceDeterministicTime( + static_cast(num_terms_for_dtime_update_) * 1e-9); + return result; } absl::Span LinearPropagator::GetCoeffs( @@ -689,8 +725,8 @@ absl::Span LinearPropagator::GetVariables( void LinearPropagator::CanonicalizeConstraint(int id) { const ConstraintInfo& info = infos_[id]; - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); for (int i = 0; i < vars.size(); ++i) { if (coeffs[i] < 0) { coeffs[i] = -coeffs[i]; @@ -705,17 +741,34 @@ bool LinearPropagator::PropagateOneConstraint(int id) { // default though, even VLOG_IS_ON(1) so we disable it. if (/* DISABLES CODE */ (false)) { ++num_scanned_; - if (id_scanned_at_least_once_[id]) { - ++num_extra_scans_; - } else { - id_scanned_at_least_once_.Set(id); + if (id < id_scanned_at_least_once_.size()) { + if (id_scanned_at_least_once_[id]) { + ++num_extra_scans_; + } else { + id_scanned_at_least_once_.Set(id); + } } } // Skip constraint not enforced or that cannot propagate if false. ConstraintInfo& info = infos_[id]; const EnforcementStatus enf_status = EnforcementStatus(info.enf_status); - DCHECK_EQ(enf_status, enforcement_propagator_->DebugStatus(info.enf_id)); + if (DEBUG_MODE) { + const EnforcementStatus debug_status = + enforcement_propagator_->DebugStatus(info.enf_id); + if (enf_status != debug_status) { + if (enf_status == EnforcementStatus::CANNOT_PROPAGATE && + debug_status == EnforcementStatus::IS_FALSE) { + // This case might happen because in our two watched literals scheme, + // we might watch two unassigned literal without knowing another one is + // already false. 
+ } else { + LOG(FATAL) << "Enforcement status not up to date: " << enf_status + << " vs debug: " << debug_status; + } + } + } + if (enf_status == EnforcementStatus::IS_FALSE || enf_status == EnforcementStatus::CANNOT_PROPAGATE) { DCHECK(!in_queue_[id]); @@ -732,44 +785,72 @@ bool LinearPropagator::PropagateOneConstraint(int id) { // Compute the slack and max_variations_ of each variables. // We also filter out fixed variables in a reversible way. IntegerValue implied_lb(0); - auto vars = GetVariables(info); - auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); IntegerValue max_variation(0); bool first_change = true; - time_limit_->AdvanceDeterministicTime(static_cast(info.rev_size) * - 1e-9); - for (int i = 0; i < info.rev_size;) { - const IntegerVariable var = vars[i]; - const IntegerValue coeff = coeffs[i]; - const IntegerValue lb = integer_trail_->LowerBound(var); - const IntegerValue ub = integer_trail_->UpperBound(var); - if (lb == ub) { - if (first_change) { - // Note that we can save at most one state per fixed var. Also at - // level zero we don't save anything. - rev_int_repository_->SaveState(&info.rev_size); - rev_integer_value_repository_->SaveState(&info.rev_rhs); - first_change = false; + num_terms_for_dtime_update_ += info.rev_size; + IntegerValue* max_variations = max_variations_.data(); + if (info.all_coeffs_are_one) { + // TODO(user): Avoid duplication? + for (int i = 0; i < info.rev_size;) { + const IntegerVariable var = vars[i]; + const IntegerValue lb = integer_trail_->LowerBound(var); + const IntegerValue ub = integer_trail_->UpperBound(var); + if (lb == ub) { + if (first_change) { + // Note that we can save at most one state per fixed var. Also at + // level zero we don't save anything. 
+ rev_int_repository_->SaveState(&info.rev_size); + rev_integer_value_repository_->SaveState(&info.rev_rhs); + first_change = false; + } + info.rev_size--; + std::swap(vars[i], vars[info.rev_size]); + info.rev_rhs -= lb; + } else { + implied_lb += lb; + max_variations[i] = (ub - lb); + max_variation = std::max(max_variation, max_variations[i]); + ++i; + } + } + } else { + const auto coeffs = GetCoeffs(info); + for (int i = 0; i < info.rev_size;) { + const IntegerVariable var = vars[i]; + const IntegerValue coeff = coeffs[i]; + const IntegerValue lb = integer_trail_->LowerBound(var); + const IntegerValue ub = integer_trail_->UpperBound(var); + if (lb == ub) { + if (first_change) { + // Note that we can save at most one state per fixed var. Also at + // level zero we don't save anything. + rev_int_repository_->SaveState(&info.rev_size); + rev_integer_value_repository_->SaveState(&info.rev_rhs); + first_change = false; + } + info.rev_size--; + std::swap(vars[i], vars[info.rev_size]); + std::swap(coeffs[i], coeffs[info.rev_size]); + info.rev_rhs -= coeff * lb; + } else { + implied_lb += coeff * lb; + max_variations[i] = (ub - lb) * coeff; + max_variation = std::max(max_variation, max_variations[i]); + ++i; } - info.rev_size--; - std::swap(vars[i], vars[info.rev_size]); - std::swap(coeffs[i], coeffs[info.rev_size]); - info.rev_rhs -= coeff * lb; - } else { - implied_lb += coeff * lb; - max_variations_[i] = (ub - lb) * coeff; - max_variation = std::max(max_variation, max_variations_[i]); - ++i; } } const IntegerValue slack = info.rev_rhs - implied_lb; // Negative slack means the constraint is false. if (max_variation <= slack) return true; + id_propagated_something_[id] = true; if (slack < 0) { // Fill integer reason. 
integer_reason_.clear(); reason_coeffs_.clear(); + const auto coeffs = GetCoeffs(info); for (int i = 0; i < info.initial_size; ++i) { const IntegerVariable var = vars[i]; if (!integer_trail_->VariableLowerBoundIsFromLevelZero(var)) { @@ -794,8 +875,9 @@ bool LinearPropagator::PropagateOneConstraint(int id) { // The lower bound of all the variables except one can be used to update the // upper bound of the last one. int num_pushed = 0; + const auto coeffs = GetCoeffs(info); for (int i = 0; i < info.rev_size; ++i) { - if (max_variations_[i] <= slack) continue; + if (max_variations[i] <= slack) continue; // TODO(user): If the new ub fall into an hole of the variable, we can // actually relax the reason more by computing a better slack. @@ -817,8 +899,8 @@ bool LinearPropagator::PropagateOneConstraint(int id) { literal_reason); reason_coeffs_.clear(); - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); for (int i = 0; i < info.initial_size; ++i) { const IntegerVariable var = vars[i]; if (PositiveVariable(var) == PositiveVariable(i_lit.var)) { @@ -873,8 +955,8 @@ bool LinearPropagator::PropagateOneConstraint(int id) { std::string LinearPropagator::ConstraintDebugString(int id) { std::string result; const ConstraintInfo& info = infos_[id]; - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); IntegerValue implied_lb(0); IntegerValue rhs_correction(0); for (int i = 0; i < info.initial_size; ++i) { @@ -908,8 +990,8 @@ bool LinearPropagator::ReportConflictingCycle() { const ConstraintInfo& info = infos_[id]; enforcement_propagator_->AddEnforcementReason(info.enf_id, &literal_reason_); - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); IntegerValue rhs_correction(0); for (int i = 0; i < 
info.initial_size; ++i) { if (i >= info.rev_size) { @@ -1020,6 +1102,9 @@ bool LinearPropagator::ReportConflictingCycle() { // // TODO(user): If one of the var coeff is > previous slack we push an id again, // we can stop early with a conflict by propagating the ids in sequence. +// +// TODO(user): Revisit the algo, no point exploring twice the same var, also +// the queue reordering heuristic might not be the best. bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { disassemble_to_reorder_.ClearAndResize(in_queue_.size()); disassemble_reverse_topo_order_.clear(); @@ -1033,7 +1118,7 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { disassemble_branch_.clear(); { const ConstraintInfo& info = infos_[root_id]; - auto vars = GetVariables(info); + const auto vars = GetVariables(info); for (int i = 0; i < num_pushed; ++i) { disassemble_queue_.push_back({root_id, NegationOf(vars[i])}); } @@ -1041,6 +1126,7 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { // Note that all var should be unique since there is only one propagated_by_ // for each one. And each time we explore an id, we disassemble the tree. + absl::Span id_to_count = absl::MakeSpan(id_to_propagation_count_); while (!disassemble_queue_.empty()) { const auto [prev_id, var] = disassemble_queue_.back(); if (!disassemble_branch_.empty() && @@ -1053,16 +1139,11 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { } disassemble_branch_.push_back({prev_id, var}); + time_limit_->AdvanceDeterministicTime( static_cast(var_to_constraint_ids_[var].size()) * 1e-9); for (const int id : var_to_constraint_ids_[var]) { - if (prev_id == root_id) { - // Root id was just propagated, so there is no need to reorder what - // it pushes. 
- DCHECK_NE(id, root_id); - if (disassemble_to_reorder_[id]) continue; - disassemble_to_reorder_.Set(id); - } else if (id == root_id) { + if (id == root_id) { // TODO(user): Check previous slack vs var coeff? // TODO(user): Make sure there are none or detect cycle not going back // to the root. @@ -1081,16 +1162,16 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { // variation in slack might be big enough to push a variable twice and // thus push a lower coeff. const ConstraintInfo& info = infos_[id]; - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); IntegerValue root_coeff(0); IntegerValue var_coeff(0); for (int i = 0; i < info.initial_size; ++i) { if (vars[i] == var) var_coeff = coeffs[i]; if (vars[i] == NegationOf(root_var)) root_coeff = coeffs[i]; } - CHECK_NE(root_coeff, 0); - CHECK_NE(var_coeff, 0); + DCHECK_NE(root_coeff, 0); + DCHECK_NE(var_coeff, 0); if (var_coeff >= root_coeff) { return ReportConflictingCycle(); } else { @@ -1099,15 +1180,15 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { } } - if (id_to_propagation_count_[id] == 0) continue; // Didn't push. disassemble_to_reorder_.Set(id); + if (id_to_count[id] == 0) continue; // Didn't push or was desassembled. // The constraint pushed some variable. Identify which ones will be pushed // further. Disassemble the whole info since we are about to propagate // this constraint again. Any pushed variable must be before the rev_size. 
const ConstraintInfo& info = infos_[id]; - auto coeffs = GetCoeffs(info); - auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); IntegerValue var_coeff(0); disassemble_candidates_.clear(); ++num_explored_in_disassemble_; @@ -1124,7 +1205,7 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { // We will propagate var again later, so clear all this for now. propagated_by_[next_var] = -1; - id_to_propagation_count_[id]--; + id_to_count[id]--; } } for (const auto [next_var, coeff] : disassemble_candidates_) { @@ -1152,7 +1233,7 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_pushed) { tmp_to_reorder_.push_back(id); } - // TODO(user): Reordering can be sloe since require sort and can touch many + // TODO(user): Reordering can be slow since require sort and can touch many // entries. Investigate alternatives. We could probably optimize this a bit // more. if (tmp_to_reorder_.empty()) return true; @@ -1188,12 +1269,28 @@ void LinearPropagator::AddToQueueIfNeeded(int id) { propagation_queue_.Push(id); } -void LinearPropagator::AddWatchedToQueue(IntegerVariable var) { +void LinearPropagator::AddWatchedToQueue(IntegerVariable var, + bool push_delayed_right_away) { if (var >= static_cast(var_to_constraint_ids_.size())) return; time_limit_->AdvanceDeterministicTime( static_cast(var_to_constraint_ids_[var].size()) * 1e-9); + + // If a constraint propagated something and is getting tighter, then it + // will likely propagate again, and we want to propagate it first. for (const int id : var_to_constraint_ids_[var]) { - AddToQueueIfNeeded(id); + if (in_queue_[id]) continue; + if (true || id_propagated_something_[id]) { + id_propagated_something_[id] = false; // reset. 
+ AddToQueueIfNeeded(id); + } else { + tmp_delayed_.push_back(id); + } + } + if (push_delayed_right_away) { + for (const int id : tmp_delayed_) { + AddToQueueIfNeeded(id); + } + tmp_delayed_.clear(); } } diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index 17d7aa6e08..72757ab68d 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -66,6 +66,8 @@ class CustomFifoQueue { void SortByPos(absl::Span elements); private: + void FillAndSortTmpPositions(absl::Span elements); + // The queue is stored in [left_, right_) with eventual wrap around % size. // The positions of each element is in pos_[element] and never changes during // normal operation. A position of -1 means that the element is not in the @@ -226,7 +228,8 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { void ClearPropagatedBy(); void CanonicalizeConstraint(int id); void AddToQueueIfNeeded(int id); - void AddWatchedToQueue(IntegerVariable var); + void AddWatchedToQueue(IntegerVariable var, + bool push_delayed_right_away = true); void SetPropagatedBy(IntegerVariable var, int id); std::string ConstraintDebugString(int id); @@ -254,9 +257,6 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { std::deque infos_; // Buffer of the constraints data. - // - // TODO(user): A lot of constrains have all their coeffs at one, we could - // exploit this. std::vector variables_buffer_; std::vector coeffs_buffer_; std::vector buffer_of_ones_; @@ -302,6 +302,10 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { SparseBitset disassemble_to_reorder_; std::vector disassemble_reverse_topo_order_; + // Heuristic to enqueue interesting constraint first. + std::vector id_propagated_something_; + std::vector tmp_delayed_; + // Staging queue. // Initially, we add the constraint to the priority queue, and we extract // them one by one, each time reaching the propagation fixed point. 
@@ -315,6 +319,9 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { SparseBitset id_scanned_at_least_once_; int64_t num_extra_scans_ = 0; + // This is used to update the deterministic time. + int64_t num_terms_for_dtime_update_ = 0; + // Stats. int64_t num_pushes_ = 0; int64_t num_enforcement_pushes_ = 0; diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index 1a5a1629ea..0e1d7c827c 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -733,6 +733,7 @@ struct ConstraintScaler { const MPConstraintProto& mp_constraint, CpModelProto* cp_model); + bool keep_names = false; double max_relative_coeff_error = 0.0; double max_absolute_rhs_error = 0.0; double max_scaling_factor = 0.0; @@ -755,7 +756,7 @@ ConstraintProto* ConstraintScaler::AddConstraint( } auto* constraint = cp_model->add_constraints(); - constraint->set_name(mp_constraint.name()); + if (keep_names) constraint->set_name(mp_constraint.name()); auto* arg = constraint->mutable_linear(); // First scale the coefficients of the constraints so that the constraint @@ -959,10 +960,11 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters& params, // Add the variables. const int num_variables = mp_model.variable_size(); + const bool keep_names = !params.ignore_names(); for (int i = 0; i < num_variables; ++i) { const MPVariableProto& mp_var = mp_model.variable(i); IntegerVariableProto* cp_var = cp_model->add_variables(); - cp_var->set_name(mp_var.name()); + if (keep_names) cp_var->set_name(mp_var.name()); // Deal with the corner case of a domain far away from zero. // @@ -1024,6 +1026,7 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters& params, << params.mip_max_activity_exponent(); scaler.wanted_precision = kWantedPrecision; scaler.scaling_target = kScalingTarget; + scaler.keep_names = keep_names; // Add the constraints. We scale each of them individually. 
for (const MPConstraintProto& mp_constraint : mp_model.constraint()) { diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index 72f0eaace0..25e4db15b9 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -439,6 +439,7 @@ CoreBasedOptimizer::CoreBasedOptimizer( std::function feasible_solution_observer, Model* model) : parameters_(model->GetOrCreate()), sat_solver_(model->GetOrCreate()), + clauses_(model->GetOrCreate()), time_limit_(model->GetOrCreate()), implications_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), @@ -666,9 +667,8 @@ bool CoreBasedOptimizer::CoverOptimization() { } SatSolver::Status CoreBasedOptimizer::OptimizeWithSatEncoding( - const std::vector& literals, - const std::vector& vars, - const std::vector& coefficients, Coefficient offset) { + absl::Span literals, absl::Span vars, + absl::Span coefficients, Coefficient offset) { // Create one initial nodes per variables with cost. // TODO(user): We could create EncodingNode out of IntegerVariable. // @@ -758,9 +758,10 @@ SatSolver::Status CoreBasedOptimizer::OptimizeWithSatEncoding( const int num_bools = sat_solver_->NumVariables(); const int num_fixed = sat_solver_->NumFixedVariables(); model_->GetOrCreate()->UpdateInnerObjectiveBounds( - absl::StrFormat("bool_core (num_cores=%d [%s] a=%u d=%d fixed=%d/%d)", - iter, previous_core_info, encoder.nodes().size(), - max_depth, num_fixed, num_bools), + absl::StrFormat( + "bool_core (num_cores=%d [%s] a=%u d=%d fixed=%d/%d clauses=%s)", + iter, previous_core_info, encoder.nodes().size(), max_depth, + num_fixed, num_bools, FormatCounter(clauses_->num_clauses())), new_obj_lb, integer_trail_->LevelZeroUpperBound(objective_var_)); } diff --git a/ortools/sat/optimization.h b/ortools/sat/optimization.h index 5820783906..4482a3ebca 100644 --- a/ortools/sat/optimization.h +++ b/ortools/sat/optimization.h @@ -114,9 +114,9 @@ class CoreBasedOptimizer { // - Support resuming for interleaved search. 
// - Implement all core heurisitics. SatSolver::Status OptimizeWithSatEncoding( - const std::vector& literals, - const std::vector& vars, - const std::vector& coefficients, Coefficient offset); + absl::Span literals, + absl::Span vars, + absl::Span coefficients, Coefficient offset); private: CoreBasedOptimizer(const CoreBasedOptimizer&) = delete; @@ -168,6 +168,7 @@ class CoreBasedOptimizer { SatParameters* parameters_; SatSolver* sat_solver_; + ClauseManager* clauses_; TimeLimit* time_limit_; BinaryImplicationGraph* implications_; IntegerTrail* integer_trail_; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 291cf2fff4..50e01cf5ad 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -42,6 +42,7 @@ #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" #include "ortools/util/bitset.h" +#include "ortools/util/logging.h" #include "ortools/util/strong_integers.h" #include "ortools/util/time_limit.h" @@ -1021,112 +1022,174 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) { return true; } -int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause( - const absl::Span clause, Model* model) { +void GreaterThanAtLeastOneOfDetector::Add(Literal lit, LinearTerm a, + LinearTerm b, IntegerValue lhs, + IntegerValue rhs) { + Relation r; + r.enforcement = lit; + r.a = a; + r.b = b; + r.lhs = lhs; + r.rhs = rhs; + + // We shall only consider positive variable here. 
+ if (r.a.var != kNoIntegerVariable && !VariableIsPositive(r.a.var)) { + r.a.var = NegationOf(r.a.var); + r.a.coeff = -r.a.coeff; + } + if (r.b.var != kNoIntegerVariable && !VariableIsPositive(r.b.var)) { + r.b.var = NegationOf(r.b.var); + r.b.coeff = -r.b.coeff; + } + + const int index = relations_.size(); + relations_.push_back(std::move(r)); + + if (lit.Index() >= lit_to_relations_.size()) { + lit_to_relations_.resize(lit.Index() + 1); + } + lit_to_relations_[lit.Index()].push_back(index); +} + +bool GreaterThanAtLeastOneOfDetector::AddRelationFromIndices( + IntegerVariable var, absl::Span clause, + absl::Span indices, Model* model) { + std::vector exprs; + std::vector selectors; + absl::flat_hash_set used; + auto* integer_trail = model->GetOrCreate(); + + const IntegerValue var_lb = integer_trail->LevelZeroLowerBound(var); + for (const int index : indices) { + Relation r = relations_[index]; + if (r.a.var != PositiveVariable(var)) std::swap(r.a, r.b); + CHECK_EQ(r.a.var, PositiveVariable(var)); + + if ((r.a.coeff == 1) == VariableIsPositive(var)) { + // a + b >= lhs + if (r.lhs <= kMinIntegerValue) continue; + exprs.push_back(AffineExpression(r.b.var, -r.b.coeff, r.lhs)); + } else { + // -a + b <= rhs. + if (r.rhs >= kMaxIntegerValue) continue; + exprs.push_back(AffineExpression(r.b.var, r.b.coeff, -r.rhs)); + } + + // Ignore this entry if it is always true. + if (var_lb >= integer_trail->LevelZeroUpperBound(exprs.back())) { + exprs.pop_back(); + continue; + } + + // Note that duplicate selector are supported. + selectors.push_back(r.enforcement); + used.insert(r.enforcement); + } + + // The enforcement of the new constraint are simply the literal not used + // above. + std::vector enforcements; + for (const Literal l : clause) { + if (!used.contains(l.Index())) { + enforcements.push_back(l.Negated()); + } + } + + // No point adding a constraint if there is not at least two different + // literals in selectors. 
+ if (used.size() <= 1) return false; + + // Add the constraint. + GreaterThanAtLeastOneOfPropagator* constraint = + new GreaterThanAtLeastOneOfPropagator(var, exprs, selectors, enforcements, + model); + constraint->RegisterWith(model->GetOrCreate()); + model->TakeOwnership(constraint); + return true; +} + +int GreaterThanAtLeastOneOfDetector:: + AddGreaterThanAtLeastOneOfConstraintsFromClause( + const absl::Span clause, Model* model) { CHECK_EQ(model->GetOrCreate()->CurrentDecisionLevel(), 0); if (clause.size() < 2) return 0; - // Collect all arcs impacted by this clause. - std::vector infos; + // Collect all relations impacted by this clause. + std::vector> infos; for (const Literal l : clause) { - if (l.Index() >= literal_to_new_impacted_arcs_.size()) continue; - for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[l.Index()]) { - const ArcInfo& arc = arcs_[arc_index]; - if (arc.presence_literals.size() != 1) continue; - - // TODO(user): Support variable offset. - if (arc.offset_var != kNoIntegerVariable) continue; - infos.push_back(arc); + if (l.Index() >= lit_to_relations_.size()) continue; + for (const int index : lit_to_relations_[l.Index()]) { + const Relation& r = relations_[index]; + if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { + infos.push_back({r.a.var, index}); + } + if (r.b.var != kNoIntegerVariable && IntTypeAbs(r.b.coeff) == 1) { + infos.push_back({r.b.var, index}); + } } } if (infos.size() <= 1) return 0; - // Stable sort by head_var so that for a same head_var, the entry are sorted - // by Literal as they appear in clause. - std::stable_sort(infos.begin(), infos.end(), - [](const ArcInfo& a, const ArcInfo& b) { - return a.head_var < b.head_var; - }); + // Stable sort to regroup by var. + std::stable_sort(infos.begin(), infos.end()); - // We process ArcInfo with the same head_var toghether. + // We process the info with same variable together. 
int num_added_constraints = 0; - auto* solver = model->GetOrCreate(); + std::vector indices; for (int i = 0; i < infos.size();) { const int start = i; - const IntegerVariable head_var = infos[start].head_var; - for (i++; i < infos.size() && infos[i].head_var == head_var; ++i) { - } - const absl::Span arcs(&infos[start], i - start); + const IntegerVariable var = infos[start].first; - // Skip single arcs since it will already be fully propagated. - if (arcs.size() < 2) continue; - - // Heuristic. Look for full or almost full clauses. We could add - // GreaterThanAtLeastOneOf() with more enforcement literals. TODO(user): - // experiments. - if (arcs.size() + 1 < clause.size()) continue; - - std::vector vars; - std::vector offsets; - std::vector selectors; - std::vector enforcements; - - int j = 0; - for (const Literal l : clause) { - bool added = false; - for (; j < arcs.size() && l == arcs[j].presence_literals.front(); ++j) { - added = true; - vars.push_back(arcs[j].tail_var); - offsets.push_back(arcs[j].offset); - - // Note that duplicate selector are supported. - // - // TODO(user): If we support variable offset, we should regroup the arcs - // into one (tail + offset <= head) though, instead of having too - // identical entries. - selectors.push_back(l); - } - if (!added) { - enforcements.push_back(l.Negated()); - } + indices.clear(); + for (; i < infos.size() && infos[i].first == var; ++i) { + indices.push_back(infos[i].second); } - // No point adding a constraint if there is not at least two different - // literals in selectors. - if (enforcements.size() + 1 == clause.size()) continue; + // Skip single relations, we are not interested in these. + if (indices.size() < 2) continue; - ++num_added_constraints; - model->Add(GreaterThanAtLeastOneOf(head_var, vars, offsets, selectors, - enforcements)); - if (!solver->FinishPropagation()) return num_added_constraints; + // Heuristic. Look for full or almost full clauses. 
+ // + // TODO(user): We could add GreaterThanAtLeastOneOf() with more enforcement + // literals. Experiment. + if (indices.size() + 1 < clause.size()) continue; + + if (AddRelationFromIndices(var, clause, indices, model)) { + ++num_added_constraints; + } + if (AddRelationFromIndices(NegationOf(var), clause, indices, model)) { + ++num_added_constraints; + } } return num_added_constraints; } -int PrecedencesPropagator:: +int GreaterThanAtLeastOneOfDetector:: AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(Model* model) { auto* time_limit = model->GetOrCreate(); auto* solver = model->GetOrCreate(); - // Fill the set of incoming conditional arcs for each variables. - absl::StrongVector> incoming_arcs_; - for (ArcIndex arc_index(0); arc_index < arcs_.size(); ++arc_index) { - const ArcInfo& arc = arcs_[arc_index]; - - // Only keep arc that have a fixed offset and a single presence_literals. - if (arc.offset_var != kNoIntegerVariable) continue; - if (arc.tail_var == arc.head_var) continue; - if (arc.presence_literals.size() != 1) continue; - - if (arc.head_var >= incoming_arcs_.size()) { - incoming_arcs_.resize(arc.head_var.value() + 1); + // Fill the set of interesting relations for each variables. 
+ absl::StrongVector> var_to_relations; + for (int index = 0; index < relations_.size(); ++index) { + const Relation& r = relations_[index]; + if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { + if (r.a.var >= var_to_relations.size()) { + var_to_relations.resize(r.a.var + 1); + } + var_to_relations[r.a.var].push_back(index); + } + if (r.b.var != kNoIntegerVariable && IntTypeAbs(r.b.coeff) == 1) { + if (r.b.var >= var_to_relations.size()) { + var_to_relations.resize(r.b.var + 1); + } + var_to_relations[r.b.var].push_back(index); } - incoming_arcs_[arc.head_var].push_back(arc_index); } int num_added_constraints = 0; - for (IntegerVariable target(0); target < incoming_arcs_.size(); ++target) { - if (incoming_arcs_[target].size() <= 1) continue; + for (IntegerVariable target(0); target < var_to_relations.size(); ++target) { + if (var_to_relations[target].size() <= 1) continue; if (time_limit->LimitReached()) return num_added_constraints; // Detect set of incoming arcs for which at least one must be present. @@ -1135,55 +1198,56 @@ int PrecedencesPropagator:: solver->Backtrack(0); if (solver->ModelIsUnsat()) return num_added_constraints; std::vector clause; - for (const ArcIndex arc_index : incoming_arcs_[target]) { - const Literal literal = arcs_[arc_index].presence_literals.front(); + for (const int index : var_to_relations[target]) { + const Literal literal = relations_[index].enforcement; if (solver->Assignment().LiteralIsFalse(literal)) continue; const SatSolver::Status status = solver->EnqueueDecisionAndBacktrackOnConflict(literal.Negated()); if (status == SatSolver::INFEASIBLE) return num_added_constraints; if (status == SatSolver::ASSUMPTIONS_UNSAT) { + // We need to invert it, since a clause is not all false. 
clause = solver->GetLastIncompatibleDecisions(); + for (Literal& ref : clause) ref = ref.Negated(); break; } } solver->Backtrack(0); + if (clause.size() <= 1) continue; - if (clause.size() > 1) { - // Extract the set of arc for which at least one must be present. - const absl::btree_set clause_set(clause.begin(), clause.end()); - std::vector arcs_in_clause; - for (const ArcIndex arc_index : incoming_arcs_[target]) { - const Literal literal(arcs_[arc_index].presence_literals.front()); - if (clause_set.contains(literal.Negated())) { - arcs_in_clause.push_back(arc_index); - } + // Recover the indices corresponding to this clause. + const absl::btree_set clause_set(clause.begin(), clause.end()); + + std::vector indices; + for (const int index : var_to_relations[target]) { + const Literal literal = relations_[index].enforcement; + if (clause_set.contains(literal)) { + indices.push_back(index); } + } - VLOG(2) << arcs_in_clause.size() << "/" << incoming_arcs_[target].size(); - + // Try both direction. 
+ if (AddRelationFromIndices(target, clause, indices, model)) { + ++num_added_constraints; + } + if (AddRelationFromIndices(NegationOf(target), clause, indices, model)) { ++num_added_constraints; - std::vector vars; - std::vector offsets; - std::vector selectors; - for (const ArcIndex a : arcs_in_clause) { - vars.push_back(arcs_[a].tail_var); - offsets.push_back(arcs_[a].offset); - selectors.push_back(Literal(arcs_[a].presence_literals.front())); - } - model->Add(GreaterThanAtLeastOneOf(target, vars, offsets, selectors, {})); - if (!solver->FinishPropagation()) return num_added_constraints; } } + solver->Backtrack(0); return num_added_constraints; } -int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model* model) { - VLOG(1) << "Detecting GreaterThanAtLeastOneOf() constraints..."; +int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( + Model* model, bool auto_detect_clauses) { auto* time_limit = model->GetOrCreate(); auto* solver = model->GetOrCreate(); auto* clauses = model->GetOrCreate(); + auto* logger = model->GetOrCreate(); + int num_added_constraints = 0; + SOLVER_LOG(logger, "[Precedences] num_relations=", relations_.size(), + " num_clauses=", clauses->AllClausesInCreationOrder().size()); // We have two possible approaches. For now, we prefer the first one except if // there is too many clauses in the problem. @@ -1191,7 +1255,8 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model* model) { // TODO(user): Do more extensive experiment. Remove the second approach as // it is more time consuming? or identify when it make sense. Note that the // first approach also allows to use "incomplete" at least one between arcs. - if (clauses->AllClausesInCreationOrder().size() < 1e6) { + if (!auto_detect_clauses && + clauses->AllClausesInCreationOrder().size() < 1e6) { // TODO(user): This does not take into account clause of size 2 since they // are stored in the BinaryImplicationGraph instead. 
Some ideas specific // to size 2: @@ -1229,10 +1294,14 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model* model) { } if (num_added_constraints > 0) { - SOLVER_LOG(model->GetOrCreate(), "[Precedences] Added ", - num_added_constraints, + SOLVER_LOG(logger, "[Precedences] Added ", num_added_constraints, " GreaterThanAtLeastOneOf() constraints."); } + + // Release the memory, it is not longer needed. + gtl::STLClearObject(&relations_); + gtl::STLClearObject(&lit_to_relations_); + return num_added_constraints; } diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 6b329fd9df..41245d5395 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -234,16 +234,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { void ComputePartialPrecedences(const std::vector& vars, std::vector* output); - // Advanced usage. To be called once all the constraints have been added to - // the model. This will loop over all "node" in this class, and if one of its - // optional incoming arcs must be chosen, it will add a corresponding - // GreaterThanAtLeastOneOfConstraint(). Returns the number of added - // constraint. - // - // TODO(user): This can be quite slow, add some kind of deterministic limit - // so that we can use it all the time. - int AddGreaterThanAtLeastOneOfConstraints(Model* model); - // If known, return an offset such that we have a + offset <= b. // Note that this only cover the case where this was conditionned by a single // literal. @@ -262,19 +252,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { DEFINE_STRONG_INDEX_TYPE(ArcIndex); DEFINE_STRONG_INDEX_TYPE(OptionalArcIndex); - // Given an existing clause, sees if it can be used to add "greater than at - // least one of" type of constraints. Returns the number of such constraint - // added. 
- int AddGreaterThanAtLeastOneOfConstraintsFromClause( - absl::Span clause, Model* model); - - // Another approach for AddGreaterThanAtLeastOneOfConstraints(), this one - // might be a bit slow as it relies on the propagation engine to detect - // clauses between incoming arcs presence literals. - // Returns the number of added constraints. - int AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection( - Model* model); - // Information about an individual arc. struct ArcInfo { IntegerVariable tail_var; @@ -432,6 +409,66 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { int64_t num_enforcement_pushes_ = 0; }; +// Similar to AffineExpression, but with a zero constant. +// If coeff is zero, then this is always zero and var is ignored. +struct LinearTerm { + IntegerVariable var = kNoIntegerVariable; + IntegerValue coeff = IntegerValue(0); +}; + +// This collect all enforced linear of size 2 or 1 and detect if at least one of +// a subset touching the same variable must be true. When this is the case +// we add a new propagator to propagate that fact. +// +// TODO(user): Shall we do that on the main thread before the workers are +// spawned? note that the probing version need the model to be loaded though. +class GreaterThanAtLeastOneOfDetector { + public: + // Adds a relation lit => a + b \in [lhs, rhs]. + void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, + IntegerValue rhs); + + // Advanced usage. To be called once all the constraints have been added to + // the model. This will detect GreaterThanAtLeastOneOfConstraint(). + // Returns the number of added constraint. + // + // TODO(user): This can be quite slow, add some kind of deterministic limit + // so that we can use it all the time. + int AddGreaterThanAtLeastOneOfConstraints(Model* model, + bool auto_detect_clauses = false); + + private: + // Given an existing clause, sees if it can be used to add "greater than at + // least one of" type of constraints. 
Returns the number of such constraint + // added. + int AddGreaterThanAtLeastOneOfConstraintsFromClause( + absl::Span clause, Model* model); + + // Another approach for AddGreaterThanAtLeastOneOfConstraints(), this one + // might be a bit slow as it relies on the propagation engine to detect + // clauses between incoming arcs presence literals. + // Returns the number of added constraints. + int AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection( + Model* model); + + // Once we identified a clause and relevant indices, this build the + // constraint. Returns true if we actually add it. + bool AddRelationFromIndices(IntegerVariable var, + absl::Span clause, + absl::Span indices, Model* model); + + struct Relation { + Literal enforcement; + LinearTerm a; + LinearTerm b; + IntegerValue lhs; + IntegerValue rhs; + }; + + std::vector relations_; + absl::StrongVector> lit_to_relations_; +}; + // ============================================================================= // Implementation of the small API functions below. // ============================================================================= diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index c661ae2ef4..d4d24ef56a 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -300,7 +300,7 @@ bool Prober::ProbeBooleanVariables( } bool Prober::ProbeDnf(absl::string_view name, - const std::vector>& dnf) { + absl::Span> dnf) { if (dnf.size() <= 1) return true; // Reset the solver in case it was already used. diff --git a/ortools/sat/probing.h b/ortools/sat/probing.h index 56def373d2..801951896b 100644 --- a/ortools/sat/probing.h +++ b/ortools/sat/probing.h @@ -87,7 +87,7 @@ class Prober { // the conjunction must be true, we might be able to fix literal or improve // integer bounds if all conjunction propagate the same thing. bool ProbeDnf(absl::string_view name, - const std::vector>& dnf); + absl::Span> dnf); // Statistics. 
// They are reset each time ProbleBooleanVariables() is called. diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 98deb74acc..ad76a44746 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -47,7 +47,6 @@ rather than for solving specific optimization problems. import collections import itertools -import numbers import threading import time from typing import ( @@ -66,6 +65,7 @@ from typing import ( ) import warnings +import numpy as np import pandas as pd from ortools.sat import cp_model_pb2 @@ -119,16 +119,48 @@ PARTIAL_FIXED_SEARCH = sat_parameters_pb2.SatParameters.PARTIAL_FIXED_SEARCH RANDOMIZED_SEARCH = sat_parameters_pb2.SatParameters.RANDOMIZED_SEARCH # Type aliases -# We need to add int to numbers.Integral -IntegralT = Union[numbers.Integral, int] -# We need to add int and float, otherwise type checkers complain. -NumberT = Union[numbers.Integral, int, numbers.Number, float] +IntegralT = Union[int, np.int8, np.uint8, np.int32, np.uint32, np.int64, np.uint64] +IntegralTypes = ( + int, + np.int8, + np.uint8, + np.int32, + np.uint32, + np.int64, + np.uint64, +) +NumberT = Union[ + int, + float, + np.int8, + np.uint8, + np.int32, + np.uint32, + np.int64, + np.uint64, + np.double, +] +NumberTypes = ( + int, + float, + np.int8, + np.uint8, + np.int32, + np.uint32, + np.int64, + np.uint64, + np.double, +) + LiteralT = Union["IntVar", "_NotBooleanVariable", IntegralT, bool] BoolVarT = Union["IntVar", "_NotBooleanVariable"] VariableT = Union["IntVar", IntegralT] + +# We need to add 'IntVar' for pytype. 
LinearExprT = Union["LinearExpr", "IntVar", IntegralT] -ObjLinearExprT = Union["LinearExpr", "IntVar", NumberT] +ObjLinearExprT = Union["LinearExpr", NumberT] BoundedLinearExprT = Union["BoundedLinearExpression", bool] + ArcT = Tuple[IntegralT, IntegralT, LiteralT] _IndexOrSeries = Union[pd.Index, pd.Series] @@ -229,8 +261,7 @@ class LinearExpr: cls, expressions: Sequence[LinearExprT], coefficients: Sequence[IntegralT], - ) -> LinearExprT: - ... + ) -> LinearExprT: ... @overload @classmethod @@ -238,8 +269,7 @@ class LinearExpr: cls, expressions: Sequence[ObjLinearExprT], coefficients: Sequence[NumberT], - ) -> ObjLinearExprT: - ... + ) -> ObjLinearExprT: ... @classmethod def weighted_sum(cls, expressions, coefficients): @@ -257,8 +287,7 @@ class LinearExpr: cls, expressions: LinearExprT, coefficients: IntegralT, - ) -> LinearExprT: - ... + ) -> LinearExprT: ... @overload @classmethod @@ -266,8 +295,7 @@ class LinearExpr: cls, expressions: ObjLinearExprT, coefficients: NumberT, - ) -> ObjLinearExprT: - ... + ) -> ObjLinearExprT: ... @classmethod def term(cls, expression, coefficient): @@ -311,14 +339,16 @@ class LinearExpr: else: return _WeightedSum(variables, coeffs, offset) - def get_integer_var_value_map(self) -> Tuple[Dict["IntVar", IntegralT], int]: + def get_integer_var_value_map(self) -> Tuple[Dict["IntVar", int], int]: """Scans the expression, and returns (var_coef_map, constant).""" - coeffs = collections.defaultdict(int) + coeffs: Dict["IntVar", int] = collections.defaultdict(int) constant = 0 - to_process: List[Tuple[LinearExprT, IntegralT]] = [(self, 1)] + to_process: List[Tuple[LinearExprT, int]] = [(self, 1)] while to_process: # Flatten to avoid recursion. 
+ expr: LinearExprT + coeff: int expr, coeff = to_process.pop() - if isinstance(expr, numbers.Integral): + if isinstance(expr, IntegralTypes): constant += coeff * int(expr) elif isinstance(expr, _ProductCst): to_process.append((expr.expression(), coeff * expr.coefficient())) @@ -347,14 +377,14 @@ class LinearExpr: self, ) -> Tuple[Dict["IntVar", float], float, bool]: """Scans the expression. Returns (var_coef_map, constant, is_integer).""" - coeffs = {} - constant = 0 - to_process: List[Tuple[LinearExprT, Union[IntegralT, float]]] = [(self, 1)] + coeffs: Dict["IntVar", Union[int, float]] = {} + constant: Union[int, float] = 0 + to_process: List[Tuple[LinearExprT, Union[int, float]]] = [(self, 1)] while to_process: # Flatten to avoid recursion. expr, coeff = to_process.pop() - if isinstance(expr, numbers.Integral): # Keep integrality. + if isinstance(expr, IntegralTypes): # Keep integrality. constant += coeff * int(expr) - elif isinstance(expr, numbers.Number): + elif isinstance(expr, NumberTypes): constant += coeff * float(expr) elif isinstance(expr, _ProductCst): to_process.append((expr.expression(), coeff * expr.coefficient())) @@ -382,10 +412,10 @@ class LinearExpr: coeffs[expr.negated()] = -coeff else: raise TypeError("Unrecognized linear expression: " + str(expr)) - is_integer = isinstance(constant, numbers.Integral) + is_integer = isinstance(constant, IntegralTypes) if is_integer: for coeff in coeffs.values(): - if not isinstance(coeff, numbers.Integral): + if not isinstance(coeff, IntegralTypes): is_integer = False break return coeffs, constant, is_integer @@ -400,12 +430,10 @@ class LinearExpr: ) @overload - def __add__(self, arg: LinearExprT) -> LinearExprT: - ... + def __add__(self, arg: "LinearExpr") -> "LinearExpr": ... @overload - def __add__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __add__(self, arg: NumberT) -> "LinearExpr": ... 
def __add__(self, arg): if cmh.is_zero(arg): @@ -413,53 +441,43 @@ class LinearExpr: return _Sum(self, arg) @overload - def __radd__(self, arg: LinearExprT) -> LinearExprT: - ... + def __radd__(self, arg: "LinearExpr") -> "LinearExpr": ... @overload - def __radd__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __radd__(self, arg: NumberT) -> "LinearExpr": ... def __radd__(self, arg): - if cmh.is_zero(arg): - return self - return _Sum(self, arg) + return self.__add__(arg) @overload - def __sub__(self, arg: LinearExprT) -> LinearExprT: - ... + def __sub__(self, arg: "LinearExpr") -> "LinearExpr": ... @overload - def __sub__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __sub__(self, arg: NumberT) -> "LinearExpr": ... def __sub__(self, arg): if cmh.is_zero(arg): return self - if isinstance(arg, numbers.Number): + if isinstance(arg, NumberTypes): arg = cmh.assert_is_a_number(arg) return _Sum(self, -arg) else: return _Sum(self, -arg) @overload - def __rsub__(self, arg: LinearExprT) -> LinearExprT: - ... + def __rsub__(self, arg: "LinearExpr") -> "LinearExpr": ... @overload - def __rsub__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __rsub__(self, arg: NumberT) -> "LinearExpr": ... def __rsub__(self, arg): return _Sum(-self, arg) @overload - def __mul__(self, arg: LinearExprT) -> LinearExprT: - ... + def __mul__(self, arg: IntegralT) -> Union["LinearExpr", IntegralT]: ... @overload - def __mul__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __mul__(self, arg: NumberT) -> Union["LinearExpr", NumberT]: ... def __mul__(self, arg): arg = cmh.assert_is_a_number(arg) @@ -470,20 +488,13 @@ class LinearExpr: return _ProductCst(self, arg) @overload - def __rmul__(self, arg: LinearExprT) -> LinearExprT: - ... + def __rmul__(self, arg: IntegralT) -> Union["LinearExpr", IntegralT]: ... @overload - def __rmul__(self, arg: ObjLinearExprT) -> ObjLinearExprT: - ... + def __rmul__(self, arg: NumberT) -> Union["LinearExpr", NumberT]: ... 
def __rmul__(self, arg): - arg = cmh.assert_is_a_number(arg) - if cmh.is_one(arg): - return self - elif cmh.is_zero(arg): - return 0 - return _ProductCst(self, arg) + return self.__mul__(arg) def __div__(self, _) -> NoReturn: raise NotImplementedError( @@ -537,7 +548,7 @@ class LinearExpr: "please use CpModel.add_bool_xor" ) - def __neg__(self) -> LinearExprT: + def __neg__(self) -> "LinearExpr": return _ProductCst(self, -1) def __bool__(self) -> NoReturn: @@ -545,31 +556,33 @@ class LinearExpr: "Evaluating a LinearExpr instance as a Boolean is not implemented." ) - def __eq__(self, arg: LinearExprT) -> BoundedLinearExprT: + def __eq__(self, arg: LinearExprT) -> BoundedLinearExprT: # type: ignore[override] if arg is None: return False - if isinstance(arg, numbers.Integral): + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) return BoundedLinearExpression(self, [arg, arg]) - else: + elif isinstance(arg, LinearExpr): return BoundedLinearExpression(self - arg, [0, 0]) + else: + return False - def __ge__(self, arg: LinearExprT) -> BoundedLinearExprT: - if isinstance(arg, numbers.Integral): + def __ge__(self, arg: LinearExprT) -> "BoundedLinearExpression": + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) return BoundedLinearExpression(self, [arg, INT_MAX]) else: return BoundedLinearExpression(self - arg, [0, INT_MAX]) - def __le__(self, arg: LinearExprT) -> BoundedLinearExprT: - if isinstance(arg, numbers.Integral): + def __le__(self, arg: LinearExprT) -> "BoundedLinearExpression": + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) return BoundedLinearExpression(self, [INT_MIN, arg]) else: return BoundedLinearExpression(self - arg, [INT_MIN, 0]) - def __lt__(self, arg: LinearExprT) -> BoundedLinearExprT: - if isinstance(arg, numbers.Integral): + def __lt__(self, arg: LinearExprT) -> "BoundedLinearExpression": + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) if arg == INT_MIN: raise 
ArithmeticError("< INT_MIN is not supported") @@ -577,8 +590,8 @@ class LinearExpr: else: return BoundedLinearExpression(self - arg, [INT_MIN, -1]) - def __gt__(self, arg: LinearExprT) -> BoundedLinearExprT: - if isinstance(arg, numbers.Integral): + def __gt__(self, arg: LinearExprT) -> "BoundedLinearExpression": + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) if arg == INT_MAX: raise ArithmeticError("> INT_MAX is not supported") @@ -586,10 +599,10 @@ class LinearExpr: else: return BoundedLinearExpression(self - arg, [1, INT_MAX]) - def __ne__(self, arg: LinearExprT) -> BoundedLinearExprT: + def __ne__(self, arg: LinearExprT) -> BoundedLinearExprT: # type: ignore[override] if arg is None: return True - if isinstance(arg, numbers.Integral): + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) if arg == INT_MAX: return BoundedLinearExpression(self, [INT_MIN, INT_MAX - 1]) @@ -599,8 +612,10 @@ class LinearExpr: return BoundedLinearExpression( self, [INT_MIN, arg - 1, arg + 1, INT_MAX] ) - else: + elif isinstance(arg, LinearExpr): return BoundedLinearExpression(self - arg, [INT_MIN, -1, 1, INT_MAX]) + else: + return True # Compatibility with pre PEP8 # pylint: disable=invalid-name @@ -615,8 +630,7 @@ class LinearExpr: cls, expressions: Sequence[LinearExprT], coefficients: Sequence[IntegralT], - ) -> LinearExprT: - ... + ) -> LinearExprT: ... @overload @classmethod @@ -624,8 +638,7 @@ class LinearExpr: cls, expressions: Sequence[ObjLinearExprT], coefficients: Sequence[NumberT], - ) -> ObjLinearExprT: - ... + ) -> ObjLinearExprT: ... @classmethod def WeightedSum(cls, expressions, coefficients): @@ -638,8 +651,7 @@ class LinearExpr: cls, expressions: LinearExprT, coefficients: IntegralT, - ) -> LinearExprT: - ... + ) -> LinearExprT: ... @overload @classmethod @@ -647,8 +659,7 @@ class LinearExpr: cls, expressions: ObjLinearExprT, coefficients: NumberT, - ) -> ObjLinearExprT: - ... + ) -> ObjLinearExprT: ... 
@classmethod def Term(cls, expression, coefficient): @@ -663,7 +674,7 @@ class _Sum(LinearExpr): def __init__(self, left, right): for x in [left, right]: - if not isinstance(x, (numbers.Number, LinearExpr)): + if not isinstance(x, (NumberTypes, LinearExpr)): raise TypeError("not an linear expression: " + str(x)) self.__left = left self.__right = right @@ -716,7 +727,7 @@ class _SumArray(LinearExpr): self.__expressions = [] self.__constant = constant for x in expressions: - if isinstance(x, numbers.Number): + if isinstance(x, NumberTypes): if cmh.is_zero(x): continue x = cmh.assert_is_a_number(x) @@ -762,7 +773,7 @@ class _WeightedSum(LinearExpr): c = cmh.assert_is_a_number(c) if cmh.is_zero(c): continue - if isinstance(e, numbers.Number): + if isinstance(e, NumberTypes): e = cmh.assert_is_a_number(e) self.__constant += e * c elif isinstance(e, LinearExpr): @@ -829,9 +840,9 @@ class IntVar(LinearExpr): def __init__( self, model: cp_model_pb2.CpModelProto, - domain: Union[int, Domain], + domain: Union[int, sorted_interval_list.Domain], name: Optional[str], - ): + ) -> None: """See CpModel.new_int_var below.""" self.__negation: Optional[_NotBooleanVariable] = None # Python do not support multiple __init__ methods. @@ -841,13 +852,15 @@ class IntVar(LinearExpr): # model is a CpModelProto, domain is a Domain, and name is a string. # case 2: # model is a CpModelProto, domain is an index (int), and name is None. 
- if isinstance(domain, numbers.Integral) and name is None: + if isinstance(domain, IntegralTypes) and name is None: self.__index: int = int(domain) self.__var: cp_model_pb2.IntegerVariableProto = model.variables[domain] else: self.__index: int = len(model.variables) self.__var: cp_model_pb2.IntegerVariableProto = model.variables.add() - self.__var.domain.extend(cast(Domain, domain).flattened_intervals()) + self.__var.domain.extend( + cast(sorted_interval_list.Domain, domain).flattened_intervals() + ) self.__var.name = name @property @@ -974,7 +987,7 @@ class BoundedLinearExpression: model.add(x + 2 * y -1 >= z) """ - def __init__(self, expr: LinearExprT, bounds: Sequence[int]): + def __init__(self, expr: LinearExprT, bounds: Sequence[int]) -> None: self.__expr: LinearExprT = expr self.__bounds: Sequence[int] = bounds @@ -1056,7 +1069,7 @@ class Constraint: def __init__( self, cp_model: "CpModel", - ): + ) -> None: self.__index: int = len(cp_model.proto.constraints) self.__cp_model: "CpModel" = cp_model self.__constraint: cp_model_pb2.ConstraintProto = ( @@ -1064,12 +1077,10 @@ class Constraint: ) @overload - def only_enforce_if(self, boolvar: Iterable[LiteralT]) -> "Constraint": - ... + def only_enforce_if(self, boolvar: Iterable[LiteralT]) -> "Constraint": ... @overload - def only_enforce_if(self, *boolvar: LiteralT) -> "Constraint": - ... + def only_enforce_if(self, *boolvar: LiteralT) -> "Constraint": ... def only_enforce_if(self, *boolvar) -> "Constraint": """Adds an enforcement literal to the constraint. @@ -1090,12 +1101,12 @@ class Constraint: """ for lit in expand_generator_or_tuple(boolvar): if (cmh.is_boolean(lit) and lit) or ( - isinstance(lit, numbers.Integral) and lit == 1 + isinstance(lit, IntegralTypes) and lit == 1 ): # Always true. Do nothing. 
pass elif (cmh.is_boolean(lit) and not lit) or ( - isinstance(lit, numbers.Integral) and lit == 0 + isinstance(lit, IntegralTypes) and lit == 0 ): self.__constraint.enforcement_literal.append( self.__cp_model.new_constant(0).index @@ -1174,7 +1185,7 @@ class IntervalVar: end: Optional[cp_model_pb2.LinearExpressionProto], is_present_index: Optional[int], name: Optional[str], - ): + ) -> None: self.__model: cp_model_pb2.CpModelProto = model # As with the IntVar::__init__ method, we hack the __init__ method to # support two use cases: @@ -1275,7 +1286,7 @@ def object_is_a_true_literal(literal: LiteralT) -> bool: if isinstance(literal, _NotBooleanVariable): proto = literal.negated().proto return len(proto.domain) == 2 and proto.domain[0] == 0 and proto.domain[1] == 0 - if isinstance(literal, numbers.Integral): + if isinstance(literal, IntegralTypes): return int(literal) == 1 return False @@ -1288,7 +1299,7 @@ def object_is_a_false_literal(literal: LiteralT) -> bool: if isinstance(literal, _NotBooleanVariable): proto = literal.negated().proto return len(proto.domain) == 2 and proto.domain[0] == 1 and proto.domain[1] == 1 - if isinstance(literal, numbers.Integral): + if isinstance(literal, IntegralTypes): return int(literal) == 0 return False @@ -1302,9 +1313,9 @@ class CpModel: * ```add``` create new constraints and add them to the model. """ - def __init__(self): + def __init__(self) -> None: self.__model: cp_model_pb2.CpModelProto = cp_model_pb2.CpModelProto() - self.__constant_map = {} + self.__constant_map: Dict[IntegralT, int] = {} # Naming. @property @@ -1337,9 +1348,11 @@ class CpModel: a variable whose domain is [lb, ub]. 
""" - return IntVar(self.__model, Domain(lb, ub), name) + return IntVar(self.__model, sorted_interval_list.Domain(lb, ub), name) - def new_int_var_from_domain(self, domain: Domain, name: str) -> IntVar: + def new_int_var_from_domain( + self, domain: sorted_interval_list.Domain, name: str + ) -> IntVar: """Create an integer variable from a domain. A domain is a set of integers specified by a collection of intervals. @@ -1357,7 +1370,7 @@ class CpModel: def new_bool_var(self, name: str) -> IntVar: """Creates a 0-1 variable with the given name.""" - return IntVar(self.__model, Domain(0, 1), name) + return IntVar(self.__model, sorted_interval_list.Domain(0, 1), name) def new_constant(self, value: IntegralT) -> IntVar: """Declares a constant integer.""" @@ -1397,8 +1410,8 @@ class CpModel: if not name.isidentifier(): raise ValueError("name={} is not a valid identifier".format(name)) if ( - isinstance(lower_bounds, numbers.Integral) - and isinstance(upper_bounds, numbers.Integral) + isinstance(lower_bounds, IntegralTypes) + and isinstance(upper_bounds, IntegralTypes) and lower_bounds > upper_bounds ): raise ValueError( @@ -1419,7 +1432,9 @@ class CpModel: IntVar( model=self.__model, name=f"{name}[{i}]", - domain=Domain(lower_bounds[i], upper_bounds[i]), + domain=sorted_interval_list.Domain( + lower_bounds[i], upper_bounds[i] + ), ) for i in index ], @@ -1453,10 +1468,12 @@ class CpModel: self, linear_expr: LinearExprT, lb: IntegralT, ub: IntegralT ) -> Constraint: """Adds the constraint: `lb <= linear_expr <= ub`.""" - return self.add_linear_expression_in_domain(linear_expr, Domain(lb, ub)) + return self.add_linear_expression_in_domain( + linear_expr, sorted_interval_list.Domain(lb, ub) + ) def add_linear_expression_in_domain( - self, linear_expr: LinearExprT, domain: Domain + self, linear_expr: LinearExprT, domain: sorted_interval_list.Domain ) -> Constraint: """Adds the constraint: `linear_expr` in `domain`.""" if isinstance(linear_expr, LinearExpr): @@ -1476,7 +1493,7 
@@ class CpModel: ] ) return ct - if isinstance(linear_expr, numbers.Integral): + if isinstance(linear_expr, IntegralTypes): if not domain.contains(int(linear_expr)): return self.add_bool_or([]) # Evaluate to false. else: @@ -1489,7 +1506,13 @@ class CpModel: + ")" ) - def add(self, ct: Union[BoundedLinearExpression, bool]) -> Constraint: + @overload + def add(self, ct: BoundedLinearExpression) -> Constraint: ... + + @overload + def add(self, ct: Union[bool, np.bool_]) -> Constraint: ... + + def add(self, ct): """Adds a `BoundedLinearExpression` to the model. Args: @@ -1500,7 +1523,8 @@ class CpModel: """ if isinstance(ct, BoundedLinearExpression): return self.add_linear_expression_in_domain( - ct.expression(), Domain.from_flat_intervals(ct.bounds()) + ct.expression(), + sorted_interval_list.Domain.from_flat_intervals(ct.bounds()), ) if ct and cmh.is_boolean(ct): return self.add_bool_or([True]) @@ -1511,12 +1535,10 @@ class CpModel: # General Integer Constraints. @overload - def add_all_different(self, expressions: Iterable[LinearExprT]) -> Constraint: - ... + def add_all_different(self, expressions: Iterable[LinearExprT]) -> Constraint: ... @overload - def add_all_different(self, *expressions: LinearExprT) -> Constraint: - ... + def add_all_different(self, *expressions: LinearExprT) -> Constraint: ... def add_all_different(self, *expressions): """Adds AllDifferent(expressions). @@ -1554,8 +1576,9 @@ class CpModel: if not variables: raise ValueError("add_element expects a non-empty variables array") - if isinstance(index, numbers.Integral): - return self.add(list(variables)[int(index)] == target) + if isinstance(index, IntegralTypes): + variable: VariableT = list(variables)[int(index)] + return self.add(variable == target) ct = Constraint(self) model_ct = self.__model.constraints[ct.index] @@ -2001,12 +2024,10 @@ class CpModel: return ct @overload - def add_bool_or(self, literals: Iterable[LiteralT]) -> Constraint: - ... 
+ def add_bool_or(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_bool_or(self, *literals: LiteralT) -> Constraint: - ... + def add_bool_or(self, *literals: LiteralT) -> Constraint: ... def add_bool_or(self, *literals): """Adds `Or(literals) == true`: sum(literals) >= 1.""" @@ -2021,24 +2042,20 @@ class CpModel: return ct @overload - def add_at_least_one(self, literals: Iterable[LiteralT]) -> Constraint: - ... + def add_at_least_one(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_at_least_one(self, *literals: LiteralT) -> Constraint: - ... + def add_at_least_one(self, *literals: LiteralT) -> Constraint: ... def add_at_least_one(self, *literals): """Same as `add_bool_or`: `sum(literals) >= 1`.""" return self.add_bool_or(*literals) @overload - def add_at_most_one(self, literals: Iterable[LiteralT]) -> Constraint: - ... + def add_at_most_one(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_at_most_one(self, *literals: LiteralT) -> Constraint: - ... + def add_at_most_one(self, *literals: LiteralT) -> Constraint: ... def add_at_most_one(self, *literals): """Adds `AtMostOne(literals)`: `sum(literals) <= 1`.""" @@ -2053,12 +2070,10 @@ class CpModel: return ct @overload - def add_exactly_one(self, literals: Iterable[LiteralT]) -> Constraint: - ... + def add_exactly_one(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_exactly_one(self, *literals: LiteralT) -> Constraint: - ... + def add_exactly_one(self, *literals: LiteralT) -> Constraint: ... def add_exactly_one(self, *literals): """Adds `ExactlyOne(literals)`: `sum(literals) == 1`.""" @@ -2073,12 +2088,10 @@ class CpModel: return ct @overload - def add_bool_and(self, literals: Iterable[LiteralT]) -> Constraint: - ... + def add_bool_and(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_bool_and(self, *literals: LiteralT) -> Constraint: - ... 
+ def add_bool_and(self, *literals: LiteralT) -> Constraint: ... def add_bool_and(self, *literals): """Adds `And(literals) == true`.""" @@ -2093,12 +2106,10 @@ class CpModel: return ct @overload - def add_bool_xor(self, literals: Iterable[LiteralT]) -> Constraint: - ... + def add_bool_xor(self, literals: Iterable[LiteralT]) -> Constraint: ... @overload - def add_bool_xor(self, *literals: LiteralT) -> Constraint: - ... + def add_bool_xor(self, *literals: LiteralT) -> Constraint: ... def add_bool_xor(self, *literals): """Adds `XOr(literals) == true`. @@ -2725,7 +2736,7 @@ class CpModel: and arg.coefficient() == -1 ): return -arg.expression().index - 1 - if isinstance(arg, numbers.Integral): + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_int64(arg) return self.get_or_make_index_from_constant(arg) raise TypeError("NotSupported: model.get_or_make_index(" + str(arg) + ")") @@ -2738,7 +2749,7 @@ class CpModel: if isinstance(arg, _NotBooleanVariable): self.assert_is_boolean_variable(arg.negated()) return arg.index - if isinstance(arg, numbers.Integral): + if isinstance(arg, IntegralTypes): arg = cmh.assert_is_zero_or_one(arg) return self.get_or_make_index_from_constant(arg) if cmh.is_boolean(arg): @@ -2774,7 +2785,7 @@ class CpModel: cp_model_pb2.LinearExpressionProto() ) mult = -1 if negate else 1 - if isinstance(linear_expr, numbers.Integral): + if isinstance(linear_expr, IntegralTypes): result.offset = int(linear_expr) * mult return result @@ -2826,7 +2837,7 @@ class CpModel: for v, c in coeffs_map.items(): self.__model.floating_point_objective.coeffs.append(c) self.__model.floating_point_objective.vars.append(v.index) - elif isinstance(obj, numbers.Integral): + elif isinstance(obj, IntegralTypes): self.__model.objective.offset = int(obj) self.__model.objective.scaling_factor = 1 else: @@ -2962,6 +2973,7 @@ class CpModel: AddAutomaton = add_automaton AddInverse = add_inverse AddReservoirConstraint = add_reservoir_constraint + AddReservoirConstraintWithActive = 
add_reservoir_constraint_with_active AddImplication = add_implication AddBoolOr = add_bool_or AddAtLeastOne = add_at_least_one @@ -2977,11 +2989,11 @@ class CpModel: AddMultiplicationEquality = add_multiplication_equality NewIntervalVar = new_interval_var NewIntervalVarSeries = new_interval_var_series - NewFixedSizedIntervalVar = new_fixed_size_interval_var + NewFixedSizeIntervalVar = new_fixed_size_interval_var NewOptionalIntervalVar = new_optional_interval_var NewOptionalIntervalVarSeries = new_optional_interval_var_series - NewOptionalFixedSizedIntervalVar = new_optional_fixed_size_interval_var - NewOptionalFixedSizedIntervalVarSeries = new_optional_fixed_size_interval_var_series + NewOptionalFixedSizeIntervalVar = new_optional_fixed_size_interval_var + NewOptionalFixedSizeIntervalVarSeries = new_optional_fixed_size_interval_var_series AddNoOverlap = add_no_overlap AddNoOverlap2D = add_no_overlap_2d AddCumulative = add_cumulative @@ -3009,22 +3021,20 @@ class CpModel: @overload def expand_generator_or_tuple( args: Union[Tuple[LiteralT, ...], Iterable[LiteralT]] -) -> Union[Iterable[LiteralT], LiteralT]: - ... +) -> Union[Iterable[LiteralT], LiteralT]: ... @overload def expand_generator_or_tuple( args: Union[Tuple[LinearExprT, ...], Iterable[LinearExprT]] -) -> Union[Iterable[LinearExprT], LinearExprT]: - ... +) -> Union[Iterable[LinearExprT], LinearExprT]: ... 
def expand_generator_or_tuple(args): if hasattr(args, "__len__"): # Tuple if len(args) != 1: return args - if isinstance(args[0], (numbers.Number, LinearExpr)): + if isinstance(args[0], (NumberTypes, LinearExpr)): return args # Generator return args[0] @@ -3034,7 +3044,7 @@ def evaluate_linear_expr( expression: LinearExprT, solution: cp_model_pb2.CpSolverResponse ) -> int: """Evaluate a linear expression against a solution.""" - if isinstance(expression, numbers.Integral): + if isinstance(expression, IntegralTypes): return int(expression) if not isinstance(expression, LinearExpr): raise TypeError("Cannot interpret %s as a linear expression." % expression) @@ -3043,7 +3053,7 @@ def evaluate_linear_expr( to_process = [(expression, 1)] while to_process: expr, coeff = to_process.pop() - if isinstance(expr, numbers.Integral): + if isinstance(expr, IntegralTypes): value += int(expr) * coeff elif isinstance(expr, _ProductCst): to_process.append((expr.expression(), coeff * expr.coefficient())) @@ -3072,7 +3082,7 @@ def evaluate_boolean_expression( literal: LiteralT, solution: cp_model_pb2.CpSolverResponse ) -> bool: """Evaluate a boolean expression against a solution.""" - if isinstance(literal, numbers.Integral): + if isinstance(literal, IntegralTypes): return bool(literal) elif isinstance(literal, IntVar) or isinstance(literal, _NotBooleanVariable): index: int = cast(Union[IntVar, _NotBooleanVariable], literal).index @@ -3095,7 +3105,7 @@ class CpSolver: about the solve procedure. """ - def __init__(self): + def __init__(self) -> None: self.__solution: Optional[cp_model_pb2.CpSolverResponse] = None self.parameters: sat_parameters_pb2.SatParameters = ( sat_parameters_pb2.SatParameters() @@ -3390,7 +3400,7 @@ class CpSolverSolutionCallback(swig_helper.SolutionCallback): `CpSolver` class. 
""" - def __init__(self): + def __init__(self) -> None: swig_helper.SolutionCallback.__init__(self) def OnSolutionCallback(self) -> None: @@ -3411,7 +3421,7 @@ class CpSolverSolutionCallback(swig_helper.SolutionCallback): """ if not self.has_response(): raise RuntimeError("solve() has not been called.") - if isinstance(lit, numbers.Integral): + if isinstance(lit, IntegralTypes): return bool(lit) if isinstance(lit, IntVar) or isinstance(lit, _NotBooleanVariable): return self.SolutionBooleanValue( @@ -3441,7 +3451,7 @@ class CpSolverSolutionCallback(swig_helper.SolutionCallback): to_process = [(expression, 1)] while to_process: expr, coeff = to_process.pop() - if isinstance(expr, numbers.Integral): + if isinstance(expr, IntegralTypes): value += int(expr) * coeff elif isinstance(expr, _ProductCst): to_process.append((expr.expression(), coeff * expr.coefficient())) @@ -3563,7 +3573,7 @@ class CpSolverSolutionCallback(swig_helper.SolutionCallback): class ObjectiveSolutionPrinter(CpSolverSolutionCallback): """Display the objective value and time of intermediate solutions.""" - def __init__(self): + def __init__(self) -> None: CpSolverSolutionCallback.__init__(self) self.__solution_count = 0 self.__start_time = time.time() @@ -3586,7 +3596,7 @@ class ObjectiveSolutionPrinter(CpSolverSolutionCallback): class VarArrayAndObjectiveSolutionPrinter(CpSolverSolutionCallback): """Print intermediate solutions (objective, variable values, time).""" - def __init__(self, variables): + def __init__(self, variables: Sequence[IntVar]) -> None: CpSolverSolutionCallback.__init__(self) self.__variables: Sequence[IntVar] = variables self.__solution_count: int = 0 @@ -3614,7 +3624,7 @@ class VarArrayAndObjectiveSolutionPrinter(CpSolverSolutionCallback): class VarArraySolutionPrinter(CpSolverSolutionCallback): """Print intermediate solutions (variable values, time).""" - def __init__(self, variables: Sequence[IntVar]): + def __init__(self, variables: Sequence[IntVar]) -> None: 
CpSolverSolutionCallback.__init__(self) self.__variables: Sequence[IntVar] = variables self.__solution_count: int = 0 @@ -3681,7 +3691,7 @@ def _convert_to_integral_series_and_validate_index( TypeError: If the type of `value_or_series` is not recognized. ValueError: If the index does not match. """ - if isinstance(value_or_series, numbers.Integral): + if isinstance(value_or_series, IntegralTypes): result = pd.Series(data=value_or_series, index=index) elif isinstance(value_or_series, pd.Series): if value_or_series.index.equals(index): @@ -3709,7 +3719,7 @@ def _convert_to_linear_expr_series_and_validate_index( TypeError: If the type of `value_or_series` is not recognized. ValueError: If the index does not match. """ - if isinstance(value_or_series, numbers.Integral): + if isinstance(value_or_series, IntegralTypes): result = pd.Series(data=value_or_series, index=index) elif isinstance(value_or_series, pd.Series): if value_or_series.index.equals(index): @@ -3737,7 +3747,7 @@ def _convert_to_literal_series_and_validate_index( TypeError: If the type of `value_or_series` is not recognized. ValueError: If the index does not match. 
""" - if isinstance(value_or_series, numbers.Integral): + if isinstance(value_or_series, IntegralTypes): result = pd.Series(data=value_or_series, index=index) elif isinstance(value_or_series, pd.Series): if value_or_series.index.equals(index): diff --git a/ortools/sat/python/cp_model_helper.py b/ortools/sat/python/cp_model_helper.py index 662e1cbd38..76b698ad31 100644 --- a/ortools/sat/python/cp_model_helper.py +++ b/ortools/sat/python/cp_model_helper.py @@ -37,7 +37,7 @@ def is_zero(x: Any) -> bool: """Checks if the x is 0 or 0.0.""" if isinstance(x, numbers.Integral): return int(x) == 0 - if isinstance(x, numbers.Number): + if isinstance(x, numbers.Real): return float(x) == 0.0 return False @@ -46,7 +46,7 @@ def is_one(x: Any) -> bool: """Checks if x is 1 or 1.0.""" if isinstance(x, numbers.Integral): return int(x) == 1 - if isinstance(x, numbers.Number): + if isinstance(x, numbers.Real): return float(x) == 1.0 return False @@ -55,7 +55,7 @@ def is_minus_one(x: Any) -> bool: """Checks if x is -1 or -1.0 .""" if isinstance(x, numbers.Integral): return int(x) == -1 - if isinstance(x, numbers.Number): + if isinstance(x, numbers.Real): return float(x) == -1.0 return False @@ -89,7 +89,7 @@ def assert_is_a_number(x: Any) -> Union[int, float]: """Asserts that x is a number and returns it casted to an int or a float.""" if isinstance(x, numbers.Integral): return int(x) - if isinstance(x, numbers.Number): + if isinstance(x, numbers.Real): return float(x) raise TypeError("Not a number: %s" % x) diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index 35f9347eb9..40fe7b2c25 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -20,6 +20,7 @@ from ortools.sat.python import cp_model_helper class CpModelHelperTest(absltest.TestCase): + def test_is_boolean(self): print("test_is_boolean") self.assertTrue(cp_model_helper.is_boolean(True)) @@ -34,9 +35,7 @@ class 
CpModelHelperTest(absltest.TestCase): self.assertRaises(TypeError, cp_model_helper.assert_is_int64, "Hello") self.assertRaises(TypeError, cp_model_helper.assert_is_int64, 1.2) self.assertRaises(OverflowError, cp_model_helper.assert_is_int64, 2**63) - self.assertRaises( - OverflowError, cp_model_helper.assert_is_int64, -(2**63) - 1 - ) + self.assertRaises(OverflowError, cp_model_helper.assert_is_int64, -(2**63) - 1) cp_model_helper.assert_is_int64(123) cp_model_helper.assert_is_int64(2**63 - 1) cp_model_helper.assert_is_int64(-(2**63)) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index fc2562f33d..f128bf5a9a 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -111,6 +111,7 @@ class LogToString: class CpModelTest(absltest.TestCase): + def testCreateIntegerVariable(self): print("testCreateIntegerVariable") model = cp_model.CpModel() diff --git a/ortools/sat/python/swig_helper_test.py b/ortools/sat/python/swig_helper_test.py index f41364180d..adb9011f95 100644 --- a/ortools/sat/python/swig_helper_test.py +++ b/ortools/sat/python/swig_helper_test.py @@ -22,6 +22,7 @@ from ortools.sat.python import swig_helper class Callback(swig_helper.SolutionCallback): + def __init__(self): swig_helper.SolutionCallback.__init__(self) self.__solution_count = 0 @@ -35,6 +36,7 @@ class Callback(swig_helper.SolutionCallback): class SwigHelperTest(absltest.TestCase): + def testVariableDomain(self): model_string = """ variables { domain: [ -10, 10 ] } diff --git a/ortools/sat/rins.cc b/ortools/sat/rins.cc index e9576ee1b2..edff8f5048 100644 --- a/ortools/sat/rins.cc +++ b/ortools/sat/rins.cc @@ -104,8 +104,8 @@ struct VarWeight { bool operator<(const VarWeight& o) const { return weight < o.weight; } }; -void FillRinsNeighborhood(const std::vector& solution, - const std::vector& relaxation_values, +void FillRinsNeighborhood(absl::Span solution, + absl::Span relaxation_values, double difficulty, 
absl::BitGenRef random, ReducedDomainNeighborhood& reduced_domains) { std::vector var_lp_gap_pairs; diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index 4c1497c0eb..237d04f725 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -89,7 +89,7 @@ class OutgoingCutHelper { // Given a subset of nodes, it is easy to identify the best subset A of edge // to consider. bool TryBlossomSubsetCut(std::string name, - const std::vector& symmetrized_edges, + absl::Span symmetrized_edges, absl::Span subset); private: @@ -271,7 +271,7 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, } bool OutgoingCutHelper::TryBlossomSubsetCut( - std::string name, const std::vector& symmetrized_edges, + std::string name, absl::Span symmetrized_edges, absl::Span subset) { DCHECK_GE(subset.size(), 1); DCHECK_LT(subset.size(), num_nodes_); @@ -715,7 +715,7 @@ namespace { // Returns for each literal its integer view, or the view of its negation. std::vector GetAssociatedVariables( - const std::vector& literals, Model* model) { + absl::Span literals, Model* model) { auto* encoder = model->GetOrCreate(); std::vector result; for (const Literal l : literals) { @@ -792,8 +792,8 @@ CutGenerator CreateCVRPCutGenerator(int num_nodes, std::vector tails, // This is really similar to SeparateSubtourInequalities, see the reference // there. 
void SeparateFlowInequalities( - int num_nodes, const std::vector& tails, const std::vector& heads, - const std::vector& arc_capacities, + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span arc_capacities, std::function& in_subset, IntegerValue* min_incoming_flow, IntegerValue* min_outgoing_flow)> diff --git a/ortools/sat/samples/bin_packing_sat.py b/ortools/sat/samples/bin_packing_sat.py index 680f1949aa..9ac4f9e807 100644 --- a/ortools/sat/samples/bin_packing_sat.py +++ b/ortools/sat/samples/bin_packing_sat.py @@ -121,10 +121,13 @@ def main() -> None: for b in active_bins: print(f"Bin {b}") - items_in_bin = x_values.xs(b, level="bin").loc[lambda x: x].index - for item in items_in_bin: + items_in_active_bin = x_values.xs(b, level="bin").loc[lambda x: x].index + for item in items_in_active_bin: print(f" Item {item} - weight {items.loc[item].weight}") - print(f" Packed items weight: {items.loc[items_in_bin].sum().to_string()}") + print( + " Packed items weight:" + f" {items.loc[items_in_active_bin].sum().to_string()}" + ) print() print(f"Total packed weight: {items.weight.sum()}") diff --git a/ortools/sat/samples/schedule_requests_sat.py b/ortools/sat/samples/schedule_requests_sat.py index f87789c64c..4518a75eda 100644 --- a/ortools/sat/samples/schedule_requests_sat.py +++ b/ortools/sat/samples/schedule_requests_sat.py @@ -15,6 +15,8 @@ # [START program] """Nurse scheduling problem with shift requests.""" # [START import] +from typing import Union + from ortools.sat.python import cp_model # [END import] @@ -80,7 +82,7 @@ def main() -> None: else: max_shifts_per_nurse = min_shifts_per_nurse + 1 for n in all_nurses: - num_shifts_worked = 0 + num_shifts_worked: Union[cp_model.LinearExpr, int] = 0 for d in all_days: for s in all_shifts: num_shifts_worked += shifts[(n, d, s)] diff --git a/ortools/sat/samples/solution_hinting_sample_sat.go b/ortools/sat/samples/solution_hinting_sample_sat.go index 1d67fdc13f..59b2766c2b 100644 --- 
a/ortools/sat/samples/solution_hinting_sample_sat.go +++ b/ortools/sat/samples/solution_hinting_sample_sat.go @@ -32,10 +32,10 @@ func solutionHintingSampleSat() error { model.AddNotEqual(x, y) - model.Maximize(cpmodel.NewLinearExpr().AddWeightedSum([]cpmodel.LinearArgument{x, y, z}, []int64_t{1, 2, 3})) + model.Maximize(cpmodel.NewLinearExpr().AddWeightedSum([]cpmodel.LinearArgument{x, y, z}, []int64{1, 2, 3})) // Solution hinting: x <- 1, y <- 2 - hint := &cpmodel.Hint{Ints: map[cpmodel.IntVar]int64_t{x: 7}} + hint := &cpmodel.Hint{Ints: map[cpmodel.IntVar]int64{x: 7}} model.SetHint(hint) m, err := model.Model() diff --git a/ortools/sat/samples/step_function_sample_sat.go b/ortools/sat/samples/step_function_sample_sat.go index a3dfb25daf..3545973734 100644 --- a/ortools/sat/samples/step_function_sample_sat.go +++ b/ortools/sat/samples/step_function_sample_sat.go @@ -45,7 +45,7 @@ func stepFunctionSampleSat() error { // expr == 0 on [5, 6] U [8, 10] b0 := model.NewBoolVar() - d0 := cpmodel.FromValues([]int64_t{5, 6, 8, 9, 10}) + d0 := cpmodel.FromValues([]int64{5, 6, 8, 9, 10}) model.AddLinearConstraintForDomain(x, d0).OnlyEnforceIf(b0) model.AddEquality(expr, cpmodel.NewConstant(0)).OnlyEnforceIf(b0) diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 81dda8ca2e..a856518443 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat"; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 280 +// NEXT TAG: 281 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -464,6 +464,13 @@ message SatParameters { // possible precedences between event and encoding the constraint. 
optional bool expand_reservoir_constraints = 182 [default = true]; + // If true, replace target = max(x, y) by linear constraint with the + // introduction of a new boolean b such that b => target == x and not(b) => + // target == y. + // + // This is mainly for experimenting compared to a custom lin_max propagator. + optional int32 max_lin_max_size_for_expansion = 280 [default = 0]; + // If true, it disable all constraint expansion. // This should only be used to test the presolve of expanded constraints. optional bool disable_constraint_expansion = 181 [default = false]; @@ -1217,8 +1224,10 @@ message SatParameters { // breaking during search. optional int32 symmetry_level = 183 [default = 2]; - // Experimental. Use new code to propagate linear constraint. - optional bool new_linear_propagation = 224 [default = false]; + // The new linear propagation code treat all constraints at once and use + // an adaptation of Bellman-Ford-Tarjan to propagate constraint in a smarter + // order and potentially detect propagation cycle earlier. 
+ optional bool new_linear_propagation = 224 [default = true]; // Linear constraints that are not pseudo-Boolean and that are longer than // this size will be split into sqrt(size) intermediate sums in order to have From 07fa7ddc9e9c2bd2e13939fca5f94a3e77151b63 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 21 Mar 2024 13:43:30 +0100 Subject: [PATCH 003/392] [CP-SAT] update python typing for mypy --- ortools/sat/python/cp_model.py | 67 +++++++++++++++++++-------- ortools/sat/python/cp_model_helper.py | 17 ++++--- 2 files changed, 58 insertions(+), 26 deletions(-) diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index ad76a44746..cb9eccdf93 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -844,6 +844,8 @@ class IntVar(LinearExpr): name: Optional[str], ) -> None: """See CpModel.new_int_var below.""" + self.__index: int + self.__var: cp_model_pb2.IntegerVariableProto self.__negation: Optional[_NotBooleanVariable] = None # Python do not support multiple __init__ methods. # This method is only called from the CpModel class. @@ -853,15 +855,16 @@ class IntVar(LinearExpr): # case 2: # model is a CpModelProto, domain is an index (int), and name is None. if isinstance(domain, IntegralTypes) and name is None: - self.__index: int = int(domain) - self.__var: cp_model_pb2.IntegerVariableProto = model.variables[domain] + self.__index = int(domain) + self.__var = model.variables[domain] else: - self.__index: int = len(model.variables) - self.__var: cp_model_pb2.IntegerVariableProto = model.variables.add() + self.__index = len(model.variables) + self.__var = model.variables.add() self.__var.domain.extend( cast(sorted_interval_list.Domain, domain).flattened_intervals() ) - self.__var.name = name + if name: + self.__var.name = name @property def index(self) -> int: @@ -1175,6 +1178,9 @@ class IntervalVar: enforcement literals assigned to false. 
Conversely, these constraints will also set these enforcement literals to false if they cannot fit these intervals into the schedule. + + Raises: + TypeError: if start, size, end are not defined, or have the wrong type. """ def __init__( @@ -1187,6 +1193,8 @@ class IntervalVar: name: Optional[str], ) -> None: self.__model: cp_model_pb2.CpModelProto = model + self.__index: int + self.__ct: cp_model_pb2.ConstraintProto # As with the IntVar::__init__ method, we hack the __init__ method to # support two use cases: # case 1: called when creating a new interval variable. @@ -1194,14 +1202,26 @@ class IntervalVar: # None or the index of a Boolean literal. name is a string # case 2: called when querying an existing interval variable. # start_index is an int, all parameters after are None. - if size is None and end is None and is_present_index is None and name is None: - self.__index: int = cast(int, start) - self.__ct: cp_model_pb2.ConstraintProto = model.constraints[self.__index] + if isinstance(start, int): + if size is not None: + raise TypeError("size should be None") + if end is not None: + raise TypeError("end should be None") + if is_present_index is not None: + raise TypeError("is_present_index should be None") + self.__index= cast(int, start) + self.__ct = model.constraints[self.__index] else: - self.__index: int = len(model.constraints) - self.__ct: cp_model_pb2.ConstraintProto = self.__model.constraints.add() + self.__index = len(model.constraints) + self.__ct = self.__model.constraints.add() + if start is None: + raise TypeError("start is not defined") self.__ct.interval.start.CopyFrom(start) + if size is None: + raise TypeError("size is not defined") self.__ct.interval.size.CopyFrom(size) + if end is None: + raise TypeError("end is not defined") self.__ct.interval.end.CopyFrom(end) if is_present_index is not None: self.__ct.enforcement_literal.append(is_present_index) @@ -2003,7 +2023,8 @@ class CpModel: model_ct = self.__model.constraints.add() 
model_ct.linear.vars.append(var_index) model_ct.linear.coeffs.append(1) - model_ct.linear.domain.extend([offset + i, offset + i]) + offset_as_int = int(offset) + model_ct.linear.domain.extend([offset_as_int + i, offset_as_int + i]) model_ct.enforcement_literal.append(b_index) model_ct = self.__model.constraints.add() @@ -2011,9 +2032,9 @@ class CpModel: model_ct.linear.coeffs.append(1) model_ct.enforcement_literal.append(-b_index - 1) if offset + i - 1 >= INT_MIN: - model_ct.linear.domain.extend([INT_MIN, offset + i - 1]) + model_ct.linear.domain.extend([INT_MIN, offset_as_int + i - 1]) if offset + i + 1 <= INT_MAX: - model_ct.linear.domain.extend([offset + i + 1, INT_MAX]) + model_ct.linear.domain.extend([offset_as_int + i + 1, INT_MAX]) def add_implication(self, a: LiteralT, b: LiteralT) -> Constraint: """Adds `a => b` (`a` implies `b`).""" @@ -2826,7 +2847,8 @@ class CpModel: self.__model.objective.scaling_factor = -1 self.__model.objective.offset = -constant for v, c in coeffs_map.items(): - self.__model.objective.coeffs.append(c) + c_as_int = int(c) + self.__model.objective.coeffs.append(c_as_int) if minimize: self.__model.objective.vars.append(v.index) else: @@ -2877,7 +2899,9 @@ class CpModel: solve() will fail. 
""" - strategy = self.__model.search_strategy.add() + strategy: cp_model_pb2.DecisionStrategyProto = ( + self.__model.search_strategy.add() + ) for v in variables: expr = strategy.exprs.add() if v.index >= 0: @@ -3110,7 +3134,7 @@ class CpSolver: self.parameters: sat_parameters_pb2.SatParameters = ( sat_parameters_pb2.SatParameters() ) - self.log_callback: Optional[swig_helper.LogCallback] = None + self.log_callback: Optional[Callable[[str], None]] = None self.__solve_wrapper: Optional[swig_helper.SolveWrapper] = None self.__lock: threading.Lock = threading.Lock() @@ -3130,7 +3154,10 @@ class CpSolver: if self.log_callback is not None: self.__solve_wrapper.add_log_callback(self.log_callback) - self.__solution = self.__solve_wrapper.solve(model.proto) + solution: cp_model_pb2.CpSolverResponse = self.__solve_wrapper.solve( + model.proto + ) + self.__solution = solution if solution_callback is not None: self.__solve_wrapper.clear_solution_callback(solution_callback) @@ -3138,7 +3165,7 @@ class CpSolver: with self.__lock: self.__solve_wrapper = None - return self.__solution.status + return solution.status def stop_search(self) -> None: """Stops the current search asynchronously.""" @@ -3376,11 +3403,11 @@ class CpSolver: enumerate_all = self.parameters.enumerate_all_solutions self.parameters.enumerate_all_solutions = True - self.solve(model, callback) + status: cp_model_pb2.CpSolverStatus = self.solve(model, callback) # Restore parameter. 
self.parameters.enumerate_all_solutions = enumerate_all - return self.__solution.status + return status # pylint: enable=invalid-name diff --git a/ortools/sat/python/cp_model_helper.py b/ortools/sat/python/cp_model_helper.py index 76b698ad31..96475a6a1a 100644 --- a/ortools/sat/python/cp_model_helper.py +++ b/ortools/sat/python/cp_model_helper.py @@ -64,25 +64,30 @@ def assert_is_int64(x: Any) -> int: """Asserts that x is integer and x is in [min_int_64, max_int_64] and returns it casted to an int.""" if not isinstance(x, numbers.Integral): raise TypeError("Not an integer: %s" % x) - if x < INT_MIN or x > INT_MAX: + x_as_int = int(x) + if x_as_int < INT_MIN or x_as_int > INT_MAX: raise OverflowError("Does not fit in an int64_t: %s" % x) - return int(x) + return x_as_int def assert_is_int32(x: Any) -> int: """Asserts that x is integer and x is in [min_int_32, max_int_32] and returns it casted to an int.""" if not isinstance(x, numbers.Integral): raise TypeError("Not an integer: %s" % x) - if x < INT32_MIN or x > INT32_MAX: + x_as_int = int(x) + if x_as_int < INT32_MIN or x_as_int > INT32_MAX: raise OverflowError("Does not fit in an int32_t: %s" % x) - return int(x) + return x_as_int def assert_is_zero_or_one(x: Any) -> int: """Asserts that x is 0 or 1 and returns it as an int.""" - if not isinstance(x, numbers.Integral) or x < 0 or x > 1: + if not isinstance(x, numbers.Integral): raise TypeError("Not a boolean: %s" % x) - return int(x) + x_as_int = int(x) + if x_as_int < 0 or x_as_int > 1: + raise TypeError("Not a boolean: %s" % x) + return x_as_int def assert_is_a_number(x: Any) -> Union[int, float]: From f92d236fbf087ad114eab9bb571d966b1425cd83 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 21 Mar 2024 14:04:10 +0100 Subject: [PATCH 004/392] [CP-SAT] experimental lin_max expand; improve python typing --- ortools/sat/cp_model_expand.cc | 81 ++++++++------------------- ortools/sat/python/cp_model.py | 4 +- ortools/sat/python/cp_model_helper.py | 2 +- 
ortools/sat/sat_parameters.proto | 6 +- 4 files changed, 29 insertions(+), 64 deletions(-) diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 821eff9ca2..627a29a5a1 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -460,48 +460,12 @@ void ExpandInverse(ConstraintProto* ct, PresolveContext* context) { context->UpdateRuleStats("inverse: expanded"); } -void ExpandLinMaxWithTwoTerms(ConstraintProto* ct, PresolveContext* context) { - CHECK_EQ(ct->lin_max().exprs().size(), 2); +void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { + const int num_exprs = ct->lin_max().exprs().size(); + if (num_exprs < 2) return; - // We will create 4 constraints for target = max(a, b). - // First. - // - target >= a. - // - target >= b. - for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { - LinearConstraintProto* lin = - context->working_model->add_constraints()->mutable_linear(); - lin->add_domain(0); - lin->add_domain(std::numeric_limits::max()); - AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); - AddLinearExpressionToLinearConstraint(expr, -1, lin); - } + // We will create 2 * num_exprs constraints for target = max(a1, .., an). - // And then, a new boolean b, and - // - b => target == a - // - not(b) => target == b - const int new_bool = context->NewBoolVar(); - bool first_loop = true; - for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { - ConstraintProto* new_ct = context->working_model->add_constraints(); - new_ct->add_enforcement_literal(first_loop ? 
new_bool - : NegatedRef(new_bool)); - first_loop = false; - - LinearConstraintProto* lin = new_ct->mutable_linear(); - lin->add_domain(0); - lin->add_domain(0); - AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); - AddLinearExpressionToLinearConstraint(expr, -1, lin); - } - - ct->Clear(); - context->UpdateRuleStats("lin_max: expanded lin_max with two terms"); -} - -void ExpandGeneralLinMax(ConstraintProto* ct, PresolveContext* context) { - CHECK_GT(ct->lin_max().exprs().size(), 2); - - // We will create 2 * n constraints for target = max(a1, .., an). // First. // - target >= ai for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { @@ -513,37 +477,38 @@ void ExpandGeneralLinMax(ConstraintProto* ct, PresolveContext* context) { AddLinearExpressionToLinearConstraint(expr, -1, lin); } - // And then, a new boolean bi, and - // - bi => target == ai + // Second, for each expr, create a new boolean bi, and add bi => target >= ai // With exactly_one(bi) - ConstraintProto* exo = context->working_model->add_constraints(); - - for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { + std::vector enforcement_literals; + enforcement_literals.reserve(num_exprs); + if (num_exprs == 2) { const int new_bool = context->NewBoolVar(); - exo->mutable_exactly_one()->add_literals(new_bool); + enforcement_literals.push_back(new_bool); + enforcement_literals.push_back(NegatedRef(new_bool)); + } else { + ConstraintProto* exactly_one = context->working_model->add_constraints(); + for (int i = 0; i < num_exprs; ++i) { + const int new_bool = context->NewBoolVar(); + exactly_one->mutable_exactly_one()->add_literals(new_bool); + enforcement_literals.push_back(new_bool); + } + } + + for (int i = 0; i < num_exprs; ++i) { ConstraintProto* new_ct = context->working_model->add_constraints(); - new_ct->add_enforcement_literal(new_bool); + new_ct->add_enforcement_literal(enforcement_literals[i]); LinearConstraintProto* lin = new_ct->mutable_linear(); - 
lin->add_domain(0); + lin->add_domain(std::numeric_limits::min()); lin->add_domain(0); AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); - AddLinearExpressionToLinearConstraint(expr, -1, lin); + AddLinearExpressionToLinearConstraint(ct->lin_max().exprs(i), -1, lin); } ct->Clear(); context->UpdateRuleStats("lin_max: expanded lin_max"); } -void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { - if (ct->lin_max().exprs().size() < 2) return; - if (ct->lin_max().exprs().size() == 2) { - ExpandLinMaxWithTwoTerms(ct, context); - } else { - ExpandGeneralLinMax(ct, context); - } -} - // A[V] == V means for all i, V == i => A_i == i void ExpandElementWithTargetEqualIndex(ConstraintProto* ct, PresolveContext* context) { diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index cb9eccdf93..f9e50d5234 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -1209,7 +1209,7 @@ class IntervalVar: raise TypeError("end should be None") if is_present_index is not None: raise TypeError("is_present_index should be None") - self.__index= cast(int, start) + self.__index = cast(int, start) self.__ct = model.constraints[self.__index] else: self.__index = len(model.constraints) @@ -2901,7 +2901,7 @@ class CpModel: strategy: cp_model_pb2.DecisionStrategyProto = ( self.__model.search_strategy.add() - ) + ) for v in variables: expr = strategy.exprs.add() if v.index >= 0: diff --git a/ortools/sat/python/cp_model_helper.py b/ortools/sat/python/cp_model_helper.py index 96475a6a1a..69e5de9a5a 100644 --- a/ortools/sat/python/cp_model_helper.py +++ b/ortools/sat/python/cp_model_helper.py @@ -85,7 +85,7 @@ def assert_is_zero_or_one(x: Any) -> int: if not isinstance(x, numbers.Integral): raise TypeError("Not a boolean: %s" % x) x_as_int = int(x) - if x_as_int < 0 or x_as_int > 1: + if x_as_int < 0 or x_as_int > 1: raise TypeError("Not a boolean: %s" % x) return x_as_int diff --git a/ortools/sat/sat_parameters.proto 
b/ortools/sat/sat_parameters.proto index a856518443..04b08568ca 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -464,9 +464,9 @@ message SatParameters { // possible precedences between event and encoding the constraint. optional bool expand_reservoir_constraints = 182 [default = true]; - // If true, replace target = max(x, y) by linear constraint with the - // introduction of a new boolean b such that b => target == x and not(b) => - // target == y. + // If the number of expressions in the lin_max is less that the max size + // parameter, model expansion replaces target = max(xi) by linear constraint + // with the introduction of new booleans bi such that bi => target == xi. // // This is mainly for experimenting compared to a custom lin_max propagator. optional int32 max_lin_max_size_for_expansion = 280 [default = 0]; From 6e8189608a14c16ff44e74dca29da0dc0476469a Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 21 Mar 2024 15:48:11 +0100 Subject: [PATCH 005/392] [CP-SAT] polish typing --- ortools/sat/python/cp_model.py | 10 +++++----- ortools/sat/python/cp_model_helper.py | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index f9e50d5234..943f8a354e 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -863,7 +863,7 @@ class IntVar(LinearExpr): self.__var.domain.extend( cast(sorted_interval_list.Domain, domain).flattened_intervals() ) - if name: + if name is not None: self.__var.name = name @property @@ -1180,7 +1180,7 @@ class IntervalVar: intervals into the schedule. Raises: - TypeError: if start, size, end are not defined, or have the wrong type. + ValueError: if start, size, end are not defined, or have the wrong type. """ def __init__( @@ -1204,11 +1204,11 @@ class IntervalVar: # start_index is an int, all parameters after are None. 
if isinstance(start, int): if size is not None: - raise TypeError("size should be None") + raise ValueError("size should be None") if end is not None: - raise TypeError("end should be None") + raise ValueError("end should be None") if is_present_index is not None: - raise TypeError("is_present_index should be None") + raise ValueError("is_present_index should be None") self.__index = cast(int, start) self.__ct = model.constraints[self.__index] else: diff --git a/ortools/sat/python/cp_model_helper.py b/ortools/sat/python/cp_model_helper.py index 69e5de9a5a..364aea8485 100644 --- a/ortools/sat/python/cp_model_helper.py +++ b/ortools/sat/python/cp_model_helper.py @@ -63,30 +63,30 @@ def is_minus_one(x: Any) -> bool: def assert_is_int64(x: Any) -> int: """Asserts that x is integer and x is in [min_int_64, max_int_64] and returns it casted to an int.""" if not isinstance(x, numbers.Integral): - raise TypeError("Not an integer: %s" % x) + raise TypeError(f"Not an integer: {x} of type {type(x)}") x_as_int = int(x) if x_as_int < INT_MIN or x_as_int > INT_MAX: - raise OverflowError("Does not fit in an int64_t: %s" % x) + raise OverflowError(f"Does not fit in an int64_t: {x}") return x_as_int def assert_is_int32(x: Any) -> int: """Asserts that x is integer and x is in [min_int_32, max_int_32] and returns it casted to an int.""" if not isinstance(x, numbers.Integral): - raise TypeError("Not an integer: %s" % x) + raise TypeError(f"Not an integer: {x} of type {type(x)}") x_as_int = int(x) if x_as_int < INT32_MIN or x_as_int > INT32_MAX: - raise OverflowError("Does not fit in an int32_t: %s" % x) + raise OverflowError(f"Does not fit in an int32_t: {x}") return x_as_int def assert_is_zero_or_one(x: Any) -> int: """Asserts that x is 0 or 1 and returns it as an int.""" if not isinstance(x, numbers.Integral): - raise TypeError("Not a boolean: %s" % x) + raise TypeError(f"Not a boolean: {x} of type {type(x)}") x_as_int = int(x) if x_as_int < 0 or x_as_int > 1: - raise TypeError("Not 
a boolean: %s" % x) + raise TypeError(f"Not a boolean: {x}") return x_as_int @@ -96,7 +96,7 @@ def assert_is_a_number(x: Any) -> Union[int, float]: return int(x) if isinstance(x, numbers.Real): return float(x) - raise TypeError("Not a number: %s" % x) + raise TypeError(f"Not a number: {x} of type {type(x)}") def to_capped_int64(v: int) -> int: From e8e5a93dc905a7710bb7aaa00df19611e7a3ef8d Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 29 Feb 2024 14:44:48 +0100 Subject: [PATCH 006/392] deps: Bump SCIP to v900 --- Dependencies.txt | 2 +- WORKSPACE | 4 +- bazel/BUILD.bazel | 19 +- bazel/scip-v900.patch | 225 +++++++++++++++++++ bazel/scip.BUILD.bazel | 2 +- bazel/scip.patch | 85 ------- cmake/dependencies/CMakeLists.txt | 4 +- patches/{scip-v810.patch => scip-v900.patch} | 4 +- 8 files changed, 241 insertions(+), 104 deletions(-) create mode 100644 bazel/scip-v900.patch delete mode 100644 bazel/scip.patch rename patches/{scip-v810.patch => scip-v900.patch} (94%) diff --git a/Dependencies.txt b/Dependencies.txt index fea7be5644..c29527b25e 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -9,7 +9,7 @@ Cgl=0.60.5 Cbc=2.10.7 GLPK=5.0 HiGHS=v1.6.0 -Scip=v810 +Scip=v900 # Python pybind11=v2.11.1 pybind11_abseil=52f2739 diff --git a/WORKSPACE b/WORKSPACE index 2fec7dbcb5..c43d336172 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -129,9 +129,9 @@ http_archive( new_git_repository( name = "scip", build_file = "//bazel:scip.BUILD.bazel", - patches = ["//bazel:scip.patch"], + patches = ["//bazel:scip-v900.patch"], patch_args = ["-p1"], - tag = "v810", + tag = "v900", remote = "https://github.com/scipopt/scip.git", ) diff --git a/bazel/BUILD.bazel b/bazel/BUILD.bazel index 5a5b1a7416..21a00f04ab 100644 --- a/bazel/BUILD.bazel +++ b/bazel/BUILD.bazel @@ -14,22 +14,19 @@ load("@rules_python//python:pip.bzl", "compile_pip_requirements") exports_files([ - "gtest.BUILD", - "glpk.BUILD", - "pcre2.BUILD", - "pcre2.patch", - "re2.patch", - "scip.BUILD", - "scip.patch", - 
"swig.BUILD", - "swig.patch", - "bliss.BUILD", - "bliss-0.73.patch", "archive_helper.bzl", + "bliss-0.73.patch", + "bliss.BUILD.bazel", + "glpk.BUILD.bazel", "notebook_requirements.in", "notebook_requirements.txt", "ortools_requirements.in", "ortools_requirements.txt", + "pcre2.BUILD.bazel", + "scip-v900.patch", + "scip.BUILD.bazel", + "swig.BUILD.bazel", + "swig.patch", ]) compile_pip_requirements( diff --git a/bazel/scip-v900.patch b/bazel/scip-v900.patch new file mode 100644 index 0000000000..fb82f42a48 --- /dev/null +++ b/bazel/scip-v900.patch @@ -0,0 +1,225 @@ +diff --git a/src/lpi/lpi_glop.cpp b/src/lpi/lpi_glop.cpp +index a90120188a..664cb4d097 100644 +--- a/src/lpi/lpi_glop.cpp ++++ b/src/lpi/lpi_glop.cpp +@@ -51,7 +51,6 @@ + #include "ortools/util/time_limit.h" + + #include "ortools/base/logging.h" +-#include "ortools/base/vlog_is_on.h" + + #include "lpi/lpi.h" + #include "scip/pub_message.h" +@@ -2942,12 +2941,12 @@ SCIP_RETCODE SCIPlpiSetIntpar( + SCIPdebugMessage("SCIPlpiSetIntpar: SCIP_LPPAR_LPINFO -> %d.\n", ival); + if ( ival == 0 ) + { +- (void) google::SetVLOGLevel("*", google::GLOG_INFO); ++ absl::SetFlag(&FLAGS_stderrthreshold, 2); + lpi->lp_info = false; + } + else + { +- (void) google::SetVLOGLevel("*", google::GLOG_ERROR); ++ absl::SetFlag(&FLAGS_stderrthreshold, 0); + lpi->lp_info = true; + } + break; +@@ -3190,7 +3189,7 @@ SCIP_RETCODE SCIPlpiReadLP( + + const std::string filespec(fname); + MPModelProto proto; +- if ( ! ReadFileToProto(filespec, &proto) ) ++ if ( ! ReadFileToProto(filespec, &proto).ok() ) + { + SCIPerrorMessage("Could not read <%s>\n", fname); + return SCIP_READERROR; +@@ -3214,7 +3213,7 @@ SCIP_RETCODE SCIPlpiWriteLP( + MPModelProto proto; + LinearProgramToMPModelProto(*lpi->linear_program, &proto); + const std::string filespec(fname); +- if ( ! WriteProtoToFile(filespec, proto, operations_research::ProtoWriteFormat::kProtoText, true) ) ++ if ( ! 
WriteProtoToFile(filespec, proto, operations_research::ProtoWriteFormat::kProtoText, true).ok() ) + { + SCIPerrorMessage("Could not write <%s>\n", fname); + return SCIP_READERROR; +diff --git a/src/scip/config.h b/src/scip/config.h +new file mode 100644 +index 0000000000..871fde8e55 +--- /dev/null ++++ b/src/scip/config.h +@@ -0,0 +1,32 @@ ++#ifndef __CONFIG_H__ ++#define __CONFIG_H__ ++ ++#define CMAKE_BUILD_TYPE "Release" ++#define SCIP_VERSION_MAJOR 9 ++#define SCIP_VERSION_MINOR 0 ++#define SCIP_VERSION_PATCH 0 ++#define SCIP_VERSION_SUB 0 ++#define SCIP_VERSION_API 114 ++/* #undef BMS_NOBLOCKMEM */ ++/* #undef SCIP_NOBUFFERMEM */ ++/* #undef WITH_DEBUG_SOLUTION */ ++/* #undef SCIP_NO_SIGACTION */ ++/* #undef SCIP_NO_STRTOK_R */ ++/* #undef TPI_NONE */ ++#define TPI_TNY ++/* #undef TPI_OMP */ ++#define SCIP_THREADSAFE ++#define WITH_SCIPDEF ++/* #undef SCIP_WITH_LAPACK */ ++/* #undef SCIP_WITH_PAPILO */ ++/* #undef SCIP_WITH_ZLIB */ ++/* #undef SCIP_WITH_READLINE */ ++/* #undef SCIP_WITH_GMP */ ++/* #undef SCIP_WITH_LPSCHECK */ ++/* #undef SCIP_WITH_ZIMPL */ ++/* #undef SCIP_WITH_AMPL */ ++#define SCIP_ROUNDING_FE ++/* #undef SCIP_ROUNDING_FP */ ++/* #undef SCIP_ROUNDING_MS */ ++ ++#endif +diff --git a/src/scip/githash.c b/src/scip/githash.c +new file mode 100644 +index 0000000000..4b1dfc587f +--- /dev/null ++++ b/src/scip/githash.c +@@ -0,0 +1 @@ ++#define SCIP_GITHASH "7205bedd94" +diff --git a/src/scip/scip_export.h b/src/scip/scip_export.h +new file mode 100644 +index 0000000000..8bf2aaefa5 +--- /dev/null ++++ b/src/scip/scip_export.h +@@ -0,0 +1,42 @@ ++ ++#ifndef SCIP_EXPORT_H ++#define SCIP_EXPORT_H ++ ++#ifdef SCIP_STATIC_DEFINE ++# define SCIP_EXPORT ++# define SCIP_NO_EXPORT ++#else ++# ifndef SCIP_EXPORT ++# ifdef libscip_EXPORTS ++/* We are building this library */ ++# define SCIP_EXPORT ++# else ++/* We are using this library */ ++# define SCIP_EXPORT ++# endif ++# endif ++ ++# ifndef SCIP_NO_EXPORT ++# define SCIP_NO_EXPORT ++# endif ++#endif ++ 
++#ifndef SCIP_DEPRECATED ++# define SCIP_DEPRECATED __attribute__ ((__deprecated__)) ++#endif ++ ++#ifndef SCIP_DEPRECATED_EXPORT ++# define SCIP_DEPRECATED_EXPORT SCIP_EXPORT SCIP_DEPRECATED ++#endif ++ ++#ifndef SCIP_DEPRECATED_NO_EXPORT ++# define SCIP_DEPRECATED_NO_EXPORT SCIP_NO_EXPORT SCIP_DEPRECATED ++#endif ++ ++#if 0 /* DEFINE_NO_DEPRECATED */ ++# ifndef SCIP_NO_DEPRECATED ++# define SCIP_NO_DEPRECATED ++# endif ++#endif ++ ++#endif /* SCIP_EXPORT_H */ +diff --git a/src/scip/scipbuildflag.c b/src/scip/scipbuildflag.c +new file mode 100644 +index 0000000000..2af785150e +--- /dev/null ++++ b/src/scip/scipbuildflag.c +@@ -0,0 +1,65 @@ ++/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ ++/* */ ++/* This file is part of the program and library */ ++/* SCIP --- Solving Constraint Integer Programs */ ++/* */ ++/* Copyright (c) 2002-2024 Zuse Institute Berlin (ZIB) */ ++/* */ ++/* Licensed under the Apache License, Version 2.0 (the "License"); */ ++/* you may not use this file except in compliance with the License. */ ++/* You may obtain a copy of the License at */ ++/* */ ++/* http://www.apache.org/licenses/LICENSE-2.0 */ ++/* */ ++/* Unless required by applicable law or agreed to in writing, software */ ++/* distributed under the License is distributed on an "AS IS" BASIS, */ ++/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ ++/* See the License for the specific language governing permissions and */ ++/* limitations under the License. */ ++/* */ ++/* You should have received a copy of the Apache-2.0 license */ ++/* along with SCIP; see the file LICENSE. If not visit scipopt.org. 
*/ ++/* */ ++/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ ++ ++/**@file scipbuildflags.c ++ * @brief build flags methods ++ * @author Felipe Serrano ++ */ ++ ++/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/ ++ ++#include "scip/scipbuildflags.h" ++ ++/** returns the flags that were used to build SCIP */ ++const char* SCIPgetBuildFlags( ++ void ++ ) ++{ ++ return " ARCH=x86_64\n\ ++ OSTYPE=Linux-6.7.5-arch1-1\n\ ++ COMP=GNU 13.2.1\n\ ++ BUILD=Release\n\ ++ DEBUGSOL=OFF\n\ ++ EXPRINT=none\n\ ++ SYM=none\n\ ++ GMP=OFF\n\ ++ IPOPT=OFF\n\ ++ WORHP=OFF\n\ ++ LPS=none\n\ ++ LPSCHECK=OFF\n\ ++ NOBLKBUFMEM=OFF\n\ ++ NOBLKMEM=OFF\n\ ++ NOBUFMEM=OFF\n\ ++ THREADSAFE=ON;FORCE\n\ ++ READLINE=OFF\n\ ++ SANITIZE_ADDRESS=OFF\n\ ++ SANITIZE_MEMORY=OFF\n\ ++ SANITIZE_UNDEFINED=OFF\n\ ++ SANITIZE_THREAD=OFF\n\ ++ SHARED=OFF\n\ ++ VERSION=9.0.0.0\n\ ++ API_VERSION=114\n\ ++ ZIMPL=OFF\n\ ++ ZLIB=ON"; ++} +diff --git a/src/symmetry/compute_symmetry_bliss.cpp b/src/symmetry/compute_symmetry_bliss.cpp +index 0ba5ea060e..10570448a0 100644 +--- a/src/symmetry/compute_symmetry_bliss.cpp ++++ b/src/symmetry/compute_symmetry_bliss.cpp +@@ -34,8 +34,9 @@ + #include "compute_symmetry.h" + + /* include bliss graph */ +-#include +-#include ++#define BLISS_VERSION "0.73" ++#include ++#include + + #include + #include diff --git a/bazel/scip.BUILD.bazel b/bazel/scip.BUILD.bazel index 6b11d719e5..d31e346730 100644 --- a/bazel/scip.BUILD.bazel +++ b/bazel/scip.BUILD.bazel @@ -82,6 +82,7 @@ cc_library( "src/scip/nlpi_filtersqp.c", "src/scip/nlpi_worhp.c", "src/scip/*_xyz.c", + "src/scip/scipbuildflags.c", "src/scip/sorttpl.c", "src/symmetry/compute_symmetry_*.cpp", "src/symmetry/*nauty*", @@ -97,7 +98,6 @@ cc_library( "src/*/*.hpp", "src/scip/githash.c", "src/scip/sorttpl.c", - "src/scip/buildflags.c", ], exclude = [ diff --git a/bazel/scip.patch b/bazel/scip.patch deleted file mode 100644 
index 03e6c58e7d..0000000000 --- a/bazel/scip.patch +++ /dev/null @@ -1,85 +0,0 @@ -diff --git a/src/lpi/lpi_glop.cpp b/src/lpi/lpi_glop.cpp -index 2471778a8f..17fd1e8c34 100644 ---- a/src/lpi/lpi_glop.cpp -+++ b/src/lpi/lpi_glop.cpp -@@ -51,7 +51,6 @@ - #include "ortools/util/time_limit.h" - - #include "ortools/base/logging.h" --#include "ortools/base/vlog_is_on.h" - - #include "lpi/lpi.h" - #include "scip/pub_message.h" -@@ -2942,12 +2941,12 @@ SCIP_RETCODE SCIPlpiSetIntpar( - SCIPdebugMessage("SCIPlpiSetIntpar: SCIP_LPPAR_LPINFO -> %d.\n", ival); - if ( ival == 0 ) - { -- (void) google::SetVLOGLevel("*", google::GLOG_INFO); -+ absl::SetFlag(&FLAGS_stderrthreshold, 2); - lpi->lp_info = false; - } - else - { -- (void) google::SetVLOGLevel("*", google::GLOG_ERROR); -+ absl::SetFlag(&FLAGS_stderrthreshold, 0); - lpi->lp_info = true; - } - break; -@@ -3190,7 +3189,7 @@ SCIP_RETCODE SCIPlpiReadLP( - - const std::string filespec(fname); - MPModelProto proto; -- if ( ! ReadFileToProto(filespec, &proto) ) -+ if ( ! ReadFileToProto(filespec, &proto).ok() ) - { - SCIPerrorMessage("Could not read <%s>\n", fname); - return SCIP_READERROR; -@@ -3214,7 +3213,7 @@ SCIP_RETCODE SCIPlpiWriteLP( - MPModelProto proto; - LinearProgramToMPModelProto(*lpi->linear_program, &proto); - const std::string filespec(fname); -- if ( ! WriteProtoToFile(filespec, proto, operations_research::ProtoWriteFormat::kProtoText, true) ) -+ if ( ! 
WriteProtoToFile(filespec, proto, operations_research::ProtoWriteFormat::kProtoText, true).ok() ) - { - SCIPerrorMessage("Could not write <%s>\n", fname); - return SCIP_READERROR; -diff --git a/src/symmetry/compute_symmetry_bliss.cpp b/src/symmetry/compute_symmetry_bliss.cpp -index 484627c4b9..27c2895165 100644 ---- a/src/symmetry/compute_symmetry_bliss.cpp -+++ b/src/symmetry/compute_symmetry_bliss.cpp -@@ -25,5 +25,5 @@ - #include "compute_symmetry.h" - - /* include bliss graph */ --#include --#include -+#include -+#include - - #include - #include - -diff --git a/src/scip/githash.c b/src/scip/githash.c -new file mode 100644 -index 0000000000..2891bc72de ---- /dev/null -+++ b/src/scip/githash.c -@@ -0,0 +1,1 @@ -+#define SCIP_GITHASH "a740f0891e" -diff --git a/src/scip/scipbuildflags.c b/src/scip/scipbuildflags.c -index b54b9112cb..dc8e62b5e0 100644 ---- a/src/scip/scipbuildflags.c -+++ b/src/scip/scipbuildflags.c -@@ -21,10 +21,9 @@ - - /*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/ - -+#define SCIP_BUILDFLAGS " ARCH=x86_64\n COMP=gnu\n DEBUGSOL=false\n EXPRINT=none\n GAMS=false\n SYM=bliss\n GMP=false\n IPOPT=false\n IPOPTOPT=opt\n WORHP=false\n WORHPOPT=opt\n LPS=spx2\n LPSCHECK=false\n LPSOPT=opt\n NOBLKBUFMEM=false\n NOBLKMEM=false\n NOBUFMEM=false\n OPT=opt\n OSTYPE=linux\n PARASCIP=true\n READLINE=false\n SANITIZE=\n SHARED=false\n USRARFLAGS=\n USRCFLAGS=-fPIC\n USRCXXFLAGS=-fPIC\n USRDFLAGS=\n USRFLAGS=\n USRLDFLAGS=\n USROFLAGS=\n VERSION=7.0.1\n ZIMPL=false\n ZIMPLOPT=opt\n ZLIB=true" -+ - #include "scip/scipbuildflags.h" --#ifdef NO_CONFIG_HEADER --#include "buildflags.c" --#endif - - /** returns the flags that were used to build SCIP */ - const char* SCIPgetBuildFlags( diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 3ee30fe20b..8688677a37 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -263,8 
+263,8 @@ if(BUILD_SCIP) FetchContent_Declare( scip GIT_REPOSITORY "https://github.com/scipopt/scip.git" - GIT_TAG "v810" - PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/scip-v810.patch" + GIT_TAG "v900" + PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/scip-v900.patch" ) FetchContent_MakeAvailable(scip) set(LPI_GLOP_SRC ${scip_SOURCE_DIR}/src/lpi/lpi_glop.cpp PARENT_SCOPE) diff --git a/patches/scip-v810.patch b/patches/scip-v900.patch similarity index 94% rename from patches/scip-v810.patch rename to patches/scip-v900.patch index 6190e20eec..628716d38c 100644 --- a/patches/scip-v810.patch +++ b/patches/scip-v900.patch @@ -1,5 +1,5 @@ diff --git a/src/lpi/lpi_glop.cpp b/src/lpi/lpi_glop.cpp -index 2471778a8f..3326ac7292 100644 +index a90120188a..2c068e5e30 100644 --- a/src/lpi/lpi_glop.cpp +++ b/src/lpi/lpi_glop.cpp @@ -51,7 +51,6 @@ @@ -15,7 +15,7 @@ index 2471778a8f..3326ac7292 100644 if ( ival == 0 ) { - (void) google::SetVLOGLevel("*", google::GLOG_INFO); -+ absl::SetFlag(&FLAGS_stderrthreshold, 2); ++ absl::SetFlag(&FLAGS_stderrthreshold, 2); lpi->lp_info = false; } else From d6b490ef75705efcf77006ff2ca875e99c34da1e Mon Sep 17 00:00:00 2001 From: Iaroslav Chernyshev Date: Tue, 12 Mar 2024 12:16:25 +0100 Subject: [PATCH 007/392] Fix file::Match implementation --- ortools/base/filesystem.cc | 39 +++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/ortools/base/filesystem.cc b/ortools/base/filesystem.cc index 0d874e1859..37d9c074e5 100644 --- a/ortools/base/filesystem.cc +++ b/ortools/base/filesystem.cc @@ -14,14 +14,51 @@ #include "ortools/base/filesystem.h" #include // NOLINT(build/c++17) +#include // NOLINT #include "absl/status/status.h" +#include "absl/strings/str_replace.h" + +namespace fs = std::filesystem; + +// Converts a absl::string_view into an object compatible with std::filesystem. 
+#ifdef ABSL_USES_STD_STRING_VIEW +#define SV_ABSL_TO_STD(X) X +#else +#define SV_ABSL_TO_STD(X) std::string(X) +#endif namespace file { absl::Status Match(std::string_view pattern, std::vector* result, const file::Options& options) { - return absl::Status(); + try { + const auto search_dir = fs::path(SV_ABSL_TO_STD(pattern)).parent_path(); + const auto filename = fs::path(SV_ABSL_TO_STD(pattern)).filename().string(); + std::string regexp_filename = + absl::StrReplaceAll(filename, {{".", "\\."}, {"*", ".*"}, {"?", "."}}); + std::regex regexp_pattern(regexp_filename); + std::error_code error; + + const fs::directory_iterator path_end; + for (auto path = fs::directory_iterator(search_dir, error); + !error && path != path_end; path.increment(error)) { + if (!fs::is_regular_file(path->path())) { + continue; + } + if (std::regex_match(path->path().filename().string(), regexp_pattern)) { + result->push_back(path->path().string()); + } + } + if (error) { + return absl::InvalidArgumentError(error.message()); + } + + std::sort(result->begin(), result->end()); + return absl::OkStatus(); + } catch (const std::exception& e) { + return absl::InvalidArgumentError(e.what()); + } } absl::Status IsDirectory(std::string_view path, const file::Options& options) { From ca6af53f66f44217cedd931188f5b303a29d3d32 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 21 Mar 2024 15:58:17 +0100 Subject: [PATCH 008/392] support gurobi 11.0.1 --- ortools/gurobi/environment.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index 362ba01d94..5cc0217c48 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -345,8 +345,8 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { std::vector GurobiDynamicLibraryPotentialPaths() { std::vector potential_paths; const std::vector kGurobiVersions = { - "1100", "1003", "1002", "1001", "1000", "952", "951", "950", - "911", "910", 
"903", "902", "811", "801", "752"}; + "1101", "1100", "1003", "1002", "1001", "1000", "952", "951", + "950", "911", "910", "903", "902", "811", "801", "752"}; potential_paths.reserve(kGurobiVersions.size() * 3); // Look for libraries pointed by GUROBI_HOME first. From 0ca8c87c3c477f0786d44500555b632218514b0a Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 22 Mar 2024 13:57:41 +0100 Subject: [PATCH 009/392] new example --- examples/python/BUILD.bazel | 2 + examples/python/pentominoes_sat.py | 166 +++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 examples/python/pentominoes_sat.py diff --git a/examples/python/BUILD.bazel b/examples/python/BUILD.bazel index 721320311a..a3c8f5c7d5 100644 --- a/examples/python/BUILD.bazel +++ b/examples/python/BUILD.bazel @@ -58,6 +58,8 @@ code_sample_py("maze_escape_sat") code_sample_py("no_wait_baking_scheduling_sat") +code_sample_py("pentominoes_sat") + code_sample_py("prize_collecting_tsp_sat") code_sample_py("prize_collecting_vrp_sat") diff --git a/examples/python/pentominoes_sat.py b/examples/python/pentominoes_sat.py new file mode 100644 index 0000000000..720d2bee2c --- /dev/null +++ b/examples/python/pentominoes_sat.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Example to solves a pentomino paving problem. + +Given a subset of n different pentomino, the problem is to pave a square of +size 5 x n. 
The problem is reduced to an exact set cover problem and encoded +as a linear boolean problem. + +This problem comes from the game Katamino: +http://boardgamegeek.com/boardgame/6931/katamino +""" + +from collections.abc import Sequence +from typing import Dict, List + +from absl import app +from absl import flags + +from google.protobuf import text_format +from ortools.sat.python import cp_model + + +_PARAMS = flags.DEFINE_string( + "params", + "num_search_workers:16,log_search_progress:true,max_time_in_seconds:45", + "Sat solver parameters.", +) + + +def is_one(mask: List[List[int]], x: int, y: int, orientation: int) -> bool: + if orientation & 1: + tmp: int = x + x = y + y = tmp + if orientation & 2: + x = len(mask[0]) - 1 - x + if orientation & 4: + y = len(mask) - 1 - y + return mask[y][x] == 1 + + +def get_height(mask: List[List[int]], orientation: int) -> int: + if orientation & 1: + return len(mask[0]) + return len(mask) + + +def get_width(mask: List[List[int]], orientation: int) -> int: + if orientation & 1: + return len(mask) + return len(mask[0]) + + +def orientation_is_redundant(mask: List[List[int]], orientation: int) -> bool: + """Checks if the current rotated figure is the same as a previous rotation.""" + size_i: int = get_width(mask, orientation) + size_j: int = get_height(mask, orientation) + for o in range(orientation): + if size_i != get_width(mask, o): + continue + if size_j != get_height(mask, o): + continue + + is_the_same: bool = True + for k in range(size_i): + if not is_the_same: + break + for l in range(size_j): + if not is_the_same: + break + if is_one(mask, k, l, orientation) != is_one(mask, k, l, o): + is_the_same = False + if is_the_same: + return True + return False + + +def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None: + """Solves the pentominoes problem.""" + box_width = len(pieces) + box_height = 5 + + model = cp_model.CpModel() + position_to_variables: List[List[List[cp_model.IntVar]]] = [ + [[] for _ in 
range(box_width)] for _ in range(box_height) + ] + + for name, mask in pieces.items(): + print(f"piece:{name} mask:{mask}") + all_position_variables = [] + for orientation in range(8): + if orientation_is_redundant(mask, orientation): + continue + piece_width = get_width(mask, orientation) + piece_height = get_height(mask, orientation) + for i in range(box_width - piece_width + 1): + for j in range(box_height - piece_height + 1): + v = model.new_bool_var(name) + all_position_variables.append(v) + for k in range(piece_width): + for l in range(piece_height): + if is_one(mask, k, l, orientation): + position_to_variables[j + l][i + k].append(v) + + # Only one combination is selected. + model.add_exactly_one(all_position_variables) + print(f" {len(all_position_variables)} possible placement") + + for one_column in position_to_variables: + for all_pieces_in_one_position in one_column: + model.add_exactly_one(all_pieces_in_one_position) + + # Solve the model. + solver = cp_model.CpSolver() + if _PARAMS.value: + text_format.Parse(_PARAMS.value, solver.parameters) + status = solver.solve(model) + + # Print the solution. + if status == cp_model.OPTIMAL: + for y in range(box_height): + line = "" + for x in range(box_width): + for v in position_to_variables[y][x]: + if solver.BooleanValue(v): + line += v.name + " " + break + print(line) + + +def main(argv: Sequence[str]) -> None: + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + # Pieces are stored in a matrix. 
mask[height][width] + pieces: Dict[str, List[List[int]]] = { + "F": [[0, 1, 1], [1, 1, 0], [0, 1, 0]], + "I": [[1, 1, 1, 1, 1]], + "L": [[1, 1, 1, 1], [1, 0, 0, 0]], + "N": [[1, 1, 1, 0], [0, 0, 1, 1]], + "P": [[1, 1, 1], [1, 1, 0]], + "T": [[1, 1, 1], [0, 1, 0], [0, 1, 0]], + "U": [[1, 0, 1], [1, 1, 1]], + "V": [[1, 0, 0], [1, 0, 0], [1, 1, 1]], + "W": [[1, 0, 0], [1, 1, 0], [0, 1, 1]], + "X": [[0, 1, 0], [1, 1, 1], [0, 1, 0]], + "Y": [[1, 1, 1, 1], [0, 1, 0, 0]], + "Z": [[1, 1, 0], [0, 1, 0], [0, 1, 1]], + } + generate_and_solve_problem(pieces) + + +if __name__ == "__main__": + app.run(main) From 02a53ee8d01b007801557016eab177f3f6d7accb Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 22 Mar 2024 14:42:52 +0100 Subject: [PATCH 010/392] selecting a subset of pieces --- examples/python/pentominoes_sat.py | 34 +++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/examples/python/pentominoes_sat.py b/examples/python/pentominoes_sat.py index 720d2bee2c..cb3b9443d0 100644 --- a/examples/python/pentominoes_sat.py +++ b/examples/python/pentominoes_sat.py @@ -34,12 +34,27 @@ from ortools.sat.python import cp_model _PARAMS = flags.DEFINE_string( "params", - "num_search_workers:16,log_search_progress:true,max_time_in_seconds:45", + "num_search_workers:16,log_search_progress:false,max_time_in_seconds:45", "Sat solver parameters.", ) +_PIECES = flags.DEFINE_string( + "pieces", "FILNPTUVWXYZ", "The subset of pieces to consider." +) + def is_one(mask: List[List[int]], x: int, y: int, orientation: int) -> bool: + """Returns true if the oriented piece is 1 at position [i][j]. + + The 3 bits in orientation respectively mean: transposition, symmetry by + x axis, symmetry by y axis. + + Args: + mask: The shape of the piece. + x: position. + y: position. + orientation: between 0 and 7. 
+ """ if orientation & 1: tmp: int = x x = y @@ -98,7 +113,6 @@ def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None: ] for name, mask in pieces.items(): - print(f"piece:{name} mask:{mask}") all_position_variables = [] for orientation in range(8): if orientation_is_redundant(mask, orientation): @@ -116,7 +130,6 @@ def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None: # Only one combination is selected. model.add_exactly_one(all_position_variables) - print(f" {len(all_position_variables)} possible placement") for one_column in position_to_variables: for all_pieces_in_one_position in one_column: @@ -128,6 +141,11 @@ def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None: text_format.Parse(_PARAMS.value, solver.parameters) status = solver.solve(model) + print( + f"Problem {_PIECES.value} solved in {solver.wall_time}s with status" + f" {solver.status_name(status)}" + ) + # Print the solution. if status == cp_model.OPTIMAL: for y in range(box_height): @@ -135,7 +153,7 @@ def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None: for x in range(box_width): for v in position_to_variables[y][x]: if solver.BooleanValue(v): - line += v.name + " " + line += v.name break print(line) @@ -159,7 +177,13 @@ def main(argv: Sequence[str]) -> None: "Y": [[1, 1, 1, 1], [0, 1, 0, 0]], "Z": [[1, 1, 0], [0, 1, 0], [0, 1, 1]], } - generate_and_solve_problem(pieces) + selected_pieces: Dict[str, List[List[int]]] = {} + for p in _PIECES.value: + if p not in pieces: + print(f"Piece {p} not found in the list of pieces") + return + selected_pieces[p] = pieces[p] + generate_and_solve_problem(selected_pieces) if __name__ == "__main__": From 97cf1237e06d90cd3f0936a82c3ec69623a8821d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 22 Mar 2024 15:55:16 +0100 Subject: [PATCH 011/392] [CP-SAT] new sample;improve no_overlap_2d code --- ortools/sat/2d_orthogonal_packing.cc | 90 ++++- 
ortools/sat/2d_orthogonal_packing.h | 9 + ortools/sat/2d_packing_brute_force.cc | 339 ++++++++++++++++++ ortools/sat/2d_packing_brute_force.h | 40 +++ ortools/sat/BUILD.bazel | 17 +- ortools/sat/cuts.h | 2 +- ortools/sat/diffn.cc | 1 + ortools/sat/diffn_util.cc | 27 +- ortools/sat/diffn_util.h | 5 + ortools/sat/docs/channeling.md | 71 ++++ ortools/sat/samples/BUILD.bazel | 2 + .../index_first_boolvar_true_sample_sat.py | 74 ++++ 12 files changed, 672 insertions(+), 5 deletions(-) create mode 100644 ortools/sat/2d_packing_brute_force.cc create mode 100644 ortools/sat/2d_packing_brute_force.h create mode 100644 ortools/sat/samples/index_first_boolvar_true_sample_sat.py diff --git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc index 71b37f4e37..7f5f88c889 100644 --- a/ortools/sat/2d_orthogonal_packing.cc +++ b/ortools/sat/2d_orthogonal_packing.cc @@ -27,6 +27,7 @@ #include "absl/random/distributions.h" #include "absl/types/span.h" #include "ortools/base/logging.h" +#include "ortools/sat/2d_packing_brute_force.h" #include "ortools/sat/integer.h" #include "ortools/sat/util.h" #include "ortools/util/bitset.h" @@ -52,6 +53,14 @@ OrthogonalPackingInfeasibilityDetector:: num_conflicts_two_items_}); stats.push_back({"OrthogonalPackingInfeasibilityDetector/no_energy_conflict", num_scheduling_possible_}); + stats.push_back({"OrthogonalPackingInfeasibilityDetector/brute_force_calls", + num_brute_force_calls_}); + stats.push_back( + {"OrthogonalPackingInfeasibilityDetector/brute_force_conflicts", + num_brute_force_conflicts_}); + stats.push_back( + {"OrthogonalPackingInfeasibilityDetector/brute_force_relaxations", + num_brute_force_relaxation_}); shared_stats_->AddStats(stats); } @@ -550,6 +559,55 @@ OrthogonalPackingInfeasibilityDetector::CheckFeasibilityWithDualFunction2( return best_result; } +bool OrthogonalPackingInfeasibilityDetector::RelaxConflictWithBruteForce( + OrthogonalPackingResult& result, + std::pair bounding_box_size) { + const int 
num_items_originally = + result.items_participating_on_conflict_.size(); + std::vector sizes_x; + std::vector sizes_y; + std::vector indexes; + std::vector to_be_removed(num_items_originally, false); + + sizes_x.reserve(num_items_originally - 1); + sizes_y.reserve(num_items_originally - 1); + for (int i = 0; i < num_items_originally; i++) { + sizes_x.clear(); + sizes_y.clear(); + // Look for a conflict using all non-removed items but the i-th one. + for (int j = 0; j < num_items_originally; j++) { + if (i == j || to_be_removed[j]) { + continue; + } + sizes_x.push_back(result.items_participating_on_conflict_[j].size_x); + sizes_y.push_back(result.items_participating_on_conflict_[j].size_y); + } + const auto solution = + BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size); + if (solution.empty()) { + // We still have a conflict if we remove the i-th item! + to_be_removed[i] = true; + } + } + if (!std::any_of(to_be_removed.begin(), to_be_removed.end(), + [](bool b) { return b; })) { + return false; + } + OrthogonalPackingResult original = result; + result.slack_ = 0; + result.conflict_type_ = OrthogonalPackingResult::ConflictType::BRUTE_FORCE; + result.result_ = OrthogonalPackingResult::Status::INFEASIBLE; + result.items_participating_on_conflict_.clear(); + for (int i = 0; i < num_items_originally; i++) { + if (to_be_removed[i]) { + continue; + } + result.items_participating_on_conflict_.push_back( + original.items_participating_on_conflict_[i]); + } + return true; +} + OrthogonalPackingResult OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl( absl::Span sizes_x, @@ -687,6 +745,7 @@ OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl( return result; } + bool found_scheduling_solution = false; if (options.use_dff_f2) { // Checking for conflicts using f_2 is expensive, so first try a quick // algorithm to check if there is no conflict to be found. 
See the comments @@ -701,9 +760,11 @@ OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl( scheduling_profile_, new_scheduling_profile_)) { num_scheduling_possible_++; CHECK(result.result_ != OrthogonalPackingResult::Status::INFEASIBLE); - return result; + found_scheduling_solution = true; } + } + if (!found_scheduling_solution && options.use_dff_f2) { // We only check for conflicts applying this DFF on heights and widths, but // not on both, which would be too expensive if done naively. auto conflict = CheckFeasibilityWithDualFunction2( @@ -730,6 +791,30 @@ OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl( } } + if (result.result_ == OrthogonalPackingResult::Status::UNKNOWN && + num_items <= options.brute_force_threshold) { + num_brute_force_calls_++; + auto solution = + BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size); + if (solution.empty()) { + result.conflict_type_ = ConflictType::BRUTE_FORCE; + result.result_ = OrthogonalPackingResult::Status::INFEASIBLE; + result.items_participating_on_conflict_.resize(num_items); + for (int i = 0; i < num_items; i++) { + result.items_participating_on_conflict_[i] = make_item(i); + } + } else { + result.result_ = OrthogonalPackingResult::Status::FEASIBLE; + } + } + + if (result.result_ == OrthogonalPackingResult::Status::INFEASIBLE && + result.items_participating_on_conflict_.size() <= + options.brute_force_threshold) { + num_brute_force_relaxation_ += + RelaxConflictWithBruteForce(result, bounding_box_size); + } + return result; } @@ -760,6 +845,9 @@ OrthogonalPackingResult OrthogonalPackingInfeasibilityDetector::TestFeasibility( // The total area of the items was larger than the area of the box. 
num_trivial_conflicts_++; break; + case ConflictType::BRUTE_FORCE: + num_brute_force_conflicts_++; + break; case ConflictType::NO_CONFLICT: LOG(FATAL) << "Should never happen"; break; diff --git a/ortools/sat/2d_orthogonal_packing.h b/ortools/sat/2d_orthogonal_packing.h index d777014767..40e296e0b0 100644 --- a/ortools/sat/2d_orthogonal_packing.h +++ b/ortools/sat/2d_orthogonal_packing.h @@ -33,6 +33,7 @@ struct OrthogonalPackingOptions { bool use_pairwise = true; bool use_dff_f0 = true; bool use_dff_f2 = true; + int brute_force_threshold = 6; int dff2_max_number_of_parameters_to_check = std::numeric_limits::max(); }; @@ -119,6 +120,7 @@ class OrthogonalPackingResult { PAIRWISE, DFF_F0, DFF_F2, + BRUTE_FORCE, }; Status result_; @@ -144,6 +146,10 @@ class OrthogonalPackingInfeasibilityDetector { const OrthogonalPackingOptions& options = OrthogonalPackingOptions()); private: + bool RelaxConflictWithBruteForce( + OrthogonalPackingResult& result, + std::pair bounding_box_size); + OrthogonalPackingResult TestFeasibilityImpl( absl::Span sizes_x, absl::Span sizes_y, @@ -190,6 +196,9 @@ class OrthogonalPackingInfeasibilityDetector { int64_t num_conflicts_dff2_ = 0; int64_t num_conflicts_dff0_ = 0; int64_t num_scheduling_possible_ = 0; + int64_t num_brute_force_calls_ = 0; + int64_t num_brute_force_conflicts_ = 0; + int64_t num_brute_force_relaxation_ = 0; absl::BitGenRef random_; SharedStatistics* shared_stats_; diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc new file mode 100644 index 0000000000..542fc2a5e4 --- /dev/null +++ b/ortools/sat/2d_packing_brute_force.cc @@ -0,0 +1,339 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/2d_packing_brute_force.h" + +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "absl/types/span.h" +#include "ortools/base/logging.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" + +namespace operations_research { +namespace sat { + +namespace { + +enum class RectangleRelationship { + TOUCHING_NEITHER_LEFT_OR_BOTTOM, + TOUCHING_BOTTOM, + TOUCHING_LEFT, + OVERLAP, +}; + +// TODO(user): write faster and less hacky implementation +RectangleRelationship GetRectangleRelationship(const Rectangle& rectangle, + const Rectangle& other) { + if (!rectangle.IsDisjoint(other)) { + return RectangleRelationship::OVERLAP; + } + const Rectangle item_position_left = {.x_min = rectangle.x_min - 1, + .x_max = rectangle.x_max - 1, + .y_min = rectangle.y_min, + .y_max = rectangle.y_max}; + const Rectangle item_position_bottom = {.x_min = rectangle.x_min, + .x_max = rectangle.x_max, + .y_min = rectangle.y_min - 1, + .y_max = rectangle.y_max - 1}; + if (!item_position_left.IsDisjoint(other)) { + return RectangleRelationship::TOUCHING_LEFT; + } + if (!item_position_bottom.IsDisjoint(other)) { + return RectangleRelationship::TOUCHING_BOTTOM; + } + return RectangleRelationship::TOUCHING_NEITHER_LEFT_OR_BOTTOM; +} + +bool ShouldPlaceItemAtPosition( + int i, IntegerValue x, IntegerValue y, + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size, + absl::InlinedVector& item_positions, + absl::InlinedVector& placed_item_indexes) { + const int num_items = 
sizes_x.size(); + const Rectangle item_position = { + .x_min = x, .x_max = x + sizes_x[i], .y_min = y, .y_max = y + sizes_y[i]}; + + // Check if it fits in the BB. + if (item_position.x_max > bounding_box_size.first || + item_position.y_max > bounding_box_size.second) { + return false; + } + + // Break symmetry: force 0th item to be in the bottom left quarter. + if (i == 0 && + (2 * item_position.x_min > bounding_box_size.first - sizes_x[i] || + 2 * item_position.y_min > bounding_box_size.second - sizes_y[i])) { + return false; + } + + // Check if it is conflicting with another item. + bool is_conflicting_left = x == 0; + bool is_conflicting_bottom = y == 0; + for (int j = 0; j < num_items; ++j) { + if (i != j && placed_item_indexes[j]) { + const RectangleRelationship pos = + GetRectangleRelationship(item_position, item_positions[j]); + if (pos == RectangleRelationship::OVERLAP) { + return false; + } + is_conflicting_left = + is_conflicting_left || pos == RectangleRelationship::TOUCHING_LEFT; + is_conflicting_bottom = is_conflicting_bottom || + pos == RectangleRelationship::TOUCHING_BOTTOM; + } + } + + // Finally, check if it touching something both on the bottom and to the left. + if (!is_conflicting_left || !is_conflicting_bottom) { + return false; + } + return true; +} + +// TODO(user): try the graph-based algorithm by S. Fekete, J. Shepers, and +// J. Van Der Ween, https://arxiv.org/abs/cs/0604045. 
+bool BruteForceOrthogonalPackingImpl( + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size, + IntegerValue smallest_x, IntegerValue smallest_y, + absl::InlinedVector& item_positions, + absl::InlinedVector& placed_item_indexes, + const absl::InlinedVector< + absl::InlinedVector, 16>, 16>& + potential_item_positions) { + const auto add_position_if_valid = + [&item_positions, bounding_box_size, &sizes_x, &sizes_y, + &placed_item_indexes]( + absl::InlinedVector, 16>& + positions, + int i, IntegerValue x, IntegerValue y) { + if (ShouldPlaceItemAtPosition(i, x, y, sizes_x, sizes_y, + bounding_box_size, item_positions, + placed_item_indexes)) { + positions.push_back({x, y}); + } + }; + + const int num_items = sizes_x.size(); + bool has_unplaced_item = false; + for (int i = 0; i < num_items; ++i) { + if (placed_item_indexes[i]) { + continue; + } + if (potential_item_positions[i].empty()) { + return false; + } + + has_unplaced_item = true; + placed_item_indexes[i] = true; + for (std::pair potential_position : + potential_item_positions[i]) { + // Place the item on its candidate position. + item_positions[i] = {.x_min = potential_position.first, + .x_max = potential_position.first + sizes_x[i], + .y_min = potential_position.second, + .y_max = potential_position.second + sizes_y[i]}; + const Rectangle& item_position = item_positions[i]; + + // Now the hard part of the algorithm: create the new "potential + // positions" vector after placing this item. Describing the actual set of + // acceptable places to put consider for the next item in the search would + // be pretty complex. 
For example: + // +----------------------------+ + // | | + // |x | + // |--------+ | + // |88888888| | + // |88888888| | + // |--------+ | + // |####| | + // |####|x x | + // |####| +------+ | + // |####| |......| | + // |####| |......| | + // |####| |......| | + // |####|x x |......| | + // |####+---------+......| | + // |####|OOOOOOOOO|......| | + // |####|OOOOOOOOO|......| | + // |####|OOOOOOOOO|......|x | + // +----+---------+------+------+ + // + // To make things simpler, we just consider: + // - all previous positions if they didn't got invalid due to the new + // item; + // - new position are derived getting the right-top most corner of the + // added item and connecting it to the bottom and the left with a line. + // New potential positions are the intersection of this line with either + // the current items or the box. For example, if we add a box to the + // example above (representing the two lines by '*'): + // +----------------------------+ + // | | + // | | + // |--------+ | + // |88888888| | + // |88888888| | + // |--------+ | + // |####| | + // |####| | + // |####| +------+ | + // |x###|x |......|x | + // |************************** | + // |####| |......|@@@* | + // |####| |......|@@@* | + // |####+---------+......|@@@* | + // |####|OOOOOOOOO|......|@@@* | + // |####|OOOOOOOOO|......|@@@* | + // |####|OOOOOOOOO|......|@@@*x | + // +----+---------+------+------+ + // + // This method finds potential locations that are not useful for any item, + // but we will detect that by testing each item one by one. + absl::InlinedVector< + absl::InlinedVector, 16>, 16> + new_potential_positions(num_items); + for (int k = 0; k < num_items; ++k) { + if (k == i || !placed_item_indexes[k]) { + continue; + } + + bool add_below = + // We only add points below this one... + item_positions[k].y_max <= item_position.y_min && + // ...and where we can fit at least the smallest element. 
+ item_position.x_max + smallest_x <= bounding_box_size.first && + item_positions[k].y_max + smallest_y <= bounding_box_size.second; + bool add_left = + item_positions[k].x_max <= item_position.x_min && + item_positions[k].x_max + smallest_x <= bounding_box_size.first && + item_position.y_max + smallest_y <= bounding_box_size.second; + for (int j = 0; j < num_items; ++j) { + if (k == j || placed_item_indexes[j]) { + continue; + } + if (add_below) { + add_position_if_valid(new_potential_positions[j], j, + item_position.x_max, item_positions[k].y_max); + } + if (add_left) { + add_position_if_valid(new_potential_positions[j], j, + item_positions[k].x_max, item_position.y_max); + } + } + } + bool is_unfeasible = false; + for (int j = 0; j < num_items; ++j) { + // No positions to attribute to the item we just placed. + if (i == j || placed_item_indexes[j]) { + continue; + } + // First copy previously valid positions that remain valid. + for (const std::pair& original_position : + potential_item_positions[j]) { + const Rectangle item_in_pos = { + .x_min = original_position.first, + .x_max = original_position.first + sizes_x[j], + .y_min = original_position.second, + .y_max = original_position.second + sizes_y[j]}; + + if (!item_in_pos.IsDisjoint(item_position)) { + // That was a valid position for item j, but now it is in conflict + // with newly added item i. + continue; + } + new_potential_positions[j].push_back(original_position); + } + add_position_if_valid(new_potential_positions[j], j, + item_positions[i].x_max, 0); + add_position_if_valid(new_potential_positions[j], j, 0, + item_positions[i].y_max); + if (new_potential_positions[j].empty()) { + // After placing the item i, there is no valid place to choose for the + // item j. We must pick another placement for i. 
+ is_unfeasible = true; + break; + } + } + if (is_unfeasible) { + continue; + } + if (BruteForceOrthogonalPackingImpl( + sizes_x, sizes_y, bounding_box_size, smallest_x, smallest_y, + item_positions, placed_item_indexes, new_potential_positions)) { + return true; + } + } + // Placing this item at the current bottom-left positions level failed. + // Restore placed_item_indexes to its original value and try another one. + placed_item_indexes[i] = false; + } + return !has_unplaced_item; +} + +} // namespace + +std::vector BruteForceOrthogonalPacking( + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size) { + IntegerValue smallest_x = std::numeric_limits::max(); + IntegerValue smallest_y = std::numeric_limits::max(); + int num_items = sizes_x.size(); + absl::InlinedVector item_index_sorted_by_area_desc(num_items); + absl::InlinedVector< + absl::InlinedVector, 16>, 16> + potential_item_positions(num_items); + for (int i = 0; i < num_items; ++i) { + smallest_x = std::min(smallest_x, sizes_x[i]); + smallest_y = std::min(smallest_y, sizes_y[i]); + item_index_sorted_by_area_desc[i] = i; + potential_item_positions[i].push_back({0, 0}); + } + std::sort(item_index_sorted_by_area_desc.begin(), + item_index_sorted_by_area_desc.end(), + [sizes_x, sizes_y](int a, int b) { + return sizes_x[a] * sizes_y[a] > sizes_x[b] * sizes_y[b]; + }); + absl::InlinedVector new_sizes_x(num_items); + absl::InlinedVector new_sizes_y(num_items); + for (int i = 0; i < num_items; ++i) { + new_sizes_x[i] = sizes_x[item_index_sorted_by_area_desc[i]]; + new_sizes_y[i] = sizes_y[item_index_sorted_by_area_desc[i]]; + } + absl::InlinedVector item_positions(num_items); + absl::InlinedVector placed_item_indexes(num_items); + const bool found_solution = BruteForceOrthogonalPackingImpl( + new_sizes_x, new_sizes_y, bounding_box_size, smallest_x, smallest_y, + item_positions, placed_item_indexes, potential_item_positions); + if (!found_solution) { + return {}; + } + std::vector 
result(num_items); + for (int i = 0; i < num_items; ++i) { + result[item_index_sorted_by_area_desc[i]] = item_positions[i]; + } + VLOG_EVERY_N_SEC(2, 3) << "Found a feasible packing by brute force. Dot:\n " + << RenderDot(bounding_box_size, result); + return result; +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/2d_packing_brute_force.h b/ortools/sat/2d_packing_brute_force.h new file mode 100644 index 0000000000..a5ef0ad5f0 --- /dev/null +++ b/ortools/sat/2d_packing_brute_force.h @@ -0,0 +1,40 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_SAT_2D_PACKING_BRUTE_FORCE_H_ +#define OR_TOOLS_SAT_2D_PACKING_BRUTE_FORCE_H_ + +#include +#include + +#include "absl/types/span.h" +#include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" + +namespace operations_research { +namespace sat { + +// Try to solve the Orthogonal Packing Problem by enumeration of all possible +// solutions. Returns an empty vector if the problem is infeasible, otherwise +// returns the items in the positions they appear in the solution in the same +// order as the input arguments. +// Warning: do not call this with too many item as it will run forever. 
+std::vector BruteForceOrthogonalPacking( + absl::Span sizes_x, + absl::Span sizes_y, + std::pair bounding_box_size); + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_2D_PACKING_BRUTE_FORCE_H_ diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 2a03e3e838..4c35261e21 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1818,7 +1818,6 @@ cc_library( "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:bit_gen_ref", - "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", ], @@ -1829,6 +1828,7 @@ cc_library( srcs = ["2d_orthogonal_packing.cc"], hdrs = ["2d_orthogonal_packing.h"], deps = [ + ":2d_packing_brute_force", ":integer", ":synchronization", ":util", @@ -1842,6 +1842,21 @@ cc_library( ], ) +cc_library( + name = "2d_packing_brute_force", + srcs = ["2d_packing_brute_force.cc"], + hdrs = ["2d_packing_brute_force.h"], + deps = [ + ":diffn_util", + ":integer", + "//ortools/util:bitset", + "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "2d_orthogonal_packing_testing", testonly = 1, diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index d6fe1772e2..c7b6639ffd 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -653,7 +653,7 @@ class BoolRLTCutHelper { explicit BoolRLTCutHelper(Model* model) : product_detector_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - lp_values_(model->GetOrCreate()){}; + lp_values_(model->GetOrCreate()) {}; ~BoolRLTCutHelper(); // Precompute data according to the current lp relaxation. 
diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 19b927b6f0..2ffc5028da 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -428,6 +428,7 @@ NonOverlappingRectanglesEnergyPropagator::FindConflict( .use_pairwise = true, .use_dff_f0 = true, .use_dff_f2 = true, + .brute_force_threshold = 6, .dff2_max_number_of_parameters_to_check = 100}); if (opp_result.GetResult() == OrthogonalPackingResult::Status::INFEASIBLE && (best_conflict.opp_result.GetResult() != diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index b3c7b196c3..6a44c7d7f6 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -21,8 +21,9 @@ #include #include #include -#include #include +#include +#include #include #include #include @@ -31,7 +32,6 @@ #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/random/bit_gen_ref.h" -#include "absl/random/distributions.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" @@ -1490,5 +1490,28 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( return result; } +std::string RenderDot(std::pair bb_sizes, + absl::Span solution) { + const std::vector colors = {"red", "green", "blue", + "cyan", "yellow", "purple"}; + std::stringstream ss; + ss << "digraph {\n"; + ss << " graph [ bgcolor=lightgray width=" << 2 * bb_sizes.first + << " height=" << 2 * bb_sizes.second << "]\n"; + ss << " node [style=filled]\n"; + ss << " bb [fillcolor=\"grey\" pos=\"" << bb_sizes.first << "," + << bb_sizes.second << "!\" shape=box width=" << 2 * bb_sizes.first + << " height=" << 2 * bb_sizes.second << "]\n"; + for (int i = 0; i < solution.size(); ++i) { + ss << " " << i << " [fillcolor=\"" << colors[i % colors.size()] + << "\" pos=\"" << 2 * solution[i].x_min + solution[i].SizeX() << "," + << 2 * solution[i].y_min + solution[i].SizeY() + << "!\" shape=box width=" << 2 * solution[i].SizeX() + << " height=" << 2 * solution[i].SizeY() << "]\n"; + } + 
ss << "}\n"; + return ss.str(); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 9b6c351347..685cb0ff99 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -588,6 +588,11 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( const std::vector& intervals, absl::BitGenRef random, double temperature, double candidate_energy_usage_factor); +// Render a packing solution as a Graphviz dot file. Only works in the "neato" +// or "fdp" Graphviz backends. +std::string RenderDot(std::pair bb_sizes, + absl::Span solution); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index 756b08f58c..74de0de410 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -385,6 +385,77 @@ x=9 y=1 b=1 x=10 y=0 b=1 ``` +## Computing the index of the first Boolean variable set to true + +A common request is to compute the index of the first Boolean variable set to +true. It can be encoded using a min_equality constraint. The index will be set +to the number of Boolean variables if they are all false. + +### Python code + +```python +#!/usr/bin/env python3 +"""Compute the index of the first Boolean variable set to true.""" + +from ortools.sat.python import cp_model + + +class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback): + """Print intermediate solutions.""" + + def __init__(self, index: cp_model.IntVar, boolvars: list[cp_model.IntVar]): + cp_model.CpSolverSolutionCallback.__init__(self) + self.__index = index + self.__boolvars = boolvars + + def on_solution_callback(self) -> None: + line = "" + for v in self.__boolvars: + line += f"{self.value(v)}" + line += f" -> {self.value(self.__index)}" + print(line) + + +def index_first_solution_true_sample_sat(): + """Compute the index of the first Boolean variable set to true.""" + + # Model. 
+ model = cp_model.CpModel() + + # Variables + num_bool_vars = 5 + bool_vars = [model.new_bool_var(f"{i}") for i in range(num_bool_vars)] + index = model.new_int_var(0, num_bool_vars, "index") + + # Channeling between the index and the Boolean variables. + model.add_min_equality( + index, + [ + num_bool_vars - bool_vars[i] * (num_bool_vars - i) + for i in range(num_bool_vars) + ], + ) + + # Flip bool_vars in increasing order. + model.add_decision_strategy( + bool_vars, cp_model.CHOOSE_FIRST, cp_model.SELECT_MIN_VALUE + ) + + # Create a solver and solve with a fixed search. + solver = cp_model.CpSolver() + + # Force the solver to follow the decision strategy exactly. + solver.parameters.search_branching = cp_model.FIXED_SEARCH + # Enumerate all solutions. + solver.parameters.enumerate_all_solutions = True + + # Search and print out all solutions. + solution_printer = VarArraySolutionPrinter(index, bool_vars) + solver.solve(model, solution_printer) + + +index_first_solution_true_sample_sat() +``` ## A bin-packing problem diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index 891e34c0ba..b064e3daa6 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -43,6 +43,8 @@ code_sample_py(name = "cumulative_variable_profile_sample_sat") code_sample_cc_py(name = "earliness_tardiness_cost_sample_sat") +code_sample_py(name = "index_first_boolvar_true_sample_sat") + code_sample_cc_py(name = "interval_sample_sat") code_sample_cc_py(name = "minimal_jobshop_sat") diff --git a/ortools/sat/samples/index_first_boolvar_true_sample_sat.py b/ortools/sat/samples/index_first_boolvar_true_sample_sat.py new file mode 100644 index 0000000000..7d70de0528 --- /dev/null +++ b/ortools/sat/samples/index_first_boolvar_true_sample_sat.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compute the index of the first Boolean variable set to true.""" + +from ortools.sat.python import cp_model + + +class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback): + """Print intermediate solutions.""" + + def __init__(self, index: cp_model.IntVar, boolvars: list[cp_model.IntVar]): + cp_model.CpSolverSolutionCallback.__init__(self) + self.__index = index + self.__boolvars = boolvars + + def on_solution_callback(self) -> None: + line = "" + for v in self.__boolvars: + line += f"{self.value(v)}" + line += f" -> {self.value(self.__index)}" + print(line) + + +def index_first_solution_true_sample_sat(): + """Compute the index of the first Boolean variable set to true.""" + + # Model. + model = cp_model.CpModel() + + # Variables + num_bool_vars = 5 + bool_vars = [model.new_bool_var(f"{i}") for i in range(num_bool_vars)] + index = model.new_int_var(0, num_bool_vars, "index") + + # Channeling between the index and the Boolean variables. + model.add_min_equality( + index, + [ + num_bool_vars - bool_vars[i] * (num_bool_vars - i) + for i in range(num_bool_vars) + ], + ) + + # Flip bool_vars in increasing order. + model.add_decision_strategy( + bool_vars, cp_model.CHOOSE_FIRST, cp_model.SELECT_MIN_VALUE + ) + + # Create a solver and solve with a fixed search. + solver = cp_model.CpSolver() + + # Force the solver to follow the decision strategy exactly. + solver.parameters.search_branching = cp_model.FIXED_SEARCH + # Enumerate all solutions. + solver.parameters.enumerate_all_solutions = True + + # Search and print out all solutions. 
+ solution_printer = VarArraySolutionPrinter(index, bool_vars) + solver.solve(model, solution_printer) + + +index_first_solution_true_sample_sat() From 1d3d09242f9360c6a05de657a7ec9f2da82975d1 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 22 Mar 2024 16:11:45 +0100 Subject: [PATCH 012/392] add output to recipe --- ortools/sat/docs/channeling.md | 37 ++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index 74de0de410..e29c647a8d 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -457,6 +457,43 @@ def index_first_solution_true_sample_sat(): index_first_solution_true_sample_sat() ``` +This displays the following: + +``` +00000 -> 5 +00001 -> 4 +00010 -> 3 +00011 -> 3 +00100 -> 2 +00101 -> 2 +00110 -> 2 +00111 -> 2 +01000 -> 1 +01001 -> 1 +01010 -> 1 +01011 -> 1 +01100 -> 1 +01101 -> 1 +01110 -> 1 +01111 -> 1 +10000 -> 0 +10001 -> 0 +10010 -> 0 +10011 -> 0 +10100 -> 0 +10101 -> 0 +10110 -> 0 +10111 -> 0 +11000 -> 0 +11001 -> 0 +11010 -> 0 +11011 -> 0 +11100 -> 0 +11101 -> 0 +11110 -> 0 +11111 -> 0 +``` + ## A bin-packing problem As another example of a channeling constraint, consider a bin packing problem in From f88f748635377efe96a05040ef7e534fbaf86cbb Mon Sep 17 00:00:00 2001 From: Florian OMNES Date: Fri, 22 Mar 2024 15:27:40 +0100 Subject: [PATCH 013/392] Use fixtures for XPRESS tests, instead of C-style macros Instead of macros, use fixtures for XPRESS tests --- .../linear_solver/xpress_interface_test.cc | 187 ++++++++---------- 1 file changed, 77 insertions(+), 110 deletions(-) diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index c2cc86d53e..be2370eac7 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -175,12 +175,29 @@ class XPRSGetter { } }; -#define UNITTEST_INIT_MIP() \ - MPSolver 
solver("XPRESS_MIP", MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING); \ - XPRSGetter getter(&solver) -#define UNITTEST_INIT_LP() \ - MPSolver solver("XPRESS_LP", MPSolver::XPRESS_LINEAR_PROGRAMMING); \ - XPRSGetter getter(&solver) +// See +// https://github.com/google/googletest/blob/main/docs/primer.md#test-fixtures-using-the-same-data-configuration-for-multiple-tests-same-data-multiple-tests +class XpressFixture : public testing::Test { + protected: + XpressFixture(const char* solverName, MPSolver::OptimizationProblemType type) + : solver(solverName, type), getter(&solver) {} + ~XpressFixture() override = default; + MPSolver solver; + XPRSGetter getter; +}; + +class XpressFixtureLP : public XpressFixture { + public: + XpressFixtureLP() + : XpressFixture("XPRESS_LP", MPSolver::XPRESS_LINEAR_PROGRAMMING) {} +}; + +class XpressFixtureMIP : public XpressFixture { + public: + XpressFixtureMIP() + : XpressFixture("XPRESS_MIP", + MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING) {} +}; void _unittest_verify_var(XPRSGetter* getter, MPVariable* x, char type, double lb, double ub) { @@ -303,18 +320,11 @@ MyMPCallback* buildLargeMipWithCallback(MPSolver& solver, int numVars, return static_cast(mpCallback); } -TEST(XpressInterface, isMIP) { - UNITTEST_INIT_MIP(); - EXPECT_EQ(solver.IsMIP(), true); -} +TEST_F(XpressFixtureMIP, isMIP) { EXPECT_EQ(solver.IsMIP(), true); } -TEST(XpressInterface, isLP) { - UNITTEST_INIT_LP(); - EXPECT_EQ(solver.IsMIP(), false); -} +TEST_F(XpressFixtureLP, isLP) { EXPECT_EQ(solver.IsMIP(), false); } -TEST(XpressInterface, LpStartingBasis) { - UNITTEST_INIT_LP(); +TEST_F(XpressFixtureLP, LpStartingBasis) { buildLargeLp(solver, 1000); // First, we record the number of iterations without an initial basis solver.Solve(); @@ -341,8 +351,7 @@ TEST(XpressInterface, LpStartingBasis) { EXPECT_LT(iterWithBasis, 10); } -TEST(XpressInterface, LpStartingBasisNoIterationsIfBasisIsProvided) { - UNITTEST_INIT_LP(); +TEST_F(XpressFixtureLP, 
LpStartingBasisNoIterationsIfBasisIsProvided) { buildLargeLp(solver, 1000); // First, we record the number of iterations without an initial basis solver.Solve(); @@ -366,8 +375,7 @@ TEST(XpressInterface, LpStartingBasisNoIterationsIfBasisIsProvided) { EXPECT_EQ(iterWithBasis, 0); } -TEST(XpressInterface, NumVariables) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, NumVariables) { MPVariable* x1 = solver.MakeNumVar(-1., 5.1, "x1"); MPVariable* x2 = solver.MakeNumVar(3.14, 5.1, "x2"); std::vector xs; @@ -376,8 +384,7 @@ TEST(XpressInterface, NumVariables) { EXPECT_EQ(getter.getNumVariables(), 502); } -TEST(XpressInterface, NumConstraints) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, NumConstraints) { solver.MakeRowConstraint(12., 100.0); solver.MakeRowConstraint(13., 13.1); solver.MakeRowConstraint(12.1, 1000.0); @@ -385,8 +392,7 @@ TEST(XpressInterface, NumConstraints) { EXPECT_EQ(getter.getNumConstraints(), 3); } -TEST(XpressInterface, Reset) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, Reset) { solver.MakeBoolVar("x1"); solver.MakeBoolVar("x2"); solver.MakeRowConstraint(12., 100.0); @@ -398,31 +404,27 @@ TEST(XpressInterface, Reset) { EXPECT_EQ(getter.getNumVariables(), 0); } -TEST(XpressInterface, MakeIntVar) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, MakeIntVar) { int lb = 0, ub = 10; MPVariable* x = solver.MakeIntVar(lb, ub, "x"); solver.Solve(); _unittest_verify_var(&getter, x, 'I', lb, ub); } -TEST(XpressInterface, MakeNumVar) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, MakeNumVar) { double lb = 1.5, ub = 158.2; MPVariable* x = solver.MakeNumVar(lb, ub, "x"); solver.Solve(); _unittest_verify_var(&getter, x, 'C', lb, ub); } -TEST(XpressInterface, MakeBoolVar) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, MakeBoolVar) { MPVariable* x = solver.MakeBoolVar("x"); solver.Solve(); _unittest_verify_var(&getter, x, 'B', 0, 1); } -TEST(XpressInterface, MakeIntVarArray) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, 
MakeIntVarArray) { int n1 = 25, lb1 = -7, ub1 = 18; std::vector xs1; solver.MakeIntVarArray(n1, lb1, ub1, "xs1", &xs1); @@ -438,8 +440,7 @@ TEST(XpressInterface, MakeIntVarArray) { } } -TEST(XpressInterface, MakeNumVarArray) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, MakeNumVarArray) { int n1 = 1; double lb1 = 5.1, ub1 = 8.1; std::vector xs1; @@ -457,8 +458,7 @@ TEST(XpressInterface, MakeNumVarArray) { } } -TEST(XpressInterface, MakeBoolVarArray) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, MakeBoolVarArray) { double n = 43; std::vector xs; solver.MakeBoolVarArray(n, "xs", &xs); @@ -468,8 +468,7 @@ TEST(XpressInterface, MakeBoolVarArray) { } } -TEST(XpressInterface, SetVariableBounds) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetVariableBounds) { int lb1 = 3, ub1 = 4; MPVariable* x1 = solver.MakeIntVar(lb1, ub1, "x1"); double lb2 = 3.7, ub2 = 4; @@ -486,8 +485,7 @@ TEST(XpressInterface, SetVariableBounds) { _unittest_verify_var(&getter, x2, 'C', lb2, ub2); } -TEST(XpressInterface, SetVariableInteger) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetVariableInteger) { int lb = -1, ub = 7; MPVariable* x = solver.MakeIntVar(lb, ub, "x"); solver.Solve(); @@ -497,40 +495,35 @@ TEST(XpressInterface, SetVariableInteger) { _unittest_verify_var(&getter, x, 'C', lb, ub); } -TEST(XpressInterface, ConstraintL) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ConstraintL) { double lb = -solver.infinity(), ub = 10.; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); _unittest_verify_constraint(&getter, c, 'L', lb, ub); } -TEST(XpressInterface, ConstraintR) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ConstraintR) { double lb = -2, ub = -1; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); _unittest_verify_constraint(&getter, c, 'R', lb, ub); } -TEST(XpressInterface, ConstraintG) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ConstraintG) { double lb = 8.1, ub = solver.infinity(); MPConstraint* c = 
solver.MakeRowConstraint(lb, ub); solver.Solve(); _unittest_verify_constraint(&getter, c, 'G', lb, ub); } -TEST(XpressInterface, ConstraintE) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ConstraintE) { double lb = 18947.3, ub = lb; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); _unittest_verify_constraint(&getter, c, 'E', lb, ub); } -TEST(XpressInterface, SetConstraintBoundsL) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetConstraintBoundsL) { double lb = 18947.3, ub = lb; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); @@ -541,8 +534,7 @@ TEST(XpressInterface, SetConstraintBoundsL) { _unittest_verify_constraint(&getter, c, 'L', lb, ub); } -TEST(XpressInterface, SetConstraintBoundsR) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetConstraintBoundsR) { double lb = -solver.infinity(), ub = 15; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); @@ -553,8 +545,7 @@ TEST(XpressInterface, SetConstraintBoundsR) { _unittest_verify_constraint(&getter, c, 'R', lb, ub); } -TEST(XpressInterface, SetConstraintBoundsG) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetConstraintBoundsG) { double lb = 1, ub = 2; MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); @@ -565,8 +556,7 @@ TEST(XpressInterface, SetConstraintBoundsG) { _unittest_verify_constraint(&getter, c, 'G', lb, ub); } -TEST(XpressInterface, SetConstraintBoundsE) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetConstraintBoundsE) { double lb = -1, ub = solver.infinity(); MPConstraint* c = solver.MakeRowConstraint(lb, ub); solver.Solve(); @@ -577,8 +567,7 @@ TEST(XpressInterface, SetConstraintBoundsE) { _unittest_verify_constraint(&getter, c, 'E', lb, ub); } -TEST(XpressInterface, ConstraintCoef) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ConstraintCoef) { MPVariable* x1 = solver.MakeBoolVar("x1"); MPVariable* x2 = solver.MakeBoolVar("x2"); MPConstraint* c1 = solver.MakeRowConstraint(4.1, solver.infinity()); 
@@ -605,8 +594,7 @@ TEST(XpressInterface, ConstraintCoef) { EXPECT_EQ(getter.getConstraintCoef(c2->index(), x2->index()), c22); } -TEST(XpressInterface, ClearConstraint) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ClearConstraint) { MPVariable* x1 = solver.MakeBoolVar("x1"); MPVariable* x2 = solver.MakeBoolVar("x2"); MPConstraint* c1 = solver.MakeRowConstraint(4.1, solver.infinity()); @@ -630,8 +618,7 @@ TEST(XpressInterface, ClearConstraint) { EXPECT_EQ(getter.getConstraintCoef(c2->index(), x2->index()), 0); } -TEST(XpressInterface, ObjectiveCoef) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ObjectiveCoef) { MPVariable* x = solver.MakeBoolVar("x"); MPObjective* obj = solver.MutableObjective(); double coef = 3112.4; @@ -644,8 +631,7 @@ TEST(XpressInterface, ObjectiveCoef) { EXPECT_EQ(getter.getObjectiveCoef(x->index()), coef); } -TEST(XpressInterface, ObjectiveOffset) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ObjectiveOffset) { MPVariable* x = solver.MakeBoolVar("x"); MPObjective* obj = solver.MutableObjective(); double offset = 4.3; @@ -658,8 +644,7 @@ TEST(XpressInterface, ObjectiveOffset) { EXPECT_EQ(getter.getObjectiveOffset(), offset); } -TEST(XpressInterface, ClearObjective) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ClearObjective) { MPVariable* x = solver.MakeBoolVar("x"); MPObjective* obj = solver.MutableObjective(); double coef = -15.6; @@ -671,8 +656,7 @@ TEST(XpressInterface, ClearObjective) { EXPECT_EQ(getter.getObjectiveCoef(x->index()), 0); } -TEST(XpressInterface, ObjectiveSense) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, ObjectiveSense) { MPObjective* const objective = solver.MutableObjective(); objective->SetMinimization(); EXPECT_EQ(getter.getObjectiveSense(), XPRS_OBJ_MINIMIZE); @@ -680,8 +664,7 @@ TEST(XpressInterface, ObjectiveSense) { EXPECT_EQ(getter.getObjectiveSense(), XPRS_OBJ_MAXIMIZE); } -TEST(XpressInterface, interations) { - UNITTEST_INIT_LP(); +TEST_F(XpressFixtureLP, interations) { int nc = 100, 
nv = 100; std::vector cs(nc); for (int ci = 0; ci < nc; ++ci) { @@ -699,8 +682,7 @@ TEST(XpressInterface, interations) { EXPECT_GT(solver.iterations(), 0); } -TEST(XpressInterface, nodes) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, nodes) { int nc = 100, nv = 100; std::vector cs(nc); for (int ci = 0; ci < nc; ++ci) { @@ -718,13 +700,11 @@ TEST(XpressInterface, nodes) { EXPECT_GT(solver.nodes(), 0); } -TEST(XpressInterface, SolverVersion) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SolverVersion) { EXPECT_GE(solver.SolverVersion().size(), 30); } -TEST(XpressInterface, Write) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, Write) { MPVariable* x1 = solver.MakeIntVar(-1.2, 9.3, "C1"); MPVariable* x2 = solver.MakeNumVar(-1, 5.147593849384714, "C2"); MPConstraint* c1 = solver.MakeRowConstraint(-solver.infinity(), 1, "R1"); @@ -776,8 +756,7 @@ ENDATA )"); } -TEST(XpressInterface, SetPrimalTolerance) { - UNITTEST_INIT_LP(); +TEST_F(XpressFixtureLP, SetPrimalTolerance) { MPSolverParameters params; double tol = 1e-4; params.SetDoubleParam(MPSolverParameters::PRIMAL_TOLERANCE, tol); @@ -785,8 +764,7 @@ TEST(XpressInterface, SetPrimalTolerance) { EXPECT_EQ(getter.getDoubleControl(XPRS_FEASTOL), tol); } -TEST(XpressInterface, SetDualTolerance) { - UNITTEST_INIT_LP(); +TEST_F(XpressFixtureLP, SetDualTolerance) { MPSolverParameters params; double tol = 1e-2; params.SetDoubleParam(MPSolverParameters::DUAL_TOLERANCE, tol); @@ -794,8 +772,7 @@ TEST(XpressInterface, SetDualTolerance) { EXPECT_EQ(getter.getDoubleControl(XPRS_OPTIMALITYTOL), tol); } -TEST(XpressInterface, SetPresolveMode) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetPresolveMode) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::PRESOLVE, MPSolverParameters::PRESOLVE_OFF); @@ -807,8 +784,7 @@ TEST(XpressInterface, SetPresolveMode) { EXPECT_EQ(getter.getIntegerControl(XPRS_PRESOLVE), 1); } -TEST(XpressInterface, SetLpAlgorithm) { - UNITTEST_INIT_LP(); 
+TEST_F(XpressFixtureLP, SetLpAlgorithm) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::LP_ALGORITHM, MPSolverParameters::DUAL); @@ -824,8 +800,7 @@ TEST(XpressInterface, SetLpAlgorithm) { EXPECT_EQ(getter.getIntegerControl(XPRS_DEFAULTALG), 4); } -TEST(XpressInterface, SetScaling) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetScaling) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::SCALING, MPSolverParameters::SCALING_OFF); @@ -837,8 +812,7 @@ TEST(XpressInterface, SetScaling) { EXPECT_EQ(getter.getIntegerControl(XPRS_SCALING), 163); } -TEST(XpressInterface, SetRelativeMipGap) { - UNITTEST_INIT_MIP(); +TEST_F(XpressFixtureMIP, SetRelativeMipGap) { MPSolverParameters params; double relativeMipGap = 1e-3; params.SetDoubleParam(MPSolverParameters::RELATIVE_MIP_GAP, relativeMipGap); @@ -859,7 +833,8 @@ TEST(XpressInterface, setStringControls) { {"COMPUTEEXECSERVICE", XPRS_COMPUTEEXECSERVICE, "default_value"}, }; for (const auto& [paramString, control, paramValue] : params) { - UNITTEST_INIT_MIP(); + MPSolver solver("XPRESS_MIP", MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING); + XPRSGetter getter(&solver); std::string xpressParamString = paramString + " " + paramValue; solver.SetSolverSpecificParametersAsString(xpressParamString); EXPECT_EQ(paramValue, getter.getStringControl(control)); @@ -949,7 +924,8 @@ TEST(XpressInterface, setDoubleControls) { {"REPAIRINFEASTIMELIMIT", XPRS_REPAIRINFEASTIMELIMIT, 1.}, }; for (const auto& [paramString, control, paramValue] : params) { - UNITTEST_INIT_MIP(); + MPSolver solver("XPRESS_MIP", MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING); + XPRSGetter getter(&solver); std::string xpressParamString = paramString + " " + std::to_string(paramValue); solver.SetSolverSpecificParametersAsString(xpressParamString); @@ -1193,7 +1169,8 @@ TEST(XpressInterface, setIntControl) { {"FEASIBILITYJUMP", XPRS_FEASIBILITYJUMP, 1}, }; for (const auto& [paramString, control, paramValue] : params) { - 
UNITTEST_INIT_MIP(); + MPSolver solver("XPRESS_MIP", MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING); + XPRSGetter getter(&solver); std::string xpressParamString = paramString + " " + std::to_string(paramValue); solver.SetSolverSpecificParametersAsString(xpressParamString); @@ -1207,7 +1184,8 @@ TEST(XpressInterface, setInt64Control) { {"EXTRASETELEMS", XPRS_EXTRASETELEMS, 1}, }; for (const auto& [paramString, control, paramValue] : params) { - UNITTEST_INIT_MIP(); + MPSolver solver("XPRESS_MIP", MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING); + XPRSGetter getter(&solver); std::string xpressParamString = paramString + " " + std::to_string(paramValue); solver.SetSolverSpecificParametersAsString(xpressParamString); @@ -1215,9 +1193,7 @@ TEST(XpressInterface, setInt64Control) { } } -TEST(XpressInterface, SolveMIP) { - UNITTEST_INIT_MIP(); - +TEST_F(XpressFixtureMIP, SolveMIP) { // max x + 2y // st. -x + y <= 1 // 2x + 3y <= 12 @@ -1249,9 +1225,7 @@ TEST(XpressInterface, SolveMIP) { EXPECT_EQ(y->solution_value(), 2); } -TEST(XpressInterface, SolveLP) { - UNITTEST_INIT_LP(); - +TEST_F(XpressFixtureLP, SolveLP) { // max x + 2y // st. -x + y <= 1 // 2x + 3y <= 12 @@ -1297,9 +1271,7 @@ TEST(XpressInterface, SolveLP) { // Ignore this test because the random generator is different // for windows and linux. 
#elif defined(__GNUC__) -TEST(XpressInterface, SetHint) { - UNITTEST_INIT_MIP(); - +TEST_F(XpressFixtureMIP, SetHint) { // Once a solution is added to XPRESS, it is actually impossible to get it // back using the API // In this test we send the (near) optimal solution as a hint (with @@ -1327,9 +1299,7 @@ TEST(XpressInterface, SetHint) { } #endif -TEST(XpressInterface, SetCallBack) { - UNITTEST_INIT_MIP(); - +TEST_F(XpressFixtureMIP, SetCallBack) { auto myMpCallback = buildLargeMipWithCallback(solver, 30, 30); solver.Solve(); @@ -1347,19 +1317,17 @@ TEST(XpressInterface, SetCallBack) { } } -TEST(XpressInterface, SetAndUnsetCallBack) { +TEST_F(XpressFixtureMIP, SetAndUnsetCallBack) { // Test that when we unset a callback it is not called - UNITTEST_INIT_MIP(); auto myMpCallback = buildLargeMipWithCallback(solver, 100, 5); solver.SetCallback(nullptr); solver.Solve(); EXPECT_EQ(myMpCallback->getNSolutions(), 0); } -TEST(XpressInterface, SetAndResetCallBack) { +TEST_F(XpressFixtureMIP, SetAndResetCallBack) { // Test that when we set a new callback then it is called, and old one is not // called - UNITTEST_INIT_MIP(); auto oldMpCallback = buildLargeMipWithCallback(solver, 100, 5); auto newMpCallback = new MyMPCallback(&solver, false); solver.SetCallback((MPCallback*)newMpCallback); @@ -1368,9 +1336,8 @@ TEST(XpressInterface, SetAndResetCallBack) { EXPECT_GT(newMpCallback->getNSolutions(), 1); } -TEST(XpressInterface, CallbackThrowsException) { +TEST_F(XpressFixtureMIP, CallbackThrowsException) { // Test that when the callback throws an exception, it is caught and logged - UNITTEST_INIT_MIP(); auto oldMpCallback = buildLargeMipWithCallback(solver, 30, 30); auto newMpCallback = new MyMPCallback(&solver, true); solver.SetCallback((MPCallback*)newMpCallback); From 5bdcc38cbdf38d6d6295b9bbf0514739d1520d68 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:20:29 +0100 Subject: [PATCH 014/392] algorithms: backport from main --- 
ortools/algorithms/BUILD.bazel | 111 +++- ortools/algorithms/binary_search_test.cc | 1 + ortools/algorithms/knapsack_solver.cc | 4 +- .../algorithms/knapsack_solver_for_cuts.cc | 473 ------------------ ortools/algorithms/knapsack_solver_for_cuts.h | 388 -------------- .../knapsack_solver_for_cuts_test.cc | 341 ------------- .../algorithms/python/knapsack_solver_test.py | 1 + ortools/algorithms/set_cover_mip.cc | 1 + 8 files changed, 108 insertions(+), 1212 deletions(-) delete mode 100644 ortools/algorithms/knapsack_solver_for_cuts.cc delete mode 100644 ortools/algorithms/knapsack_solver_for_cuts.h delete mode 100644 ortools/algorithms/knapsack_solver_for_cuts_test.cc diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index ef7b1f6874..a2956585b7 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") package(default_visibility = ["//visibility:public"]) @@ -127,6 +127,20 @@ cc_test( ], ) +cc_library( + name = "duplicate_remover", + srcs = ["duplicate_remover.cc"], + hdrs = ["duplicate_remover.h"], + deps = [ + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/numeric:bits", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/types:span", + "@com_google_protobuf//:protobuf", + ], +) + # Hungarian algorithm cc_library( name = "hungarian", @@ -167,20 +181,45 @@ cc_library( "//conditions:default": [], }), deps = [ - "//ortools/base", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/time", "//ortools/base:stl_util", + # We don't link any 
underlying solver to let the linear_solver_knapsack + # decide what solvers to include. "//ortools/linear_solver", "//ortools/sat:cp_model", + "//ortools/sat:cp_model_cc_proto", + "//ortools/sat:cp_model_solver", "//ortools/util:bitset", "//ortools/util:time_limit", ], ) -# Weighted set covering +cc_test( + name = "knapsack_solver_test", + size = "medium", + srcs = ["knapsack_solver_test.cc"], + deps = [ + ":knapsack_solver_lib", # buildcleaner: keep + "//ortools/base", + "//ortools/base:gmock_main", + "//ortools/util:time_limit", + ], +) + +# Partitioning and splitting of vector. + +# query matching library. + +# Weighted set covering library. + proto_library( name = "set_cover_proto", srcs = ["set_cover.proto"], - deps = ["//ortools/util:int128_proto"], + deps = [ + "//ortools/util:int128_proto", + ], ) cc_proto_library( @@ -209,7 +248,6 @@ cc_library( ":set_cover_cc_proto", ":set_cover_model", "//ortools/base", - "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/types:span", @@ -258,6 +296,21 @@ cc_library( ], ) +cc_library( + name = "set_cover_reader", + srcs = ["set_cover_reader.cc"], + hdrs = ["set_cover_reader.h"], + deps = [ + ":set_cover_model", + "//ortools/base:file", + "//ortools/util:filelineiter", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:string_view", + ], +) + cc_test( name = "set_cover_test", size = "medium", @@ -291,7 +344,6 @@ cc_library( srcs = ["dynamic_partition.cc"], hdrs = ["dynamic_partition.h"], deps = [ - "//ortools/base", "//ortools/base:murmur", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", @@ -300,6 +352,19 @@ cc_library( ], ) +cc_test( + name = "dynamic_partition_test", + srcs = ["dynamic_partition_test.cc"], + deps = [ + ":dynamic_partition", + "//ortools/base:gmock_main", + "//ortools/base:stl_util", + 
"@com_google_absl//absl/memory", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + ], +) + cc_library( name = "sparse_permutation", srcs = ["sparse_permutation.cc"], @@ -307,6 +372,18 @@ cc_library( deps = [ "//ortools/base", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", + ], +) + +cc_test( + name = "sparse_permutation_test", + srcs = ["sparse_permutation_test.cc"], + deps = [ + ":sparse_permutation", + "//ortools/base:gmock_main", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/random:distributions", ], ) @@ -339,7 +416,6 @@ cc_library( ":dynamic_partition", ":dynamic_permutation", ":sparse_permutation", - "//ortools/base", "//ortools/base:dump_vars", "//ortools/base:murmur", "//ortools/graph", @@ -368,13 +444,15 @@ cc_test( ":dynamic_permutation", ":find_graph_symmetries", ":sparse_permutation", - "//ortools/base", "//ortools/base:dump_vars", "//ortools/base:file", "//ortools/base:gmock_main", "//ortools/base:map_util", "//ortools/base:path", "//ortools/graph:io", + "//ortools/graph:random_graph", + "//ortools/graph:util", + "@com_google_absl//absl/numeric:bits", "@com_google_absl//absl/random", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/status:statusor", @@ -383,3 +461,20 @@ cc_test( "@com_google_absl//absl/types:span", ], ) + +cc_library( + name = "binary_indexed_tree", + hdrs = ["binary_indexed_tree.h"], + deps = [ + "@com_google_absl//absl/log:check", + ], +) + +cc_test( + name = "binary_indexed_tree_test", + srcs = ["binary_indexed_tree_test.cc"], + deps = [ + ":binary_indexed_tree", + "//ortools/base:gmock_main", + ], +) diff --git a/ortools/algorithms/binary_search_test.cc b/ortools/algorithms/binary_search_test.cc index 6ffb2f425b..be670d2646 100644 --- a/ortools/algorithms/binary_search_test.cc +++ b/ortools/algorithms/binary_search_test.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include "absl/base/log_severity.h" 
#include "absl/numeric/int128.h" diff --git a/ortools/algorithms/knapsack_solver.cc b/ortools/algorithms/knapsack_solver.cc index 06f062f07b..bc1e4a5d11 100644 --- a/ortools/algorithms/knapsack_solver.cc +++ b/ortools/algorithms/knapsack_solver.cc @@ -1282,7 +1282,7 @@ int64_t KnapsackMIPSolver::Solve(TimeLimit* /*time_limit*/, // ----- KnapsackCpSat ----- class KnapsackCpSat : public BaseKnapsackSolver { public: - explicit KnapsackCpSat(const std::string& solver_name); + explicit KnapsackCpSat(absl::string_view solver_name); // Initializes the solver and enters the problem to be solved. void Init(const std::vector& profits, @@ -1305,7 +1305,7 @@ class KnapsackCpSat : public BaseKnapsackSolver { std::vector best_solution_; }; -KnapsackCpSat::KnapsackCpSat(const std::string& solver_name) +KnapsackCpSat::KnapsackCpSat(absl::string_view solver_name) : BaseKnapsackSolver(solver_name), profits_(), weights_(), diff --git a/ortools/algorithms/knapsack_solver_for_cuts.cc b/ortools/algorithms/knapsack_solver_for_cuts.cc deleted file mode 100644 index b9d3cfdd2a..0000000000 --- a/ortools/algorithms/knapsack_solver_for_cuts.cc +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/algorithms/knapsack_solver_for_cuts.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "absl/types/span.h" -#include "ortools/base/logging.h" - -namespace operations_research { -namespace { - -const int kNoSelection(-1); -const double kInfinity = std::numeric_limits::infinity(); - -// Comparator used to sort item in decreasing efficiency order -// (see KnapsackCapacityPropagator). -struct CompareKnapsackItemsInDecreasingEfficiencyOrder { - explicit CompareKnapsackItemsInDecreasingEfficiencyOrder(double _profit_max) - : profit_max(_profit_max) {} - bool operator()(const KnapsackItemForCutsPtr& item1, - const KnapsackItemForCutsPtr& item2) const { - return item1->GetEfficiency(profit_max) > item2->GetEfficiency(profit_max); - } - const double profit_max; -}; - -// Comparator used to sort search nodes in the priority queue in order -// to pop first the node with the highest profit upper bound -// (see KnapsackSearchNodeForCuts). When two nodes have the same upper bound, we -// prefer the one with the highest current profit. This is usually the one -// closer to a leaf. In practice, the main advantage is to have smaller path. 
-struct CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder { - bool operator()(const KnapsackSearchNodeForCuts* node_1, - const KnapsackSearchNodeForCuts* node_2) const { - const double profit_upper_bound_1 = node_1->profit_upper_bound(); - const double profit_upper_bound_2 = node_2->profit_upper_bound(); - if (profit_upper_bound_1 == profit_upper_bound_2) { - return node_1->current_profit() < node_2->current_profit(); - } - return profit_upper_bound_1 < profit_upper_bound_2; - } -}; - -using SearchQueue = std::priority_queue< - KnapsackSearchNodeForCuts*, std::vector, - CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder>; - -} // namespace - -// ----- KnapsackSearchNodeForCuts ----- -KnapsackSearchNodeForCuts::KnapsackSearchNodeForCuts( - const KnapsackSearchNodeForCuts* const parent, - const KnapsackAssignmentForCuts& assignment) - : depth_(parent == nullptr ? 0 : parent->depth() + 1), - parent_(parent), - assignment_(assignment), - current_profit_(0), - profit_upper_bound_(kInfinity), - next_item_id_(kNoSelection) {} - -// ----- KnapsackSearchPathForCuts ----- -KnapsackSearchPathForCuts::KnapsackSearchPathForCuts( - const KnapsackSearchNodeForCuts* from, const KnapsackSearchNodeForCuts* to) - : from_(from), via_(nullptr), to_(to) {} - -void KnapsackSearchPathForCuts::Init() { - const KnapsackSearchNodeForCuts* node_from = - MoveUpToDepth(from_, to_->depth()); - const KnapsackSearchNodeForCuts* node_to = MoveUpToDepth(to_, from_->depth()); - DCHECK_EQ(node_from->depth(), node_to->depth()); - - // Find common parent. 
- while (node_from != node_to) { - node_from = node_from->parent(); - node_to = node_to->parent(); - } - via_ = node_from; -} - -const KnapsackSearchNodeForCuts* MoveUpToDepth( - const KnapsackSearchNodeForCuts* node, int depth) { - while (node->depth() > depth) { - node = node->parent(); - } - return node; -} - -// ----- KnapsackStateForCuts ----- -KnapsackStateForCuts::KnapsackStateForCuts() : is_bound_(), is_in_() {} - -void KnapsackStateForCuts::Init(int number_of_items) { - is_bound_.assign(number_of_items, false); - is_in_.assign(number_of_items, false); -} - -// Returns false when the state is invalid. -bool KnapsackStateForCuts::UpdateState( - bool revert, const KnapsackAssignmentForCuts& assignment) { - if (revert) { - is_bound_[assignment.item_id] = false; - } else { - if (is_bound_[assignment.item_id] && - is_in_[assignment.item_id] != assignment.is_in) { - return false; - } - is_bound_[assignment.item_id] = true; - is_in_[assignment.item_id] = assignment.is_in; - } - return true; -} - -// ----- KnapsackPropagatorForCuts ----- -KnapsackPropagatorForCuts::KnapsackPropagatorForCuts( - const KnapsackStateForCuts* state) - : items_(), - current_profit_(0), - profit_lower_bound_(0), - profit_upper_bound_(kInfinity), - state_(state) {} - -KnapsackPropagatorForCuts::~KnapsackPropagatorForCuts() = default; - -void KnapsackPropagatorForCuts::Init(absl::Span profits, - absl::Span weights, - const double capacity) { - const int number_of_items = profits.size(); - items_.clear(); - - for (int i = 0; i < number_of_items; ++i) { - items_.emplace_back( - std::make_unique(i, weights[i], profits[i])); - } - capacity_ = capacity; - current_profit_ = 0; - profit_lower_bound_ = -kInfinity; - profit_upper_bound_ = kInfinity; - InitPropagator(); -} - -bool KnapsackPropagatorForCuts::Update( - bool revert, const KnapsackAssignmentForCuts& assignment) { - if (assignment.is_in) { - if (revert) { - current_profit_ -= items_[assignment.item_id]->profit; - consumed_capacity_ -= 
items()[assignment.item_id]->weight; - } else { - current_profit_ += items_[assignment.item_id]->profit; - consumed_capacity_ += items()[assignment.item_id]->weight; - if (consumed_capacity_ > capacity_) { - return false; - } - } - } - return true; -} - -void KnapsackPropagatorForCuts::CopyCurrentStateToSolution( - std::vector* solution) const { - DCHECK(solution != nullptr); - for (int i(0); i < items_.size(); ++i) { - const int item_id = items_[i]->id; - (*solution)[item_id] = state_->is_bound(item_id) && state_->is_in(item_id); - } - double remaining_capacity = capacity_ - consumed_capacity_; - for (const KnapsackItemForCutsPtr& item : sorted_items_) { - if (!state().is_bound(item->id)) { - if (remaining_capacity >= item->weight) { - remaining_capacity -= item->weight; - (*solution)[item->id] = true; - } else { - return; - } - } - } -} - -void KnapsackPropagatorForCuts::ComputeProfitBounds() { - set_profit_lower_bound(current_profit()); - break_item_id_ = kNoSelection; - - double remaining_capacity = capacity_ - consumed_capacity_; - int break_sorted_item_id = kNoSelection; - for (int sorted_id(0); sorted_id < sorted_items_.size(); ++sorted_id) { - if (!state().is_bound(sorted_items_[sorted_id]->id)) { - const KnapsackItemForCutsPtr& item = sorted_items_[sorted_id]; - break_item_id_ = item->id; - if (remaining_capacity >= item->weight) { - remaining_capacity -= item->weight; - set_profit_lower_bound(profit_lower_bound() + item->profit); - } else { - break_sorted_item_id = sorted_id; - break; - } - } - } - - set_profit_upper_bound(profit_lower_bound()); - // If break_sorted_item_id == kNoSelection, then all remaining items fit into - // the knapsack, and thus the lower bound on the profit equals the upper - // bound. Otherwise, we compute a tight upper bound by filling the remaining - // capacity of the knapsack with "fractional" items, in the decreasing order - // of their efficiency. 
- if (break_sorted_item_id != kNoSelection) { - const double additional_profit = - GetAdditionalProfitUpperBound(remaining_capacity, break_sorted_item_id); - set_profit_upper_bound(profit_upper_bound() + additional_profit); - } -} - -void KnapsackPropagatorForCuts::InitPropagator() { - consumed_capacity_ = 0; - break_item_id_ = kNoSelection; - sorted_items_.clear(); - sorted_items_.reserve(items().size()); - for (int i(0); i < items().size(); ++i) { - sorted_items_.emplace_back(std::make_unique( - i, items()[i]->weight, items()[i]->profit)); - } - profit_max_ = 0; - for (const KnapsackItemForCutsPtr& item : sorted_items_) { - profit_max_ = std::max(profit_max_, item->profit); - } - profit_max_ += 1.0; - CompareKnapsackItemsInDecreasingEfficiencyOrder compare_object(profit_max_); - std::sort(sorted_items_.begin(), sorted_items_.end(), compare_object); -} - -double KnapsackPropagatorForCuts::GetAdditionalProfitUpperBound( - double remaining_capacity, int break_item_id) const { - const int after_break_item_id = break_item_id + 1; - double additional_profit_when_no_break_item = 0; - if (after_break_item_id < sorted_items_.size()) { - // As items are sorted by decreasing profit / weight ratio, and the current - // weight is non-zero, the next_weight is non-zero too. - const double next_weight = sorted_items_[after_break_item_id]->weight; - const double next_profit = sorted_items_[after_break_item_id]->profit; - additional_profit_when_no_break_item = - std::max((remaining_capacity * next_profit) / next_weight, 0.0); - } - - const int before_break_item_id = break_item_id - 1; - double additional_profit_when_break_item = 0; - if (before_break_item_id >= 0) { - const double previous_weight = sorted_items_[before_break_item_id]->weight; - // Having previous_weight == 0 means the total capacity is smaller than - // the weight of the current item. In such a case the item cannot be part - // of a solution of the local one dimension problem. 
- if (previous_weight != 0) { - const double previous_profit = - sorted_items_[before_break_item_id]->profit; - const double overused_capacity = - sorted_items_[break_item_id]->weight - remaining_capacity; - const double lost_profit_from_previous_item = - (overused_capacity * previous_profit) / previous_weight; - additional_profit_when_break_item = std::max( - sorted_items_[break_item_id]->profit - lost_profit_from_previous_item, - 0.0); - } - } - - const double additional_profit = std::max( - additional_profit_when_no_break_item, additional_profit_when_break_item); - return additional_profit; -} - -// ----- KnapsackSolverForCuts ----- -KnapsackSolverForCuts::KnapsackSolverForCuts(std::string solver_name) - : propagator_(&state_), - best_solution_profit_(0), - solver_name_(std::move(solver_name)) {} - -void KnapsackSolverForCuts::Init(absl::Span profits, - absl::Span weights, - const double capacity) { - const int number_of_items(profits.size()); - state_.Init(number_of_items); - best_solution_.assign(number_of_items, false); - CHECK_EQ(number_of_items, weights.size()); - - propagator_.Init(profits, weights, capacity); -} - -void KnapsackSolverForCuts::GetLowerAndUpperBoundWhenItem(int item_id, - bool is_item_in, - double* lower_bound, - double* upper_bound) { - DCHECK(lower_bound != nullptr); - DCHECK(upper_bound != nullptr); - KnapsackAssignmentForCuts assignment(item_id, is_item_in); - const bool fail = !IncrementalUpdate(false, assignment); - if (fail) { - *lower_bound = 0; - *upper_bound = 0; - } else { - *lower_bound = propagator_.profit_lower_bound(); - *upper_bound = GetAggregatedProfitUpperBound(); - } - - const bool fail_revert = !IncrementalUpdate(true, assignment); - if (fail_revert) { - *lower_bound = 0; - *upper_bound = 0; - } -} - -double KnapsackSolverForCuts::Solve(TimeLimit* time_limit, - bool* is_solution_optimal) { - DCHECK(time_limit != nullptr); - DCHECK(is_solution_optimal != nullptr); - best_solution_profit_ = 0; - *is_solution_optimal = 
true; - - SearchQueue search_queue; - const KnapsackAssignmentForCuts assignment(kNoSelection, true); - auto root_node = - std::make_unique(nullptr, assignment); - root_node->set_current_profit(GetCurrentProfit()); - root_node->set_profit_upper_bound(GetAggregatedProfitUpperBound()); - root_node->set_next_item_id(GetNextItemId()); - search_nodes_.push_back(std::move(root_node)); - const KnapsackSearchNodeForCuts* current_node = - search_nodes_.back().get(); // Start with the root node. - - if (MakeNewNode(*current_node, false)) { - search_queue.push(search_nodes_.back().get()); - } - if (MakeNewNode(*current_node, true)) { - search_queue.push(search_nodes_.back().get()); - } - - int64_t number_of_nodes_visited = 0; - while (!search_queue.empty() && - search_queue.top()->profit_upper_bound() > best_solution_profit_) { - if (time_limit->LimitReached()) { - *is_solution_optimal = false; - break; - } - if (solution_upper_bound_threshold_ > -kInfinity && - GetAggregatedProfitUpperBound() < solution_upper_bound_threshold_) { - *is_solution_optimal = false; - break; - } - if (best_solution_profit_ > solution_lower_bound_threshold_) { - *is_solution_optimal = false; - break; - } - if (number_of_nodes_visited >= node_limit_) { - *is_solution_optimal = false; - break; - } - KnapsackSearchNodeForCuts* const node = search_queue.top(); - search_queue.pop(); - - if (node != current_node) { - KnapsackSearchPathForCuts path(current_node, node); - path.Init(); - CHECK_EQ(UpdatePropagators(path), true); - current_node = node; - } - number_of_nodes_visited++; - - if (MakeNewNode(*node, false)) { - search_queue.push(search_nodes_.back().get()); - } - if (MakeNewNode(*node, true)) { - search_queue.push(search_nodes_.back().get()); - } - } - return best_solution_profit_; -} - -// Returns false when at least one propagator fails. -bool KnapsackSolverForCuts::UpdatePropagators( - const KnapsackSearchPathForCuts& path) { - bool no_fail = true; - // Revert previous changes. 
- const KnapsackSearchNodeForCuts* node = &path.from(); - const KnapsackSearchNodeForCuts* const via = &path.via(); - while (node != via) { - no_fail = IncrementalUpdate(true, node->assignment()) && no_fail; - node = node->parent(); - } - // Apply current changes. - node = &path.to(); - while (node != via) { - no_fail = IncrementalUpdate(false, node->assignment()) && no_fail; - node = node->parent(); - } - return no_fail; -} - -double KnapsackSolverForCuts::GetAggregatedProfitUpperBound() { - propagator_.ComputeProfitBounds(); - const double propagator_upper_bound = propagator_.profit_upper_bound(); - return std::min(kInfinity, propagator_upper_bound); -} - -bool KnapsackSolverForCuts::MakeNewNode(const KnapsackSearchNodeForCuts& node, - bool is_in) { - if (node.next_item_id() == kNoSelection) { - return false; - } - KnapsackAssignmentForCuts assignment(node.next_item_id(), is_in); - KnapsackSearchNodeForCuts new_node(&node, assignment); - - KnapsackSearchPathForCuts path(&node, &new_node); - path.Init(); - const bool no_fail = UpdatePropagators(path); - if (no_fail) { - new_node.set_current_profit(GetCurrentProfit()); - new_node.set_profit_upper_bound(GetAggregatedProfitUpperBound()); - new_node.set_next_item_id(GetNextItemId()); - UpdateBestSolution(); - } - - // Revert to be able to create another node from parent. - KnapsackSearchPathForCuts revert_path(&new_node, &node); - revert_path.Init(); - UpdatePropagators(revert_path); - - if (!no_fail || new_node.profit_upper_bound() < best_solution_profit_) { - return false; - } - - // The node is relevant. 
- auto relevant_node = - std::make_unique(&node, assignment); - relevant_node->set_current_profit(new_node.current_profit()); - relevant_node->set_profit_upper_bound(new_node.profit_upper_bound()); - relevant_node->set_next_item_id(new_node.next_item_id()); - search_nodes_.push_back(std::move(relevant_node)); - - return true; -} - -bool KnapsackSolverForCuts::IncrementalUpdate( - bool revert, const KnapsackAssignmentForCuts& assignment) { - // Do not stop on a failure: To be able to be incremental on the update, - // partial solution (state) and propagators must all be in the same state. - bool no_fail = state_.UpdateState(revert, assignment); - no_fail = propagator_.Update(revert, assignment) && no_fail; - return no_fail; -} - -void KnapsackSolverForCuts::UpdateBestSolution() { - const double profit_lower_bound = propagator_.profit_lower_bound(); - - if (best_solution_profit_ < profit_lower_bound) { - best_solution_profit_ = profit_lower_bound; - propagator_.CopyCurrentStateToSolution(&best_solution_); - } -} - -} // namespace operations_research diff --git a/ortools/algorithms/knapsack_solver_for_cuts.h b/ortools/algorithms/knapsack_solver_for_cuts.h deleted file mode 100644 index 358f3725ae..0000000000 --- a/ortools/algorithms/knapsack_solver_for_cuts.h +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// This library solves 0-1 one-dimensional knapsack problems with fractional -// profits and weights using the branch and bound algorithm. Note that -// algorithms/knapsack_solver uses 'int64_t' for the profits and the weights. -// TODO(user): Merge this code with algorithms/knapsack_solver. -// -// Given n items, each with a profit and a weight and a knapsack of -// capacity c, the goal is to find a subset of the items which fits inside c -// and maximizes the total profit. -// Without loss of generality, profits and weights are assumed to be positive. -// -// From a mathematical point of view, the one-dimensional knapsack problem -// can be modeled by linear constraint: -// Sum(i:1..n)(weight_i * item_i) <= c, -// where item_i is a 0-1 integer variable. -// The goal is to maximize: Sum(i:1..n)(profit_i * item_i). -// -// Example Usage: -// std::vector profits = {0, 0.5, 0.4, 1, 1, 1.1}; -// std::vector weights = {9, 6, 2, 1.5, 1.5, 1.5}; -// KnapsackSolverForCuts solver("solver"); -// solver.Init(profits, weights, capacity); -// bool is_solution_optimal = false; -// std::unique_ptr time_limit = -// std::make_unique(time_limit_seconds); // Set the time limit. -// const double profit = solver.Solve(time_limit.get(), &is_solution_optimal); -// const int number_of_items(profits.size()); -// for (int item_id(0); item_id < number_of_items; ++item_id) { -// solver.best_solution(item_id); // Access the solution. -// } - -#ifndef OR_TOOLS_ALGORITHMS_KNAPSACK_SOLVER_FOR_CUTS_H_ -#define OR_TOOLS_ALGORITHMS_KNAPSACK_SOLVER_FOR_CUTS_H_ - -#include -#include -#include -#include -#include - -#include "absl/memory/memory.h" -#include "absl/types/span.h" -#include "ortools/base/int_type.h" -#include "ortools/base/logging.h" -#include "ortools/util/time_limit.h" - -namespace operations_research { - -// ----- KnapsackAssignmentForCuts ----- -// KnapsackAssignmentForCuts is a small struct used to pair an item with -// its assignment. 
It is mainly used for search nodes and updates. -struct KnapsackAssignmentForCuts { - KnapsackAssignmentForCuts(int item_id, bool is_in) - : item_id(item_id), is_in(is_in) {} - - int item_id; - bool is_in; -}; - -// ----- KnapsackItemForCuts ----- -// KnapsackItemForCuts is a small struct to pair an item weight with its -// corresponding profit. -// The aim of the knapsack problem is to pack as many valuable items as -// possible. A straight forward heuristic is to take those with the greatest -// profit-per-unit-weight. This ratio is called efficiency in this -// implementation. So items will be grouped in vectors, and sorted by -// decreasing efficiency. -struct KnapsackItemForCuts { - KnapsackItemForCuts(int id, double weight, double profit) - : id(id), weight(weight), profit(profit) {} - - double GetEfficiency(double profit_max) const { - return (weight > 0) ? profit / weight : profit_max; - } - - // The 'id' field is used to retrieve the initial item in order to - // communicate with other propagators and state. - const int id; - const double weight; - const double profit; -}; -using KnapsackItemForCutsPtr = std::unique_ptr; - -// ----- KnapsackSearchNodeForCuts ----- -// KnapsackSearchNodeForCuts is a class used to describe a decision in the -// decision search tree. -// The node is defined by a pointer to the parent search node and an -// assignment (see KnapsackAssignmentForCuts). -// As the current state is not explicitly stored in a search node, one should -// go through the search tree to incrementally build a partial solution from -// a previous search node. 
-class KnapsackSearchNodeForCuts { - public: - KnapsackSearchNodeForCuts(const KnapsackSearchNodeForCuts* parent, - const KnapsackAssignmentForCuts& assignment); - - KnapsackSearchNodeForCuts(const KnapsackSearchNodeForCuts&) = delete; - KnapsackSearchNodeForCuts& operator=(const KnapsackSearchNodeForCuts&) = - delete; - - int depth() const { return depth_; } - const KnapsackSearchNodeForCuts* parent() const { return parent_; } - const KnapsackAssignmentForCuts& assignment() const { return assignment_; } - - double current_profit() const { return current_profit_; } - void set_current_profit(double profit) { current_profit_ = profit; } - - double profit_upper_bound() const { return profit_upper_bound_; } - void set_profit_upper_bound(double profit) { profit_upper_bound_ = profit; } - - int next_item_id() const { return next_item_id_; } - void set_next_item_id(int id) { next_item_id_ = id; } - - private: - // 'depth_' is used to navigate efficiently through the search tree. - int depth_; - const KnapsackSearchNodeForCuts* const parent_; - KnapsackAssignmentForCuts assignment_; - - // 'current_profit_' and 'profit_upper_bound_' fields are used to sort search - // nodes using a priority queue. That allows to pop the node with the best - // upper bound, and more importantly to stop the search when optimality is - // proved. - double current_profit_; - double profit_upper_bound_; - - // 'next_item_id_' field allows to avoid an O(number_of_items) scan to find - // next item to select. This is done for free by the upper bound computation. - int next_item_id_; -}; - -// ----- KnapsackSearchPathForCuts ----- -// KnapsackSearchPathForCuts is a small class used to represent the path between -// a node to another node in the search tree. -// As the solution state is not stored for each search node, the state should -// be rebuilt at each node. One simple solution is to apply all decisions -// between the node 'to' and the root. This can be computed in -// O(number_of_items). 
-// -// However, it is possible to achieve better average complexity. Two -// consecutively explored nodes are usually close enough (i.e., much less than -// number_of_items) to benefit from an incremental update from the node -// 'from' to the node 'to'. -// -// The 'via' field is the common parent of 'from' field and 'to' field. -// So the state can be built by reverting all decisions from 'from' to 'via' -// and then applying all decisions from 'via' to 'to'. -class KnapsackSearchPathForCuts { - public: - KnapsackSearchPathForCuts(const KnapsackSearchNodeForCuts* from, - const KnapsackSearchNodeForCuts* to); - - KnapsackSearchPathForCuts(const KnapsackSearchPathForCuts&) = delete; - KnapsackSearchPathForCuts& operator=(const KnapsackSearchPathForCuts&) = - delete; - - void Init(); - const KnapsackSearchNodeForCuts& from() const { return *from_; } - const KnapsackSearchNodeForCuts& via() const { return *via_; } - const KnapsackSearchNodeForCuts& to() const { return *to_; } - - private: - const KnapsackSearchNodeForCuts* from_; - const KnapsackSearchNodeForCuts* via_; // Computed in 'Init'. - const KnapsackSearchNodeForCuts* to_; -}; - -// From the given node, this method moves up the tree and returns the node at -// given depth. -const KnapsackSearchNodeForCuts* MoveUpToDepth( - const KnapsackSearchNodeForCuts* node, int depth); - -// ----- KnapsackStateForCuts ----- -// KnapsackStateForCuts represents a partial solution to the knapsack problem. -class KnapsackStateForCuts { - public: - KnapsackStateForCuts(); - - KnapsackStateForCuts(const KnapsackStateForCuts&) = delete; - KnapsackStateForCuts& operator=(const KnapsackStateForCuts&) = delete; - - // Initializes vectors with number_of_items set to false (i.e. not bound yet). - void Init(int number_of_items); - - // Updates the state by applying or reverting a decision. - // Returns false if fails, i.e. trying to apply an inconsistent decision - // to an already assigned item. 
- bool UpdateState(bool revert, const KnapsackAssignmentForCuts& assignment); - - int GetNumberOfItems() const { return is_bound_.size(); } - bool is_bound(int id) const { return is_bound_.at(id); } - bool is_in(int id) const { return is_in_.at(id); } - - private: - // Vectors 'is_bound_' and 'is_in_' contain a boolean value for each item. - // 'is_bound_(item_i)' is false when there is no decision for item_i yet. - // When item_i is bound, 'is_in_(item_i)' represents the presence (true) or - // the absence (false) of item_i in the current solution. - std::vector is_bound_; - std::vector is_in_; -}; - -// ----- KnapsackPropagatorForCuts ----- -// KnapsackPropagatorForCuts is used to enforce a capacity constraint. -// It is supposed to compute profit lower and upper bounds, and get the next -// item to select, it can be seen as a 0-1 Knapsack solver. The most efficient -// way to compute the upper bound is to iterate on items in -// profit-per-unit-weight decreasing order. The break item is commonly defined -// as the first item for which there is not enough remaining capacity. Selecting -// this break item as the next-item-to-assign usually gives the best results -// (see Greenberg & Hegerich). -// -// This is exactly what is implemented in this class. -// -// It is possible to compute a better profit lower bound almost for free. During -// the scan to find the break element all unbound items are added just as if -// they were part of the current solution. This is used in both -// ComputeProfitBounds() and CopyCurrentSolution(). For incrementality reasons, -// the ith item should be accessible in O(1). That's the reason why the item -// vector has to be duplicated 'sorted_items_'. 
-class KnapsackPropagatorForCuts { - public: - explicit KnapsackPropagatorForCuts(const KnapsackStateForCuts* state); - ~KnapsackPropagatorForCuts(); - - KnapsackPropagatorForCuts(const KnapsackPropagatorForCuts&) = delete; - KnapsackPropagatorForCuts& operator=(const KnapsackPropagatorForCuts&) = - delete; - - // Initializes the data structure and then calls InitPropagator. - void Init(absl::Span profits, absl::Span weights, - double capacity); - - // Updates data structure. Returns false on failure. - bool Update(bool revert, const KnapsackAssignmentForCuts& assignment); - // ComputeProfitBounds should set 'profit_lower_bound_' and - // 'profit_upper_bound_' which are constraint specific. - void ComputeProfitBounds(); - // Returns the id of next item to assign. - // Returns kNoSelection when all items are bound. - int GetNextItemId() const { return break_item_id_; } - - double current_profit() const { return current_profit_; } - double profit_lower_bound() const { return profit_lower_bound_; } - double profit_upper_bound() const { return profit_upper_bound_; } - - // Copies the current state into 'solution'. - // All unbound items are set to false (i.e. not in the knapsack). - void CopyCurrentStateToSolution(std::vector* solution) const; - - // Initializes the propagator. This method is called by Init() after filling - // the fields defined in this class. - void InitPropagator(); - - const KnapsackStateForCuts& state() const { return *state_; } - const std::vector& items() const { return items_; } - - void set_profit_lower_bound(double profit) { profit_lower_bound_ = profit; } - void set_profit_upper_bound(double profit) { profit_upper_bound_ = profit; } - - private: - // An obvious additional profit upper bound corresponds to the linear - // relaxation: remaining_capacity * efficiency of the break item. - // It is possible to do better in O(1), using Martello-Toth bound U2. - // The main idea is to enforce integrality constraint on the break item, - // i.e. 
either the break item is part of the solution, or it is not. - // So basically the linear relaxation is done on the item before the break - // item, or the one after the break item. This is what GetAdditionalProfit - // method implements. - double GetAdditionalProfitUpperBound(double remaining_capacity, - int break_item_id) const; - - double capacity_; - double consumed_capacity_; - int break_item_id_; - std::vector sorted_items_; - double profit_max_; - std::vector items_; - double current_profit_; - double profit_lower_bound_; - double profit_upper_bound_; - const KnapsackStateForCuts* const state_; -}; - -// ----- KnapsackSolverForCuts ----- -// KnapsackSolverForCuts is the one-dimensional knapsack solver class. -// In the current implementation, the next item to assign is given by the -// primary propagator. Using SetPrimaryPropagator allows changing the default -// (propagator of the first dimension). -class KnapsackSolverForCuts { - public: - explicit KnapsackSolverForCuts(std::string solver_name); - - KnapsackSolverForCuts(const KnapsackSolverForCuts&) = delete; - KnapsackSolverForCuts& operator=(const KnapsackSolverForCuts&) = delete; - - // Initializes the solver and enters the problem to be solved. - void Init(absl::Span profits, absl::Span weights, - double capacity); - int GetNumberOfItems() const { return state_.GetNumberOfItems(); } - - // Gets the lower and the upper bound when the item is in or out of the - // knapsack. To ensure objects are correctly initialized, this method should - // not be called before Init(). - void GetLowerAndUpperBoundWhenItem(int item_id, bool is_item_in, - double* lower_bound, double* upper_bound); - - // Get the best upper bound found so far. - double GetUpperBound() { return GetAggregatedProfitUpperBound(); } - - // The solver stops if a solution with profit better than - // 'solution_lower_bound_threshold' is found. 
- void set_solution_lower_bound_threshold( - const double solution_lower_bound_threshold) { - solution_lower_bound_threshold_ = solution_lower_bound_threshold; - } - - // The solver stops if the upper bound on profit drops below - // 'solution_upper_bound_threshold'. - void set_solution_upper_bound_threshold( - const double solution_upper_bound_threshold) { - solution_upper_bound_threshold_ = solution_upper_bound_threshold; - } - - // Stops the knapsack solver after processing 'node_limit' nodes. - void set_node_limit(const int64_t node_limit) { node_limit_ = node_limit; } - - // Solves the problem and returns the profit of the best solution found. - double Solve(TimeLimit* time_limit, bool* is_solution_optimal); - // Returns true if the item 'item_id' is packed in the optimal knapsack. - bool best_solution(int item_id) const { - DCHECK(item_id < best_solution_.size()); - return best_solution_[item_id]; - } - - const std::string& GetName() const { return solver_name_; } - - private: - // Updates propagator reverting/applying all decision on the path. Returns - // true if the propagation fails. Note that even if it fails, propagator - // should be updated to be in a stable state in order to stay incremental. - bool UpdatePropagators(const KnapsackSearchPathForCuts& path); - // Updates propagator reverting/applying one decision. Returns true if - // the propagation fails. Note that even if it fails, propagator should - // be updated to be in a stable state in order to stay incremental. - bool IncrementalUpdate(bool revert, - const KnapsackAssignmentForCuts& assignment); - // Updates the best solution if the current solution has a better profit. - void UpdateBestSolution(); - - // Returns true if new relevant search node was added to the nodes array. That - // means this node should be added to the search queue too. - bool MakeNewNode(const KnapsackSearchNodeForCuts& node, bool is_in); - - // Gets the aggregated (min) profit upper bound among all propagators. 
- double GetAggregatedProfitUpperBound(); - double GetCurrentProfit() const { return propagator_.current_profit(); } - int GetNextItemId() const { return propagator_.GetNextItemId(); } - - KnapsackPropagatorForCuts propagator_; - std::vector> search_nodes_; - KnapsackStateForCuts state_; - double best_solution_profit_; - std::vector best_solution_; - const std::string solver_name_; - double solution_lower_bound_threshold_ = - std::numeric_limits::infinity(); - double solution_upper_bound_threshold_ = - -std::numeric_limits::infinity(); - int64_t node_limit_ = std::numeric_limits::max(); -}; -// TODO(user) : Add reduction algorithm. - -} // namespace operations_research - -#endif // OR_TOOLS_ALGORITHMS_KNAPSACK_SOLVER_FOR_CUTS_H_ diff --git a/ortools/algorithms/knapsack_solver_for_cuts_test.cc b/ortools/algorithms/knapsack_solver_for_cuts_test.cc deleted file mode 100644 index ecf23f57d2..0000000000 --- a/ortools/algorithms/knapsack_solver_for_cuts_test.cc +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/algorithms/knapsack_solver_for_cuts.h" - -#include -#include -#include - -#include "gtest/gtest.h" - -namespace operations_research { -namespace { - -const int kInvalidSolution = -1; -bool IsSolutionValid(const std::vector& profits, - const std::vector& weights, const double capacity, - const std::vector& best_solution, - double optimal_profit) { - double remaining_capacity = capacity; - double profit = 0; - const int number_of_items(profits.size()); - for (int item_id(0); item_id < number_of_items; ++item_id) { - if (best_solution.at(item_id)) { - profit += profits[item_id]; - remaining_capacity -= weights[item_id]; - } - } - - if (remaining_capacity < 0) { - return false; - } - return profit == optimal_profit; -} - -double SolveKnapsackProblem(KnapsackSolverForCuts* solver) { - bool is_solution_optimal = false; - auto time_limit = - std::make_unique(std::numeric_limits::infinity()); - return solver->Solve(time_limit.get(), &is_solution_optimal); -} - -TEST(KnapsackSearchNodeForCutsTest, Depth) { - KnapsackAssignmentForCuts assignment(0, false); - KnapsackSearchNodeForCuts root(nullptr, assignment); - EXPECT_EQ(0, root.depth()); - - KnapsackSearchNodeForCuts node_0(&root, assignment); - EXPECT_EQ(1, node_0.depth()); - - KnapsackSearchNodeForCuts node_00(&node_0, assignment); - EXPECT_EQ(2, node_00.depth()); -} - -TEST(KnapsackSearchPathTest, MoveUpToDepth) { - KnapsackAssignmentForCuts assignment(0, false); - KnapsackSearchNodeForCuts root(nullptr, assignment); - KnapsackSearchNodeForCuts node_0(&root, assignment); - KnapsackSearchPathForCuts from_root_to_0(&root, &node_0); - const KnapsackSearchNodeForCuts* root_ptr = MoveUpToDepth(&node_0, 0); - EXPECT_EQ(&root, root_ptr); -} - -TEST(KnapsackSearchPathTest, InitAndMoveUpToDepth) { - KnapsackAssignmentForCuts assignment(0, false); - KnapsackSearchNodeForCuts root(nullptr, assignment); - KnapsackSearchNodeForCuts node_0(&root, assignment); - KnapsackSearchNodeForCuts node_00(&node_0, 
assignment); - KnapsackSearchNodeForCuts node_01(&node_0, assignment); - KnapsackSearchNodeForCuts node_001(&node_00, assignment); - KnapsackSearchNodeForCuts node_010(&node_01, assignment); - KnapsackSearchNodeForCuts node_0101(&node_010, assignment); - KnapsackSearchNodeForCuts node_01011(&node_0101, assignment); - - KnapsackSearchPathForCuts from_01011_to_001(&node_01011, &node_001); - const KnapsackSearchNodeForCuts* node_01_ptr = MoveUpToDepth(&node_01011, 2); - EXPECT_EQ(&node_01, node_01_ptr); - - from_01011_to_001.Init(); - EXPECT_EQ(&node_0, &from_01011_to_001.via()); - - KnapsackSearchPathForCuts from_001_to_01011(&node_001, &node_01011); - from_001_to_01011.Init(); - EXPECT_EQ(&from_01011_to_001.via(), &from_001_to_01011.via()); -} - -TEST(KnapsackItemForCutsTest, GetEfficiency) { - const int kId(7); - const double kWeight = 52; - const double kProfit = 130; - const double kEfficiency = 2.5; - const double kProfitMax = 1000; - const double kNullWeight = 0; - - const KnapsackItemForCuts item(kId, kWeight, kProfit); - EXPECT_EQ(kId, item.id); - EXPECT_EQ(kWeight, item.weight); - EXPECT_EQ(kProfit, item.profit); - EXPECT_EQ(kEfficiency, item.GetEfficiency(kProfitMax)); - - const KnapsackItemForCuts item2(kId, kNullWeight, kProfit); - EXPECT_EQ(kProfitMax, item2.GetEfficiency(kProfitMax)); -} - -TEST(KnapsackStateForCutsTest, Init) { - const int kNumberOfItems(12); - KnapsackStateForCuts state; - state.Init(kNumberOfItems); - for (int i(0); i < kNumberOfItems; ++i) { - EXPECT_FALSE(state.is_bound(i)); - } - EXPECT_EQ(kNumberOfItems, state.GetNumberOfItems()); -} - -TEST(KnapsackStateForCutsTest, UpdateState) { - const int kNumberOfItems(12); - KnapsackStateForCuts state; - state.Init(kNumberOfItems); - - const int item_id(7); - bool is_in = true; - KnapsackAssignmentForCuts assignment1(item_id, is_in); - bool no_fail = state.UpdateState(false, assignment1); - for (int i(0); i < kNumberOfItems; ++i) { - EXPECT_EQ(i == item_id, state.is_bound(i)); - } - 
EXPECT_EQ(is_in, state.is_in(item_id)); - EXPECT_TRUE(no_fail); - - is_in = false; - KnapsackAssignmentForCuts assignment2(item_id, is_in); - no_fail = state.UpdateState(false, assignment2); - EXPECT_TRUE(state.is_bound(item_id)); - EXPECT_FALSE(no_fail); - - no_fail = state.UpdateState(true, assignment2); - EXPECT_FALSE(state.is_bound(item_id)); - EXPECT_TRUE(no_fail); -} - -TEST(KnapsackPropagatorForCutsTest, InitAndUpdatePropagator) { - const std::vector profits = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - const std::vector weights = {1, 1, 1, 1, 1, 1, 1, 1, 1}; - ASSERT_EQ(profits.size(), weights.size()); - const int kNumItems(profits.size()); - const int kNoSelection(-1); - - KnapsackStateForCuts state; - state.Init(kNumItems); - - KnapsackPropagatorForCuts capacity_propagator(&state); - capacity_propagator.Init(profits, weights, 2); - EXPECT_EQ(kNoSelection, capacity_propagator.GetNextItemId()); - - KnapsackAssignmentForCuts assignment1(3, true); - EXPECT_TRUE(state.UpdateState(false, assignment1)); - EXPECT_TRUE(capacity_propagator.Update(false, assignment1)); - EXPECT_EQ(4, capacity_propagator.current_profit()); - capacity_propagator.ComputeProfitBounds(); - EXPECT_EQ(7, capacity_propagator.GetNextItemId()); - const double kProfit13 = profits[3] + profits[8]; - EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound()); - EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound()); - - KnapsackAssignmentForCuts assignment2(8, true); - EXPECT_TRUE(state.UpdateState(false, assignment2)); - EXPECT_TRUE(capacity_propagator.Update(false, assignment2)); - EXPECT_EQ(kProfit13, capacity_propagator.current_profit()); - capacity_propagator.ComputeProfitBounds(); - EXPECT_EQ(7, capacity_propagator.GetNextItemId()); - EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound()); - EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound()); - - KnapsackAssignmentForCuts assignment3(5, true); - EXPECT_TRUE(state.UpdateState(false, assignment3)); - 
EXPECT_FALSE(capacity_propagator.Update(false, assignment3)); - const double kProfit19 = profits[3] + profits[8] + profits[5]; - EXPECT_EQ(kProfit19, capacity_propagator.current_profit()); - - EXPECT_TRUE(state.UpdateState(true, assignment2)); - EXPECT_TRUE(capacity_propagator.Update(true, assignment2)); - const double kProfit10 = profits[3] + profits[5]; - EXPECT_EQ(kProfit10, capacity_propagator.current_profit()); - capacity_propagator.ComputeProfitBounds(); - EXPECT_EQ(8, capacity_propagator.GetNextItemId()); - EXPECT_EQ(kProfit10, capacity_propagator.profit_lower_bound()); - EXPECT_EQ(kProfit10, capacity_propagator.profit_upper_bound()); -} - -TEST(KnapsackSolverForCutsTest, SolveOneDimension) { - const std::vector profits = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - const std::vector weights = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 34; - const double kOptimalProfit = 34; - KnapsackSolverForCuts solver("solver"); - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); -} - -TEST(KnapsackSolverForCutsTest, SolveOneDimensionInfeasible) { - const std::vector profits = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - const std::vector weights = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = -1; - KnapsackSolverForCuts solver("solver"); - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - const int number_of_items(profits.size()); - std::vector best_solution(number_of_items, false); - for (int item_id(0); item_id < number_of_items; ++item_id) { - best_solution.at(item_id) = solver.best_solution(item_id); - } - EXPECT_FALSE( - IsSolutionValid(profits, weights, kCapacity, best_solution, profit)); -} - -TEST(KnapsackSolverForCutsTest, MultipleSolves) { - KnapsackSolverForCuts solver("solver"); - { - const std::vector profits = {1, 2, 3}; - const std::vector 
weights = {4, 5, 6}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 10; - const double kOptimalProfit = 4; - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); - } - { - const std::vector profits = {1, 2, 3, 7}; - const std::vector weights = {4, 5, 6, 8}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 10; - const double kOptimalProfit = 7; - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); - } - { - const std::vector profits = {1, 2}; - const std::vector weights = {4, 5}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 10; - const double kOptimalProfit = 3; - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); - } -} - -TEST(KnapsackSolverForCutsTest, SolveBigOneDimension) { - const std::vector profits = { - 360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600, - 38, 48, 147, 78, 256, 63, 17, 120, 164, 432, 35, 92, 110, - 22, 42, 50, 323, 514, 28, 87, 73, 78, 15, 26, 78, 210, - 36, 85, 189, 274, 43, 33, 10, 19, 389, 276, 312}; - const std::vector weights = { - 7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15, - 42, 9, 0, 42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56, - 7, 29, 93, 44, 71, 3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 850; - const double kOptimalProfit = 7534; - KnapsackSolverForCuts solver("solver"); - { - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); - } - { - // Solve with lower bound threshold. 
- solver.Init(profits, weights, kCapacity); - solver.set_solution_lower_bound_threshold(100); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_GT(kOptimalProfit, profit); - } - { - // Solve with upper bound threshold. - solver.Init(profits, weights, kCapacity); - solver.set_solution_upper_bound_threshold(10000); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_GT(kOptimalProfit, profit); - } - { - solver.Init(profits, weights, kCapacity); - solver.set_node_limit(1); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_GT(kOptimalProfit, profit); - } -} - -TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractionalProfits) { - const std::vector profits = {0, 0.5, 0.4, 1, 1, 1.1}; - const std::vector weights = {9, 6, 2, 1, 1, 1}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 4; - const double kOptimalProfit = 3.1; - KnapsackSolverForCuts solver("solver"); - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); -} - -TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractionalWeights) { - const std::vector profits = {0, 1, 1, 1, 1, 2}; - const std::vector weights = {9, 6, 2, 1.5, 1.5, 1.5}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 4; - const double kOptimalProfit = 3; - KnapsackSolverForCuts solver("solver"); - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - EXPECT_EQ(kOptimalProfit, profit); -} - -TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractional) { - const std::vector profits = {0, 0.5, 0.4, 1, 1, 1.1}; - const std::vector weights = {9, 6, 2, 1.5, 1.5, 1.5}; - ASSERT_EQ(profits.size(), weights.size()); - const double kCapacity = 4; - const double kOptimalProfit = 2.1; - KnapsackSolverForCuts solver("solver"); - solver.Init(profits, weights, kCapacity); - const double profit = SolveKnapsackProblem(&solver); - 
EXPECT_EQ(kOptimalProfit, profit); -} - -} // namespace -} // namespace operations_research diff --git a/ortools/algorithms/python/knapsack_solver_test.py b/ortools/algorithms/python/knapsack_solver_test.py index 39f153ec56..8809980ca3 100755 --- a/ortools/algorithms/python/knapsack_solver_test.py +++ b/ortools/algorithms/python/knapsack_solver_test.py @@ -22,6 +22,7 @@ from ortools.algorithms.python import knapsack_solver class PyWrapAlgorithmsKnapsackSolverTest(absltest.TestCase): + def RealSolve(self, profits, weights, capacities, solver_type, use_reduction): solver = knapsack_solver.KnapsackSolver(solver_type, "solver") solver.set_use_reduction(use_reduction) diff --git a/ortools/algorithms/set_cover_mip.cc b/ortools/algorithms/set_cover_mip.cc index f54a6f0c2c..371260b27e 100644 --- a/ortools/algorithms/set_cover_mip.cc +++ b/ortools/algorithms/set_cover_mip.cc @@ -14,6 +14,7 @@ #include "ortools/algorithms/set_cover_mip.h" #include +#include #include "absl/types/span.h" #include "ortools/algorithms/set_cover_invariant.h" From bd97b225bd9498a2944ed8de8ed09671543eaab9 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:21:53 +0100 Subject: [PATCH 015/392] graph: backport from main --- ortools/graph/BUILD.bazel | 133 +++--- ortools/graph/CMakeLists.txt | 1 + ortools/graph/README.md | 10 +- ortools/graph/bidirectional_dijkstra_test.cc | 6 +- ortools/graph/k_shortest_paths.h | 444 +++++++++++++++++++ ortools/graph/k_shortest_paths_test.cc | 171 +++++++ ortools/graph/linear_assignment_test.cc | 3 +- ortools/graph/max_flow_test.cc | 4 +- ortools/graph/minimum_spanning_tree_test.cc | 4 +- ortools/graph/one_tree_lower_bound.h | 6 +- ortools/graph/one_tree_lower_bound_test.cc | 2 +- ortools/graph/perfect_matching_test.cc | 2 +- ortools/graph/random_graph.cc | 173 ++++++++ ortools/graph/random_graph.h | 51 +++ ortools/graph/shortest_paths.cc | 11 +- ortools/graph/testdata/BUILD.bazel | 2 +- ortools/graph/util.h | 4 +- 17 files changed, 941 
insertions(+), 86 deletions(-) create mode 100644 ortools/graph/k_shortest_paths.h create mode 100644 ortools/graph/k_shortest_paths_test.cc create mode 100644 ortools/graph/random_graph.cc create mode 100644 ortools/graph/random_graph.h diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index e1427d5e58..67d996bbc4 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -11,7 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@rules_cc//cc:defs.bzl", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") package(default_visibility = ["//visibility:public"]) @@ -65,10 +66,12 @@ cc_library( hdrs = ["bounded_dijkstra.h"], deps = [ ":graph", - "//ortools/base", "//ortools/base:iterator_adaptors", "//ortools/base:threadpool", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) @@ -76,8 +79,9 @@ cc_library( name = "multi_dijkstra", hdrs = ["multi_dijkstra.h"], deps = [ - "//ortools/base", - "@com_google_absl//absl/container:flat_hash_set", + "//ortools/base:map_util", + "//ortools/base:types", + "@com_google_absl//absl/container:flat_hash_map", ], ) @@ -86,6 +90,7 @@ cc_library( hdrs = ["bidirectional_dijkstra.h"], deps = [ "//ortools/base", + "//ortools/base:iterator_adaptors", "//ortools/base:threadpool", "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/strings", @@ -99,7 +104,7 @@ cc_library( hdrs = ["cliques.h"], deps = [ "//ortools/base", - "//ortools/base:intops", + "//ortools/base:int_type", "//ortools/base:strong_vector", "//ortools/util:time_limit", "@com_google_absl//absl/container:flat_hash_set", @@ -112,6 +117,7 @@ cc_library( hdrs = ["hamiltonian_path.h"], deps = [ "//ortools/base", + "//ortools/base:types", "//ortools/util:bitset", 
"//ortools/util:saturated_arithmetic", "//ortools/util:vector_or_function", @@ -123,11 +129,16 @@ cc_library( hdrs = ["christofides.h"], deps = [ ":eulerian_path", + ":graph", ":minimum_spanning_tree", ":perfect_matching", "//ortools/base", + "//ortools/base:types", "//ortools/linear_solver", "//ortools/linear_solver:linear_solver_cc_proto", + "//ortools/util:saturated_arithmetic", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", ], ) @@ -144,10 +155,10 @@ cc_library( hdrs = ["minimum_spanning_tree.h"], deps = [ ":connected_components", - ":graph", - "//ortools/base", "//ortools/base:adjustable_priority_queue", + "//ortools/base:types", "//ortools/util:vector_or_function", + "@com_google_absl//absl/types:span", ], ) @@ -157,8 +168,8 @@ cc_library( deps = [ ":christofides", ":minimum_spanning_tree", - "//ortools/base", - "@com_google_absl//absl/strings", + "//ortools/base:types", + "@com_google_absl//absl/types:span", ], ) @@ -167,8 +178,10 @@ cc_library( hdrs = ["ebert_graph.h"], deps = [ "//ortools/base", + "//ortools/base:types", "//ortools/util:permutation", "//ortools/util:zvector", + "@com_google_absl//absl/strings", ], ) @@ -181,12 +194,29 @@ cc_library( ":graph", "//ortools/base", "//ortools/base:adjustable_priority_queue", - "//ortools/base:file", "//ortools/base:map_util", "//ortools/base:stl_util", "//ortools/base:threadpool", "//ortools/base:timer", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/functional:bind_front", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) + +cc_library( + name = "k_shortest_paths", + hdrs = ["k_shortest_paths.h"], + deps = [ + ":bounded_dijkstra", + ":ebert_graph", + ":shortest_paths", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) @@ -212,9 +242,12 @@ cc_library( ":graph", 
":graphs", "//ortools/base", + "//ortools/base:types", "//ortools/util:stats", "//ortools/util:zvector", "@com_google_absl//absl/memory", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", ], ) @@ -228,17 +261,15 @@ cc_test( ":graphs", ":max_flow", "//ortools/base", - "//ortools/base:gmock", - "//ortools/base:message_matchers", + "//ortools/base:gmock_main", "//ortools/base:path", - "//ortools/base:status_matchers", "//ortools/linear_solver", "//ortools/util:file_util", "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/random", "@com_google_absl//absl/strings:str_format", "@com_google_benchmark//:benchmark", - "@com_google_googletest//:gtest_main", + "@com_google_protobuf//:protobuf", ], ) @@ -254,7 +285,6 @@ cc_library( "//conditions:default": [], }), deps = [ - ":connected_components", ":ebert_graph", ":graph", ":graphs", @@ -262,9 +292,13 @@ cc_library( "//ortools/base", "//ortools/base:dump_vars", "//ortools/base:mathutil", + "//ortools/base:types", "//ortools/util:saturated_arithmetic", "//ortools/util:stats", "//ortools/util:zvector", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", ], ) @@ -279,7 +313,8 @@ cc_binary( ":min_cost_flow", "//ortools/base", "//ortools/base:file", - "//ortools/base:filesystem", + "//ortools/base:status_macros", + "//ortools/base:timer", "//ortools/util:filelineiter", "//ortools/util:stats", "@com_google_absl//absl/flags:flag", @@ -298,7 +333,7 @@ cc_library( deps = [ ":ebert_graph", ":linear_assignment", - "//ortools/base", + "@com_google_absl//absl/flags:flag", ], ) @@ -314,50 +349,11 @@ cc_library( "//ortools/base:types", "//ortools/util:permutation", "//ortools/util:zvector", - "@com_google_absl//absl/strings", + "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/strings:str_format", ], ) -# Biconnected -#cc_library( -# name = "biconnected", -# srcs = ["biconnected.cc"], -# hdrs = ["biconnected.h"], -# 
deps = [ -# ":ebert_graph", -# "//ortools/base", -# "//ortools/base:types", -# ], -#) - -# Hopcroft-Karp (Old) -#cc_library( -# name = "hopcroft_karp", -# srcs = ["hopcroft_karp.c"], -# hdrs = ["hopcroft_karp.h"], -#) - -# Hopcroft-Karp (New) -#cc_library( -# name = "bipartite_matching", -# srcs = ["bipartite_matching.cc"], -# hdrs = ["bipartite_matching.h"], -# deps = [ -# "//ortools/base", -# ], -#) - -#cc_library( -# name = "dag_connectivity", -# srcs = ["dag_connectivity.cc"], -# hdrs = ["dag_connectivity.h"], -# deps = [ -# ":topologicalsorter", -# "//ortools/base", -# ], -#) - cc_library( name = "perfect_matching", srcs = ["perfect_matching.cc"], @@ -365,7 +361,7 @@ cc_library( deps = [ "//ortools/base", "//ortools/base:adjustable_priority_queue", - "//ortools/base:intops", + "//ortools/base:int_type", "//ortools/base:strong_vector", "//ortools/base:types", "//ortools/util:saturated_arithmetic", @@ -382,6 +378,8 @@ cc_library( deps = [ ":graph", ":topologicalsorter", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/status", @@ -392,12 +390,16 @@ cc_library( cc_library( name = "dag_constrained_shortest_path", + testonly = True, srcs = ["dag_constrained_shortest_path.cc"], hdrs = ["dag_constrained_shortest_path.h"], deps = [ ":dag_shortest_path", ":graph", ":topologicalsorter", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/base:log_severity", + "@com_google_absl//absl/log:check", "@com_google_absl//absl/status:statusor", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", @@ -441,6 +443,21 @@ cc_library( hdrs = ["iterators.h"], ) +cc_library( + name = "random_graph", + srcs = ["random_graph.cc"], + hdrs = ["random_graph.h"], + deps = [ + ":graph", + "//ortools/base:logging", + "//ortools/base:types", + "@com_google_absl//absl/container:flat_hash_set", + 
"@com_google_absl//absl/memory", + "@com_google_absl//absl/random", + "@com_google_absl//absl/random:bit_gen_ref", + ], +) + cc_library( name = "strongly_connected_components", hdrs = [ diff --git a/ortools/graph/CMakeLists.txt b/ortools/graph/CMakeLists.txt index d9b638e9d7..2b17e37e7b 100644 --- a/ortools/graph/CMakeLists.txt +++ b/ortools/graph/CMakeLists.txt @@ -23,6 +23,7 @@ list(REMOVE_ITEM _SRCS ${CMAKE_CURRENT_SOURCE_DIR}/ebert_graph_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/eulerian_path_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/hamiltonian_path_test.cc + ${CMAKE_CURRENT_SOURCE_DIR}/k_shortest_paths_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/linear_assignment_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/max_flow_test.cc ${CMAKE_CURRENT_SOURCE_DIR}/min_cost_flow_test.cc diff --git a/ortools/graph/README.md b/ortools/graph/README.md index 59d10b728c..e8940279a7 100644 --- a/ortools/graph/README.md +++ b/ortools/graph/README.md @@ -6,8 +6,8 @@ network flow problems. It contains in particular: * well-tuned algorithms (for example, shortest paths and - [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path)). -* hard-to-find algorithms (Hamiltonian paths, push-relabel flow algorithms). + [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path)). +* hard-to-find algorithms (Hamiltonian paths, push-relabel flow algorithms). * other, more common algorithms, that are useful to use with `EbertGraph`. Graph representations: @@ -69,11 +69,11 @@ Flow algorithms: * [`linear_assignment.h`][linear_assignment_h]: entry point for solving linear sum assignment problems (classical assignment problems where the total cost is the sum of the costs of each arc used) on directed graphs with arc costs, - based on the Goldberg-Kennedy push-relabel algorithm. + based on the Goldberg-Kennedy push-relabel algorithm. * [`max_flow.h`][max_flow_h]: entry point for computing maximum flows on - directed graphs with arc capacities, based on the Goldberg-Tarjan - push-relabel algorithm. 
+ directed graphs with arc capacities, based on the Goldberg-Tarjan + push-relabel algorithm. * [`min_cost_flow.h`][min_cost_flow_h]: entry point for computing minimum-cost flows on directed graphs with arc capacities, arc costs, and diff --git a/ortools/graph/bidirectional_dijkstra_test.cc b/ortools/graph/bidirectional_dijkstra_test.cc index 9ce1d12ac1..87598aea41 100644 --- a/ortools/graph/bidirectional_dijkstra_test.cc +++ b/ortools/graph/bidirectional_dijkstra_test.cc @@ -20,15 +20,14 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/container/flat_hash_map.h" #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" -#include "ortools/base/map_util.h" #include "ortools/graph/bounded_dijkstra.h" #include "ortools/graph/graph.h" -#include "util/tuple/dump_vars.h" namespace operations_research { namespace { @@ -100,9 +99,6 @@ TEST(BidirectionalDijkstraTest, SmallTest) { TEST(BidirectionalDijkstraTest, RandomizedCorrectnessTest) { std::mt19937 random(12345); - // Performance on forge as of 2016-10-05 with these numbers, over 1000 runs: - // - fastbuild: max = 21.9s, avg = 10.7s. - // - opt: max = 23.2s, avg = 10.4s. const int kNumGraphs = DEBUG_MODE ? 100 : 300; const int kNumQueriesPerGraph = DEBUG_MODE ? 10 : 30; const int kNumNodes = 1000; diff --git a/ortools/graph/k_shortest_paths.h b/ortools/graph/k_shortest_paths.h new file mode 100644 index 0000000000..1c017647c2 --- /dev/null +++ b/ortools/graph/k_shortest_paths.h @@ -0,0 +1,444 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Algorithms to compute k-shortest paths. Currently, only Yen's algorithm is +// implemented. +// +// TODO(user): implement Lawler's modification: +// https://pubsonline.informs.org/doi/abs/10.1287/mnsc.18.7.401 +// +// | Algo. | Neg. weights | Neg.-weight loops | Graph type | Loopless paths | +// |-------|--------------|-------------------|--------------|----------------| +// | Yen | No | No | (Un)directed | Yes | +// +// +// Design choices +// ============== +// +// The design takes some inspiration from `shortest_paths.h` and +// `bounded_dijkstra.h`, but the shortest-path and k-shortest-path problems have +// vastly different structures. +// For instance, a path container that only stores distances, like +// `DistanceContainer` in `shortest_paths.h`, is irrelevant as an output for +// this problem: it can only characterize one path, the shortest one. +// This is why the results are stored in an intermediate structure, containing +// the paths (as a sequence of nodes, just like `PathContainerImpl` subclasses) +// and their distance. +// +// Only the one-to-one k-shortest-path problem is well-defined. Variants with +// multiple sources and/or destinations pose representational challenges whose +// solution is likely to be algorithm-dependent. +// Optimizations of path storage such as `PathTree` are not general enough to +// store k shortest paths: the set of paths for a given index for many +// sources/destinations is not ensured to form a set for each index. 
(While the +// first paths will form such a tree, storing *different* second paths for each +// source-destination pair may be impossible to do in a tree.) +// +// Unlike the functions in `shortest_paths.h`, the functions in this file +// directly return their result, to follow the current best practices. + +#ifndef OR_TOOLS_GRAPH_K_SHORTEST_PATHS_H_ +#define OR_TOOLS_GRAPH_K_SHORTEST_PATHS_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/optimization.h" +#include "absl/log/check.h" +#include "absl/types/span.h" +#include "ortools/graph/bounded_dijkstra.h" +#include "ortools/graph/ebert_graph.h" +#include "ortools/graph/shortest_paths.h" + +namespace operations_research { + +// Stores the solution to a k-shortest path problem. `paths` contains up to `k` +// paths from `source` to `destination` (these nodes are arguments to the +// algorithm), each having a distance stored in `distances`. +// +// The paths in `paths` start with `origin` and end at `destination`. +// +// If the computations are unsuccessful for any reason, the vectors are empty. +struct KShortestPaths { + // The paths are stored as vectors of nodes, like the other graph algorithms. + // TODO(user): what about vectors of arcs? That might be faster + // (potentially, add a function to transform it into a vector of nodes if the + // user really needs it). It would also have the nice benefit of removing the + // need for `distances` (compute it on the fly), with a reference to the graph + // and the costs. + std::vector> paths; + std::vector distances; +}; + +// Computes up to k shortest paths from the node `source` to the node +// `destination` in the given directed `graph`. The paths are guaranteed not to +// have loops. +// +// Hypotheses on input (which are not checked at runtime): +// - No multigraphs (more than one edge or a pair of nodes). The behavior is +// undefined otherwise. 
+// - The `arc_lengths` are supposed to be nonnegative. The behavior is +// undefined otherwise. +// TODO(user): relax to "no negative-weight cycles" (no Dijkstra). +// - The graphs might have loops. +// +// This function uses Yen's algorithm, which guarantees to find the first k +// shortest paths in O(k n (m + n log n)) for n nodes and m edges. This +// algorithm is an implementation of the idea of detours. +// +// Yen, Jin Y. "Finding the k Shortest Loopless Paths in a Network". Management +// Science. 17 (11): 712–716, 1971. +// https://doi.org/10.1287%2Fmnsc.17.11.712 +template +KShortestPaths YenKShortestPaths(const GraphType& graph, + const std::vector& arc_lengths, + NodeIndex source, NodeIndex destination, + unsigned k); + +// End of the interface. Below is the implementation. + +// TODO(user): introduce an enum to choose the algorithm. It's useless as +// long as this file only provides Yen. + +namespace internal { + +const PathDistance kMaxDistance = std::numeric_limits::max() - 1; +const PathDistance kDisconnectedDistance = + std::numeric_limits::max(); + +// Determines the arc index from a source to a destination. +// +// This operation requires iterating through the set of outgoing arcs from the +// source node, which might be expensive. +// +// In a multigraph, this function returns an index for one of the edges between +// the source and the destination. +template +ArcIndex FindArcIndex(const GraphType& graph, const NodeIndex source, + const NodeIndex destination) { + const auto outgoing_arcs_iter = graph.OutgoingArcs(source); + const auto arc = + std::find_if(outgoing_arcs_iter.begin(), outgoing_arcs_iter.end(), + [&graph, destination](const ArcIndex arc) { + return graph.Head(arc) == destination; + }); + return (arc != outgoing_arcs_iter.end()) ? *arc : GraphType::kNilArc; +} + +// Determines the shortest path from the given source and destination, returns a +// tuple with the path (as a vector of node indices) and its cost. 
+template +std::tuple, PathDistance> ComputeShortestPath( + const GraphType& graph, const std::vector& arc_lengths, + const NodeIndex source, const NodeIndex destination) { + BoundedDijkstraWrapper dijkstra(&graph, + &arc_lengths); + dijkstra.RunBoundedDijkstra(source, kMaxDistance); + const PathDistance path_length = dijkstra.distances()[destination]; + + if (path_length >= kMaxDistance) { + // There are shortest paths in this graph, just not from the source to this + // destination. + // This case only happens when some arcs have an infinite length (i.e. + // larger than `kMaxDistance`): `BoundedDijkstraWrapper::NodePathTo` fails + // to return a path, even empty. + return {{}, kDisconnectedDistance}; + } + + if (std::vector path = std::move(dijkstra.NodePathTo(destination)); + !path.empty()) { + return {std::move(path), path_length}; + } else { + return {{}, kDisconnectedDistance}; + } +} + +// Computes the total length of a path. +template +PathDistance ComputePathLength(const GraphType& graph, + const absl::Span arc_lengths, + const absl::Span path) { + PathDistance distance = 0; + for (NodeIndex i = 0; i < path.size() - 1; ++i) { + const ArcIndex arc = internal::FindArcIndex(graph, path[i], path[i + 1]); + DCHECK_NE(arc, GraphType::kNilArc); + distance += arc_lengths[arc]; + } + return distance; +} + +// Stores a path with a priority (typically, the distance), with a comparison +// operator that operates on the priority. +class PathWithPriority { + public: + PathWithPriority(PathDistance priority, std::vector path) + : path_(std::move(path)), priority_(priority) {} + bool operator<(const PathWithPriority& other) const { + return priority_ < other.priority_; + } + + [[nodiscard]] const std::vector& path() const { return path_; } + [[nodiscard]] PathDistance priority() const { return priority_; } + + private: + std::vector path_; + PathDistance priority_; +}; + +// Container adapter to be used with STL container adapters such as +// std::priority_queue. 
It gives access to the underlying container, which is a +// protected member in a standard STL container adapter. +template +class UnderlyingContainerAdapter : public Container { + public: + typedef typename Container::container_type container_type; + // No mutable version of `container`, so that the user cannot change the data + // within the container: they might destroy the container's invariants. + [[nodiscard]] const container_type& container() const { return this->c; } +}; + +} // namespace internal + +// TODO(user): Yen's algorithm can work with negative weights, but +// Dijkstra cannot. +// +// Yen, Jin Y. "Finding the k Shortest Loopless Paths in a Network". Management +// Science. 17 (11): 712–716, 1971. +// https://doi.org/10.1287%2Fmnsc.17.11.712 +// +// Yen's notations: +// - Source node: (1). +// - Destination node: (N). +// - Path from (1) to (j): (1) - (i) - ... - (j). +// - Cost for following the arc from (i) to (j), potentially negative: d_ij. +// - k-th shortest path: A^k == (1) - (2^k) - (3^k) - ... - (Q_k^k) - (N). +// - Deviation from A^k-1 at (i): A_i^k. This is the shortest path from (1) to +// (N) that is identical to A^k-1 from (1) to (i^k-1), then different from all +// the first k-1 shortest paths {A^1, A^2, ..., A^k-1}. +// - Root of A_i^k: R_i^k. This is the first subpath of A_i^k that coincides +// with A^k-1, i.e. A_i^k until i^k-1. +// - Spur of A_i^k: S_i^k. This is the last subpart of A_i^k with only one node +// coinciding with A_i^k, (i^k-1), i.e. A_i^k from i^k-1 onwards. +// +// Example graph, paths from A to H (more classical notations): +// C - D +// / / \ +// A - B / G - H +// \ / / +// E - F +// Source node: A. Destination node: H. 
+// Three paths from A to H, say they are ordered from the cheapest to the most +// expensive: +// - 1st path: A - B - C - D - G - H +// - 2nd path: A - B - E - F - G - H +// - 3rd path: A - B - E - D - G - H +// To start with, Yen's algorithm uses the shortest path: +// A^1 = A - B - C - D - G - H +// To compute the second path A^2, compute a detour around A^1. Consider the +// iteration where B is the spur node. +// - Spur node: 2^1 = B. +// - Root of A^1_2: R_1^2 = A - B (including the spur node 2^1 = B). +// - Spur path S_1^2 starts at the spur node 2^1 = B. There are two possible +// spur paths, the cheapest being: +// S_1^2 = B - E - F - G - H +template +KShortestPaths YenKShortestPaths(const GraphType& graph, + const std::vector& arc_lengths, + NodeIndex source, NodeIndex destination, + unsigned k) { + CHECK_GT(internal::kDisconnectedDistance, internal::kMaxDistance); + + CHECK_GE(k, 0) << "k must be nonnegative. Input value: " << k; + CHECK_NE(k, 0) << "k cannot be zero: you are requesting zero paths!"; + + CHECK_GT(graph.num_nodes(), 0) << "The graph is empty: it has no nodes"; + CHECK_GT(graph.num_arcs(), 0) << "The graph is empty: it has no arcs"; + + CHECK_GE(source, 0) << "The source node must be nonnegative. Input value: " + << source; + CHECK_LT(source, graph.num_nodes()) + << "The source node must be a valid node. Input value: " << source + << ". Number of nodes in the input graph: " << graph.num_nodes(); + CHECK_GE(destination, 0) + << "The source node must be nonnegative. Input value: " << destination; + CHECK_LT(destination, graph.num_nodes()) + << "The destination node must be a valid node. Input value: " + << destination + << ". Number of nodes in the input graph: " << graph.num_nodes(); + + KShortestPaths paths; + + // First step: compute the shortest path. 
+ { + std::tuple, PathDistance> first_path = + internal::ComputeShortestPath(graph, arc_lengths, source, destination); + if (std::get<0>(first_path).empty()) return paths; + paths.paths.push_back(std::move(std::get<0>(first_path))); + paths.distances.push_back(std::get<1>(first_path)); + } + + if (k == 1) { + return paths; + } + + // Generate variant paths. + internal::UnderlyingContainerAdapter< + std::priority_queue> + variant_path_queue; + + for (; k > 0; --k) { + // Generate variant paths from the last shortest path. + const absl::Span last_shortest_path = + absl::MakeSpan(paths.paths.back()); + + // TODO(user): think about adding parallelism for this loop to improve + // running times. + for (int spur_node_position = 0; + spur_node_position < last_shortest_path.size() - 1; + ++spur_node_position) { + if (spur_node_position > 0) { + DCHECK_NE(last_shortest_path[spur_node_position], source); + } + DCHECK_NE(last_shortest_path[spur_node_position], destination); + + const NodeIndex spur_node = last_shortest_path[spur_node_position]; + // Consider the part of the last shortest path up to and excluding the + // spur node. If spur_node_position == 0, this span only contains the + // source node. + const absl::Span root_path = + last_shortest_path.subspan(0, spur_node_position + 1); + DCHECK_GE(root_path.length(), 1); + DCHECK_NE(root_path.back(), destination); + + // Simplify the graph to have different paths using infinite lengths: + // copy the weights, set some of them to infinity. There is no need to + // restore the graph to its previous state in this case. + // + // This trick is used in the original article (it's old-fashioned), but + // not in Wikipedia's pseudocode (it prefers mutating the graph, which is + // harder to do without copying the whole graph structure). + // Copying the whole graph might be quite expensive, especially as it is + // not useful for long (computing one shortest path). 
+ std::vector arc_lengths_for_detour = arc_lengths; + for (absl::Span previous_path : paths.paths) { + // Check among the previous paths: if part of the path coincides with + // the first few nodes up to the spur node (included), forbid this part + // of the path in the search for the next shortest path. More + // precisely, in that case, avoid the arc from the spur node to the + // next node in the path. + if (previous_path.size() < spur_node_position) continue; + const bool has_same_prefix_as_root_path = std::equal( + root_path.begin(), root_path.end(), previous_path.begin(), + previous_path.begin() + root_path.length()); + if (has_same_prefix_as_root_path) { + const ArcIndex after_spur_node_arc = + internal::FindArcIndex(graph, previous_path[spur_node_position], + previous_path[spur_node_position + 1]); + arc_lengths_for_detour[after_spur_node_arc] = + internal::kDisconnectedDistance; + } + } + + // Generate a new candidate path from the spur node to the destination + // without using the forbidden arcs. + { + std::tuple, PathDistance> detour_path = + internal::ComputeShortestPath(graph, arc_lengths_for_detour, + spur_node, destination); + + if (std::get<0>(detour_path).empty()) { + // Node unreachable after some arcs are forbidden. + continue; + } + std::vector spur_path = std::move(std::get<0>(detour_path)); + if (ABSL_PREDICT_FALSE(spur_path.empty())) continue; + +#ifndef NDEBUG + CHECK_EQ(root_path.back(), spur_path.front()); + + if (spur_path.size() == 1) { + CHECK_EQ(spur_path.front(), destination); + } else { + // Ensure there is an edge between the end of the root path + // and the beginning of the spur path (knowing that both subpaths + // coincide at the spur node). 
+ const bool root_path_leads_to_spur_path = absl::c_any_of( + graph.OutgoingArcs(root_path.back()), + [&graph, node_after_spur_in_spur_path = + *(spur_path.begin() + 1)](const ArcIndex arc_index) { + return graph.Head(arc_index) == node_after_spur_in_spur_path; + }); + CHECK(root_path_leads_to_spur_path); + } +#endif // !defined(NDEBUG) + + // Assemble the new path. + std::vector new_path; + absl::c_copy(root_path.subspan(0, spur_node_position), + std::back_inserter(new_path)); + absl::c_copy(spur_path, std::back_inserter(new_path)); + + DCHECK_EQ(new_path.front(), source); + DCHECK_EQ(new_path.back(), destination); + + // Ensure the new path is not one of the previously known ones. This + // operation is required, as there are two sources of paths from the + // source to the destination: + // - `paths`, the list of paths that is output by the function: there + // is no possible duplicate due to `arc_lengths_for_detour`, where + // edges that might generate a duplicate path are forbidden. + // - `variant_path_queue`, the list of potential paths, ordered by + // their cost, with no impact on `arc_lengths_for_detour`. + // TODO(user): would it be faster to fingerprint the paths and + // filter by fingerprints? Due to the probability of error with + // fingerprints, still use this slow-but-exact code, but after + // filtering. + const bool is_new_path_already_known = + std::any_of(variant_path_queue.container().cbegin(), + variant_path_queue.container().cend(), + [&new_path](const internal::PathWithPriority& element) { + return element.path() == new_path; + }); + if (is_new_path_already_known) continue; + + const PathDistance path_length = + internal::ComputePathLength(graph, arc_lengths, new_path); + variant_path_queue.emplace( + /*priority=*/path_length, /*path=*/std::move(new_path)); + } + } + + // Add the shortest spur path ever found that has not yet been added. 
This + // can be a spur path that has just been generated or a previous one, if + // this iteration found no shorter one. + if (variant_path_queue.empty()) break; + + const internal::PathWithPriority& next_shortest_path = + variant_path_queue.top(); + paths.paths.emplace_back(next_shortest_path.path()); + paths.distances.push_back(next_shortest_path.priority()); + variant_path_queue.pop(); + } + + return paths; +} + +} // namespace operations_research + +#endif // OR_TOOLS_GRAPH_K_SHORTEST_PATHS_H_ diff --git a/ortools/graph/k_shortest_paths_test.cc b/ortools/graph/k_shortest_paths_test.cc new file mode 100644 index 0000000000..f1b4808fbf --- /dev/null +++ b/ortools/graph/k_shortest_paths_test.cc @@ -0,0 +1,171 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/graph/k_shortest_paths.h" + +#include + +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/graph/graph.h" +#include "ortools/graph/shortest_paths.h" + +namespace operations_research { +namespace { + +using testing::ElementsAre; +using util::StaticGraph; + +TEST(KShortestPathsYenDeathTest, EmptyGraph) { + StaticGraph<> graph; + std::vector lengths; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/1, /*k=*/10), + "graph.num_nodes\\(\\) > 0"); +} + +TEST(KShortestPathsYenDeathTest, NoArcGraph) { + StaticGraph<> graph; + graph.AddNode(1); + (void)graph.Build(); + std::vector lengths; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/1, /*k=*/10), + "graph.num_arcs\\(\\) > 0"); +} + +TEST(KShortestPathsYenDeathTest, NonExistingSourceBecauseNegative) { + StaticGraph<> graph; + graph.AddNode(1); + graph.AddArc(0, 1); + (void)graph.Build(); + std::vector lengths{0}; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/-1, + /*destination=*/1, /*k=*/10), + "source >= 0"); +} + +TEST(KShortestPathsYenDeathTest, NonExistingSourceBecauseTooLarge) { + StaticGraph<> graph; + graph.AddNode(1); + graph.AddArc(0, 1); + (void)graph.Build(); + std::vector lengths{0}; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/1'000, + /*destination=*/1, /*k=*/10), + "source < graph.num_nodes()"); +} + +TEST(KShortestPathsYenDeathTest, NonExistingDestinationBecauseNegative) { + StaticGraph<> graph; + graph.AddNode(1); + graph.AddArc(0, 1); + (void)graph.Build(); + std::vector lengths{0}; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/-1, /*k=*/10), + "destination >= 0"); +} + +TEST(KShortestPathsYenDeathTest, NonExistingDestinationBecauseTooLarge) { + StaticGraph<> graph; + graph.AddNode(1); + graph.AddArc(0, 1); + (void)graph.Build(); + std::vector lengths{0}; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/0, 
+ /*destination=*/1'000, /*k=*/10), + "destination < graph.num_nodes()"); +} + +TEST(KShortestPathsYenDeathTest, KEqualsZero) { + StaticGraph<> graph; + graph.AddArc(0, 1); + graph.AddArc(1, 2); + (void)graph.Build(); + std::vector lengths{1, 1}; + + EXPECT_DEATH(YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/2, /*k=*/0), + "k != 0"); +} + +TEST(KShortestPathsYenTest, ReducesToShortestPath) { + StaticGraph<> graph; + graph.AddArc(0, 1); + graph.AddArc(1, 2); + (void)graph.Build(); + std::vector lengths{1, 1}; + + const KShortestPaths paths = YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/2, /*k=*/1); + EXPECT_THAT(paths.paths, ElementsAre(std::vector{0, 1, 2})); + EXPECT_THAT(paths.distances, ElementsAre(2)); +} + +TEST(KShortestPathsYenTest, OnlyHasOnePath) { + StaticGraph<> graph; + graph.AddArc(0, 1); + graph.AddArc(1, 2); + (void)graph.Build(); + std::vector lengths{1, 1}; + + const KShortestPaths paths = YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/2, /*k=*/10); + EXPECT_THAT(paths.paths, ElementsAre(std::vector{0, 1, 2})); + EXPECT_THAT(paths.distances, ElementsAre(2)); +} + +TEST(KShortestPathsYenTest, HasTwoPaths) { + StaticGraph<> graph; + graph.AddArc(0, 1); + graph.AddArc(0, 2); + graph.AddArc(1, 2); + (void)graph.Build(); + std::vector lengths{1, 30, 1}; + + const KShortestPaths paths = YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/2, /*k=*/10); + EXPECT_THAT(paths.paths, + ElementsAre(std::vector{0, 1, 2}, std::vector{0, 2})); + EXPECT_THAT(paths.distances, ElementsAre(2, 30)); +} + +TEST(KShortestPathsYenTest, HasTwoPathsWithLongerPath) { + StaticGraph<> graph; + graph.AddArc(0, 1); + graph.AddArc(0, 4); + graph.AddArc(1, 2); + graph.AddArc(2, 3); + graph.AddArc(3, 4); + (void)graph.Build(); + std::vector lengths{1, 30, 1, 1, 1}; + + const KShortestPaths paths = YenKShortestPaths(graph, lengths, /*source=*/0, + /*destination=*/4, /*k=*/10); + EXPECT_THAT(paths.paths, 
ElementsAre(std::vector{0, 1, 2, 3, 4}, + std::vector{0, 4})); + EXPECT_THAT(paths.distances, ElementsAre(4, 30)); +} + +// TODO(user): randomized tests? Check validity with exhaustive +// exploration/IP formulation? + +} // namespace +} // namespace operations_research diff --git a/ortools/graph/linear_assignment_test.cc b/ortools/graph/linear_assignment_test.cc index 133ce2bf2d..5212ceefd0 100644 --- a/ortools/graph/linear_assignment_test.cc +++ b/ortools/graph/linear_assignment_test.cc @@ -19,6 +19,7 @@ #include #include "absl/random/distributions.h" +#include "absl/types/span.h" #include "benchmark/benchmark.h" #include "gtest/gtest.h" #include "ortools/base/commandlineflags.h" @@ -743,7 +744,7 @@ class ReorderedGraphTest : public testing::Test { ReorderedGraphTest() {} - void TestMe(const size_t left_nodes, const std::vector& ordered_edges) { + void TestMe(const size_t left_nodes, absl::Span ordered_edges) { std::vector edge_costs; typedef util::StaticGraph GraphType; GraphType graph(2 * left_nodes, ordered_edges.size()); diff --git a/ortools/graph/max_flow_test.cc b/ortools/graph/max_flow_test.cc index 96de73f8e5..d08ebf7992 100644 --- a/ortools/graph/max_flow_test.cc +++ b/ortools/graph/max_flow_test.cc @@ -191,8 +191,8 @@ TEST(SimpleMaxFlowTest, ProblematicProblemWithMaxCapacity) { FlowModelProto model, ReadFileToProto( file::JoinPathRespectAbsolute(absl::GetFlag(FLAGS_test_srcdir), - "ortools/graph/" - "testdata/max_flow_test1.pb.txt"))); + "ortools/graph/" + "testdata/max_flow_test1.pb.txt"))); SimpleMaxFlow solver; EXPECT_EQ(SimpleMaxFlow::OPTIMAL, LoadAndSolveFlowModel(model, &solver)); EXPECT_EQ(10290243, solver.OptimalFlow()); diff --git a/ortools/graph/minimum_spanning_tree_test.cc b/ortools/graph/minimum_spanning_tree_test.cc index 88b7930105..257006ceb0 100644 --- a/ortools/graph/minimum_spanning_tree_test.cc +++ b/ortools/graph/minimum_spanning_tree_test.cc @@ -78,10 +78,10 @@ void CheckMSTWithKruskal(const ListGraph& graph, // Helper function to 
check the expected MST is obtained with Prim. void CheckMSTWithPrim(const ListGraph& graph, - const std::vector& costs, + absl::Span costs, const std::vector& expected_arcs) { const std::vector prim_mst = BuildPrimMinimumSpanningTree( - graph, [&costs](int arc) { return costs[arc]; }); + graph, [costs](int arc) { return costs[arc]; }); EXPECT_THAT(expected_arcs, UnorderedElementsAreArray(prim_mst)); } diff --git a/ortools/graph/one_tree_lower_bound.h b/ortools/graph/one_tree_lower_bound.h index 586f1f2291..dce6895072 100644 --- a/ortools/graph/one_tree_lower_bound.h +++ b/ortools/graph/one_tree_lower_bound.h @@ -335,10 +335,10 @@ int GetNodeMinimizingEdgeCostToSource(const GraphType& graph, int source, template std::vector ComputeOneTree(const GraphType& graph, const CostFunction& cost, - const std::vector& weights, - const std::vector& sorted_arcs, + absl::Span weights, + absl::Span sorted_arcs, CostType* one_tree_cost) { - const auto weighed_cost = [&cost, &weights](int from, int to) { + const auto weighed_cost = [&cost, weights](int from, int to) { return cost(from, to) + weights[from] + weights[to]; }; // Compute MST on graph. diff --git a/ortools/graph/one_tree_lower_bound_test.cc b/ortools/graph/one_tree_lower_bound_test.cc index 6f988a92fe..977b0c5236 100644 --- a/ortools/graph/one_tree_lower_bound_test.cc +++ b/ortools/graph/one_tree_lower_bound_test.cc @@ -23,7 +23,7 @@ #include "gtest/gtest.h" #include "ortools/base/path.h" #include "ortools/base/types.h" -#include "ortools/routing/tsplib_parser.h" +#include "ortools/routing/parsers/tsplib_parser.h" namespace operations_research { namespace { diff --git a/ortools/graph/perfect_matching_test.cc b/ortools/graph/perfect_matching_test.cc index 2485c87684..4602c4c7cc 100644 --- a/ortools/graph/perfect_matching_test.cc +++ b/ortools/graph/perfect_matching_test.cc @@ -248,7 +248,7 @@ std::vector GenerateAndLoadRandomProblem( // condition if really needed. 
This is a bit involved though, and with the MIP // tests below, we should have a good enough confidence in the code already. void CheckOptimalSolution(const MinCostPerfectMatching& matcher, - const std::vector& edges) { + absl::Span edges) { const std::vector& matches = matcher.Matches(); std::vector seen(matches.size(), false); int num_seen = 0; diff --git a/ortools/graph/random_graph.cc b/ortools/graph/random_graph.cc new file mode 100644 index 0000000000..dc458fff9d --- /dev/null +++ b/ortools/graph/random_graph.cc @@ -0,0 +1,173 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/graph/random_graph.h" + +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_set.h" +#include "absl/memory/memory.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/random.h" +#include "ortools/base/logging.h" +#include "ortools/base/types.h" + +namespace util { + +namespace { +// This function initializes the graph used by GenerateRandomMultiGraph() and +// GenerateRandomSimpleGraph(), given their arguments. +// See the .h for documentation on those arguments. 
+std::unique_ptr> CreateGraphMaybeReserved(int num_nodes, + int num_arcs, + bool finalized, + absl::BitGenRef gen) { + std::unique_ptr> graph; + // We either "reserve" the number of nodes and arcs or not, depending on + // randomness and on the "finalized" bit (if false, we can't assume that the + // user won't add some nodes or arcs after we return the graph, so we can't + // cap those). + if (finalized && absl::Bernoulli(gen, 1.0 / 2)) { + graph = std::make_unique>(num_nodes, num_arcs); + } else { + graph = std::make_unique>(); + graph->AddNode(num_nodes - 1); + } + return graph; +} +} // namespace + +std::unique_ptr> GenerateRandomMultiGraph(int num_nodes, + int num_arcs, + bool finalized, + absl::BitGenRef gen) { + std::unique_ptr> graph = + CreateGraphMaybeReserved(num_nodes, num_arcs, finalized, gen); + if (num_nodes != 0) { + CHECK_GT(num_nodes, 0); + CHECK_GE(num_arcs, 0); + graph->AddNode(num_nodes - 1); + for (int a = 0; a < num_arcs; ++a) { + graph->AddArc(absl::Uniform(gen, 0, num_nodes), + absl::Uniform(gen, 0, num_nodes)); + } + } else { + CHECK_EQ(num_arcs, 0); + } + if (finalized) graph->Build(); + return graph; +} + +namespace { +// Parameterized method to generate both directed and undirected simple graphs. +std::unique_ptr> GenerateRandomSimpleGraph(int num_nodes, + int num_arcs, + bool finalized, + bool directed, + absl::BitGenRef gen) { + CHECK_GE(num_nodes, 0); + // For an undirected graph, the number of arcs should be even: a->b and b->a. + CHECK(directed || (num_arcs % 2 == 0)); + const int64_t max_num_arcs = + static_cast(num_nodes) * (num_nodes - 1); + CHECK_LE(num_arcs, max_num_arcs); + std::unique_ptr> graph = + CreateGraphMaybeReserved(num_nodes, num_arcs, finalized, gen); + + // If the number of arcs is greater than half the possible arcs of the graph, + // we generate the inverse graph and convert non-arcs to arcs. 
+ if (num_arcs > max_num_arcs / 2) { + std::unique_ptr> inverse_graph = + GenerateRandomSimpleGraph(num_nodes, max_num_arcs - num_arcs, + /*finalized=*/true, directed, gen); + std::vector node_mask(num_nodes, false); + for (int from = 0; from < num_nodes; ++from) { + for (const int to : (*inverse_graph)[from]) { + node_mask[to] = true; + } + for (int to = 0; to < num_nodes; ++to) { + if (node_mask[to]) { + node_mask[to] = false; // So that the mask is reset to all false. + } else if (to != from) { + graph->AddArc(from, to); + } + } + } + if (finalized) graph->Build(); + return graph; + } + + // We use a trivial algorithm: pick an arc at random, uniformly, and add it to + // the graph unless it was already added. As we sometimes have to discard an + // arc, we expect to do this slightly more times than the desired number "m" + // of distinct arcs. But in the worst case, which is when m = M/2 (where M = + // N*(N-1) is the number of possible arcs), the expected number of steps is + // only ln(2)*M ~ 0.69*M, to produce 0.5*M arcs. So it's fine. + // + // Proof: The expected number of steps to get "m" distinct arcs across the M + // possible arcs is M/M + M/(M-1) + M/(M-2) + ... + M/(M-m+1), which is equal + // to M * (H(M) - H(M-m)), where H(x) is the harmonic sum up to x. + // H(M) - H(M-m) converges to ln(M) - ln(M-m) = ln(1 + m/(M-m)) as M grows, + // which strictly grows with m and is equal to ln(2) in the worst case m=M/2. + // + // NOTE(user): If some specialized users want a uniform generation method + // that uses less memory (this one uses a flat hash set on the arcs, which + // uses significant memory), it could be done. Reach out to me. + absl::flat_hash_set> arc_set; + // To detect a bad user-provided random number generator which could lead to + // infinite loops, we bound the number of iterations to a value well beyond + // the expected number of iterations (which is less than 0.69 * max_num_arcs). 
+ int64_t num_iterations = 0; + const int64_t max_num_iterations = 1000 + max_num_arcs; + while (graph->num_arcs() < num_arcs) { + ++num_iterations; + CHECK_LE(num_iterations, max_num_iterations) + << "The random number generator supplied to GenerateRandomSimpleGraph()" + << " is likely biased or broken."; + const int tail = absl::Uniform(gen, 0, num_nodes); + const int head = absl::Uniform(gen, 0, num_nodes); + if (tail == head) continue; + if (directed) { + if (!arc_set.insert({tail, head}).second) continue; + graph->AddArc(tail, head); + } else { // undirected + const std::pair arc = { + std::min(tail, head), + std::max(tail, head)}; // Canonic edge representative. + if (!arc_set.insert(arc).second) continue; + graph->AddArc(tail, head); + graph->AddArc(head, tail); + } + } + if (finalized) graph->Build(); + return graph; +} +} // namespace + +std::unique_ptr> GenerateRandomDirectedSimpleGraph( + int num_nodes, int num_arcs, bool finalized, absl::BitGenRef gen) { + return GenerateRandomSimpleGraph(num_nodes, num_arcs, finalized, + /*directed=*/true, gen); +} + +std::unique_ptr> GenerateRandomUndirectedSimpleGraph( + int num_nodes, int num_edges, bool finalized, absl::BitGenRef gen) { + return GenerateRandomSimpleGraph(num_nodes, 2 * num_edges, finalized, + /*directed=*/false, gen); +} + +} // namespace util diff --git a/ortools/graph/random_graph.h b/ortools/graph/random_graph.h new file mode 100644 index 0000000000..b22e012568 --- /dev/null +++ b/ortools/graph/random_graph.h @@ -0,0 +1,51 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A collection of functions to be used in unit tests involving the +// ortools/graph/... library. + +#ifndef UTIL_GRAPH_RANDOM_GRAPH_H_ +#define UTIL_GRAPH_RANDOM_GRAPH_H_ + +#include + +#include "absl/random/bit_gen_ref.h" +#include "ortools/graph/graph.h" + +namespace util { + +// Generates a random graph where multi-arcs and self-arcs are allowed (and +// therefore expected): exactly "num_arcs" are generated, each from a node +// picked uniformly at random to another node picked uniformly at random. +// Calls Build() on the graph iff "finalized" is true. +std::unique_ptr> GenerateRandomMultiGraph(int num_nodes, + int num_arcs, + bool finalized, + absl::BitGenRef gen); + +// Like GenerateRandomMultiGraph(), but with neither multi-arcs nor self-arcs: +// the generated graph will have exactly num_arcs arcs. It will be picked +// uniformly at random from the set of all simple graphs with that number of +// nodes and arcs. +std::unique_ptr> GenerateRandomDirectedSimpleGraph( + int num_nodes, int num_arcs, bool finalized, absl::BitGenRef gen); + +// Like GenerateRandomDirectedSimpleGraph(), but where an undirected edge is +// represented by two arcs: a->b and b->a. As a result, the amount of arcs in +// the generated graph is 2*num_edges. 
+std::unique_ptr> GenerateRandomUndirectedSimpleGraph( + int num_nodes, int num_edges, bool finalized, absl::BitGenRef gen); + +} // namespace util + +#endif // UTIL_GRAPH_RANDOM_GRAPH_H_ diff --git a/ortools/graph/shortest_paths.cc b/ortools/graph/shortest_paths.cc index c4a47c905e..c5978f06e7 100644 --- a/ortools/graph/shortest_paths.cc +++ b/ortools/graph/shortest_paths.cc @@ -21,6 +21,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/functional/bind_front.h" #include "absl/log/check.h" +#include "absl/types/span.h" #include "ortools/base/adjustable_priority_queue-inl.h" #include "ortools/base/adjustable_priority_queue.h" #include "ortools/base/logging.h" @@ -105,8 +106,8 @@ class PathTree { public: PathTree() : nodes_(), parents_() {} - void Initialize(const std::vector& paths, - const std::vector& destinations); + void Initialize(absl::Span paths, + absl::Span destinations); // Returns the parent (predecessor) of 'node' in the tree in // O(log(path_tree_size)), where path_tree_size is the size of nodes_. @@ -126,8 +127,8 @@ class PathTree { // Initializes the tree from a non-sparse representation of the path tree // represented by 'paths'. The tree is reduced to the subtree in which nodes in // 'destinations' are the leafs. 
-void PathTree::Initialize(const std::vector& paths, - const std::vector& destinations) { +void PathTree::Initialize(absl::Span paths, + absl::Span destinations) { const NodeIndex kNilNode = StarGraph::kNilNode; std::vector node_explored(paths.size(), false); const int destination_size = destinations.size(); @@ -256,7 +257,7 @@ class DistanceContainer : public PathContainerImpl { std::vector reverse_destinations_; private: - static void ComputeReverse(const std::vector& nodes, + static void ComputeReverse(absl::Span nodes, NodeIndex num_nodes, std::vector* reverse_nodes) { CHECK(reverse_nodes != nullptr); diff --git a/ortools/graph/testdata/BUILD.bazel b/ortools/graph/testdata/BUILD.bazel index d7fbe3ea0c..b1ebc2079f 100644 --- a/ortools/graph/testdata/BUILD.bazel +++ b/ortools/graph/testdata/BUILD.bazel @@ -12,5 +12,5 @@ # limitations under the License. exports_files([ - "max_flow_test1.pb.txt", + "max_flow_test1.pb.txt", ]) diff --git a/ortools/graph/util.h b/ortools/graph/util.h index 33e5d46546..da3c0b4cd0 100644 --- a/ortools/graph/util.h +++ b/ortools/graph/util.h @@ -72,7 +72,7 @@ std::unique_ptr CopyGraph(const Graph& graph); // Note that you can call IsValidPermutation() to check it yourself. 
template std::unique_ptr RemapGraph(const Graph& graph, - const std::vector& new_node_index); + absl::Span new_node_index); // Gets the induced subgraph of "graph" restricted to the nodes in "nodes": // the resulting graph will have exactly nodes.size() nodes, and its @@ -277,7 +277,7 @@ std::unique_ptr CopyGraph(const Graph& graph) { template std::unique_ptr RemapGraph(const Graph& old_graph, - const std::vector& new_node_index) { + absl::Span new_node_index) { DCHECK(IsValidPermutation(new_node_index)) << "Invalid permutation"; const int num_nodes = old_graph.num_nodes(); CHECK_EQ(new_node_index.size(), num_nodes); From 48ace85e136d1b0ed6e0d40f793fb310e090390d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:22:45 +0100 Subject: [PATCH 016/392] base: backport from main --- ortools/base/BUILD.bazel | 17 +++++------------ ortools/base/filesystem.cc | 2 +- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 1a754e05f9..a5dbaa5e45 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -195,9 +195,13 @@ cc_library( cc_library( name = "file", - srcs = ["file.cc"], + srcs = [ + "file.cc", + "filesystem.cc", + ], hdrs = [ "file.h", + "filesystem.h", "helpers.h", "options.h", ], @@ -211,17 +215,6 @@ cc_library( ], ) -cc_library( - name = "filesystem", - srcs = ["filesystem.cc"], - hdrs = ["filesystem.h"], - deps = [ - ":file", - "@com_google_absl//absl/status", - "@com_google_absl//absl/strings", - ], -) - cc_library( name = "status_matchers", hdrs = ["status_matchers.h"], diff --git a/ortools/base/filesystem.cc b/ortools/base/filesystem.cc index 37d9c074e5..39e9cd5f5b 100644 --- a/ortools/base/filesystem.cc +++ b/ortools/base/filesystem.cc @@ -14,7 +14,7 @@ #include "ortools/base/filesystem.h" #include // NOLINT(build/c++17) -#include // NOLINT +#include // NOLINT #include "absl/status/status.h" #include "absl/strings/str_replace.h" From 
2b0085be7d3204ecea7152ab515467ab33df947f Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:52:51 +0100 Subject: [PATCH 017/392] backport ci/cd/release stuff from main --- .bazelignore | 4 + .github/workflows/amd64_macos_bazel.yml | 5 - cmake/docker/centos/Dockerfile | 10 +- cmake/docker/glop/Dockerfile | 11 +- cmake/docker/toolchain/Dockerfile | 10 +- cmake/docker/ubuntu/Dockerfile | 10 +- makefiles/docker/centos/Dockerfile | 10 +- makefiles/docker/ubuntu/Dockerfile | 10 +- tools/docker/Makefile | 10 +- tools/docker/images/centos-7.Dockerfile | 10 +- tools/docker/images/debian-10.Dockerfile | 10 +- tools/docker/images/debian-11.Dockerfile | 10 +- tools/docker/images/debian-12.Dockerfile | 114 ++++++++++++++++++ tools/docker/images/debian-13.Dockerfile | 114 ++++++++++++++++++ tools/docker/images/opensuse-leap.Dockerfile | 10 +- tools/docker/images/ubuntu-20.04.Dockerfile | 10 +- tools/docker/minizinc-challenge.Dockerfile | 10 +- .../docker/python/amd64/manylinux.Dockerfile | 10 +- .../python/arm64v8/manylinux.Dockerfile | 10 +- tools/docker/test/centos-7/cpp.Dockerfile | 10 +- tools/docker/test/debian-10/cpp.Dockerfile | 10 +- tools/docker/test/debian-10/python.Dockerfile | 10 +- tools/docker/test/debian-11/cpp.Dockerfile | 10 +- tools/docker/test/debian-11/python.Dockerfile | 10 +- tools/docker/test/debian-12/cpp.Dockerfile | 21 ++++ tools/docker/test/debian-12/dotnet.Dockerfile | 29 +++++ tools/docker/test/debian-12/java.Dockerfile | 16 +++ tools/docker/test/debian-12/python.Dockerfile | 21 ++++ tools/docker/test/debian-13/cpp.Dockerfile | 21 ++++ tools/docker/test/debian-13/dotnet.Dockerfile | 29 +++++ tools/docker/test/debian-13/java.Dockerfile | 16 +++ tools/docker/test/debian-13/python.Dockerfile | 21 ++++ .../docker/test/opensuse-leap/cpp.Dockerfile | 10 +- tools/docker/test/ubuntu-20.04/cpp.Dockerfile | 10 +- tools/release/amd64.Dockerfile | 10 +- tools/release/arm64.Dockerfile | 10 +- 36 files changed, 528 insertions(+), 124 
deletions(-) create mode 100644 tools/docker/images/debian-12.Dockerfile create mode 100644 tools/docker/images/debian-13.Dockerfile create mode 100644 tools/docker/test/debian-12/cpp.Dockerfile create mode 100644 tools/docker/test/debian-12/dotnet.Dockerfile create mode 100644 tools/docker/test/debian-12/java.Dockerfile create mode 100644 tools/docker/test/debian-12/python.Dockerfile create mode 100644 tools/docker/test/debian-13/cpp.Dockerfile create mode 100644 tools/docker/test/debian-13/dotnet.Dockerfile create mode 100644 tools/docker/test/debian-13/java.Dockerfile create mode 100644 tools/docker/test/debian-13/python.Dockerfile diff --git a/.bazelignore b/.bazelignore index 807b1738f8..3d8c9b42e3 100644 --- a/.bazelignore +++ b/.bazelignore @@ -4,3 +4,7 @@ install_make build_cross dependencies/install dependencies/sources +temp_cpp +temp_dotnet +temp_java +temp_python diff --git a/.github/workflows/amd64_macos_bazel.yml b/.github/workflows/amd64_macos_bazel.yml index cb4ecc4901..813c2dffc8 100644 --- a/.github/workflows/amd64_macos_bazel.yml +++ b/.github/workflows/amd64_macos_bazel.yml @@ -32,11 +32,6 @@ jobs: python-version: ${{ matrix.python.version }} - name: Check Python run: python --version - - name: Install Bazel - run: | - brew update - brew unlink bazelisk - brew install bazel - name: Check Bazel run: bazel version - name: Build diff --git a/cmake/docker/centos/Dockerfile b/cmake/docker/centos/Dockerfile index 68748f2f18..b426c687c5 100644 --- a/cmake/docker/centos/Dockerfile +++ b/cmake/docker/centos/Dockerfile @@ -19,11 +19,11 @@ RUN dnf -y update \ RUN echo "source /opt/rh/gcc-toolset-11/enable" >> /etc/bashrc SHELL ["/bin/bash", "--login", "-c"] -# Install CMake 3.26.4 -RUN wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-x86_64.sh" \ -&& chmod a+x cmake-3.26.4-linux-x86_64.sh \ -&& ./cmake-3.26.4-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q 
"https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh CMD [ "/usr/bin/bash" ] # Install SWIG 4.2.0 diff --git a/cmake/docker/glop/Dockerfile b/cmake/docker/glop/Dockerfile index 5de7ade82d..067037b064 100644 --- a/cmake/docker/glop/Dockerfile +++ b/cmake/docker/glop/Dockerfile @@ -7,11 +7,12 @@ RUN apt-get update -qq \ && apt-get install -yq git wget libssl-dev build-essential \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Install CMake 3.27.7 -RUN wget -q "https://cmake.org/files/v3.27/cmake-3.27.7-linux-x86_64.sh" \ -&& chmod a+x cmake-3.27.7-linux-x86_64.sh \ -&& ./cmake-3.27.7-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.27.7-linux-x86_64.sh + +# Install CMake 3.28.3 +RUN wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh CMD [ "/usr/bin/bash" ] FROM env AS devel diff --git a/cmake/docker/toolchain/Dockerfile b/cmake/docker/toolchain/Dockerfile index 6ad1382bcb..abc8a45133 100644 --- a/cmake/docker/toolchain/Dockerfile +++ b/cmake/docker/toolchain/Dockerfile @@ -11,11 +11,11 @@ RUN apt-get update -qq \ ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake 3.25.2 -RUN wget "https://cmake.org/files/v3.25/cmake-3.25.2-linux-x86_64.sh" \ -&& chmod a+x cmake-3.25.2-linux-x86_64.sh \ -&& ./cmake-3.25.2-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.25.2-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh FROM env AS devel WORKDIR 
/home/project diff --git a/cmake/docker/ubuntu/Dockerfile b/cmake/docker/ubuntu/Dockerfile index c584391799..2b1f041393 100644 --- a/cmake/docker/ubuntu/Dockerfile +++ b/cmake/docker/ubuntu/Dockerfile @@ -9,11 +9,11 @@ RUN apt-get update -qq \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Install CMake 3.25.2 -RUN wget -q "https://cmake.org/files/v3.25/cmake-3.25.2-linux-x86_64.sh" \ -&& chmod a+x cmake-3.25.2-linux-x86_64.sh \ -&& ./cmake-3.25.2-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.25.2-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh CMD [ "/usr/bin/bash" ] FROM base AS swig diff --git a/makefiles/docker/centos/Dockerfile b/makefiles/docker/centos/Dockerfile index a48eb27631..9e5d275dea 100644 --- a/makefiles/docker/centos/Dockerfile +++ b/makefiles/docker/centos/Dockerfile @@ -20,11 +20,11 @@ RUN dnf -y update \ RUN echo "source /opt/rh/gcc-toolset-11/enable" >> /etc/bashrc SHELL ["/bin/bash", "--login", "-c"] -# Install CMake 3.26.4 -RUN wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-x86_64.sh" \ -&& chmod a+x cmake-3.26.4-linux-x86_64.sh \ -&& ./cmake-3.26.4-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh CMD [ "/usr/bin/bash" ] # Install SWIG 4.1.1 diff --git a/makefiles/docker/ubuntu/Dockerfile b/makefiles/docker/ubuntu/Dockerfile index d97da04f5e..22d71efe5e 100644 --- a/makefiles/docker/ubuntu/Dockerfile +++ b/makefiles/docker/ubuntu/Dockerfile @@ -10,11 +10,11 @@ RUN apt-get update 
-qq \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Install CMake 3.25.2 -RUN wget -q "https://cmake.org/files/v3.25/cmake-3.25.2-linux-x86_64.sh" \ -&& chmod a+x cmake-3.25.2-linux-x86_64.sh \ -&& ./cmake-3.25.2-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.25.2-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh CMD [ "/usr/bin/bash" ] FROM base AS swig diff --git a/tools/docker/Makefile b/tools/docker/Makefile index 5a475c1f51..46ec34a799 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -93,9 +93,11 @@ help: @echo -e "\t\t${BOLD}archlinux${RESET} (latest)" @echo -e "\t\t${BOLD}centos-7${RESET} (Centos 7 LTS)" @echo -e "\t\t${BOLD}debian-sid${RESET} (unstable)" - @echo -e "\t\t${BOLD}debian-11${RESET} (bullseye)" + @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" + @echo -e "\t\t${BOLD}debian-12${RESET} (Bookworm)" + @echo -e "\t\t${BOLD}debian-11${RESET} (Bullseye)" # @echo -e "\t\t${BOLD}debian-10${RESET} (buster)" -# @echo -e "\t\t${BOLD}fedora-39${RESET}" + @echo -e "\t\t${BOLD}fedora-39${RESET}" @echo -e "\t\t${BOLD}fedora-38${RESET}" @echo -e "\t\t${BOLD}fedora-37${RESET}" # @echo -e "\t\t${BOLD}opensuse-leap${RESET} (latest)" @@ -404,8 +406,8 @@ DISTROS := \ alpine-edge \ archlinux \ centos-7 \ - debian-11 debian-sid \ - fedora-37 fedora-38 \ + debian-11 debian-12 debian-13 debian-sid \ + fedora-37 fedora-38 fedora-39 \ ubuntu-20.04 ubuntu-22.04 ubuntu-23.04 ubuntu-23.10 # List of stages diff --git a/tools/docker/images/centos-7.Dockerfile b/tools/docker/images/centos-7.Dockerfile index edcdb76335..bcad15707a 100644 --- a/tools/docker/images/centos-7.Dockerfile +++ b/tools/docker/images/centos-7.Dockerfile @@ -24,12 +24,12 @@ ENTRYPOINT ["/usr/bin/bash", "--login", "-c"] CMD 
["/usr/bin/bash", "--login"] # RUN g++ --version -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh # Install Swig 4.1.1 RUN curl --location-trusted \ diff --git a/tools/docker/images/debian-10.Dockerfile b/tools/docker/images/debian-10.Dockerfile index 351b1a6084..3010385279 100644 --- a/tools/docker/images/debian-10.Dockerfile +++ b/tools/docker/images/debian-10.Dockerfile @@ -13,12 +13,12 @@ RUN apt-get update -qq \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh # Install .Net # see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-10- diff --git a/tools/docker/images/debian-11.Dockerfile b/tools/docker/images/debian-11.Dockerfile index 1f3de4beb1..e54f4bf2fc 100644 --- a/tools/docker/images/debian-11.Dockerfile +++ b/tools/docker/images/debian-11.Dockerfile @@ -13,12 +13,12 @@ RUN apt-get update -qq \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q 
"https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh # Install .Net # see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-11- diff --git a/tools/docker/images/debian-12.Dockerfile b/tools/docker/images/debian-12.Dockerfile new file mode 100644 index 0000000000..89f23d52a1 --- /dev/null +++ b/tools/docker/images/debian-12.Dockerfile @@ -0,0 +1,114 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:12 AS env + +############# +## SETUP ## +############# +RUN apt-get update -qq \ +&& apt-get install -qq \ + git pkg-config wget make autoconf libtool zlib1g-dev gawk g++ curl subversion \ + swig lsb-release \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +# Install .Net +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-11- +RUN apt-get update -qq \ +&& apt-get install -qq gpg apt-transport-https \ +&& wget -q "https://packages.microsoft.com/config/debian/12/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& dpkg -i packages-microsoft-prod.deb \ +&& rm packages-microsoft-prod.deb \ +&& apt-get update -qq \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Trigger first run 
experience by running arbitrary cmd +RUN dotnet --info + +# Java Install +RUN apt-get update -qq \ +&& apt-get install -qq default-jdk maven \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENV JAVA_HOME=/usr/lib/jvm/default-java + +# Install Python +RUN apt-get update -qq \ +&& apt-get install -qq python3 python3-dev python3-pip python3-venv \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +RUN python3 -m pip install absl-py mypy mypy-protobuf + +ENV TZ=America/Los_Angeles +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +################ +## OR-TOOLS ## +################ +FROM env AS devel +WORKDIR /root +# Copy the snk key +COPY or-tools.snk /root/or-tools.snk +ENV DOTNET_SNK=/root/or-tools.snk + +ARG SRC_GIT_BRANCH +ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ARG SRC_GIT_SHA1 +ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} + +ARG OR_TOOLS_PATCH +ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} + +# Download sources +# use SRC_GIT_SHA1 to modify the command +# i.e. 
avoid docker reusing the cache when new commit is pushed +SHELL ["/bin/bash", "-c"] +RUN git clone -b "${SRC_GIT_BRANCH}" --single-branch --depth=1 https://github.com/google/or-tools \ +&& [[ $(cd or-tools && git rev-parse --verify HEAD) == ${SRC_GIT_SHA1} ]] +WORKDIR /root/or-tools + +# C++ +## build +FROM devel AS cpp_build +RUN make detect_cpp \ +&& make cpp JOBS=8 +## archive +FROM cpp_build AS cpp_archive +RUN make archive_cpp + +# .Net +## build +FROM cpp_build AS dotnet_build +ENV USE_DOTNET_CORE_31=ON +RUN make detect_dotnet \ +&& make dotnet JOBS=8 +## archive +FROM dotnet_build AS dotnet_archive +RUN make archive_dotnet + +# Java +## build +FROM cpp_build AS java_build +RUN make detect_java \ +&& make java JOBS=8 +## archive +FROM java_build AS java_archive +RUN make archive_java + +# Python +## build +FROM cpp_build AS python_build +RUN make detect_python \ +&& make python JOBS=8 +## archive +FROM python_build AS python_archive +RUN make archive_python diff --git a/tools/docker/images/debian-13.Dockerfile b/tools/docker/images/debian-13.Dockerfile new file mode 100644 index 0000000000..cd2dfdcd4e --- /dev/null +++ b/tools/docker/images/debian-13.Dockerfile @@ -0,0 +1,114 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:13 AS env + +############# +## SETUP ## +############# +RUN apt-get update -qq \ +&& apt-get install -qq \ + git pkg-config wget make autoconf libtool zlib1g-dev gawk g++ curl subversion \ + swig lsb-release \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +# Install .Net +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-11- +RUN apt-get update -qq \ 
+&& apt-get install -qq gpg apt-transport-https \ +&& wget -q "https://packages.microsoft.com/config/debian/13/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& dpkg -i packages-microsoft-prod.deb \ +&& rm packages-microsoft-prod.deb \ +&& apt-get update -qq \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Trigger first run experience by running arbitrary cmd +RUN dotnet --info + +# Java Install +RUN apt-get update -qq \ +&& apt-get install -qq default-jdk maven \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENV JAVA_HOME=/usr/lib/jvm/default-java + +# Install Python +RUN apt-get update -qq \ +&& apt-get install -qq python3 python3-dev python3-pip python3-venv \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +RUN python3 -m pip install absl-py mypy mypy-protobuf + +ENV TZ=America/Los_Angeles +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +################ +## OR-TOOLS ## +################ +FROM env AS devel +WORKDIR /root +# Copy the snk key +COPY or-tools.snk /root/or-tools.snk +ENV DOTNET_SNK=/root/or-tools.snk + +ARG SRC_GIT_BRANCH +ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ARG SRC_GIT_SHA1 +ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} + +ARG OR_TOOLS_PATCH +ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} + +# Download sources +# use SRC_GIT_SHA1 to modify the command +# i.e. 
avoid docker reusing the cache when new commit is pushed +SHELL ["/bin/bash", "-c"] +RUN git clone -b "${SRC_GIT_BRANCH}" --single-branch --depth=1 https://github.com/google/or-tools \ +&& [[ $(cd or-tools && git rev-parse --verify HEAD) == ${SRC_GIT_SHA1} ]] +WORKDIR /root/or-tools + +# C++ +## build +FROM devel AS cpp_build +RUN make detect_cpp \ +&& make cpp JOBS=8 +## archive +FROM cpp_build AS cpp_archive +RUN make archive_cpp + +# .Net +## build +FROM cpp_build AS dotnet_build +ENV USE_DOTNET_CORE_31=ON +RUN make detect_dotnet \ +&& make dotnet JOBS=8 +## archive +FROM dotnet_build AS dotnet_archive +RUN make archive_dotnet + +# Java +## build +FROM cpp_build AS java_build +RUN make detect_java \ +&& make java JOBS=8 +## archive +FROM java_build AS java_archive +RUN make archive_java + +# Python +## build +FROM cpp_build AS python_build +RUN make detect_python \ +&& make python JOBS=8 +## archive +FROM python_build AS python_archive +RUN make archive_python diff --git a/tools/docker/images/opensuse-leap.Dockerfile b/tools/docker/images/opensuse-leap.Dockerfile index f099edc6bd..61e91a0fbf 100644 --- a/tools/docker/images/opensuse-leap.Dockerfile +++ b/tools/docker/images/opensuse-leap.Dockerfile @@ -14,12 +14,12 @@ ENV CC=gcc-11 CXX=g++-11 ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh # Install SWIG RUN zypper refresh \ diff --git a/tools/docker/images/ubuntu-20.04.Dockerfile b/tools/docker/images/ubuntu-20.04.Dockerfile 
index 79c2a7940b..95aa1cdce3 100644 --- a/tools/docker/images/ubuntu-20.04.Dockerfile +++ b/tools/docker/images/ubuntu-20.04.Dockerfile @@ -13,12 +13,12 @@ RUN apt update -qq \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh # Install SWIG RUN apt-get update -qq \ diff --git a/tools/docker/minizinc-challenge.Dockerfile b/tools/docker/minizinc-challenge.Dockerfile index 6ba2856601..c521b0da1e 100644 --- a/tools/docker/minizinc-challenge.Dockerfile +++ b/tools/docker/minizinc-challenge.Dockerfile @@ -18,11 +18,11 @@ RUN apt update -qq \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ENV CC=gcc-11 CXX=g++-11 -# Install CMake v3.26.4 -RUN wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-x86_64.sh" \ -&& chmod a+x cmake-3.26.4-linux-x86_64.sh \ -&& ./cmake-3.26.4-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh FROM env AS devel WORKDIR /root diff --git a/tools/docker/python/amd64/manylinux.Dockerfile b/tools/docker/python/amd64/manylinux.Dockerfile index 50b47cc799..ec2eb479eb 100644 --- a/tools/docker/python/amd64/manylinux.Dockerfile +++ b/tools/docker/python/amd64/manylinux.Dockerfile @@ -13,11 +13,11 @@ RUN yum -y update \ ENTRYPOINT 
["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 -RUN wget -q --no-check-certificate "https://cmake.org/files/v3.26/cmake-3.26.4-linux-x86_64.sh" \ -&& chmod a+x cmake-3.26.4-linux-x86_64.sh \ -&& ./cmake-3.26.4-linux-x86_64.sh --prefix=/usr --skip-license \ -&& rm cmake-3.26.4-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q --no-check-certificate "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh # Install Swig 4.1.1 RUN curl --location-trusted \ diff --git a/tools/docker/python/arm64v8/manylinux.Dockerfile b/tools/docker/python/arm64v8/manylinux.Dockerfile index b20a9c742f..10e51382ed 100644 --- a/tools/docker/python/arm64v8/manylinux.Dockerfile +++ b/tools/docker/python/arm64v8/manylinux.Dockerfile @@ -15,11 +15,11 @@ RUN yum -y update \ ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 -RUN wget -q --no-check-certificate "https://cmake.org/files/v3.26/cmake-3.26.4-linux-aarch64.sh" \ -&& chmod a+x cmake-3.26.4-linux-aarch64.sh \ -&& ./cmake-3.26.4-linux-aarch64.sh --prefix=/usr --skip-license \ -&& rm cmake-3.26.4-linux-aarch64.sh +# Install CMake 3.28.3 +RUN wget -q --no-check-certificate "https://cmake.org/files/v3.28/cmake-3.28.3-linux-aarch64.sh" \ +&& chmod a+x cmake-3.28.3-linux-aarch64.sh \ +&& ./cmake-3.28.3-linux-aarch64.sh --prefix=/usr --skip-license \ +&& rm cmake-3.28.3-linux-aarch64.sh # Install Swig 4.1.1 RUN curl --location-trusted \ diff --git a/tools/docker/test/centos-7/cpp.Dockerfile b/tools/docker/test/centos-7/cpp.Dockerfile index 352ca8178b..9e8e090350 100644 --- a/tools/docker/test/centos-7/cpp.Dockerfile +++ b/tools/docker/test/centos-7/cpp.Dockerfile @@ -21,12 +21,12 @@ ENTRYPOINT ["/usr/bin/bash", "--login", "-c"] CMD ["/usr/bin/bash", "--login"] # RUN g++ --version -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN 
ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_centos-7_cpp_v*.tar.gz . diff --git a/tools/docker/test/debian-10/cpp.Dockerfile b/tools/docker/test/debian-10/cpp.Dockerfile index 55d853a582..67583afb5b 100644 --- a/tools/docker/test/debian-10/cpp.Dockerfile +++ b/tools/docker/test/debian-10/cpp.Dockerfile @@ -8,12 +8,12 @@ RUN apt-get update \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_debian-10_cpp_v*.tar.gz . 
diff --git a/tools/docker/test/debian-10/python.Dockerfile b/tools/docker/test/debian-10/python.Dockerfile index cc3bbbd28d..97be42cc8b 100644 --- a/tools/docker/test/debian-10/python.Dockerfile +++ b/tools/docker/test/debian-10/python.Dockerfile @@ -8,12 +8,12 @@ RUN apt-get update \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_debian-10_python_v*.tar.gz . diff --git a/tools/docker/test/debian-11/cpp.Dockerfile b/tools/docker/test/debian-11/cpp.Dockerfile index d17571b399..41b44d6d80 100644 --- a/tools/docker/test/debian-11/cpp.Dockerfile +++ b/tools/docker/test/debian-11/cpp.Dockerfile @@ -8,12 +8,12 @@ RUN apt-get update \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_debian-11_cpp_v*.tar.gz . 
diff --git a/tools/docker/test/debian-11/python.Dockerfile b/tools/docker/test/debian-11/python.Dockerfile index 3fbabcdac6..689483dd6c 100644 --- a/tools/docker/test/debian-11/python.Dockerfile +++ b/tools/docker/test/debian-11/python.Dockerfile @@ -8,12 +8,12 @@ RUN apt-get update \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_debian-11_python_v*.tar.gz . diff --git a/tools/docker/test/debian-12/cpp.Dockerfile b/tools/docker/test/debian-12/cpp.Dockerfile new file mode 100644 index 0000000000..9225231842 --- /dev/null +++ b/tools/docker/test/debian-12/cpp.Dockerfile @@ -0,0 +1,21 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:12 + +RUN apt-get update \ +&& apt-get install -y -q wget build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +WORKDIR /root +ADD or-tools_amd64_debian-12_cpp_v*.tar.gz . 
+ +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-12/dotnet.Dockerfile b/tools/docker/test/debian-12/dotnet.Dockerfile new file mode 100644 index 0000000000..eb46d03985 --- /dev/null +++ b/tools/docker/test/debian-12/dotnet.Dockerfile @@ -0,0 +1,29 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:12 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Install .Net +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-12- +RUN apt-get update -qq \ +&& apt-get install -qq gpg apt-transport-https \ +&& wget -q "https://packages.microsoft.com/config/debian/12/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& dpkg -i packages-microsoft-prod.deb \ +&& rm packages-microsoft-prod.deb \ +&& apt-get update -qq \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Trigger first run experience by running arbitrary cmd +RUN dotnet --info + +#ENV TZ=America/Los_Angeles +#RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +WORKDIR /root +ADD or-tools_amd64_debian-12_dotnet_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-12/java.Dockerfile b/tools/docker/test/debian-12/java.Dockerfile new file mode 100644 index 0000000000..2d8f90e755 --- /dev/null +++ b/tools/docker/test/debian-12/java.Dockerfile @@ -0,0 +1,16 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:12 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev default-jdk maven \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENV JAVA_HOME=/usr/lib/jvm/default-java + +#ENV TZ=America/Los_Angeles +#RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +WORKDIR /root +ADD or-tools_amd64_debian-12_java_v*.tar.gz . 
+ +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-12/python.Dockerfile b/tools/docker/test/debian-12/python.Dockerfile new file mode 100644 index 0000000000..96410846d7 --- /dev/null +++ b/tools/docker/test/debian-12/python.Dockerfile @@ -0,0 +1,21 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:12 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +WORKDIR /root +ADD or-tools_amd64_debian-12_python_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-13/cpp.Dockerfile b/tools/docker/test/debian-13/cpp.Dockerfile new file mode 100644 index 0000000000..1189e513a6 --- /dev/null +++ b/tools/docker/test/debian-13/cpp.Dockerfile @@ -0,0 +1,21 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:13 + +RUN apt-get update \ +&& apt-get install -y -q wget build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +WORKDIR /root +ADD or-tools_amd64_debian-13_cpp_v*.tar.gz . 
+ +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-13/dotnet.Dockerfile b/tools/docker/test/debian-13/dotnet.Dockerfile new file mode 100644 index 0000000000..35786df018 --- /dev/null +++ b/tools/docker/test/debian-13/dotnet.Dockerfile @@ -0,0 +1,29 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:13 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Install .Net +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-13- +RUN apt-get update -qq \ +&& apt-get install -qq gpg apt-transport-https \ +&& wget -q "https://packages.microsoft.com/config/debian/13/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& dpkg -i packages-microsoft-prod.deb \ +&& rm packages-microsoft-prod.deb \ +&& apt-get update -qq \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Trigger first run experience by running arbitrary cmd +RUN dotnet --info + +#ENV TZ=America/Los_Angeles +#RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +WORKDIR /root +ADD or-tools_amd64_debian-13_dotnet_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-13/java.Dockerfile b/tools/docker/test/debian-13/java.Dockerfile new file mode 100644 index 0000000000..67f5c27df7 --- /dev/null +++ b/tools/docker/test/debian-13/java.Dockerfile @@ -0,0 +1,16 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:13 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev default-jdk maven \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENV JAVA_HOME=/usr/lib/jvm/default-java + +#ENV TZ=America/Los_Angeles +#RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +WORKDIR /root +ADD or-tools_amd64_debian-13_java_v*.tar.gz . 
+ +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/debian-13/python.Dockerfile b/tools/docker/test/debian-13/python.Dockerfile new file mode 100644 index 0000000000..576f7ea073 --- /dev/null +++ b/tools/docker/test/debian-13/python.Dockerfile @@ -0,0 +1,21 @@ +# ref: https://hub.docker.com/_/debian +FROM debian:13 + +RUN apt-get update \ +&& apt-get install -y -q build-essential zlib1g-dev \ +&& apt-get clean \ +&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.28.3 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh + +WORKDIR /root +ADD or-tools_amd64_debian-13_python_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/opensuse-leap/cpp.Dockerfile b/tools/docker/test/opensuse-leap/cpp.Dockerfile index b8a6de69de..20ccb8be93 100644 --- a/tools/docker/test/opensuse-leap/cpp.Dockerfile +++ b/tools/docker/test/opensuse-leap/cpp.Dockerfile @@ -12,12 +12,12 @@ ENV CC=gcc-11 CXX=g++-11 ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_opensuse-leap_cpp_v*.tar.gz . 
diff --git a/tools/docker/test/ubuntu-20.04/cpp.Dockerfile b/tools/docker/test/ubuntu-20.04/cpp.Dockerfile index 6c0e97455d..cf3801efe1 100644 --- a/tools/docker/test/ubuntu-20.04/cpp.Dockerfile +++ b/tools/docker/test/ubuntu-20.04/cpp.Dockerfile @@ -9,12 +9,12 @@ RUN apt-get update \ ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install CMake v3.26.4 +# Install CMake 3.28.3 RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.26/cmake-3.26.4-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.26.4-linux-${ARCH}.sh \ -&& ./cmake-3.26.4-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.26.4-linux-${ARCH}.sh +&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ +&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.28.3-linux-${ARCH}.sh WORKDIR /root ADD or-tools_amd64_ubuntu-20.04_cpp_v*.tar.gz . diff --git a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index 995be6d789..d87b894a99 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -15,11 +15,11 @@ RUN yum -y update \ ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 -RUN wget -q --no-check-certificate "https://cmake.org/files/v3.26/cmake-3.26.4-linux-x86_64.sh" \ -&& chmod a+x cmake-3.26.4-linux-x86_64.sh \ -&& ./cmake-3.26.4-linux-x86_64.sh --prefix=/usr --skip-license \ -&& rm cmake-3.26.4-linux-x86_64.sh +# Install CMake 3.28.3 +RUN wget -q --no-check-certificate "https://cmake.org/files/v3.28/cmake-3.28.3-linux-x86_64.sh" \ +&& chmod a+x cmake-3.28.3-linux-x86_64.sh \ +&& ./cmake-3.28.3-linux-x86_64.sh --prefix=/usr --skip-license \ +&& rm cmake-3.28.3-linux-x86_64.sh # Install Swig 4.1.1 RUN curl --location-trusted \ diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index 11c8d725db..3ecf2e3007 100644 --- a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile 
@@ -22,11 +22,11 @@ RUN dnf -y update \ ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] -# Install CMake v3.26.4 -RUN wget -q --no-check-certificate "https://cmake.org/files/v3.26/cmake-3.26.4-linux-aarch64.sh" \ -&& chmod a+x cmake-3.26.4-linux-aarch64.sh \ -&& ./cmake-3.26.4-linux-aarch64.sh --prefix=/usr --skip-license \ -&& rm cmake-3.26.4-linux-aarch64.sh +# Install CMake 3.28.3 +RUN wget -q --no-check-certificate "https://cmake.org/files/v3.28/cmake-3.28.3-linux-aarch64.sh" \ +&& chmod a+x cmake-3.28.3-linux-aarch64.sh \ +&& ./cmake-3.28.3-linux-aarch64.sh --prefix=/usr --skip-license \ +&& rm cmake-3.28.3-linux-aarch64.sh # Install Swig 4.1.1 RUN curl --location-trusted \ From c9b1ad998af90251169d357df92bd9009b2b57bc Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:54:18 +0100 Subject: [PATCH 018/392] backport linear_solver, math_opt, pdlp and util from main --- ortools/gurobi/isv_public/gurobi_isv.cc | 13 +- ortools/linear_solver/BUILD.bazel | 1 + ortools/linear_solver/glop_interface.cc | 12 ++ ortools/linear_solver/gurobi_interface.cc | 43 +++-- ortools/linear_solver/highs_interface.cc | 37 ++--- ortools/linear_solver/linear_solver.cc | 63 ++++---- ortools/linear_solver/linear_solver.h | 39 +++-- ortools/linear_solver/model_validator.cc | 56 ++++--- ortools/linear_solver/model_validator.h | 10 +- ortools/linear_solver/pdlp_interface.cc | 34 ++-- .../linear_solver/proto_solver/BUILD.bazel | 3 + .../proto_solver/glop_proto_solver.cc | 75 +++++---- .../proto_solver/glop_proto_solver.h | 4 +- .../proto_solver/gurobi_proto_solver.cc | 23 +-- .../proto_solver/gurobi_proto_solver.h | 5 +- .../proto_solver/highs_proto_solver.cc | 4 +- .../proto_solver/highs_proto_solver.h | 4 +- .../proto_solver/pdlp_proto_solver.cc | 42 +++-- .../proto_solver/pdlp_proto_solver.h | 7 +- .../linear_solver/proto_solver/proto_utils.h | 23 +++ .../proto_solver/sat_proto_solver.cc | 68 ++++---- .../proto_solver/sat_proto_solver.h | 4 +- 
.../proto_solver/scip_proto_solver.cc | 21 +-- .../proto_solver/scip_proto_solver.h | 3 +- .../proto_solver/xpress_proto_solver.cc | 3 +- .../proto_solver/xpress_proto_solver.h | 6 +- .../python/linear_solver_natural_api.py | 20 ++- ortools/linear_solver/samples/BUILD.bazel | 2 + ortools/linear_solver/samples/BasicExample.cs | 36 ++++- .../linear_solver/samples/BasicExample.java | 36 ++++- .../linear_solver/samples/basic_example.cc | 47 +++++- .../linear_solver/samples/basic_example.py | 47 ++++-- .../linear_solver/samples/code_samples.bzl | 5 + ortools/linear_solver/sat_interface.cc | 17 +- ortools/linear_solver/scip_interface.cc | 36 ++--- ortools/linear_solver/solve.cc | 24 ++- ortools/linear_solver/solve_mp_model.cc | 30 +++- ortools/linear_solver/solve_mp_model.h | 29 ++-- .../users_allowing_model_storage.cc | 78 +++++++++ .../users_allowing_model_storage.h | 28 ++++ ortools/linear_solver/wrappers/BUILD.bazel | 1 + .../wrappers/model_builder_helper.cc | 16 +- .../wrappers/model_builder_helper.h | 3 +- .../compute_infeasible_subsystem_result.py | 4 +- .../ip_model_solve_parameters_tests.cc | 37 +++-- .../solver_tests/second_order_cone_tests.cc | 2 +- ortools/math_opt/solvers/gscip_solver.cc | 2 +- ortools/math_opt/solvers/gurobi/g_gurobi.cc | 9 +- ortools/math_opt/solvers/gurobi/g_gurobi.h | 5 +- ortools/math_opt/testing/BUILD.bazel | 7 +- ortools/pdlp/solve_log.proto | 4 + ortools/pdlp/solvers.proto | 4 + ortools/routing/BUILD.bazel | 2 +- ortools/util/BUILD.bazel | 14 ++ ortools/util/csharp/proto.i | 32 +++- ortools/util/filelineiter.h | 2 +- ortools/util/fp_utils.cc | 6 +- ortools/util/java/functions.i | 2 +- ortools/util/java/proto.i | 23 ++- ortools/util/lazy_mutable_copy.h | 72 +++++++-- ortools/util/solve_interrupter.cc | 102 ++++++++++++ ortools/util/solve_interrupter.h | 149 ++++++++++++++++++ 62 files changed, 1141 insertions(+), 395 deletions(-) create mode 100644 ortools/linear_solver/users_allowing_model_storage.cc create mode 100644 
ortools/linear_solver/users_allowing_model_storage.h create mode 100644 ortools/util/solve_interrupter.cc create mode 100644 ortools/util/solve_interrupter.h diff --git a/ortools/gurobi/isv_public/gurobi_isv.cc b/ortools/gurobi/isv_public/gurobi_isv.cc index 6a70b86648..88364c7cd7 100644 --- a/ortools/gurobi/isv_public/gurobi_isv.cc +++ b/ortools/gurobi/isv_public/gurobi_isv.cc @@ -50,9 +50,16 @@ absl::StatusOr NewPrimaryEnvFromISVKey(const GurobiIsvKey& isv_key) { << "): " << GRBgeterrormsg(primary_env); }; RETURN_IF_ERROR(handle_failure(GRBemptyenv(&primary_env), "GRBemptyenv()")); + // We want to turn off logging before setting the ISV key so that it doesn't + // leak. We store the original logging state, and reset it at the end. + int original_output_flag; + RETURN_IF_ERROR( + handle_failure(GRBgetintparam(primary_env, GRB_INT_PAR_OUTPUTFLAG, + &original_output_flag), + "getting original GRB_INT_PAR_OUTPUTFLAG value")); RETURN_IF_ERROR( handle_failure(GRBsetintparam(primary_env, GRB_INT_PAR_OUTPUTFLAG, 0), - "setting GRB_INT_PAR_OUTPUTFLAG")); + "turning off GRB_INT_PAR_OUTPUTFLAG")); RETURN_IF_ERROR(handle_failure( GRBsetstrparam(primary_env, "GURO_PAR_ISVNAME", isv_key.name.c_str()), "setting GURO_PAR_ISVNAME")); @@ -70,6 +77,10 @@ absl::StatusOr NewPrimaryEnvFromISVKey(const GurobiIsvKey& isv_key) { GRBsetstrparam(primary_env, "GURO_PAR_ISVKEY", isv_key.key.c_str()), "setting GURO_PAR_ISVKEY")); RETURN_IF_ERROR(handle_failure(GRBstartenv(primary_env), "GRBstartenv()")); + // Reset output flag to its original value. + RETURN_IF_ERROR(handle_failure( + GRBsetintparam(primary_env, GRB_INT_PAR_OUTPUTFLAG, original_output_flag), + "resetting GRB_INT_PAR_OUTPUTFLAG")); // Environment initialization succeeded, we don't want to free it upon exiting // this function. 
std::move(primary_env_cleanup).Cancel(); diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index 5998d21f0c..b05f16c931 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -413,5 +413,6 @@ cc_library( deps = [ ":linear_solver", ":linear_solver_cc_proto", + "//ortools/util:solve_interrupter", ], ) diff --git a/ortools/linear_solver/glop_interface.cc b/ortools/linear_solver/glop_interface.cc index 0a34d3a47d..0b45c2dd31 100644 --- a/ortools/linear_solver/glop_interface.cc +++ b/ortools/linear_solver/glop_interface.cc @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "absl/base/attributes.h" @@ -26,9 +27,11 @@ #include "ortools/glop/parameters.pb.h" #include "ortools/linear_solver/glop_utils.h" #include "ortools/linear_solver/linear_solver.h" +#include "ortools/linear_solver/proto_solver/glop_proto_solver.h" #include "ortools/lp_data/lp_data.h" #include "ortools/lp_data/lp_types.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/time_limit.h" namespace operations_research { @@ -43,6 +46,15 @@ class GLOPInterface : public MPSolverInterface { MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; bool InterruptSolve() override; + // ----- Directly solve proto is supported --- + bool SupportsDirectlySolveProto(std::atomic* interrupt) const override { + return true; + } + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override { + return GlopSolveProto(std::move(request), interrupt); + } + // ----- Model modifications and extraction ----- void Reset() override; void SetOptimizationDirection(bool maximize) override; diff --git a/ortools/linear_solver/gurobi_interface.cc b/ortools/linear_solver/gurobi_interface.cc index 5b787a0ae8..13600d440e 100644 --- a/ortools/linear_solver/gurobi_interface.cc +++ b/ortools/linear_solver/gurobi_interface.cc @@ -58,6 +58,7 @@ 
#include "absl/base/attributes.h" #include "absl/container/flat_hash_set.h" +#include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/match.h" #include "absl/strings/str_format.h" @@ -70,6 +71,8 @@ #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver_callback.h" #include "ortools/linear_solver/proto_solver/gurobi_proto_solver.h" +#include "ortools/linear_solver/proto_solver/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/time_limit.h" ABSL_FLAG(int, num_gurobi_threads, 0, @@ -89,8 +92,22 @@ class GurobiInterface : public MPSolverInterface { // ----- Solve ----- // Solves the problem using the parameter values specified. MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; - std::optional DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) override; + + // ----- Directly solve proto is supported without interrupt --- + bool SupportsDirectlySolveProto(std::atomic* interrupt) const override { + return interrupt == nullptr; + } + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override { + DCHECK_EQ(interrupt, nullptr); + const bool log_error = request->enable_internal_solver_output(); + + // Here we reuse the Gurobi environment to support single-use license that + // forbids creating a second environment if one already exists. + return ConvertStatusOrMPSolutionResponse( + log_error, GurobiSolveProto(std::move(request), global_env_)); + } + // Writes the model. void Write(const std::string& filename) override; @@ -1333,28 +1350,6 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters& param) { return result_status_; } -std::optional GurobiInterface::DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) { - // Interruption via atomic is not directly supported by Gurobi. 
- if (interrupt != nullptr) return std::nullopt; - - // Here we reuse the Gurobi environment to support single-use license that - // forbids creating a second environment if one already exists. - const auto status_or = GurobiSolveProto(request, global_env_); - if (status_or.ok()) return status_or.value(); - // Special case: if something is not implemented yet, fall back to solving - // through MPSolver. - if (absl::IsUnimplemented(status_or.status())) return std::nullopt; - - if (request.enable_internal_solver_output()) { - LOG(INFO) << "Invalid Gurobi status: " << status_or.status(); - } - MPSolutionResponse response; - response.set_status(MPSOLVER_NOT_SOLVED); - response.set_status_str(status_or.status().ToString()); - return response; -} - bool GurobiInterface::NextSolution() { // Next solution only supported for MIP if (!mip_) return false; diff --git a/ortools/linear_solver/highs_interface.cc b/ortools/linear_solver/highs_interface.cc index 5e21908564..240733052d 100644 --- a/ortools/linear_solver/highs_interface.cc +++ b/ortools/linear_solver/highs_interface.cc @@ -21,6 +21,7 @@ #include #include "absl/base/attributes.h" +#include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" @@ -30,7 +31,9 @@ #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/proto_solver/highs_proto_solver.h" +#include "ortools/linear_solver/proto_solver/proto_utils.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -41,8 +44,18 @@ class HighsInterface : public MPSolverInterface { // ----- Solve ----- MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; - std::optional DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) override; + + // ----- Directly solve proto is supported without interrupt --- + bool 
SupportsDirectlySolveProto(std::atomic* interrupt) const override { + return interrupt == nullptr; + } + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override { + DCHECK_EQ(interrupt, nullptr); + const bool log_error = request->enable_internal_solver_output(); + return ConvertStatusOrMPSolutionResponse( + log_error, HighsSolveProto(std::move(request))); + } // ----- Model modifications and extraction ----- void Reset() override; @@ -150,10 +163,6 @@ MPSolver::ResultStatus HighsInterface::Solve(const MPSolverParameters& param) { sync_status_ = SOLUTION_SYNCHRONIZED; result_status_ = static_cast(response->status()); LOG_IF(DFATAL, !response->has_solver_specific_info()) << *response; - // if (!solve_log_.ParseFromString(response->solver_specific_info())) { - // LOG(DFATAL) << "Unable to parse Highs's SolveLog from - // solver_specific_info"; - // } if (response->status() == MPSOLVER_FEASIBLE || response->status() == MPSOLVER_OPTIMAL) { @@ -166,22 +175,6 @@ MPSolver::ResultStatus HighsInterface::Solve(const MPSolverParameters& param) { return result_status_; } -std::optional HighsInterface::DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) { - if (interrupt) return std::nullopt; - absl::StatusOr response = HighsSolveProto(request); - - if (!response.ok()) { - LOG(ERROR) << "Unexpected error solving with Highs: " << response.status(); - MPSolutionResponse error_response; - error_response.set_status(MPSolverResponseStatus::MPSOLVER_ABNORMAL); - error_response.set_status_str(response.status().ToString()); - return error_response; - } - - return *response; -} - void HighsInterface::Reset() { ResetExtractionInformation(); } void HighsInterface::SetOptimizationDirection(bool maximize) { diff --git a/ortools/linear_solver/linear_solver.cc b/ortools/linear_solver/linear_solver.cc index d9c5b8f9cc..7dceb5dcf2 100644 --- a/ortools/linear_solver/linear_solver.cc +++ b/ortools/linear_solver/linear_solver.cc @@ 
-1006,16 +1006,24 @@ void AppendStatusStr(const std::string& msg, MPSolutionResponse* response) { absl::StrCat(response->status_str(), (response->status_str().empty() ? "" : "\n"), msg)); } + } // namespace // static void MPSolver::SolveWithProto(const MPModelRequest& model_request, MPSolutionResponse* response, std::atomic* interrupt) { + return SolveLazyMutableRequest(model_request, response, interrupt); +} + +// static +void MPSolver::SolveLazyMutableRequest(LazyMutableCopy request, + MPSolutionResponse* response, + std::atomic* interrupt) { CHECK(response != nullptr); if (interrupt != nullptr && - !SolverTypeSupportsInterruption(model_request.solver_type())) { + !SolverTypeSupportsInterruption(request->solver_type())) { response->set_status(MPSOLVER_INCOMPATIBLE_OPTIONS); response->set_status_str( "Called MPSolver::SolveWithProto with an underlying solver that " @@ -1023,57 +1031,51 @@ void MPSolver::SolveWithProto(const MPModelRequest& model_request, return; } - MPSolver solver(model_request.model().name(), - static_cast( - model_request.solver_type())); - if (model_request.enable_internal_solver_output()) { + MPSolver solver( + request->model().name(), + static_cast(request->solver_type())); + if (request->enable_internal_solver_output()) { solver.EnableOutput(); std::cout << "MPModelRequest info:\n" - << GetMPModelRequestLoggingInfo(model_request) << std::endl; + << GetMPModelRequestLoggingInfo(*request) << std::endl; } - // If interruption support is not required, we don't need access to the - // underlying solver and can solve it directly if the interface supports it. - auto optional_response = - solver.interface_->DirectlySolveProto(model_request, interrupt); - if (optional_response) { - *response = std::move(optional_response).value(); + // If the solver supports it, we can std::move() the request since we will + // return right after this in all cases. 
+ if (solver.interface_->SupportsDirectlySolveProto(interrupt)) { + *response = + solver.interface_->DirectlySolveProto(std::move(request), interrupt); return; } + // Validate and extract model delta. Also deal with trivial problems. const std::optional> optional_model = - ExtractValidMPModelOrPopulateResponseStatus(model_request, response); - if (!optional_model) { - LOG_IF(WARNING, model_request.enable_internal_solver_output()) - << "Failed to extract a valid model from protocol buffer. Status: " - << ProtoEnumToString(response->status()) << " (" - << response->status() << "): " << response->status_str(); - return; - } + GetMPModelOrPopulateResponse(request, response); + if (!optional_model) return; + std::string error_message; response->set_status(solver.LoadModelFromProtoInternal( - optional_model->get(), /*name_policy=*/DEFAULT_CLEAR_NAMES, + **optional_model, /*name_policy=*/DEFAULT_CLEAR_NAMES, /*check_model_validity=*/false, &error_message)); // Even though we don't re-check model validity here, there can be some // problems found by LoadModelFromProto, eg. unsupported features. if (response->status() != MPSOLVER_MODEL_IS_VALID) { response->set_status_str(error_message); - LOG_IF(WARNING, model_request.enable_internal_solver_output()) + LOG_IF(WARNING, request->enable_internal_solver_output()) << "LoadModelFromProtoInternal() failed even though the model was " << "valid! 
Status: " << ProtoEnumToString(response->status()) << " (" << response->status() << "); Error: " << error_message; return; } - if (model_request.has_solver_time_limit_seconds()) { - solver.SetTimeLimit( - absl::Seconds(model_request.solver_time_limit_seconds())); + if (request->has_solver_time_limit_seconds()) { + solver.SetTimeLimit(absl::Seconds(request->solver_time_limit_seconds())); } std::string warning_message; - if (model_request.has_solver_specific_parameters()) { + if (request->has_solver_specific_parameters()) { if (!solver.SetSolverSpecificParametersAsString( - model_request.solver_specific_parameters())) { - if (model_request.ignore_solver_specific_parameters_failure()) { + request->solver_specific_parameters())) { + if (request->ignore_solver_specific_parameters_failure()) { // We'll add a warning message in status_str after the solve. warning_message = "Warning: the solver specific parameters were not successfully " @@ -1097,8 +1099,7 @@ void MPSolver::SolveWithProto(const MPModelRequest& model_request, { absl::Notification solve_finished; auto polling_func = [&interrupt, &solve_finished, &solver, - &interrupted_by_user, &interrupt_time, - &model_request]() { + &interrupted_by_user, &interrupt_time, &request]() { constexpr absl::Duration kPollDelay = absl::Microseconds(100); constexpr absl::Duration kMaxInterruptionDelay = absl::Seconds(10); @@ -1141,7 +1142,7 @@ void MPSolver::SolveWithProto(const MPModelRequest& model_request, "underlying solver, despite repeated calls over at least " << absl::FormatDuration(kMaxInterruptionDelay) << ". 
Solver type used: " - << MPModelRequest_SolverType_Name(model_request.solver_type()); + << MPModelRequest_SolverType_Name(request->solver_type()); // Note that in opt builds, the polling thread terminates here with an // error message, but we let Solve() finish, ignoring the user diff --git a/ortools/linear_solver/linear_solver.h b/ortools/linear_solver/linear_solver.h index f75c92e465..dcb023ca57 100644 --- a/ortools/linear_solver/linear_solver.h +++ b/ortools/linear_solver/linear_solver.h @@ -164,6 +164,7 @@ #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/linear_solver_callback.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" ABSL_DECLARE_FLAG(bool, linear_solver_enable_verbose_output); ABSL_DECLARE_FLAG(bool, log_verification_errors); @@ -572,6 +573,9 @@ class MPSolver { * other solver type immediately returns an MPSOLVER_INCOMPATIBLE_OPTIONS * error. * + * `interrupt` is non-const because the internal solver may set it to true + * itself, in some cases. + * * Note(user): This attempts to first use `DirectlySolveProto()` (if * implemented). Consequently, this most likely does *not* override any of * the default parameters of the underlying solver. This behavior *differs* @@ -581,8 +585,20 @@ class MPSolver { ABSL_DEPRECATED("Prefer SolveMPModel() from solve_mp_model.h.") static void SolveWithProto(const MPModelRequest& model_request, MPSolutionResponse* response, - // `interrupt` is non-const because the internal - // solver may set it to true itself, in some cases. + std::atomic* interrupt = nullptr); + + /** + * This version support both `const MPModelRequest&` and `MPModelRequest&&` + * for the request. When using the second form, it will try to delete the + * request as soon as it is translated to the solver internal representation. + * This saves peak memory usage. + * + * Note that we need a different name and can't just accept MPModelRequest&& + * otherwise we have swig issues. 
+ */ + ABSL_DEPRECATED("Prefer SolveMPModel() from solve_mp_model.h.") + static void SolveLazyMutableRequest(LazyMutableCopy request, + MPSolutionResponse* response, std::atomic* interrupt = nullptr); ABSL_DEPRECATED( @@ -1646,18 +1662,23 @@ class MPSolverInterface { // solution is optimal. virtual MPSolver::ResultStatus Solve(const MPSolverParameters& param) = 0; - // Attempts to directly solve a MPModelRequest, bypassing the MPSolver data + // DirectlySolveProto() shall only be used if SupportsDirectlySolveProto() is + // true. + // + // DirectlySolveProto() solves a MPModelRequest, bypassing the MPSolver data // structures entirely. Like MPSolver::SolveWithProto(), optionally takes in // an 'interrupt' boolean. - // Returns {} (eg. absl::nullopt) if direct-solve is not supported by the - // underlying solver (possibly because interrupt != nullptr), in which case - // the user should fall back to using MPSolver. - virtual std::optional DirectlySolveProto( - const MPModelRequest& /*request*/, + virtual bool SupportsDirectlySolveProto( + std::atomic* /*interrupt*/) const { + return false; + } + virtual MPSolutionResponse DirectlySolveProto( + LazyMutableCopy /*request*/, // `interrupt` is non-const because the internal // solver may set it to true itself, in some cases. std::atomic* /*interrupt*/) { - return std::nullopt; + LOG(DFATAL) << "Default implementation should never be called."; + return MPSolutionResponse(); } // Writes the model using the solver internal write function. 
Currently only diff --git a/ortools/linear_solver/model_validator.cc b/ortools/linear_solver/model_validator.cc index 1d922fc80e..b851fd6af1 100644 --- a/ortools/linear_solver/model_validator.cc +++ b/ortools/linear_solver/model_validator.cc @@ -685,14 +685,20 @@ std::string FindErrorInMPModelProto( std::optional> ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, MPSolutionResponse* response) { + LazyMutableCopy ref(request); + return GetMPModelOrPopulateResponse(ref, response); +} + +std::optional> GetMPModelOrPopulateResponse( + LazyMutableCopy& request, MPSolutionResponse* response) { CHECK(response != nullptr); - if (!request.has_model() && !request.has_model_delta()) { + if (!request->has_model() && !request->has_model_delta()) { response->set_status(MPSOLVER_OPTIMAL); response->set_status_str("Requests without model are considered OPTIMAL"); return std::nullopt; } - if (request.has_model() && request.has_model_delta()) { + if (request->has_model() && request->has_model_delta()) { response->set_status(MPSOLVER_MODEL_INVALID); response->set_status_str( "Fields 'model' and 'model_delta' are mutually exclusive"); @@ -700,13 +706,22 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, } // Extract the baseline model. - LazyMutableCopy model(request.model()); - if (request.has_model_delta()) { + // Note that we move it out of the request if we have ownership. + LazyMutableCopy model = [&]() { + if (request.has_ownership()) { + return LazyMutableCopy( + std::move(*(request.get_mutable()->mutable_model()))); + } else { + return LazyMutableCopy(request->model()); + } + }(); + + if (request->has_model_delta()) { // NOTE(user): This library needs to be portable, so we can't include - // ortools/base/helpers.h; see ../port/file.h. + // file/base/helpers.h; see ../port/file.h. 
std::string contents; const absl::Status file_read_status = PortableFileGetContents( - request.model_delta().baseline_model_file_path(), &contents); + request->model_delta().baseline_model_file_path(), &contents); if (!file_read_status.ok()) { response->set_status(MPSOLVER_MODEL_INVALID); response->set_status_str( @@ -719,25 +734,25 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, response->set_status_str( absl::StrFormat("The contents of baseline model file '%s' couldn't " "be parsed as a raw serialized MPModelProto", - request.model_delta().baseline_model_file_path())); + request->model_delta().baseline_model_file_path())); return std::nullopt; } } // Validate the baseline model. - std::string error = FindErrorInMPModelProto(model.get()); + std::string error = FindErrorInMPModelProto(*model); // If the baseline is valid and we have a model delta, validate the delta, // then apply it. - if (error.empty() && request.has_model_delta()) { - const MPModelDeltaProto& delta = request.model_delta(); - error = FindErrorInMPModelDeltaProto(delta, model.get()); + if (error.empty() && request->has_model_delta()) { + const MPModelDeltaProto& delta = request->model_delta(); + error = FindErrorInMPModelDeltaProto(delta, *model); if (error.empty()) ApplyVerifiedMPModelDelta(delta, model.get_mutable()); } // Deal with errors. 
if (!error.empty()) { - if (request.enable_internal_solver_output()) { + if (request->enable_internal_solver_output()) { LOG(ERROR) << absl::StrCat("Invalid model: ", error); } response->set_status(absl::StrContains(error, "Infeasible") @@ -747,10 +762,10 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, return std::nullopt; } - if (model.get().variable_size() == 0 && model.get().constraint_size() == 0 && - model.get().general_constraint_size() == 0) { + if (model->variable_size() == 0 && model->constraint_size() == 0 && + model->general_constraint_size() == 0) { response->set_status(MPSOLVER_OPTIMAL); - response->set_objective_value(model.get().objective_offset()); + response->set_objective_value(model->objective_offset()); response->set_best_objective_bound(response->objective_value()); response->set_status_str( "Requests without variables and constraints are considered OPTIMAL"); @@ -760,17 +775,6 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, return std::move(model); } -bool ExtractValidMPModelInPlaceOrPopulateResponseStatus( - MPModelRequest* request, MPSolutionResponse* response) { - std::optional> lazy_copy = - ExtractValidMPModelOrPopulateResponseStatus(*request, response); - if (!lazy_copy) return false; - if (lazy_copy->was_copied()) { - lazy_copy->get_mutable()->Swap(request->mutable_model()); - } - return true; -} - // TODO(user): Add a general FindFeasibilityErrorInSolution() and factor out the // common code. 
std::string FindFeasibilityErrorInSolutionHint(const MPModelProto& model, diff --git a/ortools/linear_solver/model_validator.h b/ortools/linear_solver/model_validator.h index e62d9bac6d..dfbff51a33 100644 --- a/ortools/linear_solver/model_validator.h +++ b/ortools/linear_solver/model_validator.h @@ -59,12 +59,12 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, MPSolutionResponse* response); /** - * Like ExtractValidMPModelOrPopulateResponseStatus(), but works in-place: - * if the MPModel needed extraction, it will be populated in the request, and - * it returns the success boolean. + * Same as ExtractValidMPModelOrPopulateResponseStatus() but if we already + * have ownership of the request, do not do any copy even when needed. + * Note that the MPModelProto in the request will be cleared in this case. */ -bool ExtractValidMPModelInPlaceOrPopulateResponseStatus( - MPModelRequest* request, MPSolutionResponse* response); +std::optional> GetMPModelOrPopulateResponse( + LazyMutableCopy& request, MPSolutionResponse* response); /** * Returns an empty string if the solution hint given in the model is a feasible diff --git a/ortools/linear_solver/pdlp_interface.cc b/ortools/linear_solver/pdlp_interface.cc index e5c5fea2db..3d756c4176 100644 --- a/ortools/linear_solver/pdlp_interface.cc +++ b/ortools/linear_solver/pdlp_interface.cc @@ -25,14 +25,15 @@ #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" -#include "google/protobuf/text_format.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/proto_solver/pdlp_proto_solver.h" +#include "ortools/linear_solver/proto_solver/proto_utils.h" #include "ortools/pdlp/solve_log.pb.h" #include "ortools/pdlp/solvers.pb.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -43,8 
+44,17 @@ class PdlpInterface : public MPSolverInterface { // ----- Solve ----- MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; - std::optional DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) override; + + bool SupportsDirectlySolveProto(std::atomic* interrupt) const override { + return true; + } + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override { + const bool log_error = request->enable_internal_solver_output(); + return ConvertStatusOrMPSolutionResponse( + log_error, PdlpSolveProto(std::move(request), + /*relax_integer_variables=*/true, interrupt)); + } // ----- Model modifications and extraction ----- void Reset() override; @@ -143,7 +153,7 @@ MPSolver::ResultStatus PdlpInterface::Solve(const MPSolverParameters& param) { if (!google::protobuf::TextFormat::PrintToString( parameters_, request.mutable_solver_specific_parameters())) { LOG(QFATAL) << "Error converting parameters to text format: " - << parameters_.DebugString(); + << ProtobufDebugString(parameters_); } absl::StatusOr response = PdlpSolveProto( request, /*relax_integer_variables=*/true, &interrupt_solver_); @@ -180,22 +190,6 @@ MPSolver::ResultStatus PdlpInterface::Solve(const MPSolverParameters& param) { return result_status_; } -std::optional PdlpInterface::DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) { - absl::StatusOr response = - PdlpSolveProto(request, /*relax_integer_variables=*/true, interrupt); - - if (!response.ok()) { - LOG(ERROR) << "Unexpected error solving with PDLP: " << response.status(); - MPSolutionResponse error_response; - error_response.set_status(MPSolverResponseStatus::MPSOLVER_ABNORMAL); - error_response.set_status_str(response.status().ToString()); - return error_response; - } - - return *response; -} - void PdlpInterface::Reset() { ResetExtractionInformation(); } void PdlpInterface::SetOptimizationDirection(bool maximize) { diff --git 
a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 6fc8c3e616..deaca746d4 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -39,6 +39,7 @@ cc_library( "//ortools/lp_data:base", "//ortools/lp_data:proto_utils", "//ortools/port:proto_utils", + "//ortools/util:lazy_mutable_copy", "//ortools/util:logging", "//ortools/util:time_limit", "@com_google_absl//absl/log", @@ -99,6 +100,7 @@ cc_library( "//ortools/sat:model", "//ortools/sat:parameters_validation", "//ortools/sat:sat_parameters_cc_proto", + "//ortools/util:lazy_mutable_copy", "//ortools/util:logging", "//ortools/util:time_limit", "@com_google_absl//absl/log", @@ -169,6 +171,7 @@ cc_library( "//ortools/linear_solver:linear_solver_cc_proto", "//ortools/linear_solver:model_validator", "//ortools/port:proto_utils", + "//ortools/util:lazy_mutable_copy", "@com_google_absl//absl/status:statusor", ], ) diff --git a/ortools/linear_solver/proto_solver/glop_proto_solver.cc b/ortools/linear_solver/proto_solver/glop_proto_solver.cc index 2ff34e9fb9..5fb474e054 100644 --- a/ortools/linear_solver/proto_solver/glop_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/glop_proto_solver.cc @@ -17,8 +17,10 @@ #include #include #include +#include #include #include +#include #include "absl/log/check.h" #include "absl/strings/str_cat.h" @@ -32,6 +34,7 @@ #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/proto_utils.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/logging.h" #include "ortools/util/time_limit.h" @@ -41,8 +44,7 @@ namespace { MPSolutionResponse ModelInvalidResponse(SolverLogger& logger, std::string message) { - SOLVER_LOG(&logger, "Invalid model/parameters in glop_solve_proto.\n", - message); + SOLVER_LOG(&logger, "Invalid model in glop_solve_proto.\n", message); MPSolutionResponse response; 
response.set_status(MPSolverResponseStatus::MPSOLVER_MODEL_INVALID); @@ -50,6 +52,17 @@ MPSolutionResponse ModelInvalidResponse(SolverLogger& logger, return response; } +MPSolutionResponse ModelInvalidParametersResponse(SolverLogger& logger, + std::string message) { + SOLVER_LOG(&logger, "Invalid parameters in glop_solve_proto.\n", message); + + MPSolutionResponse response; + response.set_status( + MPSolverResponseStatus::MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); + response.set_status_str(message); + return response; +} + MPSolverResponseStatus ToMPSolverResultStatus(glop::ProblemStatus s) { switch (s) { case glop::ProblemStatus::OPTIMAL: @@ -92,10 +105,10 @@ MPSolverResponseStatus ToMPSolverResultStatus(glop::ProblemStatus s) { } // namespace MPSolutionResponse GlopSolveProto( - MPModelRequest request, std::atomic* interrupt_solve, + LazyMutableCopy request, std::atomic* interrupt_solve, std::function logging_callback) { glop::GlopParameters params; - params.set_log_search_progress(request.enable_internal_solver_output()); + params.set_log_search_progress(request->enable_internal_solver_output()); // TODO(user): We do not support all the parameters here. In particular the // logs before the solver is called will not be appended to the response. Fix @@ -111,54 +124,56 @@ MPSolutionResponse GlopSolveProto( logger.SetLogToStdOut(params.log_to_stdout()); // Set it now so that it can be overwritten by the solver specific parameters. - if (request.has_solver_specific_parameters()) { + if (request->has_solver_specific_parameters()) { // See EncodeParametersAsString() documentation. 
if (!std::is_base_of::value) { - if (!params.MergeFromString(request.solver_specific_parameters())) { - return ModelInvalidResponse( + if (!params.MergeFromString(request->solver_specific_parameters())) { + return ModelInvalidParametersResponse( logger, "solver_specific_parameters is not a valid binary stream of the " "GLOPParameters proto"); } } else { if (!ProtobufTextFormatMergeFromString( - request.solver_specific_parameters(), ¶ms)) { - return ModelInvalidResponse( + request->solver_specific_parameters(), ¶ms)) { + return ModelInvalidParametersResponse( logger, "solver_specific_parameters is not a valid textual representation " "of the GlopParameters proto"); } } } - if (request.has_solver_time_limit_seconds()) { - params.set_max_time_in_seconds(request.solver_time_limit_seconds()); - } - - if (!request.model().general_constraint().empty()) { - return ModelInvalidResponse(logger, - "GLOP does not support general constraints"); - } - - // Model validation and delta handling. - MPSolutionResponse response; - if (!ExtractValidMPModelInPlaceOrPopulateResponseStatus(&request, - &response)) { - // Note that the ExtractValidMPModelInPlaceOrPopulateResponseStatus() can - // also close trivial model (empty or trivially infeasible). So this is not - // always the MODEL_INVALID status. - return response; + if (request->has_solver_time_limit_seconds()) { + params.set_max_time_in_seconds(request->solver_time_limit_seconds()); } { const std::string error = glop::ValidateParameters(params); if (!error.empty()) { - return ModelInvalidResponse( + return ModelInvalidParametersResponse( logger, absl::StrCat("Invalid Glop parameters: ", error)); } } + MPSolutionResponse response; glop::LinearProgram linear_program; - MPModelProtoToLinearProgram(request.model(), &linear_program); + + // Model validation and delta handling. 
+ { + std::optional> optional_model = + GetMPModelOrPopulateResponse(request, &response); + if (!optional_model) return response; + + const MPModelProto& mp_model = **optional_model; + if (!mp_model.general_constraint().empty()) { + return ModelInvalidResponse(logger, + "GLOP does not support general constraints"); + } + + // Convert and clear the request and mp_model as it is no longer needed. + MPModelProtoToLinearProgram(mp_model, &linear_program); + std::move(request).dispose(); + } glop::LPSolver lp_solver; lp_solver.SetParameters(params); @@ -189,7 +204,7 @@ MPSolutionResponse GlopSolveProto( if (result_status == MPSOLVER_OPTIMAL || result_status == MPSOLVER_FEASIBLE) { response.set_objective_value(lp_solver.GetObjectiveValue()); - const int num_vars = request.model().variable_size(); + const int num_vars = linear_program.num_variables().value(); for (int var_id = 0; var_id < num_vars; ++var_id) { const glop::Fractional solution_value = lp_solver.variable_values()[glop::ColIndex(var_id)]; @@ -206,7 +221,7 @@ MPSolutionResponse GlopSolveProto( response.set_status(MPSOLVER_CANCELLED_BY_USER); } - const size_t num_constraints = request.model().constraint_size(); + const int num_constraints = linear_program.num_constraints().value(); for (int ct_id = 0; ct_id < num_constraints; ++ct_id) { const glop::Fractional dual_value = lp_solver.dual_values()[glop::RowIndex(ct_id)]; diff --git a/ortools/linear_solver/proto_solver/glop_proto_solver.h b/ortools/linear_solver/proto_solver/glop_proto_solver.h index 4efa110c84..f3704dc488 100644 --- a/ortools/linear_solver/proto_solver/glop_proto_solver.h +++ b/ortools/linear_solver/proto_solver/glop_proto_solver.h @@ -20,6 +20,7 @@ #include "ortools/glop/parameters.pb.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -44,7 +45,8 @@ namespace operations_research { // too unless log_to_stdout is set to false. 
The enable_internal_solver_output // in the request will act as the GLOP parameter log_search_progress. MPSolutionResponse GlopSolveProto( - MPModelRequest request, std::atomic* interrupt_solve = nullptr, + LazyMutableCopy request, + std::atomic* interrupt_solve = nullptr, std::function logging_callback = nullptr); // Returns a string that describes the version of the GLOP solver. diff --git a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc index 8ff8e2e89c..eb31bbcf7b 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc @@ -272,12 +272,12 @@ absl::Status SetSolverSpecificParameters(absl::string_view parameters, } absl::StatusOr GurobiSolveProto( - const MPModelRequest& request, GRBenv* gurobi_env) { + LazyMutableCopy request, GRBenv* gurobi_env) { MPSolutionResponse response; const absl::optional> optional_model = - ExtractValidMPModelOrPopulateResponseStatus(request, &response); + GetMPModelOrPopulateResponse(request, &response); if (!optional_model) return response; - const MPModelProto& model = optional_model->get(); + const MPModelProto& model = **optional_model; // We set `gurobi_env` to point to a new environment if no existing one is // provided. 
We must make sure that we free this environment when we exit this @@ -316,9 +316,9 @@ absl::StatusOr GurobiSolveProto( /*varnames=*/nullptr)); GRBenv* const model_env = GRBgetenv(gurobi_model); - if (request.has_solver_specific_parameters()) { + if (request->has_solver_specific_parameters()) { const auto parameters_status = SetSolverSpecificParameters( - request.solver_specific_parameters(), model_env); + request->solver_specific_parameters(), model_env); if (!parameters_status.ok()) { response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); response.set_status_str( @@ -326,13 +326,14 @@ absl::StatusOr GurobiSolveProto( return response; } } - if (request.solver_time_limit_seconds() > 0) { - RETURN_IF_GUROBI_ERROR(GRBsetdblparam(model_env, GRB_DBL_PAR_TIMELIMIT, - request.solver_time_limit_seconds())); + if (request->solver_time_limit_seconds() > 0) { + RETURN_IF_GUROBI_ERROR( + GRBsetdblparam(model_env, GRB_DBL_PAR_TIMELIMIT, + request->solver_time_limit_seconds())); } RETURN_IF_GUROBI_ERROR( GRBsetintparam(model_env, GRB_INT_PAR_OUTPUTFLAG, - request.enable_internal_solver_output())); + request->enable_internal_solver_output())); const int variable_size = model.variable_size(); bool has_integer_variables = false; @@ -348,7 +349,7 @@ absl::StatusOr GurobiSolveProto( lb[v] = variable.lower_bound(); ub[v] = variable.upper_bound(); ctype[v] = variable.is_integer() && - request.solver_type() == + request->solver_type() == MPModelRequest::GUROBI_MIXED_INTEGER_PROGRAMMING ? 
GRB_INTEGER : GRB_CONTINUOUS; @@ -575,7 +576,7 @@ absl::StatusOr GurobiSolveProto( response.mutable_dual_value()->mutable_data())); } const int additional_solutions = std::min( - solution_count, std::min(request.populate_additional_solutions_up_to(), + solution_count, std::min(request->populate_additional_solutions_up_to(), std::numeric_limits::max() - 1) + 1); for (int i = 1; i < additional_solutions; ++i) { diff --git a/ortools/linear_solver/proto_solver/gurobi_proto_solver.h b/ortools/linear_solver/proto_solver/gurobi_proto_solver.h index be8c98fc70..c91301108f 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.h +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.h @@ -21,6 +21,7 @@ #include "absl/strings/string_view.h" #include "ortools/gurobi/environment.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -34,11 +35,11 @@ namespace operations_research { // Please note though that the provided environment should not be actively used // by another thread at the same time. absl::StatusOr GurobiSolveProto( - const MPModelRequest& request, GRBenv* gurobi_env = nullptr); + LazyMutableCopy request, GRBenv* gurobi_env = nullptr); // Set parameters specified in the string. The format of the string is a series // of tokens separated by either '\n' or by ',' characters. -// Any token whose first character is a '#' or has zero length is skiped. +// Any token whose first character is a '#' or has zero length is skipped. // Comment tokens (i.e. those starting with #) can contain ',' characters. 
// Any other token has the form: // parameter_name(separator)value diff --git a/ortools/linear_solver/proto_solver/highs_proto_solver.cc b/ortools/linear_solver/proto_solver/highs_proto_solver.cc index f3119373d8..d00189abdf 100644 --- a/ortools/linear_solver/proto_solver/highs_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/highs_proto_solver.cc @@ -26,10 +26,12 @@ #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_validator.h" #include "ortools/port/proto_utils.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { -absl::StatusOr HighsSolveProto(MPModelRequest request) { +absl::StatusOr HighsSolveProto( + LazyMutableCopy request) { return absl::UnimplementedError("Highs support is not yet implemented"); } diff --git a/ortools/linear_solver/proto_solver/highs_proto_solver.h b/ortools/linear_solver/proto_solver/highs_proto_solver.h index cf94d22e10..a9abbf965d 100644 --- a/ortools/linear_solver/proto_solver/highs_proto_solver.h +++ b/ortools/linear_solver/proto_solver/highs_proto_solver.h @@ -19,11 +19,13 @@ #include "absl/status/statusor.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { // Solve the input MIP model with the HIGHS solver. 
-absl::StatusOr HighsSolveProto(MPModelRequest request); +absl::StatusOr HighsSolveProto( + LazyMutableCopy request); } // namespace operations_research diff --git a/ortools/linear_solver/proto_solver/pdlp_proto_solver.cc b/ortools/linear_solver/proto_solver/pdlp_proto_solver.cc index 10f71fdff6..a14f7eab38 100644 --- a/ortools/linear_solver/proto_solver/pdlp_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/pdlp_proto_solver.cc @@ -34,53 +34,49 @@ namespace operations_research { absl::StatusOr PdlpSolveProto( - const MPModelRequest& request, const bool relax_integer_variables, + LazyMutableCopy request, const bool relax_integer_variables, const std::atomic* interrupt_solve) { pdlp::PrimalDualHybridGradientParams params; - if (request.enable_internal_solver_output()) { + if (request->enable_internal_solver_output()) { params.set_verbosity_level(3); } else { params.set_verbosity_level(0); } - MPSolutionResponse error_response; - if (!ProtobufTextFormatMergeFromString(request.solver_specific_parameters(), + MPSolutionResponse response; + if (!ProtobufTextFormatMergeFromString(request->solver_specific_parameters(), ¶ms)) { - error_response.set_status( + response.set_status( MPSolverResponseStatus::MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); - return error_response; + return response; } if (interrupt_solve != nullptr && interrupt_solve->load() == true) { - error_response.set_status(MPSolverResponseStatus::MPSOLVER_NOT_SOLVED); - return error_response; + response.set_status(MPSolverResponseStatus::MPSOLVER_NOT_SOLVED); + return response; } - if (request.has_solver_time_limit_seconds()) { + if (request->has_solver_time_limit_seconds()) { params.mutable_termination_criteria()->set_time_sec_limit( - request.solver_time_limit_seconds()); + request->solver_time_limit_seconds()); } - const absl::optional> optional_model = - ExtractValidMPModelOrPopulateResponseStatus(request, &error_response); - if (!optional_model) { - LOG_IF(WARNING, 
request.enable_internal_solver_output()) - << "Failed to extract a valid model from protocol buffer. Status: " - << ProtoEnumToString(error_response.status()) - << " (" << error_response.status() - << "): " << error_response.status_str(); - return error_response; - } + std::optional> optional_model = + GetMPModelOrPopulateResponse(request, &response); + if (!optional_model) return response; ASSIGN_OR_RETURN( pdlp::QuadraticProgram qp, - pdlp::QpFromMpModelProto(optional_model->get(), relax_integer_variables)); - const double objective_scaling_factor = qp.objective_scaling_factor; + pdlp::QpFromMpModelProto(**optional_model, relax_integer_variables)); + // We can now clear the request and optional_model. + std::move(request).dispose(); + optional_model.reset(); + + const double objective_scaling_factor = qp.objective_scaling_factor; pdlp::SolverResult pdhg_result = pdlp::PrimalDualHybridGradient(std::move(qp), params, interrupt_solve); // PDLP's statuses don't map very cleanly to MPSolver statuses. Do the best // we can for now. - MPSolutionResponse response; switch (pdhg_result.solve_log.termination_reason()) { case pdlp::TERMINATION_REASON_OPTIMAL: response.set_status(MPSOLVER_OPTIMAL); diff --git a/ortools/linear_solver/proto_solver/pdlp_proto_solver.h b/ortools/linear_solver/proto_solver/pdlp_proto_solver.h index 2c30b55036..b2b0dd4357 100644 --- a/ortools/linear_solver/proto_solver/pdlp_proto_solver.h +++ b/ortools/linear_solver/proto_solver/pdlp_proto_solver.h @@ -18,6 +18,7 @@ #include "absl/status/statusor.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -25,6 +26,9 @@ namespace operations_research { // MPModelRequest. Users of this interface should be aware of the size // limitations of MPModelProto (see, e.g., large_linear_program.proto). // +// If possible, std::move the request into this function call so that its +// memory can be reclaimed early. 
+// // The optional interrupt_solve can be used to interrupt the solve early. The // solver will periodically check its value and stop if it holds true. // @@ -37,7 +41,8 @@ namespace operations_research { // pdlp::QuadraticProgram fails. The lack of an error does not imply success. // Check the SolveLog's termination_reason for more refined status details. absl::StatusOr PdlpSolveProto( - const MPModelRequest& request, bool relax_integer_variables = false, + LazyMutableCopy request, + bool relax_integer_variables = false, const std::atomic* interrupt_solve = nullptr); } // namespace operations_research diff --git a/ortools/linear_solver/proto_solver/proto_utils.h b/ortools/linear_solver/proto_solver/proto_utils.h index 9184e7c46f..5561634b73 100644 --- a/ortools/linear_solver/proto_solver/proto_utils.h +++ b/ortools/linear_solver/proto_solver/proto_utils.h @@ -16,9 +16,13 @@ #include #include +#include #include "absl/log/check.h" +#include "absl/status/statusor.h" #include "google/protobuf/message.h" +#include "ortools/base/logging.h" +#include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/port/proto_utils.h" namespace operations_research { @@ -29,6 +33,25 @@ using google::protobuf::Message; using google::protobuf::Message; #endif +// Some SolveWithProto() returns a StatusOr, this utility +// just convert bad absl::StatusOr to a proper error in MPModelResponse. +// +// TODO(user): All SolveWithProto() should just fill the appropriate response +// instead. 
+inline MPSolutionResponse ConvertStatusOrMPSolutionResponse( + bool log_error, absl::StatusOr response) { + if (!response.ok()) { + if (log_error) { + LOG(ERROR) << "Error status: " << response.status(); + } + MPSolutionResponse error_response; + error_response.set_status(MPSolverResponseStatus::MPSOLVER_ABNORMAL); + error_response.set_status_str(response.status().ToString()); + return error_response; + } + return std::move(response).value(); +} + // Returns a string that should be used in MPModelRequest's // solver_specific_parameters field to encode the glop parameters. // diff --git a/ortools/linear_solver/proto_solver/sat_proto_solver.cc b/ortools/linear_solver/proto_solver/sat_proto_solver.cc index c5c44fb71d..d95f218c8b 100644 --- a/ortools/linear_solver/proto_solver/sat_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/sat_proto_solver.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,7 @@ #include "ortools/sat/model.h" #include "ortools/sat/parameters_validation.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/logging.h" #include "ortools/util/time_limit.h" @@ -140,11 +142,11 @@ MPSolutionResponse InvalidParametersResponse(SolverLogger& logger, } // namespace MPSolutionResponse SatSolveProto( - MPModelRequest request, std::atomic* interrupt_solve, + LazyMutableCopy request, std::atomic* interrupt_solve, std::function logging_callback, std::function solution_callback) { sat::SatParameters params; - params.set_log_search_progress(request.enable_internal_solver_output()); + params.set_log_search_progress(request->enable_internal_solver_output()); // TODO(user): We do not support all the parameters here. In particular the // logs before the solver is called will not be appended to the response. 
Fix @@ -160,10 +162,10 @@ MPSolutionResponse SatSolveProto( logger.SetLogToStdOut(params.log_to_stdout()); // Set it now so that it can be overwritten by the solver specific parameters. - if (request.has_solver_specific_parameters()) { + if (request->has_solver_specific_parameters()) { // See EncodeSatParametersAsString() documentation. if constexpr (!std::is_base_of::value) { - if (!params.MergeFromString(request.solver_specific_parameters())) { + if (!params.MergeFromString(request->solver_specific_parameters())) { return InvalidParametersResponse( logger, "solver_specific_parameters is not a valid binary stream of the " @@ -171,7 +173,7 @@ MPSolutionResponse SatSolveProto( } } else { if (!ProtobufTextFormatMergeFromString( - request.solver_specific_parameters(), ¶ms)) { + request->solver_specific_parameters(), ¶ms)) { return InvalidParametersResponse( logger, "solver_specific_parameters is not a valid textual representation " @@ -195,14 +197,15 @@ MPSolutionResponse SatSolveProto( logger.EnableLogging(params.log_search_progress()); logger.SetLogToStdOut(params.log_to_stdout()); - if (request.has_solver_time_limit_seconds()) { - params.set_max_time_in_seconds(request.solver_time_limit_seconds()); + if (request->has_solver_time_limit_seconds()) { + params.set_max_time_in_seconds(request->solver_time_limit_seconds()); } // Model validation and delta handling. MPSolutionResponse response; - if (!ExtractValidMPModelInPlaceOrPopulateResponseStatus(&request, - &response)) { + std::optional> optional_model = + GetMPModelOrPopulateResponse(request, &response); + if (!optional_model) { // Note that the ExtractValidMPModelInPlaceOrPopulateResponseStatus() can // also close trivial model (empty or trivially infeasible). So this is not // always the MODEL_INVALID status. @@ -217,12 +220,20 @@ MPSolutionResponse SatSolveProto( return response; } + // We will presolve directly on the MPModelProto, so get a copy or transfer + // ownership from the LazyMutableCopy(). 
+ std::unique_ptr mp_model = + std::move(optional_model).value().copy_or_move_as_unique_ptr(); + + // The request is no longer needed after this. + // Important: we need to copy the model above before clearing this. + std::move(request).dispose(); + // We start by some extra validation since our code do not accept any kind // of input. - MPModelProto* const mp_model = request.mutable_model(); if (params.mip_treat_high_magnitude_bounds_as_infinity()) { - sat::ChangeLargeBoundsToInfinity(params.mip_max_valid_magnitude(), mp_model, - &logger); + sat::ChangeLargeBoundsToInfinity(params.mip_max_valid_magnitude(), + mp_model.get(), &logger); } if (!sat::MPModelProtoValidationBeforeConversion(params, *mp_model, &logger)) { @@ -230,22 +241,23 @@ MPSolutionResponse SatSolveProto( } // This is good to do before any presolve. - if (!sat::MakeBoundsOfIntegerVariablesInteger(params, mp_model, &logger)) { + if (!sat::MakeBoundsOfIntegerVariablesInteger(params, mp_model.get(), + &logger)) { return InfeasibleResponse(logger, "An integer variable has an empty domain"); } // Coefficients really close to zero can cause issues. // We remove them right away according to our parameters. - RemoveNearZeroTerms(params, mp_model, &logger); + RemoveNearZeroTerms(params, mp_model.get(), &logger); // Note(user): the LP presolvers API is a bit weird and keep a reference to // the given GlopParameters, so we need to make sure it outlive them. const glop::GlopParameters glop_params; std::vector> for_postsolve; if (!params.enumerate_all_solutions() && params.mip_presolve_level() > 0) { - const glop::ProblemStatus status = - ApplyMipPresolveSteps(glop_params, mp_model, &for_postsolve, &logger); + const glop::ProblemStatus status = ApplyMipPresolveSteps( + glop_params, mp_model.get(), &for_postsolve, &logger); switch (status) { case glop::ProblemStatus::INIT: // Continue with the solve. 
@@ -276,7 +288,7 @@ MPSolutionResponse SatSolveProto( } // We need to do that before the automatic detection of integers. - RemoveNearZeroTerms(params, mp_model, &logger); + RemoveNearZeroTerms(params, mp_model.get(), &logger); SOLVER_LOG(&logger, ""); SOLVER_LOG(&logger, "Scaling to pure integer problem."); @@ -284,8 +296,9 @@ MPSolutionResponse SatSolveProto( const int num_variables = mp_model->variable_size(); std::vector var_scaling(num_variables, 1.0); if (params.mip_automatically_scale_variables()) { - var_scaling = sat::DetectImpliedIntegers(mp_model, &logger); - if (!sat::MakeBoundsOfIntegerVariablesInteger(params, mp_model, &logger)) { + var_scaling = sat::DetectImpliedIntegers(mp_model.get(), &logger); + if (!sat::MakeBoundsOfIntegerVariablesInteger(params, mp_model.get(), + &logger)) { return InfeasibleResponse( logger, "A detected integer variable has an empty domain"); } @@ -295,7 +308,7 @@ MPSolutionResponse SatSolveProto( ? std::numeric_limits::infinity() : params.mip_max_bound(); const std::vector other_scaling = sat::ScaleContinuousVariables( - params.mip_var_scaling(), max_bound, mp_model); + params.mip_var_scaling(), max_bound, mp_model.get()); for (int i = 0; i < var_scaling.size(); ++i) { var_scaling[i] *= other_scaling[i]; } @@ -330,18 +343,17 @@ MPSolutionResponse SatSolveProto( DCHECK_EQ(cp_model.variables().size(), mp_model->variable().size()); // Copy and scale the hint if there is one. 
- if (request.model().has_solution_hint()) { + if (mp_model->has_solution_hint()) { auto* cp_model_hint = cp_model.mutable_solution_hint(); - const int size = request.model().solution_hint().var_index().size(); + const int size = mp_model->solution_hint().var_index().size(); for (int i = 0; i < size; ++i) { - const int var = request.model().solution_hint().var_index(i); + const int var = mp_model->solution_hint().var_index(i); if (var >= var_scaling.size()) continue; // To handle weird hint input values, we cap any large value to +/- // mip_max_bound() which is also the min/max value of any variable once // scaled. - double value = - request.model().solution_hint().var_value(i) * var_scaling[var]; + double value = mp_model->solution_hint().var_value(i) * var_scaling[var]; if (std::abs(value) > params.mip_max_bound()) { value = value > 0 ? params.mip_max_bound() : -params.mip_max_bound(); } @@ -351,10 +363,11 @@ } } - // We no longer need the request. Reclaim its memory. + // We no longer need the mp_model after this, reclaim its memory. const int old_num_variables = mp_model->variable().size(); const int old_num_constraints = mp_model->constraint().size(); - request.Clear(); + const bool is_maximize = mp_model->maximize(); + mp_model.reset(); // Configure model.
sat::Model sat_model; @@ -438,7 +451,6 @@ MPSolutionResponse SatSolveProto( temp.set_objective_value(obj); *response.add_additional_solutions() = post_solve(temp); } - const bool is_maximize = request.model().maximize(); std::sort(response.mutable_additional_solutions()->pointer_begin(), response.mutable_additional_solutions()->pointer_end(), [is_maximize](const MPSolution* left, const MPSolution* right) { diff --git a/ortools/linear_solver/proto_solver/sat_proto_solver.h b/ortools/linear_solver/proto_solver/sat_proto_solver.h index a038d009b3..ca4a5c2dc1 100644 --- a/ortools/linear_solver/proto_solver/sat_proto_solver.h +++ b/ortools/linear_solver/proto_solver/sat_proto_solver.h @@ -19,6 +19,7 @@ #include #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/logging.h" namespace operations_research { @@ -49,7 +50,8 @@ namespace operations_research { // threads, but it will ensure that at most one thread executes // solution_callback at a time. 
MPSolutionResponse SatSolveProto( - MPModelRequest request, std::atomic* interrupt_solve = nullptr, + LazyMutableCopy request, + std::atomic* interrupt_solve = nullptr, std::function logging_callback = nullptr, std::function solution_callback = nullptr); diff --git a/ortools/linear_solver/proto_solver/scip_proto_solver.cc b/ortools/linear_solver/proto_solver/scip_proto_solver.cc index 587989349a..f07b419a58 100644 --- a/ortools/linear_solver/proto_solver/scip_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/scip_proto_solver.cc @@ -690,12 +690,13 @@ std::string FindErrorInMPModelForScip(const MPModelProto& model, SCIP* scip) { } absl::StatusOr ScipSolveProto( - const MPModelRequest& request) { + LazyMutableCopy request) { MPSolutionResponse response; const absl::optional> optional_model = - ExtractValidMPModelOrPopulateResponseStatus(request, &response); + GetMPModelOrPopulateResponse(request, &response); if (!optional_model) return response; - const MPModelProto& model = optional_model->get(); + const MPModelProto& model = **optional_model; + SCIP* scip = nullptr; std::vector scip_variables(model.variable_size(), nullptr); std::vector scip_constraints( @@ -734,7 +735,7 @@ absl::StatusOr ScipSolveProto( } const auto parameters_status = LegacyScipSetSolverSpecificParameters( - request.solver_specific_parameters(), scip); + request->solver_specific_parameters(), scip); if (!parameters_status.ok()) { response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); response.set_status_str( @@ -749,12 +750,12 @@ absl::StatusOr ScipSolveProto( // running SCIP with time limit 10s each will both terminate after ~5s. 
RETURN_IF_SCIP_ERROR( SCIPsetIntParam(scip, "timing/clocktype", SCIP_CLOCKTYPE_WALL)); - if (request.solver_time_limit_seconds() > 0 && - request.solver_time_limit_seconds() < 1e20) { - RETURN_IF_SCIP_ERROR(SCIPsetRealParam(scip, "limits/time", - request.solver_time_limit_seconds())); + if (request->solver_time_limit_seconds() > 0 && + request->solver_time_limit_seconds() < 1e20) { + RETURN_IF_SCIP_ERROR(SCIPsetRealParam( + scip, "limits/time", request->solver_time_limit_seconds())); } - SCIPsetMessagehdlrQuiet(scip, !request.enable_internal_solver_output()); + SCIPsetMessagehdlrQuiet(scip, !request->enable_internal_solver_output()); RETURN_IF_SCIP_ERROR(SCIPcreateProbBasic(scip, model.name().c_str())); if (model.maximize()) { @@ -901,7 +902,7 @@ absl::StatusOr ScipSolveProto( const int solution_count = std::min(SCIPgetNSols(scip), - std::min(request.populate_additional_solutions_up_to(), + std::min(request->populate_additional_solutions_up_to(), std::numeric_limits::max() - 1) + 1); if (solution_count > 0) { diff --git a/ortools/linear_solver/proto_solver/scip_proto_solver.h b/ortools/linear_solver/proto_solver/scip_proto_solver.h index 17fae5edc5..50bcfc43a7 100644 --- a/ortools/linear_solver/proto_solver/scip_proto_solver.h +++ b/ortools/linear_solver/proto_solver/scip_proto_solver.h @@ -18,6 +18,7 @@ #include "absl/status/statusor.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" #include "scip/type_scip.h" namespace operations_research { @@ -27,7 +28,7 @@ namespace operations_research { // 1e-7, and the gap limit to 0.0001 (whereas SCIP defaults are 1e-6 and 0, // respectively, and they are being used here). 
absl::StatusOr ScipSolveProto( - const MPModelRequest& request); + LazyMutableCopy request); std::string FindErrorInMPModelForScip(const MPModelProto& model, SCIP* scip); diff --git a/ortools/linear_solver/proto_solver/xpress_proto_solver.cc b/ortools/linear_solver/proto_solver/xpress_proto_solver.cc index d56b770cc9..5071c68d9e 100644 --- a/ortools/linear_solver/proto_solver/xpress_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/xpress_proto_solver.cc @@ -280,7 +280,8 @@ namespace operations_research { // return absl::StrJoin(error_messages, "\n"); // } -MPSolutionResponse XPressSolveProto(const MPModelRequest& request) { +MPSolutionResponse XPressSolveProto( + LazyMutableCopy request) { MPSolutionResponse response; response.set_status(MPSolverResponseStatus::MPSOLVER_SOLVER_TYPE_UNAVAILABLE); diff --git a/ortools/linear_solver/proto_solver/xpress_proto_solver.h b/ortools/linear_solver/proto_solver/xpress_proto_solver.h index e3b339dab2..02b8b99b32 100644 --- a/ortools/linear_solver/proto_solver/xpress_proto_solver.h +++ b/ortools/linear_solver/proto_solver/xpress_proto_solver.h @@ -14,14 +14,14 @@ #ifndef OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_XPRESS_PROTO_SOLVER_H_ #define OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_XPRESS_PROTO_SOLVER_H_ -#include - #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { // Solves the input request. 
-MPSolutionResponse XPressSolveProto(const MPModelRequest& request); +MPSolutionResponse XPressSolveProto( + LazyMutableCopy request); } // namespace operations_research diff --git a/ortools/linear_solver/python/linear_solver_natural_api.py b/ortools/linear_solver/python/linear_solver_natural_api.py index 2d92391bd6..f869e8e9e3 100644 --- a/ortools/linear_solver/python/linear_solver_natural_api.py +++ b/ortools/linear_solver/python/linear_solver_natural_api.py @@ -30,7 +30,7 @@ import numbers inf = float("inf") -class _FakeMPVariableRepresentingTheConstantOffset(object): +class _FakeMPVariableRepresentingTheConstantOffset: """A dummy class for a singleton instance used to represent the constant. To represent linear expressions, we store a dictionary @@ -56,7 +56,7 @@ def CastToLinExp(v): return v -class LinearExpr(object): +class LinearExpr: """Holds linear expressions. A linear expression is essentially an offset (floating-point value), and a @@ -178,6 +178,9 @@ class VariableExpr(LinearExpr): def __init__(self, mpvar): self.__var = mpvar + def __str__(self): + return str(self.__var) + def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack): coeffs[self.__var] += multiplier @@ -222,7 +225,16 @@ class SumArray(LinearExpr): self.__array = [CastToLinExp(elem) for elem in array] def __str__(self): - return "({})".format(" + ".join(map(str, self.__array))) + parts = [] + for term in map(str, self.__array): + if not parts: + parts.append(term) + continue + if term[0] == "-": + parts.append(" - " + term[1:]) + else: + parts.append(" + " + term) + return f'({"".join(parts)})' def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack): # Append elements in reversed order so that the first popped from the stack @@ -240,7 +252,7 @@ def Sum(*args): SumCst = Sum # pylint: disable=invalid-name -class LinearConstraint(object): +class LinearConstraint: """Represents a linear constraint: LowerBound <= LinearExpr <= UpperBound.""" def __init__(self, expr, lb, ub): diff 
--git a/ortools/linear_solver/samples/BUILD.bazel b/ortools/linear_solver/samples/BUILD.bazel index e273128b33..d8dd4836ab 100644 --- a/ortools/linear_solver/samples/BUILD.bazel +++ b/ortools/linear_solver/samples/BUILD.bazel @@ -28,6 +28,8 @@ code_sample_cc(name = "mip_var_array") code_sample_cc(name = "multiple_knapsack_mip") +#code_sample_cc(name = "network_design_ilph") + code_sample_cc(name = "simple_lp_program") code_sample_cc(name = "simple_mip_program") diff --git a/ortools/linear_solver/samples/BasicExample.cs b/ortools/linear_solver/samples/BasicExample.cs index 8aa512a483..6f7c4b6527 100644 --- a/ortools/linear_solver/samples/BasicExample.cs +++ b/ortools/linear_solver/samples/BasicExample.cs @@ -15,6 +15,7 @@ // [START program] // [START import] using System; +using Google.OrTools.Init; using Google.OrTools.LinearSolver; // [END import] @@ -22,11 +23,14 @@ public class BasicExample { static void Main() { + Console.WriteLine("Google.OrTools version: " + OrToolsVersion.VersionString()); + // [START solver] // Create the linear solver with the GLOP backend. Solver solver = Solver.CreateSolver("GLOP"); if (solver is null) { + Console.WriteLine("Could not create solver GLOP"); return; } // [END solver] @@ -40,10 +44,10 @@ public class BasicExample // [END variables] // [START constraints] - // Create a linear constraint, 0 <= x + y <= 2. - Constraint ct = solver.MakeConstraint(0.0, 2.0, "ct"); - ct.SetCoefficient(x, 1); - ct.SetCoefficient(y, 1); + // Create a linear constraint, x + y <= 2. 
+ Constraint constraint = solver.MakeConstraint(double.NegativeInfinity, 2.0, "constraint"); + constraint.SetCoefficient(x, 1); + constraint.SetCoefficient(y, 1); Console.WriteLine("Number of constraints = " + solver.NumConstraints()); // [END constraints] @@ -57,15 +61,37 @@ public class BasicExample // [END objective] // [START solve] - solver.Solve(); + Console.WriteLine("Solving with " + solver.SolverVersion()); + Solver.ResultStatus resultStatus = solver.Solve(); // [END solve] // [START print_solution] + Console.WriteLine("Status: " + resultStatus); + if (resultStatus != Solver.ResultStatus.OPTIMAL) + { + Console.WriteLine("The problem does not have an optimal solution!"); + if (resultStatus == Solver.ResultStatus.FEASIBLE) + { + Console.WriteLine("A potentially suboptimal solution was found"); + } + else + { + Console.WriteLine("The solver could not solve the problem."); + return; + } + } + Console.WriteLine("Solution:"); Console.WriteLine("Objective value = " + solver.Objective().Value()); Console.WriteLine("x = " + x.SolutionValue()); Console.WriteLine("y = " + y.SolutionValue()); // [END print_solution] + + // [START advanced] + Console.WriteLine("Advanced usage:"); + Console.WriteLine("Problem solved in " + solver.WallTime() + " milliseconds"); + Console.WriteLine("Problem solved in " + solver.Iterations() + " iterations"); + // [END advanced] } } // [END program] diff --git a/ortools/linear_solver/samples/BasicExample.java b/ortools/linear_solver/samples/BasicExample.java index 563c79085a..7b80279a4f 100644 --- a/ortools/linear_solver/samples/BasicExample.java +++ b/ortools/linear_solver/samples/BasicExample.java @@ -14,8 +14,10 @@ // Minimal example to call the GLOP solver. 
// [START program] package com.google.ortools.linearsolver.samples; + // [START import] import com.google.ortools.Loader; +import com.google.ortools.init.OrToolsVersion; import com.google.ortools.linearsolver.MPConstraint; import com.google.ortools.linearsolver.MPObjective; import com.google.ortools.linearsolver.MPSolver; @@ -25,10 +27,19 @@ import com.google.ortools.linearsolver.MPVariable; /** Minimal Linear Programming example to showcase calling the solver. */ public final class BasicExample { public static void main(String[] args) { + // [START loader] Loader.loadNativeLibraries(); + // [END loader] + + System.out.println("Google OR-Tools version: " + OrToolsVersion.getVersionString()); + // [START solver] // Create the linear solver with the GLOP backend. MPSolver solver = MPSolver.createSolver("GLOP"); + if (solver == null) { + System.out.println("Could not create solver GLOP"); + return; + } // [END solver] // [START variables] @@ -40,8 +51,9 @@ public final class BasicExample { // [END variables] // [START constraints] - // Create a linear constraint, 0 <= x + y <= 2. - MPConstraint ct = solver.makeConstraint(0.0, 2.0, "ct"); + double infinity = Double.POSITIVE_INFINITY; + // Create a linear constraint, x + y <= 2. 
+ MPConstraint ct = solver.makeConstraint(-infinity, 2.0, "ct"); ct.setCoefficient(x, 1); ct.setCoefficient(y, 1); @@ -57,15 +69,33 @@ public final class BasicExample { // [END objective] // [START solve] - solver.solve(); + System.out.println("Solving with " + solver.solverVersion()); + final MPSolver.ResultStatus resultStatus = solver.solve(); // [END solve] // [START print_solution] + System.out.println("Status: " + resultStatus); + if (resultStatus != MPSolver.ResultStatus.OPTIMAL) { + System.out.println("The problem does not have an optimal solution!"); + if (resultStatus == MPSolver.ResultStatus.FEASIBLE) { + System.out.println("A potentially suboptimal solution was found"); + } else { + System.out.println("The solver could not solve the problem."); + return; + } + } + System.out.println("Solution:"); System.out.println("Objective value = " + objective.value()); System.out.println("x = " + x.solutionValue()); System.out.println("y = " + y.solutionValue()); // [END print_solution] + + // [START advanced] + System.out.println("Advanced usage:"); + System.out.println("Problem solved in " + solver.wallTime() + " milliseconds"); + System.out.println("Problem solved in " + solver.iterations() + " iterations"); + // [END advanced] } private BasicExample() {} diff --git a/ortools/linear_solver/samples/basic_example.cc b/ortools/linear_solver/samples/basic_example.cc index 0f46c5440d..22e1dc77e9 100644 --- a/ortools/linear_solver/samples/basic_example.cc +++ b/ortools/linear_solver/samples/basic_example.cc @@ -11,20 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Minimal example to call the GLOP solver. // [START program] +// Minimal example to call the GLOP solver. 
// [START import] +#include #include -#include +#include "absl/flags/flag.h" +#include "absl/log/flags.h" +#include "ortools/base/init_google.h" +#include "ortools/base/logging.h" +#include "ortools/init/init.h" #include "ortools/linear_solver/linear_solver.h" // [END import] namespace operations_research { void BasicExample() { + LOG(INFO) << "Google OR-Tools version : " << OrToolsVersion::VersionString(); + // [START solver] // Create the linear solver with the GLOP backend. std::unique_ptr solver(MPSolver::CreateSolver("GLOP")); + if (!solver) { + LOG(WARNING) << "Could not create solver GLOP"; + return; + } // [END solver] // [START variables] @@ -36,8 +47,9 @@ void BasicExample() { // [END variables] // [START constraints] - // Create a linear constraint, 0 <= x + y <= 2. - MPConstraint* const ct = solver->MakeRowConstraint(0.0, 2.0, "ct"); + // Create a linear constraint, x + y <= 2. + const double infinity = solver->infinity(); + MPConstraint* const ct = solver->MakeRowConstraint(-infinity, 2.0, "ct"); ct->SetCoefficient(x, 1); ct->SetCoefficient(y, 1); @@ -53,19 +65,40 @@ void BasicExample() { // [END objective] // [START solve] - solver->Solve(); + LOG(INFO) << "Solving with " << solver->SolverVersion(); + const MPSolver::ResultStatus result_status = solver->Solve(); // [END solve] // [START print_solution] - LOG(INFO) << "Solution:" << std::endl; + // Check that the problem has an optimal solution. 
+ LOG(INFO) << "Status: " << result_status; + if (result_status != MPSolver::OPTIMAL) { + LOG(INFO) << "The problem does not have an optimal solution!"; + if (result_status == MPSolver::FEASIBLE) { + LOG(INFO) << "A potentially suboptimal solution was found"; + } else { + LOG(WARNING) << "The solver could not solve the problem."; + return; + } + } + + LOG(INFO) << "Solution:"; LOG(INFO) << "Objective value = " << objective->Value(); LOG(INFO) << "x = " << x->solution_value(); LOG(INFO) << "y = " << y->solution_value(); // [END print_solution] + + // [START advanced] + LOG(INFO) << "Advanced usage:"; + LOG(INFO) << "Problem solved in " << solver->wall_time() << " milliseconds"; + LOG(INFO) << "Problem solved in " << solver->iterations() << " iterations"; + // [END advanced] } } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetFlag(&FLAGS_stderrthreshold, 0); operations_research::BasicExample(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/basic_example.py b/ortools/linear_solver/samples/basic_example.py index 2f3530b01b..1a2fbaced0 100644 --- a/ortools/linear_solver/samples/basic_example.py +++ b/ortools/linear_solver/samples/basic_example.py @@ -15,31 +15,36 @@ """Minimal example to call the GLOP solver.""" # [START program] # [START import] +from ortools.init.python import init from ortools.linear_solver import pywraplp # [END import] def main(): + print("Google OR-Tools version:", init.OrToolsVersion.version_string()) + # [START solver] # Create the linear solver with the GLOP backend. solver = pywraplp.Solver.CreateSolver("GLOP") if not solver: + print("Could not create solver GLOP") return # [END solver] # [START variables] # Create the variables x and y. 
- x = solver.NumVar(0, 1, "x") - y = solver.NumVar(0, 2, "y") + x_var = solver.NumVar(0, 1, "x") + y_var = solver.NumVar(0, 2, "y") print("Number of variables =", solver.NumVariables()) # [END variables] # [START constraints] - # Create a linear constraint, 0 <= x + y <= 2. - ct = solver.Constraint(0, 2, "ct") - ct.SetCoefficient(x, 1) - ct.SetCoefficient(y, 1) + infinity = solver.infinity() + # Create a linear constraint, x + y <= 2. + constraint = solver.Constraint(-infinity, 2, "ct") + constraint.SetCoefficient(x_var, 1) + constraint.SetCoefficient(y_var, 1) print("Number of constraints =", solver.NumConstraints()) # [END constraints] @@ -47,24 +52,44 @@ def main(): # [START objective] # Create the objective function, 3 * x + y. objective = solver.Objective() - objective.SetCoefficient(x, 3) - objective.SetCoefficient(y, 1) + objective.SetCoefficient(x_var, 3) + objective.SetCoefficient(y_var, 1) objective.SetMaximization() # [END objective] # [START solve] print(f"Solving with {solver.SolverVersion()}") - solver.Solve() + result_status = solver.Solve() # [END solve] # [START print_solution] + print(f"Status: {result_status}") + if result_status != pywraplp.Solver.OPTIMAL: + print("The problem does not have an optimal solution!") + if result_status == pywraplp.Solver.FEASIBLE: + print("A potentially suboptimal solution was found") + else: + print("The solver could not solve the problem.") + return + print("Solution:") print("Objective value =", objective.Value()) - print("x =", x.solution_value()) - print("y =", y.solution_value()) + print("x =", x_var.solution_value()) + print("y =", y_var.solution_value()) # [END print_solution] + # [START advanced] + print("Advanced usage:") + print(f"Problem solved in {solver.wall_time():d} milliseconds") + print(f"Problem solved in {solver.iterations():d} iterations") + # [END advanced] + if __name__ == "__main__": + init.CppBridge.init_logging("basic_example.py") + cpp_flags = init.CppFlags() + cpp_flags.stderrthreshold = 
True + cpp_flags.log_prefix = False + init.CppBridge.set_flags(cpp_flags) main() # [END program] diff --git a/ortools/linear_solver/samples/code_samples.bzl b/ortools/linear_solver/samples/code_samples.bzl index 50b5bf8f90..51eae70e1b 100644 --- a/ortools/linear_solver/samples/code_samples.bzl +++ b/ortools/linear_solver/samples/code_samples.bzl @@ -21,6 +21,7 @@ def code_sample_cc(name): srcs = [name + ".cc"], deps = [ "//ortools/base", + "//ortools/init", "//ortools/linear_solver", "//ortools/linear_solver:linear_solver_cc_proto", ], @@ -33,6 +34,7 @@ def code_sample_cc(name): deps = [ ":" + name + "_cc", "//ortools/base", + "//ortools/init", "//ortools/linear_solver", "//ortools/linear_solver:linear_solver_cc_proto", ], @@ -48,6 +50,7 @@ def code_sample_py(name): requirement("protobuf"), requirement("numpy"), requirement("pandas"), + "//ortools/init/python:init", "//ortools/linear_solver/python:model_builder", ], python_version = "PY3", @@ -60,6 +63,7 @@ def code_sample_py(name): srcs = [name + ".py"], main = name + ".py", data = [ + "//ortools/init/python:init", "//ortools/linear_solver/python:model_builder", ], deps = [ @@ -80,6 +84,7 @@ def code_sample_java(name): main_class = "com.google.ortools.linearsolver.samples." + name, test_class = "com.google.ortools.linearsolver.samples." 
+ name, deps = [ + "//ortools/init/java:init", "//ortools/linear_solver/java:modelbuilder", "//ortools/java/com/google/ortools/modelbuilder", "//ortools/java/com/google/ortools:Loader", diff --git a/ortools/linear_solver/sat_interface.cc b/ortools/linear_solver/sat_interface.cc index 811b14d1a5..78b617eb62 100644 --- a/ortools/linear_solver/sat_interface.cc +++ b/ortools/linear_solver/sat_interface.cc @@ -29,6 +29,7 @@ #include "ortools/port/proto_utils.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" +#include "ortools/util/lazy_mutable_copy.h" namespace operations_research { @@ -39,10 +40,17 @@ class SatInterface : public MPSolverInterface { // ----- Solve ----- MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; - std::optional DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) override; bool InterruptSolve() override; + // ----- Directly solve proto is supported --- + bool SupportsDirectlySolveProto(std::atomic* interrupt) const override { + return true; + } + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override { + return SatSolveProto(std::move(request), interrupt); + } + // ----- Model modifications and extraction ----- void Reset() override; void SetOptimizationDirection(bool maximize) override; @@ -152,11 +160,6 @@ MPSolver::ResultStatus SatInterface::Solve(const MPSolverParameters& param) { return result_status_; } -std::optional SatInterface::DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) { - return SatSolveProto(request, interrupt); -} - bool SatInterface::InterruptSolve() { interrupt_solve_ = true; return true; diff --git a/ortools/linear_solver/scip_interface.cc b/ortools/linear_solver/scip_interface.cc index 86349a9d60..b2c780c78e 100644 --- a/ortools/linear_solver/scip_interface.cc +++ b/ortools/linear_solver/scip_interface.cc @@ -39,9 +39,11 @@ #include "ortools/linear_solver/linear_solver.h" 
#include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/linear_solver_callback.h" +#include "ortools/linear_solver/proto_solver/proto_utils.h" #include "ortools/linear_solver/proto_solver/scip_proto_solver.h" #include "ortools/linear_solver/scip_callback.h" #include "ortools/linear_solver/scip_helper_macros.h" +#include "ortools/util/lazy_mutable_copy.h" #include "scip/cons_indicator.h" #include "scip/scip.h" #include "scip/scip_copy.h" @@ -69,8 +71,11 @@ class SCIPInterface : public MPSolverInterface { void SetOptimizationDirection(bool maximize) override; MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; - std::optional DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) override; + + bool SupportsDirectlySolveProto(std::atomic* interrupt) const override; + MPSolutionResponse DirectlySolveProto(LazyMutableCopy request, + std::atomic* interrupt) override; + void Reset() override; double infinity() override; @@ -865,27 +870,22 @@ void SCIPInterface::SetSolution(SCIP_SOL* solution) { } } -std::optional SCIPInterface::DirectlySolveProto( - const MPModelRequest& request, std::atomic* interrupt) { +bool SCIPInterface::SupportsDirectlySolveProto( + std::atomic* interrupt) const { // ScipSolveProto doesn't solve concurrently. - if (solver_->GetNumThreads() > 1) return std::nullopt; + if (solver_->GetNumThreads() > 1) return false; // Interruption via atomic is not directly supported by SCIP. - if (interrupt != nullptr) return std::nullopt; + if (interrupt != nullptr) return false; - const auto status_or = ScipSolveProto(request); - if (status_or.ok()) return status_or.value(); - // Special case: if something is not implemented yet, fall back to solving - // through MPSolver. 
- if (absl::IsUnimplemented(status_or.status())) return std::nullopt; - - if (request.enable_internal_solver_output()) { - LOG(INFO) << "Invalid SCIP status: " << status_or.status(); + return true; } - MPSolutionResponse response; - response.set_status(MPSOLVER_NOT_SOLVED); - response.set_status_str(status_or.status().ToString()); - return response; + +MPSolutionResponse SCIPInterface::DirectlySolveProto( + LazyMutableCopy request, std::atomic* interrupt) { + const bool log_error = request->enable_internal_solver_output(); + return ConvertStatusOrMPSolutionResponse(log_error, + ScipSolveProto(std::move(request))); } int SCIPInterface::SolutionCount() { return SCIPgetNSols(scip_); } diff --git a/ortools/linear_solver/solve.cc b/ortools/linear_solver/solve.cc index e135ccc19c..7069b5e1f0 100644 --- a/ortools/linear_solver/solve.cc +++ b/ortools/linear_solver/solve.cc @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -78,9 +79,11 @@ ABSL_FLAG(std::string, input, "", "REQUIRED: Input file name."); ABSL_FLAG(std::string, sol_hint, "", "Input file name with solution in .sol format."); -ABSL_FLAG(std::string, solver, "glop", +ABSL_FLAG(std::optional, solver, std::nullopt, "The solver to use: bop, cbc, clp, glop, glpk_lp, glpk_mip, " - "gurobi_lp, gurobi_mip, pdlp, scip, knapsack, sat."); + "gurobi_lp, gurobi_mip, pdlp, scip, knapsack, sat. If unspecified " + "either use MPModelRequest.solver_type if the --input is an " + "MPModelRequest and the field is set or use glop."); ABSL_FLAG(int, num_threads, 1, "Number of threads to use by the underlying solver."); ABSL_FLAG(std::string, params_file, "", @@ -263,9 +266,15 @@ void Run() { QCHECK_GE(absl::GetFlag(FLAGS_time_limit), absl::ZeroDuration()) << "--time_limit must be given a positive duration"; - MPSolver::OptimizationProblemType type; - CHECK(MPSolver::ParseSolverType(absl::GetFlag(FLAGS_solver), &type)) - << "Unsupported --solver: " << absl::GetFlag(FLAGS_solver); + // Parses --solver if set. 
+ std::optional type; + if (const std::optional type_flag = absl::GetFlag(FLAGS_solver); + type_flag.has_value()) { + MPSolver::OptimizationProblemType decoded_type; + QCHECK(MPSolver::ParseSolverType(type_flag.value(), &decoded_type)) + << "Unsupported --solver: " << type_flag.value(); + type = decoded_type; + } MPModelRequest request_proto = ReadMipModel(absl::GetFlag(FLAGS_input)); @@ -302,7 +311,10 @@ void Run() { } // Set or override request proto options from the command line flags. - request_proto.set_solver_type(static_cast(type)); + if (type.has_value() || !request_proto.has_solver_type()) { + request_proto.set_solver_type(static_cast( + type.value_or(MPSolver::GLOP_LINEAR_PROGRAMMING))); + } if (absl::GetFlag(FLAGS_time_limit) != absl::InfiniteDuration()) { LOG(INFO) << "Setting a time limit of " << absl::GetFlag(FLAGS_time_limit); request_proto.set_solver_time_limit_seconds( diff --git a/ortools/linear_solver/solve_mp_model.cc b/ortools/linear_solver/solve_mp_model.cc index de322ffd09..1a490b67f9 100644 --- a/ortools/linear_solver/solve_mp_model.cc +++ b/ortools/linear_solver/solve_mp_model.cc @@ -15,18 +15,42 @@ #include #include +#include #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/solve_interrupter.h" namespace operations_research { -MPSolutionResponse SolveMPModel(const MPModelRequest& model_request, - std::atomic* interrupt) { // TODO(b/311704821): this function should not delegate to MPSolver, also true // for the functions below. 
+MPSolutionResponse SolveMPModel(const MPModelRequest& model_request, + SolveInterrupter* interrupter) { MPSolutionResponse response; - MPSolver::SolveWithProto(model_request, &response, interrupt); + if (interrupter != nullptr) { + std::atomic atomic_bool = false; + ScopedSolveInterrupterCallback cleanup( + interrupter, [&atomic_bool] { atomic_bool.store(true); }); + MPSolver::SolveLazyMutableRequest(model_request, &response, &atomic_bool); + } else { + MPSolver::SolveLazyMutableRequest(model_request, &response); + } + return response; +} + +MPSolutionResponse SolveMPModel(MPModelRequest&& model_request, + SolveInterrupter* interrupter) { + MPSolutionResponse response; + if (interrupter != nullptr) { + std::atomic atomic_bool = false; + ScopedSolveInterrupterCallback cleanup( + interrupter, [&atomic_bool] { atomic_bool.store(true); }); + MPSolver::SolveLazyMutableRequest(std::move(model_request), &response, + &atomic_bool); + } else { + MPSolver::SolveLazyMutableRequest(std::move(model_request), &response); + } return response; } diff --git a/ortools/linear_solver/solve_mp_model.h b/ortools/linear_solver/solve_mp_model.h index b276c1d274..d56754d248 100644 --- a/ortools/linear_solver/solve_mp_model.h +++ b/ortools/linear_solver/solve_mp_model.h @@ -22,23 +22,30 @@ #include #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/solve_interrupter.h" namespace operations_research { /** * Solves the model encoded by a MPModelRequest protocol buffer and returns the - * solution encoded as a MPSolutionResponse. The solve is stopped prematurely - * if interrupt is non-null at set to true during (or before) solving. - * Interruption is only supported if SolverTypeSupportsInterruption() returns - * true for the requested solver. Passing a non-null interruption with any - * other solver type immediately returns an MPSOLVER_INCOMPATIBLE_OPTIONS - * error. + * solution encoded as a MPSolutionResponse. 
+ * + * If interrupter is non-null, one can call interrupter->Interrupt() to stop the + * solver earlier. Interruption is only supported if + * SolverTypeSupportsInterruption() returns true for the requested solver. + * Passing a non-null pointer with any other solver type immediately returns an + * MPSOLVER_INCOMPATIBLE_OPTIONS error. */ -MPSolutionResponse SolveMPModel( - const MPModelRequest& model_request, - // `interrupt` is non-const because the internal - // solver may set it to true itself, in some cases. - std::atomic* interrupt = nullptr); +MPSolutionResponse SolveMPModel(const MPModelRequest& model_request, + SolveInterrupter* interrupter = nullptr); + +/** + * This version should be preferred if the request is not needed afterwards. + * It will allows to reclaim the request memory as soon as it is converted to + * one of the solver internal data representation. + */ +MPSolutionResponse SolveMPModel(MPModelRequest&& request, + SolveInterrupter* interrupter = nullptr); bool SolverTypeSupportsInterruption(MPModelRequest::SolverType solver); diff --git a/ortools/linear_solver/users_allowing_model_storage.cc b/ortools/linear_solver/users_allowing_model_storage.cc new file mode 100644 index 0000000000..9ec377becb --- /dev/null +++ b/ortools/linear_solver/users_allowing_model_storage.cc @@ -0,0 +1,78 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/linear_solver/users_allowing_model_storage.h" + +#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" + +namespace operations_research { +const absl::flat_hash_set& UsersAllowingModelStorage() { + static const auto* const set = new absl::flat_hash_set{ + // Approved by default. + "operations-research", + + // Approved hmajaya@ on 2019/05/17 by e-mail. + "apex-eng", + + // Approved by jhuchette@ on 2024-02-29 by code review. + "apps-capacity-auxon", + "autocap-automation", + "autocap-solver-access", + + // Approved by mlubin@, dapplegate@, and bwydrowski@ on 2019/05/17 + // by e-mail. As of 2020/04/08, prod queries are sent by "muppet-packer". + "blokus-prod", + "blokus-planning", + "blokus-packer-dev", + "muppet-packer", + + // Approved by sjoakley@ on 2019/10/22 by e-mail. + "cloud-capacity", + "techinfra-capacity", + + // Approved by sgowal@ on 2019/05/17 by e-mail. + "deepmind-research", + + // Approved by yxz@ on 2019/05/17 by e-mail. As of 2020/04/09, many + // queries are sent by "logs-placement". + "logs-front-door", + "logs-front-door-unprivileged", + "logs-placement", + + // Approved by ansha@ on 2019/05/17 by e-mail. We add netarch-wand-* mdb + // groups explicitly, because as of 2019/10/22 our naive logic collects + // a model iff the mdb group listed here matches exactly the mdb group + // of the RPC sender (i.e., we do not check group transitive memberships, + // and here all netarch-wand-* groups belong to tetraligh-jobs). + "tetralight-jobs", + "netarch-wand-prod", + "netarch-wand-dev", + "netarch-wand-test", + + // Approved by haoxu@ on 2019/05/17 by e-mail. + // As of 2019/10/22, some models are sent by user xiaob@ (instead of + // raptical@), so we add the user explicitly to this allowlist. + "cluster-planning-urp-state-runner", + "cluster-planning-urp-compute", + "raptical", + "xiaob", + + // Approved by nharsha@ and mattard@ on 2019/05/17 by e-mail. 
+ "resource-planning-optimization", + "resource-planning-optimization-eng-team", + "resource-portal-test", + }; + return *set; +} +} // namespace operations_research diff --git a/ortools/linear_solver/users_allowing_model_storage.h b/ortools/linear_solver/users_allowing_model_storage.h new file mode 100644 index 0000000000..481c9b7310 --- /dev/null +++ b/ortools/linear_solver/users_allowing_model_storage.h @@ -0,0 +1,28 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ +#define OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ + +#include "absl/container/flat_hash_set.h" +#include "absl/strings/string_view.h" + +namespace operations_research { +// List of *exact* MDB users who agreed that we store their MIP/LP/math +// (anonymized) models. +// IMPORTANT: The MDB user has to match exactly with an item in this list: we +// don't do ACL expansion, regexp matching or anything alike. 
+const absl::flat_hash_set& UsersAllowingModelStorage(); +} // namespace operations_research + +#endif // OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ diff --git a/ortools/linear_solver/wrappers/BUILD.bazel b/ortools/linear_solver/wrappers/BUILD.bazel index 727d5eae32..f958ac2cc4 100644 --- a/ortools/linear_solver/wrappers/BUILD.bazel +++ b/ortools/linear_solver/wrappers/BUILD.bazel @@ -49,6 +49,7 @@ cc_library( "//ortools/lp_data:lp_parser", "//ortools/lp_data:mps_reader", "//ortools/util:logging", + "//ortools/util:solve_interrupter", "//ortools/xpress:environment", ], ) diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 1ae565dc8f..a1f24be23b 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -466,7 +466,7 @@ std::optional ModelSolverHelper::SolveRequest( request.solver_type()))) { return std::nullopt; } - return SolveMPModel(request, &interrupt_solve_); + return SolveMPModel(request, &interrupter_); } namespace { @@ -567,19 +567,20 @@ void ModelSolverHelper::Solve(const ModelBuilderHelper& model) { } switch (solver_type_.value()) { case MPModelRequest::GLOP_LINEAR_PROGRAMMING: { - response_ = GlopSolveProto(request, &interrupt_solve_, log_callback_); + response_ = + GlopSolveProto(std::move(request), &interrupt_solve_, log_callback_); break; } case MPModelRequest::SAT_INTEGER_PROGRAMMING: { - response_ = - SatSolveProto(request, &interrupt_solve_, log_callback_, nullptr); + response_ = SatSolveProto(std::move(request), &interrupt_solve_, + log_callback_, nullptr); break; } #if defined(USE_SCIP) case MPModelRequest::SCIP_MIXED_INTEGER_PROGRAMMING: { // TODO(user): Enable log_callback support. // TODO(user): Enable interrupt_solve. 
- const auto temp = ScipSolveProto(request); + const auto temp = ScipSolveProto(std::move(request)); if (temp.ok()) { response_ = std::move(temp.value()); } @@ -588,7 +589,7 @@ void ModelSolverHelper::Solve(const ModelBuilderHelper& model) { #endif // defined(USE_SCIP) #if defined(USE_PDLP) case MPModelRequest::PDLP_LINEAR_PROGRAMMING: { - const auto temp = PdlpSolveProto(request); + const auto temp = PdlpSolveProto(std::move(request)); if (temp.ok()) { response_ = std::move(temp.value()); } @@ -598,7 +599,7 @@ void ModelSolverHelper::Solve(const ModelBuilderHelper& model) { case MPModelRequest:: GUROBI_LINEAR_PROGRAMMING: // ABSL_FALLTHROUGH_INTENDED case MPModelRequest::GUROBI_MIXED_INTEGER_PROGRAMMING: { - const auto temp = GurobiSolveProto(request); + const auto temp = GurobiSolveProto(std::move(request)); if (temp.ok()) { response_ = std::move(temp.value()); } @@ -640,6 +641,7 @@ void ModelSolverHelper::SetLogCallbackFromDirectorClass( void ModelSolverHelper::ClearLogCallback() { log_callback_ = nullptr; } bool ModelSolverHelper::InterruptSolve() { + interrupter_.Interrupt(); interrupt_solve_ = true; return true; } diff --git a/ortools/linear_solver/wrappers/model_builder_helper.h b/ortools/linear_solver/wrappers/model_builder_helper.h index 36221ad6bb..24525afaa8 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.h +++ b/ortools/linear_solver/wrappers/model_builder_helper.h @@ -24,6 +24,7 @@ #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_exporter.h" #include "ortools/util/logging.h" +#include "ortools/util/solve_interrupter.h" namespace operations_research { @@ -166,7 +167,6 @@ class ModelSolverHelper { void Solve(const ModelBuilderHelper& model); // Only used by the CVXPY interface. Does not store the response internally. - // interrupt_solve_ is passed to the solve method. 
std::optional SolveRequest(const MPModelRequest& request); // Returns true if the interrupt signal was correctly sent, that is if the @@ -203,6 +203,7 @@ class ModelSolverHelper { // TODO(user): set parameters. private: + SolveInterrupter interrupter_; std::atomic interrupt_solve_ = false; std::function log_callback_; std::optional response_; diff --git a/ortools/math_opt/python/compute_infeasible_subsystem_result.py b/ortools/math_opt/python/compute_infeasible_subsystem_result.py index bf77203ca0..4f981cfba7 100644 --- a/ortools/math_opt/python/compute_infeasible_subsystem_result.py +++ b/ortools/math_opt/python/compute_infeasible_subsystem_result.py @@ -14,7 +14,7 @@ """Data types for the result of calling `mathopt.compute_infeasible_subsystem.""" import dataclasses -from typing import Mapping +from typing import FrozenSet, Mapping import immutabledict @@ -75,7 +75,7 @@ class ModelSubset: variable_bounds: Mapping[ model.Variable, ModelSubsetBounds ] = immutabledict.immutabledict() - variable_integrality: frozenset[model.Variable] = frozenset() + variable_integrality: FrozenSet[model.Variable] = frozenset() linear_constraints: Mapping[ model.LinearConstraint, ModelSubsetBounds ] = immutabledict.immutabledict() diff --git a/ortools/math_opt/solver_tests/ip_model_solve_parameters_tests.cc b/ortools/math_opt/solver_tests/ip_model_solve_parameters_tests.cc index 27ec82508e..df7b5a3628 100644 --- a/ortools/math_opt/solver_tests/ip_model_solve_parameters_tests.cc +++ b/ortools/math_opt/solver_tests/ip_model_solve_parameters_tests.cc @@ -367,23 +367,27 @@ TEST_P(LazyConstraintsTest, LazyConstraintsImposedOnModel) { } // The problem is: -// min x -// s.t. x >= 0 (c) -// -1 <= x <= 1 -// x integer +// min y +// s.t. y >= x (c) +// y >= -x (d) +// -1 <= x, y <= 1 +// x, y integer // // With a node limit of 0 and solver parameters set to disable presolve, we // expect a dual bound equal to the LP relaxation bound (which is 0). 
However, -// if c is a lazy constraint, it is not included in the LP relaxation, and the -// bound instead is -1. +// if c and d are lazy constraints, they are not included in the LP relaxation, +// and the bound instead is -1. TEST_P(LazyConstraintsTest, AnnotationsAreSetProperly) { Model model; Variable x = model.AddIntegerVariable(-1, 1, "x"); - const LinearConstraint c = model.AddLinearConstraint(x >= 0); - model.Minimize(x); + Variable y = model.AddIntegerVariable(-1, 1, "y"); + const LinearConstraint c = model.AddLinearConstraint(y >= x); + const LinearConstraint d = model.AddLinearConstraint(y >= -x); + model.Minimize(y); - SolveArguments args = {.parameters = NerfedSolveParams(), - .model_parameters = {.lazy_linear_constraints = {c}}}; + SolveArguments args = { + .parameters = NerfedSolveParams(), + .model_parameters = {.lazy_linear_constraints = {c, d}}}; args.parameters.node_limit = 0; ASSERT_OK_AND_ASSIGN(const SolveResult result, Solve(model, TestedSolver(), args)); @@ -394,17 +398,20 @@ TEST_P(LazyConstraintsTest, AnnotationsAreSetProperly) { // Same setting as in AnnotationsAreSetProperly above, but we solve twice with // an incremental solver: first with the lazy constraint annotations, and then // without. If the annotations are cleared after the first, then we expect the -// second to solve the entire LP (including c), giving a dual bound of 0. +// second to solve the entire LP (including c and d), giving a dual bound of 0. 
TEST_P(LazyConstraintsTest, AnnotationsAreClearedAfterSolve) { Model model; Variable x = model.AddIntegerVariable(-1, 1, "x"); - const LinearConstraint c = model.AddLinearConstraint(x >= 0); - model.Minimize(x); + Variable y = model.AddIntegerVariable(-1, 1, "y"); + const LinearConstraint c = model.AddLinearConstraint(y >= x); + const LinearConstraint d = model.AddLinearConstraint(y >= -x); + model.Minimize(y); ASSERT_OK_AND_ASSIGN(const auto solver, IncrementalSolver::New(&model, TestedSolver())); - SolveArguments args = {.parameters = NerfedSolveParams(), - .model_parameters = {.lazy_linear_constraints = {c}}}; + SolveArguments args = { + .parameters = NerfedSolveParams(), + .model_parameters = {.lazy_linear_constraints = {c, d}}}; args.parameters.node_limit = 0; ASSERT_OK_AND_ASSIGN(const SolveResult bad_result, solver->Solve(args)); ASSERT_THAT(bad_result, TerminatesWithReasonNoSolutionFound(Limit::kNode)); diff --git a/ortools/math_opt/solver_tests/second_order_cone_tests.cc b/ortools/math_opt/solver_tests/second_order_cone_tests.cc index 0f2e6d08ae..a11a5dab6e 100644 --- a/ortools/math_opt/solver_tests/second_order_cone_tests.cc +++ b/ortools/math_opt/solver_tests/second_order_cone_tests.cc @@ -60,7 +60,7 @@ using ::testing::status::StatusIs; // A bit larger than expected; as of 2023-01-31 Gurobi produces slightly // inaccurate solutions on some of the tests. 
-constexpr double kTolerance = 1.0e-4; +constexpr double kTolerance = 1.0e-3; constexpr absl::string_view kNoSocSupportMessage = "This test is disabled as the solver does not support second-order cone " "constraints"; diff --git a/ortools/math_opt/solvers/gscip_solver.cc b/ortools/math_opt/solvers/gscip_solver.cc index 7ec575c15e..2d2361c48d 100644 --- a/ortools/math_opt/solvers/gscip_solver.cc +++ b/ortools/math_opt/solvers/gscip_solver.cc @@ -111,7 +111,7 @@ int64_t SafeId(const LinearConstraintsProto& linear_constraints, int index) { return linear_constraints.ids(index); } -const std::string& SafeName(const LinearConstraintsProto& linear_constraints, +absl::string_view SafeName(const LinearConstraintsProto& linear_constraints, int index) { if (linear_constraints.names().empty()) { return EmptyString(); diff --git a/ortools/math_opt/solvers/gurobi/g_gurobi.cc b/ortools/math_opt/solvers/gurobi/g_gurobi.cc index 221442aaef..bc035e1118 100644 --- a/ortools/math_opt/solvers/gurobi/g_gurobi.cc +++ b/ortools/math_opt/solvers/gurobi/g_gurobi.cc @@ -541,8 +541,15 @@ absl::StatusOr Gurobi::ComputeIIS(Callback cb) { RETURN_IF_ERROR(scope->Release()); return false; } else if (error == kGrbOk) { + // If Gurobi v11 terminates at a limit before determining if the model is + // feasible or not, it will return an OK error code but then will fail to + // return anything about the IIS it does not have. To detect this case, we + // query the minimality attribute: we know that our env is valid at this + // point, and this should fail iff an IIS is present, i.e., Gurobi proved + // that the model was infeasible. 
+ const bool has_iis = GetIntAttr(GRB_INT_ATTR_IIS_MINIMAL).ok(); RETURN_IF_ERROR(scope->Release()); - return true; + return has_iis; } RETURN_IF_ERROR(ToStatus(error)); return scope->Release(); diff --git a/ortools/math_opt/solvers/gurobi/g_gurobi.h b/ortools/math_opt/solvers/gurobi/g_gurobi.h index d1af139ed4..5072db8215 100644 --- a/ortools/math_opt/solvers/gurobi/g_gurobi.h +++ b/ortools/math_opt/solvers/gurobi/g_gurobi.h @@ -466,9 +466,8 @@ class Gurobi { // // Returns: // * a status if Gurobi errors, - // * false if Gurobi determines that the model is feasible, or - // * true otherwise (e.g., infeasibility is proven or a limit is reached). - // setup/teardown, and true otherwise. + // * true if Gurobi proves that the model is infeasible, or + // * false otherwise (e.g., feasibility is proven or a limit is reached). // // The callback, if specified, is set before solving and cleared after. absl::StatusOr ComputeIIS(Callback cb = nullptr); diff --git a/ortools/math_opt/testing/BUILD.bazel b/ortools/math_opt/testing/BUILD.bazel index 6df27bf172..e80e4e0a4f 100644 --- a/ortools/math_opt/testing/BUILD.bazel +++ b/ortools/math_opt/testing/BUILD.bazel @@ -11,12 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package(default_visibility = [ - "//ortools/graph:__subpackages__", - "//ortools/math_opt:__subpackages__", - "//ortools/models:__subpackages__", - "//ortools/stochastic_optimization:__subpackages__", -]) +package(default_visibility = ["//ortools:__subpackages__"]) cc_library( name = "param_name", diff --git a/ortools/pdlp/solve_log.proto b/ortools/pdlp/solve_log.proto index 282048499a..2b940dd0c5 100644 --- a/ortools/pdlp/solve_log.proto +++ b/ortools/pdlp/solve_log.proto @@ -18,6 +18,10 @@ syntax = "proto2"; package operations_research.pdlp; +option java_package = "com.google.ortools.pdlp"; +option java_multiple_files = true; +option csharp_namespace = "Google.OrTools.PDLP"; + import "ortools/pdlp/solvers.proto"; // Easy-to-compute statistics for the quadratic program. diff --git a/ortools/pdlp/solvers.proto b/ortools/pdlp/solvers.proto index d128b13079..215dfd594d 100644 --- a/ortools/pdlp/solvers.proto +++ b/ortools/pdlp/solvers.proto @@ -15,6 +15,10 @@ syntax = "proto2"; package operations_research.pdlp; +option java_package = "com.google.ortools.pdlp"; +option java_multiple_files = true; +option csharp_namespace = "Google.OrTools.PDLP"; + import "ortools/glop/parameters.proto"; enum OptimalityNorm { diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index c0ca227f4b..4a0f28d7c5 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -242,7 +242,7 @@ cc_test( deps = [ ":tsplib_parser", "//ortools/base", - "//ortools/base:filesystem", + "//ortools/base:file", "//ortools/base:memfile", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/strings", diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 9b41007594..191060df45 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -506,3 +506,17 @@ cc_library( "@com_google_protobuf//:protobuf", ], ) + +cc_library( + name = "solve_interrupter", + srcs = ["solve_interrupter.cc"], + hdrs = ["solve_interrupter.h"], + deps = [ + 
"//ortools/base", + "//ortools/base:intops", + "//ortools/base:linked_hash_map", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/synchronization", + ], +) diff --git a/ortools/util/csharp/proto.i b/ortools/util/csharp/proto.i index 47d21fe6a8..1de439644e 100644 --- a/ortools/util/csharp/proto.i +++ b/ortools/util/csharp/proto.i @@ -32,19 +32,14 @@ // if the C++ function returns a protocol message: // MyProto* foo(); // Use PROTO2_RETURN macro: -// PROTO2_RETURN(MyProto, Google.Proto.Protos.Test.MyProto, true) -// -// Replace true by false if the C++ function returns a pointer to a -// protocol message object whose ownership is not transferred to the -// (C++) caller. +// PROTO2_RETURN(MyProto, Google.Proto.Protos.Test.MyProto) // // Passing each protocol message from C# to C++ by value. Each ProtocolMessage // is serialized into byte[] when it is passed from C# to C++, the C++ code // deserializes into C++ native protocol message. // // @param CppProtoType the fully qualified C++ protocol message type -// @param CSharpProtoType the corresponding fully qualified C# protocol message -// type +// @param CSharpProtoType the corresponding fully qualified C# protocol message type // @param param_name the parameter name %define PROTO_INPUT(CppProtoType, CSharpProtoType, param_name) %typemap(ctype) PROTO_TYPE* INPUT, PROTO_TYPE& INPUT "int " #param_name "_size, uint8_t*" @@ -69,6 +64,12 @@ %apply PROTO_TYPE* INPUT { CppProtoType* param_name } %enddef // end PROTO_INPUT +// Return protocol message from C++ to C#. +// Each protocol message is serialized into byte[] when it is returned +// from C++. 
+// +// @param CppProtoType the fully qualified C++ protocol message type +// @param CSharpProtoType the corresponding fully qualified C# protocol message type %define PROTO2_RETURN(CppProtoType, CSharpProtoType) %typemap(ctype) CppProtoType "uint8_t*" %typemap(imtype) CppProtoType "System.IntPtr" @@ -104,3 +105,20 @@ %enddef // end PROTO2_RETURN +// SWIG Macro for mapping protocol message enum type. +// @param CppEnumProto the C++ protocol message enum type +// @param CSharpEnumProto the corresponding C# protocol message enum type +%define PROTO_ENUM_RETURN(CppEnumProto, CSharpEnumProto) +%typemap(ctype) CppEnumProto "int" +%typemap(imtype) CppEnumProto "int" +%typemap(cstype) CppEnumProto "CSharpEnumProto" + +// From CppEnumProto to ctype (in wrap.cxx code) +%typemap(out) CppEnumProto %{ $result = $1; %} + +// From imtype to cstype (in .cs code) +%typemap(csout) CppEnumProto { + return (CSharpEnumProto) $imcall; +} +%enddef // end PROTO_ENUM_RETURN + diff --git a/ortools/util/filelineiter.h b/ortools/util/filelineiter.h index a2d7b689da..79a2dc5337 100644 --- a/ortools/util/filelineiter.h +++ b/ortools/util/filelineiter.h @@ -142,7 +142,7 @@ class FileLines { // Please prefer the other constructor combined with file::Open() in new code // so that missing files are properly detected. This version would only print // a warning and act as if the file was empty. 
- explicit FileLines(const std::string& filename, + explicit FileLines(absl::string_view filename, int options = FileLineIterator::DEFAULT) : FileLines( filename, diff --git a/ortools/util/fp_utils.cc b/ortools/util/fp_utils.cc index bf82c1dca4..e44d20cd56 100644 --- a/ortools/util/fp_utils.cc +++ b/ortools/util/fp_utils.cc @@ -72,9 +72,9 @@ void ComputeScalingErrors(absl::Span input, } template -void GetBestScalingOfDoublesToInt64(const std::vector& input, - const std::vector& lb, - const std::vector& ub, +void GetBestScalingOfDoublesToInt64(absl::Span input, + absl::Span lb, + absl::Span ub, int64_t max_absolute_sum, double* scaling_factor) { const double kInfinity = std::numeric_limits::infinity(); diff --git a/ortools/util/java/functions.i b/ortools/util/java/functions.i index 3c7d41cc5e..1835b7e449 100644 --- a/ortools/util/java/functions.i +++ b/ortools/util/java/functions.i @@ -54,7 +54,7 @@ // Abbreviation of the java type corresponding to the given CType. // Eg. JAVA_ABBREV(int64_t) expands to "J". -#define JAVA_ABBREV_int64 "J" +#define JAVA_ABBREV_int64_t "J" #define JAVA_ABBREV_int "I" #define JAVA_ABBREV_bool "Z" #define JAVA_ABBREV(x) JAVA_ABBREV_ ## x diff --git a/ortools/util/java/proto.i b/ortools/util/java/proto.i index e7ece58490..f686846aba 100644 --- a/ortools/util/java/proto.i +++ b/ortools/util/java/proto.i @@ -23,9 +23,7 @@ // if the C++ function returns a protocol message: // MyProto* foo(); // Use PROTO2_RETURN macro: -// PROTO2_RETURN(MyProto, com.google.proto.protos.test.MyProto, giveOwnership) -// -> the 'giveOwnership' parameter should be true iff the C++ function -// returns a new proto which should be deleted by the client. +// PROTO2_RETURN(MyProto, com.google.proto.protos.test.MyProto) // // Passing each protocol message from Java to C++ by value. 
Each ProtocolMessage // is serialized into byte[] when it is passed from Java to C++, the C++ code @@ -90,3 +88,22 @@ jenv->SetByteArrayRegion($result, 0, size, buf.get()); } %enddef // PROTO2_RETURN + +// SWIG Macro for mapping protocol message enum type. +// @param CppEnumProto the C++ protocol message enum type +// @param JavaEnumProto the corresponding Java protocol message enum type +%define PROTO_ENUM_RETURN(CppEnumProto, JavaEnumProto) +%typemap(jni) CppEnumProto "jint" +%typemap(jtype) CppEnumProto "int" +%typemap(jstype) CppEnumProto "JavaEnumProto" + +// From CppEnumProto to jni (in wrap.cxx code) +%typemap(out) CppEnumProto %{ $result = $1; %} + +// From jtype to jstype (in .java code) +%typemap(javaout) CppEnumProto { + return JavaEnumProto.forNumber($jnicall); +} + +%enddef // end PROTO_ENUM_RETURN + diff --git a/ortools/util/lazy_mutable_copy.h b/ortools/util/lazy_mutable_copy.h index a9a357bbe3..6af6c4f5aa 100644 --- a/ortools/util/lazy_mutable_copy.h +++ b/ortools/util/lazy_mutable_copy.h @@ -16,8 +16,6 @@ #include -#include "absl/memory/memory.h" - namespace operations_research { // LazyMutableCopy is a helper class for making an on-demand copy of an @@ -34,40 +32,84 @@ namespace operations_research { // void ProcessProto(LazyMutableCopy input) { // pass by copy // ... // } -// At the call site: ProcessProto({const_ref_to_my_proto}); +// At the call site: ProcessProto(const_ref_to_my_proto); // // In basic usage, a LazyMutableCopy is in one of two states: // - original: points to the const original. No memory allocated. // - copy: points to a mutable copy of the original and owns it. Owning the // copy means that the destructor will delete it, like std::unique_ptr<>. -// This is what you get by calling get_mutable(). +// This is what you get by calling get_mutable() or constructing it with +// a move. 
template class LazyMutableCopy { public: - // You always construct a LazyMutableCopy with a const reference to an object, + // You can construct a LazyMutableCopy with a const reference to an object, // which must outlive this class (unless get_mutable() was called). LazyMutableCopy(const T& obj) // NOLINT(google-explicit-constructor) - : original_(&obj) {} + : ptr_(&obj) {} - // You can move a LazyMutableCopy, much like a std::unique_ptr<> or a const*. - // We simply rely on the default move constructors being available. + // The other option is to construct a LazyMutableCopy with a std::move(T). + // In this case you transfer ownership and you can mutate it for free. + LazyMutableCopy(T&& obj) // NOLINT(google-explicit-constructor) + : copy_(std::make_unique(std::move(obj))), ptr_(copy_.get()) {} - const T& get() const { return copy_ != nullptr ? *copy_ : *original_; } + // You can move a LazyMutableCopy but not copy it, much like a + // std::unique_ptr<>. + LazyMutableCopy(LazyMutableCopy&&) = default; + LazyMutableCopy(const LazyMutableCopy&) = delete; + class LazyMutableCopy& operator=(LazyMutableCopy&&) = default; + class LazyMutableCopy& operator=(const LazyMutableCopy&) = delete; + + // This will copy the object if we don't already have ownership. T* get_mutable() { - if (copy_ == nullptr) { - copy_ = std::make_unique(*original_); - original_ = nullptr; + if (copy_ == nullptr && ptr_ != nullptr) { + copy_ = std::make_unique(*ptr_); + ptr_ = copy_.get(); } return copy_.get(); } + // Lazily make a copy if not already done and transfer ownership from this + // class to the returned std::unique_ptr. Calling this function leaves the + // class in a state where the only valid operations is to assign it a new + // value. + // + // We force a call via + // std::move(lazy_mutable_copy).copy_or_move_as_unique_ptr() to make it + // clearer that lazy_mutable_copy shouldn't really be used after this. 
+ std::unique_ptr copy_or_move_as_unique_ptr() && { + if (copy_ == nullptr && ptr_ != nullptr) { + std::unique_ptr result = std::make_unique(*ptr_); + ptr_ = nullptr; + return result; + } + ptr_ = nullptr; + return std::move(copy_); + } + // True iff get_mutable() was called at least once (in which case the object - // was copied). - bool was_copied() const { return copy_ != nullptr; } + // was copied) or if we constructed this via std::move(). + bool has_ownership() const { return copy_ != nullptr; } + + // Standard smart pointer accessor, but only for const purpose. + // Undefined if the class contains no object. + const T* get() const { return ptr_; } + const T& operator*() const { return *ptr_; } + const T* operator->() const { return ptr_; } + + // Destroys any owned value. Calling this function leaves the class in a state + // where the only valid operations is to assign it a new value. + // + // We force a call via std::move(lazy_mutable_copy).dispose() to make it + // clearer that lazy_mutable_copy shouldn't really be used after this. + void dispose() && { + ptr_ = nullptr; + copy_ = nullptr; + } private: - const T* original_; std::unique_ptr copy_; + const T* ptr_ = nullptr; }; } // namespace operations_research diff --git a/ortools/util/solve_interrupter.cc b/ortools/util/solve_interrupter.cc new file mode 100644 index 0000000000..fc85b2b926 --- /dev/null +++ b/ortools/util/solve_interrupter.cc @@ -0,0 +1,102 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/util/solve_interrupter.h" + +#include +#include +#include +#include +#include + +#include "absl/synchronization/mutex.h" +#include "ortools/base/linked_hash_map.h" +#include "ortools/base/logging.h" +#include "ortools/base/strong_int.h" + +namespace operations_research { + +void SolveInterrupter::Interrupt() { + const absl::MutexLock lock(&mutex_); + + // Here we don't use compare_exchange_strong since we need to hold the lock + // before changing the value of interrupted_ anyway. So there is no need to + // use this complex function. + if (interrupted_.load()) { + // We must not call the callbacks more than once. + return; + } + + // We need to change this value while holding the lock since in + // AddInterruptionCallback() we must know if we need to call the new callback + // of if this function has called it. + interrupted_ = true; + + // We are holding the lock while calling callbacks. This make it impossible to + // call Interrupt(), AddInterruptionCallback(), or + // RemoveInterruptionCallback() from a callback but it ensures that external + // code that can modify callbacks_ will wait the end of Interrupt. + for (const auto& [callback_id, callback] : callbacks_) { + callback(); + } +} + +SolveInterrupter::CallbackId SolveInterrupter::AddInterruptionCallback( + Callback callback) { + const absl::MutexLock lock(&mutex_); + + // We must make this call while holding the lock since we want to be sure that + // the calls to the callbacks_ won't occur before we registered the new + // one. If we were not holding the lock, this could return false and before we + // could add the new callback to callbacks_, the Interrupt() function may + // still have called them. + // + // We make the call before putting the callback in the map to since we need to + // move it in place. 
+ if (interrupted_.load()) { + callback(); + } + + const CallbackId id = next_callback_id_; + ++next_callback_id_; + CHECK(callbacks_.try_emplace(id, std::move(callback)).second); + return id; +} + +void SolveInterrupter::RemoveInterruptionCallback(CallbackId id) { + const absl::MutexLock lock(&mutex_); + CHECK_EQ(callbacks_.erase(id), 1) << "unregistered callback id: " << id; +} + +ScopedSolveInterrupterCallback::ScopedSolveInterrupterCallback( + SolveInterrupter* const interrupter, SolveInterrupter::Callback callback) + : interrupter_(interrupter), + callback_id_( + interrupter != nullptr + ? std::make_optional( + interrupter->AddInterruptionCallback(std::move(callback))) + : std::nullopt) {} + +ScopedSolveInterrupterCallback::~ScopedSolveInterrupterCallback() { + RemoveCallbackIfNecessary(); +} + +void ScopedSolveInterrupterCallback::RemoveCallbackIfNecessary() { + if (callback_id_) { + CHECK_NE(interrupter_, nullptr); + interrupter_->RemoveInterruptionCallback(*callback_id_); + callback_id_.reset(); + } +} + +} // namespace operations_research diff --git a/ortools/util/solve_interrupter.h b/ortools/util/solve_interrupter.h new file mode 100644 index 0000000000..7cfb044a10 --- /dev/null +++ b/ortools/util/solve_interrupter.h @@ -0,0 +1,149 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef OR_TOOLS_UTIL_SOLVE_INTERRUPTER_H_ +#define OR_TOOLS_UTIL_SOLVE_INTERRUPTER_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/thread_annotations.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "ortools/base/linked_hash_map.h" +#include "ortools/base/strong_int.h" + +namespace operations_research { + +// Interrupter used by solvers to know if/when they should interrupt the solve. +// +// Once triggered with Interrupt(), an interrupter can't be reset. It can be +// triggered from any thread. +// +// Thread-safety: APIs on this class are safe to call concurrently from multiple +// threads. +class SolveInterrupter { + public: + // Id used to identify a callback. + DEFINE_STRONG_INT_TYPE(CallbackId, int64_t); + + using Callback = std::function; + + SolveInterrupter() = default; + + SolveInterrupter(const SolveInterrupter&) = delete; + SolveInterrupter& operator=(const SolveInterrupter&) = delete; + + // Interrupts the solve as soon as possible. + // + // Once requested the interruption can't be reset. The user should use a new + // SolveInterrupter for later solves. + // + // It is safe to call this function multiple times. Only the first call will + // have visible effects; other calls will be ignored. + void Interrupt(); + + // Returns true if the solve interruption has been requested. + // + // This API is fast; it costs the read of an atomic. + inline bool IsInterrupted() const { return interrupted_.load(); } + + // Registers a callback to be called when the interruption is requested. + // + // The callback is immediately called if the interrupter has already been + // triggered or if it is triggered during the registration. This is typically + // useful for a solver implementation so that it does not have to test + // IsInterrupted() to do the same thing it does in the callback. Simply + // registering the callback is enough. 
+ // + // The callback function can't make calls to AddInterruptionCallback(), + // RemoveInterruptionCallback() and Interrupt(). This would result is a + // deadlock. Calling IsInterrupted() is fine though. + CallbackId AddInterruptionCallback(Callback callback); + + // Unregisters a callback previously registered. It fails (with a CHECK) if + // the callback was already unregistered or unkonwn. After this calls returns, + // the caller can assume the callback won't be called. + // + // This function can't be called from a callback since this would result in a + // deadlock. + void RemoveInterruptionCallback(CallbackId id); + + private: + // This atomic must never be reset to false! + // + // The mutex_ should be held when setting it to true. + std::atomic interrupted_ = false; + + absl::Mutex mutex_; + + // The id to use for the next registered callback. + CallbackId next_callback_id_ ABSL_GUARDED_BY(mutex_) = {}; + + // The list of callbacks. We use a linked_hash_map to make sure the order of + // calls to callback when the interrupter is triggered is stable. + gtl::linked_hash_map callbacks_ ABSL_GUARDED_BY(mutex_); +}; + +// Class implementing RAII for interruption callbacks. +// +// Usage: +// +// SolveInterrupter* const interrupter = ...; +// { +// const ScopedSolveInterrupterCallback scoped_intr_cb(interrupter, [](){ +// // Do something when/if interrupter is not nullptr and is triggered. +// } +// ... +// } +// // At this point, the callback will have been removed. +// +// The function RemoveCallbackIfNecessary() can be used to remove the callback +// before the destruction of this object. +class ScopedSolveInterrupterCallback { + public: + // Adds a callback to the interrupter if it is not nullptr. Does nothing when + // interrupter is nullptr. 
+ ScopedSolveInterrupterCallback(SolveInterrupter* interrupter, + SolveInterrupter::Callback callback); + + ScopedSolveInterrupterCallback(const ScopedSolveInterrupterCallback&) = + delete; + ScopedSolveInterrupterCallback& operator=( + const ScopedSolveInterrupterCallback&) = delete; + + // Removes the callback if necessary. + ~ScopedSolveInterrupterCallback(); + + // Removes the callback from the interrupter. If it has already been removed + // by a previous call or if a null interrupter was passed to the constructor, + // this function has no effect. + void RemoveCallbackIfNecessary(); + + // Returns the optional interrupter. + SolveInterrupter* interrupter() const { return interrupter_; } + + private: + // Optional interrupter. + SolveInterrupter* const interrupter_; + + // Unset after the callback has been reset. + std::optional callback_id_; +}; + +} // namespace operations_research + +#endif // OR_TOOLS_UTIL_SOLVE_INTERRUPTER_H_ From c76a9a424a5faefa8d96e264b9734add02b05329 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:59:02 +0100 Subject: [PATCH 019/392] backport example/ from main --- examples/cpp/BUILD.bazel | 83 +---- examples/cpp/CMakeLists.txt | 2 - examples/cpp/binpacking_2d_sat.cc | 10 +- examples/cpp/constraint_programming_cp.cc | 3 +- examples/cpp/costas_array_sat.cc | 2 +- examples/cpp/cvrp_disjoint_tw.cc | 195 ---------- examples/cpp/cvrptw.cc | 181 --------- examples/cpp/cvrptw_lib.h | 348 ------------------ examples/cpp/cvrptw_with_breaks.cc | 235 ------------ examples/cpp/cvrptw_with_refueling.cc | 192 ---------- examples/cpp/cvrptw_with_resources.cc | 187 ---------- .../cvrptw_with_stop_times_and_resources.cc | 223 ----------- examples/cpp/frequency_assignment_problem.cc | 31 +- examples/cpp/jobshop_sat.cc | 15 +- examples/cpp/slitherlink_sat.cc | 2 +- examples/cpp/weighted_tardiness_sat.cc | 6 + examples/flatzinc/{README => README.md} | 0 17 files changed, 51 insertions(+), 1664 deletions(-) delete mode 100644 
examples/cpp/cvrp_disjoint_tw.cc delete mode 100644 examples/cpp/cvrptw.cc delete mode 100644 examples/cpp/cvrptw_lib.h delete mode 100644 examples/cpp/cvrptw_with_breaks.cc delete mode 100644 examples/cpp/cvrptw_with_refueling.cc delete mode 100644 examples/cpp/cvrptw_with_resources.cc delete mode 100644 examples/cpp/cvrptw_with_stop_times_and_resources.cc rename examples/flatzinc/{README => README.md} (100%) diff --git a/examples/cpp/BUILD.bazel b/examples/cpp/BUILD.bazel index ff2e0d1da6..d77d27b996 100644 --- a/examples/cpp/BUILD.bazel +++ b/examples/cpp/BUILD.bazel @@ -611,79 +611,6 @@ cc_binary( ], ) -cc_library( - name = "cvrptw_lib", - hdrs = ["cvrptw_lib.h"], - deps = [ - "//ortools/base", - "//ortools/constraint_solver:routing", - "//ortools/util:random_engine", - ], -) - -cc_binary( - name = "cvrptw", - srcs = ["cvrptw.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", - ], -) - -cc_binary( - name = "cvrp_disjoint_tw", - srcs = ["cvrp_disjoint_tw.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", - ], -) - -cc_binary( - name = "cvrptw_with_breaks", - srcs = ["cvrptw_with_breaks.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", - "//ortools/constraint_solver:routing_enums_cc_proto", - "@com_google_absl//absl/strings", - ], -) - -cc_binary( - name = "cvrptw_with_resources", - srcs = ["cvrptw_with_resources.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", - ], -) - -cc_binary( - name = "cvrptw_with_stop_times_and_resources", - srcs = ["cvrptw_with_stop_times_and_resources.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", - "@com_google_absl//absl/strings", - ], -) - -cc_binary( - name = "cvrptw_with_refueling", - srcs = ["cvrptw_with_refueling.cc"], - deps = [ - ":cvrptw_lib", - "//ortools/base", - "//ortools/constraint_solver:routing", 
- ], -) - cc_binary( name = "pdptw", srcs = ["pdptw.cc"], @@ -692,6 +619,7 @@ cc_binary( "//ortools/base:file", "//ortools/base:mathutil", "//ortools/constraint_solver:routing", + "//ortools/routing/parsers:lilim_parser", "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", @@ -753,6 +681,7 @@ cc_binary( "//ortools/base", "//ortools/linear_solver", "//ortools/linear_solver:linear_solver_cc_proto", + "//ortools/linear_solver:solve_mp_model", ], ) @@ -1075,12 +1004,18 @@ cc_binary( deps = [ "//ortools/base", "//ortools/linear_solver:linear_solver_cc_proto", + "//ortools/pdlp:iteration_stats", "//ortools/pdlp:primal_dual_hybrid_gradient", + "//ortools/pdlp:quadratic_program", "//ortools/pdlp:quadratic_program_io", "//ortools/pdlp:solve_log_cc_proto", "//ortools/pdlp:solvers_cc_proto", "//ortools/port:proto_utils", + "//ortools/util:file_util", "//ortools/util:sigint", - "@com_google_absl//absl/time", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/log:flags", + "@com_google_absl//absl/strings", ], ) diff --git a/examples/cpp/CMakeLists.txt b/examples/cpp/CMakeLists.txt index 08320df97c..7fe5b30fcd 100644 --- a/examples/cpp/CMakeLists.txt +++ b/examples/cpp/CMakeLists.txt @@ -42,8 +42,6 @@ file(GLOB CXX_SRCS "*.cc") list(FILTER CXX_SRCS EXCLUDE REGEX ".*/binpacking_2d_sat.cc") list(FILTER CXX_SRCS EXCLUDE REGEX ".*/course_scheduling_run.cc") # missing proto list(FILTER CXX_SRCS EXCLUDE REGEX ".*/course_scheduling.cc") # missing proto -list(FILTER CXX_SRCS EXCLUDE REGEX ".*/cvrptw_with_breaks.cc") # too long -list(FILTER CXX_SRCS EXCLUDE REGEX ".*/cvrptw_with_refueling.cc") # too long list(FILTER CXX_SRCS EXCLUDE REGEX ".*/dimacs_assignment.cc") # crash list(FILTER CXX_SRCS EXCLUDE REGEX ".*/dobble_ls.cc") # Too long list(FILTER CXX_SRCS EXCLUDE REGEX ".*/frequency_assignment_problem.cc") # crash diff --git a/examples/cpp/binpacking_2d_sat.cc 
b/examples/cpp/binpacking_2d_sat.cc index 8a197d2291..4a0334bb20 100644 --- a/examples/cpp/binpacking_2d_sat.cc +++ b/examples/cpp/binpacking_2d_sat.cc @@ -26,6 +26,7 @@ #include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" +#include "absl/types/span.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" @@ -171,7 +172,7 @@ absl::btree_set FindFixedItems( } // Solves a subset sum problem to find the maximum reachable max size. -int64_t MaxSubsetSumSize(const std::vector& sizes, int64_t max_size) { +int64_t MaxSubsetSumSize(absl::Span sizes, int64_t max_size) { CpModelBuilder builder; LinearExpr weighed_sum; for (const int size : sizes) { @@ -280,7 +281,7 @@ void LoadAndSolve(const std::string& file_name, int instance) { const absl::btree_set fixed_items = FindFixedItems(problem); // Fix the fixed_items to the first fixed_items.size() bins. - CHECK_LT(fixed_items.size(), max_bins) + CHECK_LE(fixed_items.size(), max_bins) << "Infeasible problem, increase max_bins"; int count = 0; for (const int item : fixed_items) { @@ -437,9 +438,10 @@ void LoadAndSolve(const std::string& file_name, int instance) { // Objective definition. 
cp_model.Minimize(obj); - for (int b = trivial_lb; b + 1 < max_bins; ++b) { + CHECK_GT(trivial_lb, 0); + for (int b = trivial_lb; b < max_bins; ++b) { cp_model.AddGreaterOrEqual(obj, b + 1).OnlyEnforceIf(bin_is_used[b]); - cp_model.AddImplication(bin_is_used[b + 1], bin_is_used[b]); + cp_model.AddImplication(bin_is_used[b], bin_is_used[b - 1]); } if (absl::GetFlag(FLAGS_symmetry_breaking)) { diff --git a/examples/cpp/constraint_programming_cp.cc b/examples/cpp/constraint_programming_cp.cc index c11cd27b8a..af3dc8c05b 100644 --- a/examples/cpp/constraint_programming_cp.cc +++ b/examples/cpp/constraint_programming_cp.cc @@ -43,8 +43,7 @@ void RunConstraintProgrammingExample() { solver.NewSearch(db); while (solver.NextSolution()) { - LOG(INFO) << "Solution" - << ": x = " << x->Value() << "; y = " << y->Value() + LOG(INFO) << "Solution" << ": x = " << x->Value() << "; y = " << y->Value() << "; z = " << z->Value(); } solver.EndSearch(); diff --git a/examples/cpp/costas_array_sat.cc b/examples/cpp/costas_array_sat.cc index b16f715fe4..fc7a4eb558 100644 --- a/examples/cpp/costas_array_sat.cc +++ b/examples/cpp/costas_array_sat.cc @@ -81,7 +81,7 @@ void CheckConstraintViolators(absl::Span vars, } // Check that all pairwise differences are unique -bool CheckCostas(const std::vector& vars) { +bool CheckCostas(absl::Span vars) { std::vector violators; CheckConstraintViolators(vars, &violators); diff --git a/examples/cpp/cvrp_disjoint_tw.cc b/examples/cpp/cvrp_disjoint_tw.cc deleted file mode 100644 index 1499d48389..0000000000 --- a/examples/cpp/cvrp_disjoint_tw.cc +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// -// Capacitated Vehicle Routing Problem with Disjoint Time Windows (and optional -// orders). -// A description of the problem can be found here: -// http://en.wikipedia.org/wiki/Vehicle_routing_problem. -// The variant which is tackled by this model includes a capacity dimension, -// disjoint time windows and optional orders, with a penalty cost if orders are -// not performed. For the sake of simplicity, orders are randomly located and -// distances are computed using the Manhattan distance. Distances are assumed -// to be in meters and times in seconds. - -#include -#include - -#include "absl/random/random.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using 
operations_research::ServiceTimePlusTransition; -using operations_research::Solver; - -ABSL_FLAG(int, vrp_orders, 100, "Number of nodes in the problem."); -ABSL_FLAG(int, vrp_vehicles, 20, "Number of vehicles in the problem."); -ABSL_FLAG(int, vrp_windows, 5, "Number of disjoint windows per node."); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds."); -ABSL_FLAG(bool, vrp_use_same_vehicle_costs, false, - "Use same vehicle costs in the routing model"); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; -const int64_t kMaxNodesPerGroup = 10; -const int64_t kSameVehicleCost = 1000; - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null vehicle fleet size."; - // VRP of size absl::GetFlag(FLAGS_vrp_size). - // Nodes are indexed from 0 to absl::GetFlag(FLAGS_vrp_orders), the starts and - // ends of the routes are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(absl::GetFlag(FLAGS_vrp_orders) + 1, - absl::GetFlag(FLAGS_vrp_vehicles), kDepot); - RoutingModel routing(manager); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); - ++location) { - locations.AddRandomLocation(kXMax, kYMax); - } - - // Setting the cost function. 
- const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. - const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. - const int64_t kTimePerDemandUnit = 300; - const int64_t kHorizon = 24 * 3600; - ServiceTimePlusTransition time( - kTimePerDemandUnit, - [&demand](RoutingNodeIndex i, RoutingNodeIndex j) { - return demand.Demand(i, j); - }, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/false, kTime); - const RoutingDimension& time_dimension = routing.GetDimensionOrDie(kTime); - - // Adding disjoint time windows. 
- Solver* solver = routing.solver(); - std::mt19937 randomizer( - GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - for (int order = 1; order < manager.num_nodes(); ++order) { - std::vector forbid_points(2 * absl::GetFlag(FLAGS_vrp_windows), 0); - for (int i = 0; i < forbid_points.size(); ++i) { - forbid_points[i] = absl::Uniform(randomizer, 0, kHorizon); - } - std::sort(forbid_points.begin(), forbid_points.end()); - std::vector forbid_starts(1, 0); - std::vector forbid_ends; - for (int i = 0; i < forbid_points.size(); i += 2) { - forbid_ends.push_back(forbid_points[i]); - forbid_starts.push_back(forbid_points[i + 1]); - } - forbid_ends.push_back(kHorizon); - solver->AddConstraint(solver->MakeNotMemberCt( - time_dimension.CumulVar(order), forbid_starts, forbid_ends)); - } - - // Adding penalty costs to allow skipping orders. - const int64_t kPenalty = 10000000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < manager.num_nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Adding same vehicle constraint costs for consecutive nodes. - if (absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs)) { - std::vector group; - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < manager.num_nodes(); ++order) { - group.push_back(manager.NodeToIndex(order)); - if (group.size() == kMaxNodesPerGroup) { - routing.AddSoftSameVehicleConstraint(group, kSameVehicleCost); - group.clear(); - } - } - if (!group.empty()) { - routing.AddSoftSameVehicleConstraint(group, kSameVehicleCost); - } - } - - // Solve, returns a solution if any (owned by RoutingModel). 
- RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - DisplayPlan(manager, routing, *solution, - absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/cvrptw.cc b/examples/cpp/cvrptw.cc deleted file mode 100644 index 28b62699ec..0000000000 --- a/examples/cpp/cvrptw.cc +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// -// Capacitated Vehicle Routing Problem with Time Windows (and optional orders). -// A description of the problem can be found here: -// http://en.wikipedia.org/wiki/Vehicle_routing_problem. -// The variant which is tackled by this model includes a capacity dimension, -// time windows and optional orders, with a penalty cost if orders are not -// performed. For the sake of simplicity, orders are randomly located and -// distances are computed using the Manhattan distance. Distances are assumed -// to be in meters and times in seconds. 
- -#include -#include - -#include "absl/random/random.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; - -ABSL_FLAG(int, vrp_orders, 100, "Number of nodes in the problem"); -ABSL_FLAG(int, vrp_vehicles, 20, "Number of vehicles in the problem"); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds"); -ABSL_FLAG(bool, vrp_use_same_vehicle_costs, false, - "Use same vehicle costs in the routing model"); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; -const int64_t kMaxNodesPerGroup = 10; -const int64_t kSameVehicleCost = 1000; - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null vehicle fleet size."; - // VRP of size 
absl::GetFlag(FLAGS_vrp_size). - // Nodes are indexed from 0 to absl::GetFlag(FLAGS_vrp_orders), the starts and - // ends of the routes are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(absl::GetFlag(FLAGS_vrp_orders) + 1, - absl::GetFlag(FLAGS_vrp_vehicles), kDepot); - RoutingModel routing(manager); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); - ++location) { - locations.AddRandomLocation(kXMax, kYMax); - } - - // Setting the cost function. - const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. - const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. 
- const int64_t kTimePerDemandUnit = 300; - const int64_t kHorizon = 24 * 3600; - ServiceTimePlusTransition time( - kTimePerDemandUnit, - [&demand](RoutingNodeIndex i, RoutingNodeIndex j) { - return demand.Demand(i, j); - }, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/true, kTime); - const RoutingDimension& time_dimension = routing.GetDimensionOrDie(kTime); - - // Adding time windows. - std::mt19937 randomizer( - GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - const int64_t kTWDuration = 5 * 3600; - for (int order = 1; order < manager.num_nodes(); ++order) { - const int64_t start = - absl::Uniform(randomizer, 0, kHorizon - kTWDuration); - time_dimension.CumulVar(order)->SetRange(start, start + kTWDuration); - } - - // Adding penalty costs to allow skipping orders. - const int64_t kPenalty = 10000000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < manager.num_nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Adding same vehicle constraint costs for consecutive nodes. - if (absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs)) { - std::vector group; - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < manager.num_nodes(); ++order) { - group.push_back(manager.NodeToIndex(order)); - if (group.size() == kMaxNodesPerGroup) { - routing.AddSoftSameVehicleConstraint(group, kSameVehicleCost); - group.clear(); - } - } - if (!group.empty()) { - routing.AddSoftSameVehicleConstraint(group, kSameVehicleCost); - } - } - - // Solve, returns a solution if any (owned by RoutingModel). 
- RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - DisplayPlan(manager, routing, *solution, - absl::GetFlag(FLAGS_vrp_use_same_vehicle_costs), - kMaxNodesPerGroup, kSameVehicleCost, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/cvrptw_lib.h b/examples/cpp/cvrptw_lib.h deleted file mode 100644 index edb6e3a938..0000000000 --- a/examples/cpp/cvrptw_lib.h +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This header provides functions to help creating random instaces of the -// vehicle routing problem; random capacities and random time windows. -#ifndef OR_TOOLS_EXAMPLES_CVRPTW_LIB_H_ -#define OR_TOOLS_EXAMPLES_CVRPTW_LIB_H_ - -#include -#include -#include - -#include "absl/strings/str_format.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/util/random_engine.h" - -namespace operations_research { - -typedef std::function - RoutingNodeEvaluator2; - -// Random seed generator. 
-int32_t GetSeed(bool deterministic); - -// Location container, contains positions of orders and can be used to obtain -// Manhattan distances/times between locations. -class LocationContainer { - public: - LocationContainer(int64_t speed, bool use_deterministic_seed); - void AddLocation(int64_t x, int64_t y) { - locations_.push_back(Location(x, y)); - } - void AddRandomLocation(int64_t x_max, int64_t y_max); - void AddRandomLocation(int64_t x_max, int64_t y_max, int duplicates); - int64_t ManhattanDistance(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - int64_t NegManhattanDistance(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - int64_t ManhattanTime(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - - bool SameLocation(RoutingIndexManager::NodeIndex node1, - RoutingIndexManager::NodeIndex node2) const; - int64_t SameLocationFromIndex(int64_t node1, int64_t node2) const; - - private: - class Location { - public: - Location(); - Location(int64_t x, int64_t y); - int64_t DistanceTo(const Location& location) const; - bool IsAtSameLocation(const Location& location) const; - - private: - static int64_t Abs(int64_t value); - - int64_t x_; - int64_t y_; - }; - - random_engine_t randomizer_; - const int64_t speed_; - absl::StrongVector locations_; -}; - -// Random demand. -class RandomDemand { - public: - RandomDemand(int size, RoutingIndexManager::NodeIndex depot, - bool use_deterministic_seed); - void Initialize(); - int64_t Demand(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - - private: - std::unique_ptr demand_; - const int size_; - const RoutingIndexManager::NodeIndex depot_; - const bool use_deterministic_seed_; -}; - -// Service time (proportional to demand) + transition time callback. 
-class ServiceTimePlusTransition { - public: - ServiceTimePlusTransition( - int64_t time_per_demand_unit, - operations_research::RoutingNodeEvaluator2 demand, - operations_research::RoutingNodeEvaluator2 transition_time); - int64_t Compute(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - - private: - const int64_t time_per_demand_unit_; - operations_research::RoutingNodeEvaluator2 demand_; - operations_research::RoutingNodeEvaluator2 transition_time_; -}; - -// Stop service time + transition time callback. -class StopServiceTimePlusTransition { - public: - StopServiceTimePlusTransition( - int64_t stop_time, const LocationContainer& location_container, - operations_research::RoutingNodeEvaluator2 transition_time); - int64_t Compute(RoutingIndexManager::NodeIndex from, - RoutingIndexManager::NodeIndex to) const; - - private: - const int64_t stop_time_; - const LocationContainer& location_container_; - operations_research::RoutingNodeEvaluator2 demand_; - operations_research::RoutingNodeEvaluator2 transition_time_; -}; - -// Route plan displayer. -// TODO(user): Move the display code to the routing library. 
-void DisplayPlan( - const operations_research::RoutingIndexManager& manager, - const operations_research::RoutingModel& routing, - const operations_research::Assignment& plan, bool use_same_vehicle_costs, - int64_t max_nodes_per_group, int64_t same_vehicle_cost, - const operations_research::RoutingDimension& capacity_dimension, - const operations_research::RoutingDimension& time_dimension); - -using NodeIndex = RoutingIndexManager::NodeIndex; - -int32_t GetSeed(bool deterministic) { - if (deterministic) { - return 0; - } else { - return std::random_device()(); - } -} - -LocationContainer::LocationContainer(int64_t speed, bool use_deterministic_seed) - : randomizer_(GetSeed(use_deterministic_seed)), speed_(speed) { - CHECK_LT(0, speed_); -} - -void LocationContainer::AddRandomLocation(int64_t x_max, int64_t y_max) { - AddRandomLocation(x_max, y_max, 1); -} - -void LocationContainer::AddRandomLocation(int64_t x_max, int64_t y_max, - int duplicates) { - const int64_t x = absl::Uniform(randomizer_, 0, x_max + 1); - const int64_t y = absl::Uniform(randomizer_, 0, y_max + 1); - for (int i = 0; i < duplicates; ++i) { - AddLocation(x, y); - } -} - -int64_t LocationContainer::ManhattanDistance(NodeIndex from, - NodeIndex to) const { - return locations_[from].DistanceTo(locations_[to]); -} - -int64_t LocationContainer::NegManhattanDistance(NodeIndex from, - NodeIndex to) const { - return -ManhattanDistance(from, to); -} - -int64_t LocationContainer::ManhattanTime(NodeIndex from, NodeIndex to) const { - return ManhattanDistance(from, to) / speed_; -} - -bool LocationContainer::SameLocation(NodeIndex node1, NodeIndex node2) const { - if (node1 < locations_.size() && node2 < locations_.size()) { - return locations_[node1].IsAtSameLocation(locations_[node2]); - } - return false; -} -int64_t LocationContainer::SameLocationFromIndex(int64_t node1, - int64_t node2) const { - // The direct conversion from constraint model indices to routing model - // nodes is correct because the 
depot is node 0. - // TODO(user): Fetch proper indices from routing model. - return SameLocation(NodeIndex(node1), NodeIndex(node2)); -} - -LocationContainer::Location::Location() : x_(0), y_(0) {} - -LocationContainer::Location::Location(int64_t x, int64_t y) : x_(x), y_(y) {} - -int64_t LocationContainer::Location::DistanceTo( - const Location& location) const { - return Abs(x_ - location.x_) + Abs(y_ - location.y_); -} - -bool LocationContainer::Location::IsAtSameLocation( - const Location& location) const { - return x_ == location.x_ && y_ == location.y_; -} - -int64_t LocationContainer::Location::Abs(int64_t value) { - return std::max(value, -value); -} - -RandomDemand::RandomDemand(int size, NodeIndex depot, - bool use_deterministic_seed) - : size_(size), - depot_(depot), - use_deterministic_seed_(use_deterministic_seed) { - CHECK_LT(0, size_); -} - -void RandomDemand::Initialize() { - const int64_t kDemandMax = 5; - const int64_t kDemandMin = 1; - demand_ = absl::make_unique(size_); - random_engine_t randomizer; - for (int order = 0; order < size_; ++order) { - if (order == depot_) { - demand_[order] = 0; - } else { - demand_[order] = kDemandMin + absl::Uniform(randomizer, 0, - kDemandMax - kDemandMin + 1); - } - } -} - -int64_t RandomDemand::Demand(NodeIndex from, NodeIndex /*to*/) const { - return demand_[from.value()]; -} - -ServiceTimePlusTransition::ServiceTimePlusTransition( - int64_t time_per_demand_unit, RoutingNodeEvaluator2 demand, - RoutingNodeEvaluator2 transition_time) - : time_per_demand_unit_(time_per_demand_unit), - demand_(std::move(demand)), - transition_time_(std::move(transition_time)) {} - -int64_t ServiceTimePlusTransition::Compute(NodeIndex from, NodeIndex to) const { - return time_per_demand_unit_ * demand_(from, to) + transition_time_(from, to); -} - -StopServiceTimePlusTransition::StopServiceTimePlusTransition( - int64_t stop_time, const LocationContainer& location_container, - RoutingNodeEvaluator2 transition_time) - : 
stop_time_(stop_time), - location_container_(location_container), - transition_time_(std::move(transition_time)) {} - -int64_t StopServiceTimePlusTransition::Compute(NodeIndex from, - NodeIndex to) const { - return location_container_.SameLocation(from, to) - ? 0 - : stop_time_ + transition_time_(from, to); -} - -void DisplayPlan( - const RoutingIndexManager& manager, const RoutingModel& routing, - const operations_research::Assignment& plan, bool use_same_vehicle_costs, - int64_t max_nodes_per_group, int64_t same_vehicle_cost, - const operations_research::RoutingDimension& capacity_dimension, - const operations_research::RoutingDimension& time_dimension) { - // Display plan cost. - std::string plan_output = absl::StrFormat("Cost %d\n", plan.ObjectiveValue()); - - // Display dropped orders. - std::string dropped; - for (int64_t order = 0; order < routing.Size(); ++order) { - if (routing.IsStart(order) || routing.IsEnd(order)) continue; - if (plan.Value(routing.NextVar(order)) == order) { - if (dropped.empty()) { - absl::StrAppendFormat(&dropped, " %d", - manager.IndexToNode(order).value()); - } else { - absl::StrAppendFormat(&dropped, ", %d", - manager.IndexToNode(order).value()); - } - } - } - if (!dropped.empty()) { - plan_output += "Dropped orders:" + dropped + "\n"; - } - - if (use_same_vehicle_costs) { - int group_size = 0; - int64_t group_same_vehicle_cost = 0; - std::set visited; - for (int64_t order = 0; order < routing.Size(); ++order) { - if (routing.IsStart(order) || routing.IsEnd(order)) continue; - ++group_size; - visited.insert(plan.Value(routing.VehicleVar(order))); - if (group_size == max_nodes_per_group) { - if (visited.size() > 1) { - group_same_vehicle_cost += (visited.size() - 1) * same_vehicle_cost; - } - group_size = 0; - visited.clear(); - } - } - if (visited.size() > 1) { - group_same_vehicle_cost += (visited.size() - 1) * same_vehicle_cost; - } - LOG(INFO) << "Same vehicle costs: " << group_same_vehicle_cost; - } - - // Display actual 
output for each vehicle. - for (int route_number = 0; route_number < routing.vehicles(); - ++route_number) { - int64_t order = routing.Start(route_number); - absl::StrAppendFormat(&plan_output, "Route %d: ", route_number); - if (routing.IsEnd(plan.Value(routing.NextVar(order)))) { - plan_output += "Empty\n"; - } else { - while (true) { - operations_research::IntVar* const load_var = - capacity_dimension.CumulVar(order); - operations_research::IntVar* const time_var = - time_dimension.CumulVar(order); - operations_research::IntVar* const slack_var = - routing.IsEnd(order) ? nullptr : time_dimension.SlackVar(order); - if (slack_var != nullptr && plan.Contains(slack_var)) { - absl::StrAppendFormat( - &plan_output, "%d Load(%d) Time(%d, %d) Slack(%d, %d)", - manager.IndexToNode(order).value(), plan.Value(load_var), - plan.Min(time_var), plan.Max(time_var), plan.Min(slack_var), - plan.Max(slack_var)); - } else { - absl::StrAppendFormat(&plan_output, "%d Load(%d) Time(%d, %d)", - manager.IndexToNode(order).value(), - plan.Value(load_var), plan.Min(time_var), - plan.Max(time_var)); - } - if (routing.IsEnd(order)) break; - plan_output += " -> "; - order = plan.Value(routing.NextVar(order)); - } - plan_output += "\n"; - } - } - LOG(INFO) << plan_output; -} -} // namespace operations_research - -#endif // OR_TOOLS_EXAMPLES_CVRPTW_LIB_H_ diff --git a/examples/cpp/cvrptw_with_breaks.cc b/examples/cpp/cvrptw_with_breaks.cc deleted file mode 100644 index 25d790fa01..0000000000 --- a/examples/cpp/cvrptw_with_breaks.cc +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// -// Capacitated Vehicle Routing Problem with Time Windows and Breaks. -// A description of the Capacitated Vehicle Routing Problem with Time Windows -// can be found here: -// http://en.wikipedia.org/wiki/Vehicle_routing_problem. -// The variant which is tackled by this model includes a capacity dimension, -// time windows and optional orders, with a penalty cost if orders are not -// performed. For the sake of simplicty, orders are randomly located and -// distances are computed using the Manhattan distance. Distances are assumed -// to be in meters and times in seconds. -// This variant also includes vehicle breaks which must happen during the day -// with two alternate breaks schemes: either a long break in the middle of the -// day or two smaller ones which can be taken during a longer period of the day. 
- -#include -#include - -#include "absl/random/random.h" -#include "absl/strings/str_cat.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_enums.pb.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::FirstSolutionStrategy; -using operations_research::GetSeed; -using operations_research::IntervalVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; -using operations_research::Solver; - -ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); -ABSL_FLAG(int, vrp_vehicles, 20, - "Size of Traveling Salesman Problem instance."); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds."); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null 
vehicle fleet size."; - // VRP of size absl::GetFlag(FLAGS_vrp_size). - // Nodes are indexed from 0 to absl::GetFlag(FLAGS_vrp_orders), the starts and - // ends of the routes are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(absl::GetFlag(FLAGS_vrp_orders) + 1, - absl::GetFlag(FLAGS_vrp_vehicles), kDepot); - RoutingModel routing(manager); - RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - parameters.set_first_solution_strategy( - FirstSolutionStrategy::PARALLEL_CHEAPEST_INSERTION); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); - ++location) { - locations.AddRandomLocation(kXMax, kYMax); - } - - // Setting the cost function. - const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. - const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. 
- const int64_t kTimePerDemandUnit = 300; - const int64_t kHorizon = 24 * 3600; - ServiceTimePlusTransition time( - kTimePerDemandUnit, - [&demand](RoutingNodeIndex i, RoutingNodeIndex j) { - return demand.Demand(i, j); - }, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/false, kTime); - RoutingDimension* const time_dimension = routing.GetMutableDimension(kTime); - - // Adding time windows. - std::mt19937 randomizer( - GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - const int64_t kTWDuration = 5 * 3600; - for (int order = 1; order < manager.num_nodes(); ++order) { - const int64_t start = - absl::Uniform(randomizer, 0, kHorizon - kTWDuration); - time_dimension->CumulVar(order)->SetRange(start, start + kTWDuration); - routing.AddToAssignment(time_dimension->SlackVar(order)); - } - - // Minimize time variables. - for (int i = 0; i < routing.Size(); ++i) { - routing.AddVariableMinimizedByFinalizer(time_dimension->CumulVar(i)); - } - for (int j = 0; j < absl::GetFlag(FLAGS_vrp_vehicles); ++j) { - routing.AddVariableMinimizedByFinalizer( - time_dimension->CumulVar(routing.Start(j))); - routing.AddVariableMinimizedByFinalizer( - time_dimension->CumulVar(routing.End(j))); - } - - // Adding vehicle breaks: - // - 40min breaks between 11:00am and 1:00pm - // or - // - 2 x 30min breaks between 10:00am and 3:00pm, at least 1h apart - // First, fill service time vector. 
- std::vector service_times(routing.Size()); - for (int node = 0; node < routing.Size(); node++) { - if (node >= routing.nodes()) { - service_times[node] = 0; - } else { - const RoutingIndexManager::NodeIndex index(node); - service_times[node] = kTimePerDemandUnit * demand.Demand(index, index); - } - } - const std::vector> break_data = { - {/*start_min*/ 11, /*start_max*/ 13, /*duration*/ 2400}, - {/*start_min*/ 10, /*start_max*/ 15, /*duration*/ 1800}, - {/*start_min*/ 10, /*start_max*/ 15, /*duration*/ 1800}}; - Solver* const solver = routing.solver(); - for (int vehicle = 0; vehicle < absl::GetFlag(FLAGS_vrp_vehicles); - ++vehicle) { - std::vector breaks; - for (int i = 0; i < break_data.size(); ++i) { - IntervalVar* const break_interval = solver->MakeFixedDurationIntervalVar( - break_data[i][0] * 3600, break_data[i][1] * 3600, break_data[i][2], - true, absl::StrCat("Break ", i, " on vehicle ", vehicle)); - breaks.push_back(break_interval); - } - // break1 performed iff break2 performed - solver->AddConstraint(solver->MakeEquality(breaks[1]->PerformedExpr(), - breaks[2]->PerformedExpr())); - // break2 start 1h after break1. - solver->AddConstraint(solver->MakeIntervalVarRelationWithDelay( - breaks[2], Solver::STARTS_AFTER_END, breaks[1], 3600)); - // break0 performed iff break2 unperformed - solver->AddConstraint(solver->MakeNonEquality(breaks[0]->PerformedExpr(), - breaks[2]->PerformedExpr())); - - time_dimension->SetBreakIntervalsOfVehicle(std::move(breaks), vehicle, - service_times); - } - - // Adding penalty costs to allow skipping orders. - const int64_t kPenalty = 10000000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < routing.nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Solve, returns a solution if any (owned by RoutingModel). 
- const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - LOG(INFO) << "Breaks: "; - for (const auto& break_interval : - solution->IntervalVarContainer().elements()) { - if (break_interval.PerformedValue() == 1) { - LOG(INFO) << break_interval.Var()->name() << " " - << break_interval.DebugString(); - } else { - LOG(INFO) << break_interval.Var()->name() << " unperformed"; - } - } - DisplayPlan(manager, routing, *solution, false, 0, 0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/cvrptw_with_refueling.cc b/examples/cpp/cvrptw_with_refueling.cc deleted file mode 100644 index 5464467771..0000000000 --- a/examples/cpp/cvrptw_with_refueling.cc +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Capacitated Vehicle Routing Problem with Time Windows and refueling -// constraints. -// This is an extension to the model in cvrptw.cc so refer to that file for -// more information on the common part of the model. The model implemented here -// takes into account refueling constraints using a specific dimension: vehicles -// must visit certain nodes (refueling nodes) before the quantity of fuel -// reaches zero. Fuel consumption is proportional to the distance traveled. 
- -#include -#include - -#include "absl/random/random.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; - -ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); -ABSL_FLAG(int, vrp_vehicles, 20, - "Size of Traveling Salesman Problem instance."); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds."); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; -const char* kFuel = "Fuel"; - -// Returns true if node is a refueling node (based on node / refuel node ratio). 
-bool IsRefuelNode(int64_t node) { - const int64_t kRefuelNodeRatio = 10; - return (node % kRefuelNodeRatio == 0); -} - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null vehicle fleet size."; - // VRP of size absl::GetFlag(FLAGS_vrp_size). - // Nodes are indexed from 0 to absl::GetFlag(FLAGS_vrp_orders), the starts and - // ends of the routes are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(absl::GetFlag(FLAGS_vrp_orders) + 1, - absl::GetFlag(FLAGS_vrp_vehicles), kDepot); - RoutingModel routing(manager); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); - ++location) { - locations.AddRandomLocation(kXMax, kYMax); - } - - // Setting the cost function. - const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. - const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. 
- const int64_t kTimePerDemandUnit = 300; - const int64_t kHorizon = 24 * 3600; - ServiceTimePlusTransition time( - kTimePerDemandUnit, - [&demand](RoutingNodeIndex i, RoutingNodeIndex j) { - return demand.Demand(i, j); - }, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/true, kTime); - const RoutingDimension& time_dimension = routing.GetDimensionOrDie(kTime); - // Adding time windows. - // NOTE(user): This randomized test case is quite sensible to the seed: - // the generated model can be much easier or harder to solve, depending on - // the seed. It turns out that most seeds yield pretty slow/bad solver - // performance: I got good performance for about 10% of the seeds. - std::mt19937 randomizer( - 144 + GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - const int64_t kTWDuration = 5 * 3600; - for (int order = 1; order < manager.num_nodes(); ++order) { - if (!IsRefuelNode(order)) { - const int64_t start = - absl::Uniform(randomizer, 0, kHorizon - kTWDuration); - time_dimension.CumulVar(order)->SetRange(start, start + kTWDuration); - } - } - - // Adding fuel dimension. This dimension consumes a quantity equal to the - // distance traveled. Only refuel nodes can make the quantity of dimension - // increase by letting slack variable replenish the fuel. 
- const int64_t kFuelCapacity = kXMax + kYMax; - routing.AddDimension( - routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.NegManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kFuelCapacity, kFuelCapacity, /*fix_start_cumul_to_zero=*/false, kFuel); - const RoutingDimension& fuel_dimension = routing.GetDimensionOrDie(kFuel); - for (int order = 0; order < routing.Size(); ++order) { - // Only let slack free for refueling nodes. - if (!IsRefuelNode(order) || routing.IsStart(order)) { - fuel_dimension.SlackVar(order)->SetValue(0); - } - // Needed to instantiate fuel quantity at each node. - routing.AddVariableMinimizedByFinalizer(fuel_dimension.CumulVar(order)); - } - - // Adding penalty costs to allow skipping orders. - const int64_t kPenalty = 100000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < routing.nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Solve, returns a solution if any (owned by RoutingModel). 
- RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, - /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/cvrptw_with_resources.cc b/examples/cpp/cvrptw_with_resources.cc deleted file mode 100644 index 5171c781a1..0000000000 --- a/examples/cpp/cvrptw_with_resources.cc +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Capacitated Vehicle Routing Problem with Time Windows and capacitated -// resources. -// This is an extension to the model in cvrptw.cc so refer to that file for -// more information on the common part of the model. The model implemented here -// limits the number of vehicles which can simultaneously leave or enter the -// depot due to limited resources (or capacity) available. -// TODO(user): The current model consumes resources even for vehicles with -// empty routes; fix this when we have an API on the cumulative constraints -// with variable demands. 
- -#include -#include - -#include "absl/random/random.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::IntervalVar; -using operations_research::IntVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::ServiceTimePlusTransition; -using operations_research::Solver; - -ABSL_FLAG(int, vrp_orders, 100, "Nodes in the problem."); -ABSL_FLAG(int, vrp_vehicles, 20, - "Size of Traveling Salesman Problem instance."); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds."); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null vehicle fleet size."; - // VRP of size absl::GetFlag(FLAGS_vrp_size). 
- // Nodes are indexed from 0 to absl::GetFlag(FLAGS_vrp_orders), the starts and - // ends of the routes are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(absl::GetFlag(FLAGS_vrp_orders) + 1, - absl::GetFlag(FLAGS_vrp_vehicles), kDepot); - RoutingModel routing(manager); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int location = 0; location <= absl::GetFlag(FLAGS_vrp_orders); - ++location) { - locations.AddRandomLocation(kXMax, kYMax); - } - - // Setting the cost function. - const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. - const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. 
- const int64_t kTimePerDemandUnit = 300; - const int64_t kHorizon = 24 * 3600; - ServiceTimePlusTransition time( - kTimePerDemandUnit, - [&demand](RoutingNodeIndex i, RoutingNodeIndex j) { - return demand.Demand(i, j); - }, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/false, kTime); - const RoutingDimension& time_dimension = routing.GetDimensionOrDie(kTime); - - // Adding time windows. - std::mt19937 randomizer( - GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - const int64_t kTWDuration = 5 * 3600; - for (int order = 1; order < manager.num_nodes(); ++order) { - const int64_t start = - absl::Uniform(randomizer, 0, kHorizon - kTWDuration); - time_dimension.CumulVar(order)->SetRange(start, start + kTWDuration); - } - - // Adding resource constraints at the depot (start and end location of - // routes). - std::vector start_end_times; - for (int i = 0; i < absl::GetFlag(FLAGS_vrp_vehicles); ++i) { - start_end_times.push_back(time_dimension.CumulVar(routing.End(i))); - start_end_times.push_back(time_dimension.CumulVar(routing.Start(i))); - } - // Build corresponding time intervals. - const int64_t kVehicleSetup = 180; - Solver* const solver = routing.solver(); - std::vector intervals; - solver->MakeFixedDurationIntervalVarArray(start_end_times, kVehicleSetup, - "depot_interval", &intervals); - // Constrain the number of maximum simultaneous intervals at depot. - const int64_t kDepotCapacity = 5; - std::vector depot_usage(start_end_times.size(), 1); - solver->AddConstraint( - solver->MakeCumulative(intervals, depot_usage, kDepotCapacity, "depot")); - // Instantiate route start and end times to produce feasible times. 
- for (int i = 0; i < start_end_times.size(); ++i) { - routing.AddVariableMinimizedByFinalizer(start_end_times[i]); - } - - // Adding penalty costs to allow skipping orders. - const int64_t kPenalty = 100000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < manager.num_nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Solve, returns a solution if any (owned by RoutingModel). - RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, - /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/cvrptw_with_stop_times_and_resources.cc b/examples/cpp/cvrptw_with_stop_times_and_resources.cc deleted file mode 100644 index d1c494638b..0000000000 --- a/examples/cpp/cvrptw_with_stop_times_and_resources.cc +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Capacitated Vehicle Routing Problem with Time Windows, fixed stop times and -// capacitated resources. A stop is defined as consecutive nodes at the same -// location. -// This is an extension to the model in cvrptw.cc so refer to that file for -// more information on the common part of the model. The model implemented here -// limits the number of vehicles which can simultaneously leave or enter a node -// to one. - -#include -#include - -#include "absl/random/random.h" -#include "absl/strings/str_cat.h" -#include "examples/cpp/cvrptw_lib.h" -#include "google/protobuf/text_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/init_google.h" -#include "ortools/base/types.h" -#include "ortools/base/logging.h" -#include "ortools/constraint_solver/routing.h" -#include "ortools/constraint_solver/routing_index_manager.h" -#include "ortools/constraint_solver/routing_parameters.h" -#include "ortools/constraint_solver/routing_parameters.pb.h" - -using operations_research::Assignment; -using operations_research::DefaultRoutingSearchParameters; -using operations_research::GetSeed; -using operations_research::IntervalVar; -using operations_research::IntVar; -using operations_research::LocationContainer; -using operations_research::RandomDemand; -using operations_research::RoutingDimension; -using operations_research::RoutingIndexManager; -using operations_research::RoutingModel; -using operations_research::RoutingNodeIndex; -using operations_research::RoutingSearchParameters; -using operations_research::Solver; -using operations_research::StopServiceTimePlusTransition; - -ABSL_FLAG(int, vrp_stops, 25, "Stop locations in the problem."); -ABSL_FLAG(int, vrp_orders_per_stop, 5, "Nodes for each stop."); -ABSL_FLAG(int, vrp_vehicles, 20, - "Size of Traveling Salesman Problem instance."); -ABSL_FLAG(bool, vrp_use_deterministic_random_seed, false, - "Use deterministic random seeds."); -ABSL_FLAG(std::string, routing_search_parameters, "", - "Text proto 
RoutingSearchParameters (possibly partial) that will " - "override the DefaultRoutingSearchParameters()"); - -const char* kTime = "Time"; -const char* kCapacity = "Capacity"; - -int main(int argc, char** argv) { - InitGoogle(argv[0], &argc, &argv, true); - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_stops)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders_per_stop)) - << "Specify an instance size greater than 0."; - CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) - << "Specify a non-null vehicle fleet size."; - const int vrp_orders = - absl::GetFlag(FLAGS_vrp_stops) * absl::GetFlag(FLAGS_vrp_orders_per_stop); - // Nodes are indexed from 0 to vrp_orders, the starts and ends of the routes - // are at node 0. - const RoutingIndexManager::NodeIndex kDepot(0); - RoutingIndexManager manager(vrp_orders + 1, absl::GetFlag(FLAGS_vrp_vehicles), - kDepot); - RoutingModel routing(manager); - - // Setting up locations. - const int64_t kXMax = 100000; - const int64_t kYMax = 100000; - const int64_t kSpeed = 10; - LocationContainer locations( - kSpeed, absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - for (int stop = 0; stop <= absl::GetFlag(FLAGS_vrp_stops); ++stop) { - const int num_orders = - stop == 0 ? 1 : absl::GetFlag(FLAGS_vrp_orders_per_stop); - locations.AddRandomLocation(kXMax, kYMax, num_orders); - } - - // Setting the cost function. - const int vehicle_cost = routing.RegisterTransitCallback( - [&locations, &manager](int64_t i, int64_t j) { - return locations.ManhattanDistance(manager.IndexToNode(i), - manager.IndexToNode(j)); - }); - routing.SetArcCostEvaluatorOfAllVehicles(vehicle_cost); - - // Adding capacity dimension constraints. 
- const int64_t kVehicleCapacity = 40; - const int64_t kNullCapacitySlack = 0; - RandomDemand demand(manager.num_nodes(), kDepot, - absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed)); - demand.Initialize(); - routing.AddDimension(routing.RegisterTransitCallback( - [&demand, &manager](int64_t i, int64_t j) { - return demand.Demand(manager.IndexToNode(i), - manager.IndexToNode(j)); - }), - kNullCapacitySlack, kVehicleCapacity, - /*fix_start_cumul_to_zero=*/true, kCapacity); - - // Adding time dimension constraints. - const int64_t kStopTime = 300; - const int64_t kHorizon = 24 * 3600; - StopServiceTimePlusTransition time( - kStopTime, locations, - [&locations](RoutingNodeIndex i, RoutingNodeIndex j) { - return locations.ManhattanTime(i, j); - }); - routing.AddDimension( - routing.RegisterTransitCallback([&time, &manager](int64_t i, int64_t j) { - return time.Compute(manager.IndexToNode(i), manager.IndexToNode(j)); - }), - kHorizon, kHorizon, /*fix_start_cumul_to_zero=*/false, kTime); - const RoutingDimension& time_dimension = routing.GetDimensionOrDie(kTime); - - // Adding time windows, for the sake of simplicty same for each stop. - std::mt19937 randomizer( - GetSeed(absl::GetFlag(FLAGS_vrp_use_deterministic_random_seed))); - const int64_t kTWDuration = 5 * 3600; - for (int stop = 0; stop < absl::GetFlag(FLAGS_vrp_stops); ++stop) { - const int64_t start = - absl::Uniform(randomizer, 0, kHorizon - kTWDuration); - for (int stop_order = 0; - stop_order < absl::GetFlag(FLAGS_vrp_orders_per_stop); ++stop_order) { - const int order = - stop * absl::GetFlag(FLAGS_vrp_orders_per_stop) + stop_order + 1; - time_dimension.CumulVar(order)->SetRange(start, start + kTWDuration); - } - } - - // Adding resource constraints at order locations. 
- Solver* const solver = routing.solver(); - std::vector intervals; - for (int stop = 0; stop < absl::GetFlag(FLAGS_vrp_stops); ++stop) { - std::vector stop_intervals; - for (int stop_order = 0; - stop_order < absl::GetFlag(FLAGS_vrp_orders_per_stop); ++stop_order) { - const int order = - stop * absl::GetFlag(FLAGS_vrp_orders_per_stop) + stop_order + 1; - IntervalVar* const interval = solver->MakeFixedDurationIntervalVar( - 0, kHorizon, kStopTime, true, absl::StrCat("Order", order)); - intervals.push_back(interval); - stop_intervals.push_back(interval); - // Link order and interval. - IntVar* const order_start = time_dimension.CumulVar(order); - solver->AddConstraint( - solver->MakeIsEqualCt(interval->SafeStartExpr(0), order_start, - interval->PerformedExpr()->Var())); - // Make interval performed iff corresponding order has service time. - // An order has no service time iff it is at the same location as the - // next order on the route. - IntVar* const is_null_duration = - solver - ->MakeElement( - [&locations, order](int64_t index) { - return locations.SameLocationFromIndex(order, index); - }, - routing.NextVar(order)) - ->Var(); - solver->AddConstraint( - solver->MakeNonEquality(interval->PerformedExpr(), is_null_duration)); - routing.AddIntervalToAssignment(interval); - // We are minimizing route durations by minimizing route ends; so we can - // maximize order starts to pack them together. - routing.AddVariableMaximizedByFinalizer(order_start); - } - // Only one order can happen at the same time at a given location. - std::vector location_usage(stop_intervals.size(), 1); - solver->AddConstraint(solver->MakeCumulative( - stop_intervals, location_usage, 1, absl::StrCat("Client", stop))); - } - // Minimizing route duration. - for (int vehicle = 0; vehicle < manager.num_vehicles(); ++vehicle) { - routing.AddVariableMinimizedByFinalizer( - time_dimension.CumulVar(routing.End(vehicle))); - } - - // Adding penalty costs to allow skipping orders. 
- const int64_t kPenalty = 100000; - const RoutingIndexManager::NodeIndex kFirstNodeAfterDepot(1); - for (RoutingIndexManager::NodeIndex order = kFirstNodeAfterDepot; - order < routing.nodes(); ++order) { - std::vector orders(1, manager.NodeToIndex(order)); - routing.AddDisjunction(orders, kPenalty); - } - - // Solve, returns a solution if any (owned by RoutingModel). - RoutingSearchParameters parameters = DefaultRoutingSearchParameters(); - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_routing_search_parameters), ¶meters)); - const Assignment* solution = routing.SolveWithParameters(parameters); - if (solution != nullptr) { - DisplayPlan(manager, routing, *solution, /*use_same_vehicle_costs=*/false, - /*max_nodes_per_group=*/0, /*same_vehicle_cost=*/0, - routing.GetDimensionOrDie(kCapacity), - routing.GetDimensionOrDie(kTime)); - LOG(INFO) << "Stop intervals:"; - for (IntervalVar* const interval : intervals) { - if (solution->PerformedValue(interval)) { - LOG(INFO) << interval->name() << ": " << solution->StartValue(interval); - } - } - } else { - LOG(INFO) << "No solution found."; - } - return EXIT_SUCCESS; -} diff --git a/examples/cpp/frequency_assignment_problem.cc b/examples/cpp/frequency_assignment_problem.cc index 72914889ee..626e5aec5e 100644 --- a/examples/cpp/frequency_assignment_problem.cc +++ b/examples/cpp/frequency_assignment_problem.cc @@ -54,6 +54,7 @@ #include "absl/container/btree_map.h" #include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "examples/cpp/fap_model_printer.h" #include "examples/cpp/fap_parser.h" #include "examples/cpp/fap_utilities.h" @@ -100,6 +101,10 @@ class OrderingDecision : public Decision { variable2_(variable2), value_(value), operator_(std::move(operation)) {} + + // This type is neither copyable nor movable. 
+ OrderingDecision(const OrderingDecision&) = delete; + OrderingDecision& operator=(const OrderingDecision&) = delete; ~OrderingDecision() override = default; // Apply will be called first when the decision is executed. @@ -131,8 +136,6 @@ class OrderingDecision : public Decision { IntVar* const variable2_; const int value_; const std::string operator_; - - DISALLOW_COPY_AND_ASSIGN(OrderingDecision); }; // Decision on whether a soft constraint will be added to a model @@ -142,6 +145,10 @@ class ConstraintDecision : public Decision { explicit ConstraintDecision(IntVar* const constraint_violation) : constraint_violation_(constraint_violation) {} + // This type is neither copyable nor movable. + ConstraintDecision(const ConstraintDecision&) = delete; + ConstraintDecision& operator=(const ConstraintDecision&) = delete; + ~ConstraintDecision() override = default; // Apply will be called first when the decision is executed. @@ -158,8 +165,6 @@ class ConstraintDecision : public Decision { private: IntVar* const constraint_violation_; - - DISALLOW_COPY_AND_ASSIGN(ConstraintDecision); }; // The ordering builder resolves the relative order of the two variables @@ -192,6 +197,10 @@ class OrderingBuilder : public DecisionBuilder { CHECK_EQ(variable_state_.size(), variables_.size()); } + // This type is neither copyable nor movable. + OrderingBuilder(const OrderingBuilder&) = delete; + OrderingBuilder& operator=(const OrderingBuilder&) = delete; + ~OrderingBuilder() override = default; Decision* Next(Solver* const s) override { @@ -320,8 +329,6 @@ class OrderingBuilder : public DecisionBuilder { // Used by Hint() for indicating the most probable ordering. std::vector variable_state_; std::vector minimum_value_available_; - - DISALLOW_COPY_AND_ASSIGN(OrderingBuilder); }; // A comparator for sorting the constraints depending on their impact. 
@@ -373,7 +380,7 @@ int64_t ValueEvaluator( // The variables which participate in more constraints and have the // smaller domain should be in higher priority for assignment. int64_t VariableEvaluator( - const std::vector& key_from_index, + absl::Span key_from_index, const absl::btree_map& data_variables, int64_t variable_index) { FapVariable variable = @@ -414,7 +421,7 @@ void CreateModelVariables( } // Creates the constraints of the instance from the parsed data. -void CreateModelConstraints(const std::vector& data_constraints, +void CreateModelConstraints(absl::Span data_constraints, const std::vector& variables, const absl::btree_map& index_from_key, Solver* solver) { @@ -649,7 +656,7 @@ void SplitVariablesHardSoft( } // Splits constraints of the instance to hard and soft. -void SplitConstraintHardSoft(const std::vector& data_constraints, +void SplitConstraintHardSoft(absl::Span data_constraints, std::vector* hard_constraints, std::vector* soft_constraints) { for (const FapConstraint& ct : data_constraints) { @@ -683,8 +690,8 @@ void PenalizeVariablesViolation( // Penalize the violation of soft constraints of the instance. 
void PenalizeConstraintsViolation( - const std::vector& constraints, - const std::vector& soft_constraints, + absl::Span constraints, + absl::Span soft_constraints, const absl::btree_map& index_from_key, const std::vector& variables, std::vector* cost, std::vector* violated_constraints, Solver* solver) { @@ -733,7 +740,7 @@ void PenalizeConstraintsViolation( int SoftFapSolver(const absl::btree_map& data_variables, const std::vector& data_constraints, absl::string_view /*data_objective*/, - const std::vector& /*values*/) { + absl::Span /*values*/) { Solver solver("SoftFapSolver"); std::vector monitors; diff --git a/examples/cpp/jobshop_sat.cc b/examples/cpp/jobshop_sat.cc index 27b76dc289..11c64480f6 100644 --- a/examples/cpp/jobshop_sat.cc +++ b/examples/cpp/jobshop_sat.cc @@ -322,7 +322,7 @@ std::vector> GetDataPerMachine( void CreateMachines( const JsspInputProblem& problem, - const std::vector>>& + absl::Span>> job_task_to_alternatives, IntervalVar makespan_interval, CpModelBuilder& cp_model) { const int num_jobs = problem.jobs_size(); @@ -733,12 +733,6 @@ void Solve(const JsspInputProblem& problem) { // Setup parameters. SatParameters parameters; parameters.set_log_search_progress(true); - // Parse the --params flag. - if (!absl::GetFlag(FLAGS_params).empty()) { - CHECK(google::protobuf::TextFormat::MergeFromString( - absl::GetFlag(FLAGS_params), ¶meters)) - << absl::GetFlag(FLAGS_params); - } // Prefer objective_shaving_search over objective_lb_search. if (parameters.num_workers() >= 16 && parameters.num_workers() < 24) { @@ -751,6 +745,13 @@ void Solve(const JsspInputProblem& problem) { parameters.set_push_all_tasks_toward_start(true); parameters.set_use_dynamic_precedence_in_disjunctive(true); + // Parse the --params flag. 
+ if (!absl::GetFlag(FLAGS_params).empty()) { + CHECK(google::protobuf::TextFormat::MergeFromString( + absl::GetFlag(FLAGS_params), ¶meters)) + << absl::GetFlag(FLAGS_params); + } + const CpSolverResponse response = SolveWithParameters(cp_model.Build(), parameters); diff --git a/examples/cpp/slitherlink_sat.cc b/examples/cpp/slitherlink_sat.cc index c9dba90dc4..e9fba86af6 100644 --- a/examples/cpp/slitherlink_sat.cc +++ b/examples/cpp/slitherlink_sat.cc @@ -66,7 +66,7 @@ void PrintSolution(absl::Span> data, std::cout << last_line << std::endl; } -void SlitherLink(const std::vector>& data) { +void SlitherLink(absl::Span> data) { const int num_rows = data.size(); const int num_columns = data[0].size(); diff --git a/examples/cpp/weighted_tardiness_sat.cc b/examples/cpp/weighted_tardiness_sat.cc index 1690e7f324..0462d6e325 100644 --- a/examples/cpp/weighted_tardiness_sat.cc +++ b/examples/cpp/weighted_tardiness_sat.cc @@ -18,14 +18,20 @@ #include #include "absl/flags/flag.h" +#include "absl/log/check.h" #include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/types/span.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_solver.h" #include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/filelineiter.h" +#include "ortools/util/sorted_interval_list.h" ABSL_FLAG(std::string, input, "examples/cpp/wt40.txt", "wt data file name."); ABSL_FLAG(int, size, 40, "Size of the problem in the wt file."); diff --git a/examples/flatzinc/README b/examples/flatzinc/README.md similarity index 100% rename from examples/flatzinc/README rename to examples/flatzinc/README.md From 1558b77419c2fc160035331e5fdabd6c6c44a105 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 11:59:16 +0100 Subject: [PATCH 020/392] backport routing/ from main --- 
ortools/routing/README.md | 32 +----- ortools/routing/{ => parsers}/BUILD.bazel | 73 ++++++++---- ortools/routing/parsers/README.md | 31 +++++ .../routing/parsers/capacity_planning.proto | 56 +++++++++ ortools/routing/{ => parsers}/carp_parser.cc | 2 +- ortools/routing/{ => parsers}/carp_parser.h | 8 +- .../routing/{ => parsers}/carp_parser_test.cc | 39 +++---- ortools/routing/{ => parsers}/cvrptw_lib.cc | 2 +- ortools/routing/{ => parsers}/cvrptw_lib.h | 6 +- ortools/routing/parsers/dow_parser.cc | 106 ++++++++++++++++++ ortools/routing/parsers/dow_parser.h | 29 +++++ ortools/routing/parsers/dow_parser_test.cc | 61 ++++++++++ ortools/routing/{ => parsers}/lilim_parser.cc | 2 +- ortools/routing/{ => parsers}/lilim_parser.h | 8 +- .../{ => parsers}/lilim_parser_test.cc | 13 ++- ortools/routing/{ => parsers}/nearp_parser.cc | 2 +- ortools/routing/{ => parsers}/nearp_parser.h | 8 +- .../{ => parsers}/nearp_parser_test.cc | 14 ++- ortools/routing/{ => parsers}/pdtsp_parser.cc | 2 +- ortools/routing/{ => parsers}/pdtsp_parser.h | 6 +- .../{ => parsers}/pdtsp_parser_test.cc | 4 +- ortools/routing/{ => parsers}/simple_graph.cc | 2 +- ortools/routing/{ => parsers}/simple_graph.h | 6 +- .../{ => parsers}/simple_graph_test.cc | 2 +- .../routing/{ => parsers}/solomon_parser.cc | 2 +- .../routing/{ => parsers}/solomon_parser.h | 8 +- .../{ => parsers}/solomon_parser_test.cc | 2 +- .../{ => parsers}/solution_serializer.cc | 2 +- .../{ => parsers}/solution_serializer.h | 8 +- .../{ => parsers}/solution_serializer_test.cc | 4 +- .../{ => parsers}/testdata/BUILD.bazel | 0 .../{ => parsers}/testdata/carp_gdb19.dat | 0 .../carp_gdb19_diferente_deposito.dat | 0 ...arp_gdb19_incorrecta_lista_aristas_req.dat | 0 .../carp_gdb19_incorrecto_arinoreq.dat | 0 .../testdata/carp_gdb19_incorrecto_arireq.dat | 0 .../testdata/carp_gdb19_incorrecto_arista.dat | 0 .../carp_gdb19_incorrecto_capacidad.dat | 0 .../testdata/carp_gdb19_incorrecto_coste.dat | 0 .../carp_gdb19_incorrecto_deposito.dat | 0 
.../testdata/carp_gdb19_incorrecto_tipo.dat | 0 .../carp_gdb19_incorrecto_vehiculos.dat | 0 .../carp_gdb19_incorrecto_vertices.dat | 0 .../testdata/carp_gdb19_mixed_arcs.dat | 0 .../testdata/carp_gdb19_no_arista_req.dat | 0 .../{ => parsers}/testdata/carp_toy.dat | 0 .../{ => parsers}/testdata/carp_toy.sol | 0 .../routing/{ => parsers}/testdata/lilim.zip | Bin .../{ => parsers}/testdata/n20w20.001.txt | 0 .../{ => parsers}/testdata/n20w20.002.txt | 0 .../{ => parsers}/testdata/nearp_BHW1.dat | 0 .../{ => parsers}/testdata/nearp_toy.dat | 0 .../testdata/pdptw_LRC2_10_6.txt | 0 .../{ => parsers}/testdata/pdtsp_prob10b.txt | 0 .../testdata/pdtsp_prob10b.txt.gz | Bin .../routing/{ => parsers}/testdata/rc201.0 | 0 .../{ => parsers}/testdata/solomon.zip | Bin .../testdata/solomon_bp_c101.mps | 0 .../{ => parsers}/testdata/solomon_bp_c101.pb | Bin .../testdata/solomon_check_id.md | 0 .../testdata/solomon_check_id.txt | 0 .../testdata/tsplib_F-n45-k4.vrp | 0 .../testdata/tsplib_Kytojoki_33.vrp | 0 .../{ => parsers}/testdata/tsplib_ar9152.tour | 0 .../{ => parsers}/testdata/tsplib_ar9152.tsp | 0 .../routing/{ => parsers}/tsplib_parser.cc | 2 +- ortools/routing/{ => parsers}/tsplib_parser.h | 8 +- .../{ => parsers}/tsplib_parser_test.cc | 8 +- ortools/routing/{ => parsers}/tsptw_parser.cc | 2 +- ortools/routing/{ => parsers}/tsptw_parser.h | 8 +- .../{ => parsers}/tsptw_parser_test.cc | 10 +- ortools/routing/samples/BUILD.bazel | 12 +- ortools/routing/samples/cvrp_disjoint_tw.cc | 2 +- ortools/routing/samples/cvrptw.cc | 2 +- .../routing/samples/cvrptw_soft_capacity.cc | 2 +- ortools/routing/samples/cvrptw_with_breaks.cc | 2 +- .../samples/cvrptw_with_precedences.cc | 2 +- .../routing/samples/cvrptw_with_refueling.cc | 2 +- .../routing/samples/cvrptw_with_resources.cc | 2 +- .../cvrptw_with_stop_times_and_resources.cc | 2 +- .../cvrptw_with_time_dependent_costs.cc | 2 +- 81 files changed, 449 insertions(+), 159 deletions(-) rename ortools/routing/{ => parsers}/BUILD.bazel 
(77%) create mode 100644 ortools/routing/parsers/README.md create mode 100644 ortools/routing/parsers/capacity_planning.proto rename ortools/routing/{ => parsers}/carp_parser.cc (99%) rename ortools/routing/{ => parsers}/carp_parser.h (97%) rename ortools/routing/{ => parsers}/carp_parser_test.cc (88%) rename ortools/routing/{ => parsers}/cvrptw_lib.cc (99%) rename ortools/routing/{ => parsers}/cvrptw_lib.h (97%) create mode 100644 ortools/routing/parsers/dow_parser.cc create mode 100644 ortools/routing/parsers/dow_parser.h create mode 100644 ortools/routing/parsers/dow_parser_test.cc rename ortools/routing/{ => parsers}/lilim_parser.cc (98%) rename ortools/routing/{ => parsers}/lilim_parser.h (96%) rename ortools/routing/{ => parsers}/lilim_parser_test.cc (90%) rename ortools/routing/{ => parsers}/nearp_parser.cc (99%) rename ortools/routing/{ => parsers}/nearp_parser.h (98%) rename ortools/routing/{ => parsers}/nearp_parser_test.cc (92%) rename ortools/routing/{ => parsers}/pdtsp_parser.cc (98%) rename ortools/routing/{ => parsers}/pdtsp_parser.h (92%) rename ortools/routing/{ => parsers}/pdtsp_parser_test.cc (94%) rename ortools/routing/{ => parsers}/simple_graph.cc (94%) rename ortools/routing/{ => parsers}/simple_graph.h (96%) rename ortools/routing/{ => parsers}/simple_graph_test.cc (98%) rename ortools/routing/{ => parsers}/solomon_parser.cc (98%) rename ortools/routing/{ => parsers}/solomon_parser.h (96%) rename ortools/routing/{ => parsers}/solomon_parser_test.cc (97%) rename ortools/routing/{ => parsers}/solution_serializer.cc (99%) rename ortools/routing/{ => parsers}/solution_serializer.h (98%) rename ortools/routing/{ => parsers}/solution_serializer_test.cc (99%) rename ortools/routing/{ => parsers}/testdata/BUILD.bazel (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_diferente_deposito.dat (100%) rename ortools/routing/{ => 
parsers}/testdata/carp_gdb19_incorrecta_lista_aristas_req.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_arinoreq.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_arireq.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_arista.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_capacidad.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_coste.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_deposito.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_tipo.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_vehiculos.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_incorrecto_vertices.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_mixed_arcs.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_gdb19_no_arista_req.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_toy.dat (100%) rename ortools/routing/{ => parsers}/testdata/carp_toy.sol (100%) rename ortools/routing/{ => parsers}/testdata/lilim.zip (100%) rename ortools/routing/{ => parsers}/testdata/n20w20.001.txt (100%) rename ortools/routing/{ => parsers}/testdata/n20w20.002.txt (100%) rename ortools/routing/{ => parsers}/testdata/nearp_BHW1.dat (100%) rename ortools/routing/{ => parsers}/testdata/nearp_toy.dat (100%) rename ortools/routing/{ => parsers}/testdata/pdptw_LRC2_10_6.txt (100%) rename ortools/routing/{ => parsers}/testdata/pdtsp_prob10b.txt (100%) rename ortools/routing/{ => parsers}/testdata/pdtsp_prob10b.txt.gz (100%) rename ortools/routing/{ => parsers}/testdata/rc201.0 (100%) rename ortools/routing/{ => parsers}/testdata/solomon.zip (100%) rename ortools/routing/{ => parsers}/testdata/solomon_bp_c101.mps (100%) rename ortools/routing/{ => parsers}/testdata/solomon_bp_c101.pb (100%) rename ortools/routing/{ => 
parsers}/testdata/solomon_check_id.md (100%) rename ortools/routing/{ => parsers}/testdata/solomon_check_id.txt (100%) rename ortools/routing/{ => parsers}/testdata/tsplib_F-n45-k4.vrp (100%) rename ortools/routing/{ => parsers}/testdata/tsplib_Kytojoki_33.vrp (100%) rename ortools/routing/{ => parsers}/testdata/tsplib_ar9152.tour (100%) rename ortools/routing/{ => parsers}/testdata/tsplib_ar9152.tsp (100%) rename ortools/routing/{ => parsers}/tsplib_parser.cc (99%) rename ortools/routing/{ => parsers}/tsplib_parser.h (97%) rename ortools/routing/{ => parsers}/tsplib_parser_test.cc (98%) rename ortools/routing/{ => parsers}/tsptw_parser.cc (99%) rename ortools/routing/{ => parsers}/tsptw_parser.h (94%) rename ortools/routing/{ => parsers}/tsptw_parser_test.cc (89%) diff --git a/ortools/routing/README.md b/ortools/routing/README.md index 98e47ea40b..3fa3de7a65 100644 --- a/ortools/routing/README.md +++ b/ortools/routing/README.md @@ -1,30 +1,6 @@ # Routing -This folder contains utilities related to routing problems. For now, it only -contains parsers for usual file formats and utilities directly related to these -file formats. - -`solution_serializer.h` contains a generic serializer for routing solutions for -many formats. - -| Problem type | File format | Corresponding parser | Data sets | -| ------------ | ----------- | -------------------- | --------- | -| TSP | TSPLIB | `tsplib_parser.h` | [TSPLIB95][tsplib95] | -| TSPTW | TSPTW | `tsptw_parser.h` | [TSPTW][tsptw] | -| PDTSP / TSPPD | PDTSP | `pdtsp_parser.h` | [PDTSP][pdtsp] | -| CVRP | TSPLIB | `tsplib_parser.h` | [TSPLIB95][tsplib95] | -| VRPTW | Solomon | `solomon_parser.h` | [Solomon][solomon], [Homberger][homberger] | -| CARP | CARPLIB | `carplib_parser.h` | [CARPLIB][carplib] | -| NEARP | NEARPLIB | `nearplib_parser.h` | [NEARPLIB][nearplib] | -| PDPTW | LiLim | `lilim_parser.h` | [LiLim][lilim] | - -In the future, this folder will contain the whole routing solver. 
- -[tsplib95]: http://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/DOC.PS -[tsptw]: https://homepages.dcc.ufmg.br/~rfsilva/tsptw/ -[solomon]: https://www.sintef.no/projectweb/top/vrptw/solomon-benchmark/ -[homberger]: https://www.sintef.no/projectweb/top/vrptw/homberger-benchmark/ -[pdtsp]: https://web.archive.org/web/20080318001744/http://www.diku.dk/~sropke/ -[nearplib]: https://www.sintef.no/projectweb/top/nearp/ -[carplib]: https://www.uv.es/belengue/carp.html -[lilim]: https://www.sintef.no/projectweb/top/pdptw/li-lim-benchmark/ +This folder contains utilities related to routing problems, currently only +utilities for file formats in the +[`parsers`](../routing/parsers) +subfolder. diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/parsers/BUILD.bazel similarity index 77% rename from ortools/routing/BUILD.bazel rename to ortools/routing/parsers/BUILD.bazel index 4a0f28d7c5..94690f386f 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/parsers/BUILD.bazel @@ -11,8 +11,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") + package(default_visibility = ["//visibility:public"]) +proto_library( + name = "capacity_planning_proto", + srcs = ["capacity_planning.proto"], +) + +cc_proto_library( + name = "capacity_planning_cc_proto", + deps = [":capacity_planning_proto"], +) + cc_library( name = "simple_graph", srcs = ["simple_graph.cc"], @@ -53,7 +66,7 @@ cc_test( name = "solomon_parser_test", size = "small", srcs = ["solomon_parser_test.cc"], - data = ["//ortools/routing/testdata:solomon.zip"], + data = ["//ortools/routing/parsers/testdata:solomon.zip"], deps = [ ":solomon_parser", "//ortools/base", @@ -82,8 +95,8 @@ cc_test( size = "small", srcs = ["lilim_parser_test.cc"], data = [ - "//ortools/routing/testdata:lilim.zip", - "//ortools/routing/testdata:pdptw_LRC2_10_6.txt", + "//ortools/routing/parsers/testdata:lilim.zip", + "//ortools/routing/parsers/testdata:pdptw_LRC2_10_6.txt", ], deps = [ ":lilim_parser", @@ -111,19 +124,19 @@ cc_test( size = "small", srcs = ["carp_parser_test.cc"], data = [ - "//ortools/routing/testdata:carp_gdb19.dat", - "//ortools/routing/testdata:carp_gdb19_diferente_deposito.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecta_lista_aristas_req.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_arinoreq.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_arireq.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_capacidad.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_coste.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_deposito.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_tipo.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_vehiculos.dat", - "//ortools/routing/testdata:carp_gdb19_incorrecto_vertices.dat", - "//ortools/routing/testdata:carp_gdb19_mixed_arcs.dat", - "//ortools/routing/testdata:carp_gdb19_no_arista_req.dat", + 
"//ortools/routing/parsers/testdata:carp_gdb19.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_diferente_deposito.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecta_lista_aristas_req.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_arinoreq.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_arireq.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_capacidad.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_coste.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_deposito.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_tipo.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_vehiculos.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_incorrecto_vertices.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_mixed_arcs.dat", + "//ortools/routing/parsers/testdata:carp_gdb19_no_arista_req.dat", ], deps = [ ":carp_parser", @@ -156,8 +169,8 @@ cc_test( size = "small", srcs = ["nearp_parser_test.cc"], data = [ - "//ortools/routing/testdata:nearp_BHW1.dat", - "//ortools/routing/testdata:nearp_toy.dat", + "//ortools/routing/parsers/testdata:nearp_BHW1.dat", + "//ortools/routing/parsers/testdata:nearp_toy.dat", ], deps = [ ":nearp_parser", @@ -192,8 +205,8 @@ cc_test( size = "small", srcs = ["pdtsp_parser_test.cc"], data = [ - "//ortools/routing/testdata:pdtsp_prob10b.txt", - "//ortools/routing/testdata:pdtsp_prob10b.txt.gz", + "//ortools/routing/parsers/testdata:pdtsp_prob10b.txt", + "//ortools/routing/parsers/testdata:pdtsp_prob10b.txt.gz", ], deps = [ ":pdtsp_parser", @@ -237,7 +250,7 @@ cc_test( #"@com_google_ortools_data//TSPLIB95:ALL_tsp.zip", #"@com_google_ortools_data//TSPLIB95:ALL_vrp.tar", #"@com_google_ortools_data//TSPLIB95:ALL_vrp.zip", - "//ortools/routing/testdata:tsplib_Kytojoki_33.vrp", + "//ortools/routing/parsers/testdata:tsplib_Kytojoki_33.vrp", ], deps = [ ":tsplib_parser", @@ -274,9 +287,9 @@ cc_test( size = "small", 
srcs = ["tsptw_parser_test.cc"], data = [ - "//ortools/routing/testdata:n20w20.001.txt", - "//ortools/routing/testdata:n20w20.002.txt", - "//ortools/routing/testdata:rc201.0", + "//ortools/routing/parsers/testdata:n20w20.001.txt", + "//ortools/routing/parsers/testdata:n20w20.002.txt", + "//ortools/routing/parsers/testdata:rc201.0", ], deps = [ ":tsptw_parser", @@ -325,3 +338,15 @@ cc_library( "//ortools/util:random_engine", ], ) + +cc_library( + name = "dow_parser", + srcs = ["dow_parser.cc"], + hdrs = ["dow_parser.h"], + deps = [ + ":capacity_planning_cc_proto", + "//ortools/base", + "//ortools/constraint_solver:routing", + "//ortools/util:random_engine", + ], +) diff --git a/ortools/routing/parsers/README.md b/ortools/routing/parsers/README.md new file mode 100644 index 0000000000..7019481387 --- /dev/null +++ b/ortools/routing/parsers/README.md @@ -0,0 +1,31 @@ +# Routing + +This folder contains utilities for usual file formats useful in routing problems +and utilities directly related to these file formats. + +`solution_serializer.h` contains a generic serializer for routing solutions for +many formats. + +| Problem type | File format | Corresponding parser | Data storage | Data sets | +| ------------ | ----------- | -------------------- | ------------ | --------- | +| TSP | TSPLIB | `tsplib_parser.h` | | [TSPLIB95][tsplib95] | +| TSPTW | TSPTW | `tsptw_parser.h` | | [TSPTW][tsptw] | +| PDTSP / TSPPD | PDTSP | `pdtsp_parser.h` | | [PDTSP][pdtsp] | +| CVRP | TSPLIB | `tsplib_parser.h` | | [TSPLIB95][tsplib95] | +| VRPTW | Solomon | `solomon_parser.h` | | [Solomon][solomon], [Homberger][homberger] | +| CARP | CARPLIB | `carplib_parser.h` | | [CARPLIB][carplib] | +| NEARP | NEARPLIB | `nearplib_parser.h` | | [NEARPLIB][nearplib] | +| PDPTW | LiLim | `lilim_parser.h` | | [LiLim][lilim] | +| MCND | Dow | `dow_parser.h` | `capacity_planning.proto` | [Canad][canad] | + +In the future, this folder will contain the whole routing solver. 
+ +[tsplib95]: http://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/DOC.PS +[tsptw]: https://homepages.dcc.ufmg.br/~rfsilva/tsptw/ +[solomon]: https://www.sintef.no/projectweb/top/vrptw/solomon-benchmark/ +[homberger]: https://www.sintef.no/projectweb/top/vrptw/homberger-benchmark/ +[pdtsp]: https://web.archive.org/web/20080318001744/http://www.diku.dk/~sropke/ +[nearplib]: https://www.sintef.no/projectweb/top/nearp/ +[carplib]: https://www.uv.es/belengue/carp.html +[lilim]: https://www.sintef.no/projectweb/top/pdptw/li-lim-benchmark/ +[canad]: http://groups.di.unipi.it/optimize/Data/MMCF.html diff --git a/ortools/routing/parsers/capacity_planning.proto b/ortools/routing/parsers/capacity_planning.proto new file mode 100644 index 0000000000..2d089df73a --- /dev/null +++ b/ortools/routing/parsers/capacity_planning.proto @@ -0,0 +1,56 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package operations_research; + +// This is the proto for describing the multicommodity fixed-charged network +// design problem. + +message NetworkTopology { + // The start node of an arc. Must be >= 0. + // We do allow multi-arcs but not self-arcs. + repeated int32 from_node = 1; + + // The end node of arcs. Must be >= 0. + repeated int32 to_node = 2; + + // The variable cost per unit of commodity demand on arcs. Must be >= 0. + repeated double variable_cost = 3; + + // The fixed charge for using an arc. 
Must be >= 0. + repeated double fixed_cost = 4; + + // The total capacity of arcs. Must be > 0. + repeated double capacity = 5; +} + +message Commodities { + // The departure node of the demand. Must be >= 0. + repeated int32 from_node = 1; + + // The destination node of the demand. Must be >= 0. + repeated int32 to_node = 2; + + // The quantity to carry (must be > 0). + repeated double demand = 3; +} + +message CapacityPlanningInstance { + // The network on which to operate. + NetworkTopology topology = 1; + + // What to deliver. + Commodities commodities = 2; +} diff --git a/ortools/routing/carp_parser.cc b/ortools/routing/parsers/carp_parser.cc similarity index 99% rename from ortools/routing/carp_parser.cc rename to ortools/routing/parsers/carp_parser.cc index c4c9efa914..a259974dff 100644 --- a/ortools/routing/carp_parser.cc +++ b/ortools/routing/parsers/carp_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/carp_parser.h" +#include "ortools/routing/parsers/carp_parser.h" #include #include diff --git a/ortools/routing/carp_parser.h b/ortools/routing/parsers/carp_parser.h similarity index 97% rename from ortools/routing/carp_parser.h rename to ortools/routing/parsers/carp_parser.h index 95bd89a050..8a40cd0c73 100644 --- a/ortools/routing/carp_parser.h +++ b/ortools/routing/parsers/carp_parser.h @@ -52,8 +52,8 @@ // parser is always 0-based. Users of this parser should never see any 1-based // index; only 0-based index should be used to query values. 
-#ifndef OR_TOOLS_ROUTING_CARP_PARSER_H_ -#define OR_TOOLS_ROUTING_CARP_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_CARP_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_CARP_PARSER_H_ #include #include @@ -63,7 +63,7 @@ #include "absl/types/span.h" #include "ortools/base/linked_hash_map.h" #include "ortools/base/logging.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { class CarpParser { @@ -183,4 +183,4 @@ class CarpParser { }; } // namespace operations_research -#endif // OR_TOOLS_ROUTING_CARP_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_CARP_PARSER_H_ diff --git a/ortools/routing/carp_parser_test.cc b/ortools/routing/parsers/carp_parser_test.cc similarity index 88% rename from ortools/routing/carp_parser_test.cc rename to ortools/routing/parsers/carp_parser_test.cc index b3b70e1ade..977737f57b 100644 --- a/ortools/routing/carp_parser_test.cc +++ b/ortools/routing/parsers/carp_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/carp_parser.h" +#include "ortools/routing/parsers/carp_parser.h" #include @@ -70,7 +70,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectNumberOfNodes) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_vertices.dat"))); } @@ -87,7 +87,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectNumberOfArcsWithServicings) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_arireq.dat"))); } @@ -104,7 +104,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectNumberOfArcsWithoutServicings) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_arinoreq.dat"))); } @@ -121,7 +121,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectNumberOfVehicles) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_vehiculos.dat"))); } @@ -138,7 +138,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectCapacity) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_capacidad.dat"))); } @@ -155,7 +155,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectTypeOfArcCost) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_tipo.dat"))); } @@ -171,7 +171,7 @@ TEST(CarpParserTest, 
LoadInvalidFileIncorrectTotalServicingCost) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_coste.dat"))); } @@ -190,7 +190,7 @@ TEST(CarpParserTest, LoadInvalidFileIncorrectDepot) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecto_deposito.dat"))); } @@ -206,10 +206,10 @@ TEST(CarpParserTest, LoadInvalidFileNoEdgeWithServicing) { log.StartCapturingLogs(); CarpParser parser; - EXPECT_FALSE( - parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/" - "testdata/carp_gdb19_no_arista_req.dat"))); + EXPECT_FALSE(parser.LoadFile( + file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), + ROOT_DIR "ortools/routing/parsers" + "/testdata/carp_gdb19_no_arista_req.dat"))); } TEST(CarpParserTest, LoadInvalidFileServicingForArcsWithoutServicing) { @@ -224,8 +224,8 @@ TEST(CarpParserTest, LoadInvalidFileServicingForArcsWithoutServicing) { CarpParser parser; EXPECT_FALSE( parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/" - "testdata/carp_gdb19_mixed_arcs.dat"))); + "ortools/routing/parsers" + "/testdata/carp_gdb19_mixed_arcs.dat"))); } TEST(CarpParserTest, LoadInvalidFileServicingForArcsInWrongOrder) { @@ -240,14 +240,15 @@ TEST(CarpParserTest, LoadInvalidFileServicingForArcsInWrongOrder) { CarpParser parser; EXPECT_FALSE(parser.LoadFile( file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/testdata/" + ROOT_DIR "ortools/routing/parsers/testdata/" "carp_gdb19_incorrecta_lista_aristas_req.dat"))); } TEST(CarpParserTest, LoadInstanceFile) { std::string file_name = - file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/testdata/carp_gdb19.dat"); + 
file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR + "ortools/routing/parsers/testdata/" + "carp_gdb19.dat"); CarpParser parser; EXPECT_TRUE(parser.LoadFile(file_name)); EXPECT_EQ(parser.name(), "gdb19"); @@ -272,7 +273,7 @@ TEST(CarpParserTest, LoadInstanceFile) { TEST(CarpParserTest, LoadInstanceFileWithDifferentDepot) { std::string file_name = file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/" + "ortools/routing/parsers/testdata/" "carp_gdb19_diferente_deposito.dat"); CarpParser parser; EXPECT_TRUE(parser.LoadFile(file_name)); diff --git a/ortools/routing/cvrptw_lib.cc b/ortools/routing/parsers/cvrptw_lib.cc similarity index 99% rename from ortools/routing/cvrptw_lib.cc rename to ortools/routing/parsers/cvrptw_lib.cc index 2ab8d36bcc..5645ae0684 100644 --- a/ortools/routing/cvrptw_lib.cc +++ b/ortools/routing/parsers/cvrptw_lib.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" #include #include diff --git a/ortools/routing/cvrptw_lib.h b/ortools/routing/parsers/cvrptw_lib.h similarity index 97% rename from ortools/routing/cvrptw_lib.h rename to ortools/routing/parsers/cvrptw_lib.h index e5e96fc10d..1561a7e586 100644 --- a/ortools/routing/cvrptw_lib.h +++ b/ortools/routing/parsers/cvrptw_lib.h @@ -13,8 +13,8 @@ // This header provides functions to help create random instances of the // vehicle routing problem; random capacities and random time windows. 
-#ifndef OR_TOOLS_ROUTING_CVRPTW_LIB_H_ -#define OR_TOOLS_ROUTING_CVRPTW_LIB_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_CVRPTW_LIB_H_ +#define OR_TOOLS_ROUTING_PARSERS_CVRPTW_LIB_H_ #include #include @@ -133,4 +133,4 @@ void DisplayPlan( } // namespace operations_research -#endif // OR_TOOLS_ROUTING_CVRPTW_LIB_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_CVRPTW_LIB_H_ diff --git a/ortools/routing/parsers/dow_parser.cc b/ortools/routing/parsers/dow_parser.cc new file mode 100644 index 0000000000..29dc375a44 --- /dev/null +++ b/ortools/routing/parsers/dow_parser.cc @@ -0,0 +1,106 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/routing/parsers/dow_parser.h" + +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/status/status.h" +#include "absl/strings/numbers.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "ortools/base/filesystem.h" +#include "ortools/base/logging.h" +#include "ortools/base/options.h" +#include "ortools/routing/parsers/capacity_planning.pb.h" +#include "ortools/util/filelineiter.h" + +namespace operations_research { +::absl::Status ReadFile(absl::string_view file_name, + CapacityPlanningInstance* request) { + if (!file::Exists(file_name, file::Defaults()).ok()) { + return absl::NotFoundError(absl::StrCat(file_name, " not found")); + } + int line_num = 0; + int num_nodes = 0; + int num_arcs = 0; + int num_commodities = 0; + int arc_num = 0; + int commodity_num = 0; + for (const std::string& line : + FileLines(file_name, FileLineIterator::REMOVE_INLINE_CR)) { + if (line == "MULTIGEN.DAT:") { + CHECK_EQ(line_num, 0); + ++line_num; + continue; + } + const std::vector fields = + absl::StrSplit(line, absl::ByAnyChar(" \t"), absl::SkipEmpty()); + if (fields.size() == 3) { + if (line_num == 1) { // Sizes. + CHECK(absl::SimpleAtoi(fields[0], &num_nodes)); + CHECK(absl::SimpleAtoi(fields[1], &num_arcs)); + CHECK(absl::SimpleAtoi(fields[2], &num_commodities)); + VLOG(1) << "num_nodes = " << num_nodes << ", num_arcs = " << num_arcs + << ", num_commodities = " << num_commodities; + } else { // Demand per commodity. The commodity number is implicit. 
+ CHECK_GT(line_num, num_arcs + 1); + CHECK_LE(line_num, num_arcs + num_commodities + 1); + int from_node; + CHECK(absl::SimpleAtoi(fields[0], &from_node)); + int to_node; + CHECK(absl::SimpleAtoi(fields[1], &to_node)); + int64_t demand; + CHECK(absl::SimpleAtoi(fields[2], &demand)); + CHECK_GT(demand, 0); + ++commodity_num; + request->mutable_commodities()->add_from_node(from_node); + request->mutable_commodities()->add_to_node(to_node); + request->mutable_commodities()->add_demand(demand); + } + } else if (fields.size() == 7) { // Information per arc. Arc number is + // implicit. + CHECK_GT(line_num, 1); + CHECK_LE(line_num, num_arcs + 1); + CHECK_LT(arc_num, num_arcs); + int from_node; + CHECK(absl::SimpleAtoi(fields[0], &from_node)); + int to_node; + CHECK(absl::SimpleAtoi(fields[1], &to_node)); + int variable_cost; + CHECK(absl::SimpleAtoi(fields[2], &variable_cost)); + int capacity; + CHECK(absl::SimpleAtoi(fields[3], &capacity)); + int fixed_cost; + CHECK(absl::SimpleAtoi(fields[4], &fixed_cost)); + int unused; + CHECK(absl::SimpleAtoi(fields[5], &unused)); + CHECK(absl::SimpleAtoi(fields[6], &unused)); + ++arc_num; + request->mutable_topology()->add_from_node(from_node); + request->mutable_topology()->add_to_node(to_node); + request->mutable_topology()->add_variable_cost(variable_cost); + request->mutable_topology()->add_capacity(capacity); + request->mutable_topology()->add_fixed_cost(fixed_cost); + } + ++line_num; + } + CHECK_EQ(commodity_num, num_commodities); + return absl::OkStatus(); +} + +} // namespace operations_research diff --git a/ortools/routing/parsers/dow_parser.h b/ortools/routing/parsers/dow_parser.h new file mode 100644 index 0000000000..8d78b8443e --- /dev/null +++ b/ortools/routing/parsers/dow_parser.h @@ -0,0 +1,29 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_ROUTING_PARSERS_DOW_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_DOW_PARSER_H_ + +#include "absl/status/status.h" +#include "absl/strings/string_view.h" +#include "ortools/routing/parsers/capacity_planning.pb.h" + +// Reader for Multicommodity fixed-charge Network Design (MCND) files using the +// .dow format. + +namespace operations_research { +::absl::Status ReadFile(absl::string_view file_name, + CapacityPlanningInstance* request); +} // namespace operations_research + +#endif // OR_TOOLS_ROUTING_PARSERS_DOW_PARSER_H_ diff --git a/ortools/routing/parsers/dow_parser_test.cc b/ortools/routing/parsers/dow_parser_test.cc new file mode 100644 index 0000000000..99f74430ee --- /dev/null +++ b/ortools/routing/parsers/dow_parser_test.cc @@ -0,0 +1,61 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/routing/parsers/dow_parser.h" + +#include + +#include "absl/flags/flag.h" +#include "absl/status/status.h" +#include "gtest/gtest.h" +#include "ortools/base/gmock.h" +#include "ortools/base/path.h" +#include "ortools/routing/parsers/capacity_planning.pb.h" + +namespace operations_research { +namespace { +TEST(CapacityPlanningReaderTest, C33PassesOK) { + CapacityPlanningInstance request; + ::absl::Status status = ReadFile( + file::JoinPathRespectAbsolute( + absl::GetFlag(FLAGS_test_srcdir), "operations_research_data/", + "MULTICOM_FIXED_CHARGE_NETWORK_DESIGN/C/c33.dow"), + &request); + EXPECT_OK(status); + const NetworkTopology& topology = request.topology(); + const int num_arcs = topology.from_node_size(); + EXPECT_EQ(num_arcs, topology.to_node_size()); + EXPECT_EQ(num_arcs, topology.fixed_cost_size()); + EXPECT_EQ(num_arcs, topology.variable_cost_size()); + EXPECT_EQ(num_arcs, topology.capacity_size()); + EXPECT_EQ(num_arcs, 228); + const Commodities& commodities = request.commodities(); + const int num_commodities = commodities.from_node_size(); + EXPECT_EQ(num_commodities, 39); + EXPECT_EQ(commodities.to_node_size(), num_commodities); + EXPECT_EQ(commodities.demand_size(), num_commodities); +} + +TEST(CapacityPlanningReaderTest, C34DoesNotExist) { + CapacityPlanningInstance request; + ::absl::Status status = ReadFile( + file::JoinPathRespectAbsolute( + absl::GetFlag(FLAGS_test_srcdir), "operations_research_data/", + "MULTICOM_FIXED_CHARGE_NETWORK_DESIGN/C/c34.dow"), + &request); + EXPECT_THAT(::util::StatusToString(status), + testing::HasSubstr("generic::not_found")); +} + +} // namespace +} // namespace operations_research diff --git a/ortools/routing/lilim_parser.cc b/ortools/routing/parsers/lilim_parser.cc similarity index 98% rename from ortools/routing/lilim_parser.cc rename to ortools/routing/parsers/lilim_parser.cc index 511926a137..5fb8deb0ab 100644 --- a/ortools/routing/lilim_parser.cc +++ b/ortools/routing/parsers/lilim_parser.cc 
@@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/lilim_parser.h" +#include "ortools/routing/parsers/lilim_parser.h" #include #include diff --git a/ortools/routing/lilim_parser.h b/ortools/routing/parsers/lilim_parser.h similarity index 96% rename from ortools/routing/lilim_parser.h rename to ortools/routing/parsers/lilim_parser.h index eb856fba8a..2c2f441667 100644 --- a/ortools/routing/lilim_parser.h +++ b/ortools/routing/parsers/lilim_parser.h @@ -38,8 +38,8 @@ // corresponding pickup node. The value of travel time is equal to the value of // distance. -#ifndef OR_TOOLS_ROUTING_LILIM_PARSER_H_ -#define OR_TOOLS_ROUTING_LILIM_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_LILIM_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_LILIM_PARSER_H_ #include @@ -49,7 +49,7 @@ #include #include "absl/strings/string_view.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { @@ -124,4 +124,4 @@ class LiLimParser { }; } // namespace operations_research -#endif // OR_TOOLS_ROUTING_LILIM_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_LILIM_PARSER_H_ diff --git a/ortools/routing/lilim_parser_test.cc b/ortools/routing/parsers/lilim_parser_test.cc similarity index 90% rename from ortools/routing/lilim_parser_test.cc rename to ortools/routing/parsers/lilim_parser_test.cc index ee0eca8b08..5b3d0b024d 100644 --- a/ortools/routing/lilim_parser_test.cc +++ b/ortools/routing/parsers/lilim_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/lilim_parser.h" +#include "ortools/routing/parsers/lilim_parser.h" #include @@ -56,9 +56,9 @@ TEST(LiLimParserTest, LoadNonExistingFile) { TEST(LiLimParserTest, LoadExistingFile) { LiLimParser parser; EXPECT_TRUE( - parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/" - "testdata/pdptw_LRC2_10_6.txt"))); + parser.LoadFile(file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR + "ortools/routing/parsers" + "/testdata/pdptw_LRC2_10_6.txt"))); CheckData(parser); // Load a non-existing file to check the parser was cleaned. EXPECT_FALSE(parser.LoadFile("doesnotexist.txt")); @@ -80,8 +80,9 @@ TEST(LiLimParserTest, LoadNonExistingInstance) { LiLimParser parser; EXPECT_FALSE( parser.LoadFile("doesnotexist.txt", - file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR - "ortools/routing/testdata/lilim.zip"))); + file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), + ROOT_DIR "ortools/routing/" + "parsers/testdata/lilim.zip"))); } } // namespace diff --git a/ortools/routing/nearp_parser.cc b/ortools/routing/parsers/nearp_parser.cc similarity index 99% rename from ortools/routing/nearp_parser.cc rename to ortools/routing/parsers/nearp_parser.cc index 6b3cb220ce..ece0e93fba 100644 --- a/ortools/routing/nearp_parser.cc +++ b/ortools/routing/parsers/nearp_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/nearp_parser.h" +#include "ortools/routing/parsers/nearp_parser.h" #include #include diff --git a/ortools/routing/nearp_parser.h b/ortools/routing/parsers/nearp_parser.h similarity index 98% rename from ortools/routing/nearp_parser.h rename to ortools/routing/parsers/nearp_parser.h index d3abc1b01c..a07535c0ea 100644 --- a/ortools/routing/nearp_parser.h +++ b/ortools/routing/parsers/nearp_parser.h @@ -72,8 +72,8 @@ // parser is always 0-based. 
Users of this parser should never see any 1-based // index; only 0-based index should be used to query values. -#ifndef OR_TOOLS_ROUTING_NEARP_PARSER_H_ -#define OR_TOOLS_ROUTING_NEARP_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_NEARP_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_NEARP_PARSER_H_ #include #include @@ -82,7 +82,7 @@ #include "ortools/base/linked_hash_map.h" #include "ortools/base/logging.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { class NearpParser { @@ -254,4 +254,4 @@ class NearpParser { }; } // namespace operations_research -#endif // OR_TOOLS_ROUTING_NEARP_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_NEARP_PARSER_H_ diff --git a/ortools/routing/nearp_parser_test.cc b/ortools/routing/parsers/nearp_parser_test.cc similarity index 92% rename from ortools/routing/nearp_parser_test.cc rename to ortools/routing/parsers/nearp_parser_test.cc index 4a30dabe97..d28abd65f2 100644 --- a/ortools/routing/nearp_parser_test.cc +++ b/ortools/routing/parsers/nearp_parser_test.cc @@ -11,14 +11,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/nearp_parser.h" +#include "ortools/routing/parsers/nearp_parser.h" #include #include "absl/flags/flag.h" #include "gtest/gtest.h" #include "ortools/base/path.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" #if defined(_MSC_VER) #define ROOT_DIR "../../../../../../../" @@ -56,8 +56,9 @@ TEST(NearpParserTest, LoadNonExistingFile) { TEST(NearpParserTest, LoadBHW1) { std::string file_name = - file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/testdata/nearp_BHW1.dat"); + file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR + "ortools/routing/parsers/testdata/" + "nearp_BHW1.dat"); NearpParser parser; EXPECT_TRUE(parser.LoadFile(file_name)); EXPECT_EQ(parser.name(), "BHW1"); @@ -97,8 +98,9 @@ TEST(NearpParserTest, LoadBHW1) { TEST(NearpParserTest, LoadToy) { std::string file_name = - file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/testdata/nearp_toy.dat"); + file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), ROOT_DIR + "ortools/routing/parsers/" + "testdata/nearp_toy.dat"); NearpParser parser; EXPECT_TRUE(parser.LoadFile(file_name)); EXPECT_EQ(parser.name(), "Toy"); diff --git a/ortools/routing/pdtsp_parser.cc b/ortools/routing/parsers/pdtsp_parser.cc similarity index 98% rename from ortools/routing/pdtsp_parser.cc rename to ortools/routing/parsers/pdtsp_parser.cc index 86fd3a7bb8..426c2ab511 100644 --- a/ortools/routing/pdtsp_parser.cc +++ b/ortools/routing/parsers/pdtsp_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/pdtsp_parser.h" +#include "ortools/routing/parsers/pdtsp_parser.h" #include #include diff --git a/ortools/routing/pdtsp_parser.h b/ortools/routing/parsers/pdtsp_parser.h similarity index 92% rename from ortools/routing/pdtsp_parser.h rename to ortools/routing/parsers/pdtsp_parser.h index 1d4d8af4cd..2039c68463 100644 --- a/ortools/routing/pdtsp_parser.h +++ b/ortools/routing/parsers/pdtsp_parser.h @@ -15,8 +15,8 @@ // pickup and delivery constraints. This format was created by Stefan Ropke. // https://link.springer.com/article/10.1007%2Fs10107-008-0234-9 -#ifndef OR_TOOLS_ROUTING_PDTSP_PARSER_H_ -#define OR_TOOLS_ROUTING_PDTSP_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_PDTSP_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_PDTSP_PARSER_H_ #include #include @@ -56,4 +56,4 @@ class PdTspParser { } // namespace operations_research -#endif // OR_TOOLS_ROUTING_PDTSP_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_PDTSP_PARSER_H_ diff --git a/ortools/routing/pdtsp_parser_test.cc b/ortools/routing/parsers/pdtsp_parser_test.cc similarity index 94% rename from ortools/routing/pdtsp_parser_test.cc rename to ortools/routing/parsers/pdtsp_parser_test.cc index 36afcf0c25..b2ced79f49 100644 --- a/ortools/routing/pdtsp_parser_test.cc +++ b/ortools/routing/parsers/pdtsp_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/pdtsp_parser.h" +#include "ortools/routing/parsers/pdtsp_parser.h" #include #include @@ -33,7 +33,7 @@ namespace operations_research { namespace { TEST(PdTspParserTest, LoadDataSet) { for (const std::string& data : { - ROOT_DIR "ortools/routing/testdata/" + ROOT_DIR "ortools/routing/parsers/testdata/" "pdtsp_prob10b.txt", }) { PdTspParser parser; diff --git a/ortools/routing/simple_graph.cc b/ortools/routing/parsers/simple_graph.cc similarity index 94% rename from ortools/routing/simple_graph.cc rename to ortools/routing/parsers/simple_graph.cc index 69bbb83701..b5d95d35a7 100644 --- a/ortools/routing/simple_graph.cc +++ b/ortools/routing/parsers/simple_graph.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { diff --git a/ortools/routing/simple_graph.h b/ortools/routing/parsers/simple_graph.h similarity index 96% rename from ortools/routing/simple_graph.h rename to ortools/routing/parsers/simple_graph.h index 5e956da0ce..13dc5d0eed 100644 --- a/ortools/routing/simple_graph.h +++ b/ortools/routing/parsers/simple_graph.h @@ -13,8 +13,8 @@ // Common utilities for parsing routing instances. 
-#ifndef OR_TOOLS_ROUTING_SIMPLE_GRAPH_H_ -#define OR_TOOLS_ROUTING_SIMPLE_GRAPH_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_SIMPLE_GRAPH_H_ +#define OR_TOOLS_ROUTING_PARSERS_SIMPLE_GRAPH_H_ #include #include @@ -152,4 +152,4 @@ struct SimpleTimeWindow { } // namespace operations_research -#endif // OR_TOOLS_ROUTING_SIMPLE_GRAPH_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_SIMPLE_GRAPH_H_ diff --git a/ortools/routing/simple_graph_test.cc b/ortools/routing/parsers/simple_graph_test.cc similarity index 98% rename from ortools/routing/simple_graph_test.cc rename to ortools/routing/parsers/simple_graph_test.cc index 78687797b4..88c6846b26 100644 --- a/ortools/routing/simple_graph_test.cc +++ b/ortools/routing/parsers/simple_graph_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" #include #include diff --git a/ortools/routing/solomon_parser.cc b/ortools/routing/parsers/solomon_parser.cc similarity index 98% rename from ortools/routing/solomon_parser.cc rename to ortools/routing/parsers/solomon_parser.cc index 3d3919507d..00d2ebc4cb 100644 --- a/ortools/routing/solomon_parser.cc +++ b/ortools/routing/parsers/solomon_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/solomon_parser.h" +#include "ortools/routing/parsers/solomon_parser.h" #include #include diff --git a/ortools/routing/solomon_parser.h b/ortools/routing/parsers/solomon_parser.h similarity index 96% rename from ortools/routing/solomon_parser.h rename to ortools/routing/parsers/solomon_parser.h index ea3b778195..d02a7e357d 100644 --- a/ortools/routing/solomon_parser.h +++ b/ortools/routing/parsers/solomon_parser.h @@ -39,8 +39,8 @@ // containing multiple instances. 
// -#ifndef OR_TOOLS_ROUTING_SOLOMON_PARSER_H_ -#define OR_TOOLS_ROUTING_SOLOMON_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_SOLOMON_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_SOLOMON_PARSER_H_ #include @@ -52,7 +52,7 @@ #include "absl/strings/string_view.h" #include "ortools/base/macros.h" #include "ortools/base/types.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { @@ -136,4 +136,4 @@ class SolomonParser { }; } // namespace operations_research -#endif // OR_TOOLS_ROUTING_SOLOMON_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_SOLOMON_PARSER_H_ diff --git a/ortools/routing/solomon_parser_test.cc b/ortools/routing/parsers/solomon_parser_test.cc similarity index 97% rename from ortools/routing/solomon_parser_test.cc rename to ortools/routing/parsers/solomon_parser_test.cc index e656aa2624..e9426ac60f 100644 --- a/ortools/routing/solomon_parser_test.cc +++ b/ortools/routing/parsers/solomon_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/solomon_parser.h" +#include "ortools/routing/parsers/solomon_parser.h" #include diff --git a/ortools/routing/solution_serializer.cc b/ortools/routing/parsers/solution_serializer.cc similarity index 99% rename from ortools/routing/solution_serializer.cc rename to ortools/routing/parsers/solution_serializer.cc index d904b7edb0..5be2974aca 100644 --- a/ortools/routing/solution_serializer.cc +++ b/ortools/routing/parsers/solution_serializer.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools//routing/solution_serializer.h" +#include "ortools/routing/parsers/solution_serializer.h" #include #include diff --git a/ortools/routing/solution_serializer.h b/ortools/routing/parsers/solution_serializer.h similarity index 98% rename from ortools/routing/solution_serializer.h rename to ortools/routing/parsers/solution_serializer.h index 26a35aa550..874093bab4 100644 --- a/ortools/routing/solution_serializer.h +++ b/ortools/routing/parsers/solution_serializer.h @@ -14,8 +14,8 @@ // Utilities to serialize VRP-like solutions in standardised formats: either // TSPLIB or CVRPLIB. -#ifndef OR_TOOLS_ROUTING_SOLUTION_SERIALIZER_H_ -#define OR_TOOLS_ROUTING_SOLUTION_SERIALIZER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_SOLUTION_SERIALIZER_H_ +#define OR_TOOLS_ROUTING_PARSERS_SOLUTION_SERIALIZER_H_ #include #include @@ -30,7 +30,7 @@ #include "ortools/base/file.h" #include "ortools/base/helpers.h" #include "ortools/base/logging.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { @@ -293,4 +293,4 @@ void PrintStatistic(absl::string_view name, T value, } } // namespace operations_research -#endif // OR_TOOLS_ROUTING_SOLUTION_SERIALIZER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_SOLUTION_SERIALIZER_H_ diff --git a/ortools/routing/solution_serializer_test.cc b/ortools/routing/parsers/solution_serializer_test.cc similarity index 99% rename from ortools/routing/solution_serializer_test.cc rename to ortools/routing/parsers/solution_serializer_test.cc index 22a59659b8..726f143496 100644 --- a/ortools/routing/solution_serializer_test.cc +++ b/ortools/routing/parsers/solution_serializer_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/solution_serializer.h" +#include "ortools/routing/parsers/solution_serializer.h" #include #include @@ -26,7 +26,7 @@ #include "ortools/base/helpers.h" #include "ortools/base/mutable_memfile.h" #include "ortools/base/options.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { namespace { diff --git a/ortools/routing/testdata/BUILD.bazel b/ortools/routing/parsers/testdata/BUILD.bazel similarity index 100% rename from ortools/routing/testdata/BUILD.bazel rename to ortools/routing/parsers/testdata/BUILD.bazel diff --git a/ortools/routing/testdata/carp_gdb19.dat b/ortools/routing/parsers/testdata/carp_gdb19.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19.dat rename to ortools/routing/parsers/testdata/carp_gdb19.dat diff --git a/ortools/routing/testdata/carp_gdb19_diferente_deposito.dat b/ortools/routing/parsers/testdata/carp_gdb19_diferente_deposito.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_diferente_deposito.dat rename to ortools/routing/parsers/testdata/carp_gdb19_diferente_deposito.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecta_lista_aristas_req.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecta_lista_aristas_req.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecta_lista_aristas_req.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecta_lista_aristas_req.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_arinoreq.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arinoreq.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_arinoreq.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arinoreq.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_arireq.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arireq.dat similarity index 100% rename from 
ortools/routing/testdata/carp_gdb19_incorrecto_arireq.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arireq.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_arista.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arista.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_arista.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_arista.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_capacidad.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_capacidad.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_capacidad.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_capacidad.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_coste.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_coste.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_coste.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_coste.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_deposito.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_deposito.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_deposito.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_deposito.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_tipo.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_tipo.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_tipo.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_tipo.dat diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_vehiculos.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_vehiculos.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_vehiculos.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_vehiculos.dat 
diff --git a/ortools/routing/testdata/carp_gdb19_incorrecto_vertices.dat b/ortools/routing/parsers/testdata/carp_gdb19_incorrecto_vertices.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_incorrecto_vertices.dat rename to ortools/routing/parsers/testdata/carp_gdb19_incorrecto_vertices.dat diff --git a/ortools/routing/testdata/carp_gdb19_mixed_arcs.dat b/ortools/routing/parsers/testdata/carp_gdb19_mixed_arcs.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_mixed_arcs.dat rename to ortools/routing/parsers/testdata/carp_gdb19_mixed_arcs.dat diff --git a/ortools/routing/testdata/carp_gdb19_no_arista_req.dat b/ortools/routing/parsers/testdata/carp_gdb19_no_arista_req.dat similarity index 100% rename from ortools/routing/testdata/carp_gdb19_no_arista_req.dat rename to ortools/routing/parsers/testdata/carp_gdb19_no_arista_req.dat diff --git a/ortools/routing/testdata/carp_toy.dat b/ortools/routing/parsers/testdata/carp_toy.dat similarity index 100% rename from ortools/routing/testdata/carp_toy.dat rename to ortools/routing/parsers/testdata/carp_toy.dat diff --git a/ortools/routing/testdata/carp_toy.sol b/ortools/routing/parsers/testdata/carp_toy.sol similarity index 100% rename from ortools/routing/testdata/carp_toy.sol rename to ortools/routing/parsers/testdata/carp_toy.sol diff --git a/ortools/routing/testdata/lilim.zip b/ortools/routing/parsers/testdata/lilim.zip similarity index 100% rename from ortools/routing/testdata/lilim.zip rename to ortools/routing/parsers/testdata/lilim.zip diff --git a/ortools/routing/testdata/n20w20.001.txt b/ortools/routing/parsers/testdata/n20w20.001.txt similarity index 100% rename from ortools/routing/testdata/n20w20.001.txt rename to ortools/routing/parsers/testdata/n20w20.001.txt diff --git a/ortools/routing/testdata/n20w20.002.txt b/ortools/routing/parsers/testdata/n20w20.002.txt similarity index 100% rename from ortools/routing/testdata/n20w20.002.txt rename to 
ortools/routing/parsers/testdata/n20w20.002.txt diff --git a/ortools/routing/testdata/nearp_BHW1.dat b/ortools/routing/parsers/testdata/nearp_BHW1.dat similarity index 100% rename from ortools/routing/testdata/nearp_BHW1.dat rename to ortools/routing/parsers/testdata/nearp_BHW1.dat diff --git a/ortools/routing/testdata/nearp_toy.dat b/ortools/routing/parsers/testdata/nearp_toy.dat similarity index 100% rename from ortools/routing/testdata/nearp_toy.dat rename to ortools/routing/parsers/testdata/nearp_toy.dat diff --git a/ortools/routing/testdata/pdptw_LRC2_10_6.txt b/ortools/routing/parsers/testdata/pdptw_LRC2_10_6.txt similarity index 100% rename from ortools/routing/testdata/pdptw_LRC2_10_6.txt rename to ortools/routing/parsers/testdata/pdptw_LRC2_10_6.txt diff --git a/ortools/routing/testdata/pdtsp_prob10b.txt b/ortools/routing/parsers/testdata/pdtsp_prob10b.txt similarity index 100% rename from ortools/routing/testdata/pdtsp_prob10b.txt rename to ortools/routing/parsers/testdata/pdtsp_prob10b.txt diff --git a/ortools/routing/testdata/pdtsp_prob10b.txt.gz b/ortools/routing/parsers/testdata/pdtsp_prob10b.txt.gz similarity index 100% rename from ortools/routing/testdata/pdtsp_prob10b.txt.gz rename to ortools/routing/parsers/testdata/pdtsp_prob10b.txt.gz diff --git a/ortools/routing/testdata/rc201.0 b/ortools/routing/parsers/testdata/rc201.0 similarity index 100% rename from ortools/routing/testdata/rc201.0 rename to ortools/routing/parsers/testdata/rc201.0 diff --git a/ortools/routing/testdata/solomon.zip b/ortools/routing/parsers/testdata/solomon.zip similarity index 100% rename from ortools/routing/testdata/solomon.zip rename to ortools/routing/parsers/testdata/solomon.zip diff --git a/ortools/routing/testdata/solomon_bp_c101.mps b/ortools/routing/parsers/testdata/solomon_bp_c101.mps similarity index 100% rename from ortools/routing/testdata/solomon_bp_c101.mps rename to ortools/routing/parsers/testdata/solomon_bp_c101.mps diff --git 
a/ortools/routing/testdata/solomon_bp_c101.pb b/ortools/routing/parsers/testdata/solomon_bp_c101.pb similarity index 100% rename from ortools/routing/testdata/solomon_bp_c101.pb rename to ortools/routing/parsers/testdata/solomon_bp_c101.pb diff --git a/ortools/routing/testdata/solomon_check_id.md b/ortools/routing/parsers/testdata/solomon_check_id.md similarity index 100% rename from ortools/routing/testdata/solomon_check_id.md rename to ortools/routing/parsers/testdata/solomon_check_id.md diff --git a/ortools/routing/testdata/solomon_check_id.txt b/ortools/routing/parsers/testdata/solomon_check_id.txt similarity index 100% rename from ortools/routing/testdata/solomon_check_id.txt rename to ortools/routing/parsers/testdata/solomon_check_id.txt diff --git a/ortools/routing/testdata/tsplib_F-n45-k4.vrp b/ortools/routing/parsers/testdata/tsplib_F-n45-k4.vrp similarity index 100% rename from ortools/routing/testdata/tsplib_F-n45-k4.vrp rename to ortools/routing/parsers/testdata/tsplib_F-n45-k4.vrp diff --git a/ortools/routing/testdata/tsplib_Kytojoki_33.vrp b/ortools/routing/parsers/testdata/tsplib_Kytojoki_33.vrp similarity index 100% rename from ortools/routing/testdata/tsplib_Kytojoki_33.vrp rename to ortools/routing/parsers/testdata/tsplib_Kytojoki_33.vrp diff --git a/ortools/routing/testdata/tsplib_ar9152.tour b/ortools/routing/parsers/testdata/tsplib_ar9152.tour similarity index 100% rename from ortools/routing/testdata/tsplib_ar9152.tour rename to ortools/routing/parsers/testdata/tsplib_ar9152.tour diff --git a/ortools/routing/testdata/tsplib_ar9152.tsp b/ortools/routing/parsers/testdata/tsplib_ar9152.tsp similarity index 100% rename from ortools/routing/testdata/tsplib_ar9152.tsp rename to ortools/routing/parsers/testdata/tsplib_ar9152.tsp diff --git a/ortools/routing/tsplib_parser.cc b/ortools/routing/parsers/tsplib_parser.cc similarity index 99% rename from ortools/routing/tsplib_parser.cc rename to ortools/routing/parsers/tsplib_parser.cc index 
09e1a3f0c6..836625285c 100644 --- a/ortools/routing/tsplib_parser.cc +++ b/ortools/routing/parsers/tsplib_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/tsplib_parser.h" +#include "ortools/routing/parsers/tsplib_parser.h" #include #include diff --git a/ortools/routing/tsplib_parser.h b/ortools/routing/parsers/tsplib_parser.h similarity index 97% rename from ortools/routing/tsplib_parser.h rename to ortools/routing/parsers/tsplib_parser.h index 61bf932d6b..75e48f24a3 100644 --- a/ortools/routing/tsplib_parser.h +++ b/ortools/routing/parsers/tsplib_parser.h @@ -21,8 +21,8 @@ // follow the TSPLIB95 format (described at // http://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/DOC.PS). -#ifndef OR_TOOLS_ROUTING_TSPLIB_PARSER_H_ -#define OR_TOOLS_ROUTING_TSPLIB_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_TSPLIB_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_TSPLIB_PARSER_H_ #include #include @@ -33,7 +33,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "ortools/base/types.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { @@ -249,4 +249,4 @@ class CVRPToursParser final { }; } // namespace operations_research -#endif // OR_TOOLS_ROUTING_TSPLIB_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_TSPLIB_PARSER_H_ diff --git a/ortools/routing/tsplib_parser_test.cc b/ortools/routing/parsers/tsplib_parser_test.cc similarity index 98% rename from ortools/routing/tsplib_parser_test.cc rename to ortools/routing/parsers/tsplib_parser_test.cc index 16981f9c94..089ba721c2 100644 --- a/ortools/routing/tsplib_parser_test.cc +++ b/ortools/routing/parsers/tsplib_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/tsplib_parser.h" +#include "ortools/routing/parsers/tsplib_parser.h" #include #include @@ -250,9 +250,9 @@ TEST(TspLibParserTest, ParseHCPAdjList) { TEST(TspLibParserTest, ParseKytojoki33Depot) { // This file inverts EDGE_WEIGHT_TYPE and EDGE_WEIGHT_FORMAT. - std::string file_name = file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), - ROOT_DIR "ortools/routing/testdata/", - "tsplib_Kytojoki_33.vrp"); + std::string file_name = file::JoinPath( + absl::GetFlag(FLAGS_test_srcdir), + ROOT_DIR "ortools/routing/parsers/testdata/", "tsplib_Kytojoki_33.vrp"); TspLibParser parser; EXPECT_TRUE(parser.LoadFile(file_name)); // The depot is a new node, given by its coordinates, instead of an existing diff --git a/ortools/routing/tsptw_parser.cc b/ortools/routing/parsers/tsptw_parser.cc similarity index 99% rename from ortools/routing/tsptw_parser.cc rename to ortools/routing/parsers/tsptw_parser.cc index 052f715885..208a3bf64e 100644 --- a/ortools/routing/tsptw_parser.cc +++ b/ortools/routing/parsers/tsptw_parser.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/routing/tsptw_parser.h" +#include "ortools/routing/parsers/tsptw_parser.h" #include #include diff --git a/ortools/routing/tsptw_parser.h b/ortools/routing/parsers/tsptw_parser.h similarity index 94% rename from ortools/routing/tsptw_parser.h rename to ortools/routing/parsers/tsptw_parser.h index dd074c2037..b853151e18 100644 --- a/ortools/routing/tsptw_parser.h +++ b/ortools/routing/parsers/tsptw_parser.h @@ -18,15 +18,15 @@ // http://lopez-ibanez.eu/tsptw-instances and // https://homepages.dcc.ufmg.br/~rfsilva/tsptw. 
-#ifndef OR_TOOLS_ROUTING_TSPTW_PARSER_H_ -#define OR_TOOLS_ROUTING_TSPTW_PARSER_H_ +#ifndef OR_TOOLS_ROUTING_PARSERS_TSPTW_PARSER_H_ +#define OR_TOOLS_ROUTING_PARSERS_TSPTW_PARSER_H_ #include #include #include #include "ortools/base/types.h" -#include "ortools/routing/simple_graph.h" +#include "ortools/routing/parsers/simple_graph.h" namespace operations_research { @@ -85,4 +85,4 @@ class TspTWParser final { } // namespace operations_research -#endif // OR_TOOLS_ROUTING_TSPTW_PARSER_H_ +#endif // OR_TOOLS_ROUTING_PARSERS_TSPTW_PARSER_H_ diff --git a/ortools/routing/tsptw_parser_test.cc b/ortools/routing/parsers/tsptw_parser_test.cc similarity index 89% rename from ortools/routing/tsptw_parser_test.cc rename to ortools/routing/parsers/tsptw_parser_test.cc index e2b675d340..1632e6c59a 100644 --- a/ortools/routing/tsptw_parser_test.cc +++ b/ortools/routing/parsers/tsptw_parser_test.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "ortools/routing/tsptw_parser.h" +#include "ortools/routing/parsers/tsptw_parser.h" #include @@ -40,9 +40,11 @@ TEST(TspTWParserTest, LoadDataSet) { const bool has_coordinates[] = {false, false, true}; int count = 0; for (const std::string& data : - {ROOT_DIR "ortools/routing/testdata/rc201.0", - ROOT_DIR "ortools/routing/testdata/n20w20.001.txt", - ROOT_DIR "ortools/routing/testdata/n20w20.002.txt"}) { + {ROOT_DIR "ortools/routing/parsers/testdata/rc201.0", + ROOT_DIR "ortools/routing/parsers/testdata/" + "n20w20.001.txt", + ROOT_DIR "ortools/routing/parsers/testdata/" + "n20w20.002.txt"}) { TspTWParser parser; EXPECT_TRUE(parser.LoadFile( file::JoinPath(absl::GetFlag(FLAGS_test_srcdir), data))); diff --git a/ortools/routing/samples/BUILD.bazel b/ortools/routing/samples/BUILD.bazel index 7c50189233..4386ad8982 100644 --- a/ortools/routing/samples/BUILD.bazel +++ b/ortools/routing/samples/BUILD.bazel @@ -17,7 +17,7 @@ cc_binary( deps = [ "//ortools/base", "//ortools/constraint_solver:routing", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", ], ) @@ -27,7 +27,7 @@ cc_binary( deps = [ "//ortools/base", "//ortools/constraint_solver:routing", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", ], ) @@ -38,7 +38,7 @@ cc_binary( "//ortools/base", "//ortools/constraint_solver:routing", "//ortools/constraint_solver:routing_enums_cc_proto", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", "@com_google_absl//absl/strings", ], ) @@ -49,7 +49,7 @@ cc_binary( deps = [ "//ortools/base", "//ortools/constraint_solver:routing", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", ], ) @@ -59,7 +59,7 @@ cc_binary( deps = [ "//ortools/base", "//ortools/constraint_solver:routing", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", "@com_google_absl//absl/strings", ], ) @@ -70,6 +70,6 @@ cc_binary( deps = [ "//ortools/base", 
"//ortools/constraint_solver:routing", - "//ortools/routing:cvrptw_lib", + "//ortools/routing/parsers:cvrptw_lib", ], ) diff --git a/ortools/routing/samples/cvrp_disjoint_tw.cc b/ortools/routing/samples/cvrp_disjoint_tw.cc index 9a1eed4977..d16da26daf 100644 --- a/ortools/routing/samples/cvrp_disjoint_tw.cc +++ b/ortools/routing/samples/cvrp_disjoint_tw.cc @@ -37,7 +37,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw.cc b/ortools/routing/samples/cvrptw.cc index f12f5cffab..32ba7b4554 100644 --- a/ortools/routing/samples/cvrptw.cc +++ b/ortools/routing/samples/cvrptw.cc @@ -35,7 +35,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_soft_capacity.cc b/ortools/routing/samples/cvrptw_soft_capacity.cc index 96060b8e8f..8d0f91071a 100644 --- a/ortools/routing/samples/cvrptw_soft_capacity.cc +++ b/ortools/routing/samples/cvrptw_soft_capacity.cc @@ -34,7 +34,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git 
a/ortools/routing/samples/cvrptw_with_breaks.cc b/ortools/routing/samples/cvrptw_with_breaks.cc index e87ebee9e2..52cecec566 100644 --- a/ortools/routing/samples/cvrptw_with_breaks.cc +++ b/ortools/routing/samples/cvrptw_with_breaks.cc @@ -42,7 +42,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_with_precedences.cc b/ortools/routing/samples/cvrptw_with_precedences.cc index 43ef7c635b..dedf0ba2e6 100644 --- a/ortools/routing/samples/cvrptw_with_precedences.cc +++ b/ortools/routing/samples/cvrptw_with_precedences.cc @@ -37,7 +37,7 @@ #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" #include "ortools/graph/graph_builder.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_with_refueling.cc b/ortools/routing/samples/cvrptw_with_refueling.cc index 86cbdddb94..b48db7b9d3 100644 --- a/ortools/routing/samples/cvrptw_with_refueling.cc +++ b/ortools/routing/samples/cvrptw_with_refueling.cc @@ -33,7 +33,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_with_resources.cc 
b/ortools/routing/samples/cvrptw_with_resources.cc index 4a761afa15..a5d57e8863 100644 --- a/ortools/routing/samples/cvrptw_with_resources.cc +++ b/ortools/routing/samples/cvrptw_with_resources.cc @@ -35,7 +35,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc index cf87aab69f..19f734435b 100644 --- a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc +++ b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc @@ -34,7 +34,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" using operations_research::Assignment; using operations_research::DefaultRoutingSearchParameters; diff --git a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc index d662d15492..d57e087ad8 100644 --- a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc +++ b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc @@ -33,7 +33,7 @@ #include "ortools/constraint_solver/routing_index_manager.h" #include "ortools/constraint_solver/routing_parameters.h" #include "ortools/constraint_solver/routing_parameters.pb.h" -#include "ortools/routing/cvrptw_lib.h" +#include "ortools/routing/parsers/cvrptw_lib.h" #include "ortools/util/range_query_function.h" #include "ortools/util/step_function.h" From 
e3d3c610a45febdfeb5d7c6af32f40c23272c0f1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 14:12:22 +0100 Subject: [PATCH 021/392] version: Bump to 9.10 --- Version.txt | 2 +- ortools/base/BUILD.bazel | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Version.txt b/Version.txt index 90a08d0df4..c52d900720 100644 --- a/Version.txt +++ b/Version.txt @@ -1,3 +1,3 @@ OR_TOOLS_MAJOR=9 -OR_TOOLS_MINOR=9 +OR_TOOLS_MINOR=10 #PRE_RELEASE=YES diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index a5dbaa5e45..112f6357ba 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -51,7 +51,7 @@ cc_library( ], copts = [ "-DOR_TOOLS_MAJOR=9", - "-DOR_TOOLS_MINOR=9", + "-DOR_TOOLS_MINOR=10", "-DOR_TOOLS_PATCH=9999", ], linkopts = select({ From 52073a163e21dd0af6dfd5087906b5ea7c792ad6 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 15:13:28 +0100 Subject: [PATCH 022/392] Fix GScip math_opt test --- ortools/math_opt/solver_tests/callback_tests.cc | 9 +++++++-- ortools/math_opt/solver_tests/ip_parameter_tests.cc | 6 ++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/ortools/math_opt/solver_tests/callback_tests.cc b/ortools/math_opt/solver_tests/callback_tests.cc index 787e6b2ef6..c40f9edcd3 100644 --- a/ortools/math_opt/solver_tests/callback_tests.cc +++ b/ortools/math_opt/solver_tests/callback_tests.cc @@ -730,10 +730,15 @@ TEST_P(CallbackTest, EventNodeCut) { } ASSERT_OK_AND_ASSIGN(const SolveResult result, Solve(model, GetParam().solver_type, args)); - if (use_cut) { + // Even with use_cut: False, SCIP v900 return OPTIMAL + if (GetParam().solver_type == SolverType::kGscip) { EXPECT_THAT(result, IsOptimal(2.0)); } else { - EXPECT_THAT(result.termination, LimitIs(Limit::kNode)); + if (use_cut) { + EXPECT_THAT(result, IsOptimal(2.0)); + } else { + EXPECT_THAT(result.termination, LimitIs(Limit::kNode)); + } } } } diff --git 
a/ortools/math_opt/solver_tests/ip_parameter_tests.cc b/ortools/math_opt/solver_tests/ip_parameter_tests.cc index ad987178d1..874917100e 100644 --- a/ortools/math_opt/solver_tests/ip_parameter_tests.cc +++ b/ortools/math_opt/solver_tests/ip_parameter_tests.cc @@ -621,6 +621,9 @@ TEST_P(IpParameterTest, NodeLimit) { "where disabling primal heuristics seems to have little effect, see " "https://paste.googleplex.com/5694421105377280"; } + if (GetParam().solver_type == SolverType::kGscip) { + GTEST_SKIP() << "This test does not work for SCIP v900"; + } const std::unique_ptr model = DenseIndependentSet(true); SolveParameters params = {.node_limit = 1}; // Weaken the solver as much as possible so it does not solve the problem to @@ -997,6 +1000,9 @@ TEST_P(IpParameterTest, SolutionLimitOneAndCutoff) { // Tests the interaction between cutoff and an additional limit. TEST_P(IpParameterTest, NoSolutionsBelowCutoffEarlyTermination) { + if (GetParam().solver_type == SolverType::kGscip) { + GTEST_SKIP() << "This test does not work for SCIP v900"; + } if (!(GetParam().parameter_support.supports_cutoff)) { // We have already tested that the right error message is returned. 
return; From e2221e0e31a7d4b238122cf08aa0cb4a5c00de6f Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 16:40:39 +0100 Subject: [PATCH 023/392] cleanup scip.BUILD.bazel --- bazel/scip.BUILD.bazel | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/bazel/scip.BUILD.bazel b/bazel/scip.BUILD.bazel index d31e346730..ff358ded2b 100644 --- a/bazel/scip.BUILD.bazel +++ b/bazel/scip.BUILD.bazel @@ -40,12 +40,10 @@ PLATFORM_FLAGS = select({ "on_linux": [ "-Wunknown-pragmas", "-fexceptions", - "-DSYM=bliss" ], "on_macos": [ "-Wunknown-pragmas", "-fexceptions", - "-DSYM=bliss" ], "on_windows": [ "/DSYM=none", @@ -106,26 +104,11 @@ cc_library( ]), copts = [ "$(STACK_FRAME_UNLIMITED)", # src/scip/reader_cnf.c - "-DSCIP_WITH_ZLIB", - "-DWITH_SCIPDEF", - "-DSCIP_ROUNDING_FE", "-DTPI_TNY", # src/tpi/type_tpi_tnycthrd.h - # Compile in thead-safe mode (required since we use TPI_TNYC). Note, - # one does not technically need to add this, as SCIP code always - # uses syntax like "#ifndef NPARASCIP". But let's be explicit here. - "-DPARASCIP", "-Isrc", "-Isrc/scip", ] + PLATFORM_FLAGS, - defines = [ - # Scip v800 optionally depends on scip/config.h and - # scip/scip_export.h that are generated by build system. - # - # We need every library and binary that depends on SCIP libraries to - # define this macro. That is why we use `defines' here instead of - # `copts' or `local_defines'. 
- "NO_CONFIG_HEADER", - ], + defines = [], features = ["-parse_headers"], includes = [ "src", From 7bd7a29e519c104f84137df61fc50656e344c814 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 16:53:12 +0100 Subject: [PATCH 024/392] linear_solver: remove dead code --- .../users_allowing_model_storage.cc | 78 ------------------- .../users_allowing_model_storage.h | 28 ------- 2 files changed, 106 deletions(-) delete mode 100644 ortools/linear_solver/users_allowing_model_storage.cc delete mode 100644 ortools/linear_solver/users_allowing_model_storage.h diff --git a/ortools/linear_solver/users_allowing_model_storage.cc b/ortools/linear_solver/users_allowing_model_storage.cc deleted file mode 100644 index 9ec377becb..0000000000 --- a/ortools/linear_solver/users_allowing_model_storage.cc +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "ortools/linear_solver/users_allowing_model_storage.h" - -#include "absl/container/flat_hash_set.h" -#include "absl/strings/string_view.h" - -namespace operations_research { -const absl::flat_hash_set& UsersAllowingModelStorage() { - static const auto* const set = new absl::flat_hash_set{ - // Approved by default. - "operations-research", - - // Approved hmajaya@ on 2019/05/17 by e-mail. - "apex-eng", - - // Approved by jhuchette@ on 2024-02-29 by code review. 
- "apps-capacity-auxon", - "autocap-automation", - "autocap-solver-access", - - // Approved by mlubin@, dapplegate@, and bwydrowski@ on 2019/05/17 - // by e-mail. As of 2020/04/08, prod queries are sent by "muppet-packer". - "blokus-prod", - "blokus-planning", - "blokus-packer-dev", - "muppet-packer", - - // Approved by sjoakley@ on 2019/10/22 by e-mail. - "cloud-capacity", - "techinfra-capacity", - - // Approved by sgowal@ on 2019/05/17 by e-mail. - "deepmind-research", - - // Approved by yxz@ on 2019/05/17 by e-mail. As of 2020/04/09, many - // queries are sent by "logs-placement". - "logs-front-door", - "logs-front-door-unprivileged", - "logs-placement", - - // Approved by ansha@ on 2019/05/17 by e-mail. We add netarch-wand-* mdb - // groups explicitly, because as of 2019/10/22 our naive logic collects - // a model iff the mdb group listed here matches exactly the mdb group - // of the RPC sender (i.e., we do not check group transitive memberships, - // and here all netarch-wand-* groups belong to tetraligh-jobs). - "tetralight-jobs", - "netarch-wand-prod", - "netarch-wand-dev", - "netarch-wand-test", - - // Approved by haoxu@ on 2019/05/17 by e-mail. - // As of 2019/10/22, some models are sent by user xiaob@ (instead of - // raptical@), so we add the user explicitly to this allowlist. - "cluster-planning-urp-state-runner", - "cluster-planning-urp-compute", - "raptical", - "xiaob", - - // Approved by nharsha@ and mattard@ on 2019/05/17 by e-mail. 
- "resource-planning-optimization", - "resource-planning-optimization-eng-team", - "resource-portal-test", - }; - return *set; -} -} // namespace operations_research diff --git a/ortools/linear_solver/users_allowing_model_storage.h b/ortools/linear_solver/users_allowing_model_storage.h deleted file mode 100644 index 481c9b7310..0000000000 --- a/ortools/linear_solver/users_allowing_model_storage.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2010-2024 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ -#define OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ - -#include "absl/container/flat_hash_set.h" -#include "absl/strings/string_view.h" - -namespace operations_research { -// List of *exact* MDB users who agreed that we store their MIP/LP/math -// (anonymized) models. -// IMPORTANT: The MDB user has to match exactly with an item in this list: we -// don't do ACL expansion, regexp matching or anything alike. 
-const absl::flat_hash_set& UsersAllowingModelStorage(); -} // namespace operations_research - -#endif // OR_TOOLS_LINEAR_SOLVER_USERS_ALLOWING_MODEL_STORAGE_H_ From 8f85d85be8c26c5861ec11a6907cbd503aaa25fe Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 25 Mar 2024 17:19:45 +0100 Subject: [PATCH 025/392] bazel: use scip TPI_NONE TPI_TNY won't work in JAVA wrapper --- bazel/scip-v900.patch | 2 +- bazel/scip.BUILD.bazel | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/bazel/scip-v900.patch b/bazel/scip-v900.patch index fb82f42a48..520e019eb9 100644 --- a/bazel/scip-v900.patch +++ b/bazel/scip-v900.patch @@ -64,7 +64,7 @@ index 0000000000..871fde8e55 +/* #undef SCIP_NO_SIGACTION */ +/* #undef SCIP_NO_STRTOK_R */ +/* #undef TPI_NONE */ -+#define TPI_TNY ++#define TPI_NONE +/* #undef TPI_OMP */ +#define SCIP_THREADSAFE +#define WITH_SCIPDEF diff --git a/bazel/scip.BUILD.bazel b/bazel/scip.BUILD.bazel index ff358ded2b..8271c09fb9 100644 --- a/bazel/scip.BUILD.bazel +++ b/bazel/scip.BUILD.bazel @@ -88,7 +88,8 @@ cc_library( ], ) + BLISS_FILE + [ "src/scip/exprinterpret_none.c", - "src/tpi/tpi_tnycthrd.c", + #"src/tpi/tpi_tnycthrd.c", + "src/tpi/tpi_none.c", ], hdrs = glob( [ @@ -104,7 +105,8 @@ cc_library( ]), copts = [ "$(STACK_FRAME_UNLIMITED)", # src/scip/reader_cnf.c - "-DTPI_TNY", # src/tpi/type_tpi_tnycthrd.h + #"-DTPI_TNY", # src/tpi/type_tpi_tnycthrd.h + "-DTPI_NONE", # src/tpi/type_tpi_none.h "-Isrc", "-Isrc/scip", ] + PLATFORM_FLAGS, From 42e3414d5f2747f155148132d63d92aa0ec7e1f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Omn=C3=A8s?= <26088210+flomnes@users.noreply.github.com> Date: Mon, 25 Mar 2024 13:56:23 +0100 Subject: [PATCH 026/392] [CMAKE] Required version 3.18 -> 3.20 (#4155) ``` elseif(UNIX) cmake_path(RELATIVE_PATH CMAKE_INSTALL_FULL_LIBDIR BASE_DIRECTORY ${CMAKE_INSTALL_FULL_BINDIR} OUTPUT_VARIABLE libdir_relative_path) set_target_properties(solve PROPERTIES INSTALL_RPATH "$ORIGIN/${libdir_relative_path}") 
endif() ``` `cmake_path` was added in CMake 3.20, see https://cmake.org/cmake/help/latest/command/cmake_path.html --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f254ba579..49a996ba4a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,7 +12,7 @@ # limitations under the License. # This file is just an orchestration -cmake_minimum_required(VERSION 3.18) +cmake_minimum_required(VERSION 3.20) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(utils) From 91fe48478d609deaace1dffc90aa6326cdba913b Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 26 Mar 2024 12:33:42 +0100 Subject: [PATCH 027/392] reformat --- ortools/init/python/init_test.py | 1 + ortools/sat/docs/channeling.md | 7 +++---- ortools/sat/samples/index_first_boolvar_true_sample_sat.py | 7 +++---- ortools/util/python/sorted_interval_list_test.py | 1 + 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ortools/init/python/init_test.py b/ortools/init/python/init_test.py index 62060a96a1..7eb2402a91 100755 --- a/ortools/init/python/init_test.py +++ b/ortools/init/python/init_test.py @@ -19,6 +19,7 @@ from ortools.init.python import init class InitTest(absltest.TestCase): + def test_logging(self): print("test_logging") init.CppBridge.init_logging("pywrapinit_test.py") diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index e29c647a8d..3b99d3e6f0 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -416,7 +416,7 @@ class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback): print(line) -def index_first_solution_true_sample_sat(): +def index_of_first_bool_at_true_sample_sat(): """Compute the index of the first Boolean variable set to true.""" # Model. @@ -446,15 +446,14 @@ def index_first_solution_true_sample_sat(): # Force the solver to follow the decision strategy exactly. 
solver.parameters.search_branching = cp_model.FIXED_SEARCH - # Enumerate all solutions. - solver.parameters.enumerate_all_solutions = True # Search and print out all solutions. + solver.parameters.enumerate_all_solutions = True solution_printer = VarArraySolutionPrinter(index, bool_vars) solver.solve(model, solution_printer) -index_first_solution_true_sample_sat() +index_of_first_bool_at_true_sample_sat() ``` This displays the following: diff --git a/ortools/sat/samples/index_first_boolvar_true_sample_sat.py b/ortools/sat/samples/index_first_boolvar_true_sample_sat.py index 7d70de0528..e93b776fbb 100644 --- a/ortools/sat/samples/index_first_boolvar_true_sample_sat.py +++ b/ortools/sat/samples/index_first_boolvar_true_sample_sat.py @@ -33,7 +33,7 @@ class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback): print(line) -def index_first_solution_true_sample_sat(): +def index_of_first_bool_at_true_sample_sat(): """Compute the index of the first Boolean variable set to true.""" # Model. @@ -63,12 +63,11 @@ def index_first_solution_true_sample_sat(): # Force the solver to follow the decision strategy exactly. solver.parameters.search_branching = cp_model.FIXED_SEARCH - # Enumerate all solutions. - solver.parameters.enumerate_all_solutions = True # Search and print out all solutions. 
+ solver.parameters.enumerate_all_solutions = True solution_printer = VarArraySolutionPrinter(index, bool_vars) solver.solve(model, solution_printer) -index_first_solution_true_sample_sat() +index_of_first_bool_at_true_sample_sat() diff --git a/ortools/util/python/sorted_interval_list_test.py b/ortools/util/python/sorted_interval_list_test.py index 99f2887f61..b33eb7d878 100755 --- a/ortools/util/python/sorted_interval_list_test.py +++ b/ortools/util/python/sorted_interval_list_test.py @@ -19,6 +19,7 @@ from ortools.util.python import sorted_interval_list class SortedIntervalListTest(absltest.TestCase): + def testCtorAndGetter(self): bool_domain = sorted_interval_list.Domain(0, 1) self.assertEqual(2, bool_domain.size()) From ee2e20ce4d6833de2dac926692acb704b6cb1524 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 26 Mar 2024 12:33:58 +0100 Subject: [PATCH 028/392] reformat --- ortools/linear_solver/python/model_builder.py | 12 ++++-------- .../python/model_builder_helper_test.py | 1 + ortools/linear_solver/python/model_builder_test.py | 7 +++++++ ortools/linear_solver/python/pywraplp_test.py | 1 + 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/ortools/linear_solver/python/model_builder.py b/ortools/linear_solver/python/model_builder.py index 24e78f94a4..715506616c 100644 --- a/ortools/linear_solver/python/model_builder.py +++ b/ortools/linear_solver/python/model_builder.py @@ -883,12 +883,10 @@ class Model: return clone @typing.overload - def _get_linear_constraints(self, constraints: Optional[pd.Index]) -> pd.Index: - ... + def _get_linear_constraints(self, constraints: Optional[pd.Index]) -> pd.Index: ... @typing.overload - def _get_linear_constraints(self, constraints: pd.Series) -> pd.Series: - ... + def _get_linear_constraints(self, constraints: pd.Series) -> pd.Series: ... 
def _get_linear_constraints( self, constraints: Optional[_IndexOrSeries] = None @@ -898,12 +896,10 @@ class Model: return constraints @typing.overload - def _get_variables(self, variables: Optional[pd.Index]) -> pd.Index: - ... + def _get_variables(self, variables: Optional[pd.Index]) -> pd.Index: ... @typing.overload - def _get_variables(self, variables: pd.Series) -> pd.Series: - ... + def _get_variables(self, variables: pd.Series) -> pd.Series: ... def _get_variables( self, variables: Optional[_IndexOrSeries] = None diff --git a/ortools/linear_solver/python/model_builder_helper_test.py b/ortools/linear_solver/python/model_builder_helper_test.py index ab6ff00ea4..a2b143a392 100644 --- a/ortools/linear_solver/python/model_builder_helper_test.py +++ b/ortools/linear_solver/python/model_builder_helper_test.py @@ -27,6 +27,7 @@ from ortools.linear_solver.python import model_builder_helper class PywrapModelBuilderHelperTest(absltest.TestCase): + def test_export_model_proto_to_mps_string(self): model = model_builder_helper.ModelBuilderHelper() model.set_name("testmodel") diff --git a/ortools/linear_solver/python/model_builder_test.py b/ortools/linear_solver/python/model_builder_test.py index 32d1c2d640..fa070b6cbe 100644 --- a/ortools/linear_solver/python/model_builder_test.py +++ b/ortools/linear_solver/python/model_builder_test.py @@ -421,6 +421,7 @@ ENDATA class InternalHelperTest(absltest.TestCase): + def test_anonymous_variables(self): helper = mb.Model().helper index = helper.add_var() @@ -435,6 +436,7 @@ class InternalHelperTest(absltest.TestCase): class LinearBaseTest(parameterized.TestCase): + def setUp(self): super().setUp() simple_model = mb.Model() @@ -615,6 +617,7 @@ class LinearBaseTest(parameterized.TestCase): class LinearBaseErrorsTest(absltest.TestCase): + def test_unknown_linear_type(self): with self.assertRaisesRegex(TypeError, r"Unrecognized linear expression"): @@ -637,6 +640,7 @@ class LinearBaseErrorsTest(absltest.TestCase): class 
BoundedLinearBaseTest(parameterized.TestCase): + def setUp(self): super().setUp() simple_model = mb.Model() @@ -730,6 +734,7 @@ class BoundedLinearBaseTest(parameterized.TestCase): class BoundedLinearBaseErrorsTest(absltest.TestCase): + def test_bounded_linear_expression_as_bool(self): with self.assertRaisesRegex(NotImplementedError, "Boolean value"): model = mb.Model() @@ -738,6 +743,7 @@ class BoundedLinearBaseErrorsTest(absltest.TestCase): class ModelBuilderErrorsTest(absltest.TestCase): + def test_new_var_series_errors(self): with self.assertRaisesRegex(TypeError, r"Non-index object"): model = mb.Model() @@ -1566,6 +1572,7 @@ class ModelBuilderObjectiveTest(parameterized.TestCase): class ModelBuilderProtoTest(absltest.TestCase): + def test_export_to_proto(self): expected = linear_solver_pb2.MPModelProto() text_format.Parse( diff --git a/ortools/linear_solver/python/pywraplp_test.py b/ortools/linear_solver/python/pywraplp_test.py index a683d4be0c..e0633e390f 100644 --- a/ortools/linear_solver/python/pywraplp_test.py +++ b/ortools/linear_solver/python/pywraplp_test.py @@ -42,6 +42,7 @@ constraint { class PyWrapLp(unittest.TestCase): + def test_proto(self): input_proto = linear_solver_pb2.MPModelProto() text_format.Merge(TEXT_MODEL, input_proto) From 2d92deebea3fef717aa60d315fd536bdc390d28e Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 26 Mar 2024 12:34:16 +0100 Subject: [PATCH 029/392] tweaks --- ortools/gurobi/environment.cc | 1 + ortools/linear_solver/proto_solver/gurobi_proto_solver.cc | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index 5cc0217c48..aea8955f08 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/flags/flag.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" diff --git 
a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc index eb31bbcf7b..9f4cbf0734 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc @@ -316,6 +316,9 @@ absl::StatusOr GurobiSolveProto( /*varnames=*/nullptr)); GRBenv* const model_env = GRBgetenv(gurobi_model); + RETURN_IF_GUROBI_ERROR( + GRBsetintparam(model_env, GRB_INT_PAR_OUTPUTFLAG, + request->enable_internal_solver_output())); if (request->has_solver_specific_parameters()) { const auto parameters_status = SetSolverSpecificParameters( request->solver_specific_parameters(), model_env); @@ -331,9 +334,6 @@ absl::StatusOr GurobiSolveProto( GRBsetdblparam(model_env, GRB_DBL_PAR_TIMELIMIT, request->solver_time_limit_seconds())); } - RETURN_IF_GUROBI_ERROR( - GRBsetintparam(model_env, GRB_INT_PAR_OUTPUTFLAG, - request->enable_internal_solver_output())); const int variable_size = model.variable_size(); bool has_integer_variables = false; From f2187ed41c6a81b1874399a748a556ac9f90bd8c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 26 Mar 2024 12:34:22 +0100 Subject: [PATCH 030/392] reformat --- ortools/scheduling/python/rcpsp_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ortools/scheduling/python/rcpsp_test.py b/ortools/scheduling/python/rcpsp_test.py index 4b3bfa1396..b50aaa1ee4 100644 --- a/ortools/scheduling/python/rcpsp_test.py +++ b/ortools/scheduling/python/rcpsp_test.py @@ -23,6 +23,7 @@ FLAGS = flags.FLAGS class RcpspTest(absltest.TestCase): + def testParseAndAccess(self): parser = rcpsp.RcpspParser() data = "ortools/scheduling/testdata/j301_1.sm" From f3f9bc44a2b297935901c9f5e5d461e0a79b0bcd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 26 Mar 2024 12:35:12 +0100 Subject: [PATCH 031/392] [CP-SAT] improve diffn and linear loading --- ortools/sat/cp_model_loader.cc | 54 +++---- ortools/sat/diffn.cc | 2 +- ortools/sat/integer.h | 2 
+- ortools/sat/integer_expr.h | 280 +++++++++++---------------------- ortools/sat/precedences.h | 46 ++---- 5 files changed, 130 insertions(+), 254 deletions(-) diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index 09c9b13d7b..afeb1caa85 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1384,39 +1384,27 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { } if (ct.linear().domain_size() == 2) { - int64_t lb = ct.linear().domain(0); - int64_t ub = ct.linear().domain(1); - if (min_sum >= lb) lb = std::numeric_limits::min(); - if (max_sum <= ub) ub = std::numeric_limits::max(); - - if (!HasEnforcementLiteral(ct)) { - if (all_booleans) { - // TODO(user): we should probably also implement an - // half-reified version of this constraint. - std::vector cst; - for (int i = 0; i < vars.size(); ++i) { - const int ref = ct.linear().vars(i); - cst.push_back({mapping->Literal(ref), coeffs[i]}); - } - m->Add(BooleanLinearConstraint(lb, ub, &cst)); - } else { - if (lb != std::numeric_limits::min()) { - m->Add(WeightedSumGreaterOrEqual(vars, coeffs, lb)); - } - if (ub != std::numeric_limits::max()) { - m->Add(WeightedSumLowerOrEqual(vars, coeffs, ub)); - } + const int64_t lb = ct.linear().domain(0); + const int64_t ub = ct.linear().domain(1); + const std::vector enforcement_literals = + mapping->Literals(ct.enforcement_literal()); + if (all_booleans && enforcement_literals.empty()) { + // TODO(user): we should probably also implement an + // half-reified version of this constraint. 
+ std::vector cst; + for (int i = 0; i < vars.size(); ++i) { + const int ref = ct.linear().vars(i); + cst.push_back({mapping->Literal(ref), coeffs[i]}); } + m->GetOrCreate()->AddLinearConstraint( + /*use_lower_bound=*/(min_sum < lb), lb, + /*use_upper_bound=*/(max_sum > ub), ub, &cst); } else { - const std::vector enforcement_literals = - mapping->Literals(ct.enforcement_literal()); - if (lb != std::numeric_limits::min()) { - m->Add(ConditionalWeightedSumGreaterOrEqual(enforcement_literals, vars, - coeffs, lb)); + if (min_sum < lb) { + AddWeightedSumGreaterOrEqual(enforcement_literals, vars, coeffs, lb, m); } - if (ub != std::numeric_limits::max()) { - m->Add(ConditionalWeightedSumLowerOrEqual(enforcement_literals, vars, - coeffs, ub)); + if (max_sum > ub) { + AddWeightedSumLowerOrEqual(enforcement_literals, vars, coeffs, ub, m); } } return; @@ -1463,12 +1451,10 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { for_enumeration.push_back(subdomain_literal); if (min_sum < lb) { - m->Add(ConditionalWeightedSumGreaterOrEqual({subdomain_literal}, vars, - coeffs, lb)); + AddWeightedSumGreaterOrEqual({subdomain_literal}, vars, coeffs, lb, m); } if (max_sum > ub) { - m->Add(ConditionalWeightedSumLowerOrEqual({subdomain_literal}, vars, - coeffs, ub)); + AddWeightedSumLowerOrEqual({subdomain_literal}, vars, coeffs, ub, m); } } diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 2ffc5028da..cd2d84b735 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -428,7 +428,7 @@ NonOverlappingRectanglesEnergyPropagator::FindConflict( .use_pairwise = true, .use_dff_f0 = true, .use_dff_f2 = true, - .brute_force_threshold = 6, + .brute_force_threshold = 7, .dff2_max_number_of_parameters_to_check = 100}); if (opp_result.GetResult() == OrthogonalPackingResult::Status::INFEASIBLE && (best_conflict.opp_result.GetResult() != diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 42f0968268..b1856dc3ad 100644 --- a/ortools/sat/integer.h +++ 
b/ortools/sat/integer.h @@ -1912,7 +1912,7 @@ inline std::function Equality(IntegerVariable v, int64_t value) { // is the same as using different underlying variable for an integer literal and // its negation. inline std::function Implication( - const std::vector& enforcement_literals, IntegerLiteral i) { + absl::Span enforcement_literals, IntegerLiteral i) { return [=](Model* model) { IntegerTrail* integer_trail = model->GetOrCreate(); if (i.bound <= integer_trail->LowerBound(i.var)) { diff --git a/ortools/sat/integer_expr.h b/ortools/sat/integer_expr.h index 2c9e099dea..2c4289857a 100644 --- a/ortools/sat/integer_expr.h +++ b/ortools/sat/integer_expr.h @@ -417,65 +417,9 @@ template inline std::function WeightedSumLowerOrEqual( const std::vector& vars, const VectorInt& coefficients, int64_t upper_bound) { - // Special cases. - CHECK_GE(vars.size(), 1); - if (vars.size() == 1) { - const int64_t c = coefficients[0]; - CHECK_NE(c, 0); - if (c > 0) { - return LowerOrEqual( - vars[0], - FloorRatio(IntegerValue(upper_bound), IntegerValue(c)).value()); - } else { - return GreaterOrEqual( - vars[0], - CeilRatio(IntegerValue(-upper_bound), IntegerValue(-c)).value()); - } - } - return [=](Model* model) { - const SatParameters& params = *model->GetOrCreate(); - if (!params.new_linear_propagation()) { - if (vars.size() == 2 && (coefficients[0] == 1 || coefficients[0] == -1) && - (coefficients[1] == 1 || coefficients[1] == -1)) { - return Sum2LowerOrEqual( - coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), - coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), - upper_bound)(model); - } - if (vars.size() == 3 && (coefficients[0] == 1 || coefficients[0] == -1) && - (coefficients[1] == 1 || coefficients[1] == -1) && - (coefficients[2] == 1 || coefficients[2] == -1)) { - return Sum3LowerOrEqual( - coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), - coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), - coefficients[2] == 1 ? 
vars[2] : NegationOf(vars[2]), - upper_bound)(model); - } - } - - if (params.new_linear_propagation()) { - const bool ok = model->GetOrCreate()->AddConstraint( - {}, vars, - std::vector(coefficients.begin(), coefficients.end()), - IntegerValue(upper_bound)); - if (!ok) { - auto* sat_solver = model->GetOrCreate(); - if (sat_solver->CurrentDecisionLevel() == 0) { - sat_solver->NotifyThatModelIsUnsat(); - } else { - LOG(FATAL) << "We currently do not support adding conflicting " - "constraint at positive level."; - } - } - } else { - IntegerSumLE* constraint = new IntegerSumLE( - {}, vars, - std::vector(coefficients.begin(), coefficients.end()), - IntegerValue(upper_bound), model); - constraint->RegisterWith(model->GetOrCreate()); - model->TakeOwnership(constraint); - } + return AddWeightedSumLowerOrEqual({}, vars, coefficients, upper_bound, + model); }; } @@ -502,52 +446,60 @@ inline std::function FixedWeightedSum( } // enforcement_literals => sum <= upper_bound -template -inline std::function ConditionalWeightedSumLowerOrEqual( - const std::vector& enforcement_literals, - const std::vector& vars, const VectorInt& coefficients, - int64_t upper_bound) { - // Special cases. - CHECK_GE(vars.size(), 1); +inline void AddWeightedSumLowerOrEqual( + absl::Span enforcement_literals, + absl::Span vars, + absl::Span coefficients, int64_t upper_bound, Model* model) { + // Linear1. 
+ DCHECK_GE(vars.size(), 1); if (vars.size() == 1) { - CHECK_NE(coefficients[0], 0); - if (coefficients[0] > 0) { - return Implication( - enforcement_literals, - IntegerLiteral::LowerOrEqual( - vars[0], FloorRatio(IntegerValue(upper_bound), - IntegerValue(coefficients[0])))); + DCHECK_NE(coefficients[0], 0); + IntegerVariable var = vars[0]; + IntegerValue coeff(coefficients[0]); + if (coeff < 0) { + var = NegationOf(var); + coeff = -coeff; + } + const IntegerValue rhs = FloorRatio(IntegerValue(upper_bound), coeff); + if (enforcement_literals.empty()) { + model->Add(LowerOrEqual(var, rhs.value())); } else { - return Implication( + model->Add(Implication(enforcement_literals, + IntegerLiteral::LowerOrEqual(var, rhs))); + } + return; + } + + // Detect precedences with 2 and 3 terms. + const SatParameters& params = *model->GetOrCreate(); + if (!params.new_linear_propagation()) { + if (vars.size() == 2 && (coefficients[0] == 1 || coefficients[0] == -1) && + (coefficients[1] == 1 || coefficients[1] == -1)) { + AddConditionalSum2LowerOrEqual( enforcement_literals, - IntegerLiteral::GreaterOrEqual( - vars[0], CeilRatio(IntegerValue(-upper_bound), - IntegerValue(-coefficients[0])))); + coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), + coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), upper_bound, + model); + return; + } + if (vars.size() == 3 && (coefficients[0] == 1 || coefficients[0] == -1) && + (coefficients[1] == 1 || coefficients[1] == -1) && + (coefficients[2] == 1 || coefficients[2] == -1)) { + AddConditionalSum3LowerOrEqual( + enforcement_literals, + coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), + coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), + coefficients[2] == 1 ? 
vars[2] : NegationOf(vars[2]), upper_bound, + model); + return; } } - return [=](Model* model) { - const SatParameters& params = *model->GetOrCreate(); - if (!params.new_linear_propagation()) { - if (vars.size() == 2 && (coefficients[0] == 1 || coefficients[0] == -1) && - (coefficients[1] == 1 || coefficients[1] == -1)) { - return ConditionalSum2LowerOrEqual( - coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), - coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), upper_bound, - enforcement_literals)(model); - } - if (vars.size() == 3 && (coefficients[0] == 1 || coefficients[0] == -1) && - (coefficients[1] == 1 || coefficients[1] == -1) && - (coefficients[2] == 1 || coefficients[2] == -1)) { - return ConditionalSum3LowerOrEqual( - coefficients[0] == 1 ? vars[0] : NegationOf(vars[0]), - coefficients[1] == 1 ? vars[1] : NegationOf(vars[1]), - coefficients[2] == 1 ? vars[2] : NegationOf(vars[2]), upper_bound, - enforcement_literals)(model); - } - } - - // If value == min(expression), then we can avoid creating the sum. + // If value == min(expression), then we can avoid creating the sum. + // + // TODO(user): Deal with the case with no enforcement literal, in case the + // presolve was turned off? 
+ if (!enforcement_literals.empty()) { IntegerValue expression_min(0); auto* integer_trail = model->GetOrCreate(); for (int i = 0; i < vars.size(); ++i) { @@ -581,69 +533,63 @@ inline std::function ConditionalWeightedSumLowerOrEqual( } model->Add(ClauseConstraint(clause)); } - } else { - if (params.new_linear_propagation()) { - const bool ok = model->GetOrCreate()->AddConstraint( - enforcement_literals, vars, - std::vector(coefficients.begin(), coefficients.end()), - IntegerValue(upper_bound)); - if (!ok) { - auto* sat_solver = model->GetOrCreate(); - if (sat_solver->CurrentDecisionLevel() == 0) { - sat_solver->NotifyThatModelIsUnsat(); - } else { - LOG(FATAL) << "We currently do not support adding conflicting " - "constraint at positive level."; - } - } + return; + } + } + + if (params.new_linear_propagation()) { + const bool ok = model->GetOrCreate()->AddConstraint( + enforcement_literals, vars, + std::vector(coefficients.begin(), coefficients.end()), + IntegerValue(upper_bound)); + if (!ok) { + auto* sat_solver = model->GetOrCreate(); + if (sat_solver->CurrentDecisionLevel() == 0) { + sat_solver->NotifyThatModelIsUnsat(); } else { - IntegerSumLE* constraint = new IntegerSumLE( - enforcement_literals, vars, - std::vector(coefficients.begin(), coefficients.end()), - IntegerValue(upper_bound), model); - constraint->RegisterWith(model->GetOrCreate()); - model->TakeOwnership(constraint); + LOG(FATAL) << "We currently do not support adding conflicting " + "constraint at positive level."; } } - }; + } else { + IntegerSumLE* constraint = new IntegerSumLE( + enforcement_literals, vars, + std::vector(coefficients.begin(), coefficients.end()), + IntegerValue(upper_bound), model); + constraint->RegisterWith(model->GetOrCreate()); + model->TakeOwnership(constraint); + } } // enforcement_literals => sum >= lower_bound -template -inline std::function ConditionalWeightedSumGreaterOrEqual( - const std::vector& enforcement_literals, - const std::vector& vars, const VectorInt& 
coefficients, - int64_t lower_bound) { +inline void AddWeightedSumGreaterOrEqual( + absl::Span enforcement_literals, + absl::Span vars, + absl::Span coefficients, int64_t lower_bound, Model* model) { // We just negate everything and use an <= constraint. std::vector negated_coeffs(coefficients.begin(), coefficients.end()); for (int64_t& ref : negated_coeffs) ref = -ref; - return ConditionalWeightedSumLowerOrEqual(enforcement_literals, vars, - negated_coeffs, -lower_bound); + AddWeightedSumLowerOrEqual(enforcement_literals, vars, negated_coeffs, + -lower_bound, model); } -// Weighted sum <= constant reified. -template -inline std::function WeightedSumLowerOrEqualReif( - Literal is_le, const std::vector& vars, - const VectorInt& coefficients, int64_t upper_bound) { +// TODO(user): Delete once Telamon use new function. +inline std::function ConditionalWeightedSumLowerOrEqual( + const std::vector& enforcement_literals, + const std::vector& vars, + const std::vector& coefficients, int64_t upper_bound) { return [=](Model* model) { - model->Add(ConditionalWeightedSumLowerOrEqual({is_le}, vars, coefficients, - upper_bound)); - model->Add(ConditionalWeightedSumGreaterOrEqual( - {is_le.Negated()}, vars, coefficients, upper_bound + 1)); + AddWeightedSumLowerOrEqual(enforcement_literals, vars, coefficients, + upper_bound, model); }; } - -// Weighted sum >= constant reified. 
-template -inline std::function WeightedSumGreaterOrEqualReif( - Literal is_ge, const std::vector& vars, - const VectorInt& coefficients, int64_t lower_bound) { +inline std::function ConditionalWeightedSumGreaterOrEqual( + const std::vector& enforcement_literals, + const std::vector& vars, + const std::vector& coefficients, int64_t upper_bound) { return [=](Model* model) { - model->Add(ConditionalWeightedSumGreaterOrEqual({is_ge}, vars, coefficients, - lower_bound)); - model->Add(ConditionalWeightedSumLowerOrEqual( - {is_ge.Negated()}, vars, coefficients, lower_bound - 1)); + AddWeightedSumGreaterOrEqual(enforcement_literals, vars, coefficients, + upper_bound, model); }; } @@ -702,12 +648,12 @@ inline void LoadConditionalLinearConstraint( } if (cst.ub < kMaxIntegerValue) { - model->Add(ConditionalWeightedSumLowerOrEqual( - converted_literals, vars, converted_coeffs, cst.ub.value())); + AddWeightedSumLowerOrEqual(converted_literals, vars, converted_coeffs, + cst.ub.value(), model); } if (cst.lb > kMinIntegerValue) { - model->Add(ConditionalWeightedSumGreaterOrEqual( - converted_literals, vars, converted_coeffs, cst.lb.value())); + AddWeightedSumGreaterOrEqual(converted_literals, vars, converted_coeffs, + cst.lb.value(), model); } } @@ -720,40 +666,6 @@ inline void AddConditionalAffinePrecedence( LoadConditionalLinearConstraint(enforcement_literals, builder.Build(), model); } -// Weighted sum == constant reified. -// TODO(user): Simplify if the constant is at the edge of the possible values. -template -inline std::function FixedWeightedSumReif( - Literal is_eq, const std::vector& vars, - const VectorInt& coefficients, int64_t value) { - return [=](Model* model) { - // We creates two extra Boolean variables in this case. The alternative is - // to code a custom propagator for the direction equality => reified. 
- const Literal is_le = Literal(model->Add(NewBooleanVariable()), true); - const Literal is_ge = Literal(model->Add(NewBooleanVariable()), true); - model->Add(ReifiedBoolAnd({is_le, is_ge}, is_eq)); - model->Add(WeightedSumLowerOrEqualReif(is_le, vars, coefficients, value)); - model->Add(WeightedSumGreaterOrEqualReif(is_ge, vars, coefficients, value)); - }; -} - -// Weighted sum != constant. -// TODO(user): Simplify if the constant is at the edge of the possible values. -template -inline std::function WeightedSumNotEqual( - const std::vector& vars, const VectorInt& coefficients, - int64_t value) { - return [=](Model* model) { - // Exactly one of these alternative must be true. - const Literal is_lt = Literal(model->Add(NewBooleanVariable()), true); - const Literal is_gt = is_lt.Negated(); - model->Add(ConditionalWeightedSumLowerOrEqual(is_lt, vars, coefficients, - value - 1)); - model->Add(ConditionalWeightedSumGreaterOrEqual(is_gt, vars, coefficients, - value + 1)); - }; -} - // Model-based function to create an IntegerVariable that corresponds to the // given weighted sum of other IntegerVariables. // diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 41245d5395..b021d55154 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -554,44 +554,22 @@ inline std::function AffineCoeffOneLowerOrEqualWithOffset( }; } -// a + b <= ub. -inline std::function Sum2LowerOrEqual(IntegerVariable a, - IntegerVariable b, - int64_t ub) { - return LowerOrEqualWithOffset(a, NegationOf(b), -ub); -} - // l => (a + b <= ub). -inline std::function ConditionalSum2LowerOrEqual( - IntegerVariable a, IntegerVariable b, int64_t ub, - const std::vector& enforcement_literals) { - return [=](Model* model) { - PrecedencesPropagator* p = model->GetOrCreate(); - p->AddPrecedenceWithAllOptions(a, NegationOf(b), IntegerValue(-ub), - kNoIntegerVariable, enforcement_literals); - }; -} - -// a + b + c <= ub. 
-inline std::function Sum3LowerOrEqual(IntegerVariable a, - IntegerVariable b, - IntegerVariable c, - int64_t ub) { - return [=](Model* model) { - PrecedencesPropagator* p = model->GetOrCreate(); - p->AddPrecedenceWithAllOptions(a, NegationOf(c), IntegerValue(-ub), b, {}); - }; +inline void AddConditionalSum2LowerOrEqual( + absl::Span enforcement_literals, IntegerVariable a, + IntegerVariable b, int64_t ub, Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); + p->AddPrecedenceWithAllOptions(a, NegationOf(b), IntegerValue(-ub), + kNoIntegerVariable, enforcement_literals); } // l => (a + b + c <= ub). -inline std::function ConditionalSum3LowerOrEqual( - IntegerVariable a, IntegerVariable b, IntegerVariable c, int64_t ub, - const std::vector& enforcement_literals) { - return [=](Model* model) { - PrecedencesPropagator* p = model->GetOrCreate(); - p->AddPrecedenceWithAllOptions(a, NegationOf(c), IntegerValue(-ub), b, - enforcement_literals); - }; +inline void AddConditionalSum3LowerOrEqual( + absl::Span enforcement_literals, IntegerVariable a, + IntegerVariable b, IntegerVariable c, int64_t ub, Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); + p->AddPrecedenceWithAllOptions(a, NegationOf(c), IntegerValue(-ub), b, + enforcement_literals); } // a >= b. 
From a1ed8e3e85871c0a9d687058e2867c4ca3cfeb10 Mon Sep 17 00:00:00 2001 From: dongjinlong Date: Tue, 26 Mar 2024 22:56:14 +0800 Subject: [PATCH 032/392] chore: remove repetitive words in comments Signed-off-by: dongjinlong --- examples/contrib/costas_array.cs | 2 +- examples/contrib/costas_array.py | 2 +- examples/contrib/debruijn_binary.py | 2 +- examples/contrib/kenken2.cs | 2 +- examples/contrib/lectures.cs | 2 +- examples/contrib/lectures.py | 2 +- examples/contrib/pandigital_numbers.py | 2 +- examples/contrib/set_covering_deployment.py | 2 +- examples/notebook/contrib/costas_array.ipynb | 2 +- examples/notebook/contrib/debruijn_binary.ipynb | 2 +- examples/notebook/contrib/lectures.ipynb | 2 +- examples/notebook/contrib/pandigital_numbers.ipynb | 2 +- examples/notebook/contrib/set_covering_deployment.ipynb | 2 +- examples/python/README.md | 2 +- makefiles/Makefile.java.mk | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/examples/contrib/costas_array.cs b/examples/contrib/costas_array.cs index 97bcfe9807..15a3d85e82 100644 --- a/examples/contrib/costas_array.cs +++ b/examples/contrib/costas_array.cs @@ -62,7 +62,7 @@ public class CostasArray // Fix the values in the lower triangle in the // difference matrix to -n+1. This removes variants - // of the difference matrix for the the same Costas array. + // of the difference matrix for the same Costas array. for (int i = 0; i < n; i++) { for (int j = 0; j <= i; j++) diff --git a/examples/contrib/costas_array.py b/examples/contrib/costas_array.py index a83970571c..b6a2d38380 100644 --- a/examples/contrib/costas_array.py +++ b/examples/contrib/costas_array.py @@ -91,7 +91,7 @@ def main(n=6): # Fix the values in the lower triangle in the # difference matrix to -n+1. This removes variants - # of the difference matrix for the the same Costas array. + # of the difference matrix for the same Costas array. 
for i in range(n): for j in range(i + 1): solver.Add(differences[i, j] == -n + 1) diff --git a/examples/contrib/debruijn_binary.py b/examples/contrib/debruijn_binary.py index 7fdb00d190..c90fc3d0c4 100644 --- a/examples/contrib/debruijn_binary.py +++ b/examples/contrib/debruijn_binary.py @@ -21,7 +21,7 @@ base**n. - Compare with the the web based programs: + Compare with the web based programs: http://www.hakank.org/comb/debruijn.cgi http://www.hakank.org/comb/debruijn_arb.cgi diff --git a/examples/contrib/kenken2.cs b/examples/contrib/kenken2.cs index bd1bfe18d8..9420eb6772 100644 --- a/examples/contrib/kenken2.cs +++ b/examples/contrib/kenken2.cs @@ -48,7 +48,7 @@ public class KenKen2 else { // For length > 2 then res is either the sum - // the the product of the segment + // the product of the segment // sum the numbers int len = cc.Length / 2; diff --git a/examples/contrib/lectures.cs b/examples/contrib/lectures.cs index f6eef390b8..5056a89d2c 100644 --- a/examples/contrib/lectures.cs +++ b/examples/contrib/lectures.cs @@ -27,7 +27,7 @@ public class Lectures * Biggs: Discrete Mathematics (2nd ed), page 187. * """ * Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6. - * Among the the potential audience there are people who wish to hear both + * Among the potential audience there are people who wish to hear both * * - v1 and v2 * - v1 and v4 diff --git a/examples/contrib/lectures.py b/examples/contrib/lectures.py index ee53ffb80b..cfa75d7bce 100644 --- a/examples/contrib/lectures.py +++ b/examples/contrib/lectures.py @@ -18,7 +18,7 @@ Biggs: Discrete Mathematics (2nd ed), page 187. ''' Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6. 
- Among the the potential audience there are people who wish to hear both + Among the potential audience there are people who wish to hear both - v1 and v2 - v1 and v4 diff --git a/examples/contrib/pandigital_numbers.py b/examples/contrib/pandigital_numbers.py index 5fb81f1a53..d180cf3d01 100644 --- a/examples/contrib/pandigital_numbers.py +++ b/examples/contrib/pandigital_numbers.py @@ -47,7 +47,7 @@ * Wikipedia http://en.wikipedia.org/wiki/Pandigital_number - Compare with the the following models: + Compare with the following models: * MiniZinc: http://www.hakank.org/minizinc/pandigital_numbers.mzn * Comet : http://www.hakank.org/comet/pandigital_numbers.co * ECLiPSe : http://www.hakank.org/eclipse/pandigital_numbers.ecl diff --git a/examples/contrib/set_covering_deployment.py b/examples/contrib/set_covering_deployment.py index 5ee5796dc7..92440fc4bd 100644 --- a/examples/contrib/set_covering_deployment.py +++ b/examples/contrib/set_covering_deployment.py @@ -26,7 +26,7 @@ army placements to secure the Roman Empire. ''' - Compare with the the following models: + Compare with the following models: * MiniZinc: http://www.hakank.org/minizinc/set_covering_deployment.mzn * Comet : http://www.hakank.org/comet/set_covering_deployment.co * Gecode : http://www.hakank.org/gecode/set_covering_deployment.cpp diff --git a/examples/notebook/contrib/costas_array.ipynb b/examples/notebook/contrib/costas_array.ipynb index 6d0551af58..71ee63633d 100644 --- a/examples/notebook/contrib/costas_array.ipynb +++ b/examples/notebook/contrib/costas_array.ipynb @@ -160,7 +160,7 @@ "\n", " # Fix the values in the lower triangle in the\n", " # difference matrix to -n+1. 
This removes variants\n", - " # of the difference matrix for the the same Costas array.\n", + " # of the difference matrix for the same Costas array.\n", " for i in range(n):\n", " for j in range(i + 1):\n", " solver.Add(differences[i, j] == -n + 1)\n", diff --git a/examples/notebook/contrib/debruijn_binary.ipynb b/examples/notebook/contrib/debruijn_binary.ipynb index 00f1120fb8..55b7012b6a 100644 --- a/examples/notebook/contrib/debruijn_binary.ipynb +++ b/examples/notebook/contrib/debruijn_binary.ipynb @@ -82,7 +82,7 @@ " base**n.\n", "\n", "\n", - " Compare with the the web based programs:\n", + " Compare with the web based programs:\n", " http://www.hakank.org/comb/debruijn.cgi\n", " http://www.hakank.org/comb/debruijn_arb.cgi\n", "\n", diff --git a/examples/notebook/contrib/lectures.ipynb b/examples/notebook/contrib/lectures.ipynb index 0635b926f0..a17cfbb0c0 100644 --- a/examples/notebook/contrib/lectures.ipynb +++ b/examples/notebook/contrib/lectures.ipynb @@ -79,7 +79,7 @@ " Biggs: Discrete Mathematics (2nd ed), page 187.\n", " '''\n", " Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6.\n", - " Among the the potential audience there are people who wish to hear both\n", + " Among the potential audience there are people who wish to hear both\n", "\n", " - v1 and v2\n", " - v1 and v4\n", diff --git a/examples/notebook/contrib/pandigital_numbers.ipynb b/examples/notebook/contrib/pandigital_numbers.ipynb index 5252767074..3a2391e8cd 100644 --- a/examples/notebook/contrib/pandigital_numbers.ipynb +++ b/examples/notebook/contrib/pandigital_numbers.ipynb @@ -108,7 +108,7 @@ " * Wikipedia http://en.wikipedia.org/wiki/Pandigital_number\n", "\n", "\n", - " Compare with the the following models:\n", + " Compare with the following models:\n", " * MiniZinc: http://www.hakank.org/minizinc/pandigital_numbers.mzn\n", " * Comet : http://www.hakank.org/comet/pandigital_numbers.co\n", " * ECLiPSe : 
http://www.hakank.org/eclipse/pandigital_numbers.ecl\n", diff --git a/examples/notebook/contrib/set_covering_deployment.ipynb b/examples/notebook/contrib/set_covering_deployment.ipynb index 89a308d8e0..779bc0b556 100644 --- a/examples/notebook/contrib/set_covering_deployment.ipynb +++ b/examples/notebook/contrib/set_covering_deployment.ipynb @@ -87,7 +87,7 @@ " army placements to secure the Roman Empire.\n", " '''\n", "\n", - " Compare with the the following models:\n", + " Compare with the following models:\n", " * MiniZinc: http://www.hakank.org/minizinc/set_covering_deployment.mzn\n", " * Comet : http://www.hakank.org/comet/set_covering_deployment.co\n", " * Gecode : http://www.hakank.org/gecode/set_covering_deployment.cpp\n", diff --git a/examples/python/README.md b/examples/python/README.md index 8acef41e29..6973b81922 100644 --- a/examples/python/README.md +++ b/examples/python/README.md @@ -8,7 +8,7 @@ as this allows you to keep up-to-date with the latest Python frameworks. Wherever you have `ortools` package installed, be sure to import it from your python file. # Execution -For running the examples you can use the the following command: +For running the examples you can use the following command: ```shell python3 -m pip install --upgrade --user ortools python3 .py diff --git a/makefiles/Makefile.java.mk b/makefiles/Makefile.java.mk index 04a8df77ee..0184b12ae0 100644 --- a/makefiles/Makefile.java.mk +++ b/makefiles/Makefile.java.mk @@ -40,7 +40,7 @@ java: @echo JAR_BIN = $(JAR_BIN) @echo JAVA_BIN = $(JAVA_BIN) @echo MVN_BIN = $(MVN_BIN) - $(warning Either JAVA support was turned off, or the the makefile cannot\ + $(warning Either JAVA support was turned off, or the makefile cannot\ find 'java' or 'maven' command which is needed for build. \ Please make sure it is installed and in system path. \ Or turn java support ON.) 
From 5828741c75bf0f2aa7c3d80df5827e93eecee7ac Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 29 Mar 2024 15:05:05 +0100 Subject: [PATCH 033/392] [CP-SAT] improve no_overlap_2d propagation; improve linear propagator + precedences + disjunctive connection; change the way maximization is implemented in python --- ortools/sat/2d_packing_brute_force.cc | 250 +++++++++++++++++--------- ortools/sat/BUILD.bazel | 6 +- ortools/sat/disjunctive.cc | 193 ++++++++++++++------ ortools/sat/disjunctive.h | 11 +- ortools/sat/integer_expr.h | 4 + ortools/sat/intervals.cc | 34 ++-- ortools/sat/java/BUILD.bazel | 4 +- ortools/sat/linear_propagation.cc | 38 +++- ortools/sat/linear_propagation.h | 21 +-- ortools/sat/precedences.cc | 229 +++++++++++++++-------- ortools/sat/precedences.h | 158 +++++++++------- ortools/sat/python/cp_model.py | 12 +- ortools/sat/theta_tree.h | 2 +- 13 files changed, 635 insertions(+), 327 deletions(-) diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc index 542fc2a5e4..3e92d3a6dc 100644 --- a/ortools/sat/2d_packing_brute_force.cc +++ b/ortools/sat/2d_packing_brute_force.cc @@ -14,19 +14,24 @@ #include "ortools/sat/2d_packing_brute_force.h" #include +#include #include #include #include #include "absl/container/inlined_vector.h" +#include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" +#include "ortools/util/bitset.h" namespace operations_research { namespace sat { +static constexpr int kMaxProblemSize = 16; + namespace { enum class RectangleRelationship { @@ -36,40 +41,29 @@ enum class RectangleRelationship { OVERLAP, }; -// TODO(user): write faster and less hacky implementation RectangleRelationship GetRectangleRelationship(const Rectangle& rectangle, const Rectangle& other) { - if (!rectangle.IsDisjoint(other)) { + if (rectangle.x_min < other.x_max && other.x_min < rectangle.x_max && + rectangle.y_min < 
other.y_max && other.y_min < rectangle.y_max) { return RectangleRelationship::OVERLAP; } - const Rectangle item_position_left = {.x_min = rectangle.x_min - 1, - .x_max = rectangle.x_max - 1, - .y_min = rectangle.y_min, - .y_max = rectangle.y_max}; - const Rectangle item_position_bottom = {.x_min = rectangle.x_min, - .x_max = rectangle.x_max, - .y_min = rectangle.y_min - 1, - .y_max = rectangle.y_max - 1}; - if (!item_position_left.IsDisjoint(other)) { + + if (rectangle.x_min == other.x_max && rectangle.y_min < other.y_max && + other.y_min < rectangle.y_max) { return RectangleRelationship::TOUCHING_LEFT; } - if (!item_position_bottom.IsDisjoint(other)) { + if (rectangle.x_min < other.x_max && other.x_min < rectangle.x_max && + rectangle.y_min == other.y_max) { return RectangleRelationship::TOUCHING_BOTTOM; } return RectangleRelationship::TOUCHING_NEITHER_LEFT_OR_BOTTOM; } bool ShouldPlaceItemAtPosition( - int i, IntegerValue x, IntegerValue y, - absl::Span sizes_x, - absl::Span sizes_y, + int i, const Rectangle& item_position, std::pair bounding_box_size, - absl::InlinedVector& item_positions, - absl::InlinedVector& placed_item_indexes) { - const int num_items = sizes_x.size(); - const Rectangle item_position = { - .x_min = x, .x_max = x + sizes_x[i], .y_min = y, .y_max = y + sizes_y[i]}; - + absl::Span item_positions, + const Bitset64& placed_item_indexes) { // Check if it fits in the BB. if (item_position.x_max > bounding_box_size.first || item_position.y_max > bounding_box_size.second) { @@ -77,36 +71,68 @@ bool ShouldPlaceItemAtPosition( } // Break symmetry: force 0th item to be in the bottom left quarter. 
- if (i == 0 && - (2 * item_position.x_min > bounding_box_size.first - sizes_x[i] || - 2 * item_position.y_min > bounding_box_size.second - sizes_y[i])) { + if (i == 0 && (2 * item_position.x_min > + bounding_box_size.first - item_position.SizeX() || + 2 * item_position.y_min > + bounding_box_size.second - item_position.SizeY())) { return false; } // Check if it is conflicting with another item. - bool is_conflicting_left = x == 0; - bool is_conflicting_bottom = y == 0; - for (int j = 0; j < num_items; ++j) { - if (i != j && placed_item_indexes[j]) { - const RectangleRelationship pos = - GetRectangleRelationship(item_position, item_positions[j]); - if (pos == RectangleRelationship::OVERLAP) { - return false; - } - is_conflicting_left = - is_conflicting_left || pos == RectangleRelationship::TOUCHING_LEFT; - is_conflicting_bottom = is_conflicting_bottom || - pos == RectangleRelationship::TOUCHING_BOTTOM; + bool touches_something_on_left = item_position.x_min == 0; + bool touches_something_on_bottom = item_position.y_min == 0; + for (const int j : placed_item_indexes) { + DCHECK_NE(i, j); + const RectangleRelationship pos = + GetRectangleRelationship(item_position, item_positions[j]); + if (pos == RectangleRelationship::OVERLAP) { + return false; } + touches_something_on_left = touches_something_on_left || + pos == RectangleRelationship::TOUCHING_LEFT; + touches_something_on_bottom = touches_something_on_bottom || + pos == RectangleRelationship::TOUCHING_BOTTOM; } // Finally, check if it touching something both on the bottom and to the left. 
- if (!is_conflicting_left || !is_conflicting_bottom) { + if (!touches_something_on_left || !touches_something_on_bottom) { return false; } return true; } +struct PotentialPositionForItem { + IntegerValue x; + IntegerValue y; + bool already_explored; + + Rectangle GetRectangle(IntegerValue x_size, IntegerValue y_size) const { + return {.x_min = x, .x_max = x + x_size, .y_min = y, .y_max = y + y_size}; + } +}; + +// This implementation search for a solution in the following order: +// - first place the 0-th item in the bottom left corner; +// - then place the 1-th item either on the bottom of the bounding box to the +// right of the 0-th item, or on the left of the bounding box on top of it; +// - keep placing items, while respecting that each item should touch something +// on both its bottom and left sides until either all items are placed (in +// this case a solution is found and return) or we found an item that cannot +// be placed on any possible solution. +// - if an item cannot be placed, backtrack: try to place the last successfully +// placed item in another position. +// +// This is a recursive implementation, each call will place the first non placed +// item in a fixed order. Backtrack occur when we return from a recursive call. +// +// This return false iff it is infeasible to place the other items given the +// already placed ones. +// +// This implementation is very similar to the "Left-Most Active Only" method +// described in Clautiaux, François, Jacques Carlier, and Aziz Moukrim. "A new +// exact method for the two-dimensional orthogonal packing problem." European +// Journal of Operational Research 183.3 (2007): 1196-1211. +// // TODO(user): try the graph-based algorithm by S. Fekete, J. Shepers, and // J. Van Der Ween, https://arxiv.org/abs/cs/0604045. 
bool BruteForceOrthogonalPackingImpl( @@ -114,21 +140,22 @@ bool BruteForceOrthogonalPackingImpl( absl::Span sizes_y, std::pair bounding_box_size, IntegerValue smallest_x, IntegerValue smallest_y, - absl::InlinedVector& item_positions, - absl::InlinedVector& placed_item_indexes, - const absl::InlinedVector< - absl::InlinedVector, 16>, 16>& - potential_item_positions) { + absl::Span item_positions, Bitset64& placed_item_indexes, + absl::Span> + potential_item_positions, + IntegerValue slack) { const auto add_position_if_valid = [&item_positions, bounding_box_size, &sizes_x, &sizes_y, &placed_item_indexes]( - absl::InlinedVector, 16>& - positions, - int i, IntegerValue x, IntegerValue y) { - if (ShouldPlaceItemAtPosition(i, x, y, sizes_x, sizes_y, - bounding_box_size, item_positions, - placed_item_indexes)) { - positions.push_back({x, y}); + absl::InlinedVector& positions, int i, + IntegerValue x, IntegerValue y) { + const Rectangle rect = {.x_min = x, + .x_max = x + sizes_x[i], + .y_min = y, + .y_max = y + sizes_y[i]}; + if (ShouldPlaceItemAtPosition(i, rect, bounding_box_size, + item_positions, placed_item_indexes)) { + positions.push_back({x, y, false}); } }; @@ -143,16 +170,34 @@ bool BruteForceOrthogonalPackingImpl( } has_unplaced_item = true; - placed_item_indexes[i] = true; - for (std::pair potential_position : + placed_item_indexes.Set(i); + for (const PotentialPositionForItem& potential_position : potential_item_positions[i]) { + if (potential_position.already_explored) { + continue; + } // Place the item on its candidate position. 
- item_positions[i] = {.x_min = potential_position.first, - .x_max = potential_position.first + sizes_x[i], - .y_min = potential_position.second, - .y_max = potential_position.second + sizes_y[i]}; + item_positions[i] = + potential_position.GetRectangle(sizes_x[i], sizes_y[i]); const Rectangle& item_position = item_positions[i]; + IntegerValue slack_loss = 0; + if (bounding_box_size.first - item_position.x_max < smallest_x) { + // After placing this item, nothing will fit between it and the top of + // the bounding box. Thus we have some space that will remain empty and + // we can deduce it from our budget. + slack_loss += item_position.SizeY() * + (bounding_box_size.first - item_position.x_max); + } + if (bounding_box_size.second - item_position.y_max < smallest_y) { + // Same as above but with the right edge. + slack_loss += item_position.SizeX() * + (bounding_box_size.second - item_position.y_max); + } + if (slack < slack_loss) { + continue; + } + // Now the hard part of the algorithm: create the new "potential // positions" vector after placing this item. Describing the actual set of // acceptable places to put consider for the next item in the search would @@ -177,7 +222,12 @@ bool BruteForceOrthogonalPackingImpl( // |####|OOOOOOOOO|......|x | // +----+---------+------+------+ // - // To make things simpler, we just consider: + // We consider that every item must be touching something (other item or + // the box boundaries) to the left and to the bottom. Thus, when we add a + // new item, it is enough to consider at all positions where it would + // touch the new item on the bottom and something else on the left or + // touch the new item on the left and something else on the bottom. 
So we + // consider the following points: // - all previous positions if they didn't got invalid due to the new // item; // - new position are derived getting the right-top most corner of the @@ -206,23 +256,28 @@ bool BruteForceOrthogonalPackingImpl( // +----+---------+------+------+ // // This method finds potential locations that are not useful for any item, - // but we will detect that by testing each item one by one. - absl::InlinedVector< - absl::InlinedVector, 16>, 16> - new_potential_positions(num_items); - for (int k = 0; k < num_items; ++k) { - if (k == i || !placed_item_indexes[k]) { + // (like the point in the left boundary in the example above) but we will + // detect that by testing each item one by one. Importantly, we only pass + // valid positions down to the next search level. + std::array, + kMaxProblemSize> + new_potential_positions_storage; + absl::Span> + new_potential_positions(new_potential_positions_storage.data(), + num_items); + for (const int k : placed_item_indexes) { + if (k == i) { continue; } - bool add_below = + const bool add_below = // We only add points below this one... - item_positions[k].y_max <= item_position.y_min && + item_positions[k].y_max <= item_position.y_max && // ...and where we can fit at least the smallest element. item_position.x_max + smallest_x <= bounding_box_size.first && item_positions[k].y_max + smallest_y <= bounding_box_size.second; - bool add_left = - item_positions[k].x_max <= item_position.x_min && + const bool add_left = + item_positions[k].x_max <= item_position.x_max && item_positions[k].x_max + smallest_x <= bounding_box_size.first && item_position.y_max + smallest_y <= bounding_box_size.second; for (int j = 0; j < num_items; ++j) { @@ -246,20 +301,26 @@ bool BruteForceOrthogonalPackingImpl( continue; } // First copy previously valid positions that remain valid. 
- for (const std::pair& original_position : + for (const PotentialPositionForItem& original_position : potential_item_positions[j]) { - const Rectangle item_in_pos = { - .x_min = original_position.first, - .x_max = original_position.first + sizes_x[j], - .y_min = original_position.second, - .y_max = original_position.second + sizes_y[j]}; - - if (!item_in_pos.IsDisjoint(item_position)) { + if (!original_position.GetRectangle(sizes_x[j], sizes_y[j]) + .IsDisjoint(item_position)) { // That was a valid position for item j, but now it is in conflict // with newly added item i. continue; } - new_potential_positions[j].push_back(original_position); + if (j < i) { + // We already explored all items of index less than i in all their + // current possible positions and they are all unfeasible. We still + // keep track of whether it fit there or not, since having any item + // that don't fit anywhere is a good stopping criteria. But we don't + // have to retest those positions down in the search tree. + PotentialPositionForItem position = original_position; + position.already_explored = true; + new_potential_positions[j].push_back(position); + } else { + new_potential_positions[j].push_back(original_position); + } } add_position_if_valid(new_potential_positions[j], j, item_positions[i].x_max, 0); @@ -277,13 +338,14 @@ bool BruteForceOrthogonalPackingImpl( } if (BruteForceOrthogonalPackingImpl( sizes_x, sizes_y, bounding_box_size, smallest_x, smallest_y, - item_positions, placed_item_indexes, new_potential_positions)) { + item_positions, placed_item_indexes, new_potential_positions, + slack - slack_loss)) { return true; } } // Placing this item at the current bottom-left positions level failed. // Restore placed_item_indexes to its original value and try another one. 
- placed_item_indexes[i] = false; + placed_item_indexes.Set(i, false); } return !has_unplaced_item; } @@ -297,32 +359,44 @@ std::vector BruteForceOrthogonalPacking( IntegerValue smallest_x = std::numeric_limits::max(); IntegerValue smallest_y = std::numeric_limits::max(); int num_items = sizes_x.size(); - absl::InlinedVector item_index_sorted_by_area_desc(num_items); - absl::InlinedVector< - absl::InlinedVector, 16>, 16> - potential_item_positions(num_items); + CHECK_LE(num_items, kMaxProblemSize); + std::vector item_index_sorted_by_area_desc(num_items); + std::array, kMaxProblemSize> + potential_item_positions_storage; + absl::Span> + potential_item_positions(potential_item_positions_storage.data(), + num_items); for (int i = 0; i < num_items; ++i) { smallest_x = std::min(smallest_x, sizes_x[i]); smallest_y = std::min(smallest_y, sizes_y[i]); item_index_sorted_by_area_desc[i] = i; - potential_item_positions[i].push_back({0, 0}); + potential_item_positions[i].push_back({0, 0, false}); } std::sort(item_index_sorted_by_area_desc.begin(), item_index_sorted_by_area_desc.end(), [sizes_x, sizes_y](int a, int b) { return sizes_x[a] * sizes_y[a] > sizes_x[b] * sizes_y[b]; }); - absl::InlinedVector new_sizes_x(num_items); - absl::InlinedVector new_sizes_y(num_items); + std::array new_sizes_x_storage, + new_sizes_y_storage; + absl::Span new_sizes_x(new_sizes_x_storage.data(), num_items); + absl::Span new_sizes_y(new_sizes_y_storage.data(), num_items); + IntegerValue slack = bounding_box_size.first * bounding_box_size.second; for (int i = 0; i < num_items; ++i) { new_sizes_x[i] = sizes_x[item_index_sorted_by_area_desc[i]]; new_sizes_y[i] = sizes_y[item_index_sorted_by_area_desc[i]]; + slack -= sizes_x[i] * sizes_y[i]; } - absl::InlinedVector item_positions(num_items); - absl::InlinedVector placed_item_indexes(num_items); + if (slack < 0) { + return {}; + } + std::array item_positions_storage; + absl::Span item_positions(item_positions_storage.data(), + num_items); + Bitset64 
placed_item_indexes(num_items); const bool found_solution = BruteForceOrthogonalPackingImpl( new_sizes_x, new_sizes_y, bounding_box_size, smallest_x, smallest_y, - item_positions, placed_item_indexes, potential_item_positions); + item_positions, placed_item_indexes, potential_item_positions, slack); if (!found_solution) { return {}; } diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 4c35261e21..48c412b7cd 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -13,9 +13,9 @@ # Home of CP/SAT solver (which includes SAT, max-SAT and PB problems). -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_java//java:defs.bzl", "java_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -1139,6 +1139,7 @@ cc_library( deps = [ ":integer", ":model", + ":precedences", ":sat_base", ":sat_solver", ":synchronization", @@ -2267,6 +2268,7 @@ cc_library( ":integer", ":integer_search", ":model", + ":restart", ":sat_base", ":sat_parameters_cc_proto", ":sat_solver", diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 07c34c86ed..d1c1509a63 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -15,7 +15,6 @@ #include #include -#include #include #include #include @@ -26,13 +25,11 @@ #include "ortools/base/logging.h" #include "ortools/sat/all_different.h" #include "ortools/sat/integer.h" -#include "ortools/sat/integer_expr.h" #include "ortools/sat/intervals.h" #include "ortools/sat/model.h" #include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_parameters.pb.h" -#include "ortools/sat/sat_solver.h" #include "ortools/sat/theta_tree.h" #include "ortools/sat/timetable.h" #include "ortools/util/sort.h" @@ 
-923,6 +920,9 @@ DisjunctivePrecedences::~DisjunctivePrecedences() { bool DisjunctivePrecedences::Propagate() { if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; window_.clear(); + + // We only need to consider "critical" set of tasks given how we compute the + // min-offset in PropagateSubwindow(). IntegerValue window_end = kMinIntegerValue; for (const TaskTime task_time : helper_->TaskByIncreasingShiftedStartMin()) { const int task = task_time.task_index; @@ -969,66 +969,151 @@ bool DisjunctivePrecedences::PropagateSubwindow() { index_to_end_vars_.push_back(end_exp.var); } window_.resize(new_size); - precedences_->ComputePrecedences(index_to_end_vars_, &before_); + + // Because we use the cached value in the window, we don't really care + // on which order we process them. + precedences_->ComputePrecedences(index_to_end_vars_, &before_, + /*sort_by_var_lb=*/false); const int size = before_.size(); - for (int i = 0; i < size;) { - const IntegerVariable var = before_[i].var; + for (int global_i = 0; global_i < size;) { + const int global_start_i = global_i; + const IntegerVariable var = before_[global_i].var; DCHECK_NE(var, kNoIntegerVariable); - task_set_.Clear(); - const int initial_i = i; - IntegerValue min_offset = kMaxIntegerValue; - for (; i < size && before_[i].var == var; ++i) { - // Because we resized the window, the index is valid. - const TaskTime task_time = window_[before_[i].index]; - - // We have var >= end_exp.var + offset, so - // var >= (end_exp.var + end_exp.constant) + (offset - end_exp.constant) - // var >= task end + new_offset. - const AffineExpression& end_exp = helper_->Ends()[task_time.task_index]; - min_offset = std::min(min_offset, before_[i].offset - end_exp.constant); - - // The task are actually in sorted order, so we do not need to call - // task_set_.Sort(). This property is DCHECKed. 
- task_set_.AddUnsortedEntry({task_time.task_index, task_time.time, - helper_->SizeMin(task_time.task_index)}); + // Decode the set of task before var. + // Note that like in Propagate() we split this set of task into critical + // subpart as there is no point considering them together. + // + // TODO(user): we should probably change the api to return a Span. + // + // TODO(user): If more than one set of task push the same variable, we + // probabaly only want to keep the best push? Maybe we want to process them + // in reverse order of what we do here? + // + // TODO(user): Currently we don't really use the inner_offsets_ except for + // checking that our hash-maps are up to date. The idea is to get rid of + // them for a faster and maybe more dynamic ComputePrecedences(). + indices_before_.clear(); + inner_offsets_.clear(); + IntegerValue local_start; + IntegerValue local_end; + for (; global_i < size; ++global_i) { + const PrecedencesPropagator::IntegerPrecedences& data = before_[global_i]; + if (data.var != var) break; + const int index = data.index; + const auto [t, start_of_t] = window_[index]; + if (global_i == global_start_i) { + local_start = start_of_t; + local_end = local_start + helper_->SizeMin(t); + } else { + if (start_of_t >= local_end) break; + local_end += helper_->SizeMin(t); + } + indices_before_.push_back(index); + inner_offsets_.push_back(data.offset); } - DCHECK_GE(task_set_.SortedTasks().size(), 2); - // TODO(user): Only use the min_offset of the critical task? Or maybe do a - // more general computation to find by how much we can push var? - const IntegerValue new_lb = task_set_.ComputeEndMin() + min_offset; - if (new_lb > integer_trail_->LowerBound(var)) { - const std::vector& sorted_tasks = task_set_.SortedTasks(); + // No need to consider if we don't have at least two tasks before var. + const int num_before = indices_before_.size(); + if (num_before < 2) continue; + skip_.assign(num_before, false); + + // Heuristic. 
+ // We will use the current end-min of all the task in indices_before_ + // to skip task with an offset not large enough. + const IntegerValue best_end_min = local_end; + + // We will consider the end-min of all the subsets [i, num_items) to try to + // push var using the min-offset between var and items of such subset. This + // can be done in linear time by scanning from i = num_items - 1 to 0. + // + // Note that this needs the items in indices_before_ to be sorted by + // their shifted start min (it should be the case). + int best_index = -1; + IntegerValue best_new_lb = kMinIntegerValue; + IntegerValue min_offset = kMaxIntegerValue; + IntegerValue sum_of_duration = 0; + const IntegerValue current_var_lb = integer_trail_->LowerBound(var); + for (int i = num_before; --i >= 0;) { + const TaskTime task_time = window_[indices_before_[i]]; + const AffineExpression& end_exp = helper_->Ends()[task_time.task_index]; + + // Heuristic: do not consider this relations if its offset is clearly bad. + // If we want to get rid of inner_offsets_[], we will have to only do it + // below after the somewhat costly hash lookup to find the offset. + const IntegerValue known_offset = inner_offsets_[i] - end_exp.constant; + if (best_end_min + known_offset <= current_var_lb) { + skip_[i] = true; + continue; + } + + // TODO(user): The hash lookup here is a bit slow. + const IntegerValue inner_offset = + precedence_relations_->GetConditionalOffset(end_exp.var, var); + + // TODO(user): This happens for relations true at level zero, maybe we + // should deal with them differently. + if (inner_offset == kMinIntegerValue) { + skip_[i] = true; + continue; + } + + // TODO(user): The code should work in all cases, but this DCHECK still + // fail rarely in multithread. I think this happens for linear of size 3 + // with some fixed variable that get converted in the precedence + // propagator to size 2. 
+ DCHECK_GE(inner_offset, inner_offsets_[i]); + + // We have var >= end_exp.var + inner_offset, so + // var >= (end_exp.var + end_exp.constant) + // + (inner_offset - end_exp.constant) + // var >= task end + offset. + const IntegerValue offset = inner_offset - end_exp.constant; + + // Heuristic: do not consider this relations if its offset is clearly bad. + // Same as what is done above with inner_offsets_[i]. + if (best_end_min + offset <= current_var_lb) { + skip_[i] = true; + continue; + } + + // Add this task to the current subset and compute the new bound. + min_offset = std::min(min_offset, offset); + sum_of_duration += helper_->SizeMin(task_time.task_index); + const IntegerValue start = task_time.time; + const IntegerValue new_lb = start + sum_of_duration + min_offset; + + if (new_lb > best_new_lb) { + best_new_lb = new_lb; + best_index = i; + } + } + + // Push? + if (best_new_lb > current_var_lb) { + DCHECK_NE(best_index, -1); helper_->ClearReason(); - - // Fill task_to_arc_index_ since we need it for the reason. - // Note that we do not care about the initial content of this vector. - for (int j = initial_i; j < i; ++j) { - const int task = window_[before_[j].index].task_index; - task_to_arc_index_[task] = before_[j].arc_index; - } - - const int critical_index = task_set_.GetCriticalIndex(); - const IntegerValue window_start = sorted_tasks[critical_index].start_min; - for (int i = critical_index; i < sorted_tasks.size(); ++i) { - const int ct = sorted_tasks[i].task; + const IntegerValue window_start = + window_[indices_before_[best_index]].time; + for (int i = best_index; i < num_before; ++i) { + if (skip_[i]) continue; + const int ct = window_[indices_before_[i]].task_index; helper_->AddPresenceReason(ct); - helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, - window_start); + helper_->AddEnergyAfterReason(ct, helper_->SizeMin(ct), window_start); + // Fetch the explanation. + // This is okay if a bit slow since we only do that when we push. 
const AffineExpression& end_exp = helper_->Ends()[ct]; - precedences_->AddPrecedenceReason( - task_to_arc_index_[ct], min_offset + end_exp.constant, - helper_->MutableLiteralReason(), helper_->MutableIntegerReason()); + for (const Literal l : + precedence_relations_->GetConditionalEnforcements(end_exp.var, + var)) { + helper_->MutableLiteralReason()->push_back(l.Negated()); + } } - - // TODO(user): If var is actually a start-min of an interval, we - // could push the end-min and check the interval consistency right away. ++num_propagations_; if (!helper_->PushIntegerLiteral( - IntegerLiteral::GreaterOrEqual(var, new_lb))) { + IntegerLiteral::GreaterOrEqual(var, best_new_lb))) { return false; } } @@ -1219,6 +1304,14 @@ bool DisjunctiveNotLast::PropagateSubwindow() { // Add the reason for t, we only need the start-max. helper_->AddStartMaxReason(t, end_min_of_critical_tasks - 1); + // If largest_ct_start_max == kMinIntegerValue, we have a conflict. To + // avoid integer overflow, we report it directly. This might happen + // because the task is known to be after all the other, and thus it cannot + // be "not last". + if (largest_ct_start_max == kMinIntegerValue) { + return helper_->ReportConflict(); + } + // Enqueue the new end-max for t. // Note that changing it will not influence the rest of the loop. 
if (!helper_->DecreaseEndMax(t, largest_ct_start_max)) return false; diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 7d9c6b53ab..a103f45a78 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -267,9 +267,8 @@ class DisjunctivePrecedences : public PropagatorInterface { helper_(helper), integer_trail_(model->GetOrCreate()), precedences_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()), - task_set_(helper->NumTasks()), - task_to_arc_index_(helper->NumTasks()) {} + precedence_relations_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()) {} ~DisjunctivePrecedences() override; bool Propagate() final; @@ -282,6 +281,7 @@ class DisjunctivePrecedences : public PropagatorInterface { SchedulingConstraintHelper* helper_; IntegerTrail* integer_trail_; PrecedencesPropagator* precedences_; + PrecedenceRelations* precedence_relations_; SharedStatistics* shared_stats_; int64_t num_propagations_ = 0; @@ -289,8 +289,9 @@ class DisjunctivePrecedences : public PropagatorInterface { std::vector window_; std::vector index_to_end_vars_; - TaskSet task_set_; - std::vector task_to_arc_index_; + std::vector indices_before_; + std::vector inner_offsets_; + std::vector skip_; std::vector before_; }; diff --git a/ortools/sat/integer_expr.h b/ortools/sat/integer_expr.h index 2c4289857a..7c7bda5961 100644 --- a/ortools/sat/integer_expr.h +++ b/ortools/sat/integer_expr.h @@ -155,6 +155,10 @@ class LinearConstraintPropagator : public PropagatorInterface { using IntegerSumLE = LinearConstraintPropagator; using IntegerSumLE128 = LinearConstraintPropagator; +// Explicit instantiations in integer_expr.cc. +extern template class LinearConstraintPropagator; +extern template class LinearConstraintPropagator; + // This assumes target = SUM_i coeffs[i] * vars[i], and detects that the target // must be of the form (a*X + b). 
// diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index ee1a1e771c..b01fff7707 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -499,24 +499,24 @@ IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( int a, int b, bool add_reason_if_after) { const AffineExpression before = ends_[a]; const AffineExpression after = starts_[b]; - if (before.var == kNoIntegerVariable) return kMinIntegerValue; - if (after.var == kNoIntegerVariable) return kMinIntegerValue; - - const IntegerValue needed = before.constant - after.constant; - const IntegerValue static_known = - precedence_relations_->GetOffset(before.var, after.var); - - const std::pair dynamic_known = - precedences_->GetConditionalOffset(before.var, after.var); - - const IntegerValue best = std::max(static_known, dynamic_known.second); - if (best == kMinIntegerValue) return kMinIntegerValue; - - if (add_reason_if_after && dynamic_known.second > static_known && - dynamic_known.second >= needed) { - literal_reason_.push_back(dynamic_known.first.Negated()); + if (before.var == kNoIntegerVariable || before.coeff != 1 || + after.var == kNoIntegerVariable || after.coeff != 1) { + return kMinIntegerValue; } - return best - needed; + + const IntegerValue offset = + precedence_relations_->GetConditionalOffset(before.var, after.var); + if (offset == kMinIntegerValue) return kMinIntegerValue; + + const IntegerValue needed_offset = before.constant - after.constant; + const IntegerValue distance = offset - needed_offset; + if (add_reason_if_after && distance >= 0) { + for (const Literal l : precedence_relations_->GetConditionalEnforcements( + before.var, after.var)) { + literal_reason_.push_back(l.Negated()); + } + } + return distance; } void SchedulingConstraintHelper::AddLevelZeroPrecedence(int a, int b) { diff --git a/ortools/sat/java/BUILD.bazel b/ortools/sat/java/BUILD.bazel index 927330a46e..4945fad8dc 100644 --- a/ortools/sat/java/BUILD.bazel +++ 
b/ortools/sat/java/BUILD.bazel @@ -13,9 +13,9 @@ # Description: java wrapping of the code in ../ -load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") -load("@rules_jvm_external//:defs.bzl", "artifact") load("@contrib_rules_jvm//java:defs.bzl", "java_junit5_test") +load("@rules_jvm_external//:defs.bzl", "artifact") +load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") ortools_java_wrap_cc( name = "sat", diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 6d1e9a5151..1eb757544a 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -35,6 +35,7 @@ #include "ortools/base/strong_vector.h" #include "ortools/sat/integer.h" #include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" @@ -230,7 +231,7 @@ void EnforcementPropagator::Untrail(const Trail& /*trail*/, int trail_index) { for (int i = size - 1; i >= rev_stack_size_; --i) { const auto [id, status] = untrail_stack_[i]; statuses_[id] = status; - if (callbacks_[id] != nullptr) callbacks_[id](status); + if (callbacks_[id] != nullptr) callbacks_[id](id, status); } untrail_stack_.resize(rev_stack_size_); propagation_trail_index_ = trail_index; @@ -243,7 +244,7 @@ void EnforcementPropagator::Untrail(const Trail& /*trail*/, int trail_index) { // constraint is never enforced, and should be ignored. EnforcementId EnforcementPropagator::Register( absl::Span enforcement, - std::function callback) { + std::function callback) { int num_true = 0; int num_false = 0; bool is_always_false = false; @@ -271,11 +272,13 @@ EnforcementId EnforcementPropagator::Register( // Return special indices if never/always enforced. 
if (is_always_false) { - if (callback != nullptr) callback(EnforcementStatus::IS_FALSE); + if (callback != nullptr) + callback(EnforcementId(-1), EnforcementStatus::IS_FALSE); return EnforcementId(-1); } if (temp_literals_.empty()) { - if (callback != nullptr) callback(EnforcementStatus::IS_ENFORCED); + if (callback != nullptr) + callback(EnforcementId(-1), EnforcementStatus::IS_ENFORCED); return EnforcementId(-1); } @@ -331,7 +334,7 @@ EnforcementId EnforcementPropagator::Register( // Because this is the default status, we still need to call the callback. if (temp_literals_.size() == 1) { if (callbacks_[id] != nullptr) { - callbacks_[id](EnforcementStatus::CAN_PROPAGATE); + callbacks_[id](id, EnforcementStatus::CAN_PROPAGATE); } } } @@ -445,7 +448,7 @@ void EnforcementPropagator::ChangeStatus(EnforcementId id, untrail_stack_.push_back({id, old_status}); } statuses_[id] = new_status; - if (callbacks_[id] != nullptr) callbacks_[id](new_status); + if (callbacks_[id] != nullptr) callbacks_[id](id, new_status); } EnforcementStatus EnforcementPropagator::DebugStatus(EnforcementId id) { @@ -473,6 +476,7 @@ LinearPropagator::LinearPropagator(Model* model) rev_int_repository_(model->GetOrCreate()), rev_integer_value_repository_( model->GetOrCreate()), + precedences_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), watcher_id_(watcher_->Register(this)) { // Note that we need this class always in sync. 
@@ -660,7 +664,8 @@ bool LinearPropagator::AddConstraint( infos_.back().enf_status = static_cast(EnforcementStatus::CANNOT_PROPAGATE); infos_.back().enf_id = enforcement_propagator_->Register( - enforcement_literals, [this, id](EnforcementStatus status) { + enforcement_literals, + [this, id](EnforcementId enf_id, EnforcementStatus status) { infos_[id].enf_status = static_cast(status); // TODO(user): With some care, when we cannot propagate or the // constraint is not enforced, we could leave in_queue_[] at true but @@ -670,8 +675,27 @@ bool LinearPropagator::AddConstraint( AddToQueueIfNeeded(id); watcher_->CallOnNextPropagate(watcher_id_); } + + // When a conditional precedence becomes enforced, add it. Note that + // we cannot just use rev_size == 2 since we might miss some + // explanation if a longer constraint only have 2 non-fixed variable + // now.. It is however okay not to push precedence involving a fixed + // variable, since these should be reflected in the variable domain + // anyway. + if (status == EnforcementStatus::IS_ENFORCED) { + const auto info = infos_[id]; + if (info.initial_size == 2 && info.rev_size == 2 && + info.all_coeffs_are_one) { + const auto vars = GetVariables(info); + precedences_->PushConditionalRelation( + enforcement_propagator_->GetEnforcementLiterals(enf_id), + vars[0], vars[1], info.rev_rhs); + } + } }); } else { + // TODO(user): Shall we register root level precedence from here rather than + // separately? 
AddToQueueIfNeeded(id); infos_.back().enf_id = -1; infos_.back().enf_status = static_cast(EnforcementStatus::IS_ENFORCED); diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index 72757ab68d..8fccd247c9 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -30,6 +30,7 @@ #include "ortools/base/strong_vector.h" #include "ortools/sat/integer.h" #include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" @@ -114,7 +115,7 @@ class EnforcementPropagator : SatPropagator { // first call to callback() should be necessary, we don't save it. EnforcementId Register( absl::Span enforcement, - std::function callback = nullptr); + std::function callback = nullptr); // Add the enforcement reason to the given vector. void AddEnforcementReason(EnforcementId id, @@ -132,6 +133,12 @@ class EnforcementPropagator : SatPropagator { // This should only used in DCHECK(). EnforcementStatus DebugStatus(EnforcementId id); + // Returns the enforcement literals of the given id. + absl::Span GetEnforcementLiterals(EnforcementId id) const { + if (id < 0) return {}; + return GetSpan(id); + } + private: absl::Span GetSpan(EnforcementId id); absl::Span GetSpan(EnforcementId id) const; @@ -154,7 +161,8 @@ class EnforcementPropagator : SatPropagator { std::vector buffer_; absl::StrongVector statuses_; - absl::StrongVector> + absl::StrongVector> callbacks_; // Used to restore status and call callback on untrail. 
@@ -241,6 +249,7 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { TimeLimit* time_limit_; RevIntRepository* rev_int_repository_; RevIntegerValueRepository* rev_integer_value_repository_; + PrecedenceRelations* precedences_; SharedStatistics* shared_stats_ = nullptr; const int watcher_id_; @@ -306,14 +315,6 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { std::vector id_propagated_something_; std::vector tmp_delayed_; - // Staging queue. - // Initially, we add the constraint to the priority queue, and we extract - // them one by one, each time reaching the propagation fixed point. - std::vector pq_was_added_; - bool pq_in_heap_form_ = false; - std::vector pq_; - std::vector pq_to_clean_; - // Stats. Allow to track the time a constraint is scanned more than once. // This is only used in --v 1. SparseBitset id_scanned_at_least_once_; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 50e01cf5ad..25ab93b86b 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -61,8 +61,8 @@ void PrecedenceRelations::Add(IntegerVariable tail, IntegerVariable head, // TODO(user): if tail = Negation(head) also update Domain. if (tail == head) return; - AddToHashTable(tail, head, offset); - AddToHashTable(NegationOf(head), NegationOf(tail), offset); + // Add to root_relations_. + AddInternal(tail, head, offset); // If we are not built, make sure there is enough room in the graph. // TODO(user): Alternatively, force caller to do a Resize(). @@ -73,6 +73,105 @@ void PrecedenceRelations::Add(IntegerVariable tail, IntegerVariable head, } } +void PrecedenceRelations::PushConditionalRelation( + absl::Span enforcements, IntegerVariable a, + IntegerVariable b, IntegerValue rhs) { + if (enforcements.empty()) { + Add(a, NegationOf(b), -rhs); + return; + } + + // This must be currently true. 
+ if (DEBUG_MODE) { + for (const Literal l : enforcements) { + CHECK(trail_->Assignment().LiteralIsTrue(l)); + } + } + + const int new_index = conditional_stack_.size(); + const auto key = GetKey(a, b); + const auto [it, inserted] = conditional_relations_.insert({key, new_index}); + if (inserted) { + CreateLevelEntryIfNeeded(); + conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, key, enforcements); + } else { + const int prev_entry = it->second; + if (conditional_stack_[prev_entry].rhs <= rhs) return; // ignore. + + // Update. + it->second = new_index; + CreateLevelEntryIfNeeded(); + conditional_stack_.emplace_back(prev_entry, rhs, key, enforcements); + } +} + +void PrecedenceRelations::CreateLevelEntryIfNeeded() { + const int current = trail_->CurrentDecisionLevel(); + if (!level_to_stack_size_.empty() && + level_to_stack_size_.back().first == current) + return; + level_to_stack_size_.push_back({current, conditional_stack_.size()}); +} + +// We only pop what is needed. +void PrecedenceRelations::SetLevel(int level) { + while (!level_to_stack_size_.empty() && + level_to_stack_size_.back().first > level) { + const int target = level_to_stack_size_.back().second; + CHECK_GE(conditional_stack_.size(), target); + while (conditional_stack_.size() > target) { + const ConditionalEntry& back = conditional_stack_.back(); + if (back.prev_entry != -1) { + conditional_relations_[back.key] = back.prev_entry; + } else { + conditional_relations_.erase(back.key); + } + conditional_stack_.pop_back(); + } + level_to_stack_size_.pop_back(); + } +} + +IntegerValue PrecedenceRelations::GetOffset(IntegerVariable a, + IntegerVariable b) const { + const auto it = root_relations_.find(GetKey(a, NegationOf(b))); + if (it != root_relations_.end()) { + return -it->second; + } + return kMinIntegerValue; +} + +absl::Span PrecedenceRelations::GetConditionalEnforcements( + IntegerVariable a, IntegerVariable b) const { + const auto it = conditional_relations_.find(GetKey(a, NegationOf(b))); 
+ if (it == conditional_relations_.end()) return {}; + + const ConditionalEntry& entry = conditional_stack_[it->second]; + if (DEBUG_MODE) { + for (const Literal l : entry.enforcements) { + CHECK(trail_->Assignment().LiteralIsTrue(l)); + } + } + const IntegerValue root_level_offset = GetOffset(a, b); + const IntegerValue conditional_offset = -entry.rhs; + if (conditional_offset <= root_level_offset) return {}; + + return entry.enforcements; +} + +// TODO(user): optimize this with a single direct hash lookup? +IntegerValue PrecedenceRelations::GetConditionalOffset( + IntegerVariable a, IntegerVariable b) const { + const IntegerValue root_level_offset = GetOffset(a, b); + const auto it = conditional_relations_.find(GetKey(a, NegationOf(b))); + if (it != conditional_relations_.end()) { + const ConditionalEntry& entry = conditional_stack_[it->second]; + const IntegerValue conditional_offset = -entry.rhs; + return std::max(conditional_offset, root_level_offset); + } + return root_level_offset; +} + void PrecedenceRelations::Build() { if (is_built_) return; is_built_ = true; @@ -85,18 +184,33 @@ void PrecedenceRelations::Build() { // And use this to compute the "closure". // Note that the non-determinism of the arcs order shouldn't matter. CHECK(arc_offsets_.empty()); - graph_.ReserveArcs(all_relations_.size()); - for (const auto [var_pair, offset] : all_relations_) { + graph_.ReserveArcs(2 * root_relations_.size()); + for (const auto [var_pair, negated_offset] : root_relations_) { // TODO(user): Support negative offset? // // Note that if we only have >= 0 ones, if we do have a cycle, we could // make sure all variales are the same, and otherwise, we have a DAG or a // conflict. + const IntegerValue offset = -negated_offset; if (offset < 0) continue; - graph_.AddArc(var_pair.first.value(), var_pair.second.value()); - arc_offsets_.push_back(offset); - CHECK_LT(var_pair.second, before.size()); - before[var_pair.second].push_back(var_pair.first); + + // We have two arcs. 
+ { + const IntegerVariable tail = var_pair.first; + const IntegerVariable head = NegationOf(var_pair.second); + graph_.AddArc(tail.value(), head.value()); + arc_offsets_.push_back(offset); + CHECK_LT(var_pair.second, before.size()); + before[head].push_back(tail); + } + { + const IntegerVariable tail = var_pair.second; + const IntegerVariable head = NegationOf(var_pair.first); + graph_.AddArc(tail.value(), head.value()); + arc_offsets_.push_back(offset); + CHECK_LT(var_pair.second, before.size()); + before[head].push_back(tail); + } } std::vector permutation; @@ -134,16 +248,6 @@ void PrecedenceRelations::Build() { int work = 0; const int kWorkLimit = 1e6; - const auto add = [&before, this](IntegerVariable a, IntegerVariable b, - IntegerValue offset) { - const auto [it, inserted] = all_relations_.insert({{a, b}, offset}); - if (inserted) { - before[b].push_back(a); - } else { - it->second = std::max(it->second, offset); - } - }; - for (const IntegerVariable tail_var : topological_order_) { if (++work > kWorkLimit) break; for (const int arc : graph_.OutgoingArcs(tail_var.value())) { @@ -152,19 +256,24 @@ void PrecedenceRelations::Build() { const IntegerValue arc_offset = arc_offsets_[arc]; if (++work > kWorkLimit) break; - add(tail_var, head_var, arc_offset); + if (AddInternal(tail_var, head_var, arc_offset)) { + before[head_var].push_back(tail_var); + } for (const IntegerVariable before_var : before[tail_var]) { if (++work > kWorkLimit) break; const IntegerValue offset = - all_relations_.at({before_var, tail_var}) + arc_offset; - add(before_var, head_var, offset); + -root_relations_.at(GetKey(before_var, NegationOf(tail_var))) + + arc_offset; + if (AddInternal(before_var, head_var, offset)) { + before[head_var].push_back(before_var); + } } } } VLOG(2) << "Full precedences. 
Work=" << work - << " Relations=" << all_relations_.size(); + << " Relations=" << root_relations_.size(); } void PrecedenceRelations::ComputeFullPrecedences( @@ -297,7 +406,7 @@ bool PrecedencesPropagator::Propagate() { literal_to_new_impacted_arcs_[literal.Index()]) { if (--arc_counts_[arc_index] == 0) { const ArcInfo& arc = arcs_[arc_index]; - AddToConditionalRelations(arc); + PushConditionalRelations(arc); impacted_arcs_[arc.tail_var].push_back(arc_index); } } @@ -353,34 +462,15 @@ bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) { return true; } -// TODO(user): Add as fixed precedence if we fix at level zero. -void PrecedencesPropagator::AddToConditionalRelations(const ArcInfo& arc) { - if (arc.presence_literals.size() != 1) return; - +// TODO(user): Remove literal fixed at level zero from there. +void PrecedencesPropagator::PushConditionalRelations(const ArcInfo& arc) { // We currently do not handle variable size in the reasons. // TODO(user): we could easily take a level zero ArcOffset() instead, or // add this to the reason though. if (arc.offset_var != kNoIntegerVariable) return; - const std::pair key = {arc.tail_var, - arc.head_var}; const IntegerValue offset = ArcOffset(arc); - - // We only insert if it is not already present! - conditional_relations_.insert({key, {arc.presence_literals[0], offset}}); -} - -void PrecedencesPropagator::RemoveFromConditionalRelations(const ArcInfo& arc) { - if (arc.presence_literals.size() != 1) return; - if (arc.offset_var != kNoIntegerVariable) return; - const std::pair key = {arc.tail_var, - arc.head_var}; - const auto it = conditional_relations_.find(key); - if (it == conditional_relations_.end()) return; - if (it->second.first != arc.presence_literals[0]) return; - - // It is okay if we erase a wrong one on untrail, what is important is not to - // forget to erase one we added. 
- conditional_relations_.erase(it); + relations_->PushConditionalRelation(arc.presence_literals, arc.tail_var, + NegationOf(arc.head_var), -offset); } void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { @@ -397,7 +487,6 @@ void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { literal_to_new_impacted_arcs_[literal.Index()]) { if (arc_counts_[arc_index]++ == 0) { const ArcInfo& arc = arcs_[arc_index]; - RemoveFromConditionalRelations(arc); impacted_arcs_[arc.tail_var].pop_back(); } } @@ -410,7 +499,7 @@ void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { // permutation. void PrecedencesPropagator::ComputePrecedences( const std::vector& vars, - std::vector* output) { + std::vector* output, bool sort_by_var_lb) { tmp_sorted_vars_.clear(); tmp_precedences_.clear(); for (int index = 0; index < vars.size(); ++index) { @@ -420,9 +509,11 @@ void PrecedencesPropagator::ComputePrecedences( for (const ArcIndex arc_index : impacted_arcs_[var]) { const ArcInfo& arc = arcs_[arc_index]; IntegerValue offset = arc.offset; - if (arc.offset_var != kNoIntegerVariable) { - offset += integer_trail_->LowerBound(arc.offset_var); - } + + // TODO(user): we don't support linear3 with the new code. + // Mainly due to the need for explaining the bound. We could use level + // zero bound of linear3 for precedences. 
+ if (arc.offset_var != kNoIntegerVariable) continue; // TODO(user): it seems better to ignore negative min offset as we will // often have relation of the form interval_start >= interval_end - @@ -442,19 +533,20 @@ void PrecedencesPropagator::ComputePrecedences( } var_to_last_index_[arc.head_var] = index; var_to_degree_[arc.head_var]++; - tmp_precedences_.push_back( - {index, arc.head_var, arc_index.value(), offset}); + tmp_precedences_.push_back({index, arc.head_var, offset}); } } - // This order is a topological order for the precedences relation order - // provided that all the offset between the involved IntegerVariable are - // positive. - // - // TODO(user): use an order that is always topological? This is not clear - // since it may be slower to compute and not worth it because the order below - // is more natural and may work better. - std::sort(tmp_sorted_vars_.begin(), tmp_sorted_vars_.end()); + if (sort_by_var_lb) { + // This order is a topological order for the precedences relation order + // provided that all the offset between the involved IntegerVariable are + // positive. + // + // TODO(user): use an order that is always topological? This is not clear + // since it may be slower to compute and not worth it because the order + // below is more natural and may work better. + std::sort(tmp_sorted_vars_.begin(), tmp_sorted_vars_.end()); + } // Permute tmp_precedences_ into the output to put it in the correct order. 
// For that we transform var_to_degree_ to point to the first position of @@ -507,21 +599,6 @@ void PrecedencesPropagator::ComputePartialPrecedences( } } -void PrecedencesPropagator::AddPrecedenceReason( - int arc_index, IntegerValue min_offset, - std::vector* literal_reason, - std::vector* integer_reason) const { - const ArcInfo& arc = arcs_[ArcIndex(arc_index)]; - for (const Literal l : arc.presence_literals) { - literal_reason->push_back(l.Negated()); - } - if (arc.offset_var != kNoIntegerVariable) { - // Reason for ArcOffset(arc) to be >= min_offset. - integer_reason->push_back(IntegerLiteral::GreaterOrEqual( - arc.offset_var, min_offset - arc.offset)); - } -} - void PrecedencesPropagator::AdjustSizeFor(IntegerVariable i) { const int index = std::max(i.value(), NegationOf(i).value()); if (index >= impacted_arcs_.size()) { diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index b021d55154..10dd5fd860 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -52,10 +52,13 @@ struct FullIntegerPrecedence { // TODO(user): Support conditional relation. // TODO(user): Support non-DAG like graph. // TODO(user): Support variable offset that can be updated as search progress. -class PrecedenceRelations { +class PrecedenceRelations : public ReversibleInterface { public: explicit PrecedenceRelations(Model* model) - : integer_trail_(model->GetOrCreate()) {} + : trail_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()) { + integer_trail_->RegisterReversibleClass(this); + } void Resize(int num_variables) { graph_.ReserveNodes(num_variables); @@ -65,6 +68,21 @@ class PrecedenceRelations { // Add a relation tail + offset <= head. void Add(IntegerVariable tail, IntegerVariable head, IntegerValue offset); + // Adds add relation (enf => a + b <= rhs) that is assumed to be true at + // the current level. + // + // It will be automatically reverted via the SetLevel() functions that is + // called before any integer propagations trigger. 
+ // + // This is assumed to be called when a relation becomes true (enforcement are + // assigned) and when it becomes false in reverse order (CHECKed). + void PushConditionalRelation(absl::Span enforcements, + IntegerVariable a, IntegerVariable b, + IntegerValue rhs); + + // Called each time we change decision level. + void SetLevel(int level) final; + // Returns a set of relations var >= max_i(vars[index[i]] + offsets[i]). // // This currently only works if the precedence relation form a DAG. @@ -89,10 +107,21 @@ class PrecedenceRelations { // // Returns kMinIntegerValue if there are none. // Otherwise a + offset <= b. - IntegerValue GetOffset(IntegerVariable a, IntegerVariable b) { - const auto it = all_relations_.find({a, b}); - return it == all_relations_.end() ? kMinIntegerValue : it->second; - } + IntegerValue GetOffset(IntegerVariable a, IntegerVariable b) const; + + // Returns the minimum distance between a and b, and the reason for it (all + // true). Note that we always check GetOffset() so if it is better, the + // returned literal reason will be empty. + // + // We separate the two because usually the reason is only needed when we push, + // which happen less often, so we don't mind doing two hash lookups, and we + // really want to optimize the GetConditionalOffset() instead. + // + // Important: This doesn't contains the transitive closure. + // Important: The span is only valid in a narrow scope. + IntegerValue GetConditionalOffset(IntegerVariable a, IntegerVariable b) const; + absl::Span GetConditionalEnforcements(IntegerVariable a, + IntegerVariable b) const; // The current code requires the internal data to be processed once all // relations are loaded. 
@@ -101,14 +130,25 @@ class PrecedenceRelations { void Build(); private: - void AddToHashTable(IntegerVariable a, IntegerVariable b, - IntegerValue offset) { - const auto [it, inserted] = all_relations_.insert({{a, b}, offset}); - if (!inserted) { - it->second = std::max(it->second, offset); - } + void CreateLevelEntryIfNeeded(); + + std::pair GetKey(IntegerVariable a, + IntegerVariable b) const { + return a <= b ? std::make_pair(a, b) : std::make_pair(b, a); } + // tail + offset <= head. + // Which is the same as tail - head <= -offset. + bool AddInternal(IntegerVariable tail, IntegerVariable head, + IntegerValue offset) { + const auto [it, inserted] = + root_relations_.insert({GetKey(tail, NegationOf(head)), -offset}); + if (inserted) return true; + it->second = std::min(it->second, -offset); + return false; + } + + Trail* trail_; IntegerTrail* integer_trail_; util::StaticGraph<> graph_; @@ -118,8 +158,30 @@ class PrecedenceRelations { bool is_dag_ = false; std::vector topological_order_; + // Conditional stack for push/pop of conditional relations. + // + // TODO(user): this kind of reversible hash_map is already implemented in + // other part of the code. Consolidate. + struct ConditionalEntry { + ConditionalEntry(int p, IntegerValue r, + std::pair k, + absl::Span e) + : prev_entry(p), rhs(r), key(k), enforcements(e.begin(), e.end()) {} + + int prev_entry; + IntegerValue rhs; + std::pair key; + absl::InlinedVector enforcements; + }; + std::vector conditional_stack_; + std::vector> level_to_stack_size_; + + // This is always stored in the form (a + b <= rhs). + // The conditional relations contains indices in the conditional_stack_. 
absl::flat_hash_map, IntegerValue> - all_relations_; + root_relations_; + absl::flat_hash_map, int> + conditional_relations_; }; // This class implement a propagator on simple inequalities between integer @@ -142,6 +204,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { public: explicit PrecedencesPropagator(Model* model) : SatPropagator("PrecedencesPropagator"), + relations_(model->GetOrCreate()), trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), shared_stats_(model->Mutable()), @@ -176,10 +239,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { IntegerValue offset); void AddPrecedenceWithVariableOffset(IntegerVariable i1, IntegerVariable i2, IntegerVariable offset_var); - // Add a precedence relation (e1 + offset <= i2) between affine expressions. - // It will check that both e1 and e2 avec a variable with a coefficient of 1. - // This is used in tests. - void AddAffineCoeffOnePrecedence(AffineExpression e1, AffineExpression e2); // Same as above, but the relation is only true when the given literal is. void AddConditionalPrecedence(IntegerVariable i1, IntegerVariable i2, @@ -199,30 +258,26 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { IntegerValue offset); // Finds all the IntegerVariable that are "after" at least two of the - // IntegerVariable in vars. Returns a vector of these precedences relation - // sorted by IntegerPrecedences.var so that it is efficient to find all the - // IntegerVariable "before" another one. + // IntegerVariable in vars. Returns a vector of these precedences relation so + // that it is efficient to find all the IntegerVariable "before" another one. + // + // If sort_by_var_lb is true, then the returned precedences will be in order + // of the var current lower bound. This should be a topological order for + // the relations with positive offset. // // Note that we only consider direct precedences here. 
Given our usage, it may // be better to compute the full reachability in the precedence graph, but in // pratice that may be too slow. // - // Note that the IntegerVariable in the vector are also returned in - // topological order for a more efficient propagation in - // DisjunctivePrecedences::Propagate() where this is used. - // // Important: For identical vars, the entry are sorted by index. struct IntegerPrecedences { int index; // position in vars. IntegerVariable var; // An IntegerVariable that is >= to vars[index]. - int arc_index; // Used by AddPrecedenceReason(). IntegerValue offset; // we have: vars[index] + offset <= var }; void ComputePrecedences(const std::vector& vars, - std::vector* output); - void AddPrecedenceReason(int arc_index, IntegerValue min_offset, - std::vector* literal_reason, - std::vector* integer_reason) const; + std::vector* output, + bool sort_by_var_lb = true); // This just wrap ComputePrecedences() above and convert its output format to // the same format as PrecedenceRelations::ComputeFullPrecedences(). This is @@ -234,20 +289,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { void ComputePartialPrecedences(const std::vector& vars, std::vector* output); - // If known, return an offset such that we have a + offset <= b. - // Note that this only cover the case where this was conditionned by a single - // literal. - // - // TODO(user): Support list of literals, it isn't that much harder. - std::pair GetConditionalOffset(IntegerVariable a, - IntegerVariable b) { - const auto it = conditional_relations_.find({a, b}); - if (it == conditional_relations_.end()) { - return {Literal(), kMinIntegerValue}; - } - return it->second; - } - private: DEFINE_STRONG_INDEX_TYPE(ArcIndex); DEFINE_STRONG_INDEX_TYPE(OptionalArcIndex); @@ -318,12 +359,12 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // This is only meant to be used in a DCHECK() and is not optimized. 
bool NoPropagationLeft(const Trail& trail) const; - // Update conditional_relations_. - void AddToConditionalRelations(const ArcInfo& arc); - void RemoveFromConditionalRelations(const ArcInfo& arc); + // Update relations_. + void PushConditionalRelations(const ArcInfo& arc); // External class needed to get the IntegerVariable lower bounds and Enqueue // new ones. + PrecedenceRelations* relations_; Trail* trail_; IntegerTrail* integer_trail_; SharedStatistics* shared_stats_ = nullptr; @@ -395,14 +436,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // Temp vector used by the tree traversal in DisassembleSubtree(). std::vector tmp_vector_; - // When a literal => X + offset <= Y become true, we add it here if X and Y - // do not already have a conditial relation. We also remove it on untrail. - // This is especially useful when we create all the literal between pair of - // interval for a disjunctive constraint. - absl::flat_hash_map, - std::pair> - conditional_relations_; - // Stats. int64_t num_cycles_ = 0; int64_t num_pushes_ = 0; @@ -484,15 +517,6 @@ inline void PrecedencesPropagator::AddPrecedenceWithOffset( AddArc(i1, i2, offset, /*offset_var=*/kNoIntegerVariable, {}); } -inline void PrecedencesPropagator::AddAffineCoeffOnePrecedence( - AffineExpression e1, AffineExpression e2) { - CHECK_NE(e1.var, kNoIntegerVariable); - CHECK_EQ(e1.coeff, 1); - CHECK_NE(e2.var, kNoIntegerVariable); - CHECK_EQ(e2.coeff, 1); - AddPrecedenceWithOffset(e1.var, e2.var, e1.constant - e2.constant); -} - inline void PrecedencesPropagator::AddConditionalPrecedence(IntegerVariable i1, IntegerVariable i2, Literal l) { @@ -558,12 +582,20 @@ inline std::function AffineCoeffOneLowerOrEqualWithOffset( inline void AddConditionalSum2LowerOrEqual( absl::Span enforcement_literals, IntegerVariable a, IntegerVariable b, int64_t ub, Model* model) { + // TODO(user): Refactor to be sure we do not miss any level zero relations. 
+ if (enforcement_literals.empty()) { + model->GetOrCreate()->Add(a, NegationOf(b), + IntegerValue(-ub)); + } + PrecedencesPropagator* p = model->GetOrCreate(); p->AddPrecedenceWithAllOptions(a, NegationOf(b), IntegerValue(-ub), kNoIntegerVariable, enforcement_literals); } // l => (a + b + c <= ub). +// +// TODO(user): Use level zero bounds to infer binary precedence relations? inline void AddConditionalSum3LowerOrEqual( absl::Span enforcement_literals, IntegerVariable a, IntegerVariable b, IntegerVariable c, int64_t ub, Model* model) { diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 943f8a354e..e8e88a1b8e 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2829,13 +2829,13 @@ class CpModel: """Sets the objective of the model.""" self.clear_objective() if isinstance(obj, IntVar): - self.__model.objective.coeffs.append(1) + self.__model.objective.vars.append(obj.index) self.__model.objective.offset = 0 if minimize: - self.__model.objective.vars.append(obj.index) + self.__model.objective.coeffs.append(1) self.__model.objective.scaling_factor = 1 else: - self.__model.objective.vars.append(self.negated(obj.index)) + self.__model.objective.coeffs.append(-1) self.__model.objective.scaling_factor = -1 elif isinstance(obj, LinearExpr): coeffs_map, constant, is_integer = obj.get_float_var_value_map() @@ -2848,11 +2848,11 @@ class CpModel: self.__model.objective.offset = -constant for v, c in coeffs_map.items(): c_as_int = int(c) - self.__model.objective.coeffs.append(c_as_int) + self.__model.objective.vars.append(v.index) if minimize: - self.__model.objective.vars.append(v.index) + self.__model.objective.coeffs.append(c_as_int) else: - self.__model.objective.vars.append(self.negated(v.index)) + self.__model.objective.coeffs.append(-c_as_int) else: self.__model.floating_point_objective.maximize = not minimize self.__model.floating_point_objective.offset = constant diff --git a/ortools/sat/theta_tree.h 
b/ortools/sat/theta_tree.h index caa825c824..bd833b6b5d 100644 --- a/ortools/sat/theta_tree.h +++ b/ortools/sat/theta_tree.h @@ -244,7 +244,7 @@ class ThetaLambdaTree { std::vector tree_; }; -// Explicit instantiations in theta_Tree.cc. +// Explicit instantiations in theta_tree.cc. extern template class ThetaLambdaTree; extern template class ThetaLambdaTree; From 2afacf647230bc92a3436bf9964308129e2ea35e Mon Sep 17 00:00:00 2001 From: dongjinlong Date: Tue, 26 Mar 2024 22:56:14 +0800 Subject: [PATCH 034/392] chore: remove repetitive words in comments Signed-off-by: dongjinlong From bf2471515f3d81ac38b620897d9f66919e21ffff Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Sat, 30 Mar 2024 10:53:14 +0100 Subject: [PATCH 035/392] cleanup semantics of under-specified constraint in model_builder python --- ortools/linear_solver/python/model_builder.py | 85 ++++++++++++------- .../python/model_builder_test.py | 7 +- 2 files changed, 56 insertions(+), 36 deletions(-) diff --git a/ortools/linear_solver/python/model_builder.py b/ortools/linear_solver/python/model_builder.py index 715506616c..347750e0ff 100644 --- a/ortools/linear_solver/python/model_builder.py +++ b/ortools/linear_solver/python/model_builder.py @@ -432,7 +432,7 @@ def _add_linear_constraint_to_helper( TypeError: If constraint is an invalid type. """ if isinstance(bounded_expr, bool): - c = LinearConstraint(helper) + c = LinearConstraint(helper, None, True) if name is not None: helper.set_constraint_name(c.index, name) if bounded_expr: @@ -477,7 +477,8 @@ def _add_enforced_linear_constraint_to_helper( TypeError: If constraint is an invalid type. """ if isinstance(bounded_expr, bool): - c = EnforcedLinearConstraint(helper) + # TODO(user): create indicator variable assignment instead ? 
+ c = EnforcedLinearConstraint(helper, None, True) c.indicator_variable = var c.indicator_value = value if name is not None: @@ -651,13 +652,17 @@ class LinearConstraint: """ def __init__( - self, helper: mbh.ModelBuilderHelper, index: Optional[IntegerT] = None + self, + helper: mbh.ModelBuilderHelper, + index: Optional[IntegerT] = None, + is_under_specified: bool = False, ): if index is None: self.__index = helper.add_linear_constraint() else: self.__index = index self.__helper: mbh.ModelBuilderHelper = helper + self.__is_under_specified = is_under_specified @property def index(self) -> IntegerT: @@ -696,12 +701,21 @@ class LinearConstraint: def name(self, name: str) -> None: return self.__helper.set_constraint_name(self.__index, name) - def is_always_false(self) -> bool: - """Returns True if the constraint is always false. + @property + def is_under_specified(self) -> bool: + """Returns True if the constraint is under specified. - Usually, it means that it was created by model.add(False) + Usually, it means that it was created by model.add(False) or model.add(True) + The effect is that modifying the constraint will raise an exception. 
""" - return self.lower_bound > self.upper_bound + return self.__is_under_specified + + def assert_constraint_is_well_defined(self) -> None: + """Raises an exception if the constraint is under specified.""" + if self.__is_under_specified: + raise ValueError( + f"Constraint {self.index} is under specified and cannot be modified" + ) def __str__(self): return self.name @@ -716,22 +730,17 @@ class LinearConstraint: def set_coefficient(self, var: Variable, coeff: NumberT) -> None: """Sets the coefficient of the variable in the constraint.""" - if self.is_always_false(): - raise ValueError( - f"Constraint {self.index} is always false and cannot be modified" - ) + self.assert_constraint_is_well_defined() self.__helper.set_constraint_coefficient(self.__index, var.index, coeff) def add_term(self, var: Variable, coeff: NumberT) -> None: """Adds var * coeff to the constraint.""" - if self.is_always_false(): - raise ValueError( - f"Constraint {self.index} is always false and cannot be modified" - ) + self.assert_constraint_is_well_defined() self.__helper.safe_add_term_to_constraint(self.__index, var.index, coeff) def clear_terms(self) -> None: """Clear all terms of the constraint.""" + self.assert_constraint_is_well_defined() self.__helper.clear_constraint_terms(self.__index) @@ -747,7 +756,10 @@ class EnforcedLinearConstraint: """ def __init__( - self, helper: mbh.ModelBuilderHelper, index: Optional[IntegerT] = None + self, + helper: mbh.ModelBuilderHelper, + index: Optional[IntegerT] = None, + is_under_specified: bool = False, ): if index is None: self.__index = helper.add_enforced_linear_constraint() @@ -760,6 +772,7 @@ class EnforcedLinearConstraint: self.__index = index self.__helper: mbh.ModelBuilderHelper = helper + self.__is_under_specified = is_under_specified @property def index(self) -> IntegerT: @@ -819,12 +832,21 @@ class EnforcedLinearConstraint: def name(self, name: str) -> None: return self.__helper.set_enforced_constraint_name(self.__index, name) - def 
is_always_false(self) -> bool: - """Returns True if the constraint is always false. + @property + def is_under_specified(self) -> bool: + """Returns True if the constraint is under specified. - Usually, it means that it was created by model.add(False) + Usually, it means that it was created by model.add(False) or model.add(True) + The effect is that modifying the constraint will raise an exception. """ - return self.lower_bound > self.upper_bound + return self.__is_under_specified + + def assert_constraint_is_well_defined(self) -> None: + """Raises an exception if the constraint is under specified.""" + if self.__is_under_specified: + raise ValueError( + f"Constraint {self.index} is under specified and cannot be modified" + ) def __str__(self): return self.name @@ -841,26 +863,21 @@ class EnforcedLinearConstraint: def set_coefficient(self, var: Variable, coeff: NumberT) -> None: """Sets the coefficient of the variable in the constraint.""" - if self.is_always_false(): - raise ValueError( - f"Constraint {self.index} is always false and cannot be modified" - ) + self.assert_constraint_is_well_defined() self.__helper.set_enforced_constraint_coefficient( self.__index, var.index, coeff ) def add_term(self, var: Variable, coeff: NumberT) -> None: """Adds var * coeff to the constraint.""" - if self.is_always_false(): - raise ValueError( - f"Constraint {self.index} is always false and cannot be modified" - ) + self.assert_constraint_is_well_defined() self.__helper.safe_add_term_to_enforced_constraint( self.__index, var.index, coeff ) def clear_terms(self) -> None: """Clear all terms of the constraint.""" + self.assert_constraint_is_well_defined() self.__helper.clear_enforced_constraint_terms(self.__index) @@ -1321,12 +1338,16 @@ class Model: Note that a special treatment is done when the argument does not contain any variable, and thus evaluates to True or False. 
- model.add(True) will create a constraint 0 <= empty sum <= 0 + model.add(True) will create a constraint 0 <= empty sum <= 0. + The constraint will be marked as under specified, and cannot be modified + further. - model.add(False) will create a constraint inf <= empty sum <= -inf + model.add(False) will create a constraint inf <= empty sum <= -inf. The + constraint will be marked as under specified, and cannot be modified + further. - you can check the if a constraint is always false (lb=inf, ub=-inf) by - calling LinearConstraint.is_always_false() + you can check the if a constraint is under specified by + checking LinearConstraint.is_under_specified. """ if isinstance(ct, _BoundedLinearExpr): return ct._add_linear_constraint(self.__helper, name) diff --git a/ortools/linear_solver/python/model_builder_test.py b/ortools/linear_solver/python/model_builder_test.py index fa070b6cbe..da6f7522cc 100644 --- a/ortools/linear_solver/python/model_builder_test.py +++ b/ortools/linear_solver/python/model_builder_test.py @@ -391,7 +391,7 @@ ENDATA x = model.new_num_var(0.0, math.inf, "x") ct = model.add(False) - self.assertTrue(ct.is_always_false()) + self.assertTrue(ct.is_under_specified) self.assertRaises(ValueError, ct.add_term, x, 1) model.maximize(x) @@ -408,7 +408,8 @@ ENDATA ct = model.add(True) self.assertEqual(ct.lower_bound, 0.0) self.assertEqual(ct.upper_bound, 0.0) - ct.add_term(var=x, coeff=1) + self.assertTrue(ct.is_under_specified) + self.assertRaises(ValueError, ct.add_term, x, 1) model.maximize(x) @@ -416,8 +417,6 @@ ENDATA status = solver.solve(model) self.assertEqual(status, mb.SolveStatus.OPTIMAL) - # Note that ct is binding. 
- self.assertEqual(0.0, solver.objective_value) class InternalHelperTest(absltest.TestCase): From 0ec48be40cb971cfa202efaf6f15d2c8888169b6 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 2 Apr 2024 16:15:48 +0200 Subject: [PATCH 036/392] improve model_builder python --- ortools/linear_solver/python/model_builder.py | 60 ++++++++++++++----- .../python/model_builder_test.py | 2 +- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/ortools/linear_solver/python/model_builder.py b/ortools/linear_solver/python/model_builder.py index 347750e0ff..9a759ceb2f 100644 --- a/ortools/linear_solver/python/model_builder.py +++ b/ortools/linear_solver/python/model_builder.py @@ -252,7 +252,7 @@ class Variable(LinearExpr): ub: Optional[NumberT], is_integral: Optional[bool], name: Optional[str], - ): + ) -> None: """See Model.new_var below.""" LinearExpr.__init__(self) self.__helper: mbh.ModelBuilderHelper = helper @@ -420,6 +420,10 @@ def _add_linear_constraint_to_helper( It handles boolean values (which might arise in the construction of BoundedLinearExpressions). + If bounded_expr is a Boolean value, the created constraint is different. + In that case, the constraint will be immutable and marked as under-specified. + It will be always feasible or infeasible whether the value is True or False. + Args: bounded_expr: The bounded expression used to create the constraint. helper: The helper to create the constraint. @@ -432,7 +436,7 @@ def _add_linear_constraint_to_helper( TypeError: If constraint is an invalid type. """ if isinstance(bounded_expr, bool): - c = LinearConstraint(helper, None, True) + c = LinearConstraint(helper, is_under_specified=True) if name is not None: helper.set_constraint_name(c.index, name) if bounded_expr: @@ -462,6 +466,12 @@ def _add_enforced_linear_constraint_to_helper( It handles boolean values (which might arise in the construction of BoundedLinearExpressions). 
+ If bounded_expr is a Boolean value, the linear part of the constraint is + different. + In that case, the constraint will be immutable and marked as under-specified. + Its linear part will be always feasible or infeasible whether the value is + True or False. + Args: bounded_expr: The bounded expression used to create the constraint. helper: The helper to create the constraint. @@ -478,7 +488,7 @@ def _add_enforced_linear_constraint_to_helper( """ if isinstance(bounded_expr, bool): # TODO(user): create indicator variable assignment instead ? - c = EnforcedLinearConstraint(helper, None, True) + c = EnforcedLinearConstraint(helper, is_under_specified=True) c.indicator_variable = var c.indicator_value = value if name is not None: @@ -559,7 +569,7 @@ class BoundedLinearExpression(_BoundedLinearExpr): model.Add(x + 2 * y -1 >= z) """ - def __init__(self, expr: LinearExprT, lb: NumberT, ub: NumberT): + def __init__(self, expr: LinearExprT, lb: NumberT, ub: NumberT) -> None: self.__expr: LinearExprT = expr self.__lb: np.double = mbn.assert_is_a_number(lb) self.__ub: np.double = mbn.assert_is_a_number(ub) @@ -654,9 +664,18 @@ class LinearConstraint: def __init__( self, helper: mbh.ModelBuilderHelper, + *, index: Optional[IntegerT] = None, is_under_specified: bool = False, - ): + ) -> None: + """LinearConstraint constructor. + + Args: + helper: The pybind11 ModelBuilderHelper. + index: If specified, recreates a wrapper to an existing linear constraint. + is_under_specified: indicates if the constraint was created by + model.add(bool). 
+ """ if index is None: self.__index = helper.add_linear_constraint() else: @@ -680,6 +699,7 @@ class LinearConstraint: @lower_bound.setter def lower_bound(self, bound: NumberT) -> None: + self.assert_constraint_is_well_defined() self.__helper.set_constraint_lower_bound(self.__index, bound) @property @@ -688,6 +708,7 @@ class LinearConstraint: @upper_bound.setter def upper_bound(self, bound: NumberT) -> None: + self.assert_constraint_is_well_defined() self.__helper.set_constraint_upper_bound(self.__index, bound) @property @@ -758,9 +779,18 @@ class EnforcedLinearConstraint: def __init__( self, helper: mbh.ModelBuilderHelper, + *, index: Optional[IntegerT] = None, is_under_specified: bool = False, - ): + ) -> None: + """EnforcedLinearConstraint constructor. + + Args: + helper: The pybind11 ModelBuilderHelper. + index: If specified, recreates a wrapper to an existing linear constraint. + is_under_specified: indicates if the constraint was created by + model.add(bool). + """ if index is None: self.__index = helper.add_enforced_linear_constraint() else: @@ -790,6 +820,7 @@ class EnforcedLinearConstraint: @lower_bound.setter def lower_bound(self, bound: NumberT) -> None: + self.assert_constraint_is_well_defined() self.__helper.set_enforced_constraint_lower_bound(self.__index, bound) @property @@ -798,6 +829,7 @@ class EnforcedLinearConstraint: @upper_bound.setter def upper_bound(self, bound: NumberT) -> None: + self.assert_constraint_is_well_defined() self.__helper.set_enforced_constraint_upper_bound(self.__index, bound) @property @@ -1338,16 +1370,16 @@ class Model: Note that a special treatment is done when the argument does not contain any variable, and thus evaluates to True or False. - model.add(True) will create a constraint 0 <= empty sum <= 0. + `model.add(True)` will create a constraint 0 <= empty sum <= 0. The constraint will be marked as under specified, and cannot be modified - further. + thereafter. 
- model.add(False) will create a constraint inf <= empty sum <= -inf. The + `model.add(False)` will create a constraint inf <= empty sum <= -inf. The constraint will be marked as under specified, and cannot be modified - further. + thereafter. - you can check the if a constraint is under specified by - checking LinearConstraint.is_under_specified. + you can check the if a constraint is under specified by reading the + `LinearConstraint.is_under_specified` property. """ if isinstance(ct, _BoundedLinearExpr): return ct._add_linear_constraint(self.__helper, name) @@ -1368,7 +1400,7 @@ class Model: def linear_constraint_from_index(self, index: IntegerT) -> LinearConstraint: """Rebuilds a linear constraint object from the model and its index.""" - return LinearConstraint(self.__helper, index) + return LinearConstraint(self.__helper, index=index) # EnforcedLinear constraints. @@ -1473,7 +1505,7 @@ class Model: self, index: IntegerT ) -> EnforcedLinearConstraint: """Rebuilds an enforced linear constraint object from the model and its index.""" - return EnforcedLinearConstraint(self.__helper, index) + return EnforcedLinearConstraint(self.__helper, index=index) # Objective. 
def minimize(self, linear_expr: LinearExprT) -> None: diff --git a/ortools/linear_solver/python/model_builder_test.py b/ortools/linear_solver/python/model_builder_test.py index da6f7522cc..8cb8cb0e4d 100644 --- a/ortools/linear_solver/python/model_builder_test.py +++ b/ortools/linear_solver/python/model_builder_test.py @@ -430,7 +430,7 @@ class InternalHelperTest(absltest.TestCase): def test_anonymous_constraints(self): helper = mb.Model().helper index = helper.add_linear_constraint() - constraint = mb.LinearConstraint(helper, index) + constraint = mb.LinearConstraint(helper, index=index) self.assertEqual(constraint.name, f"linear_constraint#{index}") From 3a08e10bcf612edb646d7dbd5f20ed1df28d12dd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 2 Apr 2024 16:17:01 +0200 Subject: [PATCH 037/392] [CP-SAT] fix bug with negative sized intervals; improve cumulative variable profile --- ortools/sat/cp_model_search.cc | 15 --- ortools/sat/diffn_util.h | 1 + ortools/sat/docs/integer_arithmetic.md | 2 +- ortools/sat/docs/scheduling.md | 123 ++++++++++++++---- ortools/sat/intervals.cc | 1 + ortools/sat/restart.h | 4 + .../cumulative_variable_profile_sample_sat.py | 114 ++++++++++++---- .../sat/samples/step_function_sample_sat.go | 2 +- 8 files changed, 192 insertions(+), 70 deletions(-) diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 2d201bb642..71fae1e0bb 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -632,21 +632,6 @@ absl::flat_hash_map GetNamedParameters( strategies["fixed"] = new_params; } - // Inprocessing - { - SatParameters new_params = base_params; - new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH); - new_params.set_use_sat_inprocessing(false); - strategies["no_inprocessing"] = new_params; - - new_params.set_use_sat_inprocessing(true); - new_params.set_inprocessing_dtime_ratio(1.0); - strategies["max_inprocessing"] = new_params; - - new_params.set_linearization_level(0); 
- strategies["max_inprocessing_no_lp"] = new_params; - } - // Quick restart. { // TODO(user): Experiment with search_random_variable_pool_size. diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 685cb0ff99..4cd568edab 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/ortools/sat/docs/integer_arithmetic.md b/ortools/sat/docs/integer_arithmetic.md index efdf3884f6..6907e4bb5a 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -1163,7 +1163,7 @@ func stepFunctionSampleSat() error { // expr == 2 on [0, 1] U [3, 4] U [11, 20] b2 := model.NewBoolVar() - d2 := cpmodel.FromIntervals([]cpmodel.ClosedInterval{{0, 1}, {3, 4}, {11, 20}}) + d2 := cpmodel.FromIntervals([]cpmodel.ClosedInterval{{Start: 0, End: 1}, {Start: 3, End: 4}, {Start: 11, End: 20}}) model.AddLinearConstraintForDomain(x, d2).OnlyEnforceIf(b2) model.AddEquality(expr, cpmodel.NewConstant(2)).OnlyEnforceIf(b2) diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md index 87a186bdad..5aad73d5c2 100644 --- a/ortools/sat/docs/scheduling.md +++ b/ortools/sat/docs/scheduling.md @@ -826,33 +826,37 @@ func main() { } ``` -## Cumulative constraint with varying capacity profile. +## Cumulative constraint with min and max capacity profile. A cumulative constraint takes a list of intervals, and a list of demands, and a capacity. It enforces that at any time point, the sum of demands of tasks active at that time point is less than a given capacity. -Modeling a varying profile can be done using fixed (interval, demand) to occupy -the capacity between the actual profile and it max capacity. +Modeling a non constant max profile can be done using fixed (interval, demand) +to occupy the capacity between the actual profile and it max capacity. 
+ +Modeling a non zero min profile can be done using fixed (interval, demand) +on the complementary cumulative constraint. ### Python code ```python #!/usr/bin/env python3 -"""Solves a simple scheduling problem with a variable work load.""" +"""Solves a scheduling problem with a min and max profile for the work load.""" import io +from absl import app import pandas as pd from ortools.sat.python import cp_model -def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: +def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: """Creates the two dataframes that describes the model.""" - capacity_str: str = """ - start_hour capacity + max_load_str: str = """ + start_hour max_load 0 0 2 0 4 1 @@ -867,6 +871,22 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: 22 0 """ + min_load_str: str = """ + start_hour min_load + 0 0 + 2 0 + 4 0 + 6 0 + 8 3 + 10 3 + 12 1 + 14 3 + 16 3 + 18 1 + 20 1 + 22 0 + """ + tasks_str: str = """ name duration load priority t1 60 3 2 @@ -901,24 +921,25 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: t30 90 4 2 """ - capacity_df = pd.read_table(io.StringIO(capacity_str), sep=r"\s+") + max_load_df = pd.read_table(io.StringIO(max_load_str), sep=r"\s+") + min_load_df = pd.read_table(io.StringIO(min_load_str), sep=r"\s+") tasks_df = pd.read_table(io.StringIO(tasks_str), index_col=0, sep=r"\s+") - return capacity_df, tasks_df + return max_load_df, min_load_df, tasks_df -def main() -> None: +def main(_) -> None: """Create the model and solves it.""" - capacity_df, tasks_df = create_data_model() + max_load_df, min_load_df, tasks_df = create_data_model() # Create the model. model = cp_model.CpModel() # Get the max capacity from the capacity dataframe. 
- max_capacity = capacity_df.capacity.max() - print(f"Max capacity = {max_capacity}") + max_load = max_load_df.max_load.max() + print(f"Max capacity = {max_load}") print(f"#tasks = {len(tasks_df)}") - minutes_per_period: int = 120 + minutes_per_hour: int = 60 horizon: int = 24 * 60 # Variables @@ -935,21 +956,67 @@ def main() -> None: are_present=performed, ) - # Set up the profile. We use fixed (intervals, demands) to fill in the space - # between the actual load profile and the max capacity. - time_period_intervals = model.new_fixed_size_interval_var_series( - name="time_period_intervals", - index=capacity_df.index, - starts=capacity_df.start_hour * minutes_per_period, - sizes=minutes_per_period, + # Set up complement intervals (from 0 to start, and from start + size to + # horizon). + prefix_intervals = model.new_optional_interval_var_series( + name="prefix_intervals", + index=tasks_df.index, + starts=0, + sizes=starts, + ends=starts, + are_present=performed, ) - time_period_heights = max_capacity - capacity_df.capacity - # Cumulative constraint. + suffix_intervals = model.new_optional_interval_var_series( + name="suffix_intervals", + index=tasks_df.index, + starts=starts + tasks_df.duration, + sizes=horizon - starts - tasks_df.duration, + ends=horizon, + are_present=performed, + ) + + # Set up the max profile. We use fixed (intervals, demands) to fill in the + # space between the actual max load profile and the max capacity. + time_period_max_intervals = model.new_fixed_size_interval_var_series( + name="time_period_max_intervals", + index=max_load_df.index, + starts=max_load_df.start_hour * minutes_per_hour, + sizes=minutes_per_hour * 2, + ) + time_period_max_heights = max_load - max_load_df.max_load + + # Cumulative constraint for the max profile. 
model.add_cumulative( - intervals.to_list() + time_period_intervals.to_list(), - tasks_df.load.to_list() + time_period_heights.to_list(), - max_capacity, + intervals.to_list() + time_period_max_intervals.to_list(), + tasks_df.load.to_list() + time_period_max_heights.to_list(), + max_load, + ) + + # Set up the min profile. We use complement intervals to maintain the + # complement of the work load, and fixed intervals to enforce the min + # number of active workers per time period. + time_period_min_intervals = model.new_fixed_size_interval_var_series( + name="time_period_min_intervals", + index=min_load_df.index, + starts=min_load_df.start_hour * minutes_per_hour, + sizes=minutes_per_hour * 2, + ) + time_period_min_heights = min_load_df.min_load + + sum_of_demands = sum(tasks_df.load) + complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity") + model.add(complement_capacity == performed.dot(tasks_df.load)) + + # Cumulative constraint for the min profile. + model.add_cumulative( + prefix_intervals.to_list() + + suffix_intervals.to_list() + + time_period_min_intervals.to_list(), + tasks_df.load.to_list() + + tasks_df.load.to_list() + + time_period_min_heights.to_list(), + complement_capacity, ) # Objective: maximize the value of performed intervals. @@ -960,7 +1027,7 @@ def main() -> None: # Create the solver and solve the model. 
solver = cp_model.CpSolver() solver.parameters.log_search_progress = True - solver.parameters.num_workers = 8 + solver.parameters.num_workers = 16 solver.parameters.max_time_in_seconds = 30.0 status = solver.solve(model) @@ -979,7 +1046,7 @@ def main() -> None: if __name__ == "__main__": - main() + app.run(main) ``` ## Alternative resources for one interval diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index b01fff7707..5fef5f10c8 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -344,6 +344,7 @@ bool SchedulingConstraintHelper::UpdateCachedValues(int t) { // Detect first if we have a conflict using the relation start + size = end. if (dmax < 0) { + ClearReason(); AddSizeMaxReason(t, dmax); return PushTaskAbsence(t); } diff --git a/ortools/sat/restart.h b/ortools/sat/restart.h index 7ec167c476..ea8a9d4c2e 100644 --- a/ortools/sat/restart.h +++ b/ortools/sat/restart.h @@ -57,6 +57,10 @@ class RestartPolicy { // Returns a string with the current restart statistics. std::string InfoString() const; + const RunningAverage& LbdRunningAverage() const { + return lbd_running_average_; + } + private: const SatParameters& parameters_; SatDecisionPolicy* decision_policy_; diff --git a/ortools/sat/samples/cumulative_variable_profile_sample_sat.py b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py index 21736d5991..8e6df0a3b5 100644 --- a/ortools/sat/samples/cumulative_variable_profile_sample_sat.py +++ b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Solves a simple scheduling problem with a variable work load.""" +"""Solves a scheduling problem with a min and max profile for the work load.""" # [START program] # [START import] import io +from absl import app import pandas as pd from ortools.sat.python import cp_model @@ -26,11 +27,11 @@ from ortools.sat.python import cp_model # [START program_part1] # [START data_model] -def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: +def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: """Creates the two dataframes that describes the model.""" - capacity_str: str = """ - start_hour capacity + max_load_str: str = """ + start_hour max_load 0 0 2 0 4 1 @@ -45,6 +46,22 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: 22 0 """ + min_load_str: str = """ + start_hour min_load + 0 0 + 2 0 + 4 0 + 6 0 + 8 3 + 10 3 + 12 1 + 14 3 + 16 3 + 18 1 + 20 1 + 22 0 + """ + tasks_str: str = """ name duration load priority t1 60 3 2 @@ -79,16 +96,17 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]: t30 90 4 2 """ - capacity_df = pd.read_table(io.StringIO(capacity_str), sep=r"\s+") + max_load_df = pd.read_table(io.StringIO(max_load_str), sep=r"\s+") + min_load_df = pd.read_table(io.StringIO(min_load_str), sep=r"\s+") tasks_df = pd.read_table(io.StringIO(tasks_str), index_col=0, sep=r"\s+") - return capacity_df, tasks_df + return max_load_df, min_load_df, tasks_df # [END data_model] -def main() -> None: +def main(_) -> None: """Create the model and solves it.""" # [START data] - capacity_df, tasks_df = create_data_model() + max_load_df, min_load_df, tasks_df = create_data_model() # [END data] # [END program_part1] @@ -98,11 +116,11 @@ def main() -> None: # [END model] # Get the max capacity from the capacity dataframe. 
- max_capacity = capacity_df.capacity.max() - print(f"Max capacity = {max_capacity}") + max_load = max_load_df.max_load.max() + print(f"Max capacity = {max_load}") print(f"#tasks = {len(tasks_df)}") - minutes_per_period: int = 120 + minutes_per_hour: int = 60 horizon: int = 24 * 60 # [START program_part2] @@ -120,24 +138,70 @@ def main() -> None: sizes=tasks_df.duration, are_present=performed, ) + + # Set up complement intervals (from 0 to start, and from start + size to + # horizon). + prefix_intervals = model.new_optional_interval_var_series( + name="prefix_intervals", + index=tasks_df.index, + starts=0, + sizes=starts, + ends=starts, + are_present=performed, + ) + + suffix_intervals = model.new_optional_interval_var_series( + name="suffix_intervals", + index=tasks_df.index, + starts=starts + tasks_df.duration, + sizes=horizon - starts - tasks_df.duration, + ends=horizon, + are_present=performed, + ) # [END variables] # [START constraints] - # Set up the profile. We use fixed (intervals, demands) to fill in the space - # between the actual load profile and the max capacity. - time_period_intervals = model.new_fixed_size_interval_var_series( - name="time_period_intervals", - index=capacity_df.index, - starts=capacity_df.start_hour * minutes_per_period, - sizes=minutes_per_period, + # Set up the max profile. We use fixed (intervals, demands) to fill in the + # space between the actual max load profile and the max capacity. + time_period_max_intervals = model.new_fixed_size_interval_var_series( + name="time_period_max_intervals", + index=max_load_df.index, + starts=max_load_df.start_hour * minutes_per_hour, + sizes=minutes_per_hour * 2, ) - time_period_heights = max_capacity - capacity_df.capacity + time_period_max_heights = max_load - max_load_df.max_load - # Cumulative constraint. + # Cumulative constraint for the max profile. 
model.add_cumulative( - intervals.to_list() + time_period_intervals.to_list(), - tasks_df.load.to_list() + time_period_heights.to_list(), - max_capacity, + intervals.to_list() + time_period_max_intervals.to_list(), + tasks_df.load.to_list() + time_period_max_heights.to_list(), + max_load, + ) + + # Set up the min profile. We use complement intervals to maintain the + # complement of the work load, and fixed intervals to enforce the min + # number of active workers per time period. + time_period_min_intervals = model.new_fixed_size_interval_var_series( + name="time_period_min_intervals", + index=min_load_df.index, + starts=min_load_df.start_hour * minutes_per_hour, + sizes=minutes_per_hour * 2, + ) + time_period_min_heights = min_load_df.min_load + + sum_of_demands = sum(tasks_df.load) + complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity") + model.add(complement_capacity == performed.dot(tasks_df.load)) + + # Cumulative constraint for the min profile. + model.add_cumulative( + prefix_intervals.to_list() + + suffix_intervals.to_list() + + time_period_min_intervals.to_list(), + tasks_df.load.to_list() + + tasks_df.load.to_list() + + time_period_min_heights.to_list(), + complement_capacity, ) # [END constraints] @@ -152,7 +216,7 @@ def main() -> None: # Create the solver and solve the model. 
solver = cp_model.CpSolver() solver.parameters.log_search_progress = True - solver.parameters.num_workers = 8 + solver.parameters.num_workers = 16 solver.parameters.max_time_in_seconds = 30.0 status = solver.solve(model) # [END solve] @@ -174,6 +238,6 @@ def main() -> None: if __name__ == "__main__": - main() + app.run(main) # [END program_part2] # [END program] diff --git a/ortools/sat/samples/step_function_sample_sat.go b/ortools/sat/samples/step_function_sample_sat.go index 3545973734..5fb4e66f9f 100644 --- a/ortools/sat/samples/step_function_sample_sat.go +++ b/ortools/sat/samples/step_function_sample_sat.go @@ -51,7 +51,7 @@ func stepFunctionSampleSat() error { // expr == 2 on [0, 1] U [3, 4] U [11, 20] b2 := model.NewBoolVar() - d2 := cpmodel.FromIntervals([]cpmodel.ClosedInterval{{0, 1}, {3, 4}, {11, 20}}) + d2 := cpmodel.FromIntervals([]cpmodel.ClosedInterval{{Start: 0, End: 1}, {Start: 3, End: 4}, {Start: 11, End: 20}}) model.AddLinearConstraintForDomain(x, d2).OnlyEnforceIf(b2) model.AddEquality(expr, cpmodel.NewConstant(2)).OnlyEnforceIf(b2) From 45ca1cb2bc109a56e1b818d2b1bf519c6b3faa18 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 2 Apr 2024 16:16:27 +0200 Subject: [PATCH 038/392] use lazy copy in linear solver --- ortools/linear_solver/linear_solver.h | 2 +- ortools/linear_solver/scip_interface.cc | 2 +- ortools/linear_solver/solve.cc | 2 +- ortools/linear_solver/solve_mp_model.cc | 25 ++++++---------------- ortools/linear_solver/solve_mp_model.h | 16 ++++++-------- ortools/linear_solver/wrappers/BUILD.bazel | 1 - 6 files changed, 16 insertions(+), 32 deletions(-) diff --git a/ortools/linear_solver/linear_solver.h b/ortools/linear_solver/linear_solver.h index dcb023ca57..8315b5aeb3 100644 --- a/ortools/linear_solver/linear_solver.h +++ b/ortools/linear_solver/linear_solver.h @@ -599,7 +599,7 @@ class MPSolver { ABSL_DEPRECATED("Prefer SolveMPModel() from solve_mp_model.h.") static void SolveLazyMutableRequest(LazyMutableCopy request, 
MPSolutionResponse* response, - std::atomic* interrupt = nullptr); + std::atomic* interrupt = nullptr); ABSL_DEPRECATED( "Prefer SolverTypeSupportsInterruption() from solve_mp_model.h.") diff --git a/ortools/linear_solver/scip_interface.cc b/ortools/linear_solver/scip_interface.cc index b2c780c78e..7777f99c2e 100644 --- a/ortools/linear_solver/scip_interface.cc +++ b/ortools/linear_solver/scip_interface.cc @@ -879,7 +879,7 @@ bool SCIPInterface::SupportsDirectlySolveProto( if (interrupt != nullptr) return false; return true; - } +} MPSolutionResponse SCIPInterface::DirectlySolveProto( LazyMutableCopy request, std::atomic* interrupt) { diff --git a/ortools/linear_solver/solve.cc b/ortools/linear_solver/solve.cc index 7069b5e1f0..b5e914fe9e 100644 --- a/ortools/linear_solver/solve.cc +++ b/ortools/linear_solver/solve.cc @@ -282,7 +282,7 @@ void Run() { const auto read_sol = ParseSolFile(absl::GetFlag(FLAGS_sol_hint), request_proto.model()); CHECK_OK(read_sol.status()); - const MPSolutionResponse sol = read_sol.value(); + const MPSolutionResponse& sol = read_sol.value(); if (request_proto.model().has_solution_hint()) { LOG(WARNING) << "Overwriting solution hint found in the request with " << "solution from " << absl::GetFlag(FLAGS_sol_hint); diff --git a/ortools/linear_solver/solve_mp_model.cc b/ortools/linear_solver/solve_mp_model.cc index 1a490b67f9..1104f38d49 100644 --- a/ortools/linear_solver/solve_mp_model.cc +++ b/ortools/linear_solver/solve_mp_model.cc @@ -19,37 +19,24 @@ #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/solve_interrupter.h" namespace operations_research { - // TODO(b/311704821): this function should not delegate to MPSolver, also true - // for the functions below. 
-MPSolutionResponse SolveMPModel(const MPModelRequest& model_request, +// TODO(b/311704821): this function should not delegate to MPSolver, also true +// for the functions below. +MPSolutionResponse SolveMPModel(LazyMutableCopy request, SolveInterrupter* interrupter) { MPSolutionResponse response; if (interrupter != nullptr) { std::atomic atomic_bool = false; ScopedSolveInterrupterCallback cleanup( interrupter, [&atomic_bool] { atomic_bool.store(true); }); - MPSolver::SolveLazyMutableRequest(model_request, &response, &atomic_bool); - } else { - MPSolver::SolveLazyMutableRequest(model_request, &response); - } - return response; -} - -MPSolutionResponse SolveMPModel(MPModelRequest&& model_request, - SolveInterrupter* interrupter) { - MPSolutionResponse response; - if (interrupter != nullptr) { - std::atomic atomic_bool = false; - ScopedSolveInterrupterCallback cleanup( - interrupter, [&atomic_bool] { atomic_bool.store(true); }); - MPSolver::SolveLazyMutableRequest(std::move(model_request), &response, + MPSolver::SolveLazyMutableRequest(std::move(request), &response, &atomic_bool); } else { - MPSolver::SolveLazyMutableRequest(std::move(model_request), &response); + MPSolver::SolveLazyMutableRequest(std::move(request), &response); } return response; } diff --git a/ortools/linear_solver/solve_mp_model.h b/ortools/linear_solver/solve_mp_model.h index d56754d248..ea6917dccb 100644 --- a/ortools/linear_solver/solve_mp_model.h +++ b/ortools/linear_solver/solve_mp_model.h @@ -22,6 +22,7 @@ #include #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/solve_interrupter.h" namespace operations_research { @@ -30,21 +31,18 @@ namespace operations_research { * Solves the model encoded by a MPModelRequest protocol buffer and returns the * solution encoded as a MPSolutionResponse. 
* + * LazyMutableCopy<> accept both 'const MPModelRequest&' and 'MPModelRequest&&' + * prefer to call this with the std::move() version if you no longer need the + * request. It will allows to reclaim the request memory as soon as it is + * converted to one of the solver internal data representation. + * * If interrupter is non-null, one can call interrupter->Interrupt() to stop the * solver earlier. Interruption is only supported if * SolverTypeSupportsInterruption() returns true for the requested solver. * Passing a non-null pointer with any other solver type immediately returns an * MPSOLVER_INCOMPATIBLE_OPTIONS error. */ -MPSolutionResponse SolveMPModel(const MPModelRequest& model_request, - SolveInterrupter* interrupter = nullptr); - -/** - * This version should be preferred if the request is not needed afterwards. - * It will allows to reclaim the request memory as soon as it is converted to - * one of the solver internal data representation. - */ -MPSolutionResponse SolveMPModel(MPModelRequest&& request, +MPSolutionResponse SolveMPModel(LazyMutableCopy request, SolveInterrupter* interrupter = nullptr); bool SolverTypeSupportsInterruption(MPModelRequest::SolverType solver); diff --git a/ortools/linear_solver/wrappers/BUILD.bazel b/ortools/linear_solver/wrappers/BUILD.bazel index f958ac2cc4..727d5eae32 100644 --- a/ortools/linear_solver/wrappers/BUILD.bazel +++ b/ortools/linear_solver/wrappers/BUILD.bazel @@ -49,7 +49,6 @@ cc_library( "//ortools/lp_data:lp_parser", "//ortools/lp_data:mps_reader", "//ortools/util:logging", - "//ortools/util:solve_interrupter", "//ortools/xpress:environment", ], ) From 382ab5d35beaa984ee20b1429e3abbdda86f9df2 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 3 Apr 2024 11:43:20 +0200 Subject: [PATCH 039/392] [CP-SAT] do not add the linear part of an interval in the model, create it when copying the model for presolve; improve work sharing in shared_tree_mode --- .../java/com/google/ortools/sat/CpModel.java | 2 - 
ortools/sat/BUILD.bazel | 1 + ortools/sat/cp_model.cc | 2 - ortools/sat/cp_model_presolve.cc | 44 +++++++++++++++++-- ortools/sat/cp_model_presolve.h | 5 +++ ortools/sat/csharp/CpModel.cs | 2 - ortools/sat/parameters_validation.cc | 2 + ortools/sat/probing.cc | 4 +- ortools/sat/python/cp_model.py | 9 ---- ortools/sat/python/cp_model_test.py | 34 +++++++------- ortools/sat/restart.h | 4 +- ortools/sat/sat_parameters.proto | 10 ++++- ortools/sat/work_assignment.cc | 43 ++++++++++++------ ortools/sat/work_assignment.h | 10 +++++ ortools/util/running_stat.h | 2 +- 15 files changed, 119 insertions(+), 55 deletions(-) diff --git a/ortools/java/com/google/ortools/sat/CpModel.java b/ortools/java/com/google/ortools/sat/CpModel.java index f9e98c73b7..4555ade33a 100644 --- a/ortools/java/com/google/ortools/sat/CpModel.java +++ b/ortools/java/com/google/ortools/sat/CpModel.java @@ -730,7 +730,6 @@ public final class CpModel { */ public IntervalVar newIntervalVar( LinearArgument start, LinearArgument size, LinearArgument end, String name) { - addEquality(LinearExpr.newBuilder().add(start).add(size), end); return new IntervalVar(modelBuilder, getLinearExpressionProtoBuilderFromLinearArgument(start, /*negate=*/false), getLinearExpressionProtoBuilderFromLinearArgument(size, /*negate=*/false), @@ -784,7 +783,6 @@ public final class CpModel { */ public IntervalVar newOptionalIntervalVar(LinearArgument start, LinearArgument size, LinearArgument end, Literal isPresent, String name) { - addEquality(LinearExpr.newBuilder().add(start).add(size), end).onlyEnforceIf(isPresent); return new IntervalVar(modelBuilder, getLinearExpressionProtoBuilderFromLinearArgument(start, /*negate=*/false), getLinearExpressionProtoBuilderFromLinearArgument(size, /*negate=*/false), diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 48c412b7cd..3db7378c9f 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2274,6 +2274,7 @@ cc_library( ":sat_solver", ":synchronization", 
":util", + "//ortools/util:running_stat", "//ortools/util:strong_integers", "//ortools/util:time_limit", "@com_google_absl//absl/base:core_headers", diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 5c9da884f8..94e097d4f8 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -724,8 +724,6 @@ IntervalVar CpModelBuilder::NewOptionalIntervalVar(const LinearExpr& start, const LinearExpr& size, const LinearExpr& end, BoolVar presence) { - AddEquality(LinearExpr(start) + size, end).OnlyEnforceIf(presence); - const int index = cp_model_.constraints_size(); ConstraintProto* const ct = cp_model_.add_constraints(); ct->add_enforcement_literal(presence.index_); diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 3a461cc417..1d29544102 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -11395,6 +11395,9 @@ bool ModelCopy::ImportAndSimplifyConstraints(const CpModelProto& in_model, break; case ConstraintProto::kInterval: if (!CopyInterval(ct, c, ignore_names)) return CreateUnsatModel(c, ct); + if (first_copy) { + AddLinearConstraintForInterval(ct); + } break; case ConstraintProto::kNoOverlap: if (first_copy) { @@ -11762,9 +11765,7 @@ bool ModelCopy::CopyInterval(const ConstraintProto& ct, int c, ConstraintProto* new_ct = context_->working_model->add_constraints(); if (ignore_names) { *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); - *new_ct->mutable_interval()->mutable_start() = ct.interval().start(); - *new_ct->mutable_interval()->mutable_size() = ct.interval().size(); - *new_ct->mutable_interval()->mutable_end() = ct.interval().end(); + *new_ct->mutable_interval() = ct.interval(); } else { *new_ct = ct; } @@ -11772,6 +11773,43 @@ bool ModelCopy::CopyInterval(const ConstraintProto& ct, int c, return true; } +void ModelCopy::AddLinearConstraintForInterval(const ConstraintProto& ct) { + // Add the linear constraint enforcement => (start + size == end). 
+ // + // We rely on the presolve for simplification, but deal with the trivial + // case of (start, offset, start + offset) here. + const IntervalConstraintProto& itv = ct.interval(); + if (itv.size().vars().empty() && + itv.start().offset() + itv.size().offset() == itv.end().offset() && + absl::Span(itv.start().vars()) == + absl::Span(itv.end().vars()) && + absl::Span(itv.start().coeffs()) == + absl::Span(itv.end().coeffs())) { + // Trivial constraint, nothing to do. + } else { + ConstraintProto* new_ct = context_->working_model->add_constraints(); + *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + + LinearConstraintProto* mutable_linear = new_ct->mutable_linear(); + mutable_linear->add_domain(0); + mutable_linear->add_domain(0); + AddLinearExpressionToLinearConstraint(itv.start(), 1, mutable_linear); + AddLinearExpressionToLinearConstraint(itv.size(), 1, mutable_linear); + AddLinearExpressionToLinearConstraint(itv.end(), -1, mutable_linear); + } + + // An enforced interval must have is size non-negative. + const LinearExpressionProto& size_expr = itv.size(); + if (context_->MinOf(size_expr) < 0) { + ConstraintProto* new_ct = context_->working_model->add_constraints(); + *new_ct->mutable_enforcement_literal() = ct.enforcement_literal(); + *new_ct->mutable_linear()->mutable_vars() = size_expr.vars(); + *new_ct->mutable_linear()->mutable_coeffs() = size_expr.coeffs(); + new_ct->mutable_linear()->add_domain(-size_expr.offset()); + new_ct->mutable_linear()->add_domain(std::numeric_limits::max()); + } +} + void ModelCopy::CopyAndMapNoOverlap(const ConstraintProto& ct) { // Note that we don't copy names or enforcement_literal (not supported) here. 
auto* new_ct = diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 4bc811475c..d8d1741cbd 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -398,7 +398,12 @@ class ModelCopy { bool CopyLinear(const ConstraintProto& ct); bool CopyAtMostOne(const ConstraintProto& ct); bool CopyExactlyOne(const ConstraintProto& ct); + + // If we "copy" an interval for a first time, we make sure to create the + // linear constraint between the start, size and end. This allow to simplify + // the input proto and client side code. bool CopyInterval(const ConstraintProto& ct, int c, bool ignore_names); + void AddLinearConstraintForInterval(const ConstraintProto& ct); // These function remove unperformed intervals. Note that they requires // interval to appear before (validated) as they test unperformed by testing diff --git a/ortools/sat/csharp/CpModel.cs b/ortools/sat/csharp/CpModel.cs index 12f605b387..a76bdc0a6a 100644 --- a/ortools/sat/csharp/CpModel.cs +++ b/ortools/sat/csharp/CpModel.cs @@ -817,7 +817,6 @@ public class CpModel LinearExpr startExpr = GetLinearExpr(start); LinearExpr sizeExpr = GetLinearExpr(size); LinearExpr endExpr = GetLinearExpr(end); - Add(startExpr + sizeExpr == endExpr); LinearExpressionProto startProto = GetLinearExpressionProto(startExpr); LinearExpressionProto sizeProto = GetLinearExpressionProto(sizeExpr); @@ -875,7 +874,6 @@ public class CpModel LinearExpr startExpr = GetLinearExpr(start); LinearExpr sizeExpr = GetLinearExpr(size); LinearExpr endExpr = GetLinearExpr(end); - Add(startExpr + sizeExpr == endExpr).OnlyEnforceIf(is_present); LinearExpressionProto startProto = GetLinearExpressionProto(startExpr); LinearExpressionProto sizeProto = GetLinearExpressionProto(sizeExpr); diff --git a/ortools/sat/parameters_validation.cc b/ortools/sat/parameters_validation.cc index 5d46526df0..b091cf80fc 100644 --- a/ortools/sat/parameters_validation.cc +++ b/ortools/sat/parameters_validation.cc @@ 
-91,6 +91,7 @@ std::string ValidateParameters(const SatParameters& params) { TEST_IS_FINITE(mip_max_valid_magnitude); TEST_IS_FINITE(mip_drop_tolerance); TEST_IS_FINITE(shared_tree_worker_objective_split_probability); + TEST_IS_FINITE(shared_tree_open_leaves_per_worker); TEST_POSITIVE(at_most_one_max_expansion_size); @@ -128,6 +129,7 @@ std::string ValidateParameters(const SatParameters& params) { TEST_POSITIVE(glucose_decay_increment_period); TEST_POSITIVE(shared_tree_max_nodes_per_worker); + TEST_POSITIVE(shared_tree_open_leaves_per_worker); TEST_POSITIVE(mip_var_scaling); // Test LP tolerances. diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index d4d24ef56a..101018680e 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -406,7 +406,7 @@ bool Prober::ProbeDnf(absl::string_view name, num_new_literals_fixed_ > previous_num_literals_fixed) { VLOG(1) << "ProbeDnf(" << name << ", num_fixed_literals=" << num_new_literals_fixed_ - previous_num_literals_fixed - << ", num_pushed_integer_bounds=" + << ", num_fixed_integer_bounds=" << num_new_integer_bounds_ - previous_num_integer_bounds << ", num_valid_conjunctions=" << num_valid_conjunctions << "/" << dnf.size() << ")"; @@ -498,7 +498,7 @@ bool LookForTrivialSatSolution(double deterministic_time_limit, Model* model, bool FailedLiteralProbingRound(ProbingOptions options, Model* model) { WallTimer wall_timer; wall_timer.Start(); - options.log_info |= VLOG_IS_ON(2); + options.log_info |= VLOG_IS_ON(1); // Reset the solver in case it was already used. auto* sat_solver = model->GetOrCreate(); diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index e8e88a1b8e..dbce4f0563 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2246,10 +2246,6 @@ class CpModel: An `IntervalVar` object. 
""" - lin = self.add(start + size == end) - if name: - lin.with_name("lin_" + name) - start_expr = self.parse_linear_expression(start) size_expr = self.parse_linear_expression(size) end_expr = self.parse_linear_expression(end) @@ -2420,11 +2416,6 @@ class CpModel: An `IntervalVar` object. """ - # add the linear constraint. - lin = self.add(start + size == end).only_enforce_if(is_present) - if name: - lin.with_name("lin_opt_" + name) - # Creates the IntervalConstraintProto object. is_present_index = self.get_or_make_boolean_index(is_present) start_expr = self.parse_linear_expression(start) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index f128bf5a9a..76c07e27a7 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -994,10 +994,10 @@ class CpModelTest(absltest.TestCase): x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 3, "y") i = model.new_interval_var(x, 3, y, "i") - self.assertEqual(1, i.index) + self.assertEqual(0, i.index) j = model.new_fixed_size_interval_var(x, 2, "j") - self.assertEqual(2, j.index) + self.assertEqual(1, j.index) start_expr = j.start_expr() size_expr = j.size_expr() end_expr = j.end_expr() @@ -1015,16 +1015,16 @@ class CpModelTest(absltest.TestCase): j = model.new_optional_interval_var(x, y, 10, b, "j") k = model.new_optional_interval_var(x, -y, 10, b, "k") l = model.new_optional_interval_var(x, 10, -y, b, "l") - self.assertEqual(1, i.index) - self.assertEqual(3, j.index) - self.assertEqual(5, k.index) - self.assertEqual(7, l.index) + self.assertEqual(0, i.index) + self.assertEqual(1, j.index) + self.assertEqual(2, k.index) + self.assertEqual(3, l.index) self.assertRaises(TypeError, model.new_optional_interval_var, 1, 2, 3, x, "x") self.assertRaises( TypeError, model.new_optional_interval_var, b + x, 2, 3, b, "x" ) self.assertRaises( - AttributeError, model.new_optional_interval_var, 1, 2, 3, b + 1, "x" + TypeError, model.new_optional_interval_var, 1, 
2, 3, b + 1, "x" ) def testNoOverlap(self): @@ -1036,10 +1036,10 @@ class CpModelTest(absltest.TestCase): i = model.new_interval_var(x, 3, y, "i") j = model.new_interval_var(x, 5, z, "j") ct = model.add_no_overlap([i, j]) - self.assertEqual(4, ct.index) + self.assertEqual(2, ct.index) self.assertLen(ct.proto.no_overlap.intervals, 2) - self.assertEqual(1, ct.proto.no_overlap.intervals[0]) - self.assertEqual(3, ct.proto.no_overlap.intervals[1]) + self.assertEqual(0, ct.proto.no_overlap.intervals[0]) + self.assertEqual(1, ct.proto.no_overlap.intervals[1]) def testNoOverlap2D(self): print("testNoOverlap2D") @@ -1050,13 +1050,13 @@ class CpModelTest(absltest.TestCase): i = model.new_interval_var(x, 3, y, "i") j = model.new_interval_var(x, 5, z, "j") ct = model.add_no_overlap_2d([i, j], [j, i]) - self.assertEqual(4, ct.index) + self.assertEqual(2, ct.index) self.assertLen(ct.proto.no_overlap_2d.x_intervals, 2) - self.assertEqual(1, ct.proto.no_overlap_2d.x_intervals[0]) - self.assertEqual(3, ct.proto.no_overlap_2d.x_intervals[1]) + self.assertEqual(0, ct.proto.no_overlap_2d.x_intervals[0]) + self.assertEqual(1, ct.proto.no_overlap_2d.x_intervals[1]) self.assertLen(ct.proto.no_overlap_2d.y_intervals, 2) - self.assertEqual(3, ct.proto.no_overlap_2d.y_intervals[0]) - self.assertEqual(1, ct.proto.no_overlap_2d.y_intervals[1]) + self.assertEqual(1, ct.proto.no_overlap_2d.y_intervals[0]) + self.assertEqual(0, ct.proto.no_overlap_2d.y_intervals[1]) def testCumulative(self): print("testCumulative") @@ -1073,7 +1073,7 @@ class CpModelTest(absltest.TestCase): demands = [1, 3, 5, 2, 4, 5, 3, 4, 2, 3] capacity = 4 ct = model.add_cumulative(intervals, demands, capacity) - self.assertEqual(20, ct.index) + self.assertEqual(10, ct.index) self.assertLen(ct.proto.cumulative.intervals, 10) self.assertRaises(TypeError, model.add_cumulative, [intervals[0], 3], [2, 3], 3) @@ -1614,7 +1614,7 @@ class CpModelTest(absltest.TestCase): + fixed_intervals.to_list() + absent_fixed_intervals.to_list() 
) - self.assertLen(model.proto.constraints, 19) + self.assertLen(model.proto.constraints, 13) if __name__ == "__main__": diff --git a/ortools/sat/restart.h b/ortools/sat/restart.h index ea8a9d4c2e..c268a7b7bd 100644 --- a/ortools/sat/restart.h +++ b/ortools/sat/restart.h @@ -57,8 +57,8 @@ class RestartPolicy { // Returns a string with the current restart statistics. std::string InfoString() const; - const RunningAverage& LbdRunningAverage() const { - return lbd_running_average_; + double LbdAverageSinceReset() const { + return lbd_running_average_.GlobalAverage(); } private: diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 04b08568ca..a0542b256d 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat"; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 281 +// NEXT TAG: 283 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -1023,6 +1023,14 @@ message SatParameters { optional double shared_tree_worker_objective_split_probability = 237 [default = 0.5]; + // Minimum number of restarts before a worker will replace a subtree + // that looks "bad" based on the average LBD of learned clauses. + optional int32 shared_tree_worker_min_restarts_per_subtree = 282 + [default = 32]; + + // How many open leaf nodes should the shared tree maintain per worker. + optional double shared_tree_open_leaves_per_worker = 281 [default = 2.0]; + // In order to limit total shared memory and communication overhead, limit the // total number of nodes that may be generated in the shared tree. 
If the // shared tree runs out of unassigned leaves, workers act as portfolio diff --git a/ortools/sat/work_assignment.cc b/ortools/sat/work_assignment.cc index 2611b8eff1..3bcd070b85 100644 --- a/ortools/sat/work_assignment.cc +++ b/ortools/sat/work_assignment.cc @@ -37,11 +37,11 @@ #include "ortools/sat/integer.h" #include "ortools/sat/integer_search.h" #include "ortools/sat/model.h" +#include "ortools/sat/restart.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" -#include "ortools/sat/util.h" #include "ortools/util/strong_integers.h" #include "ortools/util/time_limit.h" @@ -191,7 +191,8 @@ SharedTreeManager::SharedTreeManager(Model* model) : params_(*model->GetOrCreate()), num_workers_(std::max(1, params_.shared_tree_num_workers())), shared_response_manager_(model->GetOrCreate()), - num_splits_wanted_(num_workers_ - 1), + num_splits_wanted_( + num_workers_ * params_.shared_tree_open_leaves_per_worker() - 1), max_nodes_(params_.shared_tree_max_nodes_per_worker() >= std::numeric_limits::max() / num_workers_ ? 
std::numeric_limits::max() @@ -213,7 +214,7 @@ int SharedTreeManager::NumNodes() const { int SharedTreeManager::SplitsToGeneratePerWorker() const { absl::MutexLock mutex_lock(&mu_); - return std::min(num_splits_wanted_, + return std::min(num_splits_wanted_ / 2 + 1, max_nodes_ - static_cast(nodes_.size())); } @@ -488,7 +489,8 @@ void SharedTreeManager::RestartLockHeld() { nodes_[0].id = node_id_offset_; nodes_[0].children = {nullptr, nullptr}; unassigned_leaves_.clear(); - num_splits_wanted_ = num_workers_ - 1; + num_splits_wanted_ = + num_workers_ * params_.shared_tree_open_leaves_per_worker() - 1; num_restarts_ += 1; num_syncs_since_restart_ = 0; } @@ -511,7 +513,9 @@ SharedTreeWorker::SharedTreeWorker(Model* model) objective_(model->Get()), random_(model->GetOrCreate()), helper_(model->GetOrCreate()), - heuristics_(model->GetOrCreate()) {} + heuristics_(model->GetOrCreate()), + restart_policy_(model->GetOrCreate()), + assigned_tree_lbds_(/*window_size=*/8) {} const std::vector& SharedTreeWorker::DecisionReason(int level) { CHECK_LE(level, assigned_tree_literals_.size()); @@ -667,21 +671,32 @@ void SharedTreeWorker::MaybeProposeSplit() { } } +bool SharedTreeWorker::ShouldReplaceSubtree() { + // If we have no assignment, try to get one. + if (assigned_tree_.MaxLevel() == 0) return true; + if (restart_policy_->NumRestarts() < + parameters_->shared_tree_worker_min_restarts_per_subtree()) { + return false; + } + return assigned_tree_lbds_.WindowAverage() < + restart_policy_->LbdAverageSinceReset(); +} + void SharedTreeWorker::SyncWithSharedTree() { splits_wanted_ = manager_->SplitsToGeneratePerWorker(); VLOG(2) << "Splits wanted: " << splits_wanted_ << " " << parameters_->name(); manager_->SyncTree(assigned_tree_); - // If we have no assignment, try to get one. - // We also want to ensure unassigned nodes have their lower bounds bumped - // periodically, so workers need to occasionally replace open trees, but only - // at most once per restart. 
- // TODO(user): Ideally we should use some metric to replace a - // subtree when the worker is doing badly. - if (assigned_tree_.MaxLevel() == 0 || - (tree_assignment_restart_ < num_restarts_ && - absl::Bernoulli(*random_, 1e-2))) { + if (ShouldReplaceSubtree()) { + ++num_trees_; + VLOG(2) << parameters_->name() << " acquiring tree #" << num_trees_ + << " after " << num_restarts_ - tree_assignment_restart_ + << " restarts prev depth: " << assigned_tree_.MaxLevel() + << " target: " << assigned_tree_lbds_.WindowAverage() + << " lbd: " << restart_policy_->LbdAverageSinceReset(); manager_->ReplaceTree(assigned_tree_); tree_assignment_restart_ = num_restarts_; + assigned_tree_lbds_.Add(restart_policy_->LbdAverageSinceReset()); + restart_policy_->Reset(); } VLOG(2) << "Assigned level: " << assigned_tree_.MaxLevel() << " " << parameters_->name(); diff --git a/ortools/sat/work_assignment.h b/ortools/sat/work_assignment.h index 5ef04bae97..3e39f0c94a 100644 --- a/ortools/sat/work_assignment.h +++ b/ortools/sat/work_assignment.h @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include "ortools/sat/sat_solver.h" #include "ortools/sat/synchronization.h" #include "ortools/sat/util.h" +#include "ortools/util/running_stat.h" #include "ortools/util/strong_integers.h" #include "ortools/util/time_limit.h" @@ -242,6 +244,7 @@ class SharedTreeWorker { std::optional EncodeDecision(Literal decision); bool NextDecision(LiteralIndex* decision_index); void MaybeProposeSplit(); + bool ShouldReplaceSubtree(); // Add any implications to the clause database for the current level. // Return true if any new information was added. 
@@ -263,8 +266,10 @@ class SharedTreeWorker { ModelRandomGenerator* random_; IntegerSearchHelper* helper_; SearchHeuristics* heuristics_; + RestartPolicy* restart_policy_; int64_t num_restarts_ = 0; + int64_t num_trees_ = 0; ProtoTrail assigned_tree_; std::vector assigned_tree_literals_; @@ -274,6 +279,11 @@ class SharedTreeWorker { int splits_wanted_ = 1; std::vector reason_; + // Stores the average LBD of learned clauses for each tree assigned since it + // was assigned. + // If a tree has worse LBD than the average over the last few trees we replace + // the tree. + RunningAverage assigned_tree_lbds_; }; } // namespace operations_research::sat diff --git a/ortools/util/running_stat.h b/ortools/util/running_stat.h index 1188c3fb8b..37833f1c6d 100644 --- a/ortools/util/running_stat.h +++ b/ortools/util/running_stat.h @@ -62,7 +62,7 @@ class RunningAverage { std::deque values_; }; -// Simple class to compute efficiently the maximum over a fixed size window +// Simple class to efficiently compute the maximum over a fixed size window // of a numeric stream. This works in constant average amortized time. 
template class RunningMax { From 4cbd27dae94609e1c9413e4c4614c250758f9d9e Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 8 Apr 2024 11:52:13 +0200 Subject: [PATCH 040/392] sync with main --- .bazelrc | 4 +- WORKSPACE | 30 +- bazel/notebook_requirements.in | 2 +- bazel/notebook_requirements.txt | 2 +- bazel/ortools_requirements.in | 2 +- bazel/ortools_requirements.txt | 2 +- cmake/cpp.cmake | 2 +- cmake/dependencies/CMakeLists.txt | 11 +- cmake/host.CMakeLists.txt | 4 +- cmake/ortoolsConfig.cmake.in | 4 +- examples/python/code_samples.bzl | 7 +- examples/tests/init_test.cc | 2 +- ortools/algorithms/BUILD.bazel | 4 +- ortools/algorithms/python/BUILD.bazel | 5 +- ortools/algorithms/set_cover_orlib_test.cc | 29 +- ortools/base/file.cc | 3 +- ortools/base/threadpool.cc | 6 +- ortools/base/threadpool.h | 1 + ortools/dotnet/Google.OrTools-full.csproj.in | 2 +- ortools/dotnet/Google.OrTools-local.csproj.in | 2 +- ortools/glop/lp_solver.cc | 3 +- ortools/graph/BUILD.bazel | 3 +- .../graph/dag_constrained_shortest_path.cc | 4 +- ortools/graph/dag_constrained_shortest_path.h | 865 ++++++++++++++---- .../dag_constrained_shortest_path_test.cc | 289 ++++-- ortools/graph/java/BUILD.bazel | 4 +- ortools/graph/max_flow.h | 31 +- ortools/graph/min_cost_flow.h | 16 + ortools/graph/samples/assignment_min_flow.py | 1 + ortools/graph/samples/balance_min_flow.py | 1 + ortools/graph/solve_flow_model.cc | 3 +- ortools/gurobi/BUILD.bazel | 8 + ortools/gurobi/CMakeLists.txt | 4 +- ortools/gurobi/gurobi_stdout_matchers.cc | 35 + ortools/gurobi/gurobi_stdout_matchers.h | 37 + ortools/init/csharp/InitTests.cs | 2 +- ortools/init/csharp/init.i | 2 +- ortools/init/init.h | 7 +- ortools/init/java/InitTest.java | 2 +- ortools/init/java/init.i | 2 +- ortools/init/python/init.cc | 5 +- ortools/init/python/init_doc.h | 2 +- ortools/init/python/init_test.py | 2 +- ortools/java/pom-full.xml.in | 2 +- ortools/java/pom-local.xml.in | 2 +- ortools/java/pom.xml.in | 2 +- 
ortools/linear_solver/CMakeLists.txt | 2 +- ortools/linear_solver/gurobi_interface.cc | 15 +- .../linear_solver/proto_solver/BUILD.bazel | 6 + .../linear_solver/proto_solver/CMakeLists.txt | 1 + .../proto_solver/highs_proto_solver.cc | 279 +++++- ortools/linear_solver/python/BUILD.bazel | 2 +- .../python/model_builder_test.py | 15 + .../linear_solver/samples/basic_example.py | 12 +- ortools/linear_solver/wrappers/BUILD.bazel | 2 + .../wrappers/model_builder_helper.cc | 21 + ortools/packing/binpacking_2d_parser.cc | 3 +- ortools/packing/binpacking_2d_parser.h | 3 +- ortools/pdlp/primal_dual_hybrid_gradient.cc | 3 + ortools/pdlp/python/BUILD.bazel | 2 +- ortools/python/setup.py.in | 2 +- ortools/sat/cp_model_solver.cc | 66 +- ortools/sat/cumulative.cc | 3 - ortools/sat/disjunctive.cc | 80 +- ortools/sat/disjunctive.h | 5 +- ortools/sat/docs/scheduling.md | 4 +- ortools/sat/integer.cc | 109 +-- ortools/sat/integer.h | 35 +- ortools/sat/integer_expr.h | 45 +- ortools/sat/intervals.cc | 41 +- ortools/sat/intervals.h | 9 +- ortools/sat/linear_propagation.cc | 28 +- ortools/sat/precedences.cc | 252 +++-- ortools/sat/precedences.h | 117 +-- ortools/sat/presolve_context.cc | 8 +- ortools/sat/python/BUILD.bazel | 2 +- ortools/sat/python/cp_model.py | 20 +- ortools/sat/sat_parameters.proto | 7 +- ortools/sat/sat_solver.cc | 53 +- .../scheduling/jobshop_scheduling_parser.cc | 2 +- .../scheduling/jobshop_scheduling_parser.h | 2 +- ortools/util/file_util.cc | 2 +- ortools/util/parse_proto.cc | 13 +- ortools/util/qap_reader.cc | 3 +- ortools/util/qap_reader.h | 4 +- patches/BUILD.bazel | 3 +- patches/highs.patch | 88 ++ ...tobuf-v25.3.patch => protobuf-v26.1.patch} | 21 +- 88 files changed, 2029 insertions(+), 819 deletions(-) create mode 100644 ortools/gurobi/gurobi_stdout_matchers.cc create mode 100644 ortools/gurobi/gurobi_stdout_matchers.h create mode 100644 patches/highs.patch rename patches/{protobuf-v25.3.patch => protobuf-v26.1.patch} (77%) diff --git a/.bazelrc 
b/.bazelrc index bbd40359de..e866c014f4 100644 --- a/.bazelrc +++ b/.bazelrc @@ -22,8 +22,8 @@ build --apple_platform_type=macos build --enable_platform_specific_config build:linux --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare -build:macos --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --cxxopt=-mmacos-version-min=10.15 --features=-supports_dynamic_linker -build:macos --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare --host_cxxopt=-mmacos-version-min=10.15 +build:macos --cxxopt="-std=c++17" --cxxopt=-Wno-sign-compare --cxxopt=-mmacos-version-min=10.15 --cxxopt=-Wno-dangling-field --features=-supports_dynamic_linker +build:macos --host_cxxopt="-std=c++17" --host_cxxopt=-Wno-sign-compare --host_cxxopt=-mmacos-version-min=10.15 --host_cxxopt=-Wno-dangling-field build:windows --cxxopt="/std:c++20" --host_cxxopt="/std:c++20" # Enable the runfiles symlink tree on Windows. This makes it possible to build diff --git a/WORKSPACE b/WORKSPACE index c43d336172..3a52c65c8c 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -65,7 +65,7 @@ git_repository( git_repository( name = "rules_python", - tag = "0.29.0", + tag = "0.31.0", remote = "https://github.com/bazelbuild/rules_python.git", ) @@ -78,13 +78,6 @@ new_git_repository( remote = "https://github.com/madler/zlib.git", ) -## Re2 -git_repository( - name = "com_google_re2", - tag = "2024-02-01", - remote = "https://github.com/google/re2.git", -) - ## Abseil-cpp git_repository( name = "com_google_absl", @@ -94,15 +87,23 @@ git_repository( remote = "https://github.com/abseil/abseil-cpp.git", ) +## Re2 +git_repository( + name = "com_google_re2", + tag = "2024-03-01", + remote = "https://github.com/google/re2.git", + repo_mapping = {"@abseil-cpp": "@com_google_absl"}, +) + ## Protobuf # proto_library, cc_proto_library, and java_proto_library rules implicitly # depend on @com_google_protobuf for protoc and proto runtimes. 
# This statement defines the @com_google_protobuf repo. git_repository( name = "com_google_protobuf", - patches = ["//patches:protobuf-v25.3.patch"], + patches = ["//patches:protobuf-v26.1.patch"], patch_args = ["-p1"], - tag = "v25.3", + tag = "v26.1", remote = "https://github.com/protocolbuffers/protobuf.git", ) # Load common dependencies. @@ -155,7 +156,7 @@ cc_library( git_repository( name = "highs", - branch = "bazel", + branch = "v1.7.0", remote = "https://github.com/ERGO-Code/HiGHS.git", ) @@ -214,6 +215,13 @@ load("@ortools_notebook_deps//:requirements.bzl", install_notebook_deps="install_deps") install_notebook_deps() +# Protobuf +load("@com_google_protobuf//bazel:system_python.bzl", "system_python") +system_python( + name = "system_python", + minimum_python_version = "3.8", +) + # Absl python library http_archive( name = "com_google_absl_py", diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index 3f35853168..b168475e1b 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -2,7 +2,7 @@ absl-py==2.0.0 immutabledict==3.0.0 numpy==1.26.1 -protobuf==4.25.3 +protobuf==5.26.1 requests==2.32.0 scipy==1.11.3 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index 3620a22715..85cc4a4c62 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -230,7 +230,7 @@ prompt-toolkit==3.0.39 # via # ipython # jupyter-console -protobuf==4.25.3 +protobuf==5.26.1 # via # -r notebook_requirements.in # mypy-protobuf diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index a585bf75ae..54e03db612 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -2,7 +2,7 @@ absl-py==2.0.0 immutabledict==3.0.0 numpy==1.26.1 -protobuf==4.25.3 +protobuf==5.26.1 requests==2.32.0 scipy==1.11.3 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index 5385b3a8eb..b70d7fd3eb 100644 --- 
a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -45,7 +45,7 @@ platformdirs==3.10.0 # via # black # virtualenv -protobuf==4.25.3 +protobuf==5.26.1 # via # -r ortools_requirements.in # mypy-protobuf diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 705986209a..1280d28978 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -358,7 +358,7 @@ target_link_libraries(${PROJECT_NAME} PUBLIC ${COINOR_DEPS} $<$:CPLEX::CPLEX> $<$:GLPK::GLPK> - $<$:HIGHS::HIGHS> + $<$:highs::highs> ${PDLP_DEPS} $<$:libscip> Threads::Threads) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 8688677a37..91918e9f8e 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -104,9 +104,9 @@ if(BUILD_Protobuf) FetchContent_Declare( Protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v25.3" + GIT_TAG "v26.1" GIT_SUBMODULES "" - PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v25.3.patch") + PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v26.1.patch") FetchContent_MakeAvailable(Protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) message(CHECK_PASS "fetched") @@ -122,8 +122,8 @@ if(BUILD_re2) FetchContent_Declare( re2 GIT_REPOSITORY "https://github.com/google/re2.git" - GIT_TAG "2024-02-01" - #PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-02-01.patch" + GIT_TAG "2024-03-01" + #PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-03-01.patch" ) FetchContent_MakeAvailable(re2) list(POP_BACK CMAKE_MESSAGE_INDENT) @@ -235,7 +235,8 @@ if(BUILD_HIGHS) FetchContent_Declare( highs GIT_REPOSITORY "https://github.com/ERGO-Code/HiGHS.git" - GIT_TAG "v1.6.0" + GIT_TAG "v1.7.0" + PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/highs.patch" ) FetchContent_MakeAvailable(highs) 
list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/cmake/host.CMakeLists.txt b/cmake/host.CMakeLists.txt index 965e1601e8..4c2ae0ae50 100644 --- a/cmake/host.CMakeLists.txt +++ b/cmake/host.CMakeLists.txt @@ -121,9 +121,9 @@ set(protobuf_WITH_ZLIB OFF) FetchContent_Declare( protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v25.3" + GIT_TAG "v26.1" GIT_SUBMODULES "" - PATCH_COMMAND git apply "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v25.3.patch") + PATCH_COMMAND git apply "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v26.1.patch") FetchContent_MakeAvailable(protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) message(CHECK_PASS "fetched") diff --git a/cmake/ortoolsConfig.cmake.in b/cmake/ortoolsConfig.cmake.in index a893537ebb..9372ce9ab9 100644 --- a/cmake/ortoolsConfig.cmake.in +++ b/cmake/ortoolsConfig.cmake.in @@ -54,8 +54,8 @@ if(@USE_GLPK@) endif() if(@USE_HIGHS@) - if(NOT TARGET HIGHS::HIGHS) - find_dependency(HIGHS REQUIRED ${CONFIG_FLAG}) + if(NOT TARGET highs::highs) + find_dependency(highs REQUIRED ${CONFIG_FLAG}) endif() endif() diff --git a/examples/python/code_samples.bzl b/examples/python/code_samples.bzl index 34617ba373..2c465e361b 100644 --- a/examples/python/code_samples.bzl +++ b/examples/python/code_samples.bzl @@ -14,6 +14,7 @@ """Helper macro to compile and test code samples.""" load("@pip_deps//:requirements.bzl", "requirement") +load("@rules_python//python:defs.bzl", "py_binary", "py_test") PYTHON_DEPS = [ "//ortools/init/python:init", @@ -32,7 +33,7 @@ PYTHON_DEPS = [ ] def code_sample_compile_py(name): - native.py_binary( + py_binary( name = name + "_py3", srcs = [name + ".py"], main = name + ".py", @@ -42,7 +43,7 @@ def code_sample_compile_py(name): ) def code_sample_test_py(name): - native.py_test( + py_test( name = name + "_py_test", size = "medium", srcs = [name + ".py"], @@ -53,7 +54,7 @@ def code_sample_test_py(name): ) def code_sample_test_arg_py(name, suffix, args, data): - native.py_test( + 
py_test( name = name + "_" + suffix + "_py_test", size = "medium", srcs = [name + ".py"], diff --git a/examples/tests/init_test.cc b/examples/tests/init_test.cc index eb04dfb329..8e17bfd739 100644 --- a/examples/tests/init_test.cc +++ b/examples/tests/init_test.cc @@ -28,7 +28,7 @@ void TestFlags() { cpp_flags.log_prefix = true; cpp_flags.cp_model_dump_prefix = "init"; cpp_flags.cp_model_dump_models = true; - cpp_flags.cp_model_dump_lns = true; + cpp_flags.cp_model_dump_submodels = true; cpp_flags.cp_model_dump_response = true; CppBridge::SetFlags(cpp_flags); } diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index a2956585b7..3494c5022d 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/algorithms/python/BUILD.bazel b/ortools/algorithms/python/BUILD.bazel index f18af25005..f4ddbd6baa 100644 --- a/ortools/algorithms/python/BUILD.bazel +++ b/ortools/algorithms/python/BUILD.bazel @@ -11,12 +11,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") + # Python wrapper for .. 
load("@pip_deps//:requirements.bzl", "requirement") load("@pybind11_bazel//:build_defs.bzl", "pybind_extension") -load("@rules_python//python:defs.bzl", "py_test") -load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@rules_cc//cc:defs.bzl", "cc_library") +load("@rules_python//python:defs.bzl", "py_test") # OSS solvers bool_flag( diff --git a/ortools/algorithms/set_cover_orlib_test.cc b/ortools/algorithms/set_cover_orlib_test.cc index 20f1b542ae..08bb6be01a 100644 --- a/ortools/algorithms/set_cover_orlib_test.cc +++ b/ortools/algorithms/set_cover_orlib_test.cc @@ -15,7 +15,6 @@ #include #include -#include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" #include "absl/time/time.h" @@ -122,26 +121,26 @@ const char data_dir[] = #define ORLIB_TEST(name, best_objective, expected_objective, size, function) \ TEST(OrlibTest, APPEND_AND_EVAL(TestOnLine, __LINE__)) { \ - auto filespec = file::JoinPathRespectAbsolute( \ - absl::GetFlag(FLAGS_test_srcdir), data_dir, name); \ + auto filespec = \ + file::JoinPathRespectAbsolute(::testing::SrcDir(), data_dir, name); \ LOG(INFO) << "Reading " << name; \ operations_research::SetCoverModel model = function(filespec); \ double cost = RunSolver(name, &model); \ (void)cost; \ } -#define ORLIB_UNICOST_TEST(name, best_objective, expected_objective, size, \ - function) \ - TEST(OrlibUnicostTest, APPEND_AND_EVAL(TestOnLine, __LINE__)) { \ - auto filespec = file::JoinPathRespectAbsolute( \ - absl::GetFlag(FLAGS_test_srcdir), data_dir, name); \ - LOG(INFO) << "Reading " << name; \ - operations_research::SetCoverModel model = function(filespec); \ - for (int i = 0; i < model.num_subsets(); ++i) { \ - model.SetSubsetCost(i, 1.0); \ - } \ - double cost = RunSolver(absl::StrCat(name, "_unicost"), &model); \ - (void)cost; \ +#define ORLIB_UNICOST_TEST(name, best_objective, expected_objective, size, \ + function) \ + TEST(OrlibUnicostTest, APPEND_AND_EVAL(TestOnLine, __LINE__)) { \ + auto filespec 
= \ + file::JoinPathRespectAbsolute(::testing::SrcDir(), data_dir, name); \ + LOG(INFO) << "Reading " << name; \ + operations_research::SetCoverModel model = function(filespec); \ + for (int i = 0; i < model.num_subsets(); ++i) { \ + model.SetSubsetCost(i, 1.0); \ + } \ + double cost = RunSolver(absl::StrCat(name, "_unicost"), &model); \ + (void)cost; \ } #define SCP_TEST(name, best_objective, expected_objective, size) \ diff --git a/ortools/base/file.cc b/ortools/base/file.cc index 6588bb6717..a5c298506c 100644 --- a/ortools/base/file.cc +++ b/ortools/base/file.cc @@ -240,7 +240,8 @@ namespace { class NoOpErrorCollector : public google::protobuf::io::ErrorCollector { public: ~NoOpErrorCollector() override = default; - void AddError(int line, int column, const std::string& message) override {} + void RecordError(int /*line*/, int /*column*/, + absl::string_view /*message*/) override {} }; } // namespace diff --git a/ortools/base/threadpool.cc b/ortools/base/threadpool.cc index 16f352d485..9dc07fa315 100644 --- a/ortools/base/threadpool.cc +++ b/ortools/base/threadpool.cc @@ -29,8 +29,10 @@ void RunWorker(void* data) { } } -ThreadPool::ThreadPool(absl::string_view prefix, int num_workers) - : num_workers_(num_workers) {} +ThreadPool::ThreadPool(int num_threads) : num_workers_(num_threads) {} + +ThreadPool::ThreadPool(absl::string_view /*prefix*/, int num_threads) + : num_workers_(num_threads) {} ThreadPool::~ThreadPool() { if (started_) { diff --git a/ortools/base/threadpool.h b/ortools/base/threadpool.h index 0f7e6a8df9..7a31b0f264 100644 --- a/ortools/base/threadpool.h +++ b/ortools/base/threadpool.h @@ -27,6 +27,7 @@ namespace operations_research { class ThreadPool { public: + explicit ThreadPool(int num_threads); ThreadPool(absl::string_view prefix, int num_threads); ~ThreadPool(); diff --git a/ortools/dotnet/Google.OrTools-full.csproj.in b/ortools/dotnet/Google.OrTools-full.csproj.in index be355d7ce5..94fe0d295a 100644 --- 
a/ortools/dotnet/Google.OrTools-full.csproj.in +++ b/ortools/dotnet/Google.OrTools-full.csproj.in @@ -184,7 +184,7 @@ - + diff --git a/ortools/dotnet/Google.OrTools-local.csproj.in b/ortools/dotnet/Google.OrTools-local.csproj.in index ca71502cc9..851b23a537 100644 --- a/ortools/dotnet/Google.OrTools-local.csproj.in +++ b/ortools/dotnet/Google.OrTools-local.csproj.in @@ -172,7 +172,7 @@ - + diff --git a/ortools/glop/lp_solver.cc b/ortools/glop/lp_solver.cc index 8e08ff4f52..072004dbee 100644 --- a/ortools/glop/lp_solver.cc +++ b/ortools/glop/lp_solver.cc @@ -36,6 +36,7 @@ #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/lp_utils.h" #include "ortools/lp_data/proto_utils.h" +#include "ortools/port/proto_utils.h" #include "ortools/util/fp_utils.h" #include "ortools/util/logging.h" @@ -177,7 +178,7 @@ ProblemStatus LPSolver::SolveWithTimeLimit(const LinearProgram& lp, SOLVER_LOG(&logger_, "Initial problem: ", lp.GetDimensionString()); SOLVER_LOG(&logger_, "Objective stats: ", lp.GetObjectiveStatsString()); SOLVER_LOG(&logger_, "Bounds stats: ", lp.GetBoundsStatsString()); - SOLVER_LOG(&logger_, "Parameters: ", parameters_.ShortDebugString()); + SOLVER_LOG(&logger_, "Parameters: ", ProtobufShortDebugString(parameters_)); } // Check some preconditions. diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 67d996bbc4..bf5b28d80c 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -11,8 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") package(default_visibility = ["//visibility:public"]) @@ -397,6 +397,7 @@ cc_library( ":dag_shortest_path", ":graph", ":topologicalsorter", + "//ortools/base:threadpool", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/log:check", diff --git a/ortools/graph/dag_constrained_shortest_path.cc b/ortools/graph/dag_constrained_shortest_path.cc index 6b2c57e1e1..8fe8450323 100644 --- a/ortools/graph/dag_constrained_shortest_path.cc +++ b/ortools/graph/dag_constrained_shortest_path.cc @@ -78,8 +78,8 @@ PathWithLength ConstrainedShortestPathsOnDag( std::vector destinations = {destination}; ConstrainedShortestPathsOnDagWrapper> constrained_shortest_path_on_dag(&graph, &arc_lengths, &arc_resources, - &(*topological_order), &sources, - &destinations, &max_resources); + *topological_order, sources, + destinations, &max_resources); PathWithLength path_with_length = constrained_shortest_path_on_dag.RunConstrainedShortestPathOnDag(); diff --git a/ortools/graph/dag_constrained_shortest_path.h b/ortools/graph/dag_constrained_shortest_path.h index b864d8d94f..92f0cbf9ff 100644 --- a/ortools/graph/dag_constrained_shortest_path.h +++ b/ortools/graph/dag_constrained_shortest_path.h @@ -14,6 +14,8 @@ #ifndef OR_TOOLS_GRAPH_DAG_CONSTRAINED_SHORTEST_PATH_H_ #define OR_TOOLS_GRAPH_DAG_CONSTRAINED_SHORTEST_PATH_H_ +#include + #include #include #include @@ -24,14 +26,27 @@ #include "absl/log/check.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" +#include "ortools/base/threadpool.h" #include "ortools/graph/dag_shortest_path.h" +#include "ortools/graph/graph.h" namespace operations_research { // This library provides APIs to compute the constrained shortest path (CSP) on // a given directed acyclic graph (DAG) with resources on each arc. 
A CSP is a // shortest path on a DAG which does not exceed a set of maximum resources -// consumption. The algorithm is exponential and has no guarantee to finish. +// consumption. The algorithm is exponential and has no guarantee to finish. It +// is based on bi-directional search. First is a forward pass from the source to +// nodes “somewhere in the middle” to generate forward labels, just as the +// one-directional labeling algorithm we discussed; then a symmetric backward +// pass from the destination generates backward labels; and finally at each node +// with both forward and backward labels, it joins any pair of labels to form a +// feasible complete path. Intuitively, the number of labels grows exponentially +// with the number of arcs in the path. The overall number of labels is then +// expected to be smaller with shorter paths. For DAG with a topological +// ordering, we can pick any node (usually right in the middle) as a *midpoint* +// to stop each pass at. Then labels can be joined at only one half of the nodes +// by considering all edges between each half. // // In the DAG, multiple arcs between the same pair of nodes is allowed. However, // self-loop arcs are not allowed. @@ -114,70 +129,154 @@ class ConstrainedShortestPathsOnDagWrapper { // // Validity of arcs and topological order are DCHECKed. // - // If the number of labels in memory exceeds `max_num_created_labels` at any - // point in the algorithm, it returns the best path found so far, most - // particularly the empty path if none were found. + // If the number of labels in memory exceeds `max_num_created_labels / 2` at + // any point in each pass of the algorithm, new labels are not generated + // anymore and it returns the best path found so far, most particularly the + // empty path if none were found. 
// - // SUBTLE: You can modify the graph, the arc lengths and resources, the - // topological order, sources, destinations or the maximum resource between - // calls to the `RunConstrainedShortestPathOnDag()` function. That's fine. - // Doing so will obviously invalidate the result API of the last constrained - // shortest path run, which could return an upper bound, junk, or crash. + // IMPORTANT: You cannot modify anything except `arc_lengths` between calls to + // the `RunConstrainedShortestPathOnDag()` function. ConstrainedShortestPathsOnDagWrapper( const GraphType* graph, const std::vector* arc_lengths, const std::vector>* arc_resources, - const std::vector* topological_order, - const std::vector* sources, - const std::vector* destinations, + absl::Span topological_order, + absl::Span sources, + absl::Span destinations, const std::vector* max_resources, int max_num_created_labels = 1e9); // Returns {+inf, {}, {}} if there is no constrained path of finite length - // from one node in `sources` to one node in `destinations`. + // within resource constraints from one node in `sources` to one node in + // `destinations`. PathWithLength RunConstrainedShortestPathOnDag(); + // For benchmarking and informational purposes, returns the number of labels + // generated in the call of `RunConstrainedShortestPathOnDag()`. + int label_count() const { + return lengths_from_sources_[FORWARD].size() + + lengths_from_sources_[BACKWARD].size(); + } + private: - // Returns the list of all the arcs of the shortest path from one node in - // `sources` ending by the arc from a given `label_index` if and only if - // `label_index` is between 0 and `labels_from_sources_.size() - 1`. - std::vector BestArcPathEndingWith(int label_index) const; + enum Direction { + FORWARD = 0, + BACKWARD = 1, + }; + + inline static Direction Reverse(Direction d) { + return d == FORWARD ? 
BACKWARD : FORWARD; + } + + // A LabelPair includes the `length` of a path that can be constructed by + // merging the paths from two *linkable* labels corresponding to + // `label_index`. + struct LabelPair { + double length = 0.0; + int label_index[2]; + }; + + void RunHalfConstrainedShortestPathOnDag( + const GraphType& reverse_graph, absl::Span arc_lengths, + absl::Span> arc_resources, + absl::Span> min_arc_resources, + absl::Span max_resources, int max_num_created_labels, + std::vector& lengths_from_sources, + std::vector>& resources_from_sources, + std::vector& incoming_arc_indices_from_sources, + std::vector& first_label, std::vector& num_labels); + + // Returns the arc index linking two nodes from each pass forming the best + // path. Returns -1 if no better path than the one found from + // `best_label_pair` is found. + ArcIndex MergeHalfRuns( + const GraphType& graph, absl::Span arc_lengths, + absl::Span> arc_resources, + absl::Span max_resources, + const std::vector sub_node_indices[2], + const std::vector lengths_from_sources[2], + const std::vector> resources_from_sources[2], + const std::vector first_label[2], + const std::vector num_labels[2], LabelPair& best_label_pair); + + // Returns the path as list of arc indices that starts from a node in + // `sources` (if `direction` is FORWARD) or `destinations` (if `direction` is + // BACKWARD) and ends in node represented by `best_label_index`. + std::vector ArcPathTo( + int best_label_index, const GraphType& reverse_graph, + absl::Span arc_lengths, + absl::Span lengths_from_sources, + absl::Span incoming_arc_indices_from_sources, + absl::Span first_label, + absl::Span num_labels) const; + // Returns the list of all the nodes implied by a given `arc_path`. 
- std::vector NodePathImpliedBy( - const std::vector& arc_path) const; + std::vector NodePathImpliedBy(absl::Span arc_path, + const GraphType& graph) const; + + static constexpr double kTolerance = 1e-6; const GraphType* const graph_; const std::vector* const arc_lengths_; const std::vector>* const arc_resources_; - const std::vector* const topological_order_; - const std::vector* const sources_; - const std::vector* const destinations_; const std::vector* const max_resources_; - int max_num_created_labels_; + absl::Span sources_; + absl::Span destinations_; + const int num_resources_; - std::vector node_is_source_; - std::vector node_is_destination_; - // Using the fact that the graph is a DAG, we can disregard any node that - // comes after the last destination (based on the topological order). - std::vector node_is_after_last_destination_; - - // Data for reverse graph. - GraphType reverse_graph_; - std::vector reverse_inverse_arc_permutation_; + // Data about *reachable* sub-graphs split in two for bidirectional search. + // Reachable nodes are nodes that can be reached given the resources + // constraints, i.e., for each resource, the sum of the minimum resource to + // get to a node from a node in `sources` and to get from a node to a node in + // `destinations` should be less than the maximum resource. Reachable arcs are + // arcs linking reachable nodes. + // + // `sub_reverse_graph_[dir]` is the reachable sub-graph split in *half* with + // an additional node linked to sources (resp. destinations) for the forward (resp. + // backward) direction. For the forward (resp. backward) direction, nodes are + // indexed using the original (resp. reverse) topological order. GraphType sub_reverse_graph_[2]; std::vector> sub_arc_resources_[2]; // `sub_full_arc_indices_[dir]` has size `sub_reverse_graph_[dir].num_arcs()` // such that `sub_full_arc_indices_[dir][sub_arc] = arc` where `sub_arc` is // the arc in the reachable sub-graph for direction `dir` (i.e. 
+ // `sub_reverse_graph[dir]`) and `arc` is the arc in the original graph (i.e. + // `graph`). + std::vector sub_full_arc_indices_[2]; + // `sub_node_indices_[dir]` has size `graph->num_nodes()` such that + // `sub_node_indices[dir][node] = sub_node` where `node` is the node in the + // original graph (i.e. `graph`) and `sub_node` is the node in the reachable + // sub-graph for direction `dir` (i.e. `sub_reverse_graph[dir]`) and -1 if + // `node` is not present in reachable sub-graph. + std::vector sub_node_indices_[2]; + // `sub_is_source_[dir][sub_dir]` has size + // `sub_reverse_graph_[dir].num_nodes()` such that + // `sub_is_source_[dir][sub_dir][sub_node]` is true if `sub_node` is a node in + // the reachable sub-graph for direction `dir` (i.e. `sub_reverse_graph[dir]`) + // which is a source (resp. destination) if `sub_dir` is FORWARD (resp. + // BACKWARD). + std::vector sub_is_source_[2][2]; + // `sub_min_arc_resources_[dir]` has size `max_resources->size()` and + // `sub_min_arc_resources_[dir][r]`, `sub_reverse_graph_[dir].num_nodes()` + // such that `sub_min_arc_resources_[dir][r][sub_node]` is the minimum of + // resource r needed to get to a destination (resp. come from a source) if + // `dir` is FORWARD (resp. BACKWARD). + std::vector> sub_min_arc_resources_[2]; + // Maximum number of labels created for each sub-graph. + int max_num_created_labels_[2]; // Data about the last call of the RunConstrainedShortestPathOnDag() - // function. A Label includes the cumulative length, resources and the - // previous arc used in the path to get to this node. - struct Label { - double length; - // TODO(b/315786885): Optimize resources in Label struct. - std::vector resources; - ArcIndex incoming_arc; - }; - // A label is present in `labels_from_sources_` if and only if it is feasible - // with respect to all resources. - std::vector