diff --git a/WORKSPACE b/WORKSPACE index ef4ea4b953..2aef8da265 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -20,7 +20,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository", "new_git_r ## Bazel Skylib rules. git_repository( name = "bazel_skylib", - tag = "1.5.0", + tag = "1.7.1", remote = "https://github.com/bazelbuild/bazel-skylib.git", ) load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") @@ -29,7 +29,7 @@ bazel_skylib_workspace() ## Bazel rules. git_repository( name = "platforms", - tag = "0.0.9", + tag = "0.0.10", remote = "https://github.com/bazelbuild/platforms.git", ) @@ -47,25 +47,25 @@ git_repository( git_repository( name = "rules_java", - tag = "7.5.0", + tag = "7.7.0", remote = "https://github.com/bazelbuild/rules_java.git", ) git_repository( name = "rules_jvm_external", - tag = "6.0", + tag = "6.2", remote = "https://github.com/bazelbuild/rules_jvm_external.git", ) git_repository( name = "contrib_rules_jvm", - tag = "v0.24.0", + tag = "v0.27.0", remote = "https://github.com/bazel-contrib/rules_jvm.git", ) git_repository( name = "rules_python", - tag = "0.31.0", + tag = "0.34.0", remote = "https://github.com/bazelbuild/rules_python.git", ) @@ -156,7 +156,7 @@ cc_library( git_repository( name = "highs", - branch = "v1.7.0", + branch = "v1.7.1", remote = "https://github.com/ERGO-Code/HiGHS.git", ) @@ -193,6 +193,21 @@ new_git_repository( load("@rules_python//python:repositories.bzl", "py_repositories") py_repositories() +load("@rules_python//python:repositories.bzl", "python_register_multi_toolchains") +DEFAULT_PYTHON = "3.11" +python_register_multi_toolchains( + name = "python", + default_version = DEFAULT_PYTHON, + python_versions = [ + "3.12", + "3.11", + "3.10", + "3.9", + "3.8" + ], + ignore_root_user_error=True, +) + # Create a central external repo, @pip_deps, that contains Bazel targets for all the # third-party packages specified in the bazel/requirements.txt file. 
load("@rules_python//python:pip.bzl", "pip_parse") @@ -236,7 +251,7 @@ http_archive( ## `pybind11_bazel` git_repository( name = "pybind11_bazel", - commit = "23926b00e2b2eb2fc46b17e587cf0c0cfd2f2c4b", # 2023/11/29 + tag = "v2.12.0", # 2024/04/08 patches = ["//patches:pybind11_bazel.patch"], patch_args = ["-p1"], remote = "https://github.com/pybind/pybind11_bazel.git", @@ -244,14 +259,14 @@ git_repository( new_git_repository( name = "pybind11", - build_file = "@pybind11_bazel//:pybind11.BUILD", - tag = "v2.12.0", + build_file = "@pybind11_bazel//:pybind11-BUILD.bazel", + tag = "v2.13.1", remote = "https://github.com/pybind/pybind11.git", ) new_git_repository( - name = "pybind11_abseil", - commit = "52f27398876a3177049977249e004770bd869e61", # 2024/01/11 + name = "org_pybind11_abseil", + tag = "v202402.0", patches = ["//patches:pybind11_abseil.patch"], patch_args = ["-p1"], remote = "https://github.com/pybind/pybind11_abseil.git", @@ -259,17 +274,10 @@ new_git_repository( new_git_repository( name = "pybind11_protobuf", - commit = "3b11990a99dea5101799e61d98a82c4737d240cc", # 2024/01/04 + commit = "84653a591aea5df482dc2bde42c19efafbd53a57", # 2024/06/28 remote = "https://github.com/pybind/pybind11_protobuf.git", ) -load("@pybind11_bazel//:python_configure.bzl", "python_configure") -python_configure(name = "local_config_python", python_version = "3") -bind( - name = "python_headers", - actual = "@local_config_python//:python_headers", -) - ## Java support (with junit 5) load("@rules_java//java:repositories.bzl", "rules_java_dependencies", "rules_java_toolchains") rules_java_dependencies() diff --git a/examples/cpp/BUILD.bazel b/examples/cpp/BUILD.bazel index d77d27b996..6cc14900f2 100644 --- a/examples/cpp/BUILD.bazel +++ b/examples/cpp/BUILD.bazel @@ -1012,6 +1012,7 @@ cc_binary( "//ortools/pdlp:solvers_cc_proto", "//ortools/port:proto_utils", "//ortools/util:file_util", + "//ortools/util:fp_roundtrip_conv", "//ortools/util:sigint", 
"@com_google_absl//absl/flags:flag", "@com_google_absl//absl/log:check", diff --git a/examples/cpp/max_flow.cc b/examples/cpp/max_flow.cc index a7d1529849..a99e9b4983 100644 --- a/examples/cpp/max_flow.cc +++ b/examples/cpp/max_flow.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/flags/flag.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" diff --git a/examples/cpp/min_cost_flow.cc b/examples/cpp/min_cost_flow.cc index 8119bbbf68..7c4dbef571 100644 --- a/examples/cpp/min_cost_flow.cc +++ b/examples/cpp/min_cost_flow.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/flags/flag.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" diff --git a/examples/cpp/pdlp_solve.cc b/examples/cpp/pdlp_solve.cc index 9fbd11dba1..1a86d85eb9 100644 --- a/examples/cpp/pdlp_solve.cc +++ b/examples/cpp/pdlp_solve.cc @@ -16,30 +16,39 @@ // the input problem. #include +#include +#include +#include #include -#include #include "absl/flags/flag.h" -#include "absl/flags/parse.h" -#include "absl/flags/usage.h" +#include "absl/log/check.h" +#include "absl/log/flags.h" #include "absl/strings/match.h" -#include "absl/strings/str_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/file.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "ortools/base/helpers.h" #include "ortools/base/init_google.h" #include "ortools/base/logging.h" +#include "ortools/base/options.h" #include "ortools/pdlp/iteration_stats.h" #include "ortools/pdlp/primal_dual_hybrid_gradient.h" +#include "ortools/pdlp/quadratic_program.h" #include "ortools/pdlp/quadratic_program_io.h" #include "ortools/pdlp/solve_log.pb.h" #include "ortools/pdlp/solvers.pb.h" #include "ortools/port/proto_utils.h" #include "ortools/util/file_util.h" +#include "ortools/util/fp_roundtrip_conv.h" #include "ortools/util/sigint.h" // TODO: .mps.gz files aren't working. As a workaround, use .mps. 
-ABSL_FLAG(std::string, input, "", "REQUIRED: Input file name."); +ABSL_FLAG( + std::string, input, "", + "REQUIRED: Input file name. The following formats are supported: \n" + " - a .mps, .mps.bz2 file,\n" + " - an MPModelProto [.pb (binary), .textproto (text), *.json, *.json.gz]"); ABSL_FLAG(std::string, params, "", "PrimalDualHybridGradientParams in text format"); ABSL_FLAG(std::string, solve_log_file, "", @@ -49,11 +58,6 @@ ABSL_FLAG( std::string, sol_file, "", "If non-empty, output the final primal solution in Miplib .sol format."); -static const char kUsageStr[] = - "Run PDLP on the given input file. The following formats are supported: \n" - " - a .mps, .mps.gz, .mps.bz2 file,\n" - " - an MPModelProto [.pb (binary), .textproto (text), *.json, *.json.gz]"; - namespace operations_research::pdlp { void WriteSolveLog(const std::string& solve_log_file, const SolveLog& log) { @@ -68,11 +72,12 @@ void WriteSolveLog(const std::string& solve_log_file, const SolveLog& log) { LOG(FATAL) << "Unrecognized file extension for --solve_log_file: " << solve_log_file << ". Expected .textproto, .pb, or .json"; } - QCHECK(WriteProtoToFile(solve_log_file, log, write_format, /*gzipped=*/false, - /*append_extension_to_file_name=*/false).ok()); + QCHECK_OK(WriteProtoToFile(solve_log_file, log, write_format, + /*gzipped=*/false, + /*append_extension_to_file_name=*/false)); } -void Solve(const std::string& input, const std::string& params_str, +void Solve(const std::string& input, absl::string_view params_str, const std::string& solve_log_file, const std::string& sol_file) { QCHECK(!input.empty()) << "--input is required"; PrimalDualHybridGradientParams params; @@ -104,8 +109,9 @@ void Solve(const std::string& input, const std::string& params_str, // TODO: In what format should we write the dual solution? 
if (!sol_file.empty() && convergence_information.has_value()) { std::string sol_string; - absl::StrAppend(&sol_string, - "=obj= ", convergence_information->primal_objective(), + absl::StrAppend( + &sol_string, "=obj= ", + RoundTripDoubleFormat(convergence_information->primal_objective()), "\n"); for (int64_t i = 0; i < result.primal_solution.size(); ++i) { std::string name; @@ -114,7 +120,8 @@ void Solve(const std::string& input, const std::string& params_str, } else { name = absl::StrCat("var", i); } - absl::StrAppend(&sol_string, name, " ", result.primal_solution(i), "\n"); + absl::StrAppend(&sol_string, name, " ", + RoundTripDoubleFormat(result.primal_solution(i)), "\n"); } LOG(INFO) << "Writing .sol solution to '" << sol_file << "'.\n"; CHECK_OK(file::SetContents(sol_file, sol_string, file::Defaults())); @@ -125,8 +132,7 @@ void Solve(const std::string& input, const std::string& params_str, int main(int argc, char** argv) { absl::SetFlag(&FLAGS_stderrthreshold, 0); - google::InitGoogleLogging(kUsageStr); - absl::ParseCommandLine(argc, argv); + InitGoogle(argv[0], &argc, &argv, /*remove_flags=*/true); operations_research::pdlp::Solve( absl::GetFlag(FLAGS_input), absl::GetFlag(FLAGS_params), diff --git a/examples/cpp/uncapacitated_facility_location.cc b/examples/cpp/uncapacitated_facility_location.cc index e738ce2417..e91707e0c4 100644 --- a/examples/cpp/uncapacitated_facility_location.cc +++ b/examples/cpp/uncapacitated_facility_location.cc @@ -25,6 +25,7 @@ #include #include +#include "absl/flags/flag.h" #include "absl/flags/parse.h" #include "absl/flags/usage.h" #include "absl/log/initialize.h" diff --git a/examples/python/arc_flow_cutting_stock_sat.py b/examples/python/arc_flow_cutting_stock_sat.py index 60285ee810..ec300e1a0f 100644 --- a/examples/python/arc_flow_cutting_stock_sat.py +++ b/examples/python/arc_flow_cutting_stock_sat.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2010-2024 Google LLC # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. @@ -10,15 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Cutting stock problem with the objective to minimize wasted space.""" +"""Cutting stock problem with the objective to minimize wasted space.""" import collections import time -import numpy as np from absl import app from absl import flags +import numpy as np + from google.protobuf import text_format from ortools.linear_solver.python import model_builder as mb from ortools.sat.python import cp_model @@ -26,13 +28,14 @@ from ortools.sat.python import cp_model FLAGS = flags.FLAGS _OUTPUT_PROTO = flags.DEFINE_string( - 'output_proto', '', 'Output file to write the cp_model proto to.') + "output_proto", "", "Output file to write the cp_model proto to." +) _PARAMS = flags.DEFINE_string( - 'params', - 'num_search_workers:8,log_search_progress:true,max_time_in_seconds:10', - 'Sat solver parameters.') -_SOLVER = flags.DEFINE_string( - 'solver', 'sat', 'Method used to solve: sat, mip.') + "params", + "num_search_workers:8,log_search_progress:true,max_time_in_seconds:10", + "Sat solver parameters.", +) +_SOLVER = flags.DEFINE_string("solver", "sat", "Method used to solve: sat, mip.") DESIRED_LENGTHS = [ @@ -104,9 +107,9 @@ def create_state_graph(items, max_capacity): states.append(new_state) state_to_index[new_state] = new_state_index # Add the transition - transitions.append([ - current_state_index, new_state_index, item_index, card + 1 - ]) + transitions.append( + [current_state_index, new_state_index, item_index, card + 1] + ) return states, transitions @@ -114,14 +117,19 @@ def create_state_graph(items, max_capacity): def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: str): """Solve the cutting stock with arc-flow and the CP-SAT solver.""" items = 
regroup_and_count(DESIRED_LENGTHS) - print('Items:', items) + print("Items:", items) num_items = len(DESIRED_LENGTHS) max_capacity = max(POSSIBLE_CAPACITIES) states, transitions = create_state_graph(items, max_capacity) - print('Dynamic programming has generated', len(states), 'states and', - len(transitions), 'transitions') + print( + "Dynamic programming has generated", + len(states), + "states and", + len(transitions), + "transitions", + ) incoming_vars = collections.defaultdict(list) outgoing_vars = collections.defaultdict(list) @@ -139,8 +147,8 @@ def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: st count = items[item_index][1] max_count = count // card count_var = model.NewIntVar( - 0, max_count, - 'i%i_f%i_t%i_C%s' % (item_index, incoming, outgoing, card)) + 0, max_count, "i%i_f%i_t%i_C%s" % (item_index, incoming, outgoing, card) + ) incoming_vars[incoming].append(count_var) outgoing_vars[outgoing].append(count_var) item_vars[item_index].append(count_var) @@ -150,7 +158,7 @@ def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: st for state_index, state in enumerate(states): if state_index == 0: continue - exit_var = model.NewIntVar(0, num_items, 'e%i' % state_index) + exit_var = model.NewIntVar(0, num_items, "e%i" % state_index) outgoing_vars[state_index].append(exit_var) incoming_sink_vars.append(exit_var) price = price_usage(state, POSSIBLE_CAPACITIES) @@ -159,8 +167,7 @@ def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: st # Flow conservation for state_index in range(1, len(states)): - model.Add( - sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index])) + model.Add(sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index])) # Flow going out of the source must go in the sink model.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars)) @@ -169,13 +176,17 @@ def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: st for item_index, 
size_and_count in enumerate(items): num_arcs = len(item_vars[item_index]) model.Add( - sum(item_vars[item_index][i] * item_coeffs[item_index][i] - for i in range(num_arcs)) == size_and_count[1]) + sum( + item_vars[item_index][i] * item_coeffs[item_index][i] + for i in range(num_arcs) + ) + == size_and_count[1] + ) # Objective is the sum of waste model.Minimize( - sum(objective_vars[i] * objective_coeffs[i] - for i in range(len(objective_vars)))) + sum(objective_vars[i] * objective_coeffs[i] for i in range(len(objective_vars))) + ) # Output model proto to file. if output_proto_file: @@ -192,13 +203,18 @@ def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: st def solve_cutting_stock_with_arc_flow_and_mip(): """Solve the cutting stock with arc-flow and a MIP solver.""" items = regroup_and_count(DESIRED_LENGTHS) - print('Items:', items) + print("Items:", items) num_items = len(DESIRED_LENGTHS) max_capacity = max(POSSIBLE_CAPACITIES) states, transitions = create_state_graph(items, max_capacity) - print('Dynamic programming has generated', len(states), 'states and', - len(transitions), 'transitions') + print( + "Dynamic programming has generated", + len(states), + "states and", + len(transitions), + "transitions", + ) incoming_vars = collections.defaultdict(list) outgoing_vars = collections.defaultdict(list) @@ -216,8 +232,10 @@ def solve_cutting_stock_with_arc_flow_and_mip(): for outgoing, incoming, item_index, card in transitions: count = items[item_index][1] count_var = model.new_int_var( - 0, count, 'a%i_i%i_f%i_t%i_c%i' % (var_index, item_index, incoming, - outgoing, card)) + 0, + count, + "a%i_i%i_f%i_t%i_c%i" % (var_index, item_index, incoming, outgoing, card), + ) var_index += 1 incoming_vars[incoming].append(count_var) outgoing_vars[outgoing].append(count_var) @@ -227,7 +245,7 @@ def solve_cutting_stock_with_arc_flow_and_mip(): for state_index, state in enumerate(states): if state_index == 0: continue - exit_var = model.new_int_var(0, 
num_items, 'e%i' % state_index) + exit_var = model.new_int_var(0, num_items, "e%i" % state_index) outgoing_vars[state_index].append(exit_var) incoming_sink_vars.append(exit_var) price = price_usage(state, POSSIBLE_CAPACITIES) @@ -237,44 +255,52 @@ def solve_cutting_stock_with_arc_flow_and_mip(): # Flow conservation for state_index in range(1, len(states)): model.add( - mb.LinearExpr.sum(incoming_vars[state_index]) == mb.LinearExpr.sum( - outgoing_vars[state_index])) + mb.LinearExpr.sum(incoming_vars[state_index]) + == mb.LinearExpr.sum(outgoing_vars[state_index]) + ) # Flow going out of the source must go in the sink model.add( - mb.LinearExpr.sum(outgoing_vars[0]) == mb.LinearExpr.sum( - incoming_sink_vars)) + mb.LinearExpr.sum(outgoing_vars[0]) == mb.LinearExpr.sum(incoming_sink_vars) + ) # Items must be placed for item_index, size_and_count in enumerate(items): num_arcs = len(item_vars[item_index]) model.add( - mb.LinearExpr.sum([item_vars[item_index][i] * item_coeffs[item_index][i] - for i in range(num_arcs)]) == size_and_count[1]) + mb.LinearExpr.sum( + [ + item_vars[item_index][i] * item_coeffs[item_index][i] + for i in range(num_arcs) + ] + ) + == size_and_count[1] + ) # Objective is the sum of waste model.minimize(np.dot(objective_vars, objective_coeffs)) - solver = mb.ModelSolver('scip') + solver = mb.ModelSolver("scip") solver.enable_output(True) status = solver.solve(model) ### Output the solution. 
if status == mb.SolveStatus.OPTIMAL or status == mb.SolveStatus.FEASIBLE: - print('Objective value = %f found in %.2f s' % - (solver.objective_value, time.time() - start_time)) + print( + "Objective value = %f found in %.2f s" + % (solver.objective_value, time.time() - start_time) + ) else: - print('No solution') + print("No solution") def main(_): - """Main function""" - if _SOLVER.value == 'sat': - solve_cutting_stock_with_arc_flow_and_sat(_OUTPUT_PROTO.value, - _PARAMS.value) + """Main function.""" + if _SOLVER.value == "sat": + solve_cutting_stock_with_arc_flow_and_sat(_OUTPUT_PROTO.value, _PARAMS.value) else: # 'mip' solve_cutting_stock_with_arc_flow_and_mip() -if __name__ == '__main__': +if __name__ == "__main__": app.run(main) diff --git a/examples/python/testdata/BUILD.bazel b/examples/python/testdata/BUILD.bazel index 7d9df48d90..210dd5ec30 100644 --- a/examples/python/testdata/BUILD.bazel +++ b/examples/python/testdata/BUILD.bazel @@ -18,4 +18,3 @@ exports_files( "salbp_20_1.alb", ], ) - diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 112f6357ba..b73d601e4a 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -51,7 +51,7 @@ cc_library( ], copts = [ "-DOR_TOOLS_MAJOR=9", - "-DOR_TOOLS_MINOR=10", + "-DOR_TOOLS_MINOR=11", "-DOR_TOOLS_PATCH=9999", ], linkopts = select({ @@ -352,11 +352,20 @@ cc_library( deps = [":base"], ) +cc_library( + name = "mathlimits", + srcs = ["mathlimits.cc"], + hdrs = ["mathlimits.h"], + deps = [], +) + cc_library( name = "mathutil", + srcs = ["mathutil.cc"], hdrs = ["mathutil.h"], deps = [ ":base", + ":mathlimits", ], ) diff --git a/ortools/base/CMakeLists.txt b/ortools/base/CMakeLists.txt index 81d2409e2d..cffeca83ea 100644 --- a/ortools/base/CMakeLists.txt +++ b/ortools/base/CMakeLists.txt @@ -38,5 +38,5 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) 
#add_library(${PROJECT_NAMESPACE}::base ALIAS ${NAME}) diff --git a/ortools/base/file.cc b/ortools/base/file.cc index a5c298506c..8819865503 100644 --- a/ortools/base/file.cc +++ b/ortools/base/file.cc @@ -62,22 +62,36 @@ size_t File::Size() { bool File::Flush() { return fflush(f_) == 0; } +// Deletes "this" on closing. bool File::Close() { + bool ok = true; + if (f_ == nullptr) { + return ok; + } if (fclose(f_) == 0) { f_ = nullptr; - return true; } else { - return false; + ok = false; } + delete this; + return ok; } +// Deletes "this" on closing. absl::Status File::Close(int flags) { - if (flags != file::Defaults()) - return absl::Status(absl::StatusCode::kInvalidArgument, "Wrong flags"); - return Close() - ? absl::OkStatus() - : absl::Status(absl::StatusCode::kInvalidArgument, - absl::StrCat("Could not close file '", name_, "'")); + absl::Status status; + if (f_ == nullptr) { + return status; + } + if (fclose(f_) == 0) { + f_ = nullptr; + } else { + status.Update( + absl::Status(absl::StatusCode::kInvalidArgument, + absl::StrCat("Could not close file '", name_, "'"))); + } + delete this; + return status; } void File::ReadOrDie(void* buf, size_t size) { @@ -182,13 +196,11 @@ absl::Status GetContents(absl::string_view filename, std::string* output, const int64_t size = file->Size(); if (file->ReadToString(output, size) == size) { status.Update(file->Close(flags)); - delete file; return status; } #if defined(_MSC_VER) // On windows, binary files needs to be opened with the "rb" flags. file->Close(); - delete file; // Retry in binary mode. status = file::Open(filename, "rb", &file, flags); if (!status.ok()) return status; @@ -196,13 +208,11 @@ absl::Status GetContents(absl::string_view filename, std::string* output, const int64_t b_size = file->Size(); if (file->ReadToString(output, b_size) == b_size) { status.Update(file->Close(flags)); - delete file; return status; } #endif // _MSC_VER file->Close(flags).IgnoreError(); // Even if ReadToString() fails! 
- delete file; return absl::Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Could not read from '", filename, "'.")); } @@ -224,7 +234,6 @@ absl::Status SetContents(absl::string_view filename, absl::string_view contents, if (!status.ok()) return status; status = file::WriteString(file, contents, flags); status.Update(file->Close(flags)); // Even if WriteString() fails! - delete file; return status; } diff --git a/ortools/base/mathlimits.cc b/ortools/base/mathlimits.cc new file mode 100644 index 0000000000..bc658e1ea8 --- /dev/null +++ b/ortools/base/mathlimits.cc @@ -0,0 +1,107 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/base/mathlimits.h" + +namespace operations_research { + +#define DEF_COMMON_LIMITS(Type) \ + const bool MathLimits::kIsSigned; \ + const bool MathLimits::kIsInteger; \ + const int MathLimits::kMin10Exp; \ + const int MathLimits::kMax10Exp; + +#define DEF_UNSIGNED_INT_LIMITS(Type) \ + DEF_COMMON_LIMITS(Type) \ + const Type MathLimits::kPosMin; \ + const Type MathLimits::kPosMax; \ + const Type MathLimits::kMin; \ + const Type MathLimits::kMax; \ + const Type MathLimits::kEpsilon; \ + const Type MathLimits::kStdError; + +#define DEF_SIGNED_INT_LIMITS(Type) \ + DEF_UNSIGNED_INT_LIMITS(Type) \ + const Type MathLimits::kNegMin; \ + const Type MathLimits::kNegMax; + +#define DEF_PRECISION_LIMITS(Type) const int MathLimits::kPrecisionDigits; + +// http://en.wikipedia.org/wiki/Quadruple_precision_floating-point_format#Double-double_arithmetic +// With some compilers (gcc 4.6.x) on some platforms (powerpc64), +// "long double" is implemented as a pair of double: "double double" format. +// This causes a problem with epsilon (eps). +// eps is the smallest positive number such that 1.0 + eps > 1.0 +// +// Normal format: 1.0 + e = 1.0...01 // N-1 zeros for N fraction bits +// D-D format: 1.0 + e = 1.000...0001 // epsilon can be very small +// +// In the normal format, 1.0 + e has to fit in one stretch of bits. +// The maximum rounding error is half of eps. +// +// In the double-double format, 1.0 + e splits across two doubles: +// 1.0 in the high double, e in the low double, and they do not have to +// be contiguous. The maximum rounding error on a value close to 1.0 is +// much larger than eps. +// +// Some code checks for errors by comparing a computed value to a golden +// value +/- some multiple of the maximum rounding error. The maximum +// rounding error is not available so we use eps as an approximation +// instead. That fails when long double is in the double-double format. 
+// Therefore, we define kStdError as a multiple of +// max(DBL_EPSILON * DBL_EPSILON, kEpsilon) rather than a multiple of kEpsilon. + +#define DEF_FP_LIMITS(Type, min_val, max_val, eps_val, inf_val) \ + DEF_COMMON_LIMITS(Type) \ + const Type MathLimits::kPosMin = min_val; \ + const Type MathLimits::kPosMax = max_val; \ + const Type MathLimits::kMin = -max_val; \ + const Type MathLimits::kMax = max_val; \ + const Type MathLimits::kNegMin = -min_val; \ + const Type MathLimits::kNegMax = -max_val; \ + const Type MathLimits::kEpsilon = eps_val; \ + /* 32 is 5 bits of mantissa error; should be adequate for common errors */ \ + const Type MathLimits::kStdError = \ + 32 * (static_cast(DBL_EPSILON * DBL_EPSILON) > \ + MathLimits::kEpsilon \ + ? static_cast(DBL_EPSILON * DBL_EPSILON) \ + : MathLimits::kEpsilon); \ + DEF_PRECISION_LIMITS(Type) \ + const Type MathLimits::kNaN = inf_val - inf_val; \ + const Type MathLimits::kPosInf = inf_val; \ + const Type MathLimits::kNegInf = -inf_val; + +// The following are *not* casts! 
+DEF_SIGNED_INT_LIMITS(signed char) +DEF_SIGNED_INT_LIMITS(short) // NOLINT(runtime/int) +DEF_SIGNED_INT_LIMITS(int) // NOLINT(runtime/int) +DEF_SIGNED_INT_LIMITS(long) // NOLINT(runtime/int) +DEF_SIGNED_INT_LIMITS(long long) // NOLINT(runtime/int) + +DEF_UNSIGNED_INT_LIMITS(unsigned char) +DEF_UNSIGNED_INT_LIMITS(unsigned short) // NOLINT(runtime/int) +DEF_UNSIGNED_INT_LIMITS(unsigned) // NOLINT(runtime/int) +DEF_UNSIGNED_INT_LIMITS(unsigned long) // NOLINT(runtime/int) +DEF_UNSIGNED_INT_LIMITS(unsigned long long) // NOLINT(runtime/int) + +DEF_FP_LIMITS(float, FLT_MIN, FLT_MAX, FLT_EPSILON, HUGE_VALF) +DEF_FP_LIMITS(double, DBL_MIN, DBL_MAX, DBL_EPSILON, HUGE_VAL) +DEF_FP_LIMITS(long double, LDBL_MIN, LDBL_MAX, LDBL_EPSILON, HUGE_VALL) + +#undef DEF_COMMON_LIMITS +#undef DEF_SIGNED_INT_LIMITS +#undef DEF_UNSIGNED_INT_LIMITS +#undef DEF_FP_LIMITS +#undef DEF_PRECISION_LIMITS + +} // namespace operations_research diff --git a/ortools/base/mathlimits.h b/ortools/base/mathlimits.h new file mode 100644 index 0000000000..645dba8def --- /dev/null +++ b/ortools/base/mathlimits.h @@ -0,0 +1,263 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_BASE_MATHLIMITS_H_ +#define OR_TOOLS_BASE_MATHLIMITS_H_ + +#include +#include + +namespace operations_research { + +// ========================================================================= // + +// Useful integer and floating point limits and type traits. 
+// This is just for the documentation; +// real members are defined in our specializations below. +template +struct MathLimits { + // Type name. + typedef T Type; + // Unsigned version of the Type with the same byte size. + // Same as Type for floating point and unsigned types. + typedef T UnsignedType; + // If the type supports negative values. + static const bool kIsSigned; + // If the type supports only integer values. + static const bool kIsInteger; + // Magnitude-wise smallest representable positive value. + static const Type kPosMin; + // Magnitude-wise largest representable positive value. + static const Type kPosMax; + // Smallest representable value. + static const Type kMin; + // Largest representable value. + static const Type kMax; + // Magnitude-wise smallest representable negative value. + // Present only if kIsSigned. + static const Type kNegMin; + // Magnitude-wise largest representable negative value. + // Present only if kIsSigned. + static const Type kNegMax; + // Smallest integer x such that 10^x is representable. + static const int kMin10Exp; + // Largest integer x such that 10^x is representable. + static const int kMax10Exp; + // Smallest positive value such that Type(1) + kEpsilon != Type(1) + static const Type kEpsilon; + // Typical rounding error that is enough to cover + // a few simple floating-point operations. + // Slightly larger than kEpsilon to account for a few rounding errors. + // Is zero if kIsInteger. + static const Type kStdError; + // Number of decimal digits of mantissa precision. + // Present only if !kIsInteger. + static const int kPrecisionDigits; + // Not a number, i.e. result of 0/0. + // Present only if !kIsInteger. + static const Type kNaN; + // Positive infinity, i.e. result of 1/0. + // Present only if !kIsInteger. + static const Type kPosInf; + // Negative infinity, i.e. result of -1/0. + // Present only if !kIsInteger. 
+ static const Type kNegInf; + + // NOTE: Special floating point values behave + // in a special (but mathematically-logical) way + // in terms of (in)equalty comparison and mathematical operations + // -- see out unittest for examples. + + // Special floating point value testers. + // Present in integer types for convenience. + static bool IsFinite(const Type x); + static bool IsNaN(const Type x); + static bool IsInf(const Type x); + static bool IsPosInf(const Type x); + static bool IsNegInf(const Type x); +}; + +// ========================================================================= // + +// All #define-s below are simply to refactor the declarations of +// MathLimits template specializations. +// They are all #undef-ined below. + +// The hoop-jumping in *_INT_(MAX|MIN) below is so that the compiler does not +// get an overflow while computing the constants. + +#define SIGNED_INT_MAX(Type) \ + (((Type(1) << (sizeof(Type) * 8 - 2)) - 1) + \ + (Type(1) << (sizeof(Type) * 8 - 2))) + +#define SIGNED_INT_MIN(Type) \ + (-(Type(1) << (sizeof(Type) * 8 - 2)) - (Type(1) << (sizeof(Type) * 8 - 2))) + +#define UNSIGNED_INT_MAX(Type) \ + (((Type(1) << (sizeof(Type) * 8 - 1)) - 1) + \ + (Type(1) << (sizeof(Type) * 8 - 1))) + +// Compile-time selected log10-related constants for integer types. +#define SIGNED_MAX_10_EXP(Type) \ + (sizeof(Type) == 1 \ + ? 2 \ + : (sizeof(Type) == 2 \ + ? 4 \ + : (sizeof(Type) == 4 ? 9 : (sizeof(Type) == 8 ? 18 : -1)))) + +#define UNSIGNED_MAX_10_EXP(Type) \ + (sizeof(Type) == 1 \ + ? 2 \ + : (sizeof(Type) == 2 \ + ? 4 \ + : (sizeof(Type) == 4 ? 9 : (sizeof(Type) == 8 ? 
19 : -1)))) + +#define DECL_INT_LIMIT_FUNCS \ + static bool IsFinite(const Type /*x*/) { return true; } \ + static bool IsNaN(const Type /*x*/) { return false; } \ + static bool IsInf(const Type /*x*/) { return false; } \ + static bool IsPosInf(const Type /*x*/) { return false; } \ + static bool IsNegInf(const Type /*x*/) { return false; } + +#define DECL_SIGNED_INT_LIMITS(IntType, UnsignedIntType) \ + template <> \ + struct MathLimits { \ + typedef IntType Type; \ + typedef UnsignedIntType UnsignedType; \ + static const bool kIsSigned = true; \ + static const bool kIsInteger = true; \ + static const Type kPosMin = 1; \ + static const Type kPosMax = SIGNED_INT_MAX(Type); \ + static const Type kMin = SIGNED_INT_MIN(Type); \ + static const Type kMax = kPosMax; \ + static const Type kNegMin = -1; \ + static const Type kNegMax = kMin; \ + static const int kMin10Exp = 0; \ + static const int kMax10Exp = SIGNED_MAX_10_EXP(Type); \ + static const Type kEpsilon = 1; \ + static const Type kStdError = 0; \ + DECL_INT_LIMIT_FUNCS \ + }; + +#define DECL_UNSIGNED_INT_LIMITS(IntType) \ + template <> \ + struct MathLimits { \ + typedef IntType Type; \ + typedef IntType UnsignedType; \ + static const bool kIsSigned = false; \ + static const bool kIsInteger = true; \ + static const Type kPosMin = 1; \ + static const Type kPosMax = UNSIGNED_INT_MAX(Type); \ + static const Type kMin = 0; \ + static const Type kMax = kPosMax; \ + static const int kMin10Exp = 0; \ + static const int kMax10Exp = UNSIGNED_MAX_10_EXP(Type); \ + static const Type kEpsilon = 1; \ + static const Type kStdError = 0; \ + DECL_INT_LIMIT_FUNCS \ + }; + +// Notes on lint: When exhaustively specifying specializations for all +// integer types, we must use the built-in types rather than +// typedefs, because the typedefs can resolve to the same built-in +// causing a template specialization conflict. 
+// +// NOLINTNEXTLINE(runtime/int) +DECL_SIGNED_INT_LIMITS(signed char, unsigned char) +// NOLINTNEXTLINE(runtime/int) +DECL_SIGNED_INT_LIMITS(signed short int, unsigned short int) +// NOLINTNEXTLINE(runtime/int) +DECL_SIGNED_INT_LIMITS(signed int, unsigned int) +// NOLINTNEXTLINE(runtime/int) +DECL_SIGNED_INT_LIMITS(signed long int, unsigned long int) +// NOLINTNEXTLINE(runtime/int) +DECL_SIGNED_INT_LIMITS(signed long long int, unsigned long long int) +// NOLINTNEXTLINE(runtime/int) +DECL_UNSIGNED_INT_LIMITS(unsigned char) +// NOLINTNEXTLINE(runtime/int) +DECL_UNSIGNED_INT_LIMITS(unsigned short int) +// NOLINTNEXTLINE(runtime/int) +DECL_UNSIGNED_INT_LIMITS(unsigned int) +// NOLINTNEXTLINE(runtime/int) +DECL_UNSIGNED_INT_LIMITS(unsigned long int) +// NOLINTNEXTLINE(runtime/int) +DECL_UNSIGNED_INT_LIMITS(unsigned long long int) + +#undef DECL_SIGNED_INT_LIMITS +#undef DECL_UNSIGNED_INT_LIMITS +#undef SIGNED_INT_MAX +#undef SIGNED_INT_MIN +#undef UNSIGNED_INT_MAX +#undef SIGNED_MAX_10_EXP +#undef UNSIGNED_MAX_10_EXP +#undef DECL_INT_LIMIT_FUNCS + +// ========================================================================= // +#ifdef WIN32 // Lacks built-in isnan() and isinf() +#define DECL_FP_LIMIT_FUNCS \ + static bool IsFinite(Type x) { return _finite(x) != 0; } \ + static bool IsNaN(Type x) { return _isnan(x) != 0; } \ + static bool IsInf(Type x) { \ + return (_fpclass(x) & (_FPCLASS_NINF | _FPCLASS_PINF)) != 0; \ + } \ + static bool IsPosInf(Type x) { return _fpclass(x) == _FPCLASS_PINF; } \ + static bool IsNegInf(Type x) { return _fpclass(x) == _FPCLASS_NINF; } +#else +#define DECL_FP_LIMIT_FUNCS \ + static bool IsFinite(Type x) { return !std::isinf(x) && !std::isnan(x); } \ + static bool IsNaN(Type x) { return std::isnan(x); } \ + static bool IsInf(Type x) { return std::isinf(x); } \ + static bool IsPosInf(Type x) { return std::isinf(x) && x > 0; } \ + static bool IsNegInf(Type x) { return std::isinf(x) && x < 0; } +#endif + +// We can't put floating-point 
constant values in the header here because +// such constants are not considered to be primitive-type constants by gcc. +// CAVEAT: Hence, they are going to be initialized only during +// the global objects construction time. +#define DECL_FP_LIMITS(FP_Type, PREFIX) \ + template <> \ + struct MathLimits { \ + typedef FP_Type Type; \ + typedef FP_Type UnsignedType; \ + static const bool kIsSigned = true; \ + static const bool kIsInteger = false; \ + static const Type kPosMin; \ + static const Type kPosMax; \ + static const Type kMin; \ + static const Type kMax; \ + static const Type kNegMin; \ + static const Type kNegMax; \ + static const int kMin10Exp = PREFIX##_MIN_10_EXP; \ + static const int kMax10Exp = PREFIX##_MAX_10_EXP; \ + static const Type kEpsilon; \ + static const Type kStdError; \ + static const int kPrecisionDigits = PREFIX##_DIG; \ + static const Type kNaN; \ + static const Type kPosInf; \ + static const Type kNegInf; \ + DECL_FP_LIMIT_FUNCS \ + }; + +DECL_FP_LIMITS(float, FLT) +DECL_FP_LIMITS(double, DBL) +DECL_FP_LIMITS(long double, LDBL) + +#undef DECL_FP_LIMITS +#undef DECL_FP_LIMIT_FUNCS + +// ========================================================================= // + +} // namespace operations_research + +#endif // OR_TOOLS_BASE_MATHLIMITS_H_ diff --git a/ortools/base/mathutil.cc b/ortools/base/mathutil.cc new file mode 100644 index 0000000000..38698be0a0 --- /dev/null +++ b/ortools/base/mathutil.cc @@ -0,0 +1,51 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/base/mathutil.h" + +#include "ortools/base/logging.h" + +namespace operations_research { + +// The formula is extracted from the following page +// http://en.wikipedia.org/w/index.php?title=Stirling%27s_approximation +double MathUtil::Stirling(double n) { + static const double kLog2Pi = log(2 * M_PI); + const double logN = log(n); + return (n * logN - n + 0.5 * (kLog2Pi + logN) // 0.5 * log(2 * M_PI * n) + + 1 / (12 * n) - 1 / (360 * n * n * n)); +} + +double MathUtil::LogCombinations(int n, int k) { + CHECK_GE(n, k); + CHECK_GT(n, 0); + CHECK_GE(k, 0); + + // use symmetry to pick the shorter calculation + if (k > n / 2) { + k = n - k; + } + + // If we have more than 30 logarithms to calculate, we'll use + // Stirling's approximation for log(n!). + if (k > 15) { + return Stirling(n) - Stirling(k) - Stirling(n - k); + } else { + double result = 0; + for (int i = 1; i <= k; i++) { + result += log(n - k + i) - log(i); + } + return result; + } +} +} // namespace operations_research diff --git a/ortools/base/mathutil.h b/ortools/base/mathutil.h index fdb824d0ac..d7ffd9d0c2 100644 --- a/ortools/base/mathutil.h +++ b/ortools/base/mathutil.h @@ -23,6 +23,7 @@ #include "absl/base/casts.h" #include "ortools/base/logging.h" #include "ortools/base/macros.h" +#include "ortools/base/mathlimits.h" namespace operations_research { class MathUtil { @@ -134,7 +135,142 @@ class MathUtil { return static_cast(x < 0 ? (x - 0.5) : (x + 0.5)); } + // Returns the minimum integer value which is a multiple of rounding_value, + // and greater than or equal to input_value. + // The input_value must be greater than or equal to zero, and the + // rounding_value must be greater than zero. 
+ template + static IntType RoundUpTo(IntType input_value, IntType rounding_value) { + static_assert(MathLimits::kIsInteger, + "RoundUpTo() operation type is not integer"); + DCHECK_GE(input_value, 0); + DCHECK_GT(rounding_value, 0); + const IntType remainder = input_value % rounding_value; + return (remainder == 0) ? input_value + : (input_value - remainder + rounding_value); + } + + // Convert a floating-point number to an integer. For all inputs x where + // static_cast(x) is legal according to the C++ standard, the result + // is identical to that cast (i.e. the result is x with its fractional part + // truncated whenever that is representable as IntOut). + // + // static_cast would cause undefined behavior for the following cases, which + // have well-defined behavior for this function: + // + // 1. If x is NaN, the result is zero. + // + // 2. If the truncated form of x is above the representable range of IntOut, + // the result is MathLimits::kMax. + // + // 3. If the truncated form of x is below the representable range of IntOut, + // the result is MathLimits::kMin. + // + // Note that cases #2 and #3 cover infinities as well as finite numbers. + // + // The range of FloatIn must include the range of IntOut, otherwise + // the results are undefined. + template + static IntOut SafeCast(FloatIn x) { + COMPILE_ASSERT(!MathLimits::kIsInteger, FloatIn_is_integer); + COMPILE_ASSERT(MathLimits::kIsInteger, IntOut_is_not_integer); + COMPILE_ASSERT(std::numeric_limits::radix == 2, IntOut_is_base_2); + + // Special case NaN, for which the logic below doesn't work. + if (MathLimits::IsNaN(x)) { + return 0; + } + + // Negative values all clip to zero for unsigned results. + if (!MathLimits::kIsSigned && x < 0) { + return 0; + } + + // Handle infinities. + if (MathLimits::IsInf(x)) { + return x < 0 ? MathLimits::kMin : MathLimits::kMax; + } + + // Set exp such that x == f * 2^exp for some f with |f| in [0.5, 1.0), + // unless x is zero in which case exp == 0. 
Note that this implies that the + // magnitude of x is strictly less than 2^exp. + int exp = 0; + std::frexp(x, &exp); + + // Let N be the number of non-sign bits in the representation of IntOut. If + // the magnitude of x is strictly less than 2^N, the truncated version of x + // is representable as IntOut. The only representable integer for which this + // is not the case is kMin for signed types (i.e. -2^N), but that is covered + // by the fall-through below. + if (exp <= std::numeric_limits::digits) { + return x; + } + + // Handle numbers with magnitude >= 2^N. + return x < 0 ? MathLimits::kMin : MathLimits::kMax; + } + + // -------------------------------------------------------------------- + // SafeRound + // These functions round a floating-point number to an integer. + // Results are identical to Round, except in cases where + // the argument is NaN, or when the rounded value would overflow the + // return type. In those cases, Round has undefined + // behavior. SafeRound returns 0 when the argument is + // NaN, and returns the closest possible integer value otherwise (i.e. + // MathLimits::kMax for large positive values, and + // MathLimits::kMin for large negative values). + // The range of FloatIn must include the range of IntOut, otherwise + // the results are undefined. + // -------------------------------------------------------------------- + template + static IntOut SafeRound(FloatIn x) { + COMPILE_ASSERT(!MathLimits::kIsInteger, FloatIn_is_integer); + COMPILE_ASSERT(MathLimits::kIsInteger, IntOut_is_not_integer); + + if (MathLimits::IsNaN(x)) { + return 0; + } else { + return SafeCast((x < 0.) ? (x - 0.5) : (x + 0.5)); + } + } + + // -------------------------------------------------------------------- + // FastInt64Round + // Fast routines for converting floating-point numbers to integers. 
+ // + // These routines are approximately 6 times faster than the default + // implementation of Round on Intel processors (12 times faster on + // the Pentium 3). They are also more than 5 times faster than simply + // casting a "double" to an "int" using static_cast. This is + // because casts are defined to truncate towards zero, which on Intel + // processors requires changing the rounding mode and flushing the + // floating-point pipeline (unless programs are compiled specifically + // for the Pentium 4, which has a new instruction to avoid this). + // + // Numbers that are halfway between two integers may be rounded up or + // down. This is because the conversion is done using the default + // rounding mode, which rounds towards the closest even number in case + // of ties. So for example, FastIntRound(0.5) == 0, but + // FastIntRound(1.5) == 2. These functions should only be used with + // applications that don't care about which way such half-integers are + // rounded. + // + // There are template specializations of Round() which call these + // functions (for "int" and "int64" only), but it's safer to call them + // directly. static int64_t FastInt64Round(double x) { return Round(x); } + + // Returns Stirling's Approximation for log(n!) which has an error + // of at worst 1/(1260*n^5). + static double Stirling(double n); + + // Returns the log of the binomial coefficient C(n, k), known in the + // vernacular as "N choose K". Why log? Because the integer number + // for non-trivial N and K would overflow. + // Note that if k > 15, this uses Stirling's approximation of log(n!). + // The relative error is about 1/(1260*k^5) (which is 7.6e-10 when k=16). 
+ static double LogCombinations(int n, int k); }; } // namespace operations_research diff --git a/ortools/base/strong_vector.h b/ortools/base/strong_vector.h index 38a7ba4f27..3441492ab7 100644 --- a/ortools/base/strong_vector.h +++ b/ortools/base/strong_vector.h @@ -12,31 +12,30 @@ // limitations under the License. // This file provides the StrongVector container that wraps around the STL -// std::vector. -// The wrapper restricts indexing to a pre-specified type-safe integer type or -// IntType (see int_type.h). It prevents accidental indexing -// by different "logical" integer-like types (e.g. another IntType) or native -// integer types. The wrapper is useful as C++ and the standard template -// library allows the user to mix "logical" integral indices that might have a -// different role. +// vector. The wrapper restrict indexing to a pre-specified type-safe integer +// type or StrongInt (see util/intops/strong_int.h). It prevents accidental +// indexing by different "logical" integer-like types (e.g. another StrongInt) +// or native integer types. The wrapper is useful as C++ and the standard +// template library allows the user to mix "logical" integral indices that might +// have a different role. // -// The container can only be indexed by an instance of an IntType class, which +// The container can only be indexed by an instance of an StrongInt class, which // can be declared as: // -// DEFINE_INT_TYPE(IntTypeName, IntTypeValueType); +// DEFINE_STRONG_INT_TYPE(type_name, value_type); // -// where IntTypeName is the desired name for the "logical" integer-like type -// and the ValueType is a supported native integer type such as int or -// uint64_t (see int_type.h for details). +// where type_name is the desired name for the "logical" integer-like type +// and the value_type is a supported native integer type such as int or +// uint64_t (see util/intops/strong_int.h for details). 
// // The wrapper exposes all public methods of STL vector and behaves mostly as -// pass-through. The only method modified to ensure type-safety is the operator -// [] and the at() method. +// pass-through. The only methods modified to ensure type-safety are the +// operator [] and the at() methods. // // EXAMPLES -------------------------------------------------------------------- // -// DEFINE_INT_TYPE(PhysicalChildIndex, int32_t); -// absl::StrongVector vec; +// DEFINE_STRONG_INT_TYPE(PhysicalChildIndex, int32_t); +// StrongVector vec; // // PhysicalChildIndex physical_index; // vec[physical_index] = ...; <-- index type match: compiles properly. @@ -46,42 +45,39 @@ // vec[physical_index] = ...; <-- fails to compile. // vec.at(physical_index) = ...; <-- fails to compile. // -// DEFINE_INT_TYPE(LogicalChildIndex, int32_t); -// int32_t logical_index; +// DEFINE_STRONG_INT_TYPE(LogicalChildIndex, int32_t); +// LogicalChildIndex logical_index; // vec[logical_index] = ...; <-- fails to compile. // vec.at(logical_index) = ...; <-- fails to compile. // -// NB: Iterator arithmetic is not allowed as the iterators are not wrapped -// themselves. Therefore, the following caveat is possible: -// *(vec.begin() + 0) = ...; +// NB: Iterator arithmetic bypasses strong typing for the index. +// +// OVERFLOW BEHAVIOR +// +// This class ONLY guards against growing the size beyond the range +// indexable by the index type in debug mode. In optimized mode the +// user can CHECK IsValidSize() when deemed important. 
#ifndef OR_TOOLS_BASE_STRONG_VECTOR_H_ #define OR_TOOLS_BASE_STRONG_VECTOR_H_ -#include - -#include -#include +#include #include -#include #include -#include "ortools/base/int_type.h" -#include "ortools/base/macros.h" +#include "ortools/base/logging.h" +#include "ortools/base/strong_int.h" -namespace absl { +namespace util_intops { -// STL vector ------------------------------------------------------------------ -template > -class StrongVector { +template > +class StrongVector : protected std::vector { public: - typedef IntType IndexType; - typedef std::vector ParentType; - + typedef std::vector ParentType; typedef typename ParentType::size_type size_type; typedef typename ParentType::allocator_type allocator_type; typedef typename ParentType::value_type value_type; - typedef typename ParentType::difference_type difference_type; typedef typename ParentType::reference reference; typedef typename ParentType::const_reference const_reference; typedef typename ParentType::pointer pointer; @@ -93,115 +89,225 @@ class StrongVector { public: StrongVector() {} - - explicit StrongVector(const allocator_type& a) : v_(a) {} - explicit StrongVector(size_type n) : v_(n) {} + explicit StrongVector(const allocator_type& a) : ParentType(a) {} + explicit StrongVector(size_type n) : ParentType(n) { DCHECK(IsValidSize()); } explicit StrongVector(IntType n) : StrongVector(static_cast(n.value())) {} - - StrongVector(size_type n, const value_type& v, + explicit StrongVector(size_type n, const value_type& v, + const allocator_type& a = allocator_type()) + : ParentType(n, v, a) { + DCHECK(IsValidSize()); + } + explicit StrongVector(IntType n, const value_type& v, + const allocator_type& a = allocator_type()) + : StrongVector(static_cast(n.value()), v, a) {} + StrongVector(const StrongVector& x) : ParentType(x.get()) { + DCHECK(IsValidSize()); + } + StrongVector(StrongVector&& x) = default; + StrongVector(std::initializer_list l, const allocator_type& a = allocator_type()) - : v_(n, v, a) 
{} - - StrongVector( - std::initializer_list il) // NOLINT(runtime/explicit) - : v_(il) {} - + : ParentType(l, a) { + DCHECK(IsValidSize()); + } template StrongVector(InputIteratorType first, InputIteratorType last, const allocator_type& a = allocator_type()) - : v_(first, last, a) {} + : ParentType(first, last, a) { + DCHECK(IsValidSize()); + } + ~StrongVector() {} // -- Accessors -------------------------------------------------------------- // This const accessor is useful in defining the comparison operators below. - const ParentType& get() const { return v_; } - // The mutable accessor is useful when using auxiliar methods relying on - // vector parameters such as JoinUsing(), SplitStringUsing(), etc. Methods + const ParentType& get() const { return *this; } + // The mutable accessor is useful when using auxiliary methods relying on + // vector parameters such as JoinUsing(), SplitStringUsing(), etc. Methods // relying solely on iterators (e.g. STLDeleteElements) should work just fine - // without the need for mutable_get(). NB: It should be used only in this + // without the need for mutable_get(). NB: It should be used only in this // case and thus should not be abused to index the underlying vector without // the appropriate IntType. 
- ParentType* mutable_get() { return &v_; } + ParentType* mutable_get() { return this; } // -- Modified methods ------------------------------------------------------- - reference operator[](IndexType i) { return v_[Value(i)]; } - const_reference operator[](IndexType i) const { return v_[Value(i)]; } - reference at(IndexType i) { return v_.at(Value(i)); } - const_reference at(IndexType i) const { return v_.at(Value(i)); } + reference operator[](IntType i) { + return ParentType::operator[](static_cast(i.value())); + } + const_reference operator[](IntType i) const { + return ParentType::operator[](static_cast(i.value())); + } + reference at(IntType i) { + return ParentType::at(static_cast(i.value())); + } + const_reference at(IntType i) const { + return ParentType::at(static_cast(i.value())); + } + + // -- Extension methods ------------------------------------------------------ + + // Iteration related methods. Useful for parallel iteration and + // non-trivial access patterns. Typical loop will be: + // for (auto i = v.start_index(); i < v.end_index(); ++i) ... + IntType start_index() const { return IntType(0); } + // Index following the last valid index into the vector. In case + // size() has grown beyond values representable by IntType, this + // function will truncate the result. There is a debugging check for + // such behavior, but it is unlikely to be triggered in testing. + IntType end_index() const { + DCHECK(IsValidSize()); + return IntType(size()); + } + + // Returns true if the vector is fully addressable by the index type. + bool IsValidSize() const { return ValidSize(size()); } + + // Most methods from vector can be reused without any changes. 
+ using ParentType::back; + using ParentType::begin; + using ParentType::capacity; + using ParentType::cbegin; + using ParentType::cend; + using ParentType::clear; + using ParentType::empty; + using ParentType::end; + using ParentType::erase; + using ParentType::front; + using ParentType::max_size; + using ParentType::pop_back; + using ParentType::rbegin; + using ParentType::rend; + using ParentType::shrink_to_fit; + + // Returns an iterator of valid indices into this vector. Goes from + // start_index() to end_index(). This is useful for cases of + // parallel iteration over several vectors indexed by the same type, e.g. + // StrongVector v1; + // StrongVector v2; + // CHECK_EQ(v1.size(), v2.size()); + // for (const auto i : v1.index_range()) { + // do_stuff(v1[i], v2[i]); + // } + StrongIntRange index_range() const { + return StrongIntRange(start_index(), end_index()); + } // -- Pass-through methods to STL vector ------------------------------------- - void assign(size_type n, const value_type& val) { v_.assign(n, val); } + + // Note that vector::data() does not exist. By wrapping data() + // below, this allows StrongVector to still compile, as long as + // StrongVector::data() is never called. 
+ value_type* data() { return ParentType::data(); } + const value_type* data() const { return ParentType::data(); } + + StrongVector& operator=(const StrongVector& x) { + ParentType::operator=(x.get()); + return *this; + } + StrongVector& operator=(StrongVector&& x) = default; + StrongVector& operator=(std::initializer_list l) { + ParentType::operator=(l); + DCHECK(IsValidSize()); + return *this; + } + + void swap(StrongVector& x) noexcept { ParentType::swap(*x.mutable_get()); } + + void assign(size_type n, const value_type& val) { + DCHECK(ValidSize(n)); + ParentType::assign(n, val); + } template void assign(InputIt f, InputIt l) { - v_.assign(f, l); + ParentType::assign(f, l); + DCHECK(IsValidSize()); } - void assign(std::initializer_list ilist) { v_.assign(ilist); } - - iterator begin() { return v_.begin(); } - const_iterator begin() const { return v_.begin(); } - iterator end() { return v_.end(); } - const_iterator end() const { return v_.end(); } - reverse_iterator rbegin() { return v_.rbegin(); } - const_reverse_iterator rbegin() const { return v_.rbegin(); } - reverse_iterator rend() { return v_.rend(); } - const_reverse_iterator rend() const { return v_.rend(); } - - size_type size() const { return v_.size(); } - size_type max_size() const { return v_.max_size(); } - - void resize(size_type new_size) { v_.resize(new_size); } - void resize(size_type new_size, const value_type& x) { - v_.resize(new_size, x); - } - void resize(IntType new_size) { v_.resize(new_size.value()); } - void resize(IntType new_size, const value_type& x) { - v_.resize(new_size.value(), x); + void assign(std::initializer_list l) { + ParentType::assign(l); + DCHECK(IsValidSize()); } - size_type capacity() const { return v_.capacity(); } - bool empty() const { return v_.empty(); } - void reserve(size_type n) { v_.reserve(n); } - void reserve(IntType n) { reserve(static_cast(n.value())); } - void push_back(const value_type& x) { v_.push_back(x); } - void push_back(value_type&& x) { 
v_.push_back(std::move(x)); } // NOLINT - template - void emplace_back(Args&&... args) { - v_.emplace_back(std::forward(args)...); - } template iterator emplace(const_iterator pos, Args&&... args) { - return v_.emplace(pos, std::forward(args)...); + iterator result = ParentType::emplace(pos, std::forward(args)...); + DCHECK(IsValidSize()); + return result; } - void pop_back() { v_.pop_back(); } - void swap(StrongVector& x) { v_.swap(x.v_); } - void clear() { v_.clear(); } - reference front() { return v_.front(); } - const_reference front() const { return v_.front(); } - reference back() { return v_.back(); } - const_reference back() const { return v_.back(); } - pointer data() { return v_.data(); } - const_pointer data() const { return v_.data(); } - - iterator erase(const_iterator pos) { return v_.erase(pos); } - iterator erase(const_iterator first, const_iterator last) { - return v_.erase(first, last); + template + reference emplace_back(Args&&... args) { + reference value = ParentType::emplace_back(std::forward(args)...); + DCHECK(IsValidSize()); + return value; } + iterator insert(const_iterator pos, const value_type& x) { - return v_.insert(pos, x); + iterator result = ParentType::insert(pos, x); + DCHECK(IsValidSize()); + return result; } - iterator insert(const_iterator pos, value_type&& x) { // NOLINT - return v_.insert(pos, std::move(x)); + iterator insert(const_iterator pos, value_type&& x) { + iterator result = ParentType::insert(pos, std::move(x)); + DCHECK(IsValidSize()); + return result; } - iterator insert(const_iterator pos, size_type n, const value_type& x) { - return v_.insert(pos, n, x); + void insert(const_iterator pos, size_type n, const value_type& x) { + ParentType::insert(pos, n, x); + DCHECK(IsValidSize()); } - template - iterator insert(const_iterator pos, IIt first, IIt last) { - return v_.insert(pos, first, last); + template + void insert(const_iterator pos, SIT first, SIT last) { + ParentType::insert(pos, first, last); + 
DCHECK(IsValidSize()); } - iterator insert(const_iterator pos, std::initializer_list ilist) { - return v_.insert(pos, ilist); + + void push_back(const value_type& val) { + ParentType::push_back(val); + DCHECK(IsValidSize()); + } + void push_back(value_type&& val) { + ParentType::push_back(std::move(val)); + DCHECK(IsValidSize()); + } + + void reserve(size_type n) { + DCHECK(ValidSize(n)); + ParentType::reserve(n); + } + + void reserve(IntType n) { reserve(static_cast(n.value())); } + + void resize(size_type new_size) { + DCHECK(ValidSize(new_size)); + ParentType::resize(new_size); + } + + void resize(IntType new_size) { + resize(static_cast(new_size.value())); + } + + void resize(size_type new_size, const value_type& x) { + DCHECK(ValidSize(new_size)); + ParentType::resize(new_size, x); + } + + void resize(IntType new_size, const value_type& x) { + resize(static_cast(new_size.value()), x); + } + + using ParentType::size; + + static_assert(std::is_integral::value, + "int type indexed vector must have integral index"); + + template + friend H AbslHashValue(H h, const StrongVector& v) { + return H::combine(std::move(h), v.get()); + } + + private: + // Checks that the given value n is in range of the index type. 
+ static bool ValidSize(size_type n) { + return n <= std::numeric_limits::max(); } friend bool operator==(const StrongVector& x, const StrongVector& y) { @@ -222,22 +328,9 @@ class StrongVector { friend bool operator>=(const StrongVector& x, const StrongVector& y) { return x.get() >= y.get(); } - friend void swap(StrongVector& x, StrongVector& y) { x.swap(y); } - - template - friend H AbslHashValue(H h, const StrongVector& v) { - return H::combine(std::move(h), v.v_); - } - - private: - static size_type Value(IndexType i) { return i.template value(); } - - ParentType v_; - - COMPILE_ASSERT(std::is_integral::value, - int_type_indexed_vector_must_have_integral_index); + friend void swap(StrongVector& x, StrongVector& y) noexcept { x.swap(y); } }; -} // namespace absl +} // namespace util_intops #endif // OR_TOOLS_BASE_STRONG_VECTOR_H_ diff --git a/ortools/bop/CMakeLists.txt b/ortools/bop/CMakeLists.txt index 31441a1314..d608d5b24d 100644 --- a/ortools/bop/CMakeLists.txt +++ b/ortools/bop/CMakeLists.txt @@ -28,5 +28,5 @@ target_link_libraries(${NAME} PRIVATE absl::synchronization absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::bop ALIAS ${NAME}) diff --git a/ortools/bop/bop_base.h b/ortools/bop/bop_base.h index 43e6bab0da..2ef5eb8936 100644 --- a/ortools/bop/bop_base.h +++ b/ortools/bop/bop_base.h @@ -182,7 +182,7 @@ class ProblemState { // Returns true when the variable var is fixed in the current problem state. // The value of the fixed variable is returned by GetVariableFixedValue(var). 
bool IsVariableFixed(VariableIndex var) const { return is_fixed_[var]; } - const absl::StrongVector& is_fixed() const { + const util_intops::StrongVector& is_fixed() const { return is_fixed_; } @@ -191,7 +191,7 @@ class ProblemState { bool GetVariableFixedValue(VariableIndex var) const { return fixed_values_[var]; } - const absl::StrongVector& fixed_values() const { + const util_intops::StrongVector& fixed_values() const { return fixed_values_; } @@ -235,8 +235,8 @@ class ProblemState { const sat::LinearBooleanProblem& original_problem_; BopParameters parameters_; int64_t update_stamp_; - absl::StrongVector is_fixed_; - absl::StrongVector fixed_values_; + util_intops::StrongVector is_fixed_; + util_intops::StrongVector fixed_values_; glop::DenseRow lp_values_; BopSolution solution_; std::vector assignment_preference_; diff --git a/ortools/bop/bop_lns.h b/ortools/bop/bop_lns.h index f825dc44ab..73e49602a9 100644 --- a/ortools/bop/bop_lns.h +++ b/ortools/bop/bop_lns.h @@ -34,7 +34,7 @@ namespace bop { // Uses SAT to solve the full problem under the constraint that the new solution // should be under a given Hamming distance of the current solution. -class BopCompleteLNSOptimizer : public BopOptimizerBase { +class BopCompleteLNSOptimizer final : public BopOptimizerBase { public: BopCompleteLNSOptimizer(absl::string_view name, const BopConstraintTerms& objective_terms); @@ -93,7 +93,7 @@ class NeighborhoodGenerator { // A generic LNS optimizer which generates neighborhoods according to the given // NeighborhoodGenerator and automatically adapt the neighborhood size depending // on how easy it is to solve the associated problem. -class BopAdaptiveLNSOptimizer : public BopOptimizerBase { +class BopAdaptiveLNSOptimizer final : public BopOptimizerBase { public: // Takes ownership of the given neighborhood_generator. // The sat_propagator is assumed to contains the current problem. 
@@ -119,7 +119,7 @@ class BopAdaptiveLNSOptimizer : public BopOptimizerBase { // Generates a neighborhood by randomly fixing a subset of the objective // variables that are currently at their lower cost. -class ObjectiveBasedNeighborhood : public NeighborhoodGenerator { +class ObjectiveBasedNeighborhood final : public NeighborhoodGenerator { public: ObjectiveBasedNeighborhood(const BopConstraintTerms* objective_terms, absl::BitGenRef random) @@ -137,7 +137,7 @@ class ObjectiveBasedNeighborhood : public NeighborhoodGenerator { // Generates a neighborhood by randomly selecting a subset of constraints and // fixing the objective variables that are currently at their lower cost and // not in the given subset of constraints. -class ConstraintBasedNeighborhood : public NeighborhoodGenerator { +class ConstraintBasedNeighborhood final : public NeighborhoodGenerator { public: ConstraintBasedNeighborhood(const BopConstraintTerms* objective_terms, absl::BitGenRef random) @@ -155,7 +155,7 @@ class ConstraintBasedNeighborhood : public NeighborhoodGenerator { // Generates a neighborhood by taking a random local neighborhood in an // undirected graph where the nodes are the variables and two nodes are linked // if they appear in the same constraint. -class RelationGraphBasedNeighborhood : public NeighborhoodGenerator { +class RelationGraphBasedNeighborhood final : public NeighborhoodGenerator { public: RelationGraphBasedNeighborhood(const sat::LinearBooleanProblem& problem, absl::BitGenRef random); @@ -168,7 +168,8 @@ class RelationGraphBasedNeighborhood : public NeighborhoodGenerator { // TODO(user): reuse by_variable_matrix_ from the LS? Note however than we // don't need the coefficients here. 
- absl::StrongVector> columns_; + util_intops::StrongVector> + columns_; absl::BitGenRef random_; }; diff --git a/ortools/bop/bop_ls.cc b/ortools/bop/bop_ls.cc index fc85ed9b91..af4cd53e1d 100644 --- a/ortools/bop/bop_ls.cc +++ b/ortools/bop/bop_ls.cc @@ -571,7 +571,7 @@ ConstraintIndex OneFlipConstraintRepairer::ConstraintToRepair() const { TermIndex OneFlipConstraintRepairer::NextRepairingTerm( ConstraintIndex ct_index, TermIndex init_term_index, TermIndex start_term_index) const { - const absl::StrongVector& terms = + const util_intops::StrongVector& terms = by_constraint_matrix_[ct_index]; const int64_t constraint_value = maintainer_.ConstraintValue(ct_index); const int64_t lb = maintainer_.ConstraintLowerBound(ct_index); @@ -623,13 +623,13 @@ sat::Literal OneFlipConstraintRepairer::GetFlip(ConstraintIndex ct_index, } void OneFlipConstraintRepairer::SortTermsOfEachConstraints(int num_variables) { - absl::StrongVector objective(num_variables, 0); + util_intops::StrongVector objective(num_variables, 0); for (const ConstraintTerm& term : by_constraint_matrix_[AssignmentAndConstraintFeasibilityMaintainer:: kObjectiveConstraint]) { objective[term.var] = std::abs(term.weight); } - for (absl::StrongVector& terms : + for (util_intops::StrongVector& terms : by_constraint_matrix_) { std::sort(terms.begin(), terms.end(), [&objective](const ConstraintTerm& a, const ConstraintTerm& b) { diff --git a/ortools/bop/bop_ls.h b/ortools/bop/bop_ls.h index 859ba325f2..2a5ea07842 100644 --- a/ortools/bop/bop_ls.h +++ b/ortools/bop/bop_ls.h @@ -250,7 +250,7 @@ class NonOrderedSetHasher { private: absl::BitGenRef random_; - absl::StrongVector hashes_; + util_intops::StrongVector hashes_; }; // This class is used to incrementally maintain an assignment and the @@ -413,16 +413,16 @@ class AssignmentAndConstraintFeasibilityMaintainer { int64_t weight; }; - absl::StrongVector> + util_intops::StrongVector< + VariableIndex, util_intops::StrongVector> by_variable_matrix_; - 
absl::StrongVector constraint_lower_bounds_; - absl::StrongVector constraint_upper_bounds_; + util_intops::StrongVector constraint_lower_bounds_; + util_intops::StrongVector constraint_upper_bounds_; BopSolution assignment_; BopSolution reference_; - absl::StrongVector constraint_values_; + util_intops::StrongVector constraint_values_; BacktrackableIntegerSet infeasible_constraint_set_; // This contains the list of variable flipped in assignment_. @@ -514,8 +514,8 @@ class OneFlipConstraintRepairer { // on most promising variables first. void SortTermsOfEachConstraints(int num_variables); - absl::StrongVector> + util_intops::StrongVector< + ConstraintIndex, util_intops::StrongVector> by_constraint_matrix_; const AssignmentAndConstraintFeasibilityMaintainer& maintainer_; const sat::VariablesAssignment& sat_assignment_; @@ -632,7 +632,7 @@ class LocalSearchAssignmentIterator { SatWrapper* const sat_wrapper_; OneFlipConstraintRepairer repairer_; std::vector search_nodes_; - absl::StrongVector initial_term_index_; + util_intops::StrongVector initial_term_index_; // Temporary vector used by ApplyDecision(). 
std::vector tmp_propagated_literals_; diff --git a/ortools/bop/bop_portfolio.cc b/ortools/bop/bop_portfolio.cc index 5fce8591ea..b3cbbb11f4 100644 --- a/ortools/bop/bop_portfolio.cc +++ b/ortools/bop/bop_portfolio.cc @@ -353,7 +353,8 @@ void PortfolioOptimizer::CreateOptimizers( // OptimizerSelector //------------------------------------------------------------------------------ OptimizerSelector::OptimizerSelector( - const absl::StrongVector& optimizers) + const util_intops::StrongVector& + optimizers) : run_infos_(), selected_index_(optimizers.size()) { for (OptimizerIndex i(0); i < optimizers.size(); ++i) { info_positions_.push_back(run_infos_.size()); diff --git a/ortools/bop/bop_portfolio.h b/ortools/bop/bop_portfolio.h index d40c259a34..aefc8d803e 100644 --- a/ortools/bop/bop_portfolio.h +++ b/ortools/bop/bop_portfolio.h @@ -95,7 +95,7 @@ class PortfolioOptimizer : public BopOptimizerBase { int64_t state_update_stamp_; BopConstraintTerms objective_terms_; std::unique_ptr selector_; - absl::StrongVector optimizers_; + util_intops::StrongVector optimizers_; sat::SatSolver sat_propagator_; BopParameters parameters_; double lower_bound_; @@ -110,7 +110,8 @@ class OptimizerSelector { // Note that the list of optimizers is only used to get the names for // debug purposes, the ownership of the optimizers is not transferred. explicit OptimizerSelector( - const absl::StrongVector& optimizers); + const util_intops::StrongVector& + optimizers); // Selects the next optimizer to run based on the user defined order and // history of success. 
Returns kInvalidOptimizerIndex if no optimizer is @@ -202,7 +203,7 @@ class OptimizerSelector { }; std::vector run_infos_; - absl::StrongVector info_positions_; + util_intops::StrongVector info_positions_; int selected_index_; }; diff --git a/ortools/bop/bop_solution.h b/ortools/bop/bop_solution.h index 0d2d4dad91..a940ee04ca 100644 --- a/ortools/bop/bop_solution.h +++ b/ortools/bop/bop_solution.h @@ -81,10 +81,10 @@ class BopSolution { } // For range based iteration, i.e. for (const bool value : solution) {...}. - absl::StrongVector::const_iterator begin() const { + util_intops::StrongVector::const_iterator begin() const { return values_.begin(); } - absl::StrongVector::const_iterator end() const { + util_intops::StrongVector::const_iterator end() const { return values_.end(); } @@ -103,7 +103,7 @@ class BopSolution { const sat::LinearBooleanProblem* problem_; std::string name_; - absl::StrongVector values_; + util_intops::StrongVector values_; // Those are mutable because they behave as const values for a given solution // but for performance reasons we want to be lazy on their computation, diff --git a/ortools/bop/bop_types.h b/ortools/bop/bop_types.h index f08a82d3c1..f9843afb8c 100644 --- a/ortools/bop/bop_types.h +++ b/ortools/bop/bop_types.h @@ -85,7 +85,8 @@ struct BopConstraintTerm { return search_id < other.search_id; } }; -typedef absl::StrongVector BopConstraintTerms; +typedef util_intops::StrongVector + BopConstraintTerms; } // namespace bop } // namespace operations_research diff --git a/ortools/bop/integral_solver.cc b/ortools/bop/integral_solver.cc index bb0a6669b3..00d6c25d5e 100644 --- a/ortools/bop/integral_solver.cc +++ b/ortools/bop/integral_solver.cc @@ -408,7 +408,7 @@ class IntegralProblemConverter { // constraint. Fractional AddWeightedIntegralVariable( ColIndex col, Fractional weight, - absl::StrongVector* dense_weights); + util_intops::StrongVector* dense_weights); // Scales weights and adds all non-zero scaled weights and literals to t. 
// t is a constraint or the objective. @@ -418,25 +418,27 @@ class IntegralProblemConverter { template double ScaleAndSparsifyWeights( double scaling_factor, int64_t gcd, - const absl::StrongVector& dense_weights, T* t); + const util_intops::StrongVector& dense_weights, + T* t); // Returns true when at least one element is non-zero. bool HasNonZeroWeights( - const absl::StrongVector& dense_weights) const; + const util_intops::StrongVector& dense_weights) + const; bool problem_is_boolean_and_has_only_integral_constraints_; // global_to_boolean_[i] represents the Boolean variable index in Bop; when // negative -global_to_boolean_[i] - 1 represents the index of the // integral variable in integral_variables_. - absl::StrongVector + util_intops::StrongVector global_to_boolean_; std::vector integral_variables_; std::vector integral_indices_; int num_boolean_variables_; enum VariableType { BOOLEAN, INTEGRAL, INTEGRAL_EXPRESSED_AS_BOOLEAN }; - absl::StrongVector variable_types_; + util_intops::StrongVector variable_types_; }; IntegralProblemConverter::IntegralProblemConverter() @@ -610,7 +612,7 @@ void IntegralProblemConverter::ConvertAllConstraints( std::vector coefficients; for (RowIndex row(0); row < linear_problem.num_constraints(); ++row) { Fractional offset = 0.0; - absl::StrongVector dense_weights( + util_intops::StrongVector dense_weights( num_boolean_variables_, 0.0); for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) { // Cast in ColIndex due to the transpose. @@ -686,7 +688,7 @@ void IntegralProblemConverter::ConvertObjective( LinearBooleanProblem* boolean_problem) { LinearObjective* objective = boolean_problem->mutable_objective(); Fractional offset = 0.0; - absl::StrongVector dense_weights( + util_intops::StrongVector dense_weights( num_boolean_variables_, 0.0); // Compute the objective weights for the binary variable model. 
for (ColIndex col(0); col < linear_problem.num_variables(); ++col) { @@ -821,7 +823,7 @@ bool IntegralProblemConverter::CreateVariableUsingConstraint( integral_var->Clear(); const SparseMatrix& transpose = linear_problem.GetTransposeSparseMatrix(); - absl::StrongVector dense_weights( + util_intops::StrongVector dense_weights( num_boolean_variables_, 0.0); Fractional scale = 1.0; int64_t variable_offset = 0; @@ -872,7 +874,7 @@ bool IntegralProblemConverter::CreateVariableUsingConstraint( Fractional IntegralProblemConverter::AddWeightedIntegralVariable( ColIndex col, Fractional weight, - absl::StrongVector* dense_weights) { + util_intops::StrongVector* dense_weights) { CHECK(nullptr != dense_weights); if (weight == 0.0) { @@ -899,7 +901,8 @@ Fractional IntegralProblemConverter::AddWeightedIntegralVariable( template double IntegralProblemConverter::ScaleAndSparsifyWeights( double scaling_factor, int64_t gcd, - const absl::StrongVector& dense_weights, T* t) { + const util_intops::StrongVector& dense_weights, + T* t) { CHECK(nullptr != t); double bound_error = 0.0; @@ -915,7 +918,8 @@ double IntegralProblemConverter::ScaleAndSparsifyWeights( return bound_error; } bool IntegralProblemConverter::HasNonZeroWeights( - const absl::StrongVector& dense_weights) const { + const util_intops::StrongVector& dense_weights) + const { for (const Fractional weight : dense_weights) { if (weight != 0.0) { return true; diff --git a/ortools/constraint_solver/constraint_solveri.h b/ortools/constraint_solver/constraint_solveri.h index 95459cfd8f..323595486f 100644 --- a/ortools/constraint_solver/constraint_solveri.h +++ b/ortools/constraint_solver/constraint_solveri.h @@ -1769,11 +1769,11 @@ class SubDagComputer { // Initialized by BuildGraph(), after which the outgoing arcs of node n are // the range from arcs_[arcs_of_node_[n]] included to // arcs_[arcs_of_node_[n+1]] excluded. 
- absl::StrongVector arcs_of_node_; + util_intops::StrongVector arcs_of_node_; // Must be false before BuildGraph() is called, true afterwards. bool graph_was_built_ = false; // Used by ComputeSortedSubDagArcs. - absl::StrongVector indegree_of_node_; + util_intops::StrongVector indegree_of_node_; // Used by ComputeSortedSubDagArcs. std::vector nodes_to_visit_; // Used as output, set up as a member to allow reuse. @@ -1839,14 +1839,14 @@ class LocalSearchState { const VariableDomain& d2) const { return d1.max < d2.min || d2.max < d1.min; } - absl::StrongVector relaxed_domains_; - absl::StrongVector current_domains_; + util_intops::StrongVector relaxed_domains_; + util_intops::StrongVector current_domains_; struct TrailedVariableDomain { VariableDomain committed_domain; VariableDomainId domain_id; }; std::vector trailed_domains_; - absl::StrongVector domain_is_trailed_; + util_intops::StrongVector domain_is_trailed_; // True iff all domains have their min <= max. bool state_domains_are_all_nonempty_ = true; bool state_has_relaxed_domains_ = false; @@ -1907,11 +1907,11 @@ class LocalSearchState { // Structure of the expression DAG, used to buffer propagation storage. SubDagComputer dag_; // Maps arcs of dag_ to domain/constraint dependencies. - absl::StrongVector dependency_of_dag_arc_; + util_intops::StrongVector dependency_of_dag_arc_; // Maps domain ids to dag_ nodes. - absl::StrongVector dag_node_of_domain_; + util_intops::StrongVector dag_node_of_domain_; // Maps constraint ids to dag_ nodes. - absl::StrongVector dag_node_of_constraint_; + util_intops::StrongVector dag_node_of_constraint_; // Number of nodes currently allocated in dag_. // Reserve node 0 as a default dummy node with no dependencies. int num_dag_nodes_ = 1; @@ -1931,7 +1931,7 @@ class LocalSearchState { // The triggers of domain i are stored from triggers_of_domain_[i] // to triggers_of_domain_[i+1] excluded. 
std::vector triggers_; - absl::StrongVector triggers_of_domain_; + util_intops::StrongVector triggers_of_domain_; // Constraints are used to form expressions that make up the objective. // Constraints are directed: they have inputs and an output, moreover the @@ -2001,7 +2001,7 @@ class LocalSearchState { bool constraint_is_trailed_ = false; }; // Used to identify constraints and hold ownership. - absl::StrongVector> constraints_; + util_intops::StrongVector> constraints_; }; // A LocalSearchState Variable can only be created by a LocalSearchState, diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index 0fbaa42dcd..d92402fa67 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -3785,8 +3785,8 @@ void SubDagComputer::BuildGraph(int num_nodes) { bool SubDagComputer::HasDirectedCycle() const { DCHECK(graph_was_built_); - absl::StrongVector node_is_open(num_nodes_, false); - absl::StrongVector node_was_visited(num_nodes_, false); + util_intops::StrongVector node_is_open(num_nodes_, false); + util_intops::StrongVector node_was_visited(num_nodes_, false); // Depth first search event: a node and a boolean indicating whether // to open or to close it. struct DFSEvent { diff --git a/ortools/constraint_solver/routing.cc b/ortools/constraint_solver/routing.cc index 9ef00d55b2..1bd2ad0482 100644 --- a/ortools/constraint_solver/routing.cc +++ b/ortools/constraint_solver/routing.cc @@ -1028,7 +1028,7 @@ namespace { struct ResourceClass { using DimensionIndex = RoutingModel::DimensionIndex; /// The attributes for each dimension. - absl::StrongVector + util_intops::StrongVector dimension_attributes; /// Assignability of vehicles. 
std::vector assignable_to_vehicle; @@ -1054,7 +1054,7 @@ void ResourceGroup::ComputeResourceClasses() { for (int r = 0; r < resources_.size(); ++r) { ResourceClass resource_class; - absl::StrongVector& dim_attributes = + util_intops::StrongVector& dim_attributes = resource_class.dimension_attributes; dim_attributes.resize(model_->dimensions_.size(), Attributes()); for (const auto& [dim_index, attributes] : @@ -1355,14 +1355,14 @@ struct VehicleClass { int end_equivalence_class; /// Bounds of cumul variables at start and end vehicle nodes. /// dimension_{start,end}_cumuls_{min,max}[d] is the bound for dimension d. - absl::StrongVector dimension_start_cumuls_min; - absl::StrongVector dimension_start_cumuls_max; - absl::StrongVector dimension_end_cumuls_min; - absl::StrongVector dimension_end_cumuls_max; - absl::StrongVector dimension_capacities; + util_intops::StrongVector dimension_start_cumuls_min; + util_intops::StrongVector dimension_start_cumuls_max; + util_intops::StrongVector dimension_end_cumuls_min; + util_intops::StrongVector dimension_end_cumuls_max; + util_intops::StrongVector dimension_capacities; /// dimension_evaluators[d]->Run(from, to) is the transit value of arc /// from->to for a dimension d. - absl::StrongVector dimension_evaluator_classes; + util_intops::StrongVector dimension_evaluator_classes; /// Hash of the visitability of (non-start/end) nodes. 
uint64_t visitable_nodes_hash; /// Hash of allowed resources for each resource group, or -1 if a given diff --git a/ortools/constraint_solver/routing.h b/ortools/constraint_solver/routing.h index 131aa6adad..1d40fa6a36 100644 --- a/ortools/constraint_solver/routing.h +++ b/ortools/constraint_solver/routing.h @@ -540,7 +540,7 @@ class RoutingModel { return resource_indices_per_class_[resource_class]; } // clang-format off - const absl::StrongVector >& + const util_intops::StrongVector >& GetResourceIndicesPerClass() const { return resource_indices_per_class_; } @@ -569,7 +569,7 @@ class RoutingModel { // ComputeResourceClasses()). std::vector resource_class_indices_; // clang-format off - absl::StrongVector > + util_intops::StrongVector > resource_indices_per_class_; // clang-format on @@ -2358,7 +2358,7 @@ class RoutingModel { mutable RevSwitch is_bound_to_end_ct_added_; /// Dimensions absl::flat_hash_map dimension_name_to_index_; - absl::StrongVector dimensions_; + util_intops::StrongVector dimensions_; /// Resource Groups. /// If resource_groups_ is not empty, then for each group of resources, each /// (used) vehicle must be assigned to exactly 1 resource, and each resource @@ -2366,7 +2366,7 @@ class RoutingModel { // clang-format off std::vector > resource_groups_; /// Stores the set of resource groups related to each dimension. - absl::StrongVector > + util_intops::StrongVector > dimension_resource_group_indices_; /// TODO(user): Define a new Dimension[Global|Local]OptimizerIndex type @@ -2374,10 +2374,10 @@ class RoutingModel { /// mappings below. 
std::vector > global_dimension_optimizers_; - absl::StrongVector global_optimizer_index_; + util_intops::StrongVector global_optimizer_index_; std::vector > local_dimension_optimizers_; - absl::StrongVector local_optimizer_index_; + util_intops::StrongVector local_optimizer_index_; // clang-format on std::string primary_constrained_dimension_; /// Costs @@ -2405,7 +2405,7 @@ class RoutingModel { absl::flat_hash_map, std::vector, absl::Hash>> force_distance_to_vehicle_unit_costs_; - absl::StrongVector cost_classes_; + util_intops::StrongVector cost_classes_; #endif // SWIG bool costs_are_homogeneous_across_vehicles_; bool cache_callbacks_; @@ -2416,7 +2416,7 @@ class RoutingModel { VehicleTypeContainer vehicle_type_container_; std::function vehicle_start_class_callback_; /// Disjunctions - absl::StrongVector disjunctions_; + util_intops::StrongVector disjunctions_; // clang-format off std::vector > index_to_disjunctions_; /// Same vehicle costs diff --git a/ortools/constraint_solver/routing_constraints.cc b/ortools/constraint_solver/routing_constraints.cc index c1b4c5cefc..9ee9c2701a 100644 --- a/ortools/constraint_solver/routing_constraints.cc +++ b/ortools/constraint_solver/routing_constraints.cc @@ -179,7 +179,7 @@ class ResourceAssignmentConstraint : public Constraint { }; using RCIndex = RoutingModel::ResourceClassIndex; - const absl::StrongVector> + const util_intops::StrongVector> ignored_resources_per_class(resource_group_.GetResourceClassesCount()); std::vector> assignment_costs(model_.vehicles()); for (int v : resource_group_.GetVehiclesRequiringAResource()) { diff --git a/ortools/constraint_solver/routing_decision_builders.cc b/ortools/constraint_solver/routing_decision_builders.cc index 8adf36d8ae..6e0543e774 100644 --- a/ortools/constraint_solver/routing_decision_builders.cc +++ b/ortools/constraint_solver/routing_decision_builders.cc @@ -239,7 +239,7 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { std::vector 
vehicles_without_resource_assignment; std::vector vehicles_with_resource_assignment; - absl::StrongVector> + util_intops::StrongVector> used_resources_per_class; DetermineVehiclesRequiringResourceAssignment( &vehicles_without_resource_assignment, @@ -309,7 +309,7 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { void DetermineVehiclesRequiringResourceAssignment( std::vector* vehicles_without_resource_assignment, std::vector* vehicles_with_resource_assignment, - absl::StrongVector>* + util_intops::StrongVector>* used_resources_per_class) const { vehicles_without_resource_assignment->clear(); vehicles_with_resource_assignment->clear(); @@ -408,7 +408,7 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { bool ComputeVehicleResourceClassValuesAndIndices( const std::vector& vehicles_to_assign, - const absl::StrongVector>& + const util_intops::StrongVector>& used_resources_per_class, const std::function& next_accessor, std::vector* resource_indices) { diff --git a/ortools/constraint_solver/routing_filters.cc b/ortools/constraint_solver/routing_filters.cc index 3e8ea5829a..52f2e363df 100644 --- a/ortools/constraint_solver/routing_filters.cc +++ b/ortools/constraint_solver/routing_filters.cc @@ -259,9 +259,9 @@ class NodeDisjunctionFilter : public IntVarLocalSearchFilter { const RoutingModel& routing_model_; - absl::StrongVector + util_intops::StrongVector active_per_disjunction_; - absl::StrongVector + util_intops::StrongVector inactive_per_disjunction_; int64_t synchronized_objective_value_; int64_t accepted_objective_value_; @@ -2820,7 +2820,7 @@ bool ResourceGroupAssignmentFilter::AcceptPath(int64_t path_start, // AcceptPath(), and delay calls to // ComputeVehicleToResourceClassAssignmentCosts() to FinalizeAcceptPath(). 
using RCIndex = RoutingModel::ResourceClassIndex; - const absl::StrongVector> + const util_intops::StrongVector> ignored_resources_per_class(resource_group_.GetResourceClassesCount()); return ComputeVehicleToResourceClassAssignmentCosts( vehicle, resource_group_, ignored_resources_per_class, @@ -2834,7 +2834,7 @@ bool ResourceGroupAssignmentFilter::AcceptPath(int64_t path_start, bool ResourceGroupAssignmentFilter::FinalizeAcceptPath( int64_t /*objective_min*/, int64_t objective_max) { using RCIndex = RoutingModel::ResourceClassIndex; - const absl::StrongVector> + const util_intops::StrongVector> ignored_resources_per_class(resource_group_.GetResourceClassesCount()); delta_cost_without_transit_ = ComputeBestVehicleToResourceAssignment( resource_group_.GetVehiclesRequiringAResource(), @@ -2871,7 +2871,7 @@ void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) { // OnSynchronizePathFromStart(), and delay calls to // ComputeVehicleToResourceClassAssignmentCosts() to OnAfterSynchronizePaths() using RCIndex = RoutingModel::ResourceClassIndex; - const absl::StrongVector> + const util_intops::StrongVector> ignored_resources_per_class(resource_group_.GetResourceClassesCount()); if (!ComputeVehicleToResourceClassAssignmentCosts( v, resource_group_, ignored_resources_per_class, next_accessor, @@ -2886,7 +2886,7 @@ void ResourceGroupAssignmentFilter::OnSynchronizePathFromStart(int64_t start) { void ResourceGroupAssignmentFilter::OnAfterSynchronizePaths() { using RCIndex = RoutingModel::ResourceClassIndex; - const absl::StrongVector> + const util_intops::StrongVector> ignored_resources_per_class(resource_group_.GetResourceClassesCount()); synchronized_cost_without_transit_ = (current_synch_failed_ || !filter_objective_cost_) diff --git a/ortools/constraint_solver/routing_index_manager.h b/ortools/constraint_solver/routing_index_manager.h index be84c60423..ca97eca74c 100644 --- a/ortools/constraint_solver/routing_index_manager.h +++ 
b/ortools/constraint_solver/routing_index_manager.h @@ -109,7 +109,7 @@ class RoutingIndexManager { const std::vector >& starts_ends); std::vector index_to_node_; - absl::StrongVector node_to_index_; + util_intops::StrongVector node_to_index_; std::vector vehicle_to_start_; std::vector vehicle_to_end_; int num_nodes_; diff --git a/ortools/constraint_solver/routing_lp_scheduling.cc b/ortools/constraint_solver/routing_lp_scheduling.cc index 5df8dc4c17..c6766127d5 100644 --- a/ortools/constraint_solver/routing_lp_scheduling.cc +++ b/ortools/constraint_solver/routing_lp_scheduling.cc @@ -2645,7 +2645,7 @@ void MoveValuesToIndicesFrom(std::vector* out_values, bool ComputeVehicleToResourceClassAssignmentCosts( int v, const RoutingModel::ResourceGroup& resource_group, - const absl::StrongVector>& ignored_resources_per_class, const std::function& next_accessor, @@ -2791,9 +2791,9 @@ bool ComputeVehicleToResourceClassAssignmentCosts( int64_t ComputeBestVehicleToResourceAssignment( const std::vector& vehicles, - const absl::StrongVector>& resource_indices_per_class, - const absl::StrongVector>& ignored_resources_per_class, std::function*(int)> diff --git a/ortools/constraint_solver/routing_lp_scheduling.h b/ortools/constraint_solver/routing_lp_scheduling.h index d4308dc454..49461f9e8f 100644 --- a/ortools/constraint_solver/routing_lp_scheduling.h +++ b/ortools/constraint_solver/routing_lp_scheduling.h @@ -971,9 +971,9 @@ class GlobalDimensionCumulOptimizer { // O(num_resource_classes * vehicles.size() + resource_indices->size()). int64_t ComputeBestVehicleToResourceAssignment( const std::vector& vehicles, - const absl::StrongVector>& resource_indices_per_class, - const absl::StrongVector>& ignored_resources_per_class, std::function*(int)> @@ -991,7 +991,7 @@ int64_t ComputeBestVehicleToResourceAssignment( // are also set in cumul_values and break_values, if non-null. 
bool ComputeVehicleToResourceClassAssignmentCosts( int v, const RoutingModel::ResourceGroup& resource_group, - const absl::StrongVector>& ignored_resources_per_class, const std::function& next_accessor, diff --git a/ortools/flatzinc/cpsat.msc.in b/ortools/flatzinc/cp-sat.msc.in similarity index 83% rename from ortools/flatzinc/cpsat.msc.in rename to ortools/flatzinc/cp-sat.msc.in index ffc97d0d18..82bfbf842a 100644 --- a/ortools/flatzinc/cpsat.msc.in +++ b/ortools/flatzinc/cp-sat.msc.in @@ -1,11 +1,11 @@ { - "id": "com.google.ortools.sat", + "id": "cp-sat", "name": "OR Tools CP-SAT", "description": "Google's Operations Research CP-SAT-LP FlatZinc interface", "version": "@PROJECT_VERSION@", - "mznlib": "../cpsat", + "mznlib": "../cp-sat", "executable": "@FZ_REL_INSTALL_BINARY@", - "tags": ["cpsatlp", "cp", "lcg", "int"], + "tags": ["cp-sat", "cp", "lcg", "int"], "stdFlags": ["-a", "-f", "-p", "-r", "-s", "-v"], "extraFlags": [ ["--params", "Provide parameters interpreted as a text SatParameters proto", "string", ""] diff --git a/ortools/flatzinc/cp_model_fz_solver.cc b/ortools/flatzinc/cp_model_fz_solver.cc index 58924cf353..24b235b990 100644 --- a/ortools/flatzinc/cp_model_fz_solver.cc +++ b/ortools/flatzinc/cp_model_fz_solver.cc @@ -1204,8 +1204,9 @@ void CpModelProtoWithMapping::TranslateSearchAnnotations( // The format is fixed in the flatzinc specification. std::string SolutionString( const fz::SolutionOutputSpecs& output, - const std::function& value_func) { - if (output.variable != nullptr) { + const std::function& value_func, + double objective_value) { + if (output.variable != nullptr && !output.variable->domain.is_float) { const int64_t value = value_func(output.variable); if (output.display_as_boolean) { return absl::StrCat(output.name, " = ", value == 1 ? 
"true" : "false", @@ -1213,6 +1214,8 @@ std::string SolutionString( } else { return absl::StrCat(output.name, " = ", value, ";"); } + } else if (output.variable != nullptr && output.variable->domain.is_float) { + return absl::StrCat(output.name, " = ", objective_value, ";"); } else { const int bound_size = output.bounds.size(); std::string result = @@ -1245,10 +1248,12 @@ std::string SolutionString( std::string SolutionString( const fz::Model& model, - const std::function& value_func) { + const std::function& value_func, + double objective_value) { std::string solution_string; for (const auto& output_spec : model.output()) { - solution_string.append(SolutionString(output_spec, value_func)); + solution_string.append( + SolutionString(output_spec, value_func, objective_value)); solution_string.append("\n"); } return solution_string; @@ -1338,6 +1343,15 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model, objective->add_coeffs(1); objective->add_vars(m.fz_var_to_index[fz_model.objective()]); } + } else if (!fz_model.float_objective_variables().empty()) { + FloatObjectiveProto* objective = m.proto.mutable_floating_point_objective(); + for (int i = 0; i < fz_model.float_objective_variables().size(); ++i) { + objective->add_vars( + m.fz_var_to_index[fz_model.float_objective_variables()[i]]); + objective->add_coeffs(fz_model.float_objective_coefficients()[i]); + } + objective->set_offset(fz_model.float_objective_offset()); + objective->set_maximize(fz_model.maximize()); } // Fill the search order. 
@@ -1427,10 +1441,12 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model, if (p.display_all_solutions || p.search_all_solutions) { solution_observer = [&fz_model, &m, &p, solution_logger](const CpSolverResponse& r) { - const std::string solution_string = - SolutionString(fz_model, [&m, &r](fz::Variable* v) { + const std::string solution_string = SolutionString( + fz_model, + [&m, &r](fz::Variable* v) { return r.solution(m.fz_var_to_index.at(v)); - }); + }, + r.objective_value()); SOLVER_LOG(solution_logger, solution_string); if (p.display_statistics) { OutputFlatzincStats(r, solution_logger); @@ -1440,12 +1456,16 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model, } Model sat_model; + + // Setup logging. + // Note that we need to do that before we start calling the sat functions + // below that might create a SolverLogger() themselves. + sat_model.Register(logger); + sat_model.Add(NewSatParameters(m.parameters)); if (solution_observer != nullptr) { sat_model.Add(NewFeasibleSolutionObserver(solution_observer)); } - // Setup logging. - sat_model.Register(logger); const CpSolverResponse response = SolveCpModel(m.proto, &sat_model); @@ -1465,10 +1485,12 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model, if (response.status() == CpSolverStatus::FEASIBLE || response.status() == CpSolverStatus::OPTIMAL) { if (!p.display_all_solutions) { // Already printed otherwise. 
- const std::string solution_string = - SolutionString(fz_model, [&response, &m](fz::Variable* v) { + const std::string solution_string = SolutionString( + fz_model, + [&response, &m](fz::Variable* v) { return response.solution(m.fz_var_to_index.at(v)); - }); + }, + response.objective_value()); SOLVER_LOG(solution_logger, solution_string); SOLVER_LOG(solution_logger, "----------"); } diff --git a/ortools/flatzinc/model.cc b/ortools/flatzinc/model.cc index 922ba9c1e0..cb984afc90 100644 --- a/ortools/flatzinc/model.cc +++ b/ortools/flatzinc/model.cc @@ -759,6 +759,9 @@ int Argument::Size() const { case VOID_ARGUMENT: { return 0; } + case FLOAT_LIST: { + return floats.size(); + } default: { LOG(FATAL) << "Should not be here"; return 0; @@ -1091,6 +1094,14 @@ std::string Model::DebugString() const { absl::StrAppendFormat(&output, "%s %s\n %s\n", maximize_ ? "Maximize" : "Minimize", objective_->name, JoinDebugString(search_annotations_, ", ")); + } else if (!float_objective_variables_.empty()) { + absl::StrAppendFormat(&output, "%s [%s] * [%s] + %f\n %s\n", + maximize_ ? 
"Maximize" : "Minimize", + JoinDebugStringPtr(float_objective_variables_, ", "), + absl::StrJoin(float_objective_coefficients_, ", "), + float_objective_offset_, + JoinDebugString(search_annotations_, ", ")); + } else { absl::StrAppendFormat(&output, "Satisfy\n %s\n", JoinDebugString(search_annotations_, ", ")); diff --git a/ortools/flatzinc/model.h b/ortools/flatzinc/model.h index cb5775966b..fd57a3fcd7 100644 --- a/ortools/flatzinc/model.h +++ b/ortools/flatzinc/model.h @@ -23,11 +23,8 @@ #include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" -#include "ortools/base/types.h" #include "ortools/graph/iterators.h" #include "ortools/util/logging.h" -#include "ortools/util/string_array.h" namespace operations_research { namespace fz { @@ -390,7 +387,22 @@ class Model { #endif bool maximize() const { return maximize_; } Variable* objective() const { return objective_; } + const std::vector& float_objective_variables() const { + return float_objective_variables_; + } + const std::vector& float_objective_coefficients() const { + return float_objective_coefficients_; + } + double float_objective_offset() const { return float_objective_offset_; } void SetObjective(Variable* obj) { objective_ = obj; } + void ClearObjective() { objective_ = nullptr; } + void AddFloatingPointObjectiveTerm(Variable* var, double coeff) { + float_objective_variables_.push_back(var); + float_objective_coefficients_.push_back(coeff); + } + void SetFloatingPointObjectiveOffset(double offset) { + float_objective_offset_ = offset; + } // Services. std::string DebugString() const; @@ -408,6 +420,9 @@ class Model { // The objective variable (it belongs to variables_). Variable* objective_; bool maximize_; + std::vector float_objective_variables_; + std::vector float_objective_coefficients_; + double float_objective_offset_ = 0.0; // All search annotations are stored as a vector of Annotation. 
std::vector search_annotations_; std::vector output_; diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc index 6ba1ec4d14..fb98500645 100644 --- a/ortools/flatzinc/presolve.cc +++ b/ortools/flatzinc/presolve.cc @@ -108,6 +108,19 @@ void Presolver::PresolveBool2Int(Constraint* ct) { } } +// Propagates cast constraint. +// Rule 1: +// Input: int2float(x, y) +// Action: Replace all instances of y by x. +// Output: inactive constraint +void Presolver::PresolveInt2Float(Constraint* ct) { + DCHECK_EQ(ct->type, "int2float"); + // Rule 1. + UpdateRuleStats("int2float: merge integer and floating point variables."); + AddVariableSubstitution(ct->arguments[1].Var(), ct->arguments[0].Var()); + ct->MarkAsInactive(); +} + // Minizinc flattens 2d element constraints (x = A[y][z]) into 1d element // constraint with an affine mapping between y, z and the new index. // This rule stores the mapping to reconstruct the 2d element constraint. @@ -426,6 +439,8 @@ void Presolver::Run(Model* model) { for (Constraint* const ct : model->constraints()) { if (ct->active && ct->type == "bool2int") { PresolveBool2Int(ct); + } else if (ct->active && ct->type == "int2float") { + PresolveInt2Float(ct); } else if (ct->active && ct->type == "int_lin_eq" && ct->arguments[1].variables.size() == 2 && ct->strong_propagation) { @@ -454,6 +469,47 @@ void Presolver::Run(Model* model) { } } + // Third pass: process objective with floating point coefficients. 
+ Variable* float_objective_var = nullptr; + for (Variable* var : model->variables()) { + if (!var->active) continue; + if (var->domain.is_float) { + CHECK(float_objective_var == nullptr); + float_objective_var = var; + } + } + + Constraint* float_objective_ct = nullptr; + if (float_objective_var != nullptr) { + for (Constraint* ct : model->constraints()) { + if (!ct->active) continue; + if (ct->type == "float_lin_eq") { + CHECK(float_objective_ct == nullptr); + float_objective_ct = ct; + break; + } + } + } + + if (float_objective_ct != nullptr || float_objective_var != nullptr) { + CHECK(float_objective_ct != nullptr); + CHECK(float_objective_var != nullptr); + const int arity = float_objective_ct->arguments[0].Size(); + CHECK_EQ(float_objective_ct->arguments[1].variables[arity - 1], + float_objective_var); + CHECK_EQ(float_objective_ct->arguments[0].floats[arity - 1], -1.0); + for (int i = 0; i + 1 < arity; ++i) { + model->AddFloatingPointObjectiveTerm( + float_objective_ct->arguments[1].variables[i], + float_objective_ct->arguments[0].floats[i]); + } + model->SetFloatingPointObjectiveOffset( + -float_objective_ct->arguments[2].floats[0]); + model->ClearObjective(); + float_objective_var->active = false; + float_objective_ct->active = false; + } + // Report presolve rules statistics. if (!successful_rules_.empty()) { for (const auto& rule : successful_rules_) { diff --git a/ortools/flatzinc/presolve.h b/ortools/flatzinc/presolve.h index 890a8bad75..ea3e6a7c87 100644 --- a/ortools/flatzinc/presolve.h +++ b/ortools/flatzinc/presolve.h @@ -95,6 +95,7 @@ class Presolver { // Presolve rules. 
void PresolveBool2Int(Constraint* ct); + void PresolveInt2Float(Constraint* ct); void PresolveStoreAffineMapping(Constraint* ct); void PresolveStoreFlatteningMapping(Constraint* ct); void PresolveSimplifyElement(Constraint* ct); diff --git a/ortools/glop/CMakeLists.txt b/ortools/glop/CMakeLists.txt index 6884bfe001..3d1a0f9b07 100644 --- a/ortools/glop/CMakeLists.txt +++ b/ortools/glop/CMakeLists.txt @@ -28,5 +28,5 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::glop ALIAS ${NAME}) diff --git a/ortools/glop/markowitz.h b/ortools/glop/markowitz.h index 212c656c0f..c4acfca66d 100644 --- a/ortools/glop/markowitz.h +++ b/ortools/glop/markowitz.h @@ -194,7 +194,8 @@ class MatrixNonZeroPattern { // // TODO(user): We could be even more efficient since a size of int32_t is // enough for us and we could store in common the inlined/not-inlined size. - absl::StrongVector> row_non_zero_; + util_intops::StrongVector> + row_non_zero_; StrictITIVector row_degree_; StrictITIVector col_degree_; DenseBooleanRow deleted_columns_; @@ -274,7 +275,7 @@ class SparseMatrixWithReusableColumnMemory { // mutable_column(col) is stored in columns_[mapping_[col]]. // The columns_ that can be reused have their index stored in free_columns_. const SparseColumn empty_column_; - absl::StrongVector mapping_; + util_intops::StrongVector mapping_; std::vector free_columns_; std::vector columns_; }; diff --git a/ortools/glop/preprocessor.cc b/ortools/glop/preprocessor.cc index 1e73c5e903..4f88318c8a 100644 --- a/ortools/glop/preprocessor.cc +++ b/ortools/glop/preprocessor.cc @@ -1390,10 +1390,10 @@ bool ImpliedFreePreprocessor::Run(LinearProgram* lp) { const int size = num_rows.value(); // TODO(user) : Replace SumWithNegativeInfiniteAndOneMissing and // SumWithPositiveInfiniteAndOneMissing with IntervalSumWithOneMissing. 
- absl::StrongVector lb_sums( - size); - absl::StrongVector ub_sums( - size); + util_intops::StrongVector + lb_sums(size); + util_intops::StrongVector + ub_sums(size); // Initialize the sums by adding all the bounds of the variables. for (ColIndex col(0); col < num_cols; ++col) { @@ -3677,7 +3677,7 @@ bool ShiftVariableBoundsPreprocessor::Run(LinearProgram* lp) { int num_bound_shifts = 0; const RowIndex num_rows = lp->num_constraints(); KahanSum objective_offset; - absl::StrongVector row_offsets(num_rows.value()); + util_intops::StrongVector row_offsets(num_rows.value()); offsets_.assign(num_cols, 0.0); for (ColIndex col(0); col < num_cols; ++col) { if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) { diff --git a/ortools/glop/preprocessor.h b/ortools/glop/preprocessor.h index 1f79296272..37723dad6c 100644 --- a/ortools/glop/preprocessor.h +++ b/ortools/glop/preprocessor.h @@ -268,7 +268,7 @@ class RowDeletionHelper { // EmptyColumnPreprocessor // -------------------------------------------------------- // Removes the empty columns from the problem. -class EmptyColumnPreprocessor : public Preprocessor { +class EmptyColumnPreprocessor final : public Preprocessor { public: explicit EmptyColumnPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -292,7 +292,7 @@ class EmptyColumnPreprocessor : public Preprocessor { // usually called duplicates. The notion is the same once the problem has been // scaled. However, during presolve the columns can't be assumed to be scaled, // so it makes sense to use the more general notion of proportional columns. -class ProportionalColumnPreprocessor : public Preprocessor { +class ProportionalColumnPreprocessor final : public Preprocessor { public: explicit ProportionalColumnPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -333,7 +333,7 @@ class ProportionalColumnPreprocessor : public Preprocessor { // Removes the proportional rows from the problem. 
// The linear programming literature also calls such rows duplicates, see the // same remark above for columns in ProportionalColumnPreprocessor. -class ProportionalRowPreprocessor : public Preprocessor { +class ProportionalRowPreprocessor final : public Preprocessor { public: explicit ProportionalRowPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -433,7 +433,7 @@ class SingletonUndo { // Deletes as many singleton rows or singleton columns as possible. Note that // each time we delete a row or a column, new singletons may be created. -class SingletonPreprocessor : public Preprocessor { +class SingletonPreprocessor final : public Preprocessor { public: explicit SingletonPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -504,10 +504,10 @@ class SingletonPreprocessor : public Preprocessor { // This is used as a "cache" by MakeConstraintAnEqualityIfPossible() to avoid // scanning more than once each row. See the code to see how this is used. - absl::StrongVector row_sum_is_cached_; - absl::StrongVector + util_intops::StrongVector row_sum_is_cached_; + util_intops::StrongVector row_lb_sum_; - absl::StrongVector + util_intops::StrongVector row_ub_sum_; // TODO(user): It is annoying that we need to store a part of the matrix that @@ -522,7 +522,7 @@ class SingletonPreprocessor : public Preprocessor { // FixedVariablePreprocessor // -------------------------------------------------------- // Removes the fixed variables from the problem. -class FixedVariablePreprocessor : public Preprocessor { +class FixedVariablePreprocessor final : public Preprocessor { public: explicit FixedVariablePreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -557,7 +557,7 @@ class FixedVariablePreprocessor : public Preprocessor { // later by the FreeConstraintPreprocessor. // // * Otherwise, wo do nothing. 
-class ForcingAndImpliedFreeConstraintPreprocessor : public Preprocessor { +class ForcingAndImpliedFreeConstraintPreprocessor final : public Preprocessor { public: explicit ForcingAndImpliedFreeConstraintPreprocessor( const GlopParameters* parameters) @@ -603,7 +603,7 @@ class ForcingAndImpliedFreeConstraintPreprocessor : public Preprocessor { // TODO(user): Only process doubleton columns so we have more chance in the // later passes to create more doubleton columns? Such columns lead to a smaller // problem thanks to the DoubletonFreeColumnPreprocessor. -class ImpliedFreePreprocessor : public Preprocessor { +class ImpliedFreePreprocessor final : public Preprocessor { public: explicit ImpliedFreePreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -649,7 +649,7 @@ class ImpliedFreePreprocessor : public Preprocessor { // solver open source codes as of July 2013. All of them only process such // columns if one of the two rows is also an equality which is not actually // required. Most probably, commercial solvers do use it though. -class DoubletonFreeColumnPreprocessor : public Preprocessor { +class DoubletonFreeColumnPreprocessor final : public Preprocessor { public: explicit DoubletonFreeColumnPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -700,7 +700,7 @@ class DoubletonFreeColumnPreprocessor : public Preprocessor { // translated into bounds on the reduced costs or the columns, which may force // variables to their bounds. This is called forcing and dominated columns in // the Andersen & Andersen paper. 
-class UnconstrainedVariablePreprocessor : public Preprocessor { +class UnconstrainedVariablePreprocessor final : public Preprocessor { public: explicit UnconstrainedVariablePreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -752,7 +752,7 @@ class UnconstrainedVariablePreprocessor : public Preprocessor { // FreeConstraintPreprocessor // -------------------------------------------------------- // Removes the constraints with no bounds from the problem. -class FreeConstraintPreprocessor : public Preprocessor { +class FreeConstraintPreprocessor final : public Preprocessor { public: explicit FreeConstraintPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -771,7 +771,7 @@ class FreeConstraintPreprocessor : public Preprocessor { // EmptyConstraintPreprocessor // -------------------------------------------------------- // Removes the constraints with no coefficients from the problem. -class EmptyConstraintPreprocessor : public Preprocessor { +class EmptyConstraintPreprocessor final : public Preprocessor { public: explicit EmptyConstraintPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -793,7 +793,7 @@ class EmptyConstraintPreprocessor : public Preprocessor { // with only one entry) is positive. This is because this way the column will // be transformed in an identity column by the scaling. This will lead to more // efficient solve when this column is involved. -class SingletonColumnSignPreprocessor : public Preprocessor { +class SingletonColumnSignPreprocessor final : public Preprocessor { public: explicit SingletonColumnSignPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -815,7 +815,7 @@ class SingletonColumnSignPreprocessor : public Preprocessor { // Reduce equality constraints involving two variables (i.e. aX + bY = c), // by substitution (and thus removal) of one of the variables by the other // in all the constraints that it is involved in. 
-class DoubletonEqualityRowPreprocessor : public Preprocessor { +class DoubletonEqualityRowPreprocessor final : public Preprocessor { public: explicit DoubletonEqualityRowPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -904,7 +904,7 @@ void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds, // // IMPORTANT: FreeConstraintPreprocessor() must be called first since this // preprocessor does not deal correctly with free constraints. -class DualizerPreprocessor : public Preprocessor { +class DualizerPreprocessor final : public Preprocessor { public: explicit DualizerPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -962,7 +962,7 @@ class DualizerPreprocessor : public Preprocessor { // the ImpliedFreePreprocessor. However, shifting a variable with a domain like // [-1e10, 1e10] may introduce numerical issues. Relax the definition of // a free variable so that only having a domain containing 0.0 is enough? -class ShiftVariableBoundsPreprocessor : public Preprocessor { +class ShiftVariableBoundsPreprocessor final : public Preprocessor { public: explicit ShiftVariableBoundsPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -992,7 +992,7 @@ class ShiftVariableBoundsPreprocessor : public Preprocessor { // -------------------------------------------------------- // Scales the SparseMatrix of the linear program using a SparseMatrixScaler. // This is only applied if the parameter use_scaling is true. -class ScalingPreprocessor : public Preprocessor { +class ScalingPreprocessor final : public Preprocessor { public: explicit ScalingPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -1015,7 +1015,7 @@ class ScalingPreprocessor : public Preprocessor { // ToMinimizationPreprocessor // -------------------------------------------------------- // Changes the problem from maximization to minimization (if applicable). 
-class ToMinimizationPreprocessor : public Preprocessor { +class ToMinimizationPreprocessor final : public Preprocessor { public: explicit ToMinimizationPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} @@ -1046,7 +1046,7 @@ class ToMinimizationPreprocessor : public Preprocessor { // memory for no good reason. The internal matrix representation used in glop is // a lot more efficient, and there is no point keeping the slacks in // LinearProgram. It is also bad for incrementaly modifying the LP. -class AddSlackVariablesPreprocessor : public Preprocessor { +class AddSlackVariablesPreprocessor final : public Preprocessor { public: explicit AddSlackVariablesPreprocessor(const GlopParameters* parameters) : Preprocessor(parameters) {} diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 278c64703a..28ef73d4c9 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -139,7 +139,12 @@ void RevisedSimplex::ClearStateForNextSolve() { } void RevisedSimplex::LoadStateForNextSolve(const BasisState& state) { - SCOPED_TIME_STAT(&function_stats_); + // We avoid marking the state as set externally if it is the same as the + // current one. + // + // TODO(user): Add comparison operator. 
+ if (state.statuses == solution_state_.statuses) return; + solution_state_ = state; solution_state_has_been_set_externally_ = true; } @@ -3820,9 +3825,9 @@ void RevisedSimplex::DisplayVariableBounds() { } } -absl::StrongVector RevisedSimplex::ComputeDictionary( - const DenseRow* column_scales) { - absl::StrongVector dictionary(num_rows_.value()); +util_intops::StrongVector +RevisedSimplex::ComputeDictionary(const DenseRow* column_scales) { + util_intops::StrongVector dictionary(num_rows_.value()); for (ColIndex col(0); col < num_cols_; ++col) { ComputeDirection(col); for (const auto e : direction_) { diff --git a/ortools/glop/variable_values.cc b/ortools/glop/variable_values.cc index 1700bcad93..0990ca2f40 100644 --- a/ortools/glop/variable_values.cc +++ b/ortools/glop/variable_values.cc @@ -201,7 +201,7 @@ void VariableValues::UpdateOnPivoting(const ScatteredColumn& direction, } void VariableValues::UpdateGivenNonBasicVariables( - const std::vector& cols_to_update, bool update_basic_variables) { + absl::Span cols_to_update, bool update_basic_variables) { SCOPED_TIME_STAT(&stats_); if (!update_basic_variables) { for (ColIndex col : cols_to_update) { diff --git a/ortools/glop/variable_values.h b/ortools/glop/variable_values.h index ffdc40e36f..96021d5993 100644 --- a/ortools/glop/variable_values.h +++ b/ortools/glop/variable_values.h @@ -111,7 +111,7 @@ class VariableValues { // update_basic_variables is true. The update is done in an incremental way // and is thus more efficient than calling afterwards // RecomputeBasicVariableValues() and RecomputeDualPrices(). - void UpdateGivenNonBasicVariables(const std::vector& cols_to_update, + void UpdateGivenNonBasicVariables(absl::Span cols_to_update, bool update_basic_variables); // Functions dealing with the primal-infeasible basic variables. 
A basic diff --git a/ortools/graph/cliques.h b/ortools/graph/cliques.h index 889fc54b29..67703b7895 100644 --- a/ortools/graph/cliques.h +++ b/ortools/graph/cliques.h @@ -275,7 +275,7 @@ class BronKerboschAlgorithm { // clique. // NOTE(user): We could store the delta between the iterations; however, // we need to evaluate the impact this would have on the performance. - absl::StrongVector candidates; + util_intops::StrongVector candidates; // The index of the first actual candidate in 'candidates'. This number is // also the number of elements of the "not" set stored at the beginning of // 'candidates'. @@ -451,7 +451,7 @@ void BronKerboschAlgorithm::PushState(NodeIndex selected) { DCHECK(time_limit_ != nullptr); DVLOG(2) << "PushState: New depth = " << states_.size() + 1 << ", selected node = " << selected; - absl::StrongVector new_candidates; + util_intops::StrongVector new_candidates; State* const previous_state = &states_.back(); const double deterministic_time = diff --git a/ortools/graph/perfect_matching.h b/ortools/graph/perfect_matching.h index 0549a5196d..c06e876673 100644 --- a/ortools/graph/perfect_matching.h +++ b/ortools/graph/perfect_matching.h @@ -456,16 +456,16 @@ class BlossomGraph { bool is_initialized_ = false; // The set of all edges/nodes of the graph. - absl::StrongVector edges_; - absl::StrongVector nodes_; + util_intops::StrongVector edges_; + util_intops::StrongVector nodes_; // Identity for a non-blossom node, and its top blossom node (in case of many // nested blossom) for an internal node. - absl::StrongVector root_blossom_node_; + util_intops::StrongVector root_blossom_node_; // The current graph incidence. Note that one EdgeIndex should appear in // exactly two places (on its tail and head incidence list). - absl::StrongVector> graph_; + util_intops::StrongVector> graph_; // Used by SubNodes(). 
std::vector subnodes_; diff --git a/ortools/gscip/CMakeLists.txt b/ortools/gscip/CMakeLists.txt index b14b5a7a79..bbcfad5411 100644 --- a/ortools/gscip/CMakeLists.txt +++ b/ortools/gscip/CMakeLists.txt @@ -34,5 +34,5 @@ target_link_libraries(${NAME} PRIVATE absl::str_format protobuf::libprotobuf $<$:libscip> - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::gscip ALIAS ${NAME}) diff --git a/ortools/gurobi/CMakeLists.txt b/ortools/gurobi/CMakeLists.txt index af8f0c7998..ab2d0f6501 100644 --- a/ortools/gurobi/CMakeLists.txt +++ b/ortools/gurobi/CMakeLists.txt @@ -32,7 +32,7 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto + ${PROJECT_NAMESPACE}::ortools_proto ${PROJECT_NAMESPACE}::math_opt_proto $<$:Coin::Cbc>) #add_library(${PROJECT_NAMESPACE}::gurobi ALIAS ${NAME}) diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index aea8955f08..f8ed9962de 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -346,8 +346,8 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { std::vector GurobiDynamicLibraryPotentialPaths() { std::vector potential_paths; const std::vector kGurobiVersions = { - "1101", "1100", "1003", "1002", "1001", "1000", "952", "951", - "950", "911", "910", "903", "902", "811", "801", "752"}; + "1102", "1101", "1100", "1003", "1002", "1001", "1000", "952", "951", + "950", "911", "910", "903", "902", "811", "801", "752"}; potential_paths.reserve(kGurobiVersions.size() * 3); // Look for libraries pointed by GUROBI_HOME first. @@ -405,8 +405,9 @@ std::vector GurobiDynamicLibraryPotentialPaths() { } #if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. 
- for (const std::string& version : {"11.0.0", "10.0.3", "10.0.2", "10.0.1", - "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { + for (const std::string& version : + {"11.0.2", "11.0.1", "11.0.0", "10.0.3", "10.0.2", "10.0.1", "10.0.0", + "9.5.2", "9.5.1", "9.5.0"}) { potential_paths.push_back( absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); } diff --git a/ortools/init/CMakeLists.txt b/ortools/init/CMakeLists.txt index b90bb52675..2438dc4f38 100644 --- a/ortools/init/CMakeLists.txt +++ b/ortools/init/CMakeLists.txt @@ -28,5 +28,5 @@ target_link_libraries(${NAME} PRIVATE absl::flags absl::strings protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::init ALIAS ${NAME}) diff --git a/ortools/init/java/init.i b/ortools/init/java/init.i index 5c4abf48e1..360835eac7 100644 --- a/ortools/init/java/init.i +++ b/ortools/init/java/init.i @@ -37,7 +37,7 @@ %rename (initLogging) operations_research::CppBridge::InitLogging; %rename (shutdownLogging) operations_research::CppBridge::ShutdownLogging; %rename (setFlags) operations_research::CppBridge::SetFlags; -%rename (logGurobiSharedLibrary) operations_research::CppBridge::LoadGurobiSharedLibrary; +%rename (loadGurobiSharedLibrary) operations_research::CppBridge::LoadGurobiSharedLibrary; %unignore operations_research::OrToolsVersion; %rename (getMajorNumber) operations_research::OrToolsVersion::MajorNumber; diff --git a/ortools/linear_solver/CMakeLists.txt b/ortools/linear_solver/CMakeLists.txt index 0727730ab6..74d20df48b 100644 --- a/ortools/linear_solver/CMakeLists.txt +++ b/ortools/linear_solver/CMakeLists.txt @@ -45,7 +45,7 @@ target_link_libraries(${NAME} PRIVATE $<$:highs::highs> $<$:Eigen3::Eigen> $<$:libscip> - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::linear_solver ALIAS ${NAME}) # solve @@ -82,9 +82,11 @@ if(BUILD_TESTING) set(CMAKE_INSTALL_RPATH 
"$ORIGIN/../${CMAKE_INSTALL_LIBDIR}:$ORIGIN:$ORIGIN/../lib:$ORIGIN") endif () - add_executable(test_xprs_interface xpress_interface_test.cc) - target_compile_features(test_xprs_interface PRIVATE cxx_std_17) - target_link_libraries(test_xprs_interface PRIVATE ortools::ortools GTest::gtest_main) + if(USE_XPRESS) + add_executable(test_xprs_interface xpress_interface_test.cc) + target_compile_features(test_xprs_interface PRIVATE cxx_std_17) + target_link_libraries(test_xprs_interface PRIVATE ortools::ortools GTest::gtest_main) - add_test(NAME cxx_unittests_xpress_interface COMMAND test_xprs_interface) + add_test(NAME cxx_unittests_xpress_interface COMMAND test_xprs_interface) + endif() endif () diff --git a/ortools/linear_solver/highs_interface.cc b/ortools/linear_solver/highs_interface.cc index 240733052d..842823f0df 100644 --- a/ortools/linear_solver/highs_interface.cc +++ b/ortools/linear_solver/highs_interface.cc @@ -152,7 +152,8 @@ MPSolver::ResultStatus HighsInterface::Solve(const MPSolverParameters& param) { : MPModelRequest::HIGHS_LINEAR_PROGRAMMING); // Set parameters. 
- absl::StatusOr response = HighsSolveProto(request); + absl::StatusOr response = + HighsSolveProto(std::move(request)); if (!response.ok()) { LOG(ERROR) << "Unexpected error solving with Highs: " << response.status(); diff --git a/ortools/linear_solver/linear_solver.cc b/ortools/linear_solver/linear_solver.cc index 7dceb5dcf2..b8dd07a570 100644 --- a/ortools/linear_solver/linear_solver.cc +++ b/ortools/linear_solver/linear_solver.cc @@ -42,6 +42,7 @@ #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_replace.h" +#include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/synchronization/notification.h" #include "absl/time/clock.h" @@ -1001,7 +1002,7 @@ bool InCategory(int status, int category) { return status == category; } -void AppendStatusStr(const std::string& msg, MPSolutionResponse* response) { +void AppendStatusStr(absl::string_view msg, MPSolutionResponse* response) { response->set_status_str( absl::StrCat(response->status_str(), (response->status_str().empty() ? 
"" : "\n"), msg)); diff --git a/ortools/linear_solver/linear_solver.h b/ortools/linear_solver/linear_solver.h index 8315b5aeb3..0b363d5d30 100644 --- a/ortools/linear_solver/linear_solver.h +++ b/ortools/linear_solver/linear_solver.h @@ -154,7 +154,6 @@ #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_format.h" -#include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/time/clock.h" #include "absl/time/time.h" diff --git a/ortools/linear_solver/model_exporter.cc b/ortools/linear_solver/model_exporter.cc index 319289dcc1..02c1e68f7c 100644 --- a/ortools/linear_solver/model_exporter.cc +++ b/ortools/linear_solver/model_exporter.cc @@ -601,6 +601,7 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( if (binary_var_index < 0 || binary_var_index >= proto_.variable_size()) { return false; } + show_variable[binary_var_index] = true; line_breaker.Append(absl::StrFormat( "%s = %d -> ", exported_variable_names_[binary_var_index], binary_var_value)); diff --git a/ortools/linear_solver/model_validator.cc b/ortools/linear_solver/model_validator.cc index b851fd6af1..ebdffc1fb2 100644 --- a/ortools/linear_solver/model_validator.cc +++ b/ortools/linear_solver/model_validator.cc @@ -29,6 +29,7 @@ #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "ortools/base/accurate_sum.h" #include "ortools/base/commandlineflags.h" @@ -447,7 +448,7 @@ class LazyMPModelNameToIndexMaps { absl::StatusOr LookupName( MPModelProto::Annotation::TargetType target_type, - const std::string& name) { + absl::string_view name) { const absl::flat_hash_map* map = nullptr; switch (target_type) { case MPModelProto::Annotation::VARIABLE_DEFAULT: @@ -470,7 +471,7 @@ class LazyMPModelNameToIndexMaps { map = &general_constraint_name_to_index_.value(); break; } - const int index = gtl::FindWithDefault(*map, name, 
-2); + const int index = gtl::FindWithDefault(*map, std::string(name), -2); if (index == -2) return absl::NotFoundError("name not found"); if (index == -1) return absl::InvalidArgumentError("name is not unique"); return index; diff --git a/ortools/linear_solver/pdlp_interface.cc b/ortools/linear_solver/pdlp_interface.cc index 3d756c4176..b67b6e9302 100644 --- a/ortools/linear_solver/pdlp_interface.cc +++ b/ortools/linear_solver/pdlp_interface.cc @@ -156,7 +156,7 @@ MPSolver::ResultStatus PdlpInterface::Solve(const MPSolverParameters& param) { << ProtobufDebugString(parameters_); } absl::StatusOr response = PdlpSolveProto( - request, /*relax_integer_variables=*/true, &interrupt_solver_); + std::move(request), /*relax_integer_variables=*/true, &interrupt_solver_); if (!response.ok()) { LOG(ERROR) << "Unexpected error solving with PDLP: " << response.status(); diff --git a/ortools/linear_solver/proto_solver/CMakeLists.txt b/ortools/linear_solver/proto_solver/CMakeLists.txt index 3ae5e16d83..f7c42b357d 100644 --- a/ortools/linear_solver/proto_solver/CMakeLists.txt +++ b/ortools/linear_solver/proto_solver/CMakeLists.txt @@ -48,5 +48,5 @@ target_link_libraries(${NAME} PRIVATE $<$:Eigen3::Eigen> $<$:libscip> $<$:highs::highs> - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::linear_solver_proto_solver ALIAS ${NAME}) diff --git a/ortools/linear_solver/scip_callback.h b/ortools/linear_solver/scip_callback.h index 7097a44cb3..d4bab25f1e 100644 --- a/ortools/linear_solver/scip_callback.h +++ b/ortools/linear_solver/scip_callback.h @@ -25,14 +25,10 @@ #include #include -#include "absl/memory/memory.h" #include "ortools/linear_solver/linear_expr.h" #include "ortools/linear_solver/linear_solver.h" -#include "scip/scip_sol.h" -#include "scip/type_cons.h" #include "scip/type_scip.h" #include "scip/type_sol.h" -#include "scip/type_var.h" namespace operations_research { diff --git 
a/ortools/linear_solver/solve_mp_model.h b/ortools/linear_solver/solve_mp_model.h index ec56ed207e..bbc8f2457b 100644 --- a/ortools/linear_solver/solve_mp_model.h +++ b/ortools/linear_solver/solve_mp_model.h @@ -18,7 +18,6 @@ #ifndef OR_TOOLS_LINEAR_SOLVER_SOLVE_MP_MODEL_H_ #define OR_TOOLS_LINEAR_SOLVER_SOLVE_MP_MODEL_H_ -#include #include #include "ortools/linear_solver/linear_solver.pb.h" diff --git a/ortools/linear_solver/wrappers/CMakeLists.txt b/ortools/linear_solver/wrappers/CMakeLists.txt index 4bf0916894..5c085720b5 100644 --- a/ortools/linear_solver/wrappers/CMakeLists.txt +++ b/ortools/linear_solver/wrappers/CMakeLists.txt @@ -26,5 +26,5 @@ target_include_directories(${NAME} PRIVATE target_link_libraries(${NAME} PRIVATE absl::status $<$:libscip> - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::linear_solver_wrappers ALIAS ${NAME}) diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index ec389db49e..2fc90393b2 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -227,7 +227,7 @@ class XpressMPCallbackContext : public MPCallbackContext { : xprsprob_(xprsprob), event_(event), num_nodes_(num_nodes), - variable_values_(0){}; + variable_values_(0) {}; // Implementation of the interface. MPCallbackEvent Event() override { return event_; }; @@ -260,7 +260,7 @@ class XpressMPCallbackContext : public MPCallbackContext { // Wraps the MPCallback in order to catch and store exceptions class MPCallbackWrapper { public: - explicit MPCallbackWrapper(MPCallback* callback) : callback_(callback){}; + explicit MPCallbackWrapper(MPCallback* callback) : callback_(callback) {}; MPCallback* GetCallback() const { return callback_; } // Since our (C++) call-back functions are called from the XPRESS (C) code, // exceptions thrown in our call-back code are not caught by XPRESS. 
@@ -971,8 +971,6 @@ void XpressInterface::SetVariableInteger(int var_index, bool integer) { } // Setup the right-hand side of a constraint. -// The function is expected to _always_ set rhs, sense, range. So for -// non-ranged rows it must set range to zero. void XpressInterface::MakeRhs(double lb, double ub, double& rhs, char& sense, double& range) { if (lb == ub) { @@ -1246,7 +1244,8 @@ int64_t XpressInterface::nodes() const { } // Transform a XPRESS basis status to an MPSolver basis status. -MPSolver::BasisStatus XpressToMPSolverBasisStatus(int xpress_basis_status) { +static MPSolver::BasisStatus XpressToMPSolverBasisStatus( + int xpress_basis_status) { switch (xpress_basis_status) { case XPRS_AT_LOWER: return MPSolver::AT_LOWER_BOUND; @@ -1824,13 +1823,15 @@ MPSolver::ResultStatus XpressInterface::Solve(MPSolverParameters const& param) { // Set log level. XPRSsetintcontrol(mLp, XPRS_OUTPUTLOG, quiet() ? 0 : 1); // Set parameters. - // NOTE: We must invoke SetSolverSpecificParametersAsString() _first_. - // Its current implementation invokes ReadParameterFile() which in - // turn invokes XPRSreadcopyparam(). The latter will _overwrite_ - // all current parameter settings in the environment. + // We first set our internal MPSolverParameters from 'param' and then set + // any user-specified internal solver parameters via + // solver_specific_parameter_string_. + // Default MPSolverParameters can override custom parameters while specific + // parameters allow a higher level of customization (for example for + // presolving) and therefore we apply MPSolverParameters first. + SetParameters(param); solver_->SetSolverSpecificParametersAsString( solver_->solver_specific_parameter_string_); - SetParameters(param); if (solver_->time_limit()) { VLOG(1) << "Setting time limit = " << solver_->time_limit() << " ms."; // In Xpress, a time limit should usually have a negative sign. 
With a diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index fee28e1787..80916e72fe 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -768,6 +768,14 @@ TEST_F(XpressFixtureLP, SetPrimalTolerance) { EXPECT_EQ(getter.getDoubleControl(XPRS_FEASTOL), tol); } +TEST_F(XpressFixtureLP, SetPrimalToleranceNotOverridenByMPSolverParameters) { + double tol = 1e-4; // Choose a value different from kDefaultPrimalTolerance + std::string xpressParamString = "FEASTOL " + std::to_string(tol); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + EXPECT_EQ(getter.getDoubleControl(XPRS_FEASTOL), tol); +} + TEST_F(XpressFixtureLP, SetDualTolerance) { MPSolverParameters params; double tol = 1e-2; @@ -776,6 +784,14 @@ TEST_F(XpressFixtureLP, SetDualTolerance) { EXPECT_EQ(getter.getDoubleControl(XPRS_OPTIMALITYTOL), tol); } +TEST_F(XpressFixtureLP, SetDualToleranceNotOverridenByMPSolverParameters) { + double tol = 1e-4; // Choose a value different from kDefaultDualTolerance + std::string xpressParamString = "OPTIMALITYTOL " + std::to_string(tol); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + EXPECT_EQ(getter.getDoubleControl(XPRS_OPTIMALITYTOL), tol); +} + TEST_F(XpressFixtureMIP, SetPresolveMode) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::PRESOLVE, @@ -788,6 +804,17 @@ TEST_F(XpressFixtureMIP, SetPresolveMode) { EXPECT_EQ(getter.getIntegerControl(XPRS_PRESOLVE), 1); } +TEST_F(XpressFixtureMIP, SetPresolveModeNotOverridenByMPSolverParameters) { + // Test all presolve modes of Xpress + std::vector presolveModes{-1, 0, 1, 2, 3}; + for (int presolveMode : presolveModes) { + std::string xpressParamString = "PRESOLVE " + std::to_string(presolveMode); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + 
EXPECT_EQ(getter.getIntegerControl(XPRS_PRESOLVE), presolveMode); + } +} + TEST_F(XpressFixtureLP, SetLpAlgorithm) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::LP_ALGORITHM, @@ -804,6 +831,16 @@ TEST_F(XpressFixtureLP, SetLpAlgorithm) { EXPECT_EQ(getter.getIntegerControl(XPRS_DEFAULTALG), 4); } +TEST_F(XpressFixtureLP, SetLPAlgorithmNotOverridenByMPSolverParameters) { + std::vector defaultAlgs{1, 2, 3, 4}; + for (int defaultAlg : defaultAlgs) { + std::string xpressParamString = "DEFAULTALG " + std::to_string(defaultAlg); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + EXPECT_EQ(getter.getIntegerControl(XPRS_DEFAULTALG), defaultAlg); + } +} + TEST_F(XpressFixtureMIP, SetScaling) { MPSolverParameters params; params.SetIntegerParam(MPSolverParameters::SCALING, @@ -816,6 +853,17 @@ TEST_F(XpressFixtureMIP, SetScaling) { EXPECT_EQ(getter.getIntegerControl(XPRS_SCALING), 163); } +TEST_F(XpressFixtureMIP, SetScalingNotOverridenByMPSolverParameters) { + // Scaling is a bitmap on 16 bits in Xpress, test only a random value among + // all possible + int scaling = 2354; + + std::string xpressParamString = "SCALING " + std::to_string(scaling); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + EXPECT_EQ(getter.getIntegerControl(XPRS_SCALING), scaling); +} + TEST_F(XpressFixtureMIP, SetRelativeMipGap) { MPSolverParameters params; double relativeMipGap = 1e-3; @@ -824,6 +872,14 @@ TEST_F(XpressFixtureMIP, SetRelativeMipGap) { EXPECT_EQ(getter.getDoubleControl(XPRS_MIPRELSTOP), relativeMipGap); } +TEST_F(XpressFixtureMIP, SetRelativeMipGapNotOverridenByMPSolverParameters) { + double gap = 1e-2; // Choose a value different from kDefaultRelativeMipGap + std::string xpressParamString = "MIPRELSTOP " + std::to_string(gap); + solver.SetSolverSpecificParametersAsString(xpressParamString); + solver.Solve(); + EXPECT_EQ(getter.getDoubleControl(XPRS_MIPRELSTOP), gap); +} + 
TEST(XpressInterface, setStringControls) { std::vector> params = { {"MPSRHSNAME", XPRS_MPSRHSNAME, "default_value"}, diff --git a/ortools/lp_data/CMakeLists.txt b/ortools/lp_data/CMakeLists.txt index f163cf5272..7018f0f16a 100644 --- a/ortools/lp_data/CMakeLists.txt +++ b/ortools/lp_data/CMakeLists.txt @@ -29,5 +29,5 @@ target_link_libraries(${NAME} PRIVATE absl::str_format protobuf::libprotobuf ${RE2_DEPS} - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::lp_data ALIAS ${NAME}) diff --git a/ortools/lp_data/lp_data.cc b/ortools/lp_data/lp_data.cc index 4745124c72..316b8f0292 100644 --- a/ortools/lp_data/lp_data.cc +++ b/ortools/lp_data/lp_data.cc @@ -65,7 +65,7 @@ bool AreBoundsFreeOrBoxed(Fractional lower_bound, Fractional upper_bound) { } template -double Average(const absl::StrongVector& v) { +double Average(const util_intops::StrongVector& v) { const size_t size = v.size(); double sum = 0.0; double n = 0.0; // n is used in a calculation involving doubles. @@ -78,7 +78,7 @@ double Average(const absl::StrongVector& v) { } template -double StandardDeviation(const absl::StrongVector& v) { +double StandardDeviation(const util_intops::StrongVector& v) { const size_t size = v.size(); double n = 0.0; // n is used in a calculation involving doubles. double sigma_square = 0.0; @@ -95,7 +95,7 @@ double StandardDeviation(const absl::StrongVector& v) { // Returns 0 when the vector is empty. 
template -T GetMaxElement(const absl::StrongVector& v) { +T GetMaxElement(const util_intops::StrongVector& v) { const size_t size = v.size(); if (size == 0) { return T(0); diff --git a/ortools/lp_data/lp_types.h b/ortools/lp_data/lp_types.h index 88d6228c3b..8d80f64c16 100644 --- a/ortools/lp_data/lp_types.h +++ b/ortools/lp_data/lp_types.h @@ -16,18 +16,18 @@ #ifndef OR_TOOLS_LP_DATA_LP_TYPES_H_ #define OR_TOOLS_LP_DATA_LP_TYPES_H_ -#include +#include #include +#include #include +#include #include #include -#include #include #include "absl/log/check.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" -#include "ortools/base/types.h" #include "ortools/util/bitset.h" #include "ortools/util/strong_integers.h" @@ -279,31 +279,37 @@ class StrictITISpan { const IntType size_; }; -// Wrapper around an ITIVector to allow (and enforce) creation/resize/assign +// Wrapper around a StrongVector to allow (and enforce) creation/resize/assign // to use the index type for the size. // -// TODO(user): This should probably move into ITIVector, but note that this -// version is more strict and does not allow any other size types. -template -class StrictITIVector : public absl::StrongVector { +// TODO(user): This should probably move to StrongVector, but note that this +// version is stricter and does not allow any other size types. +template > +class StrictITIVector : public util_intops::StrongVector { public: - typedef IntType IndexType; - typedef absl::StrongVector ParentType; + using IndexType = IntType; + using ParentType = util_intops::StrongVector; using View = StrictITISpan; using ConstView = StrictITISpan; -// This allows for brace initialization, which is really useful in tests. 
-// It is not 'explicit' by design, so one can do vector = {...}; + StrictITIVector() = default; + explicit StrictITIVector(IntType size) : ParentType(size) {} + explicit StrictITIVector(const Alloc& a) : ParentType(a) {} + StrictITIVector(IntType n, const T& v, const Alloc& a = Alloc()) + : ParentType(n, v, a) {} + + // This allows for brace initialization, which is really useful in tests. + // It is not 'explicit' by design, so one can do vector = {...}; #if !defined(__ANDROID__) && (!defined(_MSC_VER) || (_MSC_VER >= 1800)) - StrictITIVector(std::initializer_list init_list) // NOLINT - : ParentType(init_list.begin(), init_list.end()) {} + StrictITIVector(std::initializer_list init_list, + const Alloc& a = Alloc()) // NOLINT + : ParentType(init_list.begin(), init_list.end(), a) {} #endif - StrictITIVector() : ParentType() {} - explicit StrictITIVector(IntType size) : ParentType(size.value()) {} - StrictITIVector(IntType size, const T& v) : ParentType(size.value(), v) {} + template - StrictITIVector(InputIteratorType first, InputIteratorType last) - : ParentType(first, last) {} + StrictITIVector(InputIteratorType first, InputIteratorType last, + const Alloc& a = Alloc()) + : ParentType(first, last, a) {} void resize(IntType size) { ParentType::resize(size.value()); } void resize(IntType size, const T& v) { ParentType::resize(size.value(), v); } diff --git a/ortools/lp_data/matrix_scaler.cc b/ortools/lp_data/matrix_scaler.cc index 3bd4d68830..1e3e46e2fb 100644 --- a/ortools/lp_data/matrix_scaler.cc +++ b/ortools/lp_data/matrix_scaler.cc @@ -152,8 +152,8 @@ void SparseMatrixScaler::Scale(GlopParameters::ScalingAlgorithm method) { namespace { template -void ScaleVector(const absl::StrongVector& scale, bool up, - absl::StrongVector* vector_to_scale) { +void ScaleVector(const util_intops::StrongVector& scale, bool up, + util_intops::StrongVector* vector_to_scale) { RETURN_IF_NULL(vector_to_scale); const I size(std::min(scale.size(), vector_to_scale->size())); if (up) { 
@@ -170,7 +170,7 @@ void ScaleVector(const absl::StrongVector& scale, bool up, template ColIndex CreateOrGetScaleIndex( InputIndexType num, LinearProgram* lp, - absl::StrongVector* scale_var_indices) { + util_intops::StrongVector* scale_var_indices) { if ((*scale_var_indices)[num] == -1) { (*scale_var_indices)[num] = lp->CreateNewVariable(); } @@ -367,8 +367,8 @@ Status SparseMatrixScaler::LPScale() { // Indices to variables in the LinearProgram populated by // GenerateLinearProgram. - absl::StrongVector col_scale_var_indices; - absl::StrongVector row_scale_var_indices; + util_intops::StrongVector col_scale_var_indices; + util_intops::StrongVector row_scale_var_indices; row_scale_var_indices.resize(RowToIntIndex(matrix_->num_rows()), kInvalidCol); col_scale_var_indices.resize(ColToIntIndex(matrix_->num_cols()), kInvalidCol); const ColIndex beta = linear_program->CreateNewVariable(); diff --git a/ortools/lp_data/permutation.h b/ortools/lp_data/permutation.h index bba3565b64..c7b585ef6a 100644 --- a/ortools/lp_data/permutation.h +++ b/ortools/lp_data/permutation.h @@ -90,7 +90,7 @@ class Permutation { int ComputeSignature() const; private: - absl::StrongVector perm_; + util_intops::StrongVector perm_; }; typedef Permutation RowPermutation; @@ -160,7 +160,7 @@ void Permutation::PopulateRandomly() { template bool Permutation::Check() const { const size_t size = perm_.size(); - absl::StrongVector visited(size, false); + util_intops::StrongVector visited(size, false); for (IndexType i(0); i < size; ++i) { if (perm_[i] < 0 || perm_[i] >= size) { return false; @@ -178,7 +178,7 @@ bool Permutation::Check() const { template int Permutation::ComputeSignature() const { const size_t size = perm_.size(); - absl::StrongVector visited(size); + util_intops::StrongVector visited(size); DCHECK(Check()); int signature = 1; for (IndexType i(0); i < size; ++i) { diff --git a/ortools/lp_data/sparse_row.h b/ortools/lp_data/sparse_row.h index 9e19c7888a..7fc4a72a96 100644 --- 
a/ortools/lp_data/sparse_row.h +++ b/ortools/lp_data/sparse_row.h @@ -58,7 +58,7 @@ class SparseRow : public SparseVector { }; // A matrix stored by rows. -typedef absl::StrongVector RowMajorSparseMatrix; +typedef util_intops::StrongVector RowMajorSparseMatrix; } // namespace glop } // namespace operations_research diff --git a/ortools/math_opt/core/python/BUILD.bazel b/ortools/math_opt/core/python/BUILD.bazel index 0c08f02fad..83f121976b 100644 --- a/ortools/math_opt/core/python/BUILD.bazel +++ b/ortools/math_opt/core/python/BUILD.bazel @@ -52,8 +52,8 @@ pybind_extension( "//ortools/util:solve_interrupter", "@com_google_absl//absl/memory", "@com_google_absl//absl/status:statusor", - "@pybind11_abseil//pybind11_abseil:import_status_module", - "@pybind11_abseil//pybind11_abseil:status_casters", + "@org_pybind11_abseil//pybind11_abseil:import_status_module", + "@org_pybind11_abseil//pybind11_abseil:status_casters", "@pybind11_protobuf//pybind11_protobuf:native_proto_caster", ], ) diff --git a/ortools/math_opt/solvers/glop_solver.cc b/ortools/math_opt/solvers/glop_solver.cc index de147923dd..3dd72ec35c 100644 --- a/ortools/math_opt/solvers/glop_solver.cc +++ b/ortools/math_opt/solvers/glop_solver.cc @@ -188,8 +188,8 @@ void UpdateIdIndexMap(glop::StrictITIVector indices_to_delete, IndexType num_indices, absl::flat_hash_map& id_index_map) { - absl::StrongVector new_indices(num_indices.value(), - IndexType(0)); + util_intops::StrongVector new_indices( + num_indices.value(), IndexType(0)); IndexType new_index(0); for (IndexType index(0); index < num_indices; ++index) { if (indices_to_delete[index]) { diff --git a/ortools/math_opt/solvers/message_callback_data_test.cc b/ortools/math_opt/solvers/message_callback_data_test.cc index d096cecacf..652f002410 100644 --- a/ortools/math_opt/solvers/message_callback_data_test.cc +++ b/ortools/math_opt/solvers/message_callback_data_test.cc @@ -131,7 +131,7 @@ TEST(BufferedMessageCallbackTest, NullFunctionNoEffect) { 
TEST(BufferedMessageCallbackTest, NonNullFunctionHasCallback) { BufferedMessageCallback buffered_cb( - [](const std::vector& messages) {}); + [](absl::Span messages) {}); EXPECT_TRUE(buffered_cb.has_user_message_callback()); } diff --git a/ortools/packing/CMakeLists.txt b/ortools/packing/CMakeLists.txt index e0be9b7382..857750dff3 100644 --- a/ortools/packing/CMakeLists.txt +++ b/ortools/packing/CMakeLists.txt @@ -28,7 +28,7 @@ target_include_directories(${NAME} PRIVATE target_link_libraries(${NAME} PRIVATE absl::flags absl::strings - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::packing ALIAS ${NAME}) # Vector Bin Packing diff --git a/ortools/packing/arc_flow_builder.cc b/ortools/packing/arc_flow_builder.cc index f471853a78..e7003aa0cc 100644 --- a/ortools/packing/arc_flow_builder.cc +++ b/ortools/packing/arc_flow_builder.cc @@ -78,7 +78,7 @@ class ArcFlowBuilder { void ForwardCompressionPass(const std::vector& source_node); // Can we fit one more item in the bin? - bool CanFitNewItem(const std::vector& used_dimensions, int item) const; + bool CanFitNewItem(absl::Span used_dimensions, int item) const; // Create a new used_dimensions that is used_dimensions + item dimensions. 
std::vector AddItem(const std::vector& used_dimensions, int item) const; @@ -151,7 +151,7 @@ ArcFlowBuilder::ArcFlowBuilder( }); } -bool ArcFlowBuilder::CanFitNewItem(const std::vector& used_dimensions, +bool ArcFlowBuilder::CanFitNewItem(absl::Span used_dimensions, int item) const { for (int d = 0; d < bin_dimensions_.size(); ++d) { if (used_dimensions[d] + items_[item].dimensions[d] > bin_dimensions_[d]) { @@ -402,8 +402,8 @@ bool ArcFlowGraph::Arc::operator<(const ArcFlowGraph::Arc& other) const { ArcFlowGraph BuildArcFlowGraph( const std::vector& bin_dimensions, - const std::vector>& item_dimensions_by_type, - const std::vector& demand_by_type) { + absl::Span> item_dimensions_by_type, + absl::Span demand_by_type) { ArcFlowBuilder afb(bin_dimensions, item_dimensions_by_type, demand_by_type); return afb.BuildVectorBinPackingGraph(); } diff --git a/ortools/packing/arc_flow_builder.h b/ortools/packing/arc_flow_builder.h index b71e2f1fd6..22a6900e03 100644 --- a/ortools/packing/arc_flow_builder.h +++ b/ortools/packing/arc_flow_builder.h @@ -45,6 +45,7 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/types/span.h" #include "ortools/base/types.h" namespace operations_research { @@ -83,8 +84,8 @@ struct ArcFlowGraph { // num_dimensions ArcFlowGraph BuildArcFlowGraph( const std::vector& bin_dimensions, - const std::vector>& item_dimensions_by_type, - const std::vector& demand_by_type); + absl::Span> item_dimensions_by_type, + absl::Span demand_by_type); } // namespace packing } // namespace operations_research diff --git a/ortools/packing/vector_bin_packing_parser.cc b/ortools/packing/vector_bin_packing_parser.cc index 9ffaa5eee4..3a43164ea5 100644 --- a/ortools/packing/vector_bin_packing_parser.cc +++ b/ortools/packing/vector_bin_packing_parser.cc @@ -27,7 +27,7 @@ namespace operations_research { namespace packing { namespace vbp { -bool VbpParser::ParseFile(const std::string& data_filename) { +bool VbpParser::ParseFile(absl::string_view 
data_filename) { vbp_.Clear(); load_status_ = DIMENSION_SECTION; diff --git a/ortools/packing/vector_bin_packing_parser.h b/ortools/packing/vector_bin_packing_parser.h index fec91292bb..cf4c9b125b 100644 --- a/ortools/packing/vector_bin_packing_parser.h +++ b/ortools/packing/vector_bin_packing_parser.h @@ -36,7 +36,7 @@ class VbpParser { public: // Return true iff there were no error, in which case problem() can be // called to retrieve the parsed problem. - bool ParseFile(const std::string& data_filename); + bool ParseFile(absl::string_view data_filename); // We keep the fully qualified name for SWIG. ::operations_research::packing::vbp::VectorBinPackingProblem problem() const { diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 6e1c19ef9d..5b68856f23 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -17,6 +17,14 @@ load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) +cc_library( + name = "scheduler", + hdrs = ["scheduler.h"], + deps = [ + "@com_google_absl//absl/functional:any_invocable", + ], +) + proto_library( name = "solve_log_proto", srcs = ["solve_log.proto"], diff --git a/ortools/pdlp/CMakeLists.txt b/ortools/pdlp/CMakeLists.txt index 068c32b188..d89c81466d 100644 --- a/ortools/pdlp/CMakeLists.txt +++ b/ortools/pdlp/CMakeLists.txt @@ -36,5 +36,5 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format Eigen3::Eigen - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::pdlp ALIAS ${NAME}) diff --git a/ortools/pdlp/python/CMakeLists.txt b/ortools/pdlp/python/CMakeLists.txt index c5e7c8cdb3..f01299634a 100644 --- a/ortools/pdlp/python/CMakeLists.txt +++ b/ortools/pdlp/python/CMakeLists.txt @@ -30,7 +30,7 @@ target_link_libraries(pdlp_pybind11 PRIVATE ${PROJECT_NAMESPACE}::ortools pybind11_native_proto_caster protobuf::libprotobuf - ) +) target_include_directories(pdlp_pybind11 
PRIVATE ${protobuf_SOURCE_DIR}) add_library(${PROJECT_NAMESPACE}::pdlp_pybind11 ALIAS pdlp_pybind11) diff --git a/ortools/pdlp/quadratic_program.cc b/ortools/pdlp/quadratic_program.cc index fe0927ce90..c00bb1660b 100644 --- a/ortools/pdlp/quadratic_program.cc +++ b/ortools/pdlp/quadratic_program.cc @@ -27,6 +27,7 @@ #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" #include "ortools/base/status_macros.h" #include "ortools/linear_solver/linear_solver.pb.h" @@ -76,6 +77,22 @@ absl::Status ValidateQuadraticProgramDimensions(const QuadraticProgram& qp) { qp.constraint_matrix.rows(), " rows ")); } + if (qp.variable_names.has_value() && + var_lb_size != qp.variable_names->size()) { + return absl::InvalidArgumentError(absl::StrCat( + "Inconsistent dimensions: variable lower bound vector has size ", + var_lb_size, " while variable names has size ", + qp.variable_names->size())); + } + + if (qp.constraint_names.has_value() && + con_lb_size != qp.constraint_names->size()) { + return absl::InvalidArgumentError(absl::StrCat( + "Inconsistent dimensions: constraint lower bound vector has size ", + con_lb_size, " while constraint names has size ", + qp.constraint_names->size())); + } + return absl::OkStatus(); } @@ -302,6 +319,115 @@ absl::StatusOr QpToMpModelProto(const QuadraticProgram& qp) { return proto; } +std::string ToString(const QuadraticProgram& qp, int64_t max_size) { + auto object_name = [](int64_t index, + const std::optional>& names, + absl::string_view prefix) { + if (names.has_value()) { + CHECK_LT(index, names->size()); + return (*names)[index]; + } + return absl::StrCat(prefix, index); + }; + auto variable_name = [&qp, &object_name](int64_t var_index) { + return object_name(var_index, qp.variable_names, "x"); + }; + auto constraint_name = [&qp, &object_name](int64_t con_index) { + return object_name(con_index, qp.constraint_names, "c"); + }; + + if (auto status = 
ValidateQuadraticProgramDimensions(qp); !status.ok()) { + return absl::StrCat("Quadratic program with inconsistent dimensions: ", + status.message()); + } + + std::string result; + if (qp.problem_name.has_value()) { + absl::StrAppend(&result, *qp.problem_name, ":\n"); + } + absl::StrAppend( + &result, (qp.objective_scaling_factor < 0.0 ? "maximize " : "minimize "), + qp.objective_scaling_factor, " * (", qp.objective_offset); + for (int64_t i = 0; i < qp.objective_vector.size(); ++i) { + if (qp.objective_vector[i] != 0.0) { + absl::StrAppend(&result, " + ", qp.objective_vector[i], " ", + variable_name(i)); + if (result.size() >= max_size) break; + } + } + if (qp.objective_matrix.has_value()) { + result.append(" + 1/2 * ("); + auto obj_diagonal = qp.objective_matrix->diagonal(); + for (int64_t i = 0; i < obj_diagonal.size(); ++i) { + if (obj_diagonal[i] != 0.0) { + absl::StrAppend(&result, " + ", obj_diagonal[i], " ", variable_name(i), + "^2"); + if (result.size() >= max_size) break; + } + } + // Closes the objective matrix expression. + result.append(")"); + } + // Closes the objective scaling factor expression. 
+ result.append(")\n"); + + Eigen::SparseMatrix + constraint_matrix_transpose = qp.constraint_matrix.transpose(); + for (int64_t constraint_idx = 0; + constraint_idx < constraint_matrix_transpose.outerSize(); + ++constraint_idx) { + absl::StrAppend(&result, constraint_name(constraint_idx), ":"); + if (qp.constraint_lower_bounds[constraint_idx] != + -std::numeric_limits::infinity()) { + absl::StrAppend(&result, " ", qp.constraint_lower_bounds[constraint_idx], + " <="); + } + for (decltype(constraint_matrix_transpose)::InnerIterator it( + constraint_matrix_transpose, constraint_idx); + it; ++it) { + absl::StrAppend(&result, " + ", it.value(), " ", + variable_name(it.index())); + if (result.size() >= max_size) break; + } + if (qp.constraint_upper_bounds[constraint_idx] != + std::numeric_limits::infinity()) { + absl::StrAppend(&result, + " <= ", qp.constraint_upper_bounds[constraint_idx]); + } + result.append("\n"); + } + result.append("Bounds\n"); + for (int64_t i = 0; i < qp.variable_lower_bounds.size(); ++i) { + if (qp.variable_lower_bounds[i] == + -std::numeric_limits::infinity()) { + if (qp.variable_upper_bounds[i] == + std::numeric_limits::infinity()) { + absl::StrAppend(&result, variable_name(i), " free\n"); + } else { + absl::StrAppend(&result, variable_name(i), + " <= ", qp.variable_upper_bounds[i], "\n"); + } + } else { + if (qp.variable_upper_bounds[i] == + std::numeric_limits::infinity()) { + absl::StrAppend(&result, variable_name(i), + " >= ", qp.variable_lower_bounds[i], "\n"); + + } else { + absl::StrAppend(&result, qp.variable_lower_bounds[i], + " <= ", variable_name(i), + " <= ", qp.variable_upper_bounds[i], "\n"); + } + } + if (result.size() >= max_size) break; + } + if (result.size() > max_size) { + result.resize(max_size - 4); + result.append("...\n"); + } + return result; +} + void SetEigenMatrixFromTriplets( std::vector> triplets, Eigen::SparseMatrix& matrix) { diff --git a/ortools/pdlp/quadratic_program.h b/ortools/pdlp/quadratic_program.h index 
0af5bf5b37..e9addf5559 100644 --- a/ortools/pdlp/quadratic_program.h +++ b/ortools/pdlp/quadratic_program.h @@ -40,6 +40,14 @@ namespace operations_research::pdlp { // contain positive infinities. Other than that all entries of all fields must // be finite. The `objective_matrix` must be diagonal and non-negative. // +// `variable_lower_bounds`, `variable_upper_bounds`, `objective_vector`, +// `objective_matrix` (if it has a value), and `variable_names` (if it has a +// value) must all have the same size as the number of columns in +// `constraint_matrix`. `constraint_lower_bounds`, `constraint_upper_bounds`, +// and `constraint_names` (if it has a value) must all have the same size as the +// number of rows in `constraint_matrix`. Consistency of these values is checked +// by `ValidateQuadraticProgramDimensions()`. +// // For convenience, the struct also stores `scaling_factor` and // `objective_offset`. These factors can be used to transform objective values // based on the problem definition above into objective values that are @@ -177,6 +185,12 @@ absl::Status CanFitInMpModelProto(const QuadraticProgram& qp); // `CanFitInMpModelProto()` fails. absl::StatusOr QpToMpModelProto(const QuadraticProgram& qp); +// Returns a "pretty" version of `qp`, truncating to at most `max_size` +// characters. This is for debugging purposes only - the format may change +// without notice. Although this output is vaguely similar to "LP format", it is +// not actually compatible with "LP format". 
+std::string ToString(const QuadraticProgram& qp, int64_t max_size = 1'000'000); + // Like `matrix.setFromTriplets(triplets)`, except that `setFromTriplets` // results in having three copies of the nonzeros in memory at the same time, // because it first fills one matrix from triplets, and then transposes it into diff --git a/ortools/pdlp/quadratic_program_test.cc b/ortools/pdlp/quadratic_program_test.cc index a734f706d5..5638c00b48 100644 --- a/ortools/pdlp/quadratic_program_test.cc +++ b/ortools/pdlp/quadratic_program_test.cc @@ -36,11 +36,16 @@ namespace { using ::google::protobuf::util::ParseTextOrDie; using ::operations_research::pdlp::internal::CombineRepeatedTripletsInPlace; using ::testing::ElementsAre; +using ::testing::EndsWith; using ::testing::Eq; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::Optional; using ::testing::PrintToString; +using ::testing::SizeIs; +using ::testing::StartsWith; +using ::testing::StrEq; + const double kInfinity = std::numeric_limits::infinity(); TEST(QuadraticProgram, DefaultConstructorWorks) { QuadraticProgram qp; } @@ -64,6 +69,15 @@ TEST(ValidateQuadraticProgramDimensions, ValidProblem) { EXPECT_TRUE(status.ok()) << status; } +TEST(ValidateQuadraticProgramDimensions, ValidProblemWithNames) { + QuadraticProgram qp = TestDiagonalQp1(); + qp.variable_names = {"x0", "x1"}; + qp.constraint_names = {"c0"}; + const absl::Status status = + ValidateQuadraticProgramDimensions(TestDiagonalQp1()); + EXPECT_TRUE(status.ok()) << status; +} + TEST(ValidateQuadraticProgramDimensions, ConstraintLowerBoundsInconsistent) { QuadraticProgram qp; qp.ResizeAndInitialize(/*num_variables=*/2, /*num_constraints=*/3); @@ -129,6 +143,22 @@ TEST(ValidateQuadraticProgramDimensions, ObjectiveMatrixRowsInconsistent) { absl::StatusCode::kInvalidArgument); } +TEST(ValidateQuadraticProgramDimensions, VariableNamesInconsistent) { + QuadraticProgram qp; + qp.ResizeAndInitialize(/*num_variables=*/2, /*num_constraints=*/3); + 
qp.variable_names = {"x0"}; + EXPECT_EQ(ValidateQuadraticProgramDimensions(qp).code(), + absl::StatusCode::kInvalidArgument); +} + +TEST(ValidateQuadraticProgramDimensions, ConstraintNamesInconsistent) { + QuadraticProgram qp; + qp.ResizeAndInitialize(/*num_variables=*/2, /*num_constraints=*/3); + qp.constraint_names = {"c0"}; + EXPECT_EQ(ValidateQuadraticProgramDimensions(qp).code(), + absl::StatusCode::kInvalidArgument); +} + class ConvertQpMpModelProtoTest : public testing::TestWithParam {}; // The LP: @@ -398,6 +428,79 @@ INSTANTIATE_TEST_SUITE_P( } }); +TEST(QuadraticProgramToStringTest, TestLpIsCorrect) { + QuadraticProgram qp = TestLp(); + EXPECT_THAT(ToString(qp), + StrEq("minimize 1 * (-14 + 5.5 x0 + -2 x1 + -1 x2 + 1 x3)\n" + "c0: 12 <= + 2 x0 + 1 x1 + 1 x2 + 2 x3 <= 12\n" + "c1: + 1 x0 + 1 x2 <= 7\n" + "c2: -4 <= + 4 x0\n" + "c3: -1 <= + 1.5 x2 + -1 x3 <= 1\n" + "Bounds\n" + "x0 free\n" + "x1 >= -2\n" + "x2 <= 6\n" + "2.5 <= x3 <= 3.5\n")); +} + +TEST(QuadraticProgramToStringTest, TestLpIsCorrectWithMaximization) { + QuadraticProgram qp = TestLp(); + qp.objective_scaling_factor = -1; + EXPECT_THAT(ToString(qp), + StrEq("maximize -1 * (-14 + 5.5 x0 + -2 x1 + -1 x2 + 1 x3)\n" + "c0: 12 <= + 2 x0 + 1 x1 + 1 x2 + 2 x3 <= 12\n" + "c1: + 1 x0 + 1 x2 <= 7\n" + "c2: -4 <= + 4 x0\n" + "c3: -1 <= + 1.5 x2 + -1 x3 <= 1\n" + "Bounds\n" + "x0 free\n" + "x1 >= -2\n" + "x2 <= 6\n" + "2.5 <= x3 <= 3.5\n")); +} + +TEST(QuadraticProgramToStringTest, TestLpTruncatesCorrectly) { + QuadraticProgram qp = TestLp(); + EXPECT_THAT(ToString(qp, 100), + AllOf(SizeIs(100), EndsWith("...\n"), + StrEq("minimize 1 * (-14 + 5.5 x0 + -2 x1 + -1 x2 + 1 x3)\n" + "c0: 12 <= + 2 x0 + 1 x1 + 1 x2 + 2 x3 <= 12\n" + "c...\n"))); +} + +TEST(QuadraticProgramToStringTest, TestDiagonalQp1IsCorrect) { + QuadraticProgram qp = TestDiagonalQp1(); + EXPECT_THAT( + ToString(qp), + StrEq("minimize 1 * (5 + -1 x0 + -1 x1 + 1/2 * ( + 4 x0^2 + 1 x1^2))\n" + "c0: + 1 x0 + 1 x1 <= 1\n" + "Bounds\n" + "1 <= x0 
<= 2\n" + "-2 <= x1 <= 4\n")); +} + +TEST(QuadraticProgramToStringTest, UsesVariableAndConstraintNames) { + QuadraticProgram qp = TestDiagonalQp1(); + qp.problem_name = "test"; + qp.variable_names = {"x", "y"}; + qp.constraint_names = {"total"}; + EXPECT_THAT( + ToString(qp), + StrEq("test:\n" + "minimize 1 * (5 + -1 x + -1 y + 1/2 * ( + 4 x^2 + 1 y^2))\n" + "total: + 1 x + 1 y <= 1\n" + "Bounds\n" + "1 <= x <= 2\n" + "-2 <= y <= 4\n")); +} + +TEST(QuadraticProgramToStringTest, InvalidLpVectorSizes) { + QuadraticProgram qp = TestLp(); + qp.variable_lower_bounds.resize(3); + EXPECT_THAT(ToString(qp), + StartsWith("Quadratic program with inconsistent dimensions: ")); +} + // A matcher for Eigen Triplets. MATCHER_P3(IsEigenTriplet, row, col, value, std::string(negation ? "isn't" : "is") + " the triplet " + diff --git a/ortools/pdlp/scheduler.h b/ortools/pdlp/scheduler.h new file mode 100644 index 0000000000..8df1c44bdc --- /dev/null +++ b/ortools/pdlp/scheduler.h @@ -0,0 +1,37 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef PDLP_SCHEDULER_H_ +#define PDLP_SCHEDULER_H_ + +#include + +#include "absl/functional/any_invocable.h" + +namespace operations_research::pdlp { + +// Thread scheduling interface. +class Scheduler { + public: + virtual ~Scheduler() = default; + virtual int num_threads() const = 0; + virtual std::string info_string() const = 0; + + // Calls `do_func(i)` in parallel for `i` from `start` to `end-1`. 
+ virtual void ParallelFor(int start, int end, + absl::AnyInvocable do_func) = 0; +}; + +} // namespace operations_research::pdlp + +#endif // PDLP_SCHEDULER_H_ diff --git a/ortools/port/CMakeLists.txt b/ortools/port/CMakeLists.txt index d89229d695..01dc28b877 100644 --- a/ortools/port/CMakeLists.txt +++ b/ortools/port/CMakeLists.txt @@ -28,5 +28,5 @@ target_include_directories(${NAME} PRIVATE target_link_libraries(${NAME} PRIVATE absl::strings protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::port ALIAS ${NAME}) diff --git a/ortools/routing/parsers/cvrptw_lib.h b/ortools/routing/parsers/cvrptw_lib.h index 1561a7e586..f2840ed6c1 100644 --- a/ortools/routing/parsers/cvrptw_lib.h +++ b/ortools/routing/parsers/cvrptw_lib.h @@ -70,7 +70,7 @@ class LocationContainer { std::mt19937 randomizer_; const int64_t speed_; - absl::StrongVector locations_; + util_intops::StrongVector locations_; }; // Random demand. diff --git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc index b433283297..f8bd5b3d42 100644 --- a/ortools/sat/2d_orthogonal_packing.cc +++ b/ortools/sat/2d_orthogonal_packing.cc @@ -372,30 +372,32 @@ bool FindHeuristicSchedulingSolution( } // namespace +// We want to find the minimum set of values of `k` that would always find a +// conflict if there is a `k` for which it exists. In the literature it is +// often implied (but not stated) that it is sufficient to test the values of +// `k` that correspond to the size of an item. This is not true. To find the +// minimum set of values of `k` we look for all values of `k` that are +// "extreme": ie., the rounding on the division truncates the most (or the +// least) amount, depending on the sign it appears in the formula. 
+// +// To find these extreme values, we look for all local minima of the energy +// slack after applying the DFF (we multiply by `k` for convenience): +// k * f_k(H) * W - sum_i k * f_k(h_i) * w_i +// If this value ever becomes negative for a value of `k`, it must happen in a +// local minimum. Then we use the fact that +// k * floor(x / k) = x - x % k +// and that x%k has a local minimum when k=x/i and a local maximum when k=1+x/i +// for every integer i. The final finer point in the calculation is +// realizing that if +// sum_{i, h_i > H/2} w_i > W +// then you have more "large" objects than it fits in the box, and you will +// have a conflict using the DFF f_0 for l=H/2. So we can safely ignore this +// case for the more expensive DFF f_2 calculation. void OrthogonalPackingInfeasibilityDetector::GetAllCandidatesForKForDff2( - absl::Span sizes_x, - absl::Span sizes_y, IntegerValue x_bb_size, - IntegerValue sqrt_bb_size, IntegerValue y_bb_size, - Bitset64& candidates) { - // We want to find the minimum set of values of `k` that would always find a - // conflict if there is a `k` for which it exists. In the literature it is - // often implied (but not stated) that it is sufficient to test the values of - // `k` that correspond to the size of an item. This is not true. To find the - // minimum set of values of `k` we look for all values of `k` that are - // "extreme": ie., the rounding on the division truncates the most (or the - // least) amount, depending on the sign it appears in the formula. - IntegerValue sum_widths = 0; - for (int i = 0; i < sizes_x.size(); i++) { - const IntegerValue x_size = sizes_x[i]; - if (2 * x_size > x_bb_size) { - sum_widths += 2 * sizes_y[i]; - } else if (2 * x_size == x_bb_size) { - sum_widths += sizes_y[i]; - } - } - const IntegerValue round_up = sum_widths > 2 * y_bb_size ? 
0 : 1; + absl::Span sizes, IntegerValue bb_size, + IntegerValue sqrt_bb_size, Bitset64& candidates) { // x_bb_size is less than 65536, so this fits in only 4kib. - candidates.ClearAndResize(x_bb_size / 2 + 2); + candidates.ClearAndResize(bb_size / 2 + 2); // `sqrt_bb_size` is lower than 256. for (IntegerValue i = 2; i <= sqrt_bb_size; i++) { @@ -404,15 +406,14 @@ void OrthogonalPackingInfeasibilityDetector::GetAllCandidatesForKForDff2( for (int i = 1; i <= sqrt_bb_size; i++) { const QuickSmallDivision div(i); if (i > 1) { - candidates.Set(div.DivideByDivisor(x_bb_size.value() + round_up.value())); + candidates.Set(div.DivideByDivisor(bb_size.value())); } - for (int k = 0; k < sizes_x.size(); k++) { - IntegerValue x_size = sizes_x[k]; - if (2 * x_size > x_bb_size && x_size < x_bb_size) { - candidates.Set( - div.DivideByDivisor(x_bb_size.value() - x_size.value() + 1)); - } else if (2 * x_size < x_bb_size) { - candidates.Set(div.DivideByDivisor(x_size.value())); + for (int k = 0; k < sizes.size(); k++) { + IntegerValue size = sizes[k]; + if (2 * size > bb_size && size < bb_size) { + candidates.Set(div.DivideByDivisor(bb_size.value() - size.value() + 1)); + } else if (2 * size < bb_size) { + candidates.Set(div.DivideByDivisor(size.value())); } } } @@ -430,11 +431,11 @@ void OrthogonalPackingInfeasibilityDetector::GetAllCandidatesForKForDff2( // composing several times each. // // [1] F. Clautiaux, PhD thesis, hal/tel-00749411. 
- candidates.Resize(x_bb_size / 4 + 1); // Erase all >= C/4 - candidates.Resize(x_bb_size / 3 + 2); // Make room for the two special values - candidates.Set(x_bb_size / 4 + 1); - if (x_bb_size > 3) { - candidates.Set(x_bb_size / 3 + 1); + candidates.Resize(bb_size / 4 + 1); // Erase all >= C/4 + candidates.Resize(bb_size / 3 + 2); // Make room for the two special values + candidates.Set(bb_size / 4 + 1); + if (bb_size > 3) { + candidates.Set(bb_size / 3 + 1); } } @@ -495,8 +496,7 @@ OrthogonalPackingInfeasibilityDetector::CheckFeasibilityWithDualFunction2( } } } else { - GetAllCandidatesForKForDff2(sizes_x, sizes_y, x_bb_size, sqrt_bb_size, - y_bb_size, candidates); + GetAllCandidatesForKForDff2(sizes_x, x_bb_size, sqrt_bb_size, candidates); if (max_number_of_parameters_to_check < max_possible_number_of_parameters) { // We might have produced too many candidates. Let's count them and if it diff --git a/ortools/sat/2d_orthogonal_packing.h b/ortools/sat/2d_orthogonal_packing.h index d591af85b2..68bdf2a0b7 100644 --- a/ortools/sat/2d_orthogonal_packing.h +++ b/ortools/sat/2d_orthogonal_packing.h @@ -171,11 +171,9 @@ class OrthogonalPackingInfeasibilityDetector { // All sizes must be positive values less than UINT16_MAX. // The returned bitset will contain less elements than // min(sqrt_bb_size * num_items, x_bb_size/4+1). 
- void GetAllCandidatesForKForDff2(absl::Span sizes_x, - absl::Span sizes_y, - IntegerValue x_bb_size, + void GetAllCandidatesForKForDff2(absl::Span sizes, + IntegerValue bb_size, IntegerValue sqrt_bb_size, - IntegerValue y_bb_size, Bitset64& candidates); OrthogonalPackingResult CheckFeasibilityWithDualFunction2( diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 35aceb5c86..95eddf91cb 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -20,6 +20,7 @@ #include "absl/log/check.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" +#include "absl/types/span.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" @@ -71,7 +72,7 @@ std::vector GenerateNonConflictingRectangles( } std::vector MakeItemsFromRectangles( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random) { IntegerValue size_max_x = 0; IntegerValue size_max_y = 0; diff --git a/ortools/sat/2d_orthogonal_packing_testing.h b/ortools/sat/2d_orthogonal_packing_testing.h index 2f12462456..68e514f260 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.h +++ b/ortools/sat/2d_orthogonal_packing_testing.h @@ -17,6 +17,7 @@ #include #include "absl/random/bit_gen_ref.h" +#include "absl/types/span.h" #include "ortools/sat/diffn_util.h" namespace operations_research { @@ -26,7 +27,7 @@ std::vector GenerateNonConflictingRectangles(int num_rectangles, absl::BitGenRef random); std::vector MakeItemsFromRectangles( - const std::vector& rectangles, double slack_factor, + absl::Span rectangles, double slack_factor, absl::BitGenRef random); std::vector diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 8b0a2881f5..423f9438bf 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -138,10 +138,14 @@ cc_library( "//ortools/util:time_limit", 
"@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/cleanup", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/hash", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/random", "@com_google_absl//absl/random:bit_gen_ref", @@ -188,9 +192,11 @@ cc_library( ":util", "//ortools/base:stl_util", "//ortools/graph:strongly_connected_components", + "//ortools/util:dense_set", "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", @@ -218,14 +224,17 @@ cc_library( "//ortools/algorithms:binary_search", "//ortools/util:sorted_interval_list", "//ortools/util:strong_integers", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/functional:any_invocable", "@com_google_absl//absl/functional:bind_front", "@com_google_absl//absl/functional:function_ref", + "@com_google_absl//absl/hash", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:bit_gen_ref", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", + "@com_google_absl//absl/synchronization", "@com_google_absl//absl/types:span", ], ) @@ -280,13 +289,14 @@ cc_library( "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:span", ], ) cc_library( - name = "cp_model_solver", - srcs = ["cp_model_solver.cc"], - hdrs = ["cp_model_solver.h"], + name = "cp_model_solver_helpers", + srcs = 
["cp_model_solver_helpers.cc"], + hdrs = ["cp_model_solver_helpers.h"], deps = [ ":circuit", ":clause", @@ -335,6 +345,122 @@ cc_library( "//ortools/base", "//ortools/base:status_macros", "//ortools/base:strong_vector", + "//ortools/base:timer", + "//ortools/base:types", + "//ortools/graph:connected_components", + "//ortools/linear_solver:linear_solver_cc_proto", + "//ortools/port:proto_utils", + "//ortools/util:logging", + "//ortools/util:random_engine", + "//ortools/util:sorted_interval_list", + "//ortools/util:strong_integers", + "//ortools/util:time_limit", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/cleanup", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/log", + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/meta:type_traits", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/synchronization", + "@com_google_absl//absl/time", + "@com_google_absl//absl/types:span", + "@com_google_protobuf//:protobuf", + ], +) + +cc_library( + name = "shaving_solver", + srcs = ["shaving_solver.cc"], + hdrs = ["shaving_solver.h"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_lns", + ":cp_model_presolve", + ":cp_model_solver_helpers", + ":cp_model_utils", + ":integer", + ":model", + ":presolve_context", + ":sat_parameters_cc_proto", + ":subsolver", + ":synchronization", + ":util", + "//ortools/graph:connected_components", + "//ortools/util:sorted_interval_list", + "//ortools/util:time_limit", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/log", + 
"@com_google_absl//absl/log:check", + "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/synchronization", + ], +) + +cc_library( + name = "cp_model_solver", + srcs = ["cp_model_solver.cc"], + hdrs = ["cp_model_solver.h"], + deps = [ + ":circuit", + ":clause", + ":cp_model_cc_proto", + ":cp_model_checker", + ":cp_model_lns", + ":cp_model_loader", + ":cp_model_mapping", + ":cp_model_postsolve", + ":cp_model_presolve", + ":cp_model_search", + ":cp_model_solver_helpers", + ":cp_model_symmetries", + ":cp_model_utils", + ":cuts", + ":feasibility_jump", + ":feasibility_pump", + ":implied_bounds", + ":integer", + ":integer_expr", + ":integer_search", + ":intervals", + ":lb_tree_search", + ":linear_constraint", + ":linear_model", + ":linear_programming_constraint", + ":linear_relaxation", + ":lp_utils", + ":max_hs", + ":model", + ":optimization", + ":parameters_validation", + ":precedences", + ":presolve_context", + ":probing", + ":rins", + ":sat_base", + ":sat_inprocessing", + ":sat_parameters_cc_proto", + ":sat_solver", + ":shaving_solver", + ":simplification", + ":stat_tables", + ":subsolver", + ":synchronization", + ":util", + ":work_assignment", + "//ortools/base", + "//ortools/base:status_macros", + "//ortools/base:strong_vector", "//ortools/base:threadpool", "//ortools/base:timer", "//ortools/base:types", @@ -352,6 +478,7 @@ cc_library( "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", @@ -362,6 +489,7 @@ cc_library( "@com_google_absl//absl/strings", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/synchronization", + "@com_google_absl//absl/time", "@com_google_absl//absl/types:span", "@com_google_protobuf//:protobuf", ], @@ -595,6 +723,7 @@ 
cc_library( "//ortools/base", "//ortools/base:types", "//ortools/port:proto_utils", + "//ortools/util:logging", "//ortools/util:sorted_interval_list", "@com_google_absl//absl/log:check", ], @@ -817,6 +946,7 @@ cc_library( "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/functional:any_invocable", "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:bit_gen_ref", "@com_google_absl//absl/random:distributions", @@ -1021,6 +1151,8 @@ cc_library( "//ortools/base:strong_vector", "//ortools/util:strong_integers", "//ortools/util:time_limit", + "@com_google_absl//absl/cleanup", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", "@com_google_absl//absl/time", @@ -1292,14 +1424,18 @@ cc_library( srcs = ["cumulative_energy.cc"], hdrs = ["cumulative_energy.h"], deps = [ + ":2d_orthogonal_packing", + ":diffn_util", ":integer", ":intervals", ":model", + ":synchronization", ":theta_tree", ":util", - "//ortools/base", "//ortools/base:iterator_adaptors", "//ortools/util:strong_integers", + "@com_google_absl//absl/container:inlined_vector", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", ], ) @@ -1501,6 +1637,7 @@ cc_library( "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/numeric:int128", @@ -1881,6 +2018,7 @@ cc_library( "@com_google_absl//absl/log:check", "@com_google_absl//absl/random:bit_gen_ref", "@com_google_absl//absl/random:distributions", + "@com_google_absl//absl/types:span", ], ) @@ -1984,6 +2122,7 @@ cc_library( "//ortools/util:time_limit", "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/base:core_headers", + 
"@com_google_absl//absl/base:log_severity", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log", @@ -2063,6 +2202,7 @@ cc_library( "@com_google_absl//absl/strings", "@com_google_absl//absl/synchronization", "@com_google_absl//absl/time", + "@com_google_absl//absl/types:span", ], ) @@ -2137,6 +2277,7 @@ cc_binary( "//ortools/base:path", "//ortools/util:file_util", "//ortools/util:filelineiter", + "//ortools/util:sorted_interval_list", "@com_google_absl//absl/flags:flag", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", @@ -2169,6 +2310,7 @@ cc_library( hdrs = ["cp_model_symmetries.h"], deps = [ ":cp_model_cc_proto", + ":cp_model_checker", ":cp_model_mapping", ":cp_model_utils", ":model", @@ -2178,6 +2320,7 @@ cc_library( ":sat_solver", ":symmetry_util", ":util", + "//ortools/algorithms:binary_search", "//ortools/algorithms:find_graph_symmetries", "//ortools/algorithms:sparse_permutation", "//ortools/base", @@ -2185,7 +2328,10 @@ cc_library( "//ortools/graph", "//ortools/util:affine_relation", "//ortools/util:logging", + "//ortools/util:saturated_arithmetic", "//ortools/util:time_limit", + "@com_google_absl//absl/algorithm:container", + "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log:check", @@ -2215,6 +2361,7 @@ cc_library( "//ortools/util:logging", "//ortools/util:sorted_interval_list", "//ortools/util:time_limit", + "@com_google_absl//absl/log:check", ], ) diff --git a/ortools/sat/CMakeLists.txt b/ortools/sat/CMakeLists.txt index fe66dde584..027aa46c6f 100644 --- a/ortools/sat/CMakeLists.txt +++ b/ortools/sat/CMakeLists.txt @@ -36,7 +36,7 @@ target_link_libraries(${NAME} PRIVATE absl::str_format protobuf::libprotobuf $<$:Coin::Cbc> - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) 
#add_library(${PROJECT_NAMESPACE}::sat ALIAS ${NAME}) # Sat Runner diff --git a/ortools/sat/boolean_problem.cc b/ortools/sat/boolean_problem.cc index 642e819232..6192174cb8 100644 --- a/ortools/sat/boolean_problem.cc +++ b/ortools/sat/boolean_problem.cc @@ -753,7 +753,7 @@ void FindLinearBooleanProblemSymmetries( } void ApplyLiteralMappingToBooleanProblem( - const absl::StrongVector& mapping, + const util_intops::StrongVector& mapping, LinearBooleanProblem* problem) { Coefficient bound_shift; Coefficient max_value; @@ -843,7 +843,7 @@ void ProbeAndSimplifyProblem(SatPostsolver* postsolver, LOG(INFO) << "UNSAT when loading the problem."; } - absl::StrongVector equiv_map; + util_intops::StrongVector equiv_map; ProbeAndFindEquivalentLiteral(&solver, postsolver, /*drat_writer=*/nullptr, &equiv_map); @@ -869,7 +869,7 @@ void ProbeAndSimplifyProblem(SatPostsolver* postsolver, // Remap the variables into a dense set. All the variables for which the // equiv_map is not the identity are no longer needed. BooleanVariable new_var(0); - absl::StrongVector var_map; + util_intops::StrongVector var_map; for (BooleanVariable var(0); var < solver.NumVariables(); ++var) { if (equiv_map[Literal(var, true).Index()] == Literal(var, true).Index()) { var_map.push_back(new_var); diff --git a/ortools/sat/boolean_problem.h b/ortools/sat/boolean_problem.h index 03c0e2c9f3..a2006e1fa8 100644 --- a/ortools/sat/boolean_problem.h +++ b/ortools/sat/boolean_problem.h @@ -129,7 +129,7 @@ void FindLinearBooleanProblemSymmetries( // of the correct size. It can also map a literal index to kTrueLiteralIndex // or kFalseLiteralIndex in order to fix the variable. 
void ApplyLiteralMappingToBooleanProblem( - const absl::StrongVector& mapping, + const util_intops::StrongVector& mapping, LinearBooleanProblem* problem); // A simple preprocessing step that does basic probing and removes the fixed and diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index 684676a894..23d6e28d70 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -217,20 +217,22 @@ SatClause* ClauseManager::ReasonClause(int trail_index) const { } bool ClauseManager::AddClause(absl::Span literals) { - return AddClause(literals, trail_); + return AddClause(literals, trail_, -1); } -bool ClauseManager::AddClause(absl::Span literals, - Trail* trail) { +bool ClauseManager::AddClause(absl::Span literals, Trail* trail, + int lbd) { SatClause* clause = SatClause::Create(literals); clauses_.push_back(clause); + if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals); return AttachAndPropagate(clause, trail); } SatClause* ClauseManager::AddRemovableClause( - const std::vector& literals, Trail* trail) { + const std::vector& literals, Trail* trail, int lbd) { SatClause* clause = SatClause::Create(literals); clauses_.push_back(clause); + if (add_clause_callback_ != nullptr) add_clause_callback_(lbd, literals); CHECK(AttachAndPropagate(clause, trail)); return clause; } @@ -558,7 +560,9 @@ bool BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { is_dag_ = false; num_implications_ += 2; - if (enable_sharing_ && add_callback_ != nullptr) add_callback_(a, b); + if (enable_sharing_ && add_binary_callback_ != nullptr) { + add_binary_callback_(a, b); + } const auto& assignment = trail_->Assignment(); if (trail_->CurrentDecisionLevel() == 0) { @@ -1134,9 +1138,9 @@ void BinaryImplicationGraph::RemoveFixedVariables() { class SccGraph { public: using Implications = - absl::StrongVector>; + util_intops::StrongVector>; using AtMostOnes = - absl::StrongVector>; + util_intops::StrongVector>; using SccFinder = 
StronglyConnectedComponentsFinder>; @@ -1668,8 +1672,8 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( detector.SetWorkLimit(1e9); std::vector dense_index_to_index; - absl::StrongVector> max_cliques_containing( - implications_.size()); + util_intops::StrongVector> + max_cliques_containing(implications_.size()); // We starts by processing larger constraints first. // But we want the output order to be stable. @@ -1796,8 +1800,8 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( template std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( const absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values) { + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& expanded_lp_values) { std::vector clique(at_most_one.begin(), at_most_one.end()); std::vector intersection; double clique_weight = 0.0; @@ -1874,14 +1878,16 @@ std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( } // Make sure both version are compiled. 
-template std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight< - true>(const absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values); -template std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight< - false>(const absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values); +template std::vector +BinaryImplicationGraph::ExpandAtMostOneWithWeight( + const absl::Span at_most_one, + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& expanded_lp_values); +template std::vector +BinaryImplicationGraph::ExpandAtMostOneWithWeight( + const absl::Span at_most_one, + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& expanded_lp_values); const std::vector>& BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( @@ -1889,9 +1895,10 @@ BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( const std::vector& lp_values) { // We only want to generate a cut with literals from the LP, not extra ones. const int num_literals = implications_.size(); - absl::StrongVector can_be_included(num_literals, false); - absl::StrongVector expanded_lp_values(num_literals, - 0.0); + util_intops::StrongVector can_be_included(num_literals, + false); + util_intops::StrongVector expanded_lp_values( + num_literals, 0.0); const int size = literals.size(); for (int i = 0; i < size; ++i) { const Literal l = literals[i]; @@ -1975,8 +1982,8 @@ std::vector> BinaryImplicationGraph::HeuristicAmoPartition(std::vector* literals) { std::vector> result; - absl::StrongVector to_consider(implications_.size(), - false); + util_intops::StrongVector to_consider( + implications_.size(), false); for (const Literal l : *literals) to_consider[l] = true; // Priority queue of (intersection_size, start_of_amo). 
@@ -2389,7 +2396,7 @@ bool BinaryImplicationGraph::InvariantsAreOk() { } // Check that reverse topo order is correct. - absl::StrongVector lit_to_order; + util_intops::StrongVector lit_to_order; if (is_dag_) { lit_to_order.assign(implications_.size(), -1); for (int i = 0; i < reverse_topological_order_.size(); ++i) { diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index feda1f807e..23a0308b42 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -28,6 +28,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" +#include "absl/functional/any_invocable.h" #include "absl/log/check.h" #include "absl/random/bit_gen_ref.h" #include "absl/types/span.h" @@ -179,13 +180,13 @@ class ClauseManager : public SatPropagator { SatClause* ReasonClause(int trail_index) const; // Adds a new clause and perform initial propagation for this clause only. - bool AddClause(absl::Span literals, Trail* trail); + bool AddClause(absl::Span literals, Trail* trail, int lbd); bool AddClause(absl::Span literals); // Same as AddClause() for a removable clause. This is only called on learned // conflict, so this should never have all its literal at false (CHECKED). SatClause* AddRemovableClause(const std::vector& literals, - Trail* trail); + Trail* trail, int lbd); // Lazily detach the given clause. The deletion will actually occur when // CleanUpWatchers() is called. The later needs to be called before any other @@ -325,6 +326,12 @@ class ClauseManager : public SatPropagator { return watchers_on_false_[false_literal]; } + void SetAddClauseCallback( + absl::AnyInvocable)> + add_clause_callback) { + add_clause_callback_ = std::move(add_clause_callback); + } + private: // Attaches the given clause. This eventually propagates a literal which is // enqueued on the trail. Returns false if a contradiction was encountered. 
@@ -343,7 +350,8 @@ class ClauseManager : public SatPropagator { // Common code between LazyDetach() and Detach(). void InternalDetach(SatClause* clause); - absl::StrongVector> watchers_on_false_; + util_intops::StrongVector> + watchers_on_false_; // SatClause reasons by trail_index. std::vector reasons_; @@ -378,6 +386,9 @@ class ClauseManager : public SatPropagator { absl::flat_hash_map clauses_info_; DratProofHandler* drat_proof_handler_ = nullptr; + + absl::AnyInvocable)> + add_clause_callback_ = nullptr; }; // A binary clause. This is used by BinaryClauseManager. @@ -529,9 +540,8 @@ class BinaryImplicationGraph : public SatPropagator { // were we keep new implication and add them in batches. void EnableSharing(bool enable) { enable_sharing_ = enable; } void SetAdditionCallback(std::function f) { - add_callback_ = f; + add_binary_callback_ = f; } - // An at most one constraint of size n is a compact way to encode n * (n - 1) // implications. This must only be called at level zero. // @@ -679,8 +689,8 @@ class BinaryImplicationGraph : public SatPropagator { return num_redundant_implications_; } - // Returns the number of current implications. Note that a => b and not(b) => - // not(a) are counted separately since they appear separately in our + // Returns the number of current implications. Note that a => b and not(b) + // => not(a) are counted separately since they appear separately in our // propagation lists. The number of size 2 clauses that represent the same // thing is half this number. int64_t num_implications() const { return num_implications_; } @@ -774,8 +784,9 @@ class BinaryImplicationGraph : public SatPropagator { template std::vector ExpandAtMostOneWithWeight( absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values); + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& + expanded_lp_values); // Restarts the at_most_one iterator. 
void ResetAtMostOneIterator() { at_most_one_iterator_ = 0; } @@ -850,12 +861,12 @@ class BinaryImplicationGraph : public SatPropagator { // // TODO(user): We could be even more efficient since a size of int32_t is // enough for us and we could store in common the inlined/not-inlined size. - absl::StrongVector> + util_intops::StrongVector> implications_; int64_t num_implications_ = 0; // Used by RemoveDuplicates() and NotifyPossibleDuplicate(). - absl::StrongVector might_have_dups_; + util_intops::StrongVector might_have_dups_; std::vector to_clean_; // Internal representation of at_most_one constraints. Each entry point to the @@ -869,7 +880,7 @@ class BinaryImplicationGraph : public SatPropagator { // // TODO(user): We could be more cache efficient by combining this with // implications_ in some way. Do some propagation speed benchmark. - absl::StrongVector> + util_intops::StrongVector> at_most_ones_; std::vector at_most_one_buffer_; const int at_most_one_max_expansion_size_; @@ -918,33 +929,33 @@ class BinaryImplicationGraph : public SatPropagator { bool is_dag_ = false; std::vector reverse_topological_order_; Bitset64 is_redundant_; - absl::StrongVector representative_of_; + util_intops::StrongVector representative_of_; // For in-processing and removing variables. std::vector direct_implications_; std::vector direct_implications_of_negated_literal_; - absl::StrongVector in_direct_implications_; - absl::StrongVector is_removed_; - absl::StrongVector estimated_sizes_; + util_intops::StrongVector in_direct_implications_; + util_intops::StrongVector is_removed_; + util_intops::StrongVector estimated_sizes_; // For RemoveFixedVariables(). 
int num_processed_fixed_variables_ = 0; bool enable_sharing_ = true; - std::function add_callback_ = nullptr; + std::function add_binary_callback_ = nullptr; }; extern template std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( const absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values); + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& expanded_lp_values); extern template std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( const absl::Span at_most_one, - const absl::StrongVector& can_be_included, - const absl::StrongVector& expanded_lp_values); + const util_intops::StrongVector& can_be_included, + const util_intops::StrongVector& expanded_lp_values); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/colab/visualization.py b/ortools/sat/colab/visualization.py index ae1b1aa3df..884243d0c3 100644 --- a/ortools/sat/colab/visualization.py +++ b/ortools/sat/colab/visualization.py @@ -40,7 +40,7 @@ def ToDate(v): return "2016-01-01 6:%02i:%02i" % (v / 60, v % 60) -class ColorManager(object): +class ColorManager: """Utility to create colors to use in visualization.""" def ScaledColor(self, sr, sg, sb, er, eg, eb, num_steps, step): @@ -105,7 +105,7 @@ def DisplayJobshop(starts, durations, machines, name): fig.show() -class SvgWrapper(object): +class SvgWrapper: """Simple SVG wrapper to use in colab.""" def __init__(self, sizex, sizey, scaling=20.0): diff --git a/ortools/sat/constraint_violation.cc b/ortools/sat/constraint_violation.cc index 76aba599e3..0c63fc4dbb 100644 --- a/ortools/sat/constraint_violation.cc +++ b/ortools/sat/constraint_violation.cc @@ -18,10 +18,12 @@ #include #include #include +#include #include #include #include "absl/algorithm/container.h" +#include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/types/span.h" @@ -31,12 +33,15 @@ 
#include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/util.h" +#include "ortools/util/dense_set.h" #include "ortools/util/saturated_arithmetic.h" #include "ortools/util/sorted_interval_list.h" namespace operations_research { namespace sat { +namespace { + int64_t ExprValue(const LinearExpressionProto& expr, absl::Span solution) { int64_t result = expr.offset(); @@ -46,6 +51,47 @@ int64_t ExprValue(const LinearExpressionProto& expr, return result; } +LinearExpressionProto ExprDiff(const LinearExpressionProto& a, + const LinearExpressionProto& b) { + LinearExpressionProto result; + result.set_offset(a.offset() - b.offset()); + result.mutable_vars()->Reserve(a.vars().size() + b.vars().size()); + result.mutable_coeffs()->Reserve(a.vars().size() + b.vars().size()); + for (int i = 0; i < a.vars().size(); ++i) { + result.add_vars(a.vars(i)); + result.add_coeffs(a.coeffs(i)); + } + for (int i = 0; i < b.vars().size(); ++i) { + result.add_vars(b.vars(i)); + result.add_coeffs(-b.coeffs(i)); + } + return result; +} + +LinearExpressionProto LinearExprSum(LinearExpressionProto a, + LinearExpressionProto b) { + LinearExpressionProto result; + result.set_offset(a.offset() + b.offset()); + result.mutable_vars()->Reserve(a.vars().size() + b.vars().size()); + result.mutable_coeffs()->Reserve(a.vars().size() + b.vars().size()); + for (const LinearExpressionProto& p : {a, b}) { + for (int i = 0; i < p.vars().size(); ++i) { + result.add_vars(p.vars(i)); + result.add_coeffs(p.coeffs(i)); + } + } + return result; +} + +LinearExpressionProto NegatedLinearExpression(LinearExpressionProto a) { + LinearExpressionProto result = a; + result.set_offset(-a.offset()); + for (int64_t& coeff : *result.mutable_coeffs()) { + coeff = -coeff; + } + return result; +} + int64_t ExprMin(const LinearExpressionProto& expr, const CpModelProto& model) { int64_t result = expr.offset(); for (int i = 0; i < expr.vars_size(); ++i) { @@ -80,6 +126,8 @@ bool 
LiteralValue(int lit, absl::Span solution) { } } +} // namespace + // ---- LinearIncrementalEvaluator ----- int LinearIncrementalEvaluator::NewConstraint(Domain domain) { @@ -170,26 +218,32 @@ void LinearIncrementalEvaluator::ComputeInitialActivities( num_false_enforcement_.assign(num_constraints_, 0); // Update these numbers for all columns. - for (int var = 0; var < columns_.size(); ++var) { + const int num_vars = columns_.size(); + for (int var = 0; var < num_vars; ++var) { const SpanData& data = columns_[var]; const int64_t value = solution[var]; - int i = data.start; - for (int k = 0; k < data.num_pos_literal; ++k, ++i) { - const int c = ct_buffer_[i]; - if (value == 0) num_false_enforcement_[c]++; - } - for (int k = 0; k < data.num_neg_literal; ++k, ++i) { - const int c = ct_buffer_[i]; - if (value == 1) num_false_enforcement_[c]++; + if (value == 0 && data.num_pos_literal > 0) { + const int* ct_indices = &ct_buffer_[data.start]; + for (int k = 0; k < data.num_pos_literal; ++k) { + num_false_enforcement_[ct_indices[k]]++; + } } - if (value == 0) continue; - int j = data.linear_start; - for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { - const int c = ct_buffer_[i]; - const int64_t coeff = coeff_buffer_[j]; - activities_[c] += coeff * value; + if (value == 1 && data.num_neg_literal > 0) { + const int* ct_indices = &ct_buffer_[data.start + data.num_pos_literal]; + for (int k = 0; k < data.num_neg_literal; ++k) { + num_false_enforcement_[ct_indices[k]]++; + } + } + + if (value != 0 && data.num_linear_entries > 0) { + const int* ct_indices = + &ct_buffer_[data.start + data.num_pos_literal + data.num_neg_literal]; + const int64_t* coeffs = &coeff_buffer_[data.linear_start]; + for (int k = 0; k < data.num_linear_entries; ++k) { + activities_[ct_indices[k]] += coeffs[k] * value; + } } } @@ -200,65 +254,16 @@ void LinearIncrementalEvaluator::ComputeInitialActivities( } } -// Note that the code assumes that a column has no duplicates ct indices. 
-void LinearIncrementalEvaluator::Update( - int var, int64_t delta, - std::vector>* violation_deltas) { - DCHECK(!creation_phase_); - DCHECK_NE(delta, 0); - if (var >= columns_.size()) return; - - const SpanData& data = columns_[var]; - int i = data.start; - for (int k = 0; k < data.num_pos_literal; ++k, ++i) { - const int c = ct_buffer_[i]; - const int64_t v0 = Violation(c); - if (delta == 1) { - num_false_enforcement_[c]--; - DCHECK_GE(num_false_enforcement_[c], 0); - } else { - num_false_enforcement_[c]++; - } - const int64_t v1 = Violation(c); - is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back({c, v1 - v0}); - } - } - for (int k = 0; k < data.num_neg_literal; ++k, ++i) { - const int c = ct_buffer_[i]; - const int64_t v0 = Violation(c); - if (delta == -1) { - num_false_enforcement_[c]--; - DCHECK_GE(num_false_enforcement_[c], 0); - } else { - num_false_enforcement_[c]++; - } - const int64_t v1 = Violation(c); - is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back({c, v1 - v0}); - } - } - int j = data.linear_start; - for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { - const int c = ct_buffer_[i]; - const int64_t v0 = Violation(c); - const int64_t coeff = coeff_buffer_[j]; - activities_[c] += coeff * delta; - distances_[c] = domains_[c].Distance(activities_[c]); - const int64_t v1 = Violation(c); - is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back({c, v1 - v0}); - } - } -} - void LinearIncrementalEvaluator::ClearAffectedVariables() { - in_last_affected_variables_.resize(columns_.size(), false); - for (const int var : last_affected_variables_) { - in_last_affected_variables_[var] = false; + if (10 * last_affected_variables_.size() < columns_.size()) { + // Sparse. 
+ in_last_affected_variables_.resize(columns_.size(), false); + for (const int var : last_affected_variables_) { + in_last_affected_variables_[var] = false; + } + } else { + // Dense. + in_last_affected_variables_.assign(columns_.size(), false); } last_affected_variables_.clear(); DCHECK(std::all_of(in_last_affected_variables_.begin(), @@ -284,7 +289,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnWeightUpdate( if (enforcement_change != 0.0) { int i = data.start; const int end = data.num_pos_literal + data.num_neg_literal; - dtime_ += end; + num_ops_ += end; for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; if (!in_last_affected_variables_[var]) { @@ -298,23 +303,43 @@ void LinearIncrementalEvaluator::UpdateScoreOnWeightUpdate( } // Update linear part. - int i = data.start + data.num_pos_literal + data.num_neg_literal; - int j = data.linear_start; - dtime_ += 2 * data.num_linear_entries; - const int64_t old_distance = distances_[c]; - for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { - const int var = row_var_buffer_[i]; - const int64_t coeff = row_coeff_buffer_[j]; - const int64_t new_distance = - domains_[c].Distance(activities_[c] + coeff * jump_deltas[var]); - if (!in_last_affected_variables_[var]) { - var_to_score_change[var] = - static_cast(new_distance - old_distance); - in_last_affected_variables_[var] = true; - last_affected_variables_.push_back(var); - } else { - var_to_score_change[var] += - static_cast(new_distance - old_distance); + if (data.num_linear_entries > 0) { + const int* row_vars = &row_var_buffer_[data.start + data.num_pos_literal + + data.num_neg_literal]; + const int64_t* row_coeffs = &row_coeff_buffer_[data.linear_start]; + num_ops_ += 2 * data.num_linear_entries; + + // Computing general Domain distance is slow. + // TODO(user): optimize even more for one sided constraints. + // Note(user): I tried to factor the two usage of this, but it is slower. 
+ const Domain& rhs = domains_[c]; + const int64_t rhs_min = rhs.Min(); + const int64_t rhs_max = rhs.Max(); + const bool is_simple = rhs.NumIntervals() == 2; + const auto violation = [&rhs, rhs_min, rhs_max, is_simple](int64_t v) { + if (v >= rhs_max) { + return v - rhs_max; + } else if (v <= rhs_min) { + return rhs_min - v; + } else { + return is_simple ? int64_t{0} : rhs.Distance(v); + } + }; + + const int64_t old_distance = distances_[c]; + const int64_t activity = activities_[c]; + for (int k = 0; k < data.num_linear_entries; ++k) { + const int var = row_vars[k]; + const int64_t coeff = row_coeffs[k]; + const int64_t diff = + violation(activity + coeff * jump_deltas[var]) - old_distance; + if (!in_last_affected_variables_[var]) { + var_to_score_change[var] = static_cast(diff); + in_last_affected_variables_[var] = true; + last_affected_variables_.push_back(var); + } else { + var_to_score_change[var] += static_cast(diff); + } } } } @@ -331,7 +356,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyEnforced( if (weight_time_violation > 0.0) { int i = data.start; const int end = data.num_pos_literal + data.num_neg_literal; - dtime_ += end; + num_ops_ += end; for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; jump_scores[var] -= weight_time_violation; @@ -346,7 +371,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyEnforced( { int i = data.start + data.num_pos_literal + data.num_neg_literal; int j = data.linear_start; - dtime_ += 2 * data.num_linear_entries; + num_ops_ += 2 * data.num_linear_entries; const int64_t old_distance = distances_[c]; for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { const int var = row_var_buffer_[i]; @@ -376,7 +401,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyUnenforced( if (weight_time_violation > 0.0) { int i = data.start; const int end = data.num_pos_literal + data.num_neg_literal; - dtime_ += end; + num_ops_ += end; for (int k = 0; k < end; ++k, ++i) { const int var = 
row_var_buffer_[i]; jump_scores[var] += weight_time_violation; @@ -387,7 +412,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnNewlyUnenforced( { int i = data.start + data.num_pos_literal + data.num_neg_literal; int j = data.linear_start; - dtime_ += 2 * data.num_linear_entries; + num_ops_ += 2 * data.num_linear_entries; const int64_t old_distance = distances_[c]; for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { const int var = row_var_buffer_[i]; @@ -413,7 +438,7 @@ void LinearIncrementalEvaluator::UpdateScoreOfEnforcementIncrease( const SpanData& data = rows_[c]; int i = data.start; - dtime_ += data.num_pos_literal; + num_ops_ += data.num_pos_literal; for (int k = 0; k < data.num_pos_literal; ++k, ++i) { const int var = row_var_buffer_[i]; if (jump_deltas[var] == 1) { @@ -424,7 +449,7 @@ void LinearIncrementalEvaluator::UpdateScoreOfEnforcementIncrease( } } } - dtime_ += data.num_neg_literal; + num_ops_ += data.num_neg_literal; for (int k = 0; k < data.num_neg_literal; ++k, ++i) { const int var = row_var_buffer_[i]; if (jump_deltas[var] == -1) { @@ -475,7 +500,7 @@ void LinearIncrementalEvaluator::UpdateScoreOnActivityChange( if (delta != 0.0) { int i = data.start; const int end = data.num_pos_literal + data.num_neg_literal; - dtime_ += end; + num_ops_ += end; for (int k = 0; k < end; ++k, ++i) { const int var = row_var_buffer_[i]; jump_scores[var] += delta; @@ -492,29 +517,48 @@ void LinearIncrementalEvaluator::UpdateScoreOnActivityChange( if (min_range >= domains_[c].Max() || max_range <= domains_[c].Min()) return; // Update linear part. 
- { - int i = data.start + data.num_pos_literal + data.num_neg_literal; - int j = data.linear_start; - dtime_ += 2 * data.num_linear_entries; + if (data.num_linear_entries > 0) { + const int* row_vars = &row_var_buffer_[data.start + data.num_pos_literal + + data.num_neg_literal]; + const int64_t* row_coeffs = &row_coeff_buffer_[data.linear_start]; + num_ops_ += 2 * data.num_linear_entries; + + // Computing general Domain distance is slow. + // TODO(user): optimize even more for one sided constraints. + // Note(user): I tried to factor the two usage of this, but it is slower. const Domain& rhs = domains_[c]; + const int64_t rhs_min = rhs.Min(); + const int64_t rhs_max = rhs.Max(); + const bool is_simple = rhs.NumIntervals() == 2; + const auto violation = [&rhs, rhs_min, rhs_max, is_simple](int64_t v) { + if (v >= rhs_max) { + return v - rhs_max; + } else if (v <= rhs_min) { + return rhs_min - v; + } else { + return is_simple ? int64_t{0} : rhs.Distance(v); + } + }; + const int64_t old_a_minus_new_a = - distances_[c] - rhs.Distance(new_activity); - for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { - const int var = row_var_buffer_[i]; - const int64_t impact = row_coeff_buffer_[j] * jump_deltas[var]; - const int64_t old_b = rhs.Distance(old_activity + impact); - const int64_t new_b = rhs.Distance(new_activity + impact); + distances_[c] - domains_[c].Distance(new_activity); + for (int k = 0; k < data.num_linear_entries; ++k) { + const int var = row_vars[k]; + const int64_t impact = row_coeffs[k] * jump_deltas[var]; + const int64_t old_b = violation(old_activity + impact); + const int64_t new_b = violation(new_activity + impact); // The old score was: // weight * static_cast(old_b - old_a); // the new score is // weight * static_cast(new_b - new_a); so the diff is: - // + // weight * static_cast(new_b - new_a - old_b + old_a) + const int64_t diff = old_a_minus_new_a + new_b - old_b; + // TODO(user): If a variable is at its lower (resp. 
upper) bound, then // we know that the score will always move in the same direction, so we // might skip the last_affected_variables_ update. - jump_scores[var] += - weight * static_cast(old_a_minus_new_a + new_b - old_b); + jump_scores[var] += weight * static_cast(diff); if (!in_last_affected_variables_[var]) { in_last_affected_variables_[var] = true; last_affected_variables_.push_back(var); @@ -523,16 +567,18 @@ void LinearIncrementalEvaluator::UpdateScoreOnActivityChange( } } +// Note that the code assumes that a column has no duplicates ct indices. void LinearIncrementalEvaluator::UpdateVariableAndScores( int var, int64_t delta, absl::Span weights, absl::Span jump_deltas, absl::Span jump_scores, - std::vector>* violation_deltas) { + std::vector* constraints_with_changed_violation) { DCHECK(!creation_phase_); DCHECK_NE(delta, 0); if (var >= columns_.size()) return; const SpanData& data = columns_[var]; int i = data.start; + num_ops_ += data.num_pos_literal; for (int k = 0; k < data.num_pos_literal; ++k, ++i) { const int c = ct_buffer_[i]; const int64_t v0 = Violation(c); @@ -560,10 +606,11 @@ void LinearIncrementalEvaluator::UpdateVariableAndScores( } const int64_t v1 = Violation(c); is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back(std::make_pair(c, v1 - v0)); + if (v1 != v0) { + constraints_with_changed_violation->push_back(c); } } + num_ops_ += data.num_neg_literal; for (int k = 0; k < data.num_neg_literal; ++k, ++i) { const int c = ct_buffer_[i]; const int64_t v0 = Violation(c); @@ -591,11 +638,12 @@ void LinearIncrementalEvaluator::UpdateVariableAndScores( } const int64_t v1 = Violation(c); is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back(std::make_pair(c, v1 - v0)); + if (v1 != v0) { + constraints_with_changed_violation->push_back(c); } } int j = data.linear_start; + num_ops_ += 2 * data.num_linear_entries; for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { 
const int c = ct_buffer_[i]; const int64_t v0 = Violation(c); @@ -621,8 +669,8 @@ void LinearIncrementalEvaluator::UpdateVariableAndScores( distances_[c] = domains_[c].Distance(activities_[c]); const int64_t v1 = Violation(c); is_violated_[c] = v1 > 0; - if (violation_deltas != nullptr) { - violation_deltas->push_back(std::make_pair(c, v1 - v0)); + if (v1 != v0) { + constraints_with_changed_violation->push_back(c); } } } @@ -671,7 +719,7 @@ double LinearIncrementalEvaluator::WeightedViolationDelta( int i = data.start; double result = 0.0; - dtime_ += data.num_pos_literal; + num_ops_ += data.num_pos_literal; for (int k = 0; k < data.num_pos_literal; ++k, ++i) { const int c = ct_buffer_[i]; if (num_false_enforcement_[c] == 0) { @@ -685,7 +733,7 @@ double LinearIncrementalEvaluator::WeightedViolationDelta( } } - dtime_ += data.num_neg_literal; + num_ops_ += data.num_neg_literal; for (int k = 0; k < data.num_neg_literal; ++k, ++i) { const int c = ct_buffer_[i]; if (num_false_enforcement_[c] == 0) { @@ -700,7 +748,7 @@ double LinearIncrementalEvaluator::WeightedViolationDelta( } int j = data.linear_start; - dtime_ += 2 * data.num_linear_entries; + num_ops_ += 2 * data.num_linear_entries; for (int k = 0; k < data.num_linear_entries; ++k, ++i, ++j) { const int c = ct_buffer_[i]; if (num_false_enforcement_[c] > 0) continue; @@ -899,6 +947,7 @@ void LinearIncrementalEvaluator::PrecomputeCompactView( cached_deltas_.assign(columns_.size(), 0); cached_scores_.assign(columns_.size(), 0); + last_affected_variables_.ClearAndReserve(columns_.size()); } bool LinearIncrementalEvaluator::ViolationChangeIsConvex(int var) const { @@ -910,9 +959,6 @@ bool LinearIncrementalEvaluator::ViolationChangeIsConvex(int var) const { // ----- CompiledConstraint ----- -CompiledConstraint::CompiledConstraint(const ConstraintProto& ct_proto) - : ct_proto_(ct_proto) {} - void CompiledConstraint::InitializeViolation( absl::Span solution) { violation_ = ComputeViolation(solution); @@ -924,16 +970,36 @@ 
void CompiledConstraint::PerformMove( violation_ += ViolationDelta(var, old_value, solution_with_new_value); } -int64_t CompiledConstraint::ViolationDelta(int /*var*/, int64_t /*old_value*/, +int64_t CompiledConstraint::ViolationDelta(int, int64_t, absl::Span solution) { return ComputeViolation(solution) - violation_; } +// ----- CompiledConstraintWithProto ----- + +CompiledConstraintWithProto::CompiledConstraintWithProto( + const ConstraintProto& ct_proto) + : ct_proto_(ct_proto) {} + +std::vector CompiledConstraintWithProto::UsedVariables( + const CpModelProto& model_proto) const { + std::vector result = sat::UsedVariables(ct_proto_); + for (const int i_var : UsedIntervals(ct_proto_)) { + const ConstraintProto& interval_proto = model_proto.constraints(i_var); + for (const int var : sat::UsedVariables(interval_proto)) { + result.push_back(var); + } + } + gtl::STLSortAndRemoveDuplicates(&result); + result.shrink_to_fit(); + return result; +} + // ----- CompiledBoolXorConstraint ----- CompiledBoolXorConstraint::CompiledBoolXorConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledBoolXorConstraint::ComputeViolation( absl::Span solution) { @@ -954,7 +1020,7 @@ int64_t CompiledBoolXorConstraint::ViolationDelta( CompiledLinMaxConstraint::CompiledLinMaxConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledLinMaxConstraint::ComputeViolation( absl::Span solution) { @@ -972,7 +1038,7 @@ int64_t CompiledLinMaxConstraint::ComputeViolation( CompiledIntProdConstraint::CompiledIntProdConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledIntProdConstraint::ComputeViolation( absl::Span solution) { @@ -989,7 +1055,7 @@ int64_t CompiledIntProdConstraint::ComputeViolation( CompiledIntDivConstraint::CompiledIntDivConstraint( 
const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledIntDivConstraint::ComputeViolation( absl::Span solution) { @@ -1005,7 +1071,7 @@ int64_t CompiledIntDivConstraint::ComputeViolation( CompiledIntModConstraint::CompiledIntModConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledIntModConstraint::ComputeViolation( absl::Span solution) { @@ -1034,7 +1100,7 @@ int64_t CompiledIntModConstraint::ComputeViolation( CompiledAllDiffConstraint::CompiledAllDiffConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) {} + : CompiledConstraintWithProto(ct_proto) {} int64_t CompiledAllDiffConstraint::ComputeViolation( absl::Span solution) { @@ -1061,69 +1127,66 @@ int64_t CompiledAllDiffConstraint::ComputeViolation( return violation; } -// ----- CompiledNoOverlapConstraint ----- +// ----- NoOverlapBetweenTwoIntervals ----- -namespace { -int64_t ComputeOverloadArea( - absl::Span intervals, - absl::Span demands, - const CpModelProto& cp_model, const absl::Span solution, - int64_t max_capacity, std::vector>& events) { - events.clear(); - for (int i = 0; i < intervals.size(); ++i) { - const int i_var = intervals[i]; - const ConstraintProto& interval_proto = cp_model.constraints(i_var); - if (!interval_proto.enforcement_literal().empty() && - !LiteralValue(interval_proto.enforcement_literal(0), solution)) { - continue; - } +NoOverlapBetweenTwoIntervals::NoOverlapBetweenTwoIntervals( + int interval_0, int interval_1, const CpModelProto& cp_model) { + const ConstraintProto& ct0 = cp_model.constraints(interval_0); + const ConstraintProto& ct1 = cp_model.constraints(interval_1); - const int64_t demand = - demands.empty() ? 
1 : ExprValue(*demands[i], solution); - if (demand == 0) continue; - - const int64_t start = - ExprValue(interval_proto.interval().start(), solution); - const int64_t size = ExprValue(interval_proto.interval().size(), solution); - const int64_t end = ExprValue(interval_proto.interval().end(), solution); - const int64_t max_end = std::max(start + size, end); - if (start >= max_end) continue; - - events.emplace_back(start, demand); - events.emplace_back(max_end, -demand); + // The more compact the better, hence the size + int[]. + num_enforcements_ = + ct0.enforcement_literal().size() + ct1.enforcement_literal().size(); + if (num_enforcements_ > 0) { + enforcements_.reset(new int[num_enforcements_]); + int i = 0; + for (const int lit : ct0.enforcement_literal()) enforcements_[i++] = lit; + for (const int lit : ct1.enforcement_literal()) enforcements_[i++] = lit; } - if (events.empty()) return 0; - std::sort(events.begin(), events.end(), - [](const std::pair& e1, - const std::pair& e2) { - return e1.first < e2.first; - }); - - int64_t overload = 0; - int64_t current_load = 0; - int64_t previous_time = events.front().first; - for (int i = 0; i < events.size();) { - // At this point, current_load is the load at previous_time. - const int64_t time = events[i].first; - if (current_load > max_capacity) { - overload = CapAdd( - overload, CapProd(current_load - max_capacity, time - previous_time)); - } - while (i < events.size() && events[i].first == time) { - current_load += events[i].second; - i++; - } - DCHECK_GE(current_load, 0); - previous_time = time; - } - DCHECK_EQ(current_load, 0); - return overload; + // We prefer to use start + size instead of end so that moving "start" moves + // the whole interval around (for the non-fixed duration case). 
+ end_minus_start_1_ = + ExprDiff(LinearExprSum(ct0.interval().start(), ct0.interval().size()), + ct1.interval().start()); + end_minus_start_2_ = + ExprDiff(LinearExprSum(ct1.interval().start(), ct1.interval().size()), + ct0.interval().start()); } -int64_t ComputeOverlap(const ConstraintProto& interval1, - const ConstraintProto& interval2, - absl::Span solution) { +// Same as NoOverlapMinRepairDistance(). +int64_t NoOverlapBetweenTwoIntervals::ComputeViolationInternal( + absl::Span solution) { + for (int i = 0; i < num_enforcements_; ++i) { + if (!LiteralValue(enforcements_[i], solution)) return 0; + } + const int64_t diff1 = ExprValue(end_minus_start_1_, solution); + const int64_t diff2 = ExprValue(end_minus_start_2_, solution); + return std::max(std::min(diff1, diff2), int64_t{0}); +} + +std::vector NoOverlapBetweenTwoIntervals::UsedVariables( + const CpModelProto& /*model_proto*/) const { + std::vector result; + for (int i = 0; i < num_enforcements_; ++i) { + result.push_back(PositiveRef(enforcements_[i])); + } + for (const int var : end_minus_start_1_.vars()) { + result.push_back(PositiveRef(var)); + } + for (const int var : end_minus_start_2_.vars()) { + result.push_back(PositiveRef(var)); + } + gtl::STLSortAndRemoveDuplicates(&result); + result.shrink_to_fit(); + return result; +} + +// ----- CompiledNoOverlap2dConstraint ----- + +int64_t OverlapOfTwoIntervals(const ConstraintProto& interval1, + const ConstraintProto& interval2, + absl::Span solution) { for (const int lit : interval1.enforcement_literal()) { if (!LiteralValue(lit, solution)) return 0; } @@ -1132,14 +1195,10 @@ int64_t ComputeOverlap(const ConstraintProto& interval1, } const int64_t start1 = ExprValue(interval1.interval().start(), solution); - const int64_t size1 = ExprValue(interval1.interval().size(), solution); - const int64_t end1 = - std::min(start1 + size1, ExprValue(interval1.interval().end(), solution)); + const int64_t end1 = ExprValue(interval1.interval().end(), solution); const 
int64_t start2 = ExprValue(interval2.interval().start(), solution); - const int64_t size2 = ExprValue(interval2.interval().size(), solution); - const int64_t end2 = - std::min(start2 + size2, ExprValue(interval2.interval().end(), solution)); + const int64_t end2 = ExprValue(interval2.interval().end(), solution); if (start1 >= end2 || start2 >= end1) return 0; // Disjoint. @@ -1150,69 +1209,58 @@ int64_t ComputeOverlap(const ConstraintProto& interval1, int64_t{1}); } -} // namespace - -CompiledNoOverlapConstraint::CompiledNoOverlapConstraint( - const ConstraintProto& ct_proto, const CpModelProto& cp_model) - : CompiledConstraint(ct_proto), cp_model_(cp_model) {} - -int64_t CompiledNoOverlapConstraint::ComputeViolation( - absl::Span solution) { - DCHECK_GE(ct_proto().no_overlap().intervals_size(), 2); - if (ct_proto().no_overlap().intervals_size() == 2) { - return ComputeOverlap( - cp_model_.constraints(ct_proto().no_overlap().intervals(0)), - cp_model_.constraints(ct_proto().no_overlap().intervals(1)), solution); +int64_t NoOverlapMinRepairDistance(const ConstraintProto& interval1, + const ConstraintProto& interval2, + absl::Span solution) { + for (const int lit : interval1.enforcement_literal()) { + if (!LiteralValue(lit, solution)) return 0; } - return ComputeOverloadArea(ct_proto().no_overlap().intervals(), {}, cp_model_, - solution, 1, events_); + for (const int lit : interval2.enforcement_literal()) { + if (!LiteralValue(lit, solution)) return 0; + } + + const int64_t start1 = ExprValue(interval1.interval().start(), solution); + const int64_t end1 = ExprValue(interval1.interval().end(), solution); + + const int64_t start2 = ExprValue(interval2.interval().start(), solution); + const int64_t end2 = ExprValue(interval2.interval().end(), solution); + + return std::max(std::min(end2 - start1, end1 - start2), int64_t{0}); } -// ----- CompiledCumulativeConstraint ----- - -CompiledCumulativeConstraint::CompiledCumulativeConstraint( - const ConstraintProto& ct_proto, 
const CpModelProto& cp_model) - : CompiledConstraint(ct_proto), cp_model_(cp_model) {} - -int64_t CompiledCumulativeConstraint::ComputeViolation( - absl::Span solution) { - return ComputeOverloadArea( - ct_proto().cumulative().intervals(), ct_proto().cumulative().demands(), - cp_model_, solution, - ExprValue(ct_proto().cumulative().capacity(), solution), events_); -} - -// ----- CompiledCumulativeConstraint ----- - CompiledNoOverlap2dConstraint::CompiledNoOverlap2dConstraint( const ConstraintProto& ct_proto, const CpModelProto& cp_model) - : CompiledConstraint(ct_proto), cp_model_(cp_model) {} - -int64_t CompiledNoOverlap2dConstraint::ComputeOverlapArea( - absl::Span solution, int i, int j) const { - const int64_t x_overlap = ComputeOverlap( - cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(i)), - cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(j)), - solution); - if (x_overlap > 0) { - return x_overlap * - ComputeOverlap( - cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(i)), - cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(j)), - solution); - } else { - return 0; - } -} + : CompiledConstraintWithProto(ct_proto), cp_model_(cp_model) {} int64_t CompiledNoOverlap2dConstraint::ComputeViolation( absl::Span solution) { DCHECK_GE(ct_proto().no_overlap_2d().x_intervals_size(), 2); const int size = ct_proto().no_overlap_2d().x_intervals_size(); + int64_t violation = 0; for (int i = 0; i + 1 < size; ++i) { + const ConstraintProto& x_i = + cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(i)); + const ConstraintProto& y_i = + cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(i)); for (int j = i + 1; j < size; ++j) { - violation += ComputeOverlapArea(solution, i, j); + const ConstraintProto& x_j = + cp_model_.constraints(ct_proto().no_overlap_2d().x_intervals(j)); + const ConstraintProto& y_j = + cp_model_.constraints(ct_proto().no_overlap_2d().y_intervals(j)); + + // TODO(user): Experiment with + // 
violation += + // std::max(std::min(NoOverlapMinRepairDistance(x_i, x_j, solution), + // NoOverlapMinRepairDistance(y_i, y_j, solution)), + // int64_t{0}); + // Currently, the effect is unclear on 2d packing problems. + violation += + std::max(std::min(NoOverlapMinRepairDistance(x_i, x_j, solution) * + OverlapOfTwoIntervals(y_i, y_j, solution), + NoOverlapMinRepairDistance(y_i, y_j, solution) * + OverlapOfTwoIntervals(x_i, x_j, solution)), + int64_t{0}); } } return violation; @@ -1230,12 +1278,17 @@ int64_t CompiledNoOverlap2dConstraint::ComputeViolation( // // The only difference between single and multi circuit is flow balance at the // depot, so we use the same compiled constraint for both. -class CompiledCircuitConstraint : public CompiledConstraint { +class CompiledCircuitConstraint : public CompiledConstraintWithProto { public: explicit CompiledCircuitConstraint(const ConstraintProto& ct_proto); ~CompiledCircuitConstraint() override = default; int64_t ComputeViolation(absl::Span solution) override; + void PerformMove(int var, int64_t old_value, + absl::Span new_solution) override; + int64_t ViolationDelta( + int var, int64_t old_value, + absl::Span solution_with_new_value) override; private: struct SccOutput { @@ -1246,16 +1299,20 @@ class CompiledCircuitConstraint : public CompiledConstraint { std::vector skipped; std::vector root; }; - void UpdateGraph(absl::Span solution); + void InitGraph(absl::Span solution); + bool UpdateGraph(int var, int64_t value); + int64_t ViolationForCurrentGraph(); + + absl::flat_hash_map> arcs_by_lit_; absl::Span literals_; absl::Span tails_; absl::Span heads_; // Stores the currently active arcs per tail node. 
- std::vector> graph_; + std::vector> graph_; SccOutput sccs_; + SccOutput committed_sccs_; std::vector has_in_arc_; - StronglyConnectedComponentsFinder>, - SccOutput> + StronglyConnectedComponentsFinder>, SccOutput> scc_finder_; }; @@ -1263,13 +1320,12 @@ void CompiledCircuitConstraint::SccOutput::emplace_back(int const* start, int const* end) { const int root_node = *start; const int size = end - start; - if (size == 1) { - skipped[root_node] = true; - } else { + if (size > 1) { ++num_components; } for (; start != end; ++start) { root[*start] = root_node; + skipped[*start] = (size == 1); } } void CompiledCircuitConstraint::SccOutput::reset(int num_nodes) { @@ -1282,7 +1338,7 @@ void CompiledCircuitConstraint::SccOutput::reset(int num_nodes) { CompiledCircuitConstraint::CompiledCircuitConstraint( const ConstraintProto& ct_proto) - : CompiledConstraint(ct_proto) { + : CompiledConstraintWithProto(ct_proto) { const bool routes = ct_proto.has_routes(); tails_ = routes ? ct_proto.routes().tails() : ct_proto.circuit().tails(); heads_ = absl::MakeConstSpan(routes ? ct_proto.routes().heads() @@ -1290,23 +1346,73 @@ CompiledCircuitConstraint::CompiledCircuitConstraint( literals_ = absl::MakeConstSpan(routes ? ct_proto.routes().literals() : ct_proto.circuit().literals()); graph_.resize(*absl::c_max_element(tails_) + 1); + for (int i = 0; i < literals_.size(); ++i) { + arcs_by_lit_[literals_[i]].push_back(i); + } } -void CompiledCircuitConstraint::UpdateGraph( - absl::Span solution) { - for (std::vector& edges : graph_) { +void CompiledCircuitConstraint::InitGraph(absl::Span solution) { + for (DenseSet& edges : graph_) { edges.clear(); } for (int i = 0; i < tails_.size(); ++i) { if (!LiteralValue(literals_[i], solution)) continue; - graph_[tails_[i]].push_back(heads_[i]); + graph_[tails_[i]].insert(heads_[i]); } } + +bool CompiledCircuitConstraint::UpdateGraph(int var, int64_t value) { + bool needs_update = false; + const int enabled_lit = + value != 0 ? 
PositiveRef(var) : NegatedRef(PositiveRef(var)); + const int disabled_lit = NegatedRef(enabled_lit); + for (const int arc : arcs_by_lit_[disabled_lit]) { + const int tail = tails_[arc]; + const int head = heads_[arc]; + // Removing a self arc cannot change violation. + needs_update = needs_update || tail != head; + graph_[tails_[arc]].erase(heads_[arc]); + } + for (const int arc : arcs_by_lit_[enabled_lit]) { + const int tail = tails_[arc]; + const int head = heads_[arc]; + // Adding an arc can only change violation if it connects new SCCs. + needs_update = needs_update || + committed_sccs_.root[tail] != committed_sccs_.root[head]; + graph_[tails_[arc]].insert(heads_[arc]); + } + return needs_update; +} + +void CompiledCircuitConstraint::PerformMove( + int var, int64_t, absl::Span new_solution) { + UpdateGraph(var, new_solution[var]); + violation_ = ViolationForCurrentGraph(); + std::swap(committed_sccs_, sccs_); +} + int64_t CompiledCircuitConstraint::ComputeViolation( absl::Span solution) { + InitGraph(solution); + int64_t result = ViolationForCurrentGraph(); + std::swap(committed_sccs_, sccs_); + return result; +} + +int64_t CompiledCircuitConstraint::ViolationDelta( + int var, int64_t old_value, + absl::Span solution_with_new_value) { + int64_t result = 0; + if (UpdateGraph(var, solution_with_new_value[var])) { + result = ViolationForCurrentGraph() - violation_; + } + UpdateGraph(var, old_value); + return result; +} + +int64_t CompiledCircuitConstraint::ViolationForCurrentGraph() { const int num_nodes = graph_.size(); sccs_.reset(num_nodes); - UpdateGraph(solution); scc_finder_.FindStronglyConnectedComponents(num_nodes, graph_, &sccs_); // Skipping all nodes causes off-by-one errors below, so it's simpler to // handle explicitly. 
@@ -1386,13 +1492,14 @@ LsEvaluator::LsEvaluator(const CpModelProto& cp_model, : cp_model_(cp_model), params_(params) { var_to_constraints_.resize(cp_model_.variables_size()); jump_value_optimal_.resize(cp_model_.variables_size(), true); - num_violated_constraint_per_var_.assign(cp_model_.variables_size(), 0); + num_violated_constraint_per_var_ignoring_objective_.assign( + cp_model_.variables_size(), 0); std::vector ignored_constraints(cp_model_.constraints_size(), false); std::vector additional_constraints; CompileConstraintsAndObjective(ignored_constraints, additional_constraints); BuildVarConstraintGraph(); - pos_in_violated_constraints_.assign(NumEvaluatorConstraints(), -1); + violated_constraints_.reserve(NumEvaluatorConstraints()); } LsEvaluator::LsEvaluator( @@ -1402,10 +1509,11 @@ LsEvaluator::LsEvaluator( : cp_model_(cp_model), params_(params) { var_to_constraints_.resize(cp_model_.variables_size()); jump_value_optimal_.resize(cp_model_.variables_size(), true); - num_violated_constraint_per_var_.assign(cp_model_.variables_size(), 0); + num_violated_constraint_per_var_ignoring_objective_.assign( + cp_model_.variables_size(), 0); CompileConstraintsAndObjective(ignored_constraints, additional_constraints); BuildVarConstraintGraph(); - pos_in_violated_constraints_.assign(NumEvaluatorConstraints(), -1); + violated_constraints_.reserve(NumEvaluatorConstraints()); } void LsEvaluator::BuildVarConstraintGraph() { @@ -1415,16 +1523,10 @@ void LsEvaluator::BuildVarConstraintGraph() { // Build the var <-> constraint graph. 
for (int ct_index = 0; ct_index < constraints_.size(); ++ct_index) { - for (const int var : UsedVariables(constraints_[ct_index]->ct_proto())) { + constraint_to_vars_[ct_index] = + constraints_[ct_index]->UsedVariables(cp_model_); + for (const int var : constraint_to_vars_[ct_index]) { var_to_constraints_[var].push_back(ct_index); - constraint_to_vars_[ct_index].push_back(var); - } - for (const int i_var : UsedIntervals(constraints_[ct_index]->ct_proto())) { - const ConstraintProto& interval_proto = cp_model_.constraints(i_var); - for (const int var : UsedVariables(interval_proto)) { - var_to_constraints_[var].push_back(ct_index); - constraint_to_vars_[ct_index].push_back(var); - } } } @@ -1550,57 +1652,115 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) { break; } case ConstraintProto::ConstraintCase::kNoOverlap: { - if (ct.no_overlap().intervals_size() <= 1) break; - if (ct.no_overlap().intervals_size() > - params_.feasibility_jump_max_expanded_constraint_size()) { - CompiledNoOverlapConstraint* no_overlap = - new CompiledNoOverlapConstraint(ct, cp_model_); - constraints_.emplace_back(no_overlap); + const int size = ct.no_overlap().intervals_size(); + if (size <= 1) break; + if (size > params_.feasibility_jump_max_expanded_constraint_size()) { + // Similar code to the kCumulative constraint. + // The violation will be the area above the capacity. 
+ LinearExpressionProto one; + one.set_offset(1); + std::vector> is_active; + std::vector times; + std::vector demands; + const int num_intervals = ct.no_overlap().intervals().size(); + for (int i = 0; i < num_intervals; ++i) { + const ConstraintProto& interval_ct = + cp_model_.constraints(ct.no_overlap().intervals(i)); + if (interval_ct.enforcement_literal().empty()) { + is_active.push_back(std::nullopt); + is_active.push_back(std::nullopt); + } else { + CHECK_EQ(interval_ct.enforcement_literal().size(), 1); + is_active.push_back(interval_ct.enforcement_literal(0)); + is_active.push_back(interval_ct.enforcement_literal(0)); + } + + times.push_back(interval_ct.interval().start()); + times.push_back(LinearExprSum(interval_ct.interval().start(), + interval_ct.interval().size())); + demands.push_back(one); + demands.push_back(NegatedLinearExpression(one)); + } + constraints_.emplace_back(new CompiledReservoirConstraint( + std::move(one), std::move(is_active), std::move(times), + std::move(demands))); } else { // We expand the no_overlap constraints into a quadratic number of // disjunctions. 
- for (int i = 0; i + 1 < ct.no_overlap().intervals_size(); ++i) { + for (int i = 0; i + 1 < size; ++i) { const IntervalConstraintProto& interval_i = cp_model_.constraints(ct.no_overlap().intervals(i)).interval(); const int64_t min_start_i = ExprMin(interval_i.start(), cp_model_); const int64_t max_end_i = ExprMax(interval_i.end(), cp_model_); - for (int j = i + 1; j < ct.no_overlap().intervals_size(); ++j) { + for (int j = i + 1; j < size; ++j) { const IntervalConstraintProto& interval_j = cp_model_.constraints(ct.no_overlap().intervals(j)).interval(); const int64_t min_start_j = ExprMin(interval_j.start(), cp_model_); const int64_t max_end_j = ExprMax(interval_j.end(), cp_model_); if (min_start_i >= max_end_j || min_start_j >= max_end_i) continue; - ConstraintProto* disj = expanded_constraints_.add_constraints(); - disj->mutable_no_overlap()->add_intervals( - ct.no_overlap().intervals(i)); - disj->mutable_no_overlap()->add_intervals( - ct.no_overlap().intervals(j)); - CompiledNoOverlapConstraint* no_overlap = - new CompiledNoOverlapConstraint(*disj, cp_model_); - constraints_.emplace_back(no_overlap); + + constraints_.emplace_back(new NoOverlapBetweenTwoIntervals( + ct.no_overlap().intervals(i), ct.no_overlap().intervals(j), + cp_model_)); } } } break; } case ConstraintProto::ConstraintCase::kCumulative: { - constraints_.emplace_back( - new CompiledCumulativeConstraint(ct, cp_model_)); + LinearExpressionProto capacity = ct.cumulative().capacity(); + std::vector> is_active; + std::vector times; + std::vector demands; + const int num_intervals = ct.cumulative().intervals().size(); + for (int i = 0; i < num_intervals; ++i) { + const ConstraintProto& interval_ct = + cp_model_.constraints(ct.cumulative().intervals(i)); + if (interval_ct.enforcement_literal().empty()) { + is_active.push_back(std::nullopt); + is_active.push_back(std::nullopt); + } else { + CHECK_EQ(interval_ct.enforcement_literal().size(), 1); + is_active.push_back(interval_ct.enforcement_literal(0)); + 
is_active.push_back(interval_ct.enforcement_literal(0)); + } + + // Start. + times.push_back(interval_ct.interval().start()); + demands.push_back(ct.cumulative().demands(i)); + + // End. + // I tried 3 alternatives: end, max(end, start+size) and just start + + // size. The most performing one was "start + size" on the multi-mode + // RCPSP. + // + // Note that for fixed size, this do not matter. It is easy enough to + // try any expression by creating a small wrapper class to use instead + // of a LinearExpressionProto for time. + times.push_back(LinearExprSum(interval_ct.interval().start(), + interval_ct.interval().size())); + demands.push_back(NegatedLinearExpression(ct.cumulative().demands(i))); + } + + constraints_.emplace_back(new CompiledReservoirConstraint( + std::move(capacity), std::move(is_active), std::move(times), + std::move(demands))); break; } case ConstraintProto::ConstraintCase::kNoOverlap2D: { const auto& x_intervals = ct.no_overlap_2d().x_intervals(); const auto& y_intervals = ct.no_overlap_2d().y_intervals(); - if (x_intervals.size() <= 1) break; - if (x_intervals.size() > - params_.feasibility_jump_max_expanded_constraint_size()) { + const int size = x_intervals.size(); + if (size <= 1) break; + if (size == 2 || + size > params_.feasibility_jump_max_expanded_constraint_size()) { CompiledNoOverlap2dConstraint* no_overlap_2d = new CompiledNoOverlap2dConstraint(ct, cp_model_); constraints_.emplace_back(no_overlap_2d); break; } - for (int i = 0; i + 1 < x_intervals.size(); ++i) { + for (int i = 0; i + 1 < size; ++i) { const IntervalConstraintProto& x_interval_i = cp_model_.constraints(x_intervals[i]).interval(); const int64_t x_min_start_i = ExprMin(x_interval_i.start(), cp_model_); @@ -1609,7 +1769,7 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) { cp_model_.constraints(y_intervals[i]).interval(); const int64_t y_min_start_i = ExprMin(y_interval_i.start(), cp_model_); const int64_t y_max_end_i = ExprMax(y_interval_i.end(), 
cp_model_); - for (int j = i + 1; j < x_intervals.size(); ++j) { + for (int j = i + 1; j < size; ++j) { const IntervalConstraintProto& x_interval_j = cp_model_.constraints(x_intervals[j]).interval(); const int64_t x_min_start_j = @@ -1693,65 +1853,58 @@ bool LsEvaluator::ReduceObjectiveBounds(int64_t lb, int64_t ub) { return false; } -void LsEvaluator::OverwriteCurrentSolution(absl::Span solution) { - current_solution_.assign(solution.begin(), solution.end()); -} - -void LsEvaluator::ComputeAllViolations() { +void LsEvaluator::ComputeAllViolations(absl::Span solution) { // Linear constraints. - linear_evaluator_.ComputeInitialActivities(current_solution_); + linear_evaluator_.ComputeInitialActivities(solution); // Generic constraints. for (const auto& ct : constraints_) { - ct->InitializeViolation(current_solution_); + ct->InitializeViolation(solution); } RecomputeViolatedList(/*linear_only=*/false); } -void LsEvaluator::UpdateAllNonLinearViolations() { +void LsEvaluator::ComputeAllNonLinearViolations( + absl::Span solution) { // Generic constraints. 
for (const auto& ct : constraints_) { - ct->InitializeViolation(current_solution_); + ct->InitializeViolation(solution); } } -void LsEvaluator::UpdateNonLinearViolations(int var, int64_t new_value) { - const int64_t old_value = current_solution_[var]; - if (old_value == new_value) return; - - current_solution_[var] = new_value; +void LsEvaluator::UpdateNonLinearViolations( + int var, int64_t old_value, absl::Span new_solution) { for (const int general_ct_index : var_to_constraints_[var]) { const int c = general_ct_index + linear_evaluator_.num_constraints(); const int64_t v0 = constraints_[general_ct_index]->violation(); - constraints_[general_ct_index]->PerformMove(var, old_value, - current_solution_); + constraints_[general_ct_index]->PerformMove(var, old_value, new_solution); const int64_t violation_delta = constraints_[general_ct_index]->violation() - v0; - last_update_violation_changes_.push_back({c, violation_delta}); + if (violation_delta != 0) { + last_update_violation_changes_.push_back(c); + } } - current_solution_[var] = old_value; } -void LsEvaluator::UpdateLinearScores(int var, int64_t value, +void LsEvaluator::UpdateLinearScores(int var, int64_t old_value, + int64_t new_value, absl::Span weights, absl::Span jump_deltas, absl::Span jump_scores) { DCHECK(RefIsPositive(var)); - const int64_t old_value = current_solution_[var]; - if (old_value == value) return; + if (old_value == new_value) return; last_update_violation_changes_.clear(); linear_evaluator_.ClearAffectedVariables(); - linear_evaluator_.UpdateVariableAndScores(var, value - old_value, weights, + linear_evaluator_.UpdateVariableAndScores(var, new_value - old_value, weights, jump_deltas, jump_scores, &last_update_violation_changes_); } -void LsEvaluator::UpdateVariableValue(int var, int64_t new_value) { - current_solution_[var] = new_value; - +void LsEvaluator::UpdateViolatedList() { // Maintain the list of violated constraints. 
- for (const auto [c, delta] : last_update_violation_changes_) { + dtime_ += 1e-8 * last_update_violation_changes_.size(); + for (const int c : last_update_violation_changes_) { UpdateViolatedList(c); } } @@ -1825,40 +1978,42 @@ bool LsEvaluator::IsViolated(int c) const { double LsEvaluator::WeightedViolation(absl::Span weights) const { DCHECK_EQ(weights.size(), NumEvaluatorConstraints()); - double violations = linear_evaluator_.WeightedViolation(weights); + double result = linear_evaluator_.WeightedViolation(weights); const int num_linear_constraints = linear_evaluator_.num_constraints(); for (int c = 0; c < constraints_.size(); ++c) { - violations += static_cast(constraints_[c]->violation()) * - weights[num_linear_constraints + c]; + result += static_cast(constraints_[c]->violation()) * + weights[num_linear_constraints + c]; } - return violations; + return result; } -double LsEvaluator::WeightedNonLinearViolationDelta( - absl::Span weights, int var, int64_t delta) const { - const int64_t old_value = current_solution_[var]; - double violation_delta = 0; - // We change the mutable solution here, are restore it after the evaluation. - current_solution_[var] += delta; +double LsEvaluator::WeightedViolationDelta( + bool linear_only, absl::Span weights, int var, int64_t delta, + absl::Span mutable_solution) const { + double result = linear_evaluator_.WeightedViolationDelta(weights, var, delta); + if (linear_only) return result; + + // We change the mutable solution here, and restore it after the evaluation. + const int64_t old_value = mutable_solution[var]; + mutable_solution[var] += delta; + const int num_linear_constraints = linear_evaluator_.num_constraints(); for (const int ct_index : var_to_constraints_[var]) { - DCHECK_LT(ct_index, constraints_.size()); - const int64_t delta = constraints_[ct_index]->ViolationDelta( - var, old_value, current_solution_); - violation_delta += - static_cast(delta) * weights[ct_index + num_linear_constraints]; - } - // Restore. 
- current_solution_[var] -= delta; - return violation_delta; -} + // We assume linear time delta computation in number of variables. + // TODO(user): refine on a per constraint basis. + dtime_ += 1e-8 * static_cast(constraint_to_vars_[ct_index].size()); -double LsEvaluator::WeightedViolationDelta(absl::Span weights, - int var, int64_t delta) const { - DCHECK_LT(var, current_solution_.size()); - return linear_evaluator_.WeightedViolationDelta(weights, var, delta) + - WeightedNonLinearViolationDelta(weights, var, delta); + DCHECK_LT(ct_index, constraints_.size()); + const int64_t ct_delta = constraints_[ct_index]->ViolationDelta( + var, old_value, mutable_solution); + result += static_cast(ct_delta) * + weights[ct_index + num_linear_constraints]; + } + + // Restore. + mutable_solution[var] = old_value; + return result; } bool LsEvaluator::VariableOnlyInLinearConstraintWithConvexViolationChange( @@ -1867,10 +2022,9 @@ bool LsEvaluator::VariableOnlyInLinearConstraintWithConvexViolationChange( } void LsEvaluator::RecomputeViolatedList(bool linear_only) { - num_violated_constraint_per_var_.assign(cp_model_.variables_size(), 0); + num_violated_constraint_per_var_ignoring_objective_.assign( + cp_model_.variables_size(), 0); violated_constraints_.clear(); - pos_in_violated_constraints_.assign(NumEvaluatorConstraints(), -1); - const int num_constraints = linear_only ? NumLinearConstraints() : NumEvaluatorConstraints(); for (int c = 0; c < num_constraints; ++c) { @@ -1879,30 +2033,224 @@ void LsEvaluator::RecomputeViolatedList(bool linear_only) { } void LsEvaluator::UpdateViolatedList(const int c) { - const int pos = pos_in_violated_constraints_[c]; - if (Violation(c) > 0) { + auto [it, inserted] = violated_constraints_.insert(c); // The constraint is violated. Add if needed. 
- if (pos != -1) return; - pos_in_violated_constraints_[c] = violated_constraints_.size(); - violated_constraints_.push_back(c); + if (!inserted) return; + if (IsObjectiveConstraint(c)) return; + dtime_ += 1e-8 * ConstraintToVars(c).size(); for (const int v : ConstraintToVars(c)) { - num_violated_constraint_per_var_[v] += 1; + num_violated_constraint_per_var_ignoring_objective_[v] += 1; } return; } - - // The constraint is not violated. Remove if needed. - if (pos == -1) return; - const int last = violated_constraints_.back(); - pos_in_violated_constraints_[last] = pos; - violated_constraints_[pos] = last; - pos_in_violated_constraints_[c] = -1; - violated_constraints_.pop_back(); - for (const int v : ConstraintToVars(c)) { - num_violated_constraint_per_var_[v] -= 1; + if (violated_constraints_.erase(c) == 1) { + if (IsObjectiveConstraint(c)) return; + dtime_ += 1e-8 * ConstraintToVars(c).size(); + for (const int v : ConstraintToVars(c)) { + num_violated_constraint_per_var_ignoring_objective_[v] -= 1; + } } } +int64_t CompiledReservoirConstraint::BuildProfileAndReturnViolation( + absl::Span solution) { + // Starts by filling the cache and profile_. + capacity_value_ = ExprValue(capacity_, solution); + const int num_events = time_values_.size(); + profile_.clear(); + for (int i = 0; i < num_events; ++i) { + time_values_[i] = ExprValue(times_[i], solution); + if (is_active_[i] != std::nullopt && + LiteralValue(*is_active_[i], solution) == 0) { + demand_values_[i] = 0; + } else { + demand_values_[i] = ExprValue(demands_[i], solution); + if (demand_values_[i] != 0) { + profile_.push_back({time_values_[i], demand_values_[i]}); + } + } + } + + if (profile_.empty()) return 0; + absl::c_sort(profile_); + + // Compress the profile for faster incremental evaluation. 
+ { + int p = 0; + for (int i = 1; i < profile_.size(); ++i) { + if (profile_[i].time == profile_[p].time) { + profile_[p].demand += profile_[i].demand; + } else { + profile_[++p] = profile_[i]; + } + } + profile_.resize(p + 1); + } + + int64_t overload = 0; + int64_t current_load = 0; + int64_t previous_time = std::numeric_limits::min(); + for (int i = 0; i < profile_.size(); ++i) { + // At this point, current_load is the load at previous_time. + const int64_t time = profile_[i].time; + if (current_load > capacity_value_) { + overload = CapAdd(overload, CapProd(current_load - capacity_value_, + time - previous_time)); + } + + current_load += profile_[i].demand; + previous_time = time; + } + return overload; +} + +int64_t CompiledReservoirConstraint::IncrementalViolation( + int var, absl::Span solution) { + const int64_t capacity = ExprValue(capacity_, solution); + profile_delta_.clear(); + CHECK(RefIsPositive(var)); + for (const int i : dense_index_to_events_[var_to_dense_index_.at(var)]) { + const int64_t time = ExprValue(times_[i], solution); + int64_t demand = 0; + if (is_active_[i] == std::nullopt || + LiteralValue(*is_active_[i], solution) == 1) { + demand = ExprValue(demands_[i], solution); + } + + if (time == time_values_[i]) { + if (demand != demand_values_[i]) { + // Update the demand at time. + profile_delta_.push_back({time, demand - demand_values_[i]}); + } + } else { + // Remove previous. + if (demand_values_[i] != 0) { + profile_delta_.push_back({time_values_[i], -demand_values_[i]}); + } + // Add new. + if (demand != 0) { + profile_delta_.push_back({time, demand}); + } + } + } + + // Abort early if there is no change. + // This might happen because we use max(start + size, end) for the time and + // even if some variable changed there, the time might not have. + if (capacity == capacity_value_ && profile_delta_.empty()) { + return violation_; + } + absl::c_sort(profile_delta_); + + // Similar algo, but we scan the two vectors at once. 
+ int64_t overload = 0; + int64_t current_load = 0; + int64_t previous_time = std::numeric_limits::min(); + + // TODO(user): This code is the hotspot for our local search on cumulative. + // It can probably be slighlty improved. We might also be able to abort early + // if we know that capacity is high enough compared to the highest point of + // the profile. + int i = 0; + int j = 0; + const absl::Span i_profile(profile_); + const absl::Span j_profile(profile_delta_); + while (true) { + int64_t time; + if (i < i_profile.size() && j < j_profile.size()) { + time = std::min(i_profile[i].time, j_profile[j].time); + } else if (i < i_profile.size()) { + time = i_profile[i].time; + } else if (j < j_profile.size()) { + time = j_profile[j].time; + } else { + // End of loop. + break; + } + + // Update overload if needed. + // At this point, current_load is the load at previous_time. + if (current_load > capacity) { + overload = CapAdd(overload, + CapProd(current_load - capacity, time - previous_time)); + } + + // Update i and current load. + while (i < i_profile.size() && i_profile[i].time == time) { + current_load += i_profile[i].demand; + i++; + } + + // Update j and current load. + while (j < j_profile.size() && j_profile[j].time == time) { + current_load += j_profile[j].demand; + j++; + } + + previous_time = time; + } + return overload; +} + +void CompiledReservoirConstraint::AppendVariablesForEvent( + int i, std::vector* result) const { + if (is_active_[i] != std::nullopt) { + result->push_back(PositiveRef(*is_active_[i])); + } + for (const int var : times_[i].vars()) { + result->push_back(PositiveRef(var)); + } + for (const int var : demands_[i].vars()) { + result->push_back(PositiveRef(var)); + } +} + +void CompiledReservoirConstraint::InitializeDenseIndexToEvents() { + // We scan the constraint a few times, but this is called once, so we don't + // care too much. 
+ CpModelProto unused; + int num_dense_indices = 0; + for (const int var : UsedVariables(unused)) { + var_to_dense_index_[var] = num_dense_indices++; + } + + CompactVectorVector event_to_dense_indices; + event_to_dense_indices.reserve(times_.size()); + const int num_events = times_.size(); + std::vector result; + for (int i = 0; i < num_events; ++i) { + result.clear(); + AppendVariablesForEvent(i, &result); + + // Remap and add. + for (int& var : result) { + var = var_to_dense_index_.at(var); + } + gtl::STLSortAndRemoveDuplicates(&result); + event_to_dense_indices.Add(result); + } + + // Note that because of the capacity (which might be variable) it is important + // to resize this to num_dense_indices. + dense_index_to_events_.ResetFromTranspose(event_to_dense_indices, + num_dense_indices); +} + +std::vector CompiledReservoirConstraint::UsedVariables( + const CpModelProto& /*model_proto*/) const { + std::vector result; + const int num_events = times_.size(); + for (int i = 0; i < num_events; ++i) { + AppendVariablesForEvent(i, &result); + } + for (const int var : capacity_.vars()) { + result.push_back(PositiveRef(var)); + } + gtl::STLSortAndRemoveDuplicates(&result); + result.shrink_to_fit(); + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/constraint_violation.h b/ortools/sat/constraint_violation.h index 2599ffb376..c21741ae98 100644 --- a/ortools/sat/constraint_violation.h +++ b/ortools/sat/constraint_violation.h @@ -17,20 +17,21 @@ #include #include #include +#include #include #include +#include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/util.h" +#include "ortools/util/dense_set.h" #include "ortools/util/sorted_interval_list.h" namespace operations_research { namespace sat { -int64_t ExprValue(const LinearExpressionProto& expr, - absl::Span solution); - class LinearIncrementalEvaluator { 
public: LinearIncrementalEvaluator() = default; @@ -57,31 +58,28 @@ class LinearIncrementalEvaluator { // and before the class starts to be used. This is DCHECKed. void PrecomputeCompactView(absl::Span var_max_variation); - // Compute activities and update them. + // Compute activities. void ComputeInitialActivities(absl::Span solution); - void Update(int var, int64_t delta, - std::vector>* violation_deltas = nullptr); - // Function specific to the feasibility jump heuristic. + // Update the activities of each constraints. + // Also update the current score for the given deltas. + // // Note that the score of the changed variable will not be updated correctly! void UpdateVariableAndScores( int var, int64_t delta, absl::Span weights, absl::Span jump_deltas, absl::Span jump_scores, - std::vector>* violation_deltas = nullptr); + std::vector* constraints_with_changed_violations); // Also for feasibility jump. void UpdateScoreOnWeightUpdate(int c, absl::Span jump_deltas, absl::Span var_to_score_change); - // Variables whose score might have decreased since the last clear for at - // least one jump value. + // Variables whose score/jump might have changed since the last clear. // // Note that because we reason on a per-constraint basis, this is actually - // independent of the set of positive constraint weight used. We just list - // variable for which the contribution to the violation of one constraint - // might have decreased for any jump value. + // independent of the set of positive constraint weight used. 
void ClearAffectedVariables(); - const std::vector& VariablesAffectedByLastUpdate() const { + absl::Span VariablesAffectedByLastUpdate() const { return last_affected_variables_; } @@ -123,15 +121,17 @@ class LinearIncrementalEvaluator { bool ViolationChangeIsConvex(int var) const; double DeterministicTime() const { - return 5e-9 * static_cast(dtime_); + return 5e-9 * static_cast(num_ops_); } - int64_t ObjectiveDelta(int var, int64_t delta) const { - if (var >= var_entries_.size()) return 0; - const std::vector& data = var_entries_[var]; - if (data.empty()) return 0; - if (data[0].ct_index != 0) return 0; - return data[0].coefficient * delta; + int64_t ObjectiveCoefficient(int var) const { + if (var >= columns_.size()) return 0.0; + const SpanData& data = columns_[var]; + if (data.num_linear_entries == 0) return 0.0; + const int i = data.start + data.num_neg_literal + data.num_pos_literal; + const int c = ct_buffer_[i]; + if (c != 0) return 0.0; + return coeff_buffer_[data.linear_start]; } absl::Span ConstraintToVars(int c) const { @@ -235,45 +235,64 @@ class LinearIncrementalEvaluator { std::vector cached_scores_; std::vector in_last_affected_variables_; - std::vector last_affected_variables_; + FixedCapacityVector last_affected_variables_; - mutable size_t dtime_ = 0; + mutable size_t num_ops_ = 0; }; // View of a generic (non linear) constraint for the LsEvaluator. -// -// TODO(user): Do we add a Update(solution, var, new_value) method ? -// TODO(user): Do we want to support Update(solutions, vars, new_values) ? class CompiledConstraint { public: - explicit CompiledConstraint(const ConstraintProto& ct_proto); + CompiledConstraint() = default; virtual ~CompiledConstraint() = default; // Recomputes the violation of the constraint from scratch. void InitializeViolation(absl::Span solution); - // Update the violation with the new value. - void PerformMove(int var, int64_t old_value, - absl::Span solution_with_new_value); - - // Computes the violation of a constraint. 
- // - // A violation is a positive integer value. A zero value means the constraint - // is not violated. - virtual int64_t ComputeViolation(absl::Span solution) = 0; + // Updates the violation with the new value. + virtual void PerformMove(int var, int64_t old_value, + absl::Span solution_with_new_value); // Returns the delta if var changes from old_value to solution[var]. virtual int64_t ViolationDelta( int var, int64_t old_value, absl::Span solution_with_new_value); - // Getters. - const ConstraintProto& ct_proto() const { return ct_proto_; } + // Returns the sorted vector of variables used by this constraint. This is + // used to known when a violation might change, and is only called once during + // initialization, so speed is not to much of a concern here. + // + // The global proto is needed to resolve interval variables reference. + virtual std::vector UsedVariables( + const CpModelProto& model_proto) const = 0; + + // The cached violation of this constraint. int64_t violation() const { return violation_; } + protected: + // Computes the violation of a constraint. + // + // This is called by InitializeViolation() and also the default implementation + // of ViolationDelta(). + virtual int64_t ComputeViolation(absl::Span solution) = 0; + + int64_t violation_; +}; + +// Intermediate class for all constraints that store directly their proto as +// part of their implementation. +class CompiledConstraintWithProto : public CompiledConstraint { + public: + explicit CompiledConstraintWithProto(const ConstraintProto& ct_proto); + ~CompiledConstraintWithProto() override = default; + + const ConstraintProto& ct_proto() const { return ct_proto_; } + + // This just returns the variables used by the stored ct_proto_. + std::vector UsedVariables(const CpModelProto& model_proto) const final; + private: const ConstraintProto& ct_proto_; - int64_t violation_; }; // Evaluation container for the local search. 
@@ -296,28 +315,25 @@ class LsEvaluator { // It returns true if a reduction of the domain took place. bool ReduceObjectiveBounds(int64_t lb, int64_t ub); - // Overwrites the current solution. - void OverwriteCurrentSolution(absl::Span solution); - - // Computes the violations of all constraints. - void ComputeAllViolations(); - - // Recompute the violations of non linear constraints. - void UpdateAllNonLinearViolations(); - - // Sets the value of the variable in the current solution. - // It must be called after UpdateLinearScores(). - void UpdateVariableValue(int var, int64_t new_value); + // Recomputes the violations of all constraints (resp only non-linear one). + void ComputeAllViolations(absl::Span solution); + void ComputeAllNonLinearViolations(absl::Span solution); // Recomputes the violations of all impacted non linear constraints. - void UpdateNonLinearViolations(int var, int64_t new_value); + void UpdateNonLinearViolations(int var, int64_t old_value, + absl::Span new_solution); // Function specific to the linear only feasibility jump. - void UpdateLinearScores(int var, int64_t value, + void UpdateLinearScores(int var, int64_t old_value, int64_t new_value, absl::Span weights, absl::Span jump_deltas, absl::Span jump_scores); - const std::vector& VariablesAffectedByLastLinearUpdate() const { + + // Must be called after UpdateLinearScores() / UpdateNonLinearViolations() + // in order to update the ViolatedConstraints(). + void UpdateViolatedList(); + + absl::Span VariablesAffectedByLastLinearUpdate() const { return linear_evaluator_.VariablesAffectedByLastUpdate(); } @@ -327,9 +343,13 @@ class LsEvaluator { // Returns the objective activity in the current state. int64_t ObjectiveActivity() const; - int64_t ObjectiveDelta(int var, int64_t delta) const { + bool IsObjectiveConstraint(int c) const { + return cp_model_.has_objective() && c == 0; + } + + int64_t ObjectiveCoefficient(int var) const { return cp_model_.has_objective() - ? 
linear_evaluator_.ObjectiveDelta(var, delta) + ? linear_evaluator_.ObjectiveCoefficient(var) : 0; } @@ -345,15 +365,20 @@ class LsEvaluator { int64_t Violation(int c) const; bool IsViolated(int c) const; double WeightedViolation(absl::Span weights) const; - double WeightedViolationDelta(absl::Span weights, int var, - int64_t delta) const; - // Ignores the violations of the linear constraints. - double WeightedNonLinearViolationDelta(absl::Span weights, - int var, int64_t delta) const; + + // Computes the delta in weighted violation if solution[var] += delta. + // We need a temporary mutable solution to evaluate the violation of generic + // constraints. If linear_only is true, only the linear violation will be + // used. + double WeightedViolationDelta(bool linear_only, + absl::Span weights, int var, + int64_t delta, + absl::Span mutable_solution) const; const LinearIncrementalEvaluator& LinearEvaluator() { return linear_evaluator_; } + LinearIncrementalEvaluator* MutableLinearEvaluator() { return &linear_evaluator_; } @@ -364,27 +389,19 @@ class LsEvaluator { // // The order depends on the algorithm used and shouldn't be relied on. void RecomputeViolatedList(bool linear_only); - const std::vector& ViolatedConstraints() const { - return violated_constraints_; + absl::Span ViolatedConstraints() const { + return violated_constraints_.values(); } + // Returns the number of constraints in ViolatedConstraints containing `var`. - int NumViolatedConstraintsForVar(int var) const { - return num_violated_constraint_per_var_[var]; + int NumViolatedConstraintsForVarIgnoringObjective(int var) const { + return num_violated_constraint_per_var_ignoring_objective_[var]; } + // Indicates if the computed jump value is always the best choice. bool VariableOnlyInLinearConstraintWithConvexViolationChange(int var) const; - // Access the solution stored. 
- const std::vector& current_solution() const { - return current_solution_; - } - - std::vector* mutable_current_solution() { - return ¤t_solution_; - } - - const std::vector>& last_update_violation_changes() - const { + const std::vector& last_update_violation_changes() const { return last_update_violation_changes_; } @@ -406,7 +423,7 @@ class LsEvaluator { // TODO(user): Properly account all big time consumers. double DeterministicTime() const { - return linear_evaluator_.DeterministicTime(); + return linear_evaluator_.DeterministicTime() + dtime_; } private: @@ -427,19 +444,13 @@ class LsEvaluator { std::vector> constraint_to_vars_; std::vector jump_value_optimal_; - // We need the mutable to evaluate a move by temporarily modifying solution. - mutable std::vector current_solution_; + UnsafeDenseSet violated_constraints_; + std::vector num_violated_constraint_per_var_ignoring_objective_; - // List of violated constraints. - // Invariants: - // - pos_in_violated_constraints_[c] == -1 iff c not int violated_constraints_ - // - violated_constraints_[pos_in_violated_constraints_[c]] = c - std::vector pos_in_violated_constraints_; - std::vector violated_constraints_; - std::vector num_violated_constraint_per_var_; + // Constraint index with changed violations. + std::vector last_update_violation_changes_; - // Constraint index and violation delta for the last update. - std::vector> last_update_violation_changes_; + mutable double dtime_ = 0; }; // ================================ @@ -447,7 +458,7 @@ class LsEvaluator { // ================================ // The violation of a bool_xor constraint is 0 or 1. 
-class CompiledBoolXorConstraint : public CompiledConstraint { +class CompiledBoolXorConstraint : public CompiledConstraintWithProto { public: explicit CompiledBoolXorConstraint(const ConstraintProto& ct_proto); ~CompiledBoolXorConstraint() override = default; @@ -462,7 +473,7 @@ class CompiledBoolXorConstraint : public CompiledConstraint { // - the sum(max(0, expr_value - target_value) forall expr). This part will be // maintained by the linear part. // - target_value - max(expressions) if positive. -class CompiledLinMaxConstraint : public CompiledConstraint { +class CompiledLinMaxConstraint : public CompiledConstraintWithProto { public: explicit CompiledLinMaxConstraint(const ConstraintProto& ct_proto); ~CompiledLinMaxConstraint() override = default; @@ -472,7 +483,7 @@ class CompiledLinMaxConstraint : public CompiledConstraint { // The violation of an int_prod constraint is // abs(value(target) - prod(value(expr)). -class CompiledIntProdConstraint : public CompiledConstraint { +class CompiledIntProdConstraint : public CompiledConstraintWithProto { public: explicit CompiledIntProdConstraint(const ConstraintProto& ct_proto); ~CompiledIntProdConstraint() override = default; @@ -482,7 +493,7 @@ class CompiledIntProdConstraint : public CompiledConstraint { // The violation of an int_div constraint is // abs(value(target) - value(expr0) / value(expr1)). -class CompiledIntDivConstraint : public CompiledConstraint { +class CompiledIntDivConstraint : public CompiledConstraintWithProto { public: explicit CompiledIntDivConstraint(const ConstraintProto& ct_proto); ~CompiledIntDivConstraint() override = default; @@ -502,7 +513,7 @@ class CompiledIntDivConstraint : public CompiledConstraint { // if target and expr0 have different sign: // abs(target) + abs(expr0) // Note: the modulo (expr1) is always fixed. 
-class CompiledIntModConstraint : public CompiledConstraint { +class CompiledIntModConstraint : public CompiledConstraintWithProto { public: explicit CompiledIntModConstraint(const ConstraintProto& ct_proto); ~CompiledIntModConstraint() override = default; @@ -512,7 +523,7 @@ class CompiledIntModConstraint : public CompiledConstraint { // The violation of a all_diff is the number of unordered pairs of expressions // with the same value. -class CompiledAllDiffConstraint : public CompiledConstraint { +class CompiledAllDiffConstraint : public CompiledConstraintWithProto { public: explicit CompiledAllDiffConstraint(const ConstraintProto& ct_proto); ~CompiledAllDiffConstraint() override = default; @@ -523,36 +534,39 @@ class CompiledAllDiffConstraint : public CompiledConstraint { std::vector values_; }; -// The violation of a no_overlap is the sum of overloads over time. -class CompiledNoOverlapConstraint : public CompiledConstraint { +// Special constraint for no overlap between two intervals. +// We usually expand small no-overlap in n^2 such constraint, so we want to +// be compact and efficient here. +class NoOverlapBetweenTwoIntervals : public CompiledConstraint { public: - explicit CompiledNoOverlapConstraint(const ConstraintProto& ct_proto, - const CpModelProto& cp_model); - ~CompiledNoOverlapConstraint() override = default; + NoOverlapBetweenTwoIntervals(int interval_0, int interval_1, + const CpModelProto& cp_model); + ~NoOverlapBetweenTwoIntervals() override = default; - int64_t ComputeViolation(absl::Span solution) override; + int64_t ComputeViolation(absl::Span solution) final { + return ComputeViolationInternal(solution); + } + + // Note(user): this is the same implementation as the base one, but it + // avoid one virtual call ! 
+ int64_t ViolationDelta(
+ int /*var*/, int64_t /*old_value*/,
+ absl::Span solution_with_new_value) final {
+ return ComputeViolationInternal(solution_with_new_value) - violation();
+ }
+
+ std::vector UsedVariables(const CpModelProto& model_proto) const final;

 private:
- const CpModelProto& cp_model_;
- std::vector> events_;
+ int64_t ComputeViolationInternal(absl::Span solution);
+
+ int num_enforcements_;
+ std::unique_ptr enforcements_;
+ LinearExpressionProto end_minus_start_1_;
+ LinearExpressionProto end_minus_start_2_;
 };

-// The violation of a cumulative is the sum of overloads over time.
-class CompiledCumulativeConstraint : public CompiledConstraint {
- public:
- explicit CompiledCumulativeConstraint(const ConstraintProto& ct_proto,
- const CpModelProto& cp_model);
- ~CompiledCumulativeConstraint() override = default;
-
- int64_t ComputeViolation(absl::Span solution) override;
-
- private:
- const CpModelProto& cp_model_;
- std::vector> events_;
-};
-
-// The violation of a no_overlap is the sum of overloads over time.
-class CompiledNoOverlap2dConstraint : public CompiledConstraint {
+class CompiledNoOverlap2dConstraint : public CompiledConstraintWithProto {
 public:
 explicit CompiledNoOverlap2dConstraint(const ConstraintProto& ct_proto,
 const CpModelProto& cp_model);
@@ -561,11 +575,95 @@ class CompiledNoOverlap2dConstraint : public CompiledConstraint {
 int64_t ComputeViolation(absl::Span solution) override;

 private:
- int64_t ComputeOverlapArea(absl::Span solution, int i,
- int j) const;
 const CpModelProto& cp_model_;
 };

+// This can be used to encode reservoir or cumulative constraints for LS. We
+// have a set of event times, and we use for overall violation the sum of
+// overload over time.
+//
+// This version supports an incremental computation when just a few events
+// change, which is roughly O(n) instead of O(n log n) which makes it
+// significantly faster than recomputing and sorting the profile on each
+// ViolationDelta().
+class CompiledReservoirConstraint : public CompiledConstraint {
+ public:
+ CompiledReservoirConstraint(LinearExpressionProto capacity,
+ std::vector> is_active,
+ std::vector times,
+ std::vector demands)
+ : capacity_(std::move(capacity)),
+ is_active_(std::move(is_active)),
+ times_(std::move(times)),
+ demands_(std::move(demands)) {
+ const int num_events = times_.size();
+ time_values_.resize(num_events, 0);
+ demand_values_.resize(num_events, 0);
+ InitializeDenseIndexToEvents();
+ }
+
+ // Note that since we have our own ViolationDelta() implementation this is
+ // only used for initialization and our PerformMove(). This is why we set
+ // violation_ here.
+ int64_t ComputeViolation(absl::Span solution) final {
+ violation_ = BuildProfileAndReturnViolation(solution);
+ return violation_;
+ }
+
+ void PerformMove(int /*var*/, int64_t /*old_value*/,
+ absl::Span solution_with_new_value) final {
+ // TODO(user): we could probably be more incremental here, but it is a bit
+ // tricky to get right and not too important since the time is dominated by
+ // evaluating moves, not taking them.
+ ComputeViolation(solution_with_new_value);
+ }
+
+ int64_t ViolationDelta(
+ int var, int64_t /*old_value*/,
+ absl::Span solution_with_new_value) final {
+ return IncrementalViolation(var, solution_with_new_value) - violation_;
+ }
+
+ std::vector UsedVariables(const CpModelProto& model_proto) const final;
+
+ private:
+ // This works in O(n log n).
+ int64_t BuildProfileAndReturnViolation(absl::Span solution);
+
+ // This works in O(n) + O(d log d) where d is the number of modified events
+ // compared to the base solution. In most situations it should be O(1).
+ int64_t IncrementalViolation(int var, absl::Span solution);
+
+ // This is used to speed up IncrementalViolation().
+ void InitializeDenseIndexToEvents();
+ void AppendVariablesForEvent(int i, std::vector* result) const;
+
+ // The const data from the constructor.
+ // Note that is_active_ might be empty if all events are mandatory. + const LinearExpressionProto capacity_; + const std::vector> is_active_; + const std::vector times_; + const std::vector demands_; + + // Remap all UsedVariables() to a dense index in [0, num_used_vars). + absl::flat_hash_map var_to_dense_index_; + CompactVectorVector dense_index_to_events_; + + struct Event { + int64_t time; + int64_t demand; + bool operator<(const Event& o) const { return time < o.time; } + }; + std::vector profile_; + std::vector profile_delta_; + + // This is filled by BuildProfileAndReturnViolation() and correspond to the + // value in the current solutions. + int64_t capacity_value_; + std::vector time_values_; + std::vector demand_values_; +}; + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model.proto b/ortools/sat/cp_model.proto index 3845ad3854..b76dc95aa1 100644 --- a/ortools/sat/cp_model.proto +++ b/ortools/sat/cp_model.proto @@ -698,7 +698,7 @@ enum CpSolverStatus { // An optimal feasible solution has been found. // // More generally, this status represent a success. So we also return OPTIMAL - // if we find a solution for a pure feasiblity problem or if a gap limit has + // if we find a solution for a pure feasibility problem or if a gap limit has // been specified and we return a solution within this limit. In the case // where we need to return all the feasible solution, this status will only be // returned if we enumerated all of them; If we stopped before, we will return @@ -752,10 +752,14 @@ message CpSolverResponse { // is only filled with the info derived during a normal search and we do not // have any dedicated algorithm to improve it. // - // If the problem is a feasibility problem, then these bounds will be valid - // for any feasible solution. If the problem is an optimization problem, then - // these bounds will only be valid for any OPTIMAL solutions, it can exclude - // sub-optimal feasible ones. 
+ // Warning: if you didn't set keep_all_feasible_solutions_in_presolve, then
+ // these domains might exclude valid feasible solutions. Otherwise for a
+ // feasibility problem, all feasible solutions should be there.
+ //
+ // Warning: For an optimization problem, these will correspond to valid bounds
+ // for the problem of finding an improving solution to the best one found so
+ // far. It might be better to solve a feasibility version if one just wants to
+ // explore the feasible region.
 repeated IntegerVariableProto tightened_variables = 21;

 // A subset of the model "assumptions" field. This will only be filled if the
diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc
index d7e7ca75b5..97f9a7aa66 100644
--- a/ortools/sat/cp_model_checker.cc
+++ b/ortools/sat/cp_model_checker.cc
@@ -1203,8 +1203,12 @@ class ConstraintChecker {
 bool LinearConstraintIsFeasible(const ConstraintProto& ct) {
 int64_t sum = 0;
 const int num_variables = ct.linear().coeffs_size();
+ const int* const vars = ct.linear().vars().data();
+ const int64_t* const coeffs = ct.linear().coeffs().data();
 for (int i = 0; i < num_variables; ++i) {
- sum += Value(ct.linear().vars(i)) * ct.linear().coeffs(i);
+ // We know we only have positive references now.
+ DCHECK(RefIsPositive(vars[i]));
+ sum += variable_values_[vars[i]] * coeffs[i];
 }
 const bool result = DomainInProtoContains(ct.linear(), sum);
 if (!result) {
diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc
index 409c17da99..53d0b3f60e 100644
--- a/ortools/sat/cp_model_expand.cc
+++ b/ortools/sat/cp_model_expand.cc
@@ -32,7 +32,6 @@
 #include "google/protobuf/message.h"
 #include "ortools/base/logging.h"
 #include "ortools/base/stl_util.h"
-#include "ortools/base/types.h"
 #include "ortools/port/proto_utils.h"
 #include "ortools/sat/cp_model.pb.h"
 #include "ortools/sat/cp_model_checker.h"
@@ -41,7 +40,6 @@
 #include "ortools/sat/sat_parameters.pb.h"
 #include "ortools/sat/util.h"
 #include "ortools/util/logging.h"
-#include "ortools/util/saturated_arithmetic.h"
 #include "ortools/util/sorted_interval_list.h"

 namespace operations_research {
@@ -100,13 +98,131 @@ void PropagateAutomaton(const AutomatonConstraintProto& proto,

 namespace {

-void ExpandReservoir(ConstraintProto* ct, PresolveContext* context) {
- if (ct->reservoir().min_level() > ct->reservoir().max_level()) {
- VLOG(1) << "Empty level domain in reservoir constraint.";
- return (void)context->NotifyThatModelIsUnsat();
+// Different encoding that supports general demands. This is usually a pretty bad
+// encoding, at least until we improve the solver on such models.
+void ExpandReservoirUsingCircuit(int64_t sum_of_positive_demand,
+ int64_t sum_of_negative_demand,
+ ConstraintProto* reservoir_ct,
+ PresolveContext* context) {
+ const ReservoirConstraintProto& reservoir = reservoir_ct->reservoir();
+ const int num_events = reservoir.time_exprs_size();
+
+ // The encoding will create a circuit constraint and one integer variable per
+ // event representing the level at that event time.
+ CircuitConstraintProto* circuit = + context->working_model->add_constraints()->mutable_circuit(); + + const int64_t var_min = + std::max(reservoir.min_level(), sum_of_negative_demand); + const int64_t var_max = + std::min(reservoir.max_level(), sum_of_positive_demand); + std::vector level_vars(num_events); + for (int i = 0; i < num_events; ++i) { + level_vars[i] = context->NewIntVar(Domain(var_min, var_max)); } - const ReservoirConstraintProto& reservoir = ct->reservoir(); + // For the corner case where all events are absent, we need a potential + // self-arc on the start/end circuit node. + { + circuit->add_tails(num_events); + circuit->add_heads(num_events); + circuit->add_literals(context->NewBoolVar()); + } + + for (int i = 0; i < num_events; ++i) { + if (!reservoir.active_literals().empty()) { + // Add self arc to represent absence. + circuit->add_tails(i); + circuit->add_heads(i); + circuit->add_literals(NegatedRef(reservoir.active_literals(i))); + } + + // We need an extra circuit node for start/end of circuit. + // We use the available index 'num_events'. + { + // Circuit starts at i, level_vars[i] == demand_expr[i]. + const int start_var = context->NewBoolVar(); + circuit->add_tails(num_events); + circuit->add_heads(i); + circuit->add_literals(start_var); + + // Add enforced linear for demand. + { + ConstraintProto* new_ct = context->working_model->add_constraints(); + new_ct->add_enforcement_literal(start_var); + LinearConstraintProto* lin = new_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(0); + lin->add_vars(level_vars[i]); + lin->add_coeffs(1); + AddLinearExpressionToLinearConstraint(reservoir.level_changes(i), -1, + lin); + context->CanonicalizeLinearConstraint(new_ct); + } + + // Circuit ends at i, no extra constraint there. 
+ circuit->add_tails(i); + circuit->add_heads(num_events); + circuit->add_literals(context->NewBoolVar()); + } + + for (int j = 0; j < num_events; ++j) { + if (i == j) continue; + + // If arc_i_j is true then: + // - active_i is true (enforced by circuit). + // - active_j is true (enforced by circuit). + // - time_i <= time_j + // - level_j == level_i + demand_j + // + // TODO(user): Unfortunately we cannot share these literal between + // reservoir except if the set of time point is exactly the same! + // otherwise if we miss one, then A "after" B in one circuit do not + // implies that there is no C in between in another! + const int arc_i_j = context->NewBoolVar(); + circuit->add_tails(i); + circuit->add_heads(j); + circuit->add_literals(arc_i_j); + + // Add enforced linear for time. + { + ConstraintProto* new_ct = context->working_model->add_constraints(); + new_ct->add_enforcement_literal(arc_i_j); + LinearConstraintProto* lin = new_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(std::numeric_limits::max()); + AddLinearExpressionToLinearConstraint(reservoir.time_exprs(j), 1, lin); + AddLinearExpressionToLinearConstraint(reservoir.time_exprs(i), -1, lin); + context->CanonicalizeLinearConstraint(new_ct); + } + + // Add enforced linear for demand. 
+ { + ConstraintProto* new_ct = context->working_model->add_constraints(); + new_ct->add_enforcement_literal(arc_i_j); + LinearConstraintProto* lin = new_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(0); + lin->add_vars(level_vars[j]); + lin->add_coeffs(1); + lin->add_vars(level_vars[i]); + lin->add_coeffs(-1); + AddLinearExpressionToLinearConstraint(reservoir.level_changes(j), -1, + lin); + context->CanonicalizeLinearConstraint(new_ct); + } + } + } + + reservoir_ct->Clear(); + context->UpdateRuleStats("reservoir: expanded using circuit."); +} + +void ExpandReservoirUsingPrecedences(int64_t sum_of_positive_demand, + int64_t sum_of_negative_demand, + ConstraintProto* reservoir_ct, + PresolveContext* context) { + const ReservoirConstraintProto& reservoir = reservoir_ct->reservoir(); const int num_events = reservoir.time_exprs_size(); const int true_literal = context->GetTrueLiteral(); const auto is_active_literal = [&reservoir, true_literal](int index) { @@ -114,101 +230,251 @@ void ExpandReservoir(ConstraintProto* ct, PresolveContext* context) { return reservoir.active_literals(index); }; + // Constrains the running level to be consistent at all time_exprs. + // For this we only add a constraint at the time a given demand take place. + for (int i = 0; i < num_events; ++i) { + const int active_i = is_active_literal(i); + if (context->LiteralIsFalse(active_i)) continue; + + const int64_t demand_i = context->FixedValue(reservoir.level_changes(i)); + if (demand_i == 0) continue; + + // No need for some constraints if the reservoir is just constrained in + // one direction. + if (demand_i > 0 && sum_of_positive_demand <= reservoir.max_level()) { + continue; + } + if (demand_i < 0 && sum_of_negative_demand >= reservoir.min_level()) { + continue; + } + + ConstraintProto* new_ct = context->working_model->add_constraints(); + LinearConstraintProto* new_linear = new_ct->mutable_linear(); + + // Add contributions from previous events. 
+ int64_t offset = 0; + const LinearExpressionProto& time_i = reservoir.time_exprs(i); + for (int j = 0; j < num_events; ++j) { + if (i == j) continue; + const int active_j = is_active_literal(j); + if (context->LiteralIsFalse(active_j)) continue; + + // Get or create the literal equivalent to + // active_i && active_j && time[j] <= time[i]. + // + // TODO(user): we could get rid of active_i in the equivalence above. + // Experiments when we have enough benchmarks. + const LinearExpressionProto& time_j = reservoir.time_exprs(j); + const int j_lesseq_i = context->GetOrCreateReifiedPrecedenceLiteral( + time_j, time_i, active_j, active_i); + context->working_model->mutable_variables(j_lesseq_i) + ->set_name(absl::StrCat(j, " before ", i)); + + const int64_t demand = context->FixedValue(reservoir.level_changes(j)); + if (RefIsPositive(j_lesseq_i)) { + new_linear->add_vars(j_lesseq_i); + new_linear->add_coeffs(demand); + } else { + new_linear->add_vars(NegatedRef(j_lesseq_i)); + new_linear->add_coeffs(-demand); + offset -= demand; + } + } + + // Add contribution from event i. + // + // TODO(user): Alternatively we can mark the whole constraint as enforced + // only if active_i is true. Experiments with both version, right now we + // miss enough benchmarks to conclude. + if (RefIsPositive(active_i)) { + new_linear->add_vars(active_i); + new_linear->add_coeffs(demand_i); + } else { + new_linear->add_vars(NegatedRef(active_i)); + new_linear->add_coeffs(-demand_i); + offset -= demand_i; + } + + // Note that according to the sign of demand_i, we only need one side. 
+ if (demand_i > 0) { + new_linear->add_domain(std::numeric_limits::min()); + new_linear->add_domain(reservoir.max_level()); + } else { + new_linear->add_domain(reservoir.min_level()); + new_linear->add_domain(std::numeric_limits::max()); + } + + context->CanonicalizeLinearConstraint(new_ct); + } + + reservoir_ct->Clear(); + context->UpdateRuleStats("reservoir: expanded using precedences"); +} + +void ExpandReservoir(ConstraintProto* reservoir_ct, PresolveContext* context) { + if (reservoir_ct->reservoir().min_level() > + reservoir_ct->reservoir().max_level()) { + VLOG(1) << "Empty level domain in reservoir constraint."; + return (void)context->NotifyThatModelIsUnsat(); + } + + const ReservoirConstraintProto& reservoir = reservoir_ct->reservoir(); + const int num_events = reservoir.time_exprs_size(); + int num_positives = 0; int num_negatives = 0; + bool all_demands_are_fixed = true; + int64_t sum_of_positive_demand = 0; + int64_t sum_of_negative_demand = 0; for (const LinearExpressionProto& demand_expr : reservoir.level_changes()) { - const int64_t demand = context->FixedValue(demand_expr); - if (demand > 0) { + if (!context->IsFixed(demand_expr)) { + all_demands_are_fixed = false; + } + const int64_t max_demand = context->MaxOf(demand_expr); + if (max_demand > 0) { num_positives++; - } else if (demand < 0) { + sum_of_positive_demand += max_demand; + } + const int64_t min_demand = context->MinOf(demand_expr); + if (min_demand < 0) { num_negatives++; + sum_of_negative_demand += min_demand; } } - absl::flat_hash_map, int> precedence_cache; + if (sum_of_negative_demand >= reservoir.min_level() && + sum_of_positive_demand <= reservoir.max_level()) { + context->UpdateRuleStats("reservoir: always true"); + reservoir_ct->Clear(); + return; + } - if (num_positives > 0 && num_negatives > 0) { - // Creates Boolean variables equivalent to (start[i] <= start[j]) i != j - for (int i = 0; i < num_events - 1; ++i) { - const int active_i = is_active_literal(i); - if 
(context->LiteralIsFalse(active_i)) continue; - const LinearExpressionProto& time_i = reservoir.time_exprs(i); - - for (int j = i + 1; j < num_events; ++j) { - const int active_j = is_active_literal(j); - if (context->LiteralIsFalse(active_j)) continue; - const LinearExpressionProto& time_j = reservoir.time_exprs(j); - - const int i_lesseq_j = context->GetOrCreateReifiedPrecedenceLiteral( - time_i, time_j, active_i, active_j); - context->working_model->mutable_variables(i_lesseq_j) - ->set_name(absl::StrCat(i, " before ", j)); - precedence_cache[{i, j}] = i_lesseq_j; - const int j_lesseq_i = context->GetOrCreateReifiedPrecedenceLiteral( - time_j, time_i, active_j, active_i); - context->working_model->mutable_variables(j_lesseq_i) - ->set_name(absl::StrCat(j, " before ", i)); - precedence_cache[{j, i}] = j_lesseq_i; - } - } - - // Constrains the running level to be consistent at all time_exprs. - // For this we only add a constraint at the time a given demand - // take place. We also have a constraint for time zero if needed - // (added below). - for (int i = 0; i < num_events; ++i) { - const int active_i = is_active_literal(i); - if (context->LiteralIsFalse(active_i)) continue; - - // Accumulates level_changes of all predecessors. - ConstraintProto* const level = context->working_model->add_constraints(); - level->add_enforcement_literal(active_i); - - // Add contributions from previous events. 
- int64_t offset = 0; - for (int j = 0; j < num_events; ++j) { - if (i == j) continue; - const int active_j = is_active_literal(j); - if (context->LiteralIsFalse(active_j)) continue; - - const auto prec_it = precedence_cache.find({j, i}); - CHECK(prec_it != precedence_cache.end()); - const int prec_lit = prec_it->second; - const int64_t demand = context->FixedValue(reservoir.level_changes(j)); - if (RefIsPositive(prec_lit)) { - level->mutable_linear()->add_vars(prec_lit); - level->mutable_linear()->add_coeffs(demand); - } else { - level->mutable_linear()->add_vars(prec_lit); - level->mutable_linear()->add_coeffs(-demand); - offset -= demand; - } - } - - // Accounts for own demand in the domain of the sum. - const int64_t demand_i = context->FixedValue(reservoir.level_changes(i)); - level->mutable_linear()->add_domain( - CapAdd(CapSub(reservoir.min_level(), demand_i), offset)); - level->mutable_linear()->add_domain( - CapAdd(CapSub(reservoir.max_level(), demand_i), offset)); - context->CanonicalizeLinearConstraint(level); - } - } else { - // If all level_changes have the same sign, we do not care about the order, - // just the sum. + // If all level_changes have the same sign, we do not care about the order, + // just the sum. We might need to create intermediate variable for quadratic + // terms though. + if (num_negatives == 0 || num_positives == 0) { + const int true_literal = context->GetTrueLiteral(); ConstraintProto* new_ct = context->working_model->add_constraints(); - auto* const sum = new_ct->mutable_linear(); + LinearConstraintProto* sum = new_ct->mutable_linear(); for (int i = 0; i < num_events; ++i) { - sum->add_vars(is_active_literal(i)); - sum->add_coeffs(context->FixedValue(reservoir.level_changes(i))); + const int active = reservoir.active_literals().empty() + ? 
true_literal + : reservoir.active_literals(i); + const LinearExpressionProto& demand = reservoir.level_changes(i); + if (context->IsFixed(demand)) { + const int64_t change = context->FixedValue(reservoir.level_changes(i)); + if (RefIsPositive(active)) { + sum->add_vars(active); + sum->add_coeffs(change); + } else { + // Add (1 - not(active)) * level_change. + sum->add_vars(true_literal); + sum->add_coeffs(change); + sum->add_vars(NegatedRef(active)); + sum->add_coeffs(-change); + } + } else if (context->LiteralIsTrue(active)) { + AddLinearExpressionToLinearConstraint(demand, 1, sum); + } else { + const int new_var = context->NewIntVar( + Domain(context->MinOf(demand), context->MaxOf(demand)) + .UnionWith(Domain(0))); + sum->add_vars(new_var); + sum->add_coeffs(1); + + // Active => new_var == demand. + { + ConstraintProto* demand_ct = + context->working_model->add_constraints(); + demand_ct->add_enforcement_literal(active); + LinearConstraintProto* lin = demand_ct->mutable_linear(); + lin->add_domain(0); + lin->add_domain(0); + lin->add_vars(new_var); + lin->add_coeffs(1); + AddLinearExpressionToLinearConstraint(demand, -1, lin); + context->CanonicalizeLinearConstraint(demand_ct); + } + + // not(active) => new_var == 0. + context->AddImplyInDomain(NegatedRef(active), new_var, Domain(0)); + } } sum->add_domain(reservoir.min_level()); sum->add_domain(reservoir.max_level()); context->CanonicalizeLinearConstraint(new_ct); + + context->UpdateRuleStats("reservoir: simple expansion with sum"); + reservoir_ct->Clear(); + return; } + // Call the correct expansion according to our parameter. + if (context->params().expand_reservoir_using_circuit()) { + ExpandReservoirUsingCircuit(sum_of_positive_demand, sum_of_negative_demand, + reservoir_ct, context); + } else { + // This one is the faster option usually. 
+ if (all_demands_are_fixed) { + ExpandReservoirUsingPrecedences(sum_of_positive_demand, + sum_of_negative_demand, reservoir_ct, + context); + } else { + context->UpdateRuleStats( + "reservoir: skipped expansion due to variable demands"); + } + } +} + +// This is mainly used for testing the reservoir implementation. +void EncodeCumulativeAsReservoir(ConstraintProto* ct, + PresolveContext* context) { + if (!context->IsFixed(ct->cumulative().capacity())) { + context->UpdateRuleStats( + "cumulative -> reservoir: expansion is not supported with variable " + "capacity."); + return; + } + + // Note that we know that the min_level can never go below zero, so we can + // just ignore this part of the constraint here. + ConstraintProto reservoir_ct; + auto* reservoir = reservoir_ct.mutable_reservoir(); + reservoir->set_min_level(std::numeric_limits::min()); + reservoir->set_max_level(context->FixedValue(ct->cumulative().capacity())); + + const int true_literal = context->GetTrueLiteral(); + const int num_intervals = ct->cumulative().intervals().size(); + for (int i = 0; i < num_intervals; ++i) { + const auto& interval_ct = + context->working_model->constraints(ct->cumulative().intervals(i)); + const auto& interval = interval_ct.interval(); + *reservoir->add_time_exprs() = interval.start(); + *reservoir->add_time_exprs() = interval.end(); + + const LinearExpressionProto& demand = ct->cumulative().demands(i); + *reservoir->add_level_changes() = demand; + LinearExpressionProto& negated = *reservoir->add_level_changes(); + negated.set_offset(-demand.offset()); + for (int j = 0; j < demand.vars().size(); ++j) { + negated.add_vars(demand.vars(j)); + negated.add_coeffs(-demand.coeffs(j)); + } + + if (interval_ct.enforcement_literal().empty()) { + reservoir->add_active_literals(true_literal); + reservoir->add_active_literals(true_literal); + } else { + CHECK_EQ(interval_ct.enforcement_literal().size(), 1); + reservoir->add_active_literals(interval_ct.enforcement_literal(0)); + 
reservoir->add_active_literals(interval_ct.enforcement_literal(0)); + } + } + + // Now expand it and clear the cumulative. ct->Clear(); - context->UpdateRuleStats("reservoir: expanded"); + context->UpdateRuleStats("cumulative: expanded into reservoir"); + ExpandReservoir(&reservoir_ct, context); } void ExpandIntMod(ConstraintProto* ct, PresolveContext* context) { @@ -466,7 +732,7 @@ void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { const int num_exprs = ct->lin_max().exprs().size(); if (num_exprs < 2) return; - // We have a special treatment for Abs, Earlyness, Tardiness, and all + // We have a special treatment for Abs, Earliness, Tardiness, and all // affine_max where there is only one variable present in all the expressions. if (ExpressionsContainsOnlyOneVar(ct->lin_max().exprs())) return; @@ -476,7 +742,7 @@ void ExpandLinMax(ConstraintProto* ct, PresolveContext* context) { // - target >= ai for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { ConstraintProto* new_ct = context->working_model->add_constraints(); - LinearConstraintProto* lin = ct->mutable_linear(); + LinearConstraintProto* lin = new_ct->mutable_linear(); lin->add_domain(0); lin->add_domain(std::numeric_limits::max()); AddLinearExpressionToLinearConstraint(ct->lin_max().target(), 1, lin); @@ -2246,20 +2512,12 @@ void ExpandCpModel(PresolveContext* context) { break; case ConstraintProto::kReservoir: if (context->params().expand_reservoir_constraints()) { - for (const LinearExpressionProto& demand_expr : - ct->reservoir().level_changes()) { - if (!context->IsFixed(demand_expr)) { - skip = true; - break; - } - } - if (skip) { - context->UpdateRuleStats( - "reservoir: expansion is not supported with variable level " - "changes"); - } else { - ExpandReservoir(ct, context); - } + ExpandReservoir(ct, context); + } + break; + case ConstraintProto::kCumulative: + if (context->params().encode_cumulative_as_reservoir()) { + EncodeCumulativeAsReservoir(ct, context); } break; case 
ConstraintProto::kIntMod: diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 5a78cf33f3..6f251e3a1e 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -26,6 +26,7 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/log_severity.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" @@ -207,10 +208,10 @@ void NeighborhoodGeneratorHelper::RecomputeHelperData() { { Model local_model; CpModelProto mapping_proto; - simplied_model_proto_.Clear(); - *simplied_model_proto_.mutable_variables() = + simplified_model_proto_.Clear(); + *simplified_model_proto_.mutable_variables() = model_proto_with_only_variables_.variables(); - PresolveContext context(&local_model, &simplied_model_proto_, + PresolveContext context(&local_model, &simplified_model_proto_, &mapping_proto); ModelCopy copier(&context); @@ -222,7 +223,7 @@ void NeighborhoodGeneratorHelper::RecomputeHelperData() { // Compute the constraint <-> variable graph. // // TODO(user): Remove duplicate constraints? - const auto& constraints = simplied_model_proto_.constraints(); + const auto& constraints = simplified_model_proto_.constraints(); constraint_to_var_.clear(); constraint_to_var_.reserve(constraints.size()); for (int ct_index = 0; ct_index < constraints.size(); ++ct_index) { @@ -361,10 +362,10 @@ void NeighborhoodGeneratorHelper::RecomputeHelperData() { // nothing else is done for a while, we will never see the "latest" size // in the log until it is reduced again. 
shared_response_->LogMessageWithThrottling( - "Model", - absl::StrCat("var:", active_variables_.size(), "/", num_variables, - " constraints:", simplied_model_proto_.constraints().size(), - "/", model_proto_.constraints().size(), compo_message)); + "Model", absl::StrCat("var:", active_variables_.size(), "/", + num_variables, " constraints:", + simplified_model_proto_.constraints().size(), "/", + model_proto_.constraints().size(), compo_message)); } bool NeighborhoodGeneratorHelper::IsActive(int var) const { diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index ba9a63af45..00eb337f9f 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -159,7 +159,8 @@ class NeighborhoodGeneratorHelper : public SubSolver { bool DifficultyMeansFullNeighborhood(double difficulty) const { absl::ReaderMutexLock lock(&graph_mutex_); - const int target_size = std::ceil(difficulty * active_variables_.size()); + const int target_size = + static_cast(std::ceil(difficulty * active_variables_.size())); return target_size == active_variables_.size(); } @@ -309,7 +310,7 @@ class NeighborhoodGeneratorHelper : public SubSolver { // A copy of CpModelProto where we did some basic presolving to remove all // constraint that are always true. The Variable-Constraint graph is based on // this model. Note that only the constraints field is present here. - CpModelProto simplied_model_proto_ ABSL_GUARDED_BY(graph_mutex_); + CpModelProto simplified_model_proto_ ABSL_GUARDED_BY(graph_mutex_); // Variable-Constraint graph. // We replace an interval by its variables in the scheduling constraints. 
diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index afeb1caa85..9466d5aa80 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1508,8 +1508,8 @@ void LoadIntProdConstraint(const ConstraintProto& ct, Model* m) { case 0: { auto* integer_trail = m->GetOrCreate(); auto* sat_solver = m->GetOrCreate(); - if (!integer_trail->Enqueue(prod.LowerOrEqual(1), {}) || - !integer_trail->Enqueue(prod.GreaterOrEqual(1), {})) { + if (!integer_trail->Enqueue(prod.LowerOrEqual(1)) || + !integer_trail->Enqueue(prod.GreaterOrEqual(1))) { sat_solver->NotifyThatModelIsUnsat(); } break; diff --git a/ortools/sat/cp_model_mapping.h b/ortools/sat/cp_model_mapping.h index c4dd9cb342..58a6849f74 100644 --- a/ortools/sat/cp_model_mapping.h +++ b/ortools/sat/cp_model_mapping.h @@ -220,8 +220,8 @@ class CpModelMapping { // Recover from a IntervalVariable/BooleanVariable its associated CpModelProto // index. The value of -1 is used to indicate that there is no correspondence // (i.e. this variable is only used internally). - absl::StrongVector reverse_boolean_map_; - absl::StrongVector reverse_integer_map_; + util_intops::StrongVector reverse_boolean_map_; + util_intops::StrongVector reverse_integer_map_; // Set of constraints to ignore because they were already dealt with by // ExtractEncoding(). 
diff --git a/ortools/sat/cp_model_postsolve.cc b/ortools/sat/cp_model_postsolve.cc index 379653ab78..24cf1f3949 100644 --- a/ortools/sat/cp_model_postsolve.cc +++ b/ortools/sat/cp_model_postsolve.cc @@ -23,6 +23,7 @@ #include "ortools/port/proto_utils.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" +#include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" namespace operations_research { @@ -406,5 +407,127 @@ void PostsolveResponse(const int64_t num_variables_in_original_model, } } +void FillTightenedDomainInResponse(const CpModelProto& original_model, + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + const std::vector& search_domains, + CpSolverResponse* response, + SolverLogger* logger) { + // The [0, num_vars) part will contain the tightened domains. + const int num_original_vars = original_model.variables().size(); + const int num_expanded_vars = mapping_proto.variables().size(); + CHECK_LE(num_original_vars, num_expanded_vars); + std::vector domains(num_expanded_vars); + + // Start with the domain from the mapping proto. Note that by construction + // this should be tighter than the original variable domains. + for (int i = 0; i < num_expanded_vars; ++i) { + domains[i] = ReadDomainFromProto(mapping_proto.variables(i)); + if (i < num_original_vars) { + CHECK(domains[i].IsIncludedIn( + ReadDomainFromProto(original_model.variables(i)))); + } + } + + // The first test is for the corner case of presolve closing the problem, + // in which case there is no more info to process. + int num_common_vars = 0; + int num_affine_reductions = 0; + if (!search_domains.empty()) { + if (postsolve_mapping.empty()) { + // Currently no mapping should mean all variables are in common. This + // happen when presolve is disabled, but we might still have more + // variables due to expansion for instance. 
+ // + // There is also the corner case of presolve closing the problem, + CHECK_GE(search_domains.size(), num_original_vars); + num_common_vars = num_original_vars; + for (int i = 0; i < num_original_vars; ++i) { + domains[i] = domains[i].IntersectionWith(search_domains[i]); + } + } else { + // This is the normal presolve case. + // Intersect the domain of the variables in common. + CHECK_EQ(postsolve_mapping.size(), search_domains.size()); + for (int search_i = 0; search_i < postsolve_mapping.size(); ++search_i) { + const int i_in_mapping_model = postsolve_mapping[search_i]; + if (i_in_mapping_model < num_original_vars) { + ++num_common_vars; + } + domains[i_in_mapping_model] = + domains[i_in_mapping_model].IntersectionWith( + search_domains[search_i]); + } + + // Look for affine relation, and do more intersection. + for (const ConstraintProto& ct : mapping_proto.constraints()) { + if (ct.constraint_case() != ConstraintProto::kLinear) continue; + const LinearConstraintProto& lin = ct.linear(); + if (lin.vars().size() != 2) continue; + if (lin.domain().size() != 2) continue; + if (lin.domain(0) != lin.domain(1)) continue; + int v1 = lin.vars(0); + int v2 = lin.vars(1); + int c1 = lin.coeffs(0); + int c2 = lin.coeffs(1); + if (v2 < num_original_vars && v1 >= num_original_vars) { + std::swap(v1, v2); + std::swap(c1, c2); + } + if (v1 < num_original_vars && v2 >= num_original_vars) { + // We can reduce the domain of v1 by using the affine relation + // and the domain of v2. + // We have c1 * v2 + c2 * v2 = offset; + const int64_t offset = lin.domain(0); + const Domain restriction = + Domain(offset) + .AdditionWith(domains[v2].ContinuousMultiplicationBy(-c2)) + .InverseMultiplicationBy(c1); + if (!domains[v1].IsIncludedIn(restriction)) { + ++num_affine_reductions; + domains[v1] = domains[v1].IntersectionWith(restriction); + } + } + } + } + } + + // Copy the names and replace domains. 
+ *response->mutable_tightened_variables() = original_model.variables(); + int num_tigher_domains = 0; + int num_empty = 0; + int num_fixed = 0; + for (int i = 0; i < num_original_vars; ++i) { + FillDomainInProto(domains[i], response->mutable_tightened_variables(i)); + if (domains[i].IsEmpty()) { + ++num_empty; + continue; + } + + if (domains[i].IsFixed()) num_fixed++; + const Domain original = ReadDomainFromProto(original_model.variables(i)); + if (domains[i] != original) { + DCHECK(domains[i].IsIncludedIn(original)); + ++num_tigher_domains; + } + } + + // Some stats. + if (num_empty > 0) { + SOLVER_LOG(logger, num_empty, + " tightened domains are empty. This should not happen except if " + "we proven infeasibility or optimality."); + } + SOLVER_LOG(logger, "Filled tightened domains in the response."); + SOLVER_LOG(logger, "[TighteningInfo] num_tighter:", num_tigher_domains, + " num_fixed:", num_fixed, + " num_affine_reductions:", num_affine_reductions); + SOLVER_LOG(logger, + "[TighteningInfo] original_num_variables:", num_original_vars, + " during_presolve:", num_expanded_vars, + " after:", search_domains.size(), " in_common:", num_common_vars); + SOLVER_LOG(logger, ""); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_postsolve.h b/ortools/sat/cp_model_postsolve.h index ade4774214..efa24521de 100644 --- a/ortools/sat/cp_model_postsolve.h +++ b/ortools/sat/cp_model_postsolve.h @@ -19,6 +19,8 @@ #include "ortools/base/types.h" #include "ortools/sat/cp_model.pb.h" +#include "ortools/util/logging.h" +#include "ortools/util/sorted_interval_list.h" namespace operations_research { namespace sat { @@ -46,6 +48,16 @@ void PostsolveResponse(int64_t num_variables_in_original_model, const std::vector& postsolve_mapping, std::vector* solution); +// Try to postsolve with a "best-effort" the reduced domain from the presolved +// model to the user given model. 
See the documentation of the CpSolverResponse +// tightened_variables field for more information on the caveats. +void FillTightenedDomainInResponse(const CpModelProto& original_model, + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + const std::vector& search_domains, + CpSolverResponse* response, + SolverLogger* logger); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index dcec27b1f2..791fc1291d 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -105,12 +105,13 @@ bool CpModelPresolver::RemoveConstraint(ConstraintProto* ct) { return true; } -// Remove all empty constraints. Note that we need to remap the interval -// references. +// Remove all empty constraints and duplicated intervals. Note that we need to +// remap the interval references. // // Now that they have served their purpose, we also remove dummy constraints, // otherwise that causes issue because our model are invalid in tests. 
void CpModelPresolver::RemoveEmptyConstraints() { + interval_representative_.clear(); std::vector interval_mapping(context_->working_model->constraints_size(), -1); int new_num_constraints = 0; @@ -120,11 +121,22 @@ void CpModelPresolver::RemoveEmptyConstraints() { const auto type = context_->working_model->constraints(c).constraint_case(); if (type == ConstraintProto::CONSTRAINT_NOT_SET) continue; if (type == ConstraintProto::kDummyConstraint) continue; - if (type == ConstraintProto::kInterval) { - interval_mapping[c] = new_num_constraints; - } - context_->working_model->mutable_constraints(new_num_constraints++) + context_->working_model->mutable_constraints(new_num_constraints) ->Swap(context_->working_model->mutable_constraints(c)); + if (type == ConstraintProto::kInterval) { + // Warning: interval_representative_ holds a pointer to the working model + // to compute hashes, so we need to be careful about not changing a + // constraint after its index is added to the map. + const auto [it, inserted] = interval_representative_.insert( + {new_num_constraints, new_num_constraints}); + interval_mapping[c] = it->second; + if (it->second != new_num_constraints) { + context_->UpdateRuleStats( + "intervals: change duplicate index across constraints"); + continue; + } + } + new_num_constraints++; } google::protobuf::util::Truncate( context_->working_model->mutable_constraints(), new_num_constraints); @@ -5659,7 +5671,12 @@ bool CpModelPresolver::PresolveNoOverlap2D(int /*c*/, ConstraintProto* ct) { std::vector> components = GetOverlappingRectangleComponents( bounding_boxes, absl::MakeSpan(active_boxes)); - if (components.size() > 1) { + // The result of GetOverlappingRectangleComponents() omit singleton components + // thus to check whether a graph is fully connected we must check also the + // size of the unique component. 
+ const bool is_fully_connected = + components.size() == 1 && components[0].size() == active_boxes.size(); + if (!is_fully_connected) { for (const absl::Span boxes : components) { if (boxes.size() <= 1) continue; @@ -5732,14 +5749,17 @@ LinearExpressionProto ConstantExpressionProto(int64_t value) { void CpModelPresolver::DetectDuplicateIntervals( int c, google::protobuf::RepeatedField* intervals) { + interval_representative_.clear(); bool changed = false; const int size = intervals->size(); for (int i = 0; i < size; ++i) { const int index = (*intervals)[i]; - const int new_index = context_->GetIntervalRepresentative(index); - if (index != new_index) { + const auto [it, inserted] = interval_representative_.insert({index, index}); + if (it->second != index) { changed = true; - intervals->Set(i, new_index); + intervals->Set(i, it->second); + context_->UpdateRuleStats( + "intervals: change duplicate index inside constraint"); } } if (changed) context_->UpdateConstraintVariableUsage(c); @@ -7284,8 +7304,13 @@ bool CpModelPresolver::PresolvePureSatPart() { // for blocked clause. It should be possible to allow for this by adding extra // variable to the mapping model at presolve and some linking constraints, but // this is messy. + // + // We also disable this if the user asked for tightened domain as this might + // fix variable to a potentially infeasible value, and just correct them later + // during postsolve of a particular solution. SatParameters params = context_->params(); - if (params.debug_postsolve_with_full_solver()) { + if (params.debug_postsolve_with_full_solver() || + params.fill_tightened_domains_in_response()) { params.set_presolve_blocked_clause(false); } @@ -7299,7 +7324,7 @@ bool CpModelPresolver::PresolvePureSatPart() { // detection as completely, so we still apply the other "probing" code // afterwards even if it will not fix more literals, but it will do one pass // of proper equivalence detection. 
- absl::StrongVector equiv_map; + util_intops::StrongVector equiv_map; if (!context_->params().debug_postsolve_with_full_solver() && num_ignored_variables == 0 && num_ignored_constraints == 0 && num_in_extra_constraints == 0) { @@ -8230,26 +8255,30 @@ bool CpModelPresolver::ProcessSetPPCSubset(int subset_c, int superset_c, } } if (best != 0) { + LinearConstraintProto new_ct = superset_ct->linear(); int new_size = 0; - for (int i = 0; i < superset_ct->linear().vars().size(); ++i) { - const int var = superset_ct->linear().vars(i); - int64_t coeff = superset_ct->linear().coeffs(i); + for (int i = 0; i < new_ct.vars().size(); ++i) { + const int var = new_ct.vars(i); + int64_t coeff = new_ct.coeffs(i); if (tmp_set->contains(var)) { if (coeff == best) continue; // delete term. coeff -= best; } - superset_ct->mutable_linear()->set_vars(new_size, var); - superset_ct->mutable_linear()->set_coeffs(new_size, coeff); + new_ct.set_vars(new_size, var); + new_ct.set_coeffs(new_size, coeff); ++new_size; } - superset_ct->mutable_linear()->mutable_vars()->Truncate(new_size); - superset_ct->mutable_linear()->mutable_coeffs()->Truncate(new_size); - FillDomainInProto(ReadDomainFromProto(superset_ct->linear()) - .AdditionWith(Domain(-best)), - superset_ct->mutable_linear()); - context_->UpdateConstraintVariableUsage(superset_c); - context_->UpdateRuleStats("setppc: reduced linear coefficients"); + new_ct.mutable_vars()->Truncate(new_size); + new_ct.mutable_coeffs()->Truncate(new_size); + FillDomainInProto(ReadDomainFromProto(new_ct).AdditionWith(Domain(-best)), + &new_ct); + if (!PossibleIntegerOverflow(*context_->working_model, new_ct.vars(), + new_ct.coeffs())) { + *superset_ct->mutable_linear() = std::move(new_ct); + context_->UpdateConstraintVariableUsage(superset_c); + context_->UpdateRuleStats("setppc: reduced linear coefficients"); + } } return true; @@ -8597,7 +8626,10 @@ bool CpModelPresolver::ProcessEncodingFromLinear( for (const int64_t v : 
context_->DomainOf(target_ref).Values()) { value_set.insert(v); } - for (const auto& [value, literals] : value_to_refs) { + for (auto& [value, literals] : value_to_refs) { + // For determinism. + absl::c_sort(literals); + // If the value is not in the domain, just set all literal to false. if (!value_set.contains(value)) { for (const int lit : literals) { @@ -9506,7 +9538,9 @@ bool CpModelPresolver::RemoveCommonPart( } // We isolated the Boolean in tmp_terms_, use the helper to get - // more precise activity bounds. + // more precise activity bounds. Note that while tmp_terms_ was built from + // a hash map and is in an unspecified order, the Compute*Activity() helpers + // will still return a deterministic result. if (!tmp_terms_.empty()) { min_activity += helper->ComputeMinActivity(tmp_terms_); max_activity += helper->ComputeMaxActivity(tmp_terms_); @@ -11555,13 +11589,20 @@ void ModelCopy::ImportVariablesAndMaybeIgnoreNames( } } +void ModelCopy::CreateVariablesFromDomains(const std::vector& domains) { + for (const Domain& domain : domains) { + FillDomainInProto(domain, context_->working_model->add_variables()); + } +} + // TODO(user): Merge with the phase 1 of the presolve code. // // TODO(user): It seems easy to forget to update this if any new constraint // contains an interval or if we add a field to an existing constraint. Find a // way to remind contributor to not forget this. 
-bool ModelCopy::ImportAndSimplifyConstraints(const CpModelProto& in_model, - bool first_copy) { +bool ModelCopy::ImportAndSimplifyConstraints( + const CpModelProto& in_model, bool first_copy, + std::function active_constraints) { context_->InitializeNewDomains(); const bool ignore_names = context_->params().ignore_names(); @@ -11571,6 +11612,7 @@ bool ModelCopy::ImportAndSimplifyConstraints(const CpModelProto& in_model, starting_constraint_index_ = context_->working_model->constraints_size(); for (int c = 0; c < in_model.constraints_size(); ++c) { + if (active_constraints != nullptr && !active_constraints(c)) continue; const ConstraintProto& ct = in_model.constraints(c); if (first_copy) { if (!PrepareEnforcementCopyWithDup(ct)) continue; @@ -12117,6 +12159,21 @@ bool ImportModelWithBasicPresolveIntoContext(const CpModelProto& in_model, return !context->ModelIsUnsat(); } +bool ImportModelAndDomainsWithBasicPresolveIntoContext( + const CpModelProto& in_model, const std::vector& domains, + std::function active_constraints, PresolveContext* context) { + CHECK_EQ(domains.size(), in_model.variables_size()); + ModelCopy copier(context); + copier.CreateVariablesFromDomains(domains); + if (copier.ImportAndSimplifyConstraints(in_model, /*first_copy=*/false, + active_constraints)) { + CopyEverythingExceptVariablesAndConstraintsFieldsIntoContext(in_model, + context); + return true; + } + return !context->ModelIsUnsat(); +} + void CopyEverythingExceptVariablesAndConstraintsFieldsIntoContext( const CpModelProto& in_model, PresolveContext* context) { if (!in_model.name().empty()) { @@ -12342,13 +12399,29 @@ CpModelPresolver::CpModelPresolver(PresolveContext* context, : postsolve_mapping_(postsolve_mapping), context_(context), logger_(context->logger()), - time_limit_(context->time_limit()) {} + time_limit_(context->time_limit()), + interval_representative_(context->working_model->constraints_size(), + IntervalConstraintHash{context->working_model}, + 
IntervalConstraintEq{context->working_model}) {} CpSolverStatus CpModelPresolver::InfeasibleStatus() { if (logger_->LoggingIsEnabled()) context_->LogInfo(); return CpSolverStatus::INFEASIBLE; } +void CpModelPresolver::InitializeMappingModelVariables() { + // Sync the domains. + for (int i = 0; i < context_->working_model->variables_size(); ++i) { + FillDomainInProto(context_->DomainOf(i), + context_->working_model->mutable_variables(i)); + DCHECK_GT(context_->working_model->variables(i).domain_size(), 0); + } + + // Set the variables of the mapping_model. + context_->mapping_model->mutable_variables()->CopyFrom( + context_->working_model->variables()); +} + // The presolve works as follow: // // First stage: @@ -12369,7 +12442,6 @@ CpSolverStatus CpModelPresolver::Presolve() { context_->keep_all_feasible_solutions = context_->params().keep_all_feasible_solutions_in_presolve() || context_->params().enumerate_all_solutions() || - context_->params().fill_tightened_domains_in_response() || !context_->working_model->assumptions().empty() || !context_->params().cp_model_presolve(); @@ -12428,6 +12500,13 @@ CpSolverStatus CpModelPresolver::Presolve() { // We need to append all the variable equivalence that are still used! EncodeAllAffineRelations(); + + // Make sure we also have an initialized mapping model as we use this for + // filling the tightened variables. Even without presolve, we do some + // trivial presolving during the initial copy of the model, and expansion + // might do more. + InitializeMappingModelVariables(); + if (logger_->LoggingIsEnabled()) context_->LogInfo(); return CpSolverStatus::UNKNOWN; } @@ -12657,16 +12736,8 @@ CpSolverStatus CpModelPresolver::Presolve() { google::protobuf::util::Truncate(strategy.mutable_exprs(), new_size); } - // Sync the domains. 
- for (int i = 0; i < context_->working_model->variables_size(); ++i) { - FillDomainInProto(context_->DomainOf(i), - context_->working_model->mutable_variables(i)); - DCHECK_GT(context_->working_model->variables(i).domain_size(), 0); - } - - // Set the variables of the mapping_model. - context_->mapping_model->mutable_variables()->CopyFrom( - context_->working_model->variables()); + // Sync the domains and initialize the mapping model variables. + InitializeMappingModelVariables(); // Remove all the unused variables from the presolved model. postsolve_mapping_->clear(); @@ -12891,18 +12962,6 @@ void ApplyVariableMapping(const std::vector& mapping, namespace { -ConstraintProto CopyConstraintForDuplicateDetection(const ConstraintProto& ct, - bool ignore_enforcement) { - ConstraintProto copy = ct; - copy.clear_name(); - if (ignore_enforcement) { - copy.mutable_enforcement_literal()->Clear(); - } else if (ct.constraint_case() == ConstraintProto::kLinear) { - copy.mutable_linear()->clear_domain(); - } - return copy; -} - // We ignore all the fields but the linear expression. ConstraintProto CopyObjectiveForDuplicateDetection( const CpObjectiveProto& objective) { @@ -12912,22 +12971,154 @@ ConstraintProto CopyObjectiveForDuplicateDetection( return copy; } +struct ConstraintHashForDuplicateDetection { + const CpModelProto* working_model; + bool ignore_enforcement; + ConstraintProto objective_constraint; + + ConstraintHashForDuplicateDetection(const CpModelProto* working_model, + bool ignore_enforcement) + : working_model(working_model), + ignore_enforcement(ignore_enforcement), + objective_constraint( + CopyObjectiveForDuplicateDetection(working_model->objective())) {} + + // We hash our mostly frequently used constraint directly without extra memory + // allocation. We revert to a generic code using proto serialization for the + // others. + std::size_t operator()(int ct_idx) const { + const ConstraintProto& ct = ct_idx == kObjectiveConstraint + ? 
objective_constraint + : working_model->constraints(ct_idx); + const std::pair> + type_and_enforcement = {ct.constraint_case(), + ignore_enforcement + ? absl::Span() + : absl::MakeSpan(ct.enforcement_literal())}; + switch (ct.constraint_case()) { + case ConstraintProto::kLinear: + if (ignore_enforcement) { + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.linear().vars()), + absl::MakeSpan(ct.linear().coeffs()), + absl::MakeSpan(ct.linear().domain())); + } else { + // We ignore domain for linear constraint, because if the rest of the + // constraint is the same we can just intersect them. + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.linear().vars()), + absl::MakeSpan(ct.linear().coeffs())); + } + case ConstraintProto::kBoolAnd: + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.bool_and().literals())); + case ConstraintProto::kBoolOr: + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.bool_or().literals())); + case ConstraintProto::kAtMostOne: + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.at_most_one().literals())); + case ConstraintProto::kExactlyOne: + return absl::HashOf(type_and_enforcement, + absl::MakeSpan(ct.exactly_one().literals())); + default: + ConstraintProto copy = ct; + copy.clear_name(); + if (ignore_enforcement) { + copy.mutable_enforcement_literal()->Clear(); + } + return absl::HashOf(copy.SerializeAsString()); + } + } +}; + +struct ConstraintEqForDuplicateDetection { + const CpModelProto* working_model; + bool ignore_enforcement; + ConstraintProto objective_constraint; + + ConstraintEqForDuplicateDetection(const CpModelProto* working_model, + bool ignore_enforcement) + : working_model(working_model), + ignore_enforcement(ignore_enforcement), + objective_constraint( + CopyObjectiveForDuplicateDetection(working_model->objective())) {} + + bool operator()(int a, int b) const { + if (a == b) { + return true; + } + const ConstraintProto& ct_a = a == kObjectiveConstraint + ? 
objective_constraint + : working_model->constraints(a); + const ConstraintProto& ct_b = b == kObjectiveConstraint + ? objective_constraint + : working_model->constraints(b); + + if (ct_a.constraint_case() != ct_b.constraint_case()) return false; + if (!ignore_enforcement) { + if (absl::MakeSpan(ct_a.enforcement_literal()) != + absl::MakeSpan(ct_b.enforcement_literal())) { + return false; + } + } + switch (ct_a.constraint_case()) { + case ConstraintProto::kLinear: + // As above, we ignore domain for linear constraint, because if the rest + // of the constraint is the same we can just intersect them. + if (ignore_enforcement && absl::MakeSpan(ct_a.linear().domain()) != + absl::MakeSpan(ct_b.linear().domain())) { + return false; + } + return absl::MakeSpan(ct_a.linear().vars()) == + absl::MakeSpan(ct_b.linear().vars()) && + absl::MakeSpan(ct_a.linear().coeffs()) == + absl::MakeSpan(ct_b.linear().coeffs()); + case ConstraintProto::kBoolAnd: + return absl::MakeSpan(ct_a.bool_and().literals()) == + absl::MakeSpan(ct_b.bool_and().literals()); + case ConstraintProto::kBoolOr: + return absl::MakeSpan(ct_a.bool_or().literals()) == + absl::MakeSpan(ct_b.bool_or().literals()); + case ConstraintProto::kAtMostOne: + return absl::MakeSpan(ct_a.at_most_one().literals()) == + absl::MakeSpan(ct_b.at_most_one().literals()); + case ConstraintProto::kExactlyOne: + return absl::MakeSpan(ct_a.exactly_one().literals()) == + absl::MakeSpan(ct_b.exactly_one().literals()); + default: + // Slow (hopefully comparably rare) path. 
+ ConstraintProto copy_a = ct_a; + ConstraintProto copy_b = ct_b; + copy_a.clear_name(); + copy_b.clear_name(); + if (ignore_enforcement) { + copy_a.mutable_enforcement_literal()->Clear(); + copy_b.mutable_enforcement_literal()->Clear(); + } + return copy_a.SerializeAsString() == copy_b.SerializeAsString(); + } + } +}; + } // namespace std::vector> FindDuplicateConstraints( const CpModelProto& model_proto, bool ignore_enforcement) { std::vector> result; - // We use a map hash: serialized_constraint_proto hash -> constraint index. - ConstraintProto copy; - std::string s; - absl::flat_hash_map equiv_constraints; + // We use a map hash that uses the underlying constraint to compute the hash + // and the equality for the indices. + absl::flat_hash_map + equiv_constraints( + model_proto.constraints_size(), + ConstraintHashForDuplicateDetection{&model_proto, ignore_enforcement}, + ConstraintEqForDuplicateDetection{&model_proto, ignore_enforcement}); // Create a special representative for the linear objective. if (model_proto.has_objective() && !ignore_enforcement) { - copy = CopyObjectiveForDuplicateDetection(model_proto.objective()); - s = copy.SerializeAsString(); - equiv_constraints[absl::Hash()(s)] = kObjectiveConstraint; + equiv_constraints[kObjectiveConstraint] = kObjectiveConstraint; } const int num_constraints = model_proto.constraints().size(); @@ -12942,31 +13133,49 @@ std::vector> FindDuplicateConstraints( // Nothing we will presolve in this case. if (ignore_enforcement && type == ConstraintProto::kBoolAnd) continue; - // We ignore names when comparing constraints. - // - // TODO(user): This is not particularly efficient. 
- copy = CopyConstraintForDuplicateDetection(model_proto.constraints(c), - ignore_enforcement); - s = copy.SerializeAsString(); - - const uint64_t hash = absl::Hash()(s); - const auto [it, inserted] = equiv_constraints.insert({hash, c}); - if (!inserted) { + const auto [it, inserted] = equiv_constraints.insert({c, c}); + if (it->second != c) { // Already present! - const int other_c_with_same_hash = it->second; - copy = other_c_with_same_hash == kObjectiveConstraint - ? CopyObjectiveForDuplicateDetection(model_proto.objective()) - : CopyConstraintForDuplicateDetection( - model_proto.constraints(other_c_with_same_hash), - ignore_enforcement); - if (s == copy.SerializeAsString()) { - result.push_back({c, other_c_with_same_hash}); - } + result.push_back({c, it->second}); } } return result; } +namespace { +bool SimpleLinearExprEq(const LinearExpressionProto& a, + const LinearExpressionProto& b) { + return absl::MakeSpan(a.vars()) == absl::MakeSpan(b.vars()) && + absl::MakeSpan(a.coeffs()) == absl::MakeSpan(b.coeffs()) && + a.offset() == b.offset(); +} + +std::size_t LinearExpressionHash(const LinearExpressionProto& expr) { + return absl::HashOf(absl::MakeSpan(expr.vars()), + absl::MakeSpan(expr.coeffs()), expr.offset()); +} + +} // namespace + +bool CpModelPresolver::IntervalConstraintEq::operator()(int a, int b) const { + const ConstraintProto& ct_a = working_model->constraints(a); + const ConstraintProto& ct_b = working_model->constraints(b); + return absl::MakeSpan(ct_a.enforcement_literal()) == + absl::MakeSpan(ct_b.enforcement_literal()) && + SimpleLinearExprEq(ct_a.interval().start(), ct_b.interval().start()) && + SimpleLinearExprEq(ct_a.interval().size(), ct_b.interval().size()) && + SimpleLinearExprEq(ct_a.interval().end(), ct_b.interval().end()); +} + +std::size_t CpModelPresolver::IntervalConstraintHash::operator()( + int ct_idx) const { + const ConstraintProto& ct = working_model->constraints(ct_idx); + return 
absl::HashOf(absl::MakeSpan(ct.enforcement_literal()), + LinearExpressionHash(ct.interval().start()), + LinearExpressionHash(ct.interval().size()), + LinearExpressionHash(ct.interval().end())); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 39c3377a6b..017670a37c 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -15,8 +15,10 @@ #define OR_TOOLS_SAT_CP_MODEL_PRESOLVE_H_ #include +#include #include #include +#include #include #include @@ -24,13 +26,10 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_utils.h" #include "ortools/sat/presolve_context.h" #include "ortools/sat/presolve_util.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/util.h" -#include "ortools/util/affine_relation.h" -#include "ortools/util/bitset.h" #include "ortools/util/logging.h" #include "ortools/util/sorted_interval_list.h" #include "ortools/util/time_limit.h" @@ -91,6 +90,11 @@ class CpModelPresolver { // A simple helper that logs the rules applied so far and return INFEASIBLE. CpSolverStatus InfeasibleStatus(); + // At the end of presolve, the mapping model is initialized to contains all + // the variable from the original model + the one created during presolve + // expand. It also contains the tightened domains. + void InitializeMappingModelVariables(); + // Runs the inner loop of the presolver. 
bool ProcessChangedVariables(std::vector* in_queue, std::deque* queue); @@ -341,10 +345,26 @@ class CpModelPresolver { MaxBoundedSubsetSum lb_infeasible_; MaxBoundedSubsetSum ub_feasible_; MaxBoundedSubsetSum ub_infeasible_; + + struct IntervalConstraintEq { + const CpModelProto* working_model; + bool operator()(int a, int b) const; + }; + + struct IntervalConstraintHash { + const CpModelProto* working_model; + std::size_t operator()(int ct_idx) const; + }; + + // Used by DetectDuplicateIntervals() and RemoveEmptyConstraints(). Note that + // changing the interval constraints of the model will change the hash and + // invalidate this hash map. + absl::flat_hash_map + interval_representative_; }; // This helper class perform copy with simplification from a model and a -// partial assignment to another model. The purpose is to miminize the size of +// partial assignment to another model. The purpose is to minimize the size of // the copied model, as well as to reduce the pressure on the memory sub-system. // // It is currently used by the LNS part, but could be used with any other scheme @@ -367,14 +387,19 @@ class ModelCopy { // Note(user): If first_copy is true, we will reorder the scheduling // constraint so that they only use reference to previously defined intervals. // This allow to be more efficient later in a few preprocessing steps. - bool ImportAndSimplifyConstraints(const CpModelProto& in_model, - bool first_copy = false); + bool ImportAndSimplifyConstraints( + const CpModelProto& in_model, bool first_copy = false, + std::function active_constraints = nullptr); // Copy variables from the in_model to the working model. // It reads the 'ignore_names' parameters from the context, and keeps or // deletes names accordingly. void ImportVariablesAndMaybeIgnoreNames(const CpModelProto& in_model); + // Setup new variables from a vector of domains. + // Inactive variables will be fixed to their lower bound. 
+ void CreateVariablesFromDomains(const std::vector& domains); + private: // Overwrites the out_model to be unsat. Returns false. // The arguments are used to log which constraint caused unsat. @@ -436,6 +461,12 @@ class ModelCopy { bool ImportModelWithBasicPresolveIntoContext(const CpModelProto& in_model, PresolveContext* context); +// Same as ImportModelWithBasicPresolveIntoContext() except that variable +// domains are read from domains. +bool ImportModelAndDomainsWithBasicPresolveIntoContext( + const CpModelProto& in_model, const std::vector& domains, + std::function active_constraints, PresolveContext* context); + // Copies the non constraint, non variables part of the model. void CopyEverythingExceptVariablesAndConstraintsFieldsIntoContext( const CpModelProto& in_model, PresolveContext* context); diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 0cd842070b..10c77ba015 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -14,6 +14,7 @@ #include "ortools/sat/cp_model_search.h" #include +#include #include #include #include @@ -28,6 +29,7 @@ #include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" +#include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" @@ -164,6 +166,7 @@ void AddDualSchedulingHeuristics(SatParameters& new_params) { new_params.set_use_timetabling_in_no_overlap_2d(true); new_params.set_use_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_area_energetic_reasoning_in_no_overlap_2d(true); + new_params.set_use_conservative_scale_overload_checker(true); } // We want a random tie breaking among variables with equivalent values. 
@@ -422,7 +425,7 @@ std::function ConstructFixedSearchStrategy( std::function InstrumentSearchStrategy( const CpModelProto& cp_model_proto, const std::vector& variable_mapping, - const std::function& instrumented_strategy, + std::function instrumented_strategy, Model* model) { std::vector ref_to_display; for (int i = 0; i < cp_model_proto.variables_size(); ++i) { @@ -436,7 +439,7 @@ std::function InstrumentSearchStrategy( }); std::vector> old_domains(variable_mapping.size()); - return [instrumented_strategy, model, variable_mapping, cp_model_proto, + return [instrumented_strategy, model, &variable_mapping, &cp_model_proto, old_domains, ref_to_display]() mutable { const BooleanOrIntegerLiteral decision = instrumented_strategy(); if (!decision.HasValue()) return decision; @@ -476,22 +479,15 @@ std::function InstrumentSearchStrategy( }; } -// This generates a valid random seed (base_seed + delta) without overflow. -// We assume |delta| is small. -int ValidSumSeed(int base_seed, int delta) { - CHECK_GE(delta, 0); - int64_t result = int64_t{base_seed} + int64_t{delta}; - const int64_t int32max = int64_t{std::numeric_limits::max()}; - while (result > int32max) { - result -= int32max; - } - return static_cast(result); -} - absl::flat_hash_map GetNamedParameters( - const SatParameters& base_params) { + SatParameters base_params) { absl::flat_hash_map strategies; + // By default we disable the logging when we generate a set of parameter. It + // is possible to force it by setting it in the corresponding named parameter + // via the subsolver_params field. + base_params.set_log_search_progress(false); + // The "default" name can be used for the base_params unchanged. 
strategies["default"] = base_params; @@ -593,13 +589,34 @@ absl::flat_hash_map GetNamedParameters( AddDualSchedulingHeuristics(new_params); } - strategies["objective_shaving_search"] = new_params; + strategies["objective_shaving"] = new_params; new_params.set_linearization_level(0); - strategies["objective_shaving_search_no_lp"] = new_params; + strategies["objective_shaving_no_lp"] = new_params; new_params.set_linearization_level(2); - strategies["objective_shaving_search_max_lp"] = new_params; + strategies["objective_shaving_max_lp"] = new_params; + } + + { + SatParameters new_params = base_params; + new_params.set_use_variables_shaving_search(true); + new_params.set_cp_model_presolve(true); + new_params.set_cp_model_probing_level(0); + new_params.set_symmetry_level(0); + new_params.set_share_objective_bounds(false); + new_params.set_share_level_zero_bounds(false); + + strategies["variables_shaving"] = new_params; + + new_params.set_linearization_level(0); + strategies["variables_shaving_no_lp"] = new_params; + + if (base_params.use_dual_scheduling_heuristics()) { + AddDualSchedulingHeuristics(new_params); + } + new_params.set_linearization_level(2); + strategies["variables_shaving_max_lp"] = new_params; } { @@ -678,7 +695,15 @@ absl::flat_hash_map GetNamedParameters( // Base parameters for shared tree worker. { SatParameters new_params = base_params; + new_params.set_use_shared_tree_search(true); new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH); + + // These settings don't make sense with shared tree search, turn them off as + // they can break things. 
+ new_params.set_optimize_with_core(false); + new_params.set_optimize_with_lb_tree_search(false); + new_params.set_optimize_with_max_hs(false); + strategies["shared_tree"] = new_params; } @@ -687,10 +712,15 @@ absl::flat_hash_map GetNamedParameters( SatParameters new_params = base_params; new_params.set_stop_after_first_solution(false); new_params.set_cp_model_presolve(true); + + // We disable costly presolve/inprocessing. + new_params.set_use_sat_inprocessing(false); new_params.set_cp_model_probing_level(0); new_params.set_symmetry_level(0); new_params.set_find_big_linear_overlap(false); + new_params.set_log_search_progress(false); + new_params.set_debug_crash_on_bad_hint(false); // Can happen in lns. new_params.set_solution_pool_size(1); // Keep the best solution found. strategies["lns"] = new_params; } @@ -710,6 +740,11 @@ absl::flat_hash_map GetNamedParameters( } } + // Fix names (we don't set them above). + for (auto& [name, params] : strategies) { + params.set_name(name); + } + return strategies; } @@ -720,8 +755,9 @@ absl::flat_hash_map GetNamedParameters( // - Disable linearization_level options for non linear problems // - Fast restart in randomized search // - Different propatation levels for scheduling constraints -std::vector GetDiverseSetOfParameters( - const SatParameters& base_params, const CpModelProto& cp_model) { +std::vector GetFullWorkerParameters( + const SatParameters& base_params, const CpModelProto& cp_model, + int num_already_present, SubsolverNameFilter* filter) { // Defines a set of named strategies so it is easier to read in one place // the one that are used. See below. 
const auto strategies = GetNamedParameters(base_params); @@ -761,9 +797,10 @@ std::vector GetDiverseSetOfParameters( names.push_back("lb_tree_search"); names.push_back("probing"); names.push_back("objective_lb_search"); - names.push_back("objective_shaving_search_no_lp"); - names.push_back("objective_shaving_search_max_lp"); + names.push_back("objective_shaving_no_lp"); + names.push_back("objective_shaving_max_lp"); names.push_back("probing_max_lp"); + names.push_back("probing_no_lp"); names.push_back("objective_lb_search_no_lp"); names.push_back("objective_lb_search_max_lp"); @@ -788,14 +825,11 @@ std::vector GetDiverseSetOfParameters( } // Remove the names that should be ignored. - absl::flat_hash_set to_ignore; - for (const std::string& name : base_params.ignore_subsolvers()) { - to_ignore.insert(name); - } int new_size = 0; for (const std::string& name : names) { - if (to_ignore.contains(name)) continue; - names[new_size++] = name; + if (filter->Keep(name)) { + names[new_size++] = name; + } } names.resize(new_size); @@ -817,7 +851,9 @@ std::vector GetDiverseSetOfParameters( // TODO(user): Enable shaving search in interleave mode. // Currently it do not respect ^C, and has no per chunk time limit. - if (params.use_objective_shaving_search() && params.interleave_search()) { + if ((params.use_objective_shaving_search() || + params.use_variables_shaving_search()) && + params.interleave_search()) { continue; } @@ -855,24 +891,26 @@ std::vector GetDiverseSetOfParameters( } // Add this strategy. - // - // TODO(user): Find a better randomization for the seed so that changing - // random_seed() has more impact? params.set_name(name); - params.set_random_seed(ValidSumSeed(base_params.random_seed(), - static_cast(result.size()) + 1)); + params.set_random_seed(CombineSeed( + base_params.random_seed(), static_cast(result.size()) + 1)); result.push_back(params); } - // In interleaved mode, we run all of them + // In interleaved mode, we run all of them. 
+ // // TODO(user): Actually make sure the gap num_workers <-> num_heuristics is // contained. if (base_params.interleave_search()) return result; - const int num_non_shared_workers = std::max( - 0, base_params.num_workers() - base_params.shared_tree_num_workers()); + // Apply the logic for how many we keep. + int num_to_keep = base_params.num_full_subsolvers(); + if (num_to_keep == 0) { + // Derive some automatic number to leave room for LS/LNS and other + // strategies not taken into account here. + const int num_available = + std::max(0, base_params.num_workers() - num_already_present); - if (cp_model.has_objective() && !cp_model.objective().vars().empty()) { const auto heuristic_num_workers = [](int num_workers) { DCHECK_GE(num_workers, 0); if (num_workers == 1) return 1; @@ -881,100 +919,200 @@ std::vector GetDiverseSetOfParameters( if (num_workers <= 16) return num_workers - (num_workers / 4 + 1); return num_workers - (num_workers / 2 - 3); }; - const int target = std::min( - heuristic_num_workers(num_non_shared_workers), result.size()); - // If there is an objective, the extra workers will use LNS. - // Make sure we have at least min_num_lns_workers() of them. - if (result.size() > target) result.resize(target); - } else { // No objective. - // If strategies that do not require a full worker are present, leave a - // few workers for them. - const bool need_extra_workers = - (base_params.use_rins_lns() || base_params.use_feasibility_pump()); - // Currently, we have 8 SAT search heuristics. So - const int num_extra_workers = - num_non_shared_workers <= 4 ? 
0 : 1 + need_extra_workers; - const int target = std::min(num_non_shared_workers - num_extra_workers, - result.size()); - if (result.size() > target) result.resize(target); + num_to_keep = heuristic_num_workers(num_available); + } + + if (result.size() > num_to_keep) { + result.resize(std::max(0, num_to_keep)); } return result; } -std::vector GetFirstSolutionParams( - const SatParameters& base_params, const CpModelProto& /*cp_model*/, - int num_params_to_generate) { +std::vector GetFirstSolutionBaseParams( + const SatParameters& base_params) { std::vector result; - if (num_params_to_generate <= 0) return result; - int num_random = 0; - int num_random_qr = 0; - while (result.size() < num_params_to_generate) { + + const auto get_base = [&result, &base_params](bool fj) { SatParameters new_params = base_params; + new_params.set_log_search_progress(false); + new_params.set_use_feasibility_jump(fj); + const int base_seed = base_params.random_seed(); - if (num_random <= num_random_qr) { // Random search. - new_params.set_search_branching(SatParameters::RANDOMIZED_SEARCH); - new_params.set_search_random_variable_pool_size(5); - new_params.set_random_seed(ValidSumSeed(base_seed, 2 * num_random + 1)); - if (num_random % 2 == 1) { - new_params.set_name("random_no_lp"); - new_params.set_linearization_level(0); - } else { - new_params.set_name("random"); - } - num_random++; - } else { // Random quick restart. - new_params.set_search_branching( - SatParameters::PORTFOLIO_WITH_QUICK_RESTART_SEARCH); - new_params.set_search_random_variable_pool_size(5); - new_params.set_random_seed(ValidSumSeed(base_seed, 2 * num_random_qr)); - if (num_random_qr % 2 == 1) { - new_params.set_name("random_quick_restart_no_lp"); - new_params.set_linearization_level(0); - } else { - new_params.set_name("random_quick_restart"); - } - num_random_qr++; + new_params.set_random_seed(CombineSeed(base_seed, result.size())); + return new_params; + }; + + // Add one feasibility jump. 
+ if (base_params.use_feasibility_jump()) { + SatParameters new_params = get_base(true); + new_params.set_name("fj"); + new_params.set_feasibility_jump_linearization_level(0); + result.push_back(new_params); + } + + // Random search. + for (int i = 0; i < 2; ++i) { + SatParameters new_params = get_base(false); + new_params.set_search_random_variable_pool_size(5); + new_params.set_search_branching(SatParameters::RANDOMIZED_SEARCH); + if (i % 2 == 0) { + new_params.set_name("fs_random_no_lp"); + new_params.set_linearization_level(0); + } else { + new_params.set_name("fs_random"); } result.push_back(new_params); } - return result; -} -std::vector GetWorkSharingParams( - const SatParameters& base_params, const CpModelProto& cp_model, - int num_params_to_generate) { - std::vector result; - // TODO(user): We could support assumptions, it's just not implemented. - if (!cp_model.assumptions().empty()) return result; - if (num_params_to_generate <= 0) return result; + // Add a second feasibility jump. + if (base_params.use_feasibility_jump()) { + SatParameters new_params = get_base(true); + new_params.set_name("fj"); + new_params.set_feasibility_jump_linearization_level(0); + result.push_back(new_params); + } - const auto strategies = GetNamedParameters(base_params); - const SatParameters& shared_tree_base_params = strategies.at("shared_tree"); - int num_workers = 0; - while (result.size() < num_params_to_generate) { - SatParameters new_params = shared_tree_base_params; - const int base_seed = base_params.random_seed(); - new_params.set_random_seed(ValidSumSeed(base_seed, 2 * num_workers + 1)); - // We force this parameter as it could have been forgotten when set - // manually. - new_params.set_use_shared_tree_search(true); + // Random quick restart. 
+ for (int i = 0; i < 2; ++i) { + SatParameters new_params = get_base(false); + new_params.set_search_random_variable_pool_size(5); + new_params.set_search_branching( + SatParameters::PORTFOLIO_WITH_QUICK_RESTART_SEARCH); + if (i % 2 == 0) { + new_params.set_name("fs_random_quick_restart_no_lp"); + new_params.set_linearization_level(0); + } else { + new_params.set_name("fs_random_quick_restart"); + } + result.push_back(new_params); + } - // These settings don't make sense with shared tree search, turn them off as - // they can break things. - new_params.set_optimize_with_core(false); - new_params.set_optimize_with_lb_tree_search(false); - new_params.set_optimize_with_max_hs(false); - - absl::string_view lp_tags[] = {"no", "default", "max"}; - new_params.set_name(absl::StrCat( - "shared_", lp_tags[std::min(new_params.linearization_level(), 2)], - "_lp_", num_workers)); - num_workers++; + // Add a linear feasibility jump. + // This one seems to perform worse, so we add only 1 for 2 normal LS, and we + // add this late. + if (base_params.use_feasibility_jump()) { + SatParameters new_params = get_base(true); + new_params.set_name("fj_lin"); + new_params.set_feasibility_jump_linearization_level(2); result.push_back(new_params); } return result; } + +std::vector RepeatParameters( + absl::Span base_params, int num_params_to_generate) { + // Return if we are done. + std::vector result; + result.assign(base_params.begin(), base_params.end()); + if (result.empty()) return result; + if (result.size() >= num_params_to_generate) { + result.resize(num_params_to_generate); + return result; + } + + // Repeat parameters until we have enough. 
+ int i = 0; + const int base_size = result.size(); + while (result.size() < num_params_to_generate) { + result.push_back(result[i % base_size]); + result.back().set_random_seed(CombineSeed(result.back().random_seed(), i)); + ++i; + } + return result; +} + +SubsolverNameFilter::SubsolverNameFilter(const SatParameters& params) { + for (const auto& pattern : params.filter_subsolvers()) { + filter_patterns_.push_back(pattern); + } + for (const auto& pattern : params.ignore_subsolvers()) { + ignore_patterns_.push_back(pattern); + } + + // Hack for backward compatibility and easy of use. + if (params.use_ls_only()) { + filter_patterns_.push_back("ls*"); + filter_patterns_.push_back("fj*"); + } + + if (params.use_lns_only()) { + // Still add first solution solvers. + filter_patterns_.push_back("fj*"); + filter_patterns_.push_back("fs*"); + filter_patterns_.push_back("*lns"); + } +} + +bool SubsolverNameFilter::Keep(absl::string_view name) { + last_name_ = name; + if (!filter_patterns_.empty()) { + bool keep = false; + for (const absl::string_view pattern : filter_patterns_) { + if (FNMatch(pattern, name)) { + keep = true; + break; + } + } + if (!keep) { + ignored_.emplace_back(name); + return false; + } + } + for (const absl::string_view pattern : ignore_patterns_) { + if (FNMatch(pattern, name)) { + ignored_.emplace_back(name); + return false; + } + } + return true; +} + +bool SubsolverNameFilter::FNMatch(absl::string_view pattern, + absl::string_view str) { + bool in_wildcard_match = false; + while (true) { + if (pattern.empty()) { + return in_wildcard_match || str.empty(); + } + if (str.empty()) { + return pattern.find_first_not_of('*') == pattern.npos; + } + switch (pattern.front()) { + case '*': + pattern.remove_prefix(1); + in_wildcard_match = true; + break; + case '?': + pattern.remove_prefix(1); + str.remove_prefix(1); + break; + default: + if (in_wildcard_match) { + absl::string_view fixed_portion = pattern; + const size_t end = 
fixed_portion.find_first_of("*?"); + if (end != fixed_portion.npos) { + fixed_portion = fixed_portion.substr(0, end); + } + const size_t match = str.find(fixed_portion); + if (match == str.npos) { + return false; + } + pattern.remove_prefix(fixed_portion.size()); + str.remove_prefix(match + fixed_portion.size()); + in_wildcard_match = false; + } else { + if (pattern.front() != str.front()) { + return false; + } + pattern.remove_prefix(1); + str.remove_prefix(1); + } + break; + } + } +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_search.h b/ortools/sat/cp_model_search.h index 34e74221fd..2e005b818c 100644 --- a/ortools/sat/cp_model_search.h +++ b/ortools/sat/cp_model_search.h @@ -97,7 +97,7 @@ std::function ConstructFixedSearchStrategy( std::function InstrumentSearchStrategy( const CpModelProto& cp_model_proto, const std::vector& variable_mapping, - const std::function& instrumented_strategy, + std::function instrumented_strategy, Model* model); // Returns all the named set of parameters known to the solver. This include our @@ -106,29 +106,69 @@ std::function InstrumentSearchStrategy( // // Usually, named strategies just override a few field from the base_params. absl::flat_hash_map GetNamedParameters( + SatParameters base_params); + +// Returns a list of full workers to run. +class SubsolverNameFilter; +std::vector GetFullWorkerParameters( + const SatParameters& base_params, const CpModelProto& cp_model, + int num_already_present, SubsolverNameFilter* name_filter); + +// Given a base set of parameter, if non-empty, this repeat them (round-robbin) +// until we get num_params_to_generate. Note that if we don't have a multiple, +// the first base parameters will be repeated more than the others. +// +// Note that this will also change the random_seed of each of these parameters. 
+std::vector RepeatParameters( + absl::Span base_params, int num_params_to_generate); + +// Returns a vector of base parameters to specify solvers specialized to find a +// initial solution. This is meant to be used with RepeatParameters() and +// FilterParameters(). +std::vector GetFirstSolutionBaseParams( const SatParameters& base_params); -// Returns up to base_params.num_workers() different parameters. -// We do not always return num_worker parameters to leave room for strategies -// like LNS that do not consume a full worker and can always be interleaved. -std::vector GetDiverseSetOfParameters( - const SatParameters& base_params, const CpModelProto& cp_model); +// Simple class used to filter executed subsolver names. +class SubsolverNameFilter { + public: + // Warning, params must outlive the class and be constant. + explicit SubsolverNameFilter(const SatParameters& params); -// Returns a vector of num_params_to_generate set of parameters to specify -// solvers specialized to find a initial solution. -std::vector GetFirstSolutionParams( - const SatParameters& base_params, const CpModelProto& cp_model, - int num_params_to_generate); + // Shall we keep a parameter with given name? + bool Keep(absl::string_view name); -// Returns a vector of num_params_to_generate set of parameters to specify -// solvers that cooperatively explore a search tree. -std::vector GetWorkSharingParams( - const SatParameters& base_params, const CpModelProto& cp_model, - int num_params_to_generate); + // Applies Keep() to all the input list. + std::vector Filter(absl::Span input) { + std::vector result; + for (const SatParameters& param : input) { + if (Keep(param.name())) { + result.push_back(param); + } + } + return result; + } -// This generates a valid random seed (base_seed + delta) without overflow. -// We assume |delta| is small. 
-int ValidSumSeed(int base_seed, int delta); + // This is just a convenient function to follow the pattern + // if (filter.Keep("my_name")) subsovers.Add(.... filter.LastName() ... ) + // And not repeat "my_name" twice. + std::string LastName() const { return last_name_; } + + // Returns the list of all ignored subsolver for use in logs. + const std::vector& AllIgnored() { + gtl::STLSortAndRemoveDuplicates(&ignored_); + return ignored_; + } + + private: + // Copy of absl::log_internal::FNMatch(). + bool FNMatch(absl::string_view pattern, absl::string_view str); + + std::vector filter_patterns_; + std::vector ignore_patterns_; + std::string last_name_; + + std::vector ignored_; +}; } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 8a6e3ba340..b959fe72f8 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -24,7 +25,6 @@ #include #include #include -#include #include #include @@ -35,15 +35,12 @@ #include "ortools/base/options.h" #endif // __PORTABLE_PLATFORM__ #include "absl/base/thread_annotations.h" -#include "absl/cleanup/cleanup.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" #include "absl/log/check.h" #include "absl/random/distributions.h" -#include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" @@ -53,45 +50,29 @@ #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" #include "ortools/base/logging.h" -#include "ortools/base/strong_vector.h" -#include "ortools/graph/connected_components.h" #include "ortools/port/proto_utils.h" -#include "ortools/sat/clause.h" #include "ortools/sat/cp_model.pb.h" #include 
"ortools/sat/cp_model_checker.h" #include "ortools/sat/cp_model_lns.h" -#include "ortools/sat/cp_model_loader.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_postsolve.h" #include "ortools/sat/cp_model_presolve.h" #include "ortools/sat/cp_model_search.h" +#include "ortools/sat/cp_model_solver_helpers.h" #include "ortools/sat/cp_model_symmetries.h" #include "ortools/sat/cp_model_utils.h" -#include "ortools/sat/cuts.h" #include "ortools/sat/feasibility_jump.h" #include "ortools/sat/feasibility_pump.h" -#include "ortools/sat/implied_bounds.h" #include "ortools/sat/integer.h" -#include "ortools/sat/integer_expr.h" -#include "ortools/sat/integer_search.h" -#include "ortools/sat/intervals.h" -#include "ortools/sat/lb_tree_search.h" -#include "ortools/sat/linear_constraint.h" #include "ortools/sat/linear_model.h" -#include "ortools/sat/linear_programming_constraint.h" -#include "ortools/sat/linear_relaxation.h" #include "ortools/sat/lp_utils.h" -#include "ortools/sat/max_hs.h" #include "ortools/sat/model.h" -#include "ortools/sat/optimization.h" #include "ortools/sat/parameters_validation.h" -#include "ortools/sat/precedences.h" #include "ortools/sat/presolve_context.h" -#include "ortools/sat/probing.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_inprocessing.h" #include "ortools/sat/sat_parameters.pb.h" -#include "ortools/sat/sat_solver.h" +#include "ortools/sat/shaving_solver.h" #include "ortools/sat/stat_tables.h" #include "ortools/sat/subsolver.h" #include "ortools/sat/synchronization.h" @@ -104,22 +85,8 @@ #endif // __PORTABLE_PLATFORM__ #include "ortools/base/version.h" #include "ortools/util/sorted_interval_list.h" -#include "ortools/util/strong_integers.h" #include "ortools/util/time_limit.h" -#if defined(_MSC_VER) -ABSL_FLAG(std::string, cp_model_dump_prefix, ".\\", - "Prefix filename for all dumped files"); -#else -ABSL_FLAG(std::string, cp_model_dump_prefix, "/tmp/", - "Prefix filename for all dumped files"); -#endif 
-ABSL_FLAG(bool, cp_model_dump_models, false, - "DEBUG ONLY. When set to true, SolveCpModel() will dump its model " - "protos (original model, presolved model, mapping model) in text " - "format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|" - "mapping_model}.pb.txt."); - ABSL_FLAG( bool, cp_model_export_model, false, "DEBUG ONLY. When set to true, SolveCpModel() will dump its input model " @@ -128,11 +95,6 @@ ABSL_FLAG( ABSL_FLAG(bool, cp_model_dump_text_proto, true, "DEBUG ONLY, dump models in text proto instead of binary proto."); -ABSL_FLAG(bool, cp_model_dump_submodels, false, - "DEBUG ONLY. When set to true, solve will dump all " - "lns or objective_shaving submodels proto in text format to " - "'FLAGS_cp_model_dump_prefix'xxx.pb.txt."); - ABSL_FLAG( bool, cp_model_dump_problematic_lns, false, "DEBUG ONLY. Similar to --cp_model_dump_submodels, but only dump fragment " @@ -150,16 +112,6 @@ ABSL_FLAG(std::string, cp_model_params, "", ABSL_FLAG(bool, debug_model_copy, false, "If true, copy the input model as if with no basic presolve"); -ABSL_FLAG(bool, cp_model_check_intermediate_solutions, false, - "When true, all intermediate solutions found by the solver will be " - "checked. This can be expensive, therefore it is off by default."); - -ABSL_FLAG( - std::string, cp_model_load_debug_solution, "", - "DEBUG ONLY. When this is set to a non-empty file name, " - "we will interpret this as an internal solution which can be used for " - "debugging. For instance we use it to identify wrong cuts/reasons."); - ABSL_FLAG(bool, cp_model_ignore_objective, false, "If true, ignore the objective."); ABSL_FLAG(bool, cp_model_ignore_hints, false, @@ -657,1665 +609,153 @@ std::string CpSolverResponseStats(const CpSolverResponse& response, namespace { -// This should be called on the presolved model. It will read the file -// specified by --cp_model_load_debug_solution and properly fill the -// model->Get() proto vector. 
-void LoadDebugSolution(const CpModelProto& model_proto, Model* model) { -#if !defined(__PORTABLE_PLATFORM__) - if (absl::GetFlag(FLAGS_cp_model_load_debug_solution).empty()) return; +void LogSubsolverNames( + const std::vector>& subsolvers, + absl::Span ignored, SolverLogger* logger) { + if (!logger->LoggingIsEnabled()) return; - CpSolverResponse response; - SOLVER_LOG(model->GetOrCreate(), - "Reading debug solution from '", - absl::GetFlag(FLAGS_cp_model_load_debug_solution), "'."); - CHECK_OK(file::GetTextProto(absl::GetFlag(FLAGS_cp_model_load_debug_solution), - &response, file::Defaults())); - - // Make sure we load a solution with the same number of variable has in the - // presolved model. - CHECK_EQ(response.solution().size(), model_proto.variables().size()); - model->GetOrCreate()->LoadDebugSolution( - response.solution()); -#endif // __PORTABLE_PLATFORM__ -} - -// This both copy the "main" DebugSolution to a local_model and also cache -// the value of the integer variables in that solution. -void InitializeDebugSolution(const CpModelProto& model_proto, Model* model) { - auto* shared_response = model->Get(); - if (shared_response == nullptr) return; - if (shared_response->DebugSolution().empty()) return; - - // Copy the proto values. - DebugSolution& debug_sol = *model->GetOrCreate(); - debug_sol.proto_values = shared_response->DebugSolution(); - - // Fill the values by integer variable. 
- const int num_integers = - model->GetOrCreate()->NumIntegerVariables().value(); - debug_sol.ivar_has_value.assign(num_integers, false); - debug_sol.ivar_values.assign(num_integers, 0); - - std::vector boolean_solution; - - const auto& mapping = *model->GetOrCreate(); - for (int i = 0; i < debug_sol.proto_values.size(); ++i) { - if (mapping.IsBoolean(i)) { - Literal l = mapping.Literal(i); - if (debug_sol.proto_values[i] == 0) { - l = l.Negated(); - } - boolean_solution.push_back(l); - } - - if (!mapping.IsInteger(i)) continue; - const IntegerVariable var = mapping.Integer(i); - debug_sol.ivar_has_value[var] = true; - debug_sol.ivar_has_value[NegationOf(var)] = true; - debug_sol.ivar_values[var] = debug_sol.proto_values[i]; - debug_sol.ivar_values[NegationOf(var)] = -debug_sol.proto_values[i]; - } - - // If the solution is fully boolean (there is no integer variable), and - // we have a decision problem (so no new boolean should be created), we load - // it in the sat solver for debugging too. - if (boolean_solution.size() == debug_sol.proto_values.size() && - !model_proto.has_objective()) { - LOG(INFO) << "Loaded pure Boolean debugging solution."; - model->GetOrCreate()->LoadDebugSolution(boolean_solution); - } - - // The objective variable is usually not part of the proto, but it is still - // nice to have it, so we recompute it here. - auto* objective_def = model->Get(); - if (objective_def != nullptr) { - const IntegerVariable objective_var = objective_def->objective_var; - const int64_t objective_value = - ComputeInnerObjective(model_proto.objective(), debug_sol.proto_values); - debug_sol.ivar_has_value[objective_var] = true; - debug_sol.ivar_has_value[NegationOf(objective_var)] = true; - debug_sol.ivar_values[objective_var] = objective_value; - debug_sol.ivar_values[NegationOf(objective_var)] = -objective_value; - } - - // We also register a DEBUG callback to check our reasons. 
- auto* encoder = model->GetOrCreate(); - const auto checker = [mapping, encoder, debug_sol, model]( - absl::Span clause, - absl::Span integers) { - bool is_satisfied = false; - int num_bools = 0; - int num_ints = 0; - std::vector> to_print; - for (const Literal l : clause) { - // First case, this Boolean is mapped. - { - const int proto_var = - mapping.GetProtoVariableFromBooleanVariable(l.Variable()); - if (proto_var != -1) { - to_print.push_back({l, IntegerLiteral(), proto_var}); - if (debug_sol.proto_values[proto_var] == (l.IsPositive() ? 1 : 0)) { - is_satisfied = true; - break; - } - ++num_bools; - continue; - } - } - - // Second case, it is associated to IntVar >= value. - // We can use any of them, so if one is false, we use this one. - bool all_true = true; - for (const IntegerLiteral associated : encoder->GetIntegerLiterals(l)) { - const int proto_var = mapping.GetProtoVariableFromIntegerVariable( - PositiveVariable(associated.var)); - if (proto_var == -1) break; - int64_t value = debug_sol.proto_values[proto_var]; - to_print.push_back({l, associated, proto_var}); - - if (!VariableIsPositive(associated.var)) value = -value; - if (value < associated.bound) { - ++num_ints; - all_true = false; - break; - } - } - if (all_true) { - is_satisfied = true; + std::vector full_problem_solver_names; + std::vector incomplete_solver_names; + std::vector first_solution_solver_names; + std::vector helper_solver_names; + for (int i = 0; i < subsolvers.size(); ++i) { + const auto& subsolver = subsolvers[i]; + switch (subsolver->type()) { + case SubSolver::FULL_PROBLEM: + full_problem_solver_names.push_back(subsolver->name()); break; - } - } - for (const IntegerLiteral i_lit : integers) { - const int proto_var = mapping.GetProtoVariableFromIntegerVariable( - PositiveVariable(i_lit.var)); - if (proto_var == -1) { - is_satisfied = true; + case SubSolver::INCOMPLETE: + incomplete_solver_names.push_back(subsolver->name()); break; - } - - int64_t value = 
debug_sol.proto_values[proto_var]; - to_print.push_back({Literal(kNoLiteralIndex), i_lit, proto_var}); - - if (!VariableIsPositive(i_lit.var)) value = -value; - // Note the sign is inversed, we cannot have all literal false and all - // integer literal true. - if (value >= i_lit.bound) { - is_satisfied = true; + case SubSolver::FIRST_SOLUTION: + first_solution_solver_names.push_back(subsolver->name()); break; - } - } - if (!is_satisfied) { - LOG(INFO) << "Reason clause is not satisfied by loaded solution:"; - LOG(INFO) << "Worker '" << model->Name() << "', level=" - << model->GetOrCreate()->CurrentDecisionLevel(); - LOG(INFO) << "literals (neg): " << clause; - LOG(INFO) << "integer literals: " << integers; - for (const auto [l, i_lit, proto_var] : to_print) { - LOG(INFO) << l << " " << i_lit << " var=" << proto_var - << " value_in_sol=" << debug_sol.proto_values[proto_var]; - } - } - return is_satisfied; - }; - const auto lit_checker = [checker](absl::Span clause) { - return checker(clause, {}); - }; - - model->GetOrCreate()->RegisterDebugChecker(lit_checker); - model->GetOrCreate()->RegisterDebugChecker(checker); -} - -std::vector GetSolutionValues(const CpModelProto& model_proto, - const Model& model) { - auto* mapping = model.Get(); - auto* trail = model.Get(); - - std::vector solution; - for (int i = 0; i < model_proto.variables_size(); ++i) { - if (mapping->IsInteger(i)) { - const IntegerVariable var = mapping->Integer(i); - - // For ignored or not fully instantiated variable, we just use the - // lower bound. - solution.push_back(model.Get(LowerBound(var))); - } else { - DCHECK(mapping->IsBoolean(i)); - const Literal literal = mapping->Literal(i); - if (trail->Assignment().LiteralIsAssigned(literal)) { - solution.push_back(model.Get(Value(literal))); - } else { - // Just use the lower bound if the variable is not fully instantiated. 
- solution.push_back(0); - } - } - } - - if (DEBUG_MODE || - absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { - // TODO(user): Checks against initial model. - CHECK(SolutionIsFeasible(model_proto, solution)); - } - return solution; -} - -namespace { - -IntegerVariable GetOrCreateVariableWithTightBound( - const std::vector>& terms, - Model* model) { - if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); - if (terms.size() == 1 && terms.front().second == 1) { - return terms.front().first; - } - if (terms.size() == 1 && terms.front().second == -1) { - return NegationOf(terms.front().first); - } - - int64_t sum_min = 0; - int64_t sum_max = 0; - for (const std::pair& var_coeff : terms) { - const int64_t min_domain = model->Get(LowerBound(var_coeff.first)); - const int64_t max_domain = model->Get(UpperBound(var_coeff.first)); - const int64_t coeff = var_coeff.second; - const int64_t prod1 = min_domain * coeff; - const int64_t prod2 = max_domain * coeff; - sum_min += std::min(prod1, prod2); - sum_max += std::max(prod1, prod2); - } - return model->Add(NewIntegerVariable(sum_min, sum_max)); -} - -IntegerVariable GetOrCreateVariableLinkedToSumOf( - const std::vector>& terms, - bool lb_required, bool ub_required, Model* model) { - if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); - if (terms.size() == 1 && terms.front().second == 1) { - return terms.front().first; - } - if (terms.size() == 1 && terms.front().second == -1) { - return NegationOf(terms.front().first); - } - - const IntegerVariable new_var = - GetOrCreateVariableWithTightBound(terms, model); - - // TODO(user): use the same format, i.e. LinearExpression in both code! - std::vector vars; - std::vector coeffs; - for (const auto [var, coeff] : terms) { - vars.push_back(var); - coeffs.push_back(coeff); - } - vars.push_back(new_var); - coeffs.push_back(-1); - - // Split if linear is large. 
- if (vars.size() > model->GetOrCreate()->linear_split_size()) { - SplitAndLoadIntermediateConstraints(lb_required, ub_required, &vars, - &coeffs, model); - } - - // Load the top-level constraint with the required sides. - if (lb_required) { - model->Add(WeightedSumGreaterOrEqual(vars, coeffs, 0)); - } - if (ub_required) { - model->Add(WeightedSumLowerOrEqual(vars, coeffs, 0)); - } - - return new_var; -} - -} // namespace - -// Adds one LinearProgrammingConstraint per connected component of the model. -IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, - const CpModelProto& model_proto, Model* m) { - // Non const as we will std::move() stuff out of there. - LinearRelaxation relaxation = ComputeLinearRelaxation(model_proto, m); - if (m->GetOrCreate()->ModelIsUnsat()) return kNoIntegerVariable; - - // The bipartite graph of LP constraints might be disconnected: - // make a partition of the variables into connected components. - // Constraint nodes are indexed by [0..num_lp_constraints), - // variable nodes by [num_lp_constraints..num_lp_constraints+num_variables). - // - // TODO(user): look into biconnected components. 
- const int num_lp_constraints = - static_cast(relaxation.linear_constraints.size()); - const int num_lp_cut_generators = - static_cast(relaxation.cut_generators.size()); - const int num_integer_variables = - m->GetOrCreate()->NumIntegerVariables().value(); - - DenseConnectedComponentsFinder components; - components.SetNumberOfNodes(num_lp_constraints + num_lp_cut_generators + - num_integer_variables); - auto get_constraint_index = [](int ct_index) { return ct_index; }; - auto get_cut_generator_index = [num_lp_constraints](int cut_index) { - return num_lp_constraints + cut_index; - }; - auto get_var_index = [num_lp_constraints, - num_lp_cut_generators](IntegerVariable var) { - return num_lp_constraints + num_lp_cut_generators + - PositiveVariable(var).value(); - }; - for (int i = 0; i < num_lp_constraints; i++) { - for (const IntegerVariable var : - relaxation.linear_constraints[i].VarsAsSpan()) { - components.AddEdge(get_constraint_index(i), get_var_index(var)); - } - } - for (int i = 0; i < num_lp_cut_generators; ++i) { - for (const IntegerVariable var : relaxation.cut_generators[i].vars) { - components.AddEdge(get_cut_generator_index(i), get_var_index(var)); - } - } - - const int num_components = components.GetNumberOfComponents(); - std::vector component_sizes(num_components, 0); - const std::vector index_to_component = components.GetComponentIds(); - for (int i = 0; i < num_lp_constraints; i++) { - ++component_sizes[index_to_component[get_constraint_index(i)]]; - } - for (int i = 0; i < num_lp_cut_generators; i++) { - ++component_sizes[index_to_component[get_cut_generator_index(i)]]; - } - - // TODO(user): Optimize memory layout. 
- std::vector> component_to_var(num_components); - for (IntegerVariable var(0); var < num_integer_variables; var += 2) { - DCHECK(VariableIsPositive(var)); - component_to_var[index_to_component[get_var_index(var)]].push_back(var); - } - - // Make sure any constraint that touch the objective is not discarded even - // if it is the only one in its component. This is important to propagate - // as much as possible the objective bound by using any bounds the LP give - // us on one of its components. This is critical on the zephyrus problems for - // instance. - auto* mapping = m->GetOrCreate(); - for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { - const IntegerVariable var = - mapping->Integer(model_proto.objective().vars(i)); - ++component_sizes[index_to_component[get_var_index(var)]]; - } - - // Dispatch every constraint to its LinearProgrammingConstraint. - std::vector lp_constraints(num_components, - nullptr); - for (int i = 0; i < num_lp_constraints; i++) { - const int c = index_to_component[get_constraint_index(i)]; - if (component_sizes[c] <= 1) continue; - if (lp_constraints[c] == nullptr) { - lp_constraints[c] = - new LinearProgrammingConstraint(m, component_to_var[c]); - m->TakeOwnership(lp_constraints[c]); - } - // Load the constraint. - lp_constraints[c]->AddLinearConstraint( - std::move(relaxation.linear_constraints[i])); - } - - // Dispatch every cut generator to its LinearProgrammingConstraint. - for (int i = 0; i < num_lp_cut_generators; i++) { - const int c = index_to_component[get_cut_generator_index(i)]; - if (lp_constraints[c] == nullptr) { - lp_constraints[c] = - new LinearProgrammingConstraint(m, component_to_var[c]); - m->TakeOwnership(lp_constraints[c]); - } - lp_constraints[c]->AddCutGenerator(std::move(relaxation.cut_generators[i])); - } - - // Add the objective. 
- std::vector>> - component_to_cp_terms(num_components); - std::vector> top_level_cp_terms; - int num_components_containing_objective = 0; - if (model_proto.has_objective()) { - // First pass: set objective coefficients on the lp constraints, and store - // the cp terms in one vector per component. - for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { - const IntegerVariable var = - mapping->Integer(model_proto.objective().vars(i)); - const int64_t coeff = model_proto.objective().coeffs(i); - const int c = index_to_component[get_var_index(var)]; - if (lp_constraints[c] != nullptr) { - lp_constraints[c]->SetObjectiveCoefficient(var, IntegerValue(coeff)); - component_to_cp_terms[c].push_back(std::make_pair(var, coeff)); - } else { - // Component is too small. We still need to store the objective term. - top_level_cp_terms.push_back(std::make_pair(var, coeff)); - } - } - // Second pass: Build the cp sub-objectives per component. - for (int c = 0; c < num_components; ++c) { - if (component_to_cp_terms[c].empty()) continue; - const IntegerVariable sub_obj_var = GetOrCreateVariableLinkedToSumOf( - component_to_cp_terms[c], objective_need_to_be_tight, true, m); - top_level_cp_terms.push_back(std::make_pair(sub_obj_var, 1)); - lp_constraints[c]->SetMainObjectiveVariable(sub_obj_var); - num_components_containing_objective++; - } - } - - const IntegerVariable main_objective_var = - model_proto.has_objective() - ? GetOrCreateVariableLinkedToSumOf( - top_level_cp_terms, objective_need_to_be_tight, true, m) - : kNoIntegerVariable; - - // Register LP constraints. Note that this needs to be done after all the - // constraints have been added. 
- for (LinearProgrammingConstraint* lp_constraint : lp_constraints) { - if (lp_constraint == nullptr) continue; - lp_constraint->RegisterWith(m); - VLOG(3) << "LP constraint: " << lp_constraint->DimensionString() << "."; - } - - VLOG(3) << top_level_cp_terms.size() - << " terms in the main objective linear equation (" - << num_components_containing_objective << " from LP constraints)."; - return main_objective_var; -} - -} // namespace - -// Used by NewFeasibleSolutionObserver or NewFeasibleSolutionLogCallback -// to register observers. -struct SolutionObservers { - std::vector> observers; - std::vector> - log_callbacks; - std::vector> best_bound_callbacks; -}; - -std::function NewFeasibleSolutionObserver( - const std::function& observer) { - return [=](Model* model) { - model->GetOrCreate()->observers.push_back(observer); - }; -} - -std::function NewFeasibleSolutionLogCallback( - const std::function& - callback) { - return [=](Model* model) { - model->GetOrCreate()->log_callbacks.push_back(callback); - }; -} - -std::function NewBestBoundCallback( - const std::function& callback) { - return [=](Model* model) { - model->GetOrCreate()->best_bound_callbacks.push_back( - callback); - }; -} - -#if !defined(__PORTABLE_PLATFORM__) -// TODO(user): Support it on android. -std::function NewSatParameters( - const std::string& params) { - sat::SatParameters parameters; - if (!params.empty()) { - CHECK(google::protobuf::TextFormat::ParseFromString(params, ¶meters)) - << params; - } - return NewSatParameters(parameters); -} -#endif // __PORTABLE_PLATFORM__ - -std::function NewSatParameters( - const sat::SatParameters& parameters) { - return [=](Model* model) { - // Tricky: It is important to initialize the model parameters before any - // of the solver object are created, so that by default they use the given - // parameters. - // - // TODO(user): A notable exception to this is the TimeLimit which is - // currently not initializing itself from the SatParameters in the model. 
It - // will also starts counting from the time of its creation. It will be good - // to find a solution that is less error prone. - *model->GetOrCreate() = parameters; - return parameters; - }; -} - -namespace { - -// Registers a callback that will export variables bounds fixed at level 0 of -// the search. This should not be registered to a LNS search. -void RegisterVariableBoundsLevelZeroExport( - const CpModelProto& /*model_proto*/, - SharedBoundsManager* shared_bounds_manager, Model* model) { - CHECK(shared_bounds_manager != nullptr); - - auto* mapping = model->GetOrCreate(); - auto* trail = model->Get(); - auto* integer_trail = model->Get(); - - int saved_trail_index = 0; - std::vector model_variables; - std::vector new_lower_bounds; - std::vector new_upper_bounds; - absl::flat_hash_set visited_variables; - const std::string name = model->Name(); - - auto broadcast_level_zero_bounds = - [=](const std::vector& modified_vars) mutable { - // Inspect the modified IntegerVariables. - for (const IntegerVariable& var : modified_vars) { - const IntegerVariable positive_var = PositiveVariable(var); - const int model_var = - mapping->GetProtoVariableFromIntegerVariable(positive_var); - - if (model_var == -1) continue; - const auto [_, inserted] = visited_variables.insert(model_var); - if (!inserted) continue; - - const int64_t new_lb = - integer_trail->LevelZeroLowerBound(positive_var).value(); - const int64_t new_ub = - integer_trail->LevelZeroUpperBound(positive_var).value(); - - // TODO(user): We could imagine an API based on atomic - // that could preemptively check if this new bounds are improving. - model_variables.push_back(model_var); - new_lower_bounds.push_back(new_lb); - new_upper_bounds.push_back(new_ub); - } - - // Inspect the newly modified Booleans. 
- for (; saved_trail_index < trail->Index(); ++saved_trail_index) { - const Literal fixed_literal = (*trail)[saved_trail_index]; - const int model_var = mapping->GetProtoVariableFromBooleanVariable( - fixed_literal.Variable()); - - if (model_var == -1) continue; - const auto [_, inserted] = visited_variables.insert(model_var); - if (!inserted) continue; - - model_variables.push_back(model_var); - if (fixed_literal.IsPositive()) { - new_lower_bounds.push_back(1); - new_upper_bounds.push_back(1); - } else { - new_lower_bounds.push_back(0); - new_upper_bounds.push_back(0); - } - } - - if (!model_variables.empty()) { - shared_bounds_manager->ReportPotentialNewBounds( - model->Name(), model_variables, new_lower_bounds, - new_upper_bounds); - - // Clear for next call. - model_variables.clear(); - new_lower_bounds.clear(); - new_upper_bounds.clear(); - visited_variables.clear(); - - // If we are not in interleave_search we synchronize right away. - if (!model->Get()->interleave_search()) { - shared_bounds_manager->Synchronize(); - } - } - }; - - // The callback will just be called on NEWLY modified var. So initially, - // we do want to read all variables. - // - // TODO(user): Find a better way? It seems nicer to register this before - // any variable is modified. But then we don't want to call it each time - // we reach level zero during probing. It should be better to only call - // it when a new variable has been fixed. - const IntegerVariable num_vars = - model->GetOrCreate()->NumIntegerVariables(); - std::vector all_variables; - all_variables.reserve(num_vars.value()); - for (IntegerVariable var(0); var < num_vars; ++var) { - all_variables.push_back(var); - } - broadcast_level_zero_bounds(all_variables); - - model->GetOrCreate() - ->RegisterLevelZeroModifiedVariablesCallback(broadcast_level_zero_bounds); -} - -// Registers a callback to import new variables bounds stored in the -// shared_bounds_manager. 
These bounds are imported at level 0 of the search -// in the linear scan minimize function. -void RegisterVariableBoundsLevelZeroImport( - const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager, - Model* model) { - CHECK(shared_bounds_manager != nullptr); - const std::string name = model->Name(); - auto* integer_trail = model->GetOrCreate(); - auto* trail = model->GetOrCreate(); - auto* sat_solver = model->GetOrCreate(); - auto* mapping = model->GetOrCreate(); - const int id = shared_bounds_manager->RegisterNewId(); - - const auto& import_level_zero_bounds = [&model_proto, shared_bounds_manager, - name, sat_solver, integer_trail, - trail, id, mapping]() { - std::vector model_variables; - std::vector new_lower_bounds; - std::vector new_upper_bounds; - shared_bounds_manager->GetChangedBounds( - id, &model_variables, &new_lower_bounds, &new_upper_bounds); - bool new_bounds_have_been_imported = false; - for (int i = 0; i < model_variables.size(); ++i) { - const int model_var = model_variables[i]; - - // If this is a Boolean, fix it if not already done. - // Note that it is important not to use AddUnitClause() as we do not - // want to propagate after each addition. - if (mapping->IsBoolean(model_var)) { - Literal lit = mapping->Literal(model_var); - if (new_upper_bounds[i] == 0) lit = lit.Negated(); - if (trail->Assignment().LiteralIsTrue(lit)) continue; - if (trail->Assignment().LiteralIsFalse(lit)) { - sat_solver->NotifyThatModelIsUnsat(); - return false; - } - new_bounds_have_been_imported = true; - trail->EnqueueWithUnitReason(lit); - continue; - } - - // Deal with integer. 
- if (!mapping->IsInteger(model_var)) continue; - const IntegerVariable var = mapping->Integer(model_var); - const IntegerValue new_lb(new_lower_bounds[i]); - const IntegerValue new_ub(new_upper_bounds[i]); - const IntegerValue old_lb = integer_trail->LowerBound(var); - const IntegerValue old_ub = integer_trail->UpperBound(var); - const bool changed_lb = new_lb > old_lb; - const bool changed_ub = new_ub < old_ub; - if (!changed_lb && !changed_ub) continue; - - new_bounds_have_been_imported = true; - if (VLOG_IS_ON(3)) { - const IntegerVariableProto& var_proto = - model_proto.variables(model_var); - const std::string& var_name = - var_proto.name().empty() - ? absl::StrCat("anonymous_var(", model_var, ")") - : var_proto.name(); - LOG(INFO) << " '" << name << "' imports new bounds for " << var_name - << ": from [" << old_lb << ", " << old_ub << "] to [" - << new_lb << ", " << new_ub << "]"; - } - - if (changed_lb && - !integer_trail->Enqueue(IntegerLiteral::GreaterOrEqual(var, new_lb), - {}, {})) { - return false; - } - if (changed_ub && - !integer_trail->Enqueue(IntegerLiteral::LowerOrEqual(var, new_ub), {}, - {})) { - return false; - } - } - if (new_bounds_have_been_imported && !sat_solver->FinishPropagation()) { - return false; - } - return true; - }; - model->GetOrCreate()->callbacks.push_back( - import_level_zero_bounds); -} - -// Registers a callback that will report improving objective best bound. -// It will be called each time new objective bound are propagated at level zero. 
-void RegisterObjectiveBestBoundExport( - IntegerVariable objective_var, - SharedResponseManager* shared_response_manager, Model* model) { - auto* integer_trail = model->Get(); - const auto broadcast_objective_lower_bound = - [objective_var, integer_trail, shared_response_manager, model, - best_obj_lb = - kMinIntegerValue](const std::vector&) mutable { - const IntegerValue objective_lb = - integer_trail->LevelZeroLowerBound(objective_var); - if (objective_lb > best_obj_lb) { - best_obj_lb = objective_lb; - shared_response_manager->UpdateInnerObjectiveBounds( - model->Name(), objective_lb, - integer_trail->LevelZeroUpperBound(objective_var)); - // If we are not in interleave_search we synchronize right away. - if (!model->Get()->interleave_search()) { - shared_response_manager->Synchronize(); - } - } - }; - model->GetOrCreate() - ->RegisterLevelZeroModifiedVariablesCallback( - broadcast_objective_lower_bound); -} - -// Registers a callback to import new objective bounds. It will be called each -// time the search main loop is back to level zero. Note that it the presence of -// assumptions, this will not happen until the set of assumptions is changed. 
-void RegisterObjectiveBoundsImport( - SharedResponseManager* shared_response_manager, Model* model) { - auto* solver = model->GetOrCreate(); - auto* integer_trail = model->GetOrCreate(); - auto* objective = model->GetOrCreate(); - const std::string name = model->Name(); - const auto import_objective_bounds = [name, solver, integer_trail, objective, - shared_response_manager]() { - if (solver->AssumptionLevel() != 0) return true; - bool propagate = false; - - const IntegerValue external_lb = - shared_response_manager->SynchronizedInnerObjectiveLowerBound(); - const IntegerValue current_lb = - integer_trail->LowerBound(objective->objective_var); - if (external_lb > current_lb) { - if (!integer_trail->Enqueue(IntegerLiteral::GreaterOrEqual( - objective->objective_var, external_lb), - {}, {})) { - return false; - } - propagate = true; - } - - const IntegerValue external_ub = - shared_response_manager->SynchronizedInnerObjectiveUpperBound(); - const IntegerValue current_ub = - integer_trail->UpperBound(objective->objective_var); - if (external_ub < current_ub) { - if (!integer_trail->Enqueue(IntegerLiteral::LowerOrEqual( - objective->objective_var, external_ub), - {}, {})) { - return false; - } - propagate = true; - } - - if (!propagate) return true; - - VLOG(3) << "'" << name << "' imports objective bounds: external [" - << objective->ScaleIntegerObjective(external_lb) << ", " - << objective->ScaleIntegerObjective(external_ub) << "], current [" - << objective->ScaleIntegerObjective(current_lb) << ", " - << objective->ScaleIntegerObjective(current_ub) << "]"; - - return solver->FinishPropagation(); - }; - - model->GetOrCreate()->callbacks.push_back( - import_objective_bounds); -} - -// Registers a callback that will export binary clauses discovered during -// search. 
-void RegisterClausesExport(int id, SharedClausesManager* shared_clauses_manager, - Model* model) { - auto* mapping = model->GetOrCreate(); - const auto& share_binary_clause = [mapping, id, shared_clauses_manager]( - Literal l1, Literal l2) { - const int var1 = - mapping->GetProtoVariableFromBooleanVariable(l1.Variable()); - if (var1 == -1) return; - const int var2 = - mapping->GetProtoVariableFromBooleanVariable(l2.Variable()); - if (var2 == -1) return; - const int lit1 = l1.IsPositive() ? var1 : NegatedRef(var1); - const int lit2 = l2.IsPositive() ? var2 : NegatedRef(var2); - shared_clauses_manager->AddBinaryClause(id, lit1, lit2); - }; - model->GetOrCreate()->SetAdditionCallback( - share_binary_clause); -} - -// Registers a callback to import new clauses stored in the -// shared_clausess_manager. These clauses are imported at level 0 of the search -// in the linear scan minimize function. -// it returns the id of the worker in the shared clause manager. -// -// TODO(user): Can we import them in the core worker ? 
-int RegisterClausesLevelZeroImport(int id, - SharedClausesManager* shared_clauses_manager, - Model* model) { - CHECK(shared_clauses_manager != nullptr); - CpModelMapping* const mapping = model->GetOrCreate(); - auto* sat_solver = model->GetOrCreate(); - auto* implications = model->GetOrCreate(); - const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, - sat_solver, implications]() { - std::vector> new_binary_clauses; - shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); - implications->EnableSharing(false); - for (const auto& [ref1, ref2] : new_binary_clauses) { - const Literal l1 = mapping->Literal(ref1); - const Literal l2 = mapping->Literal(ref2); - if (!sat_solver->AddBinaryClause(l1, l2)) { - return false; - } - } - implications->EnableSharing(true); - return true; - }; - model->GetOrCreate()->callbacks.push_back( - import_level_zero_clauses); - return id; -} - -void LoadBaseModel(const CpModelProto& model_proto, Model* model) { - auto* shared_response_manager = model->GetOrCreate(); - CHECK(shared_response_manager != nullptr); - auto* sat_solver = model->GetOrCreate(); - - // Simple function for the few places where we do "return unsat()". - const auto unsat = [shared_response_manager, sat_solver, model] { - sat_solver->NotifyThatModelIsUnsat(); - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(model->Name(), " [loading]")); - }; - - // We will add them all at once after model_proto is loaded. 
- model->GetOrCreate()->DisableImplicationBetweenLiteral(); - - auto* mapping = model->GetOrCreate(); - const SatParameters& parameters = *(model->GetOrCreate()); - const bool view_all_booleans_as_integers = - (parameters.linearization_level() >= 2) || - (parameters.search_branching() == SatParameters::FIXED_SEARCH && - model_proto.search_strategy().empty()) || - parameters.optimize_with_max_hs(); - LoadVariables(model_proto, view_all_booleans_as_integers, model); - DetectOptionalVariables(model_proto, model); - - // TODO(user): The core algo and symmetries seems to be problematic in some - // cases. See for instance: neos-691058.mps.gz. This is probably because as - // we modify the model, our symmetry might be wrong? investigate. - // - // TODO(user): More generally, we cannot load the symmetry if we create - // new Booleans and constraints that link them to some Booleans of the model. - // Creating Booleans related to integer variable is fine since we only deal - // with Boolean only symmetry here. It is why we disable this when we have - // linear relaxation as some of them create new constraints. - if (!parameters.optimize_with_core() && parameters.symmetry_level() > 1 && - !parameters.enumerate_all_solutions() && - parameters.linearization_level() == 0) { - LoadBooleanSymmetries(model_proto, model); - } - - ExtractEncoding(model_proto, model); - PropagateEncodingFromEquivalenceRelations(model_proto, model); - - // Check the model is still feasible before continuing. - if (sat_solver->ModelIsUnsat()) return unsat(); - - // Fully encode variables as needed by the search strategy. - AddFullEncodingFromSearchBranching(model_proto, model); - if (sat_solver->ModelIsUnsat()) return unsat(); - - // Reserve space for the precedence relations. - model->GetOrCreate()->Resize( - model->GetOrCreate()->NumIntegerVariables().value()); - - // Load the constraints. 
- int num_ignored_constraints = 0; - absl::flat_hash_set unsupported_types; - for (const ConstraintProto& ct : model_proto.constraints()) { - if (mapping->ConstraintIsAlreadyLoaded(&ct)) { - ++num_ignored_constraints; - continue; - } - - if (!LoadConstraint(ct, model)) { - unsupported_types.insert(ct.constraint_case()); - continue; - } - - // We propagate after each new Boolean constraint but not the integer - // ones. So we call FinishPropagation() manually here. - // - // Note that we only do that in debug mode as this can be really slow on - // certain types of problems with millions of constraints. - if (DEBUG_MODE) { - if (sat_solver->FinishPropagation()) { - Trail* trail = model->GetOrCreate(); - const int old_num_fixed = trail->Index(); - if (trail->Index() > old_num_fixed) { - VLOG(3) << "Constraint fixed " << trail->Index() - old_num_fixed - << " Boolean variable(s): " << ProtobufDebugString(ct); - } - } - } - if (sat_solver->ModelIsUnsat()) { - VLOG(2) << "UNSAT during extraction (after adding '" - << ConstraintCaseName(ct.constraint_case()) << "'). 
" - << ProtobufDebugString(ct); - return unsat(); - } - } - if (num_ignored_constraints > 0) { - VLOG(3) << num_ignored_constraints << " constraints were skipped."; - } - if (!unsupported_types.empty()) { - VLOG(1) << "There is unsupported constraints types in this model: "; - std::vector names; - for (const ConstraintProto::ConstraintCase type : unsupported_types) { - names.push_back(ConstraintCaseName(type)); - } - std::sort(names.begin(), names.end()); - for (const absl::string_view name : names) { - VLOG(1) << " - " << name; - } - return unsat(); - } - - model->GetOrCreate() - ->AddAllImplicationsBetweenAssociatedLiterals(); - if (!sat_solver->FinishPropagation()) return unsat(); - - model->GetOrCreate()->ProcessImplicationGraph( - model->GetOrCreate()); - model->GetOrCreate()->Build(); -} - -void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model) { - LoadBaseModel(model_proto, model); - - auto* mapping = model->GetOrCreate(); - const SatParameters& parameters = *(model->GetOrCreate()); - if (parameters.linearization_level() == 0) return; - - // Add linear constraints to Feasibility Pump. - const LinearRelaxation relaxation = - ComputeLinearRelaxation(model_proto, model); - if (model->GetOrCreate()->ModelIsUnsat()) return; - - const int num_lp_constraints = - static_cast(relaxation.linear_constraints.size()); - if (num_lp_constraints == 0) return; - auto* feasibility_pump = model->GetOrCreate(); - for (int i = 0; i < num_lp_constraints; i++) { - feasibility_pump->AddLinearConstraint(relaxation.linear_constraints[i]); - } - - if (model_proto.has_objective()) { - for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { - const IntegerVariable var = - mapping->Integer(model_proto.objective().vars(i)); - const int64_t coeff = model_proto.objective().coeffs(i); - feasibility_pump->SetObjectiveCoefficient(var, IntegerValue(coeff)); - } - } -} - -// Loads a CpModelProto inside the given model. 
-// This should only be called once on a given 'Model' class. -// -// TODO(user): move to cp_model_loader.h/.cc -void LoadCpModel(const CpModelProto& model_proto, Model* model) { - LoadBaseModel(model_proto, model); - - // We want to load the debug solution before the initial propag. - // But at this point the objective is not loaded yet, so we will not have - // a value for the objective integer variable, so we do it again later. - InitializeDebugSolution(model_proto, model); - - // Simple function for the few places where we do "return unsat()". - auto* sat_solver = model->GetOrCreate(); - auto* shared_response_manager = model->GetOrCreate(); - const auto unsat = [shared_response_manager, sat_solver, model] { - sat_solver->NotifyThatModelIsUnsat(); - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(model->Name(), " [loading]")); - }; - - auto* mapping = model->GetOrCreate(); - const SatParameters& parameters = *(model->GetOrCreate()); - - // Auto detect "at least one of" constraints in the PrecedencesPropagator. - // Note that we do that before we finish loading the problem (objective and - // LP relaxation), because propagation will be faster at this point and it - // should be enough for the purpose of this auto-detection. - if (parameters.auto_detect_greater_than_at_least_one_of()) { - model->GetOrCreate() - ->AddGreaterThanAtLeastOneOfConstraints(model); - if (!sat_solver->FinishPropagation()) return unsat(); - } - - // Note that this is already done in the presolve, but it is important to redo - // it here to collect literal => integer >= bound constraints that are used in - // many places. Without it, we don't detect them if they depends on long chain - // of implications. - // - // TODO(user): We don't have a good deterministic time on all constraints, - // so this might take more time than wanted. 
- if (parameters.cp_model_probing_level() > 1) { - Prober* prober = model->GetOrCreate(); - prober->ProbeBooleanVariables(/*deterministic_time_limit=*/1.0); - if (!model->GetOrCreate() - ->ComputeTransitiveReduction()) { - return unsat(); - } - } - if (sat_solver->ModelIsUnsat()) return unsat(); - - // Note that it is important to do that after the probing. - ExtractElementEncoding(model_proto, model); - - // Compute decomposed energies on demands helper. - IntervalsRepository* repository = model->Mutable(); - if (repository != nullptr) { - repository->InitAllDecomposedEnergies(); - } - - // We need to know beforehand if the objective var can just be >= terms or - // needs to be == terms. - bool objective_need_to_be_tight = false; - if (model_proto.has_objective() && - !model_proto.objective().domain().empty()) { - int64_t min_value = 0; - int64_t max_value = 0; - auto* integer_trail = model->GetOrCreate(); - const CpObjectiveProto& obj = model_proto.objective(); - for (int i = 0; i < obj.vars_size(); ++i) { - const int64_t coeff = obj.coeffs(i); - const IntegerVariable var = mapping->Integer(obj.vars(i)); - if (coeff > 0) { - min_value += coeff * integer_trail->LowerBound(var).value(); - max_value += coeff * integer_trail->UpperBound(var).value(); - } else { - min_value += coeff * integer_trail->UpperBound(var).value(); - max_value += coeff * integer_trail->LowerBound(var).value(); - } - } - const Domain user_domain = ReadDomainFromProto(model_proto.objective()); - const Domain automatic_domain = Domain(min_value, max_value); - objective_need_to_be_tight = !automatic_domain.IsIncludedIn(user_domain); - } - - // Create an objective variable and its associated linear constraint if - // needed. - IntegerVariable objective_var = kNoIntegerVariable; - if (parameters.linearization_level() > 0) { - // Linearize some part of the problem and register LP constraint(s). 
- objective_var = - AddLPConstraints(objective_need_to_be_tight, model_proto, model); - if (sat_solver->ModelIsUnsat()) return unsat(); - } else if (model_proto.has_objective()) { - const CpObjectiveProto& obj = model_proto.objective(); - std::vector> terms; - terms.reserve(obj.vars_size()); - for (int i = 0; i < obj.vars_size(); ++i) { - terms.push_back( - std::make_pair(mapping->Integer(obj.vars(i)), obj.coeffs(i))); - } - if (parameters.optimize_with_core()) { - if (objective_need_to_be_tight) { - // We do not care about the <= obj for core, we only need the other side - // to enforce a restriction of the objective lower bound. - // - // TODO(user): This might still create intermediate variables to - // decompose the objective for no reason. Just deal directly with the - // objective domain in the core algo by forbidding bad assumptions? - // Alternatively, just ignore the core solution if it is "too" good and - // rely on other solvers? - objective_var = - GetOrCreateVariableLinkedToSumOf(terms, true, false, model); - } else { - objective_var = GetOrCreateVariableWithTightBound(terms, model); - } - } else { - objective_var = GetOrCreateVariableLinkedToSumOf( - terms, objective_need_to_be_tight, true, model); - } - } - - // Create the objective definition inside the Model so that it can be accessed - // by the heuristics than needs it. 
- if (objective_var != kNoIntegerVariable) { - const CpObjectiveProto& objective_proto = model_proto.objective(); - auto* objective_definition = model->GetOrCreate(); - - objective_definition->scaling_factor = objective_proto.scaling_factor(); - if (objective_definition->scaling_factor == 0.0) { - objective_definition->scaling_factor = 1.0; - } - objective_definition->offset = objective_proto.offset(); - objective_definition->objective_var = objective_var; - - const int size = objective_proto.vars_size(); - objective_definition->vars.resize(size); - objective_definition->coeffs.resize(size); - for (int i = 0; i < objective_proto.vars_size(); ++i) { - // Note that if there is no mapping, then the variable will be - // kNoIntegerVariable. - objective_definition->vars[i] = mapping->Integer(objective_proto.vars(i)); - objective_definition->coeffs[i] = IntegerValue(objective_proto.coeffs(i)); - - // Fill the objective heuristics data. - const int ref = objective_proto.vars(i); - if (mapping->IsInteger(ref)) { - const IntegerVariable var = mapping->Integer(objective_proto.vars(i)); - objective_definition->objective_impacting_variables.insert( - objective_proto.coeffs(i) > 0 ? var : NegationOf(var)); - } - } - - // Register an objective special propagator. - model->TakeOwnership( - new LevelZeroEquality(objective_var, objective_definition->vars, - objective_definition->coeffs, model)); - } - - // Intersect the objective domain with the given one if any. 
- if (!model_proto.objective().domain().empty()) { - auto* integer_trail = model->GetOrCreate(); - const Domain user_domain = ReadDomainFromProto(model_proto.objective()); - const Domain automatic_domain = - integer_trail->InitialVariableDomain(objective_var); - VLOG(3) << "Objective offset:" << model_proto.objective().offset() - << " scaling_factor:" << model_proto.objective().scaling_factor(); - VLOG(3) << "Automatic internal objective domain: " << automatic_domain; - VLOG(3) << "User specified internal objective domain: " << user_domain; - CHECK_NE(objective_var, kNoIntegerVariable); - if (!integer_trail->UpdateInitialDomain(objective_var, user_domain)) { - VLOG(2) << "UNSAT due to the objective domain."; - return unsat(); - } - } - - // Note that we do one last propagation at level zero once all the - // constraints were added. - SOLVER_LOG(model->GetOrCreate(), - "Initial num_bool: ", sat_solver->NumVariables()); - if (!sat_solver->FinishPropagation()) return unsat(); - - if (model_proto.has_objective()) { - // Report the initial objective variable bounds. - auto* integer_trail = model->GetOrCreate(); - shared_response_manager->UpdateInnerObjectiveBounds( - absl::StrCat(model->Name(), " (initial_propagation)"), - integer_trail->LowerBound(objective_var), - integer_trail->UpperBound(objective_var)); - - // Watch improved objective best bounds. - RegisterObjectiveBestBoundExport(objective_var, shared_response_manager, - model); - - // Import objective bounds. - // TODO(user): Support objective bounds import in LNS and Core based - // search. - if (model->GetOrCreate()->share_objective_bounds()) { - RegisterObjectiveBoundsImport(shared_response_manager, model); - } - } - - // Initialize the search strategies. 
- auto* search_heuristics = model->GetOrCreate(); - search_heuristics->user_search = - ConstructUserSearchStrategy(model_proto, model); - search_heuristics->heuristic_search = - ConstructHeuristicSearchStrategy(model_proto, model); - search_heuristics->integer_completion_search = - ConstructIntegerCompletionSearchStrategy(mapping->GetVariableMapping(), - objective_var, model); - search_heuristics->fixed_search = ConstructFixedSearchStrategy( - search_heuristics->user_search, search_heuristics->heuristic_search, - search_heuristics->integer_completion_search); - if (VLOG_IS_ON(3)) { - search_heuristics->fixed_search = - InstrumentSearchStrategy(model_proto, mapping->GetVariableMapping(), - search_heuristics->fixed_search, model); - } - search_heuristics->hint_search = - ConstructHintSearchStrategy(model_proto, mapping, model); - - // Create the CoreBasedOptimizer class if needed. - if (parameters.optimize_with_core()) { - // TODO(user): Remove code duplication with the solution_observer in - // SolveLoadedCpModel(). 
- const auto solution_observer = [&model_proto, model, - shared_response_manager, - best_obj_ub = kMaxIntegerValue]() mutable { - const std::vector solution = - GetSolutionValues(model_proto, *model); - const IntegerValue obj_ub = - ComputeInnerObjective(model_proto.objective(), solution); - if (obj_ub < best_obj_ub) { - best_obj_ub = obj_ub; - shared_response_manager->NewSolution(solution, model->Name(), model); - } - }; - - const auto& objective = *model->GetOrCreate(); - if (parameters.optimize_with_max_hs()) { - HittingSetOptimizer* max_hs = new HittingSetOptimizer( - model_proto, objective, solution_observer, model); - model->Register(max_hs); - model->TakeOwnership(max_hs); - } else { - CoreBasedOptimizer* core = - new CoreBasedOptimizer(objective_var, objective.vars, - objective.coeffs, solution_observer, model); - model->Register(core); - model->TakeOwnership(core); - } - } - - InitializeDebugSolution(model_proto, model); -} - -// Solves an already loaded cp_model_proto. -// The final CpSolverResponse must be read from the shared_response_manager. -// -// TODO(user): This should be transformed so that it can be called many times -// and resume from the last search state as if it wasn't interrupted. That would -// allow use to easily interleave different heuristics in the same thread. 
-void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) { - auto* shared_response_manager = model->GetOrCreate(); - if (shared_response_manager->ProblemIsSolved()) return; - - const SatParameters& parameters = *model->GetOrCreate(); - if (parameters.stop_after_root_propagation()) return; - - auto solution_observer = [&model_proto, model, shared_response_manager, - best_obj_ub = kMaxIntegerValue]() mutable { - const std::vector solution = - GetSolutionValues(model_proto, *model); - if (model_proto.has_objective()) { - const IntegerValue obj_ub = - ComputeInnerObjective(model_proto.objective(), solution); - if (obj_ub < best_obj_ub) { - best_obj_ub = obj_ub; - shared_response_manager->NewSolution(solution, model->Name(), model); - } - } else { - shared_response_manager->NewSolution(solution, model->Name(), model); - } - }; - - // Make sure we are not at a positive level. - if (!model->GetOrCreate()->ResetToLevelZero()) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - model->Name()); - return; - } - - // Reconfigure search heuristic if it was changed. 
- ConfigureSearchHeuristics(model); - - const auto& mapping = *model->GetOrCreate(); - SatSolver::Status status; - - if (parameters.use_probing_search()) { - ContinuousProber prober(model_proto, model); - while (true) { - status = prober.Probe(); - if (status == SatSolver::INFEASIBLE) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - model->Name()); + case SubSolver::HELPER: + helper_solver_names.push_back(subsolver->name()); break; - } - if (status == SatSolver::FEASIBLE) { - solution_observer(); - } else { - break; - } } - } else if (!model_proto.has_objective()) { - while (true) { - if (parameters.use_shared_tree_search()) { - auto* subtree_worker = model->GetOrCreate(); - status = subtree_worker->Search(solution_observer); - } else { - status = ResetAndSolveIntegerProblem( - mapping.Literals(model_proto.assumptions()), model); - } - if (status != SatSolver::Status::FEASIBLE) break; - solution_observer(); - if (!parameters.enumerate_all_solutions()) break; - model->Add(ExcludeCurrentSolutionAndBacktrack()); - } - if (status == SatSolver::INFEASIBLE) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - model->Name()); - } - if (status == SatSolver::ASSUMPTIONS_UNSAT) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - model->Name()); + } - // Extract a good subset of assumptions and add it to the response. - auto* time_limit = model->GetOrCreate(); - auto* sat_solver = model->GetOrCreate(); - std::vector core = sat_solver->GetLastIncompatibleDecisions(); - MinimizeCoreWithPropagation(time_limit, sat_solver, &core); - std::vector core_in_proto_format; - for (const Literal l : core) { - core_in_proto_format.push_back( - mapping.GetProtoVariableFromBooleanVariable(l.Variable())); - if (!l.IsPositive()) { - core_in_proto_format.back() = NegatedRef(core_in_proto_format.back()); + // TODO(user): We might not want to sort the subsolver by name to keep our + // ordered list by importance? not sure. 
+ auto display_subsolver_list = [logger](absl::Span names, + const absl::string_view type_name) { + if (!names.empty()) { + absl::btree_map solvers_and_count; + for (const auto& name : names) { + solvers_and_count[name]++; + } + std::vector counted_names; + for (const auto& [name, count] : solvers_and_count) { + if (count == 1) { + counted_names.push_back(name); + } else { + counted_names.push_back(absl::StrCat(name, "(", count, ")")); } } - shared_response_manager->AddUnsatCore(core_in_proto_format); + SOLVER_LOG( + logger, names.size(), " ", + absl::StrCat(type_name, names.size() == 1 ? "" : "s"), ": [", + absl::StrJoin(counted_names.begin(), counted_names.end(), ", "), "]"); } + }; + + display_subsolver_list(full_problem_solver_names, "full problem subsolver"); + display_subsolver_list(first_solution_solver_names, + "first solution subsolver"); + display_subsolver_list(incomplete_solver_names, "interleaved subsolver"); + display_subsolver_list(helper_solver_names, "helper subsolver"); + if (!ignored.empty()) { + display_subsolver_list(ignored, "ignored subsolver"); + } + + SOLVER_LOG(logger, ""); +} + +void LogFinalStatistics(SharedClasses* shared) { + if (!shared->logger->LoggingIsEnabled()) return; + + shared->logger->FlushPendingThrottledLogs(/*ignore_rates=*/true); + SOLVER_LOG(shared->logger, ""); + + shared->stat_tables.Display(shared->logger); + shared->response->DisplayImprovementStatistics(); + + std::vector> table; + table.push_back({"Solution repositories", "Added", "Queried", "Synchro"}); + table.push_back(shared->response->SolutionsRepository().TableLineStats()); + if (shared->lp_solutions != nullptr) { + table.push_back(shared->lp_solutions->TableLineStats()); + } + if (shared->incomplete_solutions != nullptr) { + table.push_back(shared->incomplete_solutions->TableLineStats()); + } + SOLVER_LOG(shared->logger, FormatTable(table)); + + if (shared->bounds) { + shared->bounds->LogStatistics(shared->logger); + } + + if (shared->clauses) { + 
shared->clauses->LogStatistics(shared->logger); + } + + // Extra logging if needed. Note that these are mainly activated on + // --vmodule *some_file*=1 and are here for development. + shared->stats->Log(shared->logger); +} + +void LaunchSubsolvers(const SatParameters& params, SharedClasses* shared, + std::vector>& subsolvers, + absl::Span ignored) { + // Initial logging. + SOLVER_LOG(shared->logger, ""); + if (params.interleave_search()) { + SOLVER_LOG(shared->logger, + absl::StrFormat("Starting deterministic search at %.2fs with " + "%i workers and batch size of %d.", + shared->wall_timer->Get(), params.num_workers(), + params.interleave_batch_size())); } else { - // Optimization problem. - const auto& objective = *model->GetOrCreate(); - const IntegerVariable objective_var = objective.objective_var; - CHECK_NE(objective_var, kNoIntegerVariable); + SOLVER_LOG( + shared->logger, + absl::StrFormat("Starting search at %.2fs with %i workers.", + shared->wall_timer->Get(), params.num_workers())); + } + LogSubsolverNames(subsolvers, ignored, shared->logger); - if (parameters.optimize_with_lb_tree_search()) { - auto* search = model->GetOrCreate(); - status = search->Search(solution_observer); - } else if (parameters.optimize_with_core()) { - // TODO(user): This doesn't work with splitting in chunk for now. It - // shouldn't be too hard to fix. - if (parameters.optimize_with_max_hs()) { - status = model->Mutable()->Optimize(); - } else { - status = model->Mutable()->Optimize(); - } - } else if (parameters.use_shared_tree_search()) { - auto* subtree_worker = model->GetOrCreate(); - status = subtree_worker->Search(solution_observer); - } else { - // TODO(user): This parameter breaks the splitting in chunk of a Solve(). - // It should probably be moved into another SubSolver altogether. 
- if (parameters.binary_search_num_conflicts() >= 0) { - RestrictObjectiveDomainWithBinarySearch(objective_var, - solution_observer, model); - } - status = MinimizeIntegerVariableWithLinearScanAndLazyEncoding( - objective_var, solution_observer, model); + // Launch the main search loop. + if (params.interleave_search()) { + int batch_size = params.interleave_batch_size(); + if (batch_size == 0) { + batch_size = params.num_workers() == 1 ? 1 : params.num_workers() * 3; + SOLVER_LOG( + shared->logger, + "Setting number of tasks in each batch of interleaved search to ", + batch_size); } - - // The search is done in both case. - // - // TODO(user): Remove the weird translation INFEASIBLE->FEASIBLE in the - // function above? - if (status == SatSolver::INFEASIBLE || status == SatSolver::FEASIBLE) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - model->Name()); - } - } -} - -// Try to find a solution by following the hint and using a low conflict limit. -// The CpModelProto must already be loaded in the Model. -void QuickSolveWithHint(const CpModelProto& model_proto, Model* model) { - if (!model_proto.has_solution_hint()) return; - - auto* shared_response_manager = model->GetOrCreate(); - if (shared_response_manager->ProblemIsSolved()) return; - - // Temporarily change the parameters. - auto* parameters = model->GetOrCreate(); - - // If the model was loaded with "optimize_with_core" then the objective - // variable is not linked to its linear expression. Because of that, we can - // return a solution that does not satisfy the objective domain. - // - // TODO(user): This is fixable, but then do we need the hint when optimizing - // with core? 
- if (parameters->optimize_with_core()) return; - - const SatParameters saved_params = *parameters; - parameters->set_max_number_of_conflicts(parameters->hint_conflict_limit()); - parameters->set_search_branching(SatParameters::HINT_SEARCH); - parameters->set_optimize_with_core(false); - auto cleanup = ::absl::MakeCleanup( - [parameters, saved_params]() { *parameters = saved_params; }); - - // Solve decision problem. - ConfigureSearchHeuristics(model); - const auto& mapping = *model->GetOrCreate(); - const SatSolver::Status status = ResetAndSolveIntegerProblem( - mapping.Literals(model_proto.assumptions()), model); - - const std::string& solution_info = model->Name(); - if (status == SatSolver::Status::FEASIBLE) { - const std::vector solution = - GetSolutionValues(model_proto, *model); - shared_response_manager->NewSolution( - solution, absl::StrCat(solution_info, " [hint]"), model); - - if (!model_proto.has_objective()) { - if (parameters->enumerate_all_solutions()) { - model->Add(ExcludeCurrentSolutionAndBacktrack()); - } - } else { - // Restrict the objective. - const IntegerVariable objective_var = - model->GetOrCreate()->objective_var; - model->GetOrCreate()->Backtrack(0); - IntegerTrail* integer_trail = model->GetOrCreate(); - if (!integer_trail->Enqueue( - IntegerLiteral::LowerOrEqual( - objective_var, - shared_response_manager->GetInnerObjectiveUpperBound()), - {}, {})) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(solution_info, " [hint]")); - } - } - return; - } - - // This code is here to debug bad presolve during LNS that corrupt the hint. - // Note that sometime the deterministic limit is hit before the hint can be - // completed, so we don't report that has an error. - // - // Tricky: We can only test that if we don't already have a feasible solution - // like we do if the hint is complete. 
- if (parameters->debug_crash_on_bad_hint() && - shared_response_manager->SolutionsRepository().NumSolutions() == 0 && - !model->GetOrCreate()->LimitReached() && - status != SatSolver::Status::FEASIBLE) { - LOG(FATAL) << "QuickSolveWithHint() didn't find a feasible solution." - << " The model name is '" << model_proto.name() << "'." - << " Status: " << status << "."; - } - - if (status == SatSolver::INFEASIBLE) { - shared_response_manager->NotifyThatImprovingProblemIsInfeasible( - absl::StrCat(solution_info, " [hint]")); - return; - } -} - -// Solve a model with a different objective consisting of minimizing the L1 -// distance with the provided hint. Note that this method creates an in-memory -// copy of the model and loads a local Model object from the copied model. -void MinimizeL1DistanceWithHint(const CpModelProto& model_proto, Model* model) { - Model local_model; - - // Forward some shared class. - local_model.Register( - model->GetOrCreate()); - local_model.Register(model->GetOrCreate()); - - if (!model_proto.has_solution_hint()) return; - - auto* shared_response_manager = model->GetOrCreate(); - if (shared_response_manager->ProblemIsSolved()) return; - - auto* parameters = local_model.GetOrCreate(); - // TODO(user): As of now the repair hint doesn't support when - // enumerate_all_solutions is set since the solution is created on a different - // model. - if (parameters->enumerate_all_solutions()) return; - - // Change the parameters. - const SatParameters saved_params = *model->GetOrCreate(); - *parameters = saved_params; - parameters->set_max_number_of_conflicts(parameters->hint_conflict_limit()); - parameters->set_optimize_with_core(false); - - // Update the model to introduce penalties to go away from hinted values. - CpModelProto updated_model_proto = model_proto; - updated_model_proto.clear_objective(); - - // TODO(user): For boolean variables we can avoid creating new variables. 
- for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { - const int var = model_proto.solution_hint().vars(i); - const int64_t value = model_proto.solution_hint().values(i); - - // Add a new var to represent the difference between var and value. - const int new_var_index = updated_model_proto.variables_size(); - IntegerVariableProto* var_proto = updated_model_proto.add_variables(); - const int64_t min_domain = model_proto.variables(var).domain(0) - value; - const int64_t max_domain = - model_proto.variables(var).domain( - model_proto.variables(var).domain_size() - 1) - - value; - var_proto->add_domain(min_domain); - var_proto->add_domain(max_domain); - - // new_var = var - value. - ConstraintProto* const linear_constraint_proto = - updated_model_proto.add_constraints(); - LinearConstraintProto* linear = linear_constraint_proto->mutable_linear(); - linear->add_vars(new_var_index); - linear->add_coeffs(1); - linear->add_vars(var); - linear->add_coeffs(-1); - linear->add_domain(-value); - linear->add_domain(-value); - - // abs_var = abs(new_var). 
- const int abs_var_index = updated_model_proto.variables_size(); - IntegerVariableProto* abs_var_proto = updated_model_proto.add_variables(); - const int64_t abs_min_domain = 0; - const int64_t abs_max_domain = - std::max(std::abs(min_domain), std::abs(max_domain)); - abs_var_proto->add_domain(abs_min_domain); - abs_var_proto->add_domain(abs_max_domain); - auto* abs_ct = updated_model_proto.add_constraints()->mutable_lin_max(); - abs_ct->mutable_target()->add_vars(abs_var_index); - abs_ct->mutable_target()->add_coeffs(1); - LinearExpressionProto* left = abs_ct->add_exprs(); - left->add_vars(new_var_index); - left->add_coeffs(1); - LinearExpressionProto* right = abs_ct->add_exprs(); - right->add_vars(new_var_index); - right->add_coeffs(-1); - - updated_model_proto.mutable_objective()->add_vars(abs_var_index); - updated_model_proto.mutable_objective()->add_coeffs(1); - } - - auto* local_response_manager = - local_model.GetOrCreate(); - local_response_manager->InitializeObjective(updated_model_proto); - - // Solve optimization problem. 
- LoadCpModel(updated_model_proto, &local_model); - - ConfigureSearchHeuristics(&local_model); - const auto& mapping = *local_model.GetOrCreate(); - const SatSolver::Status status = ResetAndSolveIntegerProblem( - mapping.Literals(updated_model_proto.assumptions()), &local_model); - - const std::string& solution_info = model->Name(); - if (status == SatSolver::Status::FEASIBLE) { - const std::vector solution = - GetSolutionValues(model_proto, local_model); - if (DEBUG_MODE) { - const std::vector updated_solution = - GetSolutionValues(updated_model_proto, local_model); - LOG(INFO) << "Found solution with repaired hint penalty = " - << ComputeInnerObjective(updated_model_proto.objective(), - updated_solution); - } - shared_response_manager->NewSolution( - solution, absl::StrCat(solution_info, " [repaired]"), &local_model); - } -} - -// TODO(user): If this ever shows up in the profile, we could avoid copying -// the mapping_proto if we are careful about how we modify the variable domain -// before postsolving it. Note that 'num_variables_in_original_model' refers to -// the model before presolve. -void PostsolveResponseWithFullSolver(int num_variables_in_original_model, - CpModelProto mapping_proto, - const std::vector& postsolve_mapping, - std::vector* solution) { - WallTimer wall_timer; - wall_timer.Start(); - - // Fix the correct variable in the mapping_proto. - for (int i = 0; i < solution->size(); ++i) { - auto* var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); - var_proto->clear_domain(); - var_proto->add_domain((*solution)[i]); - var_proto->add_domain((*solution)[i]); - } - - // Postosolve parameters. - // TODO(user): this problem is usually trivial, but we may still want to - // impose a time limit or copy some of the parameters passed by the user. 
- Model postsolve_model; - postsolve_model.Register(&wall_timer); - { - SatParameters& params = *postsolve_model.GetOrCreate(); - params.set_linearization_level(0); - params.set_cp_model_probing_level(0); - } - - auto* response_manager = postsolve_model.GetOrCreate(); - response_manager->InitializeObjective(mapping_proto); - - LoadCpModel(mapping_proto, &postsolve_model); - SolveLoadedCpModel(mapping_proto, &postsolve_model); - const CpSolverResponse postsolve_response = response_manager->GetResponse(); - CHECK(postsolve_response.status() == CpSolverStatus::FEASIBLE || - postsolve_response.status() == CpSolverStatus::OPTIMAL) - << CpSolverResponseStats(postsolve_response); - - // We only copy the solution from the postsolve_response to the response. - CHECK_LE(num_variables_in_original_model, - postsolve_response.solution().size()); - solution->assign( - postsolve_response.solution().begin(), - postsolve_response.solution().begin() + num_variables_in_original_model); -} - -void PostsolveResponseWrapper(const SatParameters& params, - int num_variable_in_original_model, - const CpModelProto& mapping_proto, - const std::vector& postsolve_mapping, - std::vector* solution) { - if (params.debug_postsolve_with_full_solver()) { - PostsolveResponseWithFullSolver(num_variable_in_original_model, - mapping_proto, postsolve_mapping, solution); + DeterministicLoop(subsolvers, params.num_workers(), batch_size, + params.max_num_deterministic_batches()); } else { - PostsolveResponse(num_variable_in_original_model, mapping_proto, - postsolve_mapping, solution); + NonDeterministicLoop(subsolvers, params.num_workers()); } + + // We need to delete the subsolvers in order to fill the stat tables. Note + // that first solution should already be deleted. We delete manually as + // windows release vectors in the opposite order. 
+ for (int i = 0; i < subsolvers.size(); ++i) { + subsolvers[i].reset(); + } + LogFinalStatistics(shared); } -#if !defined(__PORTABLE_PLATFORM__) - -// Small wrapper containing all the shared classes between our subsolver -// threads. Note that all these classes can also be retrieved with something -// like global_model->GetOrCreate() but it is not thread-safe to do so. -// -// All the classes here should be thread-safe, or at least safe in the way they -// are accessed. For instance the model_proto will be kept constant for the -// whole duration of the solve. -struct SharedClasses { - SharedClasses(const CpModelProto* proto, Model* global_model) - : model_proto(proto), - wall_timer(global_model->GetOrCreate()), - time_limit(global_model->GetOrCreate()), - logger(global_model->GetOrCreate()), - stats(global_model->GetOrCreate()), - response(global_model->GetOrCreate()), - shared_tree_manager(global_model->GetOrCreate()) {} - - // These are never nullptr. - const CpModelProto* const model_proto; - WallTimer* const wall_timer; - ModelSharedTimeLimit* const time_limit; - SolverLogger* const logger; - SharedStatistics* const stats; - SharedResponseManager* const response; - SharedTreeManager* const shared_tree_manager; - - // These can be nullptr depending on the options. - std::unique_ptr bounds; - std::unique_ptr lp_solutions; - std::unique_ptr incomplete_solutions; - std::unique_ptr clauses; - - // For displaying summary at the end. - SharedStatTables stat_tables; - - bool SearchIsDone() { - if (response->ProblemIsSolved()) { - // This is for cases where the time limit is checked more often. - time_limit->Stop(); - return true; - } - if (time_limit->LimitReached()) return true; - return false; - } -}; - // Encapsulate a full CP-SAT solve without presolve in the SubSolver API. 
class FullProblemSolver : public SubSolver { public: FullProblemSolver(absl::string_view name, const SatParameters& local_parameters, bool split_in_chunks, SharedClasses* shared, bool stop_at_first_solution = false) - : SubSolver(stop_at_first_solution ? absl::StrCat("fs_", name) : name, - stop_at_first_solution ? FIRST_SOLUTION : FULL_PROBLEM), + : SubSolver(name, stop_at_first_solution ? FIRST_SOLUTION : FULL_PROBLEM), shared_(shared), split_in_chunks_(split_in_chunks), stop_at_first_solution_(stop_at_first_solution), @@ -2360,6 +800,13 @@ class FullProblemSolver : public SubSolver { // TODO(user): For now we do not count LNS statistics. We could easily // by registering the SharedStatistics class with LNS local model. local_model_.Register(shared_->stats); + + // Setup the local logger, in multi-thread log_search_progress should be + // false by default, but we might turn it on for debugging. It is on by + // default in single-thread mode. + auto* logger = local_model_.GetOrCreate(); + logger->EnableLogging(local_parameters.log_search_progress()); + logger->SetLogToStdOut(local_parameters.log_to_stdout()); } ~FullProblemSolver() override { @@ -2401,7 +848,7 @@ class FullProblemSolver : public SubSolver { } return [this]() { if (solving_first_chunk_) { - LoadCpModel(*shared_->model_proto, &local_model_); + LoadCpModel(shared_->model_proto, &local_model_); // Level zero variable bounds sharing. It is important to register // that after the probing that takes place in LoadCpModel() otherwise @@ -2409,9 +856,9 @@ class FullProblemSolver : public SubSolver { // at the same time. 
if (shared_->bounds != nullptr) { RegisterVariableBoundsLevelZeroExport( - *shared_->model_proto, shared_->bounds.get(), &local_model_); + shared_->model_proto, shared_->bounds.get(), &local_model_); RegisterVariableBoundsLevelZeroImport( - *shared_->model_proto, shared_->bounds.get(), &local_model_); + shared_->model_proto, shared_->bounds.get(), &local_model_); } // Note that this is done after the loading, so we will never export @@ -2425,12 +872,22 @@ class FullProblemSolver : public SubSolver { RegisterClausesExport(id, shared_->clauses.get(), &local_model_); } + auto* logger = local_model_.GetOrCreate(); + SOLVER_LOG(logger, ""); + SOLVER_LOG(logger, absl::StrFormat( + "Starting subsolver \'%s\' hint search at %.2fs", + name(), shared_->wall_timer->Get())); + if (local_model_.GetOrCreate()->repair_hint()) { - MinimizeL1DistanceWithHint(*shared_->model_proto, &local_model_); + MinimizeL1DistanceWithHint(shared_->model_proto, &local_model_); } else { - QuickSolveWithHint(*shared_->model_proto, &local_model_); + QuickSolveWithHint(shared_->model_proto, &local_model_); } + SOLVER_LOG(logger, + absl::StrFormat("Starting subsolver \'%s\' search at %.2fs", + name(), shared_->wall_timer->Get())); + // No need for mutex since we only run one task at the time. 
solving_first_chunk_ = false; @@ -2453,7 +910,7 @@ class FullProblemSolver : public SubSolver { } const double saved_dtime = time_limit->GetElapsedDeterministicTime(); - SolveLoadedCpModel(*shared_->model_proto, &local_model_); + SolveLoadedCpModel(shared_->model_proto, &local_model_); absl::MutexLock mutex_lock(&mutex_); previous_task_is_completed_ = true; @@ -2487,200 +944,7 @@ class FullProblemSolver : public SubSolver { bool previous_task_is_completed_ ABSL_GUARDED_BY(mutex_) = true; }; -class ObjectiveShavingSolver : public SubSolver { - public: - ObjectiveShavingSolver(const SatParameters& local_parameters, - NeighborhoodGeneratorHelper* helper, - SharedClasses* shared) - : SubSolver(local_parameters.name(), FULL_PROBLEM), - local_params_(local_parameters), - helper_(helper), - shared_(shared), - local_proto_(*shared->model_proto) {} - - ~ObjectiveShavingSolver() override { - shared_->stat_tables.AddTimingStat(*this); - } - - bool TaskIsAvailable() override { - if (shared_->SearchIsDone()) return false; - - // We only support one task at the time. 
- absl::MutexLock mutex_lock(&mutex_); - return !task_in_flight_; - } - - std::function GenerateTask(int64_t /*task_id*/) override { - { - absl::MutexLock mutex_lock(&mutex_); - stop_current_chunk_.store(false); - task_in_flight_ = true; - objective_lb_ = shared_->response->GetInnerObjectiveLowerBound(); - } - return [this]() { - if (ResetModel()) { - SolveLoadedCpModel(local_proto_, local_repo_.get()); - const CpSolverResponse local_response = - local_repo_->GetOrCreate()->GetResponse(); - - if (local_response.status() == CpSolverStatus::OPTIMAL || - local_response.status() == CpSolverStatus::FEASIBLE) { - std::vector solution_values( - local_response.solution().begin(), - local_response.solution().end()); - if (local_params_.cp_model_presolve()) { - const int num_original_vars = - shared_->model_proto->variables_size(); - PostsolveResponseWrapper(local_params_, num_original_vars, - mapping_proto_, postsolve_mapping_, - &solution_values); - } - shared_->response->NewSolution(solution_values, Info()); - } else if (local_response.status() == CpSolverStatus::INFEASIBLE) { - absl::MutexLock mutex_lock(&mutex_); - shared_->response->UpdateInnerObjectiveBounds( - Info(), objective_lb_ + 1, kMaxIntegerValue); - } - } - - absl::MutexLock mutex_lock(&mutex_); - task_in_flight_ = false; - if (local_repo_ != nullptr) { - const double dtime = local_repo_->GetOrCreate() - ->GetElapsedDeterministicTime(); - AddTaskDeterministicDuration(dtime); - shared_->time_limit->AdvanceDeterministicTime(dtime); - } - }; - } - - void Synchronize() override { - absl::MutexLock mutex_lock(&mutex_); - if (!task_in_flight_) return; - - // We are just waiting for the inner code to check the time limit or - // to return nicely. - if (stop_current_chunk_) return; - - // TODO(user): Also stop if we have enough newly fixed / improved root level - // bounds so that we think it is worth represolving and restarting. 
- if (shared_->SearchIsDone()) { - stop_current_chunk_.store(true); - } - if (shared_->response->GetInnerObjectiveLowerBound() > objective_lb_) { - stop_current_chunk_.store(true); - } - } - - private: - std::string Info() { - return absl::StrCat(name(), " (vars=", local_proto_.variables().size(), - " csts=", local_proto_.constraints().size(), ")"); - } - - bool ResetModel() { - local_repo_ = std::make_unique(name()); - *local_repo_->GetOrCreate() = local_params_; - - auto* time_limit = local_repo_->GetOrCreate(); - shared_->time_limit->UpdateLocalLimit(time_limit); - time_limit->RegisterSecondaryExternalBooleanAsLimit(&stop_current_chunk_); - - // We copy the model. - local_proto_ = *shared_->model_proto; - *local_proto_.mutable_variables() = - helper_->FullNeighborhood().delta.variables(); - - // Store the current lb in local variable. - IntegerValue objective_lb; - { - absl::MutexLock mutex_lock(&mutex_); - objective_lb = objective_lb_; - } - - // We replace the objective by a constraint, objective == lb. - // TODO(user): We could use objective <= lb, it might be better or worse - // depending on the model. It is also a bit tricker to make sure a feasible - // solution is feasible. - // We modify local_proto_ to a pure feasibility problem. - // Not having the objective open up more presolve reduction. - if (local_proto_.objective().vars().size() == 1 && - local_proto_.objective().coeffs(0) == 1) { - auto* obj_var = - local_proto_.mutable_variables(local_proto_.objective().vars(0)); - obj_var->clear_domain(); - obj_var->add_domain(objective_lb.value()); - obj_var->add_domain(objective_lb.value()); - } else { - auto* obj = local_proto_.add_constraints()->mutable_linear(); - *obj->mutable_vars() = local_proto_.objective().vars(); - *obj->mutable_coeffs() = local_proto_.objective().coeffs(); - obj->add_domain(objective_lb.value()); - obj->add_domain(objective_lb.value()); - } - - // Clear the objective. - local_proto_.clear_objective(); - - // Dump? 
- if (absl::GetFlag(FLAGS_cp_model_dump_submodels)) { - const std::string name = - absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), - "objective_shaving_", objective_lb.value(), ".pb.txt"); - LOG(INFO) << "Dumping objective shaving model to '" << name << "'."; - CHECK(WriteModelProtoToFile(local_proto_, name)); - } - - // Presolve if asked. - if (local_params_.cp_model_presolve()) { - mapping_proto_.Clear(); - postsolve_mapping_.clear(); - auto context = std::make_unique( - local_repo_.get(), &local_proto_, &mapping_proto_); - const CpSolverStatus presolve_status = - PresolveCpModel(context.get(), &postsolve_mapping_); - if (presolve_status == CpSolverStatus::INFEASIBLE) { - absl::MutexLock mutex_lock(&mutex_); - shared_->response->UpdateInnerObjectiveBounds(Info(), objective_lb_ + 1, - kMaxIntegerValue); - return false; - } - } - - // Tricky: If we aborted during the presolve above, some constraints might - // be in a non-canonical form (like having duplicates, etc...) and it seem - // not all our propagator code deal with that properly. So it is important - // to abort right away here. - // - // We had a bug when the LoadCpModel() below was returning infeasible on - // such non fully-presolved model. - if (local_repo_->GetOrCreate()->LimitReached()) return false; - - LoadCpModel(local_proto_, local_repo_.get()); - return true; - } - - // This is fixed at construction. - SatParameters local_params_; - NeighborhoodGeneratorHelper* helper_; - SharedClasses* shared_; - - // Allow to control the local time limit in addition to a potential user - // defined external Boolean. - std::atomic stop_current_chunk_; - - // Local singleton repository and presolved local model. - std::unique_ptr local_repo_; - CpModelProto local_proto_; - - // For postsolving a feasible solution or improving objective lb. 
- std::vector postsolve_mapping_; - CpModelProto mapping_proto_; - - absl::Mutex mutex_; - IntegerValue objective_lb_ ABSL_GUARDED_BY(mutex_); - bool task_in_flight_ ABSL_GUARDED_BY(mutex_) = false; -}; +#if !defined(__PORTABLE_PLATFORM__) class FeasibilityPumpSolver : public SubSolver { public: @@ -2711,7 +975,7 @@ class FeasibilityPumpSolver : public SubSolver { // Level zero variable bounds sharing. if (shared_->bounds != nullptr) { RegisterVariableBoundsLevelZeroImport( - *shared_->model_proto, shared_->bounds.get(), local_model_.get()); + shared_->model_proto, shared_->bounds.get(), local_model_.get()); } } @@ -2734,7 +998,7 @@ class FeasibilityPumpSolver : public SubSolver { { absl::MutexLock mutex_lock(&mutex_); if (solving_first_chunk_) { - LoadFeasibilityPump(*shared_->model_proto, local_model_.get()); + LoadFeasibilityPump(shared_->model_proto, local_model_.get()); // No new task will be scheduled for this worker if there is no // linear relaxation. if (local_model_->Get() == nullptr) return; @@ -3050,7 +1314,7 @@ class LnsSolver : public SubSolver { // TODO(user): In a production environment, we should probably just // ignore this fragment and continue. const bool feasible = - SolutionIsFeasible(*shared_->model_proto, solution_values); + SolutionIsFeasible(shared_->model_proto, solution_values); if (!feasible) { if (absl::GetFlag(FLAGS_cp_model_dump_problematic_lns)) { const std::string name = @@ -3084,7 +1348,7 @@ class LnsSolver : public SubSolver { // // TODO(user): We could however fix it in the LNS Helper! 
if (data.status == CpSolverStatus::OPTIMAL && - !shared_->model_proto->has_symmetry() && !solution_values.empty() && + !shared_->model_proto.has_symmetry() && !solution_values.empty() && neighborhood.is_simple && !neighborhood.variables_that_can_be_fixed_to_local_optimum .empty()) { @@ -3099,7 +1363,7 @@ class LnsSolver : public SubSolver { if (data.status == CpSolverStatus::OPTIMAL || data.status == CpSolverStatus::FEASIBLE) { data.new_objective = IntegerValue(ComputeInnerObjective( - shared_->model_proto->objective(), solution_values)); + shared_->model_proto.objective(), solution_values)); } // Report any feasible solution we have. Optimization: We don't do that @@ -3125,16 +1389,16 @@ class LnsSolver : public SubSolver { generator_->AddSolveData(data); - if (VLOG_IS_ON(1) && display_lns_info) { + if (VLOG_IS_ON(2) && display_lns_info) { std::string s = absl::StrCat(" LNS ", name(), ":"); if (new_solution) { const double base_obj = ScaleObjectiveValue( - shared_->model_proto->objective(), - ComputeInnerObjective(shared_->model_proto->objective(), + shared_->model_proto.objective(), + ComputeInnerObjective(shared_->model_proto.objective(), base_response.solution())); const double new_obj = ScaleObjectiveValue( - shared_->model_proto->objective(), - ComputeInnerObjective(shared_->model_proto->objective(), + shared_->model_proto.objective(), + ComputeInnerObjective(shared_->model_proto.objective(), solution_values)); absl::StrAppend(&s, " [new_sol:", base_obj, " -> ", new_obj, "]"); } @@ -3169,309 +1433,183 @@ class LnsSolver : public SubSolver { SharedClasses* shared_; }; -void SolveCpModelParallel(const CpModelProto& model_proto, - Model* global_model) { +void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { const SatParameters& params = *global_model->GetOrCreate(); CHECK(!params.enumerate_all_solutions()) << "Enumerating all solutions in parallel is not supported."; if (global_model->GetOrCreate()->LimitReached()) return; - SharedClasses 
shared(&model_proto, global_model); - - if (params.share_level_zero_bounds()) { - shared.bounds = std::make_unique(model_proto); - shared.bounds->set_dump_prefix(absl::GetFlag(FLAGS_cp_model_dump_prefix)); - shared.bounds->LoadDebugSolution( - global_model->GetOrCreate()->DebugSolution()); - } - - shared.lp_solutions = std::make_unique( - /*num_solutions_to_keep=*/10); - global_model->Register(shared.lp_solutions.get()); - - const bool testing = params.use_lns_only() || params.test_feasibility_jump(); - - // We currently only use the feasibility pump or rins/rens if it is enabled - // and some other parameters are not on. - // - // TODO(user): for now this is not deterministic so we disable it on - // interleave search. Fix. - const bool use_rins_rens = params.use_lns() && params.use_rins_lns() && - !testing && !params.interleave_search(); - const bool use_feasibility_pump = params.use_lns() && - params.use_feasibility_pump() && - params.linearization_level() > 0 && - !testing && !params.interleave_search(); - if (use_feasibility_pump || use_rins_rens) { - shared.incomplete_solutions = - std::make_unique(); - global_model->Register( - shared.incomplete_solutions.get()); - } - - // Set up synchronization mode in parallel. - const bool always_synchronize = - !params.interleave_search() || params.num_workers() <= 1; - shared.response->SetSynchronizationMode(always_synchronize); - - if (params.share_binary_clauses()) { - shared.clauses = std::make_unique(always_synchronize); - } + // If specified by the user, we might disable some parameters based on their + // name. + SubsolverNameFilter name_filter(params); // The list of all the SubSolver that will be used in this parallel search. + // These will be synchronized in order. Note that we will assemble this at + // the end from the other list below. 
std::vector> subsolvers; - std::vector> incomplete_subsolvers; + + // We distinguish subsolver depending on their behavior: + // - 'full' if a full thread is needed and they are not interleaved. + // - 'first_solution' if they will be destroyed as soon as we have a solution. + // - 'interleaved' if the work is cut into small chunk so that a few threads + // can work on many of such subsolvers alternatively. + // - 'reentrant' if one subsolver can generate many such task. + // + // TODO(user): Maybe we should just interleave everything for an easier + // configuration. + std::vector> full_worker_subsolvers; + std::vector> first_solution_full_subsolvers; + std::vector> reentrant_interleaved_subsolvers; + std::vector> interleaved_subsolvers; // Add a synchronization point for the shared classes. subsolvers.push_back(std::make_unique( - "synchronization_agent", [&shared]() { - shared.response->Synchronize(); - shared.response->MutableSolutionsRepository()->Synchronize(); - if (shared.bounds != nullptr) { - shared.bounds->Synchronize(); + "synchronization_agent", [shared]() { + shared->response->Synchronize(); + shared->response->MutableSolutionsRepository()->Synchronize(); + if (shared->bounds != nullptr) { + shared->bounds->Synchronize(); } - if (shared.lp_solutions != nullptr) { - shared.lp_solutions->Synchronize(); + if (shared->lp_solutions != nullptr) { + shared->lp_solutions->Synchronize(); } - if (shared.clauses != nullptr) { - shared.clauses->Synchronize(); + if (shared->clauses != nullptr) { + shared->clauses->Synchronize(); } })); + const auto name_to_params = GetNamedParameters(params); + const SatParameters& lns_params = name_to_params.at("lns"); + // Add the NeighborhoodGeneratorHelper as a special subsolver so that its // Synchronize() is called before any LNS neighborhood solvers. 
auto unique_helper = std::make_unique( - &model_proto, ¶ms, shared.response, shared.bounds.get()); + &shared->model_proto, ¶ms, shared->response, shared->bounds.get()); NeighborhoodGeneratorHelper* helper = unique_helper.get(); subsolvers.push_back(std::move(unique_helper)); - int num_full_problem_solvers = 0; - if (testing) { - // Register something to find a first solution. Note that this is mainly - // used for experimentation, and using no_lp usually result in a faster - // first solution. - // - // TODO(user): merge code with standard solver. Just make sure that all - // full solvers die after the first solution has been found. - if (!params.test_feasibility_jump()) { - SatParameters local_params = params; - local_params.set_stop_after_first_solution(true); - local_params.set_linearization_level(0); - subsolvers.push_back(std::make_unique( - "first_solution", local_params, - /*split_in_chunks=*/false, &shared)); - } - } else { - for (const SatParameters& local_params : GetWorkSharingParams( - params, model_proto, params.shared_tree_num_workers())) { - subsolvers.push_back(std::make_unique( + // Add shared tree workers if asked. + if (params.shared_tree_num_workers() > 0 && + shared->model_proto.assumptions().empty()) { + for (const SatParameters& local_params : RepeatParameters( + name_filter.Filter({name_to_params.at("shared_tree")}), + params.shared_tree_num_workers())) { + full_worker_subsolvers.push_back(std::make_unique( local_params.name(), local_params, - /*split_in_chunks=*/params.interleave_search(), &shared)); - num_full_problem_solvers++; + /*split_in_chunks=*/params.interleave_search(), shared)); } - for (const SatParameters& local_params : - GetDiverseSetOfParameters(params, model_proto)) { - // TODO(user): This is currently not supported here. 
- if (params.optimize_with_max_hs()) continue; - ++num_full_problem_solvers; + } - if (local_params.use_objective_shaving_search()) { - subsolvers.push_back(std::make_unique( - local_params, helper, &shared)); - continue; - } + // Add full problem solvers. + for (const SatParameters& local_params : GetFullWorkerParameters( + params, shared->model_proto, + /*num_already_present=*/full_worker_subsolvers.size(), + &name_filter)) { + if (!name_filter.Keep(local_params.name())) continue; - subsolvers.push_back(std::make_unique( - local_params.name(), local_params, - /*split_in_chunks=*/params.interleave_search(), &shared)); + // TODO(user): This is currently not supported here. + if (params.optimize_with_max_hs()) continue; + + // TODO(user): these should probably be interleaved_subsolvers. + if (local_params.use_objective_shaving_search()) { + full_worker_subsolvers.push_back(std::make_unique( + local_params, helper, shared)); + continue; } + + // TODO(user): these should probably be interleaved_subsolvers. + if (local_params.use_variables_shaving_search()) { + full_worker_subsolvers.push_back( + std::make_unique(local_params, shared)); + continue; + } + + full_worker_subsolvers.push_back(std::make_unique( + local_params.name(), local_params, + /*split_in_chunks=*/params.interleave_search(), shared)); } // Add FeasibilityPumpSolver if enabled. - if (use_feasibility_pump) { - incomplete_subsolvers.push_back( - std::make_unique(params, &shared)); + int num_interleaved_subsolver_that_do_not_need_solution = 0; + if (params.use_feasibility_pump() && name_filter.Keep("feasibility_pump")) { + ++num_interleaved_subsolver_that_do_not_need_solution; + interleaved_subsolvers.push_back( + std::make_unique(params, shared)); } - const SatParameters lns_params = GetNamedParameters(params).at("lns"); - - if (use_rins_rens) { + // Add rins/rens. + // This behave like a LNS, it just construct starting solution differently. 
+ if (params.use_rins_lns() && name_filter.Keep("rins/rens")) { // Note that we always create the SharedLPSolutionRepository. This meets // the requirement of having a SharedLPSolutionRepository to // create RINS/RENS lns generators. // TODO(user): Do we create a variable number of these workers. - incomplete_subsolvers.push_back(std::make_unique( + ++num_interleaved_subsolver_that_do_not_need_solution; + reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( - helper, shared.response, shared.lp_solutions.get(), - shared.incomplete_solutions.get(), "rins/rens"), - lns_params, helper, &shared)); - } - const int num_incomplete_solvers = - params.num_workers() - num_full_problem_solvers; - const LinearModel* linear_model = global_model->Get(); - if (linear_model != nullptr && !params.interleave_search() && - model_proto.has_objective()) { - int num_violation_ls = params.has_num_violation_ls() - ? params.num_violation_ls() - : num_incomplete_solvers / 8 + 1; - if (params.test_feasibility_jump()) { - num_violation_ls = params.num_workers(); - } - for (int i = 0; i < num_violation_ls; ++i) { - SatParameters local_params = params; - local_params.set_random_seed(ValidSumSeed(params.random_seed(), i)); - incomplete_subsolvers.push_back(std::make_unique( - "violation_ls", SubSolver::INCOMPLETE, linear_model, local_params, - shared.time_limit, shared.response, shared.bounds.get(), shared.stats, - &shared.stat_tables)); - } + helper, shared->response, shared->lp_solutions.get(), + shared->incomplete_solutions.get(), name_filter.LastName()), + lns_params, helper, shared)); } - // Adds first solution subsolvers. + // Add incomplete subsolvers that require an objective. // - // The logic is the following. Before the first solution is found, we have (in - // order): - // - num_full_problem_solvers full problem solvers - // - num_workers - num_full_problem_solvers - - // num_dedicated_incomplete_solvers first solution solvers. 
- // - num_workers - num_full_problem_solvers incomplete solvers. Only - // num_dedicated_incomplete_solvers are active before the first solution - // is found. - // - // After the first solution is found, all first solution solvers die, the we - // have num_full_problem_solvers null problem solvers, and the rest are - // incomplete solvers. - // - // TODO(user): Check with interleave_search. - if (!model_proto.has_objective() || model_proto.objective().vars().empty() || - !params.interleave_search() || params.test_feasibility_jump()) { - const int max_num_incomplete_solvers_running_before_the_first_solution = - params.num_workers() <= 16 ? 1 : 2; - const int num_reserved_incomplete_solvers = - params.test_feasibility_jump() - ? 0 - : std::min( - max_num_incomplete_solvers_running_before_the_first_solution, - static_cast(incomplete_subsolvers.size())); - const int num_available = params.num_workers() - num_full_problem_solvers - - num_reserved_incomplete_solvers; - - // TODO(user): FeasibilityJumpSolver are split in chunk as so we could - // schedule more than the available number of threads. They will just be - // interleaved. We will get an higher diversity, but use more memory. - const int num_feasibility_jump = - (params.interleave_search() || !params.use_feasibility_jump() || - linear_model == nullptr) - ? 0 - : (params.test_feasibility_jump() ? num_available - : (num_available + 1) / 2); - const int num_first_solution_subsolvers = - num_available - num_feasibility_jump; - - // TODO(user): Limit number of options by improving restart - // heuristic and randomizing other option at each restart? - for (int i = 0; i < num_feasibility_jump; ++i) { - // We alternate with a bunch of heuristic. - SatParameters local_params = params; - local_params.set_random_seed(ValidSumSeed(params.random_seed(), i)); - std::string name = "fj"; - - // Long restart or quick restart. 
- if (i % 2 == 0) { - absl::StrAppend(&name, "_short"); - local_params.set_feasibility_jump_restart_factor(1); - } else { - absl::StrAppend(&name, "_long"); - local_params.set_feasibility_jump_restart_factor(100); - } - - // Linear or not. - if (i / 2 % 2 == 0) { - local_params.set_feasibility_jump_linearization_level(0); - } else { - absl::StrAppend(&name, "_lin"); - local_params.set_feasibility_jump_linearization_level(2); - } - - // Default restart, random restart, or perturb restart. - if (i / 4 % 3 == 0) { - absl::StrAppend(&name, "_default"); - local_params.set_feasibility_jump_enable_restarts(true); - local_params.set_feasibility_jump_var_randomization_probability(0.0); - } else if (i / 4 % 3 == 1) { - absl::StrAppend(&name, "_random"); - local_params.set_feasibility_jump_enable_restarts(true); - local_params.set_feasibility_jump_var_randomization_probability(0.05); - } else { - absl::StrAppend(&name, "_perturb"); - local_params.set_feasibility_jump_enable_restarts(false); - local_params.set_feasibility_jump_var_randomization_probability(0.05); - } - - incomplete_subsolvers.push_back(std::make_unique( - name, SubSolver::FIRST_SOLUTION, linear_model, local_params, - shared.time_limit, shared.response, shared.bounds.get(), shared.stats, - &shared.stat_tables)); - } - for (const SatParameters& local_params : GetFirstSolutionParams( - params, model_proto, num_first_solution_subsolvers)) { - subsolvers.push_back(std::make_unique( - local_params.name(), local_params, - /*split_in_chunks=*/params.interleave_search(), &shared, - /*stop_on_first_solution=*/true)); - } - } - - // Now that first solutions solvers are in place, we can move the - // incomplete_subsolvers into subsolvers. - for (int i = 0; i < incomplete_subsolvers.size(); ++i) { - subsolvers.push_back(std::move(incomplete_subsolvers[i])); - } - incomplete_subsolvers.clear(); - - // Add incomplete subsolvers that require an objective. 
- if (params.use_lns() && model_proto.has_objective() && - !model_proto.objective().vars().empty() && - !params.test_feasibility_jump()) { + // They are all re-entrant, so we do not need to specify more than the number + // of workers. And they will all be interleaved, so it is okay to have many + // even if we have a single thread for interleaved workers. + if (params.use_lns() && shared->model_proto.has_objective() && + !shared->model_proto.objective().vars().empty()) { // Enqueue all the possible LNS neighborhood subsolvers. // Each will have their own metrics. - subsolvers.push_back(std::make_unique( - std::make_unique(helper, "rnd_var_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique(helper, - "rnd_cst_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique(helper, - "graph_var_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique(helper, - "graph_arc_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique(helper, - "graph_cst_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "graph_dec_lns"), - lns_params, helper, &shared)); - - if (params.use_lb_relax_lns()) { - subsolvers.push_back(std::make_unique( + if (name_filter.Keep("rnd_var_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("rnd_cst_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("graph_var_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("graph_arc_lns")) { 
+ reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("graph_cst_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("graph_dec_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (params.use_lb_relax_lns() && name_filter.Keep("lb_relax_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( - helper, "lb_relax_lns", + helper, name_filter.LastName(), [](const CpModelProto cp_model, Model* model) { model->GetOrCreate() ->InitializeObjective(cp_model); LoadCpModel(cp_model, model); SolveLoadedCpModel(cp_model, model); }, - shared.time_limit), - lns_params, helper, &shared)); + shared->time_limit), + lns_params, helper, shared)); } const bool has_no_overlap_or_cumulative = @@ -3480,22 +1618,26 @@ void SolveCpModelParallel(const CpModelProto& model_proto, // Scheduling (no_overlap and cumulative) specific LNS. 
if (has_no_overlap_or_cumulative) { - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "scheduling_intervals_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "scheduling_time_window_lns"), - lns_params, helper, &shared)); + if (name_filter.Keep("scheduling_intervals_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("scheduling_time_window_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } const std::vector> intervals_in_constraints = helper->GetUniqueIntervalSets(); - if (intervals_in_constraints.size() > 2) { - subsolvers.push_back(std::make_unique( + if (intervals_in_constraints.size() > 2 && + name_filter.Keep("scheduling_resource_windows_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( - helper, intervals_in_constraints, - "scheduling_resource_windows_lns"), - lns_params, helper, &shared)); + helper, intervals_in_constraints, name_filter.LastName()), + lns_params, helper, shared)); } } @@ -3503,26 +1645,34 @@ void SolveCpModelParallel(const CpModelProto& model_proto, const bool has_no_overlap2d = !helper->TypeToConstraints(ConstraintProto::kNoOverlap2D).empty(); if (has_no_overlap2d) { - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "packing_rectangles_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "packing_precedences_lns"), - lns_params, helper, &shared)); - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "packing_slice_lns"), - lns_params, helper, &shared)); + if (name_filter.Keep("packing_rectangles_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + 
std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("packing_precedences_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("packing_slice_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } // Generic scheduling/packing LNS. if (has_no_overlap_or_cumulative || has_no_overlap2d) { - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "scheduling_precedences_lns"), - lns_params, helper, &shared)); + if (name_filter.Keep("scheduling_precedences_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } const int num_circuit = static_cast( @@ -3530,156 +1680,193 @@ void SolveCpModelParallel(const CpModelProto& model_proto, const int num_routes = static_cast( helper->TypeToConstraints(ConstraintProto::kRoutes).size()); if (num_circuit + num_routes > 0) { - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "routing_random_lns"), - lns_params, helper, &shared)); - - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "routing_path_lns"), - lns_params, helper, &shared)); + if (name_filter.Keep("routing_random_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } + if (name_filter.Keep("routing_path_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } if (num_routes > 0 || num_circuit > 1) { - subsolvers.push_back(std::make_unique( - std::make_unique( - helper, "routing_full_path_lns"), - 
lns_params, helper, &shared)); + if (name_filter.Keep("routing_full_path_lns")) { + reentrant_interleaved_subsolvers.push_back(std::make_unique( + std::make_unique( + helper, name_filter.LastName()), + lns_params, helper, shared)); + } } } + // Used by LS and feasibility jump. + // This will automatically be created (only once) if needed. + const auto get_linear_model = [&]() { + auto* candidate = global_model->Get(); + if (candidate != nullptr) return candidate; + + // Create it and transfer ownership. + LinearModel* linear_model = new LinearModel(shared->model_proto); + global_model->TakeOwnership(linear_model); + global_model->Register(linear_model); + return global_model->Get(); + }; + + // Add violation LS workers. + // + // Compared to LNS, these are not re-entrant, so we need to schedule the + // correct number for parallelism. + if (shared->model_proto.has_objective()) { + // If not forced by the parameters, we want one LS every two threads that + // work on interleaved stuff. Note that by default they are many LNS, so + // that shouldn't be too many. + const int num_thread_for_interleaved_workers = + params.num_workers() - full_worker_subsolvers.size(); + int num_violation_ls = params.has_num_violation_ls() + ? params.num_violation_ls() + : (num_thread_for_interleaved_workers + 1) / 2; + + // If there is no rentrant solver, maybe increase the number to reach max + // parallelism. + if (reentrant_interleaved_subsolvers.empty()) { + num_violation_ls = + std::max(num_violation_ls, + num_thread_for_interleaved_workers - + static_cast(interleaved_subsolvers.size())); + } + + const absl::string_view ls_name = "ls"; + const absl::string_view lin_ls_name = "ls_lin"; + + const int num_ls_lin = + name_filter.Keep(lin_ls_name) ? num_violation_ls / 3 : 0; + const int num_ls_default = + name_filter.Keep(ls_name) ? 
num_violation_ls - num_ls_lin : 0; + + if (num_ls_default > 0) { + std::shared_ptr states = std::make_shared( + ls_name, params, &shared->stat_tables); + for (int i = 0; i < num_ls_default; ++i) { + SatParameters local_params = params; + local_params.set_random_seed( + CombineSeed(params.random_seed(), interleaved_subsolvers.size())); + local_params.set_feasibility_jump_linearization_level(0); + interleaved_subsolvers.push_back( + std::make_unique( + ls_name, SubSolver::INCOMPLETE, get_linear_model(), + local_params, states, shared->time_limit, shared->response, + shared->bounds.get(), shared->stats, &shared->stat_tables)); + } + } + + if (num_ls_lin > 0) { + std::shared_ptr lin_states = + std::make_shared(lin_ls_name, params, + &shared->stat_tables); + for (int i = 0; i < num_ls_lin; ++i) { + SatParameters local_params = params; + local_params.set_random_seed( + CombineSeed(params.random_seed(), interleaved_subsolvers.size())); + local_params.set_feasibility_jump_linearization_level(2); + interleaved_subsolvers.push_back( + std::make_unique( + lin_ls_name, SubSolver::INCOMPLETE, get_linear_model(), + local_params, lin_states, shared->time_limit, shared->response, + shared->bounds.get(), shared->stats, &shared->stat_tables)); + } + } + } + + // Adds first solution subsolvers. + // We have two kind, either full_worker_subsolvers or feasibility jump ones. + // + // These will be stopped and deleted as soon as the first solution is found, + // leaving the resource for the other subsolvers (if we have an objective). + { + int num_thread_available = + params.num_workers() - static_cast(full_worker_subsolvers.size()); + + // We reserve 1 thread for all interleaved subsolved that can work without + // a first solution. If we have feasibility jump, because these will be + // interleaved, we don't do that. 
+ if (!params.use_feasibility_jump() && + num_interleaved_subsolver_that_do_not_need_solution > 0) { + --num_thread_available; + } + num_thread_available = std::max(num_thread_available, 0); + + const std::vector all_params = + RepeatParameters(name_filter.Filter(GetFirstSolutionBaseParams(params)), + num_thread_available); + + std::shared_ptr fj_states; + std::shared_ptr fj_lin_states; + + // Build the requested subsolvers. + for (const SatParameters& local_params : all_params) { + if (local_params.use_feasibility_jump()) { + // Create the SharedLsStates if not already done. + std::shared_ptr states; + if (local_params.feasibility_jump_linearization_level() == 0) { + if (fj_states == nullptr) { + fj_states = std::make_shared( + local_params.name(), params, &shared->stat_tables); + } + states = fj_states; + } else { + if (fj_lin_states == nullptr) { + fj_lin_states = std::make_shared( + local_params.name(), params, &shared->stat_tables); + } + states = fj_lin_states; + } + + interleaved_subsolvers.push_back( + std::make_unique( + local_params.name(), SubSolver::FIRST_SOLUTION, + get_linear_model(), local_params, states, shared->time_limit, + shared->response, shared->bounds.get(), shared->stats, + &shared->stat_tables)); + } else { + first_solution_full_subsolvers.push_back( + std::make_unique( + local_params.name(), local_params, + /*split_in_chunks=*/local_params.interleave_search(), shared, + /*stop_on_first_solution=*/true)); + } + } + } + + // Now that we are done with the logic, move all subsolver into a single + // list. Note that the position of the "synchronization" subsolver matter. + // Some are already in subsolvers, and we will add the gap one last. 
+ const auto move_all = + [&subsolvers](std::vector>& from) { + for (int i = 0; i < from.size(); ++i) { + subsolvers.push_back(std::move(from[i])); + } + from.clear(); + }; + move_all(full_worker_subsolvers); + move_all(first_solution_full_subsolvers); + move_all(reentrant_interleaved_subsolvers); + move_all(interleaved_subsolvers); + // Add a synchronization point for the gap integral that is executed last. // This way, after each batch, the proper deterministic time is updated and // then the function to integrate take the value of the new gap. - if (model_proto.has_objective() && !model_proto.objective().vars().empty()) { + if (shared->model_proto.has_objective() && + !shared->model_proto.objective().vars().empty()) { subsolvers.push_back(std::make_unique( "update_gap_integral", - [&shared]() { shared.response->UpdateGapIntegral(); })); + [shared]() { shared->response->UpdateGapIntegral(); })); } - // Log the name of all our SubSolvers. - auto* logger = global_model->GetOrCreate(); - if (logger->LoggingIsEnabled()) { - // Collect subsolver names per type (full, lns, 1st solution). 
- std::vector full_problem_solver_names; - std::vector incomplete_solver_names; - std::vector first_solution_solver_names; - std::vector helper_solver_names; - for (int i = 0; i < subsolvers.size(); ++i) { - const auto& subsolver = subsolvers[i]; - switch (subsolver->type()) { - case SubSolver::FULL_PROBLEM: - full_problem_solver_names.push_back(subsolver->name()); - break; - case SubSolver::INCOMPLETE: - incomplete_solver_names.push_back(subsolver->name()); - break; - case SubSolver::FIRST_SOLUTION: - first_solution_solver_names.push_back(subsolver->name()); - break; - case SubSolver::HELPER: - helper_solver_names.push_back(subsolver->name()); - break; - } - } - SOLVER_LOG(logger, ""); - - if (params.interleave_search()) { - SOLVER_LOG(logger, - absl::StrFormat("Starting deterministic search at %.2fs with " - "%i workers and batch size of %d.", - shared.wall_timer->Get(), params.num_workers(), - params.interleave_batch_size())); - } else { - SOLVER_LOG(logger, absl::StrFormat( - "Starting search at %.2fs with %i workers.", - shared.wall_timer->Get(), params.num_workers())); - } - - // TODO(user): We might not want to sort the subsolver by name to keep our - // ordered list by importance? not sure. - auto display_subsolver_list = [logger]( - const std::vector& names, - const absl::string_view type_name) { - if (!names.empty()) { - absl::btree_map solvers_and_count; - for (const auto& name : names) { - solvers_and_count[name]++; - } - std::vector counted_names; - for (const auto& [name, count] : solvers_and_count) { - if (count == 1) { - counted_names.push_back(name); - } else { - counted_names.push_back(absl::StrCat(name, "(", count, ")")); - } - } - SOLVER_LOG( - logger, names.size(), " ", - absl::StrCat(type_name, names.size() == 1 ? 
"" : "s"), ": [", - absl::StrJoin(counted_names.begin(), counted_names.end(), ", "), - "]"); - } - }; - - display_subsolver_list(full_problem_solver_names, "full problem subsolver"); - display_subsolver_list(first_solution_solver_names, - "first solution subsolver"); - display_subsolver_list(incomplete_solver_names, "incomplete subsolver"); - display_subsolver_list(helper_solver_names, "helper subsolver"); - } - - // Launch the main search loop. - if (params.interleave_search()) { - int batch_size = params.interleave_batch_size(); - if (batch_size == 0) { - batch_size = params.num_workers() == 1 ? 1 : params.num_workers() * 3; - SOLVER_LOG( - logger, - "Setting number of tasks in each batch of interleaved search to ", - batch_size); - } - DeterministicLoop(subsolvers, params.num_workers(), batch_size); - } else { - NonDeterministicLoop(subsolvers, params.num_workers()); - } - - // We need to delete the other subsolver in order to fill the stat tables. - // Note that first solution should already be deleted. - // We delete manually as windows release vectors in the opposite order. - for (int i = 0; i < subsolvers.size(); ++i) { - subsolvers[i].reset(); - } - - // Log statistics. 
- if (logger->LoggingIsEnabled()) { - logger->FlushPendingThrottledLogs(/*ignore_rates=*/true); - SOLVER_LOG(logger, ""); - - shared.stat_tables.Display(logger); - - shared.response->DisplayImprovementStatistics(); - - std::vector> table; - table.push_back( - {"Solution repositories", "Added", "Queried", "Ignored", "Synchro"}); - table.push_back(shared.response->SolutionsRepository().TableLineStats()); - if (shared.lp_solutions != nullptr) { - table.push_back(shared.lp_solutions->TableLineStats()); - } - if (shared.incomplete_solutions != nullptr) { - table.push_back(shared.incomplete_solutions->TableLineStats()); - } - SOLVER_LOG(logger, FormatTable(table)); - - if (shared.bounds) { - shared.bounds->LogStatistics(logger); - } - - if (shared.clauses) { - shared.clauses->LogStatistics(logger); - } - } + LaunchSubsolvers(params, shared, subsolvers, name_filter.AllIgnored()); } #endif // __PORTABLE_PLATFORM__ @@ -3703,26 +1890,61 @@ void AddPostsolveClauses(const std::vector& postsolve_mapping, postsolve->clauses.clear(); } +bool VarIsFixed(const CpModelProto& model_proto, int i) { + return model_proto.variables(i).domain_size() == 2 && + model_proto.variables(i).domain(0) == + model_proto.variables(i).domain(1); +} + void TestSolutionHintForFeasibility(const CpModelProto& model_proto, SolverLogger* logger, SharedResponseManager* manager = nullptr) { if (!model_proto.has_solution_hint()) return; - // TODO(user): If the hint specifies all non-fixed variables we could also - // do the check. 
- if (model_proto.solution_hint().vars_size() != model_proto.variables_size()) { - SOLVER_LOG(logger, "The solution hint is incomplete: ", - model_proto.solution_hint().vars_size(), " out of ", - model_proto.variables_size(), " variables hinted."); + int num_active_variables = 0; + int num_hinted_variables = 0; + for (int var = 0; var < model_proto.variables_size(); ++var) { + if (VarIsFixed(model_proto, var)) continue; + ++num_active_variables; + } + + for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { + const int ref = model_proto.solution_hint().vars(i); + if (VarIsFixed(model_proto, PositiveRef(ref))) continue; + ++num_hinted_variables; + } + CHECK_LE(num_hinted_variables, num_active_variables); + + if (num_active_variables != num_hinted_variables) { + SOLVER_LOG( + logger, "The solution hint is incomplete: ", num_hinted_variables, + " out of ", num_active_variables, " non fixed variables hinted."); return; } std::vector solution(model_proto.variables_size(), 0); + // Pre-assign from fixed domains. + for (int var = 0; var < model_proto.variables_size(); ++var) { + if (VarIsFixed(model_proto, var)) { + solution[var] = model_proto.variables(var).domain(0); + } + } + for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { const int ref = model_proto.solution_hint().vars(i); + const int var = PositiveRef(ref); const int64_t value = model_proto.solution_hint().values(i); - solution[PositiveRef(ref)] = RefIsPositive(ref) ? value : -value; + const int64_t hinted_value = RefIsPositive(ref) ? value : -value; + const Domain domain = ReadDomainFromProto(model_proto.variables(var)); + if (!domain.Contains(hinted_value)) { + SOLVER_LOG(logger, + "The solution hint is complete but it contains values outside " + "of the domain of the variables."); + return; + } + solution[var] = hinted_value; } + if (SolutionIsFeasible(model_proto, solution)) { if (manager != nullptr) { // Add it to the pool right away! 
Note that we already have a log in this @@ -3742,6 +1964,57 @@ void TestSolutionHintForFeasibility(const CpModelProto& model_proto, } // namespace +std::function NewFeasibleSolutionObserver( + const std::function& callback) { + return [callback = callback](Model* model) { + model->GetOrCreate()->AddSolutionCallback(callback); + }; +} + +std::function NewFeasibleSolutionLogCallback( + const std::function& + callback) { + return [callback = callback](Model* model) { + model->GetOrCreate()->AddLogCallback(callback); + }; +} + +std::function NewBestBoundCallback( + const std::function& callback) { + return [callback = callback](Model* model) { + model->GetOrCreate()->AddBestBoundCallback(callback); + }; +} + +#if !defined(__PORTABLE_PLATFORM__) +// TODO(user): Support it on android. +std::function NewSatParameters( + const std::string& params) { + sat::SatParameters parameters; + if (!params.empty()) { + CHECK(google::protobuf::TextFormat::ParseFromString(params, &parameters)) + << params; + } + return NewSatParameters(parameters); +} +#endif // __PORTABLE_PLATFORM__ + +std::function NewSatParameters( + const sat::SatParameters& parameters) { + return [parameters = parameters](Model* model) { + // Tricky: It is important to initialize the model parameters before any + // of the solver objects are created, so that by default they use the given + // parameters. + // + // TODO(user): A notable exception to this is the TimeLimit which is + // currently not initializing itself from the SatParameters in the model. It + // will also start counting from the time of its creation. It will be good + // to find a solution that is less error prone. 
+ *model->GetOrCreate() = parameters; + return parameters; + }; +} + CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { auto* wall_timer = model->GetOrCreate(); auto* user_timer = model->GetOrCreate(); @@ -3775,7 +2048,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // Enable the logging component. const SatParameters& params = *model->GetOrCreate(); SolverLogger* logger = model->GetOrCreate(); - logger->EnableLogging(params.log_search_progress() || VLOG_IS_ON(1)); + logger->EnableLogging(params.log_search_progress()); logger->SetLogToStdOut(params.log_to_stdout()); std::string log_string; if (params.log_to_response()) { @@ -3865,26 +2138,9 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { SOLVER_LOG(logger, "Starting ", CpSatSolverVersion()); SOLVER_LOG(logger, "Parameters: ", ProtobufShortDebugString(params)); - // Update params.num_workers() if the old field was used. - if (params.num_workers() == 0) { - model->GetOrCreate()->set_num_workers( - params.num_search_workers()); - } - - // Initialize the number of workers if set to 0. - if (params.num_workers() == 0) { -#if !defined(__PORTABLE_PLATFORM__) - // Sometimes, hardware_concurrency will return 0. So always default to 1. - const int num_cores = - params.enumerate_all_solutions() || !model_proto.assumptions().empty() - ? 1 - : std::max(std::thread::hardware_concurrency(), 1); -#else - const int num_cores = 1; -#endif - SOLVER_LOG(logger, "Setting number of workers to ", num_cores); - model->GetOrCreate()->set_num_workers(num_cores); - } + // Internally we adapt the parameters so that things are disabled if + // they do not make sense. 
+ AdaptGlobalParameters(model_proto, model); if (logger->LoggingIsEnabled() && params.use_absl_random()) { model->GetOrCreate()->LogSalt(); } @@ -4095,6 +2351,10 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { const CpSolverStatus presolve_status = PresolveCpModel(context.get(), &postsolve_mapping); + // Delete the context as soon as the presolve is done. Note that only + // postsolve_mapping and mapping_proto are needed for postsolve. + context.reset(nullptr); + if (presolve_status != CpSolverStatus::UNKNOWN) { SOLVER_LOG(logger, "Problem closed by presolve."); CpSolverResponse status_response; @@ -4107,6 +2367,59 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { SOLVER_LOG(logger, ""); SOLVER_LOG(logger, "Presolved ", CpModelStats(*new_cp_model_proto)); + // TODO(user): reduce this function size and find a better place for this? + SharedClasses shared(new_cp_model_proto, model); + + if (params.fill_tightened_domains_in_response()) { + shared_response_manager->AddResponsePostprocessor( + [&model_proto, new_cp_model_proto, mapping_proto, &postsolve_mapping, + logger, &shared](CpSolverResponse* response) { + // Collect the info we know about new_cp_model_proto bounds. + // Note that this is not really needed as we should have the same + // information in the mapping_proto. + std::vector bounds; + for (const IntegerVariableProto& vars : + new_cp_model_proto->variables()) { + bounds.push_back(ReadDomainFromProto(vars)); + } + + // Intersect with the SharedBoundsManager if it exists. + if (shared.bounds != nullptr) { + shared.bounds->UpdateDomains(&bounds); + } + + // Postsolve and fill the field. + FillTightenedDomainInResponse(model_proto, *mapping_proto, + postsolve_mapping, bounds, response, + logger); + }); + } + + // Solution checking. + // We either check all solutions, or only the last one. + // Checking all solutions might be expensive if we create many. 
+ auto check_solution = [&model_proto, ¶ms, mapping_proto, + &postsolve_mapping](CpSolverResponse* response) { + if (response->solution().empty()) return; + if (params.cp_model_presolve()) { + // We pass presolve data for more informative message in case the solution + // is not feasible. + CHECK(SolutionIsFeasible(model_proto, response->solution(), mapping_proto, + &postsolve_mapping)); + } else { + CHECK(SolutionIsFeasible(model_proto, response->solution())); + } + }; + if (DEBUG_MODE || + absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { + shared_response_manager->AddResponsePostprocessor( + std::move(check_solution)); + } else { + shared_response_manager->AddFinalResponsePostprocessor( + std::move(check_solution)); + } + + // Solution postsolving. if (params.cp_model_presolve()) { shared_response_manager->AddSolutionPostprocessor( [&model_proto, ¶ms, mapping_proto, &model, @@ -4116,8 +2429,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { *mapping_proto, postsolve_mapping, solution); }); shared_response_manager->AddResponsePostprocessor( - [&model_proto, ¶ms, mapping_proto, - &postsolve_mapping](CpSolverResponse* response) { + [&postsolve_mapping](CpSolverResponse* response) { // Map back the sufficient assumptions for infeasibility. for (int& ref : *(response @@ -4126,80 +2438,18 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { ? postsolve_mapping[ref] : NegatedRef(postsolve_mapping[PositiveRef(ref)]); } - if (!response->solution().empty()) { - CHECK(SolutionIsFeasible( - model_proto, - std::vector(response->solution().begin(), - response->solution().end()), - mapping_proto, &postsolve_mapping)) - << "postsolved solution"; - } - if (params.fill_tightened_domains_in_response()) { - // TODO(user): for now, we just use the domain inferred during - // presolve. 
- if (mapping_proto->variables().size() >= - model_proto.variables().size()) { - for (int i = 0; i < model_proto.variables().size(); ++i) { - *response->add_tightened_variables() = - mapping_proto->variables(i); - } - } - } }); } else { - shared_response_manager->AddFinalResponsePostprocessor( - [&model_proto](CpSolverResponse* response) { - if (!response->solution().empty()) { - CHECK(SolutionIsFeasible( - model_proto, std::vector(response->solution().begin(), - response->solution().end()))); - } - }); shared_response_manager->AddResponsePostprocessor( - [&model_proto, ¶ms](CpSolverResponse* response) { + [&model_proto](CpSolverResponse* response) { // Truncate the solution in case model expansion added more variables. const int initial_size = model_proto.variables_size(); - if (response->solution_size() > 0) { + if (!response->solution().empty()) { response->mutable_solution()->Truncate(initial_size); - if (DEBUG_MODE || - absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { - CHECK(SolutionIsFeasible( - model_proto, - std::vector(response->solution().begin(), - response->solution().end()))); - } - } - if (params.fill_tightened_domains_in_response()) { - *response->mutable_tightened_variables() = model_proto.variables(); } }); } - // Delete the context. 
- context.reset(nullptr); - - const auto& observers = model->GetOrCreate()->observers; - if (!observers.empty()) { - shared_response_manager->AddSolutionCallback( - [&observers](const CpSolverResponse& response) { - for (const auto& observer : observers) { - observer(response); - } - }); - } - - const auto& log_callbacks = - model->GetOrCreate()->log_callbacks; - for (const auto& callback : log_callbacks) { - shared_response_manager->AddLogCallback(callback); - } - - const auto& best_bound_callbacks = - model->GetOrCreate()->best_bound_callbacks; - for (const auto& callback : best_bound_callbacks) { - shared_response_manager->AddBestBoundCallback(callback); - } - // Make sure everything stops when we have a first solution if requested. if (params.stop_after_first_solution()) { shared_response_manager->AddSolutionCallback( @@ -4291,75 +2541,25 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { LoadDebugSolution(*new_cp_model_proto, model); - // Linear model (used by feasibility_jump and violation_ls) - if (params.num_workers() > 1 || params.test_feasibility_jump() || - params.num_violation_ls() > 0) { - LinearModel* linear_model = new LinearModel(*new_cp_model_proto); - model->TakeOwnership(linear_model); - model->Register(linear_model); - } - + if (!model->GetOrCreate()->LimitReached()) { #if defined(__PORTABLE_PLATFORM__) - if (/* DISABLES CODE */ (false)) { - // We ignore the multithreading parameter in this case. + if (/* DISABLES CODE */ (false)) { + // We ignore the multithreading parameter in this case. 
#else // __PORTABLE_PLATFORM__ - if (params.num_workers() > 1 || params.interleave_search() || - !params.subsolvers().empty() || params.test_feasibility_jump()) { - SolveCpModelParallel(*new_cp_model_proto, model); + if (params.num_workers() > 1 || params.interleave_search() || + !params.subsolvers().empty() || params.use_ls_only()) { + SolveCpModelParallel(&shared, model); #endif // __PORTABLE_PLATFORM__ - } else if (!model->GetOrCreate()->LimitReached()) { - SOLVER_LOG(logger, ""); - SOLVER_LOG(logger, absl::StrFormat("Starting to load the model at %.2fs", - wall_timer->Get())); - shared_response_manager->SetUpdateGapIntegralOnEachChange(true); - - // We use a local_model to share statistic report mechanism with the - // parallel case. When this model will be destroyed, we will collect some - // stats that are used to debug/improve internal algorithm. - // - // TODO(user): Reuse a Subsolver to get the same display as for the - // parallel case. Right now we don't have as much stats for single thread! - Model local_model; - local_model.Register(logger); - local_model.Register(model->GetOrCreate()); - local_model.Register(model->GetOrCreate()); - local_model.Register( - model->GetOrCreate()); - local_model.Register(shared_response_manager); - - LoadCpModel(*new_cp_model_proto, &local_model); - - SOLVER_LOG(logger, ""); - SOLVER_LOG(logger, absl::StrFormat("Starting sequential search at %.2fs", - wall_timer->Get())); - if (params.repair_hint()) { - MinimizeL1DistanceWithHint(*new_cp_model_proto, &local_model); } else { - QuickSolveWithHint(*new_cp_model_proto, &local_model); + shared_response_manager->SetUpdateGapIntegralOnEachChange(true); + + // To avoid duplicating code, the single-thread version reuse most of + // the multi-thread architecture. 
+ std::vector> subsolvers; + subsolvers.push_back(std::make_unique( + "main", params, /*split_in_chunks=*/false, &shared)); + LaunchSubsolvers(params, &shared, subsolvers, {}); } - SolveLoadedCpModel(*new_cp_model_proto, &local_model); - - // Display table data. - if (logger->LoggingIsEnabled()) { - logger->FlushPendingThrottledLogs(/*ignore_rates=*/true); - SOLVER_LOG(logger, ""); - SharedStatTables tables; - tables.AddLpStat("default", &local_model); - tables.AddSearchStat("default", &local_model); - tables.AddClausesStat("default", &local_model); - tables.Display(logger); - } - - // Export statistics. - CpSolverResponse status_response; - FillSolveStatsInResponse(&local_model, &status_response); - shared_response_manager->AppendResponseToBeMerged(status_response); - } - - // Extra logging if needed. Note that these are mainly activated on - // --vmodule *some_file*=1 and are here for development. - if (logger->LoggingIsEnabled()) { - model->GetOrCreate()->Log(logger); } return shared_response_manager->GetResponse(); diff --git a/ortools/sat/cp_model_solver.h b/ortools/sat/cp_model_solver.h index c4362e2061..a80f0e5bb6 100644 --- a/ortools/sat/cp_model_solver.h +++ b/ortools/sat/cp_model_solver.h @@ -16,9 +16,7 @@ #include #include -#include -#include "ortools/base/types.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" @@ -90,9 +88,10 @@ CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, * - etc... */ std::function NewFeasibleSolutionObserver( - const std::function& observer); + const std::function& callback); -/** Creates a callbacks that will append a string to the search log when +/** + * Creates a callbacks that will append a string to the search log when * reporting a new solution. 
* * The given function will be called on each improving feasible solution found * @@ -103,8 +102,9 @@ std::function NewFeasibleSolutionLogCallback( const std::function& callback); -/** Creates a callbacks that will be called on each new best objective bound - * found. +/** + * Creates a callback that will be called on each new best objective bound + * found. * * Note that this function is called before the update takes place. */ diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc new file mode 100644 index 0000000000..54bc630765 --- /dev/null +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -0,0 +1,1822 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/cp_model_solver_helpers.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ortools/base/logging.h" +#include "ortools/base/timer.h" +#if !defined(__PORTABLE_PLATFORM__) +#include "ortools/base/helpers.h" +#include "ortools/base/options.h" +#endif // __PORTABLE_PLATFORM__ +#include "absl/cleanup/cleanup.h" +#include "absl/container/flat_hash_set.h" +#include "absl/flags/flag.h" +#include "absl/log/check.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" +#include "absl/types/span.h" +#include "google/protobuf/arena.h" +#include "ortools/base/logging.h" +#include "ortools/base/strong_vector.h" +#include "ortools/graph/connected_components.h" +#include "ortools/port/proto_utils.h" +#include "ortools/sat/clause.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" +#include "ortools/sat/cp_model_loader.h" +#include "ortools/sat/cp_model_mapping.h" +#include "ortools/sat/cp_model_postsolve.h" +#include "ortools/sat/cp_model_search.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/cuts.h" +#include "ortools/sat/feasibility_pump.h" +#include "ortools/sat/implied_bounds.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_expr.h" +#include "ortools/sat/integer_search.h" +#include "ortools/sat/intervals.h" +#include "ortools/sat/lb_tree_search.h" +#include "ortools/sat/linear_constraint.h" +#include "ortools/sat/linear_programming_constraint.h" +#include "ortools/sat/linear_relaxation.h" +#include "ortools/sat/max_hs.h" +#include "ortools/sat/model.h" +#include "ortools/sat/optimization.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/probing.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/sat_solver.h" +#include "ortools/sat/synchronization.h" 
+#include "ortools/sat/util.h" +#include "ortools/sat/work_assignment.h" +#include "ortools/util/logging.h" +#if !defined(__PORTABLE_PLATFORM__) +#endif // __PORTABLE_PLATFORM__ +#include "ortools/base/version.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/strong_integers.h" +#include "ortools/util/time_limit.h" + +ABSL_FLAG(bool, cp_model_dump_models, false, + "DEBUG ONLY. When set to true, SolveCpModel() will dump its model " + "protos (original model, presolved model, mapping model) in text " + "format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|" + "mapping_model}.pb.txt."); + +#if defined(_MSC_VER) +ABSL_FLAG(std::string, cp_model_dump_prefix, ".\\", + "Prefix filename for all dumped files"); +#else +ABSL_FLAG(std::string, cp_model_dump_prefix, "/tmp/", + "Prefix filename for all dumped files"); +#endif + +ABSL_FLAG(bool, cp_model_dump_submodels, false, + "DEBUG ONLY. When set to true, solve will dump all " + "lns or objective_shaving submodels proto in text format to " + "'FLAGS_cp_model_dump_prefix'xxx.pb.txt."); + +ABSL_FLAG( + std::string, cp_model_load_debug_solution, "", + "DEBUG ONLY. When this is set to a non-empty file name, " + "we will interpret this as an internal solution which can be used for " + "debugging. For instance we use it to identify wrong cuts/reasons."); + +ABSL_FLAG(bool, cp_model_check_intermediate_solutions, false, + "When true, all intermediate solutions found by the solver will be " + "checked. This can be expensive, therefore it is off by default."); + +namespace operations_research { +namespace sat { + +// This should be called on the presolved model. It will read the file +// specified by --cp_model_load_debug_solution and properly fill the +// model->Get() proto vector. 
+void LoadDebugSolution(const CpModelProto& model_proto, Model* model) { +#if !defined(__PORTABLE_PLATFORM__) + if (absl::GetFlag(FLAGS_cp_model_load_debug_solution).empty()) return; + + CpSolverResponse response; + SOLVER_LOG(model->GetOrCreate(), + "Reading debug solution from '", + absl::GetFlag(FLAGS_cp_model_load_debug_solution), "'."); + CHECK_OK(file::GetTextProto(absl::GetFlag(FLAGS_cp_model_load_debug_solution), + &response, file::Defaults())); + + // Make sure we load a solution with the same number of variables as in the + // presolved model. + CHECK_EQ(response.solution().size(), model_proto.variables().size()); + model->GetOrCreate()->LoadDebugSolution( + response.solution()); +#endif // __PORTABLE_PLATFORM__ +} + +// This both copies the "main" DebugSolution to a local_model and also cache +// the value of the integer variables in that solution. +void InitializeDebugSolution(const CpModelProto& model_proto, Model* model) { + auto* shared_response = model->Get(); + if (shared_response == nullptr) return; + if (shared_response->DebugSolution().empty()) return; + + // Copy the proto values. + DebugSolution& debug_sol = *model->GetOrCreate(); + debug_sol.proto_values = shared_response->DebugSolution(); + + // Fill the values by integer variable. 
+ const int num_integers = + model->GetOrCreate()->NumIntegerVariables().value(); + debug_sol.ivar_has_value.assign(num_integers, false); + debug_sol.ivar_values.assign(num_integers, 0); + + std::vector boolean_solution; + + const auto& mapping = *model->GetOrCreate(); + for (int i = 0; i < debug_sol.proto_values.size(); ++i) { + if (mapping.IsBoolean(i)) { + Literal l = mapping.Literal(i); + if (debug_sol.proto_values[i] == 0) { + l = l.Negated(); + } + boolean_solution.push_back(l); + } + + if (!mapping.IsInteger(i)) continue; + const IntegerVariable var = mapping.Integer(i); + debug_sol.ivar_has_value[var] = true; + debug_sol.ivar_has_value[NegationOf(var)] = true; + debug_sol.ivar_values[var] = debug_sol.proto_values[i]; + debug_sol.ivar_values[NegationOf(var)] = -debug_sol.proto_values[i]; + } + + // If the solution is fully boolean (there is no integer variable), and + // we have a decision problem (so no new boolean should be created), we load + // it in the sat solver for debugging too. + if (boolean_solution.size() == debug_sol.proto_values.size() && + !model_proto.has_objective()) { + LOG(INFO) << "Loaded pure Boolean debugging solution."; + model->GetOrCreate()->LoadDebugSolution(boolean_solution); + } + + // The objective variable is usually not part of the proto, but it is still + // nice to have it, so we recompute it here. + auto* objective_def = model->Get(); + if (objective_def != nullptr) { + const IntegerVariable objective_var = objective_def->objective_var; + const int64_t objective_value = + ComputeInnerObjective(model_proto.objective(), debug_sol.proto_values); + debug_sol.ivar_has_value[objective_var] = true; + debug_sol.ivar_has_value[NegationOf(objective_var)] = true; + debug_sol.ivar_values[objective_var] = objective_value; + debug_sol.ivar_values[NegationOf(objective_var)] = -objective_value; + } + + // We also register a DEBUG callback to check our reasons. 
+ auto* encoder = model->GetOrCreate(); + const auto checker = [mapping, encoder, debug_sol, model]( + absl::Span clause, + absl::Span integers) { + bool is_satisfied = false; + int num_bools = 0; + int num_ints = 0; + std::vector> to_print; + for (const Literal l : clause) { + // First case, this Boolean is mapped. + { + const int proto_var = + mapping.GetProtoVariableFromBooleanVariable(l.Variable()); + if (proto_var != -1) { + to_print.push_back({l, IntegerLiteral(), proto_var}); + if (debug_sol.proto_values[proto_var] == (l.IsPositive() ? 1 : 0)) { + is_satisfied = true; + break; + } + ++num_bools; + continue; + } + } + + // Second case, it is associated to IntVar >= value. + // We can use any of them, so if one is false, we use this one. + bool all_true = true; + for (const IntegerLiteral associated : encoder->GetIntegerLiterals(l)) { + const int proto_var = mapping.GetProtoVariableFromIntegerVariable( + PositiveVariable(associated.var)); + if (proto_var == -1) break; + int64_t value = debug_sol.proto_values[proto_var]; + to_print.push_back({l, associated, proto_var}); + + if (!VariableIsPositive(associated.var)) value = -value; + if (value < associated.bound) { + ++num_ints; + all_true = false; + break; + } + } + if (all_true) { + is_satisfied = true; + break; + } + } + for (const IntegerLiteral i_lit : integers) { + const int proto_var = mapping.GetProtoVariableFromIntegerVariable( + PositiveVariable(i_lit.var)); + if (proto_var == -1) { + is_satisfied = true; + break; + } + + int64_t value = debug_sol.proto_values[proto_var]; + to_print.push_back({Literal(kNoLiteralIndex), i_lit, proto_var}); + + if (!VariableIsPositive(i_lit.var)) value = -value; + // Note the sign is inversed, we cannot have all literal false and all + // integer literal true. 
+ if (value >= i_lit.bound) { + is_satisfied = true; + break; + } + } + if (!is_satisfied) { + LOG(INFO) << "Reason clause is not satisfied by loaded solution:"; + LOG(INFO) << "Worker '" << model->Name() << "', level=" + << model->GetOrCreate()->CurrentDecisionLevel(); + LOG(INFO) << "literals (neg): " << clause; + LOG(INFO) << "integer literals: " << integers; + for (const auto [l, i_lit, proto_var] : to_print) { + LOG(INFO) << l << " " << i_lit << " var=" << proto_var + << " value_in_sol=" << debug_sol.proto_values[proto_var]; + } + } + return is_satisfied; + }; + const auto lit_checker = [checker](absl::Span clause) { + return checker(clause, {}); + }; + + model->GetOrCreate()->RegisterDebugChecker(lit_checker); + model->GetOrCreate()->RegisterDebugChecker(checker); +} + +std::vector GetSolutionValues(const CpModelProto& model_proto, + const Model& model) { + auto* mapping = model.Get(); + auto* trail = model.Get(); + + std::vector solution; + for (int i = 0; i < model_proto.variables_size(); ++i) { + if (mapping->IsInteger(i)) { + const IntegerVariable var = mapping->Integer(i); + + // For ignored or not fully instantiated variable, we just use the + // lower bound. + solution.push_back(model.Get(LowerBound(var))); + } else { + DCHECK(mapping->IsBoolean(i)); + const Literal literal = mapping->Literal(i); + if (trail->Assignment().LiteralIsAssigned(literal)) { + solution.push_back(model.Get(Value(literal))); + } else { + // Just use the lower bound if the variable is not fully instantiated. + solution.push_back(0); + } + } + } + + if (DEBUG_MODE || + absl::GetFlag(FLAGS_cp_model_check_intermediate_solutions)) { + // TODO(user): Checks against initial model. 
+ CHECK(SolutionIsFeasible(model_proto, solution)); + } + return solution; +} + +namespace { + +IntegerVariable GetOrCreateVariableWithTightBound( + const std::vector>& terms, + Model* model) { + if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); + if (terms.size() == 1 && terms.front().second == 1) { + return terms.front().first; + } + if (terms.size() == 1 && terms.front().second == -1) { + return NegationOf(terms.front().first); + } + + int64_t sum_min = 0; + int64_t sum_max = 0; + for (const std::pair& var_coeff : terms) { + const int64_t min_domain = model->Get(LowerBound(var_coeff.first)); + const int64_t max_domain = model->Get(UpperBound(var_coeff.first)); + const int64_t coeff = var_coeff.second; + const int64_t prod1 = min_domain * coeff; + const int64_t prod2 = max_domain * coeff; + sum_min += std::min(prod1, prod2); + sum_max += std::max(prod1, prod2); + } + return model->Add(NewIntegerVariable(sum_min, sum_max)); +} + +IntegerVariable GetOrCreateVariableLinkedToSumOf( + const std::vector>& terms, + bool lb_required, bool ub_required, Model* model) { + if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); + if (terms.size() == 1 && terms.front().second == 1) { + return terms.front().first; + } + if (terms.size() == 1 && terms.front().second == -1) { + return NegationOf(terms.front().first); + } + + const IntegerVariable new_var = + GetOrCreateVariableWithTightBound(terms, model); + + // TODO(user): use the same format, i.e. LinearExpression in both code! + std::vector vars; + std::vector coeffs; + for (const auto [var, coeff] : terms) { + vars.push_back(var); + coeffs.push_back(coeff); + } + vars.push_back(new_var); + coeffs.push_back(-1); + + // Split if linear is large. + if (vars.size() > model->GetOrCreate()->linear_split_size()) { + SplitAndLoadIntermediateConstraints(lb_required, ub_required, &vars, + &coeffs, model); + } + + // Load the top-level constraint with the required sides. 
+ if (lb_required) { + model->Add(WeightedSumGreaterOrEqual(vars, coeffs, 0)); + } + if (ub_required) { + model->Add(WeightedSumLowerOrEqual(vars, coeffs, 0)); + } + + return new_var; +} + +} // namespace + +// Adds one LinearProgrammingConstraint per connected component of the model. +IntegerVariable AddLPConstraints(bool objective_need_to_be_tight, + const CpModelProto& model_proto, Model* m) { + // Non const as we will std::move() stuff out of there. + LinearRelaxation relaxation = ComputeLinearRelaxation(model_proto, m); + if (m->GetOrCreate()->ModelIsUnsat()) return kNoIntegerVariable; + + // The bipartite graph of LP constraints might be disconnected: + // make a partition of the variables into connected components. + // Constraint nodes are indexed by [0..num_lp_constraints), + // variable nodes by [num_lp_constraints..num_lp_constraints+num_variables). + // + // TODO(user): look into biconnected components. + const int num_lp_constraints = + static_cast(relaxation.linear_constraints.size()); + const int num_lp_cut_generators = + static_cast(relaxation.cut_generators.size()); + const int num_integer_variables = + m->GetOrCreate()->NumIntegerVariables().value(); + + DenseConnectedComponentsFinder components; + components.SetNumberOfNodes(num_lp_constraints + num_lp_cut_generators + + num_integer_variables); + auto get_constraint_index = [](int ct_index) { return ct_index; }; + auto get_cut_generator_index = [num_lp_constraints](int cut_index) { + return num_lp_constraints + cut_index; + }; + auto get_var_index = [num_lp_constraints, + num_lp_cut_generators](IntegerVariable var) { + return num_lp_constraints + num_lp_cut_generators + + PositiveVariable(var).value(); + }; + for (int i = 0; i < num_lp_constraints; i++) { + for (const IntegerVariable var : + relaxation.linear_constraints[i].VarsAsSpan()) { + components.AddEdge(get_constraint_index(i), get_var_index(var)); + } + } + for (int i = 0; i < num_lp_cut_generators; ++i) { + for (const IntegerVariable var 
: relaxation.cut_generators[i].vars) { + components.AddEdge(get_cut_generator_index(i), get_var_index(var)); + } + } + + const int num_components = components.GetNumberOfComponents(); + std::vector component_sizes(num_components, 0); + const std::vector index_to_component = components.GetComponentIds(); + for (int i = 0; i < num_lp_constraints; i++) { + ++component_sizes[index_to_component[get_constraint_index(i)]]; + } + for (int i = 0; i < num_lp_cut_generators; i++) { + ++component_sizes[index_to_component[get_cut_generator_index(i)]]; + } + + // TODO(user): Optimize memory layout. + std::vector> component_to_var(num_components); + for (IntegerVariable var(0); var < num_integer_variables; var += 2) { + DCHECK(VariableIsPositive(var)); + component_to_var[index_to_component[get_var_index(var)]].push_back(var); + } + + // Make sure any constraint that touch the objective is not discarded even + // if it is the only one in its component. This is important to propagate + // as much as possible the objective bound by using any bounds the LP give + // us on one of its components. This is critical on the zephyrus problems for + // instance. + auto* mapping = m->GetOrCreate(); + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + ++component_sizes[index_to_component[get_var_index(var)]]; + } + + // Dispatch every constraint to its LinearProgrammingConstraint. + std::vector lp_constraints(num_components, + nullptr); + for (int i = 0; i < num_lp_constraints; i++) { + const int c = index_to_component[get_constraint_index(i)]; + if (component_sizes[c] <= 1) continue; + if (lp_constraints[c] == nullptr) { + lp_constraints[c] = + new LinearProgrammingConstraint(m, component_to_var[c]); + m->TakeOwnership(lp_constraints[c]); + } + // Load the constraint. 
+ lp_constraints[c]->AddLinearConstraint( + std::move(relaxation.linear_constraints[i])); + } + + // Dispatch every cut generator to its LinearProgrammingConstraint. + for (int i = 0; i < num_lp_cut_generators; i++) { + const int c = index_to_component[get_cut_generator_index(i)]; + if (lp_constraints[c] == nullptr) { + lp_constraints[c] = + new LinearProgrammingConstraint(m, component_to_var[c]); + m->TakeOwnership(lp_constraints[c]); + } + lp_constraints[c]->AddCutGenerator(std::move(relaxation.cut_generators[i])); + } + + // Add the objective. + std::vector>> + component_to_cp_terms(num_components); + std::vector> top_level_cp_terms; + int num_components_containing_objective = 0; + if (model_proto.has_objective()) { + // First pass: set objective coefficients on the lp constraints, and store + // the cp terms in one vector per component. + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + const int c = index_to_component[get_var_index(var)]; + if (lp_constraints[c] != nullptr) { + lp_constraints[c]->SetObjectiveCoefficient(var, IntegerValue(coeff)); + component_to_cp_terms[c].push_back(std::make_pair(var, coeff)); + } else { + // Component is too small. We still need to store the objective term. + top_level_cp_terms.push_back(std::make_pair(var, coeff)); + } + } + // Second pass: Build the cp sub-objectives per component. + for (int c = 0; c < num_components; ++c) { + if (component_to_cp_terms[c].empty()) continue; + const IntegerVariable sub_obj_var = GetOrCreateVariableLinkedToSumOf( + component_to_cp_terms[c], objective_need_to_be_tight, true, m); + top_level_cp_terms.push_back(std::make_pair(sub_obj_var, 1)); + lp_constraints[c]->SetMainObjectiveVariable(sub_obj_var); + num_components_containing_objective++; + } + } + + const IntegerVariable main_objective_var = + model_proto.has_objective() + ? 
GetOrCreateVariableLinkedToSumOf( + top_level_cp_terms, objective_need_to_be_tight, true, m) + : kNoIntegerVariable; + + // Register LP constraints. Note that this needs to be done after all the + // constraints have been added. + for (LinearProgrammingConstraint* lp_constraint : lp_constraints) { + if (lp_constraint == nullptr) continue; + lp_constraint->RegisterWith(m); + VLOG(3) << "LP constraint: " << lp_constraint->DimensionString() << "."; + } + + VLOG(3) << top_level_cp_terms.size() + << " terms in the main objective linear equation (" + << num_components_containing_objective << " from LP constraints)."; + return main_objective_var; +} + +// Registers a callback that will export variables bounds fixed at level 0 of +// the search. This should not be registered to a LNS search. +void RegisterVariableBoundsLevelZeroExport( + const CpModelProto& /*model_proto*/, + SharedBoundsManager* shared_bounds_manager, Model* model) { + CHECK(shared_bounds_manager != nullptr); + + auto* mapping = model->GetOrCreate(); + auto* trail = model->Get(); + auto* integer_trail = model->Get(); + + int saved_trail_index = 0; + std::vector model_variables; + std::vector new_lower_bounds; + std::vector new_upper_bounds; + absl::flat_hash_set visited_variables; + const std::string name = model->Name(); + + auto broadcast_level_zero_bounds = + [=](const std::vector& modified_vars) mutable { + // Inspect the modified IntegerVariables. 
+ for (const IntegerVariable& var : modified_vars) { + const IntegerVariable positive_var = PositiveVariable(var); + const int model_var = + mapping->GetProtoVariableFromIntegerVariable(positive_var); + + if (model_var == -1) continue; + const auto [_, inserted] = visited_variables.insert(model_var); + if (!inserted) continue; + + const int64_t new_lb = + integer_trail->LevelZeroLowerBound(positive_var).value(); + const int64_t new_ub = + integer_trail->LevelZeroUpperBound(positive_var).value(); + + // TODO(user): We could imagine an API based on atomic + // that could preemptively check if this new bounds are improving. + model_variables.push_back(model_var); + new_lower_bounds.push_back(new_lb); + new_upper_bounds.push_back(new_ub); + } + + // Inspect the newly modified Booleans. + for (; saved_trail_index < trail->Index(); ++saved_trail_index) { + const Literal fixed_literal = (*trail)[saved_trail_index]; + const int model_var = mapping->GetProtoVariableFromBooleanVariable( + fixed_literal.Variable()); + + if (model_var == -1) continue; + const auto [_, inserted] = visited_variables.insert(model_var); + if (!inserted) continue; + + model_variables.push_back(model_var); + if (fixed_literal.IsPositive()) { + new_lower_bounds.push_back(1); + new_upper_bounds.push_back(1); + } else { + new_lower_bounds.push_back(0); + new_upper_bounds.push_back(0); + } + } + + if (!model_variables.empty()) { + shared_bounds_manager->ReportPotentialNewBounds( + model->Name(), model_variables, new_lower_bounds, + new_upper_bounds); + + // Clear for next call. + model_variables.clear(); + new_lower_bounds.clear(); + new_upper_bounds.clear(); + visited_variables.clear(); + + // If we are not in interleave_search we synchronize right away. + if (!model->Get()->interleave_search()) { + shared_bounds_manager->Synchronize(); + } + } + }; + + // The callback will just be called on NEWLY modified var. So initially, + // we do want to read all variables. 
+ // + // TODO(user): Find a better way? It seems nicer to register this before + // any variable is modified. But then we don't want to call it each time + // we reach level zero during probing. It should be better to only call + // it when a new variable has been fixed. + const IntegerVariable num_vars = + model->GetOrCreate()->NumIntegerVariables(); + std::vector all_variables; + all_variables.reserve(num_vars.value()); + for (IntegerVariable var(0); var < num_vars; ++var) { + all_variables.push_back(var); + } + broadcast_level_zero_bounds(all_variables); + + model->GetOrCreate() + ->RegisterLevelZeroModifiedVariablesCallback(broadcast_level_zero_bounds); +} + +// Registers a callback to import new variables bounds stored in the +// shared_bounds_manager. These bounds are imported at level 0 of the search +// in the linear scan minimize function. +void RegisterVariableBoundsLevelZeroImport( + const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager, + Model* model) { + CHECK(shared_bounds_manager != nullptr); + const std::string name = model->Name(); + auto* integer_trail = model->GetOrCreate(); + auto* trail = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); + auto* mapping = model->GetOrCreate(); + const int id = shared_bounds_manager->RegisterNewId(); + + const auto& import_level_zero_bounds = [&model_proto, shared_bounds_manager, + name, sat_solver, integer_trail, + trail, id, mapping]() { + std::vector model_variables; + std::vector new_lower_bounds; + std::vector new_upper_bounds; + shared_bounds_manager->GetChangedBounds( + id, &model_variables, &new_lower_bounds, &new_upper_bounds); + bool new_bounds_have_been_imported = false; + for (int i = 0; i < model_variables.size(); ++i) { + const int model_var = model_variables[i]; + + // If this is a Boolean, fix it if not already done. + // Note that it is important not to use AddUnitClause() as we do not + // want to propagate after each addition. 
+ if (mapping->IsBoolean(model_var)) { + Literal lit = mapping->Literal(model_var); + if (new_upper_bounds[i] == 0) lit = lit.Negated(); + if (trail->Assignment().LiteralIsTrue(lit)) continue; + if (trail->Assignment().LiteralIsFalse(lit)) { + sat_solver->NotifyThatModelIsUnsat(); + return false; + } + new_bounds_have_been_imported = true; + trail->EnqueueWithUnitReason(lit); + continue; + } + + // Deal with integer. + if (!mapping->IsInteger(model_var)) continue; + const IntegerVariable var = mapping->Integer(model_var); + const IntegerValue new_lb(new_lower_bounds[i]); + const IntegerValue new_ub(new_upper_bounds[i]); + const IntegerValue old_lb = integer_trail->LowerBound(var); + const IntegerValue old_ub = integer_trail->UpperBound(var); + const bool changed_lb = new_lb > old_lb; + const bool changed_ub = new_ub < old_ub; + if (!changed_lb && !changed_ub) continue; + + new_bounds_have_been_imported = true; + if (VLOG_IS_ON(3)) { + const IntegerVariableProto& var_proto = + model_proto.variables(model_var); + const std::string& var_name = + var_proto.name().empty() + ? absl::StrCat("anonymous_var(", model_var, ")") + : var_proto.name(); + LOG(INFO) << " '" << name << "' imports new bounds for " << var_name + << ": from [" << old_lb << ", " << old_ub << "] to [" + << new_lb << ", " << new_ub << "]"; + } + + if (changed_lb && + !integer_trail->Enqueue(IntegerLiteral::GreaterOrEqual(var, new_lb), + {}, {})) { + return false; + } + if (changed_ub && + !integer_trail->Enqueue(IntegerLiteral::LowerOrEqual(var, new_ub), {}, + {})) { + return false; + } + } + if (new_bounds_have_been_imported && !sat_solver->FinishPropagation()) { + return false; + } + return true; + }; + model->GetOrCreate()->callbacks.push_back( + import_level_zero_bounds); +} + +// Registers a callback that will report improving objective best bound. +// It will be called each time new objective bound are propagated at level zero. 
+void RegisterObjectiveBestBoundExport( + IntegerVariable objective_var, + SharedResponseManager* shared_response_manager, Model* model) { + auto* integer_trail = model->Get(); + const auto broadcast_objective_lower_bound = + [objective_var, integer_trail, shared_response_manager, model, + best_obj_lb = + kMinIntegerValue](const std::vector&) mutable { + const IntegerValue objective_lb = + integer_trail->LevelZeroLowerBound(objective_var); + if (objective_lb > best_obj_lb) { + best_obj_lb = objective_lb; + shared_response_manager->UpdateInnerObjectiveBounds( + model->Name(), objective_lb, + integer_trail->LevelZeroUpperBound(objective_var)); + // If we are not in interleave_search we synchronize right away. + if (!model->Get()->interleave_search()) { + shared_response_manager->Synchronize(); + } + } + }; + model->GetOrCreate() + ->RegisterLevelZeroModifiedVariablesCallback( + broadcast_objective_lower_bound); +} + +// Registers a callback to import new objective bounds. It will be called each +// time the search main loop is back to level zero. Note that it the presence of +// assumptions, this will not happen until the set of assumptions is changed. 
+void RegisterObjectiveBoundsImport( + SharedResponseManager* shared_response_manager, Model* model) { + auto* solver = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* objective = model->GetOrCreate(); + const std::string name = model->Name(); + const auto import_objective_bounds = [name, solver, integer_trail, objective, + shared_response_manager]() { + if (solver->AssumptionLevel() != 0) return true; + bool propagate = false; + + const IntegerValue external_lb = + shared_response_manager->GetInnerObjectiveLowerBound(); + const IntegerValue current_lb = + integer_trail->LowerBound(objective->objective_var); + if (external_lb > current_lb) { + if (!integer_trail->Enqueue(IntegerLiteral::GreaterOrEqual( + objective->objective_var, external_lb), + {}, {})) { + return false; + } + propagate = true; + } + + const IntegerValue external_ub = + shared_response_manager->GetInnerObjectiveUpperBound(); + const IntegerValue current_ub = + integer_trail->UpperBound(objective->objective_var); + if (external_ub < current_ub) { + if (!integer_trail->Enqueue(IntegerLiteral::LowerOrEqual( + objective->objective_var, external_ub), + {}, {})) { + return false; + } + propagate = true; + } + + if (!propagate) return true; + + VLOG(3) << "'" << name << "' imports objective bounds: external [" + << objective->ScaleIntegerObjective(external_lb) << ", " + << objective->ScaleIntegerObjective(external_ub) << "], current [" + << objective->ScaleIntegerObjective(current_lb) << ", " + << objective->ScaleIntegerObjective(current_ub) << "]"; + + return solver->FinishPropagation(); + }; + + model->GetOrCreate()->callbacks.push_back( + import_objective_bounds); +} + +// Registers a callback that will export good clauses discovered during search. 
+void RegisterClausesExport(int id, SharedClausesManager* shared_clauses_manager, + Model* model) { + auto* mapping = model->GetOrCreate(); + const auto& share_binary_clause = [mapping, id, shared_clauses_manager]( + Literal l1, Literal l2) { + const int var1 = + mapping->GetProtoVariableFromBooleanVariable(l1.Variable()); + if (var1 == -1) return; + const int var2 = + mapping->GetProtoVariableFromBooleanVariable(l2.Variable()); + if (var2 == -1) return; + const int lit1 = l1.IsPositive() ? var1 : NegatedRef(var1); + const int lit2 = l2.IsPositive() ? var2 : NegatedRef(var2); + shared_clauses_manager->AddBinaryClause(id, lit1, lit2); + }; + model->GetOrCreate()->SetAdditionCallback( + share_binary_clause); + if (!model->GetOrCreate()->share_glue_clauses()) { + return; + } + auto* clause_stream = shared_clauses_manager->GetClauseStream(id); + const int max_lbd = + model->GetOrCreate()->clause_cleanup_lbd_bound(); + // Note that this callback takes no global locks, everything operates on this + // worker's own clause stream, whose lock is only used by this worker, and + // briefly when generating a batch in SharedClausesManager::Synchronize(). + auto share_clause = [mapping, clause_stream, max_lbd, + clause = std::vector()]( + int lbd, absl::Span literals) mutable { + if (lbd <= 0 || lbd > max_lbd || + !clause_stream->CanAccept(literals.size(), lbd)) { + return; + } + clause.clear(); + for (const Literal& lit : literals) { + const int var = + mapping->GetProtoVariableFromBooleanVariable(lit.Variable()); + if (var == -1) return; + clause.push_back(lit.IsPositive() ? var : NegatedRef(var)); + } + clause_stream->Add(clause); + }; + model->GetOrCreate()->SetAddClauseCallback( + std::move(share_clause)); +} + +// Registers a callback to import new clauses stored in the +// shared_clausess_manager. These clauses are imported at level 0 of the search +// in the linear scan minimize function. +// it returns the id of the worker in the shared clause manager. 
+// +// TODO(user): Can we import them in the core worker ? +int RegisterClausesLevelZeroImport(int id, + SharedClausesManager* shared_clauses_manager, + Model* model) { + CHECK(shared_clauses_manager != nullptr); + CpModelMapping* const mapping = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); + auto* implications = model->GetOrCreate(); + bool share_glue_clauses = + model->GetOrCreate()->share_glue_clauses(); + auto* clause_stream = share_glue_clauses + ? shared_clauses_manager->GetClauseStream(id) + : nullptr; + const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, + sat_solver, implications, + clause_stream]() { + std::vector> new_binary_clauses; + shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); + implications->EnableSharing(false); + for (const auto& [ref1, ref2] : new_binary_clauses) { + const Literal l1 = mapping->Literal(ref1); + const Literal l2 = mapping->Literal(ref2); + if (!sat_solver->AddBinaryClause(l1, l2)) { + return false; + } + } + implications->EnableSharing(true); + if (clause_stream == nullptr) return true; + + std::array local_clause; + for (const absl::Span shared_clause : + shared_clauses_manager->GetUnseenClauses(id)) { + // Check this clause was not already learned by this worker. + // We can delete the fingerprint because we should not learn an identical + // clause, and the global stream will not emit the same clause while any + // worker hasn't consumed this clause (and thus also shouldn't relearn the + // clause). 
+ if (clause_stream->Delete(shared_clause)) continue; + for (int i = 0; i < shared_clause.size(); ++i) { + local_clause[i] = mapping->Literal(shared_clause[i]); + } + if (!sat_solver->AddProblemClause( + absl::MakeSpan(local_clause).subspan(0, shared_clause.size()))) { + return false; + } + } + return true; + }; + model->GetOrCreate()->callbacks.push_back( + import_level_zero_clauses); + return id; +} + +void LoadBaseModel(const CpModelProto& model_proto, Model* model) { + auto* shared_response_manager = model->GetOrCreate(); + CHECK(shared_response_manager != nullptr); + auto* sat_solver = model->GetOrCreate(); + + // Simple function for the few places where we do "return unsat()". + const auto unsat = [shared_response_manager, sat_solver, model] { + sat_solver->NotifyThatModelIsUnsat(); + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(model->Name(), " [loading]")); + }; + + // We will add them all at once after model_proto is loaded. + model->GetOrCreate()->DisableImplicationBetweenLiteral(); + + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); + const bool view_all_booleans_as_integers = + (parameters.linearization_level() >= 2) || + (parameters.search_branching() == SatParameters::FIXED_SEARCH && + model_proto.search_strategy().empty()) || + parameters.optimize_with_max_hs(); + LoadVariables(model_proto, view_all_booleans_as_integers, model); + DetectOptionalVariables(model_proto, model); + + // TODO(user): The core algo and symmetries seems to be problematic in some + // cases. See for instance: neos-691058.mps.gz. This is probably because as + // we modify the model, our symmetry might be wrong? investigate. + // + // TODO(user): More generally, we cannot load the symmetry if we create + // new Booleans and constraints that link them to some Booleans of the model. + // Creating Booleans related to integer variable is fine since we only deal + // with Boolean only symmetry here. 
It is why we disable this when we have + // linear relaxation as some of them create new constraints. + if (!parameters.optimize_with_core() && parameters.symmetry_level() > 1 && + !parameters.enumerate_all_solutions() && + parameters.linearization_level() == 0) { + LoadBooleanSymmetries(model_proto, model); + } + + ExtractEncoding(model_proto, model); + PropagateEncodingFromEquivalenceRelations(model_proto, model); + + // Check the model is still feasible before continuing. + if (sat_solver->ModelIsUnsat()) return unsat(); + + // Fully encode variables as needed by the search strategy. + AddFullEncodingFromSearchBranching(model_proto, model); + if (sat_solver->ModelIsUnsat()) return unsat(); + + // Reserve space for the precedence relations. + model->GetOrCreate()->Resize( + model->GetOrCreate()->NumIntegerVariables().value()); + + // Load the constraints. + int num_ignored_constraints = 0; + absl::flat_hash_set unsupported_types; + for (const ConstraintProto& ct : model_proto.constraints()) { + if (mapping->ConstraintIsAlreadyLoaded(&ct)) { + ++num_ignored_constraints; + continue; + } + + if (!LoadConstraint(ct, model)) { + unsupported_types.insert(ct.constraint_case()); + continue; + } + + // We propagate after each new Boolean constraint but not the integer + // ones. So we call FinishPropagation() manually here. + // + // Note that we only do that in debug mode as this can be really slow on + // certain types of problems with millions of constraints. + if (DEBUG_MODE) { + if (sat_solver->FinishPropagation()) { + Trail* trail = model->GetOrCreate(); + const int old_num_fixed = trail->Index(); + if (trail->Index() > old_num_fixed) { + VLOG(3) << "Constraint fixed " << trail->Index() - old_num_fixed + << " Boolean variable(s): " << ProtobufDebugString(ct); + } + } + } + if (sat_solver->ModelIsUnsat()) { + VLOG(2) << "UNSAT during extraction (after adding '" + << ConstraintCaseName(ct.constraint_case()) << "'). 
" + << ProtobufDebugString(ct); + return unsat(); + } + } + if (num_ignored_constraints > 0) { + VLOG(3) << num_ignored_constraints << " constraints were skipped."; + } + if (!unsupported_types.empty()) { + VLOG(1) << "There is unsupported constraints types in this model: "; + std::vector names; + for (const ConstraintProto::ConstraintCase type : unsupported_types) { + names.push_back(ConstraintCaseName(type)); + } + std::sort(names.begin(), names.end()); + for (const absl::string_view name : names) { + VLOG(1) << " - " << name; + } + return unsat(); + } + + model->GetOrCreate() + ->AddAllImplicationsBetweenAssociatedLiterals(); + if (!sat_solver->FinishPropagation()) return unsat(); + + model->GetOrCreate()->ProcessImplicationGraph( + model->GetOrCreate()); + model->GetOrCreate()->Build(); +} + +void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model) { + LoadBaseModel(model_proto, model); + + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); + if (parameters.linearization_level() == 0) return; + + // Add linear constraints to Feasibility Pump. + const LinearRelaxation relaxation = + ComputeLinearRelaxation(model_proto, model); + if (model->GetOrCreate()->ModelIsUnsat()) return; + + const int num_lp_constraints = + static_cast(relaxation.linear_constraints.size()); + if (num_lp_constraints == 0) return; + auto* feasibility_pump = model->GetOrCreate(); + for (int i = 0; i < num_lp_constraints; i++) { + feasibility_pump->AddLinearConstraint(relaxation.linear_constraints[i]); + } + + if (model_proto.has_objective()) { + for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { + const IntegerVariable var = + mapping->Integer(model_proto.objective().vars(i)); + const int64_t coeff = model_proto.objective().coeffs(i); + feasibility_pump->SetObjectiveCoefficient(var, IntegerValue(coeff)); + } + } +} + +// Loads a CpModelProto inside the given model. 
+// This should only be called once on a given 'Model' class. +void LoadCpModel(const CpModelProto& model_proto, Model* model) { + LoadBaseModel(model_proto, model); + + // We want to load the debug solution before the initial propag. + // But at this point the objective is not loaded yet, so we will not have + // a value for the objective integer variable, so we do it again later. + InitializeDebugSolution(model_proto, model); + + // Simple function for the few places where we do "return unsat()". + auto* sat_solver = model->GetOrCreate(); + auto* shared_response_manager = model->GetOrCreate(); + const auto unsat = [shared_response_manager, sat_solver, model] { + sat_solver->NotifyThatModelIsUnsat(); + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(model->Name(), " [loading]")); + }; + + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); + + // Auto detect "at least one of" constraints in the PrecedencesPropagator. + // Note that we do that before we finish loading the problem (objective and + // LP relaxation), because propagation will be faster at this point and it + // should be enough for the purpose of this auto-detection. + if (parameters.auto_detect_greater_than_at_least_one_of()) { + model->GetOrCreate() + ->AddGreaterThanAtLeastOneOfConstraints(model); + if (!sat_solver->FinishPropagation()) return unsat(); + } + + // Note that this is already done in the presolve, but it is important to redo + // it here to collect literal => integer >= bound constraints that are used in + // many places. Without it, we don't detect them if they depends on long chain + // of implications. + // + // TODO(user): We don't have a good deterministic time on all constraints, + // so this might take more time than wanted. 
+ if (parameters.cp_model_probing_level() > 1) { + Prober* prober = model->GetOrCreate(); + prober->ProbeBooleanVariables(/*deterministic_time_limit=*/1.0); + if (!model->GetOrCreate() + ->ComputeTransitiveReduction()) { + return unsat(); + } + } + if (sat_solver->ModelIsUnsat()) return unsat(); + + // Note that it is important to do that after the probing. + ExtractElementEncoding(model_proto, model); + + // Compute decomposed energies on demands helper. + IntervalsRepository* repository = model->Mutable(); + if (repository != nullptr) { + repository->InitAllDecomposedEnergies(); + } + + // We need to know beforehand if the objective var can just be >= terms or + // needs to be == terms. + bool objective_need_to_be_tight = false; + if (model_proto.has_objective() && + !model_proto.objective().domain().empty()) { + int64_t min_value = 0; + int64_t max_value = 0; + auto* integer_trail = model->GetOrCreate(); + const CpObjectiveProto& obj = model_proto.objective(); + for (int i = 0; i < obj.vars_size(); ++i) { + const int64_t coeff = obj.coeffs(i); + const IntegerVariable var = mapping->Integer(obj.vars(i)); + if (coeff > 0) { + min_value += coeff * integer_trail->LowerBound(var).value(); + max_value += coeff * integer_trail->UpperBound(var).value(); + } else { + min_value += coeff * integer_trail->UpperBound(var).value(); + max_value += coeff * integer_trail->LowerBound(var).value(); + } + } + const Domain user_domain = ReadDomainFromProto(model_proto.objective()); + const Domain automatic_domain = Domain(min_value, max_value); + objective_need_to_be_tight = !automatic_domain.IsIncludedIn(user_domain); + } + + // Create an objective variable and its associated linear constraint if + // needed. + IntegerVariable objective_var = kNoIntegerVariable; + if (parameters.linearization_level() > 0) { + // Linearize some part of the problem and register LP constraint(s). 
+ objective_var = + AddLPConstraints(objective_need_to_be_tight, model_proto, model); + if (sat_solver->ModelIsUnsat()) return unsat(); + } else if (model_proto.has_objective()) { + const CpObjectiveProto& obj = model_proto.objective(); + std::vector> terms; + terms.reserve(obj.vars_size()); + for (int i = 0; i < obj.vars_size(); ++i) { + terms.push_back( + std::make_pair(mapping->Integer(obj.vars(i)), obj.coeffs(i))); + } + if (parameters.optimize_with_core()) { + if (objective_need_to_be_tight) { + // We do not care about the <= obj for core, we only need the other side + // to enforce a restriction of the objective lower bound. + // + // TODO(user): This might still create intermediate variables to + // decompose the objective for no reason. Just deal directly with the + // objective domain in the core algo by forbidding bad assumptions? + // Alternatively, just ignore the core solution if it is "too" good and + // rely on other solvers? + objective_var = + GetOrCreateVariableLinkedToSumOf(terms, true, false, model); + } else { + objective_var = GetOrCreateVariableWithTightBound(terms, model); + } + } else { + objective_var = GetOrCreateVariableLinkedToSumOf( + terms, objective_need_to_be_tight, true, model); + } + } + + // Create the objective definition inside the Model so that it can be accessed + // by the heuristics than needs it. 
+ if (objective_var != kNoIntegerVariable) { + const CpObjectiveProto& objective_proto = model_proto.objective(); + auto* objective_definition = model->GetOrCreate(); + + objective_definition->scaling_factor = objective_proto.scaling_factor(); + if (objective_definition->scaling_factor == 0.0) { + objective_definition->scaling_factor = 1.0; + } + objective_definition->offset = objective_proto.offset(); + objective_definition->objective_var = objective_var; + + const int size = objective_proto.vars_size(); + objective_definition->vars.resize(size); + objective_definition->coeffs.resize(size); + for (int i = 0; i < objective_proto.vars_size(); ++i) { + // Note that if there is no mapping, then the variable will be + // kNoIntegerVariable. + objective_definition->vars[i] = mapping->Integer(objective_proto.vars(i)); + objective_definition->coeffs[i] = IntegerValue(objective_proto.coeffs(i)); + + // Fill the objective heuristics data. + const int ref = objective_proto.vars(i); + if (mapping->IsInteger(ref)) { + const IntegerVariable var = mapping->Integer(objective_proto.vars(i)); + objective_definition->objective_impacting_variables.insert( + objective_proto.coeffs(i) > 0 ? var : NegationOf(var)); + } + } + + // Register an objective special propagator. + model->TakeOwnership( + new LevelZeroEquality(objective_var, objective_definition->vars, + objective_definition->coeffs, model)); + } + + // Intersect the objective domain with the given one if any. 
+ if (!model_proto.objective().domain().empty()) { + auto* integer_trail = model->GetOrCreate(); + const Domain user_domain = ReadDomainFromProto(model_proto.objective()); + const Domain automatic_domain = + integer_trail->InitialVariableDomain(objective_var); + VLOG(3) << "Objective offset:" << model_proto.objective().offset() + << " scaling_factor:" << model_proto.objective().scaling_factor(); + VLOG(3) << "Automatic internal objective domain: " << automatic_domain; + VLOG(3) << "User specified internal objective domain: " << user_domain; + CHECK_NE(objective_var, kNoIntegerVariable); + if (!integer_trail->UpdateInitialDomain(objective_var, user_domain)) { + VLOG(2) << "UNSAT due to the objective domain."; + return unsat(); + } + } + + // Note that we do one last propagation at level zero once all the + // constraints were added. + SOLVER_LOG(model->GetOrCreate(), + "Initial num_bool: ", sat_solver->NumVariables()); + if (!sat_solver->FinishPropagation()) return unsat(); + + if (model_proto.has_objective()) { + // Report the initial objective variable bounds. + auto* integer_trail = model->GetOrCreate(); + shared_response_manager->UpdateInnerObjectiveBounds( + absl::StrCat(model->Name(), " (initial_propagation)"), + integer_trail->LowerBound(objective_var), + integer_trail->UpperBound(objective_var)); + + // Watch improved objective best bounds. + RegisterObjectiveBestBoundExport(objective_var, shared_response_manager, + model); + + // Import objective bounds. + // TODO(user): Support objective bounds import in LNS and Core based + // search. + if (model->GetOrCreate()->share_objective_bounds()) { + RegisterObjectiveBoundsImport(shared_response_manager, model); + } + } + + // Initialize the search strategies. 
+ auto* search_heuristics = model->GetOrCreate(); + search_heuristics->user_search = + ConstructUserSearchStrategy(model_proto, model); + search_heuristics->heuristic_search = + ConstructHeuristicSearchStrategy(model_proto, model); + search_heuristics->integer_completion_search = + ConstructIntegerCompletionSearchStrategy(mapping->GetVariableMapping(), + objective_var, model); + search_heuristics->fixed_search = ConstructFixedSearchStrategy( + search_heuristics->user_search, search_heuristics->heuristic_search, + search_heuristics->integer_completion_search); + if (VLOG_IS_ON(3)) { + search_heuristics->fixed_search = + InstrumentSearchStrategy(model_proto, mapping->GetVariableMapping(), + search_heuristics->fixed_search, model); + } + search_heuristics->hint_search = + ConstructHintSearchStrategy(model_proto, mapping, model); + + // Create the CoreBasedOptimizer class if needed. + if (parameters.optimize_with_core()) { + // TODO(user): Remove code duplication with the solution_observer in + // SolveLoadedCpModel(). 
+ const auto solution_observer = [&model_proto, model, + shared_response_manager, + best_obj_ub = kMaxIntegerValue]() mutable { + const std::vector solution = + GetSolutionValues(model_proto, *model); + const IntegerValue obj_ub = + ComputeInnerObjective(model_proto.objective(), solution); + if (obj_ub < best_obj_ub) { + best_obj_ub = obj_ub; + shared_response_manager->NewSolution(solution, model->Name(), model); + } + }; + + const auto& objective = *model->GetOrCreate(); + if (parameters.optimize_with_max_hs()) { + HittingSetOptimizer* max_hs = new HittingSetOptimizer( + model_proto, objective, solution_observer, model); + model->Register(max_hs); + model->TakeOwnership(max_hs); + } else { + CoreBasedOptimizer* core = + new CoreBasedOptimizer(objective_var, objective.vars, + objective.coeffs, solution_observer, model); + model->Register(core); + model->TakeOwnership(core); + } + } + + InitializeDebugSolution(model_proto, model); +} + +// Solves an already loaded cp_model_proto. +// The final CpSolverResponse must be read from the shared_response_manager. +// +// TODO(user): This should be transformed so that it can be called many times +// and resume from the last search state as if it wasn't interrupted. That would +// allow use to easily interleave different heuristics in the same thread. 
+void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model) { + auto* shared_response_manager = model->GetOrCreate(); + if (shared_response_manager->ProblemIsSolved()) return; + + const SatParameters& parameters = *model->GetOrCreate(); + if (parameters.stop_after_root_propagation()) return; + + auto solution_observer = [&model_proto, model, shared_response_manager, + best_obj_ub = kMaxIntegerValue]() mutable { + const std::vector solution = + GetSolutionValues(model_proto, *model); + if (model_proto.has_objective()) { + const IntegerValue obj_ub = + ComputeInnerObjective(model_proto.objective(), solution); + if (obj_ub < best_obj_ub) { + best_obj_ub = obj_ub; + shared_response_manager->NewSolution(solution, model->Name(), model); + } + } else { + shared_response_manager->NewSolution(solution, model->Name(), model); + } + }; + + // Make sure we are not at a positive level. + if (!model->GetOrCreate()->ResetToLevelZero()) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + return; + } + + // Reconfigure search heuristic if it was changed. 
+ ConfigureSearchHeuristics(model); + + const auto& mapping = *model->GetOrCreate(); + SatSolver::Status status; + + if (parameters.use_probing_search()) { + ContinuousProber prober(model_proto, model); + while (true) { + status = prober.Probe(); + if (status == SatSolver::INFEASIBLE) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + break; + } + if (status == SatSolver::FEASIBLE) { + solution_observer(); + } else { + break; + } + } + } else if (!model_proto.has_objective()) { + while (true) { + if (parameters.use_shared_tree_search()) { + auto* subtree_worker = model->GetOrCreate(); + status = subtree_worker->Search(solution_observer); + } else { + status = ResetAndSolveIntegerProblem( + mapping.Literals(model_proto.assumptions()), model); + } + if (status != SatSolver::Status::FEASIBLE) break; + solution_observer(); + if (!parameters.enumerate_all_solutions()) break; + model->Add(ExcludeCurrentSolutionAndBacktrack()); + } + if (status == SatSolver::INFEASIBLE) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + } + if (status == SatSolver::ASSUMPTIONS_UNSAT) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + + // Extract a good subset of assumptions and add it to the response. + auto* time_limit = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); + std::vector core = sat_solver->GetLastIncompatibleDecisions(); + MinimizeCoreWithPropagation(time_limit, sat_solver, &core); + std::vector core_in_proto_format; + for (const Literal l : core) { + core_in_proto_format.push_back( + mapping.GetProtoVariableFromBooleanVariable(l.Variable())); + if (!l.IsPositive()) { + core_in_proto_format.back() = NegatedRef(core_in_proto_format.back()); + } + } + shared_response_manager->AddUnsatCore(core_in_proto_format); + } + } else { + // Optimization problem. 
+ const auto& objective = *model->GetOrCreate(); + const IntegerVariable objective_var = objective.objective_var; + CHECK_NE(objective_var, kNoIntegerVariable); + + if (parameters.optimize_with_lb_tree_search()) { + auto* search = model->GetOrCreate(); + status = search->Search(solution_observer); + } else if (parameters.optimize_with_core()) { + // TODO(user): This doesn't work with splitting in chunk for now. It + // shouldn't be too hard to fix. + if (parameters.optimize_with_max_hs()) { + status = model->Mutable()->Optimize(); + } else { + status = model->Mutable()->Optimize(); + } + } else if (parameters.use_shared_tree_search()) { + auto* subtree_worker = model->GetOrCreate(); + status = subtree_worker->Search(solution_observer); + } else { + // TODO(user): This parameter breaks the splitting in chunk of a Solve(). + // It should probably be moved into another SubSolver altogether. + if (parameters.binary_search_num_conflicts() >= 0) { + RestrictObjectiveDomainWithBinarySearch(objective_var, + solution_observer, model); + } + status = MinimizeIntegerVariableWithLinearScanAndLazyEncoding( + objective_var, solution_observer, model); + } + + // The search is done in both case. + // + // TODO(user): Remove the weird translation INFEASIBLE->FEASIBLE in the + // function above? + if (status == SatSolver::INFEASIBLE || status == SatSolver::FEASIBLE) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + model->Name()); + } + } +} + +// Try to find a solution by following the hint and using a low conflict limit. +// The CpModelProto must already be loaded in the Model. +void QuickSolveWithHint(const CpModelProto& model_proto, Model* model) { + if (!model_proto.has_solution_hint()) return; + + auto* shared_response_manager = model->GetOrCreate(); + if (shared_response_manager->ProblemIsSolved()) return; + + // Temporarily change the parameters. 
+ auto* parameters = model->GetOrCreate(); + + // If the model was loaded with "optimize_with_core" then the objective + // variable is not linked to its linear expression. Because of that, we can + // return a solution that does not satisfy the objective domain. + // + // TODO(user): This is fixable, but then do we need the hint when optimizing + // with core? + if (parameters->optimize_with_core()) return; + + const SatParameters saved_params = *parameters; + parameters->set_max_number_of_conflicts(parameters->hint_conflict_limit()); + parameters->set_search_branching(SatParameters::HINT_SEARCH); + parameters->set_optimize_with_core(false); + parameters->set_use_sat_inprocessing(false); + auto cleanup = ::absl::MakeCleanup( + [parameters, saved_params]() { *parameters = saved_params; }); + + // Solve decision problem. + ConfigureSearchHeuristics(model); + const auto& mapping = *model->GetOrCreate(); + const SatSolver::Status status = ResetAndSolveIntegerProblem( + mapping.Literals(model_proto.assumptions()), model); + + const std::string& solution_info = model->Name(); + if (status == SatSolver::Status::FEASIBLE) { + const std::vector solution = + GetSolutionValues(model_proto, *model); + shared_response_manager->NewSolution( + solution, absl::StrCat(solution_info, " [hint]"), model); + + if (!model_proto.has_objective()) { + if (parameters->enumerate_all_solutions()) { + model->Add(ExcludeCurrentSolutionAndBacktrack()); + } + } else { + // Restrict the objective. 
+ const IntegerVariable objective_var = + model->GetOrCreate()->objective_var; + model->GetOrCreate()->Backtrack(0); + IntegerTrail* integer_trail = model->GetOrCreate(); + if (!integer_trail->Enqueue( + IntegerLiteral::LowerOrEqual( + objective_var, + shared_response_manager->GetInnerObjectiveUpperBound()), + {}, {})) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(solution_info, " [hint]")); + } + } + return; + } + + // This code is here to debug bad presolve during LNS that corrupt the hint. + // Note that sometime the deterministic limit is hit before the hint can be + // completed, so we don't report that has an error. + // + // Tricky: We can only test that if we don't already have a feasible solution + // like we do if the hint is complete. + if (parameters->debug_crash_on_bad_hint() && + shared_response_manager->SolutionsRepository().NumSolutions() == 0 && + !model->GetOrCreate()->LimitReached() && + status != SatSolver::Status::FEASIBLE) { + LOG(FATAL) << "QuickSolveWithHint() didn't find a feasible solution." + << " The model name is '" << model_proto.name() << "'." + << " Status: " << status << "."; + } + + if (status == SatSolver::INFEASIBLE) { + shared_response_manager->NotifyThatImprovingProblemIsInfeasible( + absl::StrCat(solution_info, " [hint]")); + return; + } +} + +// Solve a model with a different objective consisting of minimizing the L1 +// distance with the provided hint. Note that this method creates an in-memory +// copy of the model and loads a local Model object from the copied model. +void MinimizeL1DistanceWithHint(const CpModelProto& model_proto, Model* model) { + Model local_model; + + // Forward some shared class. 
+ local_model.Register( + model->GetOrCreate()); + local_model.Register(model->GetOrCreate()); + + if (!model_proto.has_solution_hint()) return; + + auto* shared_response_manager = model->GetOrCreate(); + if (shared_response_manager->ProblemIsSolved()) return; + + auto* parameters = local_model.GetOrCreate(); + // TODO(user): As of now the repair hint doesn't support when + // enumerate_all_solutions is set since the solution is created on a different + // model. + if (parameters->enumerate_all_solutions()) return; + + // Change the parameters. + const SatParameters saved_params = *model->GetOrCreate(); + *parameters = saved_params; + parameters->set_max_number_of_conflicts(parameters->hint_conflict_limit()); + parameters->set_optimize_with_core(false); + + // Update the model to introduce penalties to go away from hinted values. + CpModelProto updated_model_proto = model_proto; + updated_model_proto.clear_objective(); + + // TODO(user): For boolean variables we can avoid creating new variables. + for (int i = 0; i < model_proto.solution_hint().vars_size(); ++i) { + const int var = model_proto.solution_hint().vars(i); + const int64_t value = model_proto.solution_hint().values(i); + + // Add a new var to represent the difference between var and value. + const int new_var_index = updated_model_proto.variables_size(); + IntegerVariableProto* var_proto = updated_model_proto.add_variables(); + const int64_t min_domain = model_proto.variables(var).domain(0) - value; + const int64_t max_domain = + model_proto.variables(var).domain( + model_proto.variables(var).domain_size() - 1) - + value; + var_proto->add_domain(min_domain); + var_proto->add_domain(max_domain); + + // new_var = var - value. 
+ ConstraintProto* const linear_constraint_proto = + updated_model_proto.add_constraints(); + LinearConstraintProto* linear = linear_constraint_proto->mutable_linear(); + linear->add_vars(new_var_index); + linear->add_coeffs(1); + linear->add_vars(var); + linear->add_coeffs(-1); + linear->add_domain(-value); + linear->add_domain(-value); + + // abs_var = abs(new_var). + const int abs_var_index = updated_model_proto.variables_size(); + IntegerVariableProto* abs_var_proto = updated_model_proto.add_variables(); + const int64_t abs_min_domain = 0; + const int64_t abs_max_domain = + std::max(std::abs(min_domain), std::abs(max_domain)); + abs_var_proto->add_domain(abs_min_domain); + abs_var_proto->add_domain(abs_max_domain); + auto* abs_ct = updated_model_proto.add_constraints()->mutable_lin_max(); + abs_ct->mutable_target()->add_vars(abs_var_index); + abs_ct->mutable_target()->add_coeffs(1); + LinearExpressionProto* left = abs_ct->add_exprs(); + left->add_vars(new_var_index); + left->add_coeffs(1); + LinearExpressionProto* right = abs_ct->add_exprs(); + right->add_vars(new_var_index); + right->add_coeffs(-1); + + updated_model_proto.mutable_objective()->add_vars(abs_var_index); + updated_model_proto.mutable_objective()->add_coeffs(1); + } + + auto* local_response_manager = + local_model.GetOrCreate(); + local_response_manager->InitializeObjective(updated_model_proto); + + // Solve optimization problem. 
+ LoadCpModel(updated_model_proto, &local_model); + + ConfigureSearchHeuristics(&local_model); + const auto& mapping = *local_model.GetOrCreate(); + const SatSolver::Status status = ResetAndSolveIntegerProblem( + mapping.Literals(updated_model_proto.assumptions()), &local_model); + + const std::string& solution_info = model->Name(); + if (status == SatSolver::Status::FEASIBLE) { + const std::vector solution = + GetSolutionValues(model_proto, local_model); + if (DEBUG_MODE) { + const std::vector updated_solution = + GetSolutionValues(updated_model_proto, local_model); + LOG(INFO) << "Found solution with repaired hint penalty = " + << ComputeInnerObjective(updated_model_proto.objective(), + updated_solution); + } + shared_response_manager->NewSolution( + solution, absl::StrCat(solution_info, " [repaired]"), &local_model); + } +} + +// TODO(user): If this ever shows up in the profile, we could avoid copying +// the mapping_proto if we are careful about how we modify the variable domain +// before postsolving it. Note that 'num_variables_in_original_model' refers to +// the model before presolve. +void PostsolveResponseWithFullSolver(int num_variables_in_original_model, + CpModelProto mapping_proto, + const std::vector& postsolve_mapping, + std::vector* solution) { + WallTimer wall_timer; + wall_timer.Start(); + + // Fix the correct variable in the mapping_proto. + for (int i = 0; i < solution->size(); ++i) { + auto* var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); + var_proto->clear_domain(); + var_proto->add_domain((*solution)[i]); + var_proto->add_domain((*solution)[i]); + } + + // Postosolve parameters. + // TODO(user): this problem is usually trivial, but we may still want to + // impose a time limit or copy some of the parameters passed by the user. 
+ Model postsolve_model; + postsolve_model.Register(&wall_timer); + { + SatParameters& params = *postsolve_model.GetOrCreate(); + params.set_linearization_level(0); + params.set_cp_model_probing_level(0); + } + + auto* response_manager = postsolve_model.GetOrCreate(); + response_manager->InitializeObjective(mapping_proto); + + LoadCpModel(mapping_proto, &postsolve_model); + SolveLoadedCpModel(mapping_proto, &postsolve_model); + const CpSolverResponse postsolve_response = response_manager->GetResponse(); + CHECK(postsolve_response.status() == CpSolverStatus::FEASIBLE || + postsolve_response.status() == CpSolverStatus::OPTIMAL) + << postsolve_response.status(); + + // We only copy the solution from the postsolve_response to the response. + CHECK_LE(num_variables_in_original_model, + postsolve_response.solution().size()); + solution->assign( + postsolve_response.solution().begin(), + postsolve_response.solution().begin() + num_variables_in_original_model); +} + +void PostsolveResponseWrapper(const SatParameters& params, + int num_variable_in_original_model, + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + std::vector* solution) { + if (params.debug_postsolve_with_full_solver()) { + PostsolveResponseWithFullSolver(num_variable_in_original_model, + mapping_proto, postsolve_mapping, solution); + } else { + PostsolveResponse(num_variable_in_original_model, mapping_proto, + postsolve_mapping, solution); + } +} + +void AdaptGlobalParameters(const CpModelProto& model_proto, Model* model) { + auto* params = model->GetOrCreate(); + auto* logger = model->GetOrCreate(); + + // Update params.num_workers() if the old field was used. 
+ if (params->num_workers() == 0) { + params->set_num_workers(params->num_search_workers()); + } + + if (params->enumerate_all_solutions()) { + if (params->num_workers() >= 1) { + SOLVER_LOG(logger, + "Forcing sequential search as enumerating all solutions is " + "not supported in multi-thread."); + } + params->set_num_workers(1); + } + + if (!model_proto.assumptions().empty()) { + if (params->num_workers() >= 1) { + SOLVER_LOG(logger, + "Forcing sequential search as assumptions are not supported " + "in multi-thread."); + } + params->set_num_workers(1); + } + + if (params->num_workers() == 0) { + // Initialize the number of workers if set to 0. +#if !defined(__PORTABLE_PLATFORM__) + // Sometimes, hardware_concurrency will return 0. So always default to 1. + const int num_cores = std::max(std::thread::hardware_concurrency(), 1); +#else + const int num_cores = 1; +#endif + SOLVER_LOG(logger, "Setting number of workers to ", num_cores); + params->set_num_workers(num_cores); + } + + // We currently only use the feasibility pump or rins/rens if it is enabled + // and some other parameters are not on. + // + // TODO(user): for now this is not deterministic so we disable it on + // interleave search. Fix. + if (params->interleave_search() || params->num_workers() == 1 || + !params->use_lns()) { + params->set_use_rins_lns(false); + params->set_use_feasibility_pump(false); + } + + // We disable this if the global param asked for no LP. + if (params->linearization_level() == 0) { + params->set_use_feasibility_pump(false); + } + + // Disable shared bounds if we are in single thread and we are not + // tightening the domains. 
+ if (!params->fill_tightened_domains_in_response() && + params->num_workers() == 1) { + params->set_share_level_zero_bounds(false); + } +} + +SharedClasses::SharedClasses(const CpModelProto* proto, Model* global_model) + : model_proto(*proto), + wall_timer(global_model->GetOrCreate()), + time_limit(global_model->GetOrCreate()), + logger(global_model->GetOrCreate()), + stats(global_model->GetOrCreate()), + response(global_model->GetOrCreate()), + shared_tree_manager(global_model->GetOrCreate()) { + const SatParameters& params = *global_model->GetOrCreate(); + + if (params.share_level_zero_bounds()) { + bounds = std::make_unique(*proto); + bounds->set_dump_prefix(absl::GetFlag(FLAGS_cp_model_dump_prefix)); + bounds->LoadDebugSolution(response->DebugSolution()); + } + + // Create extra shared classes if needed. Note that while these parameters + // are true by default, we disable them if we don't have enough workers for + // them in AdaptGlobalParameters(). + // + // Registering them to the global model should not really be necessary, + // except if one wants to expect them from outside SolveCpModel(). + if (params.use_rins_lns() || params.use_feasibility_pump()) { + lp_solutions = std::make_unique( + /*num_solutions_to_keep=*/10); + global_model->Register(lp_solutions.get()); + + incomplete_solutions = std::make_unique(); + global_model->Register( + incomplete_solutions.get()); + } + + // Set up synchronization mode in parallel. + const bool always_synchronize = + !params.interleave_search() || params.num_workers() <= 1; + response->SetSynchronizationMode(always_synchronize); + if (params.share_binary_clauses() && params.num_workers() > 1) { + clauses = std::make_unique(always_synchronize, + absl::Seconds(1)); + } +} + +bool SharedClasses::SearchIsDone() { + if (response->ProblemIsSolved()) { + // This is for cases where the time limit is checked more often. 
+ time_limit->Stop(); + return true; + } + if (time_limit->LimitReached()) return true; + return false; +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/cp_model_solver_helpers.h b/ortools/sat/cp_model_solver_helpers.h new file mode 100644 index 0000000000..14e34ab310 --- /dev/null +++ b/ortools/sat/cp_model_solver_helpers.h @@ -0,0 +1,151 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_SAT_CP_MODEL_SOLVER_HELPERS_H_ +#define OR_TOOLS_SAT_CP_MODEL_SOLVER_HELPERS_H_ + +#include +#include +#include +#include + +#include "absl/flags/declare.h" +#include "ortools/base/timer.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/stat_tables.h" +#include "ortools/sat/synchronization.h" +#include "ortools/sat/util.h" +#include "ortools/sat/work_assignment.h" +#include "ortools/util/logging.h" + +ABSL_DECLARE_FLAG(bool, cp_model_dump_models); +ABSL_DECLARE_FLAG(bool, cp_model_check_intermediate_solutions); +ABSL_DECLARE_FLAG(std::string, cp_model_dump_prefix); +ABSL_DECLARE_FLAG(bool, cp_model_dump_submodels); + +namespace operations_research { +namespace sat { + +// Small wrapper containing all the shared classes between our subsolver +// threads. 
Note that all these classes can also be retrieved with something +// like global_model->GetOrCreate() but it is not thread-safe to do so. +// +// All the classes here should be thread-safe, or at least safe in the way they +// are accessed. For instance the model_proto will be kept constant for the +// whole duration of the solve. +struct SharedClasses { + SharedClasses(const CpModelProto* proto, Model* global_model); + + // These are never nullptr. + const CpModelProto& model_proto; + WallTimer* const wall_timer; + ModelSharedTimeLimit* const time_limit; + SolverLogger* const logger; + SharedStatistics* const stats; + SharedResponseManager* const response; + SharedTreeManager* const shared_tree_manager; + + // These can be nullptr depending on the options. + std::unique_ptr bounds; + std::unique_ptr lp_solutions; + std::unique_ptr incomplete_solutions; + std::unique_ptr clauses; + + // For displaying summary at the end. + SharedStatTables stat_tables; + + bool SearchIsDone(); +}; + +// Loads a CpModelProto inside the given model. +// This should only be called once on a given 'Model' class. +void LoadCpModel(const CpModelProto& model_proto, Model* model); + +// Solves an already loaded cp_model_proto. +// The final CpSolverResponse must be read from the shared_response_manager. +// +// TODO(user): This should be transformed so that it can be called many times +// and resume from the last search state as if it wasn't interrupted. That would +// allow use to easily interleave different heuristics in the same thread. +void SolveLoadedCpModel(const CpModelProto& model_proto, Model* model); + +// Registers a callback that will export variables bounds fixed at level 0 of +// the search. This should not be registered to a LNS search. 
+void RegisterVariableBoundsLevelZeroExport( + const CpModelProto& /*model_proto*/, + SharedBoundsManager* shared_bounds_manager, Model* model); + +// Registers a callback to import new variables bounds stored in the +// shared_bounds_manager. These bounds are imported at level 0 of the search +// in the linear scan minimize function. +void RegisterVariableBoundsLevelZeroImport( + const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager, + Model* model); + +// Registers a callback that will report improving objective best bound. +// It will be called each time new objective bound are propagated at level zero. +void RegisterObjectiveBestBoundExport( + IntegerVariable objective_var, + SharedResponseManager* shared_response_manager, Model* model); + +// Registers a callback to import new objective bounds. It will be called each +// time the search main loop is back to level zero. Note that it the presence of +// assumptions, this will not happen until the set of assumptions is changed. +void RegisterObjectiveBoundsImport( + SharedResponseManager* shared_response_manager, Model* model); + +// Registers a callback that will export good clauses discovered during search. +void RegisterClausesExport(int id, SharedClausesManager* shared_clauses_manager, + Model* model); + +// Registers a callback to import new clauses stored in the +// shared_clausess_manager. These clauses are imported at level 0 of the search +// in the linear scan minimize function. +// it returns the id of the worker in the shared clause manager. +// +// TODO(user): Can we import them in the core worker ? 
+int RegisterClausesLevelZeroImport(int id, + SharedClausesManager* shared_clauses_manager, + Model* model); + +void PostsolveResponseWrapper(const SatParameters& params, + int num_variable_in_original_model, + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + std::vector* solution); + +// Try to find a solution by following the hint and using a low conflict limit. +// The CpModelProto must already be loaded in the Model. +void QuickSolveWithHint(const CpModelProto& model_proto, Model* model); + +// Solve a model with a different objective consisting of minimizing the L1 +// distance with the provided hint. Note that this method creates an in-memory +// copy of the model and loads a local Model object from the copied model. +void MinimizeL1DistanceWithHint(const CpModelProto& model_proto, Model* model); + +void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model); + +void AdaptGlobalParameters(const CpModelProto& model_proto, Model* model); + +// This should be called on the presolved model. It will read the file +// specified by --cp_model_load_debug_solution and properly fill the +// model->Get() proto vector. 
+void LoadDebugSolution(const CpModelProto& model_proto, Model* model); + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_CP_MODEL_SOLVER_HELPERS_H_ diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index 9cd429cfa4..1977fef19c 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -17,12 +17,15 @@ #include #include +#include #include #include #include #include #include +#include "absl/algorithm/container.h" +#include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" @@ -32,12 +35,14 @@ #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "google/protobuf/message.h" +#include "ortools/algorithms/binary_search.h" #include "ortools/algorithms/find_graph_symmetries.h" #include "ortools/algorithms/sparse_permutation.h" #include "ortools/base/hash.h" #include "ortools/base/logging.h" #include "ortools/graph/graph.h" #include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/model.h" @@ -49,6 +54,7 @@ #include "ortools/sat/util.h" #include "ortools/util/affine_relation.h" #include "ortools/util/logging.h" +#include "ortools/util/saturated_arithmetic.h" #include "ortools/util/time_limit.h" namespace operations_research { @@ -65,6 +71,19 @@ struct VectorHash { } }; +struct NodeExprCompare { + bool operator()(const LinearExpressionProto& a, + const LinearExpressionProto& b) const { + if (a.offset() != b.offset()) return a.offset() < b.offset(); + if (a.vars_size() != b.vars_size()) return a.vars_size() < b.vars_size(); + for (int i = 0; i < a.vars_size(); ++i) { + if (a.vars(i) != b.vars(i)) return a.vars(i) < b.vars(i); + if (a.coeffs(i) != b.coeffs(i)) return a.coeffs(i) < b.coeffs(i); + } + return false; + } +}; + // A simple class to 
generate equivalence class number for // GenerateGraphForSymmetryDetection(). class IdGenerator { @@ -97,6 +116,28 @@ void Append( } } +bool IsIntervalFixedSize(const IntervalConstraintProto& interval) { + if (!interval.size().vars().empty()) { + return false; + } + if (interval.start().vars().size() != interval.end().vars().size()) { + return false; + } + for (int i = 0; i < interval.start().vars().size(); ++i) { + if (interval.start().coeffs(i) != interval.end().coeffs(i)) { + return false; + } + if (interval.start().vars(i) != interval.end().vars(i)) { + return false; + } + } + if (interval.end().offset() != + interval.start().offset() + interval.size().offset()) { + return false; + } + return true; +} + // Returns a graph whose automorphisms can be mapped back to the symmetries of // the model described in the given CpModelProto. // @@ -130,22 +171,25 @@ std::unique_ptr GenerateGraphForSymmetryDetection( VARIABLE_NODE, VAR_COEFFICIENT_NODE, CONSTRAINT_NODE, + VAR_LIN_EXPR_NODE, }; IdGenerator color_id_generator; initial_equivalence_classes->clear(); - auto new_node = [&initial_equivalence_classes, &graph, - &color_id_generator](const std::vector& color) { + auto new_node_from_id = [&initial_equivalence_classes, &graph](int color_id) { // Since we add nodes one by one, initial_equivalence_classes->size() gives // the number of nodes at any point, which we use as the next node index. const int node = initial_equivalence_classes->size(); - initial_equivalence_classes->push_back(color_id_generator.GetId(color)); + initial_equivalence_classes->push_back(color_id); // In some corner cases, we create a node but never uses it. We still // want it to be there. 
graph->AddNode(node); return node; }; - + auto new_node = [&new_node_from_id, + &color_id_generator](const std::vector& color) { + return new_node_from_id(color_id_generator.GetId(color)); + }; // For two variables to be in the same equivalence class, they need to have // the same objective coefficient, and the same possible bounds. // @@ -169,29 +213,43 @@ std::unique_ptr GenerateGraphForSymmetryDetection( CHECK_EQ(v, new_node(tmp_color)); } + const int color_id_for_coeff_one = + color_id_generator.GetId({VAR_COEFFICIENT_NODE, 1}); + const int color_id_for_coeff_minus_one = + color_id_generator.GetId({VAR_COEFFICIENT_NODE, -1}); + // We will lazily create "coefficient nodes" that correspond to a variable // with a given coefficient. absl::flat_hash_map, int> coefficient_nodes; - auto get_coefficient_node = [&new_node, &graph, &coefficient_nodes, - &tmp_color](int var, int64_t coeff) { - const int var_node = var; - DCHECK(RefIsPositive(var)); + auto get_coefficient_node = + [&new_node_from_id, &graph, &coefficient_nodes, &color_id_generator, + &tmp_color, color_id_for_coeff_minus_one](int var, int64_t coeff) { + const int var_node = var; + DCHECK(RefIsPositive(var)); - // For a coefficient of one, which are the most common, we can optimize the - // size of the graph by omitting the coefficient node altogether and using - // directly the var_node in this case. - if (coeff == 1) return var_node; + // For a coefficient of one, which are the most common, we can optimize + // the size of the graph by omitting the coefficient node altogether and + // using directly the var_node in this case. 
+ if (coeff == 1) return var_node; - const auto insert = - coefficient_nodes.insert({std::make_pair(var, coeff), 0}); - if (!insert.second) return insert.first->second; + const auto insert = + coefficient_nodes.insert({std::make_pair(var, coeff), 0}); + if (!insert.second) return insert.first->second; - tmp_color = {VAR_COEFFICIENT_NODE, coeff}; - const int secondary_node = new_node(tmp_color); - graph->AddArc(var_node, secondary_node); - insert.first->second = secondary_node; - return secondary_node; - }; + int color_id; + // Because -1 is really common (also used for negated literal), we have + // a fast path for it. + if (coeff == -1) { + color_id = color_id_for_coeff_minus_one; + } else { + tmp_color = {VAR_COEFFICIENT_NODE, coeff}; + color_id = color_id_generator.GetId(tmp_color); + } + const int secondary_node = new_node_from_id(color_id); + graph->AddArc(var_node, secondary_node); + insert.first->second = secondary_node; + return secondary_node; + }; // For a literal we use the same as a coefficient 1 or -1. We can do that // because literal and (var, coefficient) never appear together in the same @@ -213,15 +271,16 @@ std::unique_ptr GenerateGraphForSymmetryDetection( // node. This makes sure that any permutation that touch a variable, must // permute its coefficient nodes accordingly. absl::flat_hash_set> implications; - auto get_implication_node = [&new_node, &graph, &coefficient_nodes, - &tmp_color](int ref) { + auto get_implication_node = [&new_node_from_id, &graph, &coefficient_nodes, + color_id_for_coeff_one, + color_id_for_coeff_minus_one](int ref) { const int var = PositiveRef(ref); const int64_t coeff = RefIsPositive(ref) ? 1 : -1; const auto insert = coefficient_nodes.insert({std::make_pair(var, coeff), 0}); if (!insert.second) return insert.first->second; - tmp_color = {VAR_COEFFICIENT_NODE, coeff}; - const int secondary_node = new_node(tmp_color); + const int secondary_node = new_node_from_id( + coeff == 1 ? 
color_id_for_coeff_one : color_id_for_coeff_minus_one); graph->AddArc(var, secondary_node); insert.first->second = secondary_node; return secondary_node; @@ -238,6 +297,35 @@ std::unique_ptr GenerateGraphForSymmetryDetection( get_implication_node(NegatedRef(ref_a))); }; + auto make_linear_expr_node = [&new_node, &graph, &get_coefficient_node]( + const LinearExpressionProto& expr, + const std::vector& color) { + std::vector local_color = color; + local_color.push_back(expr.offset()); + const int local_node = new_node(local_color); + + for (int i = 0; i < expr.vars().size(); ++i) { + const int ref = expr.vars(i); + const int var_node = PositiveRef(ref); + const int64_t coeff = + RefIsPositive(ref) ? expr.coeffs(i) : -expr.coeffs(i); + graph->AddArc(get_coefficient_node(var_node, coeff), local_node); + } + return local_node; + }; + + absl::btree_map expr_nodes; + auto shared_linear_expr_node = + [&make_linear_expr_node, &expr_nodes](const LinearExpressionProto& expr) { + const auto [it, inserted] = expr_nodes.insert({expr, 0}); + if (inserted) { + const std::vector local_color = {VAR_LIN_EXPR_NODE, + expr.offset()}; + it->second = make_linear_expr_node(expr, local_color); + } + return it->second; + }; + // We need to keep track of this for scheduling constraints. 
absl::flat_hash_map interval_constraint_index_to_node; @@ -273,6 +361,14 @@ std::unique_ptr GenerateGraphForSymmetryDetection( } break; } + case ConstraintProto::kAllDiff: { + CHECK_EQ(constraint_node, new_node(color)); + for (const LinearExpressionProto& expr : + constraint.all_diff().exprs()) { + graph->AddArc(shared_linear_expr_node(expr), constraint_node); + } + break; + } case ConstraintProto::kBoolOr: { CHECK_EQ(constraint_node, new_node(color)); for (const int ref : constraint.bool_or().literals()) { @@ -328,75 +424,54 @@ std::unique_ptr GenerateGraphForSymmetryDetection( const LinearExpressionProto& target_expr = constraint.lin_max().target(); - std::vector local_color = color; - local_color.push_back(target_expr.offset()); - const int target_node = new_node(local_color); - local_color.pop_back(); - - for (int j = 0; j < target_expr.vars_size(); ++j) { - const int var = target_expr.vars(j); - DCHECK(RefIsPositive(var)); - const int64_t coeff = target_expr.coeffs(j); - graph->AddArc(get_coefficient_node(var, coeff), target_node); - } + const int target_node = make_linear_expr_node(target_expr, color); for (int i = 0; i < constraint.lin_max().exprs_size(); ++i) { - // TODO(user): We can create a node per LinearExpressionProto instead. - // This will allow to reuse node between constraint, if they share a - // common expression. const LinearExpressionProto& expr = constraint.lin_max().exprs(i); - - local_color.push_back(expr.offset()); - const int local_node = new_node(local_color); - local_color.pop_back(); - - for (int j = 0; j < expr.vars().size(); ++j) { - const int var = expr.vars(j); - DCHECK(RefIsPositive(var)); - const int64_t coeff = expr.coeffs(j); - graph->AddArc(get_coefficient_node(var, coeff), local_node); - } - - graph->AddArc(local_node, target_node); + graph->AddArc(shared_linear_expr_node(expr), target_node); } break; } case ConstraintProto::kInterval: { - // We create 3 constraint nodes (for start, size and end) including the - // offset. 
We connect these to their terms like for a linear constraint. - std::vector nodes; - for (int indicator = 0; indicator <= 2; ++indicator) { - const LinearExpressionProto& expr = - indicator == 0 ? constraint.interval().start() - : indicator == 1 ? constraint.interval().size() - : constraint.interval().end(); - + static constexpr int kFixedIntervalColor = 0; + static constexpr int kNonFixedIntervalColor = 1; + if (IsIntervalFixedSize(constraint.interval())) { std::vector local_color = color; - local_color.push_back(indicator); - local_color.push_back(expr.offset()); - const int local_node = new_node(local_color); - nodes.push_back(local_node); + local_color.push_back(kFixedIntervalColor); + local_color.push_back(constraint.interval().size().offset()); + const int full_node = + make_linear_expr_node(constraint.interval().start(), local_color); + CHECK_EQ(full_node, constraint_node); + } else { + // We create 3 constraint nodes (for start, size and end) including + // the offset. We connect these to their terms like for a linear + // constraint. + std::vector local_color = color; + local_color.push_back(kNonFixedIntervalColor); - for (int i = 0; i < expr.vars().size(); ++i) { - const int ref = expr.vars(i); - const int var_node = PositiveRef(ref); - const int64_t coeff = - RefIsPositive(ref) ? expr.coeffs(i) : -expr.coeffs(i); - graph->AddArc(get_coefficient_node(var_node, coeff), local_node); - } + local_color.push_back(0); + const int start_node = + make_linear_expr_node(constraint.interval().start(), local_color); + local_color.pop_back(); + CHECK_EQ(start_node, constraint_node); + + // We can use a shared node for one of the three. Let's use the size + // since it has the most chance of being reused. 
+ const int size_node = + shared_linear_expr_node(constraint.interval().size()); + + local_color.push_back(1); + const int end_node = + make_linear_expr_node(constraint.interval().end(), local_color); + local_color.pop_back(); + + // Make sure that if one node is mapped to another one, its other two + // components are the same. + graph->AddArc(start_node, end_node); + graph->AddArc(end_node, size_node); } - - // We will only map enforcement literal to the start_node below because - // it has the same index as the constraint_node. interval_constraint_index_to_node[constraint_index] = constraint_node; - CHECK_EQ(nodes[0], constraint_node); - - // Make sure that if one node is mapped to another one, its other two - // components are the same. - graph->AddArc(nodes[0], nodes[1]); - graph->AddArc(nodes[1], nodes[2]); - graph->AddArc(nodes[2], nodes[0]); // TODO(user): not needed? break; } case ConstraintProto::kNoOverlap: { @@ -414,21 +489,76 @@ std::unique_ptr GenerateGraphForSymmetryDetection( // Note(user): This require that intervals appear before they are used. // We currently enforce this at validation, otherwise we need two passes // here and in a bunch of other places. - // - // TODO(user): With this graph encoding, we loose the symmetry that the - // dimension x can be swapped with the dimension y. I think it is - // possible to encode this by creating two extra nodes X and - // Y, each connected to all the x and all the y, but I have to think - // more about it. 
CHECK_EQ(constraint_node, new_node(color)); + std::vector local_color = color; + local_color.push_back(0); const int size = constraint.no_overlap_2d().x_intervals().size(); + const int node_x = new_node(local_color); + const int node_y = new_node(local_color); + local_color.pop_back(); + graph->AddArc(constraint_node, node_x); + graph->AddArc(constraint_node, node_y); + local_color.push_back(1); for (int i = 0; i < size; ++i) { + const int box_node = new_node(local_color); + graph->AddArc(box_node, constraint_node); const int x = constraint.no_overlap_2d().x_intervals(i); const int y = constraint.no_overlap_2d().y_intervals(i); - graph->AddArc(interval_constraint_index_to_node.at(x), - constraint_node); - graph->AddArc(interval_constraint_index_to_node.at(x), - interval_constraint_index_to_node.at(y)); + graph->AddArc(interval_constraint_index_to_node.at(x), node_x); + graph->AddArc(interval_constraint_index_to_node.at(x), box_node); + graph->AddArc(interval_constraint_index_to_node.at(y), node_y); + graph->AddArc(interval_constraint_index_to_node.at(y), box_node); + } + break; + } + case ConstraintProto::kCumulative: { + // Note(user): This require that intervals appear before they are used. + // We currently enforce this at validation, otherwise we need two passes + // here and in a bunch of other places. 
+ const CumulativeConstraintProto& ct = constraint.cumulative(); + std::vector capacity_color = color; + capacity_color.push_back(0); + CHECK_EQ(constraint_node, new_node(capacity_color)); + graph->AddArc(constraint_node, + make_linear_expr_node(ct.capacity(), capacity_color)); + + std::vector task_color = color; + task_color.push_back(1); + for (int i = 0; i < ct.intervals().size(); ++i) { + const int task_node = + make_linear_expr_node(ct.demands(i), task_color); + graph->AddArc(task_node, constraint_node); + graph->AddArc(task_node, + interval_constraint_index_to_node.at(ct.intervals(i))); + } + break; + } + case ConstraintProto::kCircuit: { + // Note that this implementation will generate the same graph for a + // circuit constraint with two disconnected components and two circuit + // constraints with one component each. + const int num_arcs = constraint.circuit().literals().size(); + absl::flat_hash_map circuit_node_to_symmetry_node; + std::vector arc_color = color; + arc_color.push_back(1); + for (int i = 0; i < num_arcs; ++i) { + const int literal = constraint.circuit().literals(i); + const int tail = constraint.circuit().tails(i); + const int head = constraint.circuit().heads(i); + const int arc_node = new_node(arc_color); + if (!circuit_node_to_symmetry_node.contains(head)) { + circuit_node_to_symmetry_node[head] = new_node(color); + } + const int head_node = circuit_node_to_symmetry_node[head]; + if (!circuit_node_to_symmetry_node.contains(tail)) { + circuit_node_to_symmetry_node[tail] = new_node(color); + } + const int tail_node = circuit_node_to_symmetry_node[tail]; + // To make the graph directed, we add two arcs on the head but not on + // the tail. + graph->AddArc(tail_node, arc_node); + graph->AddArc(arc_node, get_literal_node(literal)); + graph->AddArc(arc_node, head_node); } break; } @@ -664,7 +794,7 @@ namespace { // TODO(user): The same reasonning can be done if fixing the variable to // zero leads to many propagations at one. 
For general variables, we might be // able to do something too. -void OrbitAndPropagation(const std::vector& orbits, int var, +void OrbitAndPropagation(absl::Span orbits, int var, std::vector* can_be_fixed_to_false, PresolveContext* context) { // Note that if a variable is fixed in the orbit, then everything should be @@ -719,6 +849,52 @@ void OrbitAndPropagation(const std::vector& orbits, int var, } } +std::vector BuildInequalityCoeffsForOrbitope( + const std::vector& maximum_values, int64_t max_linear_size, + bool* is_approximated) { + std::vector out(maximum_values.size()); + int64_t range_product = 1; + uint64_t greatest_coeff = 0; + for (int i = 0; i < maximum_values.size(); ++i) { + out[i] = range_product; + greatest_coeff = + std::max(greatest_coeff, static_cast(maximum_values[i])); + range_product = CapProd(range_product, 1 + maximum_values[i]); + } + + if (range_product <= max_linear_size) { + // The product of all ranges fit in a int64_t. This is good news, that + // means we can interpret each row of the matrix as an integer in a + // mixed-radix representation and impose row[i] <= row[i+1]. 
+ *is_approximated = false; + return out; + } + *is_approximated = true; + + const auto compute_approximate_coeffs = + [max_linear_size, &maximum_values](double scaling_factor, + std::vector* coeffs) -> bool { + int64_t max_size = 0; + double cumulative_product_double = 1.0; + for (int i = 0; i < maximum_values.size(); ++i) { + const int64_t max = maximum_values[i]; + const int64_t coeff = static_cast(cumulative_product_double); + (*coeffs)[i] = coeff; + cumulative_product_double *= scaling_factor * max + 1; + max_size = CapAdd(max_size, CapProd(max, coeff)); + if (max_size > max_linear_size) return false; + } + return true; + }; + + const double scaling = BinarySearch( + 0.0, 1.0, [&compute_approximate_coeffs, &out](double scaling_factor) { + return compute_approximate_coeffs(scaling_factor, &out); + }); + CHECK(compute_approximate_coeffs(scaling, &out)); + return out; +} + } // namespace bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { @@ -781,7 +957,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } } - // We have a few heuristics. The firsts only look at the gobal orbits under + // We have a few heuristics. The first only look at the global orbits under // the symmetry group and try to infer Boolean variable fixing via symmetry // breaking. Note that nothing is fixed yet, we will decide later if we fix // these Booleans or not. @@ -829,7 +1005,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { // literal, and maximize the number of fixing. // // TODO(user): Doing that is not always good, on cod105.mps, fixing variables - // instead of letting the innner solver handle Boolean symmetries make the + // instead of letting the inner solver handle Boolean symmetries make the // problem unsolvable instead of easily solved. This is probably because this // fixing do not exploit the full structure of these symmeteries. 
Note // however that the fixing via propagation above close cod105 even more @@ -964,8 +1140,7 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { // iterate! int max_num_fixed_in_orbitope = 0; if (!orbitope.empty()) { - const int num_rows = orbitope[0].size(); - int size_left = num_rows; + int size_left = orbitope[0].size(); for (int col = 0; size_left > 1 && col < orbitope.size(); ++col) { max_num_fixed_in_orbitope += size_left - 1; --size_left; @@ -1021,72 +1196,77 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { } } - while (!orbitope.empty() && orbitope[0].size() > 1) { + if (!orbitope.empty() && orbitope[0].size() > 1) { const int num_cols = orbitope[0].size(); - const std::vector orbits = GetOrbitopeOrbits(num_vars, orbitope); + const std::vector orbitope_orbits = + GetOrbitopeOrbits(num_vars, orbitope); + + // Using the orbitope orbits and intersecting at most ones, we will be able + // in some case to derive a property of the literals of one row of the + // orbitope. Namely that: + // - All literals of that row take the same value. + // - At most one literal can be true. + // - At most one literal can be false. + // + // See the comment below for how we can infer this. + const int num_rows = orbitope.size(); + std::vector row_is_all_equivalent(num_rows, false); + std::vector row_has_at_most_one_true(num_rows, false); + std::vector row_has_at_most_one_false(num_rows, false); // Because in the orbitope case, we have a full symmetry group of the // columns, we can infer more than just using the orbits under a general // permutation group. If an at most one contains two variables from the - // orbit, we can infer: + // row, we can infer: // 1/ If the two variables appear positively, then there is an at most one - // on the full orbit, and we can set n - 1 variables to zero to break the + // on the full row, and we can set n - 1 variables to zero to break the // symmetry. 
// 2/ If the two variables appear negatively, then the opposite situation - // arise and there is at most one zero on the orbit, we can set n - 1 + // arise and there is at most one zero on the row, we can set n - 1 // variables to one. // 3/ If two literals of opposite sign appear, then the only possibility - // for the orbit are all at one or all at zero, thus we can mark all + // for the row are all at one or all at zero, thus we can mark all // variables as equivalent. // // These property comes from the fact that when we permute a line of the // orbitope in any way, then the position than ends up in the at most one // must never be both at one. // - // Note that 1/ can be done without breaking any symmetry, but for 2/ and 3/ - // by choosing which variable is not fixed, we will break some symmetry, and - // we will need to update the orbitope to stabilize this choice before - // continuing. + // Note that 3/ can be done without breaking any symmetry, but for 1/ and 2/ + // by choosing which variable is not fixed, we will break some symmetry. // - // TODO(user): for 2/ and 3/ we could add an at most one constraint on the - // full orbit if it is not already there! + // TODO(user): for 1/ and 2/ we could add an at most one constraint on the + // full row if it is not already there! // - // Note(user): On the miplib, only 1/ happens currently. Not sure with LNS - // though. - std::vector all_equivalent_rows(orbitope.size(), false); - - // The result described above can be generalized if an at most one intersect - // many of the orbitope rows, each in at leat two positions. We will track - // the set of best rows on which we have an at most one (or at most one - // zero) on all their entries. - bool at_most_one_in_best_rows; // The alternative is at most one zero. - int64_t best_score = 0; - std::vector best_rows; - - std::vector rows_in_at_most_one; + // Note(user): On the miplib, only 1/ and 2/ happens currently. Not sure + // with LNS though. 
for (const google::protobuf::RepeatedField* literals : at_most_ones) { tmp_to_clear.clear(); for (const int literal : *literals) { if (context->IsFixed(literal)) continue; const int var = PositiveRef(literal); - const int rep = orbits[var]; - if (rep == -1) continue; + const int row = orbitope_orbits[var]; + if (row == -1) continue; - if (tmp_sizes[rep] == 0) tmp_to_clear.push_back(rep); - tmp_sizes[rep]++; - if (RefIsPositive(literal)) tmp_num_positive[rep]++; + if (tmp_sizes[row] == 0) tmp_to_clear.push_back(row); + tmp_sizes[row]++; + if (RefIsPositive(literal)) tmp_num_positive[row]++; } - int num_positive_direction = 0; - int num_negative_direction = 0; - - // An at most one touching two positions in an orbitope row can possibly - // be extended, depending if it has singleton intersection swith other - // rows and where. + // An at most one touching two positions in an orbitope row can be + // extended to include the full row. + // + // Note(user): I am not sure we care about that here. By symmetry, if we + // have an at most one touching two positions, then we should have others + // touching all pair of positions. And the at most one expansion would + // already have extended it. So this is more FYI. bool possible_extension = false; - rows_in_at_most_one.clear(); + // TODO(user): if the same at most one touch more than one row, we can + // deduce more. It is a bit tricky and maybe not frequent enough to make a + // big difference. Also, as we start to fix things, at most one might + // propagate by themselves. 
for (const int row : tmp_to_clear) { const int size = tmp_sizes[row]; const int num_positive = tmp_num_positive[row]; @@ -1094,16 +1274,15 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { tmp_sizes[row] = 0; tmp_num_positive[row] = 0; + if (num_positive > 0 && num_negative > 0) { + row_is_all_equivalent[row] = true; + } if (num_positive > 1 && num_negative == 0) { if (size < num_cols) possible_extension = true; - rows_in_at_most_one.push_back(row); - ++num_positive_direction; + row_has_at_most_one_true[row] = true; } else if (num_positive == 0 && num_negative > 1) { if (size < num_cols) possible_extension = true; - rows_in_at_most_one.push_back(row); - ++num_negative_direction; - } else if (num_positive > 0 && num_negative > 0) { - all_equivalent_rows[row] = true; + row_has_at_most_one_false[row] = true; } } @@ -1111,107 +1290,130 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { context->UpdateRuleStats( "TODO symmetry: possible at most one extension."); } - - if (num_positive_direction > 0 && num_negative_direction > 0) { - return context->NotifyThatModelIsUnsat("Symmetry and at most ones"); - } - const bool direction = num_positive_direction > 0; - - // Because of symmetry, the choice of the column shouldn't matter (they - // will all appear in the same number of constraints of the same types), - // however we prefer to fix the variables that seems to touch more - // constraints. - // - // TODO(user): maybe we should simplify the constraint using the variable - // we fix before choosing the next row to break symmetry on. If there are - // multiple row involved, we could also take the intersection instead of - // probably counting the same constraints more than once. 
- int64_t score = 0; - for (const int row : rows_in_at_most_one) { - score += - context->VarToConstraints(PositiveRef(orbitope[row][0])).size(); - } - if (score > best_score) { - at_most_one_in_best_rows = direction; - best_score = score; - best_rows = rows_in_at_most_one; - } } - // Mark all the equivalence. + // List the row in "at most one" by score. We will be able to fix a + // "triangle" of literals in order to break some of the symmetry. + std::vector> rows_by_score; + + // Mark all the equivalence or fixed rows. // Note that this operation do not change the symmetry group. // // TODO(user): We could remove these rows from the orbitope. Note that // currently this never happen on the miplib (maybe in LNS though). - for (int i = 0; i < all_equivalent_rows.size(); ++i) { - if (all_equivalent_rows[i]) { - for (int j = 1; j < num_cols; ++j) { - context->StoreBooleanEqualityRelation(orbitope[i][0], orbitope[i][j]); - context->UpdateRuleStats("symmetry: all equivalent in orbit"); + for (int i = 0; i < num_rows; ++i) { + if (row_has_at_most_one_true[i] && row_has_at_most_one_false[i]) { + // If we have both property, it means we have + // - sum_j orbitope[row][j] <= 1 + // - sum_j not(orbitope[row][j]) <= 1 which is the same as + // sum_j orbitope[row][j] >= num_cols - 1. + // This is only possible if we have two elements and we don't have + // row_is_all_equivalent. + if (num_cols == 2 && !row_is_all_equivalent[i]) { + // We have [1, 0] or [0, 1]. + context->UpdateRuleStats("symmetry: equivalence in orbitope row"); + context->StoreBooleanEqualityRelation(orbitope[i][0], + NegatedRef(orbitope[i][1])); if (context->ModelIsUnsat()) return false; - } - } - } - - // Break the symmetry on our set of best rows by picking one columns - // and setting all the other entries to zero or one. Note that the at most - // one applies to all entries in all rows. 
- // - // TODO(user): We don't have any at most one relation on this orbitope, - // but we could still add symmetry breaking inequality by picking any matrix - // entry and making it the largest/lowest value on its row. This also work - // for non-Booleans. - if (best_score == 0) { - context->UpdateRuleStats( - "TODO symmetry: add symmetry breaking inequalities?"); - break; - } - - // If our symmetry group is valid, they cannot be any variable already - // fixed to one (or zero if !at_most_one_in_best_rows). Otherwise all would - // be fixed to one and the problem would be unsat. - for (const int i : best_rows) { - for (int j = 0; j < num_cols; ++j) { - const int var = orbitope[i][j]; - if ((at_most_one_in_best_rows && context->LiteralIsTrue(var)) || - (!at_most_one_in_best_rows && context->LiteralIsFalse(var))) { - return context->NotifyThatModelIsUnsat("Symmetry and at most one"); - } - } - } - - // We have an at most one on a set of rows, we will pick a column, and set - // all other entries on these rows to zero. - // - // TODO(user): All choices should be equivalent, but double check? - const int best_col = 0; - for (const int i : best_rows) { - for (int j = 0; j < num_cols; ++j) { - if (j == best_col) continue; - const int var = orbitope[i][j]; - if (at_most_one_in_best_rows) { - context->UpdateRuleStats("symmetry: fixed to false"); - if (!context->SetLiteralToFalse(var)) return false; } else { - context->UpdateRuleStats("symmetry: fixed to true"); - if (!context->SetLiteralToTrue(var)) return false; + // No solution. + return context->NotifyThatModelIsUnsat("orbitope and at most one"); + } + continue; + } + + if (row_is_all_equivalent[i]) { + // Here we proved that the row is either all ones or all zeros. + // This was because we had: + // at_most_one = [x, ~y, ...] + // orbitope = [x, y, ...] + // and by symmetry we have + // at_most_one = [~x, y, ...] + // This for all pairs of positions in that row. 
+ if (row_has_at_most_one_false[i]) { + context->UpdateRuleStats("symmetry: all true in orbitope row"); + for (int j = 0; j < num_cols; ++j) { + if (!context->SetLiteralToTrue(orbitope[i][j])) return false; + } + } else if (row_has_at_most_one_true[i]) { + context->UpdateRuleStats("symmetry: all false in orbitope row"); + for (int j = 0; j < num_cols; ++j) { + if (!context->SetLiteralToFalse(orbitope[i][j])) return false; + } + } else { + context->UpdateRuleStats("symmetry: all equivalent in orbitope row"); + for (int j = 1; j < num_cols; ++j) { + context->StoreBooleanEqualityRelation(orbitope[i][0], + orbitope[i][j]); + if (context->ModelIsUnsat()) return false; + } + } + continue; + } + + // We use as the score the number of constraint in which variables from + // this row participate. + const int64_t score = + context->VarToConstraints(PositiveRef(orbitope[i][0])).size(); + if (row_has_at_most_one_true[i]) { + rows_by_score.push_back({i, score}); + } else if (row_has_at_most_one_false[i]) { + rows_by_score.push_back({i, score}); + } + } + + // Break the symmetry by fixing at each step all but one literal to true or + // false. Note that each time we do that for a row, we need to exclude the + // non-fixed column from the rest of the row processing. We thus fix a + // "triangle" of literals. + // + // This is the same as ordering the columns in some lexicographic order and + // using the at_most_ones to fix known position. Note that we can still add + // lexicographic symmetry breaking inequality on the columns as long as we + // do that in the same order as these fixing. 
+ absl::c_stable_sort(rows_by_score, [](const std::pair& p1, + const std::pair& p2) { + return p1.second > p2.second; + }); + int num_processed_rows = 0; + for (const auto [row, score] : rows_by_score) { + if (num_processed_rows + 1 >= num_cols) break; + ++num_processed_rows; + if (row_has_at_most_one_true[row]) { + context->UpdateRuleStats( + "symmetry: fixed all but one to false in orbitope row"); + for (int j = num_processed_rows; j < num_cols; ++j) { + if (!context->SetLiteralToFalse(orbitope[row][j])) return false; + } + } else { + CHECK(row_has_at_most_one_false[row]); + context->UpdateRuleStats( + "symmetry: fixed all but one to true in orbitope row"); + for (int j = num_processed_rows; j < num_cols; ++j) { + if (!context->SetLiteralToTrue(orbitope[row][j])) return false; } } } - // Remove all best rows. - for (const int i : best_rows) orbitope[i].clear(); - int new_size = 0; - for (int i = 0; i < orbitope.size(); ++i) { - if (!orbitope[i].empty()) orbitope[new_size++] = orbitope[i]; - } - CHECK_LT(new_size, orbitope.size()); - orbitope.resize(new_size); + // For correctness of the code below, reduce the orbitope. + // + // TODO(user): This is probably not needed if we add lexicographic + // constraint instead of just breaking a single row below. + if (num_processed_rows > 0) { + // Remove the first num_processed_rows. + int new_size = 0; + for (int i = num_processed_rows; i < orbitope.size(); ++i) { + orbitope[new_size++] = std::move(orbitope[i]); + } + CHECK_LT(new_size, orbitope.size()); + orbitope.resize(new_size); - // Remove best_col. - for (int i = 0; i < orbitope.size(); ++i) { - std::swap(orbitope[i][best_col], orbitope[i].back()); - orbitope[i].pop_back(); + // For each of them remove the first num_processed_rows entries. 
+ for (int i = 0; i < orbitope.size(); ++i) { + CHECK_LT(num_processed_rows, orbitope[i].size()); + orbitope[i].erase(orbitope[i].begin(), + orbitope[i].begin() + num_processed_rows); + } } } @@ -1239,6 +1441,44 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { context->UpdateRuleStats("symmetry: added symmetry breaking inequality"); } context->UpdateNewConstraintsVariableUsage(); + } else if (orbitope.size() > 1 && params.symmetry_level() > 3) { + std::vector max_values(orbitope.size()); + for (int i = 0; i < orbitope.size(); ++i) { + const int var = orbitope[i][0]; + const int64_t max = std::max(std::abs(context->MaxOf(var)), + std::abs(context->MinOf(var))); + max_values[i] = max; + } + constexpr int kMaxBits = 60; + bool is_approximated; + const std::vector coeffs = BuildInequalityCoeffsForOrbitope( + max_values, (int64_t{1} << kMaxBits), &is_approximated); + for (int i = 0; i + 1 < orbitope[0].size(); ++i) { + ConstraintProto* ct = context->working_model->add_constraints(); + auto* arg = ct->mutable_linear(); + for (int j = 0; j < orbitope.size(); ++j) { + const int64_t coeff = coeffs[j]; + arg->add_vars(orbitope[j][i + 1]); + arg->add_coeffs(coeff); + arg->add_vars(orbitope[j][i]); + arg->add_coeffs(-coeff); + DCHECK_EQ(context->MaxOf(orbitope[j][i + 1]), + context->MaxOf(orbitope[j][i])); + DCHECK_EQ(context->MinOf(orbitope[j][i + 1]), + context->MinOf(orbitope[j][i])); + } + arg->add_domain(0); + arg->add_domain(std::numeric_limits::max()); + DCHECK(!PossibleIntegerOverflow(*context->working_model, arg->vars(), + arg->coeffs())); + } + context->UpdateRuleStats( + absl::StrCat("symmetry: added linear ", + is_approximated ? 
"approximated " : "", + "inequality ordering orbitope columns"), + orbitope[0].size()); + context->UpdateNewConstraintsVariableUsage(); + return true; } return true; diff --git a/ortools/sat/cp_model_utils.cc b/ortools/sat/cp_model_utils.cc index 3fa2df9aba..2af03ee0e2 100644 --- a/ortools/sat/cp_model_utils.cc +++ b/ortools/sat/cp_model_utils.cc @@ -935,5 +935,11 @@ bool ConvertCpModelProtoToCnf(const CpModelProto& cp_model, std::string* out) { return true; } +int CombineSeed(int base_seed, int64_t delta) { + CHECK_GE(delta, 0); + const uint64_t fp = FingerprintSingleField(delta, kDefaultFingerprintSeed); + return static_cast(FingerprintSingleField(base_seed, fp) & (0x7FFFFFFF)); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_utils.h b/ortools/sat/cp_model_utils.h index 61411d644a..2bb793e2ac 100644 --- a/ortools/sat/cp_model_utils.h +++ b/ortools/sat/cp_model_utils.h @@ -244,7 +244,6 @@ bool ExpressionsContainsOnlyOneVar(const ExpressionList& exprs) { // Default seed for fingerprints. constexpr uint64_t kDefaultFingerprintSeed = 0xa5b85c5e198ed849; -// T must be castable to uint64_t. template inline uint64_t FingerprintRepeatedField( const google::protobuf::RepeatedField& sequence, uint64_t seed) { @@ -253,7 +252,6 @@ inline uint64_t FingerprintRepeatedField( sequence.size() * sizeof(T), seed); } -// T must be castable to uint64_t. template inline uint64_t FingerprintSingleField(const T& field, uint64_t seed) { return fasthash64(reinterpret_cast(&field), sizeof(T), seed); @@ -360,6 +358,9 @@ H AbslHashValue(H h, const LinearConstraintProto& m) { bool ConvertCpModelProtoToCnf(const CpModelProto& cp_mode, std::string* out); +// We assume delta >= 0 and we only use the low bit of delta. 
+int CombineSeed(int base_seed, int64_t delta); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cumulative.cc b/ortools/sat/cumulative.cc index f57b915405..ed1265277c 100644 --- a/ortools/sat/cumulative.cc +++ b/ortools/sat/cumulative.cc @@ -249,6 +249,22 @@ std::function Cumulative( if (parameters.use_overload_checker_in_cumulative()) { AddCumulativeOverloadChecker(capacity, helper, demands_helper, model); } + if (parameters.use_conservative_scale_overload_checker()) { + // Since we use the potential DFF conflict on demands to apply the + // heuristic, only do so if any demand is greater than 1. + bool any_demand_greater_than_one = false; + for (int i = 0; i < vars.size(); ++i) { + const IntegerValue demand_min = integer_trail->LowerBound(demands[i]); + if (demand_min > 1) { + any_demand_greater_than_one = true; + break; + } + } + if (any_demand_greater_than_one) { + AddCumulativeOverloadCheckerDff(capacity, helper, demands_helper, + model); + } + } // Propagator responsible for applying the Timetable Edge finding filtering // rule. 
It increases the minimum of the start variables and decreases the diff --git a/ortools/sat/cumulative_energy.cc b/ortools/sat/cumulative_energy.cc index edcd74c3a6..42c09ac80f 100644 --- a/ortools/sat/cumulative_energy.cc +++ b/ortools/sat/cumulative_energy.cc @@ -14,14 +14,23 @@ #include "ortools/sat/cumulative_energy.h" #include +#include +#include +#include +#include #include #include +#include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "ortools/base/iterator_adaptors.h" +#include "ortools/base/logging.h" +#include "ortools/sat/2d_orthogonal_packing.h" +#include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" #include "ortools/sat/intervals.h" #include "ortools/sat/model.h" +#include "ortools/sat/synchronization.h" #include "ortools/sat/theta_tree.h" #include "ortools/sat/util.h" #include "ortools/util/strong_integers.h" @@ -40,6 +49,19 @@ void AddCumulativeOverloadChecker(AffineExpression capacity, model->TakeOwnership(constraint); } +void AddCumulativeOverloadCheckerDff(AffineExpression capacity, + SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, + Model* model) { + auto* watcher = model->GetOrCreate(); + + CumulativeDualFeasibleEnergyConstraint* constraint_dff = + new CumulativeDualFeasibleEnergyConstraint(capacity, helper, demands, + model); + constraint_dff->RegisterWith(watcher); + model->TakeOwnership(constraint_dff); +} + CumulativeEnergyConstraint::CumulativeEnergyConstraint( AffineExpression capacity, SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands, Model* model) @@ -367,5 +389,241 @@ void CumulativeIsAfterSubsetConstraint::RegisterWith( } } +CumulativeDualFeasibleEnergyConstraint::CumulativeDualFeasibleEnergyConstraint( + AffineExpression capacity, SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, Model* model) + : random_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()), + opp_infeasibility_detector_(*random_, shared_stats_), + 
capacity_(capacity), + integer_trail_(model->GetOrCreate()), + helper_(helper), + demands_(demands) { + const int num_tasks = helper_->NumTasks(); + task_to_start_event_.resize(num_tasks); +} + +CumulativeDualFeasibleEnergyConstraint:: + ~CumulativeDualFeasibleEnergyConstraint() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back( + {"CumulativeDualFeasibleEnergyConstraint/called", num_calls_}); + stats.push_back( + {"CumulativeDualFeasibleEnergyConstraint/conflicts", num_conflicts_}); + stats.push_back({"CumulativeDualFeasibleEnergyConstraint/no_potential_window", + num_no_potential_window_}); + + shared_stats_->AddStats(stats); +} + +void CumulativeDualFeasibleEnergyConstraint::RegisterWith( + GenericLiteralWatcher* watcher) { + const int id = watcher->Register(this); + helper_->WatchAllTasks(id, watcher); + watcher->SetPropagatorPriority(id, 3); + watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); +} + +bool CumulativeDualFeasibleEnergyConstraint::FindAndPropagateConflict( + IntegerValue window_start, IntegerValue window_end) { + const int num_tasks = helper_->NumTasks(); + const IntegerValue capacity_max = integer_trail_->UpperBound(capacity_); + std::vector sizes; + std::vector demands; + std::vector index_to_task; + sizes.reserve(num_tasks); + demands.reserve(num_tasks); + index_to_task.reserve(num_tasks); + for (int task = 0; task < num_tasks; ++task) { + if (!helper_->IsPresent(task) || demands_->DemandMin(task) == 0) { + continue; + } + const IntegerValue size = Smallest1DIntersection( + helper_->StartMin(task), helper_->EndMax(task), helper_->SizeMin(task), + window_start, window_end); + if (size == 0) continue; + + sizes.push_back(size); + demands.push_back(demands_->DemandMin(task)); + index_to_task.push_back(task); + } + auto result = opp_infeasibility_detector_.TestFeasibility( + sizes, demands, {window_end - window_start, capacity_max}, + OrthogonalPackingOptions{ + .use_pairwise = true, + .use_dff_f0 = true, + 
.use_dff_f2 = true, + // Disable brute force which is correct only for bin packing. + .brute_force_threshold = 0, + .dff2_max_number_of_parameters_to_check = 100}); + + if (result.GetResult() != OrthogonalPackingResult::Status::INFEASIBLE) { + return true; + } + VLOG_EVERY_N_SEC(2, 3) << "Found a conflict on the sub-problem of window [" + << window_start << ", " << window_end << "] (with " + << sizes.size() << "/" << num_tasks << " tasks)" + << " with " + << result.GetItemsParticipatingOnConflict().size() + << " tasks participating on the conflict."; + + const auto& items = result.GetItemsParticipatingOnConflict(); + for (int i = 0; i < items.size(); ++i) { + const int task = index_to_task[items[i].index]; + const IntegerValue size_zero_level = Smallest1DIntersection( + helper_->LevelZeroStartMin(task), helper_->LevelZeroEndMax(task), + helper_->SizeMin(task), window_start, window_end); + + result.TryUseSlackToReduceItemSize( + i, OrthogonalPackingResult::Coord::kCoordX, size_zero_level); + result.TryUseSlackToReduceItemSize(i, + OrthogonalPackingResult::Coord::kCoordY, + demands_->LevelZeroDemandMin(task)); + } + helper_->ClearReason(); + for (const auto& item : result.GetItemsParticipatingOnConflict()) { + const int task = index_to_task[item.index]; + + const IntegerValue full_x_size = helper_->SizeMin(task); + const IntegerValue size_slack = full_x_size - item.size_x; + + helper_->AddStartMinReason(task, window_start - size_slack); + helper_->AddEndMaxReason(task, window_end + size_slack); + + helper_->AddSizeMinReason(task); + helper_->AddPresenceReason(task); + + demands_->AddDemandMinReason(task, item.size_y); + } + if (capacity_.var != kNoIntegerVariable) { + helper_->MutableIntegerReason()->push_back( + integer_trail_->UpperBoundAsLiteral(capacity_.var)); + } + return helper_->ReportConflict(); +} + +bool CumulativeDualFeasibleEnergyConstraint::Propagate() { + if (!helper_->SynchronizeAndSetTimeDirection(true)) return false; + 
demands_->CacheAllEnergyValues(); + + const IntegerValue capacity_max = integer_trail_->UpperBound(capacity_); + if (capacity_max <= 0) return true; + + // Set up theta tree. + start_event_task_time_.clear(); + int num_events = 0; + for (const auto task_time : helper_->TaskByIncreasingStartMin()) { + const int task = task_time.task_index; + if (!helper_->IsPresent(task) || demands_->DemandMin(task) == 0) { + task_to_start_event_[task] = -1; + continue; + } + start_event_task_time_.emplace_back(task_time); + task_to_start_event_[task] = num_events; + num_events++; + } + + if (num_events == 0) return true; + ++num_calls_; + + const IntegerValue largest_window = + helper_->EndMax(helper_->TaskByDecreasingEndMax().front().task_index) - + helper_->TaskByIncreasingStartMin().front().time; + const IntegerValue max_for_fixpoint_inverse = + std::numeric_limits::max() / + (num_events * capacity_max * largest_window); + + theta_tree_.Reset(num_events); + + // Since checking all possible dual-feasible functions is expensive, we only + // look for energy conflicts on time windows where a conflict with a DFF is + // possible. To rule out time windows where DFF conflicts are impossible, we + // use the following nice property stated in [1]: + // + // If f is a DFF, then for all possible sizes h_i of a problem of height H: + // f(h_i)/f(H) <= 1 / floor(H / h_i). + // + // This follows from the fact that floor(H / h_i) copies of h_i can fit + // sideways on the original problem and that those copies must still fit after + // any arbitrary DFF is applied. + // + // So, in practice, for a cumulative constraint with maximum capacity C and + // demands d_i, we look for time windows with energy conflicts for the + // modified problem: + // Capacity: L + // Demand for item i: L / (C / d_i) + // where L is any sufficiently large integer used to compute inverses without + // losing too much precision. + // + // [1] Carlier, Jacques, François Clautiaux, and Aziz Moukrim. 
"New reduction + // procedures and lower bounds for the two-dimensional bin packing problem + // with fixed orientation." Computers & Operations Research 34.8 (2007): + // 2223-2250. + std::vector> candidates_for_conflict; + const auto by_decreasing_end_max = helper_->TaskByDecreasingEndMax(); + for (const auto [current_task, current_end] : + ::gtl::reversed_view(by_decreasing_end_max)) { + if (task_to_start_event_[current_task] == -1) continue; + if (!helper_->IsPresent(current_task)) continue; + + // Add the current task to the tree. + { + const IntegerValue current_pseudo_energy = + helper_->SizeMin(current_task) * + (max_for_fixpoint_inverse / + (capacity_max / demands_->DemandMin(current_task))); + const int current_event = task_to_start_event_[current_task]; + const IntegerValue start_min = start_event_task_time_[current_event].time; + theta_tree_.AddOrUpdateEvent( + current_event, start_min * max_for_fixpoint_inverse, + current_pseudo_energy, current_pseudo_energy); + } + + { + // Find the critical interval. 
+ const IntegerValue envelope = theta_tree_.GetEnvelope(); + const int critical_event = + theta_tree_.GetMaxEventWithEnvelopeGreaterThan(envelope - 1); + const IntegerValue window_start = + start_event_task_time_[critical_event].time; + const IntegerValue window_end = current_end; + const IntegerValue window_size = window_end - window_start; + if (window_size == 0) continue; + + if (envelope > window_end * max_for_fixpoint_inverse) { + candidates_for_conflict.push_back({window_start, window_end}); + } + } + } + VLOG_EVERY_N_SEC(2, 3) << "Found " << candidates_for_conflict.size() + << " intervals with potential energy conflict using a " + "DFF on a problem of size " + << num_events << "."; + + if (candidates_for_conflict.empty()) { + ++num_no_potential_window_; + return true; + } + // The code above is efficient for pruning the initial problem to a set of + // windows with potential conflict, but it might produce some "overly large" + // windows: ie., a window that has no conflict but would show one if narrowed. + // + // TODO(user): explore with using Theta-trees with a multi-valued energy + // value. 
+ absl::InlinedVector, 3> + sampled_candidates; + std::sample(candidates_for_conflict.begin(), candidates_for_conflict.end(), + std::back_inserter(sampled_candidates), 3, *random_); + for (const auto& [window_start, window_end] : sampled_candidates) { + if (!FindAndPropagateConflict(window_start, window_end)) { + ++num_conflicts_; + return false; + } + } + + return true; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cumulative_energy.h b/ortools/sat/cumulative_energy.h index c892b7dfa6..3e50d5d98d 100644 --- a/ortools/sat/cumulative_energy.h +++ b/ortools/sat/cumulative_energy.h @@ -14,13 +14,16 @@ #ifndef OR_TOOLS_SAT_CUMULATIVE_ENERGY_H_ #define OR_TOOLS_SAT_CUMULATIVE_ENERGY_H_ +#include #include #include #include +#include "ortools/sat/2d_orthogonal_packing.h" #include "ortools/sat/integer.h" #include "ortools/sat/intervals.h" #include "ortools/sat/model.h" +#include "ortools/sat/synchronization.h" #include "ortools/sat/theta_tree.h" #include "ortools/sat/util.h" @@ -45,6 +48,13 @@ void AddCumulativeOverloadChecker(AffineExpression capacity, SchedulingDemandHelper* demands, Model* model); +// Same as above, but applying a Dual Feasible Function (also known as a +// conservative scale) before looking for overload. +void AddCumulativeOverloadCheckerDff(AffineExpression capacity, + SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, + Model* model); + // Implementation of AddCumulativeOverloadChecker(). class CumulativeEnergyConstraint : public PropagatorInterface { public: @@ -108,6 +118,44 @@ class CumulativeIsAfterSubsetConstraint : public PropagatorInterface { SchedulingDemandHelper* demands_; }; +// Implementation of AddCumulativeOverloadCheckerDff(). 
+class CumulativeDualFeasibleEnergyConstraint : public PropagatorInterface { + public: + CumulativeDualFeasibleEnergyConstraint(AffineExpression capacity, + SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands, + Model* model); + + ~CumulativeDualFeasibleEnergyConstraint() override; + + bool Propagate() final; + void RegisterWith(GenericLiteralWatcher* watcher); + + private: + bool FindAndPropagateConflict(IntegerValue window_start, + IntegerValue window_end); + + ModelRandomGenerator* random_; + SharedStatistics* shared_stats_; + OrthogonalPackingInfeasibilityDetector opp_infeasibility_detector_; + const AffineExpression capacity_; + IntegerTrail* integer_trail_; + SchedulingConstraintHelper* helper_; + SchedulingDemandHelper* demands_; + + ThetaLambdaTree theta_tree_; + + // Task characteristics. + std::vector task_to_start_event_; + + // Start event characteristics, by nondecreasing start time. + std::vector start_event_task_time_; + + int64_t num_calls_ = 0; + int64_t num_conflicts_ = 0; + int64_t num_no_potential_window_ = 0; +}; + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index e921a4a8d9..da89753b9e 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -89,8 +89,11 @@ void CutTerm::Complement(absl::int128* rhs) { expr_offset = bound_diff - expr_offset; // Note that this is not involutive because of floating point error. Fix? - lp_value = ToDouble(bound_diff) - lp_value; + lp_value = static_cast(bound_diff.value()) - lp_value; coeff = -coeff; + + // Swap the implied bound info. 
+ std::swap(cached_implied_lb, cached_implied_ub); } void CutTerm::ReplaceExpressionByLiteral(IntegerVariable var) { @@ -152,13 +155,13 @@ bool CutData::AppendOneTerm(IntegerVariable var, IntegerValue coeff, entry.expr_coeffs[0] = -IntegerValue(1); entry.expr_offset = ub; entry.coeff = -coeff; - entry.lp_value = ToDouble(ub) - lp_value; + entry.lp_value = static_cast(ub.value()) - lp_value; } else { // C = (X - LB) + LB entry.expr_coeffs[0] = IntegerValue(1); entry.expr_offset = -lb; entry.coeff = coeff; - entry.lp_value = lp_value - ToDouble(lb); + entry.lp_value = lp_value - static_cast(lb.value()); } terms.push_back(entry); return true; @@ -166,7 +169,7 @@ bool CutData::AppendOneTerm(IntegerVariable var, IntegerValue coeff, bool CutData::FillFromLinearConstraint( const LinearConstraint& base_ct, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, IntegerTrail* integer_trail) { rhs = absl::int128(base_ct.ub.value()); terms.clear(); @@ -270,55 +273,88 @@ double CutData::ComputeEfficacy() const { void CutDataBuilder::ClearIndices() { num_merges_ = 0; constraint_is_indexed_ = false; - direct_index_.clear(); - complemented_index_.clear(); + bool_index_.clear(); + secondary_bool_index_.clear(); } -void CutDataBuilder::RegisterAllBooleansTerms(const CutData& cut) { +void CutDataBuilder::RegisterAllBooleanTerms(const CutData& cut) { constraint_is_indexed_ = true; const int size = cut.terms.size(); for (int i = 0; i < size; ++i) { const CutTerm& term = cut.terms[i]; if (term.bound_diff != 1) continue; if (!term.IsSimple()) continue; - if (term.expr_coeffs[0] > 0) { - direct_index_[term.expr_vars[0]] = i; - } else { - complemented_index_[term.expr_vars[0]] = i; - } + + // Initially we shouldn't have duplicate bools and (1 - bools). + // So we just fill bool_index_. 
+ bool_index_[term.expr_vars[0]] = i; } } void CutDataBuilder::AddOrMergeTerm(const CutTerm& term, IntegerValue t, CutData* cut) { if (!constraint_is_indexed_) { - RegisterAllBooleansTerms(*cut); + RegisterAllBooleanTerms(*cut); } DCHECK(term.IsSimple()); const IntegerVariable var = term.expr_vars[0]; + const bool is_positive = (term.expr_coeffs[0] > 0); const int new_index = cut->terms.size(); - const auto [it, inserted] = - term.expr_coeffs[0] > 0 ? direct_index_.insert({var, new_index}) - : complemented_index_.insert({var, new_index}); - const int entry_index = it->second; + const auto [it, inserted] = bool_index_.insert({var, new_index}); if (inserted) { cut->terms.push_back(term); - } else { - // We can only merge the term if term.coeff + old_coeff do not overflow and - // if t * new_coeff do not overflow. - // - // If we cannot merge the term, we will keep them separate. The produced cut - // will be less strong, but can still be used. - const IntegerValue new_coeff = - CapAddI(cut->terms[entry_index].coeff, term.coeff); - if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) { - // If we cannot merge the term, we keep them separate. + return; + } + + // If the referred var is not right, replace the entry. + int entry_index = it->second; + if (entry_index >= new_index || cut->terms[entry_index].expr_vars[0] != var) { + it->second = new_index; + cut->terms.push_back(term); + return; + } + + // If the sign is not right, look into secondary hash_map for opposite sign. + if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) { + const auto [it, inserted] = secondary_bool_index_.insert({var, new_index}); + if (inserted) { cut->terms.push_back(term); - } else { - ++num_merges_; - cut->terms[entry_index].coeff = new_coeff; + return; } + + // If the referred var is not right, replace the entry. 
+ entry_index = it->second; + if (entry_index >= new_index || + cut->terms[entry_index].expr_vars[0] != var) { + it->second = new_index; + cut->terms.push_back(term); + return; + } + + // If the sign is not right, replace the entry. + if ((cut->terms[entry_index].expr_coeffs[0] > 0) != is_positive) { + it->second = new_index; + cut->terms.push_back(term); + return; + } + } + DCHECK_EQ(cut->terms[entry_index].expr_vars[0], var); + DCHECK_EQ((cut->terms[entry_index].expr_coeffs[0] > 0), is_positive); + + // We can only merge the term if term.coeff + old_coeff do not overflow and + // if t * new_coeff do not overflow. + // + // If we cannot merge the term, we will keep them separate. The produced cut + // will be less strong, but can still be used. + const IntegerValue new_coeff = + CapAddI(cut->terms[entry_index].coeff, term.coeff); + if (AtMinOrMaxInt64I(new_coeff) || ProdOverflow(t, new_coeff)) { + // If we cannot merge the term, we keep them separate. + cut->terms.push_back(term); + } else { + ++num_merges_; + cut->terms[entry_index].coeff = new_coeff; } } @@ -660,7 +696,7 @@ double IntegerRoundingCutHelper::GetScaledViolation( // Even before we finish the adjust, we can have a lower bound on the // activily loss using this divisor, and so we can abort early. This is // similar to what is done below. - double max_violation = ToDouble(initial_rhs_remainder); + double max_violation = static_cast(initial_rhs_remainder.value()); for (int i = 0; i < cut.num_relevant_entries; ++i) { const CutTerm& entry = cut.terms[i]; const IntegerValue remainder = PositiveRemainder(entry.coeff, divisor); @@ -668,7 +704,8 @@ double IntegerRoundingCutHelper::GetScaledViolation( if (remainder <= initial_rhs_remainder) { // We do not know exactly f() yet, but it will always round to the // floor of the division by divisor in this case. 
- max_violation -= ToDouble(remainder) * entry.lp_value; + max_violation -= + static_cast(remainder.value()) * entry.lp_value; if (max_violation <= 1e-3) return 0.0; continue; } @@ -752,7 +789,7 @@ bool IntegerRoundingCutHelper::ComputeCut( // This should be better except it can mess up the norm and the divisors. cut_ = base_ct; if (options.use_ib_before_heuristic && ib_processor != nullptr) { - cut_builder_.ClearIndices(); + ib_processor->BaseCutBuilder()->ClearNumMerges(); const int old_size = static_cast(cut_.terms.size()); bool abort = true; for (int i = 0; i < old_size; ++i) { @@ -764,7 +801,7 @@ bool IntegerRoundingCutHelper::ComputeCut( cut_.terms[i].Complement(&cut_.rhs); if (ib_processor->TryToExpandWithLowerImpliedbound( IntegerValue(1), i, - /*complement=*/true, &cut_, &cut_builder_)) { + /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { ++total_num_initial_ibs_; abort = false; continue; @@ -774,12 +811,13 @@ bool IntegerRoundingCutHelper::ComputeCut( if (ib_processor->TryToExpandWithLowerImpliedbound( IntegerValue(1), i, - /*complement=*/true, &cut_, &cut_builder_)) { + /*complement=*/true, &cut_, ib_processor->BaseCutBuilder())) { abort = false; ++total_num_initial_ibs_; } } - total_num_initial_merges_ += cut_builder_.NumMergesSinceLastClear(); + total_num_initial_merges_ += + ib_processor->BaseCutBuilder()->NumMergesSinceLastClear(); // TODO(user): We assume that this is called with and without the option // use_ib_before_heuristic, so that we can abort if no IB has been applied @@ -1049,16 +1087,16 @@ struct LargeCoeffFirst { struct SmallContribFirst { bool operator()(const CutTerm& a, const CutTerm& b) const { - const double contrib_a = a.lp_value * AsDouble(a.coeff); - const double contrib_b = b.lp_value * AsDouble(b.coeff); + const double contrib_a = a.lp_value * static_cast(a.coeff.value()); + const double contrib_b = b.lp_value * static_cast(b.coeff.value()); return contrib_a < contrib_b; } }; struct LargeContribFirst { bool 
operator()(const CutTerm& a, const CutTerm& b) const { - const double contrib_a = a.lp_value * AsDouble(a.coeff); - const double contrib_b = b.lp_value * AsDouble(b.coeff); + const double contrib_a = a.lp_value * static_cast(a.coeff.value()); + const double contrib_b = b.lp_value * static_cast(b.coeff.value()); return contrib_a > contrib_b; } }; @@ -1069,15 +1107,19 @@ struct LargeContribFirst { // lead to the same formula as for Booleans. struct KnapsackAdd { bool operator()(const CutTerm& a, const CutTerm& b) const { - const double contrib_a = a.LpDistToMaxValue() / AsDouble(a.coeff); - const double contrib_b = b.LpDistToMaxValue() / AsDouble(b.coeff); + const double contrib_a = + a.LpDistToMaxValue() / static_cast(a.coeff.value()); + const double contrib_b = + b.LpDistToMaxValue() / static_cast(b.coeff.value()); return contrib_a < contrib_b; } }; struct KnapsackRemove { bool operator()(const CutTerm& a, const CutTerm& b) const { - const double contrib_a = a.LpDistToMaxValue() / AsDouble(a.coeff); - const double contrib_b = b.LpDistToMaxValue() / AsDouble(b.coeff); + const double contrib_a = + a.LpDistToMaxValue() / static_cast(a.coeff.value()); + const double contrib_b = + b.LpDistToMaxValue() / static_cast(b.coeff.value()); return contrib_a > contrib_b; } }; @@ -1160,15 +1202,22 @@ int CoverCutHelper::GetCoverSize(int relevant_size) { // Try a simple cover heuristic. // Look for violated CUT of the form: sum (UB - X) or (X - LB) >= 1. -int CoverCutHelper::GetCoverSizeForBooleans(int relevant_size) { - if (relevant_size == 0) return 0; - +int CoverCutHelper::GetCoverSizeForBooleans() { // Sorting can be slow, so we start by splitting the vector in 3 parts // [can always be in cover, candidates, can never be in cover]. int part1 = 0; + int relevant_size = cut_.terms.size(); const double threshold = 1.0 - 1.0 / static_cast(relevant_size); for (int i = 0; i < relevant_size;) { const double lp_value = cut_.terms[i].lp_value; + + // Exclude non-Boolean. 
+ if (cut_.terms[i].bound_diff > 1) { + --relevant_size; + std::swap(cut_.terms[i], cut_.terms[relevant_size]); + continue; + } + if (lp_value >= threshold) { // Move to part 1. std::swap(cut_.terms[i], cut_.terms[part1]); @@ -1248,7 +1297,7 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, // Tricky: This only work because the cut absl128 rhs is not changed by these // operations. if (ib_processor != nullptr) { - cut_builder_.ClearIndices(); + ib_processor->BaseCutBuilder()->ClearNumMerges(); const int old_size = static_cast(cut_.terms.size()); for (int i = 0; i < old_size; ++i) { // We only look at non-Boolean with an lp value not close to the upper @@ -1259,7 +1308,7 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, if (ib_processor->TryToExpandWithLowerImpliedbound( IntegerValue(1), i, - /*complement=*/false, &cut_, &cut_builder_)) { + /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { ++cover_stats_.num_initial_ibs; } } @@ -1274,10 +1323,18 @@ bool CoverCutHelper::TrySimpleKnapsack(const CutData& input_ct, } const int base_size = static_cast(cut_.terms.size()); - int cover_size = + const int cover_size = has_relevant_int ? GetCoverSize(base_size) - : GetCoverSizeForBooleans(base_size); + : GetCoverSizeForBooleans(); + if (!has_relevant_int && ib_processor == nullptr) { + // If some implied bound substitution are possible, we do not cache anything + // currently because the logic is currently sighlty different betweent the + // two code. Fix? 
+ has_bool_base_ct_ = true; + bool_base_ct_ = cut_; + bool_cover_size_ = cover_size; + } if (cover_size == 0) return false; // The cut is just obtained by complementing the variable in the cover and @@ -1352,17 +1409,9 @@ bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct, ImpliedBoundsProcessor* ib_processor) { InitializeCut(input_ct); - bool has_large_coeff = false; - for (const CutTerm& term : cut_.terms) { - if (IntTypeAbs(term.coeff) > 1'000'000) { - has_large_coeff = true; - break; - } - } - // TODO(user): Change the heuristic to depends on the lp_value of the implied - // bounds. This way we can exactly match what happen in FlowCoverCutHelper and - // remove the code there. + // bounds. This way we can exactly match what happen in the old + // FlowCoverCutHelper. const int base_size = static_cast(cut_.terms.size()); const int cover_size = GetCoverSize(base_size); if (cover_size == 0) return false; @@ -1393,6 +1442,14 @@ bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct, return false; } + bool has_large_coeff = false; + for (const CutTerm& term : cut_.terms) { + if (IntTypeAbs(term.coeff) > 1'000'000) { + has_large_coeff = true; + break; + } + } + // TODO(user): Shouldn't we just use rounding f() with maximum coeff to allows // lift of all other terms? but then except for the heuristic the cut is // really similar to the cover cut. @@ -1454,33 +1511,36 @@ bool CoverCutHelper::TrySingleNodeFlow(const CutData& input_ct, bool CoverCutHelper::TryWithLetchfordSouliLifting( const CutData& input_ct, ImpliedBoundsProcessor* ib_processor) { - InitializeCut(input_ct); + int cover_size; + if (has_bool_base_ct_) { + // We already called GetCoverSizeForBooleans() and ib_processor was nullptr, + // so reuse that info. + CHECK(ib_processor == nullptr); + InitializeCut(bool_base_ct_); + cover_size = bool_cover_size_; + } else { + InitializeCut(input_ct); - // Perform IB expansion with no restriction, all coeff should still be - // positive. 
- // - // TODO(user): Merge Boolean terms that are complement of each other. - if (ib_processor != nullptr) { - cut_builder_.ClearIndices(); - const int old_size = static_cast(cut_.terms.size()); - for (int i = 0; i < old_size; ++i) { - if (cut_.terms[i].bound_diff <= 1) continue; - if (ib_processor->TryToExpandWithLowerImpliedbound( - IntegerValue(1), i, - /*complement=*/false, &cut_, &cut_builder_)) { - ++ls_stats_.num_initial_ibs; + // Perform IB expansion with no restriction, all coeff should still be + // positive. + // + // TODO(user): Merge Boolean terms that are complement of each other. + if (ib_processor != nullptr) { + ib_processor->BaseCutBuilder()->ClearNumMerges(); + const int old_size = static_cast(cut_.terms.size()); + for (int i = 0; i < old_size; ++i) { + if (cut_.terms[i].bound_diff <= 1) continue; + if (ib_processor->TryToExpandWithLowerImpliedbound( + IntegerValue(1), i, + /*complement=*/false, &cut_, ib_processor->BaseCutBuilder())) { + ++ls_stats_.num_initial_ibs; + } } } + + // TODO(user): we currently only deal with Boolean in the cover. Fix. + cover_size = GetCoverSizeForBooleans(); } - - // TODO(user): we currently only deal with Boolean in the cover. Fix. - const int num_bools = - std::partition(cut_.terms.begin(), cut_.terms.end(), - [](const CutTerm& t) { return t.bound_diff == 1; }) - - cut_.terms.begin(); - if (num_bools == 0) return false; - - const int cover_size = GetCoverSizeForBooleans(num_bools); if (cover_size == 0) return false; // We don't support big rhs here. 
@@ -1965,7 +2025,7 @@ ImpliedBoundsProcessor::GetCachedImpliedBoundInfo(IntegerVariable var) const { ImpliedBoundsProcessor::BestImpliedBoundInfo ImpliedBoundsProcessor::ComputeBestImpliedBound( IntegerVariable var, - const absl::StrongVector& lp_values) { + const util_intops::StrongVector& lp_values) { auto it = cache_.find(var); if (it != cache_.end()) return it->second; BestImpliedBoundInfo result; @@ -2032,7 +2092,7 @@ ImpliedBoundsProcessor::ComputeBestImpliedBound( } void ImpliedBoundsProcessor::RecomputeCacheAndSeparateSomeImpliedBoundCuts( - const absl::StrongVector& lp_values) { + const util_intops::StrongVector& lp_values) { cache_.clear(); for (const IntegerVariable var : implied_bounds_->VariablesWithImpliedBounds()) { @@ -2047,7 +2107,7 @@ bool ImpliedBoundsProcessor::DecomposeWithImpliedLowerBound( // We only want to expand non-Boolean and non-slack term! if (term.bound_diff <= 1) return false; if (!term.IsSimple()) return false; - CHECK_EQ(IntTypeAbs(term.expr_coeffs[0]), 1); + DCHECK_EQ(IntTypeAbs(term.expr_coeffs[0]), 1); // Try lower bounded direction for implied bound. // This kind should always be beneficial if it exists: @@ -2067,15 +2127,11 @@ bool ImpliedBoundsProcessor::DecomposeWithImpliedLowerBound( // // TODO(user): Only do it if coeff_b > 0 ? But again we could still merge // B with an existing Boolean for a better cut even if coeff_b == 0. - const IntegerVariable ib_var = term.expr_coeffs[0] > 0 - ? 
term.expr_vars[0] - : NegationOf(term.expr_vars[0]); - const ImpliedBoundsProcessor::BestImpliedBoundInfo info = - GetCachedImpliedBoundInfo(ib_var); + if (term.cached_implied_lb < 0) return false; + const BestImpliedBoundInfo info = cached_data_[term.cached_implied_lb]; const IntegerValue lb = -term.expr_offset; const IntegerValue bound_diff = info.implied_bound - lb; if (bound_diff <= 0) return false; - if (info.bool_var == kNoIntegerVariable) return false; if (ProdOverflow(factor_t, CapProdI(term.coeff, bound_diff))) return false; // We have X/-X = info.diff * Boolean + slack. @@ -2248,309 +2304,36 @@ bool ImpliedBoundsProcessor::TryToExpandWithLowerImpliedbound( return true; } -FlowCoverCutHelper::~FlowCoverCutHelper() { - if (!VLOG_IS_ON(1)) return; - if (shared_stats_ == nullptr) return; - std::vector> stats; - stats.push_back({"flow_cover/num_aborts", num_aborts_}); - shared_stats_->AddStats(stats); -} +bool ImpliedBoundsProcessor::CacheDataForCut(IntegerVariable first_slack, + CutData* cut) { + base_cut_builder_.ClearIndices(); + cached_data_.clear(); -std::string SingleNodeFlow::DebugString() const { - return absl::StrCat("#in:", in_flow.size(), " #out:", out_flow.size(), - " demand:", demand, " #bool:", num_bool, - " #lb:", num_to_lb, " #ub:", num_to_ub); -} + const int size = cut->terms.size(); + for (int i = 0; i < size; ++i) { + const CutTerm& term = cut->terms[i]; + if (!term.IsSimple()) continue; + if (term.IsBoolean()) continue; + if (term.expr_vars[0] >= first_slack) continue; -// The flow info of a linear term is always the same. 
-void FlowCoverCutHelper::FinishAndAddFlowInfo(const CutTerm& term, - FlowInfo* info, - SingleNodeFlow* result) const { - const IntegerValue positive_coeff = IntTypeAbs(term.coeff); - info->capacity = positive_coeff * term.bound_diff; - info->flow_lp_value = ToDouble(positive_coeff) * term.lp_value; - info->flow_expr.var = term.expr_vars[0]; - info->flow_expr.coeff = positive_coeff * term.expr_coeffs[0]; - info->flow_expr.constant = positive_coeff * term.expr_offset; - if (term.coeff > 0) { - result->in_flow.push_back(*info); - } else { - result->out_flow.push_back(*info); - } -} - -bool FlowCoverCutHelper::TryXminusLB(const CutTerm& term, - ImpliedBoundsProcessor* ib_helper, - SingleNodeFlow* result) const { - // We want an implied upper bound on the term. - const ImpliedBoundsProcessor::BestImpliedBoundInfo ib = - ib_helper->GetCachedImpliedBoundInfo(term.expr_coeffs[0] > 0 - ? NegationOf(term.expr_vars[0]) - : term.expr_vars[0]); - if (ib.bool_var == kNoIntegerVariable) return false; - - // We want the implied_bound to force the term to zero. - // - If coeff > 0, -x >= implied_bound, so c * x <= -c * implied_bound - // - If coeff < 0, x >= implied_bound, so c * x <= c * implied_bound - if (term.expr_offset != IntTypeAbs(term.expr_coeffs[0]) * ib.implied_bound) { - return false; - } - - // Note that the meaning is reversed since bool at true implies flow at zero - // and we want the opposite. 
- FlowInfo info; - if (ib.is_positive) { - info.bool_lp_value = 1 - ib.bool_lp_value; - info.bool_expr.var = ib.bool_var; - info.bool_expr.coeff = -1; - info.bool_expr.constant = 1; - } else { - info.bool_lp_value = ib.bool_lp_value; - info.bool_expr.var = ib.bool_var; - info.bool_expr.coeff = 1; - } - - FinishAndAddFlowInfo(term, &info, result); - return true; -} - -bool FlowCoverCutHelper::TryUBminusX(const CutTerm& term, - ImpliedBoundsProcessor* ib_helper, - SingleNodeFlow* result) const { - CutTerm copy = term; - copy.Complement(&result->demand); - if (TryXminusLB(copy, ib_helper, result)) return true; - copy.Complement(&result->demand); - return false; -} - -bool FlowCoverCutHelper::ComputeFlowCoverRelaxationAndGenerateCut( - const CutData& base_ct, ImpliedBoundsProcessor* ib_helper) { - if (!ComputeFlowCoverRelaxation(base_ct, &snf_, ib_helper)) { - return false; - } - return GenerateCut(snf_); -} - -bool FlowCoverCutHelper::ComputeFlowCoverRelaxation( - const CutData& base_ct, SingleNodeFlow* snf, - ImpliedBoundsProcessor* ib_helper) { - snf->clear(); - snf->demand = base_ct.rhs; - for (const CutTerm& term : base_ct.terms) { - // We do not support complex terms, but we shouldn't get any. - if (term.expr_coeffs[1] != 0) { - ++num_aborts_; - return false; + // Cache the BestImpliedBoundInfo if relevant. + const IntegerVariable ib_var = term.expr_coeffs[0] > 0 + ? term.expr_vars[0] + : NegationOf(term.expr_vars[0]); + BestImpliedBoundInfo lb_info = GetCachedImpliedBoundInfo(ib_var); + if (lb_info.bool_var != kNoIntegerVariable) { + cut->terms[i].cached_implied_lb = cached_data_.size(); + cached_data_.emplace_back(std::move(lb_info)); } - - // Hack: abort if coefficient in the base constraint are too large. - // Otherwise we can generate cut with coeff too large as well... - if (IntTypeAbs(term.coeff) > 1'000'000) return false; - - // Fixed variable shouldn't really appear here. 
- if (term.bound_diff == 0) { - continue; - } - - // We can either use (X - LB) or (UB - X) for a variable in [0, capacity]. - const IntegerValue capacity( - CapProdI(IntTypeAbs(term.coeff), term.bound_diff)); - if (capacity >= kMaxIntegerValue) return false; - - // We have a Boolean, this is an easy case. - if (term.bound_diff == 1) { - ++snf->num_bool; - FlowInfo info; - info.bool_lp_value = term.lp_value; - info.bool_expr.var = term.expr_vars[0]; - info.bool_expr.coeff = term.expr_coeffs[0]; - info.bool_expr.constant = term.expr_offset; - FinishAndAddFlowInfo(term, &info, snf); - continue; - } - - // TODO(user): Improve our logic to decide what implied bounds to use. We - // rely on the best implied bounds, not necessarily one implying var at its - // level zero bound like we need here. - const bool prefer_lb = term.lp_value > term.LpDistToMaxValue(); - if (prefer_lb) { - if (TryXminusLB(term, ib_helper, snf)) { - ++snf->num_to_lb; - continue; - } - if (TryUBminusX(term, ib_helper, snf)) { - ++snf->num_to_ub; - continue; - } - } else { - if (TryUBminusX(term, ib_helper, snf)) { - ++snf->num_to_ub; - continue; - } - if (TryXminusLB(term, ib_helper, snf)) { - ++snf->num_to_lb; - continue; - } - } - - // Ignore term. - if (term.coeff < 0) { - CutTerm copy = term; - copy.Complement(&snf->demand); + BestImpliedBoundInfo ub_info = + GetCachedImpliedBoundInfo(NegationOf(ib_var)); + if (ub_info.bool_var != kNoIntegerVariable) { + cut->terms[i].cached_implied_ub = cached_data_.size(); + cached_data_.emplace_back(std::move(ub_info)); } } - return true; -} - -// Reference: "Lifted flow cover inequalities for mixed 0-1 integer programs". -// Zonghao Gu, George L. Nemhauser, Martin W.P. Savelsbergh. 1999. -bool FlowCoverCutHelper::GenerateCut(const SingleNodeFlow& data) { - // TODO(user): Support int128 demand. 
- if (data.empty() || - data.demand > absl::int128(std::numeric_limits::max()) || - data.demand < absl::int128(std::numeric_limits::min())) { - ++num_aborts_; - return false; - } - IntegerValue demand = static_cast(data.demand); - const double tolerance = 1e-2; - - // We are looking for two subsets CI (in-flow subset) and CO (out-flow subset) - // so that sum_CI capa - sum_CO capa = demand + slack, slack > 0. - // - // Moreover we want to maximize sum_CI bool_lp_value + sum_CO bool_lp_value. - std::vector in_cover(data.in_flow.size(), false); - std::vector out_cover(data.out_flow.size(), false); - - // Start by selecting all the possible in_flow (except low bool value) and - // all the out_flow with a bool value close to one. - IntegerValue slack; - { - IntegerValue sum_in = 0; - IntegerValue sum_out = 0; - for (int i = 0; i < data.in_flow.size(); ++i) { - const FlowInfo& info = data.in_flow[i]; - if (info.bool_lp_value > tolerance) { - in_cover[i] = true; - sum_in += info.capacity; - } - } - for (int i = 0; i < data.out_flow.size(); ++i) { - const FlowInfo& info = data.out_flow[i]; - if (info.bool_lp_value > 1 - tolerance) { - out_cover[i] = true; - sum_out += info.capacity; - } - } - - // This is the best slack we can hope for. - slack = sum_in - sum_out - demand; - } - if (slack <= 0) return false; - - // Now greedily remove item from the in_cover and add_item to the out_cover - // as long as we have remaining slack. We prefer item with a high score an - // low slack variation. - // - // Note that this is just the classic greedy heuristic of a knapsack problem. - if (slack > 1) { - struct Item { - bool correspond_to_in_flow; - int index; - double score; - }; - std::vector actions; - for (int i = 0; i < data.in_flow.size(); ++i) { - if (!in_cover[i]) continue; - const FlowInfo& info = data.in_flow[i]; - if (info.bool_lp_value > 1 - tolerance) continue; // Do not remove these. 
- actions.push_back( - {true, i, (1 - info.bool_lp_value) / ToDouble(info.capacity)}); - } - for (int i = 0; i < data.out_flow.size(); ++i) { - if (out_cover[i]) continue; - const FlowInfo& info = data.out_flow[i]; - if (info.bool_lp_value < tolerance) continue; // Do not add these. - actions.push_back( - {false, i, info.bool_lp_value / ToDouble(info.capacity)}); - } - - // Sort by decreasing score. - std::sort(actions.begin(), actions.end(), - [](const Item& a, const Item& b) { return a.score > b.score; }); - - // Greedily remove/add item as long as we have slack. - for (const Item& item : actions) { - if (item.correspond_to_in_flow) { - const IntegerValue delta = data.in_flow[item.index].capacity; - if (delta >= slack) continue; - slack -= delta; - in_cover[item.index] = false; - } else { - const IntegerValue delta = data.out_flow[item.index].capacity; - if (delta >= slack) continue; - slack -= delta; - out_cover[item.index] = true; - } - } - } - - // The non-lifted simple generalized flow cover inequality (SGFCI) cut will be - // demand - sum_CI flow_i - sum_CI++ (capa_i - slack)(1 - bool_i) - // + sum_CO capa_i + sum_L- slack * bool_i + sum_L-- flow_i >=0 - // - // Where CI++ are the arc with capa > slack in CI. - // And L is O \ CO. L- arc with capa > slack and L-- the other. - // - // TODO(user): Also try to generate the extended generalized flow cover - // inequality (EGFCI). - CHECK_GT(slack, 0); - - // For display only. - slack_ = slack; - num_in_ignored_ = 0; - num_in_flow_ = 0; - num_in_bin_ = 0; - num_out_capa_ = 0; - num_out_flow_ = 0; - num_out_bin_ = 0; - - // Note that we need to generate a <= version, so we negate everything. 
- cut_builder_.Clear(); - for (int i = 0; i < data.in_flow.size(); ++i) { - const FlowInfo& info = data.in_flow[i]; - if (!in_cover[i]) { - num_in_ignored_++; - continue; - } - num_in_flow_++; - cut_builder_.AddTerm(info.flow_expr, 1); - if (info.capacity > slack) { - num_in_bin_++; - const IntegerValue coeff = info.capacity - slack; - cut_builder_.AddConstant(coeff); - cut_builder_.AddTerm(info.bool_expr, -coeff); - } - } - for (int i = 0; i < data.out_flow.size(); ++i) { - const FlowInfo& info = data.out_flow[i]; - if (out_cover[i]) { - num_out_capa_++; - cut_builder_.AddConstant(-info.capacity); - } else if (info.capacity > slack) { - num_out_bin_++; - cut_builder_.AddTerm(info.bool_expr, -slack); - } else { - num_out_flow_++; - cut_builder_.AddTerm(info.flow_expr, -1); - } - } - - // TODO(user): Lift the cut. - cut_ = cut_builder_.BuildConstraint(kMinIntegerValue, demand); - return true; + return !cached_data_.empty(); } void SumOfAllDiffLowerBounder::Clear() { @@ -2621,7 +2404,7 @@ namespace { void TryToGenerateAllDiffCut( const std::vector>& sorted_exprs_lp, const IntegerTrail& integer_trail, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, TopNCuts& top_n_cuts, Model* model) { const int num_exprs = sorted_exprs_lp.size(); @@ -2734,7 +2517,7 @@ IntegerValue MaxCornerDifference(const IntegerVariable var, IntegerValue MPlusCoefficient( const std::vector& x_vars, const std::vector& exprs, - const absl::StrongVector& variable_partition, + const util_intops::StrongVector& variable_partition, const int max_index, const IntegerTrail& integer_trail) { IntegerValue coeff = exprs[max_index].offset; // TODO(user): This algo is quadratic since GetCoefficientOfPositiveVar() @@ -2756,7 +2539,7 @@ IntegerValue MPlusCoefficient( double ComputeContribution( const IntegerVariable xi_var, const std::vector& z_vars, const std::vector& exprs, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, const 
IntegerTrail& integer_trail, const int target_index) { CHECK_GE(target_index, 0); CHECK_LT(target_index, exprs.size()); @@ -2800,10 +2583,11 @@ CutGenerator CreateLinMaxCutGenerator( integer_trail, model](LinearConstraintManager* manager) { const auto& lp_values = manager->LpValues(); - absl::StrongVector variable_partition( + util_intops::StrongVector variable_partition( lp_values.size(), -1); - absl::StrongVector variable_partition_contrib( - lp_values.size(), std::numeric_limits::infinity()); + util_intops::StrongVector + variable_partition_contrib(lp_values.size(), + std::numeric_limits::infinity()); for (int expr_index = 0; expr_index < num_exprs; ++expr_index) { for (const IntegerVariable var : x_vars) { const double contribution = ComputeContribution( diff --git a/ortools/sat/cuts.h b/ortools/sat/cuts.h index c7b6639ffd..7e975cc6d4 100644 --- a/ortools/sat/cuts.h +++ b/ortools/sat/cuts.h @@ -93,9 +93,17 @@ struct CutTerm { // X = the given LinearExpression. // We only support size 1 or 2 here which allow to inline the memory. // When a coefficient is zero, we don't care about the variable. + // + // TODO(user): We might want to store that elsewhere, as sorting CutTerm is a + // bit slow and we don't need to look at that in most places. Same for the + // cached_implied_lb/ub below. IntegerValue expr_offset = IntegerValue(0); std::array expr_vars; std::array expr_coeffs; + + // Refer to cached_data_ in ImpliedBoundsProcessor. + int cached_implied_lb = -1; + int cached_implied_ub = -1; }; // Our cut are always of the form linear_expression <= rhs. @@ -104,7 +112,7 @@ struct CutData { // Returns false if we encounter any integer overflow. bool FillFromLinearConstraint( const LinearConstraint& base_ct, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, IntegerTrail* integer_trail); bool FillFromParallelVectors(IntegerValue ub, @@ -148,18 +156,20 @@ class CutDataBuilder { // is the only case we need. 
void ClearIndices(); void AddOrMergeTerm(const CutTerm& term, IntegerValue t, CutData* cut); + + void ClearNumMerges() { num_merges_ = 0; } int NumMergesSinceLastClear() const { return num_merges_; } // Returns false if we encounter an integer overflow. bool ConvertToLinearConstraint(const CutData& cut, LinearConstraint* output); private: - void RegisterAllBooleansTerms(const CutData& cut); + void RegisterAllBooleanTerms(const CutData& cut); int num_merges_ = 0; bool constraint_is_indexed_ = false; - absl::flat_hash_map direct_index_; - absl::flat_hash_map complemented_index_; + absl::flat_hash_map bool_index_; + absl::flat_hash_map secondary_bool_index_; absl::btree_map tmp_map_; }; @@ -186,7 +196,7 @@ class ImpliedBoundsProcessor { // Important: This must be called before we process any constraints with a // different lp_values or level zero bounds. void RecomputeCacheAndSeparateSomeImpliedBoundCuts( - const absl::StrongVector& lp_values); + const util_intops::StrongVector& lp_values); // This assumes the term is simple: expr[0] = var - LB / UB - var. We use an // implied lower bound on this expr, independently of the term.coeff sign. @@ -216,6 +226,17 @@ class ImpliedBoundsProcessor { const std::function& f, IntegerValue factor_t, CutData* cut, CutDataBuilder* builder); + // Precomputes quantities used by all cut generation. + // This allows to do that once rather than 6 times. + // Return false if there are no exploitable implied bounds. + bool CacheDataForCut(IntegerVariable first_slack, CutData* cut); + + // All our cut code use the same base cut (modulo complement), so we reuse the + // hash-map of where boolean are in the cut. Note that even if we add new + // entry that are no longer there for another cut algo, we can still reuse the + // same hash-map. 
+ CutDataBuilder* BaseCutBuilder() { return &base_cut_builder_; } + bool TryToExpandWithLowerImpliedbound(IntegerValue factor_t, int i, bool complement, CutData* cut, CutDataBuilder* builder); @@ -237,8 +258,9 @@ class ImpliedBoundsProcessor { IntegerVariable bool_var = kNoIntegerVariable; double SlackLpValue(IntegerValue lb) const { - const double bool_term = ToDouble(implied_bound - lb) * bool_lp_value; - return var_lp_value - ToDouble(lb) - bool_term; + const double bool_term = + static_cast((implied_bound - lb).value()) * bool_lp_value; + return var_lp_value - static_cast(lb.value()) - bool_term; } std::string DebugString() const { @@ -255,11 +277,15 @@ class ImpliedBoundsProcessor { private: BestImpliedBoundInfo ComputeBestImpliedBound( IntegerVariable var, - const absl::StrongVector& lp_values); + const util_intops::StrongVector& lp_values); absl::flat_hash_set lp_vars_; mutable absl::flat_hash_map cache_; + // Temporary data used by CacheDataForCut(). + CutDataBuilder base_cut_builder_; + std::vector cached_data_; + TopNCuts ib_cut_pool_ = TopNCuts(50); // Data from the constructor. @@ -267,126 +293,6 @@ class ImpliedBoundsProcessor { ImpliedBounds* implied_bounds_; }; -// A single node flow relaxation is a constraint of the form -// Sum in_flow - Sum out_flow <= demand -// where each flow variable F_i is in [0, capacity_i] and satisfy -// F_i <= capacity_i B_i -// with B_i a Boolean representing the arc usage. -// -// From a generic constraint sum coeff_i X_i <= b, we try to put it in this -// format. We can first transform all variables to be in [0, max_value]. -// -// Then we cover different cases: -// 1/ A coeff * Boolean, can be easily transformed. -// 2/ A coeff * Integer in [0, capacity] with Bool => integer == 0 too. -// 3/ For a general integer, we can always use a Bool == 1 for the arc usage. -// -// TODO(user): cover case 3/. We loose a lot of relaxation here, except if -// the variable is at is upper/lower bound. 
-// -// TODO(user): Altough the cut should still be correct, we might use the same -// Boolean more than once in the implied bound. Or this Boolean might already -// appear in the constraint. Not sure if we can do something smarter here. -struct FlowInfo { - // Flow is always in [0, capacity] with the given current value in the - // lp relaxation. Now that we usually only consider tight constraint were - // flow_lp_value = capacity * bool_lp_value. - IntegerValue capacity; - double bool_lp_value; - - // TODO(user): We don't use this in the heuristic currently. - double flow_lp_value; - - // The definition of the flow variable and the arc usage variable in term - // of original problem variables. After we compute a cut on the flow and - // usage variable, we can just directly substitute these variable by the - // expression here to have a cut in term of the original problem variables. - AffineExpression flow_expr; - AffineExpression bool_expr; -}; - -struct SingleNodeFlow { - bool empty() const { return in_flow.empty() && out_flow.empty(); } - void clear() { - demand = 0; - in_flow.clear(); - out_flow.clear(); - num_bool = 0; - num_to_lb = 0; - num_to_ub = 0; - } - std::string DebugString() const; - - absl::int128 demand; - std::vector in_flow; - std::vector out_flow; - - // Stats filled during extraction. - int num_bool = 0; - int num_to_lb = 0; - int num_to_ub = 0; -}; - -class FlowCoverCutHelper { - public: - ~FlowCoverCutHelper(); - - // Extract a SingleNodeFlow relaxation from the base_ct and try to generate - // a cut from it. - bool ComputeFlowCoverRelaxationAndGenerateCut( - const CutData& base_ct, ImpliedBoundsProcessor* ib_helper); - - // Try to generate a cut for the given single node flow problem. Returns true - // if a cut was generated. It can be accessed by cut(). - bool GenerateCut(const SingleNodeFlow& data); - - // If successful, info about the last generated cut. 
- const LinearConstraint& cut() const { return cut_; } - - // Single line of text that we append to the cut log line. - std::string Info() const { - return absl::StrCat(" slack=", slack_.value(), " #in=", num_in_ignored_, - "|", num_in_flow_, "|", num_in_bin_, - " #out:", num_out_capa_, "|", num_out_flow_, "|", - num_out_bin_); - } - - void SetSharedStatistics(SharedStatistics* stats) { shared_stats_ = stats; } - - private: - // Try to extract a nice SingleNodeFlow relaxation for the given upper bounded - // linear constraint. - bool ComputeFlowCoverRelaxation(const CutData& base_ct, SingleNodeFlow* snf, - ImpliedBoundsProcessor* ib_helper); - - // Helpers used by ComputeFlowCoverRelaxation() to convert one linear term. - bool TryXminusLB(const CutTerm& term, ImpliedBoundsProcessor* ib_helper, - SingleNodeFlow* result) const; - bool TryUBminusX(const CutTerm& term, ImpliedBoundsProcessor* ib_helper, - SingleNodeFlow* result) const; - void FinishAndAddFlowInfo(const CutTerm& term, FlowInfo* info, - SingleNodeFlow* result) const; - - // Temporary memory to avoid reallocating the vector. - SingleNodeFlow snf_; - - // Stats, mainly to debug/investigate the code. - IntegerValue slack_; - int num_in_ignored_; - int num_in_flow_; - int num_in_bin_; - int num_out_capa_; - int num_out_flow_; - int num_out_bin_; - - LinearConstraintBuilder cut_builder_; - LinearConstraint cut_; - - // Stats. - SharedStatistics* shared_stats_ = nullptr; - int64_t num_aborts_ = 0; -}; - // Visible for testing. Returns a function f on integers such that: // - f is non-decreasing. // - f is super-additive: f(a) + f(b) <= f(a + b) @@ -605,16 +511,20 @@ class CoverCutHelper { void SetSharedStatistics(SharedStatistics* stats) { shared_stats_ = stats; } + void ClearCache() { has_bool_base_ct_ = false; } + private: void InitializeCut(const CutData& input_ct); // This looks at base_ct_ and reoder the terms so that the first ones are in // the cover. return zero if no interesting cover was found. 
- int GetCoverSizeForBooleans(int relevant_size); - template int GetCoverSize(int relevant_size); + // Same as GetCoverSize() but only look at Booleans, and use a different + // heuristic. + int GetCoverSizeForBooleans(); + template int MinimizeCover(int cover_size, absl::int128 slack); @@ -623,6 +533,11 @@ class CoverCutHelper { CutData temp_cut_; CutDataBuilder cut_builder_; + // Hack to not sort twice. + bool has_bool_base_ct_ = false; + CutData bool_base_ct_; + int bool_cover_size_ = 0; + // Stats. SharedStatistics* shared_stats_ = nullptr; int64_t num_lifting_ = 0; diff --git a/ortools/sat/diffn_cuts.cc b/ortools/sat/diffn_cuts.cc index 10b49b9289..8c4b77b2d1 100644 --- a/ortools/sat/diffn_cuts.cc +++ b/ortools/sat/diffn_cuts.cc @@ -98,7 +98,7 @@ struct DiffnEnergyEvent : DiffnBaseEvent { // It must be called before the EnergyEvent is used. ABSL_MUST_USE_RESULT bool FillEnergyLp( AffineExpression x_size, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, Model* model) { LinearConstraintBuilder tmp_energy(model); if (IsPresent()) { diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index fb48a1fd01..981da952e0 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -342,10 +342,10 @@ absl::Span FilterBoxesAndRandomize( } absl::Span FilterBoxesThatAreTooLarge( - const std::vector& cached_rectangles, - const std::vector& energies, absl::Span boxes) { + absl::Span cached_rectangles, + absl::Span energies, absl::Span boxes) { // Sort the boxes by increasing area. 
- std::sort(boxes.begin(), boxes.end(), [&cached_rectangles](int a, int b) { + std::sort(boxes.begin(), boxes.end(), [cached_rectangles](int a, int b) { return cached_rectangles[a].Area() < cached_rectangles[b].Area(); }); diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index bd7bfef3f4..cc5f4faae2 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -159,8 +159,8 @@ absl::Span FilterBoxesAndRandomize( // box" conflict. As we remove this box, the total energy decrease, so we might // remove more. This works in O(n log n). absl::Span FilterBoxesThatAreTooLarge( - const std::vector& cached_rectangles, - const std::vector& energies, absl::Span boxes); + absl::Span cached_rectangles, + absl::Span energies, absl::Span boxes); struct IndexedInterval { int index; diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 60fe83d05c..88958e3816 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -102,28 +102,28 @@ void AddDisjunctive(const std::vector& intervals, { // Only one direction is needed by this one. 
DisjunctiveOverloadChecker* overload_checker = - new DisjunctiveOverloadChecker(helper); + new DisjunctiveOverloadChecker(helper, model); const int id = overload_checker->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 1); model->TakeOwnership(overload_checker); } for (const bool time_direction : {true, false}) { DisjunctiveDetectablePrecedences* detectable_precedences = - new DisjunctiveDetectablePrecedences(time_direction, helper); + new DisjunctiveDetectablePrecedences(time_direction, helper, model); const int id = detectable_precedences->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 2); model->TakeOwnership(detectable_precedences); } for (const bool time_direction : {true, false}) { DisjunctiveNotLast* not_last = - new DisjunctiveNotLast(time_direction, helper); + new DisjunctiveNotLast(time_direction, helper, model); const int id = not_last->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 3); model->TakeOwnership(not_last); } for (const bool time_direction : {true, false}) { DisjunctiveEdgeFinding* edge_finding = - new DisjunctiveEdgeFinding(time_direction, helper); + new DisjunctiveEdgeFinding(time_direction, helper, model); const int id = edge_finding->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 4); model->TakeOwnership(edge_finding); @@ -181,7 +181,10 @@ void TaskSet::NotifyEntryIsNowLastIfPresent(const Entry& e) { for (int i = 0;; ++i) { if (i == size) return; if (sorted_tasks_[i].task == e.task) { - sorted_tasks_.erase(sorted_tasks_.begin() + i); + for (int j = i; j + 1 < size; ++j) { + sorted_tasks_[j] = sorted_tasks_[j + 1]; + } + sorted_tasks_.pop_back(); break; } } @@ -413,7 +416,7 @@ bool CombinedDisjunctive::Propagate() { // TODO(user): Maybe factor out the code? It does require a function with a // lot of arguments though. 
helper_->ClearReason(); - const std::vector& sorted_tasks = + const absl::Span sorted_tasks = task_sets_[best_d_index].SortedTasks(); const IntegerValue window_start = sorted_tasks[best_critical_index].start_min; @@ -451,10 +454,15 @@ bool CombinedDisjunctive::Propagate() { } bool DisjunctiveOverloadChecker::Propagate() { + stats_.OnPropagate(); if (!helper_->SynchronizeAndSetTimeDirection(/*is_forward=*/true)) { + ++stats_.num_conflicts; return false; } + // We use this to detect precedence between task that must cause a push. + TaskTime task_with_max_end_min = {0, kMinIntegerValue}; + // Split problem into independent part. // // Many propagators in this file use the same approach, we start by processing @@ -470,18 +478,65 @@ bool DisjunctiveOverloadChecker::Propagate() { IntegerValue relevant_end; int window_size = 0; int relevant_size = 0; - for (const TaskTime task_time : helper_->TaskByIncreasingShiftedStartMin()) { - const int task = task_time.task_index; - if (helper_->IsAbsent(task)) continue; + TaskTime* const window = window_.get(); + for (const auto [task, presence_lit, start_min] : + helper_->TaskByIncreasingShiftedStartMin()) { + if (helper_->IsAbsent(presence_lit)) continue; + + // Nothing to do with overload checking, but almost free to do that here. + const IntegerValue size_min = helper_->SizeMin(task); + const IntegerValue end_min = start_min + size_min; + const IntegerValue start_max = helper_->StartMax(task); + if (start_max < task_with_max_end_min.time && + helper_->IsPresent(presence_lit) && size_min > 0) { + // We have a detectable precedence that must cause a push. + // + // Remarks: If we added all precedence literals + linear relation, this + // propagation would have been done by the linear propagator, but if we + // didn't add such relations yet, it is beneficial to detect that here! + // + // TODO(user): Actually, we just infered a "not last" so we could check + // for relevant_size > 2 potential propagation? 
+ // + // TODO(user): Can we detect and propagate all such relations easily and + // do a pass before this maybe? On a related note, because this + // propagator is not instantiated in both direction, we might miss some + // easy propag. + const int to_push = task_with_max_end_min.task_index; + helper_->ClearReason(); + helper_->AddPresenceReason(task); + helper_->AddReasonForBeingBefore(task, to_push); + helper_->AddEndMinReason(task, end_min); + + if (!helper_->IncreaseStartMin(to_push, end_min)) { + ++stats_.num_conflicts; + return false; + } + + // TODO(user): Shall we keep propagating? we know the prefix didn't + // change, so we could be faster here. On another hand, it might be + // better to propagate all the linear constraints before returning + // here. + ++stats_.num_propagations; + + stats_.EndWithoutConflicts(); + return true; + } + + // Note that we need to do that AFTER the block above. + if (end_min > task_with_max_end_min.time) { + task_with_max_end_min = {task, end_min}; + } // Extend window. - const IntegerValue start_min = task_time.time; if (start_min < window_end) { - window_[window_size++] = task_time; - window_end += helper_->SizeMin(task); - if (window_end > helper_->EndMax(task)) { + window[window_size++] = {task, start_min}; + if (window_end > start_max) { + window_end += size_min; relevant_size = window_size; relevant_end = window_end; + } else { + window_end += size_min; } continue; } @@ -490,21 +545,24 @@ bool DisjunctiveOverloadChecker::Propagate() { // We don't need to process the end of the window (after relevant_size) // because these interval can be greedily assembled in a feasible solution. if (relevant_size > 0 && !PropagateSubwindow(relevant_size, relevant_end)) { + ++stats_.num_conflicts; return false; } // Start of the next window. 
window_size = 0; - window_[window_size++] = task_time; - window_end = start_min + helper_->SizeMin(task); + window[window_size++] = {task, start_min}; + window_end = start_min + size_min; relevant_size = 0; } // Process last window. if (relevant_size > 0 && !PropagateSubwindow(relevant_size, relevant_end)) { + ++stats_.num_conflicts; return false; } + stats_.EndWithoutConflicts(); return true; } @@ -524,7 +582,10 @@ bool DisjunctiveOverloadChecker::PropagateSubwindow( // No point adding a task if its end_max is too large. const TaskTime& task_time = window_[i]; const int task = task_time.task_index; - const IntegerValue end_max = helper_->EndMax(task); + + // We use the shifted end-max. + const IntegerValue end_max = + helper_->StartMax(task) + helper_->SizeMin(task); if (end_max < global_window_end) { window_[num_events] = task_time; task_to_event_[task] = num_events; @@ -532,6 +593,7 @@ bool DisjunctiveOverloadChecker::PropagateSubwindow( ++num_events; } } + if (num_events <= 1) return true; theta_tree_.Reset(num_events); // Introduce events by increasing end_max, check for overloads. 
@@ -577,7 +639,7 @@ bool DisjunctiveOverloadChecker::PropagateSubwindow( const int task = window_[event].task_index; helper_->AddPresenceReason(task); helper_->AddEnergyAfterReason(task, energy_min, window_start); - helper_->AddEndMaxReason(task, window_end); + helper_->AddShiftedEndMaxReason(task, window_end); } } return helper_->ReportConflict(); @@ -610,14 +672,15 @@ bool DisjunctiveOverloadChecker::PropagateSubwindow( const int task = window_[event].task_index; helper_->AddPresenceReason(task); helper_->AddEnergyAfterReason(task, energy_min, window_start); - helper_->AddEndMaxReason(task, window_end); + helper_->AddShiftedEndMaxReason(task, window_end); } } helper_->AddEnergyAfterReason(optional_task, optional_size_min, window_start); - helper_->AddEndMaxReason(optional_task, window_end); + helper_->AddShiftedEndMaxReason(optional_task, window_end); + ++stats_.num_propagations; if (!helper_->PushTaskAbsence(optional_task)) return false; } @@ -632,13 +695,18 @@ int DisjunctiveOverloadChecker::RegisterWith(GenericLiteralWatcher* watcher) { // This propagator reach the fix point in one pass. const int id = watcher->Register(this); helper_->SetTimeDirection(/*is_forward=*/true); + watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); helper_->WatchAllTasks(id, watcher, /*watch_start_max=*/false, /*watch_end_max=*/true); return id; } bool DisjunctiveDetectablePrecedences::Propagate() { - if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; + stats_.OnPropagate(); + if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) { + ++stats_.num_conflicts; + return false; + } to_propagate_.clear(); processed_.assign(helper_->NumTasks(), false); @@ -652,49 +720,53 @@ bool DisjunctiveDetectablePrecedences::Propagate() { // start_max >= end_min, so wouldn't be in detectable precedence. 
task_by_increasing_end_min_.clear(); IntegerValue window_end = kMinIntegerValue; + IntegerValue max_end_min = kMinIntegerValue; for (const TaskTime task_time : helper_->TaskByIncreasingStartMin()) { const int task = task_time.task_index; if (helper_->IsAbsent(task)) continue; // Note that the helper returns value assuming the task is present. - const IntegerValue start_min = helper_->StartMin(task); - const IntegerValue size_min = helper_->SizeMin(task); + const IntegerValue start_min = task_time.time; const IntegerValue end_min = helper_->EndMin(task); - DCHECK_GE(end_min, start_min + size_min); if (start_min < window_end) { + const IntegerValue size_min = helper_->SizeMin(task); + DCHECK_GE(end_min, start_min + size_min); + task_by_increasing_end_min_.push_back({task, end_min}); + max_end_min = std::max(max_end_min, end_min); window_end = std::max(window_end, start_min) + size_min; continue; } // Process current window. - if (task_by_increasing_end_min_.size() > 1 && !PropagateSubwindow()) { + if (task_by_increasing_end_min_.size() > 1 && + !PropagateSubwindow(max_end_min)) { + ++stats_.num_conflicts; return false; } // Start of the next window. task_by_increasing_end_min_.clear(); task_by_increasing_end_min_.push_back({task, end_min}); + max_end_min = end_min; window_end = end_min; } - if (task_by_increasing_end_min_.size() > 1 && !PropagateSubwindow()) { + if (task_by_increasing_end_min_.size() > 1 && + !PropagateSubwindow(max_end_min)) { + ++stats_.num_conflicts; return false; } + stats_.EndWithoutConflicts(); return true; } -bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { +bool DisjunctiveDetectablePrecedences::PropagateSubwindow( + const IntegerValue max_end_min) { DCHECK(!task_by_increasing_end_min_.empty()); - // The vector is already sorted by shifted_start_min, so there is likely a - // good correlation, hence the incremental sort. 
- IncrementalSort(task_by_increasing_end_min_.begin(), - task_by_increasing_end_min_.end()); - const IntegerValue max_end_min = task_by_increasing_end_min_.back().time; - // Fill and sort task_by_increasing_start_max_. // // TODO(user): we should use start max if present, but more generally, all @@ -708,9 +780,16 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { } } if (task_by_increasing_start_max_.empty()) return true; + std::sort(task_by_increasing_start_max_.begin(), task_by_increasing_start_max_.end()); + // The vector is already sorted by shifted_start_min, so there is likely a + // good correlation, hence the incremental sort. + IncrementalSort(task_by_increasing_end_min_.begin(), + task_by_increasing_end_min_.end()); + DCHECK_EQ(max_end_min, task_by_increasing_end_min_.back().time); + // Invariant: need_update is false implies that task_set_end_min is equal to // task_set_.ComputeEndMin(). // @@ -802,7 +881,7 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { // Note that this works as well when IsPresent(t) is false. if (task_set_end_min > helper_->StartMin(t)) { const int critical_index = task_set_.GetCriticalIndex(); - const std::vector& sorted_tasks = + const absl::Span sorted_tasks = task_set_.SortedTasks(); helper_->ClearReason(); @@ -874,6 +953,7 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { // This augment the start-min of t. Note that t is not in task set // yet, so we will use this updated start if we ever add it there. 
+ ++stats_.num_propagations; if (!helper_->IncreaseStartMin(t, new_start_min)) { return false; } @@ -909,44 +989,43 @@ int DisjunctiveDetectablePrecedences::RegisterWith( return id; } -DisjunctivePrecedences::~DisjunctivePrecedences() { - if (!VLOG_IS_ON(1)) return; - if (shared_stats_ == nullptr) return; - std::vector> stats; - stats.push_back({"disj_precedences/num_propagations_", num_propagations_}); - shared_stats_->AddStats(stats); -} - bool DisjunctivePrecedences::Propagate() { - if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; + stats_.OnPropagate(); + if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) { + ++stats_.num_conflicts; + return false; + } window_.clear(); // We only need to consider "critical" set of tasks given how we compute the // min-offset in PropagateSubwindow(). IntegerValue window_end = kMinIntegerValue; - for (const TaskTime task_time : helper_->TaskByIncreasingShiftedStartMin()) { - const int task = task_time.task_index; - if (!helper_->IsPresent(task)) continue; + for (const auto [task, presence_lit, start_min] : + helper_->TaskByIncreasingShiftedStartMin()) { + if (!helper_->IsPresent(presence_lit)) continue; - const IntegerValue start_min = task_time.time; if (start_min < window_end) { - window_.push_back(task_time); + window_.push_back({task, start_min}); window_end += helper_->SizeMin(task); continue; } if (window_.size() > 1 && !PropagateSubwindow()) { + ++stats_.num_conflicts; return false; } // Start of the next window. 
window_.clear(); - window_.push_back(task_time); + window_.push_back({task, start_min}); window_end = start_min + helper_->SizeMin(task); } if (window_.size() > 1 && !PropagateSubwindow()) { + ++stats_.num_conflicts; return false; } + + stats_.EndWithoutConflicts(); return true; } @@ -1095,7 +1174,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { helper_->MutableLiteralReason()->push_back(l.Negated()); } } - ++num_propagations_; + ++stats_.num_propagations; if (!helper_->PushIntegerLiteral( IntegerLiteral::GreaterOrEqual(var, best_new_lb))) { return false; @@ -1115,11 +1194,14 @@ int DisjunctivePrecedences::RegisterWith(GenericLiteralWatcher* watcher) { } bool DisjunctiveNotLast::Propagate() { - if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; + stats_.OnPropagate(); + if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) { + ++stats_.num_conflicts; + return false; + } - const auto& task_by_decreasing_start_max = - helper_->TaskByDecreasingStartMax(); - const auto& task_by_increasing_shifted_start_min = + const auto task_by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); + const auto task_by_increasing_shifted_start_min = helper_->TaskByIncreasingShiftedStartMin(); // Split problem into independent part. 
@@ -1139,16 +1221,15 @@ bool DisjunctiveNotLast::Propagate() { start_min_window_.clear(); IntegerValue window_end = kMinIntegerValue; for (; i < num_tasks; ++i) { - const TaskTime task_time = task_by_increasing_shifted_start_min[i]; - const int task = task_time.task_index; - if (!helper_->IsPresent(task)) continue; + const auto [task, presence_lit, start_min] = + task_by_increasing_shifted_start_min[i]; + if (!helper_->IsPresent(presence_lit)) continue; - const IntegerValue start_min = task_time.time; if (start_min_window_.empty()) { - start_min_window_.push_back(task_time); + start_min_window_.push_back({task, start_min}); window_end = start_min + helper_->SizeMin(task); } else if (start_min < window_end) { - start_min_window_.push_back(task_time); + start_min_window_.push_back({task, start_min}); window_end += helper_->SizeMin(task); } else { break; @@ -1174,9 +1255,12 @@ bool DisjunctiveNotLast::Propagate() { // Process current window. if (!start_max_window_.empty() && !PropagateSubwindow()) { + ++stats_.num_conflicts; return false; } } + + stats_.EndWithoutConflicts(); return true; } @@ -1251,7 +1335,8 @@ bool DisjunctiveNotLast::PropagateSubwindow() { // Find the largest start-max of the critical tasks (excluding t). The // end-max for t need to be smaller than or equal to this. IntegerValue largest_ct_start_max = kMinIntegerValue; - const std::vector& sorted_tasks = task_set_.SortedTasks(); + const absl::Span sorted_tasks = + task_set_.SortedTasks(); const int sorted_tasks_size = sorted_tasks.size(); for (int i = critical_index; i < sorted_tasks_size; ++i) { const int ct = sorted_tasks[i].task; @@ -1298,6 +1383,7 @@ bool DisjunctiveNotLast::PropagateSubwindow() { // Enqueue the new end-max for t. // Note that changing it will not influence the rest of the loop. 
+ ++stats_.num_propagations; if (!helper_->DecreaseEndMax(t, largest_ct_start_max)) return false; } } @@ -1312,21 +1398,25 @@ int DisjunctiveNotLast::RegisterWith(GenericLiteralWatcher* watcher) { } bool DisjunctiveEdgeFinding::Propagate() { + stats_.OnPropagate(); const int num_tasks = helper_->NumTasks(); - if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) return false; + if (!helper_->SynchronizeAndSetTimeDirection(time_direction_)) { + ++stats_.num_conflicts; + return false; + } is_gray_.resize(num_tasks, false); non_gray_task_to_event_.resize(num_tasks); window_.clear(); IntegerValue window_end = kMinIntegerValue; - for (const TaskTime task_time : helper_->TaskByIncreasingShiftedStartMin()) { - const int task = task_time.task_index; - if (helper_->IsAbsent(task)) continue; + for (const auto [task, presence_lit, shifted_smin] : + helper_->TaskByIncreasingShiftedStartMin()) { + if (helper_->IsAbsent(presence_lit)) continue; // Note that we use the real start min here not the shifted one. This is // because we might be able to push it if it is smaller than window end. if (helper_->StartMin(task) < window_end) { - window_.push_back(task_time); + window_.push_back({task, shifted_smin}); window_end += helper_->SizeMin(task); continue; } @@ -1334,17 +1424,21 @@ bool DisjunctiveEdgeFinding::Propagate() { // We need at least 3 tasks for the edge-finding to be different from // detectable precedences. if (window_.size() > 2 && !PropagateSubwindow(window_end)) { + ++stats_.num_conflicts; return false; } // Start of the next window. 
window_.clear(); - window_.push_back(task_time); - window_end = task_time.time + helper_->SizeMin(task); + window_.push_back({task, shifted_smin}); + window_end = shifted_smin + helper_->SizeMin(task); } if (window_.size() > 2 && !PropagateSubwindow(window_end)) { + ++stats_.num_conflicts; return false; } + + stats_.EndWithoutConflicts(); return true; } @@ -1546,6 +1640,7 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { // TODO(user): propagate the precedence Boolean here too? I think it // will be more powerful. Even if eventually all these precedence will // become detectable (see Petr Villim PhD). + ++stats_.num_propagations; if (!helper_->IncreaseStartMin(gray_task, non_gray_end_min)) { return false; } diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 6b3f50e011..70d5356a71 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -53,7 +53,7 @@ void AddDisjunctiveWithBooleanPrecedencesOnly( // for most of the function here, not a O(log(n)) one. class TaskSet { public: - explicit TaskSet(int num_tasks) { sorted_tasks_.reserve(num_tasks); } + explicit TaskSet(int num_tasks) { sorted_tasks_.ClearAndReserve(num_tasks); } struct Entry { int task; @@ -113,13 +113,56 @@ class TaskSet { // another unneeded loop. int GetCriticalIndex() const { return optimized_restart_; } - const std::vector& SortedTasks() const { return sorted_tasks_; } + absl::Span SortedTasks() const { return sorted_tasks_; } private: - std::vector sorted_tasks_; + FixedCapacityVector sorted_tasks_; mutable int optimized_restart_ = 0; }; +// Simple class to display statistics at the end if --v=1. +struct PropagationStatistics { + explicit PropagationStatistics(std::string _name, Model* model = nullptr) + : name(_name), + shared_stats(model == nullptr + ? 
nullptr + : model->GetOrCreate()) {}; + + ~PropagationStatistics() { + if (shared_stats == nullptr) return; + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({absl::StrCat(name, "/num_calls"), num_calls}); + stats.push_back({absl::StrCat(name, "/num_calls_with_propagation"), + num_calls_with_propagation}); + stats.push_back( + {absl::StrCat(name, "/num_calls_with_conflicts"), num_conflicts}); + stats.push_back( + {absl::StrCat(name, "/num_propagations"), num_propagations}); + shared_stats->AddStats(stats); + } + + void OnPropagate() { + ++num_calls; + saved_num_propag = num_propagations; + } + + void EndWithoutConflicts() { + if (num_propagations > saved_num_propag) { + ++num_calls_with_propagation; + } + } + + const std::string name; + SharedStatistics* shared_stats; + int64_t saved_num_propag; + + int64_t num_calls = 0; + int64_t num_calls_with_propagation = 0; // Only count if we did something. + int64_t num_conflicts = 0; + int64_t num_propagations = 0; +}; + // ============================================================================ // Below are many of the known propagation techniques for the disjunctive, each // implemented in only one time direction and in its own propagator class. 
The @@ -132,10 +175,14 @@ class TaskSet { class DisjunctiveOverloadChecker : public PropagatorInterface { public: - explicit DisjunctiveOverloadChecker(SchedulingConstraintHelper* helper) + explicit DisjunctiveOverloadChecker(SchedulingConstraintHelper* helper, + Model* model = nullptr) : helper_(helper), window_(new TaskTime[helper->NumTasks()]), - task_to_event_(new int[helper->NumTasks()]) {} + task_to_event_(new int[helper->NumTasks()]), + stats_("DisjunctiveOverloadChecker", model) { + task_by_increasing_end_max_.ClearAndReserve(helper->NumTasks()); + } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); @@ -149,33 +196,41 @@ class DisjunctiveOverloadChecker : public PropagatorInterface { std::unique_ptr window_; std::unique_ptr task_to_event_; - std::vector task_by_increasing_end_max_; + FixedCapacityVector task_by_increasing_end_max_; ThetaLambdaTree theta_tree_; + PropagationStatistics stats_; }; class DisjunctiveDetectablePrecedences : public PropagatorInterface { public: DisjunctiveDetectablePrecedences(bool time_direction, - SchedulingConstraintHelper* helper) + SchedulingConstraintHelper* helper, + Model* model = nullptr) : time_direction_(time_direction), helper_(helper), - task_set_(helper->NumTasks()) {} + task_set_(helper->NumTasks()), + stats_("DisjunctiveDetectablePrecedences", model) { + task_by_increasing_end_min_.ClearAndReserve(helper->NumTasks()); + task_by_increasing_start_max_.ClearAndReserve(helper->NumTasks()); + to_propagate_.ClearAndReserve(helper->NumTasks()); + } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); private: - bool PropagateSubwindow(); + bool PropagateSubwindow(IntegerValue max_end_min); - std::vector task_by_increasing_end_min_; - std::vector task_by_increasing_start_max_; + FixedCapacityVector task_by_increasing_end_min_; + FixedCapacityVector task_by_increasing_start_max_; std::vector processed_; - std::vector to_propagate_; + FixedCapacityVector to_propagate_; const bool 
time_direction_; SchedulingConstraintHelper* helper_; TaskSet task_set_; + PropagationStatistics stats_; }; // Singleton model class which is just a SchedulingConstraintHelper will all @@ -211,29 +266,42 @@ class CombinedDisjunctive : public PropagatorInterface { class DisjunctiveNotLast : public PropagatorInterface { public: - DisjunctiveNotLast(bool time_direction, SchedulingConstraintHelper* helper) + DisjunctiveNotLast(bool time_direction, SchedulingConstraintHelper* helper, + Model* model = nullptr) : time_direction_(time_direction), helper_(helper), - task_set_(helper->NumTasks()) {} + task_set_(helper->NumTasks()), + stats_("DisjunctiveNotLast", model) { + start_min_window_.ClearAndReserve(helper->NumTasks()); + start_max_window_.ClearAndReserve(helper->NumTasks()); + } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); private: bool PropagateSubwindow(); - std::vector start_min_window_; - std::vector start_max_window_; + FixedCapacityVector start_min_window_; + FixedCapacityVector start_max_window_; const bool time_direction_; SchedulingConstraintHelper* helper_; TaskSet task_set_; + PropagationStatistics stats_; }; class DisjunctiveEdgeFinding : public PropagatorInterface { public: DisjunctiveEdgeFinding(bool time_direction, - SchedulingConstraintHelper* helper) - : time_direction_(time_direction), helper_(helper) {} + SchedulingConstraintHelper* helper, + Model* model = nullptr) + : time_direction_(time_direction), + helper_(helper), + stats_("DisjunctiveEdgeFinding", model) { + task_by_increasing_end_max_.ClearAndReserve(helper->NumTasks()); + window_.ClearAndReserve(helper->NumTasks()); + event_size_.ClearAndReserve(helper->NumTasks()); + } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); @@ -244,16 +312,18 @@ class DisjunctiveEdgeFinding : public PropagatorInterface { SchedulingConstraintHelper* helper_; // This only contains non-gray tasks. 
- std::vector task_by_increasing_end_max_; + FixedCapacityVector task_by_increasing_end_max_; // All these member are indexed in the same way. - std::vector window_; + FixedCapacityVector window_; ThetaLambdaTree theta_tree_; - std::vector event_size_; + FixedCapacityVector event_size_; // Task indexed. std::vector non_gray_task_to_event_; std::vector is_gray_; + + PropagationStatistics stats_; }; // Exploits the precedences relations of the form "this set of disjoint @@ -267,8 +337,11 @@ class DisjunctivePrecedences : public PropagatorInterface { helper_(helper), integer_trail_(model->GetOrCreate()), precedence_relations_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()) {} - ~DisjunctivePrecedences() override; + stats_("DisjunctivePrecedences", model) { + window_.ClearAndReserve(helper->NumTasks()); + index_to_end_vars_.ClearAndReserve(helper->NumTasks()); + indices_before_.ClearAndReserve(helper->NumTasks()); + } bool Propagate() final; int RegisterWith(GenericLiteralWatcher* watcher); @@ -280,16 +353,15 @@ class DisjunctivePrecedences : public PropagatorInterface { SchedulingConstraintHelper* helper_; IntegerTrail* integer_trail_; PrecedenceRelations* precedence_relations_; - SharedStatistics* shared_stats_; - int64_t num_propagations_ = 0; + FixedCapacityVector window_; + FixedCapacityVector index_to_end_vars_; - std::vector window_; - std::vector index_to_end_vars_; - - std::vector indices_before_; + FixedCapacityVector indices_before_; std::vector skip_; std::vector before_; + + PropagationStatistics stats_; }; // This is an optimization for the case when we have a big number of such diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index ab998de089..cf16b8960d 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -165,13 +165,14 @@ import com.google.ortools.sat.BoolVar; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import 
com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; import com.google.ortools.sat.SatParameters; /** Link integer constraints together. */ -public class ChannelingSampleSat { +public final class ChannelingSampleSat { public static void main(String[] args) throws Exception { Loader.loadNativeLibraries(); // Create the CP-SAT model. @@ -207,7 +208,7 @@ public class ChannelingSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. - solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; @@ -224,6 +225,8 @@ public class ChannelingSampleSat { private IntVar[] variableArray; }.init(new IntVar[] {vars[0], vars[1], b})); } + + private ChannelingSampleSat() {} } ``` diff --git a/ortools/sat/docs/integer_arithmetic.md b/ortools/sat/docs/integer_arithmetic.md index fca30f9710..2fcd3c5629 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -512,6 +512,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; @@ -570,7 +571,7 @@ public class EarlinessTardinessCostSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. 
- solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; @@ -949,6 +950,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.Literal; @@ -1012,7 +1014,7 @@ public class StepFunctionSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. - solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; @@ -1302,3 +1304,90 @@ def bool_and_int_var_product_sample_sat(): bool_and_int_var_product_sample_sat() ``` + +## Scanning the domain of variables. + +In this example, we will implement the all_different_except_0 constraint. This +constraint is useful as it expresses that 2 active assignment should be +different, but we do not care when they are inactive (represented by being +assigned a zero value). + +To implement this constraint, we will collect all values in the initial domain +of all variables and attach Boolean variables for each of them. This requires +reading back the values from the model. + +### Python code + +```python +#!/usr/bin/env python3 +"""Implements AllDifferentExcept0 using atomic constraints.""" + +import collections + +from ortools.sat.python import cp_model + + +def all_different_except_0(): + """Encode the AllDifferentExcept0 constraint.""" + + # Model. + model = cp_model.CpModel() + + # Declare our primary variable. 
+ x = [model.new_int_var(0, 10, f"x{i}") for i in range(5)] + + # Expand the AllDifferentExcept0 constraint. + variables_per_value = collections.defaultdict(list) + all_values = set() + + for var in x: + all_encoding_literals = [] + # Domains of variables are represented by flat intervals. + for i in range(0, len(var.proto.domain), 2): + start = var.proto.domain[i] + end = var.proto.domain[i + 1] + for value in range(start, end + 1): # Intervals are inclusive. + # Create the literal attached to var == value. + bool_var = model.new_bool_var(f"{var} == {value}") + model.add(var == value).only_enforce_if(bool_var) + + # Collect all encoding literals for a given variable. + all_encoding_literals.append(bool_var) + + # Collect all encoding literals for a given value. + variables_per_value[value].append(bool_var) + + # Collect all different values. + all_values.add(value) + + # One variable must have exactly one value. + model.add_exactly_one(all_encoding_literals) + + # Add the all_different constraints. + for value, literals in variables_per_value.items(): + if value == 0: + continue + model.add_at_most_one(literals) + + model.add(x[0] == 0) + model.add(x[1] == 0) + + model.maximize(sum(x)) + + # Create a solver and solve. + solver = cp_model.CpSolver() + status = solver.solve(model) + + # Checks and prints the output. + if status == cp_model.OPTIMAL: + print(f"Optimal solution: {solver.objective_value}, expected: 27.0") + elif status == cp_model.FEASIBLE: + print(f"Feasible solution: {solver.objective_value}, optimal 27.0") + elif status == cp_model.INFEASIBLE: + print("The model is infeasible") + else: + print("Something went wrong. 
 Please check the status and the log")
+
+
+all_different_except_0()
+```
diff --git a/ortools/sat/docs/model.md b/ortools/sat/docs/model.md
index 904a570c8e..3949f8ce2b 100644
--- a/ortools/sat/docs/model.md
+++ b/ortools/sat/docs/model.md
@@ -181,6 +181,7 @@ import com.google.ortools.Loader;
 import com.google.ortools.sat.CpModel;
 import com.google.ortools.sat.CpSolver;
 import com.google.ortools.sat.CpSolverSolutionCallback;
+import com.google.ortools.sat.CpSolverStatus;
 import com.google.ortools.sat.IntVar;
 import com.google.ortools.sat.LinearExpr;
@@ -211,7 +212,7 @@ public class SolutionHintingSampleSat {
     CpSolver solver = new CpSolver();
     VarArraySolutionPrinterWithObjective cb =
         new VarArraySolutionPrinterWithObjective(new IntVar[] {x, y, z});
-    solver.solve(model, cb);
+    CpSolverStatus unusedStatus = solver.solve(model, cb);
   }

   static class VarArraySolutionPrinterWithObjective extends CpSolverSolutionCallback {
diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md
index 6763a7bb38..19a8b5264c 100644
--- a/ortools/sat/docs/scheduling.md
+++ b/ortools/sat/docs/scheduling.md
@@ -459,13 +459,101 @@ func main() {
 }
 ```

+## Time relations between intervals
+
+Temporal relations between intervals can be expressed using linear
+inequalities involving the start and end expressions of the intervals.
+
+As seen above, the factory methods on the model used to build intervals accept
+1-var affine expression (a * var + b, a, b integer constants) as arguments to
+the start, size, and end parameters.
+
+Once the interval is built, these same expressions can be queried using
+`StartExpr(), SizeExpr() and EndExpr()` in C++ and C#, `start_expr(),
+size_expr(), and end_expr()` in python, and `getStartExpr(), getSizeExpr(), and
+getEndExpr()` in Java.
+
+If one or both intervals are optional, then these inequalities must be reified
+by the presence literals of the optional intervals used. 
+ +### Python code + +```python +#!/usr/bin/env python3 +"""Builds temporal relations between intervals.""" + +from ortools.sat.python import cp_model + + +def interval_relations_sample_sat(): + """Showcases how to build temporal relations between intervals.""" + model = cp_model.CpModel() + horizon = 100 + + # An interval can be created from three 1-var affine expressions. + start_var = model.new_int_var(0, horizon, "start") + duration = 10 # Python CP-SAT code accept integer variables or constants. + end_var = model.new_int_var(0, horizon, "end") + interval_var = model.new_interval_var(start_var, duration, end_var, "interval") + + # If the size is fixed, a simpler version uses the start expression and the + # size. + fixed_size_start_var = model.new_int_var(0, horizon, "fixed_start") + fixed_size_duration = 10 + fixed_size_interval_var = model.new_fixed_size_interval_var( + fixed_size_start_var, + fixed_size_duration, + "fixed_size_interval_var", + ) + + # An optional interval can be created from three 1-var affine expressions and + # a literal. + opt_start_var = model.new_int_var(0, horizon, "opt_start") + opt_duration = model.new_int_var(2, 6, "opt_size") + opt_end_var = model.new_int_var(0, horizon, "opt_end") + opt_presence_var = model.new_bool_var("opt_presence") + opt_interval_var = model.new_optional_interval_var( + opt_start_var, opt_duration, opt_end_var, opt_presence_var, "opt_interval" + ) + + # If the size is fixed, a simpler version uses the start expression, the + # size, and the presence literal. + opt_fixed_size_start_var = model.new_int_var(0, horizon, "opt_fixed_start") + opt_fixed_size_duration = 10 + opt_fixed_size_presence_var = model.new_bool_var("opt_fixed_presence") + opt_fixed_size_interval_var = model.new_optional_fixed_size_interval_var( + opt_fixed_size_start_var, + opt_fixed_size_duration, + opt_fixed_size_presence_var, + "opt_fixed_size_interval_var", + ) + + # Simple precedence between two non optional intervals. 
+ model.add(interval_var.start_expr() >= fixed_size_interval_var.end_expr()) + + # Synchronize start between two intervals (one optional, one not) + model.add( + interval_var.start_expr() == opt_interval_var.start_expr() + ).only_enforce_if(opt_presence_var) + + # Exact delay between two optional intervals. + exact_delay: int = 5 + model.add( + opt_interval_var.start_expr() + == opt_fixed_size_interval_var.end_expr() + exact_delay + ).only_enforce_if(opt_presence_var, opt_fixed_size_presence_var) + + +interval_relations_sample_sat() +``` + ## NoOverlap constraint A no_overlap constraint simply states that all intervals are disjoint. It is built with a list of interval variables. Fixed intervals are useful for excluding part of the timeline. -In the following examples. We want to schedule 3 tasks on 3 weeks excluding +In the following examples, you want to schedule 3 tasks on 3 weeks excluding weekends, making the final day as early as possible. ### Python code @@ -1116,16 +1204,16 @@ if __name__ == "__main__": ## Ranking tasks in a disjunctive resource -To rank intervals in a no_overlap constraint, we will count the number of +To rank intervals in a no_overlap constraint, you will count the number of performed intervals that precede each interval. This is slightly complicated if some interval variables are optional. To -implement it, we will create a matrix of `precedences` boolean variables. +implement it, you will create a matrix of `precedences` boolean variables. `precedences[i][j]` is set to true if and only if interval `i` is performed, interval `j` is performed, and if the start of `i` is before the start of `j`. Furthermore, `precedences[i][i]` is set to be equal to `presences[i]`. This way, -we can define the rank of an interval `i` as `sum over j(precedences[j][i]) - +you can define the rank of an interval `i` as `sum over j(precedences[j][i]) - 1`. 
If the interval is not performed, the rank computed as -1, if the interval is performed, its presence variable negates the -1, and the formula counts the number of other intervals that precede it. @@ -1927,10 +2015,10 @@ func main() { ## Ranking tasks in a disjunctive resource with a circuit constraint. -To rank intervals in a no_overlap constraint, we will use a circuit constraint +To rank intervals in a no_overlap constraint, you will use a circuit constraint to perform the transitive reduction from precedences to successors. -This is slightly complicated if some interval variables are optional, and we +This is slightly complicated if some interval variables are optional, and you need to take into account the case where no task is performed. ### Python code @@ -1951,7 +2039,7 @@ def rank_tasks_with_circuit( durations: Sequence[int], presences: Sequence[cp_model.IntVar], ranks: Sequence[cp_model.IntVar], -): +) -> None: """This method uses a circuit constraint to rank tasks. This method assumes that all starts are disjoint, meaning that all tasks have @@ -1961,7 +2049,7 @@ def rank_tasks_with_circuit( To implement this ranking, we will create a dense graph with num_tasks + 1 nodes. The extra node (with id 0) will be used to decide which task is first with - its only outgoing arc, and whhich task is last with its only incoming arc. + its only outgoing arc, and which task is last with its only incoming arc. Each task i will be associated with id i + 1, and an arc between i + 1 and j + 1 indicates that j is the immediate successor of i. @@ -2027,7 +2115,7 @@ def rank_tasks_with_circuit( model.add_circuit(arcs) -def ranking_sample_sat(): +def ranking_sample_sat() -> None: """Ranks tasks in a NoOverlap constraint.""" model = cp_model.CpModel() @@ -2110,7 +2198,7 @@ Sometimes, a task can be interrupted by a break (overnight, lunch break). In that context, although the processing time of the task is the same, the duration can vary. 
-To implement this feature, we will have the duration of the task be a function +To implement this feature, you will have the duration of the task be a function of the start of the task. This is implemented using channeling constraints. The following code displays: @@ -2197,8 +2285,8 @@ scheduling_with_calendar_sample_sat() ## Detecting if two intervals overlap. -We want a Boolean variable to be true if and only if two intervals overlap. To -enforce this, we will create 3 Boolean variables, link two of them to the +You want a Boolean variable to be true if and only if two intervals overlap. To +enforce this, you will create 3 Boolean variables, link two of them to the relative positions of the two intervals, and define the third one using the other two Boolean variables. @@ -2207,8 +2295,8 @@ uses one clause and two implications. Propagation will be faster using this version. The second version uses a `sum(..) == 1` equation. It is more compact, but assumes the length of the two intervals is > 0. -Note that we need to create the intervals to enforce `start + size == end`, but -we do not actually use them in this code sample. +Note that you need to create the intervals to enforce `start + size == end`, but +you do not actually use them in this code sample. The following code displays @@ -2316,8 +2404,209 @@ def overlapping_interval_sample_sat(): overlapping_interval_sample_sat() ``` -## Transitions in a disjunctive resource +## Transitions in a no_overlap constraint -## Precedences between intervals +In some scheduling problems, switching between certain type of tasks on a +machine implies some penalty, and/or some delay. Implementing these +functionalities implies knowing which are the direct successors of each task. + +The circuit constraint is used to perform the transitive reduction from +precedences to successors. Once this is done, it is straightforward to use the +successor literals to implement the penalties or the delays. 
+ +### Python code + +```python +#!/usr/bin/env python3 +"""Implements transition times and costs in a no_overlap constraint.""" + +from typing import Dict, List, Sequence, Tuple, Union + +from ortools.sat.python import cp_model + + +def transitive_reduction_with_circuit_delays_and_penalties( + model: cp_model.CpModel, + starts: Sequence[cp_model.IntVar], + durations: Sequence[int], + presences: Sequence[Union[cp_model.IntVar, bool]], + penalties: Dict[Tuple[int, int], int], + delays: Dict[Tuple[int, int], int], +) -> Sequence[Tuple[cp_model.IntVar, int]]: + """This method uses a circuit constraint to rank tasks. + + This method assumes that all starts are disjoint, meaning that all tasks have + a strictly positive duration, and they appear in the same NoOverlap + constraint. + + The extra node (with id 0) will be used to decide which task is first with + its only outgoing arc, and which task is last with its only incoming arc. + Each task i will be associated with id i + 1, and an arc between i + 1 and j + + 1 indicates that j is the immediate successor of i. + + The circuit constraint ensures there is at most 1 hamiltonian cycle of + length > 1. If no such path exists, then no tasks are active. + We also need to enforce that any hamiltonian cycle of size > 1 must contain + the node 0. And thus, there is a self loop on node 0 iff the circuit is empty. + + Args: + model: The CpModel to add the constraints to. + starts: The array of starts variables of all tasks. + durations: the durations of all tasks. + presences: The array of presence variables of all tasks. + penalties: the array of tuple (`tail_index`, `head_index`, `penalty`) that + specifies that if task `tail_index` is the successor of the task + `head_index`, then `penalty` must be added to the cost. 
+ delays: the array of tuple (`tail_index`, `head_index`, `delay`) that + specifies that if task `tail_index` is the successor of the task + `head_index`, then an extra `delay` must be added between the end of the + first task and the start of the second task. + + Returns: + The list of pairs (Boolean variables, penalty) to be added to the objective. + """ + + num_tasks = len(starts) + all_tasks = range(num_tasks) + + arcs: List[cp_model.ArcT] = [] + penalty_terms = [] + for i in all_tasks: + # if node i is first. + start_lit = model.new_bool_var(f"start_{i}") + arcs.append((0, i + 1, start_lit)) + + # As there are no other constraints on the problem, we can add this + # redundant constraint. + model.add(starts[i] == 0).only_enforce_if(start_lit) + + # if node i is last. + end_lit = model.new_bool_var(f"end_{i}") + arcs.append((i + 1, 0, end_lit)) + + for j in all_tasks: + if i == j: + arcs.append((i + 1, i + 1, ~presences[i])) + else: + literal = model.new_bool_var(f"arc_{i}_to_{j}") + arcs.append((i + 1, j + 1, literal)) + + # To perform the transitive reduction from precedences to successors, + # we need to tie the starts of the tasks with 'literal'. + # In a pure problem, the following inequality could be an equality. + # It is not true in general. + # + # Note that we could use this literal to penalize the transition, add an + # extra delay to the precedence. + min_delay = 0 + key = (i, j) + if key in delays: + min_delay = delays[key] + model.add( + starts[j] >= starts[i] + durations[i] + min_delay + ).only_enforce_if(literal) + + # Create the penalties. + if key in penalties: + penalty_terms.append((literal, penalties[key])) + + # Manage the empty circuit + empty = model.new_bool_var("empty") + arcs.append((0, 0, empty)) + + for i in all_tasks: + model.add_implication(empty, ~presences[i]) + + # Add the circuit constraint. 
+ model.add_circuit(arcs) + + return penalty_terms + + +def transitions_in_no_overlap_sample_sat(): + """Implement transitions in a NoOverlap constraint.""" + + model = cp_model.CpModel() + horizon = 40 + num_tasks = 4 + + # Breaking the natural sequence induces a fixed penalty. + penalties = { + (1, 0): 10, + (2, 0): 10, + (3, 0): 10, + (2, 1): 10, + (3, 1): 10, + (3, 2): 10, + } + + # Switching from an odd to even or even to odd task indices induces a delay. + delays = { + (1, 0): 10, + (0, 1): 10, + (3, 0): 10, + (0, 3): 10, + (1, 2): 10, + (2, 1): 10, + (3, 2): 10, + (2, 3): 10, + } + + all_tasks = range(num_tasks) + + starts = [] + durations = [] + intervals = [] + presences = [] + + # Creates intervals, all present. But the cost is robust w.r.t. optional + # intervals. + for t in all_tasks: + start = model.new_int_var(0, horizon, f"start[{t}]") + duration = 5 + presence = True + interval = model.new_optional_fixed_size_interval_var( + start, duration, presence, f"opt_interval[{t}]" + ) + + starts.append(start) + durations.append(duration) + intervals.append(interval) + presences.append(presence) + + # Adds NoOverlap constraint. + model.add_no_overlap(intervals) + + # Adds ranking constraint. + penalty_terms = transitive_reduction_with_circuit_delays_and_penalties( + model, starts, durations, presences, penalties, delays + ) + + # Minimize the sum of penalties, + model.minimize(sum(var * penalty for var, penalty in penalty_terms)) + + # In practise, only one penalty can happen. Thus the two even tasks are + # together, same for the two odd tasks. + # Because of the penalties, the optimal sequence is 0 -> 2 -> 1 -> 3 + # which induces one penalty and one delay. + + # Solves the model model. + solver = cp_model.CpSolver() + status = solver.solve(model) + + if status == cp_model.OPTIMAL: + # Prints out the makespan and the start times and ranks of all tasks. 
+ print(f"Optimal cost: {solver.objective_value}") + for t in all_tasks: + if solver.value(presences[t]): + print(f"Task {t} starts at {solver.value(starts[t])} ") + else: + print(f"Task {t} in not performed") + else: + print(f"Solver exited with nonoptimal status: {status}") + + +transitions_in_no_overlap_sample_sat() +``` ## Convex hull of a set of intervals diff --git a/ortools/sat/docs/solver.md b/ortools/sat/docs/solver.md index 1484515314..5881234d43 100644 --- a/ortools/sat/docs/solver.md +++ b/ortools/sat/docs/solver.md @@ -375,11 +375,13 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; +import java.util.function.Consumer; /** Solves an optimization problem and displays all intermediate solutions. */ -public class SolveAndPrintIntermediateSolutionsSampleSat { +public final class SolveAndPrintIntermediateSolutionsSampleSat { static class VarArraySolutionPrinterWithObjective extends CpSolverSolutionCallback { public VarArraySolutionPrinterWithObjective(IntVar[] variables) { variableArray = variables; @@ -403,6 +405,27 @@ public class SolveAndPrintIntermediateSolutionsSampleSat { private final IntVar[] variableArray; } + static class BestBoundCallback implements Consumer { + public BestBoundCallback() { + bestBound = 0.0; + numImprovements = 0; + } + + @Override + public void accept(Double bound) { + bestBound = bound; + numImprovements++; + } + + public double getBestBound() { + return bestBound; + } + + double bestBound; + int numImprovements; + } + + public static void main(String[] args) throws Exception { Loader.loadNativeLibraries(); // Create the model. 
@@ -425,10 +448,18 @@ public class SolveAndPrintIntermediateSolutionsSampleSat { CpSolver solver = new CpSolver(); VarArraySolutionPrinterWithObjective cb = new VarArraySolutionPrinterWithObjective(new IntVar[] {x, y, z}); - solver.solve(model, cb); + solver.getParameters().setNumWorkers(1); + solver.getParameters().setLinearizationLevel(2); + BestBoundCallback bestBoundCallback = new BestBoundCallback(); - System.out.println(cb.getSolutionCount() + " solutions found."); + solver.setBestBoundCallback(bestBoundCallback); + CpSolverStatus unusedStatus = solver.solve(model, cb); + + System.out.println("solution count: " + cb.getSolutionCount()); + System.out.println("best bound count: " + bestBoundCallback.numImprovements); } + + private SolveAndPrintIntermediateSolutionsSampleSat() {} } ``` @@ -707,6 +738,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; /** Code sample that solves a model and displays all solutions. */ @@ -754,7 +786,7 @@ public class SearchForAllSolutionsSampleSat { // Tell the solver to enumerate all solutions. solver.getParameters().setEnumerateAllSolutions(true); // And solve. - solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); System.out.println(cb.getSolutionCount() + " solutions found."); } @@ -1037,6 +1069,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; /** Code sample that solves a model and displays a small number of solutions. */ @@ -1087,7 +1120,7 @@ public final class StopAfterNSolutionsSampleSat { // Tell the solver to enumerate all solutions. 
solver.getParameters().setEnumerateAllSolutions(true); // And solve. - solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); System.out.println(cb.getSolutionCount() + " solutions found."); if (cb.getSolutionCount() != 5) { diff --git a/ortools/sat/drat_checker.h b/ortools/sat/drat_checker.h index 9069285ab4..903ed504fc 100644 --- a/ortools/sat/drat_checker.h +++ b/ortools/sat/drat_checker.h @@ -228,7 +228,7 @@ class DratChecker { ClauseIndex first_infered_clause_index_; // The problem clauses, followed by the infered clauses. - absl::StrongVector clauses_; + util_intops::StrongVector clauses_; // A content addressable set of the non-deleted clauses in clauses_. After // adding a clause to clauses_, this set can be used to find if the same @@ -255,7 +255,7 @@ class DratChecker { // For each variable, the index of the unit clause that caused its assignment, // or kNoClauseIndex if the variable is not assigned, or was assigned to // falsify the clause that is currently being checked. - absl::StrongVector assignment_source_; + util_intops::StrongVector assignment_source_; // The stack of literals that remain to be assigned to true during boolean // constraint propagation, with high priority (unit clauses which are already @@ -278,7 +278,8 @@ class DratChecker { // satisfied (in more details: if a clause c is contained in // 'watched_literals_[l]' for literal l, then either c is satisfied with // 'assignment_', or l is unassigned or assigned to true). - absl::StrongVector> watched_literals_; + util_intops::StrongVector> + watched_literals_; // The list of clauses with only one literal. 
This is needed for boolean // constraint propagation, in addition to watched literals, because watched diff --git a/ortools/sat/drat_proof_handler.cc b/ortools/sat/drat_proof_handler.cc index 0e2a006a8a..2c91d36c6d 100644 --- a/ortools/sat/drat_proof_handler.cc +++ b/ortools/sat/drat_proof_handler.cc @@ -44,8 +44,9 @@ DratProofHandler::DratProofHandler(bool in_binary_format, File* output, } void DratProofHandler::ApplyMapping( - const absl::StrongVector& mapping) { - absl::StrongVector new_mapping; + const util_intops::StrongVector& + mapping) { + util_intops::StrongVector new_mapping; for (BooleanVariable v(0); v < mapping.size(); ++v) { const BooleanVariable image = mapping[v]; if (image != kNoBooleanVariable) { diff --git a/ortools/sat/drat_proof_handler.h b/ortools/sat/drat_proof_handler.h index 2f583910c0..8b8d9f04f1 100644 --- a/ortools/sat/drat_proof_handler.h +++ b/ortools/sat/drat_proof_handler.h @@ -58,8 +58,8 @@ class DratProofHandler { // // TODO(user): This is exactly the same mecanism as in the SatPostsolver // class. Factor out the code. - void ApplyMapping( - const absl::StrongVector& mapping); + void ApplyMapping(const util_intops::StrongVector& mapping); // This need to be called when new variables are created. void SetNumVariables(int num_variables); @@ -105,7 +105,7 @@ class DratProofHandler { // This mapping will be applied to all clause passed to AddClause() or // DeleteClause() so that they are in term of the original problem. 
- absl::StrongVector reverse_mapping_; + util_intops::StrongVector reverse_mapping_; std::unique_ptr drat_checker_; std::unique_ptr drat_writer_; diff --git a/ortools/sat/feasibility_jump.cc b/ortools/sat/feasibility_jump.cc index 1faca20211..282f674afa 100644 --- a/ortools/sat/feasibility_jump.cc +++ b/ortools/sat/feasibility_jump.cc @@ -28,6 +28,7 @@ #include #include "absl/functional/any_invocable.h" +#include "absl/functional/bind_front.h" #include "absl/functional/function_ref.h" #include "absl/log/check.h" #include "absl/random/bit_gen_ref.h" @@ -42,7 +43,6 @@ #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_model.h" -#include "ortools/sat/restart.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/subsolver.h" #include "ortools/sat/synchronization.h" @@ -57,9 +57,10 @@ namespace { constexpr double kCompoundDiscount = 1. / 1024; } // namespace -JumpTable::JumpTable( - absl::AnyInvocable(int)> compute_jump) - : compute_jump_(std::move(compute_jump)) {} +void JumpTable::SetComputeFunction( + absl::AnyInvocable(int) const> compute_jump) { + compute_jump_ = std::move(compute_jump); +} void JumpTable::RecomputeAll(int num_variables) { deltas_.resize(num_variables); @@ -75,11 +76,7 @@ void JumpTable::SetJump(int var, int64_t delta, double score) { void JumpTable::Recompute(int var) { needs_recomputation_[var] = true; } -bool JumpTable::PossiblyGood(int var) const { - return needs_recomputation_[var] || scores_[var] < 0; -} - -bool JumpTable::JumpIsUpToDate(int var) { +bool JumpTable::JumpIsUpToDate(int var) const { const auto& [delta, score] = compute_jump_(var); if (delta != deltas_[var]) { LOG(ERROR) << "Incorrect delta for var " << var << ": " << deltas_[var] @@ -102,24 +99,37 @@ std::pair JumpTable::GetJump(int var) { return std::make_pair(deltas_[var], scores_[var]); } +SharedLsStates::~SharedLsStates() { + // Do a final collection. 
+ for (int i = 0; i < states_.size(); ++i) { + CollectStatistics(*states_[i].get()); + } + + // Display aggregated states + for (const auto& [options, counters] : options_to_stats_) { + stat_tables_->AddLsStat( + absl::StrCat(name_, "_", options.name()), counters.num_batches, + options_to_num_restarts_[options] + counters.num_perturbations, + counters.num_linear_moves, counters.num_general_moves, + counters.num_compound_moves, counters.num_backtracks, + counters.num_weight_updates, counters.num_scores_computed); + } +} + FeasibilityJumpSolver::~FeasibilityJumpSolver() { stat_tables_->AddTimingStat(*this); - stat_tables_->AddLsStat(name(), num_batches_, num_restarts_, - num_linear_moves_, num_general_moves_, - num_compound_moves_, num_weight_updates_); - - if (!VLOG_IS_ON(1)) return; - std::vector> stats; - stats.push_back({"fs_jump/num_general_moves_computed", num_general_evals_}); - stats.push_back({"fs_jump/num_general_moves_done", num_general_moves_}); - stats.push_back({"fs_jump/num_linear_moves_computed", num_linear_evals_}); - stats.push_back({"fs_jump/num_linear_moves_done", num_linear_moves_}); - stats.push_back({"fs_jump/num_perturbations_applied", num_perturbations_}); - stats.push_back({"fs_jump/num_solutions_imported", num_solutions_imported_}); - stats.push_back({"fs_jump/num_weight_updates", num_weight_updates_}); - shared_stats_->AddStats(stats); } +void FeasibilityJumpSolver::ImportState() { + state_ = states_->GetNextState(); + if (state_->move == nullptr) { + const int num_variables = var_domains_.size(); + state_->move = std::make_unique(num_variables); + } +} + +void FeasibilityJumpSolver::ReleaseState() { states_->Release(state_); } + void FeasibilityJumpSolver::Initialize() { is_initialized_ = true; @@ -137,16 +147,13 @@ void FeasibilityJumpSolver::Initialize() { const int num_variables = linear_model_->model_proto().variables().size(); var_domains_.resize(num_variables); - var_has_two_values_.resize(num_variables); for (int v = 0; v < 
num_variables; ++v) { - var_domains_[v] = - ReadDomainFromProto(linear_model_->model_proto().variables(v)); - var_has_two_values_[v] = var_domains_[v].HasTwoValues(); + var_domains_.Set( + v, ReadDomainFromProto(linear_model_->model_proto().variables(v))); } - vars_to_scan_.reserve(num_variables); - in_vars_to_scan_.assign(num_variables, false); - move_ = - std::make_unique(evaluator_.get(), num_variables); + var_domains_.InitializeObjective(linear_model_->model_proto()); + + vars_to_scan_.ClearAndReserve(num_variables); var_occurs_in_non_linear_constraint_.resize(num_variables); for (int c = 0; c < evaluator_->NumNonLinearConstraints(); ++c) { for (int v : evaluator_->GeneralConstraintToVars(c)) { @@ -217,13 +224,13 @@ int64_t RandomValueNearValue(const Domain& domain, int64_t value, } // namespace -void FeasibilityJumpSolver::ResetCurrentSolution() { +void FeasibilityJumpSolver::ResetCurrentSolution( + bool use_hint, bool use_objective, double perturbation_probability) { const int num_variables = linear_model_->model_proto().variables().size(); - const double default_value_probability = - 1.0 - params_.feasibility_jump_var_randomization_probability(); + const double default_value_probability = 1.0 - perturbation_probability; const double range_ratio = params_.feasibility_jump_var_perburbation_range_ratio(); - std::vector& solution = *evaluator_->mutable_current_solution(); + std::vector& solution = state_->solution; // Resize the solution if needed. solution.resize(num_variables); @@ -235,8 +242,7 @@ void FeasibilityJumpSolver::ResetCurrentSolution() { continue; } - if (num_batches_ == 0 || - absl::Bernoulli(random_, default_value_probability)) { + if (absl::Bernoulli(random_, default_value_probability)) { solution[var] = var_domains_[var].SmallestValue(); } else { solution[var] = @@ -245,8 +251,7 @@ void FeasibilityJumpSolver::ResetCurrentSolution() { } // Use objective half of the time (if the model has one). 
- if (linear_model_->model_proto().has_objective() && - absl::Bernoulli(random_, 0.5)) { + if (use_objective && linear_model_->model_proto().has_objective()) { const int num_terms = linear_model_->model_proto().objective().vars().size(); for (int i = 0; i < num_terms; ++i) { @@ -254,16 +259,14 @@ void FeasibilityJumpSolver::ResetCurrentSolution() { if (var_domains_[var].IsFixed()) continue; if (linear_model_->model_proto().objective().coeffs(i) > 0) { - if (num_batches_ == 0 || - absl::Bernoulli(random_, default_value_probability)) { + if (absl::Bernoulli(random_, default_value_probability)) { solution[var] = var_domains_[var].Min(); } else { solution[var] = RandomValueNearMin(var_domains_[var], range_ratio, random_); } } else { - if (num_batches_ == 0 || - absl::Bernoulli(random_, default_value_probability)) { + if (absl::Bernoulli(random_, default_value_probability)) { solution[var] = var_domains_[var].Max(); } else { solution[var] = @@ -273,8 +276,7 @@ void FeasibilityJumpSolver::ResetCurrentSolution() { } } - // Overwrite with the (partial) hint on the first batch. 
- if (num_batches_ == 0 && linear_model_->model_proto().has_solution_hint()) { + if (use_hint && linear_model_->model_proto().has_solution_hint()) { const auto& hint = linear_model_->model_proto().solution_hint(); for (int i = 0; i < hint.vars().size(); ++i) { solution[hint.vars(i)] = hint.values(i); @@ -282,13 +284,13 @@ void FeasibilityJumpSolver::ResetCurrentSolution() { } } -void FeasibilityJumpSolver::PerturbateCurrentSolution() { +void FeasibilityJumpSolver::PerturbateCurrentSolution( + double perturbation_probability) { + if (perturbation_probability == 0.0) return; const int num_variables = linear_model_->model_proto().variables().size(); - const double perturbation_probability = - params_.feasibility_jump_var_randomization_probability(); const double perturbation_ratio = params_.feasibility_jump_var_perburbation_range_ratio(); - std::vector& solution = *evaluator_->mutable_current_solution(); + std::vector& solution = state_->solution; for (int var = 0; var < num_variables; ++var) { if (var_domains_[var].IsFixed()) continue; if (absl::Bernoulli(random_, perturbation_probability)) { @@ -299,31 +301,23 @@ void FeasibilityJumpSolver::PerturbateCurrentSolution() { } std::string FeasibilityJumpSolver::OneLineStats() const { - // Restarts, perturbations, and solutions imported. - std::string restart_str; - if (num_restarts_ > 1) { - absl::StrAppend(&restart_str, " #restarts:", num_restarts_ - 1); - } - if (num_solutions_imported_ > 0) { - absl::StrAppend(&restart_str, - " #solutions_imported:", num_solutions_imported_); - } - if (num_perturbations_ > 0) { - absl::StrAppend(&restart_str, " #perturbations:", num_perturbations_); - } - // Moves and evaluations in the general iterations. const std::string general_str = - num_general_evals_ == 0 && num_general_moves_ == 0 - ? 
"" - : absl::StrCat(" #gen_moves:", FormatCounter(num_general_moves_), - " #gen_evals:", FormatCounter(num_general_evals_)); - const std::string compound_str = - num_compound_moves_ == 0 && move_->NumBacktracks() == 0 + state_->counters.num_general_evals == 0 && + state_->counters.num_general_moves == 0 ? "" : absl::StrCat( - " #comp_moves:", FormatCounter(num_compound_moves_), - " #backtracks:", FormatCounter(move_->NumBacktracks())); + " gen{mvs:", FormatCounter(state_->counters.num_general_moves), + " evals:", FormatCounter(state_->counters.num_general_evals), + "}"); + const std::string compound_str = + state_->counters.num_compound_moves == 0 && + state_->counters.num_backtracks == 0 + ? "" + : absl::StrCat(" comp{mvs:", + FormatCounter(state_->counters.num_compound_moves), + " btracks:", + FormatCounter(state_->counters.num_backtracks), "}"); // Improving jumps and infeasible constraints. const int num_infeasible_cts = evaluator_->NumInfeasibleConstraints(); @@ -334,11 +328,13 @@ std::string FeasibilityJumpSolver::OneLineStats() const { " #inf_cts:", FormatCounter(evaluator_->NumInfeasibleConstraints())); - return absl::StrCat("batch:", num_batches_, restart_str, - " #lin_moves:", FormatCounter(num_linear_moves_), - " #lin_evals:", FormatCounter(num_linear_evals_), - general_str, compound_str, non_solution_str, - " #weight_updates:", FormatCounter(num_weight_updates_)); + return absl::StrCat( + "batch:", state_->counters.num_batches, + " lin{mvs:", FormatCounter(state_->counters.num_linear_moves), + " evals:", FormatCounter(state_->counters.num_linear_evals), "}", + general_str, compound_str, non_solution_str, + " #w_updates:", FormatCounter(state_->counters.num_weight_updates), + " #perturb:", FormatCounter(state_->counters.num_perturbations)); } std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { @@ -349,170 +345,172 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { // to scan the whole model, so we want to do 
this part in parallel. if (!is_initialized_) Initialize(); - bool should_recompute_violations = false; - bool reset_weights = false; - bool recompute_compound_weights = false; + // Load the next state to work on. + ImportState(); - // In incomplete mode, query the starting solution for the shared response - // manager. - if (type() == SubSolver::INCOMPLETE) { // violation_ls. - // Choose a base solution for this neighborhood. - const SharedSolutionRepository& repo = - shared_response_->SolutionsRepository(); - CHECK_GT(repo.NumSolutions(), 0); - const SharedSolutionRepository::Solution solution = - repo.GetRandomBiasedSolution(random_); - if (solution.rank < last_solution_rank_) { - evaluator_->OverwriteCurrentSolution(solution.variable_values); - should_recompute_violations = true; - reset_weights = true; - - // Update last solution rank. - last_solution_rank_ = solution.rank; - VLOG(2) << name() << " import a solution with value " << solution.rank; - ++num_solutions_imported_; - num_batches_before_perturbation_ = - params_.violation_ls_perturbation_period(); - } else if (num_batches_before_perturbation_ <= 0) { - // TODO(user): Tune the improvement constant, maybe use luby. - num_batches_before_perturbation_ = - params_.violation_ls_perturbation_period(); - ++num_perturbations_; - PerturbateCurrentSolution(); - should_recompute_violations = true; - reset_weights = true; + // If we found a new best solution, we will restart all violation ls (we + // still finish each batch though). We will also reset the luby sequence. + bool new_best_solution_was_found = false; + if (type() == SubSolver::INCOMPLETE) { + const int64_t best = + shared_response_->SolutionsRepository().GetBestRank(); + if (best < state_->last_solution_rank) { + states_->ResetLubyCounter(); + new_best_solution_was_found = true; + state_->last_solution_rank = best; } - } else { // feasibility_jump. - // Restart? Note that we always "restart" the first time. 
- const double dtime = evaluator_->DeterministicTime(); - if (dtime >= dtime_restart_threshold_ && - num_weight_updates_ >= update_restart_threshold_) { - if (num_restarts_ == 0 || params_.feasibility_jump_enable_restarts()) { - ++num_restarts_; - ResetCurrentSolution(); - should_recompute_violations = true; - reset_weights = true; - } else if (params_.feasibility_jump_var_randomization_probability() > - 0.0) { - ++num_perturbations_; - PerturbateCurrentSolution(); - should_recompute_violations = true; - reset_weights = true; - } + } - // We use luby restart with a base of 1 deterministic unit. - // We also block the restart if there was not enough weight update. - // Note that we only restart between batches too. - // - // TODO(user): Ideally batch should use deterministic time too so we - // can just use number of batch for the luby restart. - // TODO(user): Maybe have one worker with very low restart - // rate. - const int weight = - std::max(1, params_.feasibility_jump_restart_factor()); - dtime_restart_threshold_ = - dtime + weight * SUniv(num_restarts_ + num_perturbations_); - update_restart_threshold_ = num_weight_updates_ + 10 * weight; + bool reset_weights = false; + if (new_best_solution_was_found || state_->num_batches_before_change <= 0) { + reset_weights = true; + if (state_->options.use_restart) { + states_->CollectStatistics(*state_); + state_->options.Randomize(params_, &random_); + state_->counters = LsCounters(); // Reset. + } else { + state_->options.Randomize(params_, &random_); + } + if (type() == SubSolver::INCOMPLETE) { + // This is not used once we have a solution, and setting it to false + // allow to fix the logs. + state_->options.use_objective = false; + } + + const bool first_time = (state_->num_restarts == 0); + if (state_->options.use_restart || first_time || + new_best_solution_was_found) { + if (type() == SubSolver::INCOMPLETE) { + // Choose a base solution for this neighborhood. 
+ const SharedSolutionRepository::Solution solution = + shared_response_->SolutionsRepository().GetRandomBiasedSolution( + random_); + state_->solution = solution.variable_values; + ++state_->num_solutions_imported; + } else { + ResetCurrentSolution(/*use_hint=*/first_time, + state_->options.use_objective, + state_->options.perturbation_probability); + } + } else { + PerturbateCurrentSolution( + params_.feasibility_jump_var_randomization_probability()); + } + + if (state_->options.use_restart) { + ++state_->num_restarts; + states_->ConfigureNextLubyRestart(state_); + } else { + // TODO(user): Tune the improvement constant, maybe use luby. + ++state_->counters.num_perturbations; + state_->num_batches_before_change = + params_.violation_ls_perturbation_period(); } } // Between chunk, we synchronize bounds. + bool recompute_compound_weights = false; if (linear_model_->model_proto().has_objective()) { const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound(); const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound(); + if (lb != state_->saved_inner_objective_lb || + ub != state_->saved_inner_objective_ub) { + recompute_compound_weights = true; + } + state_->saved_inner_objective_lb = lb; + state_->saved_inner_objective_ub = ub; + if (ub < lb) return; // Search is finished. if (evaluator_->ReduceObjectiveBounds(lb.value(), ub.value())) { - should_recompute_violations = true; - } - } - - // Update the variable domains with the last information. - // It is okay to be in O(num_variables) here since we only do that between - // chunks. - if (shared_bounds_ != nullptr) { - shared_bounds_->UpdateDomains(&var_domains_); - for (int var = 0; var < var_domains_.size(); ++var) { - // We abort if the problem is trivially UNSAT. This might happen while - // we are cleaning up all workers at the end of a search. 
- if (var_domains_[var].IsEmpty()) return; - var_has_two_values_[var] = var_domains_[var].HasTwoValues(); - } - } - - // Checks the current solution is compatible with updated domains. - { - // Make sure the solution is within the potentially updated domain. - std::vector& current_solution = - *evaluator_->mutable_current_solution(); - for (int var = 0; var < current_solution.size(); ++var) { - const int64_t old_value = current_solution[var]; - const int64_t new_value = var_domains_[var].ClosestValue(old_value); - if (new_value != old_value) { - current_solution[var] = new_value; - should_recompute_violations = true; - } - } - // Check if compound move search might backtrack out of the new domains. - if (!move_->StackValuesInDomains(var_domains_)) { recompute_compound_weights = true; } } - if (should_recompute_violations) { - evaluator_->ComputeAllViolations(); - recompute_compound_weights = true; + // Update the variable domains with the last information. + if (!var_domains_.UpdateFromSharedBounds()) return; + + // Checks the current solution is compatible with updated domains. + { + // Make sure the solution is within the potentially updated domain. + // This also initialize var_domains_.CanIncrease()/CanDecrease(). + const int num_vars = state_->solution.size(); + for (int var = 0; var < num_vars; ++var) { + const int64_t old_value = state_->solution[var]; + const int64_t new_value = var_domains_[var].ClosestValue(old_value); + if (new_value != old_value) { + state_->solution[var] = new_value; + recompute_compound_weights = true; + } + var_domains_.OnValueChange(var, new_value); + } + // Check if compound move search might backtrack out of the new domains. + if (!state_->move->StackValuesInDomains(var_domains_.AsSpan())) { + recompute_compound_weights = true; + } } + + // Search for feasible solution. + // We always recompute that since we might have loaded from a different + // state. 
+ evaluator_->ComputeAllViolations(state_->solution); + if (reset_weights) { - // Each time we reset the weight, we randomly choose if we do decay or - // not. - bump_value_ = 1.0; - weights_.assign(evaluator_->NumEvaluatorConstraints(), 1.0); - use_decay_ = absl::Bernoulli(random_, 0.5); - use_compound_moves_ = absl::Bernoulli( - random_, params_.violation_ls_compound_move_probability()); + state_->bump_value = 1.0; + state_->weights.assign(evaluator_->NumEvaluatorConstraints(), 1.0); recompute_compound_weights = true; } if (recompute_compound_weights) { - move_->Clear(); - if (use_compound_moves_) { - compound_weights_.assign(weights_.begin(), weights_.end()); - for (int c = 0; c < weights_.size(); ++c) { + state_->move->Clear(); + if (state_->options.use_compound_moves) { + state_->compound_weights.assign(state_->weights.begin(), + state_->weights.end()); + for (int c = 0; c < state_->weights.size(); ++c) { if (evaluator_->IsViolated(c)) continue; - compound_weights_[c] *= kCompoundDiscount; + state_->compound_weights[c] *= kCompoundDiscount; } - compound_weight_changed_.clear(); - in_compound_weight_changed_.assign(weights_.size(), false); - compound_move_max_discrepancy_ = 0; + state_->compound_weight_changed.clear(); + state_->in_compound_weight_changed.assign(state_->weights.size(), + false); + state_->compound_move_max_discrepancy = 0; } } - // Search for feasible solution. - ++num_batches_; + + if (!state_->options.use_compound_moves) { + DCHECK_EQ(state_->move->Size(), 0); + } + + ++state_->counters.num_batches; if (DoSomeLinearIterations() && DoSomeGeneralIterations()) { // Checks for infeasibility induced by the non supported constraints. 
- if (SolutionIsFeasible(linear_model_->model_proto(), - evaluator_->current_solution())) { + if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) { shared_response_->NewSolution( - evaluator_->current_solution(), - absl::StrCat(name(), "(", OneLineStats(), ")")); - num_batches_before_perturbation_ = - params_.violation_ls_perturbation_period(); + state_->solution, absl::StrCat(name(), "_", state_->options.name(), + "(", OneLineStats(), ")")); } else { shared_response_->LogMessage(name(), "infeasible solution. Aborting."); model_is_supported_ = false; } - } else { - --num_batches_before_perturbation_; } // Update dtime. // Since we execute only one task at the time, this is safe. { - const double dtime = evaluator_->DeterministicTime(); - const double delta = dtime - deterministic_time(); - AddTaskDeterministicDuration(delta); - shared_time_limit_->AdvanceDeterministicTime(delta); + // TODO(user): Find better names. DeterministicTime() is maintained by + // this class while deterministic_time() is the one saved in the SubSolver + // base class). + const double current_dtime = DeterministicTime(); + const double delta = current_dtime - deterministic_time(); + + // Because deterministic_time() is computed with a sum of difference, it + // might be slighlty different than DeterministicTime() and we don't want + // to go backward, even by 1e-18. + if (delta >= 0) { + AddTaskDeterministicDuration(delta); + shared_time_limit_->AdvanceDeterministicTime(delta); + } } + ReleaseState(); task_generated_ = false; // Atomic. 
}; } @@ -520,31 +518,26 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { double FeasibilityJumpSolver::ComputeScore(absl::Span weights, int var, int64_t delta, bool linear_only) { - ++num_scores_computed_; + ++state_->counters.num_scores_computed; + double score = evaluator_->WeightedViolationDelta( + linear_only, weights, var, delta, absl::MakeSpan(state_->solution)); constexpr double kEpsilon = 1.0 / std::numeric_limits::max(); - double score = - evaluator_->LinearEvaluator().WeightedViolationDelta(weights, var, delta); - if (!linear_only) { - score += evaluator_->WeightedNonLinearViolationDelta(weights, var, delta); - } - score += kEpsilon * evaluator_->ObjectiveDelta(var, delta); + score += kEpsilon * delta * evaluator_->ObjectiveCoefficient(var); return score; } std::pair FeasibilityJumpSolver::ComputeLinearJump(int var) { - const std::vector& solution = evaluator_->current_solution(); - if (var_domains_[var].IsFixed()) { - return std::make_pair(0l, 0.0); - } + DCHECK(!var_domains_[var].IsFixed()); + const int64_t current_value = state_->solution[var]; - ++num_linear_evals_; + ++state_->counters.num_linear_evals; const LinearIncrementalEvaluator& linear_evaluator = evaluator_->LinearEvaluator(); - if (var_has_two_values_[var]) { + if (var_domains_.HasTwoValues(var)) { const int64_t min_value = var_domains_[var].Min(); const int64_t max_value = var_domains_[var].Max(); - const int64_t delta = solution[var] == min_value ? max_value - min_value + const int64_t delta = current_value == min_value ? max_value - min_value : min_value - max_value; return std::make_pair( delta, ComputeScore(ScanWeights(), var, delta, /*linear_only=*/true)); @@ -555,17 +548,17 @@ std::pair FeasibilityJumpSolver::ComputeLinearJump(int var) { // queries! // // Tricky/Annoying: if the value is not in the domain, we returns it. 
- const int64_t p1 = var_domains_[var].ValueAtOrBefore(solution[var] - 1); - const int64_t p2 = var_domains_[var].ValueAtOrAfter(solution[var] + 1); + const int64_t p1 = var_domains_[var].ValueAtOrBefore(current_value - 1); + const int64_t p2 = var_domains_[var].ValueAtOrAfter(current_value + 1); std::pair best_jump; const double v1 = var_domains_[var].Contains(p1) - ? ComputeScore(ScanWeights(), var, p1 - solution[var], + ? ComputeScore(ScanWeights(), var, p1 - current_value, /*linear_only=*/true) : std::numeric_limits::infinity(); if (v1 < 0.0) { // Point p1 is improving. Look for best before it. - // Note that we can exclude all point after solution[var] since it is + // Note that we can exclude all point after current_value since it is // worse and we assume convexity. const Domain dom = var_domains_[var].IntersectionWith( Domain(std::numeric_limits::min(), p1 - 1)); @@ -573,17 +566,17 @@ std::pair FeasibilityJumpSolver::ComputeLinearJump(int var) { best_jump = {p1, v1}; } else { tmp_breakpoints_ = - linear_evaluator.SlopeBreakpoints(var, solution[var], dom); + linear_evaluator.SlopeBreakpoints(var, current_value, dom); best_jump = ConvexMinimum( /*is_to_the_right=*/true, {p1, v1}, tmp_breakpoints_, - [this, var, &solution](int64_t jump_value) { - return ComputeScore(ScanWeights(), var, jump_value - solution[var], + [this, var, current_value](int64_t jump_value) { + return ComputeScore(ScanWeights(), var, jump_value - current_value, /*linear_only=*/true); }); } } else { const double v2 = var_domains_[var].Contains(p2) - ? ComputeScore(ScanWeights(), var, p2 - solution[var], + ? 
ComputeScore(ScanWeights(), var, p2 - current_value, /*linear_only=*/true) : std::numeric_limits::infinity(); if (v2 < 0.0) { @@ -595,12 +588,12 @@ std::pair FeasibilityJumpSolver::ComputeLinearJump(int var) { best_jump = {p2, v2}; } else { tmp_breakpoints_ = - linear_evaluator.SlopeBreakpoints(var, solution[var], dom); + linear_evaluator.SlopeBreakpoints(var, current_value, dom); best_jump = ConvexMinimum( /*is_to_the_right=*/false, {p2, v2}, tmp_breakpoints_, - [this, var, &solution](int64_t jump_value) { + [this, var, current_value](int64_t jump_value) { return ComputeScore(ScanWeights(), var, - jump_value - solution[var], + jump_value - current_value, /*linear_only=*/true); }); } @@ -616,8 +609,8 @@ std::pair FeasibilityJumpSolver::ComputeLinearJump(int var) { } } } - DCHECK_NE(best_jump.first, solution[var]); - return std::make_pair(best_jump.first - solution[var], best_jump.second); + DCHECK_NE(best_jump.first, current_value); + return std::make_pair(best_jump.first - current_value, best_jump.second); } std::pair FeasibilityJumpSolver::ComputeGeneralJump(int var) { @@ -627,23 +620,21 @@ std::pair FeasibilityJumpSolver::ComputeGeneralJump(int var) { Domain domain = var_domains_[var]; if (domain.IsFixed()) return std::make_pair(0, 0.0); - ++num_general_evals_; - const int64_t current_value = evaluator_->current_solution()[var]; + ++state_->counters.num_general_evals; + const int64_t current_value = state_->solution[var]; domain = domain.IntersectionWith( Domain(current_value, current_value).Complement()); - std::pair result = RangeConvexMinimum( - domain[0].start - current_value, domain[0].end - current_value + 1, - [&](int64_t delta) -> double { - return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false); - }); - for (int i = 1; i < domain.NumIntervals(); ++i) { + std::pair result; + for (int i = 0; i < domain.NumIntervals(); ++i) { const int64_t min_delta = domain[i].start - current_value; const int64_t max_delta = domain[i].end - current_value; 
const auto& [delta, score] = RangeConvexMinimum( min_delta, max_delta + 1, [&](int64_t delta) -> double { return ComputeScore(ScanWeights(), var, delta, /*linear_only=*/false); }); - if (score < result.second) result = std::make_pair(delta, score); + if (i == 0 || score < result.second) { + result = std::make_pair(delta, score); + } } DCHECK(domain.Contains(current_value + result.first)) << current_value << "+" << result.first << " not in domain " @@ -651,8 +642,8 @@ std::pair FeasibilityJumpSolver::ComputeGeneralJump(int var) { return result; } -void FeasibilityJumpSolver::UpdateViolatedConstraintWeights(JumpTable& jumps) { - ++num_weight_updates_; +void FeasibilityJumpSolver::UpdateViolatedConstraintWeights() { + ++state_->counters.num_weight_updates; // Because we update the weight incrementally, it is better to not have a // super high magnitude, otherwise doing +max_weight and then -max_weight @@ -661,29 +652,38 @@ void FeasibilityJumpSolver::UpdateViolatedConstraintWeights(JumpTable& jumps) { const double kMaxWeight = 1e10; const double kBumpFactor = 1.0 / params_.feasibility_jump_decay(); const int num_variables = var_domains_.size(); - if (use_decay_) { - bump_value_ *= kBumpFactor; + if (state_->options.use_decay) { + state_->bump_value *= kBumpFactor; } // Note that ViolatedConstraints() might contain only linear constraint // depending on how it was initialized and updated. 
bool rescale = false; + num_ops_ += evaluator_->ViolatedConstraints().size(); for (const int c : evaluator_->ViolatedConstraints()) { DCHECK(evaluator_->IsViolated(c)); - if (use_compound_moves_) DCHECK_EQ(compound_weights_[c], weights_[c]); - weights_[c] += bump_value_; - if (use_compound_moves_) compound_weights_[c] = weights_[c]; - if (weights_[c] > kMaxWeight) rescale = true; + if (state_->options.use_compound_moves) { + DCHECK_EQ(state_->compound_weights[c], state_->weights[c]); + } + state_->weights[c] += state_->bump_value; + if (state_->options.use_compound_moves) { + state_->compound_weights[c] = state_->weights[c]; + } + if (state_->weights[c] > kMaxWeight) { + rescale = true; + } } if (rescale) { const double factor = 1.0 / kMaxWeight; - bump_value_ *= factor; - for (int c = 0; c < weights_.size(); ++c) { - weights_[c] *= factor; - if (use_compound_moves_) compound_weights_[c] *= factor; + state_->bump_value *= factor; + for (int c = 0; c < state_->weights.size(); ++c) { + state_->weights[c] *= factor; + if (state_->options.use_compound_moves) { + state_->compound_weights[c] *= factor; + } } - jumps.RecomputeAll(num_variables); + jumps_.RecomputeAll(num_variables); return; } @@ -697,33 +697,35 @@ void FeasibilityJumpSolver::UpdateViolatedConstraintWeights(JumpTable& jumps) { evaluator_->MutableLinearEvaluator(); linear_evaluator->ClearAffectedVariables(); for_weight_update_.resize(num_variables); + num_ops_ += evaluator_->ViolatedConstraints().size(); for (const int c : evaluator_->ViolatedConstraints()) { if (c < evaluator_->NumLinearConstraints()) { linear_evaluator->UpdateScoreOnWeightUpdate( - c, jumps.Deltas(), absl::MakeSpan(for_weight_update_)); + c, jumps_.Deltas(), absl::MakeSpan(for_weight_update_)); } else { for (const int v : evaluator_->ConstraintToVars(c)) { - jumps.Recompute(v); - AddVarToScan(jumps, v); + jumps_.Recompute(v); + AddVarToScan(v); } } } // Recompute the affected jumps. // Note that the constraint violations are unaffected. 
+ absl::Span scores = jumps_.MutableScores(); for (const int var : linear_evaluator->VariablesAffectedByLastUpdate()) { // Apply the delta. // // TODO(user): We could compute the minimal bump that would lead to a // good move. That might change depending on the jump value though, so // we can only do that easily for Booleans. - if (!var_has_two_values_[var]) { - jumps.Recompute(var); + if (!var_domains_.HasTwoValues(var)) { + jumps_.Recompute(var); } else { // We may know the correct score for binary vars. - jumps.MutableScores()[var] += bump_value_ * for_weight_update_[var]; + scores[var] += state_->bump_value * for_weight_update_[var]; } - AddVarToScan(jumps, var); + AddVarToScan(var); } } @@ -734,45 +736,46 @@ bool FeasibilityJumpSolver::DoSomeLinearIterations() { // TODO(user): It should be possible to support compound moves with // the specialized linear code, but lets keep it simpler for now. - if (use_compound_moves_) return true; + if (state_->options.use_compound_moves) return true; evaluator_->RecomputeViolatedList(/*linear_only=*/true); - RecomputeVarsToScan(linear_jumps_); + jumps_.SetComputeFunction( + absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this)); + RecomputeVarsToScan(); - // Do a batch of a given number of loop here. + // Do a batch of a given dtime. // Outer loop: when no more greedy moves, update the weight. - const int kBatchSize = 10000; - const std::vector& solution = evaluator_->current_solution(); - for (int loop = 0; loop < kBatchSize; ++loop) { + const double dtime_threshold = + DeterministicTime() + params_.feasibility_jump_batch_dtime(); + while (DeterministicTime() < dtime_threshold) { // Inner loop: greedy descent. - for (; loop < kBatchSize; ++loop) { + while (DeterministicTime() < dtime_threshold) { // Take the best jump score amongst some random candidates. // It is okay if we pick twice the same, we don't really care. 
int best_var; int64_t best_value; double best_score; - if (!ScanRelevantVariables(/*num_to_scan=*/5, linear_jumps_, &best_var, - &best_value, &best_score)) { + if (!ScanRelevantVariables(/*num_to_scan=*/5, &best_var, &best_value, + &best_score)) { break; } - const int64_t current_value = solution[best_var]; // Perform the move. - ++num_linear_moves_; - evaluator_->UpdateLinearScores(best_var, best_value, weights_, - linear_jumps_.Deltas(), - linear_jumps_.MutableScores()); - evaluator_->UpdateVariableValue(best_var, best_value); + ++state_->counters.num_linear_moves; + const int64_t prev_value = state_->solution[best_var]; + state_->solution[best_var] = best_value; + evaluator_->UpdateLinearScores(best_var, prev_value, best_value, + state_->weights, jumps_.Deltas(), + jumps_.MutableScores()); + evaluator_->UpdateViolatedList(); + var_domains_.OnValueChange(best_var, best_value); - if (var_has_two_values_[best_var]) { + MarkJumpsThatNeedToBeRecomputed(best_var); + if (var_domains_.HasTwoValues(best_var)) { // We already know the score of undoing the move we just did, and that // this is optimal. - linear_jumps_.SetJump(best_var, current_value - best_value, - -best_score); - } else { - linear_jumps_.Recompute(best_var); + jumps_.SetJump(best_var, prev_value - best_value, -best_score); } - MarkJumpsThatNeedToBeRecomputed(best_var, linear_jumps_); } if (time_limit_crossed_) return false; @@ -780,7 +783,7 @@ bool FeasibilityJumpSolver::DoSomeLinearIterations() { if (vars_to_scan_.empty()) { // Note that we only count linear constraint as violated here. if (evaluator_->ViolatedConstraints().empty()) return true; - UpdateViolatedConstraintWeights(linear_jumps_); + UpdateViolatedConstraintWeights(); } } return false; @@ -799,159 +802,184 @@ bool FeasibilityJumpSolver::DoSomeLinearIterations() { // TODO(user): For non-Boolean, we could easily detect if a non-improving // score cannot become improving. We don't need to add such variable to // the queue. 
-void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var, - JumpTable& jumps) { - for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) { - if (var != changed_var && !var_has_two_values_[var]) { - jumps.Recompute(var); +void FeasibilityJumpSolver::MarkJumpsThatNeedToBeRecomputed(int changed_var) { + // To keep DCHECKs happy. Note that we migh overwrite this afterwards with the + // known score/jump of undoing the move. + jumps_.Recompute(changed_var); + + // Generic part. + // No optimization there, we just update all touched variables. + // We need to do this before the Linear part, so that the status is correct in + // AddVarToScan() for variable with two values. + num_ops_ += evaluator_->VarToGeneralConstraints(changed_var).size(); + for (const int c : evaluator_->VarToGeneralConstraints(changed_var)) { + num_ops_ += evaluator_->GeneralConstraintToVars(c).size(); + for (const int var : evaluator_->GeneralConstraintToVars(c)) { + jumps_.Recompute(var); + AddVarToScan(var); } - AddVarToScan(jumps, var); } - for (const auto& [c, violation_delta] : - evaluator_->last_update_violation_changes()) { - if (c < evaluator_->NumLinearConstraints()) continue; - for (const int var : evaluator_->ConstraintToVars(c)) { - if (var != changed_var) { - jumps.Recompute(var); - } - AddVarToScan(jumps, var); + + // Linear part. + num_ops_ += evaluator_->VariablesAffectedByLastLinearUpdate().size(); + for (const int var : evaluator_->VariablesAffectedByLastLinearUpdate()) { + if (!var_domains_.HasTwoValues(var)) { + jumps_.Recompute(var); } + AddVarToScan(var); } } bool FeasibilityJumpSolver::DoSomeGeneralIterations() { - if (!use_compound_moves_ && evaluator_->NumNonLinearConstraints() == 0) { + if (!state_->options.use_compound_moves && + evaluator_->NumNonLinearConstraints() == 0) { return true; } - const std::vector& solution = evaluator_->current_solution(); // Non-linear constraints are not evaluated in the linear phase. 
- evaluator_->UpdateAllNonLinearViolations(); + evaluator_->ComputeAllNonLinearViolations(state_->solution); evaluator_->RecomputeViolatedList(/*linear_only=*/false); - RecomputeVarsToScan(general_jumps_); - auto effort = [&]() { - return num_scores_computed_ + num_weight_updates_ + num_general_moves_; - }; - const int64_t effort_limit = effort() + 100000; - while (effort() < effort_limit) { + if (evaluator_->NumNonLinearConstraints() == 0) { + jumps_.SetComputeFunction( + absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this)); + } else { + jumps_.SetComputeFunction( + absl::bind_front(&FeasibilityJumpSolver::ComputeGeneralJump, this)); + } + RecomputeVarsToScan(); + + const double dtime_threshold = + DeterministicTime() + params_.feasibility_jump_batch_dtime(); + while (DeterministicTime() < dtime_threshold) { int var; - int64_t value; + int64_t new_value; double score; const bool found_move = ScanRelevantVariables( - /*num_to_scan=*/3, general_jumps_, &var, &value, &score); + /*num_to_scan=*/3, &var, &new_value, &score); const bool backtrack = - !found_move && move_->Backtrack(&var, &value, &score); + !found_move && state_->move->Backtrack(&var, &new_value, &score); if (found_move || backtrack) { + if (backtrack) ++state_->counters.num_backtracks; + DCHECK_NE(var, -1) << var << " " << found_move << " " << backtrack; + // Perform the move. - ++num_general_moves_; - CHECK_NE(var, -1) << var << " " << found_move << " " << backtrack; - const int64_t prev_value = solution[var]; - DCHECK_NE(prev_value, value); - // Update the linear part. - evaluator_->UpdateLinearScores(var, value, ScanWeights(), - general_jumps_.Deltas(), - general_jumps_.MutableScores()); - // Update the non-linear part. Note it also commits the move. 
- evaluator_->UpdateNonLinearViolations(var, value); - evaluator_->UpdateVariableValue(var, value); - if (use_compound_moves_ && !backtrack) { + ++state_->counters.num_general_moves; + const int64_t prev_value = state_->solution[var]; + DCHECK_NE(prev_value, new_value); + state_->solution[var] = new_value; + + // Update the linear part and non-linear part. + evaluator_->UpdateLinearScores(var, prev_value, new_value, ScanWeights(), + jumps_.Deltas(), jumps_.MutableScores()); + evaluator_->UpdateNonLinearViolations(var, prev_value, state_->solution); + evaluator_->UpdateViolatedList(); + var_domains_.OnValueChange(var, new_value); + + if (state_->options.use_compound_moves && !backtrack) { // `!backtrack` is just an optimisation - we can never break any new // constraints on backtrack, so we can never change any // compound_weight_. - for (const auto& [c, violation_delta] : - evaluator_->last_update_violation_changes()) { - if (violation_delta == 0) continue; + for (const auto& c : evaluator_->last_update_violation_changes()) { if (evaluator_->IsViolated(c) && - compound_weights_[c] != weights_[c]) { - compound_weights_[c] = weights_[c]; - if (!in_compound_weight_changed_[c]) { - in_compound_weight_changed_[c] = true; - compound_weight_changed_.push_back(c); + state_->compound_weights[c] != state_->weights[c]) { + state_->compound_weights[c] = state_->weights[c]; + if (!state_->in_compound_weight_changed[c]) { + state_->in_compound_weight_changed[c] = true; + state_->compound_weight_changed.push_back(c); } for (const int v : evaluator_->ConstraintToVars(c)) { - general_jumps_.Recompute(v); - // Vars will be added in MarkJumpsThatNeedToBeRecomputed. + jumps_.Recompute(v); + // Vars will be added in MarkJumpsThatNeedToBeRecomputed(). 
} } else if (!evaluator_->IsViolated(c) && - !in_compound_weight_changed_[c] && - compound_weights_[c] == weights_[c]) { - in_compound_weight_changed_[c] = true; - compound_weight_changed_.push_back(c); + !state_->in_compound_weight_changed[c] && + state_->compound_weights[c] == state_->weights[c]) { + state_->in_compound_weight_changed[c] = true; + state_->compound_weight_changed.push_back(c); } } } - if (!use_decay_) { - // Check that the score for undoing the move is -score with both the - // default weights (which may be `weights_` or `compound_weights_`), and - // with `weights_` explicitly. - DCHECK_EQ(-score, - ComputeScore(ScanWeights(), var, prev_value - value, false)); - DCHECK_EQ(-score, - ComputeScore(weights_, var, prev_value - value, false)); + + // Check that the score for undoing the move is -score with both the + // default weights (which may be `state_->weights` or + // `state_->compound_weights`), and with `weights` explicitly. + if (!state_->options.use_decay) { + DCHECK_EQ(-score, ComputeScore(state_->weights, var, + prev_value - new_value, false)); + DCHECK_EQ(-score, ComputeScore(ScanWeights(), var, + prev_value - new_value, false)); } - if (var_has_two_values_[var]) { + + MarkJumpsThatNeedToBeRecomputed(var); + if (var_domains_.HasTwoValues(var)) { // We already know the score of the only possible move (undoing what we // just did). - general_jumps_.SetJump(var, prev_value - value, -score); - } else { - general_jumps_.Recompute(var); + jumps_.SetJump(var, prev_value - new_value, -score); + DCHECK(state_->options.use_decay || jumps_.JumpIsUpToDate(var)); } - MarkJumpsThatNeedToBeRecomputed(var, general_jumps_); - if (use_compound_moves_ && !backtrack) { + + if (state_->options.use_compound_moves && !backtrack) { // Make sure we can undo the move. 
- move_->Push(var, prev_value, score); - if (move_->Score() < 0) { - num_compound_moves_ += move_->Size(); - move_->Clear(); - compound_move_max_discrepancy_ = 0; + DCHECK_NE(prev_value, state_->solution[var]); + state_->move->Push(var, prev_value, score); + if (state_->move->Score() < 0) { + state_->counters.num_compound_moves += state_->move->Size(); + state_->move->Clear(); + state_->compound_move_max_discrepancy = 0; } } continue; } else if (time_limit_crossed_) { return false; } - DCHECK_EQ(move_->Size(), 0); + + DCHECK_EQ(state_->move->Size(), 0); if (evaluator_->ViolatedConstraints().empty()) return true; - if (use_compound_moves_) ResetChangedCompoundWeights(); - if (!use_compound_moves_ || ++compound_move_max_discrepancy_ > 2) { - compound_move_max_discrepancy_ = 0; - UpdateViolatedConstraintWeights(general_jumps_); + if (state_->options.use_compound_moves) { + ResetChangedCompoundWeights(); + } + if (!state_->options.use_compound_moves || + ++state_->compound_move_max_discrepancy > 2) { + state_->compound_move_max_discrepancy = 0; + UpdateViolatedConstraintWeights(); } } return false; } void FeasibilityJumpSolver::ResetChangedCompoundWeights() { - if (!use_compound_moves_) return; - DCHECK_EQ(move_->Size(), 0); - for (const int c : compound_weight_changed_) { - in_compound_weight_changed_[c] = false; + if (!state_->options.use_compound_moves) return; + DCHECK_EQ(state_->move->Size(), 0); + num_ops_ += state_->compound_weight_changed.size(); + for (const int c : state_->compound_weight_changed) { + state_->in_compound_weight_changed[c] = false; const double expected_weight = - (evaluator_->IsViolated(c) ? 1.0 : kCompoundDiscount) * weights_[c]; - if (compound_weights_[c] == expected_weight) continue; - compound_weights_[c] = expected_weight; + (evaluator_->IsViolated(c) ? 
1.0 : kCompoundDiscount) * + state_->weights[c]; + if (state_->compound_weights[c] == expected_weight) continue; + state_->compound_weights[c] = expected_weight; + num_ops_ += evaluator_->ConstraintToVars(c).size(); for (const int var : evaluator_->ConstraintToVars(c)) { - general_jumps_.Recompute(var); - AddVarToScan(general_jumps_, var); + jumps_.Recompute(var); + AddVarToScan(var); } } - compound_weight_changed_.clear(); + state_->compound_weight_changed.clear(); } bool FeasibilityJumpSolver::ShouldExtendCompoundMove(double score, double novelty) { - if (move_->Score() + score - std::max(novelty, 0.0) < 0) { + if (state_->move->Score() + score - std::max(novelty, 0.0) < 0) { return true; } - return score < move_->BestChildScore(); + return score < state_->move->BestChildScore(); } bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan, - JumpTable& jumps, int* best_var, int64_t* best_value, double* best_score) { if (time_limit_crossed_) return false; - if (move_->Discrepancy() > compound_move_max_discrepancy_) { + if (state_->move->Discrepancy() > state_->compound_move_max_discrepancy) { return false; } double best_scan_score = 0.0; @@ -969,40 +997,40 @@ bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan, } }; while (!vars_to_scan_.empty() && num_good < num_to_scan) { + num_ops_ += 6; // We are slow here. 
const int index = absl::Uniform(random_, 0, vars_to_scan_.size()); const int var = vars_to_scan_[index]; DCHECK_GE(var, 0); DCHECK(in_vars_to_scan_[var]); - if (!ShouldScan(jumps, var)) { + if (!ShouldScan(var)) { remove_var_to_scan_at_index(index); continue; } - const auto [delta, scan_score] = jumps.GetJump(var); - if ((num_general_evals_ + num_linear_evals_) % 100 == 0 && + const auto [delta, scan_score] = jumps_.GetJump(var); + if ((state_->counters.num_general_evals + + state_->counters.num_linear_evals) % + 100 == + 0 && shared_time_limit_ != nullptr && shared_time_limit_->LimitReached()) { time_limit_crossed_ = true; return false; } - const int64_t current_value = evaluator_->current_solution()[var]; + const int64_t current_value = state_->solution[var]; DCHECK(var_domains_[var].Contains(current_value + delta)) << var << " " << current_value << "+" << delta << " not in " << var_domains_[var].ToString(); DCHECK(!var_domains_[var].IsFixed()); - // Note that this will likely fail if you use decaying weights as they - // will have large magnitudes and the incremental update will be - // imprecise. - DCHECK(use_decay_ || jumps.JumpIsUpToDate(var)) - << var << " " << var_domains_[var].ToString() << " " - << ComputeScore(ScanWeights(), var, delta, (&jumps == &linear_jumps_)); if (scan_score >= 0) { remove_var_to_scan_at_index(index); continue; } double score = scan_score; - if (use_compound_moves_) { + if (state_->options.use_compound_moves) { // We only use compound moves in general iterations. 
- score = ComputeScore(weights_, var, delta, /*linear_only=*/false); + score = ComputeScore( + state_->weights, var, delta, + /*linear_only=*/!var_occurs_in_non_linear_constraint_[var]); if (!ShouldExtendCompoundMove(score, score - scan_score)) { remove_var_to_scan_at_index(index); continue; @@ -1011,7 +1039,7 @@ bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan, ++num_good; if (scan_score < best_scan_score) { - CHECK_NE(delta, 0) << score; + DCHECK_NE(delta, 0) << score; *best_var = var; *best_value = current_value + delta; *best_score = score; @@ -1029,34 +1057,69 @@ bool FeasibilityJumpSolver::ScanRelevantVariables(int num_to_scan, return false; } -void FeasibilityJumpSolver::AddVarToScan(const JumpTable& jumps, int var) { +void FeasibilityJumpSolver::AddVarToScan(int var) { DCHECK_GE(var, 0); - if (in_vars_to_scan_[var] || !ShouldScan(jumps, var)) return; + if (in_vars_to_scan_[var]) return; + if (!ShouldScan(var)) return; vars_to_scan_.push_back(var); in_vars_to_scan_[var] = true; } -bool FeasibilityJumpSolver::ShouldScan(const JumpTable& jumps, int var) const { +bool FeasibilityJumpSolver::ShouldScan(int var) const { DCHECK_GE(var, 0); - if (var_domains_[var].IsFixed()) return false; - if (!jumps.PossiblyGood(var)) return false; - if (move_->OnStack(var)) return false; - if (evaluator_->NumViolatedConstraintsForVar(var) > 0) return true; - const int64_t value = evaluator_->current_solution()[var]; + + if (state_->move->OnStack(var)) return false; + + if (!jumps_.NeedRecomputation(var)) { + // We already have the score/jump of that variable. + // + // Note that the DCHECK will likely fail if you use decaying weights as they + // will have large magnitudes and the incremental update will be imprecise. 
+ DCHECK(state_->options.use_decay || jumps_.JumpIsUpToDate(var)) + << var << " " << var_domains_[var] << " " << state_->options.name(); + const double score = jumps_.Score(var); + return score < 0.0; + } + + // See RecomputeVarsToScan(), we shouldn't have any fixed variable here. + DCHECK(!var_domains_.IsFixed(var)); + // Return true iff var is has a better objective value in its domain. - return evaluator_->ObjectiveDelta(var, var_domains_[var].Min() - value) < 0 || - evaluator_->ObjectiveDelta(var, var_domains_[var].Max() - value) < 0; + if (var_domains_.HasBetterObjectiveValue(var)) return true; + + // We will need to recompute the score. Let's skip variables for which we know + // in advance that there will be no good score. + // + // For the objective, we don't care if it is violated or not, we only want + // to scan variables that might improve it (and thus reduce its violation if it + // is violated). + // + // TODO(user): We should generalize the objective logic to all constraints. + // There is no point scanning a variable of a violated constraint if it is at + // the wrong bound and cannot improve the violation! + return evaluator_->NumViolatedConstraintsForVarIgnoringObjective(var) > 0; } -void FeasibilityJumpSolver::RecomputeVarsToScan(JumpTable& jumps) { +void FeasibilityJumpSolver::RecomputeVarsToScan() { const int num_variables = var_domains_.size(); - jumps.RecomputeAll(num_variables); + jumps_.RecomputeAll(num_variables); + DCHECK(SlowCheckNumViolatedConstraints()); + in_vars_to_scan_.assign(num_variables, false); vars_to_scan_.clear(); - DCHECK(SlowCheckNumViolatedConstraints()); + + // Since the fixed status never changes during one batch, we mark such + // variables as "in_vars_to_scan_" even if we don't add them here. This allows + // us to skip them without any extra lookup.
+ for (const int var : var_domains_.FixedVariables()) { + in_vars_to_scan_[var] = true; + } + + num_ops_ += evaluator_->ViolatedConstraints().size(); for (const int c : evaluator_->ViolatedConstraints()) { + num_ops_ += evaluator_->ConstraintToVars(c).size(); for (const int v : evaluator_->ConstraintToVars(c)) { - AddVarToScan(jumps, v); + AddVarToScan(v); } } } @@ -1065,12 +1128,14 @@ bool FeasibilityJumpSolver::SlowCheckNumViolatedConstraints() const { std::vector result; result.assign(var_domains_.size(), 0); for (const int c : evaluator_->ViolatedConstraints()) { + if (evaluator_->IsObjectiveConstraint(c)) continue; for (const int v : evaluator_->ConstraintToVars(c)) { ++result[v]; } } for (int v = 0; v < result.size(); ++v) { - CHECK_EQ(result[v], evaluator_->NumViolatedConstraintsForVar(v)); + CHECK_EQ(result[v], + evaluator_->NumViolatedConstraintsForVarIgnoringObjective(v)); } return true; } @@ -1088,13 +1153,11 @@ bool CompoundMoveBuilder::OnStack(int var) const { bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) { if (stack_.empty()) return false; - ++num_backtracks_; *var = stack_.back().var; *value = stack_.back().prev_value; *score = stack_.back().score; var_on_stack_[*var] = false; stack_.pop_back(); - DCHECK_NE(*value, evaluator_->current_solution()[*var]); if (!stack_.empty()) { ++stack_.back().discrepancy; } @@ -1102,7 +1165,6 @@ bool CompoundMoveBuilder::Backtrack(int* var, int64_t* value, double* score) { } void CompoundMoveBuilder::Push(int var, int64_t prev_value, double score) { - DCHECK_NE(prev_value, evaluator_->current_solution()[var]); DCHECK(!var_on_stack_[var]); if (!stack_.empty()) { stack_.back().best_child_score = diff --git a/ortools/sat/feasibility_jump.h b/ortools/sat/feasibility_jump.h index 130ad1c257..921f98591d 100644 --- a/ortools/sat/feasibility_jump.h +++ b/ortools/sat/feasibility_jump.h @@ -14,7 +14,9 @@ #ifndef OR_TOOLS_SAT_FEASIBILITY_JUMP_H_ #define OR_TOOLS_SAT_FEASIBILITY_JUMP_H_ +#include 
#include +#include #include #include #include @@ -23,11 +25,21 @@ #include #include +#include "absl/container/flat_hash_map.h" #include "absl/functional/any_invocable.h" #include "absl/functional/bind_front.h" +#include "absl/hash/hash.h" +#include "absl/log/check.h" +#include "absl/random/distributions.h" +#include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" #include "absl/types/span.h" +#include "ortools/base/logging.h" #include "ortools/sat/constraint_violation.h" +#include "ortools/sat/integer.h" #include "ortools/sat/linear_model.h" +#include "ortools/sat/restart.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/stat_tables.h" #include "ortools/sat/subsolver.h" @@ -46,19 +58,26 @@ class CompoundMoveBuilder; // call to GetJump(var) by calling Recompute. class JumpTable { public: - explicit JumpTable( - absl::AnyInvocable(int)> compute_jump); + JumpTable() = default; + + void SetComputeFunction( + absl::AnyInvocable(int) const> compute_jump); + void RecomputeAll(int num_variables); // Gets the current jump delta and score, recomputing if necessary. std::pair GetJump(int var); + // If the new optimum value and score is known, users can update it directly. // e.g. after weight rescaling, or after changing a binary variable. void SetJump(int var, int64_t delta, double score); + // Recompute the jump for `var` when `GetJump(var)` is next called. void Recompute(int var); - // Returns true if the jump score for `var` might be negative. - bool PossiblyGood(int var) const; + + bool NeedRecomputation(int var) const { return needs_recomputation_[var]; } + + double Score(int var) const { return scores_[var]; } // Advanced usage, allows users to read possibly stale deltas for incremental // score updates. @@ -71,12 +90,14 @@ class JumpTable { absl::Span MutableScores() { return absl::MakeSpan(scores_); } + // For debugging and testing. + // // Note if you have very high weights (e.g. 
when using decay), the tolerances // in this function are likely too tight. - bool JumpIsUpToDate(int var); // For debugging and testing. + bool JumpIsUpToDate(int var) const; private: - absl::AnyInvocable(int)> compute_jump_; + absl::AnyInvocable(int) const> compute_jump_; // For each variable, we store: // - A jump delta which represents a change in variable value: @@ -88,6 +109,350 @@ class JumpTable { std::vector needs_recomputation_; }; +// Accessing Domain can be expensive, so we maintain vector of bool for the +// hot spots. +class VarDomainWrapper { + public: + explicit VarDomainWrapper(SharedBoundsManager* shared_bounds) + : shared_bounds_id_( + shared_bounds == nullptr ? 0 : shared_bounds->RegisterNewId()), + shared_bounds_(shared_bounds) {} + + Domain operator[](int var) const { return domains_[var]; } + bool HasTwoValues(int var) const { return has_two_values_[var]; } + size_t size() const { return domains_.size(); } + + void resize(int num_vars) { + domains_.resize(num_vars); + has_two_values_.resize(num_vars); + is_fixed_.resize(num_vars, false); + objective_is_positive_.resize(num_vars, false); + objective_is_negative_.resize(num_vars, false); + has_better_objective_value_.resize(num_vars, false); + } + + void Set(int var, Domain d) { + has_two_values_[var] = d.HasTwoValues(); + if (is_fixed_[var]) { + // The code here assume that once fixed, a variable stays that way. + CHECK(d.IsFixed()); + } else if (d.IsFixed()) { + is_fixed_[var] = true; + fixed_vars_.push_back(var); + } + domains_[var] = std::move(d); + } + + // Return false if one of the domain becomes empty (UNSAT). This might happen + // while we are cleaning up all workers at the end of a search. 
+ bool UpdateFromSharedBounds() { + if (shared_bounds_ == nullptr) return true; + shared_bounds_->GetChangedBounds(shared_bounds_id_, &tmp_variables_, + &tmp_new_lower_bounds_, + &tmp_new_upper_bounds_); + for (int i = 0; i < tmp_variables_.size(); ++i) { + const int var = tmp_variables_[i]; + const Domain new_domain = domains_[var].IntersectionWith( + Domain(tmp_new_lower_bounds_[i], tmp_new_upper_bounds_[i])); + if (new_domain.IsEmpty()) return false; + Set(var, new_domain); + } + return true; + } + + absl::Span AsSpan() const { return domains_; } + + void InitializeObjective(const CpModelProto& cp_model_proto) { + if (!cp_model_proto.has_objective()) return; + const int num_terms = cp_model_proto.objective().vars().size(); + for (int i = 0; i < num_terms; ++i) { + const int var = cp_model_proto.objective().vars(i); + const int coeff = cp_model_proto.objective().coeffs(i); + objective_is_positive_[var] = coeff > 0; + objective_is_negative_[var] = coeff < 0; + } + } + + bool IsFixed(int var) const { return is_fixed_[var]; } + + bool HasBetterObjectiveValue(int var) const { + return has_better_objective_value_[var]; + } + + // Tricky: this must be called on solution value change or domains update. + void OnValueChange(int var, int64_t value) { + has_better_objective_value_[var] = + (objective_is_positive_[var] && value > domains_[var].Min()) || + (objective_is_negative_[var] && value < domains_[var].Max()); + } + + absl::Span FixedVariables() const { return fixed_vars_; } + + private: + const int shared_bounds_id_; + SharedBoundsManager* shared_bounds_; + + // Basically fixed once and for all. + std::vector objective_is_positive_; + std::vector objective_is_negative_; + + // Depends on domain updates. + std::vector domains_; + std::vector has_two_values_; + std::vector is_fixed_; + std::vector fixed_vars_; + + // This is the only one that depends on the current solution value. 
+ std::vector has_better_objective_value_; + + // Temporary data for UpdateFromSharedBounds() + std::vector tmp_variables_; + std::vector tmp_new_lower_bounds_; + std::vector tmp_new_upper_bounds_; +}; + +// Local search counters. This can either be the stats of one run without +// restart or some aggregation of such runs. +struct LsCounters { + int64_t num_batches = 0; + int64_t num_perturbations = 0; + int64_t num_linear_evals = 0; + int64_t num_linear_moves = 0; + int64_t num_general_evals = 0; + int64_t num_general_moves = 0; + int64_t num_backtracks = 0; + int64_t num_compound_moves = 0; + int64_t num_weight_updates = 0; + int64_t num_scores_computed = 0; + + void AddFrom(const LsCounters& o) { + num_batches += o.num_batches; + num_perturbations += o.num_perturbations; + num_linear_evals += o.num_linear_evals; + num_linear_moves += o.num_linear_moves; + num_general_evals += o.num_general_evals; + num_general_moves += o.num_general_moves; + num_backtracks += o.num_backtracks; + num_compound_moves += o.num_compound_moves; + num_weight_updates += o.num_weight_updates; + num_scores_computed += o.num_scores_computed; + } +}; + +// The parameters used by the local search code. +struct LsOptions { + // This one never changes. + // - If true, each restart is independent from the other. This is nice because + // it plays well with the theoretical Luby restart sequence. + // - If false, we always "restart" from the current state, but we perturb it + // or just reset the constraint weight. We currently use this one way less + // often. + bool use_restart = true; + + // These are randomized each restart by Randomize(). + double perturbation_probability = 0.0; + bool use_decay = true; + bool use_compound_moves = true; + bool use_objective = true; // No effect if there are no objective. + + // Allows to identify which options worked well. 
+ std::string name() const { + std::vector parts; + parts.reserve(5); + if (use_restart) parts.push_back("restart"); + if (use_decay) parts.push_back("decay"); + if (use_compound_moves) parts.push_back("compound"); + if (perturbation_probability > 0) parts.push_back("perturb"); + if (use_objective) parts.push_back("obj"); + return absl::StrJoin(parts, "_"); + } + + // In order to collect statistics by options. + template + friend H AbslHashValue(H h, const LsOptions& o) { + return H::combine(std::move(h), o.use_restart, o.perturbation_probability, + o.use_decay, o.use_compound_moves, o.use_objective); + } + + bool operator==(const LsOptions& o) const { + return use_restart == o.use_restart && + perturbation_probability == o.perturbation_probability && + use_decay == o.use_decay && + use_compound_moves == o.use_compound_moves && + use_objective == o.use_objective; + } + + void Randomize(const SatParameters& params, ModelRandomGenerator* random) { + perturbation_probability = + absl::Bernoulli(*random, 0.5) + ? 0.0 + : params.feasibility_jump_var_randomization_probability(); + use_decay = absl::Bernoulli(*random, 0.5); + use_compound_moves = absl::Bernoulli(*random, 0.5); + use_objective = absl::Bernoulli(*random, 0.5); + } +}; + +// Each FeasibilityJumpSolver works on many LsState in an interleaved parallel +// fashion. Each "batch of moves" will update one of these states. Restart +// heuristics are also on a per-state basis. +// +// This allows us to not use O(problem size) per state while having a more +// diverse set of heuristics. +struct LsState { + // The score of a solution is just the sum of infeasibility of each + // constraint weighted by these weights. + std::vector solution; + std::vector weights; + + // Depending on the options, we use an exponentially decaying constraint + // weight like for SAT activities.
+ double bump_value = 1.0; + + // If using compound moves, these will be discounted on a new incumbent then + // re-converge to weights after some exploration. Search will repeatedly pick + // moves with negative WeightedViolationDelta using these weights. + // + // We limit the discrepancy in compound move search (i.e. limit the number of + // backtracks to any ancestor of the current leaf). This is set to 0 whenever + // a new incumbent is found or weights are updated, and increased at fixed + // point. Weights are only increased if no moves are found with discrepancy 2. + // Empirically we have seen very few moves applied with discrepancy > 2. + int compound_move_max_discrepancy = 0; + std::vector compound_weights; + std::vector in_compound_weight_changed; + std::vector compound_weight_changed; + std::unique_ptr move; + + // Counters for a "non-restarted" run. + LsCounters counters; + + // Strategy + LsOptions options; + + // Global counters, incremented across restart. + int64_t num_restarts = 0; + int64_t num_solutions_imported = 0; + + // When this reaches zero, we restart / perturbate or trigger something. + int64_t num_batches_before_change = 0; + + // Used by LS to know the rank of the starting solution for this state. + int64_t last_solution_rank = std::numeric_limits::max(); + + // Tricky: If this changed since last time, we need to recompute the + // compound moves as the objective constraint bound changed. + IntegerValue saved_inner_objective_lb = 0; + IntegerValue saved_inner_objective_ub = 0; +}; + +// Shared set of local search states that we work on. +class SharedLsStates { + public: + // Important: max_parallelism should be greater than or equal to the actual + // number of threads sharing this class, otherwise the code will break. + SharedLsStates(absl::string_view name, const SatParameters& params, + SharedStatTables* stat_tables) + : name_(name), params_(params), stat_tables_(stat_tables) { + // We always start with at least 8 states.
+ // We will create more if there are more parallel workers as needed. + for (int i = 0; i < 8; ++i) CreateNewState(); + } + + ~SharedLsStates(); + + void CreateNewState() { + const int index = states_.size(); + states_.emplace_back(new LsState()); + taken_.push_back(false); + num_selected_.push_back(0); + + // We add one no-restart per 16 states and put it last. + states_.back()->options.use_restart = (index % 16 != 15); + } + + // Returns the next available state in round-robin fashion. + // This is thread safe. If we respect the max_parallelism guarantee, then + // all states should be independent. + LsState* GetNextState() { + absl::MutexLock mutex_lock(&mutex_); + int next = -1; + const int num_states = states_.size(); + for (int i = 0; i < num_states; ++i) { + const int index = round_robin_index_; + round_robin_index_ = (round_robin_index_ + 1) % num_states; + if (taken_[index]) continue; + if (next == -1 || num_selected_[index] < num_selected_[next]) { + next = index; + } + } + + if (next == -1) { + // We need more parallelism and create a new state. + next = num_states; + CreateNewState(); + } + + --states_[next]->num_batches_before_change; + taken_[next] = true; + num_selected_[next]++; + return states_[next].get(); + } + + void Release(LsState* state) { + absl::MutexLock mutex_lock(&mutex_); + for (int i = 0; i < states_.size(); ++i) { + if (state == states_[i].get()) { + taken_[i] = false; + break; + } + } + } + + void ResetLubyCounter() { + absl::MutexLock mutex_lock(&mutex_); + luby_counter_ = 0; + } + + // We share a global running Luby sequence for all the "restart" state. + // Note that we randomize the parameters on each restart. + // + // Hack: options.use_restart is constant, so we are free to inspect it. + // Also if options.use_restart, then num_batches_before_change is only + // modified under lock, so this code should be thread safe. 
+ void ConfigureNextLubyRestart(LsState* state) { + absl::MutexLock mutex_lock(&mutex_); + const int factor = std::max(1, params_.feasibility_jump_restart_factor()); + CHECK(state->options.use_restart); + const int64_t next = factor * SUniv(++luby_counter_); + state->num_batches_before_change = next; + } + + // Accumulate in the relevant bucket the counters of the given states. + void CollectStatistics(const LsState& state) { + if (state.counters.num_batches == 0) return; + + absl::MutexLock mutex_lock(&mutex_); + options_to_stats_[state.options].AddFrom(state.counters); + options_to_num_restarts_[state.options]++; + } + + private: + const std::string name_; + const SatParameters& params_; + SharedStatTables* stat_tables_; + + mutable absl::Mutex mutex_; + int round_robin_index_ = 0; + std::vector> states_; + std::vector taken_; + std::vector num_selected_; + int luby_counter_ = 0; + + absl::flat_hash_map options_to_stats_; + absl::flat_hash_map options_to_num_restarts_; +}; + // Implements and heuristic similar to the one described in the paper: // "Feasibility Jump: an LP-free Lagrangian MIP heuristic", Bjørnar // Luteberget, Giorgio Sartor, 2023, Mathematical Programming Computation. @@ -101,8 +466,10 @@ class JumpTable { // model and its transpose for each FeasibilityJumpSolver. 
class FeasibilityJumpSolver : public SubSolver { public: - FeasibilityJumpSolver(const std::string name, SubSolver::SubsolverType type, + FeasibilityJumpSolver(const absl::string_view name, + SubSolver::SubsolverType type, const LinearModel* linear_model, SatParameters params, + std::shared_ptr ls_states, ModelSharedTimeLimit* shared_time_limit, SharedResponseManager* shared_response, SharedBoundsManager* shared_bounds, @@ -111,16 +478,12 @@ class FeasibilityJumpSolver : public SubSolver { : SubSolver(name, type), linear_model_(linear_model), params_(params), + states_(std::move(ls_states)), shared_time_limit_(shared_time_limit), shared_response_(shared_response), - shared_bounds_(shared_bounds), - shared_stats_(shared_stats), stat_tables_(stat_tables), random_(params_), - linear_jumps_( - absl::bind_front(&FeasibilityJumpSolver::ComputeLinearJump, this)), - general_jumps_(absl::bind_front( - &FeasibilityJumpSolver::ComputeGeneralJump, this)) {} + var_domains_(shared_bounds) {} // If VLOG_IS_ON(1), it will export a bunch of statistics. ~FeasibilityJumpSolver() override; @@ -150,17 +513,19 @@ class FeasibilityJumpSolver : public SubSolver { std::function GenerateTask(int64_t /*task_id*/) final; private: + void ImportState(); + void ReleaseState(); + void Initialize(); - void ResetCurrentSolution(); - void PerturbateCurrentSolution(); + void ResetCurrentSolution(bool use_hint, bool use_objective, + double perturbation_probability); + void PerturbateCurrentSolution(double perturbation_probability); std::string OneLineStats() const; - absl::Span ScanWeights() { - return absl::MakeSpan(use_compound_moves_ ? compound_weights_ : weights_); - } absl::Span ScanWeights() const { - return absl::MakeConstSpan(use_compound_moves_ ? compound_weights_ - : weights_); + return absl::MakeConstSpan(state_->options.use_compound_moves + ? state_->compound_weights + : state_->weights); } // Returns the weighted violation delta plus epsilon * the objective delta. 
@@ -177,29 +542,27 @@ class FeasibilityJumpSolver : public SubSolver { // Marks all variables whose jump value may have changed due to the last // update, except for `changed var`. - void MarkJumpsThatNeedToBeRecomputed(int changed_var, JumpTable& jumps); + void MarkJumpsThatNeedToBeRecomputed(int changed_var); // Moves. bool DoSomeLinearIterations(); bool DoSomeGeneralIterations(); // Returns true if an improving move was found. - bool ScanRelevantVariables(int num_to_scan, JumpTable& jumps, int* var, - int64_t* value, double* score); + bool ScanRelevantVariables(int num_to_scan, int* var, int64_t* value, + double* score); // Increases the weight of the currently infeasible constraints. // Ensures jumps remains consistent. - void UpdateViolatedConstraintWeights(JumpTable& jumps); - - void UpdateNumViolatedConstraintsPerVar(); - - void RecomputeVarsToScan(JumpTable&); + void UpdateViolatedConstraintWeights(); // Returns true if it is possible that `var` may have value that reduces // weighted violation or improve the objective. // Note that this is independent of the actual weights used. - bool ShouldScan(const JumpTable& jumps, int var) const; - void AddVarToScan(const JumpTable&, int var); + bool ShouldScan(int var) const; + + void AddVarToScan(int var); + void RecomputeVarsToScan(); // Resets the weights used to find compound moves. // Ensures the following invariant holds afterwards: @@ -217,15 +580,20 @@ class FeasibilityJumpSolver : public SubSolver { // evaluator_->ViolatedConstraints. 
bool SlowCheckNumViolatedConstraints() const; + double DeterministicTime() const { + return evaluator_->DeterministicTime() + num_ops_ * 1e-8; + } + const LinearModel* linear_model_; SatParameters params_; + std::shared_ptr states_; ModelSharedTimeLimit* shared_time_limit_; SharedResponseManager* shared_response_; - SharedBoundsManager* shared_bounds_ = nullptr; - SharedStatistics* shared_stats_; SharedStatTables* stat_tables_; ModelRandomGenerator random_; + VarDomainWrapper var_domains_; + // Synchronization Booleans. // // Note that we don't fully support all type of model, and we will abort by @@ -236,77 +604,22 @@ bool time_limit_crossed_ = false; std::unique_ptr evaluator_; - std::vector var_domains_; - std::vector var_has_two_values_; std::vector var_occurs_in_non_linear_constraint_; - JumpTable linear_jumps_; - JumpTable general_jumps_; + JumpTable jumps_; std::vector for_weight_update_; - // The score of a solution is just the sum of infeasibility of each - // constraint weighted by these scores. - std::vector weights_; - // If using compound moves, these will be discounted on a new incumbent then - // re-converge to `weights_` after some exploration. - // Search will repeatedly pick moves with negative WeightedViolationDelta - // using these weights. - std::vector compound_weights_; - - std::vector in_compound_weight_changed_; - std::vector compound_weight_changed_; - - // Depending on the options, we use an exponentially decaying constraint - // weight like for SAT activities. - double bump_value_ = 1.0; + // The current state we work on. + LsState* state_; // A list of variables that might be relevant to check for improving jumps. std::vector in_vars_to_scan_; - std::vector vars_to_scan_; - - // We restart each time our local deterministic time crosses this.
- double dtime_restart_threshold_ = 0.0; - int64_t update_restart_threshold_ = 0; - int num_batches_before_perturbation_; + FixedCapacityVector vars_to_scan_; std::vector tmp_breakpoints_; - // Each time we reset the weights, randomly change this to update them with - // decay or not. - bool use_decay_ = true; - - // Each time we reset the weights, randomly decide if we will use compound - // moves or not. - bool use_compound_moves_ = false; - - // Limit the discrepancy in compound move search (i.e. limit the number of - // backtracks to any ancestor of the current leaf). This is set to 0 whenever - // a new incumbent is found or weights are updated, and increased at fixed - // point. - // Weights are only increased if no moves are found with discrepancy 2. - // Empirically we have seen very few moves applied with discrepancy > 2. - int compound_move_max_discrepancy_ = 0; - - // Statistics - int64_t num_batches_ = 0; - int64_t num_linear_evals_ = 0; - int64_t num_general_evals_ = 0; - int64_t num_general_moves_ = 0; - int64_t num_compound_moves_ = 0; - int64_t num_linear_moves_ = 0; - int64_t num_perturbations_ = 0; - int64_t num_restarts_ = 0; - int64_t num_solutions_imported_ = 0; - int64_t num_weight_updates_ = 0; - int64_t num_scores_computed_ = 0; - - std::unique_ptr move_; - - // Counts the number of violated constraints each var is in. - std::vector num_violated_constraints_per_var_; - - // Info on the last solution loaded. - int64_t last_solution_rank_ = std::numeric_limits::max(); + // For counting the dtime. See DeterministicTime(). + int64_t num_ops_ = 0; }; // This class helps keep track of moves that change more than one variable. @@ -315,8 +628,8 @@ class FeasibilityJumpSolver : public SubSolver { // move, you just need to call `Clear` instead of Backtracking over the changes. 
class CompoundMoveBuilder { public: - CompoundMoveBuilder(LsEvaluator* evaluator, int num_variables) - : evaluator_(evaluator), var_on_stack_(num_variables, false) {} + explicit CompoundMoveBuilder(int num_variables) + : var_on_stack_(num_variables, false) {} // Adds an atomic move to the stack. // `var` must not be on the stack (this is DCHECKed). @@ -350,9 +663,6 @@ class CompoundMoveBuilder { return stack_.empty() ? 0 : stack_.back().discrepancy; } - // Returns the number of backtracking moves that have been applied. - int NumBacktracks() const { return num_backtracks_; } - // Returns true if all prev_values on the stack are in the appropriate domain. bool StackValuesInDomains(absl::Span var_domains) const; @@ -372,11 +682,8 @@ class CompoundMoveBuilder { double best_child_score = 0.0; int discrepancy = 0; }; - LsEvaluator* evaluator_; std::vector var_on_stack_; std::vector stack_; - - int64_t num_backtracks_ = 0; }; } // namespace operations_research::sat diff --git a/ortools/sat/feasibility_pump.h b/ortools/sat/feasibility_pump.h index 44f94d198a..5848310619 100644 --- a/ortools/sat/feasibility_pump.h +++ b/ortools/sat/feasibility_pump.h @@ -166,7 +166,8 @@ class FeasibilityPump { double objective_normalization_factor_ = 0.0; double mixing_factor_ = 1.0; - absl::StrongVector integer_lp_; + util_intops::StrongVector + integer_lp_; int model_vars_size_ = 0; // Underlying LP solver API. diff --git a/ortools/sat/implied_bounds.cc b/ortools/sat/implied_bounds.cc index 70d8d702e1..925d6cb01b 100644 --- a/ortools/sat/implied_bounds.cc +++ b/ortools/sat/implied_bounds.cc @@ -765,7 +765,7 @@ std::pair Canonicalize(IntegerVariable a, double GetLiteralLpValue( IntegerVariable var, - const absl::StrongVector& lp_values) { + const util_intops::StrongVector& lp_values) { return VariableIsPositive(var) ? 
lp_values[var] : 1.0 - lp_values[PositiveVariable(var)]; } @@ -773,7 +773,7 @@ double GetLiteralLpValue( } // namespace void ProductDetector::UpdateRLTMaps( - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, IntegerVariable var1, double lp1, IntegerVariable var2, double lp2, IntegerVariable bound_var, double bound_lp) { // we have var1 * var2 <= bound_var, and this is only useful if it is better @@ -798,7 +798,7 @@ void ProductDetector::UpdateRLTMaps( // TODO(user): limit work if too many ternary. void ProductDetector::InitializeBooleanRLTCuts( const absl::flat_hash_map& lp_vars, - const absl::StrongVector& lp_values) { + const util_intops::StrongVector& lp_values) { // TODO(user): Maybe we shouldn't reconstruct this every time, but it is hard // in case of multiple lps to make sure we don't use variables not in the lp // otherwise. diff --git a/ortools/sat/implied_bounds.h b/ortools/sat/implied_bounds.h index 12fe0c7586..7446ed3775 100644 --- a/ortools/sat/implied_bounds.h +++ b/ortools/sat/implied_bounds.h @@ -165,7 +165,7 @@ class ImpliedBounds { // all variables at once, so no need to organize it by IntegerVariable even // if that might be more friendly cache-wise. std::vector empty_implied_bounds_; - absl::StrongVector> + util_intops::StrongVector> var_to_bounds_; SparseBitset has_implied_bounds_; @@ -299,7 +299,7 @@ class ProductDetector { // And set-up data structure to query this efficiently. void InitializeBooleanRLTCuts( const absl::flat_hash_map& lp_vars, - const absl::StrongVector& lp_values); + const util_intops::StrongVector& lp_values); // BoolRLTCandidates()[var] contains the list of factor for which we have // a violated upper bound on lit(var) * lit(factor). @@ -328,7 +328,7 @@ class ProductDetector { // Process a relation lit(var1) * lit(var2) <= lit(bound_var). 
void UpdateRLTMaps( - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, IntegerVariable var1, double lp1, IntegerVariable var2, double lp2, IntegerVariable bound_var, double bound_lp); @@ -342,7 +342,7 @@ class ProductDetector { SharedStatistics* shared_stats_; // No need to process implication a => b if a was never seen. - absl::StrongVector seen_; + util_intops::StrongVector seen_; // For each clause of size 3 (l0, l1, l2) and a permutation of index (i, j, k) // we bitset[i] to true if lj => not(lk) and lk => not(lj). diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index b33c75974a..534cdf1878 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -1149,22 +1149,24 @@ void IntegerTrail::RemoveLevelZeroBounds( } std::vector* IntegerTrail::InitializeConflict( - IntegerLiteral integer_literal, const LazyReasonFunction& lazy_reason, + IntegerLiteral integer_literal, bool use_lazy_reason, absl::Span literals_reason, absl::Span bounds_reason) { DCHECK(tmp_queue_.empty()); std::vector* conflict = trail_->MutableConflict(); - if (lazy_reason == nullptr) { + if (use_lazy_reason) { + // We use the current trail index here. + conflict->clear(); + const int trail_index = integer_trail_.size(); + lazy_reasons_[trail_index].Explain(integer_literal, trail_index, conflict, + &tmp_queue_); + } else { conflict->assign(literals_reason.begin(), literals_reason.end()); const int num_vars = var_lbs_.size(); for (const IntegerLiteral& literal : bounds_reason) { const int trail_index = FindLowestTrailIndexThatExplainBound(literal); if (trail_index >= num_vars) tmp_queue_.push_back(trail_index); } - } else { - // We use the current trail index here. 
- conflict->clear(); - lazy_reason(integer_literal, integer_trail_.size(), conflict, &tmp_queue_); } return conflict; } @@ -1248,13 +1250,6 @@ bool IntegerTrail::SafeEnqueue( return Enqueue(i_lit, {}, tmp_cleaned_reason_); } -bool IntegerTrail::Enqueue(IntegerLiteral i_lit, - absl::Span literal_reason, - absl::Span integer_reason) { - return EnqueueInternal(i_lit, nullptr, literal_reason, integer_reason, - integer_trail_.size()); -} - bool IntegerTrail::ConditionalEnqueue( Literal lit, IntegerLiteral i_lit, std::vector* literal_reason, std::vector* integer_reason) { @@ -1292,19 +1287,6 @@ bool IntegerTrail::ConditionalEnqueue( return true; } -bool IntegerTrail::Enqueue(IntegerLiteral i_lit, - absl::Span literal_reason, - absl::Span integer_reason, - int trail_index_with_same_reason) { - return EnqueueInternal(i_lit, nullptr, literal_reason, integer_reason, - trail_index_with_same_reason); -} - -bool IntegerTrail::Enqueue(IntegerLiteral i_lit, - LazyReasonFunction lazy_reason) { - return EnqueueInternal(i_lit, lazy_reason, {}, {}, integer_trail_.size()); -} - bool IntegerTrail::ReasonIsValid( absl::Span literal_reason, absl::Span integer_reason) { @@ -1394,15 +1376,15 @@ bool IntegerTrail::ReasonIsValid( void IntegerTrail::EnqueueLiteral( Literal literal, absl::Span literal_reason, absl::Span integer_reason) { - EnqueueLiteralInternal(literal, nullptr, literal_reason, integer_reason); + EnqueueLiteralInternal(literal, false, literal_reason, integer_reason); } void IntegerTrail::EnqueueLiteralInternal( - Literal literal, LazyReasonFunction lazy_reason, + Literal literal, bool use_lazy_reason, absl::Span literal_reason, absl::Span integer_reason) { DCHECK(!trail_->Assignment().LiteralIsAssigned(literal)); - DCHECK(lazy_reason != nullptr || + DCHECK(!use_lazy_reason || ReasonIsValid(literal, literal_reason, integer_reason)); if (integer_search_levels_.empty()) { // Level zero. We don't keep any reason. 
@@ -1412,7 +1394,7 @@ void IntegerTrail::EnqueueLiteralInternal( // If we are fixing something at a positive level, remember it. if (!integer_search_levels_.empty() && integer_reason.empty() && - literal_reason.empty() && lazy_reason == nullptr) { + literal_reason.empty() && !use_lazy_reason) { delayed_to_fix_->literal_to_fix.push_back(literal); } @@ -1422,23 +1404,10 @@ void IntegerTrail::EnqueueLiteralInternal( } boolean_trail_index_to_integer_one_[trail_index] = integer_trail_.size(); - int reason_index = literals_reason_starts_.size(); - if (lazy_reason != nullptr) { - if (integer_trail_.size() >= lazy_reasons_.size()) { - lazy_reasons_.resize(integer_trail_.size() + 1, nullptr); - } - lazy_reasons_[integer_trail_.size()] = lazy_reason; - reason_index = -1; - } else { - // Copy the reason. - literals_reason_starts_.push_back(literals_reason_buffer_.size()); - literals_reason_buffer_.insert(literals_reason_buffer_.end(), - literal_reason.begin(), - literal_reason.end()); - bounds_reason_starts_.push_back(bounds_reason_buffer_.size()); - bounds_reason_buffer_.insert(bounds_reason_buffer_.end(), - integer_reason.begin(), integer_reason.end()); - } + const int reason_index = + use_lazy_reason + ? 
-1 + : AppendReasonToInternalBuffers(literal_reason, integer_reason); integer_trail_.push_back({/*bound=*/IntegerValue(0), /*var=*/kNoIntegerVariable, @@ -1520,12 +1489,34 @@ void IntegerTrail::CanonicalizeLiteralIfNeeded(IntegerLiteral* i_lit) { } } +int IntegerTrail::AppendReasonToInternalBuffers( + absl::Span literal_reason, + absl::Span integer_reason) { + const int reason_index = literals_reason_starts_.size(); + DCHECK_EQ(reason_index, bounds_reason_starts_.size()); + + literals_reason_starts_.push_back(literals_reason_buffer_.size()); + if (!literal_reason.empty()) { + literals_reason_buffer_.insert(literals_reason_buffer_.end(), + literal_reason.begin(), + literal_reason.end()); + } + + bounds_reason_starts_.push_back(bounds_reason_buffer_.size()); + if (!integer_reason.empty()) { + bounds_reason_buffer_.insert(bounds_reason_buffer_.end(), + integer_reason.begin(), integer_reason.end()); + } + + return reason_index; +} + bool IntegerTrail::EnqueueInternal( - IntegerLiteral i_lit, LazyReasonFunction lazy_reason, + IntegerLiteral i_lit, bool use_lazy_reason, absl::Span literal_reason, absl::Span integer_reason, int trail_index_with_same_reason) { - DCHECK(lazy_reason != nullptr || + DCHECK(use_lazy_reason || ReasonIsValid(i_lit, literal_reason, integer_reason)); const IntegerVariable var(i_lit.var); @@ -1550,8 +1541,8 @@ bool IntegerTrail::EnqueueInternal( // Note that we want only one call to MergeReasonIntoInternal() for // efficiency and a potential smaller reason. - auto* conflict = - InitializeConflict(i_lit, lazy_reason, literal_reason, integer_reason); + auto* conflict = InitializeConflict(i_lit, use_lazy_reason, literal_reason, + integer_reason); { const int trail_index = FindLowestTrailIndexThatExplainBound(ub_reason); const int num_vars = var_lbs_.size(); // must be signed. 
@@ -1606,8 +1597,8 @@ bool IntegerTrail::EnqueueInternal( if (literal_index != kNoLiteralIndex) { const Literal to_enqueue = Literal(literal_index); if (trail_->Assignment().LiteralIsFalse(to_enqueue)) { - auto* conflict = InitializeConflict(i_lit, lazy_reason, literal_reason, - integer_reason); + auto* conflict = InitializeConflict(i_lit, use_lazy_reason, + literal_reason, integer_reason); conflict->push_back(to_enqueue); MergeReasonIntoInternal(conflict); return false; @@ -1620,7 +1611,7 @@ bool IntegerTrail::EnqueueInternal( if (bound >= i_lit.bound) { DCHECK_EQ(bound, i_lit.bound); if (!trail_->Assignment().LiteralIsTrue(to_enqueue)) { - EnqueueLiteralInternal(to_enqueue, lazy_reason, literal_reason, + EnqueueLiteralInternal(to_enqueue, use_lazy_reason, literal_reason, integer_reason); } return EnqueueAssociatedIntegerLiteral(i_lit, to_enqueue); @@ -1638,7 +1629,7 @@ bool IntegerTrail::EnqueueInternal( boolean_trail_index_to_integer_one_.resize(trail_index + 1); } boolean_trail_index_to_integer_one_[trail_index] = - trail_index_with_same_reason; + integer_trail_.size(); trail_->Enqueue(to_enqueue, propagator_id_); } } @@ -1662,32 +1653,16 @@ bool IntegerTrail::EnqueueInternal( // If we are not at level zero but there is not reason, we have a root level // deduction. Remember it so that we don't forget on the next restart. 
if (!integer_search_levels_.empty() && integer_reason.empty() && - literal_reason.empty() && lazy_reason == nullptr && - trail_index_with_same_reason >= integer_trail_.size()) { + literal_reason.empty() && !use_lazy_reason) { if (!RootLevelEnqueue(i_lit)) return false; } - int reason_index = literals_reason_starts_.size(); - if (lazy_reason != nullptr) { - if (integer_trail_.size() >= lazy_reasons_.size()) { - lazy_reasons_.resize(integer_trail_.size() + 1, nullptr); - } - lazy_reasons_[integer_trail_.size()] = lazy_reason; + int reason_index; + if (use_lazy_reason) { reason_index = -1; } else if (trail_index_with_same_reason >= integer_trail_.size()) { - // Save the reason into our internal buffers. - literals_reason_starts_.push_back(literals_reason_buffer_.size()); - if (!literal_reason.empty()) { - literals_reason_buffer_.insert(literals_reason_buffer_.end(), - literal_reason.begin(), - literal_reason.end()); - } - bounds_reason_starts_.push_back(bounds_reason_buffer_.size()); - if (!integer_reason.empty()) { - bounds_reason_buffer_.insert(bounds_reason_buffer_.end(), - integer_reason.begin(), - integer_reason.end()); - } + reason_index = + AppendReasonToInternalBuffers(literal_reason, integer_reason); } else { reason_index = integer_trail_[trail_index_with_same_reason].reason_index; } @@ -1741,12 +1716,8 @@ bool IntegerTrail::EnqueueAssociatedIntegerLiteral(IntegerLiteral i_lit, } DCHECK_GT(trail_->CurrentDecisionLevel(), 0); - const int reason_index = literals_reason_starts_.size(); - CHECK_EQ(reason_index, bounds_reason_starts_.size()); - literals_reason_starts_.push_back(literals_reason_buffer_.size()); - bounds_reason_starts_.push_back(bounds_reason_buffer_.size()); - literals_reason_buffer_.push_back(literal_reason.Negated()); - + const int reason_index = + AppendReasonToInternalBuffers({literal_reason.Negated()}, {}); const int prev_trail_index = var_trail_index_[i_lit.var]; integer_trail_.push_back({/*bound=*/i_lit.bound, /*var=*/i_lit.var, @@ -1763,8 
+1734,9 @@ void IntegerTrail::ComputeLazyReasonIfNeeded(int trail_index) const { if (reason_index == -1) { const TrailEntry& entry = integer_trail_[trail_index]; const IntegerLiteral literal(entry.var, entry.bound); - lazy_reasons_[trail_index](literal, trail_index, &lazy_reason_literals_, - &lazy_reason_trail_indices_); + lazy_reasons_[trail_index].Explain(literal, trail_index, + &lazy_reason_literals_, + &lazy_reason_trail_indices_); } } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 81123c782a..59f27b4df6 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -332,8 +332,8 @@ struct AffineExpression { } // Returns the affine expression value under a given LP solution. - double LpValue( - const absl::StrongVector& lp_values) const { + double LpValue(const util_intops::StrongVector& + lp_values) const { if (var == kNoIntegerVariable) return ToDouble(constant); return ToDouble(coeff) * lp_values[var] + ToDouble(constant); } @@ -372,7 +372,8 @@ H AbslHashValue(H h, const AffineExpression& e) { // A model singleton that holds the root level integer variable domains. // we just store a single domain for both var and its negation. -struct IntegerDomains : public absl::StrongVector {}; +struct IntegerDomains + : public util_intops::StrongVector {}; // A model singleton used for debugging. If this is set in the model, then we // can check that various derived constraint do not exclude this solution (if it @@ -390,8 +391,8 @@ struct DebugSolution { // TODO(user): When this happen we should be able to infer the value of these // derived variable in the solution. For now, we only do that for the // objective variable. - absl::StrongVector ivar_has_value; - absl::StrongVector ivar_values; + util_intops::StrongVector ivar_has_value; + util_intops::StrongVector ivar_values; }; // A value and a literal. @@ -698,15 +699,16 @@ class IntegerEncoder { // // TODO(user): Remove the entry no longer needed because of level zero // propagations. 
- absl::StrongVector> + util_intops::StrongVector> encoding_by_var_; // Store for a given LiteralIndex the list of its associated IntegerLiterals. const InlinedIntegerLiteralVector empty_integer_literal_vector_; - absl::StrongVector + util_intops::StrongVector reverse_encoding_; const InlinedIntegerValueVector empty_integer_value_vector_; - absl::StrongVector + util_intops::StrongVector reverse_equality_encoding_; // Used by GetAllAssociatedVariables(). @@ -714,7 +716,7 @@ class IntegerEncoder { // Store for a given LiteralIndex its IntegerVariable view or kNoLiteralIndex // if there is none. - absl::StrongVector literal_view_; + util_intops::StrongVector literal_view_; // Mapping (variable == value) -> associated literal. Note that even if // there is more than one literal associated to the same fact, we just keep @@ -726,12 +728,12 @@ class IntegerEncoder { equality_to_associated_literal_; // Mutable because this is lazily cleaned-up by PartialDomainEncoding(). - mutable absl::StrongVector> + mutable util_intops::StrongVector> equality_by_var_; // Variables that are fully encoded. - mutable absl::StrongVector is_fully_encoded_; + mutable util_intops::StrongVector is_fully_encoded_; // A literal that is always true, convenient to encode trivial domains. // This will be lazily created when needed. @@ -745,10 +747,35 @@ class IntegerEncoder { mutable std::vector partial_encoding_; }; +class LazyReasonInterface { + public: + LazyReasonInterface() = default; + virtual ~LazyReasonInterface() = default; + + // The function is provided with the IntegerLiteral to explain and its index + // in the integer trail. It must fill the two vectors so that literals + // contains any Literal part of the reason and dependencies contains the trail + // index of any IntegerLiteral that is also part of the reason. + // + // Remark: sometimes this is called to fill the conflict while the literal to + // explain is propagated. 
In this case, trail_index will be the current trail + // index, and we cannot assume that there is anything filled yet in + // integer_literal[trail_index]. + // + // TODO(user): Right now this is only used by "linear" propagator, if we need + // more we could replace {id, propagation_slack} by a generic payload so that + // each implementation can cast it to its need. Then the memory will just be + // the max size of this payload data (16 bytes should be fine). + virtual void Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) = 0; +}; + // This class maintains a set of integer variables with their current bounds. // Bounds can be propagated from an external "source" and this class helps // to maintain the reason for each propagation. -class IntegerTrail : public SatPropagator { +class IntegerTrail final : public SatPropagator { public: explicit IntegerTrail(Model* model) : SatPropagator("IntegerTrail"), @@ -945,9 +972,15 @@ class IntegerTrail : public SatPropagator { // TODO(user): If the given bound is equal to the current bound, maybe the new // reason is better? how to decide and what to do in this case? to think about // it. Currently we simply don't do anything. + ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit) { + return EnqueueInternal(i_lit, false, {}, {}, integer_trail_.size()); + } ABSL_MUST_USE_RESULT bool Enqueue( IntegerLiteral i_lit, absl::Span literal_reason, - absl::Span integer_reason); + absl::Span integer_reason) { + return EnqueueInternal(i_lit, false, literal_reason, integer_reason, + integer_trail_.size()); + } // Enqueue new information about a variable bound. 
It has the same behavior // as the Enqueue() method, except that it accepts true and false integer @@ -981,24 +1014,22 @@ class IntegerTrail : public SatPropagator { ABSL_MUST_USE_RESULT bool Enqueue( IntegerLiteral i_lit, absl::Span literal_reason, absl::Span integer_reason, - int trail_index_with_same_reason); + int trail_index_with_same_reason) { + return EnqueueInternal(i_lit, false, literal_reason, integer_reason, + trail_index_with_same_reason); + } // Lazy reason API. - // - // The function is provided with the IntegerLiteral to explain and its index - // in the integer trail. It must fill the two vectors so that literals - // contains any Literal part of the reason and dependencies contains the trail - // index of any IntegerLiteral that is also part of the reason. - // - // Remark: sometimes this is called to fill the conflict while the literal - // to explain is propagated. In this case, trail_index_of_literal will be - // the current trail index, and we cannot assume that there is anything filled - // yet in integer_literal[trail_index_of_literal]. - using LazyReasonFunction = std::function* literals, std::vector* dependencies)>; - ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, - LazyReasonFunction lazy_reason); + ABSL_MUST_USE_RESULT bool EnqueueWithLazyReason( + IntegerLiteral i_lit, int id, IntegerValue propagation_slack, + LazyReasonInterface* explainer) { + const int trail_index = integer_trail_.size(); + if (trail_index >= lazy_reasons_.size()) { + lazy_reasons_.resize(trail_index + 1); + } + lazy_reasons_[trail_index] = {explainer, propagation_slack, id}; + return EnqueueInternal(i_lit, true, {}, {}, 0); + } // Sometimes we infer some root level bounds but we are not at the root level. 
// In this case, we will update the level-zero bounds right away, but will @@ -1143,19 +1174,24 @@ class IntegerTrail : public SatPropagator { // common conflict initialization that must terminate by a call to // MergeReasonIntoInternal(conflict) where conflict is the returned vector. std::vector* InitializeConflict( - IntegerLiteral integer_literal, const LazyReasonFunction& lazy_reason, + IntegerLiteral integer_literal, bool use_lazy_reason, absl::Span literals_reason, absl::Span bounds_reason); + // Saves the given reason and return its index. + int AppendReasonToInternalBuffers( + absl::Span literal_reason, + absl::Span integer_reason); + // Internal implementation of the different public Enqueue() functions. ABSL_MUST_USE_RESULT bool EnqueueInternal( - IntegerLiteral i_lit, LazyReasonFunction lazy_reason, + IntegerLiteral i_lit, bool use_lazy_reason, absl::Span literal_reason, absl::Span integer_reason, int trail_index_with_same_reason); // Internal implementation of the EnqueueLiteral() functions. - void EnqueueLiteralInternal(Literal literal, LazyReasonFunction lazy_reason, + void EnqueueLiteralInternal(Literal literal, bool use_lazy_reason, absl::Span literal_reason, absl::Span integer_reason); @@ -1197,8 +1233,8 @@ class IntegerTrail : public SatPropagator { // Information for each integer variable about its current lower bound and // position of the last TrailEntry in the trail referring to this var. - absl::StrongVector var_lbs_; - absl::StrongVector var_trail_index_; + util_intops::StrongVector var_lbs_; + util_intops::StrongVector var_trail_index_; // This is used by FindLowestTrailIndexThatExplainBound() and // FindTrailIndexOfVarBefore() to speed up the lookup. It keeps a trail index @@ -1207,7 +1243,8 @@ class IntegerTrail : public SatPropagator { // // The cache will only be updated with trail_index >= threshold. 
mutable int var_trail_index_cache_threshold_ = 0; - mutable absl::StrongVector var_trail_index_cache_; + mutable util_intops::StrongVector + var_trail_index_cache_; // Used by GetOrCreateConstantIntegerVariable() to return already created // constant variables that share the same value. @@ -1226,7 +1263,20 @@ class IntegerTrail : public SatPropagator { int32_t reason_index; }; std::vector integer_trail_; - std::vector lazy_reasons_; + + struct LazyReasonEntry { + LazyReasonInterface* explainer; + IntegerValue propagation_slack; + int id; + + void Explain(IntegerLiteral literal_to_explain, int trail_index_of_literal, + std::vector* literals, + std::vector* dependencies) const { + explainer->Explain(id, propagation_slack, literal_to_explain, + trail_index_of_literal, literals, dependencies); + } + }; + std::vector lazy_reasons_; // Start of each decision levels in integer_trail_. // TODO(user): use more general reversible mechanism? @@ -1256,7 +1306,7 @@ class IntegerTrail : public SatPropagator { mutable bool has_dependency_ = false; mutable std::vector tmp_queue_; mutable std::vector tmp_to_clear_; - mutable absl::StrongVector + mutable util_intops::StrongVector tmp_var_to_trail_index_in_queue_; mutable SparseBitset added_variables_; @@ -1362,7 +1412,7 @@ class RevIntegerValueRepository : public RevRepository { // watched Literal or LbVar changes. // // TODO(user): Move this to its own file. Add unit tests! 
-class GenericLiteralWatcher : public SatPropagator { +class GenericLiteralWatcher final : public SatPropagator { public: explicit GenericLiteralWatcher(Model* model); @@ -1520,8 +1570,10 @@ class GenericLiteralWatcher : public SatPropagator { return id == o.id && watch_index == o.watch_index; } }; - absl::StrongVector> literal_to_watcher_; - absl::StrongVector> var_to_watcher_; + util_intops::StrongVector> + literal_to_watcher_; + util_intops::StrongVector> + var_to_watcher_; std::vector watchers_; SparseBitset modified_vars_; diff --git a/ortools/sat/integer_expr.cc b/ortools/sat/integer_expr.cc index 607a7a6ee0..25f8092195 100644 --- a/ortools/sat/integer_expr.cc +++ b/ortools/sat/integer_expr.cc @@ -227,6 +227,35 @@ LinearConstraintPropagator::ConditionalLb( } } +template +void LinearConstraintPropagator::Explain( + int /*id*/, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) { + *literals_reason = literal_reason_; + trail_indices_reason->clear(); + shared_->reason_coeffs.clear(); + for (int i = 0; i < size_; ++i) { + const IntegerVariable var = vars_[i]; + if (PositiveVariable(var) == PositiveVariable(literal_to_explain.var)) { + continue; + } + const int index = + shared_->integer_trail->FindTrailIndexOfVarBefore(var, trail_index); + if (index >= 0) { + trail_indices_reason->push_back(index); + if (propagation_slack > 0) { + shared_->reason_coeffs.push_back(coeffs_[i]); + } + } + } + if (propagation_slack > 0) { + shared_->integer_trail->RelaxLinearReason( + propagation_slack, shared_->reason_coeffs, trail_indices_reason); + } +} + template bool LinearConstraintPropagator::Propagate() { // Reified case: If any of the enforcement_literals are false, we ignore the @@ -353,36 +382,9 @@ bool LinearConstraintPropagator::Propagate() { new_ub = lb + div; propagation_slack = (div + 1) * coeff - slack - 1; } - if (!shared_->integer_trail->Enqueue( - 
IntegerLiteral::LowerOrEqual(var, new_ub), - /*lazy_reason=*/[this, propagation_slack]( - IntegerLiteral i_lit, int trail_index, - std::vector* literal_reason, - std::vector* trail_indices_reason) { - *literal_reason = literal_reason_; - trail_indices_reason->clear(); - shared_->reason_coeffs.clear(); - for (int i = 0; i < size_; ++i) { - const IntegerVariable var = vars_[i]; - if (PositiveVariable(var) == PositiveVariable(i_lit.var)) { - continue; - } - const int index = - shared_->integer_trail->FindTrailIndexOfVarBefore( - var, trail_index); - if (index >= 0) { - trail_indices_reason->push_back(index); - if (propagation_slack > 0) { - shared_->reason_coeffs.push_back(coeffs_[i]); - } - } - } - if (propagation_slack > 0) { - shared_->integer_trail->RelaxLinearReason( - propagation_slack, shared_->reason_coeffs, - trail_indices_reason); - } - })) { + if (!shared_->integer_trail->EnqueueWithLazyReason( + IntegerLiteral::LowerOrEqual(var, new_ub), 0, propagation_slack, + this)) { // TODO(user): this is never supposed to happen since if we didn't have a // conflict above, we should be able to reduce the upper bound. It might // indicate an issue with our Boolean <-> integer encoding. 
@@ -650,9 +652,48 @@ LinMinPropagator::LinMinPropagator(const std::vector& exprs, model_(model), integer_trail_(model_->GetOrCreate()) {} +void LinMinPropagator::Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, + int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) { + const auto& vars = exprs_[id].vars; + const auto& coeffs = exprs_[id].coeffs; + literals_reason->clear(); + trail_indices_reason->clear(); + std::vector reason_coeffs; + const int size = vars.size(); + for (int i = 0; i < size; ++i) { + const IntegerVariable var = vars[i]; + if (PositiveVariable(var) == PositiveVariable(literal_to_explain.var)) { + continue; + } + const int index = + integer_trail_->FindTrailIndexOfVarBefore(var, trail_index); + if (index >= 0) { + trail_indices_reason->push_back(index); + if (propagation_slack > 0) { + reason_coeffs.push_back(coeffs[i]); + } + } + } + if (propagation_slack > 0) { + integer_trail_->RelaxLinearReason(propagation_slack, reason_coeffs, + trail_indices_reason); + } + // Now add the old integer_reason that triggered this propagation. 
+ for (IntegerLiteral reason_lit : integer_reason_for_unique_candidate_) { + const int index = + integer_trail_->FindTrailIndexOfVarBefore(reason_lit.var, trail_index); + if (index >= 0) { + trail_indices_reason->push_back(index); + } + } +} + bool LinMinPropagator::PropagateLinearUpperBound( - const std::vector& vars, - const std::vector& coeffs, const IntegerValue upper_bound) { + int id, absl::Span vars, + absl::Span coeffs, const IntegerValue upper_bound) { IntegerValue sum_lb = IntegerValue(0); const int num_vars = vars.size(); max_variations_.resize(num_vars); @@ -699,46 +740,10 @@ bool LinMinPropagator::PropagateLinearUpperBound( const IntegerValue coeff = coeffs[i]; const IntegerValue div = slack / coeff; const IntegerValue new_ub = integer_trail_->LowerBound(var) + div; - const IntegerValue propagation_slack = (div + 1) * coeff - slack - 1; - if (!integer_trail_->Enqueue( - IntegerLiteral::LowerOrEqual(var, new_ub), - /*lazy_reason=*/[this, &vars, &coeffs, propagation_slack]( - IntegerLiteral i_lit, int trail_index, - std::vector* literal_reason, - std::vector* trail_indices_reason) { - literal_reason->clear(); - trail_indices_reason->clear(); - std::vector reason_coeffs; - const int size = vars.size(); - for (int i = 0; i < size; ++i) { - const IntegerVariable var = vars[i]; - if (PositiveVariable(var) == PositiveVariable(i_lit.var)) { - continue; - } - const int index = - integer_trail_->FindTrailIndexOfVarBefore(var, trail_index); - if (index >= 0) { - trail_indices_reason->push_back(index); - if (propagation_slack > 0) { - reason_coeffs.push_back(coeffs[i]); - } - } - } - if (propagation_slack > 0) { - integer_trail_->RelaxLinearReason( - propagation_slack, reason_coeffs, trail_indices_reason); - } - // Now add the old integer_reason that triggered this propagation. 
- for (IntegerLiteral reason_lit : - integer_reason_for_unique_candidate_) { - const int index = integer_trail_->FindTrailIndexOfVarBefore( - reason_lit.var, trail_index); - if (index >= 0) { - trail_indices_reason->push_back(index); - } - } - })) { + if (!integer_trail_->EnqueueWithLazyReason( + IntegerLiteral::LowerOrEqual(var, new_ub), id, propagation_slack, + this)) { return false; } } @@ -815,7 +820,7 @@ bool LinMinPropagator::Propagate() { } return PropagateLinearUpperBound( - exprs_[last_possible_min_interval].vars, + last_possible_min_interval, exprs_[last_possible_min_interval].vars, exprs_[last_possible_min_interval].coeffs, current_min_ub - exprs_[last_possible_min_interval].offset); } diff --git a/ortools/sat/integer_expr.h b/ortools/sat/integer_expr.h index 7f17b88e6c..104cfeeb21 100644 --- a/ortools/sat/integer_expr.h +++ b/ortools/sat/integer_expr.h @@ -64,7 +64,8 @@ namespace sat { // constraint implementation. But we do need support for enforcement literals // there. template -class LinearConstraintPropagator : public PropagatorInterface { +class LinearConstraintPropagator : public PropagatorInterface, + LazyReasonInterface { public: // If refied_literal is kNoLiteralIndex then this is a normal constraint, // otherwise we enforce the implication refied_literal => constraint is true. @@ -99,6 +100,12 @@ class LinearConstraintPropagator : public PropagatorInterface { std::pair ConditionalLb( IntegerLiteral integer_literal, IntegerVariable target_var) const; + // For LazyReasonInterface. + void Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) final; + private: // Fills integer_reason_ with all the current lower_bounds. 
The real // explanation may require removing one of them, but as an optimization, we @@ -233,7 +240,7 @@ class MinPropagator : public PropagatorInterface { // Same as MinPropagator except this works on min = MIN(exprs) where exprs are // linear expressions. It uses IntegerSumLE to propagate bounds on the exprs. // Assumes Canonical expressions (all positive coefficients). -class LinMinPropagator : public PropagatorInterface { +class LinMinPropagator : public PropagatorInterface, LazyReasonInterface { public: LinMinPropagator(const std::vector& exprs, IntegerVariable min_var, Model* model); @@ -243,12 +250,18 @@ class LinMinPropagator : public PropagatorInterface { bool Propagate() final; void RegisterWith(GenericLiteralWatcher* watcher); + // For LazyReasonInterface. + void Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) final; + private: // Lighter version of IntegerSumLE. This uses the current value of // integer_reason_ in addition to the reason for propagating the linear // constraint. The coeffs are assumed to be positive here. - bool PropagateLinearUpperBound(const std::vector& vars, - const std::vector& coeffs, + bool PropagateLinearUpperBound(int id, absl::Span vars, + absl::Span coeffs, IntegerValue upper_bound); const std::vector exprs_; diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index f7aa8dd8bc..e924dda7bd 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -416,6 +416,8 @@ std::function SatSolverHeuristic(Model* model) { }; } +// TODO(user): Do we need a mechanism to reduce the range of possible gaps +// when nothing gets proven? This could be a parameter or some adaptative code. 
std::function ShaveObjectiveLb(Model* model) { auto* objective_definition = model->GetOrCreate(); const IntegerVariable obj_var = objective_definition->objective_var; diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index a8ad3fa76d..6ce288d165 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -293,7 +293,7 @@ bool SchedulingConstraintHelper::Propagate() { bool SchedulingConstraintHelper::IncrementalPropagate( const std::vector& watch_indices) { - for (const int t : watch_indices) recompute_cache_[t] = true; + for (const int t : watch_indices) recompute_cache_.Set(t); return true; } @@ -326,7 +326,6 @@ void SchedulingConstraintHelper::RegisterWith(GenericLiteralWatcher* watcher) { } bool SchedulingConstraintHelper::UpdateCachedValues(int t) { - recompute_cache_[t] = false; if (IsAbsent(t)) return true; IntegerValue smin = integer_trail_->LowerBound(starts_[t]); @@ -432,7 +431,10 @@ void SchedulingConstraintHelper::InitSortedVectors() { const int num_tasks = starts_.size(); recompute_all_cache_ = true; - recompute_cache_.resize(num_tasks, true); + recompute_cache_.Resize(num_tasks); + for (int t = 0; t < num_tasks; ++t) { + recompute_cache_.Set(t); + } // Make sure all the cached_* arrays can hold enough data. 
CHECK_LE(num_tasks, capacity_); @@ -448,8 +450,12 @@ void SchedulingConstraintHelper::InitSortedVectors() { task_by_increasing_end_min_[t].task_index = t; task_by_decreasing_start_max_[t].task_index = t; task_by_decreasing_end_max_[t].task_index = t; + task_by_increasing_shifted_start_min_[t].task_index = t; + task_by_increasing_shifted_start_min_[t].presence_lit = + reason_for_presence_[t]; task_by_negated_shifted_end_max_[t].task_index = t; + task_by_negated_shifted_end_max_[t].presence_lit = reason_for_presence_[t]; } recompute_energy_profile_ = true; @@ -485,12 +491,11 @@ bool SchedulingConstraintHelper::SynchronizeAndSetTimeDirection( if (!UpdateCachedValues(t)) return false; } } else { - for (int t = 0; t < recompute_cache_.size(); ++t) { - if (recompute_cache_[t]) { - if (!UpdateCachedValues(t)) return false; - } + for (const int t : recompute_cache_) { + if (!UpdateCachedValues(t)) return false; } } + recompute_cache_.ClearAll(); recompute_all_cache_ = false; return true; } @@ -506,13 +511,17 @@ IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( return kMinIntegerValue; } - const IntegerValue offset = + // We take the max of the level zero offset and the one coming from a + // conditional precedence at true. 
+ const IntegerValue conditional_offset = precedence_relations_->GetConditionalOffset(before.var, after.var); - if (offset == kMinIntegerValue) return kMinIntegerValue; + const IntegerValue known = integer_trail_->LevelZeroLowerBound(after.var) - + integer_trail_->LevelZeroUpperBound(before.var); + const IntegerValue offset = std::max(conditional_offset, known); const IntegerValue needed_offset = before.constant - after.constant; const IntegerValue distance = offset - needed_offset; - if (add_reason_if_after && distance >= 0) { + if (add_reason_if_after && distance >= 0 && known < conditional_offset) { for (const Literal l : precedence_relations_->GetConditionalEnforcements( before.var, after.var)) { literal_reason_.push_back(l.Negated()); @@ -553,9 +562,7 @@ bool SchedulingConstraintHelper::PropagatePrecedence(int a, int b) { absl::Span SchedulingConstraintHelper::TaskByIncreasingStartMin() { - const int num_tasks = NumTasks(); - for (int i = 0; i < num_tasks; ++i) { - TaskTime& ref = task_by_increasing_start_min_[i]; + for (TaskTime& ref : task_by_increasing_start_min_) { ref.time = StartMin(ref.task_index); } IncrementalSort(task_by_increasing_start_min_.begin(), @@ -565,9 +572,7 @@ SchedulingConstraintHelper::TaskByIncreasingStartMin() { absl::Span SchedulingConstraintHelper::TaskByIncreasingEndMin() { - const int num_tasks = NumTasks(); - for (int i = 0; i < num_tasks; ++i) { - TaskTime& ref = task_by_increasing_end_min_[i]; + for (TaskTime& ref : task_by_increasing_end_min_) { ref.time = EndMin(ref.task_index); } IncrementalSort(task_by_increasing_end_min_.begin(), @@ -577,9 +582,7 @@ SchedulingConstraintHelper::TaskByIncreasingEndMin() { absl::Span SchedulingConstraintHelper::TaskByDecreasingStartMax() { - const int num_tasks = NumTasks(); - for (int i = 0; i < num_tasks; ++i) { - TaskTime& ref = task_by_decreasing_start_max_[i]; + for (TaskTime& ref : task_by_decreasing_start_max_) { ref.time = StartMax(ref.task_index); } 
IncrementalSort(task_by_decreasing_start_max_.begin(), @@ -590,9 +593,7 @@ SchedulingConstraintHelper::TaskByDecreasingStartMax() { absl::Span SchedulingConstraintHelper::TaskByDecreasingEndMax() { - const int num_tasks = NumTasks(); - for (int i = 0; i < num_tasks; ++i) { - TaskTime& ref = task_by_decreasing_end_max_[i]; + for (TaskTime& ref : task_by_decreasing_end_max_) { ref.time = EndMax(ref.task_index); } IncrementalSort(task_by_decreasing_end_max_.begin(), @@ -600,15 +601,13 @@ SchedulingConstraintHelper::TaskByDecreasingEndMax() { return task_by_decreasing_end_max_; } -absl::Span +absl::Span SchedulingConstraintHelper::TaskByIncreasingShiftedStartMin() { if (recompute_shifted_start_min_) { recompute_shifted_start_min_ = false; - const int num_tasks = NumTasks(); bool is_sorted = true; IntegerValue previous = kMinIntegerValue; - for (int i = 0; i < num_tasks; ++i) { - TaskTime& ref = task_by_increasing_shifted_start_min_[i]; + for (CachedTaskBounds& ref : task_by_increasing_shifted_start_min_) { ref.time = ShiftedStartMin(ref.task_index); is_sorted = is_sorted && ref.time >= previous; previous = ref.time; @@ -722,6 +721,7 @@ bool SchedulingConstraintHelper::PushIntervalBound(int t, IntegerLiteral lit) { if (!PushIntegerLiteralIfTaskPresent(t, lit)) return false; if (IsAbsent(t)) return true; if (!UpdateCachedValues(t)) return false; + recompute_cache_.Clear(t); return true; } @@ -1040,6 +1040,15 @@ void SchedulingDemandHelper::AddDemandMinReason(int t) { } } +void SchedulingDemandHelper::AddDemandMinReason(int t, + IntegerValue min_demand) { + DCHECK_LT(t, demands_.size()); + if (demands_[t].var != kNoIntegerVariable) { + helper_->MutableIntegerReason()->push_back( + demands_[t].GreaterOrEqual(min_demand)); + } +} + void SchedulingDemandHelper::AddEnergyMinReason(int t) { // We prefer these reason in order. 
const IntegerValue value = cached_energies_min_[t]; diff --git a/ortools/sat/intervals.h b/ortools/sat/intervals.h index 0b6e5e47a6..b047a73376 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -201,12 +201,12 @@ class IntervalsRepository { // Literal indicating if the tasks is executed. Tasks that are always executed // will have a kNoLiteralIndex entry in this vector. - absl::StrongVector is_present_; + util_intops::StrongVector is_present_; // The integer variables for each tasks. - absl::StrongVector starts_; - absl::StrongVector ends_; - absl::StrongVector sizes_; + util_intops::StrongVector starts_; + util_intops::StrongVector ends_; + util_intops::StrongVector sizes_; // We can share the helper for all the propagators that work on the same set // of intervals. @@ -243,6 +243,19 @@ struct TaskTime { bool operator>(TaskTime other) const { return time > other.time; } }; +// We have some free space in TaskTime. +// We stick the presence_lit to save an indirection in some algo. +// +// TODO(user): Experiment caching more value. In particular +// TaskByIncreasingShiftedStartMin() could tie break task for better heuristics? +struct CachedTaskBounds { + int task_index; + LiteralIndex presence_lit; + IntegerValue time; + bool operator<(CachedTaskBounds other) const { return time < other.time; } + bool operator>(CachedTaskBounds other) const { return time > other.time; } +}; + // Helper class shared by the propagators that manage a given list of tasks. 
// // One of the main advantage of this class is that it allows to share the @@ -317,6 +330,9 @@ class SchedulingConstraintHelper : public PropagatorInterface, IntegerValue LevelZeroStartMax(int t) const { return integer_trail_->LevelZeroUpperBound(starts_[t]); } + IntegerValue LevelZeroEndMax(int t) const { + return integer_trail_->LevelZeroUpperBound(ends_[t]); + } // In the presence of tasks with a variable size, we do not necessarily // have start_min + size_min = end_min, we can instead have a situation @@ -353,6 +369,11 @@ class SchedulingConstraintHelper : public PropagatorInterface, bool IsPresent(int t) const; bool IsAbsent(int t) const; + // Same if one already have the presence LiteralIndex of a task. + bool IsOptional(LiteralIndex lit) const; + bool IsPresent(LiteralIndex lit) const; + bool IsAbsent(LiteralIndex lit) const; + // Return a value so that End(a) + dist <= Start(b). // Returns kMinInterValue if we don't have any such relation. IntegerValue GetCurrentMinDistanceBetweenTasks( @@ -382,7 +403,8 @@ class SchedulingConstraintHelper : public PropagatorInterface, absl::Span TaskByIncreasingEndMin(); absl::Span TaskByDecreasingStartMax(); absl::Span TaskByDecreasingEndMax(); - absl::Span TaskByIncreasingShiftedStartMin(); + + absl::Span TaskByIncreasingShiftedStartMin(); // Returns a sorted vector where each task appear twice, the first occurrence // is at size (end_min - size_min) and the second one at (end_min). 
@@ -414,6 +436,7 @@ class SchedulingConstraintHelper : public PropagatorInterface, void AddStartMaxReason(int t, IntegerValue upper_bound); void AddEndMinReason(int t, IntegerValue lower_bound); void AddEndMaxReason(int t, IntegerValue upper_bound); + void AddShiftedEndMaxReason(int t, IntegerValue upper_bound); void AddEnergyAfterReason(int t, IntegerValue energy_min, IntegerValue time); void AddEnergyMinInIntervalReason(int t, IntegerValue min, IntegerValue max); @@ -571,15 +594,15 @@ class SchedulingConstraintHelper : public PropagatorInterface, // This one is the most commonly used, so we optimized a bit more its // computation by detecting when there is nothing to do. - std::vector task_by_increasing_shifted_start_min_; - std::vector task_by_negated_shifted_end_max_; + std::vector task_by_increasing_shifted_start_min_; + std::vector task_by_negated_shifted_end_max_; bool recompute_shifted_start_min_ = true; bool recompute_negated_shifted_end_max_ = true; // If recompute_cache_[t] is true, then we need to update all the cached // value for the task t in SynchronizeAndSetTimeDirection(). bool recompute_all_cache_ = true; - std::vector recompute_cache_; + Bitset64 recompute_cache_; // Reason vectors. std::vector literal_reason_; @@ -612,8 +635,12 @@ class SchedulingDemandHelper { // this. 
IntegerValue DemandMin(int t) const; IntegerValue DemandMax(int t) const; + IntegerValue LevelZeroDemandMin(int t) const { + return integer_trail_->LevelZeroLowerBound(demands_[t]); + } bool DemandIsFixed(int t) const; void AddDemandMinReason(int t); + void AddDemandMinReason(int t, IntegerValue min_demand); const std::vector& Demands() const { return demands_; } // Adds the linearized demand (either the affine demand expression, or the @@ -747,6 +774,20 @@ inline bool SchedulingConstraintHelper::IsAbsent(int t) const { return trail_->Assignment().LiteralIsFalse(Literal(reason_for_presence_[t])); } +inline bool SchedulingConstraintHelper::IsOptional(LiteralIndex lit) const { + return lit != kNoLiteralIndex; +} + +inline bool SchedulingConstraintHelper::IsPresent(LiteralIndex lit) const { + if (lit == kNoLiteralIndex) return true; + return trail_->Assignment().LiteralIsTrue(Literal(lit)); +} + +inline bool SchedulingConstraintHelper::IsAbsent(LiteralIndex lit) const { + if (lit == kNoLiteralIndex) return false; + return trail_->Assignment().LiteralIsFalse(Literal(lit)); +} + inline void SchedulingConstraintHelper::ClearReason() { integer_reason_.clear(); literal_reason_.clear(); @@ -848,6 +889,11 @@ inline void SchedulingConstraintHelper::AddEndMaxReason( AddGenericReason(ends_[t], upper_bound, starts_[t], sizes_[t]); } +inline void SchedulingConstraintHelper::AddShiftedEndMaxReason( + int t, IntegerValue upper_bound) { + AddStartMaxReason(t, upper_bound - SizeMin(t)); +} + inline void SchedulingConstraintHelper::AddEnergyAfterReason( int t, IntegerValue energy_min, IntegerValue time) { if (StartMin(t) >= time) { diff --git a/ortools/sat/java/CpSolverTest.java b/ortools/sat/java/CpSolverTest.java index 0ce6ee82c1..3a16e6e6dc 100644 --- a/ortools/sat/java/CpSolverTest.java +++ b/ortools/sat/java/CpSolverTest.java @@ -169,12 +169,58 @@ public final class CpSolverTest { assertNotNull(solver); solver.getParameters().setEnumerateAllSolutions(true); final SolutionCounter 
cb = new SolutionCounter(); - solver.solve(model, cb); + CpSolverStatus status = solver.solve(model, cb); + assertThat(status).isEqualTo(CpSolverStatus.OPTIMAL); assertThat(cb.getSolutionCount()).isEqualTo(18); assertThat(solver.numBranches()).isGreaterThan(0L); } + static class BestBoundCallback implements Consumer { + public BestBoundCallback() { + bestBound = 0.0; + } + + @Override + public void accept(Double bound) { + bestBound = bound; + } + + public double getBestBound() { + return bestBound; + } + + double bestBound; + } + + @Test + public void testCpSolver_bestBoundCallback() throws Exception { + System.out.println("testCpSolver_bestBoundCallback"); + final CpModel model = new CpModel(); + assertNotNull(model); + // Creates the variables. + final BoolVar b0 = model.newBoolVar("x0"); + final BoolVar b1 = model.newBoolVar("x1"); + final BoolVar b2 = model.newBoolVar("x2"); + final BoolVar b3 = model.newBoolVar("x3"); + + model.addBoolOr(new Literal[] {b0, b1, b2, b3}); + model.minimize(DoubleLinearExpr.weightedSumWithOffset( + new Literal[] {b0, b1, b2, b3}, new double[] {3, 2, 4, 5}, 0.6)); + + // Creates a solver and solves the model. 
+ final CpSolver solver = new CpSolver(); + assertNotNull(solver); + solver.getParameters().setNumWorkers(1); + solver.getParameters().setLinearizationLevel(2); + BestBoundCallback cb = new BestBoundCallback(); + solver.setBestBoundCallback(cb); + CpSolverStatus status = solver.solve(model); + + assertThat(status).isEqualTo(CpSolverStatus.OPTIMAL); + assertThat(cb.getBestBound()).isEqualTo(2.6); + } + @Test public void testCpSolver_objectiveValue() throws Exception { System.out.println("testCpSolver_objectiveValue"); @@ -314,8 +360,6 @@ public final class CpSolverTest { final IntervalVar[] tasksIntervals = new IntervalVar[numTasks + capacities[0].length]; final Domain domainT = Domain.fromValues(domainArr); - final Domain intervalRange = - Domain.fromFlatIntervals(new long[] {domainT.min() + 1, domainT.max() + 1}); final int unitIntervalSize = 1; for (int i = 0; i < numTasks; i++) { final BoolVar presence = model.newBoolVar(""); diff --git a/ortools/sat/java/sat.i b/ortools/sat/java/sat.i index e2c6cad058..ef6399103a 100644 --- a/ortools/sat/java/sat.i +++ b/ortools/sat/java/sat.i @@ -82,6 +82,10 @@ PROTO2_RETURN(operations_research::sat::CpSolverResponse, %} %typemap(in) std::function %{ + // Catch nullptr inputs. + jclass $input_object_class = jenv->GetObjectClass($input); + if (nullptr == $input_object_class) return $null; + // $input will be deleted once this function return. // So we create a JNI global reference to keep it alive. 
jobject $input_object = jenv->NewGlobalRef($input); @@ -91,8 +95,6 @@ PROTO2_RETURN(operations_research::sat::CpSolverResponse, jenv->GetJavaVM(&jvm); auto $input_guard = std::make_shared(jvm, $input_object); - jclass $input_object_class = jenv->GetObjectClass($input); - if (nullptr == $input_object_class) return $null; jmethodID $input_method_id = jenv->GetMethodID( $input_object_class, "accept", "(Ljava/lang/Object;)V"); assert($input_method_id != nullptr); @@ -116,6 +118,10 @@ PROTO2_RETURN(operations_research::sat::CpSolverResponse, %typemap(javain) std::function "$javainput" // passing the Callback to JNI java class. %typemap(in) std::function %{ + // Catch nullptr inputs. + jclass $input_object_class = jenv->GetObjectClass($input); + if (nullptr == $input_object_class) return $null; + // $input will be deleted once this function return. // So we create a JNI global reference to keep it alive. jobject $input_object = jenv->NewGlobalRef($input); @@ -125,22 +131,29 @@ PROTO2_RETURN(operations_research::sat::CpSolverResponse, jenv->GetJavaVM(&jvm); auto $input_guard = std::make_shared(jvm, $input_object); - jclass $input_object_class = jenv->GetObjectClass($input); - if (nullptr == $input_object_class) return $null; jmethodID $input_method_id = jenv->GetMethodID( $input_object_class, "accept", "(Ljava/lang/Double;)V"); assert($input_method_id != nullptr); + // We will need to box double before calling the java method. + jclass $input_doubleClass = jenv->FindClass("java/lang/Double"); + jmethodID $input_doubleConstructor = + jenv->GetMethodID($input_doubleClass, "", "(D)V"); + // When the lambda will be destroyed, input_guard's destructor will be called. 
- $1 = [jvm, $input_object, $input_method_id, $input_guard]( - double bound) -> void { + $1 = [jvm, $input_object, $input_method_id, $input_guard, $input_doubleClass, + $input_doubleConstructor](double bound) -> void { JNIEnv *jenv = NULL; JavaVMAttachArgs args; args.version = JNI_VERSION_1_2; args.name = NULL; args.group = NULL; jvm->AttachCurrentThread((void**)&jenv, &args); - jenv->CallVoidMethod($input_object, $input_method_id, bound); + + jobject doubleObj = jenv->NewObject( + $input_doubleClass, $input_doubleConstructor, (jdouble)bound); + + jenv->CallVoidMethod($input_object, $input_method_id, doubleObj); jvm->DetachCurrentThread(); }; %} diff --git a/ortools/sat/lb_tree_search.cc b/ortools/sat/lb_tree_search.cc index db965a3d7d..394d5d09fa 100644 --- a/ortools/sat/lb_tree_search.cc +++ b/ortools/sat/lb_tree_search.cc @@ -16,15 +16,15 @@ #include #include #include +#include #include #include #include #include +#include "absl/cleanup/cleanup.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" -#include "absl/time/clock.h" -#include "absl/time/time.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" @@ -54,6 +54,7 @@ LbTreeSearch::LbTreeSearch(Model* model) sat_solver_(model->GetOrCreate()), integer_encoder_(model->GetOrCreate()), trail_(model->GetOrCreate()), + assignment_(trail_->Assignment()), integer_trail_(model->GetOrCreate()), watcher_(model->GetOrCreate()), shared_response_(model->GetOrCreate()), @@ -67,7 +68,7 @@ LbTreeSearch::LbTreeSearch(Model* model) // the objective at their minimum value? this should emulate the first step of // the core approach and gives a similar bound. const ObjectiveDefinition* objective = model->Get(); - CHECK(objective != nullptr); + DCHECK(objective != nullptr); objective_var_ = objective->objective_var; // Identify an LP with the same objective variable. 
@@ -83,15 +84,87 @@ LbTreeSearch::LbTreeSearch(Model* model) // We use the normal SAT search but we will bump the variable activity // slightly differently. In addition to the conflicts, we also bump it each // time the objective lower bound increase in a sub-node. - search_heuristic_ = SequentialSearch( - {SatSolverHeuristic(model), MostFractionalHeuristic(model), - IntegerValueSelectionHeuristic( - model->GetOrCreate()->fixed_search, model)}); + std::vector> heuristics; + if (SaveLpBasisOption()) { + heuristics.emplace_back(LpPseudoCostHeuristic(model)); + } + heuristics.emplace_back(SatSolverHeuristic(model)); + heuristics.emplace_back(MostFractionalHeuristic(model)); + heuristics.emplace_back(IntegerValueSelectionHeuristic( + model->GetOrCreate()->fixed_search, model)); + search_heuristic_ = SequentialSearch(std::move(heuristics)); +} + +bool LbTreeSearch::NodeHasBasis(const Node& node) const { + return !node.basis.IsEmpty(); +} + +bool LbTreeSearch::NodeHasUpToDateBasis(const Node& node) const { + if (node.basis.IsEmpty()) return false; + + // TODO(user): Do something smarter. We can at least reuse the variable + // statuses maybe? + if (node.basis_timestamp != lp_constraint_->num_lp_changes()) { + return false; + } + return true; +} + +void LbTreeSearch::EnableLpAndLoadBestBasis() { + DCHECK(lp_constraint_ != nullptr); + lp_constraint_->EnablePropagation(true); + + const int level = sat_solver_->CurrentDecisionLevel(); + if (current_branch_.empty()) return; + + NodeIndex n = current_branch_[0]; // Root. 
+ int basis_level = -1; + NodeIndex last_node_with_basis(-1); + for (int i = 0; i < level; ++i) { + if (n >= nodes_.size()) break; + if (NodeHasBasis(nodes_[n])) { + basis_level = i; + last_node_with_basis = n; + } + const Literal decision = sat_solver_->Decisions()[i].literal; + if (nodes_[n].literal_index == decision.Index()) { + n = nodes_[n].true_child; + } else { + DCHECK_EQ(nodes_[n].literal_index, decision.NegatedIndex()); + n = nodes_[n].false_child; + } + } + if (n < nodes_.size()) { + if (NodeHasBasis(nodes_[n])) { + basis_level = level; + last_node_with_basis = n; + } + } + + if (last_node_with_basis == -1) { + VLOG(1) << "no basis?"; + return; + } + VLOG(1) << "load " << basis_level << " / " << level; + + if (!NodeHasUpToDateBasis(nodes_[last_node_with_basis])) { + // The basis is no longer up to date, for now we do not load it. + // TODO(user): try to do something about it. + VLOG(1) << "Skipping potentially bad basis."; + return; + } + + lp_constraint_->LoadBasisState(nodes_[last_node_with_basis].basis); +} + +void LbTreeSearch::SaveLpBasisInto(Node& node) { + node.basis_timestamp = lp_constraint_->num_lp_changes(); + node.basis = lp_constraint_->GetBasisState(); } void LbTreeSearch::UpdateParentObjective(int level) { - CHECK_GE(level, 0); - CHECK_LT(level, current_branch_.size()); + DCHECK_GE(level, 0); + DCHECK_LT(level, current_branch_.size()); if (level == 0) return; const NodeIndex parent_index = current_branch_[level - 1]; Node& parent = nodes_[parent_index]; @@ -100,36 +173,79 @@ void LbTreeSearch::UpdateParentObjective(int level) { if (parent.true_child == child_index) { parent.UpdateTrueObjective(child.MinObjective()); } else { - CHECK_EQ(parent.false_child, child_index); + DCHECK_EQ(parent.false_child, child_index); parent.UpdateFalseObjective(child.MinObjective()); } } void LbTreeSearch::UpdateObjectiveFromParent(int level) { - CHECK_GE(level, 0); - CHECK_LT(level, current_branch_.size()); + DCHECK_GE(level, 0); + DCHECK_LT(level, 
current_branch_.size()); if (level == 0) return; const NodeIndex parent_index = current_branch_[level - 1]; const Node& parent = nodes_[parent_index]; - CHECK_GE(parent.MinObjective(), current_objective_lb_); + DCHECK_GE(parent.MinObjective(), current_objective_lb_); const NodeIndex child_index = current_branch_[level]; Node& child = nodes_[child_index]; if (parent.true_child == child_index) { child.UpdateObjective(parent.true_objective); } else { - CHECK_EQ(parent.false_child, child_index); + DCHECK_EQ(parent.false_child, child_index); child.UpdateObjective(parent.false_objective); } } -void LbTreeSearch::DebugDisplayTree(NodeIndex root) const { - int num_nodes = 0; - const IntegerValue root_lb = nodes_[root].MinObjective(); +std::string LbTreeSearch::NodeDebugString(NodeIndex n) const { + const IntegerValue root_lb = current_objective_lb_; const auto shifted_lb = [root_lb](IntegerValue lb) { return std::max(0, (lb - root_lb).value()); }; - absl::StrongVector level(nodes_.size(), 0); + std::string s; + absl::StrAppend(&s, "#", n.value()); + + const Node& node = nodes_[n]; + std::string true_letter = "t"; + std::string false_letter = "f"; + if (node.literal_index != kNoLiteralIndex && !node.is_deleted) { + const Literal decision = node.Decision(); + if (assignment_.LiteralIsTrue(decision)) { + true_letter = "T"; + } + if (assignment_.LiteralIsFalse(decision)) { + false_letter = "F"; + } + } + + if (node.true_child < nodes_.size()) { + absl::StrAppend(&s, " [", true_letter, ":#", node.true_child.value(), " ", + shifted_lb(node.true_objective), "]"); + } else { + absl::StrAppend(&s, " [", true_letter, ":## ", + shifted_lb(node.true_objective), "]"); + } + if (node.false_child < nodes_.size()) { + absl::StrAppend(&s, " [", false_letter, ":#", node.false_child.value(), " ", + shifted_lb(node.false_objective), "]"); + } else { + absl::StrAppend(&s, " [", false_letter, ":## ", + shifted_lb(node.false_objective), "]"); + } + + if (node.is_deleted) { + absl::StrAppend(&s, 
" "); + } + if (NodeHasBasis(node)) { + absl::StrAppend(&s, " "); + } + + return s; +} + +void LbTreeSearch::DebugDisplayTree(NodeIndex root) const { + int num_nodes = 0; + + util_intops::StrongVector level(nodes_.size(), 0); std::vector to_explore = {root}; while (!to_explore.empty()) { NodeIndex n = to_explore.back(); @@ -139,25 +255,17 @@ void LbTreeSearch::DebugDisplayTree(NodeIndex root) const { const Node& node = nodes_[n]; std::string s(level[n], ' '); - absl::StrAppend(&s, "#", n.value()); + absl::StrAppend(&s, NodeDebugString(n)); + LOG(INFO) << s; if (node.true_child < nodes_.size()) { - absl::StrAppend(&s, " [t:#", node.true_child.value(), " ", - shifted_lb(node.true_objective), "]"); to_explore.push_back(node.true_child); level[node.true_child] = level[n] + 1; - } else { - absl::StrAppend(&s, " [t:## ", shifted_lb(node.true_objective), "]"); } if (node.false_child < nodes_.size()) { - absl::StrAppend(&s, " [f:#", node.false_child.value(), " ", - shifted_lb(node.false_objective), "]"); to_explore.push_back(node.false_child); level[node.false_child] = level[n] + 1; - } else { - absl::StrAppend(&s, " [f:## ", shifted_lb(node.false_objective), "]"); } - LOG(INFO) << s; } LOG(INFO) << "num_nodes: " << num_nodes; } @@ -177,19 +285,34 @@ bool LbTreeSearch::FullRestart() { void LbTreeSearch::MarkAsDeletedNodeAndUnreachableSubtree(Node& node) { --num_nodes_in_tree_; + DCHECK(!node.is_deleted); node.is_deleted = true; - if (sat_solver_->Assignment().LiteralIsTrue(node.literal)) { + DCHECK_NE(node.literal_index, kNoLiteralIndex); + if (assignment_.LiteralIsTrue(Literal(node.literal_index))) { MarkSubtreeAsDeleted(node.false_child); } else { MarkSubtreeAsDeleted(node.true_child); } } +void LbTreeSearch::MarkBranchAsInfeasible(Node& node, bool true_branch) { + if (true_branch) { + node.UpdateTrueObjective(kMaxIntegerValue); + MarkSubtreeAsDeleted(node.true_child); + node.true_child = NodeIndex(std::numeric_limits::max()); + } else { + 
node.UpdateFalseObjective(kMaxIntegerValue); + MarkSubtreeAsDeleted(node.false_child); + node.false_child = NodeIndex(std::numeric_limits::max()); + } +} + void LbTreeSearch::MarkSubtreeAsDeleted(NodeIndex root) { std::vector to_delete{root}; for (int i = 0; i < to_delete.size(); ++i) { const NodeIndex n = to_delete[i]; if (n >= nodes_.size()) continue; + if (nodes_[n].is_deleted) continue; --num_nodes_in_tree_; nodes_[n].is_deleted = true; @@ -203,7 +326,85 @@ std::string LbTreeSearch::SmallProgressString() const { return absl::StrCat( "nodes=", num_nodes_in_tree_, "/", nodes_.size(), " rc=", num_rc_detected_, " decisions=", num_decisions_taken_, - " @root=", num_back_to_root_node_, " restarts=", num_full_restarts_); + " @root=", num_back_to_root_node_, " restarts=", num_full_restarts_, + " lp_iters=[", FormatCounter(num_lp_iters_at_level_zero_), ", ", + FormatCounter(num_lp_iters_save_basis_), ", ", + FormatCounter(num_lp_iters_first_branch_), ", ", + FormatCounter(num_lp_iters_dive_), "]"); +} + +std::function LbTreeSearch::UpdateLpIters(int64_t* counter) { + if (lp_constraint_ == nullptr) return []() {}; + const int64_t old_num = lp_constraint_->total_num_simplex_iterations(); + return [old_num, counter, this]() { + const int64_t new_num = lp_constraint_->total_num_simplex_iterations(); + *counter += new_num - old_num; + }; +} + +bool LbTreeSearch::LevelZeroLogic() { + ++num_back_to_root_node_; + num_decisions_taken_at_last_level_zero_ = num_decisions_taken_; + + // Always run the LP when we are back at level zero. + if (SaveLpBasisOption() && !current_branch_.empty()) { + const auto cleanup = + absl::MakeCleanup(UpdateLpIters(&num_lp_iters_at_level_zero_)); + EnableLpAndLoadBestBasis(); + if (!sat_solver_->FinishPropagation()) { + return false; + } + SaveLpBasisInto(nodes_[current_branch_[0]]); + lp_constraint_->EnablePropagation(false); + } + + // Import the objective upper-bound. 
+ // We do that manually because we disabled objective import to not "pollute" + // the objective lower_bound and still have local reason for objective + // improvement. + { + const IntegerValue ub = shared_response_->GetInnerObjectiveUpperBound(); + if (integer_trail_->UpperBound(objective_var_) > ub) { + if (!integer_trail_->Enqueue( + IntegerLiteral::LowerOrEqual(objective_var_, ub), {}, {})) { + sat_solver_->NotifyThatModelIsUnsat(); + return false; + } + if (!sat_solver_->FinishPropagation()) { + return false; + } + } + } + + // If the search has not just been restarted (in which case nodes_ would be + // empty), and if we are at level zero (either naturally, or if the + // backtrack level was set to zero in the above code), let's run a different + // heuristic to decide whether to restart the search from scratch or not. + // + // We ignore small search trees. + if (num_nodes_in_tree_ > 50) { + // Let's count how many nodes have worse objective bounds than the best + // known external objective lower bound. 
+ const IntegerValue latest_lb = + shared_response_->GetInnerObjectiveLowerBound(); + int num_nodes = 0; + int num_nodes_with_lower_objective = 0; + for (const Node& node : nodes_) { + if (node.is_deleted) continue; + ++num_nodes; + if (node.MinObjective() < latest_lb) num_nodes_with_lower_objective++; + } + DCHECK_EQ(num_nodes_in_tree_, num_nodes); + if (num_nodes_with_lower_objective * 2 > num_nodes) { + VLOG(2) << "lb_tree_search restart nodes: " + << num_nodes_with_lower_objective << "/" << num_nodes << " : " + << 100.0 * num_nodes_with_lower_objective / num_nodes << "%" + << ", decisions:" << num_decisions_taken_; + if (!FullRestart()) return false; + } + } + + return true; } SatSolver::Status LbTreeSearch::Search( @@ -234,79 +435,14 @@ SatSolver::Status LbTreeSearch::Search( const int kMaxNumInitialRestarts = 10; const int64_t kNumDecisionsBeforeInitialRestarts = 1000; + // If some branches already have a good lower bound, no need to call the LP + // on those. + watcher_->SetStopPropagationCallback([this] { + return integer_trail_->LowerBound(objective_var_) > current_objective_lb_; + }); + while (!time_limit_->LimitReached() && !shared_response_->ProblemIsSolved()) { - // This is the current bound we try to improve. We cache it here to avoid - // getting the lock many times and it is also easier to follow the code if - // this is assumed constant for one iteration. - current_objective_lb_ = shared_response_->GetInnerObjectiveLowerBound(); - - // If some branches already have a good lower bound, no need to call the LP - // on those. - watcher_->SetStopPropagationCallback([this] { - return integer_trail_->LowerBound(objective_var_) > current_objective_lb_; - }); - - // Propagate upward in the tree the new objective lb. - if (!current_branch_.empty()) { - // Our branch is always greater or equal to the level. - // We increase the objective_lb of the current node if needed. 
- { - const int current_level = sat_solver_->CurrentDecisionLevel(); - CHECK_GE(current_branch_.size(), current_level); - for (int i = 0; i < current_level; ++i) { - CHECK(sat_solver_->Assignment().LiteralIsAssigned( - nodes_[current_branch_[i]].literal)); - } - if (current_level < current_branch_.size()) { - nodes_[current_branch_[current_level]].UpdateObjective( - integer_trail_->LowerBound(objective_var_)); - } - - // Minor optim: sometimes, because of the LP and cuts, the reason for - // objective_var_ only contains lower level literals, so we can exploit - // that. - // - // TODO(user): No point checking that if the objective lb wasn't - // assigned at this level. - // - // TODO(user): Exploit the reasons further. - if (integer_trail_->LowerBound(objective_var_) > - integer_trail_->LevelZeroLowerBound(objective_var_)) { - const std::vector reason = - integer_trail_->ReasonFor(IntegerLiteral::GreaterOrEqual( - objective_var_, integer_trail_->LowerBound(objective_var_))); - int max_level = 0; - for (const Literal l : reason) { - max_level = std::max( - max_level, - sat_solver_->LiteralTrail().Info(l.Variable()).level); - } - if (max_level < current_level) { - nodes_[current_branch_[max_level]].UpdateObjective( - integer_trail_->LowerBound(objective_var_)); - } - } - } - - // Propagate upward and then forward any new bounds. - for (int level = current_branch_.size(); --level > 0;) { - UpdateParentObjective(level); - } - nodes_[current_branch_[0]].UpdateObjective(current_objective_lb_); - for (int level = 1; level < current_branch_.size(); ++level) { - UpdateObjectiveFromParent(level); - } - - // If the root lb increased, update global shared objective lb. 
- const IntegerValue bound = nodes_[current_branch_[0]].MinObjective(); - if (bound > current_objective_lb_) { - shared_response_->UpdateInnerObjectiveBounds( - absl::StrCat(name_, " (", SmallProgressString(), ") "), bound, - integer_trail_->LevelZeroUpperBound(objective_var_)); - current_objective_lb_ = bound; - if (VLOG_IS_ON(3)) DebugDisplayTree(current_branch_[0]); - } - } + VLOG(2) << "LOOP " << sat_solver_->CurrentDecisionLevel(); // Each time we are back here, we bump the activities of the variable that // are part of the objective lower bound reason. @@ -332,6 +468,93 @@ SatSolver::Status LbTreeSearch::Search( sat_decision_->UpdateVariableActivityIncrement(); } + // Propagate upward in the tree the new objective lb. + if (!current_branch_.empty()) { + // Our branch is always greater or equal to the level. + // We increase the objective_lb of the current node if needed. + { + const int current_level = sat_solver_->CurrentDecisionLevel(); + const IntegerValue current_objective_lb = + integer_trail_->LowerBound(objective_var_); + if (DEBUG_MODE) { + CHECK_LE(current_level, current_branch_.size()); + for (int i = 0; i < current_level; ++i) { + CHECK(!nodes_[current_branch_[i]].is_deleted); + CHECK(assignment_.LiteralIsAssigned( + nodes_[current_branch_[i]].Decision())); + } + } + if (current_level < current_branch_.size()) { + nodes_[current_branch_[current_level]].UpdateObjective( + current_objective_lb); + } + + // Minor optim: sometimes, because of the LP and cuts, the reason for + // objective_var_ only contains lower level literals, so we can exploit + // that. + // + // TODO(user): No point checking that if the objective lb wasn't + // assigned at this level. + // + // TODO(user): Exploit the reasons further. 
+ if (current_objective_lb > + integer_trail_->LevelZeroLowerBound(objective_var_)) { + const std::vector reason = + integer_trail_->ReasonFor(IntegerLiteral::GreaterOrEqual( + objective_var_, current_objective_lb)); + int max_level = 0; + for (const Literal l : reason) { + max_level = std::max( + max_level, + sat_solver_->LiteralTrail().Info(l.Variable()).level); + } + if (max_level < current_level) { + nodes_[current_branch_[max_level]].UpdateObjective( + current_objective_lb); + } + } + } + + // Propagate upward any new bounds. + for (int level = current_branch_.size(); --level > 0;) { + UpdateParentObjective(level); + } + } + + if (SaveLpBasisOption()) { + // We disable LP automatic propagation and only enable it: + // - at root node + // - when we go to a new branch. + lp_constraint_->EnablePropagation(false); + } + + // This will import other workers bound if we are back to level zero. + // It might also decide to restart. + if (!search_helper_->BeforeTakingDecision()) { + return sat_solver_->UnsatStatus(); + } + + // This is the current bound we try to improve. We cache it here to avoid + // getting the lock many times and it is also easier to follow the code if + // this is assumed constant for one iteration. + current_objective_lb_ = shared_response_->GetInnerObjectiveLowerBound(); + if (!current_branch_.empty()) { + nodes_[current_branch_[0]].UpdateObjective(current_objective_lb_); + for (int i = 1; i < current_branch_.size(); ++i) { + UpdateObjectiveFromParent(i); + } + + // If the root lb increased, update global shared objective lb. + const IntegerValue bound = nodes_[current_branch_[0]].MinObjective(); + if (bound > current_objective_lb_) { + shared_response_->UpdateInnerObjectiveBounds( + absl::StrCat(name_, " (", SmallProgressString(), ") "), bound, + integer_trail_->LevelZeroUpperBound(objective_var_)); + current_objective_lb_ = bound; + if (VLOG_IS_ON(3)) DebugDisplayTree(current_branch_[0]); + } + } + // Forget the whole tree and restart. 
// We will do it periodically at the beginning of the search each time we // cross the kNumDecisionsBeforeInitialRestarts decision since the last @@ -342,6 +565,15 @@ SatSolver::Status LbTreeSearch::Search( VLOG(2) << "lb_tree_search (initial_restart " << SmallProgressString() << ")"; if (!FullRestart()) return sat_solver_->UnsatStatus(); + continue; + } + + // Periodic backtrack to level zero so we can import bounds. + if (num_decisions_taken_ >= + num_decisions_taken_at_last_level_zero_ + 10000) { + if (!sat_solver_->ResetToLevelZero()) { + return sat_solver_->UnsatStatus(); + } } // Backtrack if needed. @@ -359,58 +591,20 @@ SatSolver::Status LbTreeSearch::Search( current_branch_.pop_back(); } - // Backtrack the solver. + // Backtrack the solver to be in sync with current_branch_. { - int backtrack_level = + const int backtrack_level = std::max(0, static_cast(current_branch_.size()) - 1); - - // Periodic backtrack to level zero so we can import bounds. - if (num_decisions_taken_ >= - num_decisions_taken_at_last_level_zero_ + 10000) { - backtrack_level = 0; - } - sat_solver_->Backtrack(backtrack_level); if (!sat_solver_->FinishPropagation()) { return sat_solver_->UnsatStatus(); } + if (sat_solver_->CurrentDecisionLevel() < backtrack_level) continue; } if (sat_solver_->CurrentDecisionLevel() == 0) { - ++num_back_to_root_node_; - num_decisions_taken_at_last_level_zero_ = num_decisions_taken_; - } - - // This will import other workers bound if we are back to level zero. - if (!search_helper_->BeforeTakingDecision()) { - return sat_solver_->UnsatStatus(); - } - - // If the search has not just been restarted (in which case nodes_ would be - // empty), and if we are at level zero (either naturally, or if the - // backtrack level was set to zero in the above code), let's run a different - // heuristic to decide whether to restart the search from scratch or not. - // - // We ignore small search trees. 
- if (sat_solver_->CurrentDecisionLevel() == 0 && num_nodes_in_tree_ > 50) { - // Let's count how many nodes have worse objective bounds than the best - // known external objective lower bound. - const IntegerValue latest_lb = - shared_response_->GetInnerObjectiveLowerBound(); - int num_nodes = 0; - int num_nodes_with_lower_objective = 0; - for (const Node& node : nodes_) { - if (node.is_deleted) continue; - ++num_nodes; - if (node.MinObjective() < latest_lb) num_nodes_with_lower_objective++; - } - DCHECK_EQ(num_nodes_in_tree_, num_nodes); - if (num_nodes_with_lower_objective * 2 > num_nodes) { - VLOG(2) << "lb_tree_search restart nodes: " - << num_nodes_with_lower_objective << "/" << num_nodes << " : " - << 100.0 * num_nodes_with_lower_objective / num_nodes << "%" - << ", decisions:" << num_decisions_taken_; - if (!FullRestart()) return sat_solver_->UnsatStatus(); + if (!LevelZeroLogic()) { + return sat_solver_->UnsatStatus(); } } @@ -420,23 +614,38 @@ SatSolver::Status LbTreeSearch::Search( // TODO(user): If we have new information and our current objective bound // is higher than any bound in a whole subtree, we might want to just // restart this subtree exploration? - while (current_branch_.size() == sat_solver_->CurrentDecisionLevel() + 1) { - const int level = current_branch_.size() - 1; - CHECK_EQ(level, sat_solver_->CurrentDecisionLevel()); - Node& node = nodes_[current_branch_[level]]; + while (true) { + const int size = current_branch_.size(); + const int level = sat_solver_->CurrentDecisionLevel(); + + // Invariant are tricky: + // current_branch_ contains one entry per decision taken + the last one + // which we are about to take. If we don't have the last entry, it means + // we are about to take a new decision. + DCHECK(size == level || size == level + 1); + if (size == level) break; // Take new decision. 
+ + const NodeIndex node_index = current_branch_[level]; + Node& node = nodes_[node_index]; + DCHECK_GT(node.true_child, node_index); + DCHECK_GT(node.false_child, node_index); + + // If the bound of this node is high, restart the main loop.. node.UpdateObjective(std::max( current_objective_lb_, integer_trail_->LowerBound(objective_var_))); if (node.MinObjective() > current_objective_lb_) break; - CHECK_EQ(node.MinObjective(), current_objective_lb_) << level; + DCHECK_EQ(node.MinObjective(), current_objective_lb_) << level; // This will be set to the next node index. NodeIndex n; + DCHECK(!node.is_deleted); + const Literal node_literal = node.Decision(); // If the variable is already fixed, we bypass the node and connect // its parent directly to the relevant child. - if (sat_solver_->Assignment().LiteralIsAssigned(node.literal)) { + if (assignment_.LiteralIsAssigned(node_literal)) { IntegerValue new_lb; - if (sat_solver_->Assignment().LiteralIsTrue(node.literal)) { + if (assignment_.LiteralIsTrue(node_literal)) { n = node.true_child; new_lb = node.true_objective; } else { @@ -450,16 +659,42 @@ SatSolver::Status LbTreeSearch::Search( current_branch_.pop_back(); if (!current_branch_.empty()) { const NodeIndex parent = current_branch_.back(); - if (sat_solver_->Assignment().LiteralIsTrue(nodes_[parent].literal)) { + DCHECK(!nodes_[parent].is_deleted); + const Literal parent_literal = nodes_[parent].Decision(); + if (assignment_.LiteralIsTrue(parent_literal)) { nodes_[parent].true_child = n; nodes_[parent].UpdateTrueObjective(new_lb); } else { - DCHECK(sat_solver_->Assignment().LiteralIsFalse( - nodes_[parent].literal)); + DCHECK(assignment_.LiteralIsFalse(parent_literal)); nodes_[parent].false_child = n; nodes_[parent].UpdateFalseObjective(new_lb); } - if (nodes_[parent].MinObjective() > current_objective_lb_) break; + if (new_lb > current_objective_lb_) { + // This is probably not needed. 
+ if (n < nodes_.size() && !nodes_[n].IsLeaf()) { + current_branch_.push_back(n); + nodes_[n].UpdateObjective(new_lb); + } + break; + } + } else { + if (n >= nodes_.size()) { + // We never explored the other branch, so we can just clear all + // nodes. + num_nodes_in_tree_ = 0; + nodes_.clear(); + } else if (nodes_[n].IsLeaf()) { + // Keep the saved basis. + num_nodes_in_tree_ = 1; + Node root = std::move(nodes_[n]); + nodes_.clear(); + nodes_.push_back(std::move(root)); + n = NodeIndex(0); + } else { + // We always make sure the root is at zero. + // The root is no longer at zero, that might cause issue. + // Cleanup. + } } } else { // See if we have better bounds using the current LP state. @@ -472,21 +707,59 @@ SatSolver::Status LbTreeSearch::Search( // work as well. num_decisions_taken_++; const bool choose_true = node.true_objective <= node.false_objective; + Literal next_decision; if (choose_true) { n = node.true_child; - search_helper_->TakeDecision(node.literal); + next_decision = node_literal; } else { n = node.false_child; - search_helper_->TakeDecision(node.literal.Negated()); + next_decision = node_literal.Negated(); } + // If we are taking this branch for the first time, we enable the LP and + // make sure we solve it before taking the decision. This allow to have + // proper pseudo-costs, and also be incremental for the decision we are + // about to take. + // + // We also enable the LP if we have no basis info for this node. 
+ if (SaveLpBasisOption() && + (n >= nodes_.size() || !NodeHasBasis(node))) { + const auto cleanup = + absl::MakeCleanup(UpdateLpIters(&num_lp_iters_save_basis_)); + + VLOG(1) << "~~~~"; + EnableLpAndLoadBestBasis(); + const int level = sat_solver_->CurrentDecisionLevel(); + if (!sat_solver_->FinishPropagation()) { + return sat_solver_->UnsatStatus(); + } + if (sat_solver_->CurrentDecisionLevel() < level) { + node.UpdateObjective(kMaxIntegerValue); + break; + } + + // The decision might have become assigned, in which case we loop. + if (assignment_.LiteralIsAssigned(next_decision)) { + continue; + } + + SaveLpBasisInto(node); + + // If we are not at the end, disable the LP propagation. + if (n < nodes_.size()) { + lp_constraint_->EnablePropagation(false); + } + } + + // Take the decision. + const auto cleanup = + absl::MakeCleanup(UpdateLpIters(&num_lp_iters_first_branch_)); + DCHECK(!assignment_.LiteralIsAssigned(next_decision)); + search_helper_->TakeDecision(next_decision); + // Conflict? if (current_branch_.size() != sat_solver_->CurrentDecisionLevel()) { - if (choose_true) { - node.UpdateTrueObjective(kMaxIntegerValue); - } else { - node.UpdateFalseObjective(kMaxIntegerValue); - } + MarkBranchAsInfeasible(node, choose_true); break; } @@ -498,6 +771,13 @@ SatSolver::Status LbTreeSearch::Search( } else { node.UpdateFalseObjective(lb); } + + if (n < nodes_.size()) { + nodes_[n].UpdateObjective(lb); + } else if (SaveLpBasisOption()) { + SaveLpBasisInto(nodes_[CreateNewEmptyNodeIfNeeded()]); + } + if (lb > current_objective_lb_) break; } @@ -506,7 +786,7 @@ SatSolver::Status LbTreeSearch::Search( "TreeS", absl::StrCat(" (", SmallProgressString(), ")")); } - if (n < nodes_.size()) { + if (n < nodes_.size() && !nodes_[n].IsLeaf()) { current_branch_.push_back(n); } else { break; @@ -518,6 +798,22 @@ SatSolver::Status LbTreeSearch::Search( continue; } + // TODO(user): The code is hard to follow. Fix and merge that with test + // below. 
+ if (!current_branch_.empty()) { + const Node& final_node = nodes_[current_branch_.back()]; + if (assignment_.LiteralIsTrue(final_node.Decision())) { + if (final_node.true_objective > current_objective_lb_) { + continue; + } + } else { + DCHECK(assignment_.LiteralIsFalse(final_node.Decision())); + if (final_node.false_objective > current_objective_lb_) { + continue; + } + } + } + // This test allow to not take a decision when the branch is already closed // (i.e. the true branch or false branch lb is high enough). Adding it // basically changes if we take the decision later when we explore the @@ -535,6 +831,67 @@ SatSolver::Status LbTreeSearch::Search( continue; } + const auto cleanup = absl::MakeCleanup(UpdateLpIters(&num_lp_iters_dive_)); + + if (current_branch_.empty()) { + VLOG(2) << "DIVE from empty tree"; + } else { + VLOG(2) << "DIVE from " << NodeDebugString(current_branch_.back()); + } + + if (SaveLpBasisOption() && !lp_constraint_->PropagationIsEnabled()) { + // This reuse or create a node to store the basis. + const NodeIndex index = CreateNewEmptyNodeIfNeeded(); + + EnableLpAndLoadBestBasis(); + const int level = sat_solver_->CurrentDecisionLevel(); + if (!sat_solver_->FinishPropagation()) { + return sat_solver_->UnsatStatus(); + } + + // Loop on backtrack or bound improvement. 
+ if (sat_solver_->CurrentDecisionLevel() < level) { + Node& node = nodes_[index]; + node.UpdateObjective(kMaxIntegerValue); + continue; + } + + SaveLpBasisInto(nodes_[index]); + + const IntegerValue obj_lb = integer_trail_->LowerBound(objective_var_); + if (obj_lb > current_objective_lb_) { + nodes_[index].UpdateObjective(obj_lb); + if (!current_branch_.empty()) { + Node& parent_node = nodes_[current_branch_.back()]; + const Literal node_literal = parent_node.Decision(); + DCHECK(assignment_.LiteralIsAssigned(node_literal)); + if (assignment_.LiteralIsTrue(node_literal)) { + parent_node.UpdateTrueObjective(obj_lb); + } else { + parent_node.UpdateFalseObjective(obj_lb); + } + } + continue; + } + } + + // Invariant: The current branch is fully assigned, and the solver is in + // sync. And we are not on a "bad" path. + const int base_level = sat_solver_->CurrentDecisionLevel(); + if (DEBUG_MODE) { + CHECK_EQ(base_level, current_branch_.size()); + for (const NodeIndex index : current_branch_) { + CHECK(!nodes_[index].is_deleted); + const Literal decision = nodes_[index].Decision(); + if (assignment_.LiteralIsTrue(decision)) { + CHECK_EQ(nodes_[index].true_objective, current_objective_lb_); + } else { + CHECK(assignment_.LiteralIsFalse(decision)); + CHECK_EQ(nodes_[index].false_objective, current_objective_lb_); + } + } + } + // We are about to take a new decision, what we will do is dive until // the objective lower bound increase. we will then create a bunch of new // nodes in the tree. @@ -544,7 +901,6 @@ SatSolver::Status LbTreeSearch::Search( // // TODO(user): In multithread, this change the behavior a lot since we // dive until we beat the best shared bound. Maybe we shouldn't do that. - const int base_level = sat_solver_->CurrentDecisionLevel(); while (true) { // TODO(user): We sometimes branch on the objective variable, this should // probably be avoided. 
@@ -565,11 +921,18 @@ SatSolver::Status LbTreeSearch::Search( if (!search_helper_->TakeDecision(Literal(decision))) { return sat_solver_->UnsatStatus(); } - if (sat_solver_->CurrentDecisionLevel() < base_level) break; + if (sat_solver_->CurrentDecisionLevel() < base_level) { + // TODO(user): it would be nice to mark some node as infeasible if + // this is the case. However this could happen after many decision and + // we realize with the lp that one of them should have been fixed + // earlier, without any infeasibility in the current branch. + break; + } if (integer_trail_->LowerBound(objective_var_) > current_objective_lb_) { break; } } + if (sat_solver_->CurrentDecisionLevel() <= base_level) continue; // Analyse the reason for objective increase. Deduce a set of new nodes to @@ -587,16 +950,21 @@ SatSolver::Status LbTreeSearch::Search( sat_decision_->UpdateVariableActivityIncrement(); // Create one node per new decisions. - CHECK_EQ(current_branch_.size(), base_level); + DCHECK_EQ(current_branch_.size(), base_level); for (const Literal d : decisions) { AppendNewNodeToCurrentBranch(d); } + // TODO(user): We should probably save the basis in more cases. + if (SaveLpBasisOption() && decisions.size() == 1) { + SaveLpBasisInto(nodes_[CreateNewEmptyNodeIfNeeded()]); + } + // Update the objective of the last node in the branch since we just // improved that. if (!current_branch_.empty()) { Node& n = nodes_[current_branch_.back()]; - if (sat_solver_->Assignment().LiteralIsTrue(n.literal)) { + if (assignment_.LiteralIsTrue(n.Decision())) { n.UpdateTrueObjective(integer_trail_->LowerBound(objective_var_)); } else { n.UpdateFalseObjective(integer_trail_->LowerBound(objective_var_)); @@ -609,10 +977,10 @@ SatSolver::Status LbTreeSearch::Search( // The decision level is the number of decision taken. // Decision()[level] is the decision at that level. 
int backtrack_level = base_level; - CHECK_LE(current_branch_.size(), sat_solver_->CurrentDecisionLevel()); + DCHECK_LE(current_branch_.size(), sat_solver_->CurrentDecisionLevel()); while (backtrack_level < current_branch_.size() && - sat_solver_->Decisions()[backtrack_level].literal == - nodes_[current_branch_[backtrack_level]].literal) { + sat_solver_->Decisions()[backtrack_level].literal.Index() == + nodes_[current_branch_[backtrack_level]].literal_index) { ++backtrack_level; } sat_solver_->Backtrack(backtrack_level); @@ -674,21 +1042,86 @@ std::vector LbTreeSearch::ExtractDecisions( return result; } -void LbTreeSearch::AppendNewNodeToCurrentBranch(Literal decision) { - const NodeIndex n(nodes_.size()); - ++num_nodes_in_tree_; - nodes_.emplace_back(Literal(decision), current_objective_lb_); - if (!current_branch_.empty()) { - const NodeIndex parent = current_branch_.back(); - if (sat_solver_->Assignment().LiteralIsTrue(nodes_[parent].literal)) { - nodes_[parent].true_child = n; - nodes_[parent].UpdateTrueObjective(nodes_.back().MinObjective()); +LbTreeSearch::NodeIndex LbTreeSearch::CreateNewEmptyNodeIfNeeded() { + NodeIndex n(0); + if (current_branch_.empty()) { + if (nodes_.empty()) { + ++num_nodes_in_tree_; + nodes_.emplace_back(current_objective_lb_); } else { - CHECK(sat_solver_->Assignment().LiteralIsFalse(nodes_[parent].literal)); - nodes_[parent].false_child = n; - nodes_[parent].UpdateFalseObjective(nodes_.back().MinObjective()); + DCHECK_EQ(nodes_.size(), 1); + } + } else { + const NodeIndex parent = current_branch_.back(); + DCHECK(!nodes_[parent].is_deleted); + const Literal parent_literal = nodes_[parent].Decision(); + if (assignment_.LiteralIsTrue(parent_literal)) { + if (nodes_[parent].true_child >= nodes_.size()) { + n = NodeIndex(nodes_.size()); + ++num_nodes_in_tree_; + nodes_[parent].true_child = NodeIndex(nodes_.size()); + nodes_.emplace_back(current_objective_lb_); + } else { + n = nodes_[parent].true_child; + } + 
nodes_[parent].UpdateTrueObjective(current_objective_lb_); + } else { + DCHECK(assignment_.LiteralIsFalse(parent_literal)); + if (nodes_[parent].false_child >= nodes_.size()) { + n = NodeIndex(nodes_.size()); + ++num_nodes_in_tree_; + nodes_[parent].false_child = NodeIndex(nodes_.size()); + nodes_.emplace_back(current_objective_lb_); + } else { + n = nodes_[parent].false_child; + } + nodes_[parent].UpdateFalseObjective(current_objective_lb_); } } + DCHECK(!nodes_[n].is_deleted); + DCHECK_EQ(nodes_[n].literal_index, kNoLiteralIndex); + return n; +} + +void LbTreeSearch::AppendNewNodeToCurrentBranch(Literal decision) { + NodeIndex n(nodes_.size()); + if (current_branch_.empty()) { + if (nodes_.empty()) { + ++num_nodes_in_tree_; + nodes_.emplace_back(current_objective_lb_); + } else { + DCHECK_EQ(nodes_.size(), 1); + n = 0; + } + } else { + const NodeIndex parent = current_branch_.back(); + DCHECK(!nodes_[parent].is_deleted); + const Literal parent_literal = nodes_[parent].Decision(); + if (assignment_.LiteralIsTrue(parent_literal)) { + if (nodes_[parent].true_child < nodes_.size()) { + n = nodes_[parent].true_child; + } else { + ++num_nodes_in_tree_; + nodes_.emplace_back(current_objective_lb_); + nodes_[parent].true_child = n; + } + nodes_[parent].UpdateTrueObjective(current_objective_lb_); + } else { + DCHECK(assignment_.LiteralIsFalse(parent_literal)); + if (nodes_[parent].false_child < nodes_.size()) { + n = nodes_[parent].false_child; + } else { + ++num_nodes_in_tree_; + nodes_.emplace_back(current_objective_lb_); + nodes_[parent].false_child = n; + } + nodes_[parent].UpdateFalseObjective(current_objective_lb_); + } + } + DCHECK_LT(n, nodes_.size()); + DCHECK_EQ(nodes_[n].literal_index, kNoLiteralIndex) << " issue " << n; + nodes_[n].SetDecision(decision); + nodes_[n].UpdateObjective(current_objective_lb_); current_branch_.push_back(n); } @@ -718,9 +1151,11 @@ void LbTreeSearch::ExploitReducedCosts(NodeIndex n) { // implied by it. We need that for correctness. 
int num_tests = 0; Node& node = nodes_[n]; - CHECK(!sat_solver_->Assignment().LiteralIsAssigned(node.literal)); + DCHECK(!node.is_deleted); + const Literal node_literal = node.Decision(); + DCHECK(!assignment_.LiteralIsAssigned(node_literal)); for (const IntegerLiteral integer_literal : - integer_encoder_->GetIntegerLiterals(node.literal)) { + integer_encoder_->GetIntegerLiterals(node_literal)) { // To avoid bad corner case. Not sure it ever triggers. if (++num_tests > 10) break; diff --git a/ortools/sat/lb_tree_search.h b/ortools/sat/lb_tree_search.h index 14a56778db..f0cc53e689 100644 --- a/ortools/sat/lb_tree_search.h +++ b/ortools/sat/lb_tree_search.h @@ -68,8 +68,7 @@ class LbTreeSearch { // Code a binary tree. DEFINE_STRONG_INDEX_TYPE(NodeIndex); struct Node { - Node(Literal l, IntegerValue lb) - : literal(l), true_objective(lb), false_objective(lb) {} + explicit Node(IntegerValue lb) : true_objective(lb), false_objective(lb) {} // The objective lower bound at this node. IntegerValue MinObjective() const { @@ -88,8 +87,24 @@ class LbTreeSearch { false_objective = std::max(false_objective, v); } + // Should be called only once. + void SetDecision(Literal l) { + DCHECK(!is_deleted); + DCHECK_EQ(literal_index, kNoLiteralIndex); + literal_index = l.Index(); + } + + Literal Decision() const { + DCHECK(!is_deleted); + DCHECK_NE(literal_index, kNoLiteralIndex); + return sat::Literal(literal_index); + } + + bool IsLeaf() const { return literal_index == kNoLiteralIndex; } + // The decision for the true and false branch under this node. - /*const*/ Literal literal; + // Initially this is kNoLiteralIndex until SetDecision() is called. + LiteralIndex literal_index = kNoLiteralIndex; // The objective lower bound in both branches. IntegerValue true_objective; @@ -101,10 +116,26 @@ class LbTreeSearch { // Indicates if this nodes was removed from the tree. bool is_deleted = false; + + // Experimental. Store the optimal basis at each node. 
+ int64_t basis_timestamp; + glop::BasisState basis; }; + // Regroup some logic done when we are back at level zero in Search(). + // Returns false if UNSAT. + bool LevelZeroLogic(); + + // Returns true if we save/load LP basis. + // Note that when this is true we also do not solve the LP as often. + bool SaveLpBasisOption() const { + return lp_constraint_ != nullptr && + parameters_.save_lp_basis_in_lb_tree_search(); + } + // Display the current tree, this is mainly here to investigate ideas to // improve the code. + std::string NodeDebugString(NodeIndex node) const; void DebugDisplayTree(NodeIndex root) const; // Updates the objective of the node in the current branch at level n from @@ -118,13 +149,21 @@ class LbTreeSearch { // Returns false on conflict. bool FullRestart(); + // Loads any known basis that is the closest to the current branch. + void EnableLpAndLoadBestBasis(); + void SaveLpBasisInto(Node& node); + bool NodeHasUpToDateBasis(const Node& node) const; + bool NodeHasBasis(const Node& node) const; + // Mark the given node as deleted. Its literal is assumed to be set. We also // delete the subtree that is not longer relevant. void MarkAsDeletedNodeAndUnreachableSubtree(Node& node); + void MarkBranchAsInfeasible(Node& node, bool true_branch); void MarkSubtreeAsDeleted(NodeIndex root); // Create a new node at the end of the current branch. // This assume the last decision in the branch is assigned. + NodeIndex CreateNewEmptyNodeIfNeeded(); void AppendNewNodeToCurrentBranch(Literal decision); // Update the bounds on the given nodes by using reduced costs if possible. @@ -138,6 +177,12 @@ class LbTreeSearch { // Used in the solve logs. std::string SmallProgressString() const; + // Save the current number of iterations on creation and add the difference + // to the counter when the returned function is called. 
This is meant to + // be used with: + // const auto cleanup = absl::MakeCleanup(UpdateLpIters(&counter)); + std::function UpdateLpIters(int64_t* counter); + // Model singleton class used here. const std::string name_; TimeLimit* time_limit_; @@ -145,6 +190,7 @@ class LbTreeSearch { SatSolver* sat_solver_; IntegerEncoder* integer_encoder_; Trail* trail_; + const VariablesAssignment& assignment_; IntegerTrail* integer_trail_; GenericLiteralWatcher* watcher_; SharedResponseManager* shared_response_; @@ -163,7 +209,7 @@ class LbTreeSearch { // Memory for all the nodes. int num_nodes_in_tree_ = 0; - absl::StrongVector nodes_; + util_intops::StrongVector nodes_; // The list of nodes in the current branch, in order from the root. std::vector current_branch_; @@ -177,6 +223,12 @@ class LbTreeSearch { // tree. int64_t num_decisions_taken_ = 0; + // Counts number of lp iterations at various places. + int64_t num_lp_iters_at_level_zero_ = 0; + int64_t num_lp_iters_save_basis_ = 0; + int64_t num_lp_iters_first_branch_ = 0; + int64_t num_lp_iters_dive_ = 0; + // Used to trigger the initial restarts and imports. 
int num_full_restarts_ = 0; int64_t num_decisions_taken_at_last_restart_ = 0; diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index 2dba78d663..f3f0c891fd 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -164,7 +164,7 @@ LinearExpression LinearConstraintBuilder::BuildExpression() { double ComputeActivity( const LinearConstraint& constraint, - const absl::StrongVector& values) { + const util_intops::StrongVector& values) { int i = 0; const int size = constraint.num_terms; const int shifted_size = size - 3; @@ -215,21 +215,28 @@ IntegerValue ComputeInfinityNorm(const LinearConstraint& ct) { } double ScalarProduct(const LinearConstraint& ct1, const LinearConstraint& ct2) { + if (ct1.num_terms == 0 || ct2.num_terms == 0) return 0.0; DCHECK(std::is_sorted(ct1.vars.get(), ct1.vars.get() + ct1.num_terms)); DCHECK(std::is_sorted(ct2.vars.get(), ct2.vars.get() + ct2.num_terms)); double scalar_product = 0.0; int index_1 = 0; int index_2 = 0; - while (index_1 < ct1.num_terms && index_2 < ct2.num_terms) { - if (ct1.vars[index_1] == ct2.vars[index_2]) { - scalar_product += - ToDouble(ct1.coeffs[index_1]) * ToDouble(ct2.coeffs[index_2]); - index_1++; - index_2++; - } else if (ct1.vars[index_1] > ct2.vars[index_2]) { - index_2++; + IntegerVariable var1 = ct1.vars[index_1]; + IntegerVariable var2 = ct2.vars[index_2]; + while (true) { + if (var1 == var2) { + scalar_product += static_cast(ct1.coeffs[index_1].value()) * + static_cast(ct2.coeffs[index_2].value()); + if (++index_1 == ct1.num_terms) break; + if (++index_2 == ct2.num_terms) break; + var1 = ct1.vars[index_1]; + var2 = ct2.vars[index_2]; + } else if (var1 > var2) { + if (++index_2 == ct2.num_terms) break; + var2 = ct2.vars[index_2]; } else { - index_1++; + if (++index_1 == ct1.num_terms) break; + var1 = ct1.vars[index_1]; } } return scalar_product; @@ -303,7 +310,7 @@ void MakeAllVariablesPositive(LinearConstraint* constraint) { } double 
LinearExpression::LpValue( - const absl::StrongVector& lp_values) const { + const util_intops::StrongVector& lp_values) const { double result = ToDouble(offset); for (int i = 0; i < vars.size(); ++i) { result += ToDouble(coeffs[i]) * lp_values[vars[i]]; diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 8b343343cb..10a7411b96 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -134,8 +134,8 @@ struct LinearExpression { IntegerValue offset = IntegerValue(0); // Return[s] the evaluation of the linear expression. - double LpValue( - const absl::StrongVector& lp_values) const; + double LpValue(const util_intops::StrongVector& + lp_values) const; IntegerValue LevelZeroMin(IntegerTrail* integer_trail) const; @@ -294,7 +294,7 @@ class LinearConstraintBuilder { // the linear terms. double ComputeActivity( const LinearConstraint& constraint, - const absl::StrongVector& values); + const util_intops::StrongVector& values); // Tests for possible overflow in the given linear constraint used for the // linear relaxation. 
This is a bit relaxed compared to what we require for diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index ffc597ec59..1f5288284e 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -304,7 +304,7 @@ void LinearConstraintManager::PermanentlyRemoveSomeConstraints() { ConstraintIndex new_size(0); equiv_constraints_.clear(); - absl::StrongVector index_mapping( + util_intops::StrongVector index_mapping( constraint_infos_.size()); int num_deleted_constraints = 0; for (ConstraintIndex i(0); i < constraint_infos_.size(); ++i) { @@ -792,7 +792,7 @@ bool LinearConstraintManager::DebugCheckConstraint( void TopNCuts::AddCut( LinearConstraint ct, absl::string_view name, - const absl::StrongVector& lp_solution) { + const util_intops::StrongVector& lp_solution) { if (ct.num_terms == 0) return; const double activity = ComputeActivity(ct, lp_solution); const double violation = diff --git a/ortools/sat/linear_constraint_manager.h b/ortools/sat/linear_constraint_manager.h index ebf799238f..c823f8f210 100644 --- a/ortools/sat/linear_constraint_manager.h +++ b/ortools/sat/linear_constraint_manager.h @@ -47,7 +47,8 @@ namespace sat { // case where we have many different LinearProgrammingConstraint and a lot of // variable, we could theoretically use up a quadratic amount of memory // otherwise. -struct ModelLpValues : public absl::StrongVector { +struct ModelLpValues + : public util_intops::StrongVector { ModelLpValues() = default; }; @@ -149,8 +150,8 @@ class LinearConstraintManager { void AddAllConstraintsToLp(); // All the constraints managed by this class. - const absl::StrongVector& AllConstraints() - const { + const util_intops::StrongVector& + AllConstraints() const { return constraint_infos_; } @@ -161,7 +162,7 @@ class LinearConstraintManager { } // To simplify CutGenerator api. 
- const absl::StrongVector& LpValues() { + const util_intops::StrongVector& LpValues() { return expanded_lp_solution_; } @@ -231,7 +232,7 @@ class LinearConstraintManager { // Optimization to avoid calling SimplifyConstraint() when not needed. int64_t last_simplification_timestamp_ = 0; - absl::StrongVector constraint_infos_; + util_intops::StrongVector constraint_infos_; // The subset of constraints currently in the lp. std::vector lp_constraints_; @@ -297,8 +298,9 @@ class TopNCuts { explicit TopNCuts(int n) : cuts_(n) {} // Adds a cut to the local pool. - void AddCut(LinearConstraint ct, absl::string_view name, - const absl::StrongVector& lp_solution); + void AddCut( + LinearConstraint ct, absl::string_view name, + const util_intops::StrongVector& lp_solution); // Empty the local pool and add all its content to the manager. void TransferToManager(LinearConstraintManager* manager); diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index da9812a817..64ed3d7f90 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -47,6 +46,7 @@ #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/scattered_vector.h" #include "ortools/lp_data/sparse_column.h" +#include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cuts.h" #include "ortools/sat/implied_bounds.h" #include "ortools/sat/integer.h" @@ -258,6 +258,7 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( time_limit_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), trail_(model->GetOrCreate()), + watcher_(model->GetOrCreate()), integer_encoder_(model->GetOrCreate()), product_detector_(model->GetOrCreate()), objective_definition_(model->GetOrCreate()), @@ -290,7 +291,6 @@ LinearProgrammingConstraint::LinearProgrammingConstraint( // Register SharedStatistics with the cut helpers. 
auto* stats = model->GetOrCreate(); integer_rounding_cut_helper_.SetSharedStatistics(stats); - flow_cover_cut_helper_.SetSharedStatistics(stats); cover_cut_helper_.SetSharedStatistics(stats); // Initialize the IntegerVariable -> ColIndex mapping. @@ -531,22 +531,21 @@ void LinearProgrammingConstraint::RegisterWith(Model* model) { return; } - GenericLiteralWatcher* watcher = model->GetOrCreate(); - const int watcher_id = watcher->Register(this); + watcher_id_ = watcher_->Register(this); const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; i++) { - watcher->WatchIntegerVariable(integer_variables_[i], watcher_id, i); + watcher_->WatchIntegerVariable(integer_variables_[i], watcher_id_, i); } if (objective_is_defined_) { - watcher->WatchUpperBound(objective_cp_, watcher_id); + watcher_->WatchUpperBound(objective_cp_, watcher_id_); } - watcher->SetPropagatorPriority(watcher_id, 2); - watcher->AlwaysCallAtLevelZero(watcher_id); + watcher_->SetPropagatorPriority(watcher_id_, 2); + watcher_->AlwaysCallAtLevelZero(watcher_id_); // Registering it with the trail make sure this class is always in sync when // it is used in the decision heuristics. integer_trail_->RegisterReversibleClass(this); - watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_); + watcher_->RegisterReversibleInt(watcher_id_, &rev_optimal_constraints_size_); } void LinearProgrammingConstraint::SetLevel(int level) { @@ -557,6 +556,7 @@ void LinearProgrammingConstraint::SetLevel(int level) { if (lp_solution_is_set_ && level < lp_solution_level_) { lp_solution_is_set_ = false; } + if (level < previous_level_) { lp_at_optimal_ = false; lp_objective_lower_bound_ = -std::numeric_limits::infinity(); @@ -568,6 +568,8 @@ void LinearProgrammingConstraint::SetLevel(int level) { // // TODO(user): Keep all optimal solution in the current branch? // TODO(user): Still try to add cuts/constraints though! + // TODO(user): Reload the basis? 
This might cause issue with the basis + // saving/loading code in lb_tree_search. if (level == 0 && !lp_solution_is_set_ && !level_zero_lp_solution_.empty()) { lp_solution_is_set_ = true; lp_solution_ = level_zero_lp_solution_; @@ -637,6 +639,12 @@ bool LinearProgrammingConstraint::IncrementalPropagate( // to be careful since the reversible int in IntegerSumLE are not registered. // However, because we delete "optimalconstraints" on backtrack, we might not // care. + // + // Remark: Note that if we do the sequence SetBasis() / Propagate() / + // GetAndSaveBasis() and we are in the case where the solution is still + // optimal, we should get the basis from when the lp solution was set which + // should be what we want. Even if we set garbage during SetBasis() it should + // be ignored. TODO(user): We might still have problem at level zero. return true; } @@ -667,11 +675,13 @@ void LinearProgrammingConstraint::UpdateBoundsOfLpVariables() { } bool LinearProgrammingConstraint::SolveLp() { - if (trail_->CurrentDecisionLevel() == 0) { + const int level = trail_->CurrentDecisionLevel(); + if (level == 0) { lp_at_level_zero_is_final_ = false; } const auto status = simplex_.Solve(lp_data_, time_limit_); + state_ = simplex_.GetState(); total_num_simplex_iterations_ += simplex_.GetNumberOfIterations(); if (!status.ok()) { VLOG(1) << "The LP solver encountered an error: " << status.error_message(); @@ -724,6 +734,7 @@ bool LinearProgrammingConstraint::SolveLp() { level_zero_lp_solution_ = lp_solution_; } } + return true; } @@ -809,7 +820,6 @@ bool LinearProgrammingConstraint::AnalyzeLp() { // Copy more info about the current solution. 
if (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL) { CHECK(lp_solution_is_set_); - lp_solution_is_integer_ = true; const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; i++) { @@ -1085,9 +1095,18 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( } } + // Note that the indexing will survive ComplementForSmallerLpValues() below. + if (ib_processor != nullptr) { + if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) { + ib_processor = nullptr; + } + } + // Try cover approach to find cut. // TODO(user): Share common computation between kinds. { + cover_cut_helper_.ClearCache(); + if (cover_cut_helper_.TrySingleNodeFlow(base_ct_, ib_processor)) { at_least_one_added |= PostprocessAndAddCut( absl::StrCat(name, "_FF"), cover_cut_helper_.Info(), first_slack, @@ -1098,6 +1117,9 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( absl::StrCat(name, "_K"), cover_cut_helper_.Info(), first_slack, cover_cut_helper_.cut()); } + + // This one need to be called after TrySimpleKnapsack() in order to reuse + // some cached data if possible. if (cover_cut_helper_.TryWithLetchfordSouliLifting(base_ct_, ib_processor)) { at_least_one_added |= PostprocessAndAddCut( @@ -1350,11 +1372,35 @@ void LinearProgrammingConstraint::AddObjectiveCut() { return; } - // Try knapsack. + // If there are no integer (all Booleans), no need to try implied bounds + // heurititics. By setting this to nullptr, we are a bit faster. + ImpliedBoundsProcessor* ib_processor = nullptr; + { + bool some_ints = false; + bool some_relevant_positions = false; + for (const CutTerm& term : base_ct_.terms) { + if (term.bound_diff > 1) some_ints = true; + if (term.HasRelevantLpValue()) some_relevant_positions = true; + } + + // If all value are integer, we will not be able to cut anything. + if (!some_relevant_positions) return; + if (some_ints) ib_processor = &implied_bounds_processor_; + } + + // Note that the indexing will survive the complement of terms below. 
const IntegerVariable first_slack( std::numeric_limits::max()); + if (ib_processor != nullptr) { + if (!ib_processor->CacheDataForCut(first_slack, &base_ct_)) { + ib_processor = nullptr; + } + } + + // Try knapsack. base_ct_.ComplementForPositiveCoefficients(); - if (cover_cut_helper_.TrySimpleKnapsack(base_ct_)) { + cover_cut_helper_.ClearCache(); + if (cover_cut_helper_.TrySimpleKnapsack(base_ct_, ib_processor)) { PostprocessAndAddCut("Objective_K", cover_cut_helper_.Info(), first_slack, cover_cut_helper_.cut()); } @@ -1364,7 +1410,7 @@ void LinearProgrammingConstraint::AddObjectiveCut() { options.max_scaling = parameters_.max_integer_rounding_scaling(); base_ct_.ComplementForSmallerLpValues(); if (integer_rounding_cut_helper_.ComputeCut(options, base_ct_, - &implied_bounds_processor_)) { + ib_processor)) { PostprocessAndAddCut("Objective_R", integer_rounding_cut_helper_.Info(), first_slack, integer_rounding_cut_helper_.cut()); } @@ -1385,7 +1431,7 @@ void LinearProgrammingConstraint::AddMirCuts() { // TODO(user): We could combine n rows to make sure we eliminate n variables // far away from their bounds by solving exactly in integer small linear // system. - absl::StrongVector dense_cut( + util_intops::StrongVector dense_cut( integer_variables_.size(), IntegerValue(0)); SparseBitset non_zeros_(ColIndex(integer_variables_.size())); @@ -1393,9 +1439,9 @@ void LinearProgrammingConstraint::AddMirCuts() { // for the MIR_n procedure below. const int num_rows = lp_data_.num_constraints().value(); std::vector> base_rows; - absl::StrongVector row_weights(num_rows, 0.0); - absl::StrongVector at_ub(num_rows, false); - absl::StrongVector at_lb(num_rows, false); + util_intops::StrongVector row_weights(num_rows, 0.0); + util_intops::StrongVector at_ub(num_rows, false); + util_intops::StrongVector at_lb(num_rows, false); for (RowIndex row(0); row < num_rows; ++row) { // We only consider tight rows. // We use both the status and activity to have as much options as possible. 
@@ -1443,7 +1489,7 @@ void LinearProgrammingConstraint::AddMirCuts() { std::shuffle(base_rows.begin(), base_rows.end(), *random_); std::vector weights; - absl::StrongVector used_rows; + util_intops::StrongVector used_rows; std::vector> integer_multipliers; for (const std::pair& entry : base_rows) { if (time_limit_->LimitReached()) break; @@ -1786,8 +1832,8 @@ bool LinearProgrammingConstraint::Propagate() { } int num_added = 0; - state_ = simplex_.GetState(); if (constraint_manager_.ChangeLp(&state_, &num_added)) { + ++num_lp_changes_; simplex_.LoadStateForNextSolve(state_); if (!CreateLpFromConstraintManager()) { return integer_trail_->ReportConflict({}); @@ -1838,9 +1884,15 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( const std::vector>& multipliers, int64_t overflow_cap) const { int64_t bound = 0; + const int64_t factor = int64_t{1} << power; + const double factor_as_double = static_cast(factor); + if (take_objective_into_account) { + bound = CapAdd(bound, CapProd(factor, objective_infinity_norm_.value())); + if (bound >= overflow_cap) return true; + } for (const auto [row, double_coeff] : multipliers) { const double magnitude = - std::abs(std::round(std::ldexp(double_coeff, power))); + std::abs(std::round(double_coeff * factor_as_double)); if (std::isnan(magnitude)) return true; if (magnitude >= static_cast(std::numeric_limits::max())) { return true; @@ -1849,11 +1901,6 @@ bool LinearProgrammingConstraint::ScalingCanOverflow( infinity_norms_[row].value())); if (bound >= overflow_cap) return true; } - if (take_objective_into_account) { - bound = CapAdd( - bound, CapProd(int64_t{1} << power, objective_infinity_norm_.value())); - if (bound >= overflow_cap) return true; - } return bound >= overflow_cap; } @@ -1925,8 +1972,9 @@ LinearProgrammingConstraint::ScaleLpMultiplier( // Scale the multipliers by *scaling. // Note that we use the exact same formula as in ScalingCanOverflow(). 
int64_t gcd = scaling->value(); + const double scaling_as_double = static_cast(scaling->value()); for (const auto [row, double_coeff] : tmp_cp_multipliers_) { - const IntegerValue coeff(std::round(std::ldexp(double_coeff, power))); + const IntegerValue coeff(std::round(double_coeff * scaling_as_double)); if (coeff != 0) { gcd = std::gcd(gcd, std::abs(coeff.value())); integer_multipliers.push_back({row, coeff}); diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 2a41be232e..6f4866ec22 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -108,10 +108,10 @@ class ScatteredIntegerVector { // from sparse to dense as needed. bool is_sparse_ = true; std::vector non_zeros_; - absl::StrongVector is_zeros_; + util_intops::StrongVector is_zeros_; // The dense representation of the vector. - absl::StrongVector dense_vector_; + util_intops::StrongVector dense_vector_; }; // A SAT constraint that enforces a set of linear inequality constraints on @@ -219,6 +219,9 @@ class LinearProgrammingConstraint : public PropagatorInterface, int64_t num_bad_cuts() const { return num_bad_cuts_; } int64_t num_scaling_issues() const { return num_scaling_issues_; } + // This can serve as a timestamp to know if a saved basis is out of date. + int64_t num_lp_changes() const { return num_lp_changes_; } + const std::vector& num_solves_by_status() const { return num_solves_by_status_; } @@ -240,9 +243,18 @@ class LinearProgrammingConstraint : public PropagatorInterface, // This api allows to temporarily disable the LP propagator which can be // costly during probing or other heavy propagation phase. 
- void EnablePropagation(bool enable) { enabled_ = enable; } + void EnablePropagation(bool enable) { + enabled_ = enable; + watcher_->CallOnNextPropagate(watcher_id_); + } bool PropagationIsEnabled() const { return enabled_; } + const glop::BasisState& GetBasisState() const { return state_; } + void LoadBasisState(const glop::BasisState& state) { + state_ = state; + simplex_.LoadStateForNextSolve(state_); + } + private: // Helper method to fill reduced cost / dual ray reason in 'integer_reason'. // Generates a set of IntegerLiterals explaining why the best solution can not @@ -355,7 +367,8 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Converts a dense representation of a linear constraint to a sparse one // expressed in terms of IntegerVariable. void ConvertToLinearConstraint( - const absl::StrongVector& dense_vector, + const util_intops::StrongVector& + dense_vector, IntegerValue upper_bound, LinearConstraint* result); // Compute the implied lower bound of the given linear expression using the @@ -435,8 +448,9 @@ class LinearProgrammingConstraint : public PropagatorInterface, LinearExpression integer_objective_; IntegerValue integer_objective_offset_ = IntegerValue(0); IntegerValue objective_infinity_norm_ = IntegerValue(0); - absl::StrongVector integer_lp_; - absl::StrongVector infinity_norms_; + util_intops::StrongVector + integer_lp_; + util_intops::StrongVector infinity_norms_; // Underlying LP solver API. glop::GlopParameters simplex_params_; @@ -451,7 +465,6 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Temporary data for cuts. 
ZeroHalfCutHelper zero_half_cut_helper_; CoverCutHelper cover_cut_helper_; - FlowCoverCutHelper flow_cover_cut_helper_; IntegerRoundingCutHelper integer_rounding_cut_helper_; bool problem_proven_infeasible_by_cuts_ = false; @@ -477,7 +490,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Note that these indices are dense in [0, mirror_lp_variable_.size()] so // they can be used as vector indices. // - // TODO(user): This should be absl::StrongVector Except if we have too many LinearProgrammingConstraint. std::vector integer_variables_; absl::flat_hash_map mirror_lp_variable_; @@ -497,6 +510,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, TimeLimit* time_limit_; IntegerTrail* integer_trail_; Trail* trail_; + GenericLiteralWatcher* watcher_; IntegerEncoder* integer_encoder_; ProductDetector* product_detector_; ObjectiveDefinition* objective_definition_; @@ -504,6 +518,8 @@ class LinearProgrammingConstraint : public PropagatorInterface, SharedResponseManager* shared_response_manager_; ModelRandomGenerator* random_; + int watcher_id_; + BoolRLTCutHelper rlt_cut_helper_; // Used while deriving cuts. @@ -587,6 +603,9 @@ class LinearProgrammingConstraint : public PropagatorInterface, int64_t total_num_cut_propagations_ = 0; int64_t total_num_eq_propagations_ = 0; + // The number of times we changed the LP. + int64_t num_lp_changes_ = 0; + // Some stats on the LP statuses encountered. 
int64_t num_solves_ = 0; mutable int64_t num_adjusts_ = 0; diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 95dd45336c..db1b60f13d 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -603,6 +603,7 @@ bool LinearPropagator::AddConstraint( info.rev_rhs = upper_bound; info.rev_size = vars.size(); infos_.push_back(std::move(info)); + initial_rhs_.push_back(upper_bound); } id_to_propagation_count_.push_back(0); @@ -643,20 +644,17 @@ bool LinearPropagator::AddConstraint( watcher_->CallOnNextPropagate(watcher_id_); } - // When a conditional precedence becomes enforced, add it. Note that - // we cannot just use rev_size == 2 since we might miss some - // explanation if a longer constraint only have 2 non-fixed variable - // now.. It is however okay not to push precedence involving a fixed - // variable, since these should be reflected in the variable domain - // anyway. + // When a conditional precedence becomes enforced, add it. + // Note that we only look at relations that were a "precedence" from + // the start, not the ones currently of size 2 if we ignore fixed + // variables. 
if (status == EnforcementStatus::IS_ENFORCED) { const auto info = infos_[id]; - if (info.initial_size == 2 && info.rev_size == 2 && - info.all_coeffs_are_one) { + if (info.initial_size == 2 && info.all_coeffs_are_one) { const auto vars = GetVariables(info); precedences_->PushConditionalRelation( enforcement_propagator_->GetEnforcementLiterals(enf_id), - vars[0], vars[1], info.rev_rhs); + vars[0], vars[1], initial_rhs_[id]); } } }); @@ -887,6 +885,39 @@ bool LinearPropagator::PropagateInfeasibleConstraint(int id, integer_reason_); } +void LinearPropagator::Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, + int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) { + literals_reason->clear(); + trail_indices_reason->clear(); + const ConstraintInfo& info = infos_[id]; + enforcement_propagator_->AddEnforcementReason(info.enf_id, literals_reason); + reason_coeffs_.clear(); + + const auto coeffs = GetCoeffs(info); + const auto vars = GetVariables(info); + for (int i = 0; i < info.initial_size; ++i) { + const IntegerVariable var = vars[i]; + if (PositiveVariable(var) == PositiveVariable(literal_to_explain.var)) { + continue; + } + const int index = + integer_trail_->FindTrailIndexOfVarBefore(var, trail_index); + if (index >= 0) { + trail_indices_reason->push_back(index); + if (propagation_slack > 0) { + reason_coeffs_.push_back(coeffs[i]); + } + } + } + if (propagation_slack > 0) { + integer_trail_->RelaxLinearReason(propagation_slack, reason_coeffs_, + trail_indices_reason); + } +} + bool LinearPropagator::PropagateOneConstraint(int id) { const auto [slack, num_to_push] = AnalyzeConstraint(id); if (slack < 0) return PropagateInfeasibleConstraint(id, slack); @@ -927,39 +958,9 @@ bool LinearPropagator::PropagateOneConstraint(int id) { const IntegerValue div = slack / coeff; const IntegerValue new_ub = integer_trail_->LowerBound(var) + div; const IntegerValue propagation_slack = (div + 1) * coeff - slack - 1; 
- if (!integer_trail_->Enqueue( - IntegerLiteral::LowerOrEqual(var, new_ub), - /*lazy_reason=*/[this, info, propagation_slack]( - IntegerLiteral i_lit, int trail_index, - std::vector* literal_reason, - std::vector* trail_indices_reason) { - literal_reason->clear(); - trail_indices_reason->clear(); - enforcement_propagator_->AddEnforcementReason(info.enf_id, - literal_reason); - reason_coeffs_.clear(); - - const auto coeffs = GetCoeffs(info); - const auto vars = GetVariables(info); - for (int i = 0; i < info.initial_size; ++i) { - const IntegerVariable var = vars[i]; - if (PositiveVariable(var) == PositiveVariable(i_lit.var)) { - continue; - } - const int index = - integer_trail_->FindTrailIndexOfVarBefore(var, trail_index); - if (index >= 0) { - trail_indices_reason->push_back(index); - if (propagation_slack > 0) { - reason_coeffs_.push_back(coeffs[i]); - } - } - } - if (propagation_slack > 0) { - integer_trail_->RelaxLinearReason( - propagation_slack, reason_coeffs_, trail_indices_reason); - } - })) { + if (!integer_trail_->EnqueueWithLazyReason( + IntegerLiteral::LowerOrEqual(var, new_ub), id, propagation_slack, + this)) { return false; } diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index ba255d4927..20e470935d 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -120,12 +120,12 @@ class EnforcementPropagator : public SatPropagator { // All enforcement will be copied there, and we will create Span out of this. // Note that we don't store the span so that we are not invalidated on buffer_ // resizing. - absl::StrongVector starts_; + util_intops::StrongVector starts_; std::vector buffer_; - absl::StrongVector statuses_; - absl::StrongVector> + util_intops::StrongVector statuses_; + util_intops::StrongVector< + EnforcementId, std::function> callbacks_; // Used to restore status and call callback on untrail. 
@@ -134,7 +134,7 @@ class EnforcementPropagator : public SatPropagator { int64_t rev_stamp_ = 0; // We use a two watcher scheme. - absl::StrongVector> + util_intops::StrongVector> watcher_; std::vector temp_literals_; @@ -279,9 +279,9 @@ class ConstraintPropagationOrder { // For each variable we only keep the constraint id that pushes it further. // In case of tie, we only keep the first to be registered. Bitset64 var_has_entry_; - absl::StrongVector var_to_id_; - absl::StrongVector var_to_lb_; - absl::StrongVector var_to_pos_; + util_intops::StrongVector var_to_id_; + util_intops::StrongVector var_to_lb_; + util_intops::StrongVector var_to_pos_; std::vector to_clear_; // Set/queue of constraints to be propagated. @@ -297,7 +297,9 @@ class ConstraintPropagationOrder { // - Lack detection and propagation of at least one of these linear is true // which can be used to propagate more bound if a variable appear in all these // constraint. -class LinearPropagator : public PropagatorInterface, ReversibleInterface { +class LinearPropagator : public PropagatorInterface, + ReversibleInterface, + LazyReasonInterface { public: explicit LinearPropagator(Model* model); ~LinearPropagator() override; @@ -313,6 +315,12 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { absl::Span coeffs, IntegerValue upper_bound); + // For LazyReasonInterface. + void Explain(int id, IntegerValue propagation_slack, + IntegerLiteral literal_to_explain, int trail_index, + std::vector* literals_reason, + std::vector* trail_indices_reason) final; + private: // We try to pack the struct as much as possible. Using a maximum size of // 1 << 29 should be okay since we split long constraint anyway. Technically @@ -323,7 +331,8 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { // initial size and enf_id that are only needed when we push something. 
struct ConstraintInfo { unsigned int enf_status : 2; - bool all_coeffs_are_one : 1; + // With Visual Studio or minGW, using bool here breaks the struct packing. + unsigned int all_coeffs_are_one : 1; unsigned int initial_size : 29; // Const. The size including all terms. EnforcementId enf_id; // Const. The id in enforcement_propagator_. @@ -332,10 +341,8 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { IntegerValue rev_rhs; // The current rhs, updated on fixed terms. }; -#if !defined(_MSC_VER) static_assert(sizeof(ConstraintInfo) == 24, "ERROR_ConstraintInfo_is_not_well_compacted"); -#endif // !defined(_MSC_VER) absl::Span GetCoeffs(const ConstraintInfo& info); absl::Span GetVariables(const ConstraintInfo& info); @@ -394,6 +401,7 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { // Per constraint info used during propagation. Note that we keep pointer for // the rev_size/rhs there, so we do need a deque. std::deque infos_; + std::vector initial_rhs_; // Buffer of the constraints data. std::vector variables_buffer_; @@ -421,15 +429,15 @@ class LinearPropagator : public PropagatorInterface, ReversibleInterface { std::vector unenforced_constraints_; // Watchers. - absl::StrongVector is_watched_; - absl::StrongVector> + util_intops::StrongVector is_watched_; + util_intops::StrongVector> var_to_constraint_ids_; // For an heuristic similar to Tarjan contribution to Bellman-Ford algorithm. // We mark for each variable the last constraint that pushed it, and also keep // the count of propagated variable for each constraint. SparseBitset propagated_by_was_set_; - absl::StrongVector propagated_by_; + util_intops::StrongVector propagated_by_; std::vector id_to_propagation_count_; // Used by DissasembleSubtreeAndAddToQueue(). 
diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index 0e1d7c827c..9c55cbab03 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -871,8 +871,8 @@ double FindFractionalScaling(const std::vector& coefficients, double FindBestScalingAndComputeErrors( const std::vector& coefficients, - const std::vector& lower_bounds, - const std::vector& upper_bounds, int64_t max_absolute_activity, + absl::Span lower_bounds, + absl::Span upper_bounds, int64_t max_absolute_activity, double wanted_absolute_activity_precision, double* relative_coeff_error, double* scaled_sum_error) { // Starts by computing the highest possible factor. diff --git a/ortools/sat/lp_utils.h b/ortools/sat/lp_utils.h index 72b952ce02..82a2f0c2ed 100644 --- a/ortools/sat/lp_utils.h +++ b/ortools/sat/lp_utils.h @@ -21,6 +21,7 @@ #include #include +#include "absl/types/span.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/lp_data/lp_data.h" #include "ortools/sat/boolean_problem.pb.h" @@ -66,8 +67,8 @@ int64_t FindRationalFactor(double x, int64_t limit = 1e4, // an exact definition for the max_absolute_activity allowed. 
double FindBestScalingAndComputeErrors( const std::vector& coefficients, - const std::vector& lower_bounds, - const std::vector& upper_bounds, int64_t max_absolute_activity, + absl::Span lower_bounds, + absl::Span upper_bounds, int64_t max_absolute_activity, double wanted_absolute_activity_precision, double* relative_coeff_error, double* scaled_sum_error); diff --git a/ortools/sat/max_hs.cc b/ortools/sat/max_hs.cc index 9ef9e66f39..83bd855f52 100644 --- a/ortools/sat/max_hs.cc +++ b/ortools/sat/max_hs.cc @@ -290,7 +290,7 @@ HittingSetOptimizer::ComputeAdditionalVariablesToExtract() { } void HittingSetOptimizer::ProjectAndAddAtMostOne( - const std::vector& literals) { + absl::Span literals) { LinearConstraintBuilder builder(model_, 0, 1); for (const Literal& literal : literals) { if (!builder.AddLiteralTerm(literal, 1)) { @@ -441,7 +441,7 @@ bool HittingSetOptimizer::ProcessSolution() { } void HittingSetOptimizer::AddCoresToTheMpModel( - const std::vector>& cores) { + absl::Span> cores) { MPModelProto* hs_model = request_.mutable_model(); for (const std::vector& core : cores) { diff --git a/ortools/sat/max_hs.h b/ortools/sat/max_hs.h index fa5e1546aa..5aeb001c4b 100644 --- a/ortools/sat/max_hs.h +++ b/ortools/sat/max_hs.h @@ -95,7 +95,7 @@ class HittingSetOptimizer { bool ComputeInitialMpModel(); // Project the at_most_one constraint on the set of extracted variables. - void ProjectAndAddAtMostOne(const std::vector& literals); + void ProjectAndAddAtMostOne(absl::Span literals); // Project the linear constraint on the set of extracted variables. Non // extracted variables are used to 'extend' the lower and upper bound of the @@ -114,7 +114,7 @@ class HittingSetOptimizer { void TightenMpModel(); // Processes the cores from the SAT solver and add them to the MPModel. - void AddCoresToTheMpModel(const std::vector>& cores); + void AddCoresToTheMpModel(absl::Span> cores); // Builds the assumptions from the current MP solution. 
std::vector BuildAssumptions( @@ -163,7 +163,7 @@ class HittingSetOptimizer { // variables. // By convention, we always associate the MPVariableProto with both the // positive and the negative SAT variable. - absl::StrongVector sat_var_to_mp_var_; + util_intops::StrongVector sat_var_to_mp_var_; // The list of created during the // ExtractVariable() method. diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index 25e4db15b9..bf9c8c6818 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -903,8 +903,8 @@ void CoreBasedOptimizer::PresolveObjectiveWithAtMostOne( // This contains non-negative value. If a literal has negative weight, then // we just put a positive weight on its negation and update the offset. const int num_literals = implications_->literal_size(); - absl::StrongVector weights(num_literals); - absl::StrongVector is_candidate(num_literals); + util_intops::StrongVector weights(num_literals); + util_intops::StrongVector is_candidate(num_literals); // For now, we do not use weight. Note that finding the at most on in the // creation order of the variable make a HUGE difference on the max-sat frb @@ -913,7 +913,7 @@ void CoreBasedOptimizer::PresolveObjectiveWithAtMostOne( // TODO(user): We can assign preferences to literals to favor certain at most // one instead of other. For now we don't, so ExpandAtMostOneWithWeight() will // kind of randomize the expansion amongst possible choices. - absl::StrongVector preferences; + util_intops::StrongVector preferences; // Collect all literals with "negative weights", we will try to find at most // one between them. 
diff --git a/ortools/sat/parameters_validation.cc b/ortools/sat/parameters_validation.cc index 4feb6c34c7..4ee24917e0 100644 --- a/ortools/sat/parameters_validation.cc +++ b/ortools/sat/parameters_validation.cc @@ -92,6 +92,7 @@ std::string ValidateParameters(const SatParameters& params) { TEST_IS_FINITE(mip_drop_tolerance); TEST_IS_FINITE(shared_tree_worker_objective_split_probability); TEST_IS_FINITE(shared_tree_open_leaves_per_worker); + TEST_IS_FINITE(feasibility_jump_batch_dtime); TEST_POSITIVE(at_most_one_max_expansion_size); @@ -102,7 +103,6 @@ std::string ValidateParameters(const SatParameters& params) { const int kMaxReasonableParallelism = 10'000; TEST_IN_RANGE(num_workers, 0, kMaxReasonableParallelism); TEST_IN_RANGE(num_search_workers, 0, kMaxReasonableParallelism); - TEST_IN_RANGE(min_num_lns_workers, 0, kMaxReasonableParallelism); TEST_IN_RANGE(shared_tree_num_workers, 0, kMaxReasonableParallelism); TEST_IN_RANGE(interleave_batch_size, 0, kMaxReasonableParallelism); TEST_IN_RANGE(shared_tree_open_leaves_per_worker, 1, @@ -163,14 +163,6 @@ std::string ValidateParameters(const SatParameters& params) { return "Do not specify both num_search_workers and num_workers"; } - if (params.has_shared_tree_num_workers() && - static_cast(params.shared_tree_num_workers()) + - static_cast(params.min_num_lns_workers()) > - std::max(params.num_workers(), - params.num_search_workers())) { - return "Cannot have more shared tree + lns workers than total workers"; - } - if (params.use_shared_tree_search()) { return "use_shared_tree_search must only be set on workers' parameters"; } @@ -199,11 +191,6 @@ std::string ValidateParameters(const SatParameters& params) { } } - for (const std::string& subsolver : params.ignore_subsolvers()) { - if (!strategies.contains(subsolver)) { - return absl::StrCat("subsolver \'", subsolver, "\' is not valid"); - } - } return ""; } diff --git a/ortools/sat/pb_constraint.cc b/ortools/sat/pb_constraint.cc index 6e70663d8c..c60e743863 100644 --- 
a/ortools/sat/pb_constraint.cc +++ b/ortools/sat/pb_constraint.cc @@ -114,7 +114,7 @@ bool ComputeBooleanLinearExpressionCanonicalForm( } bool ApplyLiteralMapping( - const absl::StrongVector& mapping, + const util_intops::StrongVector& mapping, std::vector* cst, Coefficient* bound_shift, Coefficient* max_value) { int index = 0; @@ -1099,7 +1099,7 @@ void PbConstraints::UpdateActivityIncrement() { } void PbConstraints::DeleteConstraintMarkedForDeletion() { - absl::StrongVector index_mapping( + util_intops::StrongVector index_mapping( constraints_.size(), ConstraintIndex(-1)); ConstraintIndex new_index(0); for (ConstraintIndex i(0); i < constraints_.size(); ++i) { diff --git a/ortools/sat/pb_constraint.h b/ortools/sat/pb_constraint.h index ba42352f69..acd164a377 100644 --- a/ortools/sat/pb_constraint.h +++ b/ortools/sat/pb_constraint.h @@ -103,7 +103,7 @@ bool ComputeBooleanLinearExpressionCanonicalForm( // Finally, this will return false if some integer overflow or underflow // occurred during the constraint simplification. bool ApplyLiteralMapping( - const absl::StrongVector& mapping, + const util_intops::StrongVector& mapping, std::vector* cst, Coefficient* bound_shift, Coefficient* max_value); @@ -331,7 +331,7 @@ class MutableUpperBoundedLinearConstraint { // The encoding is special: // - If terms_[x] > 0, then the associated term is 'terms_[x] . x' // - If terms_[x] < 0, then the associated term is 'terms_[x] . (x - 1)' - absl::StrongVector terms_; + util_intops::StrongVector terms_; // The right hand side of the constraint (sum terms <= rhs_). Coefficient rhs_; @@ -657,11 +657,11 @@ class PbConstraints : public SatPropagator { std::vector> constraints_; // The current value of the threshold for each constraints. - absl::StrongVector thresholds_; + util_intops::StrongVector thresholds_; // For each literal, the list of all the constraints that contains it together // with the literal coefficient in these constraints. 
- absl::StrongVector> + util_intops::StrongVector> to_update_; // Bitset used to optimize the Untrail() function. @@ -736,7 +736,7 @@ class VariableWithSameReasonIdentifier { private: const Trail& trail_; - absl::StrongVector first_variable_; + util_intops::StrongVector first_variable_; SparseBitset seen_; }; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index e27702d7ab..a991dc877b 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -208,8 +208,8 @@ void PrecedenceRelations::Build() { is_built_ = true; const int num_nodes = graph_.num_nodes(); - absl::StrongVector> before( - num_nodes); + util_intops::StrongVector> + before(num_nodes); // We will construct a graph with the current relation from all_relations_. // And use this to compute the "closure". @@ -402,7 +402,7 @@ void PrecedenceRelations::ComputeFullPrecedences( } void PrecedenceRelations::CollectPrecedences( - const std::vector& vars, + absl::Span vars, std::vector* output) { // +1 for the negation. const int needed_size = @@ -1245,7 +1245,7 @@ int GreaterThanAtLeastOneOfDetector:: auto* solver = model->GetOrCreate(); // Fill the set of interesting relations for each variables. 
- absl::StrongVector> var_to_relations; + util_intops::StrongVector> var_to_relations; for (int index = 0; index < relations_.size(); ++index) { const Relation& r = relations_[index]; if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index bb2ce15d5e..3c26f9c73b 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -109,7 +109,7 @@ class PrecedenceRelations : public ReversibleInterface { IntegerVariable var; int index; }; - void CollectPrecedences(const std::vector& vars, + void CollectPrecedences(absl::Span vars, std::vector* output); // If we don't have too many variable, we compute the full transitive closure @@ -233,14 +233,15 @@ class PrecedenceRelations : public ReversibleInterface { // Store for each variable x, the variables y that appears in GetOffset(x, y) // or GetConditionalOffset(x, y). That is the variable that are after x with // an offset. Note that conditional_after_ is updated on dive/backtrack. - absl::StrongVector> after_; - absl::StrongVector> + util_intops::StrongVector> + after_; + util_intops::StrongVector> conditional_after_; // Temp data for CollectPrecedences. std::vector var_with_positive_degree_; - absl::StrongVector var_to_degree_; - absl::StrongVector var_to_last_index_; + util_intops::StrongVector var_to_degree_; + util_intops::StrongVector var_to_last_index_; std::vector tmp_precedences_; }; @@ -413,17 +414,18 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // consecutive like in StaticGraph should have a big performance impact. // // TODO(user): We do not need to store ArcInfo.tail_var here. - absl::StrongVector> + util_intops::StrongVector> impacted_arcs_; - absl::StrongVector arcs_; + util_intops::StrongVector arcs_; // This is similar to impacted_arcs_/arcs_ but it is only used to propagate // one of the presence literals when the arc cannot be present. 
An arc needs // to appear only once in potential_arcs_, but it will be referenced by // all its variable in impacted_potential_arcs_. - absl::StrongVector> + util_intops::StrongVector> impacted_potential_arcs_; - absl::StrongVector potential_arcs_; + util_intops::StrongVector potential_arcs_; // Each time a literal becomes true, this list the set of arcs for which we // need to decrement their count. When an arc count reach zero, it must be @@ -432,9 +434,9 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // // TODO(user): Try a one-watcher approach instead. Note that in most cases // arc should be controlled by 1 or 2 literals, so not sure it is worth it. - absl::StrongVector> + util_intops::StrongVector> literal_to_new_impacted_arcs_; - absl::StrongVector arc_counts_; + util_intops::StrongVector arc_counts_; // Temp vectors to hold the reason of an assignment. std::vector literal_reason_; @@ -514,7 +516,7 @@ class GreaterThanAtLeastOneOfDetector { }; std::vector relations_; - absl::StrongVector> lit_to_relations_; + util_intops::StrongVector> lit_to_relations_; }; // ============================================================================= diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 5a60479798..722f530cdf 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -1011,6 +1011,23 @@ bool PresolveContext::StoreAffineRelation(int ref_x, int ref_y, int64_t coeff, CHECK_NE(coeff, 0); if (is_unsat_) return false; + if (hint_is_loaded_) { + const int var_x = PositiveRef(ref_x); + const int var_y = PositiveRef(ref_y); + if (!hint_has_value_[var_y] && hint_has_value_[var_x]) { + hint_has_value_[var_y] = true; + const int64_t x_mult = RefIsPositive(ref_x) ? 1 : -1; + const int64_t y_mult = RefIsPositive(ref_y) ? 
1 : -1; + hint_[var_y] = (hint_[var_x] * x_mult - offset) / coeff * y_mult; + if (hint_[var_y] * coeff * y_mult + offset != hint_[var_x] * x_mult) { + // TODO(user): Do we implement a rounding to closest instead of + // rounding towards 0. + UpdateRuleStats( + "Warning: hint didn't satisfy affine relation and was corrected"); + } + } + } + #ifdef CHECK_HINT const int64_t vx = RefIsPositive(ref_x) ? hint_[ref_x] : -hint_[NegatedRef(ref_x)]; @@ -2208,8 +2225,12 @@ int PresolveContext::GetOrCreateReifiedPrecedenceLiteral( auto* const bool_or = working_model->add_constraints()->mutable_bool_or(); bool_or->add_literals(result); bool_or->add_literals(rev_it->second); - bool_or->add_literals(NegatedRef(active_i)); - bool_or->add_literals(NegatedRef(active_j)); + if (!LiteralIsTrue(active_i)) { + bool_or->add_literals(NegatedRef(active_i)); + } + if (!LiteralIsTrue(active_j)) { + bool_or->add_literals(NegatedRef(active_j)); + } } return result; @@ -2315,24 +2336,6 @@ bool LoadModelForProbing(PresolveContext* context, Model* local_model) { return true; } -int PresolveContext::GetIntervalRepresentative(int index) { - const IntervalConstraintProto& interval = - working_model->constraints(index).interval(); - const auto [it, inserted] = - interval_representative_.insert({interval.SerializeAsString(), index}); - if (!inserted && index != it->second) { - // In case the "representative" was deleted. 
- if (working_model->constraints(it->second).SerializeAsString() != - it->first) { - it->second = index; - return index; - } - UpdateRuleStats("intervals: change duplicate index"); - return it->second; - } - return index; -} - template bool CanonicalizeLinearExpressionInternal( absl::Span enforcements, ProtoWithVarsAndCoeffs* proto, diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 7b8b8240d3..2244435d21 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -570,9 +570,6 @@ class PresolveContext { // Logs stats to the logger. void LogInfo(); - // Return the given index, or the index of an interval with the same data. - int GetIntervalRepresentative(int index); - // This should be called only once after InitializeNewDomains() to load // the hint, in order to maintain it as best as possible during presolve. void LoadSolutionHint(); @@ -746,9 +743,6 @@ class PresolveContext { // Just used to display statistics on the presolve rules that were used. absl::flat_hash_map stats_by_rule_name_; - // Serialized proto (should be small) to index. - absl::flat_hash_map interval_representative_; - // Used by CanonicalizeLinearExpressionInternal(). std::vector> tmp_terms_; diff --git a/ortools/sat/presolve_util.cc b/ortools/sat/presolve_util.cc index 9df169980a..6dfaa8f80d 100644 --- a/ortools/sat/presolve_util.cc +++ b/ortools/sat/presolve_util.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -302,27 +303,27 @@ void ActivityBoundHelper::AddAllAtMostOnes(const CpModelProto& proto) { int64_t ActivityBoundHelper::ComputeActivity( bool compute_min, absl::Span> terms, std::vector>* conditional) { - tmp_terms_.clear(); - tmp_terms_.reserve(terms.size()); + tmp_terms_for_compute_activity_.clear(); + tmp_terms_for_compute_activity_.reserve(terms.size()); int64_t offset = 0; for (auto [lit, coeff] : terms) { if (compute_min) coeff = -coeff; // Negate. 
if (coeff >= 0) { - tmp_terms_.push_back({lit, coeff}); + tmp_terms_for_compute_activity_.push_back({lit, coeff}); } else { // l is the same as 1 - (1 - l) - tmp_terms_.push_back({NegatedRef(lit), -coeff}); + tmp_terms_for_compute_activity_.push_back({NegatedRef(lit), -coeff}); offset += coeff; } } const int64_t internal_result = - ComputeMaxActivityInternal(tmp_terms_, conditional); + ComputeMaxActivityInternal(tmp_terms_for_compute_activity_, conditional); // Correct everything. if (conditional != nullptr) { const int num_terms = terms.size(); for (int i = 0; i < num_terms; ++i) { - if (tmp_terms_[i].first != terms[i].first) { + if (tmp_terms_for_compute_activity_[i].first != terms[i].first) { // The true/false meaning is swapped std::swap((*conditional)[i][0], (*conditional)[i][1]); } @@ -357,17 +358,23 @@ void ActivityBoundHelper::PartitionIntoAmo( amo_sums_[a] += coeff; } } - to_sort_.push_back({terms[i].second, i}); + to_sort_.push_back( + TermWithIndex{.coeff = coeff, .index = index, .span_index = i}); } - std::sort(to_sort_.begin(), to_sort_.end(), std::greater<>()); + std::sort(to_sort_.begin(), to_sort_.end(), + [](const TermWithIndex& a, const TermWithIndex& b) { + // We take into account the index to make the result + // deterministic. 
+ return std::tie(a.coeff, a.index) > std::tie(b.coeff, b.index); + }); int num_parts = 0; partition_.resize(num_terms); used_amo_to_dense_index_.clear(); - for (int i = 0; i < num_terms; ++i) { - const int original_i = to_sort_[i].second; - const Index index = IndexFromLiteral(terms[original_i].first); - const int64_t coeff = terms[original_i].second; + for (const TermWithIndex& term : to_sort_) { + const int original_i = term.span_index; + const Index index = term.index; + const int64_t coeff = term.coeff; int best = -1; int64_t best_sum = 0; bool done = false; diff --git a/ortools/sat/presolve_util.h b/ortools/sat/presolve_util.h index ba8cd024ae..d584a68d16 100644 --- a/ortools/sat/presolve_util.h +++ b/ortools/sat/presolve_util.h @@ -137,7 +137,7 @@ class DomainDeductions { std::vector tmp_num_occurrences_; SparseBitset something_changed_; - absl::StrongVector> enforcement_to_vars_; + util_intops::StrongVector> enforcement_to_vars_; absl::flat_hash_map, Domain> deductions_; }; @@ -188,6 +188,10 @@ class ActivityBoundHelper { // // Important: We shouldn't have duplicates or a lit and NegatedRef(lit) // appearing both. + + // Note: the result of this function is not exact (it uses a heuristic to + // detect AMOs), but it does not depend on the order of the input terms, so + // passing an input in non-deterministic order is fine. + // + // TODO(user): Indicate when the bounds are trivial (i.e. not intersection + // with any amo) so that we don't waste more time processing the result? @@ -261,10 +265,16 @@ class ActivityBoundHelper { // We use an unique index by at most one, and just stores for each literal // the at most one to which it belong. 
int num_at_most_ones_ = 0; - absl::StrongVector> amo_indices_; + util_intops::StrongVector> amo_indices_; - std::vector> tmp_terms_; - std::vector> to_sort_; + std::vector> tmp_terms_for_compute_activity_; + + struct TermWithIndex { + int64_t coeff; + Index index; + int span_index; + }; + std::vector to_sort_; // We partition the set of term into disjoint at most one. absl::flat_hash_map used_amo_to_dense_index_; @@ -305,7 +315,7 @@ class ClauseWithOneMissingHasher { } absl::BitGenRef random_; - absl::StrongVector literal_to_hash_; + util_intops::StrongVector literal_to_hash_; std::vector clause_to_hash_; }; diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index 101018680e..1a19cab9e3 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -540,10 +540,10 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model* model) { bool operator<(const SavedNextLiteral& o) const { return rank < o.rank; } }; std::vector queue; - absl::StrongVector position_in_order; + util_intops::StrongVector position_in_order; // This is only needed when options use_queue is false; - absl::StrongVector starts; + util_intops::StrongVector starts; if (!options.use_queue) starts.resize(2 * num_variables, 0); // We delay fixing of already assigned literal once we go back to level diff --git a/ortools/sat/pseudo_costs.h b/ortools/sat/pseudo_costs.h index 0f73294921..3ab96f30e0 100644 --- a/ortools/sat/pseudo_costs.h +++ b/ortools/sat/pseudo_costs.h @@ -14,6 +14,8 @@ #ifndef OR_TOOLS_SAT_PSEUDO_COSTS_H_ #define OR_TOOLS_SAT_PSEUDO_COSTS_H_ +#include +#include #include #include "absl/log/check.h" @@ -127,16 +129,16 @@ class PseudoCosts { // Current IntegerVariable pseudo costs. 
std::vector relevant_variables_; - absl::StrongVector is_relevant_; - absl::StrongVector scores_; - absl::StrongVector pseudo_costs_; + util_intops::StrongVector is_relevant_; + util_intops::StrongVector scores_; + util_intops::StrongVector pseudo_costs_; // This version is mainly based on the lp relaxation. - absl::StrongVector + util_intops::StrongVector average_unit_objective_increase_; // This version is based on objective increase explanation. - absl::StrongVector lit_pseudo_costs_; + util_intops::StrongVector lit_pseudo_costs_; }; } // namespace sat diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 8165fa3394..be6f2282b3 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2768,6 +2768,10 @@ class CpModel: self.assert_is_boolean_variable(arg.negated()) return arg.index if isinstance(arg, IntegralTypes): + if arg == ~False: # -1 + return self.get_or_make_index_from_constant(1) + if arg == ~True: # -2 + return self.get_or_make_index_from_constant(0) arg = cmh.assert_is_zero_or_one(arg) return self.get_or_make_index_from_constant(arg) if cmh.is_boolean(arg): diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 76c07e27a7..31060a4c77 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -110,6 +110,15 @@ class LogToString: return self.__log +class BestBoundCallback: + + def __init__(self): + self.best_bound: float = 0.0 + + def new_best_bound(self, bb: float): + self.best_bound = bb + + class CpModelTest(absltest.TestCase): def testCreateIntegerVariable(self): @@ -1253,6 +1262,25 @@ class CpModelTest(absltest.TestCase): self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(6, solution_sum.sum) + def testBestBoundCallback(self): + print("testBestBoundCallback") + model = cp_model.CpModel() + x0 = model.new_bool_var("x0") + x1 = model.new_bool_var("x1") + x2 = model.new_bool_var("x2") + x3 = model.new_bool_var("x3") 
+ model.add_bool_or(x0, x1, x2, x3) + model.minimize(3 * x0 + 2 * x1 + 4 * x2 + 5 * x3 + 0.6) + + solver = cp_model.CpSolver() + best_bound_callback = BestBoundCallback() + solver.best_bound_callback = best_bound_callback.new_best_bound + solver.parameters.num_workers = 1 + solver.parameters.linearization_level = 2 + status = solver.solve(model) + self.assertEqual(cp_model.OPTIMAL, status) + self.assertEqual(2.6, best_bound_callback.best_bound) + def testValue(self): print("testValue") model = cp_model.CpModel() diff --git a/ortools/sat/python/swig_helper_test.py b/ortools/sat/python/swig_helper_test.py index adb9011f95..a332ecb53a 100644 --- a/ortools/sat/python/swig_helper_test.py +++ b/ortools/sat/python/swig_helper_test.py @@ -35,6 +35,15 @@ class Callback(swig_helper.SolutionCallback): return self.__solution_count +class BestBoundCallback: + + def __init__(self): + self.best_bound: float = 0.0 + + def new_best_bound(self, bb: float): + self.best_bound = bb + + class SwigHelperTest(absltest.TestCase): def testVariableDomain(self): @@ -180,6 +189,35 @@ class SwigHelperTest(absltest.TestCase): self.assertEqual(5, callback.solution_count()) self.assertEqual(cp_model_pb2.OPTIMAL, solution.status) + def testBestBoundCallback(self): + model_string = """ + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + variables { domain: 0 domain: 1 } + constraints { bool_or { literals: [0, 1, 2, 3] } } + objective { + vars: [0, 1, 2, 3] + coeffs: [3, 2, 4, 5] + offset: 0.6 + } + """ + model = cp_model_pb2.CpModelProto() + self.assertTrue(text_format.Parse(model_string, model)) + + solve_wrapper = swig_helper.SolveWrapper() + best_bound_callback = BestBoundCallback() + solve_wrapper.add_best_bound_callback(best_bound_callback.new_best_bound) + params = sat_parameters_pb2.SatParameters() + params.num_workers = 1 + params.linearization_level = 2 + params.log_search_progress = True + solve_wrapper.set_parameters(params) + 
solution = solve_wrapper.solve(model) + + self.assertEqual(2.6, best_bound_callback.best_bound) + self.assertEqual(cp_model_pb2.OPTIMAL, solution.status) + def testModelStats(self): model_string = """ variables { domain: -10 domain: 10 } diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index 237d04f725..b7242a9041 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -798,7 +798,7 @@ void SeparateFlowInequalities( IntegerValue* min_incoming_flow, IntegerValue* min_outgoing_flow)> get_flows, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, LinearConstraintManager* manager, Model* model) { // We will collect only the arcs with a positive lp capacity value to speed up // some computation below. diff --git a/ortools/sat/samples/BUILD.bazel b/ortools/sat/samples/BUILD.bazel index e081d680b0..47f1ef0c75 100644 --- a/ortools/sat/samples/BUILD.bazel +++ b/ortools/sat/samples/BUILD.bazel @@ -13,6 +13,8 @@ load(":code_samples.bzl", "code_sample_cc_py", "code_sample_java", "code_sample_py") +code_sample_py(name = "all_different_except_zero_sample_sat") + code_sample_cc_py(name = "assignment_sat") code_sample_cc_py(name = "assignment_groups_sat") @@ -47,6 +49,8 @@ code_sample_cc_py(name = "earliness_tardiness_cost_sample_sat") code_sample_py(name = "index_first_boolvar_true_sample_sat") +code_sample_py(name = "interval_relations_sample_sat") + code_sample_cc_py(name = "interval_sample_sat") code_sample_cc_py(name = "minimal_jobshop_sat") @@ -93,6 +97,8 @@ code_sample_cc_py(name = "solve_with_time_limit_sample_sat") code_sample_cc_py(name = "stop_after_n_solutions_sample_sat") +code_sample_py(name = "transitions_in_no_overlap_sample_sat") + code_sample_java(name = "AssignmentGroupsSat") code_sample_java(name = "AssignmentSat") diff --git a/ortools/sat/samples/ChannelingSampleSat.java b/ortools/sat/samples/ChannelingSampleSat.java index 46c3f99e6f..59f220fb3f 100644 --- 
a/ortools/sat/samples/ChannelingSampleSat.java +++ b/ortools/sat/samples/ChannelingSampleSat.java @@ -18,13 +18,14 @@ import com.google.ortools.sat.BoolVar; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; import com.google.ortools.sat.SatParameters; /** Link integer constraints together. */ -public class ChannelingSampleSat { +public final class ChannelingSampleSat { public static void main(String[] args) throws Exception { Loader.loadNativeLibraries(); // Create the CP-SAT model. @@ -60,7 +61,7 @@ public class ChannelingSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. - solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; @@ -77,4 +78,6 @@ public class ChannelingSampleSat { private IntVar[] variableArray; }.init(new IntVar[] {vars[0], vars[1], b})); } + + private ChannelingSampleSat() {} } diff --git a/ortools/sat/samples/CpIsFunSat.java b/ortools/sat/samples/CpIsFunSat.java index ed5b9d3074..70a28d3763 100644 --- a/ortools/sat/samples/CpIsFunSat.java +++ b/ortools/sat/samples/CpIsFunSat.java @@ -13,11 +13,13 @@ // [START program] package com.google.ortools.sat.samples; + // [START import] import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; // [END import] @@ -46,9 +48,10 @@ public final class CpIsFunSat { private int 
solutionCount; private final IntVar[] variableArray; } + // [END solution_printer] - public static void main(String[] args) throws Exception { + public static void main(String[] args) { Loader.loadNativeLibraries(); // Create the model. // [START model] @@ -56,7 +59,7 @@ public final class CpIsFunSat { // [END model] // [START variables] - int base = 10; + final int base = 10; IntVar c = model.newIntVar(1, base - 1, "C"); IntVar p = model.newIntVar(0, base - 1, "P"); IntVar i = model.newIntVar(1, base - 1, "I"); @@ -90,7 +93,7 @@ public final class CpIsFunSat { // Tell the solver to enumerate all solutions. solver.getParameters().setEnumerateAllSolutions(true); // And solve. - solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); // [END solve] // Statistics. diff --git a/ortools/sat/samples/EarlinessTardinessCostSampleSat.java b/ortools/sat/samples/EarlinessTardinessCostSampleSat.java index 6326bf831b..2097ef62d5 100644 --- a/ortools/sat/samples/EarlinessTardinessCostSampleSat.java +++ b/ortools/sat/samples/EarlinessTardinessCostSampleSat.java @@ -17,6 +17,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; @@ -75,7 +76,7 @@ public class EarlinessTardinessCostSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. 
- solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; diff --git a/ortools/sat/samples/NQueensSat.java b/ortools/sat/samples/NQueensSat.java index 27d8c483ca..1ad85aab52 100644 --- a/ortools/sat/samples/NQueensSat.java +++ b/ortools/sat/samples/NQueensSat.java @@ -13,11 +13,13 @@ // [START program] package com.google.ortools.sat.samples; + // [START import] import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; // [END import] @@ -99,7 +101,7 @@ public final class NQueensSat { // Tell the solver to enumerate all solutions. solver.getParameters().setEnumerateAllSolutions(true); // And solve. - solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); // [END solve] // Statistics. diff --git a/ortools/sat/samples/SearchForAllSolutionsSampleSat.java b/ortools/sat/samples/SearchForAllSolutionsSampleSat.java index 1f19bfec7d..fd15d01635 100644 --- a/ortools/sat/samples/SearchForAllSolutionsSampleSat.java +++ b/ortools/sat/samples/SearchForAllSolutionsSampleSat.java @@ -18,6 +18,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; /** Code sample that solves a model and displays all solutions. */ @@ -74,7 +75,7 @@ public class SearchForAllSolutionsSampleSat { // Tell the solver to enumerate all solutions. solver.getParameters().setEnumerateAllSolutions(true); // And solve. 
- solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); // [END solve] System.out.println(cb.getSolutionCount() + " solutions found."); diff --git a/ortools/sat/samples/SolutionHintingSampleSat.java b/ortools/sat/samples/SolutionHintingSampleSat.java index 6edda57fb5..8b7671a889 100644 --- a/ortools/sat/samples/SolutionHintingSampleSat.java +++ b/ortools/sat/samples/SolutionHintingSampleSat.java @@ -18,6 +18,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; @@ -57,7 +58,7 @@ public class SolutionHintingSampleSat { CpSolver solver = new CpSolver(); VarArraySolutionPrinterWithObjective cb = new VarArraySolutionPrinterWithObjective(new IntVar[] {x, y, z}); - solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); // [END solve] } diff --git a/ortools/sat/samples/SolveAndPrintIntermediateSolutionsSampleSat.java b/ortools/sat/samples/SolveAndPrintIntermediateSolutionsSampleSat.java index 3166eb43af..305c865c02 100644 --- a/ortools/sat/samples/SolveAndPrintIntermediateSolutionsSampleSat.java +++ b/ortools/sat/samples/SolveAndPrintIntermediateSolutionsSampleSat.java @@ -18,11 +18,13 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.LinearExpr; +import java.util.function.Consumer; /** Solves an optimization problem and displays all intermediate solutions. 
*/ -public class SolveAndPrintIntermediateSolutionsSampleSat { +public final class SolveAndPrintIntermediateSolutionsSampleSat { // [START print_solution] static class VarArraySolutionPrinterWithObjective extends CpSolverSolutionCallback { public VarArraySolutionPrinterWithObjective(IntVar[] variables) { @@ -46,6 +48,27 @@ public class SolveAndPrintIntermediateSolutionsSampleSat { private int solutionCount; private final IntVar[] variableArray; } + + static class BestBoundCallback implements Consumer { + public BestBoundCallback() { + bestBound = 0.0; + numImprovements = 0; + } + + @Override + public void accept(Double bound) { + bestBound = bound; + numImprovements++; + } + + public double getBestBound() { + return bestBound; + } + + double bestBound; + int numImprovements; + } + // [END print_solution] public static void main(String[] args) throws Exception { @@ -79,10 +102,18 @@ public class SolveAndPrintIntermediateSolutionsSampleSat { CpSolver solver = new CpSolver(); VarArraySolutionPrinterWithObjective cb = new VarArraySolutionPrinterWithObjective(new IntVar[] {x, y, z}); - solver.solve(model, cb); + solver.getParameters().setNumWorkers(1); + solver.getParameters().setLinearizationLevel(2); + BestBoundCallback bestBoundCallback = new BestBoundCallback(); + + solver.setBestBoundCallback(bestBoundCallback); + CpSolverStatus unusedStatus = solver.solve(model, cb); // [END solve] - System.out.println(cb.getSolutionCount() + " solutions found."); + System.out.println("solution count: " + cb.getSolutionCount()); + System.out.println("best bound count: " + bestBoundCallback.numImprovements); } + + private SolveAndPrintIntermediateSolutionsSampleSat() {} } // [END program] diff --git a/ortools/sat/samples/StepFunctionSampleSat.java b/ortools/sat/samples/StepFunctionSampleSat.java index 490fd98eb8..867eadd3de 100644 --- a/ortools/sat/samples/StepFunctionSampleSat.java +++ b/ortools/sat/samples/StepFunctionSampleSat.java @@ -17,6 +17,7 @@ import 
com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.DecisionStrategyProto; import com.google.ortools.sat.IntVar; import com.google.ortools.sat.Literal; @@ -80,7 +81,7 @@ public class StepFunctionSampleSat { solver.getParameters().setEnumerateAllSolutions(true); // Solve the problem with the printer callback. - solver.solve(model, new CpSolverSolutionCallback() { + CpSolverStatus unusedStatus = solver.solve(model, new CpSolverSolutionCallback() { public CpSolverSolutionCallback init(IntVar[] variables) { variableArray = variables; return this; diff --git a/ortools/sat/samples/StopAfterNSolutionsSampleSat.java b/ortools/sat/samples/StopAfterNSolutionsSampleSat.java index d03d9267f2..215558e100 100644 --- a/ortools/sat/samples/StopAfterNSolutionsSampleSat.java +++ b/ortools/sat/samples/StopAfterNSolutionsSampleSat.java @@ -18,6 +18,7 @@ import com.google.ortools.Loader; import com.google.ortools.sat.CpModel; import com.google.ortools.sat.CpSolver; import com.google.ortools.sat.CpSolverSolutionCallback; +import com.google.ortools.sat.CpSolverStatus; import com.google.ortools.sat.IntVar; /** Code sample that solves a model and displays a small number of solutions. */ @@ -68,7 +69,7 @@ public final class StopAfterNSolutionsSampleSat { // Tell the solver to enumerate all solutions. solver.getParameters().setEnumerateAllSolutions(true); // And solve. 
- solver.solve(model, cb); + CpSolverStatus unusedStatus = solver.solve(model, cb); System.out.println(cb.getSolutionCount() + " solutions found."); if (cb.getSolutionCount() != 5) { diff --git a/ortools/sat/samples/all_different_except_zero_sample_sat.py b/ortools/sat/samples/all_different_except_zero_sample_sat.py new file mode 100644 index 0000000000..57a5f346e0 --- /dev/null +++ b/ortools/sat/samples/all_different_except_zero_sample_sat.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implements AllDifferentExcept0 using atomic constraints.""" + +import collections + +from ortools.sat.python import cp_model + + +def all_different_except_0(): + """Encode the AllDifferentExcept0 constraint.""" + + # Model. + model = cp_model.CpModel() + + # Declare our primary variable. + x = [model.new_int_var(0, 10, f"x{i}") for i in range(5)] + + # Expand the AllDifferentExcept0 constraint. + variables_per_value = collections.defaultdict(list) + all_values = set() + + for var in x: + all_encoding_literals = [] + # Domains of variables are represented by flat intervals. + for i in range(0, len(var.proto.domain), 2): + start = var.proto.domain[i] + end = var.proto.domain[i + 1] + for value in range(start, end + 1): # Intervals are inclusive. + # Create the literal attached to var == value. 
+ bool_var = model.new_bool_var(f"{var} == {value}") + model.add(var == value).only_enforce_if(bool_var) + + # Collect all encoding literals for a given variable. + all_encoding_literals.append(bool_var) + + # Collect all encoding literals for a given value. + variables_per_value[value].append(bool_var) + + # Collect all different values. + all_values.add(value) + + # One variable must have exactly one value. + model.add_exactly_one(all_encoding_literals) + + # Add the all_different constraints. + for value, literals in variables_per_value.items(): + if value == 0: + continue + model.add_at_most_one(literals) + + model.add(x[0] == 0) + model.add(x[1] == 0) + + model.maximize(sum(x)) + + # Create a solver and solve. + solver = cp_model.CpSolver() + status = solver.solve(model) + + # Checks and prints the output. + if status == cp_model.OPTIMAL: + print(f"Optimal solution: {solver.objective_value}, expected: 27.0") + elif status == cp_model.FEASIBLE: + print(f"Feasible solution: {solver.objective_value}, optimal 27.0") + elif status == cp_model.INFEASIBLE: + print("The model is infeasible") + else: + print("Something went wrong. Please check the status and the log") + + +all_different_except_0() diff --git a/ortools/sat/samples/interval_relations_sample_sat.py b/ortools/sat/samples/interval_relations_sample_sat.py new file mode 100644 index 0000000000..81b7fe89a6 --- /dev/null +++ b/ortools/sat/samples/interval_relations_sample_sat.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Builds temporal relations between intervals.""" + +from ortools.sat.python import cp_model + + +def interval_relations_sample_sat(): + """Showcases how to build temporal relations between intervals.""" + model = cp_model.CpModel() + horizon = 100 + + # An interval can be created from three 1-var affine expressions. + start_var = model.new_int_var(0, horizon, "start") + duration = 10 # Python CP-SAT code accept integer variables or constants. + end_var = model.new_int_var(0, horizon, "end") + interval_var = model.new_interval_var(start_var, duration, end_var, "interval") + + # If the size is fixed, a simpler version uses the start expression and the + # size. + fixed_size_start_var = model.new_int_var(0, horizon, "fixed_start") + fixed_size_duration = 10 + fixed_size_interval_var = model.new_fixed_size_interval_var( + fixed_size_start_var, + fixed_size_duration, + "fixed_size_interval_var", + ) + + # An optional interval can be created from three 1-var affine expressions and + # a literal. + opt_start_var = model.new_int_var(0, horizon, "opt_start") + opt_duration = model.new_int_var(2, 6, "opt_size") + opt_end_var = model.new_int_var(0, horizon, "opt_end") + opt_presence_var = model.new_bool_var("opt_presence") + opt_interval_var = model.new_optional_interval_var( + opt_start_var, opt_duration, opt_end_var, opt_presence_var, "opt_interval" + ) + + # If the size is fixed, a simpler version uses the start expression, the + # size, and the presence literal. 
+ opt_fixed_size_start_var = model.new_int_var(0, horizon, "opt_fixed_start") + opt_fixed_size_duration = 10 + opt_fixed_size_presence_var = model.new_bool_var("opt_fixed_presence") + opt_fixed_size_interval_var = model.new_optional_fixed_size_interval_var( + opt_fixed_size_start_var, + opt_fixed_size_duration, + opt_fixed_size_presence_var, + "opt_fixed_size_interval_var", + ) + + # Simple precedence between two non optional intervals. + model.add(interval_var.start_expr() >= fixed_size_interval_var.end_expr()) + + # Synchronize start between two intervals (one optional, one not) + model.add( + interval_var.start_expr() == opt_interval_var.start_expr() + ).only_enforce_if(opt_presence_var) + + # Exact delay between two optional intervals. + exact_delay: int = 5 + model.add( + opt_interval_var.start_expr() + == opt_fixed_size_interval_var.end_expr() + exact_delay + ).only_enforce_if(opt_presence_var, opt_fixed_size_presence_var) + + +interval_relations_sample_sat() diff --git a/ortools/sat/samples/ranking_circuit_sample_sat.py b/ortools/sat/samples/ranking_circuit_sample_sat.py index 4141346cad..9ef7862664 100644 --- a/ortools/sat/samples/ranking_circuit_sample_sat.py +++ b/ortools/sat/samples/ranking_circuit_sample_sat.py @@ -26,7 +26,7 @@ def rank_tasks_with_circuit( durations: Sequence[int], presences: Sequence[cp_model.IntVar], ranks: Sequence[cp_model.IntVar], -): +) -> None: """This method uses a circuit constraint to rank tasks. This method assumes that all starts are disjoint, meaning that all tasks have @@ -36,7 +36,7 @@ def rank_tasks_with_circuit( To implement this ranking, we will create a dense graph with num_tasks + 1 nodes. The extra node (with id 0) will be used to decide which task is first with - its only outgoing arc, and whhich task is last with its only incoming arc. + its only outgoing arc, and which task is last with its only incoming arc. 
Each task i will be associated with id i + 1, and an arc between i + 1 and j + 1 indicates that j is the immediate successor of i. @@ -102,7 +102,7 @@ def rank_tasks_with_circuit( model.add_circuit(arcs) -def ranking_sample_sat(): +def ranking_sample_sat() -> None: """Ranks tasks in a NoOverlap constraint.""" model = cp_model.CpModel() diff --git a/ortools/sat/samples/transitions_in_no_overlap_sample_sat.py b/ortools/sat/samples/transitions_in_no_overlap_sample_sat.py new file mode 100644 index 0000000000..56e7d6302f --- /dev/null +++ b/ortools/sat/samples/transitions_in_no_overlap_sample_sat.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implements transition times and costs in a no_overlap constraint.""" + +from typing import Dict, List, Sequence, Tuple, Union + +from ortools.sat.python import cp_model + + +def transitive_reduction_with_circuit_delays_and_penalties( + model: cp_model.CpModel, + starts: Sequence[cp_model.IntVar], + durations: Sequence[int], + presences: Sequence[Union[cp_model.IntVar, bool]], + penalties: Dict[Tuple[int, int], int], + delays: Dict[Tuple[int, int], int], +) -> Sequence[Tuple[cp_model.IntVar, int]]: + """This method uses a circuit constraint to rank tasks. + + This method assumes that all starts are disjoint, meaning that all tasks have + a strictly positive duration, and they appear in the same NoOverlap + constraint. 
+ + The extra node (with id 0) will be used to decide which task is first with + its only outgoing arc, and which task is last with its only incoming arc. + Each task i will be associated with id i + 1, and an arc between i + 1 and j + + 1 indicates that j is the immediate successor of i. + + The circuit constraint ensures there is at most 1 hamiltonian cycle of + length > 1. If no such path exists, then no tasks are active. + We also need to enforce that any hamiltonian cycle of size > 1 must contain + the node 0. And thus, there is a self loop on node 0 iff the circuit is empty. + + Args: + model: The CpModel to add the constraints to. + starts: The array of starts variables of all tasks. + durations: the durations of all tasks. + presences: The array of presence variables of all tasks. + penalties: the array of tuple (`tail_index`, `head_index`, `penalty`) that + specifies that if task `tail_index` is the successor of the task + `head_index`, then `penalty` must be added to the cost. + delays: the array of tuple (`tail_index`, `head_index`, `delay`) that + specifies that if task `tail_index` is the successor of the task + `head_index`, then an extra `delay` must be added between the end of the + first task and the start of the second task. + + Returns: + The list of pairs (Boolean variables, penalty) to be added to the objective. + """ + + num_tasks = len(starts) + all_tasks = range(num_tasks) + + arcs: List[cp_model.ArcT] = [] + penalty_terms = [] + for i in all_tasks: + # if node i is first. + start_lit = model.new_bool_var(f"start_{i}") + arcs.append((0, i + 1, start_lit)) + + # As there are no other constraints on the problem, we can add this + # redundant constraint. + model.add(starts[i] == 0).only_enforce_if(start_lit) + + # if node i is last. 
+ end_lit = model.new_bool_var(f"end_{i}") + arcs.append((i + 1, 0, end_lit)) + + for j in all_tasks: + if i == j: + arcs.append((i + 1, i + 1, ~presences[i])) + else: + literal = model.new_bool_var(f"arc_{i}_to_{j}") + arcs.append((i + 1, j + 1, literal)) + + # To perform the transitive reduction from precedences to successors, + # we need to tie the starts of the tasks with 'literal'. + # In a pure problem, the following inequality could be an equality. + # It is not true in general. + # + # Note that we could use this literal to penalize the transition, add an + # extra delay to the precedence. + min_delay = 0 + key = (i, j) + if key in delays: + min_delay = delays[key] + model.add( + starts[j] >= starts[i] + durations[i] + min_delay + ).only_enforce_if(literal) + + # Create the penalties. + if key in penalties: + penalty_terms.append((literal, penalties[key])) + + # Manage the empty circuit + empty = model.new_bool_var("empty") + arcs.append((0, 0, empty)) + + for i in all_tasks: + model.add_implication(empty, ~presences[i]) + + # Add the circuit constraint. + model.add_circuit(arcs) + + return penalty_terms + + +def transitions_in_no_overlap_sample_sat(): + """Implement transitions in a NoOverlap constraint.""" + + model = cp_model.CpModel() + horizon = 40 + num_tasks = 4 + + # Breaking the natural sequence induces a fixed penalty. + penalties = { + (1, 0): 10, + (2, 0): 10, + (3, 0): 10, + (2, 1): 10, + (3, 1): 10, + (3, 2): 10, + } + + # Switching from an odd to even or even to odd task indices induces a delay. + delays = { + (1, 0): 10, + (0, 1): 10, + (3, 0): 10, + (0, 3): 10, + (1, 2): 10, + (2, 1): 10, + (3, 2): 10, + (2, 3): 10, + } + + all_tasks = range(num_tasks) + + starts = [] + durations = [] + intervals = [] + presences = [] + + # Creates intervals, all present. But the cost is robust w.r.t. optional + # intervals. 
+ for t in all_tasks: + start = model.new_int_var(0, horizon, f"start[{t}]") + duration = 5 + presence = True + interval = model.new_optional_fixed_size_interval_var( + start, duration, presence, f"opt_interval[{t}]" + ) + + starts.append(start) + durations.append(duration) + intervals.append(interval) + presences.append(presence) + + # Adds NoOverlap constraint. + model.add_no_overlap(intervals) + + # Adds ranking constraint. + penalty_terms = transitive_reduction_with_circuit_delays_and_penalties( + model, starts, durations, presences, penalties, delays + ) + + # Minimize the sum of penalties, + model.minimize(sum(var * penalty for var, penalty in penalty_terms)) + + # In practise, only one penalty can happen. Thus the two even tasks are + # together, same for the two odd tasks. + # Because of the penalties, the optimal sequence is 0 -> 2 -> 1 -> 3 + # which induces one penalty and one delay. + + # Solves the model model. + solver = cp_model.CpSolver() + status = solver.solve(model) + + if status == cp_model.OPTIMAL: + # Prints out the makespan and the start times and ranks of all tasks. 
+ print(f"Optimal cost: {solver.objective_value}") + for t in all_tasks: + if solver.value(presences[t]): + print(f"Task {t} starts at {solver.value(starts[t])} ") + else: + print(f"Task {t} in not performed") + else: + print(f"Solver exited with nonoptimal status: {status}") + + +transitions_in_no_overlap_sample_sat() diff --git a/ortools/sat/sat_base.h b/ortools/sat/sat_base.h index 8affe1d12d..2d6c2bc391 100644 --- a/ortools/sat/sat_base.h +++ b/ortools/sat/sat_base.h @@ -28,11 +28,9 @@ #include "absl/base/attributes.h" #include "absl/log/check.h" #include "absl/strings/str_format.h" -#include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" -#include "ortools/sat/model.h" #include "ortools/util/bitset.h" #include "ortools/util/strong_integers.h" @@ -67,9 +65,7 @@ const LiteralIndex kFalseLiteralIndex(-3); // same number XOR 1 encode its negation. class Literal { public: - // Not explicit for tests so we can write: - // vector literal = {+1, -3, +4, -9}; - Literal(int signed_value) // NOLINT + explicit Literal(int signed_value) : index_(signed_value > 0 ? ((signed_value - 1) << 1) : ((-signed_value - 1) << 1) ^ 1) { CHECK_NE(signed_value, 0); @@ -139,6 +135,17 @@ inline std::ostream& operator<<(std::ostream& os, return os; } +// Only used for testing to use the classical SAT notation for a literal. This +// allows to write Literals({+1, -4, +3}) for the clause with BooleanVariable 0 +// and 2 appearing positively and 3 negatively. +inline std::vector Literals(absl::Span input) { + std::vector result(input.size()); + for (int i = 0; i < result.size(); ++i) { + result[i] = Literal(input[i]); + } + return result; +} + // Holds the current variable assignment of the solver. // Each variable can be unassigned or be assigned to true or false. 
class VariablesAssignment { @@ -465,11 +472,11 @@ class Trail { VariablesAssignment assignment_; std::vector trail_; std::vector conflict_; - absl::StrongVector info_; + util_intops::StrongVector info_; SatClause* failing_sat_clause_; // Data used by EnqueueWithSameReasonAs(). - absl::StrongVector + util_intops::StrongVector reference_var_with_same_reason_as_; // Reason cache. Mutable since we want the API to be the same whether the @@ -496,9 +503,9 @@ class Trail { // variables, the memory address of the vectors (kept in reasons_) are still // valid. mutable std::deque> reasons_repository_; - mutable absl::StrongVector> + mutable util_intops::StrongVector> reasons_; - mutable absl::StrongVector old_type_; + mutable util_intops::StrongVector old_type_; // This is used by RegisterPropagator() and Reason(). std::vector propagators_; @@ -542,7 +549,7 @@ class SatPropagator { // TODO(user): It is not yet 100% the case, but this can be guaranteed to be // called with a trail index that will always be the start of a new decision // level. - virtual void Untrail(const Trail& trail, int trail_index) { + virtual void Untrail(const Trail& /*trail*/, int trail_index) { propagation_trail_index_ = std::min(propagation_trail_index_, trail_index); } @@ -556,7 +563,7 @@ class SatPropagator { // The returned Span has to be valid until the literal is untrailed. A client // can use trail_.GetEmptyVectorToStoreReason() if it doesn't have a memory // location that already contains the reason. - virtual absl::Span Reason(const Trail& trail, + virtual absl::Span Reason(const Trail& /*trail*/, int /*trail_index*/) const { LOG(FATAL) << "Not implemented."; return {}; diff --git a/ortools/sat/sat_decision.h b/ortools/sat/sat_decision.h index eed1d2f703..7992cbdcee 100644 --- a/ortools/sat/sat_decision.h +++ b/ortools/sat/sat_decision.h @@ -204,24 +204,24 @@ class SatDecisionPolicy { // Stores variable activity and the number of time each variable was "bumped". 
// The later is only used with the ERWA heuristic. - absl::StrongVector activities_; - absl::StrongVector tie_breakers_; - absl::StrongVector num_bumps_; + util_intops::StrongVector activities_; + util_intops::StrongVector tie_breakers_; + util_intops::StrongVector num_bumps_; // If the polarity if forced (externally) we always use this first. - absl::StrongVector has_forced_polarity_; - absl::StrongVector forced_polarity_; + util_intops::StrongVector has_forced_polarity_; + util_intops::StrongVector forced_polarity_; // If we are in a stable phase, we follow the current target. bool in_stable_phase_ = false; int target_length_ = 0; - absl::StrongVector has_target_polarity_; - absl::StrongVector target_polarity_; + util_intops::StrongVector has_target_polarity_; + util_intops::StrongVector target_polarity_; // Otherwise we follow var_polarity_ which is reset at the beginning of // each new polarity phase. This is also overwritten by phase saving. // Each phase last for an arithmetically increasing number of conflicts. - absl::StrongVector var_polarity_; + util_intops::StrongVector var_polarity_; bool maybe_enable_phase_saving_ = true; int64_t polarity_phase_ = 0; int64_t num_conflicts_until_rephase_ = 1000; diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index eb21b53e1a..1d6dd85cd2 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -124,7 +124,9 @@ bool Inprocessing::PresolveLoop(SatPresolveOptions options) { // TODO(user): Combine the two? this way we don't create a full literal <-> // clause graph twice. It might make sense to reach the BCE fix point which // is unique before each variable elimination. - blocked_clause_simplifier_->DoOneRound(log_round_info); + if (!params_.fill_tightened_domains_in_response()) { + blocked_clause_simplifier_->DoOneRound(log_round_info); + } // TODO(user): this break some binary graph invariant. Fix! 
RETURN_IF_FALSE(RemoveFixedAndEquivalentVariables(log_round_info)); @@ -188,6 +190,9 @@ bool Inprocessing::InprocessingRound() { } // Try to spend a given ratio of time in the inprocessing. + // + // TODO(user): Tune the heuristic, in particular, with the current code we + // start some inprocessing before the first search. const double diff = start_dtime - reference_dtime_; if (total_dtime_ > params_.inprocessing_dtime_ratio() * diff) { return true; @@ -366,7 +371,7 @@ bool Inprocessing::RemoveFixedAndEquivalentVariables(bool log_info) { // Used to mark clause literals. const int num_literals(sat_solver_->NumVariables() * 2); - absl::StrongVector marked(num_literals, false); + util_intops::StrongVector marked(num_literals, false); clause_manager_->DeleteRemovedClauses(); clause_manager_->DetachAllClauses(); @@ -472,8 +477,8 @@ bool Inprocessing::SubsumeAndStrenghtenRound(bool log_info) { // Clause index in clauses. // TODO(user): Storing signatures here might be faster? - absl::StrongVector> one_watcher( - num_literals.value()); + util_intops::StrongVector> + one_watcher(num_literals.value()); // Clause signatures in the same order as clauses. std::vector signatures(clauses.size()); diff --git a/ortools/sat/sat_inprocessing.h b/ortools/sat/sat_inprocessing.h index 750363b01c..f7eadca3a7 100644 --- a/ortools/sat/sat_inprocessing.h +++ b/ortools/sat/sat_inprocessing.h @@ -244,20 +244,20 @@ class StampingSimplifier { int64_t num_fixed_ = 0; // Encode a spanning tree of the implication graph. - absl::StrongVector parents_; + util_intops::StrongVector parents_; // Adjacency list representation of the parents_ tree. - absl::StrongVector sizes_; - absl::StrongVector starts_; + util_intops::StrongVector sizes_; + util_intops::StrongVector starts_; std::vector children_; // Temporary data for the DFS. - absl::StrongVector marked_; + util_intops::StrongVector marked_; std::vector dfs_stack_; // First/Last visited index in a DFS of the tree above. 
- absl::StrongVector first_stamps_; - absl::StrongVector last_stamps_; + util_intops::StrongVector first_stamps_; + util_intops::StrongVector last_stamps_; }; // A clause c is "blocked" by a literal l if all clauses containing the @@ -299,18 +299,18 @@ class BlockedClauseSimplifier { int64_t num_inspected_literals_ = 0; // Temporary vector to mark literal of a clause. - absl::StrongVector marked_; + util_intops::StrongVector marked_; // List of literal to process. // TODO(user): use priority queue? - absl::StrongVector in_queue_; + util_intops::StrongVector in_queue_; std::deque queue_; // We compute the occurrence graph just once at the beginning of each round // and we do not shrink it as we remove blocked clauses. DEFINE_STRONG_INDEX_TYPE(rat_literal_clause_index); - absl::StrongVector clauses_; - absl::StrongVector> + util_intops::StrongVector clauses_; + util_intops::StrongVector> literal_to_clauses_; }; @@ -367,7 +367,7 @@ class BoundedVariableElimination { int64_t score_threshold_; // Temporary vector to mark literal of a clause and compute its resolvant. - absl::StrongVector marked_; + util_intops::StrongVector marked_; std::vector resolvant_; // Priority queue of variable to process. @@ -385,17 +385,17 @@ class BoundedVariableElimination { IntegerPriorityQueue queue_; // We update the queue_ in batch. - absl::StrongVector in_need_to_be_updated_; + util_intops::StrongVector in_need_to_be_updated_; std::vector need_to_be_updated_; // We compute the occurrence graph just once at the beginning of each round. // We maintains the sizes at all time and lazily shrink the graph with deleted // clauses. 
DEFINE_STRONG_INDEX_TYPE(ClauseIndex); - absl::StrongVector clauses_; - absl::StrongVector> + util_intops::StrongVector clauses_; + util_intops::StrongVector> literal_to_clauses_; - absl::StrongVector literal_to_num_clauses_; + util_intops::StrongVector literal_to_num_clauses_; }; } // namespace sat diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 8415ed797e..07eae618d1 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat"; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 284 +// NEXT TAG: 295 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -290,6 +290,10 @@ message SatParameters { // solver, the time unit being as close as possible to a second. optional double max_deterministic_time = 67 [default = inf]; + // Stops after that number of batches has been scheduled. This only make sense + // when interleave_search is true. + optional int32 max_num_deterministic_batches = 291 [default = 0]; + // Maximum number of conflicts allowed to solve a problem. // // TODO(user): Maybe change the way the conflict limit is enforced? @@ -464,6 +468,25 @@ message SatParameters { // possible precedences between event and encoding the constraint. optional bool expand_reservoir_constraints = 182 [default = true]; + // Mainly useful for testing. + // + // If this and expand_reservoir_constraints is true, we use a different + // encoding of the reservoir constraint using circuit instead of precedences. + // Note that this is usually slower, but can exercise different part of the + // solver. Note that contrary to the precedence encoding, this easily support + // variable demands. + // + // WARNING: with this encoding, the constraint take a slighlty different + // meaning. 
The level must be within the reservoir for any permutation of the + // events. So we cannot have +100 and -100 at the same time if the maximum + // level is 10 (as autorized by the reservoir constraint). + optional bool expand_reservoir_using_circuit = 288 [default = false]; + + // Encore cumulative with fixed demands and capacity as a reservoir + // constraint. The only reason you might want to do that is to test the + // reservoir propagation code! + optional bool encode_cumulative_as_reservoir = 287 [default = false]; + // If the number of expressions in the lin_max is less that the max size // parameter, model expansion replaces target = max(xi) by linear constraint // with the introduction of new booleans bi such that bi => target == xi. @@ -570,7 +593,7 @@ message SatParameters { // A number of 1 means no parallelism. // // Note that 'num_workers' is the preferred name, but if it is set to zero, - // we will still read the deprecated 'num_search_worker'. + // we will still read the deprecated 'num_search_workers'. // // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer // programs) this field is overridden with a value of 8, if the field is not @@ -579,12 +602,16 @@ message SatParameters { optional int32 num_workers = 206 [default = 0]; optional int32 num_search_workers = 100 [default = 0]; - // Obsolete parameter. No-op. - optional int32 min_num_lns_workers = 211 [default = 2]; + // We distinguish subsolvers that consume a full thread, and the ones that are + // always interleaved. If left at zero, we will fix this with a default + // formula that depends on num_workers. But if you start modifying what runs, + // you might want to fix that to a given value depending on the num_workers + // you use. + optional int32 num_full_subsolvers = 294 [default = 0]; // In multi-thread, the solver can be mainly seen as a portfolio of solvers // with different parameters. 
This field indicates the names of the parameters - // that are used in multithread. + // that are used in multithread. This only applies to "full" subsolvers. // // See cp_model_search.cc to see a list of the names and the default value (if // left empty) that looks like: @@ -605,10 +632,8 @@ message SatParameters { // has many terms. If there is no fixed strategy fixed will be ignored. And so // on. // - // The order is important, as only the first usable "num_workers - - // min_num_lns_workers" subsolvers will be scheduled. You can see in the log - // which one are selected for a given run. All the others will be LNS if there - // is an objective, or randomized SAT search for pure satisfiability problems. + // The order is important, as only the first num_full_subsolvers will be + // scheduled. You can see in the log which one are selected for a given run. repeated string subsolvers = 207; // A convenient way to add more workers types. @@ -616,8 +641,16 @@ message SatParameters { repeated string extra_subsolvers = 219; // Rather than fully specifying subsolvers, it is often convenient to just - // remove the ones that are not useful on a given problem. + // remove the ones that are not useful on a given problem or only keep + // specific ones for testing. Each string is interpreted as a "glob", so we + // support '*' and '?'. + // + // The way this work is that we will only accept a name that match a filter + // pattern (if non-empty) and do not match an ignore pattern. Note also that + // these fields work on LNS or LS names even if these are currently not + // specified via the subsolvers field. repeated string ignore_subsolvers = 209; + repeated string filter_subsolvers = 293; // It is possible to specify additional subsolver configuration. These can be // referred by their params.name() in the fields above. Note that only the @@ -626,9 +659,6 @@ message SatParameters { // named parameters will be merged into the subsolver configuration. 
repeated SatParameters subsolver_params = 210; - // TODO(user): Also define like for subsolvers the list of "active" - // type of neighborhood used. - // Experimental. If this is true, then we interleave all our major search // strategy and distribute the work amongst num_workers. // @@ -647,6 +677,10 @@ message SatParameters { // Allows sharing of new learned binary clause between workers. optional bool share_binary_clauses = 203 [default = true]; + // Allows sharing of short glue clauses between workers. + // Implicitly disabled if share_binary_clauses is false. + optional bool share_glue_clauses = 285 [default = false]; + // ========================================================================== // Debugging parameters // ========================================================================== @@ -774,6 +808,15 @@ message SatParameters { // depending on the problem, turning this off may lead to a faster solution. optional bool use_overload_checker_in_cumulative = 78 [default = false]; + // Enable a heuristic to solve cumulative constraints using a modified energy + // constraint. We modify the usual energy definition by applying a + // super-additive function (also called "conservative scale" or "dual-feasible + // function") to the demand and the durations of the tasks. + // + // This heuristic is fast but for most problems it does not help much to find + // a solution. + optional bool use_conservative_scale_overload_checker = 286 [default = false]; + // When this is true, the cumulative constraint is reinforced with timetable // edge finding, i.e., an additional level of reasoning based on the // conjunction of energy and mandatory parts. This additional level @@ -917,6 +960,12 @@ message SatParameters { // bound in the shaving search. optional double shaving_search_deterministic_time = 205 [default = 0.001]; + // Specifies the threshold between two modes in the shaving procedure. 
+ // If the range of the variable/objective is less than this threshold, then + // the shaving procedure will try to remove values one by one. Otherwise, it + // will try to remove one range at a time. + optional int64 shaving_search_threshold = 290 [default = 64]; + // If true, search will search in ascending max objective value (when // minimizing) starting from the lower bound of the objective. optional bool use_objective_lb_search = 228 [default = false]; @@ -926,6 +975,10 @@ message SatParameters { // hardcoded objective value. optional bool use_objective_shaving_search = 253 [default = false]; + // This search takes all Boolean or integer variables, and maximize or + // minimize them in order to reduce their domain. + optional bool use_variables_shaving_search = 289 [default = false]; + // The solver ignores the pseudo costs of variables with number of recordings // less than this threshold. optional int64 pseudo_cost_reliability_threshold = 123 [default = 100]; @@ -943,6 +996,14 @@ message SatParameters { // the worst open node in the tree. optional bool optimize_with_lb_tree_search = 188 [default = false]; + // Experimental. Save the current LP basis at each node of the search tree so + // that when we jump around, we can load it and reduce the number of LP + // iterations needed. + // + // It currently works okay if we do not change the lp with cuts or + // simplification... More work is needed to make it robust in all cases. + optional bool save_lp_basis_in_lb_tree_search = 284 [default = false]; + // If non-negative, perform a binary search on the objective variable in order // to find an [min, max] interval outside of which the solver proved unsat/sat // under this amount of conflict. This can quickly reduce the objective domain @@ -964,7 +1025,7 @@ message SatParameters { // Disable every other type of subsolver, setting this turns CP-SAT into a // pure local-search solver. 
- optional bool test_feasibility_jump = 240 [default = false]; + optional bool use_ls_only = 240 [default = false]; // On each restart, we randomly choose if we use decay (with this parameter) // or no decay. @@ -974,14 +1035,16 @@ message SatParameters { optional int32 feasibility_jump_linearization_level = 257 [default = 2]; // This is a factor that directly influence the work before each restart. - // Setting this to zero disable restart, and increasing it lead to longer - // restarts. + // Increasing it leads to longer restart. optional int32 feasibility_jump_restart_factor = 258 [default = 1]; + // How much dtime for each LS batch. + optional double feasibility_jump_batch_dtime = 292 [default = 0.1]; + // Probability for a variable to have a non default value upon restarts or // perturbations. optional double feasibility_jump_var_randomization_probability = 247 - [default = 0.0]; + [default = 0.05]; // Max distance between the default value and the pertubated value relative to // the range of the domain of the variable. @@ -994,9 +1057,11 @@ message SatParameters { optional bool feasibility_jump_enable_restarts = 250 [default = true]; // Maximum size of no_overlap or no_overlap_2d constraint for a quadratic - // expansion. + // expansion. This might look a lot, but by expanding such constraint, we get + // a linear time evaluation per single variable moves instead of a slow O(n + // log n) one. optional int32 feasibility_jump_max_expanded_constraint_size = 264 - [default = 100]; + [default = 500]; // This will create incomplete subsolvers (that are not LNS subsolvers) // that use the feasibility jump code to find improving solution, treating @@ -1038,7 +1103,7 @@ message SatParameters { // total number of nodes that may be generated in the shared tree. If the // shared tree runs out of unassigned leaves, workers act as portfolio // workers. Note: this limit includes interior nodes, not just leaves. 
- optional int32 shared_tree_max_nodes_per_worker = 238 [default = 128]; + optional int32 shared_tree_max_nodes_per_worker = 238 [default = 100000]; enum SharedTreeSplitStrategy { // Uses the default strategy, currently equivalent to @@ -1238,7 +1303,9 @@ message SatParameters { // Whether we try to automatically detect the symmetries in a model and // exploit them. Currently, at level 1 we detect them in presolve and try // to fix Booleans. At level 2, we also do some form of dynamic symmetry - // breaking during search. + // breaking during search. At level 3, we also detect symmetries for very + // large models, which can be slow. At level 4, we try to break as much + // symmetry as possible in presolve. optional int32 symmetry_level = 183 [default = 2]; // The new linear propagation code treat all constraints at once and use diff --git a/ortools/sat/sat_runner.cc b/ortools/sat/sat_runner.cc index 0e19d7a042..f9b88c5400 100644 --- a/ortools/sat/sat_runner.cc +++ b/ortools/sat/sat_runner.cc @@ -39,6 +39,7 @@ #include "ortools/sat/sat_cnf_reader.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/file_util.h" +#include "ortools/util/sorted_interval_list.h" ABSL_FLAG( std::string, input, "", @@ -51,6 +52,10 @@ ABSL_FLAG( "Protobuf file containing a CpModelResponse. The solution will be used as a" " hint to bootstrap the search."); +ABSL_FLAG(std::string, domain_file, "", + "Protobuf file containing a CpModelResponse. If present, the " + "tightened models will be used to reduce the domain of variables."); + ABSL_FLAG(std::string, output, "", "If non-empty, write the response there. 
By default it uses the " "binary format except if the file extension is '.txt'."); @@ -88,7 +93,7 @@ std::string ExtractName(absl::string_view full_filename) { } bool LoadProblem(const std::string& filename, absl::string_view hint_file, - CpModelProto* cp_model) { + absl::string_view domain_file, CpModelProto* cp_model) { if (absl::EndsWith(filename, ".opb") || absl::EndsWith(filename, ".opb.bz2")) { OpbReader reader; @@ -108,22 +113,48 @@ bool LoadProblem(const std::string& filename, absl::string_view hint_file, LOG(FATAL) << "Cannot load file '" << filename << "'."; } } else { - LOG(INFO) << "Reading a CpModelProto."; CHECK_OK(ReadFileToProto(filename, cp_model)); } // Read the hint file. if (!hint_file.empty()) { CpSolverResponse response; - LOG(INFO) << "Reading a CpSolverResponse."; CHECK_OK(ReadFileToProto(hint_file, &response)); - CHECK_EQ(response.solution_size(), cp_model->variables_size()) - << "The hint proto is not compatible with the model proto"; + if (!response.solution().empty()) { + CHECK_EQ(response.solution_size(), cp_model->variables_size()) + << "The hint from the response proto is not compatible with the " + "model proto"; - cp_model->clear_solution_hint(); - for (int i = 0; i < cp_model->variables_size(); ++i) { - cp_model->mutable_solution_hint()->add_vars(i); - cp_model->mutable_solution_hint()->add_values(response.solution(i)); + cp_model->clear_solution_hint(); + for (int i = 0; i < cp_model->variables_size(); ++i) { + cp_model->mutable_solution_hint()->add_vars(i); + cp_model->mutable_solution_hint()->add_values(response.solution(i)); + } + } else { + LOG(INFO) << "The response proto has no solutions, ignoring."; + } + } + + // Read the tightened domain file. 
+ if (!domain_file.empty()) { + CpSolverResponse response; + CHECK_OK(ReadFileToProto(domain_file, &response)); + if (!response.tightened_variables().empty()) { + CHECK_EQ(response.tightened_variables_size(), cp_model->variables_size()) + << "The tighened variables from the response proto is not " + "compatible with the model proto"; + + for (int i = 0; i < cp_model->variables_size(); ++i) { + IntegerVariableProto* var_proto = cp_model->mutable_variables(i); + const Domain tightened_domain = + ReadDomainFromProto(response.tightened_variables(i)); + const Domain new_domain = + ReadDomainFromProto(*var_proto).IntersectionWith(tightened_domain); + FillDomainInProto(new_domain, var_proto); + } + } else { + LOG(INFO) << "The response proto has no tightened variable domains, " + "ignoring."; } } @@ -154,7 +185,7 @@ int Run() { CpModelProto* cp_model = google::protobuf::Arena::Create(&arena); if (!LoadProblem(absl::GetFlag(FLAGS_input), absl::GetFlag(FLAGS_hint_file), - cp_model)) { + absl::GetFlag(FLAGS_domain_file), cp_model)) { CpSolverResponse response; response.set_status(CpSolverStatus::MODEL_INVALID); return EXIT_SUCCESS; diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index de5b523048..1284454e88 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -273,7 +273,7 @@ bool SatSolver::AddProblemClauseInternal(absl::Span literals) { AddBinaryClauseInternal(literals[0], literals[1]); } } else { - if (!clauses_propagator_->AddClause(literals, trail_)) { + if (!clauses_propagator_->AddClause(literals, trail_, /*lbd=*/-1)) { return SetModelUnsat(); } } @@ -432,14 +432,14 @@ int SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( --num_learned_clause_before_cleanup_; SatClause* clause = - clauses_propagator_->AddRemovableClause(literals, trail_); + clauses_propagator_->AddRemovableClause(literals, trail_, lbd); // BumpClauseActivity() must be called after clauses_info_[clause] has // been created or it will have no effect. 
(*clauses_propagator_->mutable_clauses_info())[clause].lbd = lbd; BumpClauseActivity(clause); } else { - CHECK(clauses_propagator_->AddClause(literals, trail_)); + CHECK(clauses_propagator_->AddClause(literals, trail_, lbd)); } return lbd; } diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index d54e878f1c..dfb7b8aa86 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -100,7 +100,7 @@ struct EnergyEvent : BaseEvent { // It must be called before the EnergyEvent is used. ABSL_MUST_USE_RESULT bool FillEnergyLp( AffineExpression x_size, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, Model* model) { LinearConstraintBuilder tmp_energy(model); if (IsPresent()) { @@ -245,7 +245,7 @@ std::vector FindPossibleDemands(const EnergyEvent& event, bool CutIsEfficient( absl::Span events, IntegerValue window_start, IntegerValue window_end, double available_energy_lp, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, LinearConstraintBuilder* temp_builder) { temp_builder->Clear(); for (const EnergyEvent& event : events) { @@ -273,7 +273,7 @@ bool CutIsEfficient( // as the available energy. 
void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( absl::string_view cut_name, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, std::vector events, IntegerValue capacity, AffineExpression makespan, TimeLimit* time_limit, Model* model, LinearConstraintManager* manager) { @@ -480,7 +480,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( void GenerateCumulativeEnergeticCuts( const std::string& cut_name, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, std::vector events, const AffineExpression& capacity, TimeLimit* time_limit, Model* model, LinearConstraintManager* manager) { double max_possible_energy_lp = 0.0; @@ -848,7 +848,7 @@ struct CachedIntervalData { void GenerateCutsBetweenPairOfNonOverlappingTasks( const std::string& cut_name, - const absl::StrongVector& lp_values, + const util_intops::StrongVector& lp_values, std::vector events, IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); diff --git a/ortools/sat/shaving_solver.cc b/ortools/sat/shaving_solver.cc new file mode 100644 index 0000000000..a7ae9ea6bb --- /dev/null +++ b/ortools/sat/shaving_solver.cc @@ -0,0 +1,557 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "ortools/sat/shaving_solver.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/base/thread_annotations.h" +#include "absl/flags/flag.h" +#include "absl/log/check.h" +#include "absl/random/distributions.h" +#include "absl/strings/str_cat.h" +#include "absl/synchronization/mutex.h" +#include "ortools/base/logging.h" +#include "ortools/graph/connected_components.h" +#include "ortools/sat/cp_model_lns.h" +#include "ortools/sat/cp_model_presolve.h" +#include "ortools/sat/cp_model_solver_helpers.h" +#include "ortools/sat/cp_model_utils.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/presolve_context.h" +#include "ortools/sat/subsolver.h" +#include "ortools/sat/synchronization.h" +#include "ortools/sat/util.h" +#include "ortools/util/sorted_interval_list.h" +#include "ortools/util/time_limit.h" + +namespace operations_research { +namespace sat { + +ObjectiveShavingSolver::ObjectiveShavingSolver( + const SatParameters& local_parameters, NeighborhoodGeneratorHelper* helper, + SharedClasses* shared) + : SubSolver(local_parameters.name(), FULL_PROBLEM), + local_params_(local_parameters), + helper_(helper), + shared_(shared), + local_proto_(shared->model_proto) {} + +ObjectiveShavingSolver::~ObjectiveShavingSolver() { + shared_->stat_tables.AddTimingStat(*this); +} + +bool ObjectiveShavingSolver::TaskIsAvailable() { + if (shared_->SearchIsDone()) return false; + + // We only support one task at the time. 
+ absl::MutexLock mutex_lock(&mutex_); + return !task_in_flight_; +} + +std::function ObjectiveShavingSolver::GenerateTask(int64_t task_id) { + { + absl::MutexLock mutex_lock(&mutex_); + stop_current_chunk_.store(false); + task_in_flight_ = true; + objective_lb_ = shared_->response->GetInnerObjectiveLowerBound(); + objective_ub_ = shared_->response->GetInnerObjectiveUpperBound(); + } + return [this, task_id]() { + if (ResetModel(task_id)) { + SolveLoadedCpModel(local_proto_, local_sat_model_.get()); + const CpSolverResponse local_response = + local_sat_model_->GetOrCreate()->GetResponse(); + + if (local_response.status() == CpSolverStatus::OPTIMAL || + local_response.status() == CpSolverStatus::FEASIBLE) { + std::vector solution_values(local_response.solution().begin(), + local_response.solution().end()); + if (local_params_.cp_model_presolve()) { + const int num_original_vars = shared_->model_proto.variables_size(); + PostsolveResponseWrapper(local_params_, num_original_vars, + mapping_proto_, postsolve_mapping_, + &solution_values); + } + shared_->response->NewSolution(solution_values, Info()); + } else if (local_response.status() == CpSolverStatus::INFEASIBLE) { + absl::MutexLock mutex_lock(&mutex_); + shared_->response->UpdateInnerObjectiveBounds( + Info(), current_objective_target_ub_ + 1, objective_ub_); + } + } + + absl::MutexLock mutex_lock(&mutex_); + task_in_flight_ = false; + if (local_sat_model_ != nullptr) { + const double dtime = local_sat_model_->GetOrCreate() + ->GetElapsedDeterministicTime(); + AddTaskDeterministicDuration(dtime); + shared_->time_limit->AdvanceDeterministicTime(dtime); + } + }; +} + +void ObjectiveShavingSolver::Synchronize() { + absl::MutexLock mutex_lock(&mutex_); + if (!task_in_flight_) return; + + // We are just waiting for the inner code to check the time limit or + // to return nicely. 
+ if (stop_current_chunk_) return; + + // TODO(user): Also stop if we have enough newly fixed / improved root level + // bounds so that we think it is worth represolving and restarting. + if (shared_->SearchIsDone()) { + stop_current_chunk_.store(true); + } + + // The current objective lower bound has been improved, restarting. + if (shared_->response->GetInnerObjectiveLowerBound() > objective_lb_) { + stop_current_chunk_.store(true); + } + + // A solution has been found that is better than the current target + // objective upper bound. Restarting to use a smaller delta. + if (shared_->response->GetInnerObjectiveUpperBound() <= + current_objective_target_ub_ && + current_objective_target_ub_ != objective_lb_) { + stop_current_chunk_.store(true); + } + + // If the range has been reduced enough to warrant a delta of 1, while the + // current search uses a delta > 1. Restarting to switch to the delta of 1. + if (current_objective_target_ub_ != objective_lb_ && + shared_->response->GetInnerObjectiveUpperBound() - + shared_->response->GetInnerObjectiveLowerBound() <= + local_params_.shaving_search_threshold()) { + stop_current_chunk_.store(true); + } +} + +std::string ObjectiveShavingSolver::Info() { + return absl::StrCat(name(), " (vars=", local_proto_.variables().size(), + " csts=", local_proto_.constraints().size(), ")"); +} + +bool ObjectiveShavingSolver::ResetModel(int64_t task_id) { + local_sat_model_ = std::make_unique(name()); + *local_sat_model_->GetOrCreate() = local_params_; + local_sat_model_->GetOrCreate()->set_random_seed( + CombineSeed(local_params_.random_seed(), task_id)); + + auto* time_limit = local_sat_model_->GetOrCreate(); + shared_->time_limit->UpdateLocalLimit(time_limit); + time_limit->RegisterSecondaryExternalBooleanAsLimit(&stop_current_chunk_); + + auto* random = local_sat_model_->GetOrCreate(); + + // We copy the model. 
+ local_proto_ = shared_->model_proto; + *local_proto_.mutable_variables() = + helper_->FullNeighborhood().delta.variables(); + + // Store the current lb in local variable. + IntegerValue objective_lb; + IntegerValue chosen_objective_ub; + { + absl::MutexLock mutex_lock(&mutex_); + objective_lb = objective_lb_; + if (objective_ub_ - objective_lb <= + local_params_.shaving_search_threshold()) { + current_objective_target_ub_ = objective_lb; + } else { + const IntegerValue mid = (objective_ub_ - objective_lb) / 2; + current_objective_target_ub_ = + objective_lb + absl::LogUniform(*random, 0, mid.value()); + } + chosen_objective_ub = current_objective_target_ub_; + VLOG(2) << name() << ": from [" << objective_lb.value() << ".." + << objective_ub_.value() << "] <= " << chosen_objective_ub.value(); + } + + // We replace the objective by a constraint, objective in [lb, target_ub]. + // We modify local_proto_ to a pure feasibility problem. + // Not having the objective open up more presolve reduction. + Domain obj_domain = Domain(objective_lb.value(), chosen_objective_ub.value()); + if (local_proto_.objective().domain_size() > 1) { + // Intersect with the first interval of the objective domain. + obj_domain = + obj_domain.IntersectionWith(Domain(local_proto_.objective().domain(0), + local_proto_.objective().domain(1))); + } + if (local_proto_.objective().vars().size() == 1 && + local_proto_.objective().coeffs(0) == 1) { + auto* obj_var = + local_proto_.mutable_variables(local_proto_.objective().vars(0)); + const Domain reduced_var_domain = obj_domain.IntersectionWith( + Domain(obj_var->domain(0), obj_var->domain(1))); + FillDomainInProto(reduced_var_domain, obj_var); + } else { + auto* obj = local_proto_.add_constraints()->mutable_linear(); + *obj->mutable_vars() = local_proto_.objective().vars(); + *obj->mutable_coeffs() = local_proto_.objective().coeffs(); + FillDomainInProto(obj_domain, obj); + } + + // Clear the objective. 
+ local_proto_.clear_objective(); + + // Dump? + if (absl::GetFlag(FLAGS_cp_model_dump_submodels)) { + const std::string name = + absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), + "objective_shaving_", objective_lb.value(), ".pb.txt"); + LOG(INFO) << "Dumping objective shaving model to '" << name << "'."; + CHECK(WriteModelProtoToFile(local_proto_, name)); + } + + // Presolve if asked. + if (local_params_.cp_model_presolve()) { + mapping_proto_.Clear(); + postsolve_mapping_.clear(); + auto context = std::make_unique( + local_sat_model_.get(), &local_proto_, &mapping_proto_); + const CpSolverStatus presolve_status = + PresolveCpModel(context.get(), &postsolve_mapping_); + if (presolve_status == CpSolverStatus::INFEASIBLE) { + absl::MutexLock mutex_lock(&mutex_); + shared_->response->UpdateInnerObjectiveBounds( + Info(), chosen_objective_ub + 1, kMaxIntegerValue); + return false; + } + } + + // Tricky: If we aborted during the presolve above, some constraints might + // be in a non-canonical form (like having duplicates, etc...) and it seems + // not all our propagator code deals with that properly. So it is important + // to abort right away here. + // + // We had a bug when the LoadCpModel() below was returning infeasible on + // such a non-fully-presolved model. 
+ if (time_limit->LimitReached()) return false; + + LoadCpModel(local_proto_, local_sat_model_.get()); + return true; +} + +VariablesShavingSolver::VariablesShavingSolver( + const SatParameters& local_parameters, SharedClasses* shared) + : SubSolver(local_parameters.name(), FULL_PROBLEM), + local_params_(local_parameters), + shared_(shared), + stop_current_chunk_(false), + model_proto_(shared->model_proto) { + if (shared_->bounds != nullptr) { + shared_bounds_id_ = shared_->bounds->RegisterNewId(); + } + + absl::MutexLock mutex_lock(&mutex_); + for (const IntegerVariableProto& var_proto : model_proto_.variables()) { + var_domains_.push_back(ReadDomainFromProto(var_proto)); + } +} + +VariablesShavingSolver::~VariablesShavingSolver() { + if (!VLOG_IS_ON(1)) return; + if (shared_ == nullptr || shared_->stats == nullptr) return; + std::vector> stats; + absl::MutexLock mutex_lock(&mutex_); + stats.push_back({"variable_shaving/num_vars_tried", num_vars_tried_}); + stats.push_back({"variable_shaving/num_vars_shaved", num_vars_shaved_}); + stats.push_back( + {"variable_shaving/num_infeasible_found", num_infeasible_found_}); + shared_->stats->AddStats(stats); +} + +bool VariablesShavingSolver::TaskIsAvailable() { + return !shared_->SearchIsDone(); +} + +void VariablesShavingSolver::ProcessLocalResponse( + const CpSolverResponse& local_response, const State& state) { + if (local_response.status() != CpSolverStatus::INFEASIBLE) return; + + absl::MutexLock lock(&mutex_); + const Domain domain = var_domains_[state.var_index]; + Domain new_domain = domain; + ++num_infeasible_found_; + new_domain = domain.IntersectionWith(state.reduced_domain.Complement()); + VLOG(1) << name() << ": var(" << state.var_index << ") " << domain << " ==> " + << new_domain; + + if (domain != new_domain) { + ++num_vars_shaved_; + if (shared_->bounds != nullptr && !new_domain.IsEmpty()) { + shared_->bounds->ReportPotentialNewBounds( + name(), {state.var_index}, {new_domain.Min()}, {new_domain.Max()}); + 
} + var_domains_[state.var_index] = new_domain; + if (var_domains_[state.var_index].IsEmpty()) { + shared_->response->NotifyThatImprovingProblemIsInfeasible( + "Unsat during variables shaving"); + return; + } + } +} + +std::function VariablesShavingSolver::GenerateTask(int64_t task_id) { + return [this, task_id]() mutable { + Model local_sat_model; + CpModelProto shaving_proto; + State state; + if (ResetModel(task_id, &state, &local_sat_model, &shaving_proto)) { + SolveLoadedCpModel(shaving_proto, &local_sat_model); + const CpSolverResponse local_response = + local_sat_model.GetOrCreate()->GetResponse(); + ProcessLocalResponse(local_response, state); + } + + absl::MutexLock mutex_lock(&mutex_); + const double dtime = + local_sat_model.GetOrCreate()->GetElapsedDeterministicTime(); + AddTaskDeterministicDuration(dtime); + shared_->time_limit->AdvanceDeterministicTime(dtime); + }; +} + +void VariablesShavingSolver::Synchronize() { + absl::MutexLock mutex_lock(&mutex_); + // We are just waiting for the inner code to check the time limit or + // to return nicely. 
+ if (stop_current_chunk_) return; + + if (shared_->SearchIsDone()) { + stop_current_chunk_.store(true); + } + + if (shared_->bounds != nullptr) { + std::vector model_variables; + std::vector new_lower_bounds; + std::vector new_upper_bounds; + shared_->bounds->GetChangedBounds(shared_bounds_id_, &model_variables, + &new_lower_bounds, &new_upper_bounds); + + for (int i = 0; i < model_variables.size(); ++i) { + const int var = model_variables[i]; + const int64_t new_lb = new_lower_bounds[i]; + const int64_t new_ub = new_upper_bounds[i]; + const Domain& old_domain = var_domains_[var]; + const Domain new_domain = + old_domain.IntersectionWith(Domain(new_lb, new_ub)); + if (new_domain.IsEmpty()) { + shared_->response->NotifyThatImprovingProblemIsInfeasible( + "Unsat during variables shaving"); + continue; + } + var_domains_[var] = new_domain; + } + } +} + +std::string VariablesShavingSolver::Info() { + return absl::StrCat(name(), " (vars=", model_proto_.variables().size(), + " csts=", model_proto_.constraints().size(), ")"); +} + +int64_t VariablesShavingSolver::DomainSize(int var) const + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + return var_domains_[var].Size(); +} + +bool VariablesShavingSolver::VarIsFixed(int int_var) const + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + return var_domains_[int_var].IsFixed(); +} + +bool VariablesShavingSolver::ConstraintIsInactive(int c) const + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + for (const int ref : model_proto_.constraints(c).enforcement_literal()) { + const int var = PositiveRef(ref); + if (VarIsFixed(var) && var_domains_[var].Min() == (var == ref ? 
0 : 1)) { + return true; + } + } + return false; +} + +bool VariablesShavingSolver::FindNextVar(State* state) + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + const int num_vars = var_domains_.size(); + const int max_index = 2 * num_vars; + for (int i = 0; i < 2 * num_vars; ++i) { + if (++current_index_ == max_index) current_index_ = 0; + const int var = current_index_ / 2; + if (VarIsFixed(var)) continue; + // Let's not shave the single var objective. There are enough workers + // looking at it. + if (model_proto_.has_objective() && + model_proto_.objective().vars_size() == 1 && + var == model_proto_.objective().vars(0)) { + continue; + } + + state->var_index = var; + state->minimize = current_index_ % 2 == 0; + return true; + } + return false; +} + +void VariablesShavingSolver::CopyModelConnectedToVar( + State* state, Model* local_sat_model, CpModelProto* shaving_proto) + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + auto var_to_node = [](int var) { return var; }; + auto ct_to_node = [this](int ct) { + return ct + model_proto_.variables_size(); + }; + + // Build the connected component graph. 
+ DenseConnectedComponentsFinder cc_finder; + cc_finder.SetNumberOfNodes(model_proto_.constraints_size() + + model_proto_.variables_size()); + for (int i = 0; i < model_proto_.constraints_size(); ++i) { + if (ConstraintIsInactive(i)) continue; + const ConstraintProto& ct = model_proto_.constraints(i); + const int ct_node = ct_to_node(i); + for (const int var : UsedVariables(ct)) { + if (VarIsFixed(var)) continue; + cc_finder.AddEdge(ct_node, var_to_node(var)); + } + for (const int interval : UsedIntervals(ct)) { + cc_finder.AddEdge(ct_node, ct_to_node(interval)); + } + } + + DCHECK(shaving_proto->variables().empty()); + DCHECK(shaving_proto->constraints().empty()); + const int root_index = var_to_node(state->var_index); + + const auto active_constraints = [&cc_finder, root_index, + &ct_to_node](int ct) { + return cc_finder.Connected(root_index, ct_to_node(ct)); + }; + + PresolveContext context(local_sat_model, shaving_proto, nullptr); + ImportModelAndDomainsWithBasicPresolveIntoContext( + model_proto_, var_domains_, active_constraints, &context); + + if (VLOG_IS_ON(2) && + shaving_proto->constraints_size() < model_proto_.constraints_size()) { + int num_active_variables = 0; + for (int i = 0; i < var_domains_.size(); ++i) { + if (cc_finder.Connected(root_index, var_to_node(i))) { + ++num_active_variables; + } + } + + LOG(INFO) << "#constraints:" << shaving_proto->constraints_size() << "/" + << model_proto_.constraints_size() + << " #variables:" << num_active_variables << "/" + << var_domains_.size(); + } + + const Domain domain = + ReadDomainFromProto(shaving_proto->variables(state->var_index)); + shaving_proto->clear_objective(); + + int64_t delta = 0; + if (domain.Size() > local_params_.shaving_search_threshold()) { + const int64_t mid_range = (domain.Max() - domain.Min()) / 2; + auto* random = local_sat_model->GetOrCreate(); + delta = absl::LogUniform(*random, 0, mid_range); + } + + if (state->minimize) { + state->reduced_domain = + 
domain.IntersectionWith({domain.Min(), domain.Min() + delta}); + } else { + state->reduced_domain = + domain.IntersectionWith({domain.Max() - delta, domain.Max()}); + } + + FillDomainInProto(state->reduced_domain, + shaving_proto->mutable_variables(state->var_index)); + + if (absl::GetFlag(FLAGS_cp_model_dump_submodels)) { + const std::string shaving_name = absl::StrCat( + absl::GetFlag(FLAGS_cp_model_dump_prefix), "shaving_var_", + state->var_index, (state->minimize ? "_min" : "_max"), ".pb.txt"); + LOG(INFO) << "Dumping shaving model to '" << shaving_name << "'."; + CHECK(WriteModelProtoToFile(*shaving_proto, shaving_name)); + } +} + +bool VariablesShavingSolver::ResetModel(int64_t task_id, State* state, + Model* local_sat_model, + CpModelProto* shaving_proto) { + *local_sat_model->GetOrCreate() = local_params_; + local_sat_model->GetOrCreate()->set_random_seed( + CombineSeed(local_params_.random_seed(), task_id)); + + { + absl::MutexLock lock(&mutex_); + if (!FindNextVar(state)) return false; + CopyModelConnectedToVar(state, local_sat_model, shaving_proto); + ++num_vars_tried_; + } + + auto* time_limit = local_sat_model->GetOrCreate(); + shared_->time_limit->UpdateLocalLimit(time_limit); + time_limit->RegisterSecondaryExternalBooleanAsLimit(&stop_current_chunk_); + time_limit->ChangeDeterministicLimit( + time_limit->GetElapsedDeterministicTime() + + local_params_.shaving_search_deterministic_time()); + + // Presolve if asked. 
+ if (local_params_.cp_model_presolve()) { + std::vector postsolve_mapping; + CpModelProto mapping_proto; + auto context = std::make_unique( + local_sat_model, shaving_proto, &mapping_proto); + const CpSolverStatus presolve_status = + PresolveCpModel(context.get(), &postsolve_mapping); + if (presolve_status == CpSolverStatus::INFEASIBLE) { + CpSolverResponse tmp_response; + tmp_response.set_status(CpSolverStatus::INFEASIBLE); + ProcessLocalResponse(tmp_response, *state); + return false; + } + } + + auto* local_response_manager = + local_sat_model->GetOrCreate(); + local_response_manager->InitializeObjective(*shaving_proto); + local_response_manager->SetSynchronizationMode(true); + + // Tricky: If we aborted during the presolve above, some constraints might + // be in a non-canonical form (like having duplicates, etc...) and it seem + // not all our propagator code deal with that properly. So it is important + // to abort right away here. + // + // We had a bug when the LoadCpModel() below was returning infeasible on + // such non fully-presolved model. + if (time_limit->LimitReached()) return false; + + LoadCpModel(*shaving_proto, local_sat_model); + return true; +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/shaving_solver.h b/ortools/sat/shaving_solver.h new file mode 100644 index 0000000000..0f33b3672b --- /dev/null +++ b/ortools/sat/shaving_solver.h @@ -0,0 +1,144 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_SAT_SHAVING_SOLVER_H_ +#define OR_TOOLS_SAT_SHAVING_SOLVER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/mutex.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_lns.h" +#include "ortools/sat/cp_model_solver_helpers.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/sat_parameters.pb.h" +#include "ortools/sat/subsolver.h" +#include "ortools/util/sorted_interval_list.h" + +namespace operations_research { +namespace sat { + +class ObjectiveShavingSolver : public SubSolver { + public: + ObjectiveShavingSolver(const SatParameters& local_parameters, + NeighborhoodGeneratorHelper* helper, + SharedClasses* shared); + + ~ObjectiveShavingSolver() override; + + bool TaskIsAvailable() override; + + std::function GenerateTask(int64_t task_id) override; + void Synchronize() override; + + private: + std::string Info(); + + bool ResetModel(int64_t task_id); + + // This is fixed at construction. + SatParameters local_params_; + NeighborhoodGeneratorHelper* helper_; + SharedClasses* shared_; + + // Allow to control the local time limit in addition to a potential user + // defined external Boolean. + std::atomic stop_current_chunk_; + + // Local singleton repository and presolved local model. + std::unique_ptr local_sat_model_; + CpModelProto local_proto_; + + // For postsolving a feasible solution or improving objective lb. 
+ std::vector postsolve_mapping_; + CpModelProto mapping_proto_; + + absl::Mutex mutex_; + IntegerValue objective_lb_ ABSL_GUARDED_BY(mutex_); + IntegerValue objective_ub_ ABSL_GUARDED_BY(mutex_); + IntegerValue current_objective_target_ub_ ABSL_GUARDED_BY(mutex_); + bool task_in_flight_ ABSL_GUARDED_BY(mutex_) = false; +}; + +class VariablesShavingSolver : public SubSolver { + public: + struct State { + int var_index; + bool minimize; + Domain reduced_domain; + }; + + VariablesShavingSolver(const SatParameters& local_parameters, + SharedClasses* shared); + + ~VariablesShavingSolver() override; + + bool TaskIsAvailable() override; + + void ProcessLocalResponse(const CpSolverResponse& local_response, + const State& state); + + std::function GenerateTask(int64_t task_id) override; + + void Synchronize() override; + + private: + std::string Info(); + + int64_t DomainSize(int var) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + bool VarIsFixed(int int_var) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + bool ConstraintIsInactive(int c) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + bool FindNextVar(State* state) ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + void CopyModelConnectedToVar(State* state, Model* local_sat_model, + CpModelProto* shaving_proto) + ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + bool ResetModel(int64_t task_id, State* state, Model* local_sat_model, + CpModelProto* shaving_proto); + + // This is fixed at construction. + SatParameters local_params_; + SharedClasses* shared_; + int shared_bounds_id_ = -1; + + // Allow to control the local time limit in addition to a potential user + // defined external Boolean. + std::atomic stop_current_chunk_; + + const CpModelProto& model_proto_; + + absl::Mutex mutex_; + int current_index_ = -1; + std::vector var_domains_ ABSL_GUARDED_BY(mutex_); + + // Stats. 
+ int num_vars_tried_ ABSL_GUARDED_BY(mutex_) = 0; + int num_vars_shaved_ ABSL_GUARDED_BY(mutex_) = 0; + int num_infeasible_found_ ABSL_GUARDED_BY(mutex_) = 0; +}; + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_SHAVING_SOLVER_H_ diff --git a/ortools/sat/simplification.cc b/ortools/sat/simplification.cc index 7482192c73..a7f755add4 100644 --- a/ortools/sat/simplification.cc +++ b/ortools/sat/simplification.cc @@ -71,8 +71,9 @@ void SatPostsolver::FixVariable(Literal x) { } void SatPostsolver::ApplyMapping( - const absl::StrongVector& mapping) { - absl::StrongVector new_mapping; + const util_intops::StrongVector& + mapping) { + util_intops::StrongVector new_mapping; if (reverse_mapping_.size() < mapping.size()) { // We have new variables. while (reverse_mapping_.size() < mapping.size()) { @@ -253,9 +254,9 @@ void SatPresolver::AddClauseInternal(std::vector* clause) { DCHECK_EQ(signatures_.size(), clauses_.size()); } -absl::StrongVector +util_intops::StrongVector SatPresolver::VariableMapping() const { - absl::StrongVector result; + util_intops::StrongVector result; BooleanVariable new_var(0); for (BooleanVariable var(0); var < NumVariables(); ++var) { if (literal_to_clause_sizes_[Literal(var, true)] > 0 || @@ -279,7 +280,7 @@ void SatPresolver::LoadProblemIntoSatSolver(SatSolver* solver) { literal_to_clauses_.clear(); signatures_.clear(); - const absl::StrongVector mapping = + const util_intops::StrongVector mapping = VariableMapping(); int new_size = 0; for (BooleanVariable index : mapping) { @@ -1144,7 +1145,7 @@ class PropagationGraph { void ProbeAndFindEquivalentLiteral( SatSolver* solver, SatPostsolver* postsolver, DratProofHandler* drat_proof_handler, - absl::StrongVector* mapping, + util_intops::StrongVector* mapping, SolverLogger* logger) { WallTimer timer; timer.Start(); diff --git a/ortools/sat/simplification.h b/ortools/sat/simplification.h index 011bf2f17d..0e1663ea5a 100644 --- a/ortools/sat/simplification.h +++ 
b/ortools/sat/simplification.h @@ -74,8 +74,8 @@ class SatPostsolver { // // This can be called more than once. But each call must refer to the current // variables set (after all the previous mapping have been applied). - void ApplyMapping( - const absl::StrongVector& mapping); + void ApplyMapping(const util_intops::StrongVector& mapping); // Extracts the current assignment of the given solver and postsolve it. // @@ -128,7 +128,7 @@ class SatPostsolver { // All the added clauses will be mapped back to the initial variables using // this reverse mapping. This way, clauses_ and associated_literal_ are only // in term of the initial problem. - absl::StrongVector reverse_mapping_; + util_intops::StrongVector reverse_mapping_; // This will stores the fixed variables value and later the postsolved // assignment. @@ -169,7 +169,7 @@ class SatPresolver { // Registers a mapping to encode equivalent literals. // See ProbeAndFindEquivalentLiteral(). void SetEquivalentLiteralMapping( - const absl::StrongVector& mapping) { + const util_intops::StrongVector& mapping) { equiv_mapping_ = mapping; } @@ -204,7 +204,8 @@ class SatPresolver { // clause pointing to them. This return a mapping that maps this interval to // [0, new_size) such that now all variables are used. The unused variable // will be mapped to BooleanVariable(-1). - absl::StrongVector VariableMapping() const; + util_intops::StrongVector VariableMapping() + const; // Loads the current presolved problem in to the given sat solver. // Note that the variables will be re-indexed according to the mapping given @@ -305,7 +306,7 @@ class SatPresolver { BooleanVariable variable; double weight; }; - absl::StrongVector var_pq_elements_; + util_intops::StrongVector var_pq_elements_; AdjustablePriorityQueue var_pq_; // Literal priority queue for BVA. The literals are ordered by descending @@ -336,7 +337,7 @@ class SatPresolver { // Temporary data for SimpleBva(). 
absl::btree_set m_lit_; std::vector m_cls_; - absl::StrongVector literal_to_p_size_; + util_intops::StrongVector literal_to_p_size_; std::vector> flattened_p_; std::vector tmp_new_clause_; @@ -356,18 +357,18 @@ class SatPresolver { // Occurrence list. For each literal, contains the ClauseIndex of the clause // that contains it (ordered by clause index). - absl::StrongVector> + util_intops::StrongVector> literal_to_clauses_; // Because we only lazily clean the occurrence list after clause deletions, // we keep the size of the occurrence list (without the deleted clause) here. - absl::StrongVector literal_to_clause_sizes_; + util_intops::StrongVector literal_to_clause_sizes_; // Used for postsolve. SatPostsolver* postsolver_; // Equivalent literal mapping. - absl::StrongVector equiv_mapping_; + util_intops::StrongVector equiv_mapping_; int num_trivial_clauses_; SatParameters parameters_; @@ -433,7 +434,7 @@ int ComputeResolvantSize(Literal x, const std::vector& a, void ProbeAndFindEquivalentLiteral( SatSolver* solver, SatPostsolver* postsolver, DratProofHandler* drat_proof_handler, - absl::StrongVector* mapping, + util_intops::StrongVector* mapping, SolverLogger* = nullptr); } // namespace sat diff --git a/ortools/sat/stat_tables.cc b/ortools/sat/stat_tables.cc index 1d82db5733..e5b96d133d 100644 --- a/ortools/sat/stat_tables.cc +++ b/ortools/sat/stat_tables.cc @@ -68,8 +68,9 @@ SharedStatTables::SharedStatTables() { lns_table_.push_back( {"LNS stats", "Improv/Calls", "Closed", "Difficulty", "TimeLimit"}); - ls_table_.push_back({"LS stats", "Batches", "Restarts", "LinMoves", - "GenMoves", "CompoundMoves", "WeightUpdates"}); + ls_table_.push_back({"LS stats", "Batches", "Restarts/Perturbs", "LinMoves", + "GenMoves", "CompoundMoves", "Bactracks", + "WeightUpdates", "ScoreComputed"}); } void SharedStatTables::AddTimingStat(const SubSolver& subsolver) { @@ -245,13 +246,16 @@ void SharedStatTables::AddLsStat(absl::string_view name, int64_t num_batches, int64_t 
num_restarts, int64_t num_linear_moves, int64_t num_general_moves, int64_t num_compound_moves, - int64_t num_weight_updates) { + int64_t num_backtracks, + int64_t num_weight_updates, + int64_t num_scores_computed) { absl::MutexLock mutex_lock(&mutex_); ls_table_.push_back( {FormatName(name), FormatCounter(num_batches), FormatCounter(num_restarts), FormatCounter(num_linear_moves), FormatCounter(num_general_moves), FormatCounter(num_compound_moves), - FormatCounter(num_weight_updates)}); + FormatCounter(num_backtracks), FormatCounter(num_weight_updates), + FormatCounter(num_scores_computed)}); } void SharedStatTables::Display(SolverLogger* logger) { diff --git a/ortools/sat/stat_tables.h b/ortools/sat/stat_tables.h index 04d1bf1d23..18b0ffde46 100644 --- a/ortools/sat/stat_tables.h +++ b/ortools/sat/stat_tables.h @@ -48,7 +48,8 @@ class SharedStatTables { void AddLsStat(absl::string_view name, int64_t num_batches, int64_t num_restarts, int64_t num_linear_moves, int64_t num_general_moves, int64_t num_compound_moves, - int64_t num_weight_updates); + int64_t num_bactracks, int64_t num_weight_updates, + int64_t num_scores_computed); // Display the set of table at the end. void Display(SolverLogger* logger); diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index fd4d8c6842..885913187b 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -27,6 +27,7 @@ #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "absl/time/time.h" +#include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" #if !defined(__PORTABLE_PLATFORM__) @@ -44,7 +45,7 @@ namespace { // // For now we use a really basic logic: call the least frequently called. 
int NextSubsolverToSchedule(std::vector>& subsolvers, - const std::vector& num_generated_tasks) { + absl::Span num_generated_tasks) { int best = -1; for (int i = 0; i < subsolvers.size(); ++i) { if (subsolvers[i] == nullptr) continue; @@ -110,14 +111,14 @@ void NonDeterministicLoop(std::vector>& subsolvers, } void DeterministicLoop(std::vector>& subsolvers, - int num_threads, int batch_size) { + int num_threads, int batch_size, int max_num_batches) { SequentialLoop(subsolvers); } #else // __PORTABLE_PLATFORM__ void DeterministicLoop(std::vector>& subsolvers, - int num_threads, int batch_size) { + int num_threads, int batch_size, int max_num_batches) { CHECK_GT(num_threads, 0); CHECK_GT(batch_size, 0); if (batch_size == 1) { @@ -133,10 +134,15 @@ void DeterministicLoop(std::vector>& subsolvers, to_run.reserve(batch_size); ThreadPool pool("DeterministicLoop", num_threads); pool.StartWorkers(); - while (true) { + for (int batch_index = 0;; ++batch_index) { + VLOG(2) << "Starting deterministic batch of size " << batch_size; SynchronizeAll(subsolvers); ClearSubsolversThatAreDone(num_in_flight_per_subsolvers, subsolvers); + // We abort the loop after the last synchronize to properly reports final + // status in case max_num_batches is used. + if (max_num_batches > 0 && batch_index >= max_num_batches) break; + // We first generate all task to run in this batch. // Note that we can't start the task right away since if a task finish // before we schedule everything, we will not be deterministic. diff --git a/ortools/sat/subsolver.h b/ortools/sat/subsolver.h index 9ed456bf3b..3dea6ceb4d 100644 --- a/ortools/sat/subsolver.h +++ b/ortools/sat/subsolver.h @@ -175,8 +175,11 @@ void NonDeterministicLoop(std::vector>& subsolvers, // which one to run. // 3/ wait for all task to finish. // 4/ repeat until no task can be generated in step 2. +// +// If max_num_batches is > 0, stop after that many batches. 
void DeterministicLoop(std::vector>& subsolvers, - int num_threads, int batch_size); + int num_threads, int batch_size, + int max_num_batches = 0); // Same as above, but specialized implementation for the case num_threads=1. // This avoids using a Threadpool altogether. It should have the same behavior diff --git a/ortools/sat/swig_helper.cc b/ortools/sat/swig_helper.cc index 8488320642..b03de25b90 100644 --- a/ortools/sat/swig_helper.cc +++ b/ortools/sat/swig_helper.cc @@ -19,6 +19,7 @@ #include #include +#include "absl/log/check.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_checker.h" @@ -148,6 +149,7 @@ void SolveWrapper::AddBestBoundCallback( } void SolveWrapper::AddBestBoundCallbackFromClass(BestBoundCallback* callback) { + DCHECK(callback != nullptr); model_.Add(NewBestBoundCallback( [callback](double bound) { callback->NewBestBound(bound); })); } diff --git a/ortools/sat/symmetry.h b/ortools/sat/symmetry.h index 3a5f383c41..f7fff5ebe4 100644 --- a/ortools/sat/symmetry.h +++ b/ortools/sat/symmetry.h @@ -114,7 +114,7 @@ class SymmetryPropagator : public SatPropagator { int permutation_index; Literal image; }; - absl::StrongVector> images_; + util_intops::StrongVector> images_; // For each permutation p, we maintain the list of all assigned literals // affected by p whose trail index is < propagation_trail_index_; sorted by @@ -145,7 +145,7 @@ class SymmetryPropagator : public SatPropagator { // The identity permutation over all the literals. // This is temporary modified to encode a sparse permutation and then always // restored to the identity. - mutable absl::StrongVector tmp_literal_mapping_; + mutable util_intops::StrongVector tmp_literal_mapping_; // Symmetry reason indexed by trail_index. 
struct ReasonInfo { diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index e0affdd1cb..6c8999dfa9 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -13,11 +13,15 @@ #include "ortools/sat/synchronization.h" +#include + #include #include #include #include +#include #include +#include #include #include #include @@ -25,6 +29,8 @@ #include #include +#include "absl/hash/hash.h" +#include "absl/time/time.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" #if !defined(__PORTABLE_PLATFORM__) @@ -77,7 +83,8 @@ void SharedLPSolutionRepository::NewLPSolution( // We always prefer to keep the solution from the last synchronize batch. absl::MutexLock mutex_lock(&mutex_); solution.rank = -num_synchronization_; - AddInternal(solution); + ++num_added_; + new_solutions_.push_back(solution); } void SharedIncompleteSolutionManager::AddSolution( @@ -318,6 +325,14 @@ void SharedResponseManager::UpdateInnerObjectiveBounds( if (ub_change) { inner_objective_upper_bound_ = ub.value(); } + + if (always_synchronize_) { + synchronized_inner_objective_lower_bound_ = + IntegerValue(inner_objective_lower_bound_); + synchronized_inner_objective_upper_bound_ = + IntegerValue(inner_objective_upper_bound_); + } + if (inner_objective_lower_bound_ > inner_objective_upper_bound_) { if (best_status_ == CpSolverStatus::FEASIBLE || best_status_ == CpSolverStatus::OPTIMAL) { @@ -385,12 +400,12 @@ void SharedResponseManager::AddUnsatCore(const std::vector& core) { IntegerValue SharedResponseManager::GetInnerObjectiveLowerBound() { absl::MutexLock mutex_lock(&mutex_); - return IntegerValue(inner_objective_lower_bound_); + return synchronized_inner_objective_lower_bound_; } IntegerValue SharedResponseManager::GetInnerObjectiveUpperBound() { absl::MutexLock mutex_lock(&mutex_); - return IntegerValue(inner_objective_upper_bound_); + return synchronized_inner_objective_upper_bound_; } void SharedResponseManager::Synchronize() { @@ 
-406,16 +421,6 @@ void SharedResponseManager::Synchronize() { logger_->FlushPendingThrottledLogs(); } -IntegerValue SharedResponseManager::SynchronizedInnerObjectiveLowerBound() { - absl::MutexLock mutex_lock(&mutex_); - return synchronized_inner_objective_lower_bound_; -} - -IntegerValue SharedResponseManager::SynchronizedInnerObjectiveUpperBound() { - absl::MutexLock mutex_lock(&mutex_); - return synchronized_inner_objective_upper_bound_; -} - IntegerValue SharedResponseManager::BestSolutionInnerObjectiveValue() { absl::MutexLock mutex_lock(&mutex_); return IntegerValue(best_solution_objective_value_); @@ -629,23 +634,18 @@ void SharedResponseManager::NewSolution( solution.variable_values.assign(solution_values.begin(), solution_values.end()); solution.info = solution_info; - solutions_.Add(solution); - } - - if (objective_or_null_ != nullptr) { + } else { const int64_t objective_value = ComputeInnerObjective(*objective_or_null_, solution_values); // Add this solution to the pool, even if it is not improving. - if (!solution_values.empty()) { - SharedSolutionRepository::Solution solution; - solution.variable_values.assign(solution_values.begin(), - solution_values.end()); - solution.rank = objective_value; - solution.info = solution_info; - solutions_.Add(solution); - } + SharedSolutionRepository::Solution solution; + solution.variable_values.assign(solution_values.begin(), + solution_values.end()); + solution.rank = objective_value; + solution.info = solution_info; + solutions_.Add(solution); // Ignore any non-strictly improving solution. 
if (objective_value > inner_objective_upper_bound_) return; @@ -1034,14 +1034,161 @@ int SharedBoundsManager::NumBoundsExported(const std::string& worker_name) { return it->second; } -SharedClausesManager::SharedClausesManager(bool always_synchronize) - : always_synchronize_(always_synchronize) {} +UniqueClauseStream::UniqueClauseStream() { + for (auto& buffer : clauses_by_size_) { + buffer.reserve(kMaxBufferedLiterals); + } +} + +bool UniqueClauseStream::Add(absl::Span clause) { + absl::MutexLock mutex_lock(&mutex_); + if (clause.size() > kMaxClauseSize || clause.size() <= 2) return false; + // This is just a safety check, the caller should have called CanAccept(). + if (NumLiteralsOfSize(clause.size()) + clause.size() > kMaxBufferedLiterals) { + return false; + } + if (BlockClause(clause)) { + std::vector* buffer = MutableBufferForSize(clause.size()); + buffer->insert(buffer->end(), clause.begin(), clause.end()); + return true; + } + return false; +} + +bool UniqueClauseStream::BlockClause(absl::Span clause) { + if (clause.size() > kMaxClauseSize) return false; + if (clause.size() <= 2) return false; + return fingerprints_.emplace(HashClause(clause)).second; +} + +bool UniqueClauseStream::Delete(absl::Span clause) { + const size_t fingerprint = HashClause(clause); + absl::MutexLock mutex_lock(&mutex_); + // Note a clause with this hash may be buffered, but not yet exported. 
+ return fingerprints_.erase(fingerprint) == 1; +} + +CompactVectorVector UniqueClauseStream::NextBatch() { + CompactVectorVector buffer; + buffer.reserve(kMaxLiteralsPerBatch / kMinClauseSize, kMaxLiteralsPerBatch); + int to_fill = kMaxLiteralsPerBatch; + absl::MutexLock mutex_lock(&mutex_); + for (int size = kMinClauseSize; size <= kMaxClauseSize; ++size) { + CHECK_EQ(NumLiteralsOfSize(size) % size, 0); + while (to_fill >= size && NumLiteralsOfSize(size) > 0) { + absl::Span clause = NextClause(size); + if (fingerprints_.contains(HashClause(clause))) { + buffer.Add(NextClause(size)); + to_fill -= size; + } + PopClause(size); + } + } + return buffer; +} + +int UniqueClauseStream::FillUpstreamBuffer(UniqueClauseStream& upstream, + int size, + int max_clauses_to_export) { + int num_exported_clauses = 0; + absl::MutexLock mutex_lock(&mutex_); + while (NumLiteralsOfSize(size) > 0 && + num_exported_clauses < max_clauses_to_export) { + absl::Span clause = NextClause(size); + // Don't emit deleted clauses. 
+ if (fingerprints_.contains(HashClause(clause)) && upstream.Add(clause)) { + ++num_exported_clauses; + } + PopClause(size); + } + return num_exported_clauses; +} + +int UniqueClauseStream::NumBufferedLiterals() const { + absl::MutexLock mutex_lock(&mutex_); + int result = 0; + for (const auto& buffer : clauses_by_size_) { + result += buffer.size(); + } + return result; +} + +bool UniqueClauseStream::CanAccept(int size, int lbd) const { + if (size <= 2 || size > kMaxClauseSize) return false; + absl::MutexLock mutex_lock(&mutex_); + if (lbd > lbd_threshold_) return false; + int num_literals_up_to_size = 0; + for (int i = kMinClauseSize; i <= size; ++i) { + num_literals_up_to_size += NumLiteralsOfSize(i); + } + return num_literals_up_to_size + size <= kMaxBufferedLiterals; +} + +void UniqueClauseStream::RemoveWorstClauses() { + absl::MutexLock mutex_lock(&mutex_); + int literals_to_remove = 0; + for (const auto& buffer : clauses_by_size_) { + literals_to_remove += buffer.size(); + } + literals_to_remove -= kMaxBufferedLiterals; + for (int size = kMaxClauseSize; size >= kMinClauseSize; --size) { + while (NumLiteralsOfSize(size) > 0) { + // Stop if removing one more clause of the current size would + // leave the buffer under full. Otherwise we might remove a shorter + // clause later! 
+ if (literals_to_remove < size) return; + fingerprints_.erase(HashClause(NextClause(size))); + PopClause(size); + literals_to_remove -= size; + } + } +} + +void UniqueClauseStream::set_lbd_threshold(int lbd) { + absl::MutexLock mutex_lock(&mutex_); + lbd_threshold_ = lbd; +} + +size_t UniqueClauseStream::HashClause(absl::Span clause, + size_t hash_seed) { + size_t hash = absl::HashOf(hash_seed, clause.size()); + for (int i = 0; i < clause.size(); ++i) { + hash ^= absl::HashOf(clause[i], hash_seed); + } + return hash; +} + +absl::Span UniqueClauseStream::NextClause(int size) const { + absl::Span buffer = BufferForSize(size); + return buffer.subspan(buffer.size() - size, size); +} + +void UniqueClauseStream::PopClause(int size) { + std::vector* buffer = MutableBufferForSize(size); + buffer->erase(buffer->end() - size, buffer->end()); +} + +int UniqueClauseStream::NumClausesOfSize(int size) const { + return NumLiteralsOfSize(size) / size; +} + +int UniqueClauseStream::NumLiteralsOfSize(int size) const { + return BufferForSize(size).size(); +} + +SharedClausesManager::SharedClausesManager(bool always_synchronize, + absl::Duration share_frequency) + : always_synchronize_(always_synchronize), + share_frequency_(share_frequency) {} int SharedClausesManager::RegisterNewId() { absl::MutexLock mutex_lock(&mutex_); const int id = id_to_last_processed_binary_clause_.size(); id_to_last_processed_binary_clause_.resize(id + 1, 0); + id_to_last_returned_batch_.resize(id + 1, 0); + id_to_last_finished_batch_.resize(id + 1, 0); id_to_clauses_exported_.resize(id + 1, 0); + id_to_clause_stream_.emplace_back(); return id; } @@ -1059,7 +1206,7 @@ void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { const auto [unused_it, inserted] = added_binary_clauses_set_.insert(p); if (inserted) { added_binary_clauses_.push_back(p); - if (always_synchronize_) ++last_visible_clause_; + if (always_synchronize_) ++last_visible_binary_clause_; id_to_clauses_exported_[id]++; // Small 
optim. If the worker is already up to date with clauses to import, @@ -1071,16 +1218,31 @@ void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { } } +std::vector> SharedClausesManager::GetUnseenClauses( + int id) { + std::vector> result; + absl::MutexLock mutex_lock(&mutex_); + for (int i = id_to_last_returned_batch_[id]; i < batches_.size(); ++i) { + for (int j = 0; j < batches_[i].size(); ++j) { + result.push_back(batches_[i][j]); + } + } + id_to_last_finished_batch_[id] = id_to_last_returned_batch_[id]; + id_to_last_returned_batch_[id] = batches_.size(); + return result; +} + void SharedClausesManager::GetUnseenBinaryClauses( int id, std::vector>* new_clauses) { new_clauses->clear(); absl::MutexLock mutex_lock(&mutex_); const int last_binary_clause_seen = id_to_last_processed_binary_clause_[id]; - if (last_binary_clause_seen >= last_visible_clause_) return; + if (last_binary_clause_seen >= last_visible_binary_clause_) return; - new_clauses->assign(added_binary_clauses_.begin() + last_binary_clause_seen, - added_binary_clauses_.begin() + last_visible_clause_); - id_to_last_processed_binary_clause_[id] = last_visible_clause_; + new_clauses->assign( + added_binary_clauses_.begin() + last_binary_clause_seen, + added_binary_clauses_.begin() + last_visible_binary_clause_); + id_to_last_processed_binary_clause_[id] = last_visible_binary_clause_; } void SharedClausesManager::LogStatistics(SolverLogger* logger) { @@ -1102,8 +1264,97 @@ void SharedClausesManager::LogStatistics(SolverLogger* logger) { void SharedClausesManager::Synchronize() { absl::MutexLock mutex_lock(&mutex_); - last_visible_clause_ = added_binary_clauses_.size(); - // TODO(user): We could cleanup added_binary_clauses_ periodically. 
+ last_visible_binary_clause_ = added_binary_clauses_.size(); + const int num_workers = id_to_clause_stream_.size(); + if (num_workers <= 1) return; + if (!share_timer_.IsRunning()) share_timer_.Start(); + if (share_timer_.GetDuration() < share_frequency_) return; + share_timer_.Restart(); + + // Tune LBD threshold for individual workers based on how the worker's buffer + // is. We aim to ensure workers can always export their fair share of clauses. + for (int id = 0; id < num_workers; ++id) { + UniqueClauseStream& stream = id_to_clause_stream_[id]; + const int lbd_threshold = stream.lbd_threshold(); + const int num_buffered_literals = stream.NumBufferedLiterals(); + const bool underfull = + num_buffered_literals < + UniqueClauseStream::kMaxLiteralsPerBatch / num_workers; + const bool overfull = + num_buffered_literals > UniqueClauseStream::kMaxLiteralsPerBatch; + const int new_lbd = std::clamp(lbd_threshold + underfull - overfull, 2, + UniqueClauseStream::kMaxClauseSize); + if (new_lbd != lbd_threshold) { + VLOG(2) << id_to_worker_name_[id] + << " sharing clauses with lbd <= " << new_lbd; + stream.set_lbd_threshold(new_lbd); + } + } + + std::vector ids(num_workers); + int literals_to_fill = UniqueClauseStream::kMaxLiteralsPerBatch; + for (int size = UniqueClauseStream::kMinClauseSize; + size <= UniqueClauseStream::kMaxClauseSize; ++size) { + ids.clear(); + for (int id = 0; id < num_workers; ++id) { + if (id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) > 0) { + ids.push_back(id); + } + } + // Use progressive filling to attempt to fill the batch with clauses of + // minimum size, this is max-min fair. + while (!ids.empty()) { + const int clauses_to_fill = literals_to_fill / size; + if (clauses_to_fill == 0) break; + // Some workers need to export more clauses to fill the batch due to + // rounding, but we don't want all workers to round up. 
+ const int num_to_round_up = clauses_to_fill % ids.size(); + for (int i = 0; i < ids.size(); ++i) { + const bool round_up = i < num_to_round_up; + const int id = ids[i]; + const int shared = id_to_clause_stream_[id].FillUpstreamBuffer( + all_clauses_, size, clauses_to_fill / ids.size() + round_up); + id_to_clauses_exported_[id] += shared; + if (shared == 0 || + id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) == 0) { + ids[i] = ids.back(); + ids.pop_back(); + --i; + } + } + } + } + if (all_clauses_.NumBufferedLiterals() > 0) { + batches_.push_back(all_clauses_.NextBatch()); + VLOG(2) << "Batch #" << batches_.size() << " w/ " << batches_.back().size() + << " clauses max size = " + << batches_.back()[batches_.back().size() - 1].size(); + for (auto& stream : id_to_clause_stream_) { + stream.RemoveWorstClauses(); + } + } + // Delete batches that have been consumed by all workers. + // Keep a few batches around for startup (min finished batch doesn't count + // workers that haven't registered yet). + // This also ensures that our fingerprint table always contains the last few + // batches, so we reduce the chance of an old buffered duplicate clause on + // a worker being emitted from the global stream multiple times. + if (batches_.size() < kMinBatches) return; + const int min_finished_batch = + std::min(batches_.size() - kMinBatches, + *absl::c_min_element(id_to_last_finished_batch_)); + for (int i = 0; i < min_finished_batch; ++i) { + VLOG(2) << "Erasing batch"; + for (int i = 0; i < batches_.front().size(); ++i) { + all_clauses_.Delete(batches_.front()[i]); + } + batches_.pop_front(); + } + for (int id = 0; id < id_to_last_finished_batch_.size(); ++id) { + id_to_last_returned_batch_[id] -= min_finished_batch; + id_to_last_finished_batch_[id] -= min_finished_batch; + } + // TODO(user): We could cleanup binary clauses that have been consumed. 
} void SharedStatistics::AddStats( @@ -1118,7 +1369,6 @@ void SharedStatistics::Log(SolverLogger* logger) { absl::MutexLock mutex_lock(&mutex_); if (stats_.empty()) return; - SOLVER_LOG(logger, ""); SOLVER_LOG(logger, "Stats across workers (summed):"); std::vector> to_sort_; for (const auto& [key, count] : stats_) { @@ -1128,6 +1378,7 @@ void SharedStatistics::Log(SolverLogger* logger) { for (const auto& [key, count] : to_sort_) { SOLVER_LOG(logger, " ", key, ": ", FormatCounter(count)); } + SOLVER_LOG(logger, ""); } } // namespace sat diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index 2ba4999651..139e2f88a1 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -14,7 +14,9 @@ #ifndef OR_TOOLS_SAT_SYNCHRONIZATION_H_ #define OR_TOOLS_SAT_SYNCHRONIZATION_H_ +#include #include +#include #include #include #include @@ -97,6 +99,10 @@ class SharedSolutionRepository { // Returns the solution #i where i must be smaller than NumSolutions(). Solution GetSolution(int index) const; + // Returns the rank of the best known solution. + // You shouldn't call this if NumSolutions() is zero. + int64_t GetBestRank() const; + // Returns the variable value of variable 'var_index' from solution // 'solution_index' where solution_index must be smaller than NumSolutions() // and 'var_index' must be smaller than number of variables. @@ -106,9 +112,9 @@ class SharedSolutionRepository { Solution GetRandomBiasedSolution(absl::BitGenRef random) const; // Add a new solution. Note that it will not be added to the pool of solution - // right away. One must call Synchronize for this to happen. - // - // Works in O(num_solutions_to_keep_). + // right away. One must call Synchronize for this to happen. In order to be + // deterministic, this will keep all solutions until Synchronize() is called, + // so we need to be careful not to generate too many solutions at once. 
void Add(const Solution& solution); // Updates the current pool of solution with the one recently added. Note that @@ -122,21 +128,15 @@ class SharedSolutionRepository { std::vector TableLineStats() const { absl::MutexLock mutex_lock(&mutex_); return {FormatName(name_), FormatCounter(num_added_), - FormatCounter(num_queried_), FormatCounter(num_ignored_), - FormatCounter(num_synchronization_)}; + FormatCounter(num_queried_), FormatCounter(num_synchronization_)}; } protected: - // Helper method for adding the solutions once the mutex is acquired. - void AddInternal(const Solution& solution) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - const std::string name_; const int num_solutions_to_keep_; mutable absl::Mutex mutex_; int64_t num_added_ ABSL_GUARDED_BY(mutex_) = 0; - int64_t num_ignored_ ABSL_GUARDED_BY(mutex_) = 0; mutable int64_t num_queried_ ABSL_GUARDED_BY(mutex_) = 0; int64_t num_synchronization_ ABSL_GUARDED_BY(mutex_) = 0; @@ -281,15 +281,13 @@ class SharedResponseManager { // Note that these bound correspond to valid bound for the problem of finding // a strictly better objective than the current one. Thus the lower bound is // always a valid bound for the global problem, but the upper bound is NOT. + // + // This is always the last bounds in "always_synchronize" mode, otherwise it + // correspond to the bounds at the last Synchronize() call. + void Synchronize(); IntegerValue GetInnerObjectiveLowerBound(); IntegerValue GetInnerObjectiveUpperBound(); - // These functions return the same as the non-synchronized() version but - // only the values at the last time Synchronize() was called. - void Synchronize(); - IntegerValue SynchronizedInnerObjectiveLowerBound(); - IntegerValue SynchronizedInnerObjectiveUpperBound(); - // Returns the current best solution inner objective value or kInt64Max if // there is no solution. 
IntegerValue BestSolutionInnerObjectiveValue(); @@ -570,8 +568,115 @@ class SharedBoundsManager { int export_counter_ = 0; }; -// This class holds all the binary clauses that were found and shared by the -// workers. +// Emit a stream of clauses in batches without duplicates. Each batch has a +// fixed number of literals, containing the smallest clauses added. +// It has a finite size internal buffer that is a small multiple of the batch +// size. +// +// This class is thread-safe, the idea is to have one per worker plus a +// global one to deduplicate between workers to minimize contention. +// +// This uses a finite buffer, so some clauses may be dropped if we generate too +// many more than we export, but that is rarely a problem because we never +// overfill the "global" stream, and if we drop a clause on a worker, one of the +// following will most likely happen: +// 1. Some other worker learns the clause and shares it later. +// 2. All other workers also learn and drop the clause. +// 3. No other worker learns the clause, so it was not that helpful anyway. +// +// Note that this uses literals as encoded in a cp_model.proto. Thus, the +// literals can be negative numbers. +class UniqueClauseStream { + public: + static constexpr int kMinClauseSize = 3; + static constexpr int kMaxClauseSize = 8; + // Export 4KiB of clauses per batch. + static constexpr int kMaxLiteralsPerBatch = 4096 / sizeof(int); + // Bound the total literals we buffer, approximately enforced so shorter + // clauses can replace longer ones. + static constexpr int kMaxBufferedLiterals = 4 * kMaxLiteralsPerBatch; + + UniqueClauseStream(); + // Move only - this is an expensive class to copy. + UniqueClauseStream(const UniqueClauseStream&) = delete; + UniqueClauseStream(UniqueClauseStream&&) = default; + + // Adds the clause to a future batch and returns true if the clause was added. + // Otherwise returns false. This may return false if the buffer is full. 
+ // It will not block the clause if it is dropped to avoid unbounded growth of + // the hash table. + bool Add(absl::Span clause) ABSL_LOCKS_EXCLUDED(mutex_); + + // Lazily deletes a clause with the same hash, returns true if it was present. + // The deleted clause will not be exported (either via NextBatch or + // FillUpstreamBuffer). A clause with the same hash may be re-added after + // calling Delete. If another clause with the same hash is added before the + // deleted clause is emitted then both clauses may be emitted. + bool Delete(absl::Span clause) ABSL_LOCKS_EXCLUDED(mutex_); + + // Returns a set of clauses totalling up to kMaxLiteralsPerBatch and removes + // exported clauses from the internal buffer. + CompactVectorVector NextBatch() ABSL_LOCKS_EXCLUDED(mutex_); + + // Adds up to max_clauses_to_export clauses of a given size to upstream and + // removes them from the internal buffer. + int FillUpstreamBuffer(UniqueClauseStream& upstream, int clause_size, + int max_clauses_to_export) ABSL_LOCKS_EXCLUDED(mutex_); + + // Returns the number of literals in the buffer in clauses with size <= + // max_size. + int NumBufferedLiteralsOfSize(int size) const ABSL_LOCKS_EXCLUDED(mutex_) { + absl::MutexLock lock(&mutex_); + return NumLiteralsOfSize(size); + } + int NumBufferedLiterals() const ABSL_LOCKS_EXCLUDED(mutex_); + + // Returns true if the stream can accept a clause of the specified size and + // LBD without dropping it. + bool CanAccept(int size, int lbd) const; + + // Delete longest clauses while keeping at least kMaxBufferedLiterals. + // This guarantees that CanAccept will return the same result as before, and + // at least the next 4 batches will contain the same clauses, but we will emit + // fewer old, long clauses many batches in the future. 
+ void RemoveWorstClauses(); + + int lbd_threshold() const ABSL_LOCKS_EXCLUDED(mutex_) { + absl::MutexLock lock(&mutex_); + return lbd_threshold_; + } + void set_lbd_threshold(int lbd) ABSL_LOCKS_EXCLUDED(mutex_); + + // Computes a hash that is independent of the order of literals in the clause. + static size_t HashClause(absl::Span clause, size_t hash_seed = 0); + + private: + bool BlockClause(absl::Span clause) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + std::vector* MutableBufferForSize(int size) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + return &clauses_by_size_[size - kMinClauseSize]; + } + absl::Span BufferForSize(int size) const + ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + return clauses_by_size_[size - kMinClauseSize]; + } + absl::Span NextClause(int size) const + ABSL_SHARED_LOCKS_REQUIRED(mutex_); + void PopClause(int size) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + // Computes the number of clauses of a given size. + int NumClausesOfSize(int size) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + int NumLiteralsOfSize(int size) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + + mutable absl::Mutex mutex_; + int lbd_threshold_ ABSL_GUARDED_BY(mutex_) = 2; + absl::flat_hash_set fingerprints_ ABSL_GUARDED_BY(mutex_); + std::array, kMaxClauseSize - kMinClauseSize + 1> + clauses_by_size_ ABSL_GUARDED_BY(mutex_); +}; + +// This class holds clauses found and shared by workers. +// It is exact for binary clauses, but approximate for longer ones. // // It is thread-safe. // @@ -579,9 +684,15 @@ class SharedBoundsManager { // literals can be negative numbers. class SharedClausesManager { public: - explicit SharedClausesManager(bool always_synchronize); + explicit SharedClausesManager(bool always_synchronize, + absl::Duration share_frequency); void AddBinaryClause(int id, int lit1, int lit2); + // Returns new glue clauses. + // The spans are guaranteed to remain valid until the next call to + // SyncClauses(). 
+ std::vector> GetUnseenClauses(int id); + // Fills new_clauses with // {{lit1 of clause1, lit2 of clause1}, // {lit1 of clause2, lit2 of clause2}, @@ -593,26 +704,48 @@ class SharedClausesManager { int RegisterNewId(); void SetWorkerNameForId(int id, const std::string& worker_name); + // A worker can add or remove clauses from its own clause set. + // Retains ownership of the returned ClauseFilter. + UniqueClauseStream* GetClauseStream(int id) { + absl::ReaderMutexLock mutex_lock(&mutex_); + return &id_to_clause_stream_[id]; + } + // Search statistics. void LogStatistics(SolverLogger* logger); // Unlocks waiting binary clauses for workers if always_synchronize is false. + // Periodically starts a new sharing round, making glue clauses visible. void Synchronize(); private: + static constexpr int kMinBatches = 10; absl::Mutex mutex_; - // Cache to avoid adding the same clause twice. + // Binary clauses: + // Cache to avoid adding the same binary clause twice. absl::flat_hash_set> added_binary_clauses_set_ ABSL_GUARDED_BY(mutex_); std::vector> added_binary_clauses_ ABSL_GUARDED_BY(mutex_); std::vector id_to_last_processed_binary_clause_ ABSL_GUARDED_BY(mutex_); - std::vector id_to_clauses_exported_; - int last_visible_clause_ ABSL_GUARDED_BY(mutex_) = 0; - const bool always_synchronize_ = true; + int last_visible_binary_clause_ ABSL_GUARDED_BY(mutex_) = 0; - // Used for reporting statistics. + // Longer clauses: + UniqueClauseStream all_clauses_ ABSL_GUARDED_BY(mutex_); + // This is slightly subtle - we need to track the batches that might be + // currently being processed by each worker. 
+ std::vector id_to_last_returned_batch_ ABSL_GUARDED_BY(mutex_); + std::vector id_to_last_finished_batch_ ABSL_GUARDED_BY(mutex_); + std::deque> batches_ ABSL_GUARDED_BY(mutex_); + std::deque id_to_clause_stream_ ABSL_GUARDED_BY(mutex_); + WallTimer share_timer_ ABSL_GUARDED_BY(mutex_); + + const bool always_synchronize_ = true; + const absl::Duration share_frequency_; + + // Stats: + std::vector id_to_clauses_exported_; absl::flat_hash_map id_to_worker_name_; }; @@ -646,6 +779,13 @@ SharedSolutionRepository::GetSolution(int i) const { return solutions_[i]; } +template +int64_t SharedSolutionRepository::GetBestRank() const { + absl::MutexLock mutex_lock(&mutex_); + CHECK_GT(solutions_.size(), 0); + return solutions_[0].rank; +} + template ValueType SharedSolutionRepository::GetVariableValueInSolution( int var_index, int solution_index) const { @@ -695,29 +835,8 @@ template void SharedSolutionRepository::Add(const Solution& solution) { if (num_solutions_to_keep_ <= 0) return; absl::MutexLock mutex_lock(&mutex_); - AddInternal(solution); -} - -template -void SharedSolutionRepository::AddInternal( - const Solution& solution) { - int worse_solution_index = 0; - for (int i = 0; i < new_solutions_.size(); ++i) { - // Do not add identical solution. 
- if (new_solutions_[i] == solution) return; - if (new_solutions_[worse_solution_index] < new_solutions_[i]) { - worse_solution_index = i; - } - } - if (new_solutions_.size() < num_solutions_to_keep_) { - ++num_added_; - new_solutions_.push_back(solution); - } else if (solution < new_solutions_[worse_solution_index]) { - ++num_added_; - new_solutions_[worse_solution_index] = solution; - } else { - ++num_ignored_; - } + ++num_added_; + new_solutions_.push_back(solution); } template diff --git a/ortools/sat/theta_tree.cc b/ortools/sat/theta_tree.cc index 3a236d6d34..18a983f33c 100644 --- a/ortools/sat/theta_tree.cc +++ b/ortools/sat/theta_tree.cc @@ -27,7 +27,8 @@ ThetaLambdaTree::ThetaLambdaTree() = default; template typename ThetaLambdaTree::TreeNode -ThetaLambdaTree::ComposeTreeNodes(TreeNode left, TreeNode right) { +ThetaLambdaTree::ComposeTreeNodes(const TreeNode& left, + const TreeNode& right) { return {std::max(right.envelope, left.envelope + right.sum_of_energy_min), std::max(right.envelope_opt, right.sum_of_energy_min + @@ -213,11 +214,12 @@ IntegerType ThetaLambdaTree::GetEnvelopeOf(int event) const { template void ThetaLambdaTree::RefreshNode(int node) { + TreeNode* tree = tree_.data(); do { const int right = node | 1; const int left = right ^ 1; node >>= 1; - tree_[node] = ComposeTreeNodes(tree_[left], tree_[right]); + tree[node] = ComposeTreeNodes(tree[left], tree[right]); } while (node > 1); } diff --git a/ortools/sat/theta_tree.h b/ortools/sat/theta_tree.h index bd833b6b5d..d817a328b0 100644 --- a/ortools/sat/theta_tree.h +++ b/ortools/sat/theta_tree.h @@ -207,7 +207,7 @@ class ThetaLambdaTree { IntegerType max_of_energy_delta; }; - TreeNode ComposeTreeNodes(TreeNode left, TreeNode right); + TreeNode ComposeTreeNodes(const TreeNode& left, const TreeNode& right); int GetLeafFromEvent(int event) const; int GetEventFromLeaf(int leaf) const; diff --git a/ortools/sat/util.h b/ortools/sat/util.h index ba165d7126..fb5f337ee1 100644 --- a/ortools/sat/util.h 
+++ b/ortools/sat/util.h @@ -152,6 +152,41 @@ class CompactVectorVector { std::vector buffer_; }; +// We often have a vector with fixed capacity reserved outside the hot loops. +// Using this class instead save the capacity but most importantly link a lot +// less code for the push_back() calls which allow more inlining. +// +// TODO(user): Add more functions and unit-test. +template +class FixedCapacityVector { + public: + void ClearAndReserve(size_t size) { + size_ = 0; + data_.reset(new T[size]); + } + + T* data() const { return data_.get(); } + T* begin() const { return data_.get(); } + T* end() const { return data_.get() + size_; } + size_t size() const { return size_; } + bool empty() const { return size_ == 0; } + + T operator[](int i) const { return data_[i]; } + T& operator[](int i) { return data_[i]; } + + T back() const { return data_[size_ - 1]; } + T& back() { return data_[size_ - 1]; } + + void clear() { size_ = 0; } + void resize(size_t size) { size_ = size; } + void pop_back() { --size_; } + void push_back(T t) { data_[size_++] = t; } + + private: + int size_ = 0; + std::unique_ptr data_ = nullptr; +}; + // Prints a positive number with separators for easier reading (ex: 1'348'065). std::string FormatCounter(int64_t num); diff --git a/ortools/sat/var_domination.cc b/ortools/sat/var_domination.cc index 133eedaafb..f4368db2d5 100644 --- a/ortools/sat/var_domination.cc +++ b/ortools/sat/var_domination.cc @@ -226,8 +226,8 @@ bool VarDomination::EndFirstPhase() { // constraints. Still we should in most situation be a lot lower than that. const int kMaxInitialSize = 50; std::vector cropped_vars; - absl::StrongVector is_cropped(num_vars_with_negation_, - false); + util_intops::StrongVector is_cropped( + num_vars_with_negation_, false); // Fill the initial domination candidates. int non_cropped_size = 0; @@ -289,8 +289,8 @@ bool VarDomination::EndFirstPhase() { // Compute how many extra space we need for transposed values. 
// Note that it cannot be more than twice. int total_extra_space = 0; - absl::StrongVector extra_space(num_vars_with_negation_, - 0); + util_intops::StrongVector extra_space( + num_vars_with_negation_, 0); for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { for (const IntegerVariable dom : DominatingVariables(var)) { if (!is_cropped[NegationOf(dom)]) continue; @@ -1383,11 +1383,11 @@ void ScanModelForDualBoundStrengthening( namespace { -bool ProcessAtMostOne(absl::Span literals, - const std::string& message, - const VarDomination& var_domination, - absl::StrongVector* in_constraints, - PresolveContext* context) { +bool ProcessAtMostOne( + absl::Span literals, const std::string& message, + const VarDomination& var_domination, + util_intops::StrongVector* in_constraints, + PresolveContext* context) { for (const int ref : literals) { (*in_constraints)[VarDomination::RefToIntegerVariable(ref)] = true; } @@ -1432,9 +1432,24 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, } if (!work_to_do) return true; - absl::StrongVector var_lb_to_ub_diff(num_vars * 2, - 0); - absl::StrongVector in_constraints(num_vars * 2, false); + const int64_t saved_num_operations = context->num_presolve_operations; + + // Strenghtening via domination. When a variable is dominated by a bunch of + // other, either we can do (var--, dom++) or if we can't (i.e all dominated + // variable at their upper bound) then maybe all constraint are satisfied if + // var is high enough and we can also decrease it. + util_intops::StrongVector can_freely_decrease_count( + num_vars * 2, 0); + util_intops::StrongVector can_freely_decrease_until( + num_vars * 2, std::numeric_limits::min()); + + // Temporary data that we fill/clear for each linear constraint. + util_intops::StrongVector var_lb_to_ub_diff( + num_vars * 2, 0); + + // Temporary data used for boolean constraints. 
+ util_intops::StrongVector in_constraints(num_vars * 2, + false); absl::flat_hash_set> implications; const int num_constraints = cp_model.constraints_size(); @@ -1543,6 +1558,28 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, return context->NotifyThatModelIsUnsat("linear equation unsat."); } + // Returns the change magnitude in min-activity (resp. max-activity) if all + // the given variables are fixed to their upper bound. + const auto get_delta = [context, &var_lb_to_ub_diff]( + bool use_min_side, + absl::Span vars) { + int64_t delta = 0; + for (const IntegerVariable var : vars) { + // Tricky: For now we skip complex domain as we are not sure they + // can be moved correctly. + if (context->DomainOf(VarDomination::IntegerVariableToRef(var)) + .NumIntervals() != 1) { + continue; + } + if (use_min_side) { + delta += std::max(int64_t{0}, var_lb_to_ub_diff[var]); + } else { + delta += std::max(int64_t{0}, -var_lb_to_ub_diff[var]); + } + } + return delta; + }; + // Look for dominated var. for (int i = 0; i < num_terms; ++i) { const int ref = ct.linear().vars(i); @@ -1550,6 +1587,42 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, const int64_t coeff_magnitude = std::abs(coeff); if (context->IsFixed(ref)) continue; + // For strenghtening using domination, just consider >= constraint. + const bool only_lb = max_activity <= rhs_ub; + const bool only_ub = min_activity >= rhs_lb; + if (only_lb || only_ub) { + // Always transform to coeff_magnitude * current_ref + ... >= + const int current_ref = (coeff > 0) == only_lb ? ref : NegatedRef(ref); + const int64_t shifted_rhs = + only_lb ? 
rhs_lb - min_activity : max_activity - rhs_ub; + const IntegerVariable current_ivar = + VarDomination::RefToIntegerVariable(current_ref); + can_freely_decrease_count[NegationOf(current_ivar)]++; + + const int64_t delta = get_delta( + only_lb, var_domination.DominatingVariables(current_ivar)); + if (delta > 0) { + // When all dominated var are at their upper bound, we miss 'slack' + // to make the constraint trivially satisfiable. + const int64_t slack = shifted_rhs - delta; + const int64_t current_lb = context->MinOf(current_ref); + + // Any increase such that coeff * delta >= slack make the constraint + // trivial. + // + // Note(user): It look like even if any of the upper bound of the + // dominating var decrease, this should still be valid. Here we only + // decrease such a bound due to a dominance relation, so the slack + // when all dominating variable are at their bound should not really + // decrease. + const int64_t min_delta = + slack <= 0 ? 0 : CeilOfRatio(slack, coeff_magnitude); + can_freely_decrease_until[current_ivar] = std::max( + can_freely_decrease_until[current_ivar], current_lb + min_delta); + can_freely_decrease_count[current_ivar]++; + } + } + for (const int current_ref : {ref, NegatedRef(ref)}) { const absl::Span dominated_by = var_domination.DominatingVariables(current_ref); @@ -1564,23 +1637,9 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, const int64_t slack = ub_side ? rhs_ub - min_activity : max_activity - rhs_lb; - // Compute the delta in activity if all dominating var moves to their - // other bound. - int64_t delta = 0; - for (const IntegerVariable ivar : dominated_by) { - // Tricky: For now we skip complex domain as we are not sure they - // can be moved correctly. 
- if (context->DomainOf(VarDomination::IntegerVariableToRef(ivar)) - .NumIntervals() != 1) { - continue; - } - if (ub_side) { - delta += std::max(int64_t{0}, var_lb_to_ub_diff[ivar]); - } else { - delta += std::max(int64_t{0}, -var_lb_to_ub_diff[ivar]); - } - } - + // Compute the delta in min-activity if all dominating var moves to + // their other bound. + const int64_t delta = get_delta(ub_side, dominated_by); const int64_t lb = context->MinOf(current_ref); if (delta + coeff_magnitude > slack) { context->UpdateRuleStats("domination: fixed to lb."); @@ -1611,10 +1670,7 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, // Tricky: If there are holes, we can't just reduce the domain to // new_ub if it is not a valid value, so we need to compute the // Min() of the intersection. - new_ub = context->DomainOf(current_ref) - .IntersectionWith( - Domain(new_ub, std::numeric_limits::max())) - .Min(); + new_ub = context->DomainOf(current_ref).ValueAtOrAfter(new_ub); } if (new_ub < context->MaxOf(current_ref)) { context->UpdateRuleStats("domination: reduced ub."); @@ -1669,16 +1725,78 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, // // TODO(user): generalize to non Booleans? int num_added = 0; - absl::StrongVector increase_is_forbidden(2 * num_vars, - false); + util_intops::StrongVector increase_is_forbidden( + 2 * num_vars, false); for (int positive_ref = 0; positive_ref < num_vars; ++positive_ref) { if (context->IsFixed(positive_ref)) continue; if (context->VariableIsNotUsedAnymore(positive_ref)) continue; if (context->VariableWasRemoved(positive_ref)) continue; - if (!context->CanBeUsedAsLiteral(positive_ref)) continue; + + // Increase the count for variable in the objective to account for the + // kObjectiveConstraint in their VarToConstraints() list. 
+ if (!context->ObjectiveDomainIsConstraining()) { + const int64_t obj_coeff = context->ObjectiveCoeff(positive_ref); + if (obj_coeff > 0) { + can_freely_decrease_count[VarDomination::RefToIntegerVariable( + positive_ref)]++; + } else if (obj_coeff < 0) { + can_freely_decrease_count[NegationOf( + VarDomination::RefToIntegerVariable(positive_ref))]++; + } + } + for (const int ref : {positive_ref, NegatedRef(positive_ref)}) { const IntegerVariable var = VarDomination::RefToIntegerVariable(ref); if (increase_is_forbidden[NegationOf(var)]) continue; + if (can_freely_decrease_count[var] == + context->VarToConstraints(positive_ref).size()) { + // We need to account for domain with hole, hence the ValueAtOrAfter(). + int64_t lb = can_freely_decrease_until[var]; + lb = context->DomainOf(ref).ValueAtOrAfter(lb); + if (lb < context->MaxOf(ref)) { + // We have a candidate, however, we need to make sure the dominating + // variable upper bound didn't change. + // + // TODO(user): It look like testing this is not really necessary. + // The reduction done by this class seem to be order independent. + bool ok = true; + for (const IntegerVariable dom : + var_domination.DominatingVariables(var)) { + // Note that we assumed that a fixed point was reached before this + // is called, so modified_domains should have been empty as we + // entered this function. If not, the code is still correct, but we + // might miss some reduction, they will still likely be done later + // though. + if (increase_is_forbidden[dom] || + context->modified_domains[PositiveRef( + VarDomination::IntegerVariableToRef(dom))]) { + ok = false; + break; + } + } + if (increase_is_forbidden[NegationOf(var)]) { + ok = false; + } + if (ok) { + // TODO(user): Is this needed? 
+ increase_is_forbidden[var] = true; + context->UpdateRuleStats( + "domination: dual strenghtening using dominance"); + if (!context->IntersectDomainWith( + ref, Domain(context->MinOf(ref), lb))) { + return false; + } + + // The rest of the loop only care about Booleans. + // And if this was boolean, we would have fixed it. + // If it became Boolean, we wait for the next call. + // TODO(user): maybe the last point can be improved. + continue; + } + } + } + + if (!context->CanBeUsedAsLiteral(positive_ref)) continue; for (const IntegerVariable dom : var_domination.DominatingVariables(ref)) { if (increase_is_forbidden[dom]) continue; @@ -1691,6 +1809,7 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, ++num_added; context->AddImplication(ref, dom_ref); + context->UpdateNewConstraintsVariableUsage(); implications.insert({ref, dom_ref}); implications.insert({NegatedRef(dom_ref), NegatedRef(ref)}); @@ -1700,12 +1819,17 @@ bool ExploitDominanceRelations(const VarDomination& var_domination, } } } + if (num_added > 0) { VLOG(1) << "Added " << num_added << " domination implications."; - context->UpdateNewConstraintsVariableUsage(); context->UpdateRuleStats("domination: added implications", num_added); } + // TODO(user): We should probably be able to do something with this. + if (saved_num_operations == context->num_presolve_operations) { + context->UpdateRuleStats("TODO domination: unexploited dominations"); + } + return true; } diff --git a/ortools/sat/var_domination.h b/ortools/sat/var_domination.h index 3732a33e83..36428743be 100644 --- a/ortools/sat/var_domination.h +++ b/ortools/sat/var_domination.h @@ -182,16 +182,16 @@ class VarDomination { // S. std::vector tmp_vars_; std::unique_ptr partition_; - absl::StrongVector can_freely_decrease_; + util_intops::StrongVector can_freely_decrease_; // For all one sided constraints, we keep the bitmap of constraint indices // modulo 64 that block on the lower side each variable. 
int64_t ct_index_for_signature_ = 0; - absl::StrongVector block_down_signatures_; + util_intops::StrongVector block_down_signatures_; // Used by FilterUsingTempRanks(). int num_vars_with_negation_; - absl::StrongVector tmp_var_to_rank_; + util_intops::StrongVector tmp_var_to_rank_; // We don't use absl::Span() because the underlying buffer can be resized. // This however serve the same purpose. @@ -203,14 +203,16 @@ class VarDomination { // This hold the first phase best candidate. // Warning, the initial candidates span can overlap in the shared_buffer_. std::vector shared_buffer_; - absl::StrongVector has_initial_candidates_; - absl::StrongVector initial_candidates_; + util_intops::StrongVector has_initial_candidates_; + util_intops::StrongVector + initial_candidates_; // This will hold the final result. // Buffer with independent content for each vars. std::vector buffer_; std::vector other_buffer_; - absl::StrongVector dominating_vars_; + util_intops::StrongVector + dominating_vars_; }; // This detects variables that can move freely in one direction, or that can @@ -266,15 +268,16 @@ class DualBoundStrengthening { } // Starts with kMaxIntegerValue, and decrease as constraints are processed. - absl::StrongVector can_freely_decrease_until_; + util_intops::StrongVector + can_freely_decrease_until_; // How many times can_freely_decrease_until_[var] was set by a constraints. // If only one constraint is blocking, we can do more presolve. - absl::StrongVector num_locks_; + util_intops::StrongVector num_locks_; // If num_locks_[var] == 1, this will be the unique constraint that block var // in this direction. Note that it can be set to -1 if this wasn't recorded. 
- absl::StrongVector locking_ct_index_; + util_intops::StrongVector locking_ct_index_; int num_deleted_constraints_ = 0; }; diff --git a/ortools/sat/work_assignment.cc b/ortools/sat/work_assignment.cc index cfdbab56a0..0a9b3d9fbf 100644 --- a/ortools/sat/work_assignment.cc +++ b/ortools/sat/work_assignment.cc @@ -213,21 +213,6 @@ int SharedTreeManager::NumNodes() const { return nodes_.size(); } -int SharedTreeManager::SplitsToGeneratePerWorker() const { - absl::MutexLock mutex_lock(&mu_); - const int max_additional_nodes = max_nodes_ - static_cast(nodes_.size()); - const int total_splits_wanted = - std::min(num_splits_wanted_, - // Each split generates 2 nodes, so divide by 2, rounding up. - CeilOfRatio(max_additional_nodes, 2)); - // We want workers to propose too many splits as we expect to reject some, - // and it's more efficient to generate several splits on the same worker - // restart so we don't want to divide by num_workers_. - // But we also don't want more than half the splits to come from a single - // restart on a single worker so we divide by 2. 
- return CeilOfRatio(total_splits_wanted, 2); -} - bool SharedTreeManager::SyncTree(ProtoTrail& path) { absl::MutexLock mutex_lock(&mu_); std::vector> nodes = GetAssignedNodes(path); @@ -280,7 +265,7 @@ void SharedTreeManager::ProposeSplit(ProtoTrail& path, ProtoLiteral decision) { << "/" << nodes.size(); return; } - if (nodes_.size() >= max_nodes_) { + if (nodes_.size() + 2 > max_nodes_) { VLOG(2) << "Too many nodes to accept split"; return; } @@ -538,6 +523,7 @@ SharedTreeWorker::SharedTreeWorker(Model* model) helper_(model->GetOrCreate()), heuristics_(model->GetOrCreate()), restart_policy_(model->GetOrCreate()), + level_zero_callbacks_(model->GetOrCreate()), assigned_tree_lbds_(/*window_size=*/8) {} const std::vector& SharedTreeWorker::DecisionReason(int level) { @@ -608,7 +594,6 @@ bool SharedTreeWorker::SyncWithLocalTrail() { if (!helper_->BeforeTakingDecision()) return false; const int level = sat_solver_->CurrentDecisionLevel(); if (level >= assigned_tree_.MaxLevel()) break; - if (level == assigned_tree_.MaxLevel()) break; // The next decision is assigned, make sure it makes sense. const Literal next_decision = assigned_tree_literals_[level]; if (!sat_solver_->Assignment().LiteralIsAssigned(next_decision)) break; @@ -618,6 +603,7 @@ bool SharedTreeWorker::SyncWithLocalTrail() { << " assigned=" << assigned_tree_.MaxLevel(); manager_->CloseTree(assigned_tree_, level + 1); assigned_tree_literals_.clear(); + sat_solver_->Backtrack(0); } else { // The next level is implied by the current one. 
assigned_tree_.SetLevelImplied(level + 1); @@ -631,6 +617,8 @@ bool SharedTreeWorker::NextDecision(LiteralIndex* decision_index) { const auto& decision_policy = heuristics_->decision_policies[heuristics_->policy_index]; const int next_level = sat_solver_->CurrentDecisionLevel() + 1; + new_split_available_ = next_level == assigned_tree_.MaxLevel() + 1; + CHECK_EQ(assigned_tree_literals_.size(), assigned_tree_.MaxLevel()); if (next_level <= assigned_tree_.MaxLevel()) { VLOG(2) << "Following shared trail depth=" << next_level << " " @@ -675,10 +663,11 @@ bool SharedTreeWorker::NextDecision(LiteralIndex* decision_index) { } void SharedTreeWorker::MaybeProposeSplit() { - if (splits_wanted_ == 0 || + if (!new_split_available_ || sat_solver_->CurrentDecisionLevel() != assigned_tree_.MaxLevel() + 1) { return; } + new_split_available_ = false; const Literal split_decision = sat_solver_->Decisions()[assigned_tree_.MaxLevel()].literal; const std::optional encoded = EncodeDecision(split_decision); @@ -686,14 +675,7 @@ void SharedTreeWorker::MaybeProposeSplit() { CHECK_EQ(assigned_tree_literals_.size(), assigned_tree_.MaxLevel()); manager_->ProposeSplit(assigned_tree_, *encoded); if (assigned_tree_.MaxLevel() > assigned_tree_literals_.size()) { - --splits_wanted_; assigned_tree_literals_.push_back(split_decision); - } else { - // If we managed to encode the decision and it wasn't accepted, it's - // unlikely any splits in this subtree will be accepted, skip the - // unnecessary synchronisation until the next time we backtrack to level - // 0. 
- splits_wanted_ = 0; } CHECK_EQ(assigned_tree_literals_.size(), assigned_tree_.MaxLevel()); } @@ -710,9 +692,7 @@ bool SharedTreeWorker::ShouldReplaceSubtree() { restart_policy_->LbdAverageSinceReset(); } -void SharedTreeWorker::SyncWithSharedTree() { - splits_wanted_ = manager_->SplitsToGeneratePerWorker(); - VLOG(2) << "Splits wanted: " << splits_wanted_ << " " << parameters_->name(); +bool SharedTreeWorker::SyncWithSharedTree() { manager_->SyncTree(assigned_tree_); if (ShouldReplaceSubtree()) { ++num_trees_; @@ -733,6 +713,7 @@ void SharedTreeWorker::SyncWithSharedTree() { assigned_tree_literals_.push_back( DecodeDecision(assigned_tree_.Decision(i))); } + return true; } SatSolver::Status SharedTreeWorker::Search( @@ -744,6 +725,8 @@ SatSolver::Status SharedTreeWorker::Search( sat_solver_->Backtrack(0); encoder_->GetTrueLiteral(); encoder_->GetFalseLiteral(); + level_zero_callbacks_->callbacks.push_back( + [this]() { return SyncWithSharedTree(); }); const bool has_objective = objective_ != nullptr && objective_->objective_var != kNoIntegerVariable; std::vector clause; @@ -757,9 +740,6 @@ SatSolver::Status SharedTreeWorker::Search( num_restarts_ % heuristics_->decision_policies.size(); sat_solver_->Backtrack(0); } - if (trail_->CurrentDecisionLevel() == 0) { - SyncWithSharedTree(); - } if (!SyncWithLocalTrail()) return sat_solver_->UnsatStatus(); LiteralIndex decision_index; if (!NextDecision(&decision_index)) continue; diff --git a/ortools/sat/work_assignment.h b/ortools/sat/work_assignment.h index c11712dd5a..38b3b05738 100644 --- a/ortools/sat/work_assignment.h +++ b/ortools/sat/work_assignment.h @@ -146,9 +146,6 @@ class SharedTreeManager { int NumWorkers() const { return num_workers_; } int NumNodes() const ABSL_LOCKS_EXCLUDED(mu_); - // Returns the number of splits each worker should propose this restart. - int SplitsToGeneratePerWorker() const; - // Syncs the state of path with the shared search tree. 
// Clears `path` and returns false if the assigned subtree is closed or a // restart has invalidated the path. @@ -241,7 +238,7 @@ class SharedTreeWorker { // implications are synced. This is a noop if the search is deeper than the // assigned tree. Returns false if the problem is unsat. bool SyncWithLocalTrail(); - void SyncWithSharedTree(); + bool SyncWithSharedTree(); Literal DecodeDecision(ProtoLiteral literal); std::optional EncodeDecision(Literal decision); bool NextDecision(LiteralIndex* decision_index); @@ -269,6 +266,7 @@ class SharedTreeWorker { IntegerSearchHelper* helper_; SearchHeuristics* heuristics_; RestartPolicy* restart_policy_; + LevelZeroCallbackHelper* level_zero_callbacks_; int64_t num_restarts_ = 0; int64_t num_trees_ = 0; @@ -278,7 +276,11 @@ class SharedTreeWorker { // How many restarts had happened when the current tree was assigned? int64_t tree_assignment_restart_ = -1; - int splits_wanted_ = 1; + // True if the last decision may split the assigned tree and has not yet been + // proposed to the SharedTreeManager. + // We propagate the decision before sharing with the SharedTreeManager so we + // don't share any decision that immediately leads to conflict. 
+ bool new_split_available_ = false; std::vector reason_; // Stores the average LBD of learned clauses for each tree assigned since it diff --git a/ortools/scheduling/CMakeLists.txt b/ortools/scheduling/CMakeLists.txt index 18c5ff241a..9329e2dade 100644 --- a/ortools/scheduling/CMakeLists.txt +++ b/ortools/scheduling/CMakeLists.txt @@ -26,5 +26,5 @@ target_include_directories(${NAME} PRIVATE target_link_libraries(${NAME} PRIVATE absl::strings protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::scheduling ALIAS ${NAME}) diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 191060df45..b2ee31597e 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -37,16 +37,20 @@ config_setting( ], ) -cc_library( - name = "adaptative_parameter_value", - hdrs = ["adaptative_parameter_value.h"], - deps = ["//ortools/base"], +# OptionalBoolean +proto_library( + name = "optional_boolean_proto", + srcs = ["optional_boolean.proto"], ) -cc_library( - name = "lazy_mutable_copy", - hdrs = ["lazy_mutable_copy.h"], - deps = ["@com_google_absl//absl/memory"], +cc_proto_library( + name = "optional_boolean_cc_proto", + deps = [":optional_boolean_proto"], +) + +py_proto_library( + name = "optional_boolean_py_pb2", + deps = [":optional_boolean_proto"], ) cc_library( @@ -59,19 +63,16 @@ cc_library( ) cc_library( - name = "flat_matrix", - hdrs = ["flat_matrix.h"], + name = "filelineiter", + hdrs = ["filelineiter.h"], deps = [ - "@com_google_absl//absl/types:span", + "//ortools/base", + "//ortools/base:file", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings", ], ) -cc_library( - name = "random_engine", - hdrs = ["random_engine.h"], - deps = [], -) - cc_library( name = "bitset", srcs = ["bitset.cc"], @@ -111,17 +112,6 @@ cc_library( deps = ["//ortools/base"], ) -#cc_library( -# name = "step_function", -# srcs = ["step_function.cc"], -# hdrs = 
["step_function.h"], -# deps = [ -# "@com_google_absl//absl/strings", -# ":iterators", -# "//ortools/base", -# ], -#) - cc_library( name = "saturated_arithmetic", hdrs = ["saturated_arithmetic.h"], @@ -173,16 +163,8 @@ cc_library( cc_library( name = "string_array", hdrs = ["string_array.h"], -) - -cc_library( - name = "string_util", - srcs = ["string_util.cc"], - hdrs = ["string_util.h"], deps = [ - "//ortools/base", "@com_google_absl//absl/strings", - "@com_google_absl//absl/strings:str_format", ], ) @@ -192,6 +174,8 @@ cc_library( deps = [ "//ortools/base", "//ortools/base:hash", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", ], ) @@ -228,6 +212,15 @@ cc_library( ], ) +cc_library( + name = "sigint", + srcs = ["sigint.cc"], + hdrs = ["sigint.h"], + deps = [ + "//ortools/base", + ], +) + cc_library( name = "fp_utils", srcs = ["fp_utils.cc"], @@ -266,26 +259,6 @@ cc_library( deps = ["//ortools/base"], ) -cc_library( - name = "file_util", - srcs = ["file_util.cc"], - hdrs = ["file_util.h"], - deps = [ - "//ortools/base", - "//ortools/base:dump_vars", - "//ortools/base:file", - "//ortools/base:gzipstring", - "//ortools/base:hash", - "//ortools/base:recordio", - "//ortools/base:status_macros", - "@com_google_absl//absl/status", - "@com_google_absl//absl/status:statusor", - "@com_google_absl//absl/strings", - "@com_google_absl//absl/strings:str_format", - "@com_google_protobuf//:protobuf", - ], -) - cc_library( name = "proto_tools", srcs = ["proto_tools.cc"], @@ -322,23 +295,8 @@ py_proto_library( deps = [":int128_proto"], ) -# OptionalBoolean -proto_library( - name = "optional_boolean_proto", - srcs = ["optional_boolean.proto"], -) +# helper library for the swig wrappers. 
-cc_proto_library( - name = "optional_boolean_cc_proto", - deps = [":optional_boolean_proto"], -) - -py_proto_library( - name = "optional_boolean_py_pb2", - deps = [":optional_boolean_proto"], -) - -# SWIG cc_library( name = "functions_swig_helpers", hdrs = [ @@ -382,15 +340,6 @@ cc_library( ], ) -cc_library( - name = "sigint", - srcs = ["sigint.cc"], - hdrs = ["sigint.h"], - deps = [ - "//ortools/base", - ], -) - cc_library( name = "vector_or_function", hdrs = ["vector_or_function.h"], @@ -399,28 +348,7 @@ cc_library( ], ) -cc_library( - name = "filelineiter", - hdrs = ["filelineiter.h"], - deps = [ - "//ortools/base", - "//ortools/base:file", - "@com_google_absl//absl/status", - "@com_google_absl//absl/strings", - ], -) - -#cc_library( -# name = "bp_parser", -# srcs = ["bp_parser.cc"], -# hdrs = ["bp_parser.h"], -# deps = [ -# "@com_google_absl//absl/strings", -# ":filelineiter", -# "//ortools/base", -# "//ortools/base:file", -# ], -#) +# Parsers and readers. cc_library( name = "qap_reader", @@ -440,6 +368,55 @@ cc_library( ], ) +cc_library( + name = "file_util", + srcs = ["file_util.cc"], + hdrs = ["file_util.h"], + deps = [ + "//ortools/base", + "//ortools/base:dump_vars", + "//ortools/base:file", + "//ortools/base:gzipstring", + "//ortools/base:hash", + "//ortools/base:recordio", + "//ortools/base:status_macros", + "@com_google_absl//absl/status", + "@com_google_absl//absl/status:statusor", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + "@com_google_protobuf//:protobuf", + ], +) + +cc_library( + name = "random_engine", + hdrs = ["random_engine.h"], + deps = [], +) + +cc_library( + name = "string_util", + srcs = ["string_util.cc"], + hdrs = ["string_util.h"], + deps = [ + "//ortools/base", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", + ], +) + +cc_library( + name = "adaptative_parameter_value", + hdrs = ["adaptative_parameter_value.h"], + deps = ["//ortools/base"], +) + +cc_library( + 
name = "lazy_mutable_copy", + hdrs = ["lazy_mutable_copy.h"], + deps = ["@com_google_absl//absl/memory"], +) + cc_library( name = "logging", srcs = ["logging.cc"], @@ -488,6 +465,14 @@ cc_library( ], ) +cc_library( + name = "flat_matrix", + hdrs = ["flat_matrix.h"], + deps = [ + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "fp_roundtrip_conv_testing", testonly = 1, @@ -497,6 +482,26 @@ cc_library( ], ) +cc_library( + name = "aligned_memory", + srcs = ["aligned_memory_internal.h"], + hdrs = ["aligned_memory.h"], + deps = [ + "//ortools/base:mathutil", + ], +) + +cc_library( + name = "vector_sum", + srcs = ["vector_sum_internal.h"], + hdrs = ["vector_sum.h"], + deps = [ + ":aligned_memory", + "@com_google_absl//absl/base:core_headers", + "@com_google_absl//absl/types:span", + ], +) + cc_library( name = "parse_proto", srcs = ["parse_proto.cc"], @@ -520,3 +525,12 @@ cc_library( "@com_google_absl//absl/synchronization", ], ) + +cc_library( + name = "dense_set", + hdrs = ["dense_set.h"], + deps = [ + "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", + ], +) diff --git a/ortools/util/CMakeLists.txt b/ortools/util/CMakeLists.txt index fbcf7f6ca0..c2cccbf2e7 100644 --- a/ortools/util/CMakeLists.txt +++ b/ortools/util/CMakeLists.txt @@ -32,5 +32,5 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::util ALIAS ${NAME}) diff --git a/ortools/util/affine_relation.h b/ortools/util/affine_relation.h index 5d9a95629c..a98c404b2d 100644 --- a/ortools/util/affine_relation.h +++ b/ortools/util/affine_relation.h @@ -15,11 +15,11 @@ #define OR_TOOLS_UTIL_AFFINE_RELATION_H_ #include +#include #include #include "ortools/base/iterator_adaptors.h" #include "ortools/base/logging.h" -#include "ortools/base/macros.h" namespace operations_research { diff --git 
a/ortools/util/aligned_memory_internal.h b/ortools/util/aligned_memory_internal.h index fe50f79781..c0bc044e09 100644 --- a/ortools/util/aligned_memory_internal.h +++ b/ortools/util/aligned_memory_internal.h @@ -15,6 +15,7 @@ #define OR_TOOLS_UTIL_ALIGNED_MEMORY_INTERNAL_H_ #include +#include #include #include diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index c79de47e31..82299b66d9 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -28,7 +28,6 @@ #include "absl/log/check.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" namespace operations_research { diff --git a/ortools/util/dense_set.h b/ortools/util/dense_set.h new file mode 100644 index 0000000000..8211ac414a --- /dev/null +++ b/ortools/util/dense_set.h @@ -0,0 +1,151 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_UTIL_DENSE_SET_H_ +#define OR_TOOLS_UTIL_DENSE_SET_H_ + +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/types/span.h" + +namespace operations_research { +// A set of dense non-negative integer values stored in a dense vector. +// +// This is useful when we want to iterate over a small subset of the possible +// values and reuse the memory, or if we want to randomly sample from the set. 
+// +// If the set is usually small but occasionally very large, iterating over a +// regular hash_set would be less efficient as you would (internal to the hash +// table iterator) have have to iterate over all the buckets in the hash +// table even if empty. If you clear the set frequently to avoid this, you would +// grow and rehash when you have a larger set. +// +// If resize=false, users *must* call reserve(K) where K > any key before +// calling any other method. +template +class DenseSet { + public: + using iterator = typename std::vector::const_iterator; + using const_iterator = typename std::vector::const_iterator; + using value_type = T; + static constexpr bool kAutoResize = auto_resize; + + const_iterator begin() const { return values_.begin(); } + const_iterator end() const { return values_.end(); } + size_t size() const { return values_.size(); } + bool empty() const { return values_.empty(); } + void reserve(size_t size) { + values_.reserve(size); + if (size >= positions_.size()) positions_.resize(size, -1); + } + size_t capacity() const { return positions_.size(); } + + std::pair insert(T value) { + const int pos = Position(value); + if (pos == -1) { + DCHECK_GT(positions_.size(), ToInt(value)); + positions_[ToInt(value)] = values_.size(); + values_.push_back(value); + return {values_.begin() + positions_[ToInt(value)], true}; + } + return {values_.begin() + pos, false}; + } + + iterator find(T value) { + const int pos = Position(value); + DCHECK_GT(positions_.size(), ToInt(value)); + if (pos < 0) return values_.end(); + return values_.begin() + pos; + } + + bool contains(T value) const { + if (kAutoResize && ToInt(value) >= positions_.size()) return false; + return positions_[ToInt(value)] >= 0; + } + + void erase(iterator it) { + const T value = *it; + DCHECK_GT(positions_.size(), ToInt(value)); + positions_[ToInt(values_.back())] = it - values_.begin(); + positions_[ToInt(value)] = -1; + // This is a hack to allow erase to work with a const 
iterator. + values_[it - begin()] = values_.back(); + values_.pop_back(); + } + + int erase(T value) { + const int pos = Position(value); + if (pos < 0) return 0; + DCHECK_GT(positions_.size(), ToInt(value)); + positions_[ToInt(values_.back())] = pos; + values_[pos] = values_.back(); + values_.pop_back(); + positions_[ToInt(value)] = -1; + return 1; + } + + // The ordering is deterministic given the same sequence of inserts and + // erases but is arbitrary and should not be relied upon. + absl::Span values() const { return values_; } + + void clear() { + // We expect values_ to be much smaller than the total number of possible + // values, so just clear entries in the set. + for (const T value : values_) { + DCHECK_GT(positions_.size(), ToInt(value)); + positions_[ToInt(value)] = -1; + } + values_.clear(); + } + + private: + static int ToInt(T); + inline int Position(T value) { + int int_value = ToInt(value); + DCHECK_GE(int_value, 0); + // Automatic Resize increases the CPU time of microbenchmarks by ~30%, but + // even with kAutoResize=true, DenseSet is still 25x faster than a + // flat_hash_set. + if (kAutoResize && int_value >= positions_.size()) { + positions_.resize(ToInt(value) + 1, -1); + } + DCHECK_GT(positions_.size(), int_value); + return positions_[int_value]; + } + std::vector positions_; + std::vector values_; +}; + +// Like DenseSet, but does not automatically resize the internal position +// vector, which is ~30% faster. 
+template +using UnsafeDenseSet = DenseSet; + +template +inline int DenseSet::ToInt(T value) { + return value.value(); +} +template <> +inline int DenseSet::ToInt(int value) { + return value; +} +template <> +inline int DenseSet::ToInt(int value) { + return value; +} + +} // namespace operations_research +#endif // OR_TOOLS_UTIL_DENSE_SET_H_ diff --git a/ortools/util/file_util.cc b/ortools/util/file_util.cc index d486e4159f..db638ca8f9 100644 --- a/ortools/util/file_util.cc +++ b/ortools/util/file_util.cc @@ -14,6 +14,7 @@ #include "ortools/util/file_util.h" #include +#include #include "absl/log/check.h" #include "absl/status/status.h" @@ -26,6 +27,7 @@ #include "google/protobuf/message.h" #include "google/protobuf/text_format.h" #include "google/protobuf/util/json_util.h" +#include "ortools/base/file.h" #include "ortools/base/gzipstring.h" #include "ortools/base/helpers.h" #include "ortools/base/logging.h" diff --git a/ortools/util/file_util.h b/ortools/util/file_util.h index ba062d7c16..b3862dac82 100644 --- a/ortools/util/file_util.h +++ b/ortools/util/file_util.h @@ -21,7 +21,6 @@ #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "google/protobuf/message.h" -#include "ortools/base/dump_vars.h" #include "ortools/base/file.h" #include "ortools/base/options.h" #include "ortools/base/recordio.h" @@ -43,7 +42,7 @@ absl::Status ReadFileToProto( // boolean doesn't work for JSON inputs. bool allow_partial = false); -// Exaclty like ReadFileToProto(), but directly from the contents. +// Exactly like ReadFileToProto(), but directly from the contents. 
absl::Status StringToProto(absl::string_view data, google::protobuf::Message* proto, bool allow_partial = false); @@ -53,7 +52,7 @@ absl::StatusOr ReadFileToProto(absl::string_view filename, bool allow_partial = false) { Proto proto; RETURN_IF_ERROR(ReadFileToProto(filename, &proto, allow_partial)) - << DUMP_VARS(filename); + << "filename=" << filename; return proto; } diff --git a/ortools/util/flat_matrix.h b/ortools/util/flat_matrix.h index b9fc1fb7b4..292adfcb86 100644 --- a/ortools/util/flat_matrix.h +++ b/ortools/util/flat_matrix.h @@ -20,6 +20,7 @@ // vector had a fixed size: vector> has much worse performance in a // highly concurrent setting, because it does a lot of memory allocations. +#include #include #include "absl/types/span.h" diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index d0a3e20fc1..884b46a514 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -27,7 +27,8 @@ #include #include #include -#include // must be call before fenv_access see: https://github.com/microsoft/STL/issues/2613 +// Needed before fenv_access. See https://github.com/microsoft/STL/issues/2613. +#include // IWYU pragma:keep. #include #include "absl/log/check.h" diff --git a/ortools/util/functions_swig_test_helpers.h b/ortools/util/functions_swig_test_helpers.h index 878161ef6a..e0ca6d2d30 100644 --- a/ortools/util/functions_swig_test_helpers.h +++ b/ortools/util/functions_swig_test_helpers.h @@ -21,12 +21,11 @@ // simple static methods; because the Java wrapping of the latter made them hard // to find (whereas the class methods are easy to find). 
+#include #include #include #include -#include "ortools/base/types.h" - namespace operations_research { class FunctionSwigTestHelpers { public: @@ -79,7 +78,7 @@ class FunctionSwigTestHelpers { static void NoOpStringToVoid(std::function fun, std::string x) { - fun(x); + fun(std::move(x)); } }; @@ -87,7 +86,7 @@ class DelayedFunctionSwigTestHelpers { public: explicit DelayedFunctionSwigTestHelpers( std::function fun) - : fun_(fun) {} + : fun_(std::move(fun)) {} int64_t NoOpInt64PairToInt64(int64_t x, int64_t y) { return fun_(x, y); } diff --git a/ortools/util/integer_pq.h b/ortools/util/integer_pq.h index 1f952d3a50..7ae182f372 100644 --- a/ortools/util/integer_pq.h +++ b/ortools/util/integer_pq.h @@ -23,10 +23,10 @@ #ifndef OR_TOOLS_UTIL_INTEGER_PQ_H_ #define OR_TOOLS_UTIL_INTEGER_PQ_H_ +#include #include #include "ortools/base/logging.h" -#include "ortools/base/macros.h" namespace operations_research { diff --git a/ortools/util/logging.cc b/ortools/util/logging.cc index 1689cf7015..afed87a6ca 100644 --- a/ortools/util/logging.cc +++ b/ortools/util/logging.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include "absl/strings/str_cat.h" @@ -27,7 +28,7 @@ SolverLogger::SolverLogger() { timer_.Start(); } void SolverLogger::AddInfoLoggingCallback( std::function callback) { - info_callbacks_.push_back(callback); + info_callbacks_.push_back(std::move(callback)); } void SolverLogger::ClearInfoLoggingCallbacks() { info_callbacks_.clear(); } diff --git a/ortools/util/logging.h b/ortools/util/logging.h index a6ccee1376..57e821ddc2 100644 --- a/ortools/util/logging.h +++ b/ortools/util/logging.h @@ -14,6 +14,7 @@ #ifndef OR_TOOLS_UTIL_LOGGING_H_ #define OR_TOOLS_UTIL_LOGGING_H_ +#include #include #include #include diff --git a/ortools/util/monoid_operation_tree.h b/ortools/util/monoid_operation_tree.h index f2655a71c6..d3c97917e6 100644 --- a/ortools/util/monoid_operation_tree.h +++ b/ortools/util/monoid_operation_tree.h @@ -16,6 +16,7 @@ #include #include 
+#include #include "absl/strings/str_format.h" #include "ortools/base/logging.h" diff --git a/ortools/util/proto_tools.h b/ortools/util/proto_tools.h index 216b2687ad..692e4209ac 100644 --- a/ortools/util/proto_tools.h +++ b/ortools/util/proto_tools.h @@ -20,6 +20,7 @@ #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" +#include "google/protobuf/descriptor.h" #include "google/protobuf/message.h" namespace operations_research { diff --git a/ortools/util/qap_reader.h b/ortools/util/qap_reader.h index 02145fca6f..654fa5875d 100644 --- a/ortools/util/qap_reader.h +++ b/ortools/util/qap_reader.h @@ -15,7 +15,6 @@ #define OR_TOOLS_UTIL_QAP_READER_H_ #include -#include #include #include "absl/strings/string_view.h" diff --git a/ortools/util/random_engine.h b/ortools/util/random_engine.h index 8a2aa07253..5c3b558966 100644 --- a/ortools/util/random_engine.h +++ b/ortools/util/random_engine.h @@ -16,8 +16,6 @@ #ifndef OR_TOOLS_UTIL_RANDOM_ENGINE_H_ #define OR_TOOLS_UTIL_RANDOM_ENGINE_H_ -#include - namespace operations_research { using random_engine_t = std::mt19937_64; diff --git a/ortools/util/rational_approximation.h b/ortools/util/rational_approximation.h index 8c3a7fd251..9d1208f29d 100644 --- a/ortools/util/rational_approximation.h +++ b/ortools/util/rational_approximation.h @@ -14,10 +14,9 @@ #ifndef OR_TOOLS_UTIL_RATIONAL_APPROXIMATION_H_ #define OR_TOOLS_UTIL_RATIONAL_APPROXIMATION_H_ +#include #include -#include "ortools/base/types.h" - namespace operations_research { // The type Fraction represents a number in the form of two integers: numerator diff --git a/ortools/util/rev.h b/ortools/util/rev.h index e2a7d68a68..5c78d9e0da 100644 --- a/ortools/util/rev.h +++ b/ortools/util/rev.h @@ -15,6 +15,8 @@ #ifndef OR_TOOLS_UTIL_REV_H_ #define OR_TOOLS_UTIL_REV_H_ +#include +#include #include #include "absl/container/flat_hash_map.h" @@ -126,7 +128,7 @@ class RevVector : public ReversibleInterface { private: std::vector 
end_of_level_; // In stack_. std::vector> stack_; - absl::StrongVector vector_; + util_intops::StrongVector vector_; }; template diff --git a/ortools/util/running_stat.h b/ortools/util/running_stat.h index 37833f1c6d..881c1c2eab 100644 --- a/ortools/util/running_stat.h +++ b/ortools/util/running_stat.h @@ -14,7 +14,9 @@ #ifndef OR_TOOLS_UTIL_RUNNING_STAT_H_ #define OR_TOOLS_UTIL_RUNNING_STAT_H_ +#include #include +#include #include "ortools/base/logging.h" diff --git a/ortools/util/saturated_arithmetic.h b/ortools/util/saturated_arithmetic.h index 8aa780cdbc..e2065ec3cc 100644 --- a/ortools/util/saturated_arithmetic.h +++ b/ortools/util/saturated_arithmetic.h @@ -19,6 +19,7 @@ #include #include "absl/base/casts.h" +#include "absl/log/check.h" #include "ortools/base/types.h" #include "ortools/util/bitset.h" diff --git a/ortools/util/solve_interrupter.h b/ortools/util/solve_interrupter.h index dfb899a1ee..ecdc5d9eb0 100644 --- a/ortools/util/solve_interrupter.h +++ b/ortools/util/solve_interrupter.h @@ -17,11 +17,9 @@ #include #include #include -#include #include #include "absl/base/thread_annotations.h" -#include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "ortools/base/linked_hash_map.h" #include "ortools/base/strong_int.h" diff --git a/ortools/util/sorted_interval_list.cc b/ortools/util/sorted_interval_list.cc index c79f55f1e7..7511e8ec55 100644 --- a/ortools/util/sorted_interval_list.cc +++ b/ortools/util/sorted_interval_list.cc @@ -630,7 +630,7 @@ Domain Domain::SquareSuperset() const { for (const int64_t value : abs_domain.Values()) { values.push_back(CapProd(value, value)); } - return Domain::FromValues(values); + return Domain::FromValues(std::move(values)); } } diff --git a/ortools/util/sorted_interval_list.h b/ortools/util/sorted_interval_list.h index 935e5bbea7..052d550ca1 100644 --- a/ortools/util/sorted_interval_list.h +++ b/ortools/util/sorted_interval_list.h @@ -25,7 +25,6 @@ #include "absl/container/inlined_vector.h" 
#include "absl/types/span.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" namespace operations_research { diff --git a/ortools/util/stats.h b/ortools/util/stats.h index 7e862648bd..c189bc091c 100644 --- a/ortools/util/stats.h +++ b/ortools/util/stats.h @@ -68,6 +68,7 @@ #ifndef OR_TOOLS_UTIL_STATS_H_ #define OR_TOOLS_UTIL_STATS_H_ +#include #include #include #include diff --git a/ortools/util/strong_integers.h b/ortools/util/strong_integers.h index 0d7c2f9f05..6153487a9c 100644 --- a/ortools/util/strong_integers.h +++ b/ortools/util/strong_integers.h @@ -57,6 +57,8 @@ #include #include // NOLINT +#include "absl/base/attributes.h" +#include "absl/base/port.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" diff --git a/ortools/util/time_limit.h b/ortools/util/time_limit.h index 1bfcc00b2b..aad7bbe71e 100644 --- a/ortools/util/time_limit.h +++ b/ortools/util/time_limit.h @@ -16,15 +16,20 @@ #include #include +#include #include #include #include #include +#include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/flags/declare.h" +#include "absl/flags/flag.h" +#include "absl/log/check.h" #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" +#include "absl/time/time.h" #include "ortools/base/timer.h" #include "ortools/base/types.h" #include "ortools/util/running_stat.h" diff --git a/ortools/util/tuple_set.h b/ortools/util/tuple_set.h index 14c536d882..cd685a6aa4 100644 --- a/ortools/util/tuple_set.h +++ b/ortools/util/tuple_set.h @@ -34,14 +34,13 @@ #define OR_TOOLS_UTIL_TUPLE_SET_H_ #include +#include #include #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "ortools/base/hash.h" #include "ortools/base/logging.h" -#include "ortools/base/macros.h" -#include "ortools/base/types.h" namespace operations_research { // ----- Main IntTupleSet class ----- diff --git a/ortools/util/vector_or_function.h 
b/ortools/util/vector_or_function.h index 91c51bbb96..282d1648cd 100644 --- a/ortools/util/vector_or_function.h +++ b/ortools/util/vector_or_function.h @@ -18,7 +18,6 @@ #include #include "ortools/base/logging.h" -#include "ortools/base/types.h" namespace operations_research { diff --git a/ortools/util/vector_sum_internal.h b/ortools/util/vector_sum_internal.h index 1245f11ec8..2b727e7d76 100644 --- a/ortools/util/vector_sum_internal.h +++ b/ortools/util/vector_sum_internal.h @@ -16,10 +16,12 @@ #include #include +#include #include #include #include "absl/base/attributes.h" +#include "absl/base/optimization.h" #include "absl/types/span.h" #include "ortools/util/aligned_memory.h" diff --git a/ortools/util/zvector.h b/ortools/util/zvector.h index ff12c1ed11..ec21520c70 100644 --- a/ortools/util/zvector.h +++ b/ortools/util/zvector.h @@ -15,7 +15,7 @@ #define OR_TOOLS_UTIL_ZVECTOR_H_ #if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ - defined(__GNUC__) + defined(__GNUC__) #include #elif !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__MINGW64__) #include @@ -74,7 +74,7 @@ class ZVector { return base_[index]; } - const T operator[](int64_t index) const { + T operator[](int64_t index) const { DCHECK_LE(min_index_, index); DCHECK_GE(max_index_, index); DCHECK(base_ != nullptr); diff --git a/ortools/xpress/CMakeLists.txt b/ortools/xpress/CMakeLists.txt index 74831afe47..885536a55d 100644 --- a/ortools/xpress/CMakeLists.txt +++ b/ortools/xpress/CMakeLists.txt @@ -31,4 +31,4 @@ target_link_libraries(${NAME} PRIVATE absl::strings absl::str_format protobuf::libprotobuf - ${PROJECT_NAMESPACE}::${PROJECT_NAME}_proto) + ${PROJECT_NAMESPACE}::ortools_proto) diff --git a/patches/abseil-cpp-20240116.2.patch b/patches/abseil-cpp-20240116.2.patch index 894d9df7d4..6f126da41a 100644 --- a/patches/abseil-cpp-20240116.2.patch +++ b/patches/abseil-cpp-20240116.2.patch @@ -89,3 +89,22 @@ index 128cc0e9..11d65d55 100644 ) absl_cc_test( +diff --git 
a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake +index c53b358..9906382 100644 +--- a/CMake/AbseilHelpers.cmake ++++ b/CMake/AbseilHelpers.cmake +@@ -250,6 +250,14 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") + elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared") + add_library(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) ++ if(APPLE) ++ set_target_properties(${_NAME} PROPERTIES ++ INSTALL_RPATH "@loader_path") ++ elseif(UNIX) ++ set_target_properties(${_NAME} PROPERTIES ++ POSITION_INDEPENDENT_CODE ON ++ INSTALL_RPATH "$ORIGIN") ++ endif() + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_LIB_DEPS} + PRIVATE diff --git a/patches/pybind11_abseil.patch b/patches/pybind11_abseil.patch index a94f5368ad..dc970722ca 100644 --- a/patches/pybind11_abseil.patch +++ b/patches/pybind11_abseil.patch @@ -55,7 +55,7 @@ index ceb65a8..e142837 100644 include_directories(${TOP_LEVEL_DIR} ${pybind11_INCLUDE_DIRS}) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt new file mode 100644 -index 0000000..ffe4d38 +index 0000000..826eda8 --- /dev/null +++ b/cmake/dependencies/CMakeLists.txt @@ -0,0 +1,15 @@ @@ -64,7 +64,7 @@ index 0000000..ffe4d38 +set(ABSL_PROPAGATE_CXX_STD ON) +set(BUILD_TESTING OFF) +FetchContent_Declare( -+ abseil-cpp ++ absl + URL https://github.com/abseil/abseil-cpp/archive/refs/tags/20230802.0.tar.gz + URL_HASH + SHA256=59d2976af9d6ecf001a81a35749a6e551a335b949d34918cfade07737b9d93c5) @@ -73,9 +73,9 @@ index 0000000..ffe4d38 + pybind11 + URL https://github.com/pybind/pybind11/archive/refs/heads/master.tar.gz) + -+FetchContent_MakeAvailable(abseil-cpp pybind11) ++FetchContent_MakeAvailable(absl pybind11) diff --git a/pybind11_abseil/BUILD b/pybind11_abseil/BUILD -index 4cff8b7..33e614a 100644 +index 791c245..33e614a 100644 --- a/pybind11_abseil/BUILD +++ b/pybind11_abseil/BUILD @@ -25,43 +25,39 @@ pybind_library( @@ -90,7 +90,7 @@ index 4cff8b7..33e614a 100644 
visibility = ["//visibility:public"], deps = [ "@com_google_absl//absl/status", -- "@local_config_python//:python_headers", # buildcleaner: keep +- "@rules_python//python/cc:current_py_cc_headers", # buildcleaner: keep ], ) @@ -101,7 +101,7 @@ index 4cff8b7..33e614a 100644 visibility = ["//visibility:private"], deps = [ ":ok_status_singleton_lib", -- "@local_config_python//:python_headers", # buildcleaner: keep +- "@rules_python//python/cc:current_py_cc_headers", # buildcleaner: keep ], ) @@ -113,7 +113,7 @@ index 4cff8b7..33e614a 100644 - linkshared = 1, deps = [ ":ok_status_singleton_pyinit_google3", -- "@local_config_python//:python_headers", # buildcleaner: keep +- "@rules_python//python/cc:current_py_cc_headers", # buildcleaner: keep ], ) @@ -149,7 +149,7 @@ index 4cff8b7..33e614a 100644 - linkshared = 1, deps = [ ":status_pyinit_google3", -- "@local_config_python//:python_headers", # buildcleaner: keep +- "@rules_python//python/cc:current_py_cc_headers", # buildcleaner: keep ], ) diff --git a/patches/pybind11_bazel.patch b/patches/pybind11_bazel.patch index 328ee6b501..c570bea7a5 100644 --- a/patches/pybind11_bazel.patch +++ b/patches/pybind11_bazel.patch @@ -1,8 +1,17 @@ diff --git a/build_defs.bzl b/build_defs.bzl -index cde1e93..993b538 100644 +index 503ce33..e233bb0 100644 --- a/build_defs.bzl +++ b/build_defs.bzl -@@ -35,12 +35,19 @@ def pybind_extension( +@@ -5,8 +5,6 @@ + + """Build rules for pybind11.""" + +-load("@bazel_skylib//rules:copy_file.bzl", "copy_file") +- + def register_extension_info(**kwargs): + pass + +@@ -42,12 +40,19 @@ def pybind_extension( linkopts = [], tags = [], deps = [], @@ -20,12 +29,17 @@ index cde1e93..993b538 100644 + }), + visibility = visibility, copts = copts + PYBIND_COPTS + select({ - "@pybind11//:msvc_compiler": [], - "//conditions:default": [ -@@ -59,6 +66,45 @@ def pybind_extension( + Label("@pybind11//:msvc_compiler"): [], + "//conditions:default": ["-fvisibility=hidden"], +@@ -64,19 +69,42 @@ def pybind_extension( 
**kwargs ) +- copy_file( +- name = name + "_copy_so_to_pyd", +- src = name + ".so", +- out = name + ".pyd", +- testonly = kwargs.get("testonly") + native.cc_binary( + name = name + ".dll", + target_compatible_with = select({ @@ -52,19 +66,20 @@ index cde1e93..993b538 100644 + outs = [name + ".pyd"], + cmd = "cp $< $@", + visibility = visibility, -+ ) -+ + ) + +- native.alias( + native.py_library( -+ name = name, + name = name, +- actual = select({ +- "@platforms//os:windows": name + ".pyd", +- "//conditions:default": name + ".so", + data = select({ + "@platforms//os:windows": [":" + name + ".pyd"], + "//conditions:default": [":" + name + ".so"], -+ }), + }), + deps = py_deps, + visibility = visibility, -+ ) -+ -+ + ) + # Builds a pybind11 compatible library. This can be linked to a pybind_extension. - def pybind_library( - name, diff --git a/patches/pybind11_protobuf.patch b/patches/pybind11_protobuf.patch index de4d64948d..d16952621c 100644 --- a/patches/pybind11_protobuf.patch +++ b/patches/pybind11_protobuf.patch @@ -1,8 +1,8 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 46388d1..a035b25 100644 +index 2139dc0..1942ad0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -21,7 +21,7 @@ option(BUILD_TESTS "Build tests." 
OFF) +@@ -27,7 +27,7 @@ include(CTest) # ============================================================================ # Find Python @@ -11,7 +11,7 @@ index 46388d1..a035b25 100644 # ============================================================================ # Build dependencies -@@ -50,8 +50,10 @@ pybind11_add_module( +@@ -87,8 +87,10 @@ pybind11_add_module( pybind11_protobuf/proto_utils.h) target_link_libraries( @@ -24,16 +24,7 @@ index 46388d1..a035b25 100644 target_include_directories( pybind11_proto_utils PRIVATE ${PROJECT_SOURCE_DIR} ${protobuf_INCLUDE_DIRS} -@@ -60,7 +62,7 @@ target_include_directories( - # ============================================================================ - # pybind11_native_proto_caster shared library - add_library( -- pybind11_native_proto_caster SHARED -+ pybind11_native_proto_caster STATIC - # bazel: pybind_library: native_proto_caster - pybind11_protobuf/native_proto_caster.h - # bazel: pybind_library: enum_type_caster -@@ -82,10 +84,12 @@ target_link_libraries( +@@ -116,10 +118,11 @@ target_link_libraries( absl::optional protobuf::libprotobuf pybind11::pybind11 @@ -42,12 +33,11 @@ index 46388d1..a035b25 100644 target_include_directories( pybind11_native_proto_caster -+ PUBLIC -+ $ ++ PUBLIC $ PRIVATE ${PROJECT_SOURCE_DIR} ${protobuf_INCLUDE_DIRS} ${protobuf_SOURCE_DIR} ${pybind11_INCLUDE_DIRS}) -@@ -112,7 +116,7 @@ target_link_libraries( +@@ -143,7 +146,7 @@ target_link_libraries( absl::optional protobuf::libprotobuf pybind11::pybind11 @@ -56,38 +46,3 @@ index 46388d1..a035b25 100644 target_include_directories( pybind11_wrapped_proto_caster -diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt -index 111b34f..f78e946 100644 ---- a/cmake/dependencies/CMakeLists.txt -+++ b/cmake/dependencies/CMakeLists.txt -@@ -7,7 +7,7 @@ if(NOT absl_FOUND) - set(ABSL_PROPAGATE_CXX_STD ON) - set(ABSL_ENABLE_INSTALL ON) - FetchContent_Declare( -- absl -+ abseil-cpp - GIT_REPOSITORY ${_absl_repository} - GIT_TAG 
${_absl_tag}) - endif() -@@ -35,17 +35,17 @@ if(NOT pybind11_FOUND) - endif() - - # ============================================================================ --# Make dependencies avaialble -+# Make dependencies available - --if(NOT absl_FOUND) -+if(NOT abseil-cpp_FOUND) - message(CHECK_START "Fetching Abseil-cpp") - list(APPEND CMAKE_MESSAGE_INDENT " ") -- FetchContent_MakeAvailable(absl) -+ FetchContent_MakeAvailable(abseil-cpp) - list(POP_BACK CMAKE_MESSAGE_INDENT) - message(CHECK_PASS "fetched") - endif() - --if(NOT Protobuf_FOUND) -+if(NOT protobuf_FOUND) - message(CHECK_START "Fetching Protobuf") - list(APPEND CMAKE_MESSAGE_INDENT " ") - FetchContent_MakeAvailable(Protobuf)