From 6467dc69b09e4cebe8834e33702208d165b422b5 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 28 Oct 2020 13:42:36 +0100 Subject: [PATCH] reformat the code; [CP-SAT] add dominated columns presolve --- makefiles/Makefile.gen.mk | 13 +- ortools/algorithms/dynamic_partition.cc | 31 +- ortools/algorithms/dynamic_permutation.cc | 6 +- ortools/algorithms/find_graph_symmetries.cc | 94 +- ortools/algorithms/find_graph_symmetries.h | 47 +- ortools/algorithms/hungarian.cc | 48 +- ortools/algorithms/knapsack_solver.cc | 242 ++-- .../algorithms/knapsack_solver_for_cuts.cc | 68 +- ortools/algorithms/sparse_permutation.cc | 6 +- ortools/base/bitmap.cc | 2 +- ortools/base/file.cc | 102 +- ortools/base/jniutil.h | 19 +- ortools/base/random.cc | 2 +- ortools/base/recordio.cc | 22 +- ortools/base/stl_util.h | 226 ++-- ortools/base/sysinfo.cc | 10 +- ortools/base/threadpool.cc | 6 +- ortools/base/timer.cc | 2 +- ortools/bop/bop_base.cc | 10 +- ortools/bop/bop_fs.cc | 48 +- ortools/bop/bop_lns.cc | 64 +- ortools/bop/bop_ls.cc | 87 +- ortools/bop/bop_portfolio.cc | 62 +- ortools/bop/bop_solution.cc | 10 +- ortools/bop/bop_solver.cc | 16 +- ortools/bop/bop_util.cc | 16 +- ortools/bop/complete_optimizer.cc | 12 +- ortools/bop/integral_solver.cc | 188 +-- ortools/data/jobshop_scheduling_parser.cc | 62 +- ortools/data/rcpsp_parser.cc | 50 +- ortools/data/set_covering_parser.cc | 18 +- ortools/flatzinc/checker.cc | 427 ++++--- ortools/flatzinc/cp_model_fz_solver.cc | 208 ++-- ortools/flatzinc/fz.cc | 12 +- ortools/flatzinc/model.cc | 96 +- ortools/flatzinc/parser.cc | 30 +- ortools/flatzinc/parser.tab.cc | 248 ++-- ortools/flatzinc/parser.yy.cc | 308 +++-- ortools/flatzinc/parser_main.cc | 4 +- ortools/flatzinc/parser_util.cc | 16 +- ortools/flatzinc/presolve.cc | 126 +- ortools/glop/basis_representation.cc | 55 +- ortools/glop/dual_edge_norms.cc | 18 +- ortools/glop/entering_variable.cc | 58 +- ortools/glop/initial_basis.cc | 30 +- ortools/glop/lp_solver.cc | 78 +- 
ortools/glop/lu_factorization.cc | 69 +- ortools/glop/markowitz.cc | 78 +- ortools/glop/preprocessor.cc | 206 ++-- ortools/glop/primal_edge_norms.cc | 26 +- ortools/glop/rank_one_update.h | 36 +- ortools/glop/reduced_costs.cc | 52 +- ortools/glop/revised_simplex.cc | 144 ++- ortools/glop/revised_simplex.h | 101 +- ortools/glop/update_row.cc | 24 +- ortools/glop/variable_values.cc | 30 +- ortools/glop/variables_info.cc | 22 +- ortools/graph/bellman_ford.cc | 10 +- ortools/graph/christofides.h | 30 +- ortools/graph/cliques.cc | 27 +- ortools/graph/cliques.h | 47 +- ortools/graph/connected_components.cc | 7 +- ortools/graph/connected_components.h | 32 +- ortools/graph/dijkstra.cc | 20 +- ortools/graph/ebert_graph.h | 116 +- ortools/graph/graph.h | 194 +-- ortools/graph/hamiltonian_path.h | 34 +- ortools/graph/io.h | 28 +- ortools/graph/iterators.h | 35 +- ortools/graph/linear_assignment.cc | 2 +- ortools/graph/linear_assignment.h | 48 +- ortools/graph/max_flow.cc | 26 +- ortools/graph/max_flow.h | 29 +- ortools/graph/min_cost_flow.cc | 23 +- ortools/graph/min_cost_flow.h | 17 +- ortools/graph/perfect_matching.cc | 114 +- ortools/graph/perfect_matching.h | 52 +- ortools/graph/shortestpaths.cc | 2 +- ortools/graph/topologicalsorter.cc | 20 +- ortools/graph/topologicalsorter.h | 122 +- ortools/graph/util.cc | 2 +- ortools/graph/util.h | 74 +- ortools/gscip/gscip.cc | 359 +++--- ortools/gscip/gscip_ext.cc | 46 +- ortools/gscip/gscip_parameters.cc | 30 +- ortools/gscip/legacy_scip_params.cc | 4 +- ortools/linear_solver/bop_interface.cc | 58 +- ortools/linear_solver/cbc_interface.cc | 44 +- ortools/linear_solver/clp_interface.cc | 76 +- ortools/linear_solver/glop_interface.cc | 70 +- ortools/linear_solver/glpk_interface.cc | 96 +- ortools/linear_solver/gurobi_environment.cc | 2 +- ortools/linear_solver/gurobi_environment.h | 2 +- ortools/linear_solver/gurobi_interface.cc | 222 ++-- ortools/linear_solver/gurobi_proto_solver.cc | 205 +-- 
ortools/linear_solver/gurobi_proto_solver.h | 15 +- ortools/linear_solver/linear_expr.cc | 46 +- ortools/linear_solver/linear_solver.cc | 252 ++-- ortools/linear_solver/linear_solver.h | 212 ++-- ortools/linear_solver/linear_solver.proto | 4 +- .../linear_solver/linear_solver_callback.cc | 14 +- .../linear_solver/linear_solver_callback.h | 25 +- ortools/linear_solver/model_exporter.cc | 210 ++-- ortools/linear_solver/model_validator.cc | 108 +- .../linear_solver/samples/bin_packing_mip.cc | 3 +- ortools/linear_solver/sat_interface.cc | 52 +- ortools/linear_solver/sat_proto_solver.cc | 8 +- ortools/linear_solver/sat_solver_utils.cc | 8 +- ortools/linear_solver/scip_callback.cc | 84 +- ortools/linear_solver/scip_interface.cc | 200 +-- ortools/linear_solver/scip_proto_solver.cc | 301 +++-- ortools/linear_solver/scip_proto_solver.h | 4 +- ortools/lp_data/lp_data.cc | 104 +- ortools/lp_data/lp_data.h | 93 +- ortools/lp_data/lp_data_utils.cc | 33 +- ortools/lp_data/lp_decomposer.cc | 26 +- ortools/lp_data/lp_print_utils.cc | 2 +- ortools/lp_data/lp_types.h | 62 +- ortools/lp_data/lp_utils.cc | 38 +- ortools/lp_data/lp_utils.h | 100 +- ortools/lp_data/matrix_scaler.cc | 32 +- ortools/lp_data/matrix_utils.cc | 22 +- ortools/lp_data/model_reader.cc | 8 +- ortools/lp_data/mps_reader.cc | 172 +-- ortools/lp_data/proto_utils.cc | 16 +- ortools/lp_data/sparse.cc | 144 +-- ortools/lp_data/sparse_column.cc | 4 +- ortools/lp_data/sparse_vector.h | 125 +- ortools/port/file_nonport.cc | 10 +- ortools/sat/BUILD | 18 + ortools/sat/all_different.cc | 60 +- ortools/sat/boolean_problem.cc | 129 +- ortools/sat/circuit.cc | 70 +- ortools/sat/clause.cc | 199 ++- ortools/sat/clause.h | 152 ++- ortools/sat/cp_constraints.cc | 10 +- ortools/sat/cp_model.cc | 232 ++-- ortools/sat/cp_model_checker.cc | 190 +-- ortools/sat/cp_model_expand.cc | 204 +-- ortools/sat/cp_model_lns.cc | 95 +- ortools/sat/cp_model_loader.cc | 280 ++--- ortools/sat/cp_model_objective.cc | 6 +- 
ortools/sat/cp_model_postsolve.cc | 20 +- ortools/sat/cp_model_presolve.cc | 596 ++++----- ortools/sat/cp_model_presolve.h | 98 +- ortools/sat/cp_model_search.cc | 46 +- ortools/sat/cp_model_solver.cc | 455 ++++--- ortools/sat/cp_model_symmetries.cc | 24 +- ortools/sat/cp_model_utils.cc | 38 +- ortools/sat/cumulative.cc | 42 +- ortools/sat/cumulative_energy.cc | 26 +- ortools/sat/cuts.cc | 297 ++--- ortools/sat/diffn.cc | 36 +- ortools/sat/disjunctive.cc | 104 +- ortools/sat/drat_checker.cc | 56 +- ortools/sat/drat_proof_handler.cc | 7 +- ortools/sat/encoding.cc | 110 +- ortools/sat/encoding.h | 79 +- ortools/sat/feasibility_pump.cc | 34 +- ortools/sat/feasibility_pump.h | 25 +- ortools/sat/implied_bounds.cc | 6 +- ortools/sat/integer.cc | 172 +-- ortools/sat/integer.h | 223 ++-- ortools/sat/integer_expr.cc | 84 +- ortools/sat/integer_search.cc | 190 ++- ortools/sat/integer_search.h | 60 +- ortools/sat/intervals.cc | 42 +- ortools/sat/intervals.h | 119 +- ortools/sat/linear_constraint.cc | 54 +- ortools/sat/linear_constraint_manager.cc | 32 +- ortools/sat/linear_programming_constraint.cc | 188 ++- ortools/sat/linear_programming_constraint.h | 114 +- ortools/sat/linear_relaxation.cc | 148 +-- ortools/sat/lp_utils.cc | 142 +-- ortools/sat/optimization.cc | 150 +-- ortools/sat/optimization.h | 65 +- ortools/sat/pb_constraint.cc | 114 +- ortools/sat/precedences.cc | 95 +- ortools/sat/precedences.h | 180 +-- ortools/sat/presolve_context.cc | 106 +- ortools/sat/presolve_context.h | 72 +- ortools/sat/presolve_util.cc | 24 +- ortools/sat/probing.cc | 65 +- ortools/sat/pseudo_costs.cc | 12 +- ortools/sat/restart.cc | 4 +- ortools/sat/rins.cc | 30 +- ortools/sat/rins.h | 19 +- ortools/sat/samples/multiple_knapsack_sat.cc | 2 +- .../stop_after_n_solutions_sample_sat.cc | 1 + ortools/sat/sat_decision.cc | 12 +- ortools/sat/sat_inprocessing.cc | 38 +- ortools/sat/sat_parameters.proto | 24 +- ortools/sat/sat_solver.cc | 147 ++- ortools/sat/scheduling_constraints.cc | 44 +- 
ortools/sat/simplification.cc | 106 +- ortools/sat/subsolver.cc | 20 +- ortools/sat/subsolver.h | 9 +- ortools/sat/symmetry.cc | 34 +- ortools/sat/synchronization.cc | 80 +- ortools/sat/table.cc | 118 +- ortools/sat/theta_tree.cc | 10 +- ortools/sat/timetable.cc | 16 +- ortools/sat/timetable_edgefinding.cc | 14 +- ortools/sat/util.cc | 10 +- ortools/sat/var_domination.cc | 1094 +++++++++++++++++ ortools/sat/var_domination.h | 263 ++++ ortools/sat/zero_half_cuts.cc | 30 +- ortools/util/file_util.cc | 4 +- ortools/util/fp_utils.cc | 44 +- ortools/util/graph_export.cc | 60 +- ortools/util/piecewise_linear_function.cc | 72 +- ortools/util/proto_tools.cc | 14 +- ortools/util/range_query_function.cc | 26 +- ortools/util/saturated_arithmetic.h | 42 +- ortools/util/sigint.cc | 2 +- ortools/util/sorted_interval_list.cc | 118 +- ortools/util/sorted_interval_list.h | 82 +- ortools/util/stats.cc | 20 +- ortools/util/time_limit.cc | 8 +- ortools/util/time_limit.h | 45 +- 220 files changed, 9673 insertions(+), 8137 deletions(-) create mode 100644 ortools/sat/var_domination.cc create mode 100644 ortools/sat/var_domination.h diff --git a/makefiles/Makefile.gen.mk b/makefiles/Makefile.gen.mk index 9d83399d61..1d9e8b08f4 100644 --- a/makefiles/Makefile.gen.mk +++ b/makefiles/Makefile.gen.mk @@ -1283,6 +1283,7 @@ SAT_DEPS = \ $(SRC_DIR)/ortools/sat/theta_tree.h \ $(SRC_DIR)/ortools/sat/timetable_edgefinding.h \ $(SRC_DIR)/ortools/sat/timetable.h \ + $(SRC_DIR)/ortools/sat/var_domination.h \ $(SRC_DIR)/ortools/sat/util.h \ $(SRC_DIR)/ortools/sat/zero_half_cuts.h \ $(GEN_DIR)/ortools/sat/boolean_problem.pb.h \ @@ -1349,6 +1350,7 @@ SAT_LIB_OBJS = \ $(OBJ_DIR)/sat/timetable.$O \ $(OBJ_DIR)/sat/timetable_edgefinding.$O \ $(OBJ_DIR)/sat/util.$O \ + $(OBJ_DIR)/sat/var_domination.$O \ $(OBJ_DIR)/sat/zero_half_cuts.$O \ $(OBJ_DIR)/sat/boolean_problem.pb.$O \ $(OBJ_DIR)/sat/cp_model.pb.$O \ @@ -2671,6 +2673,16 @@ objs/sat/util.$O: ortools/sat/util.cc ortools/sat/util.h \ 
ortools/base/stl_util.h | $(OBJ_DIR)/sat $(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Ssat$Sutil.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Sutil.$O +objs/sat/var_domination.$O: ortools/sat/var_domination.cc ortools/sat/var_domination.h \ + ortools/algorithms/sparse_permutation.h ortools/base/logging.h \ + ortools/base/commandlineflags.h ortools/base/integral_types.h \ + ortools/base/log_severity.h ortools/base/macros.h \ + ortools/base/vlog_is_on.h ortools/base/int_type_indexed_vector.h \ + ortools/base/int_type.h ortools/sat/sat_base.h ortools/sat/model.h \ + ortools/base/map_util.h ortools/base/typeid.h ortools/util/bitset.h \ + ortools/util/stats.h ortools/base/timer.h ortools/base/basictypes.h | $(OBJ_DIR)/sat + $(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Ssat$Svar_domination.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Svar_domination.$O + objs/sat/zero_half_cuts.$O: ortools/sat/zero_half_cuts.cc \ ortools/sat/zero_half_cuts.h ortools/lp_data/lp_types.h \ ortools/base/basictypes.h ortools/base/integral_types.h \ @@ -4784,4 +4796,3 @@ $(GEN_DIR)/ortools/gscip/gscip.pb.h: \ $(OBJ_DIR)/gscip/gscip.pb.$O: \ $(GEN_DIR)/ortools/gscip/gscip.pb.cc | $(OBJ_DIR)/gscip $(CCC) $(CFLAGS) -c $(GEN_PATH)$Sortools$Sgscip$Sgscip.pb.cc $(OBJ_OUT)$(OBJ_DIR)$Sgscip$Sgscip.pb.$O - diff --git a/ortools/algorithms/dynamic_partition.cc b/ortools/algorithms/dynamic_partition.cc index 5519862dd6..2886ca8b54 100644 --- a/ortools/algorithms/dynamic_partition.cc +++ b/ortools/algorithms/dynamic_partition.cc @@ -23,7 +23,7 @@ namespace operations_research { namespace { uint64 FprintOfInt32(int i) { - return util_hash::MurmurHash64(reinterpret_cast(&i), + return util_hash::MurmurHash64(reinterpret_cast(&i), sizeof(int)); } } // namespace @@ -40,11 +40,12 @@ DynamicPartition::DynamicPartition(int num_elements) { uint64 fprint = 0; for (int i = 0; i < num_elements; ++i) fprint ^= FprintOfInt32(i); part_.push_back(Part(/*start_index=*/0, /*end_index=*/num_elements, - /*parent_part=*/0, /*fprint=*/fprint)); + /*parent_part=*/0, + 
/*fprint=*/fprint)); } DynamicPartition::DynamicPartition( - const std::vector &initial_part_of_element) { + const std::vector& initial_part_of_element) { if (initial_part_of_element.empty()) return; part_of_ = initial_part_of_element; const int n = part_of_.size(); @@ -72,11 +73,11 @@ DynamicPartition::DynamicPartition( // Now that we have the correct start indices, we set the end indices to the // start indices, and incrementally add all elements to their part, adjusting // the end indices as we go. - for (Part &part : part_) part.end_index = part.start_index; + for (Part& part : part_) part.end_index = part.start_index; element_.assign(n, -1); index_of_.assign(n, -1); for (int element = 0; element < n; ++element) { - Part *const part = &part_[part_of_[element]]; + Part* const part = &part_[part_of_[element]]; element_[part->end_index] = element; index_of_[element] = part->end_index; ++part->end_index; @@ -92,7 +93,7 @@ DynamicPartition::DynamicPartition( } } -void DynamicPartition::Refine(const std::vector &distinguished_subset) { +void DynamicPartition::Refine(const std::vector& distinguished_subset) { // tmp_counter_of_part_[i] will contain the number of // elements in distinguished_subset that were part of part #i. 
tmp_counter_of_part_.resize(NumParts(), 0); @@ -162,7 +163,7 @@ void DynamicPartition::UndoRefineUntilNumPartsEqual(int original_num_parts) { DCHECK_GE(original_num_parts, 1); while (NumParts() > original_num_parts) { const int part_index = NumParts() - 1; - const Part &part = part_[part_index]; + const Part& part = part_[part_index]; const int parent_part_index = part.parent_part; DCHECK_LT(parent_part_index, part_index) << "UndoRefineUntilNumPartsEqual()" " called with " @@ -172,7 +173,7 @@ void DynamicPartition::UndoRefineUntilNumPartsEqual(int original_num_parts) { for (const int element : ElementsInPart(part_index)) { part_of_[element] = parent_part_index; } - Part *const parent_part = &part_[parent_part_index]; + Part* const parent_part = &part_[parent_part_index]; DCHECK_EQ(part.start_index, parent_part->end_index); parent_part->end_index = part.end_index; parent_part->fprint ^= part.fprint; @@ -184,7 +185,7 @@ std::string DynamicPartition::DebugString(DebugStringSorting sorting) const { if (sorting != SORT_LEXICOGRAPHICALLY && sorting != SORT_BY_PART) { return absl::StrFormat("Unsupported sorting: %d", sorting); } - std::vector > parts; + std::vector> parts; for (int i = 0; i < NumParts(); ++i) { IterablePart iterable_part = ElementsInPart(i); parts.emplace_back(iterable_part.begin(), iterable_part.end()); @@ -194,7 +195,7 @@ std::string DynamicPartition::DebugString(DebugStringSorting sorting) const { std::sort(parts.begin(), parts.end()); } std::string out; - for (const std::vector &part : parts) { + for (const std::vector& part : parts) { if (!out.empty()) out += " | "; out += absl::StrJoin(part, " "); } @@ -241,7 +242,7 @@ int MergingPartition::GetRootAndCompressPath(int node) { return root; } -void MergingPartition::KeepOnlyOneNodePerPart(std::vector *nodes) { +void MergingPartition::KeepOnlyOneNodePerPart(std::vector* nodes) { int num_nodes_kept = 0; for (const int node : *nodes) { const int representative = GetRootAndCompressPath(node); @@ -258,7 
+259,7 @@ void MergingPartition::KeepOnlyOneNodePerPart(std::vector *nodes) { } int MergingPartition::FillEquivalenceClasses( - std::vector *node_equivalence_classes) { + std::vector* node_equivalence_classes) { node_equivalence_classes->assign(NumNodes(), -1); int num_roots = 0; for (int node = 0; node < NumNodes(); ++node) { @@ -273,17 +274,17 @@ int MergingPartition::FillEquivalenceClasses( } std::string MergingPartition::DebugString() { - std::vector > sorted_parts(NumNodes()); + std::vector> sorted_parts(NumNodes()); for (int i = 0; i < NumNodes(); ++i) { sorted_parts[GetRootAndCompressPath(i)].push_back(i); } - for (std::vector &part : sorted_parts) + for (std::vector& part : sorted_parts) std::sort(part.begin(), part.end()); std::sort(sorted_parts.begin(), sorted_parts.end()); // Note: typically, a lot of elements of "sorted_parts" will be empty, // but these won't be visible in the string that we construct below. std::string out; - for (const std::vector &part : sorted_parts) { + for (const std::vector& part : sorted_parts) { if (!out.empty()) out += " | "; out += absl::StrJoin(part, " "); } diff --git a/ortools/algorithms/dynamic_permutation.cc b/ortools/algorithms/dynamic_permutation.cc index 2022327eab..afcceb525a 100644 --- a/ortools/algorithms/dynamic_permutation.cc +++ b/ortools/algorithms/dynamic_permutation.cc @@ -24,8 +24,8 @@ DynamicPermutation::DynamicPermutation(int n) for (int i = 0; i < Size(); ++i) image_[i] = ancestor_[i] = i; } -void DynamicPermutation::AddMappings(const std::vector &src, - const std::vector &dst) { +void DynamicPermutation::AddMappings(const std::vector& src, + const std::vector& dst) { DCHECK_EQ(src.size(), dst.size()); mapping_src_size_stack_.push_back(mapping_src_stack_.size()); mapping_src_stack_.reserve(mapping_src_stack_.size() + src.size()); @@ -47,7 +47,7 @@ void DynamicPermutation::AddMappings(const std::vector &src, } void DynamicPermutation::UndoLastMappings( - std::vector *undone_mapping_src) { + std::vector* 
undone_mapping_src) { DCHECK(undone_mapping_src != nullptr); undone_mapping_src->clear(); if (mapping_src_size_stack_.empty()) return; // Nothing to undo. diff --git a/ortools/algorithms/find_graph_symmetries.cc b/ortools/algorithms/find_graph_symmetries.cc index c086e4fec9..ac1b520828 100644 --- a/ortools/algorithms/find_graph_symmetries.cc +++ b/ortools/algorithms/find_graph_symmetries.cc @@ -43,13 +43,13 @@ using util::GraphIsSymmetric; namespace { // Some routines used below. -void SwapFrontAndBack(std::vector *v) { +void SwapFrontAndBack(std::vector* v) { DCHECK(!v->empty()); std::swap((*v)[0], v->back()); } -bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition &p1, - const DynamicPartition &p2, +bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition& p1, + const DynamicPartition& p2, int part_index) { const int num_parts = p1.NumParts(); if (p2.NumParts() != num_parts) return false; @@ -71,9 +71,9 @@ bool PartitionsAreCompatibleAfterPartIndex(const DynamicPartition &p1, // be repeated in the list), and see if that's sufficient to make the whole // graph symmetry finder support multi-arcs. 
template -bool ListMapsToList(const List &l1, const List &l2, - const DynamicPermutation &permutation, - std::vector *tmp_node_mask) { +bool ListMapsToList(const List& l1, const List& l2, + const DynamicPermutation& permutation, + std::vector* tmp_node_mask) { int num_elements_delta = 0; bool match = true; for (const int mapped_x : l2) { @@ -98,7 +98,7 @@ bool ListMapsToList(const List &l1, const List &l2, } } // namespace -GraphSymmetryFinder::GraphSymmetryFinder(const Graph &graph, bool is_undirected) +GraphSymmetryFinder::GraphSymmetryFinder(const Graph& graph, bool is_undirected) : graph_(graph), tmp_dynamic_permutation_(NumNodes()), tmp_node_mask_(NumNodes(), false), @@ -146,7 +146,7 @@ GraphSymmetryFinder::GraphSymmetryFinder(const Graph &graph, bool is_undirected) } bool GraphSymmetryFinder::IsGraphAutomorphism( - const DynamicPermutation &permutation) const { + const DynamicPermutation& permutation) const { for (const int base : permutation.AllMappingsSrc()) { const int image = permutation.ImageOf(base); if (image == base) continue; @@ -175,10 +175,10 @@ namespace { // Specialized subroutine, to avoid code duplication: see its call site // and its self-explanatory code. template -inline void IncrementCounterForNonSingletons(const T &nodes, - const DynamicPartition &partition, - std::vector *node_count, - std::vector *nodes_seen) { +inline void IncrementCounterForNonSingletons(const T& nodes, + const DynamicPartition& partition, + std::vector* node_count, + std::vector* nodes_seen) { for (const int node : nodes) { if (partition.ElementsInSamePartAs(node).size() == 1) continue; const int count = ++(*node_count)[node]; @@ -188,9 +188,9 @@ inline void IncrementCounterForNonSingletons(const T &nodes, } // namespace void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency( - int first_unrefined_part_index, DynamicPartition *partition) { + int first_unrefined_part_index, DynamicPartition* partition) { // Rename, for readability of the code below. 
- std::vector &tmp_nodes_with_nonzero_degree = tmp_stack_; + std::vector& tmp_nodes_with_nonzero_degree = tmp_stack_; // Assuming that the partition was refined based on the adjacency on // parts [0 .. first_unrefined_part_index) already, we simply need to @@ -246,7 +246,7 @@ void GraphSymmetryFinder::RecursivelyRefinePartitionByAdjacency( } void GraphSymmetryFinder::DistinguishNodeInPartition( - int node, DynamicPartition *partition, std::vector *new_singletons) { + int node, DynamicPartition* partition, std::vector* new_singletons) { const int original_num_parts = partition->NumParts(); partition->Refine(std::vector(1, node)); RecursivelyRefinePartitionByAdjacency(partition->PartOf(node), partition); @@ -276,8 +276,8 @@ void GraphSymmetryFinder::DistinguishNodeInPartition( namespace { void MergeNodeEquivalenceClassesAccordingToPermutation( - const SparsePermutation &perm, MergingPartition *node_equivalence_classes, - DenseDoublyLinkedList *sorted_representatives) { + const SparsePermutation& perm, MergingPartition* node_equivalence_classes, + DenseDoublyLinkedList* sorted_representatives) { for (int c = 0; c < perm.NumCycles(); ++c) { // TODO(user): use the global element->image iterator when it exists. int prev = -1; @@ -307,10 +307,10 @@ void MergeNodeEquivalenceClassesAccordingToPermutation( // representatives of the nodes of the targeted part are contiguous in that // linked list. void GetAllOtherRepresentativesInSamePartAs( - int representative_node, const DynamicPartition &partition, - const DenseDoublyLinkedList &representatives_sorted_by_index_in_partition, - MergingPartition *node_equivalence_classes, // Only for debugging. - std::vector *pruned_other_nodes) { + int representative_node, const DynamicPartition& partition, + const DenseDoublyLinkedList& representatives_sorted_by_index_in_partition, + MergingPartition* node_equivalence_classes, // Only for debugging. 
+ std::vector* pruned_other_nodes) { pruned_other_nodes->clear(); const int part_index = partition.PartOf(representative_node); // Iterate on all contiguous representatives after the initial one... @@ -342,7 +342,7 @@ void GetAllOtherRepresentativesInSamePartAs( } } node_equivalence_classes->KeepOnlyOneNodePerPart(&expected_output); - for (int &x : expected_output) x = node_equivalence_classes->GetRoot(x); + for (int& x : expected_output) x = node_equivalence_classes->GetRoot(x); std::sort(expected_output.begin(), expected_output.end()); std::vector sorted_output = *pruned_other_nodes; std::sort(sorted_output.begin(), sorted_output.end()); @@ -353,9 +353,9 @@ void GetAllOtherRepresentativesInSamePartAs( } // namespace absl::Status GraphSymmetryFinder::FindSymmetries( - double time_limit_seconds, std::vector *node_equivalence_classes_io, - std::vector > *generators, - std::vector *factorized_automorphism_group_size) { + double time_limit_seconds, std::vector* node_equivalence_classes_io, + std::vector>* generators, + std::vector* factorized_automorphism_group_size) { // Initialization. time_limit_ = absl::make_unique(time_limit_seconds); IF_STATS_ENABLED(stats_.initialization_time.StartTimer()); @@ -380,7 +380,7 @@ absl::Status GraphSymmetryFinder::FindSymmetries( << base_partition.DebugString(DynamicPartition::SORT_BY_PART); MergingPartition node_equivalence_classes(NumNodes()); - std::vector > permutations_displacing_node(NumNodes()); + std::vector> permutations_displacing_node(NumNodes()); std::vector potential_root_image_nodes; IF_STATS_ENABLED(stats_.initialization_time.StopTimerAndAddElapsedTime()); @@ -551,9 +551,9 @@ namespace { // Knowing that we want to map some element of part #part_index of // "base_partition" to part #part_index of "image_partition", pick the "best" // such mapping, for the global search algorithm. 
-inline void GetBestMapping(const DynamicPartition &base_partition, - const DynamicPartition &image_partition, - int part_index, int *base_node, int *image_node) { +inline void GetBestMapping(const DynamicPartition& base_partition, + const DynamicPartition& image_partition, + int part_index, int* base_node, int* image_node) { // As of pending CL 66620435, we've loosely tried three variants of // GetBestMapping(): // 1) Just take the first element of the base part, map it to the first @@ -566,7 +566,7 @@ inline void GetBestMapping(const DynamicPartition &base_partition, // Variant 2) gives the best results on most benchmarks, in terms of speed, // but 3) yields much smaller supports for the sat_holeXXX benchmarks, as // long as it's combined with the other tweak enabled by - // absl::GetFlag(FLAGS_minimize_permutation_support_size). + // FLAGS_minimize_permutation_support_size. if (absl::GetFlag(FLAGS_minimize_permutation_support_size)) { // Variant 3). for (const int node : base_partition.ElementsInPart(part_index)) { @@ -595,11 +595,11 @@ inline void GetBestMapping(const DynamicPartition &base_partition, // notably the search state stack. This may improve readability. std::unique_ptr GraphSymmetryFinder::FindOneSuitablePermutation( - int root_node, int root_image_node, DynamicPartition *base_partition, - DynamicPartition *image_partition, - const std::vector > - &generators_found_so_far, - const std::vector > &permutations_displacing_node) { + int root_node, int root_image_node, DynamicPartition* base_partition, + DynamicPartition* image_partition, + const std::vector>& + generators_found_so_far, + const std::vector>& permutations_displacing_node) { // DCHECKs() and statistics. 
ScopedTimeDistributionUpdater search_time_updater(&stats_.search_time); DCHECK_EQ("", tmp_dynamic_permutation_.DebugString()); @@ -639,7 +639,7 @@ GraphSymmetryFinder::FindOneSuitablePermutation( // // Also, one should note that the base partition (before its refinement on // base_node) was deemed compatible with the image partition as it is now. - const SearchState &ss = search_states_.back(); + const SearchState& ss = search_states_.back(); const int image_node = ss.first_image_node >= 0 ? ss.first_image_node : ss.remaining_pruned_image_nodes.back(); @@ -773,7 +773,7 @@ GraphSymmetryFinder::FindOneSuitablePermutation( // 'current' image node of the upper SearchState (which might lead to us // backtracking it, and so on). while (!search_states_.empty()) { - SearchState *const last_ss = &search_states_.back(); + SearchState* const last_ss = &search_states_.back(); image_partition->UndoRefineUntilNumPartsEqual( last_ss->num_parts_before_trying_to_map_base_node); if (last_ss->first_image_node >= 0) { @@ -854,9 +854,9 @@ GraphSymmetryFinder::TailsOfIncomingArcsTo(int node) const { } void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition( - const DynamicPartition &partition, - const std::vector > &permutations, - const std::vector &permutation_indices, std::vector *nodes) { + const DynamicPartition& partition, + const std::vector>& permutations, + const std::vector& permutation_indices, std::vector* nodes) { VLOG(4) << " Pruning [" << absl::StrJoin(*nodes, ", ") << "]"; // TODO(user): apply a smarter test to decide whether to do the pruning // or not: we can accurately estimate the cost of pruning (iterate through @@ -869,14 +869,14 @@ void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition( // Iterate on all targeted permutations. If they are compatible, apply // them to tmp_partition_ which will contain the incrementally merged // equivalence classes. 
- std::vector &tmp_nodes_on_support = + std::vector& tmp_nodes_on_support = tmp_stack_; // Rename, for readability. DCHECK(tmp_nodes_on_support.empty()); // TODO(user): investigate further optimizations: maybe it's possible // to incrementally maintain the set of permutations that is compatible // with the current partition, instead of recomputing it here? for (const int p : permutation_indices) { - const SparsePermutation &permutation = *permutations[p]; + const SparsePermutation& permutation = *permutations[p]; // First, a quick compatibility check: the permutation's cycles must be // smaller or equal to the size of the part that they are included in. bool compatible = true; @@ -929,11 +929,11 @@ void GraphSymmetryFinder::PruneOrbitsUnderPermutationsCompatibleWithPartition( } bool GraphSymmetryFinder::ConfirmFullMatchOrFindNextMappingDecision( - const DynamicPartition &base_partition, - const DynamicPartition &image_partition, - const DynamicPermutation ¤t_permutation_candidate, - int *min_potential_mismatching_part_index_io, int *next_base_node, - int *next_image_node) const { + const DynamicPartition& base_partition, + const DynamicPartition& image_partition, + const DynamicPermutation& current_permutation_candidate, + int* min_potential_mismatching_part_index_io, int* next_base_node, + int* next_image_node) const { *next_base_node = -1; *next_image_node = -1; diff --git a/ortools/algorithms/find_graph_symmetries.h b/ortools/algorithms/find_graph_symmetries.h index 9854cc98eb..3006a4d953 100644 --- a/ortools/algorithms/find_graph_symmetries.h +++ b/ortools/algorithms/find_graph_symmetries.h @@ -55,12 +55,12 @@ class GraphSymmetryFinder { // // "graph" must not have multi-arcs. // TODO(user): support multi-arcs. - GraphSymmetryFinder(const Graph &graph, bool is_undirected); + GraphSymmetryFinder(const Graph& graph, bool is_undirected); // Whether the given permutation is an automorphism of the graph given at // construction. 
This costs O(sum(degree(x))) (the sum is over all nodes x // that are displaced by the permutation). - bool IsGraphAutomorphism(const DynamicPermutation &permutation) const; + bool IsGraphAutomorphism(const DynamicPermutation& permutation) const; // Find a set of generators of the automorphism subgroup of the graph that // respects the given node equivalence classes. The generators are themselves @@ -101,9 +101,9 @@ class GraphSymmetryFinder { // partially valid: its last element may be undervalued. But all prior // elements are valid factors of the automorphism group size. absl::Status FindSymmetries( - double time_limit_seconds, std::vector *node_equivalence_classes_io, - std::vector > *generators, - std::vector *factorized_automorphism_group_size); + double time_limit_seconds, std::vector* node_equivalence_classes_io, + std::vector >* generators, + std::vector* factorized_automorphism_group_size); // Fully refine the partition of nodes, using the graph as symmetry breaker. // This means applying the following steps on each part P of the partition: @@ -117,7 +117,7 @@ class GraphSymmetryFinder { // already partially refined on all parts #0...#K, then you should set // "first_unrefined_part_index" to K+1. void RecursivelyRefinePartitionByAdjacency(int first_unrefined_part_index, - DynamicPartition *partition); + DynamicPartition* partition); // **** Methods below are public FOR TESTING ONLY. **** @@ -125,11 +125,11 @@ class GraphSymmetryFinder { // fully refined, further refine it by {node}, and propagate by adjacency. // Also, optionally collect all the new singletons of the partition in // "new_singletons", sorted by their part number in the partition. 
- void DistinguishNodeInPartition(int node, DynamicPartition *partition, - std::vector *new_singletons_or_null); + void DistinguishNodeInPartition(int node, DynamicPartition* partition, + std::vector* new_singletons_or_null); private: - const Graph &graph_; + const Graph& graph_; inline int NumNodes() const { return graph_.num_nodes(); } @@ -162,11 +162,11 @@ class GraphSymmetryFinder { // is an inverted index from each node to all permutations (that we found) // that displace it. std::unique_ptr FindOneSuitablePermutation( - int root_node, int root_image_node, DynamicPartition *base_partition, - DynamicPartition *image_partition, - const std::vector > - &generators_found_so_far, - const std::vector > &permutations_displacing_node); + int root_node, int root_image_node, DynamicPartition* base_partition, + DynamicPartition* image_partition, + const std::vector >& + generators_found_so_far, + const std::vector >& permutations_displacing_node); // Data structure used by FindOneSuitablePermutation(). See the .cc struct SearchState { @@ -211,11 +211,11 @@ class GraphSymmetryFinder { // even if the partitions aren't actually a full match, because it uses // fingerprints to compare part. This should almost never happen. 
bool ConfirmFullMatchOrFindNextMappingDecision( - const DynamicPartition &base_partition, - const DynamicPartition &image_partition, - const DynamicPermutation ¤t_permutation_candidate, - int *min_potential_mismatching_part_index_io, int *next_base_node, - int *next_image_node) const; + const DynamicPartition& base_partition, + const DynamicPartition& image_partition, + const DynamicPermutation& current_permutation_candidate, + int* min_potential_mismatching_part_index_io, int* next_base_node, + int* next_image_node) const; // Subroutine of FindOneSuitablePermutation(), split out for modularity: // Keep only one node of "nodes" per orbit, where the orbits are described @@ -223,9 +223,9 @@ class GraphSymmetryFinder { // "permutation_indices" and that are compatible with "partition". // For each orbit, keep the first node that appears in "nodes". void PruneOrbitsUnderPermutationsCompatibleWithPartition( - const DynamicPartition &partition, - const std::vector > &all_permutations, - const std::vector &permutation_indices, std::vector *nodes); + const DynamicPartition& partition, + const std::vector >& all_permutations, + const std::vector& permutation_indices, std::vector* nodes); // Temporary objects used by some of the class methods, and owned by the // class to avoid (costly) re-allocation. Their resting states are described @@ -236,8 +236,7 @@ class GraphSymmetryFinder { std::vector tmp_stack_; // Empty. std::vector > tmp_nodes_with_degree_; // [0..N-1] = []. MergingPartition tmp_partition_; // Reset(N). - std::vector - tmp_compatible_permutations_; // Empty. + std::vector tmp_compatible_permutations_; // Empty. // Internal statistics, used for performance tuning and debugging. struct Stats : public StatsGroup { diff --git a/ortools/algorithms/hungarian.cc b/ortools/algorithms/hungarian.cc index 0374232d89..8f537d7ea5 100644 --- a/ortools/algorithms/hungarian.cc +++ b/ortools/algorithms/hungarian.cc @@ -38,15 +38,15 @@ class HungarianOptimizer { // be square (i.e. 
we can have different numbers of agents and tasks), but it // must be regular (i.e. there must be the same number of entries in each row // of the matrix). - explicit HungarianOptimizer(const std::vector > &costs); + explicit HungarianOptimizer(const std::vector>& costs); // Find an assignment which maximizes the total cost. // Returns the assignment in the two vectors passed as argument. // preimage[i] is assigned to image[i]. - void Maximize(std::vector *preimage, std::vector *image); + void Maximize(std::vector* preimage, std::vector* image); // Like Maximize(), but minimizing the cost instead. - void Minimize(std::vector *preimage, std::vector *image); + void Minimize(std::vector* preimage, std::vector* image); private: typedef void (HungarianOptimizer::*Step)(); @@ -56,7 +56,7 @@ class HungarianOptimizer { // Convert the final cost matrix into a set of assignments of preimage->image. // Returns the assignment in the two vectors passed as argument, the same as // Minimize and Maximize - void FindAssignments(std::vector *preimage, std::vector *image); + void FindAssignments(std::vector* preimage, std::vector* image); // Is the cell (row, col) starred? bool IsStarred(int row, int col) const { return marks_[row][col] == STAR; } @@ -123,7 +123,7 @@ class HungarianOptimizer { // Find an uncovered zero and store its coordinates in (zeroRow_, zeroCol_) // and return true, or return false if no such cell exists. - bool FindZero(int *zero_row, int *zero_col) const; + bool FindZero(int* zero_row, int* zero_col) const; // Print the matrix to stdout (for debugging.) void PrintMatrix(); @@ -177,7 +177,7 @@ class HungarianOptimizer { int matrix_size_; // The expanded cost matrix. - std::vector > costs_; + std::vector> costs_; // The greatest cost in the initial cost matrix. double max_cost_; @@ -187,7 +187,7 @@ class HungarianOptimizer { std::vector cols_covered_; // The marks_ (star/prime/none) on each element of the cost matrix. 
- std::vector > marks_; + std::vector> marks_; // The number of stars in each column - used to speed up coverStarredZeroes. std::vector stars_in_col_; @@ -205,7 +205,7 @@ class HungarianOptimizer { }; HungarianOptimizer::HungarianOptimizer( - const std::vector > &costs) + const std::vector>& costs) : matrix_size_(0), costs_(), max_cost_(0), @@ -270,8 +270,8 @@ HungarianOptimizer::HungarianOptimizer( // Find an assignment which maximizes the total cost. // Return an array of pairs of integers. Each pair (i, j) corresponds to // assigning agent i to task j. -void HungarianOptimizer::Maximize(std::vector *preimage, - std::vector *image) { +void HungarianOptimizer::Maximize(std::vector* preimage, + std::vector* image) { // Find a maximal assignment by subtracting each of the // original costs from max_cost_ and then minimizing. for (int row = 0; row < width_; ++row) { @@ -285,8 +285,8 @@ void HungarianOptimizer::Maximize(std::vector *preimage, // Find an assignment which minimizes the total cost. // Return an array of pairs of integers. Each pair (i, j) corresponds to // assigning agent i to task j. -void HungarianOptimizer::Minimize(std::vector *preimage, - std::vector *image) { +void HungarianOptimizer::Minimize(std::vector* preimage, + std::vector* image) { DoMunkres(); FindAssignments(preimage, image); } @@ -294,8 +294,8 @@ void HungarianOptimizer::Minimize(std::vector *preimage, // Convert the final cost matrix into a set of assignments of agents -> tasks. 
// Return an array of pairs of integers, the same as the return values of // Minimize() and Maximize() -void HungarianOptimizer::FindAssignments(std::vector *preimage, - std::vector *image) { +void HungarianOptimizer::FindAssignments(std::vector* preimage, + std::vector* image) { preimage->clear(); image->clear(); for (int row = 0; row < width_; ++row) { @@ -396,7 +396,7 @@ double HungarianOptimizer::FindSmallestUncovered() const { // Find an uncovered zero and store its co-ordinates in (zeroRow, zeroCol) // and return true, or return false if no such cell exists. -bool HungarianOptimizer::FindZero(int *zero_row, int *zero_col) const { +bool HungarianOptimizer::FindZero(int* zero_row, int* zero_col) const { for (int row = 0; row < matrix_size_; ++row) { if (RowCovered(row)) { continue; @@ -642,9 +642,9 @@ void HungarianOptimizer::AugmentPath() { state_ = &HungarianOptimizer::PrimeZeroes; } -bool InputContainsNan(const std::vector > &input) { - for (const auto &subvector : input) { - for (const auto &num : subvector) { +bool InputContainsNan(const std::vector>& input) { + for (const auto& subvector : input) { + for (const auto& num : subvector) { if (std::isnan(num)) { LOG(ERROR) << "The provided input contains " << num << "."; return true; @@ -655,9 +655,9 @@ bool InputContainsNan(const std::vector > &input) { } void MinimizeLinearAssignment( - const std::vector > &cost, - absl::flat_hash_map *direct_assignment, - absl::flat_hash_map *reverse_assignment) { + const std::vector>& cost, + absl::flat_hash_map* direct_assignment, + absl::flat_hash_map* reverse_assignment) { if (InputContainsNan(cost)) { LOG(ERROR) << "Returning before invoking the Hungarian optimizer."; return; @@ -673,9 +673,9 @@ void MinimizeLinearAssignment( } void MaximizeLinearAssignment( - const std::vector > &cost, - absl::flat_hash_map *direct_assignment, - absl::flat_hash_map *reverse_assignment) { + const std::vector>& cost, + absl::flat_hash_map* direct_assignment, + absl::flat_hash_map* 
reverse_assignment) { if (InputContainsNan(cost)) { LOG(ERROR) << "Returning before invoking the Hungarian optimizer."; return; diff --git a/ortools/algorithms/knapsack_solver.cc b/ortools/algorithms/knapsack_solver.cc index e7695c84a4..923cf1920a 100644 --- a/ortools/algorithms/knapsack_solver.cc +++ b/ortools/algorithms/knapsack_solver.cc @@ -37,8 +37,8 @@ const int kMaxNumberOf64Items = 64; struct CompareKnapsackItemsInDecreasingEfficiencyOrder { explicit CompareKnapsackItemsInDecreasingEfficiencyOrder(int64 _profit_max) : profit_max(_profit_max) {} - bool operator()(const KnapsackItemPtr &item1, - const KnapsackItemPtr &item2) const { + bool operator()(const KnapsackItemPtr& item1, + const KnapsackItemPtr& item2) const { return item1->GetEfficiency(profit_max) > item2->GetEfficiency(profit_max); } const int64 profit_max; @@ -50,8 +50,8 @@ struct CompareKnapsackItemsInDecreasingEfficiencyOrder { // prefer the one with the highest current profit, ie. usually the one closer // to a leaf. In practice, the main advantage is to have smaller path. 
struct CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder { - bool operator()(const KnapsackSearchNode *node_1, - const KnapsackSearchNode *node_2) const { + bool operator()(const KnapsackSearchNode* node_1, + const KnapsackSearchNode* node_2) const { const int64 profit_upper_bound_1 = node_1->profit_upper_bound(); const int64 profit_upper_bound_2 = node_2->profit_upper_bound(); if (profit_upper_bound_1 == profit_upper_bound_2) { @@ -62,7 +62,7 @@ struct CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder { }; typedef std::priority_queue< - KnapsackSearchNode *, std::vector, + KnapsackSearchNode*, std::vector, CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder> SearchQueue; @@ -98,8 +98,8 @@ int64 UpperBoundOfRatio(int64 numerator_1, int64 numerator_2, } // namespace // ----- KnapsackSearchNode ----- -KnapsackSearchNode::KnapsackSearchNode(const KnapsackSearchNode *const parent, - const KnapsackAssignment &assignment) +KnapsackSearchNode::KnapsackSearchNode(const KnapsackSearchNode* const parent, + const KnapsackAssignment& assignment) : depth_((parent == nullptr) ? 0 : parent->depth() + 1), parent_(parent), assignment_(assignment), @@ -108,13 +108,13 @@ KnapsackSearchNode::KnapsackSearchNode(const KnapsackSearchNode *const parent, next_item_id_(kNoSelection) {} // ----- KnapsackSearchPath ----- -KnapsackSearchPath::KnapsackSearchPath(const KnapsackSearchNode &from, - const KnapsackSearchNode &to) +KnapsackSearchPath::KnapsackSearchPath(const KnapsackSearchNode& from, + const KnapsackSearchNode& to) : from_(from), via_(nullptr), to_(to) {} void KnapsackSearchPath::Init() { - const KnapsackSearchNode *node_from = MoveUpToDepth(from_, to_.depth()); - const KnapsackSearchNode *node_to = MoveUpToDepth(to_, from_.depth()); + const KnapsackSearchNode* node_from = MoveUpToDepth(from_, to_.depth()); + const KnapsackSearchNode* node_to = MoveUpToDepth(to_, from_.depth()); CHECK_EQ(node_from->depth(), node_to->depth()); // Find common parent. 
@@ -125,9 +125,9 @@ void KnapsackSearchPath::Init() { via_ = node_from; } -const KnapsackSearchNode *KnapsackSearchPath::MoveUpToDepth( - const KnapsackSearchNode &node, int depth) const { - const KnapsackSearchNode *current_node = &node; +const KnapsackSearchNode* KnapsackSearchPath::MoveUpToDepth( + const KnapsackSearchNode& node, int depth) const { + const KnapsackSearchNode* current_node = &node; while (current_node->depth() > depth) { current_node = current_node->parent(); } @@ -144,7 +144,7 @@ void KnapsackState::Init(int number_of_items) { // Returns false when the state is invalid. bool KnapsackState::UpdateState(bool revert, - const KnapsackAssignment &assignment) { + const KnapsackAssignment& assignment) { if (revert) { is_bound_[assignment.item_id] = false; } else { @@ -159,7 +159,7 @@ bool KnapsackState::UpdateState(bool revert, } // ----- KnapsackPropagator ----- -KnapsackPropagator::KnapsackPropagator(const KnapsackState &state) +KnapsackPropagator::KnapsackPropagator(const KnapsackState& state) : items_(), current_profit_(0), profit_lower_bound_(0), @@ -168,8 +168,8 @@ KnapsackPropagator::KnapsackPropagator(const KnapsackState &state) KnapsackPropagator::~KnapsackPropagator() { gtl::STLDeleteElements(&items_); } -void KnapsackPropagator::Init(const std::vector &profits, - const std::vector &weights) { +void KnapsackPropagator::Init(const std::vector& profits, + const std::vector& weights) { const int number_of_items = profits.size(); items_.assign(number_of_items, static_cast(nullptr)); for (int i = 0; i < number_of_items; ++i) { @@ -182,7 +182,7 @@ void KnapsackPropagator::Init(const std::vector &profits, } bool KnapsackPropagator::Update(bool revert, - const KnapsackAssignment &assignment) { + const KnapsackAssignment& assignment) { if (assignment.is_in) { if (revert) { current_profit_ -= items_[assignment.item_id]->profit; @@ -194,9 +194,9 @@ bool KnapsackPropagator::Update(bool revert, } void KnapsackPropagator::CopyCurrentStateToSolution( - bool 
has_one_propagator, std::vector *solution) const { + bool has_one_propagator, std::vector* solution) const { CHECK(solution != nullptr); - for (const KnapsackItem *const item : items_) { + for (const KnapsackItem* const item : items_) { const int item_id = item->id; (*solution)[item_id] = state_.is_bound(item_id) && state_.is_in(item_id); } @@ -207,7 +207,7 @@ void KnapsackPropagator::CopyCurrentStateToSolution( // ----- KnapsackCapacityPropagator ----- KnapsackCapacityPropagator::KnapsackCapacityPropagator( - const KnapsackState &state, int64 capacity) + const KnapsackState& state, int64 capacity) : KnapsackPropagator(state), capacity_(capacity), consumed_capacity_(0), @@ -227,7 +227,7 @@ void KnapsackCapacityPropagator::ComputeProfitBounds() { int break_sorted_item_id = kNoSelection; const int number_of_sorted_items = sorted_items_.size(); for (int sorted_id = 0; sorted_id < number_of_sorted_items; ++sorted_id) { - const KnapsackItem *const item = sorted_items_[sorted_id]; + const KnapsackItem* const item = sorted_items_[sorted_id]; if (!state().is_bound(item->id)) { break_item_id_ = item->id; @@ -255,7 +255,7 @@ void KnapsackCapacityPropagator::InitPropagator() { break_item_id_ = kNoSelection; sorted_items_ = items(); profit_max_ = 0; - for (const KnapsackItem *const item : sorted_items_) { + for (const KnapsackItem* const item : sorted_items_) { profit_max_ = std::max(profit_max_, item->profit); } ++profit_max_; @@ -265,7 +265,7 @@ void KnapsackCapacityPropagator::InitPropagator() { // Returns false when the propagator fails. 
bool KnapsackCapacityPropagator::UpdatePropagator( - bool revert, const KnapsackAssignment &assignment) { + bool revert, const KnapsackAssignment& assignment) { if (assignment.is_in) { if (revert) { consumed_capacity_ -= items()[assignment.item_id]->weight; @@ -280,10 +280,10 @@ bool KnapsackCapacityPropagator::UpdatePropagator( } void KnapsackCapacityPropagator::CopyCurrentStateToSolutionPropagator( - std::vector *solution) const { + std::vector* solution) const { CHECK(solution != nullptr); int64 remaining_capacity = capacity_ - consumed_capacity_; - for (const KnapsackItem *const item : sorted_items_) { + for (const KnapsackItem* const item : sorted_items_) { if (!state().is_bound(item->id)) { if (remaining_capacity >= item->weight) { remaining_capacity -= item->weight; @@ -333,7 +333,7 @@ int64 KnapsackCapacityPropagator::GetAdditionalProfit(int64 remaining_capacity, } // ----- KnapsackGenericSolver ----- -KnapsackGenericSolver::KnapsackGenericSolver(const std::string &solver_name) +KnapsackGenericSolver::KnapsackGenericSolver(const std::string& solver_name) : BaseKnapsackSolver(solver_name), propagators_(), master_propagator_id_(kMasterPropagatorId), @@ -344,10 +344,9 @@ KnapsackGenericSolver::KnapsackGenericSolver(const std::string &solver_name) KnapsackGenericSolver::~KnapsackGenericSolver() { Clear(); } -void KnapsackGenericSolver::Init( - const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { +void KnapsackGenericSolver::Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { CHECK_EQ(capacities.size(), weights.size()); Clear(); @@ -358,7 +357,7 @@ void KnapsackGenericSolver::Init( for (int i = 0; i < number_of_dimensions; ++i) { CHECK_EQ(number_of_items, weights[i].size()); - KnapsackCapacityPropagator *propagator = + KnapsackCapacityPropagator* propagator = new KnapsackCapacityPropagator(state_, capacities[i]); propagator->Init(profits, weights[i]); 
propagators_.push_back(propagator); @@ -368,8 +367,8 @@ void KnapsackGenericSolver::Init( void KnapsackGenericSolver::GetLowerAndUpperBoundWhenItem(int item_id, bool is_item_in, - int64 *lower_bound, - int64 *upper_bound) { + int64* lower_bound, + int64* upper_bound) { CHECK(lower_bound != nullptr); CHECK(upper_bound != nullptr); KnapsackAssignment assignment(item_id, is_item_in); @@ -392,8 +391,8 @@ void KnapsackGenericSolver::GetLowerAndUpperBoundWhenItem(int item_id, } } -int64 KnapsackGenericSolver::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) { +int64 KnapsackGenericSolver::Solve(TimeLimit* time_limit, + bool* is_solution_optimal) { DCHECK(time_limit != nullptr); DCHECK(is_solution_optimal != nullptr); best_solution_profit_ = 0LL; @@ -401,7 +400,7 @@ int64 KnapsackGenericSolver::Solve(TimeLimit *time_limit, SearchQueue search_queue; const KnapsackAssignment assignment(kNoSelection, true); - KnapsackSearchNode *root_node = new KnapsackSearchNode(nullptr, assignment); + KnapsackSearchNode* root_node = new KnapsackSearchNode(nullptr, assignment); root_node->set_current_profit(GetCurrentProfit()); root_node->set_profit_upper_bound(GetAggregatedProfitUpperBound()); root_node->set_next_item_id(GetNextItemId()); @@ -414,14 +413,14 @@ int64 KnapsackGenericSolver::Solve(TimeLimit *time_limit, search_queue.push(search_nodes_.back()); } - KnapsackSearchNode *current_node = root_node; + KnapsackSearchNode* current_node = root_node; while (!search_queue.empty() && search_queue.top()->profit_upper_bound() > best_solution_profit_) { if (time_limit->LimitReached()) { *is_solution_optimal = false; break; } - KnapsackSearchNode *const node = search_queue.top(); + KnapsackSearchNode* const node = search_queue.top(); search_queue.pop(); if (node != current_node) { @@ -448,11 +447,11 @@ void KnapsackGenericSolver::Clear() { } // Returns false when at least one propagator fails. 
-bool KnapsackGenericSolver::UpdatePropagators(const KnapsackSearchPath &path) { +bool KnapsackGenericSolver::UpdatePropagators(const KnapsackSearchPath& path) { bool no_fail = true; // Revert previous changes. - const KnapsackSearchNode *node = &path.from(); - const KnapsackSearchNode *via = &path.via(); + const KnapsackSearchNode* node = &path.from(); + const KnapsackSearchNode* via = &path.via(); while (node != via) { no_fail = IncrementalUpdate(true, node->assignment()) && no_fail; node = node->parent(); @@ -468,7 +467,7 @@ bool KnapsackGenericSolver::UpdatePropagators(const KnapsackSearchPath &path) { int64 KnapsackGenericSolver::GetAggregatedProfitUpperBound() const { int64 upper_bound = kint64max; - for (KnapsackPropagator *const prop : propagators_) { + for (KnapsackPropagator* const prop : propagators_) { prop->ComputeProfitBounds(); const int64 propagator_upper_bound = prop->profit_upper_bound(); upper_bound = std::min(upper_bound, propagator_upper_bound); @@ -476,7 +475,7 @@ int64 KnapsackGenericSolver::GetAggregatedProfitUpperBound() const { return upper_bound; } -bool KnapsackGenericSolver::MakeNewNode(const KnapsackSearchNode &node, +bool KnapsackGenericSolver::MakeNewNode(const KnapsackSearchNode& node, bool is_in) { if (node.next_item_id() == kNoSelection) { return false; @@ -504,7 +503,7 @@ bool KnapsackGenericSolver::MakeNewNode(const KnapsackSearchNode &node, } // The node is relevant. 
- KnapsackSearchNode *relevant_node = new KnapsackSearchNode(&node, assignment); + KnapsackSearchNode* relevant_node = new KnapsackSearchNode(&node, assignment); relevant_node->set_current_profit(new_node.current_profit()); relevant_node->set_profit_upper_bound(new_node.profit_upper_bound()); relevant_node->set_next_item_id(new_node.next_item_id()); @@ -514,11 +513,11 @@ bool KnapsackGenericSolver::MakeNewNode(const KnapsackSearchNode &node, } bool KnapsackGenericSolver::IncrementalUpdate( - bool revert, const KnapsackAssignment &assignment) { + bool revert, const KnapsackAssignment& assignment) { // Do not stop on a failure: To be able to be incremental on the update, // partial solution (state) and propagators must all be in the same state. bool no_fail = state_.UpdateState(revert, assignment); - for (KnapsackPropagator *const prop : propagators_) { + for (KnapsackPropagator* const prop : propagators_) { no_fail = prop->Update(revert, assignment) && no_fail; } return no_fail; @@ -544,15 +543,15 @@ void KnapsackGenericSolver::UpdateBestSolution() { // number of items is less than 15. class KnapsackBruteForceSolver : public BaseKnapsackSolver { public: - explicit KnapsackBruteForceSolver(const std::string &solver_name); + explicit KnapsackBruteForceSolver(const std::string& solver_name); // Initializes the solver and enters the problem to be solved. - void Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) override; + void Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) override; // Solves the problem and returns the profit of the optimal solution. - int64 Solve(TimeLimit *time_limit, bool *is_solution_optimal) override; + int64 Solve(TimeLimit* time_limit, bool* is_solution_optimal) override; // Returns true if the item 'item_id' is packed in the optimal knapsack. 
bool best_solution(int item_id) const override { @@ -570,7 +569,7 @@ class KnapsackBruteForceSolver : public BaseKnapsackSolver { }; KnapsackBruteForceSolver::KnapsackBruteForceSolver( - const std::string &solver_name) + const std::string& solver_name) : BaseKnapsackSolver(solver_name), num_items_(0), capacity_(0LL), @@ -578,9 +577,9 @@ KnapsackBruteForceSolver::KnapsackBruteForceSolver( best_solution_(0U) {} void KnapsackBruteForceSolver::Init( - const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { + const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { // TODO(user): Implement multi-dimensional brute force solver. CHECK_EQ(weights.size(), 1) << "Brute force solver only works with one dimension."; @@ -600,8 +599,8 @@ void KnapsackBruteForceSolver::Init( capacity_ = capacities.at(0); } -int64 KnapsackBruteForceSolver::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) { +int64 KnapsackBruteForceSolver::Solve(TimeLimit* time_limit, + bool* is_solution_optimal) { DCHECK(is_solution_optimal != nullptr); *is_solution_optimal = true; best_solution_profit_ = 0LL; @@ -674,15 +673,15 @@ struct KnapsackItemWithEfficiency { // than KnapsackGenericSolver. class Knapsack64ItemsSolver : public BaseKnapsackSolver { public: - explicit Knapsack64ItemsSolver(const std::string &solver_name); + explicit Knapsack64ItemsSolver(const std::string& solver_name); // Initializes the solver and enters the problem to be solved. - void Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) override; + void Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) override; // Solves the problem and returns the profit of the optimal solution. 
- int64 Solve(TimeLimit *time_limit, bool *is_solution_optimal) override; + int64 Solve(TimeLimit* time_limit, bool* is_solution_optimal) override; // Returns true if the item 'item_id' is packed in the optimal knapsack. bool best_solution(int item_id) const override { @@ -691,7 +690,7 @@ class Knapsack64ItemsSolver : public BaseKnapsackSolver { private: int GetBreakItemId(int64 capacity) const; - void GetLowerAndUpperBound(int64 *lower_bound, int64 *upper_bound) const; + void GetLowerAndUpperBound(int64* lower_bound, int64* upper_bound) const; void GoToNextState(bool has_failed); void BuildBestSolution(); @@ -716,13 +715,13 @@ class Knapsack64ItemsSolver : public BaseKnapsackSolver { // Comparator used to sort item in decreasing efficiency order bool CompareKnapsackItemWithEfficiencyInDecreasingEfficiencyOrder( - const KnapsackItemWithEfficiency &item1, - const KnapsackItemWithEfficiency &item2) { + const KnapsackItemWithEfficiency& item1, + const KnapsackItemWithEfficiency& item2) { return item1.efficiency > item2.efficiency; } // ----- Knapsack64ItemsSolver ----- -Knapsack64ItemsSolver::Knapsack64ItemsSolver(const std::string &solver_name) +Knapsack64ItemsSolver::Knapsack64ItemsSolver(const std::string& solver_name) : BaseKnapsackSolver(solver_name), sorted_items_(), sum_profits_(), @@ -737,10 +736,9 @@ Knapsack64ItemsSolver::Knapsack64ItemsSolver(const std::string &solver_name) rejected_items_profit_(0LL), rejected_items_weight_(0LL) {} -void Knapsack64ItemsSolver::Init( - const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { +void Knapsack64ItemsSolver::Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { CHECK_EQ(weights.size(), 1) << "Brute force solver only works with one dimension."; CHECK_EQ(capacities.size(), weights.size()); @@ -778,8 +776,8 @@ void Knapsack64ItemsSolver::Init( } } -int64 Knapsack64ItemsSolver::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) 
{ +int64 Knapsack64ItemsSolver::Solve(TimeLimit* time_limit, + bool* is_solution_optimal) { DCHECK(is_solution_optimal != nullptr); *is_solution_optimal = true; const int num_items = sorted_items_.size(); @@ -828,8 +826,8 @@ int Knapsack64ItemsSolver::GetBreakItemId(int64 capacity) const { // Unfortunately, experiments show equivalent results with or without this // code optimization (only 1/7 of calls can be reused). // In order to simplify the code, this optimization is not implemented. -void Knapsack64ItemsSolver::GetLowerAndUpperBound(int64 *lower_bound, - int64 *upper_bound) const { +void Knapsack64ItemsSolver::GetLowerAndUpperBound(int64* lower_bound, + int64* upper_bound) const { const int64 available_capacity = capacity_ + rejected_items_weight_; const int break_item_id = GetBreakItemId(available_capacity); const int num_items = sorted_items_.size(); @@ -863,7 +861,7 @@ void Knapsack64ItemsSolver::GoToNextState(bool has_failed) { } else { // Backtrack to last item in. while ((state_ & mask) == 0ULL && state_depth_ >= 0) { - const KnapsackItemWithEfficiency &item = sorted_items_[state_depth_]; + const KnapsackItemWithEfficiency& item = sorted_items_[state_depth_]; rejected_items_profit_ -= item.profit; rejected_items_weight_ -= item.weight; --state_depth_; @@ -872,7 +870,7 @@ void Knapsack64ItemsSolver::GoToNextState(bool has_failed) { if (state_ & mask) { // Item was in, remove it. state_ = state_ & ~mask; - const KnapsackItemWithEfficiency &item = sorted_items_[state_depth_]; + const KnapsackItemWithEfficiency& item = sorted_items_[state_depth_]; rejected_items_profit_ += item.profit; rejected_items_weight_ += item.weight; state_weight_ -= item.weight; @@ -931,15 +929,15 @@ void Knapsack64ItemsSolver::BuildBestSolution() { // Ulrich Pferschy and David Pisinger, Springer book (ISBN 978-3540402862). 
class KnapsackDynamicProgrammingSolver : public BaseKnapsackSolver { public: - explicit KnapsackDynamicProgrammingSolver(const std::string &solver_name); + explicit KnapsackDynamicProgrammingSolver(const std::string& solver_name); // Initializes the solver and enters the problem to be solved. - void Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) override; + void Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) override; // Solves the problem and returns the profit of the optimal solution. - int64 Solve(TimeLimit *time_limit, bool *is_solution_optimal) override; + int64 Solve(TimeLimit* time_limit, bool* is_solution_optimal) override; // Returns true if the item 'item_id' is packed in the optimal knapsack. bool best_solution(int item_id) const override { @@ -959,7 +957,7 @@ class KnapsackDynamicProgrammingSolver : public BaseKnapsackSolver { // ----- KnapsackDynamicProgrammingSolver ----- KnapsackDynamicProgrammingSolver::KnapsackDynamicProgrammingSolver( - const std::string &solver_name) + const std::string& solver_name) : BaseKnapsackSolver(solver_name), profits_(), weights_(), @@ -969,9 +967,9 @@ KnapsackDynamicProgrammingSolver::KnapsackDynamicProgrammingSolver( best_solution_() {} void KnapsackDynamicProgrammingSolver::Init( - const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { + const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { CHECK_EQ(weights.size(), 1) << "Current implementation of the dynamic programming solver only deals" << " with one dimension."; @@ -1003,8 +1001,8 @@ int64 KnapsackDynamicProgrammingSolver::SolveSubProblem(int64 capacity, return selected_item_ids_.at(capacity); } -int64 KnapsackDynamicProgrammingSolver::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) { +int64 KnapsackDynamicProgrammingSolver::Solve(TimeLimit* time_limit, + bool* 
is_solution_optimal) { DCHECK(is_solution_optimal != nullptr); *is_solution_optimal = true; const int64 capacity_plus_1 = capacity_ + 1; @@ -1031,15 +1029,15 @@ int64 KnapsackDynamicProgrammingSolver::Solve(TimeLimit *time_limit, class KnapsackMIPSolver : public BaseKnapsackSolver { public: KnapsackMIPSolver(MPSolver::OptimizationProblemType problem_type, - const std::string &solver_name); + const std::string& solver_name); // Initializes the solver and enters the problem to be solved. - void Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) override; + void Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) override; // Solves the problem and returns the profit of the optimal solution. - int64 Solve(TimeLimit *time_limit, bool *is_solution_optimal) override; + int64 Solve(TimeLimit* time_limit, bool* is_solution_optimal) override; // Returns true if the item 'item_id' is packed in the optimal knapsack. 
bool best_solution(int item_id) const override { @@ -1049,14 +1047,14 @@ class KnapsackMIPSolver : public BaseKnapsackSolver { private: MPSolver::OptimizationProblemType problem_type_; std::vector profits_; - std::vector > weights_; + std::vector> weights_; std::vector capacities_; std::vector best_solution_; }; KnapsackMIPSolver::KnapsackMIPSolver( MPSolver::OptimizationProblemType problem_type, - const std::string &solver_name) + const std::string& solver_name) : BaseKnapsackSolver(solver_name), problem_type_(problem_type), profits_(), @@ -1064,22 +1062,22 @@ KnapsackMIPSolver::KnapsackMIPSolver( capacities_(), best_solution_() {} -void KnapsackMIPSolver::Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { +void KnapsackMIPSolver::Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { profits_ = profits; weights_ = weights; capacities_ = capacities; } -int64 KnapsackMIPSolver::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) { +int64 KnapsackMIPSolver::Solve(TimeLimit* time_limit, + bool* is_solution_optimal) { DCHECK(is_solution_optimal != nullptr); *is_solution_optimal = true; MPSolver solver(GetName(), problem_type_); const int num_items = profits_.size(); - std::vector variables; + std::vector variables; solver.MakeBoolVarArray(num_items, "x", &variables); // Add constraints. 
@@ -1088,7 +1086,7 @@ int64 KnapsackMIPSolver::Solve(TimeLimit *time_limit, << "Weights should be vector of num_dimensions (" << num_dimensions << ") vectors of size num_items (" << num_items << ")."; for (int i = 0; i < num_dimensions; ++i) { - MPConstraint *const ct = solver.MakeRowConstraint(0LL, capacities_.at(i)); + MPConstraint* const ct = solver.MakeRowConstraint(0LL, capacities_.at(i)); for (int j = 0; j < num_items; ++j) { ct->SetCoefficient(variables.at(j), weights_.at(i).at(j)); } @@ -1097,7 +1095,7 @@ int64 KnapsackMIPSolver::Solve(TimeLimit *time_limit, // Define objective to minimize. Minimization is used instead of maximization // because of an issue with CBC solver which does not always find the optimal // solution on maximization problems. - MPObjective *const objective = solver.MutableObjective(); + MPObjective* const objective = solver.MutableObjective(); for (int j = 0; j < num_items; ++j) { objective->SetCoefficient(variables.at(j), -profits_.at(j)); } @@ -1118,12 +1116,12 @@ int64 KnapsackMIPSolver::Solve(TimeLimit *time_limit, } // ----- KnapsackSolver ----- -KnapsackSolver::KnapsackSolver(const std::string &solver_name) +KnapsackSolver::KnapsackSolver(const std::string& solver_name) : KnapsackSolver(KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER, solver_name) {} KnapsackSolver::KnapsackSolver(SolverType solver_type, - const std::string &solver_name) + const std::string& solver_name) : solver_(), known_value_(), best_solution_(), @@ -1177,16 +1175,16 @@ KnapsackSolver::KnapsackSolver(SolverType solver_type, KnapsackSolver::~KnapsackSolver() {} -void KnapsackSolver::Init(const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { +void KnapsackSolver::Init(const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { time_limit_ = absl::make_unique(time_limit_seconds_); is_solution_optimal_ = false; additional_profit_ = 0LL; is_problem_solved_ = false; const int num_items = 
profits.size(); - std::vector > reduced_weights; + std::vector> reduced_weights; std::vector reduced_capacities; if (use_reduction_) { const int num_reduced_items = ReduceCapacities( @@ -1218,10 +1216,10 @@ void KnapsackSolver::Init(const std::vector &profits, } int KnapsackSolver::ReduceCapacities( - int num_items, const std::vector > &weights, - const std::vector &capacities, - std::vector > *reduced_weights, - std::vector *reduced_capacities) { + int num_items, const std::vector>& weights, + const std::vector& capacities, + std::vector>* reduced_weights, + std::vector* reduced_capacities) { known_value_.assign(num_items, false); best_solution_.assign(num_items, false); mapping_reduced_item_id_.assign(num_items, 0); @@ -1309,7 +1307,7 @@ int KnapsackSolver::ReduceProblem(int num_items) { } void KnapsackSolver::ComputeAdditionalProfit( - const std::vector &profits) { + const std::vector& profits) { const int num_items = profits.size(); additional_profit_ = 0LL; for (int item_id = 0; item_id < num_items; ++item_id) { @@ -1320,9 +1318,9 @@ void KnapsackSolver::ComputeAdditionalProfit( } void KnapsackSolver::InitReducedProblem( - const std::vector &profits, - const std::vector > &weights, - const std::vector &capacities) { + const std::vector& profits, + const std::vector>& weights, + const std::vector& capacities) { const int num_items = profits.size(); const int num_dimensions = capacities.size(); @@ -1334,10 +1332,10 @@ void KnapsackSolver::InitReducedProblem( } } - std::vector > reduced_weights; + std::vector> reduced_weights; std::vector reduced_capacities = capacities; for (int dim = 0; dim < num_dimensions; ++dim) { - const std::vector &one_dimension_weights = weights[dim]; + const std::vector& one_dimension_weights = weights[dim]; std::vector one_dimension_reduced_weights; for (int item_id = 0; item_id < num_items; ++item_id) { if (known_value_[item_id]) { @@ -1373,8 +1371,8 @@ std::string KnapsackSolver::GetName() const { return solver_->GetName(); } // 
----- BaseKnapsackSolver ----- void BaseKnapsackSolver::GetLowerAndUpperBoundWhenItem(int item_id, bool is_item_in, - int64 *lower_bound, - int64 *upper_bound) { + int64* lower_bound, + int64* upper_bound) { CHECK(lower_bound != nullptr); CHECK(upper_bound != nullptr); *lower_bound = 0LL; diff --git a/ortools/algorithms/knapsack_solver_for_cuts.cc b/ortools/algorithms/knapsack_solver_for_cuts.cc index d87c3cd901..842a0fe1f6 100644 --- a/ortools/algorithms/knapsack_solver_for_cuts.cc +++ b/ortools/algorithms/knapsack_solver_for_cuts.cc @@ -31,8 +31,8 @@ const double kInfinity = std::numeric_limits::infinity(); struct CompareKnapsackItemsInDecreasingEfficiencyOrder { explicit CompareKnapsackItemsInDecreasingEfficiencyOrder(double _profit_max) : profit_max(_profit_max) {} - bool operator()(const KnapsackItemForCutsPtr &item1, - const KnapsackItemForCutsPtr &item2) const { + bool operator()(const KnapsackItemForCutsPtr& item1, + const KnapsackItemForCutsPtr& item2) const { return item1->GetEfficiency(profit_max) > item2->GetEfficiency(profit_max); } const double profit_max; @@ -44,8 +44,8 @@ struct CompareKnapsackItemsInDecreasingEfficiencyOrder { // prefer the one with the highest current profit. This is usually the one // closer to a leaf. In practice, the main advantage is to have smaller path. 
struct CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder { - bool operator()(const KnapsackSearchNodeForCuts *node_1, - const KnapsackSearchNodeForCuts *node_2) const { + bool operator()(const KnapsackSearchNodeForCuts* node_1, + const KnapsackSearchNodeForCuts* node_2) const { const double profit_upper_bound_1 = node_1->profit_upper_bound(); const double profit_upper_bound_2 = node_2->profit_upper_bound(); if (profit_upper_bound_1 == profit_upper_bound_2) { @@ -56,15 +56,15 @@ struct CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder { }; using SearchQueue = std::priority_queue< - KnapsackSearchNodeForCuts *, std::vector, + KnapsackSearchNodeForCuts*, std::vector, CompareKnapsackSearchNodePtrInDecreasingUpperBoundOrder>; } // namespace // ----- KnapsackSearchNodeForCuts ----- KnapsackSearchNodeForCuts::KnapsackSearchNodeForCuts( - const KnapsackSearchNodeForCuts *const parent, - const KnapsackAssignmentForCuts &assignment) + const KnapsackSearchNodeForCuts* const parent, + const KnapsackAssignmentForCuts& assignment) : depth_(parent == nullptr ? 0 : parent->depth() + 1), parent_(parent), assignment_(assignment), @@ -74,13 +74,13 @@ KnapsackSearchNodeForCuts::KnapsackSearchNodeForCuts( // ----- KnapsackSearchPathForCuts ----- KnapsackSearchPathForCuts::KnapsackSearchPathForCuts( - const KnapsackSearchNodeForCuts *from, const KnapsackSearchNodeForCuts *to) + const KnapsackSearchNodeForCuts* from, const KnapsackSearchNodeForCuts* to) : from_(from), via_(nullptr), to_(to) {} void KnapsackSearchPathForCuts::Init() { - const KnapsackSearchNodeForCuts *node_from = + const KnapsackSearchNodeForCuts* node_from = MoveUpToDepth(from_, to_->depth()); - const KnapsackSearchNodeForCuts *node_to = MoveUpToDepth(to_, from_->depth()); + const KnapsackSearchNodeForCuts* node_to = MoveUpToDepth(to_, from_->depth()); DCHECK_EQ(node_from->depth(), node_to->depth()); // Find common parent. 
@@ -91,8 +91,8 @@ void KnapsackSearchPathForCuts::Init() { via_ = node_from; } -const KnapsackSearchNodeForCuts *MoveUpToDepth( - const KnapsackSearchNodeForCuts *node, int depth) { +const KnapsackSearchNodeForCuts* MoveUpToDepth( + const KnapsackSearchNodeForCuts* node, int depth) { while (node->depth() > depth) { node = node->parent(); } @@ -109,7 +109,7 @@ void KnapsackStateForCuts::Init(int number_of_items) { // Returns false when the state is invalid. bool KnapsackStateForCuts::UpdateState( - bool revert, const KnapsackAssignmentForCuts &assignment) { + bool revert, const KnapsackAssignmentForCuts& assignment) { if (revert) { is_bound_[assignment.item_id] = false; } else { @@ -125,7 +125,7 @@ bool KnapsackStateForCuts::UpdateState( // ----- KnapsackPropagatorForCuts ----- KnapsackPropagatorForCuts::KnapsackPropagatorForCuts( - const KnapsackStateForCuts *state) + const KnapsackStateForCuts* state) : items_(), current_profit_(0), profit_lower_bound_(0), @@ -134,8 +134,8 @@ KnapsackPropagatorForCuts::KnapsackPropagatorForCuts( KnapsackPropagatorForCuts::~KnapsackPropagatorForCuts() {} -void KnapsackPropagatorForCuts::Init(const std::vector &profits, - const std::vector &weights, +void KnapsackPropagatorForCuts::Init(const std::vector& profits, + const std::vector& weights, const double capacity) { const int number_of_items = profits.size(); items_.clear(); @@ -152,7 +152,7 @@ void KnapsackPropagatorForCuts::Init(const std::vector &profits, } bool KnapsackPropagatorForCuts::Update( - bool revert, const KnapsackAssignmentForCuts &assignment) { + bool revert, const KnapsackAssignmentForCuts& assignment) { if (assignment.is_in) { if (revert) { current_profit_ -= items_[assignment.item_id]->profit; @@ -169,14 +169,14 @@ bool KnapsackPropagatorForCuts::Update( } void KnapsackPropagatorForCuts::CopyCurrentStateToSolution( - std::vector *solution) const { + std::vector* solution) const { DCHECK(solution != nullptr); for (int i(0); i < items_.size(); ++i) { const int 
item_id = items_[i]->id; (*solution)[item_id] = state_->is_bound(item_id) && state_->is_in(item_id); } double remaining_capacity = capacity_ - consumed_capacity_; - for (const KnapsackItemForCutsPtr &item : sorted_items_) { + for (const KnapsackItemForCutsPtr& item : sorted_items_) { if (!state().is_bound(item->id)) { if (remaining_capacity >= item->weight) { remaining_capacity -= item->weight; @@ -196,7 +196,7 @@ void KnapsackPropagatorForCuts::ComputeProfitBounds() { int break_sorted_item_id = kNoSelection; for (int sorted_id(0); sorted_id < sorted_items_.size(); ++sorted_id) { if (!state().is_bound(sorted_items_[sorted_id]->id)) { - const KnapsackItemForCutsPtr &item = sorted_items_[sorted_id]; + const KnapsackItemForCutsPtr& item = sorted_items_[sorted_id]; break_item_id_ = item->id; if (remaining_capacity >= item->weight) { remaining_capacity -= item->weight; @@ -231,7 +231,7 @@ void KnapsackPropagatorForCuts::InitPropagator() { i, items()[i]->weight, items()[i]->profit)); } profit_max_ = 0; - for (const KnapsackItemForCutsPtr &item : sorted_items_) { + for (const KnapsackItemForCutsPtr& item : sorted_items_) { profit_max_ = std::max(profit_max_, item->profit); } profit_max_ += 1.0; @@ -283,8 +283,8 @@ KnapsackSolverForCuts::KnapsackSolverForCuts(std::string solver_name) best_solution_profit_(0), solver_name_(std::move(solver_name)) {} -void KnapsackSolverForCuts::Init(const std::vector &profits, - const std::vector &weights, +void KnapsackSolverForCuts::Init(const std::vector& profits, + const std::vector& weights, const double capacity) { const int number_of_items(profits.size()); state_.Init(number_of_items); @@ -296,8 +296,8 @@ void KnapsackSolverForCuts::Init(const std::vector &profits, void KnapsackSolverForCuts::GetLowerAndUpperBoundWhenItem(int item_id, bool is_item_in, - double *lower_bound, - double *upper_bound) { + double* lower_bound, + double* upper_bound) { DCHECK(lower_bound != nullptr); DCHECK(upper_bound != nullptr); KnapsackAssignmentForCuts 
assignment(item_id, is_item_in); @@ -317,8 +317,8 @@ void KnapsackSolverForCuts::GetLowerAndUpperBoundWhenItem(int item_id, } } -double KnapsackSolverForCuts::Solve(TimeLimit *time_limit, - bool *is_solution_optimal) { +double KnapsackSolverForCuts::Solve(TimeLimit* time_limit, + bool* is_solution_optimal) { DCHECK(time_limit != nullptr); DCHECK(is_solution_optimal != nullptr); best_solution_profit_ = 0; @@ -332,7 +332,7 @@ double KnapsackSolverForCuts::Solve(TimeLimit *time_limit, root_node->set_profit_upper_bound(GetAggregatedProfitUpperBound()); root_node->set_next_item_id(GetNextItemId()); search_nodes_.push_back(std::move(root_node)); - const KnapsackSearchNodeForCuts *current_node = + const KnapsackSearchNodeForCuts* current_node = search_nodes_.back().get(); // Start with the root node. if (MakeNewNode(*current_node, false)) { @@ -362,7 +362,7 @@ double KnapsackSolverForCuts::Solve(TimeLimit *time_limit, *is_solution_optimal = false; break; } - KnapsackSearchNodeForCuts *const node = search_queue.top(); + KnapsackSearchNodeForCuts* const node = search_queue.top(); search_queue.pop(); if (node != current_node) { @@ -385,11 +385,11 @@ double KnapsackSolverForCuts::Solve(TimeLimit *time_limit, // Returns false when at least one propagator fails. bool KnapsackSolverForCuts::UpdatePropagators( - const KnapsackSearchPathForCuts &path) { + const KnapsackSearchPathForCuts& path) { bool no_fail = true; // Revert previous changes. 
- const KnapsackSearchNodeForCuts *node = &path.from(); - const KnapsackSearchNodeForCuts *const via = &path.via(); + const KnapsackSearchNodeForCuts* node = &path.from(); + const KnapsackSearchNodeForCuts* const via = &path.via(); while (node != via) { no_fail = IncrementalUpdate(true, node->assignment()) && no_fail; node = node->parent(); @@ -409,7 +409,7 @@ double KnapsackSolverForCuts::GetAggregatedProfitUpperBound() { return std::min(kInfinity, propagator_upper_bound); } -bool KnapsackSolverForCuts::MakeNewNode(const KnapsackSearchNodeForCuts &node, +bool KnapsackSolverForCuts::MakeNewNode(const KnapsackSearchNodeForCuts& node, bool is_in) { if (node.next_item_id() == kNoSelection) { return false; @@ -448,7 +448,7 @@ bool KnapsackSolverForCuts::MakeNewNode(const KnapsackSearchNodeForCuts &node, } bool KnapsackSolverForCuts::IncrementalUpdate( - bool revert, const KnapsackAssignmentForCuts &assignment) { + bool revert, const KnapsackAssignmentForCuts& assignment) { // Do not stop on a failure: To be able to be incremental on the update, // partial solution (state) and propagators must all be in the same state. bool no_fail = state_.UpdateState(revert, assignment); diff --git a/ortools/algorithms/sparse_permutation.cc b/ortools/algorithms/sparse_permutation.cc index 9c5f19393f..cdae55d18c 100644 --- a/ortools/algorithms/sparse_permutation.cc +++ b/ortools/algorithms/sparse_permutation.cc @@ -20,7 +20,7 @@ namespace operations_research { -void SparsePermutation::RemoveCycles(const std::vector &cycle_indices) { +void SparsePermutation::RemoveCycles(const std::vector& cycle_indices) { // TODO(user): make this a class member to avoid allocation if the complexity // becomes an issue. In this case, also optimize the loop below by not copying // the first cycles. 
@@ -52,7 +52,7 @@ void SparsePermutation::RemoveCycles(const std::vector &cycle_indices) { std::string SparsePermutation::DebugString() const { DCHECK_EQ(cycles_.empty(), cycle_ends_.empty()); if (!cycles_.empty()) DCHECK_EQ(cycles_.size(), cycle_ends_.back()); - std::vector > cycles; + std::vector> cycles; int start = 0; for (const int end : cycle_ends_) { // Find the minimum. @@ -68,7 +68,7 @@ std::string SparsePermutation::DebugString() const { } std::sort(cycles.begin(), cycles.end()); std::string out; - for (const std::vector &cycle : cycles) { + for (const std::vector& cycle : cycles) { if (!out.empty()) out += " "; out += "("; out += absl::StrJoin(cycle, " "); diff --git a/ortools/base/bitmap.cc b/ortools/base/bitmap.cc index 293e7f6b87..4834528a2d 100644 --- a/ortools/base/bitmap.cc +++ b/ortools/base/bitmap.cc @@ -28,7 +28,7 @@ void Bitmap::Resize(uint32 size, bool fill) { const uint32 old_array_size = array_size_; array_size_ = new_array_size; max_size_ = size; - uint64 *new_map = new uint64[array_size_]; + uint64* new_map = new uint64[array_size_]; memcpy(new_map, map_, old_array_size * sizeof(*map_)); delete[] map_; map_ = new_map; diff --git a/ortools/base/file.cc b/ortools/base/file.cc index 3ddedfafdd..6110690ffe 100644 --- a/ortools/base/file.cc +++ b/ortools/base/file.cc @@ -30,12 +30,12 @@ #include "ortools/base/file.h" #include "ortools/base/logging.h" -File::File(FILE *const f_des, const absl::string_view &name) +File::File(FILE* const f_des, const absl::string_view& name) : f_(f_des), name_(name) {} -bool File::Delete(const char *const name) { return remove(name) == 0; } +bool File::Delete(const char* const name) { return remove(name) == 0; } -bool File::Exists(const char *const name) { return access(name, F_OK) == 0; } +bool File::Exists(const char* const name) { return access(name, F_OK) == 0; } size_t File::Size() { struct stat f_stat; @@ -63,43 +63,43 @@ absl::Status File::Close(int flags) { absl::StrCat("Could not close file '", name_, 
"'")); } -void File::ReadOrDie(void *const buf, size_t size) { +void File::ReadOrDie(void* const buf, size_t size) { CHECK_EQ(fread(buf, 1, size, f_), size); } -size_t File::Read(void *const buf, size_t size) { +size_t File::Read(void* const buf, size_t size) { return fread(buf, 1, size, f_); } -void File::WriteOrDie(const void *const buf, size_t size) { +void File::WriteOrDie(const void* const buf, size_t size) { CHECK_EQ(fwrite(buf, 1, size, f_), size); } -size_t File::Write(const void *const buf, size_t size) { +size_t File::Write(const void* const buf, size_t size) { return fwrite(buf, 1, size, f_); } -File *File::OpenOrDie(const char *const name, const char *const flag) { - FILE *const f_des = fopen(name, flag); +File* File::OpenOrDie(const char* const name, const char* const flag) { + FILE* const f_des = fopen(name, flag); if (f_des == NULL) { std::cerr << "Cannot open " << name; exit(1); } - File *const f = new File(f_des, name); + File* const f = new File(f_des, name); return f; } -File *File::Open(const char *const name, const char *const flag) { - FILE *const f_des = fopen(name, flag); +File* File::Open(const char* const name, const char* const flag) { + FILE* const f_des = fopen(name, flag); if (f_des == NULL) return NULL; - File *const f = new File(f_des, name); + File* const f = new File(f_des, name); return f; } -char *File::ReadLine(char *const output, uint64 max_length) { +char* File::ReadLine(char* const output, uint64 max_length) { return fgets(output, max_length, f_); } -int64 File::ReadToString(std::string *const output, uint64 max_length) { +int64 File::ReadToString(std::string* const output, uint64 max_length) { CHECK(output != nullptr); output->clear(); @@ -123,11 +123,11 @@ int64 File::ReadToString(std::string *const output, uint64 max_length) { return (nread >= 0 ? 
static_cast(output->size()) : -1); } -size_t File::WriteString(const std::string &line) { +size_t File::WriteString(const std::string& line) { return Write(line.c_str(), line.size()); } -bool File::WriteLine(const std::string &line) { +bool File::WriteLine(const std::string& line) { if (Write(line.c_str(), line.size()) != line.size()) return false; return Write("\n", 1) == 1; } @@ -139,8 +139,8 @@ bool File::Open() const { return f_ != NULL; } void File::Init() {} namespace file { -absl::Status Open(const absl::string_view &filename, - const absl::string_view &mode, File **f, int flags) { +absl::Status Open(const absl::string_view& filename, + const absl::string_view& mode, File** f, int flags) { if (flags == Defaults()) { *f = File::Open(filename, mode.data()); if (*f != nullptr) { @@ -151,19 +151,19 @@ absl::Status Open(const absl::string_view &filename, absl::StrCat("Could not open '", filename, "'")); } -File *OpenOrDie(const absl::string_view &filename, - const absl::string_view &mode, int flags) { - File *f; +File* OpenOrDie(const absl::string_view& filename, + const absl::string_view& mode, int flags) { + File* f; CHECK_EQ(flags, Defaults()); f = File::Open(filename, mode.data()); CHECK(f != nullptr) << absl::StrCat("Could not open '", filename, "'"); return f; } -absl::Status GetContents(const absl::string_view &filename, std::string *output, +absl::Status GetContents(const absl::string_view& filename, std::string* output, int flags) { if (flags == Defaults()) { - File *file = File::Open(filename, "r"); + File* file = File::Open(filename, "r"); if (file != NULL) { const int64 size = file->Size(); if (file->ReadToString(output, size) == size) return absl::OkStatus(); @@ -173,7 +173,7 @@ absl::Status GetContents(const absl::string_view &filename, std::string *output, absl::StrCat("Could not read '", filename, "'")); } -absl::Status WriteString(File *file, const absl::string_view &contents, +absl::Status WriteString(File* file, const absl::string_view& 
contents, int flags) { if (flags == Defaults() && file != NULL && file->Write(contents.data(), contents.size()) == contents.size() && @@ -185,29 +185,29 @@ absl::Status WriteString(File *file, const absl::string_view &contents, absl::StrCat("Could not write ", contents.size(), " bytes")); } -absl::Status SetContents(const absl::string_view &filename, - const absl::string_view &contents, int flags) { +absl::Status SetContents(const absl::string_view& filename, + const absl::string_view& contents, int flags) { return WriteString(File::Open(filename, "w"), contents, flags); } -bool ReadFileToString(const absl::string_view &file_name, std::string *output) { +bool ReadFileToString(const absl::string_view& file_name, std::string* output) { return GetContents(file_name, output, file::Defaults()).ok(); } -bool WriteStringToFile(const std::string &data, - const absl::string_view &file_name) { +bool WriteStringToFile(const std::string& data, + const absl::string_view& file_name) { return SetContents(file_name, data, file::Defaults()).ok(); } namespace { class NoOpErrorCollector : public google::protobuf::io::ErrorCollector { public: - virtual void AddError(int line, int column, const std::string &message) {} + virtual void AddError(int line, int column, const std::string& message) {} }; } // namespace -bool ReadFileToProto(const absl::string_view &file_name, - google::protobuf::Message *proto) { +bool ReadFileToProto(const absl::string_view& file_name, + google::protobuf::Message* proto) { std::string str; if (!ReadFileToString(file_name, &str)) { LOG(INFO) << "Could not read " << file_name; @@ -235,37 +235,37 @@ bool ReadFileToProto(const absl::string_view &file_name, return false; } -void ReadFileToProtoOrDie(const absl::string_view &file_name, - google::protobuf::Message *proto) { +void ReadFileToProtoOrDie(const absl::string_view& file_name, + google::protobuf::Message* proto) { CHECK(ReadFileToProto(file_name, proto)) << "file_name: " << file_name; } -bool 
WriteProtoToASCIIFile(const google::protobuf::Message &proto, - const absl::string_view &file_name) { +bool WriteProtoToASCIIFile(const google::protobuf::Message& proto, + const absl::string_view& file_name) { std::string proto_string; return google::protobuf::TextFormat::PrintToString(proto, &proto_string) && WriteStringToFile(proto_string, file_name); } -void WriteProtoToASCIIFileOrDie(const google::protobuf::Message &proto, - const absl::string_view &file_name) { +void WriteProtoToASCIIFileOrDie(const google::protobuf::Message& proto, + const absl::string_view& file_name) { CHECK(WriteProtoToASCIIFile(proto, file_name)) << "file_name: " << file_name; } -bool WriteProtoToFile(const google::protobuf::Message &proto, - const absl::string_view &file_name) { +bool WriteProtoToFile(const google::protobuf::Message& proto, + const absl::string_view& file_name) { std::string proto_string; return proto.AppendToString(&proto_string) && WriteStringToFile(proto_string, file_name); } -void WriteProtoToFileOrDie(const google::protobuf::Message &proto, - const absl::string_view &file_name) { +void WriteProtoToFileOrDie(const google::protobuf::Message& proto, + const absl::string_view& file_name) { CHECK(WriteProtoToFile(proto, file_name)) << "file_name: " << file_name; } -absl::Status GetTextProto(const absl::string_view &filename, - google::protobuf::Message *proto, int flags) { +absl::Status GetTextProto(const absl::string_view& filename, + google::protobuf::Message* proto, int flags) { if (flags == Defaults()) { if (ReadFileToProto(filename, proto)) return absl::OkStatus(); } @@ -274,8 +274,8 @@ absl::Status GetTextProto(const absl::string_view &filename, absl::StrCat("Could not read proto from '", filename, "'.")); } -absl::Status SetTextProto(const absl::string_view &filename, - const google::protobuf::Message &proto, int flags) { +absl::Status SetTextProto(const absl::string_view& filename, + const google::protobuf::Message& proto, int flags) { if (flags == Defaults()) { 
if (WriteProtoToASCIIFile(proto, filename)) return absl::OkStatus(); } @@ -284,8 +284,8 @@ absl::Status SetTextProto(const absl::string_view &filename, absl::StrCat("Could not write proto to '", filename, "'.")); } -absl::Status SetBinaryProto(const absl::string_view &filename, - const google::protobuf::Message &proto, int flags) { +absl::Status SetBinaryProto(const absl::string_view& filename, + const google::protobuf::Message& proto, int flags) { if (flags == Defaults()) { if (WriteProtoToFile(proto, filename)) return absl::OkStatus(); } @@ -294,7 +294,7 @@ absl::Status SetBinaryProto(const absl::string_view &filename, absl::StrCat("Could not write proto to '", filename, "'.")); } -absl::Status Delete(const absl::string_view &path, int flags) { +absl::Status Delete(const absl::string_view& path, int flags) { if (flags == Defaults()) { if (remove(path.data()) == 0) return absl::OkStatus(); } @@ -302,7 +302,7 @@ absl::Status Delete(const absl::string_view &path, int flags) { absl::StrCat("Could not delete '", path, "'.")); } -absl::Status Exists(const absl::string_view &path, int flags) { +absl::Status Exists(const absl::string_view& path, int flags) { if (flags == Defaults()) { if (access(path.data(), F_OK) == 0) return absl::OkStatus(); } diff --git a/ortools/base/jniutil.h b/ortools/base/jniutil.h index 24cb68cb79..e7f7408e3a 100644 --- a/ortools/base/jniutil.h +++ b/ortools/base/jniutil.h @@ -24,18 +24,18 @@ class JNIUtil { public: // Creates a Java jstring from a null-terminated UTF-8 encoded C String. // The caller must delete the jstring reference. - static jstring MakeJString(JNIEnv *env, const char *cstr) { + static jstring MakeJString(JNIEnv* env, const char* cstr) { if (cstr == NULL) return NULL; return env->NewStringUTF(cstr); } // Creates a null-terminated UTF-8 encoded C string from a jstring. // The returned string should be "delete[]"-ed when no longer needed. 
- static char *MakeCString(JNIEnv *env, jstring str) { + static char* MakeCString(JNIEnv* env, jstring str) { if (str == NULL) return NULL; jsize length = env->GetStringUTFLength(str); - const char *src = env->GetStringUTFChars(str, NULL); - char *dst = new char[length + 1]; + const char* src = env->GetStringUTFChars(str, NULL); + char* dst = new char[length + 1]; memcpy(dst, src, length); dst[length] = '\0'; env->ReleaseStringUTFChars(str, src); @@ -44,24 +44,23 @@ class JNIUtil { // Creates a new char array from a jbyteArray. // The caller must delete[] the returned array. - static char *MakeCharArray(JNIEnv *env, jbyteArray a, int *size) { + static char* MakeCharArray(JNIEnv* env, jbyteArray a, int* size) { jsize n = env->GetArrayLength(a); *size = n; - jbyte *jba = new jbyte[n]; + jbyte* jba = new jbyte[n]; env->GetByteArrayRegion(a, 0, n, jba); // We make use of the fact that jbyte's are really just chars. // If this changes (different VM, etc.) things will break. - return reinterpret_cast(jba); + return reinterpret_cast(jba); } // Produces a jbyteArray from a char array. 
- static jbyteArray MakeJByteArray(JNIEnv *env, const char *a, int size) { + static jbyteArray MakeJByteArray(JNIEnv* env, const char* a, int size) { // Create empty array object jbyteArray output = env->NewByteArray(size); // Fill it - env->SetByteArrayRegion(output, 0, size, - reinterpret_cast(a)); + env->SetByteArrayRegion(output, 0, size, reinterpret_cast(a)); return output; } }; diff --git a/ortools/base/random.cc b/ortools/base/random.cc index 30c472b5ea..6f4621aae2 100644 --- a/ortools/base/random.cc +++ b/ortools/base/random.cc @@ -48,7 +48,7 @@ uint64 ACMRandom::operator()(uint64 val_max) { } namespace { -static inline uint32 Word32At(const char *ptr) { +static inline uint32 Word32At(const char* ptr) { return ((static_cast(ptr[0])) + (static_cast(ptr[1]) << 8) + (static_cast(ptr[2]) << 16) + (static_cast(ptr[3]) << 24)); diff --git a/ortools/base/recordio.cc b/ortools/base/recordio.cc index 5f564bd4d1..cf9835cf24 100644 --- a/ortools/base/recordio.cc +++ b/ortools/base/recordio.cc @@ -23,7 +23,7 @@ namespace recordio { const int RecordWriter::kMagicNumber = 0x3ed7230a; -RecordWriter::RecordWriter(File *const file) +RecordWriter::RecordWriter(File* const file) : file_(file), use_compression_(true) {} bool RecordWriter::Close() { return file_->Close(); } @@ -32,16 +32,16 @@ void RecordWriter::set_use_compression(bool use_compression) { use_compression_ = use_compression; } -std::string RecordWriter::Compress(std::string const &s) const { +std::string RecordWriter::Compress(std::string const& s) const { const unsigned long source_size = s.size(); // NOLINT - const char *source = s.c_str(); + const char* source = s.c_str(); unsigned long dsize = source_size + (source_size * 0.1f) + 16; // NOLINT std::unique_ptr destination(new char[dsize]); // Use compress() from zlib.h. 
const int result = - compress(reinterpret_cast(destination.get()), &dsize, - reinterpret_cast(source), source_size); + compress(reinterpret_cast(destination.get()), &dsize, + reinterpret_cast(source), source_size); if (result != Z_OK) { LOG(FATAL) << "Compress error occurred! Error code: " << result; @@ -49,18 +49,18 @@ std::string RecordWriter::Compress(std::string const &s) const { return std::string(destination.get(), dsize); } -RecordReader::RecordReader(File *const file) : file_(file) {} +RecordReader::RecordReader(File* const file) : file_(file) {} bool RecordReader::Close() { return file_->Close(); } -void RecordReader::Uncompress(const char *const source, uint64 source_size, - char *const output_buffer, +void RecordReader::Uncompress(const char* const source, uint64 source_size, + char* const output_buffer, uint64 output_size) const { unsigned long result_size = output_size; // NOLINT - // Use uncompress() from zlib.h + // Use uncompress() from zlib.h const int result = - uncompress(reinterpret_cast(output_buffer), &result_size, - reinterpret_cast(source), source_size); + uncompress(reinterpret_cast(output_buffer), &result_size, + reinterpret_cast(source), source_size); if (result != Z_OK) { LOG(FATAL) << "Uncompress error occurred! Error code: " << result; } diff --git a/ortools/base/stl_util.h b/ortools/base/stl_util.h index 8fbeca0075..969c6d6a7b 100644 --- a/ortools/base/stl_util.h +++ b/ortools/base/stl_util.h @@ -40,9 +40,9 @@ namespace internal { template class Equiv { public: - explicit Equiv(const LessFunc &f) : f_(f) {} + explicit Equiv(const LessFunc& f) : f_(f) {} template - bool operator()(const T &a, const T &b) const { + bool operator()(const T& a, const T& b) const { return !f_(b, a) && !f_(a, b); } @@ -55,14 +55,14 @@ class Equiv { // If specified, the 'less_func' is used to compose an // equivalence comparator for the sorting and uniqueness tests. 
template -inline void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func) { +inline void STLSortAndRemoveDuplicates(T* v, const LessFunc& less_func) { std::sort(v->begin(), v->end(), less_func); v->erase(std::unique(v->begin(), v->end(), gtl::internal::Equiv(less_func)), v->end()); } template -inline void STLSortAndRemoveDuplicates(T *v) { +inline void STLSortAndRemoveDuplicates(T* v) { std::sort(v->begin(), v->end()); v->erase(std::unique(v->begin(), v->end()), v->end()); } @@ -72,7 +72,7 @@ inline void STLSortAndRemoveDuplicates(T *v) { // The 'less_func' is used to compose an equivalence comparator for the sorting // and uniqueness tests. template -inline void STLStableSortAndRemoveDuplicates(T *v, const LessFunc &less_func) { +inline void STLStableSortAndRemoveDuplicates(T* v, const LessFunc& less_func) { std::stable_sort(v->begin(), v->end(), less_func); v->erase(std::unique(v->begin(), v->end(), gtl::internal::Equiv(less_func)), @@ -82,7 +82,7 @@ inline void STLStableSortAndRemoveDuplicates(T *v, const LessFunc &less_func) { // the first equivalent element for each equivalence set, using < comparison and // == equivalence testing. template -inline void STLStableSortAndRemoveDuplicates(T *v) { +inline void STLStableSortAndRemoveDuplicates(T* v) { std::stable_sort(v->begin(), v->end()); v->erase(std::unique(v->begin(), v->end()), v->end()); } @@ -90,29 +90,29 @@ inline void STLStableSortAndRemoveDuplicates(T *v) { // Remove every occurrence of element e in v. See // http://en.wikipedia.org/wiki/Erase-remove_idiom. 
template -void STLEraseAllFromSequence(T *v, const E &e) { +void STLEraseAllFromSequence(T* v, const E& e) { v->erase(std::remove(v->begin(), v->end(), e), v->end()); } template -void STLEraseAllFromSequence(std::list *c, const E &e) { +void STLEraseAllFromSequence(std::list* c, const E& e) { c->remove(e); } template -void STLEraseAllFromSequence(std::forward_list *c, const E &e) { +void STLEraseAllFromSequence(std::forward_list* c, const E& e) { c->remove(e); } // Remove each element e in v satisfying pred(e). template -void STLEraseAllFromSequenceIf(T *v, P pred) { +void STLEraseAllFromSequenceIf(T* v, P pred) { v->erase(std::remove_if(v->begin(), v->end(), pred), v->end()); } template -void STLEraseAllFromSequenceIf(std::list *c, P pred) { +void STLEraseAllFromSequenceIf(std::list* c, P pred) { c->remove_if(pred); } template -void STLEraseAllFromSequenceIf(std::forward_list *c, P pred) { +void STLEraseAllFromSequenceIf(std::forward_list* c, P pred) { c->remove_if(pred); } @@ -120,7 +120,7 @@ void STLEraseAllFromSequenceIf(std::forward_list *c, P pred) { // empty object. STL clear()/reserve(0) does not always free internal memory // allocated. template -void STLClearObject(T *obj) { +void STLClearObject(T* obj) { T tmp; tmp.swap(*obj); // This reserve(0) is needed because "T tmp" sometimes allocates memory (arena @@ -129,7 +129,7 @@ void STLClearObject(T *obj) { } // STLClearObject overload for deque, which is missing reserve(). template -void STLClearObject(std::deque *obj) { +void STLClearObject(std::deque* obj) { std::deque tmp; tmp.swap(*obj); } @@ -142,7 +142,7 @@ void STLClearObject(std::deque *obj) { // Note: The name is misleading since the object is always cleared, regardless // of its size. 
template -inline void STLClearIfBig(T *obj, size_t limit = 1 << 20) { +inline void STLClearIfBig(T* obj, size_t limit = 1 << 20) { if (obj->capacity() >= limit) { STLClearObject(obj); } else { @@ -151,7 +151,7 @@ inline void STLClearIfBig(T *obj, size_t limit = 1 << 20) { } // STLClearIfBig overload for deque, which is missing capacity(). template -inline void STLClearIfBig(std::deque *obj, size_t limit = 1 << 20) { +inline void STLClearIfBig(std::deque* obj, size_t limit = 1 << 20) { if (obj->size() >= limit) { STLClearObject(obj); } else { @@ -177,7 +177,7 @@ inline void STLClearIfBig(std::deque *obj, size_t limit = 1 << 20) { // subsequent clear operations cheap. Note that the default number of buckets is // 193 in the Gnu library implementation as of Jan '08. template -inline void STLClearHashIfBig(T *obj, size_t limit) { +inline void STLClearHashIfBig(T* obj, size_t limit) { if (obj->bucket_count() >= limit) { T tmp; tmp.swap(*obj); @@ -191,7 +191,7 @@ inline void STLClearHashIfBig(T *obj, size_t limit) { // *shrink* the capacity in some cases, which is usually not what users want. // The behavior of this function is similar to that of vector::reserve() but for // string. -inline void STLStringReserveIfNeeded(std::string *s, size_t min_capacity) { +inline void STLStringReserveIfNeeded(std::string* s, size_t min_capacity) { if (min_capacity > s->capacity()) s->reserve(min_capacity); } @@ -200,7 +200,7 @@ inline void STLStringReserveIfNeeded(std::string *s, size_t min_capacity) { // '0' bytes. Typically used when code is then going to overwrite the backing // store of the string with known data. template -inline void STLStringResizeUninitialized(std::basic_string *s, +inline void STLStringResizeUninitialized(std::basic_string* s, size_t new_size) { absl::strings_internal::STLStringResizeUninitialized(s, new_size); } @@ -212,7 +212,7 @@ inline void STLStringResizeUninitialized(std::basic_string *s, // the previous function.) 
template inline bool STLStringSupportsNontrashingResize( - const std::basic_string &s) { + const std::basic_string& s) { return absl::strings_internal::STLStringSupportsNontrashingResize(&s); } @@ -223,7 +223,7 @@ inline bool STLStringSupportsNontrashingResize( // Just use string::assign directly unless you have benchmarks showing that this // function makes your code faster. (Even then, a future version of // string::assign() may be faster than this.) -inline void STLAssignToString(std::string *str, const char *ptr, size_t n) { +inline void STLAssignToString(std::string* str, const char* ptr, size_t n) { STLStringResizeUninitialized(str, n); if (n == 0) return; memcpy(&*str->begin(), ptr, n); @@ -236,7 +236,7 @@ inline void STLAssignToString(std::string *str, const char *ptr, size_t n) { // Just use string::append directly unless you have benchmarks showing that this // function makes your code faster. (Even then, a future version of // string::append() may be faster than this.) -inline void STLAppendToString(std::string *str, const char *ptr, size_t n) { +inline void STLAppendToString(std::string* str, const char* ptr, size_t n) { if (n == 0) return; size_t old_size = str->size(); STLStringResizeUninitialized(str, old_size + n); @@ -257,7 +257,7 @@ inline void STLAppendToString(std::string *str, const char *ptr, size_t n) { // contiguous is officially part of the C++11 standard [string.require]/5. // According to Matt Austern, this should already work on all current C++98 // implementations. -inline char *string_as_array(std::string *str) { +inline char* string_as_array(std::string* str) { // DO NOT USE const_cast(str->data())! See the unittest for why. return str->empty() ? nullptr : &*str->begin(); } @@ -267,7 +267,7 @@ inline char *string_as_array(std::string *str) { // because it compares the internal hash tables which may be different if the // order of insertions and deletions differed. 
template -inline bool HashSetEquality(const HashSet &set_a, const HashSet &set_b) { +inline bool HashSetEquality(const HashSet& set_a, const HashSet& set_b) { if (set_a.size() != set_b.size()) return false; for (typename HashSet::const_iterator i = set_a.begin(); i != set_a.end(); ++i) @@ -279,7 +279,7 @@ inline bool HashSetEquality(const HashSet &set_a, const HashSet &set_b) { // multimap and hash_multimap will result in wrong behavior. template -inline bool HashMapEquality(const HashMap &map_a, const HashMap &map_b, +inline bool HashMapEquality(const HashMap& map_a, const HashMap& map_b, BinaryPredicate mapped_type_equal) { if (map_a.size() != map_b.size()) return false; for (typename HashMap::const_iterator i = map_a.begin(); i != map_a.end(); @@ -294,13 +294,13 @@ inline bool HashMapEquality(const HashMap &map_a, const HashMap &map_b, // We overload for 'map' without a specialized functor and simply use its // operator== function. template -inline bool HashMapEquality(const std::map &map_a, - const std::map &map_b) { +inline bool HashMapEquality(const std::map& map_a, + const std::map& map_b) { return map_a == map_b; } template -inline bool HashMapEquality(const HashMap &a, const HashMap &b) { +inline bool HashMapEquality(const HashMap& a, const HashMap& b) { using Mapped = typename HashMap::mapped_type; return HashMapEquality(a, b, std::equal_to()); } @@ -369,7 +369,7 @@ void STLDeleteContainerPairSecondPointers(ForwardIterator begin, // ElementDeleter (defined below), which ensures that your container's elements // are deleted when the ElementDeleter goes out of scope. template -void STLDeleteElements(T *container) { +void STLDeleteElements(T* container) { if (!container) return; STLDeleteContainerPointers(container->begin(), container->end()); container->clear(); @@ -379,7 +379,7 @@ void STLDeleteElements(T *container) { // deletes all the "value" components and clears the container. Does nothing in // the case it's given a nullptr. 
template -void STLDeleteValues(T *v) { +void STLDeleteValues(T* v) { if (!v) return; (STLDeleteContainerPairSecondPointers)(v->begin(), v->end()); v->clear(); @@ -393,8 +393,8 @@ void STLDeleteValues(T *v) { class BaseDeleter { public: virtual ~BaseDeleter() {} - BaseDeleter(const BaseDeleter &) = delete; - void operator=(const BaseDeleter &) = delete; + BaseDeleter(const BaseDeleter&) = delete; + void operator=(const BaseDeleter&) = delete; protected: BaseDeleter() {} @@ -407,15 +407,15 @@ class BaseDeleter { template class TemplatedElementDeleter : public BaseDeleter { public: - explicit TemplatedElementDeleter(STLContainer *ptr) : container_ptr_(ptr) {} + explicit TemplatedElementDeleter(STLContainer* ptr) : container_ptr_(ptr) {} virtual ~TemplatedElementDeleter() { STLDeleteElements(container_ptr_); } - TemplatedElementDeleter(const TemplatedElementDeleter &) = delete; - void operator=(const TemplatedElementDeleter &) = delete; + TemplatedElementDeleter(const TemplatedElementDeleter&) = delete; + void operator=(const TemplatedElementDeleter&) = delete; private: - STLContainer *container_ptr_; + STLContainer* container_ptr_; }; // ElementDeleter is an RAII (go/raii) object that deletes the elements in the @@ -434,16 +434,16 @@ class TemplatedElementDeleter : public BaseDeleter { class ElementDeleter { public: template - explicit ElementDeleter(STLContainer *ptr) + explicit ElementDeleter(STLContainer* ptr) : deleter_(new TemplatedElementDeleter(ptr)) {} ~ElementDeleter() { delete deleter_; } - ElementDeleter(const ElementDeleter &) = delete; - void operator=(const ElementDeleter &) = delete; + ElementDeleter(const ElementDeleter&) = delete; + void operator=(const ElementDeleter&) = delete; private: - BaseDeleter *deleter_; + BaseDeleter* deleter_; }; // Given a pointer to an STL container this class will delete all the value @@ -453,15 +453,15 @@ class ElementDeleter { template class TemplatedValueDeleter : public BaseDeleter { public: - explicit 
TemplatedValueDeleter(STLContainer *ptr) : container_ptr_(ptr) {} + explicit TemplatedValueDeleter(STLContainer* ptr) : container_ptr_(ptr) {} virtual ~TemplatedValueDeleter() { STLDeleteValues(container_ptr_); } - TemplatedValueDeleter(const TemplatedValueDeleter &) = delete; - void operator=(const TemplatedValueDeleter &) = delete; + TemplatedValueDeleter(const TemplatedValueDeleter&) = delete; + void operator=(const TemplatedValueDeleter&) = delete; private: - STLContainer *container_ptr_; + STLContainer* container_ptr_; }; // ValueDeleter is an RAII (go/raii) object that deletes the 'second' member in @@ -476,16 +476,16 @@ class TemplatedValueDeleter : public BaseDeleter { class ValueDeleter { public: template - explicit ValueDeleter(STLContainer *ptr) + explicit ValueDeleter(STLContainer* ptr) : deleter_(new TemplatedValueDeleter(ptr)) {} ~ValueDeleter() { delete deleter_; } - ValueDeleter(const ValueDeleter &) = delete; - void operator=(const ValueDeleter &) = delete; + ValueDeleter(const ValueDeleter&) = delete; + void operator=(const ValueDeleter&) = delete; private: - BaseDeleter *deleter_; + BaseDeleter* deleter_; }; // RAII (go/raii) object that deletes elements in the given container when it @@ -496,11 +496,11 @@ class ValueDeleter { template class STLElementDeleter { public: - STLElementDeleter(STLContainer *ptr) : container_ptr_(ptr) {} + STLElementDeleter(STLContainer* ptr) : container_ptr_(ptr) {} ~STLElementDeleter() { STLDeleteElements(container_ptr_); } private: - STLContainer *container_ptr_; + STLContainer* container_ptr_; }; // RAII (go/raii) object that deletes the values in the given container of @@ -511,11 +511,11 @@ class STLElementDeleter { template class STLValueDeleter { public: - STLValueDeleter(STLContainer *ptr) : container_ptr_(ptr) {} + STLValueDeleter(STLContainer* ptr) : container_ptr_(ptr) {} ~STLValueDeleter() { STLDeleteValues(container_ptr_); } private: - STLContainer *container_ptr_; + STLContainer* container_ptr_; }; // 
Sets the referenced pointer to nullptr and returns its original value. This @@ -530,9 +530,9 @@ class STLValueDeleter { // // v[1] is now nullptr and the Foo it previously pointed to is now // // stored in "safe" template -ABSL_MUST_USE_RESULT T *release_ptr(T **ptr) { +ABSL_MUST_USE_RESULT T* release_ptr(T** ptr) { assert(ptr); - T *tmp = *ptr; + T* tmp = *ptr; *ptr = nullptr; return tmp; } @@ -542,12 +542,12 @@ namespace stl_util_internal { // Like std::less, but allows heterogeneous arguments. struct TransparentLess { template - bool operator()(const T &a, const T &b) const { + bool operator()(const T& a, const T& b) const { // std::less is better than '<' here, because it can order pointers. return std::less()(a, b); } template - bool operator()(const T1 &a, const T2 &b) const { + bool operator()(const T1& a, const T2& b) const { return a < b; } }; @@ -560,12 +560,12 @@ template struct Unordered : std::false_type {}; template -struct Unordered > : std::true_type {}; +struct Unordered> : std::true_type {}; template struct Unordered, - absl::void_t > - : std::false_type {}; + absl::void_t> : std::false_type { +}; } // namespace stl_util_internal @@ -592,15 +592,15 @@ struct Unordered, // The form taking 4 arguments. All other forms call into this one. // Explicit comparator, append to output container. 
template -void STLSetDifference(const In1 &a, const In2 &b, Out *out, Compare compare) { +void STLSetDifference(const In1& a, const In2& b, Out* out, Compare compare) { static_assert(!gtl::stl_util_internal::Unordered::value, "In1 must be an ordered set"); static_assert(!gtl::stl_util_internal::Unordered::value, "In2 must be an ordered set"); assert(std::is_sorted(a.begin(), a.end(), compare)); assert(std::is_sorted(b.begin(), b.end(), compare)); - assert(static_cast(&a) != static_cast(out)); - assert(static_cast(&b) != static_cast(out)); + assert(static_cast(&a) != static_cast(out)); + assert(static_cast(&b) != static_cast(out)); std::set_difference(a.begin(), a.end(), b.begin(), b.end(), std::inserter(*out, out->end()), compare); } @@ -610,34 +610,34 @@ void STLSetDifference(const In1 &a, const In2 &b, Out *out, Compare compare) { // the 3-argument overload that treats the third argument as a comparator. template typename std::enable_if::value, void>::type -STLSetDifference(const In1 &a, const In2 &b, Out *out) { +STLSetDifference(const In1& a, const In2& b, Out* out) { STLSetDifference(a, b, out, gtl::stl_util_internal::TransparentLess()); } // Explicit comparator, explicit return type. template -Out STLSetDifferenceAs(const In1 &a, const In2 &b, Compare compare) { +Out STLSetDifferenceAs(const In1& a, const In2& b, Compare compare) { Out out; STLSetDifference(a, b, &out, compare); return out; } // Implicit comparator, explicit return type. template -Out STLSetDifferenceAs(const In1 &a, const In2 &b) { +Out STLSetDifferenceAs(const In1& a, const In2& b) { return STLSetDifferenceAs(a, b, gtl::stl_util_internal::TransparentLess()); } // Explicit comparator, implicit return type. template -In1 STLSetDifference(const In1 &a, const In2 &b, Compare compare) { +In1 STLSetDifference(const In1& a, const In2& b, Compare compare) { return STLSetDifferenceAs(a, b, compare); } // Implicit comparator, implicit return type. 
template -In1 STLSetDifference(const In1 &a, const In2 &b) { +In1 STLSetDifference(const In1& a, const In2& b) { return STLSetDifference(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetDifference(const In1 &a, const In1 &b) { +In1 STLSetDifference(const In1& a, const In1& b) { return STLSetDifference(a, b, gtl::stl_util_internal::TransparentLess()); } @@ -656,15 +656,15 @@ In1 STLSetDifference(const In1 &a, const In1 &b) { // // See std::set_union() for how set union is computed. template -void STLSetUnion(const In1 &a, const In2 &b, Out *out, Compare compare) { +void STLSetUnion(const In1& a, const In2& b, Out* out, Compare compare) { static_assert(!gtl::stl_util_internal::Unordered::value, "In1 must be an ordered set"); static_assert(!gtl::stl_util_internal::Unordered::value, "In2 must be an ordered set"); assert(std::is_sorted(a.begin(), a.end(), compare)); assert(std::is_sorted(b.begin(), b.end(), compare)); - assert(static_cast(&a) != static_cast(out)); - assert(static_cast(&b) != static_cast(out)); + assert(static_cast(&a) != static_cast(out)); + assert(static_cast(&b) != static_cast(out)); std::set_union(a.begin(), a.end(), b.begin(), b.end(), std::inserter(*out, out->end()), compare); } @@ -673,29 +673,29 @@ void STLSetUnion(const In1 &a, const In2 &b, Out *out, Compare compare) { // the 3-argument overload that treats the third argument as a comparator. 
template typename std::enable_if::value, void>::type STLSetUnion( - const In1 &a, const In2 &b, Out *out) { + const In1& a, const In2& b, Out* out) { return STLSetUnion(a, b, out, gtl::stl_util_internal::TransparentLess()); } template -Out STLSetUnionAs(const In1 &a, const In2 &b, Compare compare) { +Out STLSetUnionAs(const In1& a, const In2& b, Compare compare) { Out out; STLSetUnion(a, b, &out, compare); return out; } template -Out STLSetUnionAs(const In1 &a, const In2 &b) { +Out STLSetUnionAs(const In1& a, const In2& b) { return STLSetUnionAs(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetUnion(const In1 &a, const In2 &b, Compare compare) { +In1 STLSetUnion(const In1& a, const In2& b, Compare compare) { return STLSetUnionAs(a, b, compare); } template -In1 STLSetUnion(const In1 &a, const In2 &b) { +In1 STLSetUnion(const In1& a, const In2& b) { return STLSetUnion(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetUnion(const In1 &a, const In1 &b) { +In1 STLSetUnion(const In1& a, const In1& b) { return STLSetUnion(a, b, gtl::stl_util_internal::TransparentLess()); } @@ -715,7 +715,7 @@ In1 STLSetUnion(const In1 &a, const In1 &b) { // // See std::set_symmetric_difference() for how these elements are selected. 
template -void STLSetSymmetricDifference(const In1 &a, const In2 &b, Out *out, +void STLSetSymmetricDifference(const In1& a, const In2& b, Out* out, Compare compare) { static_assert(!gtl::stl_util_internal::Unordered::value, "In1 must be an ordered set"); @@ -723,8 +723,8 @@ void STLSetSymmetricDifference(const In1 &a, const In2 &b, Out *out, "In2 must be an ordered set"); assert(std::is_sorted(a.begin(), a.end(), compare)); assert(std::is_sorted(b.begin(), b.end(), compare)); - assert(static_cast(&a) != static_cast(out)); - assert(static_cast(&b) != static_cast(out)); + assert(static_cast(&a) != static_cast(out)); + assert(static_cast(&b) != static_cast(out)); std::set_symmetric_difference(a.begin(), a.end(), b.begin(), b.end(), std::inserter(*out, out->end()), compare); } @@ -733,32 +733,32 @@ void STLSetSymmetricDifference(const In1 &a, const In2 &b, Out *out, // the 3-argument overload that treats the third argument as a comparator. template typename std::enable_if::value, void>::type -STLSetSymmetricDifference(const In1 &a, const In2 &b, Out *out) { +STLSetSymmetricDifference(const In1& a, const In2& b, Out* out) { return STLSetSymmetricDifference(a, b, out, gtl::stl_util_internal::TransparentLess()); } template -Out STLSetSymmetricDifferenceAs(const In1 &a, const In2 &b, Compare comp) { +Out STLSetSymmetricDifferenceAs(const In1& a, const In2& b, Compare comp) { Out out; STLSetSymmetricDifference(a, b, &out, comp); return out; } template -Out STLSetSymmetricDifferenceAs(const In1 &a, const In2 &b) { +Out STLSetSymmetricDifferenceAs(const In1& a, const In2& b) { return STLSetSymmetricDifferenceAs( a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetSymmetricDifference(const In1 &a, const In2 &b, Compare comp) { +In1 STLSetSymmetricDifference(const In1& a, const In2& b, Compare comp) { return STLSetSymmetricDifferenceAs(a, b, comp); } template -In1 STLSetSymmetricDifference(const In1 &a, const In2 &b) { +In1 STLSetSymmetricDifference(const 
In1& a, const In2& b) { return STLSetSymmetricDifference(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetSymmetricDifference(const In1 &a, const In1 &b) { +In1 STLSetSymmetricDifference(const In1& a, const In1& b) { return STLSetSymmetricDifference(a, b, gtl::stl_util_internal::TransparentLess()); } @@ -778,15 +778,15 @@ In1 STLSetSymmetricDifference(const In1 &a, const In1 &b) { // // See std::set_intersection() for how set intersection is computed. template -void STLSetIntersection(const In1 &a, const In2 &b, Out *out, Compare compare) { +void STLSetIntersection(const In1& a, const In2& b, Out* out, Compare compare) { static_assert(!gtl::stl_util_internal::Unordered::value, "In1 must be an ordered set"); static_assert(!gtl::stl_util_internal::Unordered::value, "In2 must be an ordered set"); assert(std::is_sorted(a.begin(), a.end(), compare)); assert(std::is_sorted(b.begin(), b.end(), compare)); - assert(static_cast(&a) != static_cast(out)); - assert(static_cast(&b) != static_cast(out)); + assert(static_cast(&a) != static_cast(out)); + assert(static_cast(&b) != static_cast(out)); std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), std::inserter(*out, out->end()), compare); } @@ -795,38 +795,38 @@ void STLSetIntersection(const In1 &a, const In2 &b, Out *out, Compare compare) { // the 3-argument overload that treats the third argument as a comparator. 
template typename std::enable_if::value, void>::type -STLSetIntersection(const In1 &a, const In2 &b, Out *out) { +STLSetIntersection(const In1& a, const In2& b, Out* out) { return STLSetIntersection(a, b, out, gtl::stl_util_internal::TransparentLess()); } template -Out STLSetIntersectionAs(const In1 &a, const In2 &b, Compare compare) { +Out STLSetIntersectionAs(const In1& a, const In2& b, Compare compare) { Out out; STLSetIntersection(a, b, &out, compare); return out; } template -Out STLSetIntersectionAs(const In1 &a, const In2 &b) { +Out STLSetIntersectionAs(const In1& a, const In2& b) { return STLSetIntersectionAs(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetIntersection(const In1 &a, const In2 &b, Compare compare) { +In1 STLSetIntersection(const In1& a, const In2& b, Compare compare) { return STLSetIntersectionAs(a, b, compare); } template -In1 STLSetIntersection(const In1 &a, const In2 &b) { +In1 STLSetIntersection(const In1& a, const In2& b) { return STLSetIntersection(a, b, gtl::stl_util_internal::TransparentLess()); } template -In1 STLSetIntersection(const In1 &a, const In1 &b) { +In1 STLSetIntersection(const In1& a, const In1& b) { return STLSetIntersection(a, b, gtl::stl_util_internal::TransparentLess()); } // Returns true iff every element in "b" is also in "a". Both containers // must be sorted by the specified comparator, or by '<' if none is given. 
template -bool STLIncludes(const In1 &a, const In2 &b, Compare compare) { +bool STLIncludes(const In1& a, const In2& b, Compare compare) { static_assert(!gtl::stl_util_internal::Unordered::value, "In1 must be an ordered set"); static_assert(!gtl::stl_util_internal::Unordered::value, @@ -836,7 +836,7 @@ bool STLIncludes(const In1 &a, const In2 &b, Compare compare) { return std::includes(a.begin(), a.end(), b.begin(), b.end(), compare); } template -bool STLIncludes(const In1 &a, const In2 &b) { +bool STLIncludes(const In1& a, const In2& b) { return STLIncludes(a, b, gtl::stl_util_internal::TransparentLess()); } @@ -883,13 +883,13 @@ bool SortedRangesHaveIntersection(InputIterator1 begin1, InputIterator1 end1, // elements must be sorted either by the specified comparator, or by '<' if no // comparator is given. template -bool SortedContainersHaveIntersection(const In1 &in1, const In2 &in2, +bool SortedContainersHaveIntersection(const In1& in1, const In2& in2, Comp comparator) { return SortedRangesHaveIntersection(in1.begin(), in1.end(), in2.begin(), in2.end(), comparator); } template -bool SortedContainersHaveIntersection(const In1 &in1, const In2 &in2) { +bool SortedContainersHaveIntersection(const In1& in1, const In2& in2) { return SortedContainersHaveIntersection( in1, in2, gtl::stl_util_internal::TransparentLess()); } @@ -906,7 +906,7 @@ bool SortedContainersHaveIntersection(const In1 &in1, const In2 &in2) { // v.push_back("hi"); // LOG(INFO) << "Bytes allocated " << bytes; // -template > +template > class STLCountingAllocator : public Alloc { public: using Base = Alloc; @@ -914,11 +914,11 @@ class STLCountingAllocator : public Alloc { using size_type = typename Alloc::size_type; STLCountingAllocator() : bytes_used_(nullptr) {} - explicit STLCountingAllocator(int64 *b) : bytes_used_(b) {} + explicit STLCountingAllocator(int64* b) : bytes_used_(b) {} // Constructor used for rebinding template - STLCountingAllocator(const STLCountingAllocator &x) + 
STLCountingAllocator(const STLCountingAllocator& x) : Alloc(x), bytes_used_(x.bytes_used()) {} pointer allocate(size_type n, @@ -943,21 +943,21 @@ class STLCountingAllocator : public Alloc { using other = STLCountingAllocator; }; - int64 *bytes_used() const { return bytes_used_; } + int64* bytes_used() const { return bytes_used_; } private: - int64 *bytes_used_; + int64* bytes_used_; }; template class STLCountingAllocator : public A { public: STLCountingAllocator() : bytes_used_(nullptr) {} - explicit STLCountingAllocator(int64 *b) : bytes_used_(b) {} + explicit STLCountingAllocator(int64* b) : bytes_used_(b) {} // Constructor used for rebinding template - STLCountingAllocator(const STLCountingAllocator &x) + STLCountingAllocator(const STLCountingAllocator& x) : A(x), bytes_used_(x.bytes_used()) {} template @@ -967,23 +967,23 @@ class STLCountingAllocator : public A { public: using other = STLCountingAllocator; }; - int64 *bytes_used() const { return bytes_used_; } + int64* bytes_used() const { return bytes_used_; } private: - int64 *bytes_used_; + int64* bytes_used_; }; template -bool operator==(const STLCountingAllocator &a, - const STLCountingAllocator &b) { +bool operator==(const STLCountingAllocator& a, + const STLCountingAllocator& b) { using Base = typename STLCountingAllocator::Base; - return static_cast(a) == static_cast(b) && + return static_cast(a) == static_cast(b) && a.bytes_used() == b.bytes_used(); } template -bool operator!=(const STLCountingAllocator &a, - const STLCountingAllocator &b) { +bool operator!=(const STLCountingAllocator& a, + const STLCountingAllocator& b) { return !(a == b); } diff --git a/ortools/base/sysinfo.cc b/ortools/base/sysinfo.cc index 4a73bb8988..786297e13f 100644 --- a/ortools/base/sysinfo.cc +++ b/ortools/base/sysinfo.cc @@ -21,7 +21,7 @@ #include #include #elif defined(_MSC_VER) // WINDOWS - // clang-format off +// clang-format off #include #include // clang-format on @@ -52,21 +52,21 @@ int64 GetProcessMemoryUsage() { 
unsigned size = 0; char buf[30]; snprintf(buf, sizeof(buf), "/proc/%u/statm", (unsigned)getpid()); - FILE *const pf = fopen(buf, "r"); + FILE* const pf = fopen(buf, "r"); if (pf) { if (fscanf(pf, "%u", &size) != 1) return 0; } fclose(pf); return size * int64{1024}; } -#elif defined(__FreeBSD__) // FreeBSD +#elif defined(__FreeBSD__) // FreeBSD int64 GetProcessMemoryUsage() { int who = RUSAGE_SELF; struct rusage rusage; getrusage(who, &rusage); return (int64)(rusage.ru_maxrss * int64{1024}); } -#elif defined(_MSC_VER) // WINDOWS +#elif defined(_MSC_VER) // WINDOWS int64 GetProcessMemoryUsage() { HANDLE hProcess; PROCESS_MEMORY_COUNTERS pmc; @@ -81,7 +81,7 @@ int64 GetProcessMemoryUsage() { } return memory; } -#else // Unknown, returning 0. +#else // Unknown, returning 0. int64 GetProcessMemoryUsage() { return 0; } #endif diff --git a/ortools/base/threadpool.cc b/ortools/base/threadpool.cc index 1ad2032c68..506d601dfc 100644 --- a/ortools/base/threadpool.cc +++ b/ortools/base/threadpool.cc @@ -16,8 +16,8 @@ #include "ortools/base/logging.h" namespace operations_research { -void RunWorker(void *data) { - ThreadPool *const thread_pool = reinterpret_cast(data); +void RunWorker(void* data) { + ThreadPool* const thread_pool = reinterpret_cast(data); std::function work = thread_pool->GetNextTask(); while (work != NULL) { work(); @@ -25,7 +25,7 @@ void RunWorker(void *data) { } } -ThreadPool::ThreadPool(const std::string &prefix, int num_workers) +ThreadPool::ThreadPool(const std::string& prefix, int num_workers) : num_workers_(num_workers) {} ThreadPool::~ThreadPool() { diff --git a/ortools/base/timer.cc b/ortools/base/timer.cc index ae0d2db761..46f878db1f 100644 --- a/ortools/base/timer.cc +++ b/ortools/base/timer.cc @@ -13,7 +13,7 @@ #include "ortools/base/timer.h" -ScopedWallTime::ScopedWallTime(double *aggregate_time) +ScopedWallTime::ScopedWallTime(double* aggregate_time) : aggregate_time_(aggregate_time), timer_() { DCHECK(aggregate_time != NULL); timer_.Start(); 
diff --git a/ortools/bop/bop_base.cc b/ortools/bop/bop_base.cc index e7268dfa6e..4be2880f60 100644 --- a/ortools/bop/bop_base.cc +++ b/ortools/bop/bop_base.cc @@ -26,7 +26,7 @@ namespace bop { using ::operations_research::sat::LinearBooleanProblem; using ::operations_research::sat::LinearObjective; -BopOptimizerBase::BopOptimizerBase(const std::string &name) +BopOptimizerBase::BopOptimizerBase(const std::string& name) : name_(name), stats_(name) { SCOPED_TIME_STAT(&stats_); } @@ -63,7 +63,7 @@ std::string BopOptimizerBase::GetStatusString(Status status) { //------------------------------------------------------------------------------ const int64 ProblemState::kInitialStampValue(0); -ProblemState::ProblemState(const LinearBooleanProblem &problem) +ProblemState::ProblemState(const LinearBooleanProblem& problem) : original_problem_(problem), parameters_(), update_stamp_(kInitialStampValue + 1), @@ -76,7 +76,7 @@ ProblemState::ProblemState(const LinearBooleanProblem &problem) upper_bound_(kint64max) { // TODO(user): Extract to a function used by all solvers. // Compute trivial unscaled lower bound. - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); lower_bound_ = 0; for (int i = 0; i < objective.coefficients_size(); ++i) { // Fix template version for or-tools. @@ -88,7 +88,7 @@ ProblemState::ProblemState(const LinearBooleanProblem &problem) // TODO(user): refactor this to not rely on the optimization status. // All the information can be encoded in the learned_info bounds. 
bool ProblemState::MergeLearnedInfo( - const LearnedInfo &learned_info, + const LearnedInfo& learned_info, BopOptimizerBase::Status optimization_status) { const std::string kIndent(25, ' '); @@ -244,7 +244,7 @@ void ProblemState::MarkAsInfeasible() { ++update_stamp_; } -const std::vector &ProblemState::NewlyAddedBinaryClauses() +const std::vector& ProblemState::NewlyAddedBinaryClauses() const { return binary_clause_manager_.newly_added(); } diff --git a/ortools/bop/bop_fs.cc b/ortools/bop/bop_fs.cc index 04c7824330..641923e70b 100644 --- a/ortools/bop/bop_fs.cc +++ b/ortools/bop/bop_fs.cc @@ -40,7 +40,7 @@ using ::operations_research::glop::DenseRow; using ::operations_research::glop::GlopParameters; using ::operations_research::glop::RowIndex; -BopOptimizerBase::Status SolutionStatus(const BopSolution &solution, +BopOptimizerBase::Status SolutionStatus(const BopSolution& solution, int64 lower_bound) { // The lower bound might be greater that the cost of a feasible solution due // to rounding errors in the problem scaling and Glop. @@ -50,7 +50,7 @@ BopOptimizerBase::Status SolutionStatus(const BopSolution &solution, : BopOptimizerBase::LIMIT_REACHED; } -bool AllIntegralValues(const DenseRow &values, double tolerance) { +bool AllIntegralValues(const DenseRow& values, double tolerance) { for (const glop::Fractional value : values) { // Note that this test is correct because in this part of the code, Bop // only deals with boolean variables. 
@@ -61,7 +61,7 @@ bool AllIntegralValues(const DenseRow &values, double tolerance) { return true; } -void DenseRowToBopSolution(const DenseRow &values, BopSolution *solution) { +void DenseRowToBopSolution(const DenseRow& values, BopSolution* solution) { CHECK(solution != nullptr); CHECK_EQ(solution->Size(), values.size()); for (VariableIndex var(0); var < solution->Size(); ++var) { @@ -75,7 +75,7 @@ void DenseRowToBopSolution(const DenseRow &values, BopSolution *solution) { //------------------------------------------------------------------------------ GuidedSatFirstSolutionGenerator::GuidedSatFirstSolutionGenerator( - const std::string &name, Policy policy) + const std::string& name, Policy policy) : BopOptimizerBase(name), policy_(policy), abort_(false), @@ -85,7 +85,7 @@ GuidedSatFirstSolutionGenerator::GuidedSatFirstSolutionGenerator( GuidedSatFirstSolutionGenerator::~GuidedSatFirstSolutionGenerator() {} BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::SynchronizeIfNeeded( - const ProblemState &problem_state) { + const ProblemState& problem_state) { if (state_update_stamp_ == problem_state.update_stamp()) { return BopOptimizerBase::CONTINUE; } @@ -98,7 +98,7 @@ BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::SynchronizeIfNeeded( // Add in symmetries. 
if (problem_state.GetParameters() .exploit_symmetry_in_sat_first_solution()) { - std::vector > generators; + std::vector> generators; sat::FindLinearBooleanProblemSymmetries(problem_state.original_problem(), &generators); std::unique_ptr propagator( @@ -143,7 +143,7 @@ BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::SynchronizeIfNeeded( } bool GuidedSatFirstSolutionGenerator::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { if (abort_) return false; if (policy_ == Policy::kLpGuided && problem_state.lp_values().empty()) { return false; @@ -156,8 +156,8 @@ bool GuidedSatFirstSolutionGenerator::ShouldBeRun( } BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); learned_info->Clear(); @@ -209,8 +209,8 @@ BopOptimizerBase::Status GuidedSatFirstSolutionGenerator::Optimize( // BopRandomFirstSolutionGenerator //------------------------------------------------------------------------------ BopRandomFirstSolutionGenerator::BopRandomFirstSolutionGenerator( - const std::string &name, const BopParameters ¶meters, - sat::SatSolver *sat_propagator, MTRandom *random) + const std::string& name, const BopParameters& parameters, + sat::SatSolver* sat_propagator, MTRandom* random) : BopOptimizerBase(name), random_(random), sat_propagator_(sat_propagator) {} @@ -219,20 +219,20 @@ BopRandomFirstSolutionGenerator::~BopRandomFirstSolutionGenerator() {} // Only run the RandomFirstSolution when there is an objective to minimize. 
bool BopRandomFirstSolutionGenerator::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { return problem_state.original_problem().objective().literals_size() > 0; } BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); learned_info->Clear(); // Save the current solver heuristics. const sat::SatParameters saved_params = sat_propagator_->parameters(); - const std::vector > saved_prefs = + const std::vector> saved_prefs = sat_propagator_->AllPreferences(); const int kMaxNumConflicts = 10; @@ -335,8 +335,8 @@ BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Optimize( //------------------------------------------------------------------------------ // LinearRelaxation //------------------------------------------------------------------------------ -LinearRelaxation::LinearRelaxation(const BopParameters ¶meters, - const std::string &name) +LinearRelaxation::LinearRelaxation(const BopParameters& parameters, + const std::string& name) : BopOptimizerBase(name), parameters_(parameters), state_update_stamp_(ProblemState::kInitialStampValue), @@ -353,7 +353,7 @@ LinearRelaxation::LinearRelaxation(const BopParameters ¶meters, LinearRelaxation::~LinearRelaxation() {} BopOptimizerBase::Status LinearRelaxation::SynchronizeIfNeeded( - const ProblemState &problem_state) { + const ProblemState& problem_state) { if (state_update_stamp_ == problem_state.update_stamp()) { return BopOptimizerBase::CONTINUE; } @@ -397,7 +397,7 @@ BopOptimizerBase::Status LinearRelaxation::SynchronizeIfNeeded( // Add learned binary clauses. 
if (parameters_.use_learned_binary_clauses_in_lp()) { - for (const sat::BinaryClause &clause : + for (const sat::BinaryClause& clause : problem_state.NewlyAddedBinaryClauses()) { const RowIndex constraint_index = lp_model_.CreateNewConstraint(); const int64 coefficient_a = clause.a.IsPositive() ? 1 : -1; @@ -434,14 +434,14 @@ BopOptimizerBase::Status LinearRelaxation::SynchronizeIfNeeded( // `BopParameters.max_lp_solve_for_feasibility_problems` to a non-zero value // (a negative value means no limit). // TODO(user): also deal with problem_already_solved_ -bool LinearRelaxation::ShouldBeRun(const ProblemState &problem_state) const { +bool LinearRelaxation::ShouldBeRun(const ProblemState& problem_state) const { return problem_state.original_problem().objective().literals_size() > 0 || parameters_.max_lp_solve_for_feasibility_problems() != 0; } BopOptimizerBase::Status LinearRelaxation::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); learned_info->Clear(); @@ -511,7 +511,7 @@ BopOptimizerBase::Status LinearRelaxation::Optimize( // can be used when a feasible solution is known, or when the false // best bound is computed. 
glop::ProblemStatus LinearRelaxation::Solve(bool incremental_solve, - TimeLimit *time_limit) { + TimeLimit* time_limit) { GlopParameters glop_params; if (incremental_solve) { glop_params.set_use_dual_simplex(true); @@ -527,7 +527,7 @@ glop::ProblemStatus LinearRelaxation::Solve(bool incremental_solve, } double LinearRelaxation::ComputeLowerBoundUsingStrongBranching( - LearnedInfo *learned_info, TimeLimit *time_limit) { + LearnedInfo* learned_info, TimeLimit* time_limit) { const glop::DenseRow initial_lp_values = lp_solver_.variable_values(); const double tolerance = lp_solver_.GetParameters().primal_feasibility_tolerance(); diff --git a/ortools/bop/bop_lns.cc b/ortools/bop/bop_lns.cc index 9b33e12739..72dbc2b521 100644 --- a/ortools/bop/bop_lns.cc +++ b/ortools/bop/bop_lns.cc @@ -44,8 +44,8 @@ using ::operations_research::sat::LinearBooleanProblem; //------------------------------------------------------------------------------ namespace { -void UseBopSolutionForSatAssignmentPreference(const BopSolution &solution, - sat::SatSolver *solver) { +void UseBopSolutionForSatAssignmentPreference(const BopSolution& solution, + sat::SatSolver* solver) { for (int i = 0; i < solution.Size(); ++i) { solver->SetAssignmentPreference( sat::Literal(sat::BooleanVariable(i), solution.Value(VariableIndex(i))), @@ -55,7 +55,7 @@ void UseBopSolutionForSatAssignmentPreference(const BopSolution &solution, } // namespace BopCompleteLNSOptimizer::BopCompleteLNSOptimizer( - const std::string &name, const BopConstraintTerms &objective_terms) + const std::string& name, const BopConstraintTerms& objective_terms) : BopOptimizerBase(name), state_update_stamp_(ProblemState::kInitialStampValue), objective_terms_(objective_terms) {} @@ -63,7 +63,7 @@ BopCompleteLNSOptimizer::BopCompleteLNSOptimizer( BopCompleteLNSOptimizer::~BopCompleteLNSOptimizer() {} BopOptimizerBase::Status BopCompleteLNSOptimizer::SynchronizeIfNeeded( - const ProblemState &problem_state, int num_relaxed_vars) { + const 
ProblemState& problem_state, int num_relaxed_vars) { if (state_update_stamp_ == problem_state.update_stamp()) { return BopOptimizerBase::CONTINUE; } @@ -107,13 +107,13 @@ BopOptimizerBase::Status BopCompleteLNSOptimizer::SynchronizeIfNeeded( } bool BopCompleteLNSOptimizer::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { return problem_state.solution().IsFeasible(); } BopOptimizerBase::Status BopCompleteLNSOptimizer::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { SCOPED_TIME_STAT(&stats_); CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); @@ -165,15 +165,15 @@ BopOptimizerBase::Status BopCompleteLNSOptimizer::Optimize( namespace { // Returns false if the limit is reached while solving the LP. bool UseLinearRelaxationForSatAssignmentPreference( - const BopParameters ¶meters, const LinearBooleanProblem &problem, - sat::SatSolver *sat_solver, TimeLimit *time_limit) { + const BopParameters& parameters, const LinearBooleanProblem& problem, + sat::SatSolver* sat_solver, TimeLimit* time_limit) { // TODO(user): Re-use the lp_model and lp_solver or build a model with only // needed constraints and variables. glop::LinearProgram lp_model; sat::ConvertBooleanProblemToLinearProgram(problem, &lp_model); // Set bounds of variables fixed by the sat_solver. - const sat::Trail &propagation_trail = sat_solver->LiteralTrail(); + const sat::Trail& propagation_trail = sat_solver->LiteralTrail(); for (int trail_index = 0; trail_index < propagation_trail.Index(); ++trail_index) { const sat::Literal fixed_literal = propagation_trail[trail_index]; @@ -211,9 +211,9 @@ bool UseLinearRelaxationForSatAssignmentPreference( // increased anyway. 
Maybe a better appproach is to start by relaxing something // like 10 variables instead of having a fixed percentage. BopAdaptiveLNSOptimizer::BopAdaptiveLNSOptimizer( - const std::string &name, bool use_lp_to_guide_sat, - NeighborhoodGenerator *neighborhood_generator, - sat::SatSolver *sat_propagator) + const std::string& name, bool use_lp_to_guide_sat, + NeighborhoodGenerator* neighborhood_generator, + sat::SatSolver* sat_propagator) : BopOptimizerBase(name), use_lp_to_guide_sat_(use_lp_to_guide_sat), neighborhood_generator_(neighborhood_generator), @@ -225,13 +225,13 @@ BopAdaptiveLNSOptimizer::BopAdaptiveLNSOptimizer( BopAdaptiveLNSOptimizer::~BopAdaptiveLNSOptimizer() {} bool BopAdaptiveLNSOptimizer::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { return problem_state.solution().IsFeasible(); } BopOptimizerBase::Status BopAdaptiveLNSOptimizer::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { SCOPED_TIME_STAT(&stats_); CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); @@ -258,7 +258,7 @@ BopOptimizerBase::Status BopAdaptiveLNSOptimizer::Optimize( // difficulty of the problem. There is one "target" difficulty for each // different numbers in the Luby sequence. Note that the initial value is // reused from the last run. - const BopParameters &local_parameters = parameters; + const BopParameters& local_parameters = parameters; int num_tries = 0; // TODO(user): remove? our limit is 1 by default. while (!time_limit->LimitReached() && num_tries < local_parameters.num_random_lns_tries()) { @@ -347,7 +347,7 @@ BopOptimizerBase::Status BopAdaptiveLNSOptimizer::Optimize( sat_solver.SetParameters(params); // Starts by adding the unit clauses to fix the variables. 
- const LinearBooleanProblem &problem = problem_state.original_problem(); + const LinearBooleanProblem& problem = problem_state.original_problem(); sat_solver.SetNumVariables(problem.num_variables()); for (int i = 0; i < sat_propagator_->LiteralTrail().Index(); ++i) { CHECK(sat_solver.AddUnitClause(sat_propagator_->LiteralTrail()[i])); @@ -410,11 +410,11 @@ BopOptimizerBase::Status BopAdaptiveLNSOptimizer::Optimize( namespace { std::vector ObjectiveVariablesAssignedToTheirLowCostValue( - const ProblemState &problem_state, - const BopConstraintTerms &objective_terms) { + const ProblemState& problem_state, + const BopConstraintTerms& objective_terms) { std::vector result; DCHECK(problem_state.solution().IsFeasible()); - for (const BopConstraintTerm &term : objective_terms) { + for (const BopConstraintTerm& term : objective_terms) { if (((problem_state.solution().Value(term.var_id) && term.weight < 0) || (!problem_state.solution().Value(term.var_id) && term.weight > 0))) { result.push_back( @@ -428,8 +428,8 @@ std::vector ObjectiveVariablesAssignedToTheirLowCostValue( } // namespace void ObjectiveBasedNeighborhood::GenerateNeighborhood( - const ProblemState &problem_state, double difficulty, - sat::SatSolver *sat_propagator) { + const ProblemState& problem_state, double difficulty, + sat::SatSolver* sat_propagator) { // Generate the set of variable we may fix and randomize their order. 
std::vector candidates = ObjectiveVariablesAssignedToTheirLowCostValue(problem_state, @@ -457,10 +457,10 @@ void ObjectiveBasedNeighborhood::GenerateNeighborhood( } void ConstraintBasedNeighborhood::GenerateNeighborhood( - const ProblemState &problem_state, double difficulty, - sat::SatSolver *sat_propagator) { + const ProblemState& problem_state, double difficulty, + sat::SatSolver* sat_propagator) { // Randomize the set of constraint - const LinearBooleanProblem &problem = problem_state.original_problem(); + const LinearBooleanProblem& problem = problem_state.original_problem(); const int num_constraints = problem.constraints_size(); std::vector ct_ids(num_constraints, 0); for (int ct_id = 0; ct_id < num_constraints; ++ct_id) ct_ids[ct_id] = ct_id; @@ -474,7 +474,7 @@ void ConstraintBasedNeighborhood::GenerateNeighborhood( std::vector variable_is_relaxed(problem.num_variables(), false); for (int i = 0; i < ct_ids.size(); ++i) { if (num_relaxed >= target) break; - const LinearBooleanConstraint &constraint = problem.constraints(ct_ids[i]); + const LinearBooleanConstraint& constraint = problem.constraints(ct_ids[i]); // We exclude really large constraints since they are probably note helpful // in picking a nice neighborhood. @@ -506,7 +506,7 @@ void ConstraintBasedNeighborhood::GenerateNeighborhood( } RelationGraphBasedNeighborhood::RelationGraphBasedNeighborhood( - const LinearBooleanProblem &problem, MTRandom *random) + const LinearBooleanProblem& problem, MTRandom* random) : random_(random) { const int num_variables = problem.num_variables(); columns_.resize(num_variables); @@ -520,7 +520,7 @@ RelationGraphBasedNeighborhood::RelationGraphBasedNeighborhood( // them. 
const double kSizeThreshold = 0.1; for (int i = 0; i < problem.constraints_size(); ++i) { - const LinearBooleanConstraint &constraint = problem.constraints(i); + const LinearBooleanConstraint& constraint = problem.constraints(i); if (constraint.literals_size() > kSizeThreshold * num_variables) continue; for (int j = 0; j < constraint.literals_size(); ++j) { const sat::Literal literal(constraint.literals(j)); @@ -531,8 +531,8 @@ RelationGraphBasedNeighborhood::RelationGraphBasedNeighborhood( } void RelationGraphBasedNeighborhood::GenerateNeighborhood( - const ProblemState &problem_state, double difficulty, - sat::SatSolver *sat_propagator) { + const ProblemState& problem_state, double difficulty, + sat::SatSolver* sat_propagator) { // Simply walk the graph until enough variable are relaxed. const int num_variables = sat_propagator->NumVariables(); const int target = round(difficulty * num_variables); @@ -549,7 +549,7 @@ void RelationGraphBasedNeighborhood::GenerateNeighborhood( const int var = queue.front(); queue.pop_front(); for (ConstraintIndex ct_index : columns_[VariableIndex(var)]) { - const LinearBooleanConstraint &constraint = + const LinearBooleanConstraint& constraint = problem_state.original_problem().constraints(ct_index.value()); for (int i = 0; i < constraint.literals_size(); ++i) { const sat::Literal literal(constraint.literals(i)); diff --git a/ortools/bop/bop_ls.cc b/ortools/bop/bop_ls.cc index 85d5e4c5e8..8c52e20160 100644 --- a/ortools/bop/bop_ls.cc +++ b/ortools/bop/bop_ls.cc @@ -29,9 +29,9 @@ using ::operations_research::sat::LinearObjective; // LocalSearchOptimizer //------------------------------------------------------------------------------ -LocalSearchOptimizer::LocalSearchOptimizer(const std::string &name, +LocalSearchOptimizer::LocalSearchOptimizer(const std::string& name, int max_num_decisions, - sat::SatSolver *sat_propagator) + sat::SatSolver* sat_propagator) : BopOptimizerBase(name), 
state_update_stamp_(ProblemState::kInitialStampValue), max_num_decisions_(max_num_decisions), @@ -41,13 +41,13 @@ LocalSearchOptimizer::LocalSearchOptimizer(const std::string &name, LocalSearchOptimizer::~LocalSearchOptimizer() {} bool LocalSearchOptimizer::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { return problem_state.solution().IsFeasible(); } BopOptimizerBase::Status LocalSearchOptimizer::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); learned_info->Clear(); @@ -177,7 +177,7 @@ template class BacktrackableIntegerSet; AssignmentAndConstraintFeasibilityMaintainer:: AssignmentAndConstraintFeasibilityMaintainer( - const LinearBooleanProblem &problem) + const LinearBooleanProblem& problem) : by_variable_matrix_(problem.num_variables()), constraint_lower_bounds_(), constraint_upper_bounds_(), @@ -187,7 +187,7 @@ AssignmentAndConstraintFeasibilityMaintainer:: flipped_var_trail_backtrack_levels_(), flipped_var_trail_() { // Add the objective constraint as the first constraint. - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); CHECK_EQ(objective.literals_size(), objective.coefficients_size()); for (int i = 0; i < objective.literals_size(); ++i) { CHECK_GT(objective.literals(i), 0); @@ -204,7 +204,7 @@ AssignmentAndConstraintFeasibilityMaintainer:: // Add each constraint. 
ConstraintIndex num_constraints_with_objective(1); - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { if (constraint.literals_size() <= 2) { // Infeasible binary constraints are automatically repaired by propagation // (when possible). Then there are no needs to consider the binary @@ -240,7 +240,7 @@ const ConstraintIndex AssignmentAndConstraintFeasibilityMaintainer::kObjectiveConstraint(0); void AssignmentAndConstraintFeasibilityMaintainer::SetReferenceSolution( - const BopSolution &reference_solution) { + const BopSolution& reference_solution) { CHECK(reference_solution.IsFeasible()); infeasible_constraint_set_.BacktrackAll(); @@ -254,7 +254,7 @@ void AssignmentAndConstraintFeasibilityMaintainer::SetReferenceSolution( constraint_values_.assign(NumConstraints(), 0); for (VariableIndex var(0); var < assignment_.Size(); ++var) { if (assignment_.Value(var)) { - for (const ConstraintEntry &entry : by_variable_matrix_[var]) { + for (const ConstraintEntry& entry : by_variable_matrix_[var]) { constraint_values_[entry.constraint] += entry.weight; } } @@ -293,14 +293,14 @@ void AssignmentAndConstraintFeasibilityMaintainer:: } void AssignmentAndConstraintFeasibilityMaintainer::Assign( - const std::vector &literals) { - for (const sat::Literal &literal : literals) { + const std::vector& literals) { + for (const sat::Literal& literal : literals) { const VariableIndex var(literal.Variable().value()); const bool value = literal.IsPositive(); if (assignment_.Value(var) != value) { flipped_var_trail_.push_back(var); assignment_.SetValue(var, value); - for (const ConstraintEntry &entry : by_variable_matrix_[var]) { + for (const ConstraintEntry& entry : by_variable_matrix_[var]) { const bool was_feasible = ConstraintIsFeasible(entry.constraint); constraint_values_[entry.constraint] += value ? 
entry.weight : -entry.weight; @@ -326,7 +326,7 @@ void AssignmentAndConstraintFeasibilityMaintainer::BacktrackOneLevel() { const bool new_value = !assignment_.Value(var); DCHECK_EQ(new_value, reference_.Value(var)); assignment_.SetValue(var, new_value); - for (const ConstraintEntry &entry : by_variable_matrix_[var]) { + for (const ConstraintEntry& entry : by_variable_matrix_[var]) { constraint_values_[entry.constraint] += new_value ? entry.weight : -entry.weight; } @@ -340,8 +340,8 @@ void AssignmentAndConstraintFeasibilityMaintainer::BacktrackAll() { while (!flipped_var_trail_backtrack_levels_.empty()) BacktrackOneLevel(); } -const std::vector - &AssignmentAndConstraintFeasibilityMaintainer::PotentialOneFlipRepairs() { +const std::vector& +AssignmentAndConstraintFeasibilityMaintainer::PotentialOneFlipRepairs() { if (!constraint_set_hasher_.IsInitialized()) { InitializeConstraintSetHasher(); } @@ -413,12 +413,11 @@ void AssignmentAndConstraintFeasibilityMaintainer:: constraint_set_hasher_.IgnoreElement( FromConstraintIndex(kObjectiveConstraint, false)); for (VariableIndex var(0); var < by_variable_matrix_.size(); ++var) { - // We add two entries, one for a positive flip (from false to true) and - // one + // We add two entries, one for a positive flip (from false to true) and one // for a negative flip (from true to false). 
for (const bool flip_is_positive : {true, false}) { uint64 hash = 0; - for (const ConstraintEntry &entry : by_variable_matrix_[var]) { + for (const ConstraintEntry& entry : by_variable_matrix_[var]) { const bool coeff_is_positive = entry.weight > 0; hash ^= constraint_set_hasher_.Hash(FromConstraintIndex( entry.constraint, @@ -435,9 +434,9 @@ void AssignmentAndConstraintFeasibilityMaintainer:: //------------------------------------------------------------------------------ OneFlipConstraintRepairer::OneFlipConstraintRepairer( - const LinearBooleanProblem &problem, - const AssignmentAndConstraintFeasibilityMaintainer &maintainer, - const sat::VariablesAssignment &sat_assignment) + const LinearBooleanProblem& problem, + const AssignmentAndConstraintFeasibilityMaintainer& maintainer, + const sat::VariablesAssignment& sat_assignment) : by_constraint_matrix_(problem.constraints_size() + 1), maintainer_(maintainer), sat_assignment_(sat_assignment) { @@ -448,7 +447,7 @@ OneFlipConstraintRepairer::OneFlipConstraintRepairer( // Add the objective constraint as the first constraint. ConstraintIndex num_constraint(0); - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); CHECK_EQ(objective.literals_size(), objective.coefficients_size()); for (int i = 0; i < objective.literals_size(); ++i) { CHECK_GT(objective.literals(i), 0); @@ -461,7 +460,7 @@ OneFlipConstraintRepairer::OneFlipConstraintRepairer( } // Add the non-binary problem constraints. - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { if (constraint.literals_size() <= 2) { // Infeasible binary constraints are automatically repaired by propagation // (when possible). 
Then there are no needs to consider the binary @@ -494,10 +493,10 @@ ConstraintIndex OneFlipConstraintRepairer::ConstraintToRepair() const { // Optimization: We inspect the constraints in reverse order because the // objective one will always be first (in our current code) and with some // luck, we will break early instead of fully exploring it. - const std::vector &infeasible_constraints = + const std::vector& infeasible_constraints = maintainer_.PossiblyInfeasibleConstraints(); for (int index = infeasible_constraints.size() - 1; index >= 0; --index) { - const ConstraintIndex &i = infeasible_constraints[index]; + const ConstraintIndex& i = infeasible_constraints[index]; if (maintainer_.ConstraintIsFeasible(i)) continue; --num_infeasible_constraints_left; @@ -514,7 +513,7 @@ ConstraintIndex OneFlipConstraintRepairer::ConstraintToRepair() const { const int64 ub = maintainer_.ConstraintUpperBound(i); int32 num_branches = 0; - for (const ConstraintTerm &term : by_constraint_matrix_[i]) { + for (const ConstraintTerm& term : by_constraint_matrix_[i]) { if (sat_assignment_.VariableIsAssigned( sat::BooleanVariable(term.var.value()))) { continue; @@ -542,7 +541,7 @@ ConstraintIndex OneFlipConstraintRepairer::ConstraintToRepair() const { TermIndex OneFlipConstraintRepairer::NextRepairingTerm( ConstraintIndex ct_index, TermIndex init_term_index, TermIndex start_term_index) const { - const gtl::ITIVector &terms = + const gtl::ITIVector& terms = by_constraint_matrix_[ct_index]; const int64 constraint_value = maintainer_.ConstraintValue(ct_index); const int64 lb = maintainer_.ConstraintLowerBound(ct_index); @@ -595,15 +594,15 @@ sat::Literal OneFlipConstraintRepairer::GetFlip(ConstraintIndex ct_index, void OneFlipConstraintRepairer::SortTermsOfEachConstraints(int num_variables) { gtl::ITIVector objective(num_variables, 0); - for (const ConstraintTerm &term : + for (const ConstraintTerm& term : by_constraint_matrix_[AssignmentAndConstraintFeasibilityMaintainer:: 
kObjectiveConstraint]) { objective[term.var] = std::abs(term.weight); } - for (gtl::ITIVector &terms : + for (gtl::ITIVector& terms : by_constraint_matrix_) { std::sort(terms.begin(), terms.end(), - [&objective](const ConstraintTerm &a, const ConstraintTerm &b) { + [&objective](const ConstraintTerm& a, const ConstraintTerm& b) { return objective[a.var] > objective[b.var]; }); } @@ -613,13 +612,13 @@ void OneFlipConstraintRepairer::SortTermsOfEachConstraints(int num_variables) { // SatWrapper //------------------------------------------------------------------------------ -SatWrapper::SatWrapper(sat::SatSolver *sat_solver) : sat_solver_(sat_solver) {} +SatWrapper::SatWrapper(sat::SatSolver* sat_solver) : sat_solver_(sat_solver) {} void SatWrapper::BacktrackAll() { sat_solver_->Backtrack(0); } std::vector SatWrapper::FullSatTrail() const { std::vector propagated_literals; - const sat::Trail &trail = sat_solver_->LiteralTrail(); + const sat::Trail& trail = sat_solver_->LiteralTrail(); for (int trail_index = 0; trail_index < trail.Index(); ++trail_index) { propagated_literals.push_back(trail[trail_index]); } @@ -627,7 +626,7 @@ std::vector SatWrapper::FullSatTrail() const { } int SatWrapper::ApplyDecision(sat::Literal decision_literal, - std::vector *propagated_literals) { + std::vector* propagated_literals) { CHECK(!sat_solver_->Assignment().VariableIsAssigned( decision_literal.Variable())); CHECK(propagated_literals != nullptr); @@ -643,7 +642,7 @@ int SatWrapper::ApplyDecision(sat::Literal decision_literal, // Return the propagated literals, whenever there is a conflict or not. // In case of conflict, these literals will have to be added to the last // decision point after backtrack. 
- const sat::Trail &propagation_trail = sat_solver_->LiteralTrail(); + const sat::Trail& propagation_trail = sat_solver_->LiteralTrail(); for (int trail_index = new_trail_index; trail_index < propagation_trail.Index(); ++trail_index) { propagated_literals->push_back(propagation_trail[trail_index]); @@ -659,7 +658,7 @@ void SatWrapper::BacktrackOneLevel() { } } -void SatWrapper::ExtractLearnedInfo(LearnedInfo *info) { +void SatWrapper::ExtractLearnedInfo(LearnedInfo* info) { bop::ExtractLearnedInfoFromSatSolver(sat_solver_, info); } @@ -672,8 +671,8 @@ double SatWrapper::deterministic_time() const { //------------------------------------------------------------------------------ LocalSearchAssignmentIterator::LocalSearchAssignmentIterator( - const ProblemState &problem_state, int max_num_decisions, - int max_num_broken_constraints, SatWrapper *sat_wrapper) + const ProblemState& problem_state, int max_num_decisions, + int max_num_broken_constraints, SatWrapper* sat_wrapper) : max_num_decisions_(max_num_decisions), max_num_broken_constraints_(max_num_broken_constraints), maintainer_(problem_state.original_problem()), @@ -702,10 +701,10 @@ LocalSearchAssignmentIterator::~LocalSearchAssignmentIterator() { } void LocalSearchAssignmentIterator::Synchronize( - const ProblemState &problem_state) { + const ProblemState& problem_state) { better_solution_has_been_found_ = false; maintainer_.SetReferenceSolution(problem_state.solution()); - for (const SearchNode &node : search_nodes_) { + for (const SearchNode& node : search_nodes_) { initial_term_index_[node.constraint] = node.term_index; } search_nodes_.clear(); @@ -730,7 +729,7 @@ void LocalSearchAssignmentIterator::SynchronizeSatWrapper() { maintainer_.Assign(sat_wrapper_->FullSatTrail()); search_nodes_.clear(); - for (const SearchNode &node : copy) { + for (const SearchNode& node : copy) { if (!repairer_.RepairIsValid(node.constraint, node.term_index)) break; search_nodes_.push_back(node); 
ApplyDecision(repairer_.GetFlip(node.constraint, node.term_index)); @@ -746,7 +745,7 @@ void LocalSearchAssignmentIterator::UseCurrentStateAsReference() { // variable and the new reference, so there is no need to do: // maintainer_.Assign(sat_wrapper_->FullSatTrail()); - for (const SearchNode &node : search_nodes_) { + for (const SearchNode& node : search_nodes_) { initial_term_index_[node.constraint] = node.term_index; } search_nodes_.clear(); @@ -853,9 +852,9 @@ void LocalSearchAssignmentIterator::ApplyDecision(sat::Literal literal) { } void LocalSearchAssignmentIterator::InitializeTranspositionTableKey( - std::array *a) { + std::array* a) { int i = 0; - for (const SearchNode &n : search_nodes_) { + for (const SearchNode& n : search_nodes_) { // Negated because we already fliped this variable, so GetFlip() will // returns the old value. (*a)[i] = -repairer_.GetFlip(n.constraint, n.term_index).SignedValue(); diff --git a/ortools/bop/bop_portfolio.cc b/ortools/bop/bop_portfolio.cc index 94f0865eaa..4ee752e4aa 100644 --- a/ortools/bop/bop_portfolio.cc +++ b/ortools/bop/bop_portfolio.cc @@ -32,13 +32,13 @@ using ::operations_research::sat::LinearBooleanProblem; using ::operations_research::sat::LinearObjective; namespace { -void BuildObjectiveTerms(const LinearBooleanProblem &problem, - BopConstraintTerms *objective_terms) { +void BuildObjectiveTerms(const LinearBooleanProblem& problem, + BopConstraintTerms* objective_terms) { CHECK(objective_terms != nullptr); if (!objective_terms->empty()) return; - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); const size_t num_objective_terms = objective.literals_size(); CHECK_EQ(num_objective_terms, objective.coefficients_size()); for (int i = 0; i < num_objective_terms; ++i) { @@ -56,8 +56,8 @@ void BuildObjectiveTerms(const LinearBooleanProblem &problem, // PortfolioOptimizer //------------------------------------------------------------------------------ 
PortfolioOptimizer::PortfolioOptimizer( - const ProblemState &problem_state, const BopParameters ¶meters, - const BopSolverOptimizerSet &optimizer_set, const std::string &name) + const ProblemState& problem_state, const BopParameters& parameters, + const BopSolverOptimizerSet& optimizer_set, const std::string& name) : BopOptimizerBase(name), random_(), state_update_stamp_(ProblemState::kInitialStampValue), @@ -92,7 +92,7 @@ PortfolioOptimizer::~PortfolioOptimizer() { } BopOptimizerBase::Status PortfolioOptimizer::SynchronizeIfNeeded( - const ProblemState &problem_state) { + const ProblemState& problem_state) { if (state_update_stamp_ == problem_state.update_stamp()) { return BopOptimizerBase::CONTINUE; } @@ -118,8 +118,8 @@ BopOptimizerBase::Status PortfolioOptimizer::SynchronizeIfNeeded( } BopOptimizerBase::Status PortfolioOptimizer::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); learned_info->Clear(); @@ -146,7 +146,7 @@ BopOptimizerBase::Status PortfolioOptimizer::Optimize( LOG(INFO) << "All the optimizers are done."; return BopOptimizerBase::ABORT; } - BopOptimizerBase *const selected_optimizer = + BopOptimizerBase* const selected_optimizer = optimizers_[selected_optimizer_id]; if (parameters.log_search_progress() || VLOG_IS_ON(1)) { LOG(INFO) << " " << lower_bound_ << " .. 
" << upper_bound_ << " " @@ -201,8 +201,8 @@ BopOptimizerBase::Status PortfolioOptimizer::Optimize( } void PortfolioOptimizer::AddOptimizer( - const LinearBooleanProblem &problem, const BopParameters ¶meters, - const BopOptimizerMethod &optimizer_method) { + const LinearBooleanProblem& problem, const BopParameters& parameters, + const BopOptimizerMethod& optimizer_method) { switch (optimizer_method.type()) { case BopOptimizerMethod::SAT_CORE_BASED: optimizers_.push_back(new SatCoreBasedOptimizer("SatCoreBasedOptimizer")); @@ -229,42 +229,48 @@ void PortfolioOptimizer::AddOptimizer( case BopOptimizerMethod::RANDOM_VARIABLE_LNS: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RandomVariableLns", /*use_lp_to_guide_sat=*/false, + "RandomVariableLns", + /*use_lp_to_guide_sat=*/false, new ObjectiveBasedNeighborhood(&objective_terms_, random_.get()), &sat_propagator_)); break; case BopOptimizerMethod::RANDOM_VARIABLE_LNS_GUIDED_BY_LP: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RandomVariableLnsWithLp", /*use_lp_to_guide_sat=*/true, + "RandomVariableLnsWithLp", + /*use_lp_to_guide_sat=*/true, new ObjectiveBasedNeighborhood(&objective_terms_, random_.get()), &sat_propagator_)); break; case BopOptimizerMethod::RANDOM_CONSTRAINT_LNS: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RandomConstraintLns", /*use_lp_to_guide_sat=*/false, + "RandomConstraintLns", + /*use_lp_to_guide_sat=*/false, new ConstraintBasedNeighborhood(&objective_terms_, random_.get()), &sat_propagator_)); break; case BopOptimizerMethod::RANDOM_CONSTRAINT_LNS_GUIDED_BY_LP: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RandomConstraintLnsWithLp", /*use_lp_to_guide_sat=*/true, + "RandomConstraintLnsWithLp", + /*use_lp_to_guide_sat=*/true, new 
ConstraintBasedNeighborhood(&objective_terms_, random_.get()), &sat_propagator_)); break; case BopOptimizerMethod::RELATION_GRAPH_LNS: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RelationGraphLns", /*use_lp_to_guide_sat=*/false, + "RelationGraphLns", + /*use_lp_to_guide_sat=*/false, new RelationGraphBasedNeighborhood(problem, random_.get()), &sat_propagator_)); break; case BopOptimizerMethod::RELATION_GRAPH_LNS_GUIDED_BY_LP: BuildObjectiveTerms(problem, &objective_terms_); optimizers_.push_back(new BopAdaptiveLNSOptimizer( - "RelationGraphLnsWithLp", /*use_lp_to_guide_sat=*/true, + "RelationGraphLnsWithLp", + /*use_lp_to_guide_sat=*/true, new RelationGraphBasedNeighborhood(problem, random_.get()), &sat_propagator_)); break; @@ -294,13 +300,13 @@ void PortfolioOptimizer::AddOptimizer( } void PortfolioOptimizer::CreateOptimizers( - const LinearBooleanProblem &problem, const BopParameters ¶meters, - const BopSolverOptimizerSet &optimizer_set) { + const LinearBooleanProblem& problem, const BopParameters& parameters, + const BopSolverOptimizerSet& optimizer_set) { random_ = absl::make_unique(parameters.random_seed()); if (parameters.use_symmetry()) { VLOG(1) << "Finding symmetries of the problem."; - std::vector > generators; + std::vector> generators; sat::FindLinearBooleanProblemSymmetries(problem, &generators); std::unique_ptr propagator( new sat::SymmetryPropagator); @@ -314,7 +320,7 @@ void PortfolioOptimizer::CreateOptimizers( const int max_num_optimizers = optimizer_set.methods_size() + parameters.max_num_decisions_in_ls() - 1; optimizers_.reserve(max_num_optimizers); - for (const BopOptimizerMethod &optimizer_method : optimizer_set.methods()) { + for (const BopOptimizerMethod& optimizer_method : optimizer_set.methods()) { const OptimizerIndex old_size(optimizers_.size()); AddOptimizer(problem, parameters, optimizer_method); } @@ -326,7 +332,7 @@ void PortfolioOptimizer::CreateOptimizers( // 
OptimizerSelector //------------------------------------------------------------------------------ OptimizerSelector::OptimizerSelector( - const gtl::ITIVector &optimizers) + const gtl::ITIVector& optimizers) : run_infos_(), selected_index_(optimizers.size()) { for (OptimizerIndex i(0); i < optimizers.size(); ++i) { info_positions_.push_back(run_infos_.size()); @@ -360,7 +366,7 @@ OptimizerIndex OptimizerSelector::SelectOptimizer() { const double time_spent = run_infos_[selected_index_].time_spent_since_last_solution; for (int i = 0; i < selected_index_; ++i) { - const RunInfo &info = run_infos_[i]; + const RunInfo& info = run_infos_[i]; if (info.RunnableAndSelectable() && info.time_spent_since_last_solution < time_spent) { too_much_time_spent = true; @@ -388,7 +394,7 @@ void OptimizerSelector::UpdateScore(int64 gain, double time_spent) { const double kErosion = 0.2; const double kMinScore = 1E-6; - RunInfo &info = run_infos_[selected_index_]; + RunInfo& info = run_infos_[selected_index_]; const double old_score = info.score; info.score = std::max(kMinScore, old_score * (1 - kErosion) + kErosion * new_score); @@ -411,7 +417,7 @@ void OptimizerSelector::SetOptimizerRunnability(OptimizerIndex optimizer_index, std::string OptimizerSelector::PrintStats( OptimizerIndex optimizer_index) const { - const RunInfo &info = run_infos_[info_positions_[optimizer_index]]; + const RunInfo& info = run_infos_[info_positions_[optimizer_index]]; return absl::StrFormat( " %40s : %3d/%-3d (%6.2f%%) Total gain: %6d Total Dtime: %0.3f " "score: %f\n", @@ -422,14 +428,14 @@ std::string OptimizerSelector::PrintStats( int OptimizerSelector::NumCallsForOptimizer( OptimizerIndex optimizer_index) const { - const RunInfo &info = run_infos_[info_positions_[optimizer_index]]; + const RunInfo& info = run_infos_[info_positions_[optimizer_index]]; return info.num_calls; } void OptimizerSelector::DebugPrint() const { std::string str; for (int i = 0; i < run_infos_.size(); ++i) { - const RunInfo &info 
= run_infos_[i]; + const RunInfo& info = run_infos_[i]; LOG(INFO) << " " << info.name << " " << info.total_gain << " / " << info.time_spent << " = " << info.score << " " << info.selectable << " " << info.time_spent_since_last_solution; @@ -454,7 +460,7 @@ void OptimizerSelector::UpdateDeterministicTime(double time_spent) { void OptimizerSelector::UpdateOrder() { // Re-sort optimizers. std::stable_sort(run_infos_.begin(), run_infos_.end(), - [](const RunInfo &a, const RunInfo &b) -> bool { + [](const RunInfo& a, const RunInfo& b) -> bool { if (a.total_gain == 0 && b.total_gain == 0) return a.time_spent < b.time_spent; return a.score > b.score; diff --git a/ortools/bop/bop_solution.cc b/ortools/bop/bop_solution.cc index 51ec4f3201..093f08439b 100644 --- a/ortools/bop/bop_solution.cc +++ b/ortools/bop/bop_solution.cc @@ -23,8 +23,8 @@ using ::operations_research::sat::LinearObjective; //------------------------------------------------------------------------------ // BopSolution //------------------------------------------------------------------------------ -BopSolution::BopSolution(const LinearBooleanProblem &problem, - const std::string &name) +BopSolution::BopSolution(const LinearBooleanProblem& problem, + const std::string& name) : problem_(&problem), name_(name), values_(problem.num_variables(), false), @@ -33,7 +33,7 @@ BopSolution::BopSolution(const LinearBooleanProblem &problem, cost_(0), is_feasible_(false) { // Try the lucky assignment, i.e. the optimal one if feasible. 
- const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); for (int i = 0; i < objective.coefficients_size(); ++i) { const VariableIndex var(objective.literals(i) - 1); values_[var] = objective.coefficients(i) < 0; @@ -43,7 +43,7 @@ BopSolution::BopSolution(const LinearBooleanProblem &problem, int64 BopSolution::ComputeCost() const { recompute_cost_ = false; int64 sum = 0; - const LinearObjective &objective = problem_->objective(); + const LinearObjective& objective = problem_->objective(); const size_t num_sparse_vars = objective.literals_size(); CHECK_EQ(num_sparse_vars, objective.coefficients_size()); for (int i = 0; i < num_sparse_vars; ++i) { @@ -58,7 +58,7 @@ int64 BopSolution::ComputeCost() const { bool BopSolution::ComputeIsFeasible() const { recompute_is_feasible_ = false; - for (const LinearBooleanConstraint &constraint : problem_->constraints()) { + for (const LinearBooleanConstraint& constraint : problem_->constraints()) { int64 sum = 0; const size_t num_sparse_vars = constraint.literals_size(); CHECK_EQ(num_sparse_vars, constraint.coefficients_size()); diff --git a/ortools/bop/bop_solver.cc b/ortools/bop/bop_solver.cc index ac8aa69c71..bff8a8b555 100644 --- a/ortools/bop/bop_solver.cc +++ b/ortools/bop/bop_solver.cc @@ -46,7 +46,7 @@ using ::operations_research::glop::DenseRow; // is proved infeasible. // Returns true when the problem_state has been changed. 
bool UpdateProblemStateBasedOnStatus(BopOptimizerBase::Status status, - ProblemState *problem_state) { + ProblemState* problem_state) { CHECK(nullptr != problem_state); if (BopOptimizerBase::OPTIMAL_SOLUTION_FOUND == status) { @@ -67,7 +67,7 @@ bool UpdateProblemStateBasedOnStatus(BopOptimizerBase::Status status, //------------------------------------------------------------------------------ // BopSolver //------------------------------------------------------------------------------ -BopSolver::BopSolver(const LinearBooleanProblem &problem) +BopSolver::BopSolver(const LinearBooleanProblem& problem) : problem_(problem), problem_state_(problem), parameters_(), @@ -83,7 +83,7 @@ BopSolveStatus BopSolver::Solve() { return SolveWithTimeLimit(time_limit.get()); } -BopSolveStatus BopSolver::SolveWithTimeLimit(TimeLimit *time_limit) { +BopSolveStatus BopSolver::SolveWithTimeLimit(TimeLimit* time_limit) { CHECK(time_limit != nullptr); SCOPED_TIME_STAT(&stats_); @@ -100,7 +100,7 @@ BopSolveStatus BopSolver::SolveWithTimeLimit(TimeLimit *time_limit) { : InternalMonothreadSolver(time_limit); } -BopSolveStatus BopSolver::InternalMonothreadSolver(TimeLimit *time_limit) { +BopSolveStatus BopSolver::InternalMonothreadSolver(TimeLimit* time_limit) { CHECK(time_limit != nullptr); LearnedInfo learned_info(problem_state_.original_problem()); PortfolioOptimizer optimizer(problem_state_, parameters_, @@ -135,20 +135,20 @@ BopSolveStatus BopSolver::InternalMonothreadSolver(TimeLimit *time_limit) { : BopSolveStatus::NO_SOLUTION_FOUND; } -BopSolveStatus BopSolver::InternalMultithreadSolver(TimeLimit *time_limit) { +BopSolveStatus BopSolver::InternalMultithreadSolver(TimeLimit* time_limit) { CHECK(time_limit != nullptr); // Not implemented. 
return BopSolveStatus::INVALID_PROBLEM; } -BopSolveStatus BopSolver::Solve(const BopSolution &first_solution) { +BopSolveStatus BopSolver::Solve(const BopSolution& first_solution) { std::unique_ptr time_limit = TimeLimit::FromParameters(parameters_); return SolveWithTimeLimit(first_solution, time_limit.get()); } -BopSolveStatus BopSolver::SolveWithTimeLimit(const BopSolution &first_solution, - TimeLimit *time_limit) { +BopSolveStatus BopSolver::SolveWithTimeLimit(const BopSolution& first_solution, + TimeLimit* time_limit) { SCOPED_TIME_STAT(&stats_); if (first_solution.IsFeasible()) { diff --git a/ortools/bop/bop_util.cc b/ortools/bop/bop_util.cc index 938468b85f..2d3cd5ee47 100644 --- a/ortools/bop/bop_util.cc +++ b/ortools/bop/bop_util.cc @@ -31,8 +31,8 @@ static const int kMaxBoost = 30; // Loads the problem state into the SAT solver. If the problem has already been // loaded in the sat_solver, fixed variables and objective bounds are updated. // Returns false when the problem is proved UNSAT. 
-bool InternalLoadStateProblemToSatSolver(const ProblemState &problem_state, - sat::SatSolver *sat_solver) { +bool InternalLoadStateProblemToSatSolver(const ProblemState& problem_state, + sat::SatSolver* sat_solver) { const bool first_time = (sat_solver->NumVariables() == 0); if (first_time) { sat_solver->SetNumVariables( @@ -85,7 +85,7 @@ bool InternalLoadStateProblemToSatSolver(const ProblemState &problem_state, } // anonymous namespace BopOptimizerBase::Status LoadStateProblemToSatSolver( - const ProblemState &problem_state, sat::SatSolver *sat_solver) { + const ProblemState& problem_state, sat::SatSolver* sat_solver) { if (InternalLoadStateProblemToSatSolver(problem_state, sat_solver)) { return BopOptimizerBase::CONTINUE; } @@ -95,8 +95,8 @@ BopOptimizerBase::Status LoadStateProblemToSatSolver( : BopOptimizerBase::INFEASIBLE; } -void ExtractLearnedInfoFromSatSolver(sat::SatSolver *solver, - LearnedInfo *info) { +void ExtractLearnedInfoFromSatSolver(sat::SatSolver* solver, + LearnedInfo* info) { CHECK(nullptr != solver); CHECK(nullptr != info); @@ -105,7 +105,7 @@ void ExtractLearnedInfoFromSatSolver(sat::SatSolver *solver, // Fixed variables. info->fixed_literals.clear(); - const sat::Trail &propagation_trail = solver->LiteralTrail(); + const sat::Trail& propagation_trail = solver->LiteralTrail(); const int root_size = solver->CurrentDecisionLevel() == 0 ? propagation_trail.Index() : solver->Decisions().front().trail_index; @@ -118,8 +118,8 @@ void ExtractLearnedInfoFromSatSolver(sat::SatSolver *solver, solver->ClearNewlyAddedBinaryClauses(); } -void SatAssignmentToBopSolution(const sat::VariablesAssignment &assignment, - BopSolution *solution) { +void SatAssignmentToBopSolution(const sat::VariablesAssignment& assignment, + BopSolution* solution) { CHECK(solution != nullptr); // Only extract the variables of the initial problem. 
diff --git a/ortools/bop/complete_optimizer.cc b/ortools/bop/complete_optimizer.cc index 0f8d3561a1..0d999f1b00 100644 --- a/ortools/bop/complete_optimizer.cc +++ b/ortools/bop/complete_optimizer.cc @@ -19,7 +19,7 @@ namespace operations_research { namespace bop { -SatCoreBasedOptimizer::SatCoreBasedOptimizer(const std::string &name) +SatCoreBasedOptimizer::SatCoreBasedOptimizer(const std::string& name) : BopOptimizerBase(name), state_update_stamp_(ProblemState::kInitialStampValue), initialized_(false), @@ -32,7 +32,7 @@ SatCoreBasedOptimizer::SatCoreBasedOptimizer(const std::string &name) SatCoreBasedOptimizer::~SatCoreBasedOptimizer() {} BopOptimizerBase::Status SatCoreBasedOptimizer::SynchronizeIfNeeded( - const ProblemState &problem_state) { + const ProblemState& problem_state) { if (state_update_stamp_ == problem_state.update_stamp()) { return BopOptimizerBase::CONTINUE; } @@ -52,7 +52,7 @@ BopOptimizerBase::Status SatCoreBasedOptimizer::SynchronizeIfNeeded( // This is used by the "stratified" approach. stratified_lower_bound_ = sat::Coefficient(0); - for (sat::EncodingNode *n : nodes_) { + for (sat::EncodingNode* n : nodes_) { stratified_lower_bound_ = std::max(stratified_lower_bound_, n->weight()); } } @@ -74,13 +74,13 @@ sat::SatSolver::Status SatCoreBasedOptimizer::SolveWithAssumptions() { // Only run this if there is an objective. 
bool SatCoreBasedOptimizer::ShouldBeRun( - const ProblemState &problem_state) const { + const ProblemState& problem_state) const { return problem_state.original_problem().objective().literals_size() > 0; } BopOptimizerBase::Status SatCoreBasedOptimizer::Optimize( - const BopParameters ¶meters, const ProblemState &problem_state, - LearnedInfo *learned_info, TimeLimit *time_limit) { + const BopParameters& parameters, const ProblemState& problem_state, + LearnedInfo* learned_info, TimeLimit* time_limit) { SCOPED_TIME_STAT(&stats_); CHECK(learned_info != nullptr); CHECK(time_limit != nullptr); diff --git a/ortools/bop/integral_solver.cc b/ortools/bop/integral_solver.cc index 6b14939682..93d6b0cf02 100644 --- a/ortools/bop/integral_solver.cc +++ b/ortools/bop/integral_solver.cc @@ -47,8 +47,8 @@ bool IsIntegerWithinTolerance(Fractional x) { // constraints have integer coefficients. // TODO(user): Move to SAT util. bool ProblemIsBooleanAndHasOnlyIntegralConstraints( - const LinearProgram &linear_problem) { - const glop::SparseMatrix &matrix = linear_problem.GetSparseMatrix(); + const LinearProgram& linear_problem) { + const glop::SparseMatrix& matrix = linear_problem.GetSparseMatrix(); for (ColIndex col(0); col < linear_problem.num_variables(); ++col) { const Fractional lower_bound = linear_problem.variable_lower_bounds()[col]; @@ -73,13 +73,13 @@ bool ProblemIsBooleanAndHasOnlyIntegralConstraints( // being booleans and all the constraints having only integral coefficients. // TODO(user): Move to SAT util. 
void BuildBooleanProblemWithIntegralConstraints( - const LinearProgram &linear_problem, const DenseRow &initial_solution, - LinearBooleanProblem *boolean_problem, - std::vector *boolean_initial_solution) { + const LinearProgram& linear_problem, const DenseRow& initial_solution, + LinearBooleanProblem* boolean_problem, + std::vector* boolean_initial_solution) { CHECK(boolean_problem != nullptr); boolean_problem->Clear(); - const glop::SparseMatrix &matrix = linear_problem.GetSparseMatrix(); + const glop::SparseMatrix& matrix = linear_problem.GetSparseMatrix(); // Create Boolean variables. for (ColIndex col(0); col < matrix.num_cols(); ++col) { boolean_problem->add_var_names(linear_problem.GetVariableName(col)); @@ -89,7 +89,7 @@ void BuildBooleanProblemWithIntegralConstraints( // Create constraints. for (RowIndex row(0); row < matrix.num_rows(); ++row) { - LinearBooleanConstraint *const constraint = + LinearBooleanConstraint* const constraint = boolean_problem->add_constraints(); constraint->set_name(linear_problem.GetConstraintName(row)); if (linear_problem.constraint_lower_bounds()[row] != -kInfinity) { @@ -105,7 +105,7 @@ void BuildBooleanProblemWithIntegralConstraints( // Store the constraint coefficients. 
for (ColIndex col(0); col < matrix.num_cols(); ++col) { for (const SparseColumn::Entry e : matrix.column(col)) { - LinearBooleanConstraint *const constraint = + LinearBooleanConstraint* const constraint = boolean_problem->mutable_constraints(e.row().value()); constraint->add_literals(col.value() + 1); constraint->add_coefficients(e.coefficient()); @@ -119,7 +119,7 @@ void BuildBooleanProblemWithIntegralConstraints( const int lb = std::round(linear_problem.variable_lower_bounds()[col]); const int ub = std::round(linear_problem.variable_upper_bounds()[col]); if (lb == ub) { - LinearBooleanConstraint *ct = boolean_problem->add_constraints(); + LinearBooleanConstraint* ct = boolean_problem->add_constraints(); ct->set_lower_bound(ub); ct->set_upper_bound(ub); ct->add_literals(col.value() + 1); @@ -138,7 +138,7 @@ void BuildBooleanProblemWithIntegralConstraints( GetBestScalingOfDoublesToInt64(coefficients, kint64max, &scaling_factor, &relative_error); const int64 gcd = ComputeGcdOfRoundedDoubles(coefficients, scaling_factor); - LinearObjective *const objective = boolean_problem->mutable_objective(); + LinearObjective* const objective = boolean_problem->mutable_objective(); objective->set_offset(linear_problem.objective_offset() * scaling_factor / gcd); @@ -196,13 +196,13 @@ class IntegralVariable { int GetNumberOfBooleanVariables() const { return bits_.size(); } - const std::vector &bits() const { return bits_; } - const std::vector &weights() const { return weights_; } + const std::vector& bits() const { return bits_; } + const std::vector& weights() const { return weights_; } int64 offset() const { return offset_; } // Returns the value of the integral variable based on the Boolean conversion // and the Boolean solution to the problem. 
- int64 GetSolutionValue(const BopSolution &solution) const; + int64 GetSolutionValue(const BopSolution& solution) const; // Returns the values of the Boolean variables based on the Boolean conversion // and the integral value of this variable. This only works for variables that @@ -263,7 +263,7 @@ void IntegralVariable::set_weight(VariableIndex var, int64 weight) { can_be_reversed_ = false; } -int64 IntegralVariable::GetSolutionValue(const BopSolution &solution) const { +int64 IntegralVariable::GetSolutionValue(const BopSolution& solution) const { int64 value = offset_; for (int i = 0; i < bits_.size(); ++i) { value += weights_[i] * solution.Value(bits_[i]); @@ -325,49 +325,49 @@ class IntegralProblemConverter { // solution is given (i.e. if its size is not zero), converts it into a // Boolean solution. // Returns false when the conversion fails. - bool ConvertToBooleanProblem(const LinearProgram &linear_problem, - const DenseRow &initial_solution, - LinearBooleanProblem *boolean_problem, - std::vector *boolean_initial_solution); + bool ConvertToBooleanProblem(const LinearProgram& linear_problem, + const DenseRow& initial_solution, + LinearBooleanProblem* boolean_problem, + std::vector* boolean_initial_solution); // Returns the value of a variable of the original problem based on the // Boolean conversion and the Boolean solution to the problem. int64 GetSolutionValue(ColIndex global_col, - const BopSolution &solution) const; + const BopSolution& solution) const; private: // Returns true when the linear_problem_ can be converted into a Boolean // problem. Note that floating weights and continuous variables are not // supported. - bool CheckProblem(const LinearProgram &linear_problem) const; + bool CheckProblem(const LinearProgram& linear_problem) const; // Initializes the type of each variable of the linear_problem_. 
- void InitVariableTypes(const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem); + void InitVariableTypes(const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem); // Converts all variables of the problem. - void ConvertAllVariables(const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem); + void ConvertAllVariables(const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem); // Adds all variables constraints, i.e. lower and upper bounds of variables. - void AddVariableConstraints(const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem); + void AddVariableConstraints(const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem); // Converts all constraints from LinearProgram to LinearBooleanProblem. - void ConvertAllConstraints(const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem); + void ConvertAllConstraints(const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem); // Converts the objective from LinearProgram to LinearBooleanProblem. - void ConvertObjective(const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem); + void ConvertObjective(const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem); // Converts the integral variable represented by col in the linear_problem_ // into an IntegralVariable using existing Boolean variables. // Returns false when existing Boolean variables are not enough to model // the integral variable. - bool ConvertUsingExistingBooleans(const LinearProgram &linear_problem, + bool ConvertUsingExistingBooleans(const LinearProgram& linear_problem, ColIndex col, - IntegralVariable *integral_var); + IntegralVariable* integral_var); // Creates the integral_var using the given linear_problem_ constraint. 
// The constraint is an equality constraint and contains only one integral @@ -378,15 +378,15 @@ class IntegralProblemConverter { // integral_var == (bound + sum(-w_i * b_i)) / w // Note that all divisions by w have to be integral as Bop only deals with // integral coefficients. - bool CreateVariableUsingConstraint(const LinearProgram &linear_problem, + bool CreateVariableUsingConstraint(const LinearProgram& linear_problem, RowIndex constraint, - IntegralVariable *integral_var); + IntegralVariable* integral_var); // Adds weighted integral variable represented by col to the current dense // constraint. Fractional AddWeightedIntegralVariable( ColIndex col, Fractional weight, - gtl::ITIVector *dense_weights); + gtl::ITIVector* dense_weights); // Scales weights and adds all non-zero scaled weights and literals to t. // t is a constraint or the objective. @@ -396,11 +396,11 @@ class IntegralProblemConverter { template double ScaleAndSparsifyWeights( double scaling_factor, int64 gcd, - const gtl::ITIVector &dense_weights, T *t); + const gtl::ITIVector& dense_weights, T* t); // Returns true when at least one element is non-zero. 
bool HasNonZeroWeights( - const gtl::ITIVector &dense_weights) const; + const gtl::ITIVector& dense_weights) const; bool problem_is_boolean_and_has_only_integral_constraints_; @@ -425,9 +425,9 @@ IntegralProblemConverter::IntegralProblemConverter() variable_types_() {} bool IntegralProblemConverter::ConvertToBooleanProblem( - const LinearProgram &linear_problem, const DenseRow &initial_solution, - LinearBooleanProblem *boolean_problem, - std::vector *boolean_initial_solution) { + const LinearProgram& linear_problem, const DenseRow& initial_solution, + LinearBooleanProblem* boolean_problem, + std::vector* boolean_initial_solution) { bool use_initial_solution = (initial_solution.size() > 0); if (use_initial_solution) { CHECK_EQ(initial_solution.size(), linear_problem.num_variables()) @@ -470,11 +470,11 @@ bool IntegralProblemConverter::ConvertToBooleanProblem( if (col >= 0) { (*boolean_initial_solution)[col] = (initial_solution[global_col] != 0); } else { - const IntegralVariable &integral_variable = + const IntegralVariable& integral_variable = integral_variables_[-col - 1]; - const std::vector &boolean_cols = + const std::vector& boolean_cols = integral_variable.bits(); - const std::vector &boolean_values = + const std::vector& boolean_values = integral_variable.GetBooleanSolutionValues( round(initial_solution[global_col])); if (!boolean_values.empty()) { @@ -492,7 +492,7 @@ bool IntegralProblemConverter::ConvertToBooleanProblem( } int64 IntegralProblemConverter::GetSolutionValue( - ColIndex global_col, const BopSolution &solution) const { + ColIndex global_col, const BopSolution& solution) const { if (problem_is_boolean_and_has_only_integral_constraints_) { return solution.Value(VariableIndex(global_col.value())); } @@ -503,7 +503,7 @@ int64 IntegralProblemConverter::GetSolutionValue( } bool IntegralProblemConverter::CheckProblem( - const LinearProgram &linear_problem) const { + const LinearProgram& linear_problem) const { for (ColIndex col(0); col < 
linear_problem.num_variables(); ++col) { if (!linear_problem.IsVariableInteger(col)) { LOG(ERROR) << "Variable " << linear_problem.GetVariableName(col) @@ -525,8 +525,8 @@ bool IntegralProblemConverter::CheckProblem( } void IntegralProblemConverter::InitVariableTypes( - const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem) { + const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem) { global_to_boolean_.assign(linear_problem.num_variables().value(), 0); variable_types_.assign(linear_problem.num_variables().value(), INTEGRAL); for (ColIndex col(0); col < linear_problem.num_variables(); ++col) { @@ -548,8 +548,8 @@ void IntegralProblemConverter::InitVariableTypes( } void IntegralProblemConverter::ConvertAllVariables( - const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem) { + const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem) { for (const ColIndex col : integral_indices_) { CHECK_EQ(INTEGRAL, variable_types_[col]); IntegralVariable integral_var; @@ -573,8 +573,8 @@ void IntegralProblemConverter::ConvertAllVariables( } void IntegralProblemConverter::ConvertAllConstraints( - const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem) { + const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem) { // TODO(user): This is the way it's done in glop/proto_utils.cc but having // to transpose looks unnecessary costly. 
glop::SparseMatrix transpose; @@ -612,7 +612,7 @@ void IntegralProblemConverter::ConvertAllConstraints( max_relative_error = std::max(relative_error, max_relative_error); max_scaling_factor = std::max(scaling_factor / gcd, max_scaling_factor); - LinearBooleanConstraint *constraint = boolean_problem->add_constraints(); + LinearBooleanConstraint* constraint = boolean_problem->add_constraints(); constraint->set_name(linear_problem.GetConstraintName(row)); const double bound_error = ScaleAndSparsifyWeights(scaling_factor, gcd, dense_weights, constraint); @@ -654,9 +654,9 @@ void IntegralProblemConverter::ConvertAllConstraints( } void IntegralProblemConverter::ConvertObjective( - const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem) { - LinearObjective *objective = boolean_problem->mutable_objective(); + const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem) { + LinearObjective* objective = boolean_problem->mutable_objective(); Fractional offset = 0.0; gtl::ITIVector dense_weights( num_boolean_variables_, 0.0); @@ -693,8 +693,8 @@ void IntegralProblemConverter::ConvertObjective( } void IntegralProblemConverter::AddVariableConstraints( - const LinearProgram &linear_problem, - LinearBooleanProblem *boolean_problem) { + const LinearProgram& linear_problem, + LinearBooleanProblem* boolean_problem) { for (ColIndex col(0); col < linear_problem.num_variables(); ++col) { const Fractional lower_bound = linear_problem.variable_lower_bounds()[col]; const Fractional upper_bound = linear_problem.variable_upper_bounds()[col]; @@ -707,7 +707,7 @@ void IntegralProblemConverter::AddVariableConstraints( if (is_fixed) { // Set the variable. const int fixed_value = lower_bound > -1.0 && upper_bound < 1.0 ? 
0 : 1; - LinearBooleanConstraint *constraint = + LinearBooleanConstraint* constraint = boolean_problem->add_constraints(); constraint->set_lower_bound(fixed_value); constraint->set_upper_bound(fixed_value); @@ -718,8 +718,8 @@ void IntegralProblemConverter::AddVariableConstraints( CHECK_EQ(INTEGRAL_EXPRESSED_AS_BOOLEAN, variable_types_[col]); // Integral variable. if (lower_bound != -kInfinity || upper_bound != kInfinity) { - const IntegralVariable &integral_var = integral_variables_[-pos - 1]; - LinearBooleanConstraint *constraint = + const IntegralVariable& integral_var = integral_variables_[-pos - 1]; + LinearBooleanConstraint* constraint = boolean_problem->add_constraints(); for (int i = 0; i < integral_var.bits().size(); ++i) { constraint->add_literals(integral_var.bits()[i].value() + 1); @@ -739,13 +739,13 @@ void IntegralProblemConverter::AddVariableConstraints( } bool IntegralProblemConverter::ConvertUsingExistingBooleans( - const LinearProgram &linear_problem, ColIndex col, - IntegralVariable *integral_var) { + const LinearProgram& linear_problem, ColIndex col, + IntegralVariable* integral_var) { CHECK(nullptr != integral_var); CHECK_EQ(INTEGRAL, variable_types_[col]); - const SparseMatrix &matrix = linear_problem.GetSparseMatrix(); - const SparseMatrix &transpose = linear_problem.GetTransposeSparseMatrix(); + const SparseMatrix& matrix = linear_problem.GetSparseMatrix(); + const SparseMatrix& transpose = linear_problem.GetTransposeSparseMatrix(); for (const SparseColumn::Entry var_entry : matrix.column(col)) { const RowIndex constraint = var_entry.row(); const Fractional lb = linear_problem.constraint_lower_bounds()[constraint]; @@ -786,12 +786,12 @@ bool IntegralProblemConverter::ConvertUsingExistingBooleans( } bool IntegralProblemConverter::CreateVariableUsingConstraint( - const LinearProgram &linear_problem, RowIndex constraint, - IntegralVariable *integral_var) { + const LinearProgram& linear_problem, RowIndex constraint, + IntegralVariable* 
integral_var) { CHECK(nullptr != integral_var); integral_var->Clear(); - const SparseMatrix &transpose = linear_problem.GetTransposeSparseMatrix(); + const SparseMatrix& transpose = linear_problem.GetTransposeSparseMatrix(); gtl::ITIVector dense_weights( num_boolean_variables_, 0.0); Fractional scale = 1.0; @@ -809,7 +809,7 @@ bool IntegralProblemConverter::CreateVariableUsingConstraint( CHECK_EQ(INTEGRAL_EXPRESSED_AS_BOOLEAN, variable_types_[col]); const int pos = global_to_boolean_[col]; CHECK_GT(0, pos); - const IntegralVariable &local_integral_var = + const IntegralVariable& local_integral_var = integral_variables_[-pos - 1]; variable_offset -= constraint_entry.coefficient() * local_integral_var.offset(); @@ -843,7 +843,7 @@ bool IntegralProblemConverter::CreateVariableUsingConstraint( Fractional IntegralProblemConverter::AddWeightedIntegralVariable( ColIndex col, Fractional weight, - gtl::ITIVector *dense_weights) { + gtl::ITIVector* dense_weights) { CHECK(nullptr != dense_weights); if (weight == 0.0) { @@ -857,7 +857,7 @@ Fractional IntegralProblemConverter::AddWeightedIntegralVariable( (*dense_weights)[VariableIndex(pos)] += weight; } else { // Integral variable. 
- const IntegralVariable &integral_var = integral_variables_[-pos - 1]; + const IntegralVariable& integral_var = integral_variables_[-pos - 1]; for (int i = 0; i < integral_var.bits().size(); ++i) { (*dense_weights)[integral_var.bits()[i]] += integral_var.weights()[i] * weight; @@ -870,7 +870,7 @@ Fractional IntegralProblemConverter::AddWeightedIntegralVariable( template double IntegralProblemConverter::ScaleAndSparsifyWeights( double scaling_factor, int64 gcd, - const gtl::ITIVector &dense_weights, T *t) { + const gtl::ITIVector& dense_weights, T* t) { CHECK(nullptr != t); double bound_error = 0.0; @@ -886,7 +886,7 @@ double IntegralProblemConverter::ScaleAndSparsifyWeights( return bound_error; } bool IntegralProblemConverter::HasNonZeroWeights( - const gtl::ITIVector &dense_weights) const { + const gtl::ITIVector& dense_weights) const { for (const Fractional weight : dense_weights) { if (weight != 0.0) { return true; @@ -895,11 +895,11 @@ bool IntegralProblemConverter::HasNonZeroWeights( return false; } -bool CheckSolution(const LinearProgram &linear_problem, - const glop::DenseRow &variable_values) { +bool CheckSolution(const LinearProgram& linear_problem, + const glop::DenseRow& variable_values) { glop::DenseColumn constraint_values(linear_problem.num_constraints(), 0); - const SparseMatrix &matrix = linear_problem.GetSparseMatrix(); + const SparseMatrix& matrix = linear_problem.GetSparseMatrix(); for (ColIndex col(0); col < linear_problem.num_variables(); ++col) { const Fractional lower_bound = linear_problem.variable_lower_bounds()[col]; const Fractional upper_bound = linear_problem.variable_upper_bounds()[col]; @@ -930,12 +930,12 @@ bool CheckSolution(const LinearProgram &linear_problem, } // Solves the given linear program and returns the solve status. 
-BopSolveStatus InternalSolve(const LinearProgram &linear_problem, - const BopParameters ¶meters, - const DenseRow &initial_solution, - TimeLimit *time_limit, DenseRow *variable_values, - Fractional *objective_value, - Fractional *best_bound) { +BopSolveStatus InternalSolve(const LinearProgram& linear_problem, + const BopParameters& parameters, + const DenseRow& initial_solution, + TimeLimit* time_limit, DenseRow* variable_values, + Fractional* objective_value, + Fractional* best_bound) { CHECK(variable_values != nullptr); CHECK(objective_value != nullptr); CHECK(best_bound != nullptr); @@ -974,7 +974,7 @@ BopSolveStatus InternalSolve(const LinearProgram &linear_problem, if (status == BopSolveStatus::OPTIMAL_SOLUTION_FOUND || status == BopSolveStatus::FEASIBLE_SOLUTION_FOUND) { // Compute objective value. - const BopSolution &solution = bop_solver.best_solution(); + const BopSolution& solution = bop_solver.best_solution(); CHECK(solution.IsFeasible()); *objective_value = linear_problem.objective_offset(); @@ -995,11 +995,11 @@ BopSolveStatus InternalSolve(const LinearProgram &linear_problem, return status; } -void RunOneBop(const BopParameters ¶meters, int problem_index, - const DenseRow &initial_solution, TimeLimit *time_limit, - LPDecomposer *decomposer, DenseRow *variable_values, - Fractional *objective_value, Fractional *best_bound, - BopSolveStatus *status) { +void RunOneBop(const BopParameters& parameters, int problem_index, + const DenseRow& initial_solution, TimeLimit* time_limit, + LPDecomposer* decomposer, DenseRow* variable_values, + Fractional* objective_value, Fractional* best_bound, + BopSolveStatus* status) { CHECK(decomposer != nullptr); CHECK(variable_values != nullptr); CHECK(objective_value != nullptr); @@ -1039,18 +1039,18 @@ void RunOneBop(const BopParameters ¶meters, int problem_index, IntegralSolver::IntegralSolver() : parameters_(), variable_values_(), objective_value_(0.0) {} -BopSolveStatus IntegralSolver::Solve(const LinearProgram 
&linear_problem) { +BopSolveStatus IntegralSolver::Solve(const LinearProgram& linear_problem) { return Solve(linear_problem, DenseRow()); } BopSolveStatus IntegralSolver::SolveWithTimeLimit( - const LinearProgram &linear_problem, TimeLimit *time_limit) { + const LinearProgram& linear_problem, TimeLimit* time_limit) { return SolveWithTimeLimit(linear_problem, DenseRow(), time_limit); } BopSolveStatus IntegralSolver::Solve( - const LinearProgram &linear_problem, - const DenseRow &user_provided_initial_solution) { + const LinearProgram& linear_problem, + const DenseRow& user_provided_initial_solution) { std::unique_ptr time_limit = TimeLimit::FromParameters(parameters_); return SolveWithTimeLimit(linear_problem, user_provided_initial_solution, @@ -1058,8 +1058,8 @@ BopSolveStatus IntegralSolver::Solve( } BopSolveStatus IntegralSolver::SolveWithTimeLimit( - const LinearProgram &linear_problem, - const DenseRow &user_provided_initial_solution, TimeLimit *time_limit) { + const LinearProgram& linear_problem, + const DenseRow& user_provided_initial_solution, TimeLimit* time_limit) { // We make a copy so that we can clear it if the presolve is active. DenseRow initial_solution = user_provided_initial_solution; if (initial_solution.size() > 0) { @@ -1070,7 +1070,7 @@ BopSolveStatus IntegralSolver::SolveWithTimeLimit( // Some code path requires to copy the given linear_problem. When this // happens, we will simply change the target of this pointer. 
- LinearProgram const *lp = &linear_problem; + LinearProgram const* lp = &linear_problem; BopSolveStatus status; if (lp->num_variables() >= parameters_.decomposer_num_variables_threshold()) { diff --git a/ortools/data/jobshop_scheduling_parser.cc b/ortools/data/jobshop_scheduling_parser.cc index 152457d2e0..e818f822b3 100644 --- a/ortools/data/jobshop_scheduling_parser.cc +++ b/ortools/data/jobshop_scheduling_parser.cc @@ -50,7 +50,7 @@ void JsspParser::SetMachines(int machine_count) { } } -bool JsspParser::ParseFile(const std::string &filename) { +bool JsspParser::ParseFile(const std::string& filename) { problem_.Clear(); // Try to detect the type of the data file. // - fjs suffix -> Flexible Jobshop @@ -62,7 +62,7 @@ bool JsspParser::ParseFile(const std::string &filename) { } else { problem_type_ = JSSP; } - for (const std::string &line : FileLines(filename)) { + for (const std::string& line : FileLines(filename)) { if (line.empty()) { continue; } @@ -104,7 +104,7 @@ bool JsspParser::ParseFile(const std::string &filename) { return parser_state_ != PARSING_ERROR; } -void JsspParser::ProcessJsspLine(const std::string &line) { +void JsspParser::ProcessJsspLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { @@ -134,11 +134,11 @@ void JsspParser::ProcessJsspLine(const std::string &line) { } case JOB_COUNT_READ: { CHECK_GE(words.size(), declared_machine_count_ * 2); - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); for (int i = 0; i < declared_machine_count_; ++i) { const int machine_id = strtoint32(words[2 * i]); const int64 duration = strtoint64(words[2 * i + 1]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); task->add_machine(machine_id); task->add_duration(duration); } @@ -166,7 +166,7 @@ void JsspParser::ProcessJsspLine(const std::string &line) { } } -void 
JsspParser::ProcessTaillardLine(const std::string &line) { +void JsspParser::ProcessTaillardLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); @@ -216,10 +216,10 @@ void JsspParser::ProcessTaillardLine(const std::string &line) { } case JOB_LENGTH_READ: { CHECK_EQ(declared_machine_count_, words.size()); - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); for (int i = 0; i < declared_machine_count_; ++i) { const int64 duration = strtoint64(words[i]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); task->add_machine(i); task->add_duration(duration); } @@ -232,7 +232,7 @@ void JsspParser::ProcessTaillardLine(const std::string &line) { } } } -void JsspParser::ProcessFlexibleLine(const std::string &line) { +void JsspParser::ProcessFlexibleLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { @@ -247,10 +247,10 @@ void JsspParser::ProcessFlexibleLine(const std::string &line) { case JOB_COUNT_READ: { const int operations_count = strtoint32(words[0]); int index = 1; - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); for (int operation = 0; operation < operations_count; ++operation) { const int alternatives_count = strtoint32(words[index++]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); for (int alt = 0; alt < alternatives_count; alt++) { // Machine id are 1 based. 
const int machine_id = strtoint32(words[index++]) - 1; @@ -271,7 +271,7 @@ void JsspParser::ProcessFlexibleLine(const std::string &line) { } } } -void JsspParser::ProcessSdstLine(const std::string &line) { +void JsspParser::ProcessSdstLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { @@ -287,11 +287,11 @@ void JsspParser::ProcessSdstLine(const std::string &line) { } case JOB_COUNT_READ: { CHECK_EQ(words.size(), declared_machine_count_ * 2); - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); for (int i = 0; i < declared_machine_count_; ++i) { const int machine_id = strtoint32(words[2 * i]); const int64 duration = strtoint64(words[2 * i + 1]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); task->add_machine(machine_id); task->add_duration(duration); } @@ -316,9 +316,9 @@ void JsspParser::ProcessSdstLine(const std::string &line) { } case MACHINE_READ: { CHECK_EQ(declared_job_count_, words.size()); - Machine *const machine = + Machine* const machine = problem_.mutable_machines(current_machine_index_); - for (const std::string &w : words) { + for (const std::string& w : words) { const int64 t = strtoint64(w); machine->mutable_transition_time_matrix()->add_transition_time(t); } @@ -336,7 +336,7 @@ void JsspParser::ProcessSdstLine(const std::string &line) { } } -void JsspParser::ProcessTardinessLine(const std::string &line) { +void JsspParser::ProcessTardinessLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { @@ -350,7 +350,7 @@ void JsspParser::ProcessTardinessLine(const std::string &line) { } case JOB_COUNT_READ: { CHECK_GE(words.size(), 6); - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); const int64 est = strtoint64(words[0]); if (est != 
0L) { job->mutable_earliest_start()->set_value(est); @@ -364,7 +364,7 @@ void JsspParser::ProcessTardinessLine(const std::string &line) { for (int i = 0; i < num_operations; ++i) { const int machine_id = strtoint32(words[4 + 2 * i]) - 1; // 1 based. const int64 duration = strtoint64(words[5 + 2 * i]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); task->add_machine(machine_id); task->add_duration(duration); } @@ -372,7 +372,7 @@ void JsspParser::ProcessTardinessLine(const std::string &line) { if (current_job_index_ == declared_job_count_) { // Fix tardiness weights if all integer from start. bool all_integral = true; - for (const Job &job : problem_.jobs()) { + for (const Job& job : problem_.jobs()) { if (job.lateness_cost_per_time_unit() % absl::GetFlag(FLAGS_jssp_scaling_up_factor) != 0) { @@ -381,7 +381,7 @@ void JsspParser::ProcessTardinessLine(const std::string &line) { } } if (all_integral) { - for (Job &job : *problem_.mutable_jobs()) { + for (Job& job : *problem_.mutable_jobs()) { job.set_lateness_cost_per_time_unit( job.lateness_cost_per_time_unit() / absl::GetFlag(FLAGS_jssp_scaling_up_factor)); @@ -401,7 +401,7 @@ void JsspParser::ProcessTardinessLine(const std::string &line) { } } -void JsspParser::ProcessPssLine(const std::string &line) { +void JsspParser::ProcessPssLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { @@ -435,8 +435,8 @@ void JsspParser::ProcessPssLine(const std::string &line) { CHECK_EQ(0, strtoint32(words[3])); const int machine_id = strtoint32(words[0]) - 1; const int duration = strtoint32(words[1]); - Job *const job = problem_.mutable_jobs(current_job_index_); - Task *const task = job->add_tasks(); + Job* const job = problem_.mutable_jobs(current_job_index_); + Task* const task = job->add_tasks(); task->add_machine(machine_id); task->add_duration(duration); if (++current_machine_index_ == declared_machine_count_) { @@ -447,7 
+447,7 @@ void JsspParser::ProcessPssLine(const std::string &line) { parser_state_ = JOBS_READ; transition_index_ = 0; for (int m = 0; m < declared_machine_count_; ++m) { - Machine *const machine = problem_.mutable_machines(m); + Machine* const machine = problem_.mutable_machines(m); for (int i = 0; i < declared_job_count_ * declared_job_count_; ++i) { machine->mutable_transition_time_matrix()->add_transition_time(0); @@ -478,7 +478,7 @@ void JsspParser::ProcessPssLine(const std::string &line) { break; } const int transition = strtoint32(words[0]); - Machine *const machine = problem_.mutable_machines(m1); + Machine* const machine = problem_.mutable_machines(m1); machine->mutable_transition_time_matrix()->set_transition_time( job1 * declared_job_count_ + job2, transition); if (transition_index_ == size * size) { @@ -493,17 +493,17 @@ void JsspParser::ProcessPssLine(const std::string &line) { } } -void JsspParser::ProcessEarlyTardyLine(const std::string &line) { +void JsspParser::ProcessEarlyTardyLine(const std::string& line) { const std::vector words = absl::StrSplit(line, ' ', absl::SkipEmpty()); switch (parser_state_) { case JOB_COUNT_READ: { CHECK_EQ(words.size(), declared_machine_count_ * 2 + 3); - Job *const job = problem_.mutable_jobs(current_job_index_); + Job* const job = problem_.mutable_jobs(current_job_index_); for (int i = 0; i < declared_machine_count_; ++i) { const int machine_id = strtoint32(words[2 * i]); const int64 duration = strtoint64(words[2 * i + 1]); - Task *const task = job->add_tasks(); + Task* const task = job->add_tasks(); task->add_machine(machine_id); task->add_duration(duration); } @@ -527,13 +527,13 @@ void JsspParser::ProcessEarlyTardyLine(const std::string &line) { } } -int JsspParser::strtoint32(const std::string &word) { +int JsspParser::strtoint32(const std::string& word) { int result; CHECK(absl::SimpleAtoi(word, &result)); return result; } -int64 JsspParser::strtoint64(const std::string &word) { +int64 
JsspParser::strtoint64(const std::string& word) { int64 result; CHECK(absl::SimpleAtoi(word, &result)); return result; diff --git a/ortools/data/rcpsp_parser.cc b/ortools/data/rcpsp_parser.cc index 4daccc0f5c..9f78ea2473 100644 --- a/ortools/data/rcpsp_parser.cc +++ b/ortools/data/rcpsp_parser.cc @@ -33,7 +33,7 @@ RcpspParser::RcpspParser() rcpsp_.set_horizon(-1); } -bool RcpspParser::ParseFile(const std::string &file_name) { +bool RcpspParser::ParseFile(const std::string& file_name) { if (load_status_ != NOT_STARTED) { return false; } @@ -43,7 +43,7 @@ bool RcpspParser::ParseFile(const std::string &file_name) { const bool is_patterson = absl::EndsWith(file_name, ".rcp"); load_status_ = HEADER_SECTION; - for (const std::string &line : FileLines(file_name)) { + for (const std::string& line : FileLines(file_name)) { if (is_rcpsp_max) { ProcessRcpspMaxLine(line); } else if (is_patterson) { @@ -65,7 +65,7 @@ bool RcpspParser::ParseFile(const std::string &file_name) { load_status_ == PARSING_FINISHED; } -void RcpspParser::ReportError(const std::string &line) { +void RcpspParser::ReportError(const std::string& line) { LOG(ERROR) << "Error: status = " << load_status_ << ", line = " << line; load_status_ = ERROR_FOUND; } @@ -75,7 +75,7 @@ void RcpspParser::SetNumDeclaredTasks(int t) { recipe_sizes_.resize(t + 2, 0); // The data format adds 2 sentinels. } -void RcpspParser::ProcessRcpspLine(const std::string &line) { +void RcpspParser::ProcessRcpspLine(const std::string& line) { if (absl::StartsWith(line, "***")) return; if (absl::StartsWith(line, "---")) return; @@ -116,14 +116,14 @@ void RcpspParser::ProcessRcpspLine(const std::string &line) { // Nothing to do. 
} else if (words.size() > 1 && words[1] == "renewable") { for (int i = 0; i < strtoint32(words[2]); ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_renewable(true); res->set_unit_cost(0); } } else if (words.size() > 1 && words[1] == "nonrenewable") { for (int i = 0; i < strtoint32(words[2]); ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_min_capacity(-1); res->set_renewable(false); @@ -169,7 +169,7 @@ void RcpspParser::ProcessRcpspLine(const std::string &line) { ReportError(line); break; } - Task *const task = rcpsp_.add_tasks(); + Task* const task = rcpsp_.add_tasks(); for (int i = 0; i < num_successors; ++i) { // The array of tasks is 0-based for us. task->add_successors(strtoint32(words[3 + i]) - 1); @@ -193,7 +193,7 @@ void RcpspParser::ProcessRcpspLine(const std::string &line) { ReportError(line); break; } - Recipe *const recipe = + Recipe* const recipe = rcpsp_.mutable_tasks(current_task_)->add_recipes(); recipe->set_duration(strtoint32(words[2])); for (int i = 0; i < rcpsp_.resources_size(); ++i) { @@ -207,7 +207,7 @@ void RcpspParser::ProcessRcpspLine(const std::string &line) { // New recipe for a current task. 
const int current_recipe = strtoint32(words[0]) - 1; CHECK_EQ(current_recipe, rcpsp_.tasks(current_task_).recipes_size()); - Recipe *const recipe = + Recipe* const recipe = rcpsp_.mutable_tasks(current_task_)->add_recipes(); recipe->set_duration(strtoint32(words[1])); for (int i = 0; i < rcpsp_.resources_size(); ++i) { @@ -251,7 +251,7 @@ void RcpspParser::ProcessRcpspLine(const std::string &line) { } } -void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { +void RcpspParser::ProcessRcpspMaxLine(const std::string& line) { const std::vector words = absl::StrSplit(line, absl::ByAnyChar(" :\t[]\r"), absl::SkipEmpty()); @@ -281,7 +281,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { if (rcpsp_.is_consumer_producer()) { const int num_nonrenewable_resources = strtoint32(words[1]); for (int i = 0; i < num_nonrenewable_resources; ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_min_capacity(-1); res->set_renewable(false); @@ -291,13 +291,13 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { const int num_renewable_resources = strtoint32(words[1]); const int num_nonrenewable_resources = strtoint32(words[2]); for (int i = 0; i < num_renewable_resources; ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_renewable(true); res->set_unit_cost(0); } for (int i = 0; i < num_nonrenewable_resources; ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_min_capacity(-1); res->set_renewable(false); @@ -336,7 +336,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { recipe_sizes_[task_id] = num_recipes; const int num_successors = strtoint32(words[2]); - Task *const task = rcpsp_.add_tasks(); + Task* const task = rcpsp_.add_tasks(); // Read successors. 
for (int i = 0; i < num_successors; ++i) { @@ -356,10 +356,10 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { const int num_successors = rcpsp_.tasks(t).successors_size(); int count = 0; for (int s = 0; s < num_successors; ++s) { - PerSuccessorDelays *const succ_delays = + PerSuccessorDelays* const succ_delays = rcpsp_.mutable_tasks(t)->add_successor_delays(); for (int r1 = 0; r1 < num_recipes; ++r1) { - PerRecipeDelays *const recipe_delays = + PerRecipeDelays* const recipe_delays = succ_delays->add_recipe_delays(); const int other = rcpsp_.tasks(t).successors(s); const int num_other_recipes = recipe_sizes_[other]; @@ -389,7 +389,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { ReportError(line); break; } - Recipe *const recipe = + Recipe* const recipe = rcpsp_.mutable_tasks(current_task_)->add_recipes(); recipe->set_duration(strtoint32(words[2])); for (int i = 0; i < rcpsp_.resources_size(); ++i) { @@ -411,7 +411,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { ReportError(line); break; } - Recipe *const recipe = + Recipe* const recipe = rcpsp_.mutable_tasks(current_task_)->add_recipes(); recipe->set_duration(0); for (int i = 0; i < rcpsp_.resources_size(); ++i) { @@ -425,7 +425,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { // New recipe for a current task. 
const int current_recipe = strtoint32(words[0]) - 1; CHECK_EQ(current_recipe, rcpsp_.tasks(current_task_).recipes_size()); - Recipe *const recipe = + Recipe* const recipe = rcpsp_.mutable_tasks(current_task_)->add_recipes(); recipe->set_duration(strtoint32(words[1])); for (int i = 0; i < rcpsp_.resources_size(); ++i) { @@ -480,7 +480,7 @@ void RcpspParser::ProcessRcpspMaxLine(const std::string &line) { } } -void RcpspParser::ProcessPattersonLine(const std::string &line) { +void RcpspParser::ProcessPattersonLine(const std::string& line) { const std::vector words = absl::StrSplit(line, absl::ByAnyChar(" :\t[]\r"), absl::SkipEmpty()); @@ -501,7 +501,7 @@ void RcpspParser::ProcessPattersonLine(const std::string &line) { // Creates resources. const int num_renewable_resources = strtoint32(words[1]); for (int i = 0; i < num_renewable_resources; ++i) { - Resource *const res = rcpsp_.add_resources(); + Resource* const res = rcpsp_.add_resources(); res->set_max_capacity(-1); res->set_min_capacity(-1); res->set_renewable(true); @@ -534,8 +534,8 @@ void RcpspParser::ProcessPattersonLine(const std::string &line) { break; } CHECK_EQ(current_task_, rcpsp_.tasks_size()); - Task *const task = rcpsp_.add_tasks(); - Recipe *const recipe = task->add_recipes(); + Task* const task = rcpsp_.add_tasks(); + Recipe* const recipe = task->add_recipes(); recipe->set_duration(strtoint32(words[0])); const int num_resources = rcpsp_.resources_size(); @@ -590,13 +590,13 @@ void RcpspParser::ProcessPattersonLine(const std::string &line) { } } -int RcpspParser::strtoint32(const std::string &word) { +int RcpspParser::strtoint32(const std::string& word) { int result; CHECK(absl::SimpleAtoi(word, &result)); return result; } -int64 RcpspParser::strtoint64(const std::string &word) { +int64 RcpspParser::strtoint64(const std::string& word) { int64 result; CHECK(absl::SimpleAtoi(word, &result)); return result; diff --git a/ortools/data/set_covering_parser.cc b/ortools/data/set_covering_parser.cc index 
d7f1dbe351..062cbf4c32 100644 --- a/ortools/data/set_covering_parser.cc +++ b/ortools/data/set_covering_parser.cc @@ -22,22 +22,22 @@ namespace scp { ScpParser::ScpParser() : section_(INIT), line_(0), remaining_(0), current_(0) {} -bool ScpParser::LoadProblem(const std::string &filename, Format format, - ScpData *data) { +bool ScpParser::LoadProblem(const std::string& filename, Format format, + ScpData* data) { section_ = INIT; line_ = 0; remaining_ = 0; current_ = 0; - for (const std::string &line : FileLines(filename)) { + for (const std::string& line : FileLines(filename)) { ProcessLine(line, format, data); if (section_ == ERROR) return false; } return section_ == END; } -void ScpParser::ProcessLine(const std::string &line, Format format, - ScpData *data) { +void ScpParser::ProcessLine(const std::string& line, Format format, + ScpData* data) { line_++; const std::vector words = absl::StrSplit(line, absl::ByAnyChar(" :\t\r"), absl::SkipEmpty()); @@ -151,7 +151,7 @@ void ScpParser::ProcessLine(const std::string &line, Format format, LogError(line, "Too many columns in a row declaration"); return; } - for (const std::string &w : words) { + for (const std::string& w : words) { remaining_--; const int column = strtoint32(w) - 1; // 1 based. 
data->AddRowInColumn(current_, column); @@ -183,19 +183,19 @@ void ScpParser::ProcessLine(const std::string &line, Format format, } } -void ScpParser::LogError(const std::string &line, const std::string &message) { +void ScpParser::LogError(const std::string& line, const std::string& message) { LOG(ERROR) << "Error on line " << line_ << ": " << message << "(" << line << ")"; section_ = ERROR; } -int ScpParser::strtoint32(const std::string &word) { +int ScpParser::strtoint32(const std::string& word) { int result; CHECK(absl::SimpleAtoi(word, &result)); return result; } -int64 ScpParser::strtoint64(const std::string &word) { +int64 ScpParser::strtoint64(const std::string& word) { int64 result; CHECK(absl::SimpleAtoi(word, &result)); return result; diff --git a/ortools/flatzinc/checker.cc b/ortools/flatzinc/checker.cc index f844c49f4e..4e429827e3 100644 --- a/ortools/flatzinc/checker.cc +++ b/ortools/flatzinc/checker.cc @@ -27,8 +27,8 @@ namespace { // Helpers -int64 Eval(const Argument &arg, - const std::function &evaluator) { +int64 Eval(const Argument& arg, + const std::function& evaluator) { switch (arg.type) { case Argument::INT_VALUE: { return arg.Value(); @@ -43,12 +43,12 @@ int64 Eval(const Argument &arg, } } -int Size(const Argument &arg) { +int Size(const Argument& arg) { return std::max(arg.values.size(), arg.variables.size()); } -int64 EvalAt(const Argument &arg, int pos, - const std::function &evaluator) { +int64 EvalAt(const Argument& arg, int pos, + const std::function& evaluator) { switch (arg.type) { case Argument::INT_LIST: { return arg.ValueAt(pos); @@ -66,8 +66,8 @@ int64 EvalAt(const Argument &arg, int pos, // Checkers bool CheckAllDifferentInt( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { absl::flat_hash_set visited; for (int i = 0; i < Size(ct.arguments[0]); ++i) { const int64 value = EvalAt(ct.arguments[0], i, evaluator); @@ -81,8 +81,8 @@ bool CheckAllDifferentInt( } 
bool CheckAlldifferentExcept0( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { absl::flat_hash_set visited; for (int i = 0; i < Size(ct.arguments[0]); ++i) { const int64 value = EvalAt(ct.arguments[0], i, evaluator); @@ -95,8 +95,8 @@ bool CheckAlldifferentExcept0( return true; } -bool CheckAmong(const Constraint &ct, - const std::function &evaluator) { +bool CheckAmong(const Constraint& ct, + const std::function& evaluator) { const int64 expected = Eval(ct.arguments[0], evaluator); int64 count = 0; for (int i = 0; i < Size(ct.arguments[1]); ++i) { @@ -108,8 +108,8 @@ bool CheckAmong(const Constraint &ct, } bool CheckArrayBoolAnd( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { int64 result = 1; for (int i = 0; i < Size(ct.arguments[0]); ++i) { @@ -120,9 +120,8 @@ bool CheckArrayBoolAnd( return result == Eval(ct.arguments[1], evaluator); } -bool CheckArrayBoolOr( - const Constraint &ct, - const std::function &evaluator) { +bool CheckArrayBoolOr(const Constraint& ct, + const std::function& evaluator) { int64 result = 0; for (int i = 0; i < Size(ct.arguments[0]); ++i) { @@ -134,8 +133,8 @@ bool CheckArrayBoolOr( } bool CheckArrayBoolXor( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { int64 result = 0; for (int i = 0; i < Size(ct.arguments[0]); ++i) { @@ -146,8 +145,8 @@ bool CheckArrayBoolXor( } bool CheckArrayIntElement( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { if (ct.arguments[0].variables.size() == 2) { // TODO(user): Check 2D element. 
return true; @@ -160,8 +159,8 @@ bool CheckArrayIntElement( } bool CheckArrayIntElementNonShifted( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { CHECK_EQ(ct.arguments[0].variables.size(), 1); const int64 index = Eval(ct.arguments[0], evaluator); const int64 element = EvalAt(ct.arguments[1], index, evaluator); @@ -170,8 +169,8 @@ bool CheckArrayIntElementNonShifted( } bool CheckArrayVarIntElement( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { if (ct.arguments[0].variables.size() == 2) { // TODO(user): Check 2D element. return true; @@ -183,8 +182,8 @@ bool CheckArrayVarIntElement( return element == target; } -bool CheckAtMostInt(const Constraint &ct, - const std::function &evaluator) { +bool CheckAtMostInt(const Constraint& ct, + const std::function& evaluator) { const int64 expected = Eval(ct.arguments[0], evaluator); const int64 value = Eval(ct.arguments[2], evaluator); @@ -195,16 +194,16 @@ bool CheckAtMostInt(const Constraint &ct, return count <= expected; } -bool CheckBoolAnd(const Constraint &ct, - const std::function &evaluator) { +bool CheckBoolAnd(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 status = Eval(ct.arguments[2], evaluator); return status == std::min(left, right); } -bool CheckBoolClause(const Constraint &ct, - const std::function &evaluator) { +bool CheckBoolClause(const Constraint& ct, + const std::function& evaluator) { int64 result = 0; // Positive variables. 
@@ -221,31 +220,31 @@ bool CheckBoolClause(const Constraint &ct, return result; } -bool CheckBoolNot(const Constraint &ct, - const std::function &evaluator) { +bool CheckBoolNot(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left == 1 - right; } -bool CheckBoolOr(const Constraint &ct, - const std::function &evaluator) { +bool CheckBoolOr(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 status = Eval(ct.arguments[2], evaluator); return status == std::max(left, right); } -bool CheckBoolXor(const Constraint &ct, - const std::function &evaluator) { +bool CheckBoolXor(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == (left + right == 1); } -bool CheckCircuit(const Constraint &ct, - const std::function &evaluator) { +bool CheckCircuit(const Constraint& ct, + const std::function& evaluator) { // There are two versions of the constraint. 0 based and 1 based. // Let's try to detect which one we have. 
const int size = Size(ct.arguments[0]); @@ -271,8 +270,8 @@ bool CheckCircuit(const Constraint &ct, return visited.size() == Size(ct.arguments[0]); } -int64 ComputeCount(const Constraint &ct, - const std::function &evaluator) { +int64 ComputeCount(const Constraint& ct, + const std::function& evaluator) { int64 result = 0; const int64 value = Eval(ct.arguments[1], evaluator); for (int i = 0; i < Size(ct.arguments[0]); ++i) { @@ -281,58 +280,58 @@ int64 ComputeCount(const Constraint &ct, return result; } -bool CheckCountEq(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountEq(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count == expected; } -bool CheckCountGeq(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountGeq(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count >= expected; } -bool CheckCountGt(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountGt(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count > expected; } -bool CheckCountLeq(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountLeq(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count <= expected; } -bool CheckCountLt(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountLt(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count < expected; } -bool CheckCountNeq(const Constraint &ct, - const 
std::function &evaluator) { +bool CheckCountNeq(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); return count != expected; } -bool CheckCountReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckCountReif(const Constraint& ct, + const std::function& evaluator) { const int64 count = ComputeCount(ct, evaluator); const int64 expected = Eval(ct.arguments[2], evaluator); const int64 status = Eval(ct.arguments[3], evaluator); return status == (expected == count); } -bool CheckCumulative(const Constraint &ct, - const std::function &evaluator) { +bool CheckCumulative(const Constraint& ct, + const std::function& evaluator) { // TODO(user): Improve complexity for large durations. const int64 capacity = Eval(ct.arguments[3], evaluator); const int size = Size(ct.arguments[0]); @@ -353,49 +352,48 @@ bool CheckCumulative(const Constraint &ct, return true; } -bool CheckDiffn(const Constraint &ct, - const std::function &evaluator) { +bool CheckDiffn(const Constraint& ct, + const std::function& evaluator) { return true; } -bool CheckDiffnK(const Constraint &ct, - const std::function &evaluator) { +bool CheckDiffnK(const Constraint& ct, + const std::function& evaluator) { return true; } bool CheckDiffnNonStrict( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { return true; } bool CheckDiffnNonStrictK( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { return true; } -bool CheckDisjunctive( - const Constraint &ct, - const std::function &evaluator) { +bool CheckDisjunctive(const Constraint& ct, + const std::function& evaluator) { return true; } bool CheckDisjunctiveStrict( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { return true; } bool 
CheckFalseConstraint( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { return false; } std::vector ComputeGlobalCardinalityCards( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { std::vector cards(Size(ct.arguments[1]), 0); absl::flat_hash_map positions; for (int i = 0; i < ct.arguments[1].values.size(); ++i) { @@ -413,8 +411,8 @@ std::vector ComputeGlobalCardinalityCards( } bool CheckGlobalCardinality( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const std::vector cards = ComputeGlobalCardinalityCards(ct, evaluator); CHECK_EQ(cards.size(), Size(ct.arguments[2])); for (int i = 0; i < Size(ct.arguments[2]); ++i) { @@ -427,8 +425,8 @@ bool CheckGlobalCardinality( } bool CheckGlobalCardinalityClosed( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const std::vector cards = ComputeGlobalCardinalityCards(ct, evaluator); CHECK_EQ(cards.size(), Size(ct.arguments[2])); for (int i = 0; i < Size(ct.arguments[2]); ++i) { @@ -445,8 +443,8 @@ bool CheckGlobalCardinalityClosed( } bool CheckGlobalCardinalityLowUp( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const std::vector cards = ComputeGlobalCardinalityCards(ct, evaluator); CHECK_EQ(cards.size(), ct.arguments[2].values.size()); CHECK_EQ(cards.size(), ct.arguments[3].values.size()); @@ -460,8 +458,8 @@ bool CheckGlobalCardinalityLowUp( } bool CheckGlobalCardinalityLowUpClosed( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const std::vector cards = ComputeGlobalCardinalityCards(ct, evaluator); CHECK_EQ(cards.size(), ct.arguments[2].values.size()); CHECK_EQ(cards.size(), 
ct.arguments[3].values.size()); @@ -479,8 +477,8 @@ bool CheckGlobalCardinalityLowUpClosed( } bool CheckGlobalCardinalityOld( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int size = Size(ct.arguments[1]); std::vector cards(size, 0); for (int i = 0; i < Size(ct.arguments[0]); ++i) { @@ -498,138 +496,138 @@ bool CheckGlobalCardinalityOld( return true; } -bool CheckIntAbs(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntAbs(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return std::abs(left) == right; } -bool CheckIntDiv(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntDiv(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == left / right; } -bool CheckIntEq(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntEq(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left == right; } -bool CheckIntEqImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntEqImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left == right)) || !status; } -bool CheckIntEqReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntEqReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const 
bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left == right); } -bool CheckIntGe(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGe(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left >= right; } -bool CheckIntGeImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left >= right)) || !status; } -bool CheckIntGeReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGeReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left >= right); } -bool CheckIntGt(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGt(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left > right; } -bool CheckIntGtImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGtImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left > right)) || !status; } -bool CheckIntGtReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntGtReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = 
Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left > right); } -bool CheckIntLe(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLe(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left <= right; } -bool CheckIntLeImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left <= right)) || !status; } -bool CheckIntLeReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLeReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left <= right); } -bool CheckIntLt(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLt(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left < right; } -bool CheckIntLtImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLtImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left < right)) || !status; } -bool CheckIntLtReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLtReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], 
evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left < right); } -int64 ComputeIntLin(const Constraint &ct, - const std::function &evaluator) { +int64 ComputeIntLin(const Constraint& ct, + const std::function& evaluator) { int64 result = 0; for (int i = 0; i < Size(ct.arguments[0]); ++i) { result += EvalAt(ct.arguments[0], i, evaluator) * @@ -638,16 +636,15 @@ int64 ComputeIntLin(const Constraint &ct, return result; } -bool CheckIntLinEq(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinEq(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); return left == right; } -bool CheckIntLinEqImp( - const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinEqImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; @@ -655,24 +652,23 @@ bool CheckIntLinEqImp( } bool CheckIntLinEqReif( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; return status == (left == right); } -bool CheckIntLinGe(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinGe(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); return left >= right; } -bool CheckIntLinGeImp( - const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinGeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = 
ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; @@ -680,24 +676,23 @@ bool CheckIntLinGeImp( } bool CheckIntLinGeReif( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; return status == (left >= right); } -bool CheckIntLinLe(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinLe(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); return left <= right; } -bool CheckIntLinLeImp( - const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinLeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; @@ -705,24 +700,23 @@ bool CheckIntLinLeImp( } bool CheckIntLinLeReif( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; return status == (left <= right); } -bool CheckIntLinNe(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinNe(const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); return left != right; } -bool CheckIntLinNeImp( - const Constraint &ct, - const std::function &evaluator) { +bool CheckIntLinNeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = 
ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; @@ -730,94 +724,94 @@ bool CheckIntLinNeImp( } bool CheckIntLinNeReif( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 left = ComputeIntLin(ct, evaluator); const int64 right = Eval(ct.arguments[2], evaluator); const bool status = Eval(ct.arguments[3], evaluator) != 0; return status == (left != right); } -bool CheckIntMax(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntMax(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 status = Eval(ct.arguments[2], evaluator); return status == std::max(left, right); } -bool CheckIntMin(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntMin(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 status = Eval(ct.arguments[2], evaluator); return status == std::min(left, right); } -bool CheckIntMinus(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntMinus(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == left - right; } -bool CheckIntMod(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntMod(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == left % right; } -bool CheckIntNe(const Constraint &ct, - const 
std::function &evaluator) { +bool CheckIntNe(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left != right; } -bool CheckIntNeImp(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntNeImp(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return (status && (left != right)) || !status; } -bool CheckIntNeReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntNeReif(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const bool status = Eval(ct.arguments[2], evaluator) != 0; return status == (left != right); } -bool CheckIntNegate(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntNegate(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); return left == -right; } -bool CheckIntPlus(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntPlus(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == left + right; } -bool CheckIntTimes(const Constraint &ct, - const std::function &evaluator) { +bool CheckIntTimes(const Constraint& ct, + const std::function& evaluator) { const int64 left = Eval(ct.arguments[0], evaluator); const int64 right = Eval(ct.arguments[1], evaluator); const int64 target = Eval(ct.arguments[2], evaluator); return target == left * right; } -bool CheckInverse(const Constraint &ct, - 
const std::function &evaluator) { +bool CheckInverse(const Constraint& ct, + const std::function& evaluator) { CHECK_EQ(Size(ct.arguments[0]), Size(ct.arguments[1])); const int size = Size(ct.arguments[0]); // Check all bounds. @@ -840,8 +834,8 @@ bool CheckInverse(const Constraint &ct, return true; } -bool CheckLexLessInt(const Constraint &ct, - const std::function &evaluator) { +bool CheckLexLessInt(const Constraint& ct, + const std::function& evaluator) { CHECK_EQ(Size(ct.arguments[0]), Size(ct.arguments[1])); for (int i = 0; i < Size(ct.arguments[0]); ++i) { const int64 x = EvalAt(ct.arguments[0], i, evaluator); @@ -858,8 +852,8 @@ bool CheckLexLessInt(const Constraint &ct, } bool CheckLexLesseqInt( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { CHECK_EQ(Size(ct.arguments[0]), Size(ct.arguments[1])); for (int i = 0; i < Size(ct.arguments[0]); ++i) { const int64 x = EvalAt(ct.arguments[0], i, evaluator); @@ -876,8 +870,8 @@ bool CheckLexLesseqInt( } bool CheckMaximumArgInt( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 max_index = Eval(ct.arguments[1], evaluator) - 1; const int64 max_value = EvalAt(ct.arguments[0], max_index, evaluator); // Checks that all value before max_index are < max_value. 
@@ -896,8 +890,8 @@ bool CheckMaximumArgInt( return true; } -bool CheckMaximumInt(const Constraint &ct, - const std::function &evaluator) { +bool CheckMaximumInt(const Constraint& ct, + const std::function& evaluator) { int64 max_value = kint64min; for (int i = 0; i < Size(ct.arguments[1]); ++i) { max_value = std::max(max_value, EvalAt(ct.arguments[1], i, evaluator)); @@ -906,8 +900,8 @@ bool CheckMaximumInt(const Constraint &ct, } bool CheckMinimumArgInt( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int64 min_index = Eval(ct.arguments[1], evaluator) - 1; const int64 min_value = EvalAt(ct.arguments[0], min_index, evaluator); // Checks that all value before min_index are > min_value. @@ -926,8 +920,8 @@ bool CheckMinimumArgInt( return true; } -bool CheckMinimumInt(const Constraint &ct, - const std::function &evaluator) { +bool CheckMinimumInt(const Constraint& ct, + const std::function& evaluator) { int64 min_value = kint64max; for (int i = 0; i < Size(ct.arguments[1]); ++i) { min_value = std::min(min_value, EvalAt(ct.arguments[1], i, evaluator)); @@ -936,9 +930,9 @@ bool CheckMinimumInt(const Constraint &ct, } bool CheckNetworkFlowConservation( - const Argument &arcs, const Argument &balance_input, - const Argument &flow_vars, - const std::function &evaluator) { + const Argument& arcs, const Argument& balance_input, + const Argument& flow_vars, + const std::function& evaluator) { std::vector balance(balance_input.values); const int num_arcs = Size(arcs) / 2; @@ -957,16 +951,15 @@ bool CheckNetworkFlowConservation( return true; } -bool CheckNetworkFlow( - const Constraint &ct, - const std::function &evaluator) { +bool CheckNetworkFlow(const Constraint& ct, + const std::function& evaluator) { return CheckNetworkFlowConservation(ct.arguments[0], ct.arguments[1], ct.arguments[2], evaluator); } bool CheckNetworkFlowCost( - const Constraint &ct, - const std::function &evaluator) { + const 
Constraint& ct, + const std::function& evaluator) { if (!CheckNetworkFlowConservation(ct.arguments[0], ct.arguments[1], ct.arguments[3], evaluator)) { return false; @@ -983,8 +976,8 @@ bool CheckNetworkFlowCost( return total_cost == Eval(ct.arguments[4], evaluator); } -bool CheckNvalue(const Constraint &ct, - const std::function &evaluator) { +bool CheckNvalue(const Constraint& ct, + const std::function& evaluator) { const int64 count = Eval(ct.arguments[0], evaluator); absl::flat_hash_set all_values; for (int i = 0; i < Size(ct.arguments[1]); ++i) { @@ -994,37 +987,37 @@ bool CheckNvalue(const Constraint &ct, return count == all_values.size(); } -bool CheckRegular(const Constraint &ct, - const std::function &evaluator) { +bool CheckRegular(const Constraint& ct, + const std::function& evaluator) { return true; } -bool CheckRegularNfa(const Constraint &ct, - const std::function &evaluator) { +bool CheckRegularNfa(const Constraint& ct, + const std::function& evaluator) { return true; } -bool CheckSetIn(const Constraint &ct, - const std::function &evaluator) { +bool CheckSetIn(const Constraint& ct, + const std::function& evaluator) { const int64 value = Eval(ct.arguments[0], evaluator); return ct.arguments[1].Contains(value); } -bool CheckSetNotIn(const Constraint &ct, - const std::function &evaluator) { +bool CheckSetNotIn(const Constraint& ct, + const std::function& evaluator) { const int64 value = Eval(ct.arguments[0], evaluator); return !ct.arguments[1].Contains(value); } -bool CheckSetInReif(const Constraint &ct, - const std::function &evaluator) { +bool CheckSetInReif(const Constraint& ct, + const std::function& evaluator) { const int64 value = Eval(ct.arguments[0], evaluator); const int64 status = Eval(ct.arguments[2], evaluator); return status == ct.arguments[1].Contains(value); } -bool CheckSlidingSum(const Constraint &ct, - const std::function &evaluator) { +bool CheckSlidingSum(const Constraint& ct, + const std::function& evaluator) { const int64 low = 
Eval(ct.arguments[0], evaluator); const int64 up = Eval(ct.arguments[1], evaluator); const int64 length = Eval(ct.arguments[2], evaluator); @@ -1046,8 +1039,8 @@ bool CheckSlidingSum(const Constraint &ct, return true; } -bool CheckSort(const Constraint &ct, - const std::function &evaluator) { +bool CheckSort(const Constraint& ct, + const std::function& evaluator) { CHECK_EQ(Size(ct.arguments[0]), Size(ct.arguments[1])); absl::flat_hash_map init_count; absl::flat_hash_map sorted_count; @@ -1067,8 +1060,8 @@ bool CheckSort(const Constraint &ct, return true; } -bool CheckSubCircuit(const Constraint &ct, - const std::function &evaluator) { +bool CheckSubCircuit(const Constraint& ct, + const std::function& evaluator) { absl::flat_hash_set visited; // Find inactive nodes (pointing to themselves). int64 current = -1; @@ -1096,14 +1089,14 @@ bool CheckSubCircuit(const Constraint &ct, return visited.size() == Size(ct.arguments[0]); } -bool CheckTableInt(const Constraint &ct, - const std::function &evaluator) { +bool CheckTableInt(const Constraint& ct, + const std::function& evaluator) { return true; } bool CheckSymmetricAllDifferent( - const Constraint &ct, - const std::function &evaluator) { + const Constraint& ct, + const std::function& evaluator) { const int size = Size(ct.arguments[0]); for (int i = 0; i < size; ++i) { const int64 value = EvalAt(ct.arguments[0], i, evaluator) - 1; @@ -1119,8 +1112,8 @@ bool CheckSymmetricAllDifferent( } using CallMap = absl::flat_hash_map< - std::string, std::function)> >; + std::string, std::function)>>; CallMap CreateCallMap() { CallMap m; @@ -1260,13 +1253,13 @@ CallMap CreateCallMap() { } // namespace -bool CheckSolution(const Model &model, - const std::function &evaluator) { +bool CheckSolution(const Model& model, + const std::function& evaluator) { bool ok = true; const CallMap call_map = CreateCallMap(); - for (Constraint *ct : model.constraints()) { + for (Constraint* ct : model.constraints()) { if (!ct->active) continue; - 
const auto &checker = gtl::FindOrDie(call_map, ct->type); + const auto& checker = gtl::FindOrDie(call_map, ct->type); if (!checker(*ct, evaluator)) { FZLOG << "Failing constraint " << ct->DebugString() << FZENDL; ok = false; diff --git a/ortools/flatzinc/cp_model_fz_solver.cc b/ortools/flatzinc/cp_model_fz_solver.cc index 19927dc3b1..9bc21fa932 100644 --- a/ortools/flatzinc/cp_model_fz_solver.cc +++ b/ortools/flatzinc/cp_model_fz_solver.cc @@ -67,14 +67,14 @@ struct CpModelProtoWithMapping { // Convert a flatzinc argument to a variable or a list of variable. // Note that we always encode a constant argument with a constant variable. - int LookupVar(const fz::Argument &argument); - std::vector LookupVars(const fz::Argument &argument); + int LookupVar(const fz::Argument& argument); + std::vector LookupVars(const fz::Argument& argument); // Create and return the indices of the IntervalConstraint corresponding // to the flatzinc "interval" specified by a start var and a duration var. // This method will cache intervals with the key . - std::vector CreateIntervals(const std::vector &starts, - const std::vector &durations); + std::vector CreateIntervals(const std::vector& starts, + const std::vector& durations); // Create and return the index of the IntervalConstraint corresponding // to the flatzinc "interval" specified by a start var and a size var. @@ -88,26 +88,26 @@ struct CpModelProtoWithMapping { int GetOrCreateOptionalInterval(int start_var, int size_var, int opt_var); // Helpers to fill a ConstraintProto. 
- void FillAMinusBInDomain(const std::vector &domain, - const fz::Constraint &fz_ct, ConstraintProto *ct); - void FillLinearConstraintWithGivenDomain(const std::vector &domain, - const fz::Constraint &fz_ct, - ConstraintProto *ct); - void FillConstraint(const fz::Constraint &fz_ct, ConstraintProto *ct); - void FillReifOrImpliedConstraint(const fz::Constraint &fz_ct, - ConstraintProto *ct); + void FillAMinusBInDomain(const std::vector& domain, + const fz::Constraint& fz_ct, ConstraintProto* ct); + void FillLinearConstraintWithGivenDomain(const std::vector& domain, + const fz::Constraint& fz_ct, + ConstraintProto* ct); + void FillConstraint(const fz::Constraint& fz_ct, ConstraintProto* ct); + void FillReifOrImpliedConstraint(const fz::Constraint& fz_ct, + ConstraintProto* ct); // Translates the flatzinc search annotations into the CpModelProto // search_order field. void TranslateSearchAnnotations( - const std::vector &search_annotations); + const std::vector& search_annotations); // The output proto. CpModelProto proto; SatParameters parameters; // Mapping from flatzinc variables to CpModelProto variables. - absl::flat_hash_map fz_var_to_index; + absl::flat_hash_map fz_var_to_index; absl::flat_hash_map constant_value_to_index; absl::flat_hash_map, int> start_size_opt_tuple_to_interval; @@ -120,21 +120,21 @@ int CpModelProtoWithMapping::LookupConstant(int64 value) { // Create the constant on the fly. 
const int index = proto.variables_size(); - IntegerVariableProto *var_proto = proto.add_variables(); + IntegerVariableProto* var_proto = proto.add_variables(); var_proto->add_domain(value); var_proto->add_domain(value); constant_value_to_index[value] = index; return index; } -int CpModelProtoWithMapping::LookupVar(const fz::Argument &argument) { +int CpModelProtoWithMapping::LookupVar(const fz::Argument& argument) { if (argument.HasOneValue()) return LookupConstant(argument.Value()); CHECK_EQ(argument.type, fz::Argument::INT_VAR_REF); return fz_var_to_index[argument.Var()]; } std::vector CpModelProtoWithMapping::LookupVars( - const fz::Argument &argument) { + const fz::Argument& argument) { std::vector result; if (argument.type == fz::Argument::VOID_ARGUMENT) return result; if (argument.type == fz::Argument::INT_LIST) { @@ -146,7 +146,7 @@ std::vector CpModelProtoWithMapping::LookupVars( result.push_back(LookupConstant(argument.Value())); } else { CHECK_EQ(argument.type, fz::Argument::INT_VAR_REF_ARRAY); - for (fz::IntegerVariable *var : argument.variables) { + for (fz::IntegerVariable* var : argument.variables) { CHECK(var != nullptr); result.push_back(fz_var_to_index[var]); } @@ -168,16 +168,16 @@ int CpModelProtoWithMapping::GetOrCreateOptionalInterval(int start_var, } const int interval_index = proto.constraints_size(); - auto *ct = proto.add_constraints(); + auto* ct = proto.add_constraints(); if (opt_var != kint32max) { ct->add_enforcement_literal(opt_var); } - auto *interval = ct->mutable_interval(); + auto* interval = ct->mutable_interval(); interval->set_start(start_var); interval->set_size(size_var); interval->set_end(proto.variables_size()); - auto *end_var = proto.add_variables(); + auto* end_var = proto.add_variables(); const auto start_proto = proto.variables(start_var); const auto size_proto = proto.variables(size_var); end_var->add_domain(start_proto.domain(0) + size_proto.domain(0)); @@ -188,7 +188,7 @@ int 
CpModelProtoWithMapping::GetOrCreateOptionalInterval(int start_var, } std::vector CpModelProtoWithMapping::CreateIntervals( - const std::vector &starts, const std::vector &durations) { + const std::vector& starts, const std::vector& durations) { std::vector intervals; for (int i = 0; i < starts.size(); ++i) { intervals.push_back(GetOrCreateInterval(starts[i], durations[i])); @@ -197,9 +197,9 @@ std::vector CpModelProtoWithMapping::CreateIntervals( } void CpModelProtoWithMapping::FillAMinusBInDomain( - const std::vector &domain, const fz::Constraint &fz_ct, - ConstraintProto *ct) { - auto *arg = ct->mutable_linear(); + const std::vector& domain, const fz::Constraint& fz_ct, + ConstraintProto* ct) { + auto* arg = ct->mutable_linear(); if (fz_ct.arguments[1].type == fz::Argument::INT_VALUE) { const int64 value = fz_ct.arguments[1].Value(); const int var_a = LookupVar(fz_ct.arguments[0]); @@ -236,9 +236,9 @@ void CpModelProtoWithMapping::FillAMinusBInDomain( } void CpModelProtoWithMapping::FillLinearConstraintWithGivenDomain( - const std::vector &domain, const fz::Constraint &fz_ct, - ConstraintProto *ct) { - auto *arg = ct->mutable_linear(); + const std::vector& domain, const fz::Constraint& fz_ct, + ConstraintProto* ct) { + auto* arg = ct->mutable_linear(); for (const int64 domain_bound : domain) arg->add_domain(domain_bound); std::vector vars = LookupVars(fz_ct.arguments[1]); for (int i = 0; i < vars.size(); ++i) { @@ -247,13 +247,13 @@ void CpModelProtoWithMapping::FillLinearConstraintWithGivenDomain( } } -void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, - ConstraintProto *ct) { +void CpModelProtoWithMapping::FillConstraint(const fz::Constraint& fz_ct, + ConstraintProto* ct) { if (fz_ct.type == "false_constraint") { // An empty clause is always false. 
ct->mutable_bool_or(); } else if (fz_ct.type == "bool_clause") { - auto *arg = ct->mutable_bool_or(); + auto* arg = ct->mutable_bool_or(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(TrueLiteral(var)); } @@ -269,7 +269,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // not(x) => a == b ct->add_enforcement_literal(NegatedRef(x)); - auto *const refute = ct->mutable_linear(); + auto* const refute = ct->mutable_linear(); refute->add_vars(a); refute->add_coeffs(1); refute->add_vars(b); @@ -278,9 +278,9 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, refute->add_domain(0); // x => a + b == 1 - auto *ct2 = proto.add_constraints(); + auto* ct2 = proto.add_constraints(); ct2->add_enforcement_literal(x); - auto *const enforce = ct2->mutable_linear(); + auto* const enforce = ct2->mutable_linear(); enforce->add_vars(a); enforce->add_coeffs(1); enforce->add_vars(b); @@ -288,27 +288,27 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, enforce->add_domain(1); enforce->add_domain(1); } else if (fz_ct.type == "array_bool_or") { - auto *arg = ct->mutable_bool_or(); + auto* arg = ct->mutable_bool_or(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(TrueLiteral(var)); } } else if (fz_ct.type == "array_bool_or_negated") { - auto *arg = ct->mutable_bool_and(); + auto* arg = ct->mutable_bool_and(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(FalseLiteral(var)); } } else if (fz_ct.type == "array_bool_and") { - auto *arg = ct->mutable_bool_and(); + auto* arg = ct->mutable_bool_and(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(TrueLiteral(var)); } } else if (fz_ct.type == "array_bool_and_negated") { - auto *arg = ct->mutable_bool_or(); + auto* arg = ct->mutable_bool_or(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(FalseLiteral(var)); } } else if (fz_ct.type == 
"array_bool_xor") { - auto *arg = ct->mutable_bool_xor(); + auto* arg = ct->mutable_bool_xor(); for (const int var : LookupVars(fz_ct.arguments[0])) { arg->add_literals(TrueLiteral(var)); } @@ -324,7 +324,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, fz_ct.type == "bool2int") { FillAMinusBInDomain({0, 0}, fz_ct, ct); } else if (fz_ct.type == "bool_ne" || fz_ct.type == "bool_not") { - auto *arg = ct->mutable_linear(); + auto* arg = ct->mutable_linear(); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_coeffs(1); arg->add_vars(LookupVar(fz_ct.arguments[1])); @@ -337,7 +337,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, const int64 rhs = fz_ct.arguments[2].values[0]; FillLinearConstraintWithGivenDomain({rhs, rhs}, fz_ct, ct); } else if (fz_ct.type == "bool_lin_eq") { - auto *arg = ct->mutable_linear(); + auto* arg = ct->mutable_linear(); const std::vector vars = LookupVars(fz_ct.arguments[1]); for (int i = 0; i < vars.size(); ++i) { arg->add_vars(vars[i]); @@ -370,7 +370,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, FillLinearConstraintWithGivenDomain( {kint64min, rhs - 1, rhs + 1, kint64max}, fz_ct, ct); } else if (fz_ct.type == "set_in") { - auto *arg = ct->mutable_linear(); + auto* arg = ct->mutable_linear(); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_coeffs(1); if (fz_ct.arguments[1].type == fz::Argument::INT_LIST) { @@ -386,7 +386,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, LOG(FATAL) << "Wrong format"; } } else if (fz_ct.type == "set_in_negated") { - auto *arg = ct->mutable_linear(); + auto* arg = ct->mutable_linear(); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_coeffs(1); if (fz_ct.arguments[1].type == fz::Argument::INT_LIST) { @@ -405,35 +405,35 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, LOG(FATAL) << "Wrong format"; } } else if (fz_ct.type == "int_min") { - auto *arg 
= ct->mutable_int_min(); + auto* arg = ct->mutable_int_min(); arg->set_target(LookupVar(fz_ct.arguments[2])); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(LookupVar(fz_ct.arguments[1])); } else if (fz_ct.type == "array_int_minimum" || fz_ct.type == "minimum_int") { - auto *arg = ct->mutable_int_min(); + auto* arg = ct->mutable_int_min(); arg->set_target(LookupVar(fz_ct.arguments[0])); for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); } else if (fz_ct.type == "int_max") { - auto *arg = ct->mutable_int_max(); + auto* arg = ct->mutable_int_max(); arg->set_target(LookupVar(fz_ct.arguments[2])); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(LookupVar(fz_ct.arguments[1])); } else if (fz_ct.type == "array_int_maximum" || fz_ct.type == "maximum_int") { - auto *arg = ct->mutable_int_max(); + auto* arg = ct->mutable_int_max(); arg->set_target(LookupVar(fz_ct.arguments[0])); for (const int var : LookupVars(fz_ct.arguments[1])) arg->add_vars(var); } else if (fz_ct.type == "int_times") { - auto *arg = ct->mutable_int_prod(); + auto* arg = ct->mutable_int_prod(); arg->set_target(LookupVar(fz_ct.arguments[2])); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(LookupVar(fz_ct.arguments[1])); } else if (fz_ct.type == "int_abs") { - auto *arg = ct->mutable_int_max(); + auto* arg = ct->mutable_int_max(); arg->set_target(LookupVar(fz_ct.arguments[1])); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(-LookupVar(fz_ct.arguments[0]) - 1); } else if (fz_ct.type == "int_plus") { - auto *arg = ct->mutable_linear(); + auto* arg = ct->mutable_linear(); FillDomainInProto(Domain(0, 0), arg); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_coeffs(1); @@ -442,12 +442,12 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, arg->add_vars(LookupVar(fz_ct.arguments[2])); arg->add_coeffs(-1); } else if (fz_ct.type == "int_div") { - auto *arg = ct->mutable_int_div(); + auto* arg = 
ct->mutable_int_div(); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(LookupVar(fz_ct.arguments[1])); arg->set_target(LookupVar(fz_ct.arguments[2])); } else if (fz_ct.type == "int_mod") { - auto *arg = ct->mutable_int_mod(); + auto* arg = ct->mutable_int_mod(); arg->set_target(LookupVar(fz_ct.arguments[2])); arg->add_vars(LookupVar(fz_ct.arguments[0])); arg->add_vars(LookupVar(fz_ct.arguments[1])); @@ -458,7 +458,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, fz_ct.type == "array_int_element_nonshifted") { if (fz_ct.arguments[0].type == fz::Argument::INT_VAR_REF || fz_ct.arguments[0].type == fz::Argument::INT_VALUE) { - auto *arg = ct->mutable_element(); + auto* arg = ct->mutable_element(); arg->set_index(LookupVar(fz_ct.arguments[0])); arg->set_target(LookupVar(fz_ct.arguments[2])); @@ -473,14 +473,14 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // Special case added by the presolve or in flatzinc. We encode this // as a table constraint. CHECK(!absl::EndsWith(fz_ct.type, "_nonshifted")); - auto *arg = ct->mutable_table(); + auto* arg = ct->mutable_table(); // the constraint is: // values[coeff1 * vars[0] + coeff2 * vars[1] + offset] == target. 
for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); arg->add_vars(LookupVar(fz_ct.arguments[2])); // the target - const std::vector &values = fz_ct.arguments[1].values; + const std::vector& values = fz_ct.arguments[1].values; const int64 coeff1 = fz_ct.arguments[3].values[0]; const int64 coeff2 = fz_ct.arguments[3].values[1]; const int64 offset = fz_ct.arguments[4].values[0] - 1; @@ -497,11 +497,11 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, } } } else if (fz_ct.type == "ortools_table_int") { - auto *arg = ct->mutable_table(); + auto* arg = ct->mutable_table(); for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); for (const int64 value : fz_ct.arguments[1].values) arg->add_values(value); } else if (fz_ct.type == "ortools_regular") { - auto *arg = ct->mutable_automaton(); + auto* arg = ct->mutable_automaton(); for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); int count = 0; @@ -542,14 +542,14 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, } } } else if (fz_ct.type == "fzn_all_different_int") { - auto *arg = ct->mutable_all_diff(); + auto* arg = ct->mutable_all_diff(); for (const int var : LookupVars(fz_ct.arguments[0])) arg->add_vars(var); } else if (fz_ct.type == "fzn_circuit" || fz_ct.type == "fzn_subcircuit") { // Try to auto-detect if it is zero or one based. bool found_zero = false; bool found_size = false; const int size = fz_ct.arguments[0].variables.size(); - for (fz::IntegerVariable *const var : fz_ct.arguments[0].variables) { + for (fz::IntegerVariable* const var : fz_ct.arguments[0].variables) { if (var->domain.Min() == 0) found_zero = true; if (var->domain.Max() == size) found_size = true; } @@ -558,7 +558,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, const int max_index = min_index + fz_ct.arguments[0].variables.size() - 1; // The arc-based mutable circuit. 
- auto *circuit_arg = ct->mutable_circuit(); + auto* circuit_arg = ct->mutable_circuit(); // We fully encode all variables so we can use the literal based circuit. // TODO(user): avoid fully encoding more than once? @@ -581,7 +581,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // Create one Boolean variable for this arc. const int literal = proto.variables_size(); { - auto *new_var = proto.add_variables(); + auto* new_var = proto.add_variables(); new_var->add_domain(0); new_var->add_domain(1); } @@ -593,7 +593,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // literal => var == value. { - auto *ct = proto.add_constraints(); + auto* ct = proto.add_constraints(); ct->add_enforcement_literal(literal); ct->mutable_linear()->add_coeffs(1); ct->mutable_linear()->add_vars(var); @@ -603,7 +603,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // not(literal) => var != value { - auto *ct = proto.add_constraints(); + auto* ct = proto.add_constraints(); ct->add_enforcement_literal(NegatedRef(literal)); ct->mutable_linear()->add_coeffs(1); ct->mutable_linear()->add_vars(var); @@ -618,7 +618,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, ++index; } } else if (fz_ct.type == "fzn_inverse") { - auto *arg = ct->mutable_inverse(); + auto* arg = ct->mutable_inverse(); const auto direct_variables = LookupVars(fz_ct.arguments[0]); const auto inverse_variables = LookupVars(fz_ct.arguments[1]); @@ -629,11 +629,11 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // Try to auto-detect if it is zero or one based. 
bool found_zero = false; bool found_size = false; - for (fz::IntegerVariable *const var : fz_ct.arguments[0].variables) { + for (fz::IntegerVariable* const var : fz_ct.arguments[0].variables) { if (var->domain.Min() == 0) found_zero = true; if (var->domain.Max() == num_variables) found_size = true; } - for (fz::IntegerVariable *const var : fz_ct.arguments[1].variables) { + for (fz::IntegerVariable* const var : fz_ct.arguments[1].variables) { if (var->domain.Min() == 0) found_zero = true; if (var->domain.Max() == num_variables) found_size = true; } @@ -667,7 +667,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, const std::vector demands = LookupVars(fz_ct.arguments[2]); const int capacity = LookupVar(fz_ct.arguments[3]); - auto *arg = ct->mutable_cumulative(); + auto* arg = ct->mutable_cumulative(); arg->set_capacity(capacity); for (int i = 0; i < starts.size(); ++i) { // Special case for a 0-1 demand, we mark the interval as optional @@ -691,7 +691,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, const std::vector dy = LookupVars(fz_ct.arguments[3]); const std::vector x_intervals = CreateIntervals(x, dx); const std::vector y_intervals = CreateIntervals(y, dy); - auto *arg = ct->mutable_no_overlap_2d(); + auto* arg = ct->mutable_no_overlap_2d(); for (int i = 0; i < x.size(); ++i) { arg->add_x_intervals(x_intervals[i]); arg->add_y_intervals(y_intervals[i]); @@ -707,8 +707,8 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, // Flow conservation constraints. 
const int num_nodes = fz_ct.arguments[1].values.size(); - std::vector > flows_per_node(num_nodes); - std::vector > coeffs_per_node(num_nodes); + std::vector> flows_per_node(num_nodes); + std::vector> coeffs_per_node(num_nodes); const int num_arcs = fz_ct.arguments[0].values.size() / 2; for (int arc = 0; arc < num_arcs; arc++) { const int tail = fz_ct.arguments[0].values[2 * arc] - 1; @@ -721,7 +721,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, coeffs_per_node[head].push_back(-1); } for (int node = 0; node < num_nodes; node++) { - auto *arg = proto.add_constraints()->mutable_linear(); + auto* arg = proto.add_constraints()->mutable_linear(); arg->add_domain(fz_ct.arguments[1].values[node]); arg->add_domain(fz_ct.arguments[1].values[node]); for (int i = 0; i < flows_per_node[node].size(); ++i) { @@ -731,7 +731,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, } if (has_cost) { - auto *arg = proto.add_constraints()->mutable_linear(); + auto* arg = proto.add_constraints()->mutable_linear(); arg->add_domain(0); arg->add_domain(0); for (int arc = 0; arc < num_arcs; arc++) { @@ -750,7 +750,7 @@ void CpModelProtoWithMapping::FillConstraint(const fz::Constraint &fz_ct, } void CpModelProtoWithMapping::FillReifOrImpliedConstraint( - const fz::Constraint &fz_ct, ConstraintProto *ct) { + const fz::Constraint& fz_ct, ConstraintProto* ct) { // Start by adding a non-reified version of the same constraint. std::string simplified_type; if (absl::EndsWith(fz_ct.type, "_reif")) { @@ -829,7 +829,7 @@ void CpModelProtoWithMapping::FillReifOrImpliedConstraint( // Add the other side of the reification because CpModelProto only support // half reification. 
- ConstraintProto *negated_ct = proto.add_constraints(); + ConstraintProto* negated_ct = proto.add_constraints(); negated_ct->set_name(fz_ct.type + " (negated)"); negated_ct->add_enforcement_literal( sat::NegatedRef(ct->enforcement_literal(0))); @@ -838,25 +838,25 @@ void CpModelProtoWithMapping::FillReifOrImpliedConstraint( } void CpModelProtoWithMapping::TranslateSearchAnnotations( - const std::vector &search_annotations) { + const std::vector& search_annotations) { std::vector flat_annotations; - for (const fz::Annotation &annotation : search_annotations) { + for (const fz::Annotation& annotation : search_annotations) { fz::FlattenAnnotations(annotation, &flat_annotations); } - for (const fz::Annotation &annotation : flat_annotations) { + for (const fz::Annotation& annotation : flat_annotations) { if (annotation.IsFunctionCallWithIdentifier("int_search") || annotation.IsFunctionCallWithIdentifier("bool_search")) { - const std::vector &args = annotation.annotations; - std::vector vars; + const std::vector& args = annotation.annotations; + std::vector vars; args[0].AppendAllIntegerVariables(&vars); - DecisionStrategyProto *strategy = proto.add_search_strategy(); - for (fz::IntegerVariable *v : vars) { + DecisionStrategyProto* strategy = proto.add_search_strategy(); + for (fz::IntegerVariable* v : vars) { strategy->add_variables(gtl::FindOrDie(fz_var_to_index, v)); } - const fz::Annotation &choose = args[1]; + const fz::Annotation& choose = args[1]; if (choose.id == "input_order") { strategy->set_variable_selection_strategy( DecisionStrategyProto::CHOOSE_FIRST); @@ -876,7 +876,7 @@ void CpModelProtoWithMapping::TranslateSearchAnnotations( LOG(FATAL) << "Unsupported order: " << choose.id; } - const fz::Annotation &select = args[2]; + const fz::Annotation& select = args[2]; if (select.id == "indomain_min" || select.id == "indomain") { strategy->set_domain_reduction_strategy( DecisionStrategyProto::SELECT_MIN_VALUE); @@ -901,8 +901,8 @@ void 
CpModelProtoWithMapping::TranslateSearchAnnotations( // The format is fixed in the flatzinc specification. std::string SolutionString( - const fz::SolutionOutputSpecs &output, - const std::function &value_func) { + const fz::SolutionOutputSpecs& output, + const std::function& value_func) { if (output.variable != nullptr) { const int64 value = value_func(output.variable); if (output.display_as_boolean) { @@ -942,25 +942,25 @@ std::string SolutionString( } std::string SolutionString( - const fz::Model &model, - const std::function &value_func) { + const fz::Model& model, + const std::function& value_func) { std::string solution_string; - for (const auto &output_spec : model.output()) { + for (const auto& output_spec : model.output()) { solution_string.append(SolutionString(output_spec, value_func)); solution_string.append("\n"); } return solution_string; } -void LogInFlatzincFormat(const std::string &multi_line_input) { +void LogInFlatzincFormat(const std::string& multi_line_input) { std::vector lines = absl::StrSplit(multi_line_input, '\n', absl::SkipEmpty()); - for (const std::string &line : lines) { + for (const std::string& line : lines) { FZLOG << line << FZENDL; } } -void OutputFlatzincStats(const CpSolverResponse &response) { +void OutputFlatzincStats(const CpSolverResponse& response) { std::cout << "%%%mzn-stat: objective=" << response.objective_value() << std::endl; std::cout << "%%%mzn-stat: objectiveBound=" << response.best_objective_bound() @@ -978,9 +978,9 @@ void OutputFlatzincStats(const CpSolverResponse &response) { } // namespace -void SolveFzWithCpModelProto(const fz::Model &fz_model, - const fz::FlatzincSatParameters &p, - const std::string &sat_params) { +void SolveFzWithCpModelProto(const fz::Model& fz_model, + const fz::FlatzincSatParameters& p, + const std::string& sat_params) { if (!absl::GetFlag(FLAGS_use_flatzinc_format)) { LOG(INFO) << "*** Starting translation to CP-SAT"; } else if (p.verbose_logging) { @@ -994,10 +994,10 @@ void 
SolveFzWithCpModelProto(const fz::Model &fz_model, // plus eventually a bunch of constant variables that will be created // lazily. int num_variables = 0; - for (fz::IntegerVariable *fz_var : fz_model.variables()) { + for (fz::IntegerVariable* fz_var : fz_model.variables()) { if (!fz_var->active) continue; m.fz_var_to_index[fz_var] = num_variables++; - IntegerVariableProto *var = m.proto.add_variables(); + IntegerVariableProto* var = m.proto.add_variables(); var->set_name(fz_var->name); if (fz_var->domain.is_interval) { if (fz_var->domain.values.empty()) { @@ -1021,9 +1021,9 @@ void SolveFzWithCpModelProto(const fz::Model &fz_model, } // Translate the constraints. - for (fz::Constraint *fz_ct : fz_model.constraints()) { + for (fz::Constraint* fz_ct : fz_model.constraints()) { if (fz_ct == nullptr || !fz_ct->active) continue; - ConstraintProto *ct = m.proto.add_constraints(); + ConstraintProto* ct = m.proto.add_constraints(); ct->set_name(fz_ct->type); if (absl::EndsWith(fz_ct->type, "_reif") || absl::EndsWith(fz_ct->type, "_imp") || fz_ct->type == "array_bool_or" || @@ -1036,7 +1036,7 @@ void SolveFzWithCpModelProto(const fz::Model &fz_model, // Fill the objective. if (fz_model.objective() != nullptr) { - CpObjectiveProto *objective = m.proto.mutable_objective(); + CpObjectiveProto* objective = m.proto.mutable_objective(); objective->add_coeffs(1); if (fz_model.maximize()) { objective->set_scaling_factor(-1); @@ -1090,11 +1090,11 @@ void SolveFzWithCpModelProto(const fz::Model &fz_model, m.parameters.MergeFrom(flag_parameters); // We only need an observer if 'p.all_solutions' is true. 
- std::function solution_observer = nullptr; + std::function solution_observer = nullptr; if (p.display_all_solutions && absl::GetFlag(FLAGS_use_flatzinc_format)) { - solution_observer = [&fz_model, &m, &p](const CpSolverResponse &r) { + solution_observer = [&fz_model, &m, &p](const CpSolverResponse& r) { const std::string solution_string = - SolutionString(fz_model, [&m, &r](fz::IntegerVariable *v) { + SolutionString(fz_model, [&m, &r](fz::IntegerVariable* v) { return r.solution(gtl::FindOrDie(m.fz_var_to_index, v)); }); std::cout << solution_string << std::endl; @@ -1115,7 +1115,7 @@ void SolveFzWithCpModelProto(const fz::Model &fz_model, // Check the returned solution with the fz model checker. if (response.status() == CpSolverStatus::FEASIBLE || response.status() == CpSolverStatus::OPTIMAL) { - CHECK(CheckSolution(fz_model, [&response, &m](fz::IntegerVariable *v) { + CHECK(CheckSolution(fz_model, [&response, &m](fz::IntegerVariable* v) { return response.solution(gtl::FindOrDie(m.fz_var_to_index, v)); })); } @@ -1126,7 +1126,7 @@ void SolveFzWithCpModelProto(const fz::Model &fz_model, response.status() == CpSolverStatus::OPTIMAL) { if (!p.display_all_solutions) { // Already printed otherwise. 
const std::string solution_string = - SolutionString(fz_model, [&response, &m](fz::IntegerVariable *v) { + SolutionString(fz_model, [&response, &m](fz::IntegerVariable* v) { return response.solution(gtl::FindOrDie(m.fz_var_to_index, v)); }); std::cout << solution_string << std::endl; diff --git a/ortools/flatzinc/fz.cc b/ortools/flatzinc/fz.cc index f390baf323..4815945225 100644 --- a/ortools/flatzinc/fz.cc +++ b/ortools/flatzinc/fz.cc @@ -37,17 +37,17 @@ ABSL_FLAG(double, time_limit, 0, "time limit in seconds."); ABSL_FLAG(bool, all_solutions, false, "Search for all solutions."); -ABSL_FLAG(int32, num_solutions, 0, +ABSL_FLAG(int, num_solutions, 0, "Maximum number of solution to search for, 0 means unspecified."); ABSL_FLAG(bool, free_search, false, "If false, the solver must follow the defined search." "If true, other search are allowed."); -ABSL_FLAG(int32, threads, 0, "Number of threads the solver will use."); +ABSL_FLAG(int, threads, 0, "Number of threads the solver will use."); ABSL_FLAG(bool, presolve, true, "Presolve the model to simplify it."); ABSL_FLAG(bool, statistics, false, "Print solver statistics after search."); ABSL_FLAG(bool, read_from_stdin, false, "Read the FlatZinc from stdin, not from a file."); -ABSL_FLAG(int32, fz_seed, 0, "Random seed"); +ABSL_FLAG(int, fz_seed, 0, "Random seed"); ABSL_FLAG(std::string, fz_model_name, "stdin", "Define problem name when reading from stdin."); ABSL_FLAG(std::string, params, "", "SatParameters as a text proto."); @@ -59,7 +59,7 @@ using operations_research::ThreadPool; namespace operations_research { namespace fz { -std::vector FixAndParseParameters(int *argc, char ***argv) { +std::vector FixAndParseParameters(int* argc, char*** argv) { absl::SetFlag(&FLAGS_log_prefix, false); char all_param[] = "--all_solutions"; @@ -120,7 +120,7 @@ std::vector FixAndParseParameters(int *argc, char ***argv) { return residual_flags; } -Model ParseFlatzincModel(const std::string &input, bool input_is_filename) { +Model 
ParseFlatzincModel(const std::string& input, bool input_is_filename) { WallTimer timer; timer.Start(); // Read model. @@ -162,7 +162,7 @@ Model ParseFlatzincModel(const std::string &input, bool input_is_filename) { } // namespace fz } // namespace operations_research -int main(int argc, char **argv) { +int main(int argc, char** argv) { // Flatzinc specifications require single dash parameters (-a, -f, -p). // We need to fix parameters before parsing them. const std::vector residual_flags = diff --git a/ortools/flatzinc/model.cc b/ortools/flatzinc/model.cc index b7b8b8d41e..d6512337d6 100644 --- a/ortools/flatzinc/model.cc +++ b/ortools/flatzinc/model.cc @@ -113,7 +113,7 @@ Domain Domain::EmptyDomain() { return result; } -bool Domain::IntersectWithDomain(const Domain &domain) { +bool Domain::IntersectWithDomain(const Domain& domain) { if (domain.is_interval) { if (!domain.values.empty()) { return IntersectWithInterval(domain.values[0], domain.values[1]); @@ -188,7 +188,7 @@ bool Domain::IntersectWithInterval(int64 interval_min, int64 interval_max) { return false; } -bool Domain::IntersectWithListOfIntegers(const std::vector &integers) { +bool Domain::IntersectWithListOfIntegers(const std::vector& integers) { if (is_interval) { const int64 dmin = values.empty() ? kint64min : values[0]; const int64 dmax = values.empty() ? 
kint64max : values[1]; @@ -276,7 +276,7 @@ bool Domain::Contains(int64 value) const { namespace { bool IntervalOverlapValues(int64 lb, int64 ub, - const std::vector &values) { + const std::vector& values) { for (int64 value : values) { if (lb <= value && value <= ub) { return true; @@ -286,7 +286,7 @@ bool IntervalOverlapValues(int64 lb, int64 ub, } } // namespace -bool Domain::OverlapsIntList(const std::vector &vec) const { +bool Domain::OverlapsIntList(const std::vector& vec) const { if (IsAllInt64()) { return true; } @@ -295,7 +295,7 @@ bool Domain::OverlapsIntList(const std::vector &vec) const { return IntervalOverlapValues(values[0], values[1], vec); } else { // TODO(user): Better algorithm, sort and compare increasingly. - const std::vector &to_scan = + const std::vector& to_scan = values.size() <= vec.size() ? values : vec; const absl::flat_hash_set container = values.size() <= vec.size() @@ -324,7 +324,7 @@ bool Domain::OverlapsIntInterval(int64 lb, int64 ub) const { } } -bool Domain::OverlapsDomain(const Domain &other) const { +bool Domain::OverlapsDomain(const Domain& other) const { if (other.is_interval) { if (other.values.empty()) { return true; @@ -412,14 +412,14 @@ Argument Argument::DomainList(std::vector domains) { return result; } -Argument Argument::IntVarRef(IntegerVariable *const var) { +Argument Argument::IntVarRef(IntegerVariable* const var) { Argument result; result.type = INT_VAR_REF; result.variables.push_back(var); return result; } -Argument Argument::IntVarRefArray(std::vector vars) { +Argument Argument::IntVarRefArray(std::vector vars) { Argument result; result.type = INT_VAR_REF_ARRAY; result.variables = std::move(vars); @@ -432,7 +432,7 @@ Argument Argument::VoidArgument() { return result; } -Argument Argument::FromDomain(const Domain &domain) { +Argument Argument::FromDomain(const Domain& domain) { if (domain.is_interval) { if (domain.values.empty()) { return Argument::Interval(kint64min, kint64max); @@ -506,7 +506,7 @@ bool 
Argument::IsArrayOfValues() const { case INT_LIST: return true; case DOMAIN_LIST: { - for (const Domain &domain : domains) { + for (const Domain& domain : domains) { if (!domain.HasOneValue()) { return false; } @@ -516,7 +516,7 @@ bool Argument::IsArrayOfValues() const { case INT_VAR_REF: return false; case INT_VAR_REF_ARRAY: { - for (IntegerVariable *var : variables) { + for (IntegerVariable* var : variables) { if (!var->domain.HasOneValue()) { return false; } @@ -571,26 +571,26 @@ int64 Argument::ValueAt(int pos) const { } } -IntegerVariable *Argument::Var() const { +IntegerVariable* Argument::Var() const { return type == INT_VAR_REF ? variables[0] : nullptr; } -IntegerVariable *Argument::VarAt(int pos) const { +IntegerVariable* Argument::VarAt(int pos) const { return type == INT_VAR_REF_ARRAY ? variables[pos] : nullptr; } // ----- IntegerVariable ----- -IntegerVariable::IntegerVariable(const std::string &name_, - const Domain &domain_, bool temporary_) +IntegerVariable::IntegerVariable(const std::string& name_, + const Domain& domain_, bool temporary_) : name(name_), domain(domain_), temporary(temporary_), active(true) { if (!domain.is_interval) { gtl::STLSortAndRemoveDuplicates(&domain.values); } } -bool IntegerVariable::Merge(const std::string &other_name, - const Domain &other_domain, bool other_temporary) { +bool IntegerVariable::Merge(const std::string& other_name, + const Domain& other_domain, bool other_temporary) { if (temporary && !other_temporary) { temporary = false; name = other_name; @@ -654,7 +654,7 @@ Annotation Annotation::AnnotationList(std::vector list) { return result; } -Annotation Annotation::Identifier(const std::string &id) { +Annotation Annotation::Identifier(const std::string& id) { Annotation result; result.type = IDENTIFIER; result.interval_min = 0; @@ -663,7 +663,7 @@ Annotation Annotation::Identifier(const std::string &id) { return result; } -Annotation Annotation::FunctionCallWithArguments(const std::string &id, +Annotation 
Annotation::FunctionCallWithArguments(const std::string& id, std::vector args) { Annotation result; result.type = FUNCTION_CALL; @@ -674,7 +674,7 @@ Annotation Annotation::FunctionCallWithArguments(const std::string &id, return result; } -Annotation Annotation::FunctionCall(const std::string &id) { +Annotation Annotation::FunctionCall(const std::string& id) { Annotation result; result.type = FUNCTION_CALL; result.interval_min = 0; @@ -698,7 +698,7 @@ Annotation Annotation::IntegerValue(int64 value) { return result; } -Annotation Annotation::Variable(IntegerVariable *const var) { +Annotation Annotation::Variable(IntegerVariable* const var) { Annotation result; result.type = INT_VAR_REF; result.interval_min = 0; @@ -707,7 +707,7 @@ Annotation Annotation::Variable(IntegerVariable *const var) { return result; } -Annotation Annotation::VariableList(std::vector variables) { +Annotation Annotation::VariableList(std::vector variables) { Annotation result; result.type = INT_VAR_REF_ARRAY; result.interval_min = 0; @@ -716,7 +716,7 @@ Annotation Annotation::VariableList(std::vector variables) { return result; } -Annotation Annotation::String(const std::string &str) { +Annotation Annotation::String(const std::string& str) { Annotation result; result.type = STRING_VALUE; result.interval_min = 0; @@ -726,8 +726,8 @@ Annotation Annotation::String(const std::string &str) { } void Annotation::AppendAllIntegerVariables( - std::vector *const vars) const { - for (const Annotation &ann : annotations) { + std::vector* const vars) const { + for (const Annotation& ann : annotations) { ann.AppendAllIntegerVariables(vars); } if (!variables.empty()) { @@ -778,7 +778,7 @@ std::string SolutionOutputSpecs::Bounds::DebugString() const { } SolutionOutputSpecs SolutionOutputSpecs::SingleVariable( - const std::string &name, IntegerVariable *variable, + const std::string& name, IntegerVariable* variable, bool display_as_boolean) { SolutionOutputSpecs result; result.name = name; @@ -788,8 +788,8 @@ 
SolutionOutputSpecs SolutionOutputSpecs::SingleVariable( } SolutionOutputSpecs SolutionOutputSpecs::MultiDimensionalArray( - const std::string &name, std::vector bounds, - std::vector flat_variables, bool display_as_boolean) { + const std::string& name, std::vector bounds, + std::vector flat_variables, bool display_as_boolean) { SolutionOutputSpecs result; result.variable = nullptr; result.name = name; @@ -823,29 +823,29 @@ Model::~Model() { gtl::STLDeleteElements(&constraints_); } -IntegerVariable *Model::AddVariable(const std::string &name, - const Domain &domain, bool defined) { - IntegerVariable *const var = new IntegerVariable(name, domain, defined); +IntegerVariable* Model::AddVariable(const std::string& name, + const Domain& domain, bool defined) { + IntegerVariable* const var = new IntegerVariable(name, domain, defined); variables_.push_back(var); return var; } // TODO(user): Create only once constant per value. -IntegerVariable *Model::AddConstant(int64 value) { - IntegerVariable *const var = new IntegerVariable( +IntegerVariable* Model::AddConstant(int64 value) { + IntegerVariable* const var = new IntegerVariable( absl::StrCat(value), Domain::IntegerValue(value), true); variables_.push_back(var); return var; } -void Model::AddConstraint(const std::string &id, +void Model::AddConstraint(const std::string& id, std::vector arguments, bool is_domain) { - Constraint *const constraint = + Constraint* const constraint = new Constraint(id, std::move(arguments), is_domain); constraints_.push_back(constraint); } -void Model::AddConstraint(const std::string &id, +void Model::AddConstraint(const std::string& id, std::vector arguments) { AddConstraint(id, std::move(arguments), false); } @@ -859,14 +859,14 @@ void Model::Satisfy(std::vector search_annotations) { search_annotations_ = std::move(search_annotations); } -void Model::Minimize(IntegerVariable *obj, +void Model::Minimize(IntegerVariable* obj, std::vector search_annotations) { objective_ = obj; maximize_ = 
false; search_annotations_ = std::move(search_annotations); } -void Model::Maximize(IntegerVariable *obj, +void Model::Maximize(IntegerVariable* obj, std::vector search_annotations) { objective_ = obj; maximize_ = true; @@ -901,12 +901,12 @@ std::string Model::DebugString() const { } bool Model::IsInconsistent() const { - for (IntegerVariable *var : variables_) { + for (IntegerVariable* var : variables_) { if (var->domain.empty()) { return true; } } - for (Constraint *ct : constraints_) { + for (Constraint* ct : constraints_) { if (ct->type == "false_constraint") { return true; } @@ -919,7 +919,7 @@ bool Model::IsInconsistent() const { void ModelStatistics::PrintStatistics() const { FZLOG << "Model " << model_.name() << FZENDL; - for (const auto &it : constraints_per_type_) { + for (const auto& it : constraints_per_type_) { FZLOG << " - " << it.first << ": " << it.second.size() << FZENDL; } if (model_.objective() == nullptr) { @@ -933,16 +933,16 @@ void ModelStatistics::PrintStatistics() const { void ModelStatistics::BuildStatistics() { constraints_per_type_.clear(); constraints_per_variables_.clear(); - for (Constraint *const ct : model_.constraints()) { + for (Constraint* const ct : model_.constraints()) { if (ct != nullptr && ct->active) { constraints_per_type_[ct->type].push_back(ct); - absl::flat_hash_set marked; - for (const Argument &arg : ct->arguments) { - for (IntegerVariable *const var : arg.variables) { + absl::flat_hash_set marked; + for (const Argument& arg : ct->arguments) { + for (IntegerVariable* const var : arg.variables) { marked.insert(var); } } - for (const IntegerVariable *const var : marked) { + for (const IntegerVariable* const var : marked) { constraints_per_variables_[var].push_back(ct); } } @@ -950,10 +950,10 @@ void ModelStatistics::BuildStatistics() { } // Flatten Search annotations. 
-void FlattenAnnotations(const Annotation &ann, std::vector *out) { +void FlattenAnnotations(const Annotation& ann, std::vector* out) { if (ann.type == Annotation::ANNOTATION_LIST || ann.IsFunctionCallWithIdentifier("seq_search")) { - for (const Annotation &inner : ann.annotations) { + for (const Annotation& inner : ann.annotations) { FlattenAnnotations(inner, out); } } else { diff --git a/ortools/flatzinc/parser.cc b/ortools/flatzinc/parser.cc index 67984734fa..b4097dffc0 100644 --- a/ortools/flatzinc/parser.cc +++ b/ortools/flatzinc/parser.cc @@ -18,33 +18,33 @@ #include "ortools/flatzinc/parser.tab.hh" // Declare external functions in the flatzinc.tab.cc generated file. -extern int orfz_parse(operations_research::fz::ParserContext *parser, - operations_research::fz::Model *model, bool *ok, - void *scanner); -extern int orfz_lex_init(void **scanner); -extern int orfz_lex_destroy(void *scanner); -extern void orfz_set_in(FILE *in_str, void *yyscanner); +extern int orfz_parse(operations_research::fz::ParserContext* parser, + operations_research::fz::Model* model, bool* ok, + void* scanner); +extern int orfz_lex_init(void** scanner); +extern int orfz_lex_destroy(void* scanner); +extern void orfz_set_in(FILE* in_str, void* yyscanner); // Declare external functions and structures in the flatzinc.yy.cc // generated file. struct yy_buffer_state; -extern yy_buffer_state *orfz__scan_bytes(const char *input, int size, - void *scanner); -extern void orfz__delete_buffer(yy_buffer_state *b, void *scanner); +extern yy_buffer_state* orfz__scan_bytes(const char* input, int size, + void* scanner); +extern void orfz__delete_buffer(yy_buffer_state* b, void* scanner); namespace operations_research { namespace fz { // ----- public parsing API ----- -bool ParseFlatzincFile(const std::string &filename, Model *model) { +bool ParseFlatzincFile(const std::string& filename, Model* model) { // Init. 
- FILE *const input = fopen(filename.c_str(), "r"); + FILE* const input = fopen(filename.c_str(), "r"); if (input == nullptr) { LOG(INFO) << "Could not open file '" << filename << "'"; return false; } ParserContext context; bool ok = true; - void *scanner = nullptr; + void* scanner = nullptr; orfz_lex_init(&scanner); orfz_set_in(input, scanner); // Parse. @@ -57,13 +57,13 @@ bool ParseFlatzincFile(const std::string &filename, Model *model) { return ok; } -bool ParseFlatzincString(const std::string &input, Model *model) { +bool ParseFlatzincString(const std::string& input, Model* model) { // Init. ParserContext context; bool ok = true; - void *scanner = nullptr; + void* scanner = nullptr; orfz_lex_init(&scanner); - yy_buffer_state *const string_buffer = + yy_buffer_state* const string_buffer = orfz__scan_bytes(input.data(), input.size(), scanner); // Parse. orfz_parse(&context, model, &ok, scanner); diff --git a/ortools/flatzinc/parser.tab.cc b/ortools/flatzinc/parser.tab.cc index b53001e54b..18c09d0957 100644 --- a/ortools/flatzinc/parser.tab.cc +++ b/ortools/flatzinc/parser.tab.cc @@ -102,7 +102,7 @@ #define YY_NULLPTR 0 #endif #else -#define YY_NULLPTR ((void *)0) +#define YY_NULLPTR ((void*)0) #endif #endif @@ -150,11 +150,11 @@ enum yysymbol_kind_t { YYSYMBOL_predicate_argument = 37, /* predicate_argument */ YYSYMBOL_predicate_array_argument = 38, /* predicate_array_argument */ YYSYMBOL_predicate_ints = 39, /* predicate_ints */ - YYSYMBOL_variable_or_constant_declarations = 40, - /* variable_or_constant_declarations */ - YYSYMBOL_variable_or_constant_declaration = 41, - /* variable_or_constant_declaration */ - YYSYMBOL_optional_var_or_value = 42, /* optional_var_or_value */ + YYSYMBOL_variable_or_constant_declarations = + 40, /* variable_or_constant_declarations */ + YYSYMBOL_variable_or_constant_declaration = + 41, /* variable_or_constant_declaration */ + YYSYMBOL_optional_var_or_value = 42, /* optional_var_or_value */ YYSYMBOL_optional_var_or_value_array = 
43, /* optional_var_or_value_array */ YYSYMBOL_var_or_value_array = 44, /* var_or_value_array */ YYSYMBOL_var_or_value = 45, /* var_or_value */ @@ -426,13 +426,13 @@ typedef int yy_state_fast_t; #ifndef YYMALLOC #define YYMALLOC malloc #if !defined malloc && !defined EXIT_SUCCESS -void *malloc(YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ +void* malloc(YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ #endif #endif #ifndef YYFREE #define YYFREE free #if !defined free && !defined EXIT_SUCCESS -void free(void *); /* INFRINGES ON USER NAME SPACE */ +void free(void*); /* INFRINGES ON USER NAME SPACE */ #endif #endif #endif @@ -551,11 +551,11 @@ static const yytype_int16 yyrline[] = { #if 1 /* The user-facing name of the symbol whose (internal) number is YYSYMBOL. No bounds checking. */ -static const char *yysymbol_name(yysymbol_kind_t yysymbol) YY_ATTRIBUTE_UNUSED; +static const char* yysymbol_name(yysymbol_kind_t yysymbol) YY_ATTRIBUTE_UNUSED; /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ -static const char *const yytname[] = {"\"end of file\"", +static const char* const yytname[] = {"\"end of file\"", "error", "\"invalid token\"", "ARRAY", @@ -621,7 +621,7 @@ static const char *const yytname[] = {"\"end of file\"", "solve", YY_NULLPTR}; -static const char *yysymbol_name(yysymbol_kind_t yysymbol) { +static const char* yysymbol_name(yysymbol_kind_t yysymbol) { return yytname[yysymbol]; } #endif @@ -832,10 +832,10 @@ enum { YYENOMEM = -2 }; `-----------------------------------*/ static void yy_symbol_value_print( - FILE *yyo, yysymbol_kind_t yykind, YYSTYPE const *const yyvaluep, - operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, void *scanner) { - FILE *yyoutput = yyo; + FILE* yyo, yysymbol_kind_t yykind, YYSTYPE const* const yyvaluep, + operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, void* scanner) { + FILE* yyoutput = yyo; YYUSE(yyoutput); YYUSE(context); YYUSE(model); @@ -845,7 +845,8 @@ static void yy_symbol_value_print( #ifdef YYPRINT if (yykind < YYNTOKENS) YYPRINT(yyo, yytoknum[yykind], *yyvaluep); #endif - YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE(yykind); + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE(yykind); YY_IGNORE_MAYBE_UNINITIALIZED_END } @@ -853,11 +854,11 @@ static void yy_symbol_value_print( | Print this symbol on YYO. | `---------------------------*/ -static void yy_symbol_print(FILE *yyo, yysymbol_kind_t yykind, - YYSTYPE const *const yyvaluep, - operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, - void *scanner) { +static void yy_symbol_print(FILE* yyo, yysymbol_kind_t yykind, + YYSTYPE const* const yyvaluep, + operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, + void* scanner) { YYFPRINTF(yyo, "%s %s (", yykind < YYNTOKENS ? 
"token" : "nterm", yysymbol_name(yykind)); @@ -870,7 +871,7 @@ static void yy_symbol_print(FILE *yyo, yysymbol_kind_t yykind, | TOP (included). | `------------------------------------------------------------------*/ -static void yy_stack_print(yy_state_t *yybottom, yy_state_t *yytop) { +static void yy_stack_print(yy_state_t* yybottom, yy_state_t* yytop) { YYFPRINTF(stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; @@ -888,10 +889,10 @@ static void yy_stack_print(yy_state_t *yybottom, yy_state_t *yytop) { | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ -static void yy_reduce_print(yy_state_t *yyssp, YYSTYPE *yyvsp, int yyrule, - operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, - void *scanner) { +static void yy_reduce_print(yy_state_t* yyssp, YYSTYPE* yyvsp, int yyrule, + operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, + void* scanner) { int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; @@ -940,7 +941,7 @@ int yydebug; /* Context of a parse error. */ typedef struct { - yy_state_t *yyssp; + yy_state_t* yyssp; yysymbol_kind_t yytoken; } yypcontext_t; @@ -950,7 +951,7 @@ typedef struct { be less than YYNTOKENS). Return YYENOMEM on memory exhaustion. Return 0 if there are more than YYARGN expected tokens, yet fill YYARG up to YYARGN. */ -static int yypcontext_expected_tokens(const yypcontext_t *yyctx, +static int yypcontext_expected_tokens(const yypcontext_t* yyctx, yysymbol_kind_t yyarg[], int yyargn) { /* Actual size of YYARG. */ int yycount = 0; @@ -984,7 +985,7 @@ static int yypcontext_expected_tokens(const yypcontext_t *yyctx, #define yystrlen(S) (YY_CAST(YYPTRDIFF_T, strlen(S))) #else /* Return the length of YYSTR. 
*/ -static YYPTRDIFF_T yystrlen(const char *yystr) { +static YYPTRDIFF_T yystrlen(const char* yystr) { YYPTRDIFF_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; @@ -998,9 +999,9 @@ static YYPTRDIFF_T yystrlen(const char *yystr) { #else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ -static char *yystpcpy(char *yydest, const char *yysrc) { - char *yyd = yydest; - const char *yys = yysrc; +static char* yystpcpy(char* yydest, const char* yysrc) { + char* yyd = yydest; + const char* yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; @@ -1017,10 +1018,10 @@ static char *yystpcpy(char *yydest, const char *yysrc) { backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. */ -static YYPTRDIFF_T yytnamerr(char *yyres, const char *yystr) { +static YYPTRDIFF_T yytnamerr(char* yyres, const char* yystr) { if (*yystr == '"') { YYPTRDIFF_T yyn = 0; - char const *yyp = yystr; + char const* yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': @@ -1052,7 +1053,7 @@ static YYPTRDIFF_T yytnamerr(char *yyres, const char *yystr) { } #endif -static int yy_syntax_error_arguments(const yypcontext_t *yyctx, +static int yy_syntax_error_arguments(const yypcontext_t* yyctx, yysymbol_kind_t yyarg[], int yyargn) { /* Actual size of YYARG. */ int yycount = 0; @@ -1101,11 +1102,11 @@ static int yy_syntax_error_arguments(const yypcontext_t *yyctx, not large enough to hold the message. In that case, also set *YYMSG_ALLOC to the required number of bytes. Return YYENOMEM if the required number of bytes is too large to store. */ -static int yysyntax_error(YYPTRDIFF_T *yymsg_alloc, char **yymsg, - const yypcontext_t *yyctx) { +static int yysyntax_error(YYPTRDIFF_T* yymsg_alloc, char** yymsg, + const yypcontext_t* yyctx) { enum { YYARGS_MAX = 5 }; /* Internationalized format string. 
*/ - const char *yyformat = YY_NULLPTR; + const char* yyformat = YY_NULLPTR; /* Arguments of yyformat: reported tokens (one for the "unexpected", one per "expected"). */ yysymbol_kind_t yyarg[YYARGS_MAX]; @@ -1158,7 +1159,7 @@ static int yysyntax_error(YYPTRDIFF_T *yymsg_alloc, char **yymsg, Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { - char *yyp = *yymsg; + char* yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { @@ -1176,11 +1177,11 @@ static int yysyntax_error(YYPTRDIFF_T *yymsg_alloc, char **yymsg, | Release the memory associated to this symbol. | `-----------------------------------------------*/ -static void yydestruct(const char *yymsg, yysymbol_kind_t yykind, - YYSTYPE *yyvaluep, - operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, - void *scanner) { +static void yydestruct(const char* yymsg, yysymbol_kind_t yykind, + YYSTYPE* yyvaluep, + operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, + void* scanner) { YYUSE(yyvaluep); YYUSE(context); YYUSE(model); @@ -1189,7 +1190,8 @@ static void yydestruct(const char *yymsg, yysymbol_kind_t yykind, if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT(yymsg, yykind, yyvaluep, yylocationp); - YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE(yykind); + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE(yykind); YY_IGNORE_MAYBE_UNINITIALIZED_END } @@ -1197,8 +1199,8 @@ static void yydestruct(const char *yymsg, yysymbol_kind_t yykind, | yyparse. | `----------*/ -int yyparse(operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, void *scanner) { +int yyparse(operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, void* scanner) { /* Lookahead token kind. 
*/ int yychar; @@ -1223,13 +1225,13 @@ int yyparse(operations_research::fz::ParserContext *context, /* The state stack: array, bottom, top. */ yy_state_t yyssa[YYINITDEPTH]; - yy_state_t *yyss = yyssa; - yy_state_t *yyssp = yyss; + yy_state_t* yyss = yyssa; + yy_state_t* yyssp = yyss; /* The semantic value stack: array, bottom, top. */ YYSTYPE yyvsa[YYINITDEPTH]; - YYSTYPE *yyvs = yyvsa; - YYSTYPE *yyvsp = yyvs; + YYSTYPE* yyvs = yyvsa; + YYSTYPE* yyvsp = yyvs; int yyn; /* The return value of yyparse. */ @@ -1242,7 +1244,7 @@ int yyparse(operations_research::fz::ParserContext *context, /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; - char *yymsg = yymsgbuf; + char* yymsg = yymsgbuf; YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf; #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) @@ -1270,8 +1272,10 @@ yynewstate: yysetstate: YYDPRINTF((stderr, "Entering state %d\n", yystate)); YY_ASSERT(0 <= yystate && yystate < YYNSTATES); - YY_IGNORE_USELESS_CAST_BEGIN *yyssp = YY_CAST(yy_state_t, yystate); - YY_IGNORE_USELESS_CAST_END YY_STACK_PRINT(yyss, yyssp); + YY_IGNORE_USELESS_CAST_BEGIN + *yyssp = YY_CAST(yy_state_t, yystate); + YY_IGNORE_USELESS_CAST_END + YY_STACK_PRINT(yyss, yyssp); if (yyss + yystacksize - 1 <= yyssp) #if !defined yyoverflow && !defined YYSTACK_RELOCATE @@ -1286,8 +1290,8 @@ yysetstate: /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ - yy_state_t *yyss1 = yyss; - YYSTYPE *yyvs1 = yyvs; + yy_state_t* yyss1 = yyss; + YYSTYPE* yyvs1 = yyvs; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. 
This used to be a @@ -1305,9 +1309,9 @@ yysetstate: if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { - yy_state_t *yyss1 = yyss; - union yyalloc *yyptr = - YY_CAST(union yyalloc *, + yy_state_t* yyss1 = yyss; + union yyalloc* yyptr = + YY_CAST(union yyalloc*, YYSTACK_ALLOC(YY_CAST(YYSIZE_T, YYSTACK_BYTES(yystacksize)))); if (!yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE(yyss_alloc, yyss); @@ -1320,9 +1324,12 @@ yysetstate: yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; - YY_IGNORE_USELESS_CAST_BEGIN YYDPRINTF( + YY_IGNORE_USELESS_CAST_BEGIN + YYDPRINTF( (stderr, "Stack size increased to %ld\n", YY_CAST(long, yystacksize))); - YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) YYABORT; + YY_IGNORE_USELESS_CAST_END + + if (yyss + yystacksize - 1 <= yyssp) YYABORT; } #endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */ @@ -1384,9 +1391,12 @@ yybackup: /* Shift the lookahead token. */ YY_SYMBOL_PRINT("Shifting", yytoken, &yylval, &yylloc); yystate = yyn; - YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; - YY_IGNORE_MAYBE_UNINITIALIZED_END /* Discard the shifted token. */ - yychar = ORFZ_EMPTY; + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + /* Discard the shifted token. */ + yychar = ORFZ_EMPTY; goto yynewstate; /*-----------------------------------------------------------. @@ -1430,10 +1440,10 @@ yyreduce: { // Declaration of a (named) constant: we simply register it in the // parser's context, and don't store it in the model. 
- const Domain &domain = (yyvsp[-5].domain); - const std::string &identifier = (yyvsp[-3].string_value); - const Domain &assignment = (yyvsp[0].domain); - std::vector *const annotations = (yyvsp[-2].annotations); + const Domain& domain = (yyvsp[-5].domain); + const std::string& identifier = (yyvsp[-3].string_value); + const Domain& assignment = (yyvsp[0].domain); + std::vector* const annotations = (yyvsp[-2].annotations); if (!assignment.HasOneValue()) { // TODO(lperron): Check that the assignment is included in the domain. @@ -1453,13 +1463,13 @@ yyreduce: ']' */ #line 169 "./ortools/flatzinc/parser.yy" { - std::vector *const annotations = (yyvsp[-4].annotations); + std::vector* const annotations = (yyvsp[-4].annotations); // Declaration of a (named) constant array. See rule right above. CHECK_EQ((yyvsp[-12].integer_value), 1) << "Only [1..n] array are supported here."; const int64 num_constants = (yyvsp[-10].integer_value); - const std::string &identifier = (yyvsp[-5].string_value); - const std::vector *const assignments = (yyvsp[-1].integers); + const std::string& identifier = (yyvsp[-5].string_value); + const std::vector* const assignments = (yyvsp[-1].integers); CHECK(assignments != nullptr); CHECK_EQ(num_constants, assignments->size()); // TODO(lperron): CHECK all values within domain. @@ -1474,13 +1484,13 @@ yyreduce: ']' OF int_domain ':' IDENTIFIER annotations '=' '[' ']' */ #line 184 "./ortools/flatzinc/parser.yy" { - std::vector *const annotations = (yyvsp[-3].annotations); + std::vector* const annotations = (yyvsp[-3].annotations); // Declaration of a (named) constant array. See rule right above. 
CHECK_EQ((yyvsp[-11].integer_value), 1) << "Only [1..n] array are supported here."; const int64 num_constants = (yyvsp[-9].integer_value); CHECK_EQ(num_constants, 0) << "Empty arrays should have a size of 0"; - const std::string &identifier = (yyvsp[-4].string_value); + const std::string& identifier = (yyvsp[-4].string_value); context->integer_array_map[identifier] = std::vector(); delete annotations; } @@ -1492,13 +1502,13 @@ yyreduce: ']' */ #line 195 "./ortools/flatzinc/parser.yy" { - std::vector *const annotations = (yyvsp[-4].annotations); + std::vector* const annotations = (yyvsp[-4].annotations); // Declaration of a (named) constant array. See rule right above. CHECK_EQ((yyvsp[-12].integer_value), 1) << "Only [1..n] array are supported here."; const int64 num_constants = (yyvsp[-10].integer_value); - const std::string &identifier = (yyvsp[-5].string_value); - const std::vector *const assignments = (yyvsp[-1].doubles); + const std::string& identifier = (yyvsp[-5].string_value); + const std::vector* const assignments = (yyvsp[-1].doubles); CHECK(assignments != nullptr); CHECK_EQ(num_constants, assignments->size()); // TODO(lperron): CHECK all values within domain. @@ -1513,13 +1523,13 @@ yyreduce: ']' OF float_domain ':' IDENTIFIER annotations '=' '[' ']' */ #line 210 "./ortools/flatzinc/parser.yy" { - std::vector *const annotations = (yyvsp[-3].annotations); + std::vector* const annotations = (yyvsp[-3].annotations); // Declaration of a (named) constant array. See rule right above. 
CHECK_EQ((yyvsp[-11].integer_value), 1) << "Only [1..n] array are supported here."; const int64 num_constants = (yyvsp[-9].integer_value); CHECK_EQ(num_constants, 0) << "Empty arrays should have a size of 0"; - const std::string &identifier = (yyvsp[-4].string_value); + const std::string& identifier = (yyvsp[-4].string_value); context->float_array_map[identifier] = std::vector(); delete annotations; } @@ -1535,10 +1545,10 @@ yyreduce: CHECK_EQ((yyvsp[-12].integer_value), 1) << "Only [1..n] array are supported here."; const int64 num_constants = (yyvsp[-10].integer_value); - const Domain &domain = (yyvsp[-7].domain); - const std::string &identifier = (yyvsp[-5].string_value); - const std::vector *const assignments = (yyvsp[-1].domains); - const std::vector *const annotations = + const Domain& domain = (yyvsp[-7].domain); + const std::string& identifier = (yyvsp[-5].string_value); + const std::vector* const assignments = (yyvsp[-1].domains); + const std::vector* const annotations = (yyvsp[-4].annotations); CHECK(assignments != nullptr); CHECK_EQ(num_constants, assignments->size()); @@ -1568,13 +1578,13 @@ yyreduce: // constant, we'll create a new var stored in the model. If it's // assigned to another variable x then we simply adjust that // existing variable x according to the current (re-)declaration. 
- const Domain &domain = (yyvsp[-4].domain); - const std::string &identifier = (yyvsp[-2].string_value); - std::vector *const annotations = (yyvsp[-1].annotations); - const VariableRefOrValue &assignment = (yyvsp[0].var_or_value); + const Domain& domain = (yyvsp[-4].domain); + const std::string& identifier = (yyvsp[-2].string_value); + std::vector* const annotations = (yyvsp[-1].annotations); + const VariableRefOrValue& assignment = (yyvsp[0].var_or_value); const bool introduced = ContainsId(annotations, "var_is_introduced") || absl::StartsWith(identifier, "X_INTRODUCED"); - IntegerVariable *var = nullptr; + IntegerVariable* var = nullptr; if (!assignment.defined) { var = model->AddVariable(identifier, domain, introduced); } else if (assignment.variable == nullptr) { // just an integer constant. @@ -1608,10 +1618,10 @@ yyreduce: // IDENTIFIER[i] (1-based index). CHECK_EQ((yyvsp[-10].integer_value), 1); const int64 num_vars = (yyvsp[-8].integer_value); - const Domain &domain = (yyvsp[-4].domain); - const std::string &identifier = (yyvsp[-2].string_value); - std::vector *const annotations = (yyvsp[-1].annotations); - VariableRefOrValueArray *const assignments = + const Domain& domain = (yyvsp[-4].domain); + const std::string& identifier = (yyvsp[-2].string_value); + std::vector* const annotations = (yyvsp[-1].annotations); + VariableRefOrValueArray* const assignments = (yyvsp[0].var_or_value_array); CHECK(assignments == nullptr || assignments->variables.size() == num_vars); @@ -1619,7 +1629,7 @@ yyreduce: const bool introduced = ContainsId(annotations, "var_is_introduced") || absl::StartsWith(identifier, "X_INTRODUCED"); - std::vector vars(num_vars, nullptr); + std::vector vars(num_vars, nullptr); for (int i = 0; i < num_vars; ++i) { const std::string var_name = @@ -1633,7 +1643,7 @@ yyreduce: vars[i] = model->AddVariable(var_name, Domain::IntegerValue(value), introduced); } else { - IntegerVariable *const var = assignments->variables[i]; + IntegerVariable* const var 
= assignments->variables[i]; CHECK(var != nullptr); vars[i] = var; vars[i]->Merge(var_name, domain, introduced); @@ -1649,16 +1659,16 @@ yyreduce: // output. if (annotations != nullptr) { for (int i = 0; i < annotations->size(); ++i) { - const Annotation &ann = (*annotations)[i]; + const Annotation& ann = (*annotations)[i]; if (ann.IsFunctionCallWithIdentifier("output_array")) { // We have found an output annotation. CHECK_EQ(1, ann.annotations.size()); CHECK_EQ(Annotation::ANNOTATION_LIST, ann.annotations.back().type); - const Annotation &list = ann.annotations.back(); + const Annotation& list = ann.annotations.back(); // Let's build the vector of bounds. std::vector bounds; for (int a = 0; a < list.annotations.size(); ++a) { - const Annotation &bound = list.annotations[a]; + const Annotation& bound = list.annotations[a]; CHECK_EQ(Annotation::INTERVAL, bound.type); bounds.emplace_back(SolutionOutputSpecs::Bounds( bound.interval_min, bound.interval_max)); @@ -1745,7 +1755,7 @@ yyreduce: #line 369 "./ortools/flatzinc/parser.yy" { // A reference to an existing integer constant or variable. - const std::string &id = (yyvsp[0].string_value); + const std::string& id = (yyvsp[0].string_value); if (gtl::ContainsKey(context->integer_map, id)) { (yyval.var_or_value) = VariableRefOrValue::Value(gtl::FindOrDie(context->integer_map, id)); @@ -1765,7 +1775,7 @@ yyreduce: #line 382 "./ortools/flatzinc/parser.yy" { // A given element of an existing constant array or variable array. 
- const std::string &id = (yyvsp[-3].string_value); + const std::string& id = (yyvsp[-3].string_value); const int64 value = (yyvsp[-1].integer_value); if (gtl::ContainsKey(context->integer_array_map, id)) { (yyval.var_or_value) = VariableRefOrValue::Value( @@ -2071,10 +2081,10 @@ yyreduce: */ #line 488 "./ortools/flatzinc/parser.yy" { - const std::string &identifier = (yyvsp[-4].string_value); + const std::string& identifier = (yyvsp[-4].string_value); CHECK((yyvsp[-2].args) != nullptr) << "Missing argument in constraint"; - const std::vector &arguments = *(yyvsp[-2].args); - std::vector *const annotations = (yyvsp[0].annotations); + const std::vector& arguments = *(yyvsp[-2].args); + std::vector* const annotations = (yyvsp[0].annotations); model->AddConstraint(identifier, arguments, ContainsId(annotations, "domain")); @@ -2149,7 +2159,7 @@ yyreduce: case 79: /* argument: IDENTIFIER */ #line 513 "./ortools/flatzinc/parser.yy" { - const std::string &id = (yyvsp[0].string_value); + const std::string& id = (yyvsp[0].string_value); if (gtl::ContainsKey(context->integer_map, id)) { (yyval.arg) = Argument::IntegerValue(gtl::FindOrDie(context->integer_map, id)); @@ -2160,7 +2170,7 @@ yyreduce: const double d = gtl::FindOrDie(context->float_map, id); (yyval.arg) = Argument::IntegerValue(ConvertAsIntegerOrDie(d)); } else if (gtl::ContainsKey(context->float_array_map, id)) { - const auto &double_values = + const auto& double_values = gtl::FindOrDie(context->float_array_map, id); std::vector integer_values; for (const double d : double_values) { @@ -2175,12 +2185,12 @@ yyreduce: (yyval.arg) = Argument::IntVarRefArray( gtl::FindOrDie(context->variable_array_map, id)); } else if (gtl::ContainsKey(context->domain_map, id)) { - const Domain &d = gtl::FindOrDie(context->domain_map, id); + const Domain& d = gtl::FindOrDie(context->domain_map, id); (yyval.arg) = Argument::FromDomain(d); } else { CHECK(gtl::ContainsKey(context->domain_array_map, id)) << "Unknown identifier: " << id; 
- const std::vector &d = + const std::vector& d = gtl::FindOrDie(context->domain_array_map, id); (yyval.arg) = Argument::DomainList(d); } @@ -2191,7 +2201,7 @@ yyreduce: case 80: /* argument: IDENTIFIER '[' IVALUE ']' */ #line 544 "./ortools/flatzinc/parser.yy" { - const std::string &id = (yyvsp[-3].string_value); + const std::string& id = (yyvsp[-3].string_value); const int64 index = (yyvsp[-1].integer_value); if (gtl::ContainsKey(context->integer_array_map, id)) { (yyval.arg) = Argument::IntegerValue( @@ -2202,7 +2212,7 @@ yyreduce: } else { CHECK(gtl::ContainsKey(context->domain_array_map, id)) << "Unknown identifier: " << id; - const Domain &d = + const Domain& d = Lookup(gtl::FindOrDie(context->domain_array_map, id), index); (yyval.arg) = Argument::FromDomain(d); } @@ -2213,7 +2223,7 @@ yyreduce: case 81: /* argument: '[' var_or_value_array ']' */ #line 561 "./ortools/flatzinc/parser.yy" { - VariableRefOrValueArray *const arguments = (yyvsp[-1].var_or_value_array); + VariableRefOrValueArray* const arguments = (yyvsp[-1].var_or_value_array); CHECK(arguments != nullptr); bool has_variables = false; for (int i = 0; i < arguments->Size(); ++i) { @@ -2223,8 +2233,7 @@ yyreduce: } } if (has_variables) { - (yyval.arg) = - Argument::IntVarRefArray(std::vector()); + (yyval.arg) = Argument::IntVarRefArray(std::vector()); (yyval.arg).variables.reserve(arguments->Size()); for (int i = 0; i < arguments->Size(); ++i) { if (arguments->variables[i] != nullptr) { @@ -2315,7 +2324,7 @@ yyreduce: case 90: /* annotation: IDENTIFIER */ #line 609 "./ortools/flatzinc/parser.yy" { - const std::string &id = (yyvsp[0].string_value); + const std::string& id = (yyvsp[0].string_value); if (gtl::ContainsKey(context->variable_map, id)) { (yyval.annotation) = Annotation::Variable(gtl::FindOrDie(context->variable_map, id)); @@ -2332,7 +2341,7 @@ yyreduce: case 91: /* annotation: IDENTIFIER '(' annotation_arguments ')' */ #line 619 "./ortools/flatzinc/parser.yy" { - std::vector *const 
annotations = (yyvsp[-1].annotations); + std::vector* const annotations = (yyvsp[-1].annotations); if (annotations != nullptr) { (yyval.annotation) = Annotation::FunctionCallWithArguments( (yyvsp[-3].string_value), std::move(*annotations)); @@ -2360,7 +2369,7 @@ yyreduce: case 93: /* annotation: '[' annotation_arguments ']' */ #line 634 "./ortools/flatzinc/parser.yy" { - std::vector *const annotations = (yyvsp[-1].annotations); + std::vector* const annotations = (yyvsp[-1].annotations); if (annotations != nullptr) { (yyval.annotation) = Annotation::AnnotationList(std::move(*annotations)); @@ -2464,14 +2473,14 @@ yyerrlab: ++yynerrs; { yypcontext_t yyctx = {yyssp, yytoken}; - char const *yymsgp = YY_("syntax error"); + char const* yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = yysyntax_error(&yymsg_alloc, &yymsg, &yyctx); if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == -1) { if (yymsg != yymsgbuf) YYSTACK_FREE(yymsg); - yymsg = YY_CAST(char *, YYSTACK_ALLOC(YY_CAST(YYSIZE_T, yymsg_alloc))); + yymsg = YY_CAST(char*, YYSTACK_ALLOC(YY_CAST(YYSIZE_T, yymsg_alloc))); if (yymsg) { yysyntax_error_status = yysyntax_error(&yymsg_alloc, &yymsg, &yyctx); yymsgp = yymsg; @@ -2547,9 +2556,12 @@ yyerrlab1: YY_STACK_PRINT(yyss, yyssp); } - YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; - YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ - YY_SYMBOL_PRINT("Shifting", YY_ACCESSING_SYMBOL(yyn), yyvsp, yylsp); + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + /* Shift the error token. 
*/ + YY_SYMBOL_PRINT("Shifting", YY_ACCESSING_SYMBOL(yyn), yyvsp, yylsp); yystate = yyn; goto yynewstate; diff --git a/ortools/flatzinc/parser.yy.cc b/ortools/flatzinc/parser.yy.cc index c5a59e4b47..a8d3198add 100644 --- a/ortools/flatzinc/parser.yy.cc +++ b/ortools/flatzinc/parser.yy.cc @@ -433,15 +433,15 @@ typedef size_t yy_size_t; } while (0) /* Return all but the first "n" matched characters back to the input stream. */ -#define yyless(n) \ - do { \ - /* Undo effects of setting up yytext. */ \ - int yyless_macro_arg = (n); \ - YY_LESS_LINENO(yyless_macro_arg); \ - *yy_cp = yyg->yy_hold_char; \ - YY_RESTORE_YY_MORE_OFFSET yyg->yy_c_buf_p = yy_cp = \ - yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ - YY_DO_BEFORE_ACTION; /* set up yytext again */ \ +#define yyless(n) \ + do { \ + /* Undo effects of setting up yytext. */ \ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg); \ + *yy_cp = yyg->yy_hold_char; \ + YY_RESTORE_YY_MORE_OFFSET \ + yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ + YY_DO_BEFORE_ACTION; /* set up yytext again */ \ } while (0) #define unput(c) yyunput(c, yyg->yytext_ptr, yyscanner) @@ -494,16 +494,16 @@ struct yy_buffer_state { #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 -/* When an EOF's been seen but there's still some text to process - * then we mark the buffer as YY_EOF_PENDING, to indicate that we - * shouldn't try reading from the input source any more. We might - * still have a bunch of tokens to match, though, because of - * possible backing-up. - * - * When we actually see the EOF, we change the status to "new" - * (via yyrestart()), so that the user can continue scanning by - * just pointing yyin at a new input file. - */ + /* When an EOF's been seen but there's still some text to process + * then we mark the buffer as YY_EOF_PENDING, to indicate that we + * shouldn't try reading from the input source any more. 
We might + * still have a bunch of tokens to match, though, because of + * possible backing-up. + * + * When we actually see the EOF, we change the status to "new" + * (via yyrestart()), so that the user can continue scanning by + * just pointing yyin at a new input file. + */ #define YY_BUFFER_EOF_PENDING 2 }; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ @@ -602,8 +602,9 @@ static const flex_int16_t yy_accept[117] = { 0, 0, 0, 15, 24, 25, 0, 24, 24, 24, 24, 24, 24, 24, 24, 8, 24, 24, 24, 24, 24, 24, 21, 0, 23, 20, 19, 25, 24, 24, 24, 24, 24, 5, 24, 24, 24, 24, 11, 24, 24, 13, 0, 24, 2, 24, 24, 24, 24, 24, 24, - 24, 24, 16, 0, 22, 1, 24, 17, 4, 24, 24, 24, 24, 12, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 10, 24, 6, 7, 24, 24, 9, 3, 0}; + 24, 24, 16, 0, 22, 1, 24, 17, 4, 24, 24, 24, 24, 12, 24, 24, + + 24, 24, 24, 24, 24, 24, 24, 10, 24, 6, 7, 24, 24, 9, 3, 0}; static const YY_CHAR yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, @@ -611,41 +612,46 @@ static const YY_CHAR yy_ec[256] = { 1, 1, 1, 1, 1, 6, 1, 7, 8, 1, 9, 10, 10, 10, 10, 10, 10, 10, 11, 11, 12, 1, 1, 1, 1, 1, 1, 13, 13, 13, 13, 14, 13, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 1, 1, 1, 1, - 16, 1, 17, 18, 19, 20, 21, 22, 15, 15, 23, 15, 15, 24, 25, 26, 27, 28, 15, - 29, 30, 31, 32, 33, 15, 34, 35, 36, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 16, 1, 17, 18, 19, 20, + + 21, 22, 15, 15, 23, 15, 15, 24, 25, 26, 27, 28, 15, 29, 30, 31, 32, 33, 15, + 34, 35, 36, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1}; + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; static const 
YY_CHAR yy_meta[37] = {0, 1, 1, 2, 1, 1, 1, 1, 1, 3, 3, 3, 1, 4, 4, 5, 5, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; static const flex_int16_t yy_base[123] = { - 0, 0, 0, 174, 175, 175, 175, 169, 0, 28, 164, 32, 17, 159, - 0, 154, 140, 141, 140, 28, 140, 31, 143, 135, 40, 134, 145, 157, - 175, 0, 0, 0, 175, 53, 62, 40, 0, 175, 0, 0, 144, 130, - 131, 131, 132, 128, 123, 119, 126, 0, 130, 119, 118, 124, 115, 117, - 65, 71, 74, 46, 0, 0, 128, 120, 113, 112, 124, 0, 117, 116, - 118, 114, 0, 103, 114, 0, 81, 99, 0, 102, 111, 100, 105, 104, - 105, 97, 105, 0, 84, 87, 0, 96, 0, 0, 101, 100, 103, 99, - 0, 103, 83, 69, 83, 54, 55, 56, 49, 34, 0, 34, 0, 0, - 37, 16, 0, 0, 175, 98, 103, 106, 108, 111, 113}; + 0, 0, 0, 174, 175, 175, 175, 169, 0, 28, 164, 32, 17, 159, 0, + 154, 140, 141, 140, 28, 140, 31, 143, 135, 40, 134, 145, 157, 175, 0, + 0, 0, 175, 53, 62, 40, 0, 175, 0, 0, 144, 130, 131, 131, 132, + 128, 123, 119, 126, 0, 130, 119, 118, 124, 115, 117, 65, 71, 74, 46, + 0, 0, 128, 120, 113, 112, 124, 0, 117, 116, 118, 114, 0, 103, 114, + 0, 81, 99, 0, 102, 111, 100, 105, 104, 105, 97, 105, 0, 84, 87, + 0, 96, 0, 0, 101, 100, 103, 99, 0, 103, 83, + + 69, 83, 54, 55, 56, 49, 34, 0, 34, 0, 0, 37, 16, 0, 0, + 175, 98, 103, 106, 108, 111, 113}; static const flex_int16_t yy_def[123] = { - 0, 116, 1, 116, 116, 116, 116, 117, 118, 116, 116, 116, 11, 116, - 119, 120, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 117, - 116, 118, 11, 12, 116, 116, 116, 116, 121, 116, 119, 122, 120, 119, - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, - 116, 116, 116, 116, 121, 122, 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 116, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 116, 116, 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 0, 116, 116, 116, 116, 116, 116}; + 0, 116, 1, 116, 116, 116, 116, 117, 118, 116, 116, 116, 
11, 116, 119, + 120, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 117, 116, 118, + 11, 12, 116, 116, 116, 116, 121, 116, 119, 122, 120, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 116, 116, 116, 116, + 121, 122, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 116, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 116, 116, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 0, 116, 116, 116, 116, 116, 116}; static const flex_int16_t yy_nxt[212] = { 0, 4, 5, 6, 7, 8, 4, 9, 10, 11, 12, 12, 13, 14, 14, @@ -654,15 +660,17 @@ static const flex_int16_t yy_nxt[212] = { 44, 34, 115, 47, 59, 59, 116, 45, 34, 48, 59, 59, 51, 114, 35, 113, 52, 56, 56, 56, 112, 36, 53, 57, 57, 111, 58, 58, 58, 56, 56, 56, 110, 109, 76, 58, 58, 58, 58, 58, 58, 76, 88, 88, 108, - 89, 89, 89, 89, 89, 89, 89, 89, 89, 27, 107, 27, 27, 27, 29, - 106, 29, 29, 29, 38, 38, 38, 39, 39, 60, 60, 61, 61, 61, 105, - 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, - 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 75, 74, 73, 72, - 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 40, 28, 55, 54, 50, - 49, 46, 43, 42, 41, 40, 37, 32, 28, 116, 3, 116, 116, 116, 116, + 89, 89, 89, 89, 89, 89, 89, 89, 89, 27, 107, + + 27, 27, 27, 29, 106, 29, 29, 29, 38, 38, 38, 39, 39, 60, 60, + 61, 61, 61, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, + 93, 92, 91, 90, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, + 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 40, + 28, 55, 54, 50, 49, 46, 43, 42, 41, 40, 37, 32, 28, 116, 3, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, - 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, - 116, 116}; + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116}; static const flex_int16_t yy_chk[212] = { 0, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -671,15 +679,17 @@ static const flex_int16_t yy_chk[212] = { 19, 11, 113, 21, 35, 35, 12, 19, 11, 21, 59, 59, 24, 112, 11, 109, 24, 33, 33, 33, 107, 11, 24, 34, 34, 106, 34, 34, 34, 56, 56, 56, 105, 104, 56, 57, 57, 57, 58, 58, 58, 56, 76, 76, 103, - 76, 76, 76, 88, 88, 88, 89, 89, 89, 117, 102, 117, 117, 117, 118, - 101, 118, 118, 118, 119, 119, 119, 120, 120, 121, 121, 122, 122, 122, 100, - 99, 97, 96, 95, 94, 91, 86, 85, 84, 83, 82, 81, 80, 79, 77, - 74, 73, 71, 70, 69, 68, 66, 65, 64, 63, 62, 55, 54, 53, 52, - 51, 50, 48, 47, 46, 45, 44, 43, 42, 41, 40, 27, 26, 25, 23, - 22, 20, 18, 17, 16, 15, 13, 10, 7, 3, 116, 116, 116, 116, 116, + 76, 76, 76, 88, 88, 88, 89, 89, 89, 117, 102, + + 117, 117, 117, 118, 101, 118, 118, 118, 119, 119, 119, 120, 120, 121, 121, + 122, 122, 122, 100, 99, 97, 96, 95, 94, 91, 86, 85, 84, 83, 82, + 81, 80, 79, 77, 74, 73, 71, 70, 69, 68, 66, 65, 64, 63, 62, + 55, 54, 53, 52, 51, 50, 48, 47, 46, 45, 44, 43, 42, 41, 40, + 27, 26, 25, 23, 22, 20, 18, 17, 16, 15, 13, 10, 7, 3, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, - 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, - 116, 116}; + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116}; /* Table of booleans, true if rule could match eol. 
*/ static const flex_int32_t yy_rule_can_match_eol[32] = { @@ -1029,7 +1039,7 @@ YY_DECL { switch (yy_act) { /* beginning of action switch */ case 0: /* must back up */ - /* undo the effects of YY_DO_BEFORE_ACTION */ + /* undo the effects of YY_DO_BEFORE_ACTION */ *yy_cp = yyg->yy_hold_char; yy_cp = yyg->yy_last_accepting_cpos; yy_current_state = yyg->yy_last_accepting_state; @@ -1041,160 +1051,220 @@ YY_DECL { { return ARRAY; } - YY_BREAK case 2 : YY_RULE_SETUP + YY_BREAK + case 2: + YY_RULE_SETUP #line 28 "./ortools/flatzinc/parser.lex" { return TOKEN_BOOL; } - YY_BREAK case 3 : YY_RULE_SETUP + YY_BREAK + case 3: + YY_RULE_SETUP #line 29 "./ortools/flatzinc/parser.lex" { return CONSTRAINT; } - YY_BREAK case 4 : YY_RULE_SETUP + YY_BREAK + case 4: + YY_RULE_SETUP #line 30 "./ortools/flatzinc/parser.lex" { return TOKEN_FLOAT; } - YY_BREAK case 5 : YY_RULE_SETUP + YY_BREAK + case 5: + YY_RULE_SETUP #line 31 "./ortools/flatzinc/parser.lex" { return TOKEN_INT; } - YY_BREAK case 6 : YY_RULE_SETUP + YY_BREAK + case 6: + YY_RULE_SETUP #line 32 "./ortools/flatzinc/parser.lex" { return MAXIMIZE; } - YY_BREAK case 7 : YY_RULE_SETUP + YY_BREAK + case 7: + YY_RULE_SETUP #line 33 "./ortools/flatzinc/parser.lex" { return MINIMIZE; } - YY_BREAK case 8 : YY_RULE_SETUP + YY_BREAK + case 8: + YY_RULE_SETUP #line 34 "./ortools/flatzinc/parser.lex" { return OF; } - YY_BREAK case 9 : YY_RULE_SETUP + YY_BREAK + case 9: + YY_RULE_SETUP #line 35 "./ortools/flatzinc/parser.lex" { return PREDICATE; } - YY_BREAK case 10 : YY_RULE_SETUP + YY_BREAK + case 10: + YY_RULE_SETUP #line 36 "./ortools/flatzinc/parser.lex" { return SATISFY; } - YY_BREAK case 11 : YY_RULE_SETUP + YY_BREAK + case 11: + YY_RULE_SETUP #line 37 "./ortools/flatzinc/parser.lex" { return SET; } - YY_BREAK case 12 : YY_RULE_SETUP + YY_BREAK + case 12: + YY_RULE_SETUP #line 38 "./ortools/flatzinc/parser.lex" { return SOLVE; } - YY_BREAK case 13 : YY_RULE_SETUP + YY_BREAK + case 13: + YY_RULE_SETUP #line 39 
"./ortools/flatzinc/parser.lex" { return VAR; } - YY_BREAK case 14 : YY_RULE_SETUP + YY_BREAK + case 14: + YY_RULE_SETUP #line 40 "./ortools/flatzinc/parser.lex" { return DOTDOT; } - YY_BREAK case 15 : YY_RULE_SETUP + YY_BREAK + case 15: + YY_RULE_SETUP #line 41 "./ortools/flatzinc/parser.lex" { return COLONCOLON; } - YY_BREAK case 16 : YY_RULE_SETUP + YY_BREAK + case 16: + YY_RULE_SETUP #line 43 "./ortools/flatzinc/parser.lex" { yylval->integer_value = 1; return IVALUE; } - YY_BREAK case 17 : YY_RULE_SETUP + YY_BREAK + case 17: + YY_RULE_SETUP #line 47 "./ortools/flatzinc/parser.lex" { yylval->integer_value = 0; return IVALUE; } - YY_BREAK case 18 : YY_RULE_SETUP + YY_BREAK + case 18: + YY_RULE_SETUP #line 51 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtoi(yytext, &yylval->integer_value)); return IVALUE; } - YY_BREAK case 19 : YY_RULE_SETUP + YY_BREAK + case 19: + YY_RULE_SETUP #line 55 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtoi(yytext, &yylval->integer_value)); return IVALUE; } - YY_BREAK case 20 : YY_RULE_SETUP + YY_BREAK + case 20: + YY_RULE_SETUP #line 59 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtoi(yytext, &yylval->integer_value)); return IVALUE; } - YY_BREAK case 21 : YY_RULE_SETUP + YY_BREAK + case 21: + YY_RULE_SETUP #line 63 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtod(yytext, &yylval->double_value)); return DVALUE; } - YY_BREAK case 22 : YY_RULE_SETUP + YY_BREAK + case 22: + YY_RULE_SETUP #line 67 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtod(yytext, &yylval->double_value)); return DVALUE; } - YY_BREAK case 23 : YY_RULE_SETUP + YY_BREAK + case 23: + YY_RULE_SETUP #line 71 "./ortools/flatzinc/parser.lex" { CHECK(absl::SimpleAtod(yytext, &yylval->double_value)); return DVALUE; } - YY_BREAK case 24 : YY_RULE_SETUP + YY_BREAK + case 24: + YY_RULE_SETUP #line 76 "./ortools/flatzinc/parser.lex" { yylval->string_value = yytext; return IDENTIFIER; } - YY_BREAK case 25 : YY_RULE_SETUP + YY_BREAK + case 25: 
+ YY_RULE_SETUP #line 80 "./ortools/flatzinc/parser.lex" { yylval->string_value = yytext; return IDENTIFIER; } - YY_BREAK case 26 : YY_RULE_SETUP + YY_BREAK + case 26: + YY_RULE_SETUP #line 84 "./ortools/flatzinc/parser.lex" { yylval->string_value = yytext; return SVALUE; } - YY_BREAK case 27 : /* rule 27 can match eol */ - YY_RULE_SETUP + YY_BREAK + case 27: + /* rule 27 can match eol */ + YY_RULE_SETUP #line 85 "./ortools/flatzinc/parser.lex" - ; - YY_BREAK case 28 : YY_RULE_SETUP + ; + YY_BREAK + case 28: + YY_RULE_SETUP #line 86 "./ortools/flatzinc/parser.lex" - ; - YY_BREAK case 29 : YY_RULE_SETUP + ; + YY_BREAK + case 29: + YY_RULE_SETUP #line 87 "./ortools/flatzinc/parser.lex" - ; - YY_BREAK case 30 : YY_RULE_SETUP + ; + YY_BREAK + case 30: + YY_RULE_SETUP #line 88 "./ortools/flatzinc/parser.lex" { return yytext[0]; } - YY_BREAK case 31 : YY_RULE_SETUP + YY_BREAK + case 31: + YY_RULE_SETUP #line 89 "./ortools/flatzinc/parser.lex" - ECHO; + ECHO; YY_BREAK #line 1305 "./ortools/flatzinc/parser.yy.cc" case YY_STATE_EOF(INITIAL): @@ -1206,9 +1276,9 @@ YY_DECL { /* Undo the effects of YY_DO_BEFORE_ACTION. */ *yy_cp = yyg->yy_hold_char; - YY_RESTORE_YY_MORE_OFFSET if (YY_CURRENT_BUFFER_LVALUE - ->yy_buffer_status == - YY_BUFFER_NEW) { + YY_RESTORE_YY_MORE_OFFSET + + if (YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW) { /* We're scanning a new file or input source. It's * possible that this happened because the user * just pointed yyin at a new source and called @@ -1231,8 +1301,8 @@ YY_DECL { * in input(). */ if (yyg->yy_c_buf_p <= - &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]) { - /* This was really a NUL. */ + &YY_CURRENT_BUFFER_LVALUE + ->yy_ch_buf[yyg->yy_n_chars]) { /* This was really a NUL. 
*/ yy_state_type yy_next_state; yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text; @@ -1257,11 +1327,15 @@ YY_DECL { yy_cp = ++yyg->yy_c_buf_p; yy_current_state = yy_next_state; goto yy_match; - } else { + } + + else { yy_cp = yyg->yy_c_buf_p; goto yy_find_action; } - } else + } + + else switch (yy_get_next_buffer(yyscanner)) { case EOB_ACT_END_OF_FILE: { yyg->yy_did_buffer_switch_on_eof = 0; @@ -1280,7 +1354,9 @@ YY_DECL { yy_act = YY_STATE_EOF(YY_START); goto do_action; - } else { + } + + else { if (!yyg->yy_did_buffer_switch_on_eof) YY_NEW_FILE; } break; @@ -1340,7 +1416,9 @@ static int yy_get_next_buffer(yyscan_t yyscanner) { * treat this as a final EOF. */ return EOB_ACT_END_OF_FILE; - } else { + } + + else { /* We matched some text prior to the EOB, first * process it. */ @@ -1380,7 +1458,8 @@ static int yy_get_next_buffer(yyscan_t yyscanner) { else b->yy_buf_size *= 2; - b->yy_ch_buf = (char *)/* Include room in for 2 EOB chars. */ + b->yy_ch_buf = (char *) + /* Include room in for 2 EOB chars. */ yyrealloc((void *)b->yy_ch_buf, (yy_size_t)(b->yy_buf_size + 2), yyscanner); } else @@ -1408,11 +1487,15 @@ static int yy_get_next_buffer(yyscan_t yyscanner) { if (number_to_move == YY_MORE_ADJ) { ret_val = EOB_ACT_END_OF_FILE; yyrestart(yyin, yyscanner); - } else { + } + + else { ret_val = EOB_ACT_LAST_MATCH; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_EOF_PENDING; } - } else + } + + else ret_val = EOB_ACT_CONTINUE_SCAN; if ((yyg->yy_n_chars + number_to_move) > @@ -1472,8 +1555,8 @@ static yy_state_type yy_get_previous_state(yyscan_t yyscanner) { static yy_state_type yy_try_NUL_trans(yy_state_type yy_current_state, yyscan_t yyscanner) { int yy_is_jam; - struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - /* This var may be unused depending upon options. */ + struct yyguts_t *yyg = (struct yyguts_t *) + yyscanner; /* This var may be unused depending upon options. 
*/ char *yy_cp = yyg->yy_c_buf_p; YY_CHAR yy_c = 1; @@ -1505,7 +1588,7 @@ static void yyunput(int c, char *yy_bp, yyscan_t yyscanner) { if (yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2) { /* need to shift things up to make room */ - /* +2 for EOB chars. */ + /* +2 for EOB chars. */ int number_to_move = yyg->yy_n_chars + 2; char *dest = &YY_CURRENT_BUFFER_LVALUE ->yy_ch_buf[YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2]; @@ -1541,6 +1624,7 @@ static int yyinput(yyscan_t yyscanner) #else static int input(yyscan_t yyscanner) #endif + { int c; struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; @@ -1716,7 +1800,9 @@ void yy_delete_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner) { * This function is sometimes called more than once on the same buffer, * such as during a yyrestart() or at EOF. */ -static void yy_init_buffer(YY_BUFFER_STATE b, FILE *file, yyscan_t yyscanner) { +static void yy_init_buffer(YY_BUFFER_STATE b, FILE *file, yyscan_t yyscanner) + +{ int oerrno = errno; struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; diff --git a/ortools/flatzinc/parser_main.cc b/ortools/flatzinc/parser_main.cc index 6cff9c5787..9f7bdbed71 100644 --- a/ortools/flatzinc/parser_main.cc +++ b/ortools/flatzinc/parser_main.cc @@ -31,7 +31,7 @@ ABSL_FLAG(bool, statistics, false, "Print model statistics"); namespace operations_research { namespace fz { -void ParseFile(const std::string &filename, bool presolve) { +void ParseFile(const std::string& filename, bool presolve) { WallTimer timer; timer.Start(); @@ -70,7 +70,7 @@ void ParseFile(const std::string &filename, bool presolve) { } // namespace fz } // namespace operations_research -int main(int argc, char **argv) { +int main(int argc, char** argv) { const char kUsage[] = "Parses a flatzinc .fzn file, optionally presolve it, and prints it in " "human-readable format"; diff --git a/ortools/flatzinc/parser_util.cc b/ortools/flatzinc/parser_util.cc index 37fa67d02c..88b7261fd9 100644 --- a/ortools/flatzinc/parser_util.cc +++ 
b/ortools/flatzinc/parser_util.cc @@ -25,13 +25,13 @@ #include "ortools/flatzinc/parser.tab.hh" #include "ortools/util/string_array.h" -extern int orfz_lex(YYSTYPE *, void *scanner); -extern int orfz_get_lineno(void *scanner); +extern int orfz_lex(YYSTYPE*, void* scanner); +extern int orfz_get_lineno(void* scanner); extern int orfz_debug; -void orfz_error(operations_research::fz::ParserContext *context, - operations_research::fz::Model *model, bool *ok, void *scanner, - const char *str) { +void orfz_error(operations_research::fz::ParserContext* context, + operations_research::fz::Model* model, bool* ok, void* scanner, + const char* str) { LOG(ERROR) << "Error: " << str << " in line no. " << orfz_get_lineno(scanner); *ok = false; } @@ -40,7 +40,7 @@ namespace operations_research { namespace fz { // Whether the given list of annotations contains the given identifier // (or function call). -bool ContainsId(std::vector *annotations, const std::string &id) { +bool ContainsId(std::vector* annotations, const std::string& id) { if (annotations != nullptr) { for (int i = 0; i < annotations->size(); ++i) { if (((*annotations)[i].type == Annotation::IDENTIFIER || @@ -53,7 +53,7 @@ bool ContainsId(std::vector *annotations, const std::string &id) { return false; } -bool AllDomainsHaveOneValue(const std::vector &domains) { +bool AllDomainsHaveOneValue(const std::vector& domains) { for (int i = 0; i < domains.size(); ++i) { if (!domains[i].HasOneValue()) { return false; @@ -72,7 +72,7 @@ int64 ConvertAsIntegerOrDie(double d) { // Array in flatzinc are 1 based. We use this trivial wrapper for all flatzinc // arrays. template -const T &Lookup(const std::vector &v, int index) { +const T& Lookup(const std::vector& v, int index) { // TODO(user): replace this by a macro for better logging. 
CHECK_GE(index, 1); CHECK_LE(index, v.size()); diff --git a/ortools/flatzinc/presolve.cc b/ortools/flatzinc/presolve.cc index 21f774c17c..d5014630ad 100644 --- a/ortools/flatzinc/presolve.cc +++ b/ortools/flatzinc/presolve.cc @@ -36,14 +36,14 @@ namespace { enum PresolveState { ALWAYS_FALSE, ALWAYS_TRUE, UNDECIDED }; // TODO(user): accept variables fixed to 0 or 1. -bool Has01Values(IntegerVariable *var) { +bool Has01Values(IntegerVariable* var) { return var->domain.Min() == 0 && var->domain.Max() == 1; } bool Is0Or1(int64 value) { return !(value & ~1LL); } template -bool IsArrayBoolean(const std::vector &values) { +bool IsArrayBoolean(const std::vector& values) { for (int i = 0; i < values.size(); ++i) { if (values[i] != 0 && values[i] != 1) { return false; @@ -53,7 +53,7 @@ bool IsArrayBoolean(const std::vector &values) { } template -bool AtMostOne0OrAtMostOne1(const std::vector &values) { +bool AtMostOne0OrAtMostOne1(const std::vector& values) { CHECK(IsArrayBoolean(values)); int num_zero = 0; int num_one = 0; @@ -70,12 +70,12 @@ bool AtMostOne0OrAtMostOne1(const std::vector &values) { return true; } -absl::flat_hash_set GetValueSet(const Argument &arg) { +absl::flat_hash_set GetValueSet(const Argument& arg) { absl::flat_hash_set result; if (arg.HasOneValue()) { result.insert(arg.Value()); } else { - const Domain &domain = arg.Var()->domain; + const Domain& domain = arg.Var()->domain; if (domain.is_interval && !domain.values.empty()) { for (int64 v = domain.values[0]; v <= domain.values[1]; ++v) { result.insert(v); @@ -87,7 +87,7 @@ absl::flat_hash_set GetValueSet(const Argument &arg) { return result; } -void SetConstraintAsIntEq(Constraint *ct, IntegerVariable *var, int64 value) { +void SetConstraintAsIntEq(Constraint* ct, IntegerVariable* var, int64 value) { CHECK(var != nullptr); ct->type = "int_eq"; ct->arguments.clear(); @@ -95,9 +95,9 @@ void SetConstraintAsIntEq(Constraint *ct, IntegerVariable *var, int64 value) { 
ct->arguments.push_back(Argument::IntegerValue(value)); } -bool OverlapsAt(const Argument &array, int pos, const Argument &other) { +bool OverlapsAt(const Argument& array, int pos, const Argument& other) { if (array.type == Argument::INT_VAR_REF_ARRAY) { - const Domain &domain = array.variables[pos]->domain; + const Domain& domain = array.variables[pos]->domain; if (domain.IsAllInt64()) { return true; } @@ -147,8 +147,8 @@ bool OverlapsAt(const Argument &array, int pos, const Argument &other) { } template -void AppendIfNotInSet(T *value, absl::flat_hash_set *s, - std::vector *vec) { +void AppendIfNotInSet(T* value, absl::flat_hash_set* s, + std::vector* vec) { if (s->insert(value).second) { vec->push_back(value); } @@ -177,7 +177,7 @@ void AppendIfNotInSet(T *value, absl::flat_hash_set *s, // Input: bool2int(b, x) // Action: Replace all instances of x by b. // Output: inactive constraint -void Presolver::PresolveBool2Int(Constraint *ct) { +void Presolver::PresolveBool2Int(Constraint* ct) { DCHECK_EQ(ct->type, "bool2int"); if (ct->arguments[0].HasOneValue() || ct->arguments[1].HasOneValue()) { // Rule 1. @@ -196,10 +196,10 @@ void Presolver::PresolveBool2Int(Constraint *ct) { // This rule stores the mapping to reconstruct the 2d element constraint. // This mapping can involve 1 or 2 variables dependening if y or z in A[y][z] // is a constant in the model). 
-void Presolver::PresolveStoreAffineMapping(Constraint *ct) { +void Presolver::PresolveStoreAffineMapping(Constraint* ct) { CHECK_EQ(2, ct->arguments[1].variables.size()); - IntegerVariable *const var0 = ct->arguments[1].variables[0]; - IntegerVariable *const var1 = ct->arguments[1].variables[1]; + IntegerVariable* const var0 = ct->arguments[1].variables[0]; + IntegerVariable* const var1 = ct->arguments[1].variables[1]; const int64 coeff0 = ct->arguments[0].values[0]; const int64 coeff1 = ct->arguments[0].values[1]; const int64 rhs = ct->arguments[2].Value(); @@ -212,11 +212,11 @@ void Presolver::PresolveStoreAffineMapping(Constraint *ct) { } } -void Presolver::PresolveStoreFlatteningMapping(Constraint *ct) { +void Presolver::PresolveStoreFlatteningMapping(Constraint* ct) { CHECK_EQ(3, ct->arguments[1].variables.size()); - IntegerVariable *const var0 = ct->arguments[1].variables[0]; - IntegerVariable *const var1 = ct->arguments[1].variables[1]; - IntegerVariable *const var2 = ct->arguments[1].variables[2]; + IntegerVariable* const var0 = ct->arguments[1].variables[0]; + IntegerVariable* const var1 = ct->arguments[1].variables[1]; + IntegerVariable* const var2 = ct->arguments[1].variables[2]; const int64 coeff0 = ct->arguments[0].values[0]; const int64 coeff1 = ct->arguments[0].values[1]; const int64 coeff2 = ct->arguments[0].values[2]; @@ -245,7 +245,7 @@ void Presolver::PresolveStoreFlatteningMapping(Constraint *ct) { } namespace { -bool IsIncreasingAndContiguous(const std::vector &values) { +bool IsIncreasingAndContiguous(const std::vector& values) { for (int i = 0; i < values.size() - 1; ++i) { if (values[i + 1] != values[i] + 1) { return false; @@ -254,7 +254,7 @@ bool IsIncreasingAndContiguous(const std::vector &values) { return true; } -bool AreOnesFollowedByMinusOne(const std::vector &coeffs) { +bool AreOnesFollowedByMinusOne(const std::vector& coeffs) { CHECK(!coeffs.empty()); for (int i = 0; i < coeffs.size() - 1; ++i) { if (coeffs[i] != 1) { @@ -265,7 
+265,7 @@ bool AreOnesFollowedByMinusOne(const std::vector &coeffs) { } template -bool IsStrictPrefix(const std::vector &v1, const std::vector &v2) { +bool IsStrictPrefix(const std::vector& v1, const std::vector& v2) { if (v1.size() >= v2.size()) { return false; } @@ -296,14 +296,14 @@ bool IsStrictPrefix(const std::vector &v1, const std::vector &v2) { // Rule 4: // Input : array_int_element(x, [c1, .., cn], y) with x0 ci = c0 + i // Output: int_lin_eq([-1, 1], [y, x], 1 - c) (e.g. y = x + c - 1) -void Presolver::PresolveSimplifyElement(Constraint *ct) { +void Presolver::PresolveSimplifyElement(Constraint* ct) { if (ct->arguments[0].variables.size() != 1) return; - IntegerVariable *const index_var = ct->arguments[0].Var(); + IntegerVariable* const index_var = ct->arguments[0].Var(); // Rule 1. if (gtl::ContainsKey(affine_map_, index_var)) { - const AffineMapping &mapping = affine_map_[index_var]; - const Domain &domain = mapping.variable->domain; + const AffineMapping& mapping = affine_map_[index_var]; + const Domain& domain = mapping.variable->domain; if (domain.is_interval && domain.values.empty()) { // Invalid case. Ignore it. return; @@ -325,7 +325,7 @@ void Presolver::PresolveSimplifyElement(Constraint *ct) { return; } else if (mapping.offset + mapping.coefficient > 0 && domain.values[0] > 0) { - const std::vector &values = ct->arguments[1].values; + const std::vector& values = ct->arguments[1].values; std::vector new_values; for (int64 i = 1; i <= domain.values.back(); ++i) { const int64 index = i * mapping.coefficient + mapping.offset - 1; @@ -359,7 +359,7 @@ void Presolver::PresolveSimplifyElement(Constraint *ct) { // Rule 2. if (gtl::ContainsKey(array2d_index_map_, index_var)) { UpdateRuleStats("array_int_element: rewrite as a 2d element"); - const Array2DIndexMapping &mapping = array2d_index_map_[index_var]; + const Array2DIndexMapping& mapping = array2d_index_map_[index_var]; // Rewrite constraint. 
ct->arguments[0] = Argument::IntVarRefArray({mapping.variable1, mapping.variable2}); @@ -385,8 +385,8 @@ void Presolver::PresolveSimplifyElement(Constraint *ct) { // Rule 4. if (IsIncreasingAndContiguous(ct->arguments[1].values)) { const int64 start = ct->arguments[1].values.front(); - IntegerVariable *const index = ct->arguments[0].Var(); - IntegerVariable *const target = ct->arguments[2].Var(); + IntegerVariable* const index = ct->arguments[0].Var(); + IntegerVariable* const target = ct->arguments[2].Var(); UpdateRuleStats("array_int_element: rewrite as a linear constraint"); if (start == 1) { @@ -406,20 +406,20 @@ void Presolver::PresolveSimplifyElement(Constraint *ct) { // // Input : array_var_int_element(x0, [x1, .., xn], y) with x0 = a * x + b // Output: array_var_int_element(x, [x_a1, .., x_an], b) with a * i = b = ai -void Presolver::PresolveSimplifyExprElement(Constraint *ct) { +void Presolver::PresolveSimplifyExprElement(Constraint* ct) { if (ct->arguments[0].variables.size() != 1) return; - IntegerVariable *const index_var = ct->arguments[0].Var(); + IntegerVariable* const index_var = ct->arguments[0].Var(); if (gtl::ContainsKey(affine_map_, index_var)) { - const AffineMapping &mapping = affine_map_[index_var]; - const Domain &domain = mapping.variable->domain; + const AffineMapping& mapping = affine_map_[index_var]; + const Domain& domain = mapping.variable->domain; if ((domain.is_interval && domain.values.empty()) || domain.values[0] != 1 || mapping.offset + mapping.coefficient <= 0) { // Invalid case. Ignore it. 
return; } - const std::vector &vars = ct->arguments[1].variables; - std::vector new_vars; + const std::vector& vars = ct->arguments[1].variables; + std::vector new_vars; for (int64 i = domain.values.front(); i <= domain.values.back(); ++i) { const int64 index = i * mapping.coefficient + mapping.offset - 1; if (index < 0) { @@ -447,12 +447,12 @@ void Presolver::PresolveSimplifyExprElement(Constraint *ct) { } } -void Presolver::Run(Model *model) { +void Presolver::Run(Model* model) { // Should rewrite float constraints. if (absl::GetFlag(FLAGS_fz_floats_are_ints)) { // Treat float variables as int variables, convert constraints to int. - for (Constraint *const ct : model->constraints()) { - const std::string &id = ct->type; + for (Constraint* const ct : model->constraints()) { + const std::string& id = ct->type; if (id == "int2float") { ct->type = "int_eq"; } else if (id == "float_lin_le") { @@ -465,10 +465,10 @@ void Presolver::Run(Model *model) { // Regroup increasing sequence of int_lin_eq([1,..,1,-1], [x1, ..., xn, yn]) // into sequence of int_plus(x1, x2, y2), int_plus(y2, x3, y3)... - std::vector current_variables; - IntegerVariable *target_variable = nullptr; - Constraint *first_constraint = nullptr; - for (Constraint *const ct : model->constraints()) { + std::vector current_variables; + IntegerVariable* target_variable = nullptr; + Constraint* first_constraint = nullptr; + for (Constraint* const ct : model->constraints()) { if (target_variable == nullptr) { if (ct->type == "int_lin_eq" && ct->arguments[0].values.size() == 3 && AreOnesFollowedByMinusOne(ct->arguments[0].values) && @@ -508,7 +508,7 @@ void Presolver::Run(Model *model) { } // First pass. - for (Constraint *const ct : model->constraints()) { + for (Constraint* const ct : model->constraints()) { if (ct->active && ct->type == "bool2int") { PresolveBool2Int(ct); } else if (ct->active && ct->type == "int_lin_eq" && @@ -529,7 +529,7 @@ void Presolver::Run(Model *model) { } // Second pass. 
- for (Constraint *const ct : model->constraints()) { + for (Constraint* const ct : model->constraints()) { if (ct->type == "array_int_element" || ct->type == "array_bool_element") { PresolveSimplifyElement(ct); } @@ -541,7 +541,7 @@ void Presolver::Run(Model *model) { // Report presolve rules statistics. if (!successful_rules_.empty()) { - for (const auto &rule : successful_rules_) { + for (const auto& rule : successful_rules_) { if (rule.second == 1) { FZLOG << " - rule '" << rule.first << "' was applied 1 time" << FZENDL; } else { @@ -554,8 +554,8 @@ void Presolver::Run(Model *model) { // ----- Substitution support ----- -void Presolver::AddVariableSubstitution(IntegerVariable *from, - IntegerVariable *to) { +void Presolver::AddVariableSubstitution(IntegerVariable* from, + IntegerVariable* to) { CHECK(from != nullptr); CHECK(to != nullptr); // Apply the substitutions, if any. @@ -563,7 +563,7 @@ void Presolver::AddVariableSubstitution(IntegerVariable *from, to = FindRepresentativeOfVar(to); if (to->temporary) { // Let's switch to keep a non temporary as representative. - IntegerVariable *tmp = to; + IntegerVariable* tmp = to; to = from; from = tmp; } @@ -577,37 +577,37 @@ void Presolver::AddVariableSubstitution(IntegerVariable *from, } } -IntegerVariable *Presolver::FindRepresentativeOfVar(IntegerVariable *var) { +IntegerVariable* Presolver::FindRepresentativeOfVar(IntegerVariable* var) { if (var == nullptr) return nullptr; - IntegerVariable *start_var = var; + IntegerVariable* start_var = var; // First loop: find the top parent. for (;;) { - IntegerVariable *parent = + IntegerVariable* parent = gtl::FindWithDefault(var_representative_map_, var, var); if (parent == var) break; var = parent; } // Second loop: attach all the path to the top parent. 
while (start_var != var) { - IntegerVariable *const parent = var_representative_map_[start_var]; + IntegerVariable* const parent = var_representative_map_[start_var]; var_representative_map_[start_var] = var; start_var = parent; } return gtl::FindWithDefault(var_representative_map_, var, var); } -void Presolver::SubstituteEverywhere(Model *model) { +void Presolver::SubstituteEverywhere(Model* model) { // Rewrite the constraints. - for (Constraint *const ct : model->constraints()) { + for (Constraint* const ct : model->constraints()) { if (ct != nullptr && ct->active) { for (int i = 0; i < ct->arguments.size(); ++i) { - Argument &argument = ct->arguments[i]; + Argument& argument = ct->arguments[i]; switch (argument.type) { case Argument::INT_VAR_REF: case Argument::INT_VAR_REF_ARRAY: { for (int i = 0; i < argument.variables.size(); ++i) { - IntegerVariable *const old_var = argument.variables[i]; - IntegerVariable *const new_var = FindRepresentativeOfVar(old_var); + IntegerVariable* const old_var = argument.variables[i]; + IntegerVariable* const new_var = FindRepresentativeOfVar(old_var); if (new_var != old_var) { argument.variables[i] = new_var; } @@ -621,11 +621,11 @@ void Presolver::SubstituteEverywhere(Model *model) { } } // Rewrite the search. - for (Annotation *const ann : model->mutable_search_annotations()) { + for (Annotation* const ann : model->mutable_search_annotations()) { SubstituteAnnotation(ann); } // Rewrite the output. - for (SolutionOutputSpecs *const output : model->mutable_output()) { + for (SolutionOutputSpecs* const output : model->mutable_output()) { output->variable = FindRepresentativeOfVar(output->variable); for (int i = 0; i < output->flat_variables.size(); ++i) { output->flat_variables[i] = @@ -634,21 +634,21 @@ void Presolver::SubstituteEverywhere(Model *model) { } // Do not forget to merge domain that could have evolved asynchronously // during presolve. 
- for (const auto &iter : var_representative_map_) { + for (const auto& iter : var_representative_map_) { iter.second->domain.IntersectWithDomain(iter.first->domain); } // Change the objective variable. - IntegerVariable *const current_objective = model->objective(); + IntegerVariable* const current_objective = model->objective(); if (current_objective == nullptr) return; - IntegerVariable *const new_objective = + IntegerVariable* const new_objective = FindRepresentativeOfVar(current_objective); if (new_objective != current_objective) { model->SetObjective(new_objective); } } -void Presolver::SubstituteAnnotation(Annotation *ann) { +void Presolver::SubstituteAnnotation(Annotation* ann) { // TODO(user): Remove recursion. switch (ann->type) { case Annotation::ANNOTATION_LIST: diff --git a/ortools/glop/basis_representation.cc b/ortools/glop/basis_representation.cc index 46f6cfff27..49bdd36de5 100644 --- a/ortools/glop/basis_representation.cc +++ b/ortools/glop/basis_representation.cc @@ -26,7 +26,7 @@ namespace glop { const Fractional EtaMatrix::kSparseThreshold = 0.5; -EtaMatrix::EtaMatrix(ColIndex eta_col, const ScatteredColumn &direction) +EtaMatrix::EtaMatrix(ColIndex eta_col, const ScatteredColumn& direction) : eta_col_(eta_col), eta_col_coefficient_(direction[ColToRowIndex(eta_col)]), eta_coeff_(), @@ -48,7 +48,7 @@ EtaMatrix::EtaMatrix(ColIndex eta_col, const ScatteredColumn &direction) EtaMatrix::~EtaMatrix() {} -void EtaMatrix::LeftSolve(DenseRow *y) const { +void EtaMatrix::LeftSolve(DenseRow* y) const { RETURN_IF_NULL(y); DCHECK_EQ(RowToColIndex(eta_coeff_.size()), y->size()); if (!sparse_eta_coeff_.IsEmpty()) { @@ -58,7 +58,7 @@ void EtaMatrix::LeftSolve(DenseRow *y) const { } } -void EtaMatrix::RightSolve(DenseColumn *d) const { +void EtaMatrix::RightSolve(DenseColumn* d) const { RETURN_IF_NULL(d); DCHECK_EQ(eta_coeff_.size(), d->size()); @@ -72,7 +72,7 @@ void EtaMatrix::RightSolve(DenseColumn *d) const { } } -void EtaMatrix::SparseLeftSolve(DenseRow *y, 
ColIndexVector *pos) const { +void EtaMatrix::SparseLeftSolve(DenseRow* y, ColIndexVector* pos) const { RETURN_IF_NULL(y); DCHECK_EQ(RowToColIndex(eta_coeff_.size()), y->size()); @@ -95,7 +95,7 @@ void EtaMatrix::SparseLeftSolve(DenseRow *y, ColIndexVector *pos) const { if (!is_eta_col_in_pos) pos->push_back(eta_col_); } -void EtaMatrix::LeftSolveWithDenseEta(DenseRow *y) const { +void EtaMatrix::LeftSolveWithDenseEta(DenseRow* y) const { Fractional y_value = (*y)[eta_col_]; const RowIndex num_rows(eta_coeff_.size()); for (RowIndex row(0); row < num_rows; ++row) { @@ -104,7 +104,7 @@ void EtaMatrix::LeftSolveWithDenseEta(DenseRow *y) const { (*y)[eta_col_] = y_value / eta_col_coefficient_; } -void EtaMatrix::LeftSolveWithSparseEta(DenseRow *y) const { +void EtaMatrix::LeftSolveWithSparseEta(DenseRow* y) const { Fractional y_value = (*y)[eta_col_]; for (const SparseColumn::Entry e : sparse_eta_coeff_) { y_value -= (*y)[RowToColIndex(e.row())] * e.coefficient(); @@ -112,7 +112,7 @@ void EtaMatrix::LeftSolveWithSparseEta(DenseRow *y) const { (*y)[eta_col_] = y_value / eta_col_coefficient_; } -void EtaMatrix::RightSolveWithDenseEta(DenseColumn *d) const { +void EtaMatrix::RightSolveWithDenseEta(DenseColumn* d) const { const RowIndex eta_row = ColToRowIndex(eta_col_); const Fractional coeff = (*d)[eta_row] / eta_col_coefficient_; const RowIndex num_rows(eta_coeff_.size()); @@ -122,7 +122,7 @@ void EtaMatrix::RightSolveWithDenseEta(DenseColumn *d) const { (*d)[eta_row] = coeff; } -void EtaMatrix::RightSolveWithSparseEta(DenseColumn *d) const { +void EtaMatrix::RightSolveWithSparseEta(DenseColumn* d) const { const RowIndex eta_row = ColToRowIndex(eta_col_); const Fractional coeff = (*d)[eta_row] / eta_col_coefficient_; for (const SparseColumn::Entry e : sparse_eta_coeff_) { @@ -142,28 +142,28 @@ void EtaFactorization::Clear() { gtl::STLDeleteElements(&eta_matrix_); } void EtaFactorization::Update(ColIndex entering_col, RowIndex leaving_variable_row, - const 
ScatteredColumn &direction) { + const ScatteredColumn& direction) { const ColIndex leaving_variable_col = RowToColIndex(leaving_variable_row); - EtaMatrix *const eta_factorization = + EtaMatrix* const eta_factorization = new EtaMatrix(leaving_variable_col, direction); eta_matrix_.push_back(eta_factorization); } -void EtaFactorization::LeftSolve(DenseRow *y) const { +void EtaFactorization::LeftSolve(DenseRow* y) const { RETURN_IF_NULL(y); for (int i = eta_matrix_.size() - 1; i >= 0; --i) { eta_matrix_[i]->LeftSolve(y); } } -void EtaFactorization::SparseLeftSolve(DenseRow *y, ColIndexVector *pos) const { +void EtaFactorization::SparseLeftSolve(DenseRow* y, ColIndexVector* pos) const { RETURN_IF_NULL(y); for (int i = eta_matrix_.size() - 1; i >= 0; --i) { eta_matrix_[i]->SparseLeftSolve(y, pos); } } -void EtaFactorization::RightSolve(DenseColumn *d) const { +void EtaFactorization::RightSolve(DenseColumn* d) const { RETURN_IF_NULL(d); const size_t num_eta_matrices = eta_matrix_.size(); for (int i = 0; i < num_eta_matrices; ++i) { @@ -175,7 +175,7 @@ void EtaFactorization::RightSolve(DenseColumn *d) const { // BasisFactorization // -------------------------------------------------------- BasisFactorization::BasisFactorization( - const CompactSparseMatrix *compact_matrix, const RowToColMapping *basis) + const CompactSparseMatrix* compact_matrix, const RowToColMapping* basis) : stats_(), compact_matrix_(*compact_matrix), basis_(*basis), @@ -260,7 +260,7 @@ Status BasisFactorization::MiddleProductFormUpdate( scratchpad_non_zeros_.push_back(row); } // Subtract the column of U from scratchpad_. 
- const SparseColumn &column_of_u = + const SparseColumn& column_of_u = lu_factorization_.GetColumnOfU(RowToColIndex(leaving_variable_row)); for (const SparseColumn::Entry e : column_of_u) { scratchpad_[e.row()] -= e.coefficient(); @@ -283,7 +283,7 @@ Status BasisFactorization::MiddleProductFormUpdate( Status BasisFactorization::Update(ColIndex entering_col, RowIndex leaving_variable_row, - const ScatteredColumn &direction) { + const ScatteredColumn& direction) { if (num_updates_ < max_num_updates_) { SCOPED_TIME_STAT(&stats_); @@ -303,7 +303,7 @@ Status BasisFactorization::Update(ColIndex entering_col, return ForceRefactorization(); } -void BasisFactorization::LeftSolve(ScatteredRow *y) const { +void BasisFactorization::LeftSolve(ScatteredRow* y) const { SCOPED_TIME_STAT(&stats_); RETURN_IF_NULL(y); BumpDeterministicTimeForSolve(compact_matrix_.num_rows().value()); @@ -319,7 +319,7 @@ void BasisFactorization::LeftSolve(ScatteredRow *y) const { } } -void BasisFactorization::RightSolve(ScatteredColumn *d) const { +void BasisFactorization::RightSolve(ScatteredColumn* d) const { SCOPED_TIME_STAT(&stats_); RETURN_IF_NULL(d); BumpDeterministicTimeForSolve(d->non_zeros.size()); @@ -335,8 +335,8 @@ void BasisFactorization::RightSolve(ScatteredColumn *d) const { } } -const DenseColumn &BasisFactorization::RightSolveForTau( - const ScatteredColumn &a) const { +const DenseColumn& BasisFactorization::RightSolveForTau( + const ScatteredColumn& a) const { SCOPED_TIME_STAT(&stats_); BumpDeterministicTimeForSolve(compact_matrix_.num_rows().value()); if (use_middle_product_form_update_) { @@ -362,7 +362,7 @@ const DenseColumn &BasisFactorization::RightSolveForTau( } void BasisFactorization::LeftSolveForUnitRow(ColIndex j, - ScatteredRow *y) const { + ScatteredRow* y) const { SCOPED_TIME_STAT(&stats_); RETURN_IF_NULL(y); BumpDeterministicTimeForSolve(1); @@ -388,12 +388,11 @@ void BasisFactorization::LeftSolveForUnitRow(ColIndex j, } else { left_pool_mapping_[j] = 
storage_.AddDenseColumnWithNonZeros( Transpose(y->values), - *reinterpret_cast(&y->non_zeros)); + *reinterpret_cast(&y->non_zeros)); } } else { - DenseColumn *const x = reinterpret_cast(y); - RowIndexVector *const nz = - reinterpret_cast(&y->non_zeros); + DenseColumn* const x = reinterpret_cast(y); + RowIndexVector* const nz = reinterpret_cast(&y->non_zeros); storage_.ColumnCopyToClearedDenseColumnWithNonZeros(left_pool_mapping_[j], x, nz); } @@ -414,7 +413,7 @@ void BasisFactorization::LeftSolveForUnitRow(ColIndex j, } void BasisFactorization::TemporaryLeftSolveForUnitRow(ColIndex j, - ScatteredRow *y) const { + ScatteredRow* y) const { CHECK(IsRefactorized()); SCOPED_TIME_STAT(&stats_); RETURN_IF_NULL(y); @@ -427,7 +426,7 @@ void BasisFactorization::TemporaryLeftSolveForUnitRow(ColIndex j, } void BasisFactorization::RightSolveForProblemColumn(ColIndex col, - ScatteredColumn *d) const { + ScatteredColumn* d) const { SCOPED_TIME_STAT(&stats_); RETURN_IF_NULL(d); BumpDeterministicTimeForSolve( @@ -465,7 +464,7 @@ void BasisFactorization::RightSolveForProblemColumn(ColIndex col, } Fractional BasisFactorization::RightSolveSquaredNorm( - const ColumnView &a) const { + const ColumnView& a) const { SCOPED_TIME_STAT(&stats_); DCHECK(IsRefactorized()); BumpDeterministicTimeForSolve(a.num_entries().value()); diff --git a/ortools/glop/dual_edge_norms.cc b/ortools/glop/dual_edge_norms.cc index 0d55513231..f3f5ea7909 100644 --- a/ortools/glop/dual_edge_norms.cc +++ b/ortools/glop/dual_edge_norms.cc @@ -18,7 +18,7 @@ namespace operations_research { namespace glop { -DualEdgeNorms::DualEdgeNorms(const BasisFactorization &basis_factorization) +DualEdgeNorms::DualEdgeNorms(const BasisFactorization& basis_factorization) : basis_factorization_(basis_factorization), recompute_edge_squared_norms_(true) {} @@ -32,24 +32,24 @@ void DualEdgeNorms::ResizeOnNewRows(RowIndex new_size) { edge_squared_norms_.resize(new_size, 1.0); } -const DenseColumn &DualEdgeNorms::GetEdgeSquaredNorms() { 
+const DenseColumn& DualEdgeNorms::GetEdgeSquaredNorms() { if (recompute_edge_squared_norms_) ComputeEdgeSquaredNorms(); return edge_squared_norms_; } void DualEdgeNorms::UpdateDataOnBasisPermutation( - const ColumnPermutation &col_perm) { + const ColumnPermutation& col_perm) { if (recompute_edge_squared_norms_) return; ApplyColumnPermutationToRowIndexedVector(col_perm, &edge_squared_norms_); } void DualEdgeNorms::UpdateBeforeBasisPivot( ColIndex entering_col, RowIndex leaving_row, - const ScatteredColumn &direction, - const ScatteredRow &unit_row_left_inverse) { + const ScatteredColumn& direction, + const ScatteredRow& unit_row_left_inverse) { // No need to update if we will recompute it from scratch later. if (recompute_edge_squared_norms_) return; - const DenseColumn &tau = ComputeTau(TransposedView(unit_row_left_inverse)); + const DenseColumn& tau = ComputeTau(TransposedView(unit_row_left_inverse)); SCOPED_TIME_STAT(&stats_); // ||unit_row_left_inverse||^2 is the same as @@ -115,10 +115,10 @@ void DualEdgeNorms::ComputeEdgeSquaredNorms() { recompute_edge_squared_norms_ = false; } -const DenseColumn &DualEdgeNorms::ComputeTau( - const ScatteredColumn &unit_row_left_inverse) { +const DenseColumn& DualEdgeNorms::ComputeTau( + const ScatteredColumn& unit_row_left_inverse) { SCOPED_TIME_STAT(&stats_); - const DenseColumn &result = + const DenseColumn& result = basis_factorization_.RightSolveForTau(unit_row_left_inverse); IF_STATS_ENABLED(stats_.tau_density.Add(Density(Transpose(result)))); return result; diff --git a/ortools/glop/entering_variable.cc b/ortools/glop/entering_variable.cc index ca5801a36d..ae5be69da9 100644 --- a/ortools/glop/entering_variable.cc +++ b/ortools/glop/entering_variable.cc @@ -22,10 +22,10 @@ namespace operations_research { namespace glop { -EnteringVariable::EnteringVariable(const VariablesInfo &variables_info, - random_engine_t *random, - ReducedCosts *reduced_costs, - PrimalEdgeNorms *primal_edge_norms) 
+EnteringVariable::EnteringVariable(const VariablesInfo& variables_info, + random_engine_t* random, + ReducedCosts* reduced_costs, + PrimalEdgeNorms* primal_edge_norms) : variables_info_(variables_info), random_(random), reduced_costs_(reduced_costs), @@ -34,7 +34,7 @@ EnteringVariable::EnteringVariable(const VariablesInfo &variables_info, rule_(GlopParameters::DANTZIG), unused_columns_() {} -Status EnteringVariable::PrimalChooseEnteringColumn(ColIndex *entering_col) { +Status EnteringVariable::PrimalChooseEnteringColumn(ColIndex* entering_col) { SCOPED_TIME_STAT(&stats_); GLOP_RETURN_ERROR_IF_NULL(entering_col); @@ -87,21 +87,21 @@ Status EnteringVariable::PrimalChooseEnteringColumn(ColIndex *entering_col) { } Status EnteringVariable::DualChooseEnteringColumn( - const UpdateRow &update_row, Fractional cost_variation, - std::vector *bound_flip_candidates, ColIndex *entering_col, - Fractional *step) { + const UpdateRow& update_row, Fractional cost_variation, + std::vector* bound_flip_candidates, ColIndex* entering_col, + Fractional* step) { GLOP_RETURN_ERROR_IF_NULL(entering_col); GLOP_RETURN_ERROR_IF_NULL(step); - const DenseRow &update_coefficient = update_row.GetCoefficients(); - const DenseRow &reduced_costs = reduced_costs_->GetReducedCosts(); + const DenseRow& update_coefficient = update_row.GetCoefficients(); + const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); breakpoints_.clear(); breakpoints_.reserve(update_row.GetNonZeroPositions().size()); const Fractional threshold = parameters_.ratio_test_zero_threshold(); - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); - const DenseBitRow &is_boxed = variables_info_.GetNonBasicBoxedVariables(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseBitRow& is_boxed 
= variables_info_.GetNonBasicBoxedVariables(); // Harris ratio test. See below for more explanation. Here this is used to // prune the first pass by not enqueueing ColWithRatio for columns that have @@ -266,12 +266,12 @@ Status EnteringVariable::DualChooseEnteringColumn( } Status EnteringVariable::DualPhaseIChooseEnteringColumn( - const UpdateRow &update_row, Fractional cost_variation, - ColIndex *entering_col, Fractional *step) { + const UpdateRow& update_row, Fractional cost_variation, + ColIndex* entering_col, Fractional* step) { GLOP_RETURN_ERROR_IF_NULL(entering_col); GLOP_RETURN_ERROR_IF_NULL(step); - const DenseRow &update_coefficient = update_row.GetCoefficients(); - const DenseRow &reduced_costs = reduced_costs_->GetReducedCosts(); + const DenseRow& update_coefficient = update_row.GetCoefficients(); + const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); // List of breakpoints where a variable change from feasibility to @@ -283,9 +283,9 @@ Status EnteringVariable::DualPhaseIChooseEnteringColumn( const Fractional threshold = parameters_.ratio_test_zero_threshold(); const Fractional dual_feasibility_tolerance = reduced_costs_->GetDualFeasibilityTolerance(); - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); - const VariableTypeRow &variable_type = variables_info_.GetTypeRow(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); + const VariableTypeRow& variable_type = variables_info_.GetTypeRow(); for (const ColIndex col : update_row.GetNonZeroPositions()) { // Boxed variables shouldn't be in the update position list because they // will be dealt with afterwards by MakeBoxedVariableDualFeasible(). 
@@ -369,7 +369,7 @@ Status EnteringVariable::DualPhaseIChooseEnteringColumn( return Status::OK(); } -void EnteringVariable::SetParameters(const GlopParameters ¶meters) { +void EnteringVariable::SetParameters(const GlopParameters& parameters) { parameters_ = parameters; } @@ -377,7 +377,7 @@ void EnteringVariable::SetPricingRule(GlopParameters::PricingRule rule) { rule_ = rule; } -DenseBitRow *EnteringVariable::ResetUnusedColumns() { +DenseBitRow* EnteringVariable::ResetUnusedColumns() { SCOPED_TIME_STAT(&stats_); const ColIndex num_cols = variables_info_.GetNumberOfColumns(); if (unused_columns_.size() != num_cols) { @@ -385,7 +385,7 @@ DenseBitRow *EnteringVariable::ResetUnusedColumns() { } // Invert the set of unused columns, minus the basis. - const DenseBitRow &is_basic = variables_info_.GetIsBasicBitRow(); + const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow(); for (ColIndex col(0); col < num_cols; ++col) { if (unused_columns_.IsSet(col)) { unused_columns_.Clear(col); @@ -399,11 +399,11 @@ DenseBitRow *EnteringVariable::ResetUnusedColumns() { } template -void EnteringVariable::DantzigChooseEnteringColumn(ColIndex *entering_col) { +void EnteringVariable::DantzigChooseEnteringColumn(ColIndex* entering_col) { DenseRow dummy; - const DenseRow &matrix_column_norms = + const DenseRow& matrix_column_norms = normalize ? primal_edge_norms_->GetMatrixColumnNorms() : dummy; - const DenseRow &reduced_costs = reduced_costs_->GetReducedCosts(); + const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); Fractional best_price(0.0); @@ -431,11 +431,11 @@ void EnteringVariable::DantzigChooseEnteringColumn(ColIndex *entering_col) { // - To return the top-n choices if we want to consider multiple candidates in // the other parts of the simplex algorithm. 
template -void EnteringVariable::NormalizedChooseEnteringColumn(ColIndex *entering_col) { - const DenseRow &weights = use_steepest_edge +void EnteringVariable::NormalizedChooseEnteringColumn(ColIndex* entering_col) { + const DenseRow& weights = use_steepest_edge ? primal_edge_norms_->GetEdgeSquaredNorms() : primal_edge_norms_->GetDevexWeights(); - const DenseRow &reduced_costs = reduced_costs_->GetReducedCosts(); + const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); Fractional best_price(0.0); diff --git a/ortools/glop/initial_basis.cc b/ortools/glop/initial_basis.cc index 981973e68b..a1b80f4536 100644 --- a/ortools/glop/initial_basis.cc +++ b/ortools/glop/initial_basis.cc @@ -21,11 +21,11 @@ namespace operations_research { namespace glop { -InitialBasis::InitialBasis(const CompactSparseMatrix &compact_matrix, - const DenseRow &objective, - const DenseRow &lower_bound, - const DenseRow &upper_bound, - const VariableTypeRow &variable_type) +InitialBasis::InitialBasis(const CompactSparseMatrix& compact_matrix, + const DenseRow& objective, + const DenseRow& lower_bound, + const DenseRow& upper_bound, + const VariableTypeRow& variable_type) : max_scaled_abs_cost_(0.0), bixby_column_comparator_(*this), triangular_column_comparator_(*this), @@ -36,7 +36,7 @@ InitialBasis::InitialBasis(const CompactSparseMatrix &compact_matrix, variable_type_(variable_type) {} void InitialBasis::CompleteBixbyBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { // Initialize can_be_replaced ('I' in Bixby's paper) and has_zero_coefficient // ('r' in Bixby's paper). 
const RowIndex num_rows = compact_matrix_.num_rows(); @@ -64,7 +64,7 @@ void InitialBasis::CompleteBixbyBasis(ColIndex num_cols, for (int i = 0; i < candidates.size(); ++i) { bool enter_basis = false; const ColIndex candidate_col_index = candidates[i]; - const auto &candidate_col = compact_matrix_.column(candidate_col_index); + const auto& candidate_col = compact_matrix_.column(candidate_col_index); // Bixby's heuristic only works with scaled columns. This should be the // case by default since we only use this when the matrix is scaled, but @@ -98,28 +98,28 @@ void InitialBasis::CompleteBixbyBasis(ColIndex num_cols, } void InitialBasis::GetPrimalMarosBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { return GetMarosBasis(num_cols, basis); } void InitialBasis::GetDualMarosBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { return GetMarosBasis(num_cols, basis); } void InitialBasis::CompleteTriangularPrimalBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { return CompleteTriangularBasis(num_cols, basis); } void InitialBasis::CompleteTriangularDualBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { return CompleteTriangularBasis(num_cols, basis); } template void InitialBasis::CompleteTriangularBasis(ColIndex num_cols, - RowToColMapping *basis) { + RowToColMapping* basis) { // Initialize can_be_replaced. const RowIndex num_rows = compact_matrix_.num_rows(); DenseBooleanColumn can_be_replaced(num_rows, false); @@ -226,7 +226,7 @@ int InitialBasis::GetMarosPriority(RowIndex row) const { } template -void InitialBasis::GetMarosBasis(ColIndex num_cols, RowToColMapping *basis) { +void InitialBasis::GetMarosBasis(ColIndex num_cols, RowToColMapping* basis) { VLOG(1) << "Starting Maros crash procedure."; // Initialize basis to the all-slack basis. 
@@ -301,7 +301,7 @@ void InitialBasis::GetMarosBasis(ColIndex num_cols, RowToColMapping *basis) { // Make sure that the pivotal entry is not too small in magnitude. Fractional max_magnitude = 0; pivot_absolute_value = 0.0; - const auto &column_values = compact_matrix_.column(col); + const auto& column_values = compact_matrix_.column(col); for (const SparseColumn::Entry e : column_values) { const Fractional absolute_value = std::fabs(e.coefficient()); if (e.row() == max_rpf_row) pivot_absolute_value = absolute_value; @@ -351,7 +351,7 @@ void InitialBasis::GetMarosBasis(ColIndex num_cols, RowToColMapping *basis) { } void InitialBasis::ComputeCandidates(ColIndex num_cols, - std::vector *candidates) { + std::vector* candidates) { candidates->clear(); max_scaled_abs_cost_ = 0.0; for (ColIndex col(0); col < num_cols; ++col) { diff --git a/ortools/glop/lp_solver.cc b/ortools/glop/lp_solver.cc index 95f071bd49..c5b6bb483c 100644 --- a/ortools/glop/lp_solver.cc +++ b/ortools/glop/lp_solver.cc @@ -60,16 +60,14 @@ namespace operations_research { namespace glop { namespace { -// Writes a LinearProgram to a file if -// absl::GetFlag(FLAGS_lp_dump_to_proto_file) is true. -// The integer num is appended to the base name of the file. -// When this function is called from LPSolver::Solve(), num is usually the -// number of times Solve() was called. -// For a LinearProgram whose name is "LinPro", and num = 48, the default output -// file will be /tmp/LinPro-000048.pb.gz. +// Writes a LinearProgram to a file if FLAGS_lp_dump_to_proto_file is true. The +// integer num is appended to the base name of the file. When this function is +// called from LPSolver::Solve(), num is usually the number of times Solve() was +// called. For a LinearProgram whose name is "LinPro", and num = 48, the default +// output file will be /tmp/LinPro-000048.pb.gz. // // Warning: is a no-op on portable platforms (android, ios, etc). 
-void DumpLinearProgramIfRequiredByFlags(const LinearProgram &linear_program, +void DumpLinearProgramIfRequiredByFlags(const LinearProgram& linear_program, int num) { if (!absl::GetFlag(FLAGS_lp_dump_to_proto_file)) return; #ifdef __PORTABLE_PLATFORM__ @@ -112,22 +110,22 @@ void DumpLinearProgramIfRequiredByFlags(const LinearProgram &linear_program, LPSolver::LPSolver() : num_solves_(0) {} -void LPSolver::SetParameters(const GlopParameters ¶meters) { +void LPSolver::SetParameters(const GlopParameters& parameters) { parameters_ = parameters; } -const GlopParameters &LPSolver::GetParameters() const { return parameters_; } +const GlopParameters& LPSolver::GetParameters() const { return parameters_; } -GlopParameters *LPSolver::GetMutableParameters() { return ¶meters_; } +GlopParameters* LPSolver::GetMutableParameters() { return ¶meters_; } -ProblemStatus LPSolver::Solve(const LinearProgram &lp) { +ProblemStatus LPSolver::Solve(const LinearProgram& lp) { std::unique_ptr time_limit = TimeLimit::FromParameters(parameters_); return SolveWithTimeLimit(lp, time_limit.get()); } -ProblemStatus LPSolver::SolveWithTimeLimit(const LinearProgram &lp, - TimeLimit *time_limit) { +ProblemStatus LPSolver::SolveWithTimeLimit(const LinearProgram& lp, + TimeLimit* time_limit) { if (time_limit == nullptr) { LOG(DFATAL) << "SolveWithTimeLimit() called with a nullptr time_limit."; return ProblemStatus::ABNORMAL; @@ -218,8 +216,8 @@ void LPSolver::Clear() { } void LPSolver::SetInitialBasis( - const VariableStatusRow &variable_statuses, - const ConstraintStatusColumn &constraint_statuses) { + const VariableStatusRow& variable_statuses, + const ConstraintStatusColumn& constraint_statuses) { // Create the associated basis state. BasisState state; state.statuses = variable_statuses; @@ -258,7 +256,7 @@ void LPSolver::SetInitialBasis( namespace { // Computes the "real" problem objective from the one without offset nor // scaling. 
-Fractional ProblemObjectiveValue(const LinearProgram &lp, Fractional value) { +Fractional ProblemObjectiveValue(const LinearProgram& lp, Fractional value) { return lp.objective_scaling_factor() * (value + lp.objective_offset()); } @@ -271,8 +269,8 @@ Fractional AllowedError(Fractional tolerance, Fractional value) { // TODO(user): Try to also check the precision of an INFEASIBLE or UNBOUNDED // return status. -ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram &lp, - const ProblemSolution &solution) { +ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram& lp, + const ProblemSolution& solution) { if (!IsProblemSolutionConsistent(lp, solution)) { VLOG(1) << "Inconsistency detected in the solution."; ResizeSolution(lp.num_constraints(), lp.num_variables()); @@ -409,7 +407,7 @@ ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram &lp, return status; } -bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram &lp) { +bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram& lp) { // Note(user): We use the following same two tolerances for the dual and // primal values. // TODO(user): investigate whether to use the tolerances defined in @@ -470,7 +468,7 @@ double LPSolver::DeterministicTime() const { : revised_simplex_->DeterministicTime(); } -void LPSolver::MovePrimalValuesWithinBounds(const LinearProgram &lp) { +void LPSolver::MovePrimalValuesWithinBounds(const LinearProgram& lp) { const ColIndex num_cols = lp.num_variables(); DCHECK_EQ(num_cols, primal_values_.size()); Fractional error = 0.0; @@ -487,7 +485,7 @@ void LPSolver::MovePrimalValuesWithinBounds(const LinearProgram &lp) { VLOG(1) << "Max. 
primal values move = " << error; } -void LPSolver::MoveDualValuesWithinBounds(const LinearProgram &lp) { +void LPSolver::MoveDualValuesWithinBounds(const LinearProgram& lp) { const RowIndex num_rows = lp.num_constraints(); DCHECK_EQ(num_rows, dual_values_.size()); const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0; @@ -521,8 +519,8 @@ void LPSolver::ResizeSolution(RowIndex num_rows, ColIndex num_cols) { constraint_statuses_.resize(num_rows, ConstraintStatus::FREE); } -void LPSolver::RunRevisedSimplexIfNeeded(ProblemSolution *solution, - TimeLimit *time_limit) { +void LPSolver::RunRevisedSimplexIfNeeded(ProblemSolution* solution, + TimeLimit* time_limit) { // Note that the transpose matrix is no longer needed at this point. // This helps reduce the peak memory usage of the solver. current_linear_program_.ClearTransposeMatrix(); @@ -576,7 +574,7 @@ void LogConstraintStatusError(RowIndex row, ConstraintStatus status, } // namespace bool LPSolver::IsProblemSolutionConsistent( - const LinearProgram &lp, const ProblemSolution &solution) const { + const LinearProgram& lp, const ProblemSolution& solution) const { const RowIndex num_rows = lp.num_constraints(); const ColIndex num_cols = lp.num_variables(); if (solution.variable_statuses.size() != num_cols) return false; @@ -707,7 +705,7 @@ bool LPSolver::IsProblemSolutionConsistent( // - Reduced cost is exactly zero for FREE and BASIC variables. // - Reduced cost is of the correct sign for variables at their bounds. Fractional LPSolver::ComputeMaxCostPerturbationToEnforceOptimality( - const LinearProgram &lp, bool *is_too_large) { + const LinearProgram& lp, bool* is_too_large) { Fractional max_cost_correction = 0.0; const ColIndex num_cols = lp.num_variables(); const Fractional optimization_sign = lp.IsMaximizationProblem() ? 
-1.0 : 1.0; @@ -735,7 +733,7 @@ Fractional LPSolver::ComputeMaxCostPerturbationToEnforceOptimality( // This computes by how much the rhs must be perturbed to enforce the fact that // the constraint activities exactly reflect their status. Fractional LPSolver::ComputeMaxRhsPerturbationToEnforceOptimality( - const LinearProgram &lp, bool *is_too_large) { + const LinearProgram& lp, bool* is_too_large) { Fractional max_rhs_correction = 0.0; const RowIndex num_rows = lp.num_constraints(); const Fractional tolerance = parameters_.solution_feasibility_tolerance(); @@ -762,7 +760,7 @@ Fractional LPSolver::ComputeMaxRhsPerturbationToEnforceOptimality( return max_rhs_correction; } -void LPSolver::ComputeConstraintActivities(const LinearProgram &lp) { +void LPSolver::ComputeConstraintActivities(const LinearProgram& lp) { const RowIndex num_rows = lp.num_constraints(); const ColIndex num_cols = lp.num_variables(); DCHECK_EQ(num_cols, primal_values_.size()); @@ -773,7 +771,7 @@ void LPSolver::ComputeConstraintActivities(const LinearProgram &lp) { } } -void LPSolver::ComputeReducedCosts(const LinearProgram &lp) { +void LPSolver::ComputeReducedCosts(const LinearProgram& lp) { const RowIndex num_rows = lp.num_constraints(); const ColIndex num_cols = lp.num_variables(); DCHECK_EQ(num_rows, dual_values_.size()); @@ -784,7 +782,7 @@ void LPSolver::ComputeReducedCosts(const LinearProgram &lp) { } } -double LPSolver::ComputeObjective(const LinearProgram &lp) { +double LPSolver::ComputeObjective(const LinearProgram& lp) { const ColIndex num_cols = lp.num_variables(); DCHECK_EQ(num_cols, primal_values_.size()); KahanSum sum; @@ -810,7 +808,7 @@ double LPSolver::ComputeObjective(const LinearProgram &lp) { // not be in the original problem so that the current dual solution is always // feasible. It also involves changing the rounding mode to obtain exact // confidence intervals on the reduced costs. 
-double LPSolver::ComputeDualObjective(const LinearProgram &lp) { +double LPSolver::ComputeDualObjective(const LinearProgram& lp) { KahanSum dual_objective; // Compute the part coming from the row constraints. @@ -873,7 +871,7 @@ double LPSolver::ComputeDualObjective(const LinearProgram &lp) { return dual_objective.Value(); } -double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram &lp) { +double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram& lp) { const ColIndex num_cols = lp.num_variables(); DCHECK_EQ(num_cols, primal_values_.size()); const Fractional tolerance = parameters_.solution_feasibility_tolerance(); @@ -888,8 +886,8 @@ double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram &lp) { return primal_objective_error; } -double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram &lp, - bool *is_too_large) { +double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp, + bool* is_too_large) { double infeasibility = 0.0; const Fractional tolerance = parameters_.solution_feasibility_tolerance(); const ColIndex num_cols = lp.num_variables(); @@ -918,8 +916,8 @@ double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram &lp, return infeasibility; } -double LPSolver::ComputeActivityInfeasibility(const LinearProgram &lp, - bool *is_too_large) { +double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp, + bool* is_too_large) { double infeasibility = 0.0; int num_problematic_rows(0); const RowIndex num_rows = lp.num_constraints(); @@ -969,8 +967,8 @@ double LPSolver::ComputeActivityInfeasibility(const LinearProgram &lp, return infeasibility; } -double LPSolver::ComputeDualValueInfeasibility(const LinearProgram &lp, - bool *is_too_large) { +double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp, + bool* is_too_large) { const Fractional allowed_error = parameters_.solution_feasibility_tolerance(); const Fractional optimization_sign = lp.IsMaximizationProblem() ? 
-1.0 : 1.0; double infeasibility = 0.0; @@ -993,8 +991,8 @@ double LPSolver::ComputeDualValueInfeasibility(const LinearProgram &lp, return infeasibility; } -double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram &lp, - bool *is_too_large) { +double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp, + bool* is_too_large) { const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0; double infeasibility = 0.0; const ColIndex num_cols = lp.num_variables(); diff --git a/ortools/glop/lu_factorization.cc b/ortools/glop/lu_factorization.cc index 6b0dd42614..b4bf50de8b 100644 --- a/ortools/glop/lu_factorization.cc +++ b/ortools/glop/lu_factorization.cc @@ -42,7 +42,7 @@ void LuFactorization::Clear() { } Status LuFactorization::ComputeFactorization( - const CompactSparseMatrixView &compact_matrix) { + const CompactSparseMatrixView& compact_matrix) { SCOPED_TIME_STAT(&stats_); Clear(); if (compact_matrix.num_rows().value() != compact_matrix.num_cols().value()) { @@ -65,7 +65,7 @@ Status LuFactorization::ComputeFactorization( return Status::OK(); } -void LuFactorization::RightSolve(DenseColumn *x) const { +void LuFactorization::RightSolve(DenseColumn* x) const { SCOPED_TIME_STAT(&stats_); if (is_identity_factorization_) return; @@ -75,12 +75,12 @@ void LuFactorization::RightSolve(DenseColumn *x) const { ApplyPermutation(inverse_col_perm_, dense_column_scratchpad_, x); } -void LuFactorization::LeftSolve(DenseRow *y) const { +void LuFactorization::LeftSolve(DenseRow* y) const { SCOPED_TIME_STAT(&stats_); if (is_identity_factorization_) return; // We need to interpret y as a column for the permutation functions. 
- DenseColumn *const x = reinterpret_cast(y); + DenseColumn* const x = reinterpret_cast(y); ApplyInversePermutation(inverse_col_perm_, *x, &dense_column_scratchpad_); upper_.TransposeUpperSolve(&dense_column_scratchpad_); lower_.TransposeLowerSolve(&dense_column_scratchpad_); @@ -92,7 +92,7 @@ namespace { // norm of the given column, otherwise do the same with a sparse version. In // both cases column is cleared. Fractional ComputeSquaredNormAndResetToZero( - const std::vector &non_zeros, DenseColumn *column) { + const std::vector& non_zeros, DenseColumn* column) { Fractional sum = 0.0; if (non_zeros.empty()) { sum = SquaredNorm(*column); @@ -107,7 +107,7 @@ Fractional ComputeSquaredNormAndResetToZero( } } // namespace -Fractional LuFactorization::RightSolveSquaredNorm(const ColumnView &a) const { +Fractional LuFactorization::RightSolveSquaredNorm(const ColumnView& a) const { SCOPED_TIME_STAT(&stats_); if (is_identity_factorization_) return SquaredNorm(a); @@ -171,8 +171,8 @@ Fractional LuFactorization::DualEdgeSquaredNorm(RowIndex row) const { namespace { // Returns whether 'b' is equal to 'a' permuted by the given row permutation // 'perm'. 
-bool AreEqualWithPermutation(const DenseColumn &a, const DenseColumn &b, - const RowPermutation &perm) { +bool AreEqualWithPermutation(const DenseColumn& a, const DenseColumn& b, + const RowPermutation& perm) { const RowIndex num_rows = perm.size(); for (RowIndex row(0); row < num_rows; ++row) { if (a[row] != b[perm[row]]) return false; @@ -181,8 +181,8 @@ bool AreEqualWithPermutation(const DenseColumn &a, const DenseColumn &b, } } // namespace -void LuFactorization::RightSolveLWithPermutedInput(const DenseColumn &a, - ScatteredColumn *x) const { +void LuFactorization::RightSolveLWithPermutedInput(const DenseColumn& a, + ScatteredColumn* x) const { SCOPED_TIME_STAT(&stats_); if (!is_identity_factorization_) { DCHECK(AreEqualWithPermutation(a, x->values, row_perm_)); @@ -196,8 +196,8 @@ void LuFactorization::RightSolveLWithPermutedInput(const DenseColumn &a, } template -void LuFactorization::RightSolveLInternal(const Column &b, - ScatteredColumn *x) const { +void LuFactorization::RightSolveLInternal(const Column& b, + ScatteredColumn* x) const { // This code is equivalent to // b.PermutedCopyToDenseVector(row_perm_, num_rows, x); // but it also computes the first column index which does not correspond to an @@ -229,8 +229,8 @@ void LuFactorization::RightSolveLInternal(const Column &b, } } -void LuFactorization::RightSolveLForColumnView(const ColumnView &b, - ScatteredColumn *x) const { +void LuFactorization::RightSolveLForColumnView(const ColumnView& b, + ScatteredColumn* x) const { SCOPED_TIME_STAT(&stats_); DCHECK(IsAllZero(x->values)); x->non_zeros.clear(); @@ -245,7 +245,7 @@ void LuFactorization::RightSolveLForColumnView(const ColumnView &b, RightSolveLInternal(b, x); } -void LuFactorization::RightSolveLWithNonZeros(ScatteredColumn *x) const { +void LuFactorization::RightSolveLWithNonZeros(ScatteredColumn* x) const { if (is_identity_factorization_) return; if (x->non_zeros.empty()) { PermuteWithScratchpad(row_perm_, &dense_zero_scratchpad_, &x->values); @@ 
-264,8 +264,8 @@ void LuFactorization::RightSolveLWithNonZeros(ScatteredColumn *x) const { } } -void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn &b, - ScatteredColumn *x) const { +void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn& b, + ScatteredColumn* x) const { SCOPED_TIME_STAT(&stats_); DCHECK(IsAllZero(x->values)); x->non_zeros.clear(); @@ -283,13 +283,13 @@ void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn &b, RightSolveLInternal(b, x); } -void LuFactorization::LeftSolveUWithNonZeros(ScatteredRow *y) const { +void LuFactorization::LeftSolveUWithNonZeros(ScatteredRow* y) const { SCOPED_TIME_STAT(&stats_); CHECK(col_perm_.empty()); if (is_identity_factorization_) return; - DenseColumn *const x = reinterpret_cast(&y->values); - RowIndexVector *const nz = reinterpret_cast(&y->non_zeros); + DenseColumn* const x = reinterpret_cast(&y->values); + RowIndexVector* const nz = reinterpret_cast(&y->non_zeros); transpose_upper_.ComputeRowsToConsiderInSortedOrder(nz); y->non_zeros_are_sorted = true; if (nz->empty()) { @@ -299,7 +299,7 @@ void LuFactorization::LeftSolveUWithNonZeros(ScatteredRow *y) const { } } -void LuFactorization::RightSolveUWithNonZeros(ScatteredColumn *x) const { +void LuFactorization::RightSolveUWithNonZeros(ScatteredColumn* x) const { SCOPED_TIME_STAT(&stats_); CHECK(col_perm_.empty()); if (is_identity_factorization_) return; @@ -318,14 +318,14 @@ void LuFactorization::RightSolveUWithNonZeros(ScatteredColumn *x) const { } bool LuFactorization::LeftSolveLWithNonZeros( - ScatteredRow *y, ScatteredColumn *result_before_permutation) const { + ScatteredRow* y, ScatteredColumn* result_before_permutation) const { SCOPED_TIME_STAT(&stats_); if (is_identity_factorization_) { // It is not advantageous to fill result_before_permutation in this case. 
return false; } - DenseColumn *const x = reinterpret_cast(&y->values); - std::vector *nz = reinterpret_cast(&y->non_zeros); + DenseColumn* const x = reinterpret_cast(&y->values); + std::vector* nz = reinterpret_cast(&y->non_zeros); // Hypersparse? transpose_lower_.ComputeRowsToConsiderInSortedOrder(nz); @@ -382,12 +382,12 @@ bool LuFactorization::LeftSolveLWithNonZeros( return true; } -void LuFactorization::LeftSolveLWithNonZeros(ScatteredRow *y) const { +void LuFactorization::LeftSolveLWithNonZeros(ScatteredRow* y) const { LeftSolveLWithNonZeros(y, nullptr); } ColIndex LuFactorization::LeftSolveUForUnitRow(ColIndex col, - ScatteredRow *y) const { + ScatteredRow* y) const { SCOPED_TIME_STAT(&stats_); DCHECK(IsAllZero(y->values)); DCHECK(y->non_zeros.empty()); @@ -406,9 +406,8 @@ ColIndex LuFactorization::LeftSolveUForUnitRow(ColIndex col, if (transpose_upper_.ColumnIsDiagonalOnly(permuted_col)) { (*y)[permuted_col] /= transpose_upper_.GetDiagonalCoefficient(permuted_col); } else { - RowIndexVector *const nz = - reinterpret_cast(&y->non_zeros); - DenseColumn *const x = reinterpret_cast(&y->values); + RowIndexVector* const nz = reinterpret_cast(&y->non_zeros); + DenseColumn* const x = reinterpret_cast(&y->values); transpose_upper_.ComputeRowsToConsiderInSortedOrder(nz); y->non_zeros_are_sorted = true; if (y->non_zeros.empty()) { @@ -420,7 +419,7 @@ ColIndex LuFactorization::LeftSolveUForUnitRow(ColIndex col, return permuted_col; } -const SparseColumn &LuFactorization::GetColumnOfU(ColIndex col) const { +const SparseColumn& LuFactorization::GetColumnOfU(ColIndex col) const { if (is_identity_factorization_) { column_of_upper_.Clear(); column_of_upper_.SetCoefficient(ColToRowIndex(col), 1.0); @@ -432,7 +431,7 @@ const SparseColumn &LuFactorization::GetColumnOfU(ColIndex col) const { } double LuFactorization::GetFillInPercentage( - const CompactSparseMatrixView &matrix) const { + const CompactSparseMatrixView& matrix) const { const int initial_num_entries = 
matrix.num_entries().value(); const int lu_num_entries = (lower_.num_entries() + upper_.num_entries()).value(); @@ -503,13 +502,13 @@ Fractional LuFactorization::ComputeInverseInfinityNorm() const { } Fractional LuFactorization::ComputeOneNormConditionNumber( - const CompactSparseMatrixView &matrix) const { + const CompactSparseMatrixView& matrix) const { if (is_identity_factorization_) return 1.0; return matrix.ComputeOneNorm() * ComputeInverseOneNorm(); } Fractional LuFactorization::ComputeInfinityNormConditionNumber( - const CompactSparseMatrixView &matrix) const { + const CompactSparseMatrixView& matrix) const { if (is_identity_factorization_) return 1.0; return matrix.ComputeInfinityNorm() * ComputeInverseInfinityNorm(); } @@ -521,7 +520,7 @@ Fractional LuFactorization::ComputeInverseInfinityNormUpperBound() const { namespace { // Returns the density of the sparse column 'b' w.r.t. the given permutation. -double ComputeDensity(const SparseColumn &b, const RowPermutation &row_perm) { +double ComputeDensity(const SparseColumn& b, const RowPermutation& row_perm) { double density = 0.0; for (const SparseColumn::Entry e : b) { if (row_perm[e.row()] != kNonPivotal && e.coefficient() != 0.0) { @@ -543,7 +542,7 @@ void LuFactorization::ComputeTransposeLower() const { transpose_lower_.PopulateFromTranspose(lower_); } -bool LuFactorization::CheckFactorization(const CompactSparseMatrixView &matrix, +bool LuFactorization::CheckFactorization(const CompactSparseMatrixView& matrix, Fractional tolerance) const { if (is_identity_factorization_) return true; SparseMatrix lu; diff --git a/ortools/glop/markowitz.cc b/ortools/glop/markowitz.cc index e46f10dd92..1263fba04e 100644 --- a/ortools/glop/markowitz.cc +++ b/ortools/glop/markowitz.cc @@ -24,8 +24,8 @@ namespace operations_research { namespace glop { Status Markowitz::ComputeRowAndColumnPermutation( - const CompactSparseMatrixView &basis_matrix, RowPermutation *row_perm, - ColumnPermutation *col_perm) { + const 
CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm, + ColumnPermutation* col_perm) { SCOPED_TIME_STAT(&stats_); Clear(); const RowIndex num_rows = basis_matrix.num_rows(); @@ -140,10 +140,10 @@ Status Markowitz::ComputeRowAndColumnPermutation( return Status::OK(); } -Status Markowitz::ComputeLU(const CompactSparseMatrixView &basis_matrix, - RowPermutation *row_perm, - ColumnPermutation *col_perm, - TriangularMatrix *lower, TriangularMatrix *upper) { +Status Markowitz::ComputeLU(const CompactSparseMatrixView& basis_matrix, + RowPermutation* row_perm, + ColumnPermutation* col_perm, + TriangularMatrix* lower, TriangularMatrix* upper) { // The two first swaps allow to use less memory since this way upper_ // and lower_ will always stay empty at the end of this function. lower_.Swap(lower); @@ -177,7 +177,7 @@ struct MatrixEntry { Fractional coefficient; MatrixEntry(RowIndex r, ColIndex c, Fractional coeff) : row(r), col(c), coefficient(coeff) {} - bool operator<(const MatrixEntry &o) const { + bool operator<(const MatrixEntry& o) const { return (row == o.row) ? 
col < o.col : row < o.row; } }; @@ -185,13 +185,13 @@ struct MatrixEntry { } // namespace void Markowitz::ExtractSingletonColumns( - const CompactSparseMatrixView &basis_matrix, RowPermutation *row_perm, - ColumnPermutation *col_perm, int *index) { + const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm, + ColumnPermutation* col_perm, int* index) { SCOPED_TIME_STAT(&stats_); std::vector singleton_entries; const ColIndex num_cols = basis_matrix.num_cols(); for (ColIndex col(0); col < num_cols; ++col) { - const ColumnView &column = basis_matrix.column(col); + const ColumnView& column = basis_matrix.column(col); if (column.num_entries().value() == 1) { singleton_entries.push_back( MatrixEntry(column.GetFirstRow(), col, column.GetFirstCoefficient())); @@ -214,9 +214,9 @@ void Markowitz::ExtractSingletonColumns( num_cols.value()); } -bool Markowitz::IsResidualSingletonColumn(const ColumnView &column, - const RowPermutation &row_perm, - RowIndex *row) { +bool Markowitz::IsResidualSingletonColumn(const ColumnView& column, + const RowPermutation& row_perm, + RowIndex* row) { int residual_degree = 0; for (const auto e : column) { if (row_perm[e.row()] != kInvalidRow) continue; @@ -228,14 +228,14 @@ bool Markowitz::IsResidualSingletonColumn(const ColumnView &column, } void Markowitz::ExtractResidualSingletonColumns( - const CompactSparseMatrixView &basis_matrix, RowPermutation *row_perm, - ColumnPermutation *col_perm, int *index) { + const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm, + ColumnPermutation* col_perm, int* index) { SCOPED_TIME_STAT(&stats_); const ColIndex num_cols = basis_matrix.num_cols(); RowIndex row = kInvalidRow; for (ColIndex col(0); col < num_cols; ++col) { if ((*col_perm)[col] != kInvalidCol) continue; - const ColumnView &column = basis_matrix.column(col); + const ColumnView& column = basis_matrix.column(col); if (!IsResidualSingletonColumn(column, *row_perm, &row)) continue; (*col_perm)[col] = ColIndex(*index); 
(*row_perm)[row] = RowIndex(*index); @@ -247,7 +247,7 @@ void Markowitz::ExtractResidualSingletonColumns( num_cols.value()); } -const SparseColumn &Markowitz::ComputeColumn(const RowPermutation &row_perm, +const SparseColumn& Markowitz::ComputeColumn(const RowPermutation& row_perm, ColIndex col) { SCOPED_TIME_STAT(&stats_); // Is this the first time ComputeColumn() sees this column? This is a bit @@ -261,12 +261,12 @@ const SparseColumn &Markowitz::ComputeColumn(const RowPermutation &row_perm, // permuted_lower_.column(col) and we just need to split this column. Note // that this is just an optimization and the code would work if we just // assumed permuted_lower_column_needs_solve_[col] to be always true. - SparseColumn *lower_column = permuted_lower_.mutable_column(col); + SparseColumn* lower_column = permuted_lower_.mutable_column(col); if (permuted_lower_column_needs_solve_[col]) { // Solve a sparse triangular system. If the column 'col' of permuted_lower_ // was never computed before by ComputeColumn(), we use the column 'col' of // the matrix to factorize. - const ColumnView &input = + const ColumnView& input = first_time ? basis_matrix_->column(col) : ColumnView(*lower_column); lower_.PermutedLowerSparseSolve(input, row_perm, lower_column, permuted_upper_.mutable_column(col)); @@ -295,10 +295,10 @@ const SparseColumn &Markowitz::ComputeColumn(const RowPermutation &row_perm, return *lower_column; } -int64 Markowitz::FindPivot(const RowPermutation &row_perm, - const ColumnPermutation &col_perm, - RowIndex *pivot_row, ColIndex *pivot_col, - Fractional *pivot_coefficient) { +int64 Markowitz::FindPivot(const RowPermutation& row_perm, + const ColumnPermutation& col_perm, + RowIndex* pivot_row, ColIndex* pivot_col, + Fractional* pivot_coefficient) { SCOPED_TIME_STAT(&stats_); // Fast track for singleton columns. 
@@ -329,7 +329,7 @@ int64 Markowitz::FindPivot(const RowPermutation &row_perm, } return 0; } - const SparseColumn &column = ComputeColumn(row_perm, col); + const SparseColumn& column = ComputeColumn(row_perm, col); if (column.IsEmpty()) continue; *pivot_col = col; *pivot_row = column.GetFirstRow(); @@ -355,7 +355,7 @@ int64 Markowitz::FindPivot(const RowPermutation &row_perm, const ColIndex col = residual_matrix_non_zero_.GetFirstNonDeletedColumnFromRow(row); if (col == kInvalidCol) continue; - const SparseColumn &column = ComputeColumn(row_perm, col); + const SparseColumn& column = ComputeColumn(row_perm, col); if (column.IsEmpty()) continue; *pivot_col = col; @@ -407,7 +407,7 @@ int64 Markowitz::FindPivot(const RowPermutation &row_perm, // actually an upper bound on the number of non-zeros since there may be // numerical cancellations. Exploit this here? Note that it is already used // when we update the non_zero pattern of the residual matrix. - const SparseColumn &column = ComputeColumn(row_perm, col); + const SparseColumn& column = ComputeColumn(row_perm, col); DCHECK_EQ(column.num_entries(), col_degree); Fractional max_magnitude = 0.0; @@ -528,7 +528,7 @@ void Markowitz::RemoveColumnFromResidualMatrix(RowIndex pivot_row, void Markowitz::UpdateResidualMatrix(RowIndex pivot_row, ColIndex pivot_col) { SCOPED_TIME_STAT(&stats_); - const SparseColumn &pivot_column = permuted_lower_.column(pivot_col); + const SparseColumn& pivot_column = permuted_lower_.column(pivot_col); residual_matrix_non_zero_.Update(pivot_row, pivot_col, pivot_column); for (const ColIndex col : residual_matrix_non_zero_.RowNonZero(pivot_row)) { DCHECK_NE(col, pivot_col); @@ -558,9 +558,9 @@ void MatrixNonZeroPattern::Reset(RowIndex num_rows, ColIndex num_cols) { } void MatrixNonZeroPattern::InitializeFromMatrixSubset( - const CompactSparseMatrixView &basis_matrix, const RowPermutation &row_perm, - const ColumnPermutation &col_perm, std::vector *singleton_columns, - std::vector *singleton_rows) 
{ + const CompactSparseMatrixView& basis_matrix, const RowPermutation& row_perm, + const ColumnPermutation& col_perm, std::vector* singleton_columns, + std::vector* singleton_rows) { const ColIndex num_cols = basis_matrix.num_cols(); const RowIndex num_rows = basis_matrix.num_rows(); @@ -638,7 +638,7 @@ bool MatrixNonZeroPattern::IsColumnDeleted(ColIndex col) const { } void MatrixNonZeroPattern::RemoveDeletedColumnsFromRow(RowIndex row) { - auto &ref = row_non_zero_[row]; + auto& ref = row_non_zero_[row]; int new_index = 0; const int end = ref.size(); for (int i = 0; i < end; ++i) { @@ -660,7 +660,7 @@ ColIndex MatrixNonZeroPattern::GetFirstNonDeletedColumnFromRow( } void MatrixNonZeroPattern::Update(RowIndex pivot_row, ColIndex pivot_col, - const SparseColumn &column) { + const SparseColumn& column) { // Since DeleteRowAndColumn() must be called just before this function, // the pivot column has been marked as deleted but degrees have not been // updated yet. Hence the +1. @@ -719,7 +719,7 @@ void MatrixNonZeroPattern::MergeInto(RowIndex pivot_row, RowIndex row) { bool_scratchpad_[col] = true; } - auto &non_zero = row_non_zero_[row]; + auto& non_zero = row_non_zero_[row]; const int old_size = non_zero.size(); for (const ColIndex col : row_non_zero_[pivot_row]) { if (bool_scratchpad_[col]) { @@ -738,9 +738,9 @@ namespace { // them and outputs the sorted result in out. The merge is stable and an element // of input_a will appear before the identical elements of the second input. template -void MergeSortedVectors(const V &input_a, W *out) { +void MergeSortedVectors(const V& input_a, W* out) { if (input_a.empty()) return; - const auto &input_b = *out; + const auto& input_b = *out; int index_a = input_a.size() - 1; int index_b = input_b.size() - 1; int index_out = input_a.size() + input_b.size(); @@ -772,8 +772,8 @@ void MergeSortedVectors(const V &input_a, W *out) { // pattern using this temporary vector. 
void MatrixNonZeroPattern::MergeIntoSorted(RowIndex pivot_row, RowIndex row) { // We want to add the entries of the input not already in the output. - const auto &input = row_non_zero_[pivot_row]; - const auto &output = row_non_zero_[row]; + const auto& input = row_non_zero_[pivot_row]; + const auto& output = row_non_zero_[row]; // These two resizes are because of the set_difference() output iterator api. col_scratchpad_.resize(input.size()); @@ -845,13 +845,13 @@ void SparseMatrixWithReusableColumnMemory::Reset(ColIndex num_cols) { columns_.clear(); } -const SparseColumn &SparseMatrixWithReusableColumnMemory::column( +const SparseColumn& SparseMatrixWithReusableColumnMemory::column( ColIndex col) const { if (mapping_[col] == -1) return empty_column_; return columns_[mapping_[col]]; } -SparseColumn *SparseMatrixWithReusableColumnMemory::mutable_column( +SparseColumn* SparseMatrixWithReusableColumnMemory::mutable_column( ColIndex col) { if (mapping_[col] != -1) return &columns_[mapping_[col]]; int new_col_index; diff --git a/ortools/glop/preprocessor.cc b/ortools/glop/preprocessor.cc index cc23cb4bb9..14af39c1ae 100644 --- a/ortools/glop/preprocessor.cc +++ b/ortools/glop/preprocessor.cc @@ -42,7 +42,7 @@ double trunc(double d) { return d > 0 ? 
floor(d) : ceil(d); } // -------------------------------------------------------- // Preprocessor // -------------------------------------------------------- -Preprocessor::Preprocessor(const GlopParameters *parameters) +Preprocessor::Preprocessor(const GlopParameters* parameters) : status_(ProblemStatus::INIT), parameters_(*parameters), in_mip_context_(false), @@ -58,7 +58,7 @@ Preprocessor::~Preprocessor() {} RunAndPushIfRelevant(std::unique_ptr(new name(¶meters_)), \ #name, time_limit_, lp) -bool MainLpPreprocessor::Run(LinearProgram *lp) { +bool MainLpPreprocessor::Run(LinearProgram* lp) { RETURN_VALUE_IF_NULL(lp, false); initial_num_rows_ = lp->num_constraints(); initial_num_cols_ = lp->num_variables(); @@ -127,8 +127,8 @@ bool MainLpPreprocessor::Run(LinearProgram *lp) { #undef RUN_PREPROCESSOR void MainLpPreprocessor::RunAndPushIfRelevant( - std::unique_ptr preprocessor, const std::string &name, - TimeLimit *time_limit, LinearProgram *lp) { + std::unique_ptr preprocessor, const std::string& name, + TimeLimit* time_limit, LinearProgram* lp) { RETURN_IF_NULL(preprocessor); RETURN_IF_NULL(time_limit); if (status_ != ProblemStatus::INIT || time_limit->LimitReached()) return; @@ -152,8 +152,7 @@ void MainLpPreprocessor::RunAndPushIfRelevant( (lp->num_constraints() - initial_num_rows_).value(), lp->num_variables().value(), (lp->num_variables() - initial_num_cols_).value(), - // static_cast is needed because the Android port uses - // int32. + // static_cast is needed because the Android port uses int32. 
static_cast(new_num_entries.value()), static_cast(new_num_entries.value() - initial_num_entries_.value())); @@ -171,7 +170,7 @@ void MainLpPreprocessor::RunAndPushIfRelevant( } } -void MainLpPreprocessor::RecoverSolution(ProblemSolution *solution) const { +void MainLpPreprocessor::RecoverSolution(ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); while (!preprocessors_.empty()) { preprocessors_.back()->RecoverSolution(solution); @@ -206,7 +205,7 @@ void ColumnDeletionHelper::MarkColumnForDeletionWithState( } void ColumnDeletionHelper::RestoreDeletedColumns( - ProblemSolution *solution) const { + ProblemSolution* solution) const { DenseRow new_primal_values; VariableStatusRow new_variable_statuses; ColIndex old_index(0); @@ -251,11 +250,11 @@ void RowDeletionHelper::UnmarkRow(RowIndex row) { is_row_deleted_[row] = false; } -const DenseBooleanColumn &RowDeletionHelper::GetMarkedRows() const { +const DenseBooleanColumn& RowDeletionHelper::GetMarkedRows() const { return is_row_deleted_; } -void RowDeletionHelper::RestoreDeletedRows(ProblemSolution *solution) const { +void RowDeletionHelper::RestoreDeletedRows(ProblemSolution* solution) const { DenseColumn new_dual_values; ConstraintStatusColumn new_constraint_statuses; RowIndex old_index(0); @@ -326,7 +325,7 @@ Fractional MagnitudeOrZeroIfInfinite(Fractional value) { // Returns the maximum magnitude of the finite variable bounds of the given // linear program. 
-Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram &lp) { +Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram& lp) { Fractional max_bounds_magnitude = 0.0; const ColIndex num_cols = lp.num_variables(); for (ColIndex col(0); col < num_cols; ++col) { @@ -340,7 +339,7 @@ Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram &lp) { } // namespace -bool EmptyColumnPreprocessor::Run(LinearProgram *lp) { +bool EmptyColumnPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); column_deletion_helper_.Clear(); @@ -384,7 +383,7 @@ bool EmptyColumnPreprocessor::Run(LinearProgram *lp) { return !column_deletion_helper_.IsEmpty(); } -void EmptyColumnPreprocessor::RecoverSolution(ProblemSolution *solution) const { +void EmptyColumnPreprocessor::RecoverSolution(ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); @@ -401,7 +400,7 @@ namespace { // c * multiple is substracted from both the constraint upper and lower bound. 
void SubtractColumnMultipleFromConstraintBound(ColIndex col, Fractional multiple, - LinearProgram *lp) { + LinearProgram* lp) { for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) { const RowIndex row = e.row(); const Fractional delta = multiple * e.coefficient(); @@ -425,7 +424,7 @@ struct ColumnWithRepresentativeAndScaledCost { ColIndex representative; Fractional scaled_cost; - bool operator<(const ColumnWithRepresentativeAndScaledCost &other) const { + bool operator<(const ColumnWithRepresentativeAndScaledCost& other) const { if (representative == other.representative) { if (scaled_cost == other.scaled_cost) { return col < other.col; @@ -438,7 +437,7 @@ struct ColumnWithRepresentativeAndScaledCost { } // namespace -bool ProportionalColumnPreprocessor::Run(LinearProgram *lp) { +bool ProportionalColumnPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); ColMapping mapping = FindProportionalColumns( @@ -471,7 +470,7 @@ bool ProportionalColumnPreprocessor::Run(LinearProgram *lp) { const ColIndex num_cols = lp->num_variables(); column_factors_.assign(num_cols, 0.0); for (const ColIndex col : proportional_columns) { - const SparseColumn &column = lp->GetSparseColumn(col); + const SparseColumn& column = lp->GetSparseColumn(col); column_factors_[col] = column.GetFirstCoefficient(); } @@ -668,7 +667,7 @@ bool ProportionalColumnPreprocessor::Run(LinearProgram *lp) { } void ProportionalColumnPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); @@ -782,11 +781,11 @@ void ProportionalColumnPreprocessor::RecoverSolution( // ProportionalRowPreprocessor // -------------------------------------------------------- -bool ProportionalRowPreprocessor::Run(LinearProgram *lp) { +bool ProportionalRowPreprocessor::Run(LinearProgram* lp) { 
SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const RowIndex num_rows = lp->num_constraints(); - const SparseMatrix &transpose = lp->GetTransposeSparseMatrix(); + const SparseMatrix& transpose = lp->GetTransposeSparseMatrix(); // Use the first coefficient of each row to compute the proportionality // factor. Note that the sign is important. @@ -794,7 +793,7 @@ bool ProportionalRowPreprocessor::Run(LinearProgram *lp) { // Note(user): using the first coefficient may not give the best precision. row_factors_.assign(num_rows, 0.0); for (RowIndex row(0); row < num_rows; ++row) { - const SparseColumn &row_transpose = transpose.column(RowToColIndex(row)); + const SparseColumn& row_transpose = transpose.column(RowToColIndex(row)); if (!row_transpose.IsEmpty()) { row_factors_[row] = row_transpose.GetFirstCoefficient(); } @@ -958,7 +957,7 @@ bool ProportionalRowPreprocessor::Run(LinearProgram *lp) { } void ProportionalRowPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); row_deletion_helper_.RestoreDeletedRows(solution); @@ -1031,7 +1030,7 @@ void ProportionalRowPreprocessor::RecoverSolution( // FixedVariablePreprocessor // -------------------------------------------------------- -bool FixedVariablePreprocessor::Run(LinearProgram *lp) { +bool FixedVariablePreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const ColIndex num_cols = lp->num_variables(); @@ -1054,7 +1053,7 @@ bool FixedVariablePreprocessor::Run(LinearProgram *lp) { } void FixedVariablePreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); @@ -1064,7 +1063,7 @@ void FixedVariablePreprocessor::RecoverSolution( // 
ForcingAndImpliedFreeConstraintPreprocessor // -------------------------------------------------------- -bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram *lp) { +bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const RowIndex num_rows = lp->num_constraints(); @@ -1154,7 +1153,7 @@ bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram *lp) { deleted_columns_.PopulateFromZero(num_rows, num_cols); costs_.resize(num_cols, 0.0); for (ColIndex col(0); col < num_cols; ++col) { - const SparseColumn &column = lp->GetSparseColumn(col); + const SparseColumn& column = lp->GetSparseColumn(col); const Fractional lower = lp->variable_lower_bounds()[col]; const Fractional upper = lp->variable_upper_bounds()[col]; bool is_forced = false; @@ -1231,7 +1230,7 @@ bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram *lp) { } void ForcingAndImpliedFreeConstraintPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); @@ -1306,7 +1305,7 @@ struct ColWithDegree { ColIndex col; EntryIndex num_entries; ColWithDegree(ColIndex c, EntryIndex n) : col(c), num_entries(n) {} - bool operator<(const ColWithDegree &other) const { + bool operator<(const ColWithDegree& other) const { if (num_entries == other.num_entries) { return col < other.col; } @@ -1315,7 +1314,7 @@ struct ColWithDegree { }; } // namespace -bool ImpliedFreePreprocessor::Run(LinearProgram *lp) { +bool ImpliedFreePreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const RowIndex num_rows = lp->num_constraints(); @@ -1502,7 +1501,7 @@ bool ImpliedFreePreprocessor::Run(LinearProgram *lp) { return num_implied_free_variables > 0; } -void 
ImpliedFreePreprocessor::RecoverSolution(ProblemSolution *solution) const { +void ImpliedFreePreprocessor::RecoverSolution(ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); const ColIndex num_cols = solution->variable_statuses.size(); @@ -1526,14 +1525,14 @@ void ImpliedFreePreprocessor::RecoverSolution(ProblemSolution *solution) const { // DoubletonFreeColumnPreprocessor // -------------------------------------------------------- -bool DoubletonFreeColumnPreprocessor::Run(LinearProgram *lp) { +bool DoubletonFreeColumnPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); // We will modify the matrix transpose and then push the change to the linear // program by calling lp->UseTransposeMatrixAsReference(). Note // that original_matrix will not change during this preprocessor run. - const SparseMatrix &original_matrix = lp->GetSparseMatrix(); - SparseMatrix *transpose = lp->GetMutableTransposeSparseMatrix(); + const SparseMatrix& original_matrix = lp->GetSparseMatrix(); + SparseMatrix* transpose = lp->GetMutableTransposeSparseMatrix(); const ColIndex num_cols(lp->num_variables()); for (ColIndex doubleton_col(0); doubleton_col < num_cols; ++doubleton_col) { @@ -1633,10 +1632,10 @@ bool DoubletonFreeColumnPreprocessor::Run(LinearProgram *lp) { } void DoubletonFreeColumnPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); row_deletion_helper_.RestoreDeletedRows(solution); - for (const RestoreInfo &r : Reverse(restore_stack_)) { + for (const RestoreInfo& r : Reverse(restore_stack_)) { // Correct the constraint status. switch (solution->variable_statuses[r.col]) { case VariableStatus::FIXED_VALUE: @@ -1702,7 +1701,7 @@ namespace { // Does the constraint block the variable to go to infinity in the given // direction? 
direction is either positive or negative and row is the index of // the constraint. -bool IsConstraintBlockingVariable(const LinearProgram &lp, Fractional direction, +bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction, RowIndex row) { return direction > 0.0 ? lp.constraint_upper_bounds()[row] != kInfinity : lp.constraint_lower_bounds()[row] != -kInfinity; @@ -1711,7 +1710,7 @@ bool IsConstraintBlockingVariable(const LinearProgram &lp, Fractional direction, } // namespace void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable( - ColIndex col, Fractional target_bound, LinearProgram *lp) { + ColIndex col, Fractional target_bound, LinearProgram* lp) { DCHECK_EQ(0.0, lp->objective_coefficients()[col]); if (deleted_rows_as_column_.IsEmpty()) { deleted_columns_.PopulateFromZero(lp->num_constraints(), @@ -1724,7 +1723,7 @@ void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable( is_unbounded_.resize(lp->num_variables(), false); } const bool is_unbounded_up = (target_bound == kInfinity); - const SparseColumn &column = lp->GetSparseColumn(col); + const SparseColumn& column = lp->GetSparseColumn(col); for (const SparseColumn::Entry e : column) { const RowIndex row = e.row(); if (!row_deletion_helper_.IsRowMarked(row)) { @@ -1757,7 +1756,7 @@ void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable( lp->variable_upper_bounds()[col])); } -bool UnconstrainedVariablePreprocessor::Run(LinearProgram *lp) { +bool UnconstrainedVariablePreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); @@ -1806,7 +1805,7 @@ bool UnconstrainedVariablePreprocessor::Run(LinearProgram *lp) { in_columns_to_process[col] = false; if (column_deletion_helper_.IsColumnMarked(col)) continue; - const SparseColumn &column = lp->GetSparseColumn(col); + const SparseColumn& column = lp->GetSparseColumn(col); const Fractional col_cost = 
lp->GetObjectiveCoefficientForMinimizationVersion(col); const Fractional col_lb = lp->variable_lower_bounds()[col]; @@ -1953,7 +1952,7 @@ bool UnconstrainedVariablePreprocessor::Run(LinearProgram *lp) { } if (!changed_rows.empty()) { - const SparseMatrix &transpose = lp->GetTransposeSparseMatrix(); + const SparseMatrix& transpose = lp->GetTransposeSparseMatrix(); for (const RowIndex row : changed_rows) { for (const SparseColumn::Entry entry : transpose.column(RowToColIndex(row))) { @@ -1986,7 +1985,7 @@ bool UnconstrainedVariablePreprocessor::Run(LinearProgram *lp) { } void UnconstrainedVariablePreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); @@ -2021,7 +2020,7 @@ void UnconstrainedVariablePreprocessor::RecoverSolution( // this is called. DCHECK(IsFinite(rhs_[row])); if (last_deleted_column[row] != col || !IsFinite(rhs_[row])) continue; - const SparseColumn &row_as_column = + const SparseColumn& row_as_column = deleted_rows_as_column_.column(RowToColIndex(row)); const Fractional activity = rhs_[row] - ScalarProduct(solution->primal_values, row_as_column); @@ -2053,7 +2052,7 @@ void UnconstrainedVariablePreprocessor::RecoverSolution( // FreeConstraintPreprocessor // -------------------------------------------------------- -bool FreeConstraintPreprocessor::Run(LinearProgram *lp) { +bool FreeConstraintPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const RowIndex num_rows = lp->num_constraints(); @@ -2069,7 +2068,7 @@ bool FreeConstraintPreprocessor::Run(LinearProgram *lp) { } void FreeConstraintPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); row_deletion_helper_.RestoreDeletedRows(solution); @@ -2079,7 +2078,7 
@@ void FreeConstraintPreprocessor::RecoverSolution( // EmptyConstraintPreprocessor // -------------------------------------------------------- -bool EmptyConstraintPreprocessor::Run(LinearProgram *lp) { +bool EmptyConstraintPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const RowIndex num_rows(lp->num_constraints()); @@ -2117,7 +2116,7 @@ bool EmptyConstraintPreprocessor::Run(LinearProgram *lp) { } void EmptyConstraintPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); row_deletion_helper_.RestoreDeletedRows(solution); @@ -2127,7 +2126,7 @@ void EmptyConstraintPreprocessor::RecoverSolution( // SingletonPreprocessor // -------------------------------------------------------- -SingletonUndo::SingletonUndo(OperationType type, const LinearProgram &lp, +SingletonUndo::SingletonUndo(OperationType type, const LinearProgram& lp, MatrixEntry e, ConstraintStatus status) : type_(type), is_maximization_(lp.IsMaximizationProblem()), @@ -2139,10 +2138,10 @@ SingletonUndo::SingletonUndo(OperationType type, const LinearProgram &lp, constraint_upper_bound_(lp.constraint_upper_bounds()[e.row]), constraint_status_(status) {} -void SingletonUndo::Undo(const GlopParameters ¶meters, - const SparseMatrix &deleted_columns, - const SparseMatrix &deleted_rows, - ProblemSolution *solution) const { +void SingletonUndo::Undo(const GlopParameters& parameters, + const SparseMatrix& deleted_columns, + const SparseMatrix& deleted_rows, + ProblemSolution* solution) const { switch (type_) { case SINGLETON_ROW: SingletonRowUndo(deleted_columns, solution); @@ -2160,7 +2159,7 @@ void SingletonUndo::Undo(const GlopParameters ¶meters, } void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e, - LinearProgram *lp) { + LinearProgram* lp) { Fractional implied_lower_bound = lp->constraint_lower_bounds()[e.row] / e.coeff; 
Fractional implied_upper_bound = @@ -2214,8 +2213,8 @@ void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e, } // The dual value of the row needs to be corrected to stay at the optimal. -void SingletonUndo::SingletonRowUndo(const SparseMatrix &deleted_columns, - ProblemSolution *solution) const { +void SingletonUndo::SingletonRowUndo(const SparseMatrix& deleted_columns, + ProblemSolution* solution) const { DCHECK_EQ(0, solution->dual_values[e_.row]); // If the variable is basic or free, we can just keep the constraint @@ -2282,7 +2281,7 @@ void SingletonUndo::SingletonRowUndo(const SparseMatrix &deleted_columns, } void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds( - MatrixEntry e, LinearProgram *lp) { + MatrixEntry e, LinearProgram* lp) { Fractional lower_delta = -e.coeff * lp->variable_upper_bounds()[e.col]; Fractional upper_delta = -e.coeff * lp->variable_lower_bounds()[e.col]; if (e.coeff < 0.0) { @@ -2294,10 +2293,10 @@ void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds( } bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable( - const MatrixEntry &matrix_entry, const LinearProgram &lp) const { + const MatrixEntry& matrix_entry, const LinearProgram& lp) const { DCHECK(in_mip_context_); DCHECK(lp.IsVariableInteger(matrix_entry.col)); - const SparseMatrix &transpose = lp.GetTransposeSparseMatrix(); + const SparseMatrix& transpose = lp.GetTransposeSparseMatrix(); for (const SparseColumn::Entry entry : transpose.column(RowToColIndex(matrix_entry.row))) { // Check if the variable is integer. 
@@ -2335,9 +2334,9 @@ bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable( } void SingletonPreprocessor::DeleteZeroCostSingletonColumn( - const SparseMatrix &transpose, MatrixEntry e, LinearProgram *lp) { + const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) { const ColIndex transpose_col = RowToColIndex(e.row); - const SparseColumn &column = transpose.column(transpose_col); + const SparseColumn& column = transpose.column(transpose_col); undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN, *lp, e, ConstraintStatus::FREE)); if (deleted_rows_.column(transpose_col).IsEmpty()) { @@ -2350,8 +2349,8 @@ void SingletonPreprocessor::DeleteZeroCostSingletonColumn( // We need to restore the variable value in order to satisfy the constraint. void SingletonUndo::ZeroCostSingletonColumnUndo( - const GlopParameters ¶meters, const SparseMatrix &deleted_rows, - ProblemSolution *solution) const { + const GlopParameters& parameters, const SparseMatrix& deleted_rows, + ProblemSolution* solution) const { // If the variable was fixed, this is easy. Note that this is the only // possible case if the current constraint status is FIXED. if (variable_upper_bound_ == variable_lower_bound_) { @@ -2463,10 +2462,10 @@ void SingletonUndo::ZeroCostSingletonColumnUndo( } void SingletonPreprocessor::DeleteSingletonColumnInEquality( - const SparseMatrix &transpose, MatrixEntry e, LinearProgram *lp) { + const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) { // Save information for the undo. 
const ColIndex transpose_col = RowToColIndex(e.row); - const SparseColumn &row_as_column = transpose.column(transpose_col); + const SparseColumn& row_as_column = transpose.column(transpose_col); undo_stack_.push_back( SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e, ConstraintStatus::FREE)); @@ -2506,8 +2505,8 @@ void SingletonPreprocessor::DeleteSingletonColumnInEquality( } void SingletonUndo::SingletonColumnInEqualityUndo( - const GlopParameters ¶meters, const SparseMatrix &deleted_rows, - ProblemSolution *solution) const { + const GlopParameters& parameters, const SparseMatrix& deleted_rows, + ProblemSolution* solution) const { // First do the same as a zero-cost singleton column. ZeroCostSingletonColumnUndo(parameters, deleted_rows, solution); @@ -2521,14 +2520,14 @@ void SingletonUndo::SingletonColumnInEqualityUndo( } void SingletonUndo::MakeConstraintAnEqualityUndo( - ProblemSolution *solution) const { + ProblemSolution* solution) const { if (solution->constraint_statuses[e_.row] == ConstraintStatus::FIXED_VALUE) { solution->constraint_statuses[e_.row] = constraint_status_; } } bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible( - const SparseMatrix &transpose, MatrixEntry e, LinearProgram *lp) { + const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) { // TODO(user): We could skip early if the relevant constraint bound is // infinity. const Fractional cst_lower_bound = lp->constraint_lower_bounds()[e.row]; @@ -2539,8 +2538,8 @@ bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible( // "artificial" extra variable x with coefficient 1.0 could take while still // making the constraint feasible. The domain bounds for the constraint e.row // will be stored in row_lb_sum_[e.row] and row_ub_sum_[e.row]. 
- const DenseRow &variable_ubs = lp->variable_upper_bounds(); - const DenseRow &variable_lbs = lp->variable_lower_bounds(); + const DenseRow& variable_ubs = lp->variable_upper_bounds(); + const DenseRow& variable_lbs = lp->variable_lower_bounds(); if (e.row >= row_sum_is_cached_.size() || !row_sum_is_cached_[e.row]) { if (e.row >= row_sum_is_cached_.size()) { const int new_size = e.row.value() + 1; @@ -2678,11 +2677,11 @@ bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible( return false; } -bool SingletonPreprocessor::Run(LinearProgram *lp) { +bool SingletonPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); - const SparseMatrix &matrix = lp->GetSparseMatrix(); - const SparseMatrix &transpose = lp->GetTransposeSparseMatrix(); + const SparseMatrix& matrix = lp->GetSparseMatrix(); + const SparseMatrix& transpose = lp->GetTransposeSparseMatrix(); // Initialize column_to_process with the current singleton columns. ColIndex num_cols(matrix.num_cols()); @@ -2756,7 +2755,7 @@ bool SingletonPreprocessor::Run(LinearProgram *lp) { return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty(); } -void SingletonPreprocessor::RecoverSolution(ProblemSolution *solution) const { +void SingletonPreprocessor::RecoverSolution(ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); @@ -2777,7 +2776,7 @@ void SingletonPreprocessor::RecoverSolution(ProblemSolution *solution) const { } MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry( - ColIndex col, const SparseMatrix &matrix) { + ColIndex col, const SparseMatrix& matrix) { for (const SparseColumn::Entry e : matrix.column(col)) { if (!row_deletion_helper_.IsRowMarked(e.row())) { DCHECK_NE(0.0, e.coefficient()); @@ -2791,7 +2790,7 @@ MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry( } MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry( - RowIndex row, const 
SparseMatrix &transpose) { + RowIndex row, const SparseMatrix& transpose) { for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) { const ColIndex col = RowToColIndex(e.row()); if (!column_deletion_helper_.IsColumnMarked(col)) { @@ -2809,7 +2808,7 @@ MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry( // RemoveNearZeroEntriesPreprocessor // -------------------------------------------------------- -bool RemoveNearZeroEntriesPreprocessor::Run(LinearProgram *lp) { +bool RemoveNearZeroEntriesPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const ColIndex num_cols = lp->num_variables(); @@ -2877,13 +2876,13 @@ bool RemoveNearZeroEntriesPreprocessor::Run(LinearProgram *lp) { } void RemoveNearZeroEntriesPreprocessor::RecoverSolution( - ProblemSolution *solution) const {} + ProblemSolution* solution) const {} // -------------------------------------------------------- // SingletonColumnSignPreprocessor // -------------------------------------------------------- -bool SingletonColumnSignPreprocessor::Run(LinearProgram *lp) { +bool SingletonColumnSignPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); const ColIndex num_cols = lp->num_variables(); @@ -2892,7 +2891,7 @@ bool SingletonColumnSignPreprocessor::Run(LinearProgram *lp) { changed_columns_.clear(); int num_singletons = 0; for (ColIndex col(0); col < num_cols; ++col) { - SparseColumn *sparse_column = lp->GetMutableSparseColumn(col); + SparseColumn* sparse_column = lp->GetMutableSparseColumn(col); const Fractional cost = lp->objective_coefficients()[col]; if (sparse_column->num_entries() == 1) { ++num_singletons; @@ -2912,7 +2911,7 @@ bool SingletonColumnSignPreprocessor::Run(LinearProgram *lp) { } void SingletonColumnSignPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); 
RETURN_IF_NULL(solution); for (int i = 0; i < changed_columns_.size(); ++i) { @@ -2931,7 +2930,7 @@ void SingletonColumnSignPreprocessor::RecoverSolution( // DoubletonEqualityRowPreprocessor // -------------------------------------------------------- -bool DoubletonEqualityRowPreprocessor::Run(LinearProgram *lp) { +bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); @@ -2943,7 +2942,7 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram *lp) { saved_row_upper_bounds_ = lp->constraint_upper_bounds(); // Note that we don't update the transpose during this preprocessor run. - const SparseMatrix &original_transpose = lp->GetTransposeSparseMatrix(); + const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix(); // Iterate over the rows that were already doubletons before this preprocessor // run, and whose items don't belong to a column that we deleted during this @@ -2951,7 +2950,7 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram *lp) { // we only modify rows that have an item on a deleted column. 
const RowIndex num_rows(lp->num_constraints()); for (RowIndex row(0); row < num_rows; ++row) { - const SparseColumn &original_row = + const SparseColumn& original_row = original_transpose.column(RowToColIndex(row)); if (original_row.num_entries() != 2 || lp->constraint_lower_bounds()[row] != @@ -3112,12 +3111,12 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram *lp) { } void DoubletonEqualityRowPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); column_deletion_helper_.RestoreDeletedColumns(solution); row_deletion_helper_.RestoreDeletedRows(solution); - for (const RestoreInfo &r : Reverse(restore_stack_)) { + for (const RestoreInfo& r : Reverse(restore_stack_)) { switch (solution->variable_statuses[r.col[MODIFIED]]) { case VariableStatus::FIXED_VALUE: LOG(DFATAL) << "FIXED variable produced by DoubletonPreprocessor!"; @@ -3139,7 +3138,7 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution( // The bound was induced by a bound of one of the two original // variables. Put that original variable at its bound, and make // the other one basic. - const RestoreInfo::ColChoiceAndStatus &bound_backtracking = + const RestoreInfo::ColChoiceAndStatus& bound_backtracking = solution->variable_statuses[r.col[MODIFIED]] == VariableStatus::AT_LOWER_BOUND ? 
r.bound_backtracking_at_lower_bound @@ -3191,9 +3190,9 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution( saved_row_upper_bounds_, solution); } -void FixConstraintWithFixedStatuses(const DenseColumn &row_lower_bounds, - const DenseColumn &row_upper_bounds, - ProblemSolution *solution) { +void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds, + const DenseColumn& row_upper_bounds, + ProblemSolution* solution) { const RowIndex num_rows = solution->constraint_statuses.size(); DCHECK_EQ(row_lower_bounds.size(), num_rows); DCHECK_EQ(row_upper_bounds.size(), num_rows); @@ -3214,7 +3213,7 @@ void FixConstraintWithFixedStatuses(const DenseColumn &row_lower_bounds, } void DoubletonEqualityRowPreprocessor:: - SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo *r) { + SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo* r) { using std::swap; swap(r->col[DELETED], r->col[MODIFIED]); swap(r->coeff[DELETED], r->coeff[MODIFIED]); @@ -3228,7 +3227,7 @@ void DoubletonEqualityRowPreprocessor:: // DualizerPreprocessor // -------------------------------------------------------- -bool DualizerPreprocessor::Run(LinearProgram *lp) { +bool DualizerPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); if (parameters_.solve_dual_problem() == GlopParameters::NEVER_DO) { @@ -3344,7 +3343,7 @@ bool DualizerPreprocessor::Run(LinearProgram *lp) { // Note(user): This assumes that LinearProgram.PopulateFromDual() uses // the first ColIndex and RowIndex for the rows and columns of the given // problem. 
-void DualizerPreprocessor::RecoverSolution(ProblemSolution *solution) const { +void DualizerPreprocessor::RecoverSolution(ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); @@ -3477,7 +3476,7 @@ ProblemStatus DualizerPreprocessor::ChangeStatusToDualStatus( // ShiftVariableBoundsPreprocessor // -------------------------------------------------------- -bool ShiftVariableBoundsPreprocessor::Run(LinearProgram *lp) { +bool ShiftVariableBoundsPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); @@ -3524,7 +3523,7 @@ bool ShiftVariableBoundsPreprocessor::Run(LinearProgram *lp) { offsets_[col] = offset; lp->SetVariableBounds(col, variable_initial_lbs_[col] - offset, variable_initial_ubs_[col] - offset); - const SparseColumn &sparse_column = lp->GetSparseColumn(col); + const SparseColumn& sparse_column = lp->GetSparseColumn(col); for (const SparseColumn::Entry e : sparse_column) { row_offsets[e.row()].Add(e.coefficient() * offset); } @@ -3546,7 +3545,7 @@ bool ShiftVariableBoundsPreprocessor::Run(LinearProgram *lp) { } void ShiftVariableBoundsPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); const ColIndex num_cols = solution->variable_statuses.size(); @@ -3577,7 +3576,7 @@ void ShiftVariableBoundsPreprocessor::RecoverSolution( // ScalingPreprocessor // -------------------------------------------------------- -bool ScalingPreprocessor::Run(LinearProgram *lp) { +bool ScalingPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); if (!parameters_.use_scaling()) return false; @@ -3600,7 +3599,7 @@ bool ScalingPreprocessor::Run(LinearProgram *lp) { return true; } -void ScalingPreprocessor::RecoverSolution(ProblemSolution *solution) const { +void ScalingPreprocessor::RecoverSolution(ProblemSolution* 
solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); @@ -3640,7 +3639,7 @@ void ScalingPreprocessor::RecoverSolution(ProblemSolution *solution) const { // ToMinimizationPreprocessor // -------------------------------------------------------- -bool ToMinimizationPreprocessor::Run(LinearProgram *lp) { +bool ToMinimizationPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); if (lp->IsMaximizationProblem()) { @@ -3658,22 +3657,23 @@ bool ToMinimizationPreprocessor::Run(LinearProgram *lp) { } void ToMinimizationPreprocessor::RecoverSolution( - ProblemSolution *solution) const {} + ProblemSolution* solution) const {} // -------------------------------------------------------- // AddSlackVariablesPreprocessor // -------------------------------------------------------- -bool AddSlackVariablesPreprocessor::Run(LinearProgram *lp) { +bool AddSlackVariablesPreprocessor::Run(LinearProgram* lp) { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_VALUE_IF_NULL(lp, false); - lp->AddSlackVariablesWhereNecessary(/*detect_integer_constraints=*/true); + lp->AddSlackVariablesWhereNecessary( + /*detect_integer_constraints=*/true); first_slack_col_ = lp->GetFirstSlackVariable(); return true; } void AddSlackVariablesPreprocessor::RecoverSolution( - ProblemSolution *solution) const { + ProblemSolution* solution) const { SCOPED_INSTRUCTION_COUNT(time_limit_); RETURN_IF_NULL(solution); diff --git a/ortools/glop/primal_edge_norms.cc b/ortools/glop/primal_edge_norms.cc index d4f6fe18e0..b48be4e60c 100644 --- a/ortools/glop/primal_edge_norms.cc +++ b/ortools/glop/primal_edge_norms.cc @@ -19,9 +19,9 @@ namespace operations_research { namespace glop { -PrimalEdgeNorms::PrimalEdgeNorms(const CompactSparseMatrix &compact_matrix, - const VariablesInfo &variables_info, - const BasisFactorization &basis_factorization) +PrimalEdgeNorms::PrimalEdgeNorms(const CompactSparseMatrix& compact_matrix, + const VariablesInfo& 
variables_info, + const BasisFactorization& basis_factorization) : compact_matrix_(compact_matrix), variables_info_(variables_info), basis_factorization_(basis_factorization), @@ -44,23 +44,23 @@ bool PrimalEdgeNorms::NeedsBasisRefactorization() const { return recompute_edge_squared_norms_; } -const DenseRow &PrimalEdgeNorms::GetEdgeSquaredNorms() { +const DenseRow& PrimalEdgeNorms::GetEdgeSquaredNorms() { if (recompute_edge_squared_norms_) ComputeEdgeSquaredNorms(); return edge_squared_norms_; } -const DenseRow &PrimalEdgeNorms::GetDevexWeights() { +const DenseRow& PrimalEdgeNorms::GetDevexWeights() { if (reset_devex_weights_) ResetDevexWeights(); return devex_weights_; } -const DenseRow &PrimalEdgeNorms::GetMatrixColumnNorms() { +const DenseRow& PrimalEdgeNorms::GetMatrixColumnNorms() { if (matrix_column_norms_.empty()) ComputeMatrixColumnNorms(); return matrix_column_norms_; } void PrimalEdgeNorms::TestEnteringEdgeNormPrecision( - ColIndex entering_col, const ScatteredColumn &direction) { + ColIndex entering_col, const ScatteredColumn& direction) { if (!recompute_edge_squared_norms_) { SCOPED_TIME_STAT(&stats_); // Recompute the squared norm of the edge used during this @@ -86,8 +86,8 @@ void PrimalEdgeNorms::TestEnteringEdgeNormPrecision( void PrimalEdgeNorms::UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, - const ScatteredColumn &direction, - UpdateRow *update_row) { + const ScatteredColumn& direction, + UpdateRow* update_row) { SCOPED_TIME_STAT(&stats_); DCHECK_NE(entering_col, leaving_col); if (!recompute_edge_squared_norms_) { @@ -140,7 +140,7 @@ void PrimalEdgeNorms::ComputeEdgeSquaredNorms() { // the value of direction is no longer needed. This will simplify the code and // avoid a copy here. 
void PrimalEdgeNorms::ComputeDirectionLeftInverse( - ColIndex entering_col, const ScatteredColumn &direction) { + ColIndex entering_col, const ScatteredColumn& direction) { SCOPED_TIME_STAT(&stats_); // Initialize direction_left_inverse_ to direction. Note the special case when @@ -184,8 +184,8 @@ void PrimalEdgeNorms::ComputeDirectionLeftInverse( void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, - const DenseColumn &direction, - const UpdateRow &update_row) { + const DenseColumn& direction, + const UpdateRow& update_row) { SCOPED_TIME_STAT(&stats_); // 'pivot' is the value of the entering_edge at 'leaving_row'. @@ -232,7 +232,7 @@ void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col, void PrimalEdgeNorms::UpdateDevexWeights( ColIndex entering_col /* index q in the paper */, ColIndex leaving_col /* index p in the paper */, RowIndex leaving_row, - const DenseColumn &direction, const UpdateRow &update_row) { + const DenseColumn& direction, const UpdateRow& update_row) { SCOPED_TIME_STAT(&stats_); // Compared to steepest edge update, the DEVEX weight uses the largest of the diff --git a/ortools/glop/rank_one_update.h b/ortools/glop/rank_one_update.h index c5aa75dcb4..8d3c8a0ebe 100644 --- a/ortools/glop/rank_one_update.h +++ b/ortools/glop/rank_one_update.h @@ -43,7 +43,7 @@ class RankOneUpdateElementaryMatrix { // RankOneUpdateFactorization below: // - It uses less overall memory (and avoid allocation overhead). // - It has a better cache behavior for the RankOneUpdateFactorization solves. - RankOneUpdateElementaryMatrix(const CompactSparseMatrix *storage, + RankOneUpdateElementaryMatrix(const CompactSparseMatrix* storage, ColIndex u_index, ColIndex v_index, Fractional u_dot_v) : storage_(storage), @@ -58,13 +58,13 @@ class RankOneUpdateElementaryMatrix { // Solves T.x = rhs with rhs initialy in x (a column vector). // The non-zeros version keeps track of the new non-zeros. 
- void RightSolve(DenseColumn *x) const { + void RightSolve(DenseColumn* x) const { DCHECK(!IsSingular()); const Fractional multiplier = -storage_->ColumnScalarProduct(v_index_, Transpose(*x)) / mu_; storage_->ColumnAddMultipleToDenseColumn(u_index_, multiplier, x); } - void RightSolveWithNonZeros(ScatteredColumn *x) const { + void RightSolveWithNonZeros(ScatteredColumn* x) const { DCHECK(!IsSingular()); const Fractional multiplier = -storage_->ColumnScalarProduct(v_index_, Transpose(x->values)) / mu_; @@ -76,35 +76,35 @@ class RankOneUpdateElementaryMatrix { // Solves y.T = rhs with rhs initialy in y (a row vector). // The non-zeros version keeps track of the new non-zeros. - void LeftSolve(DenseRow *y) const { + void LeftSolve(DenseRow* y) const { DCHECK(!IsSingular()); const Fractional multiplier = -storage_->ColumnScalarProduct(u_index_, *y) / mu_; - storage_->ColumnAddMultipleToDenseColumn( - v_index_, multiplier, reinterpret_cast(y)); + storage_->ColumnAddMultipleToDenseColumn(v_index_, multiplier, + reinterpret_cast(y)); } - void LeftSolveWithNonZeros(ScatteredRow *y) const { + void LeftSolveWithNonZeros(ScatteredRow* y) const { DCHECK(!IsSingular()); const Fractional multiplier = -storage_->ColumnScalarProduct(u_index_, y->values) / mu_; if (multiplier != 0.0) { storage_->ColumnAddMultipleToSparseScatteredColumn( - v_index_, multiplier, reinterpret_cast(y)); + v_index_, multiplier, reinterpret_cast(y)); } } // Computes T.x for a given column vector. - void RightMultiply(DenseColumn *x) const { + void RightMultiply(DenseColumn* x) const { const Fractional multiplier = storage_->ColumnScalarProduct(v_index_, Transpose(*x)); storage_->ColumnAddMultipleToDenseColumn(u_index_, multiplier, x); } // Computes y.T for a given row vector. 
- void LeftMultiply(DenseRow *y) const { + void LeftMultiply(DenseRow* y) const { const Fractional multiplier = storage_->ColumnScalarProduct(u_index_, *y); - storage_->ColumnAddMultipleToDenseColumn( - v_index_, multiplier, reinterpret_cast(y)); + storage_->ColumnAddMultipleToDenseColumn(v_index_, multiplier, + reinterpret_cast(y)); } EntryIndex num_entries() const { @@ -122,7 +122,7 @@ class RankOneUpdateElementaryMatrix { // Note that we allow copy and assignment so we can store a // RankOneUpdateElementaryMatrix in an STL container. - const CompactSparseMatrix *storage_; + const CompactSparseMatrix* storage_; ColIndex u_index_; ColIndex v_index_; Fractional mu_; @@ -146,13 +146,13 @@ class RankOneUpdateFactorization { } // Updates the factorization. - void Update(const RankOneUpdateElementaryMatrix &update_matrix) { + void Update(const RankOneUpdateElementaryMatrix& update_matrix) { elementary_matrices_.push_back(update_matrix); num_entries_ += update_matrix.num_entries(); } // Left-solves all systems from right to left, i.e. y_i = y_{i+1}.(T_i)^{-1} - void LeftSolve(DenseRow *y) const { + void LeftSolve(DenseRow* y) const { RETURN_IF_NULL(y); for (int i = elementary_matrices_.size() - 1; i >= 0; --i) { elementary_matrices_[i].LeftSolve(y); @@ -161,7 +161,7 @@ class RankOneUpdateFactorization { // Same as LeftSolve(), but if the given non_zeros are not empty, then all // the new non-zeros in the result are appended to it. - void LeftSolveWithNonZeros(ScatteredRow *y) const { + void LeftSolveWithNonZeros(ScatteredRow* y) const { RETURN_IF_NULL(y); if (y->non_zeros.empty()) { LeftSolve(&y->values); @@ -185,7 +185,7 @@ class RankOneUpdateFactorization { } // Right-solves all systems from left to right, i.e. 
T_i.d_{i+1} = d_i - void RightSolve(DenseColumn *d) const { + void RightSolve(DenseColumn* d) const { RETURN_IF_NULL(d); const size_t end = elementary_matrices_.size(); for (int i = 0; i < end; ++i) { @@ -195,7 +195,7 @@ class RankOneUpdateFactorization { // Same as RightSolve(), but if the given non_zeros are not empty, then all // the new non-zeros in the result are appended to it. - void RightSolveWithNonZeros(ScatteredColumn *d) const { + void RightSolveWithNonZeros(ScatteredColumn* d) const { RETURN_IF_NULL(d); if (d->non_zeros.empty()) { RightSolve(&d->values); diff --git a/ortools/glop/reduced_costs.cc b/ortools/glop/reduced_costs.cc index 358e2199c7..e163d3673c 100644 --- a/ortools/glop/reduced_costs.cc +++ b/ortools/glop/reduced_costs.cc @@ -24,12 +24,12 @@ namespace operations_research { namespace glop { -ReducedCosts::ReducedCosts(const CompactSparseMatrix &matrix, - const DenseRow &objective, - const RowToColMapping &basis, - const VariablesInfo &variables_info, - const BasisFactorization &basis_factorization, - random_engine_t *random) +ReducedCosts::ReducedCosts(const CompactSparseMatrix& matrix, + const DenseRow& objective, + const RowToColMapping& basis, + const VariablesInfo& variables_info, + const BasisFactorization& basis_factorization, + random_engine_t* random) : matrix_(matrix), objective_(objective), basis_(basis), @@ -56,8 +56,8 @@ bool ReducedCosts::NeedsBasisRefactorization() const { } bool ReducedCosts::TestEnteringReducedCostPrecision( - ColIndex entering_col, const ScatteredColumn &direction, - Fractional *reduced_cost) { + ColIndex entering_col, const ScatteredColumn& direction, + Fractional* reduced_cost) { SCOPED_TIME_STAT(&stats_); if (recompute_basic_objective_) { ComputeBasicObjective(); @@ -143,8 +143,8 @@ Fractional ReducedCosts::ComputeMaximumDualInfeasibility() const { DCHECK(!recompute_reduced_costs_); if (recompute_reduced_costs_) return 0.0; Fractional maximum_dual_infeasibility = 0.0; - const DenseBitRow &can_decrease = 
variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) { const Fractional rc = reduced_costs_[col]; if ((can_increase.IsSet(col) && rc < 0.0) || @@ -161,8 +161,8 @@ Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() const { DCHECK(!recompute_reduced_costs_); if (recompute_reduced_costs_) return 0.0; Fractional dual_infeasibility_sum = 0.0; - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) { const Fractional rc = reduced_costs_[col]; if ((can_increase.IsSet(col) && rc < 0.0) || @@ -175,8 +175,8 @@ Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() const { void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, - const ScatteredColumn &direction, - UpdateRow *update_row) { + const ScatteredColumn& direction, + UpdateRow* update_row) { SCOPED_TIME_STAT(&stats_); const ColIndex leaving_col = basis_[leaving_row]; DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col)); @@ -204,14 +204,14 @@ void ReducedCosts::SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col) { } void ReducedCosts::SetNonBasicVariableCostToZero(ColIndex col, - Fractional *current_cost) { + Fractional* current_cost) { DCHECK_NE(variables_info_.GetStatusRow()[col], VariableStatus::BASIC); DCHECK_EQ(current_cost, &objective_[col]); reduced_costs_[col] -= objective_[col]; *current_cost = 0.0; } -void ReducedCosts::SetParameters(const GlopParameters ¶meters) { 
+void ReducedCosts::SetParameters(const GlopParameters& parameters) { parameters_ = parameters; } @@ -315,13 +315,13 @@ void ReducedCosts::MaintainDualInfeasiblePositions(bool maintain) { } } -const DenseRow &ReducedCosts::GetReducedCosts() { +const DenseRow& ReducedCosts::GetReducedCosts() { SCOPED_TIME_STAT(&stats_); RecomputeReducedCostsAndPrimalEnteringCandidatesIfNeeded(); return reduced_costs_; } -const DenseColumn &ReducedCosts::GetDualValues() { +const DenseColumn& ReducedCosts::GetDualValues() { SCOPED_TIME_STAT(&stats_); ComputeBasicObjectiveLeftInverse(); return Transpose(basic_objective_left_inverse_.values); @@ -362,7 +362,7 @@ void ReducedCosts::ComputeReducedCosts() { const ColIndex num_cols = matrix_.num_cols(); reduced_costs_.resize(num_cols, 0.0); - const DenseBitRow &is_basic = variables_info_.GetIsBasicBitRow(); + const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow(); #ifdef OMP const int num_omp_threads = parameters_.num_omp_threads(); #else @@ -446,7 +446,7 @@ void ReducedCosts::ComputeBasicObjectiveLeftInverse() { void ReducedCosts::UpdateReducedCosts(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, Fractional pivot, - UpdateRow *update_row) { + UpdateRow* update_row) { DCHECK_NE(entering_col, leaving_col); DCHECK_NE(pivot, 0.0); if (recompute_reduced_costs_) return; @@ -489,7 +489,7 @@ void ReducedCosts::UpdateReducedCosts(ColIndex entering_col, // Always update the slack variable position so we have the dual values and // we can use them in ComputeCurrentDualResidualError(). 
- const ScatteredRow &unit_row_left_inverse = + const ScatteredRow& unit_row_left_inverse = update_row->GetUnitRowLeftInverse(); if (unit_row_left_inverse.non_zeros.empty()) { const ColIndex num_cols = unit_row_left_inverse.values.size(); @@ -515,8 +515,8 @@ void ReducedCosts::UpdateReducedCosts(ColIndex entering_col, bool ReducedCosts::IsValidPrimalEnteringCandidate(ColIndex col) const { const Fractional reduced_cost = reduced_costs_[col]; - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); const Fractional tolerance = dual_feasibility_tolerance_; return (can_increase.IsSet(col) && (reduced_cost < -tolerance)) || (can_decrease.IsSet(col) && (reduced_cost > tolerance)); @@ -541,11 +541,11 @@ void ReducedCosts::ResetDualInfeasibilityBitSet() { // converted to an int. It also uses an XOR (which appears to be faster) since // the two conditions on the reduced cost are exclusive. 
template -void ReducedCosts::UpdateEnteringCandidates(const ColumnsToUpdate &cols) { +void ReducedCosts::UpdateEnteringCandidates(const ColumnsToUpdate& cols) { SCOPED_TIME_STAT(&stats_); const Fractional tolerance = dual_feasibility_tolerance_; - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); for (const ColIndex col : cols) { const Fractional reduced_cost = reduced_costs_[col]; is_dual_infeasible_.SetBitFromOtherBitSets( diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index f8df0ebdda..b56efb49f9 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -121,7 +121,7 @@ void RevisedSimplex::ClearStateForNextSolve() { solution_state_.statuses.clear(); } -void RevisedSimplex::LoadStateForNextSolve(const BasisState &state) { +void RevisedSimplex::LoadStateForNextSolve(const BasisState& state) { SCOPED_TIME_STAT(&function_stats_); solution_state_ = state; solution_state_has_been_set_externally_ = true; @@ -131,7 +131,7 @@ void RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve() { notify_that_matrix_is_unchanged_ = true; } -Status RevisedSimplex::Solve(const LinearProgram &lp, TimeLimit *time_limit) { +Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { SCOPED_TIME_STAT(&function_stats_); DCHECK(lp.IsCleanedUp()); GLOP_RETURN_ERROR_IF_NULL(time_limit); @@ -435,7 +435,7 @@ Fractional RevisedSimplex::GetReducedCost(ColIndex col) const { return solution_reduced_costs_[col]; } -const DenseRow &RevisedSimplex::GetReducedCosts() const { +const DenseRow& RevisedSimplex::GetReducedCosts() const { return solution_reduced_costs_; } @@ -447,7 +447,7 @@ VariableStatus RevisedSimplex::GetVariableStatus(ColIndex col) const { return 
variables_info_.GetStatusRow()[col]; } -const BasisState &RevisedSimplex::GetState() const { return solution_state_; } +const BasisState& RevisedSimplex::GetState() const { return solution_state_; } Fractional RevisedSimplex::GetConstraintActivity(RowIndex row) const { // Note the negative sign since the slack variable is such that @@ -468,23 +468,23 @@ ConstraintStatus RevisedSimplex::GetConstraintStatus(RowIndex row) const { return VariableToConstraintStatus(s); } -const DenseRow &RevisedSimplex::GetPrimalRay() const { +const DenseRow& RevisedSimplex::GetPrimalRay() const { DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED); return solution_primal_ray_; } -const DenseColumn &RevisedSimplex::GetDualRay() const { +const DenseColumn& RevisedSimplex::GetDualRay() const { DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED); return solution_dual_ray_; } -const DenseRow &RevisedSimplex::GetDualRayRowCombination() const { +const DenseRow& RevisedSimplex::GetDualRayRowCombination() const { DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED); return solution_dual_ray_row_combination_; } ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; } -const BasisFactorization &RevisedSimplex::GetBasisFactorization() const { +const BasisFactorization& RevisedSimplex::GetBasisFactorization() const { DCHECK(basis_factorization_.GetColumnPermutation().empty()); return basis_factorization_; } @@ -550,8 +550,8 @@ void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue( } bool RevisedSimplex::BasisIsConsistent() const { - const DenseBitRow &is_basic = variables_info_.GetIsBasicBitRow(); - const VariableStatusRow &variable_statuses = variables_info_.GetStatusRow(); + const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow(); + const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow(); for (RowIndex row(0); row < num_rows_; ++row) { const ColIndex col = basis_[row]; if (!is_basic.IsSet(col)) return false; @@ -607,13 
+607,13 @@ namespace { // Comparator used to sort column indices according to a given value vector. class ColumnComparator { public: - explicit ColumnComparator(const DenseRow &value) : value_(value) {} + explicit ColumnComparator(const DenseRow& value) : value_(value) {} bool operator()(ColIndex col_a, ColIndex col_b) const { return value_[col_a] < value_[col_b]; } private: - const DenseRow &value_; + const DenseRow& value_; }; } // namespace @@ -626,7 +626,7 @@ class ColumnComparator { // The error_ must be equal to the constraint activity for the current variable // values before this function is called. If error_[row] is 0.0, that mean this // constraint is currently feasible. -void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping *basis) { +void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) { SCOPED_TIME_STAT(&function_stats_); // Computes the singleton columns and the cost variation of the corresponding // variables (in the only possible direction, i.e away from its current bound) @@ -663,7 +663,7 @@ void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping *basis) { // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to // them) and that the SingletonColumnSignPreprocessor makes them all positive. // However, this code works for any coefficient value. 
- const DenseRow &variable_values = variable_values_.GetDenseRow(); + const DenseRow& variable_values = variable_values_.GetDenseRow(); for (const ColIndex col : singleton_column) { const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0)); @@ -717,8 +717,8 @@ void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping *basis) { } bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged( - const LinearProgram &lp, bool *only_change_is_new_rows, - bool *only_change_is_new_cols, ColIndex *num_new_cols) { + const LinearProgram& lp, bool* only_change_is_new_rows, + bool* only_change_is_new_cols, ColIndex* num_new_cols) { SCOPED_TIME_STAT(&function_stats_); DCHECK(only_change_is_new_rows != nullptr); DCHECK(only_change_is_new_cols != nullptr); @@ -775,7 +775,7 @@ bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged( } bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero( - const LinearProgram &lp, ColIndex num_new_cols) { + const LinearProgram& lp, ColIndex num_new_cols) { SCOPED_TIME_STAT(&function_stats_); DCHECK_EQ(lp.num_variables(), num_cols_); DCHECK_LE(num_new_cols, first_slack_col_); @@ -806,7 +806,7 @@ bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero( } bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged( - const LinearProgram &lp) { + const LinearProgram& lp) { SCOPED_TIME_STAT(&function_stats_); lower_bound_.resize(num_cols_, 0.0); upper_bound_.resize(num_cols_, 0.0); @@ -830,7 +830,7 @@ bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged( } bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( - const LinearProgram &lp) { + const LinearProgram& lp) { SCOPED_TIME_STAT(&function_stats_); bool objective_is_unchanged = true; @@ -863,7 +863,7 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged( return objective_is_unchanged; } -void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram &lp) { +void RevisedSimplex::InitializeObjectiveLimit(const 
LinearProgram& lp) { objective_limit_reached_ = false; DCHECK(std::isfinite(objective_offset_)); DCHECK(std::isfinite(objective_scaling_factor_)); @@ -904,7 +904,7 @@ void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram &lp) { } void RevisedSimplex::InitializeVariableStatusesForWarmStart( - const BasisState &state, ColIndex num_new_cols) { + const BasisState& state, ColIndex num_new_cols) { variables_info_.InitializeAndComputeType(); RowIndex num_basic_variables(0); DCHECK_LE(num_new_cols, first_slack_col_); @@ -1108,7 +1108,7 @@ Status RevisedSimplex::CreateInitialBasis() { return InitializeFirstBasis(basis); } -Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping &basis) { +Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) { basis_ = basis; // For each row which does not have a basic column, assign it to the @@ -1152,15 +1152,14 @@ Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping &basis) { const Fractional tolerance = parameters_.primal_feasibility_tolerance(); if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) { VLOG(1) << absl::StrCat( - "The primal residual of the initial basis is " - "above the tolerance, ", + "The primal residual of the initial basis is above the tolerance, ", variable_values_.ComputeMaximumPrimalResidual(), " vs. 
", tolerance); } } return Status::OK(); } -Status RevisedSimplex::Initialize(const LinearProgram &lp) { +Status RevisedSimplex::Initialize(const LinearProgram& lp) { parameters_ = initial_parameters_; PropagateParameters(); @@ -1237,7 +1236,7 @@ Status RevisedSimplex::Initialize(const LinearProgram &lp) { } else if (only_change_is_new_cols && only_new_bounds) { InitializeVariableStatusesForWarmStart(solution_state_, num_new_cols); const ColIndex first_new_col(first_slack_col_ - num_new_cols); - for (ColIndex &col_ref : basis_) { + for (ColIndex& col_ref : basis_) { if (col_ref >= first_new_col) { col_ref += num_new_cols; } @@ -1338,8 +1337,8 @@ void RevisedSimplex::DisplayBasicVariableStatistics() { int num_slack_variables = 0; int num_infeasible_variables = 0; - const DenseRow &variable_values = variable_values_.GetDenseRow(); - const VariableTypeRow &variable_types = variables_info_.GetTypeRow(); + const DenseRow& variable_values = variable_values_.GetDenseRow(); + const VariableTypeRow& variable_types = variables_info_.GetTypeRow(); const Fractional tolerance = parameters_.primal_feasibility_tolerance(); for (RowIndex row(0); row < num_rows_; ++row) { const ColIndex col = basis_[row]; @@ -1449,7 +1448,7 @@ void RevisedSimplex::CorrectErrorsOnVariableValues() { void RevisedSimplex::ComputeVariableValuesError() { SCOPED_TIME_STAT(&function_stats_); error_.AssignToZero(num_rows_); - const DenseRow &variable_values = variable_values_.GetDenseRow(); + const DenseRow& variable_values = variable_values_.GetDenseRow(); for (ColIndex col(0); col < num_cols_; ++col) { const Fractional value = variable_values[col]; compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_); @@ -1517,7 +1516,7 @@ Fractional RevisedSimplex::GetRatio(RowIndex row) const { template Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates( - Fractional bound_flip_ratio, SparseColumn *leaving_candidates) const { + Fractional bound_flip_ratio, SparseColumn* leaving_candidates) 
const { SCOPED_TIME_STAT(&function_stats_); const Fractional harris_tolerance = parameters_.harris_tolerance_ratio() * @@ -1593,8 +1592,8 @@ bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) { // Ratio-test or Quotient-test. Choose the row of the leaving variable. // Known as CHUZR or CHUZRO in FORTRAN codes. Status RevisedSimplex::ChooseLeavingVariableRow( - ColIndex entering_col, Fractional reduced_cost, bool *refactorize, - RowIndex *leaving_row, Fractional *step_length, Fractional *target_bound) { + ColIndex entering_col, Fractional reduced_cost, bool* refactorize, + RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) { SCOPED_TIME_STAT(&function_stats_); GLOP_RETURN_ERROR_IF_NULL(refactorize); GLOP_RETURN_ERROR_IF_NULL(leaving_row); @@ -1782,7 +1781,7 @@ struct BreakPoint { // We want to process the breakpoints by increasing ratio and decreasing // coefficient magnitude (if the ratios are the same). Returns false if "this" // is before "other" in a priority queue. - bool operator<(const BreakPoint &other) const { + bool operator<(const BreakPoint& other) const { if (ratio == other.ratio) { if (coeff_magnitude == other.coeff_magnitude) { return row > other.row; @@ -1801,9 +1800,9 @@ struct BreakPoint { } // namespace void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow( - ColIndex entering_col, Fractional reduced_cost, bool *refactorize, - RowIndex *leaving_row, Fractional *step_length, - Fractional *target_bound) const { + ColIndex entering_col, Fractional reduced_cost, bool* refactorize, + RowIndex* leaving_row, Fractional* step_length, + Fractional* target_bound) const { SCOPED_TIME_STAT(&function_stats_); RETURN_IF_NULL(refactorize); RETURN_IF_NULL(leaving_row); @@ -1907,20 +1906,20 @@ void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow( } // This implements the pricing step for the dual simplex. 
-Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex *leaving_row, - Fractional *cost_variation, - Fractional *target_bound) { +Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row, + Fractional* cost_variation, + Fractional* target_bound) { GLOP_RETURN_ERROR_IF_NULL(leaving_row); GLOP_RETURN_ERROR_IF_NULL(cost_variation); // TODO(user): Reuse parameters_.optimization_rule() to decide if we use // steepest edge or the normal Dantzig pricing. - const DenseColumn &squared_norm = dual_edge_norms_.GetEdgeSquaredNorms(); + const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms(); SCOPED_TIME_STAT(&function_stats_); *leaving_row = kInvalidRow; Fractional best_price(0.0); - const DenseColumn &squared_infeasibilities = + const DenseColumn& squared_infeasibilities = variable_values_.GetPrimalSquaredInfeasibilities(); equivalent_leaving_choices_.clear(); for (const RowIndex row : variable_values_.GetPrimalInfeasiblePositions()) { @@ -1981,7 +1980,7 @@ bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type, void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row, ColIndex entering_col) { SCOPED_TIME_STAT(&function_stats_); - const VariableTypeRow &variable_type = variables_info_.GetTypeRow(); + const VariableTypeRow& variable_type = variables_info_.GetTypeRow(); const Fractional threshold = parameters_.ratio_test_zero_threshold(); // Convert the dual_pricing_vector_ from the old basis into the new one (which @@ -2019,20 +2018,19 @@ void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row, template void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange( - const Cols &cols) { + const Cols& cols) { SCOPED_TIME_STAT(&function_stats_); bool something_to_do = false; - const DenseBitRow &can_decrease = variables_info_.GetCanDecreaseBitRow(); - const DenseBitRow &can_increase = variables_info_.GetCanIncreaseBitRow(); - const DenseRow &reduced_costs = reduced_costs_.GetReducedCosts(); + const 
DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); + const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); + const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts(); const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance(); for (ColIndex col : cols) { const Fractional reduced_cost = reduced_costs[col]; const Fractional sign = - (can_increase.IsSet(col) && reduced_cost < -tolerance) - ? 1.0 - : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0 - : 0.0; + (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0 + : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0 + : 0.0; if (sign != dual_infeasibility_improvement_direction_[col]) { if (sign == 0.0) { --num_dual_infeasible_positions_; @@ -2055,7 +2053,7 @@ void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange( initially_all_zero_scratchpad_.ClearNonZerosIfTooDense(); initially_all_zero_scratchpad_.ClearSparseMask(); - const VariableTypeRow &variable_type = variables_info_.GetTypeRow(); + const VariableTypeRow& variable_type = variables_info_.GetTypeRow(); const Fractional threshold = parameters_.ratio_test_zero_threshold(); basis_factorization_.RightSolve(&initially_all_zero_scratchpad_); if (initially_all_zero_scratchpad_.non_zeros.empty()) { @@ -2083,8 +2081,8 @@ void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange( } Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow( - RowIndex *leaving_row, Fractional *cost_variation, - Fractional *target_bound) { + RowIndex* leaving_row, Fractional* cost_variation, + Fractional* target_bound) { SCOPED_TIME_STAT(&function_stats_); GLOP_RETURN_ERROR_IF_NULL(leaving_row); GLOP_RETURN_ERROR_IF_NULL(cost_variation); @@ -2120,7 +2118,7 @@ Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow( // TODO(user): Reuse parameters_.optimization_rule() to decide if we use // steepest edge or the normal Dantzig pricing. 
- const DenseColumn &squared_norm = dual_edge_norms_.GetEdgeSquaredNorms(); + const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms(); // Now take a leaving variable that maximizes the infeasibility variation and // can leave the basis while being dual-feasible. @@ -2165,7 +2163,7 @@ Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow( template void RevisedSimplex::MakeBoxedVariableDualFeasible( - const BoxedVariableCols &cols, bool update_basic_values) { + const BoxedVariableCols& cols, bool update_basic_values) { SCOPED_TIME_STAT(&function_stats_); std::vector changed_cols; @@ -2173,11 +2171,11 @@ void RevisedSimplex::MakeBoxedVariableDualFeasible( // errors. Otherwise, this leads to cycling on many of the Netlib problems // since this is called at each iteration (because of the bound-flipping ratio // test). - const DenseRow &variable_values = variable_values_.GetDenseRow(); - const DenseRow &reduced_costs = reduced_costs_.GetReducedCosts(); + const DenseRow& variable_values = variable_values_.GetDenseRow(); + const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts(); const Fractional dual_feasibility_tolerance = reduced_costs_.GetDualFeasibilityTolerance(); - const VariableStatusRow &variable_status = variables_info_.GetStatusRow(); + const VariableStatusRow& variable_status = variables_info_.GetStatusRow(); for (const ColIndex col : cols) { const Fractional reduced_cost = reduced_costs[col]; const VariableStatus status = variable_status[col]; @@ -2245,7 +2243,7 @@ void RevisedSimplex::PermuteBasis() { // Fetch the current basis column permutation and return if it is empty which // means the permutation is the identity. 
- const ColumnPermutation &col_perm = + const ColumnPermutation& col_perm = basis_factorization_.GetColumnPermutation(); if (col_perm.empty()) return; @@ -2278,9 +2276,9 @@ Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col, const VariableStatus leaving_variable_status = lower_bound_[leaving_col] == upper_bound_[leaving_col] ? VariableStatus::FIXED_VALUE - : target_bound == lower_bound_[leaving_col] - ? VariableStatus::AT_LOWER_BOUND - : VariableStatus::AT_UPPER_BOUND; + : target_bound == lower_bound_[leaving_col] + ? VariableStatus::AT_LOWER_BOUND + : VariableStatus::AT_UPPER_BOUND; if (variable_values_.Get(leaving_col) != target_bound) { ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) - target_bound); @@ -2326,7 +2324,7 @@ bool RevisedSimplex::NeedsBasisRefactorization(bool refactorize) { return refactorize; } -Status RevisedSimplex::RefactorizeBasisIfNeeded(bool *refactorize) { +Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) { SCOPED_TIME_STAT(&function_stats_); if (NeedsBasisRefactorization(*refactorize)) { GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize()); @@ -2352,7 +2350,7 @@ Status RevisedSimplex::RefactorizeBasisIfNeeded(bool *refactorize) { // enter the basis, and a variable from x_B is selected to leave the basis. // To avoid explicit inversion of B, the algorithm solves two sub-systems: // y.B = c_B and B.d = a (a being the entering column). -Status RevisedSimplex::Minimize(TimeLimit *time_limit) { +Status RevisedSimplex::Minimize(TimeLimit* time_limit) { GLOP_RETURN_ERROR_IF_NULL(time_limit); Cleanup update_deterministic_time_on_return( [this, time_limit]() { AdvanceDeterministicTime(time_limit); }); @@ -2617,7 +2615,7 @@ Status RevisedSimplex::Minimize(TimeLimit *time_limit) { // it fails to make progress. // // Note that the returned status applies to the primal problem! 
-Status RevisedSimplex::DualMinimize(TimeLimit *time_limit) { +Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) { Cleanup update_deterministic_time_on_return( [this, time_limit]() { AdvanceDeterministicTime(time_limit); }); num_consecutive_degenerate_iterations_ = 0; @@ -2918,7 +2916,7 @@ Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const { return objective_scaling_factor_ * (sum + objective_offset_); } -void RevisedSimplex::SetParameters(const GlopParameters ¶meters) { +void RevisedSimplex::SetParameters(const GlopParameters& parameters) { SCOPED_TIME_STAT(&function_stats_); random_.seed(parameters.random_seed()); initial_parameters_ = parameters; @@ -2973,7 +2971,7 @@ void RevisedSimplex::DisplayErrors() const { namespace { std::string StringifyMonomialWithFlags(const Fractional a, - const std::string &x) { + const std::string& x) { return StringifyMonomial( a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions)); } @@ -3020,7 +3018,7 @@ void RevisedSimplex::DisplayInfoOnVariables() const { void RevisedSimplex::DisplayVariableBounds() { if (VLOG_IS_ON(3)) { - const VariableTypeRow &variable_type = variables_info_.GetTypeRow(); + const VariableTypeRow& variable_type = variables_info_.GetTypeRow(); for (ColIndex col(0); col < num_cols_; ++col) { switch (variable_type[col]) { case VariableType::UNCONSTRAINED: @@ -3051,7 +3049,7 @@ void RevisedSimplex::DisplayVariableBounds() { } gtl::ITIVector RevisedSimplex::ComputeDictionary( - const DenseRow *column_scales) { + const DenseRow* column_scales) { gtl::ITIVector dictionary(num_rows_.value()); for (ColIndex col(0); col < num_cols_; ++col) { ComputeDirection(col); @@ -3073,7 +3071,7 @@ gtl::ITIVector RevisedSimplex::ComputeDictionary( } void RevisedSimplex::ComputeBasicVariablesForState( - const LinearProgram &linear_program, const BasisState &state) { + const LinearProgram& linear_program, const BasisState& state) { LoadStateForNextSolve(state); Status status = 
Initialize(linear_program); if (status.ok()) { @@ -3089,7 +3087,7 @@ void RevisedSimplex::DisplayRevisedSimplexDebugInfo() { DisplayInfoOnVariables(); std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue()); - const DenseRow &reduced_costs = reduced_costs_.GetReducedCosts(); + const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts(); for (const ColIndex col : variables_info_.GetNotBasicBitRow()) { absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col], variable_name_[col])); @@ -3098,7 +3096,7 @@ void RevisedSimplex::DisplayRevisedSimplexDebugInfo() { const RevisedSimplexDictionary dictionary(nullptr, this); RowIndex r(0); - for (const SparseRow &row : dictionary) { + for (const SparseRow& row : dictionary) { output.clear(); ColIndex basic_col = basis_[r]; absl::StrAppend(&output, variable_name_[basic_col], " = ", @@ -3149,7 +3147,7 @@ void RevisedSimplex::DisplayProblem() const { } } -void RevisedSimplex::AdvanceDeterministicTime(TimeLimit *time_limit) { +void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) { DCHECK(time_limit != nullptr); const double current_deterministic_time = DeterministicTime(); const double deterministic_time_delta = diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h index 905e427d43..9546c86fad 100644 --- a/ortools/glop/revised_simplex.h +++ b/ortools/glop/revised_simplex.h @@ -52,8 +52,7 @@ // The following are very good references for terminology, data structures, // and algorithms. They all contain a wealth of references. // -// Vasek Chvátal, "Linear Programming," W.H. Freeman, 1983. ISBN -// 978-0716715870. +// Vasek Chvátal, "Linear Programming," W.H. Freeman, 1983. ISBN 978-0716715870. // http://www.amazon.com/dp/0716715872 // // Robert J. Vanderbei, "Linear Programming: Foundations and Extensions," @@ -150,8 +149,8 @@ class RevisedSimplex { RevisedSimplex(); // Sets or gets the algorithm parameters to be used on the next Solve(). 
- void SetParameters(const GlopParameters ¶meters); - const GlopParameters &GetParameters() const { return parameters_; } + void SetParameters(const GlopParameters& parameters); + const GlopParameters& GetParameters() const { return parameters_; } // Solves the given linear program. // @@ -166,15 +165,15 @@ class RevisedSimplex { // and try to use the previously computed solution as a warm-start. To disable // this behavior or give explicit warm-start data, use one of the State*() // functions below. - ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, - TimeLimit *time_limit); + ABSL_MUST_USE_RESULT Status Solve(const LinearProgram& lp, + TimeLimit* time_limit); // Do not use the current solution as a warm-start for the next Solve(). The // next Solve() will behave as if the class just got created. void ClearStateForNextSolve(); // Uses the given state as a warm-start for the next Solve() call. - void LoadStateForNextSolve(const BasisState &state); + void LoadStateForNextSolve(const BasisState& state); // Advanced usage. 
Tells the next Solve() that the matrix inside the linear // program will not change compared to the one used the last time Solve() was @@ -191,12 +190,12 @@ class RevisedSimplex { int64 GetNumberOfIterations() const; Fractional GetVariableValue(ColIndex col) const; Fractional GetReducedCost(ColIndex col) const; - const DenseRow &GetReducedCosts() const; + const DenseRow& GetReducedCosts() const; Fractional GetDualValue(RowIndex row) const; Fractional GetConstraintActivity(RowIndex row) const; VariableStatus GetVariableStatus(ColIndex col) const; ConstraintStatus GetConstraintStatus(RowIndex row) const; - const BasisState &GetState() const; + const BasisState& GetState() const; double DeterministicTime() const; bool objective_limit_reached() const { return objective_limit_reached_; } @@ -209,11 +208,11 @@ class RevisedSimplex { // // Note that when the problem is DUAL_UNBOUNDED, the dual ray is also known as // the Farkas proof of infeasibility of the problem. - const DenseRow &GetPrimalRay() const; - const DenseColumn &GetDualRay() const; + const DenseRow& GetPrimalRay() const; + const DenseColumn& GetDualRay() const; // This is the "dual ray" linear combination of the matrix rows. - const DenseRow &GetDualRayRowCombination() const; + const DenseRow& GetDualRayRowCombination() const; // Returns the index of the column in the basis and the basis factorization. // Note that the order of the column in the basis is important since it is the @@ -221,7 +220,7 @@ class RevisedSimplex { // class. ColIndex GetBasis(RowIndex row) const; - const ScatteredRow &GetUnitRowLeftInverse(RowIndex row) { + const ScatteredRow& GetUnitRowLeftInverse(RowIndex row) { return update_row_.ComputeAndGetUnitRowLeftInverse(row); } @@ -229,7 +228,7 @@ class RevisedSimplex { // have the correspondence between rows and columns of the dictionary. 
RowToColMapping GetBasisVector() const { return basis_; } - const BasisFactorization &GetBasisFactorization() const; + const BasisFactorization& GetBasisFactorization() const; // Returns statistics about this class as a string. std::string StatString(); @@ -238,12 +237,12 @@ class RevisedSimplex { // matrix as a vector of sparse rows so that it is easy to use it on the left // side in the matrix multiplication. Runs in O(num_non_zeros_in_matrix). // TODO(user): Use row scales as well. - RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales); + RowMajorSparseMatrix ComputeDictionary(const DenseRow* column_scales); // Initializes the matrix for the given 'linear_program' and 'state' and // computes the variable values for basic variables using non-basic variables. - void ComputeBasicVariablesForState(const LinearProgram &linear_program, - const BasisState &state); + void ComputeBasicVariablesForState(const LinearProgram& linear_program, + const BasisState& state); private: // Propagates parameters_ to all the other classes that need it. @@ -344,27 +343,27 @@ class RevisedSimplex { // only_change_is_new_cols to true if the only difference is that new columns // have been added, in which case also sets num_new_cols to the number of // new columns. - bool InitializeMatrixAndTestIfUnchanged(const LinearProgram &lp, - bool *only_change_is_new_rows, - bool *only_change_is_new_cols, - ColIndex *num_new_cols); + bool InitializeMatrixAndTestIfUnchanged(const LinearProgram& lp, + bool* only_change_is_new_rows, + bool* only_change_is_new_cols, + ColIndex* num_new_cols); // Initializes bound-related internal data. Returns true if unchanged. - bool InitializeBoundsAndTestIfUnchanged(const LinearProgram &lp); + bool InitializeBoundsAndTestIfUnchanged(const LinearProgram& lp); // Checks if the only change to the bounds is the addition of new columns, // and that the new columns have at least one bound equal to zero. 
bool OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero( - const LinearProgram &lp, ColIndex num_new_cols); + const LinearProgram& lp, ColIndex num_new_cols); // Initializes objective-related internal data. Returns true if unchanged. - bool InitializeObjectiveAndTestIfUnchanged(const LinearProgram &lp); + bool InitializeObjectiveAndTestIfUnchanged(const LinearProgram& lp); // Computes the stopping criterion on the problem objective value. - void InitializeObjectiveLimit(const LinearProgram &lp); + void InitializeObjectiveLimit(const LinearProgram& lp); // Initializes the variable statuses using a warm-start basis. - void InitializeVariableStatusesForWarmStart(const BasisState &state, + void InitializeVariableStatusesForWarmStart(const BasisState& state, ColIndex num_new_cols); // Initializes the starting basis. In most cases it starts by the all slack @@ -374,10 +373,10 @@ class RevisedSimplex { // Sets the initial basis to the given columns, try to factorize it and // recompute the basic variable values. ABSL_MUST_USE_RESULT Status - InitializeFirstBasis(const RowToColMapping &initial_basis); + InitializeFirstBasis(const RowToColMapping& initial_basis); // Entry point for the solver initialization. - ABSL_MUST_USE_RESULT Status Initialize(const LinearProgram &lp); + ABSL_MUST_USE_RESULT Status Initialize(const LinearProgram& lp); // Saves the current variable statuses in solution_state_. void SaveState(); @@ -388,7 +387,7 @@ class RevisedSimplex { // Tries to reduce the initial infeasibility (stored in error_) by using the // singleton columns present in the problem. A singleton column is a column // with only one non-zero. This is used by CreateInitialBasis(). - void UseSingletonColumnInInitialBasis(RowToColMapping *basis); + void UseSingletonColumnInInitialBasis(RowToColMapping* basis); // Returns the number of empty rows in the matrix, i.e. rows where all // the coefficients are zero. 
@@ -435,7 +434,7 @@ class RevisedSimplex { // columns with a ratio <= harris_ratio. template Fractional ComputeHarrisRatioAndLeavingCandidates( - Fractional bound_flip_ratio, SparseColumn *leaving_candidates) const; + Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const; // Chooses the leaving variable, considering the entering column and its // associated reduced cost. If there was a precision issue and the basis is @@ -443,10 +442,10 @@ class RevisedSimplex { // leaving variable is written in *leaving_row, and the step length // is written in *step_length. Status ChooseLeavingVariableRow(ColIndex entering_col, - Fractional reduced_cost, bool *refactorize, - RowIndex *leaving_row, - Fractional *step_length, - Fractional *target_bound); + Fractional reduced_cost, bool* refactorize, + RowIndex* leaving_row, + Fractional* step_length, + Fractional* target_bound); // Chooses the leaving variable for the primal phase-I algorithm. The // algorithm follows more or less what is described in Istvan Maros's book in @@ -455,10 +454,10 @@ class RevisedSimplex { // this file. void PrimalPhaseIChooseLeavingVariableRow(ColIndex entering_col, Fractional reduced_cost, - bool *refactorize, - RowIndex *leaving_row, - Fractional *step_length, - Fractional *target_bound) const; + bool* refactorize, + RowIndex* leaving_row, + Fractional* step_length, + Fractional* target_bound) const; // Chooses an infeasible basic variable. The returned values are: // - leaving_row: the basic index of the infeasible leaving variable @@ -469,8 +468,8 @@ class RevisedSimplex { // - target_bound: the bound at which the leaving variable should go when // leaving the basis. 
ABSL_MUST_USE_RESULT Status DualChooseLeavingVariableRow( - RowIndex *leaving_row, Fractional *cost_variation, - Fractional *target_bound); + RowIndex* leaving_row, Fractional* cost_variation, + Fractional* target_bound); // Updates the prices used by DualChooseLeavingVariableRow() after a simplex // iteration by using direction_. The prices are stored in @@ -482,7 +481,7 @@ class RevisedSimplex { // Updates the prices used by DualChooseLeavingVariableRow() when the reduced // costs of the given columns have changed. template - void DualPhaseIUpdatePriceOnReducedCostChange(const Cols &cols); + void DualPhaseIUpdatePriceOnReducedCostChange(const Cols& cols); // Same as DualChooseLeavingVariableRow() but for the phase I of the dual // simplex. Here the objective is not to minimize the primal infeasibility, @@ -492,8 +491,8 @@ class RevisedSimplex { // and Applications, October 2003, Volume 26, Issue 1, pp 63-81. // http://rd.springer.com/article/10.1023%2FA%3A1025102305440 ABSL_MUST_USE_RESULT Status DualPhaseIChooseLeavingVariableRow( - RowIndex *leaving_row, Fractional *cost_variation, - Fractional *target_bound); + RowIndex* leaving_row, Fractional* cost_variation, + Fractional* target_bound); // Makes sure the boxed variable are dual-feasible by setting them to the // correct bound according to their reduced costs. This is called @@ -504,7 +503,7 @@ class RevisedSimplex { // // If update_basic_values is true, the basic variable values are updated. template - void MakeBoxedVariableDualFeasible(const BoxedVariableCols &cols, + void MakeBoxedVariableDualFeasible(const BoxedVariableCols& cols, bool update_basic_values); // Computes the step needed to move the leaving_row basic variable to the @@ -541,15 +540,15 @@ class RevisedSimplex { // Calls basis_factorization_.Refactorize() depending on the result of // NeedsBasisRefactorization(). Invalidates any data structure that depends // on the current factorization. Sets refactorize to false. 
- Status RefactorizeBasisIfNeeded(bool *refactorize); + Status RefactorizeBasisIfNeeded(bool* refactorize); // Minimize the objective function, be it for satisfiability or for // optimization. Used by Solve(). - ABSL_MUST_USE_RESULT Status Minimize(TimeLimit *time_limit); + ABSL_MUST_USE_RESULT Status Minimize(TimeLimit* time_limit); // Same as Minimize() for the dual simplex algorithm. // TODO(user): remove duplicate code between the two functions. - ABSL_MUST_USE_RESULT Status DualMinimize(TimeLimit *time_limit); + ABSL_MUST_USE_RESULT Status DualMinimize(TimeLimit* time_limit); // Utility functions to return the current ColIndex of the slack column with // given number. Note that currently, such columns are always present in the @@ -561,7 +560,7 @@ class RevisedSimplex { // during the last call to this method. // TODO(user): Update the internals of revised simplex so that the time // limit is updated at the source and remove this method. - void AdvanceDeterministicTime(TimeLimit *time_limit); + void AdvanceDeterministicTime(TimeLimit* time_limit); // Problem status ProblemStatus problem_status_; @@ -679,7 +678,7 @@ class RevisedSimplex { // Temporary memory used by DualMinimize(). std::vector bound_flip_candidates_; - std::vector > pair_to_ignore_; + std::vector> pair_to_ignore_; // Total number of iterations performed. uint64 num_iterations_; @@ -797,8 +796,8 @@ class RevisedSimplexDictionary { // TODO(user): Overload this to take RevisedSimplex* alone when the // caller would normally pass a nullptr for col_scales so this and // ComputeDictionary can take a const& argument. 
- RevisedSimplexDictionary(const DenseRow *col_scales, - RevisedSimplex *revised_simplex) + RevisedSimplexDictionary(const DenseRow* col_scales, + RevisedSimplex* revised_simplex) : dictionary_( ABSL_DIE_IF_NULL(revised_simplex)->ComputeDictionary(col_scales)), basis_vars_(ABSL_DIE_IF_NULL(revised_simplex)->GetBasisVector()) {} diff --git a/ortools/glop/update_row.cc b/ortools/glop/update_row.cc index 10a86da1bb..ca56f55e7e 100644 --- a/ortools/glop/update_row.cc +++ b/ortools/glop/update_row.cc @@ -18,11 +18,11 @@ namespace operations_research { namespace glop { -UpdateRow::UpdateRow(const CompactSparseMatrix &matrix, - const CompactSparseMatrix &transposed_matrix, - const VariablesInfo &variables_info, - const RowToColMapping &basis, - const BasisFactorization &basis_factorization) +UpdateRow::UpdateRow(const CompactSparseMatrix& matrix, + const CompactSparseMatrix& transposed_matrix, + const VariablesInfo& variables_info, + const RowToColMapping& basis, + const BasisFactorization& basis_factorization) : matrix_(matrix), transposed_matrix_(transposed_matrix), variables_info_(variables_info), @@ -48,12 +48,12 @@ void UpdateRow::IgnoreUpdatePosition(ColIndex col) { coefficient_[col] = 0.0; } -const ScatteredRow &UpdateRow::GetUnitRowLeftInverse() const { +const ScatteredRow& UpdateRow::GetUnitRowLeftInverse() const { DCHECK(!compute_update_row_); return unit_row_left_inverse_; } -const ScatteredRow &UpdateRow::ComputeAndGetUnitRowLeftInverse( +const ScatteredRow& UpdateRow::ComputeAndGetUnitRowLeftInverse( RowIndex leaving_row) { Invalidate(); basis_factorization_.TemporaryLeftSolveForUnitRow(RowToColIndex(leaving_row), @@ -149,8 +149,8 @@ void UpdateRow::ComputeUpdateRow(RowIndex leaving_row) { static_cast(matrix_.num_cols().value()))); } -void UpdateRow::ComputeUpdateRowForBenchmark(const DenseRow &lhs, - const std::string &algorithm) { +void UpdateRow::ComputeUpdateRowForBenchmark(const DenseRow& lhs, + const std::string& algorithm) { 
unit_row_left_inverse_.values = lhs; ComputeNonZeros(lhs, &unit_row_left_inverse_filtered_non_zeros_); if (algorithm == "column") { @@ -165,13 +165,13 @@ void UpdateRow::ComputeUpdateRowForBenchmark(const DenseRow &lhs, } } -const DenseRow &UpdateRow::GetCoefficients() const { return coefficient_; } +const DenseRow& UpdateRow::GetCoefficients() const { return coefficient_; } -const ColIndexVector &UpdateRow::GetNonZeroPositions() const { +const ColIndexVector& UpdateRow::GetNonZeroPositions() const { return non_zero_position_list_; } -void UpdateRow::SetParameters(const GlopParameters ¶meters) { +void UpdateRow::SetParameters(const GlopParameters& parameters) { parameters_ = parameters; } diff --git a/ortools/glop/variable_values.cc b/ortools/glop/variable_values.cc index df21f6208a..dc8a6090e3 100644 --- a/ortools/glop/variable_values.cc +++ b/ortools/glop/variable_values.cc @@ -19,11 +19,11 @@ namespace operations_research { namespace glop { -VariableValues::VariableValues(const GlopParameters ¶meters, - const CompactSparseMatrix &matrix, - const RowToColMapping &basis, - const VariablesInfo &variables_info, - const BasisFactorization &basis_factorization) +VariableValues::VariableValues(const GlopParameters& parameters, + const CompactSparseMatrix& matrix, + const RowToColMapping& basis, + const VariablesInfo& variables_info, + const BasisFactorization& basis_factorization) : parameters_(parameters), matrix_(matrix), basis_(basis), @@ -33,8 +33,8 @@ VariableValues::VariableValues(const GlopParameters ¶meters, void VariableValues::SetNonBasicVariableValueFromStatus(ColIndex col) { SCOPED_TIME_STAT(&stats_); - const DenseRow &lower_bounds = variables_info_.GetVariableLowerBounds(); - const DenseRow &upper_bounds = variables_info_.GetVariableUpperBounds(); + const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds(); + const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds(); variable_values_.resize(matrix_.num_cols(), 0.0); switch 
(variables_info_.GetStatusRow()[col]) { case VariableStatus::FIXED_VALUE: @@ -65,9 +65,9 @@ void VariableValues::SetNonBasicVariableValueFromStatus(ColIndex col) { } void VariableValues::ResetAllNonBasicVariableValues() { - const DenseRow &lower_bounds = variables_info_.GetVariableLowerBounds(); - const DenseRow &upper_bounds = variables_info_.GetVariableUpperBounds(); - const VariableStatusRow &statuses = variables_info_.GetStatusRow(); + const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds(); + const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds(); + const VariableStatusRow& statuses = variables_info_.GetStatusRow(); const ColIndex num_cols = matrix_.num_cols(); variable_values_.resize(num_cols, 0.0); for (ColIndex col(0); col < num_cols; ++col) { @@ -141,7 +141,7 @@ Fractional VariableValues::ComputeSumOfPrimalInfeasibilities() const { return sum; } -void VariableValues::UpdateOnPivoting(const ScatteredColumn &direction, +void VariableValues::UpdateOnPivoting(const ScatteredColumn& direction, ColIndex entering_col, Fractional step) { SCOPED_TIME_STAT(&stats_); DCHECK(IsFinite(step)); @@ -165,7 +165,7 @@ void VariableValues::UpdateOnPivoting(const ScatteredColumn &direction, } void VariableValues::UpdateGivenNonBasicVariables( - const std::vector &cols_to_update, bool update_basic_variables) { + const std::vector& cols_to_update, bool update_basic_variables) { SCOPED_TIME_STAT(&stats_); if (!update_basic_variables) { for (ColIndex col : cols_to_update) { @@ -215,11 +215,11 @@ void VariableValues::UpdateGivenNonBasicVariables( initially_all_zero_scratchpad_.non_zeros.clear(); } -const DenseColumn &VariableValues::GetPrimalSquaredInfeasibilities() const { +const DenseColumn& VariableValues::GetPrimalSquaredInfeasibilities() const { return primal_squared_infeasibilities_; } -const DenseBitColumn &VariableValues::GetPrimalInfeasiblePositions() const { +const DenseBitColumn& VariableValues::GetPrimalInfeasiblePositions() const { 
return primal_infeasible_positions_; } @@ -242,7 +242,7 @@ void VariableValues::ResetPrimalInfeasibilityInformation() { } void VariableValues::UpdatePrimalInfeasibilityInformation( - const std::vector &rows) { + const std::vector& rows) { if (primal_squared_infeasibilities_.size() != matrix_.num_rows()) { ResetPrimalInfeasibilityInformation(); return; diff --git a/ortools/glop/variables_info.cc b/ortools/glop/variables_info.cc index d62ae4c6b4..bbe725804b 100644 --- a/ortools/glop/variables_info.cc +++ b/ortools/glop/variables_info.cc @@ -16,9 +16,9 @@ namespace operations_research { namespace glop { -VariablesInfo::VariablesInfo(const CompactSparseMatrix &matrix, - const DenseRow &lower_bound, - const DenseRow &upper_bound) +VariablesInfo::VariablesInfo(const CompactSparseMatrix& matrix, + const DenseRow& lower_bound, + const DenseRow& upper_bound) : matrix_(matrix), lower_bound_(lower_bound), upper_bound_(upper_bound), @@ -94,33 +94,33 @@ void VariablesInfo::UpdateToNonBasicStatus(ColIndex col, SetRelevance(col, relevance); } -const VariableTypeRow &VariablesInfo::GetTypeRow() const { +const VariableTypeRow& VariablesInfo::GetTypeRow() const { return variable_type_; } -const VariableStatusRow &VariablesInfo::GetStatusRow() const { +const VariableStatusRow& VariablesInfo::GetStatusRow() const { return variable_status_; } -const DenseBitRow &VariablesInfo::GetCanIncreaseBitRow() const { +const DenseBitRow& VariablesInfo::GetCanIncreaseBitRow() const { return can_increase_; } -const DenseBitRow &VariablesInfo::GetCanDecreaseBitRow() const { +const DenseBitRow& VariablesInfo::GetCanDecreaseBitRow() const { return can_decrease_; } -const DenseBitRow &VariablesInfo::GetIsRelevantBitRow() const { +const DenseBitRow& VariablesInfo::GetIsRelevantBitRow() const { return relevance_; } -const DenseBitRow &VariablesInfo::GetIsBasicBitRow() const { return is_basic_; } +const DenseBitRow& VariablesInfo::GetIsBasicBitRow() const { return is_basic_; } -const DenseBitRow 
&VariablesInfo::GetNotBasicBitRow() const { +const DenseBitRow& VariablesInfo::GetNotBasicBitRow() const { return not_basic_; } -const DenseBitRow &VariablesInfo::GetNonBasicBoxedVariables() const { +const DenseBitRow& VariablesInfo::GetNonBasicBoxedVariables() const { return non_basic_boxed_variables_; } diff --git a/ortools/graph/bellman_ford.cc b/ortools/graph/bellman_ford.cc index dc6cadf540..e6807a02cb 100644 --- a/ortools/graph/bellman_ford.cc +++ b/ortools/graph/bellman_ford.cc @@ -32,13 +32,13 @@ class BellmanFord { disconnected_distance_(disconnected_distance), distance_(new int64[node_count_]), predecessor_(new int[node_count_]) {} - bool ShortestPath(int end_node, std::vector *nodes); + bool ShortestPath(int end_node, std::vector* nodes); private: void Initialize(); void Update(); bool Check() const; - void FindPath(int dest, std::vector *nodes) const; + void FindPath(int dest, std::vector* nodes) const; const int node_count_; const int start_node_; @@ -87,7 +87,7 @@ bool BellmanFord::Check() const { return true; } -void BellmanFord::FindPath(int dest, std::vector *nodes) const { +void BellmanFord::FindPath(int dest, std::vector* nodes) const { int j = dest; nodes->push_back(j); while (predecessor_[j] != -1) { @@ -96,7 +96,7 @@ void BellmanFord::FindPath(int dest, std::vector *nodes) const { } } -bool BellmanFord::ShortestPath(int end_node, std::vector *nodes) { +bool BellmanFord::ShortestPath(int end_node, std::vector* nodes) { Initialize(); Update(); if (distance_[end_node] == kInfinity) { @@ -112,7 +112,7 @@ bool BellmanFord::ShortestPath(int end_node, std::vector *nodes) { bool BellmanFordShortestPath(int node_count, int start_node, int end_node, std::function graph, int64 disconnected_distance, - std::vector *nodes) { + std::vector* nodes) { BellmanFord bf(node_count, start_node, std::move(graph), disconnected_distance); return bf.ShortestPath(end_node, nodes); diff --git a/ortools/graph/christofides.h b/ortools/graph/christofides.h index 
a8d022698f..13ff581079 100644 --- a/ortools/graph/christofides.h +++ b/ortools/graph/christofides.h @@ -34,9 +34,9 @@ namespace operations_research { using ::util::CompleteGraph; -template < - typename CostType, typename ArcIndex = int64, typename NodeIndex = int32, - typename CostFunction = std::function > +template > class ChristofidesPathSolver { public: enum class MatchingAlgorithm { @@ -100,9 +100,9 @@ class ChristofidesPathSolver { // Computes a minimum weight perfect matching on an undirected graph. template std::vector< - std::pair > -ComputeMinimumWeightMatching(const GraphType &graph, - const WeightFunctionType &weight) { + std::pair> +ComputeMinimumWeightMatching(const GraphType& graph, + const WeightFunctionType& weight) { using ArcIndex = typename GraphType::ArcIndex; using NodeIndex = typename GraphType::NodeIndex; MinCostPerfectMatching matching(graph.num_nodes()); @@ -117,7 +117,7 @@ ComputeMinimumWeightMatching(const GraphType &graph, } MinCostPerfectMatching::Status status = matching.Solve(); DCHECK_EQ(status, MinCostPerfectMatching::OPTIMAL); - std::vector > match; + std::vector> match; for (NodeIndex tail : graph.AllNodes()) { const NodeIndex head = matching.Match(tail); if (tail < head) { // Both arcs are matched for a given edge, we keep one. @@ -134,9 +134,9 @@ ComputeMinimumWeightMatching(const GraphType &graph, // Christofides. 
template std::vector< - std::pair > -ComputeMinimumWeightMatchingWithMIP(const GraphType &graph, - const WeightFunctionType &weight) { + std::pair> +ComputeMinimumWeightMatchingWithMIP(const GraphType& graph, + const WeightFunctionType& weight) { using ArcIndex = typename GraphType::ArcIndex; using NodeIndex = typename GraphType::NodeIndex; MPModelProto model; @@ -152,7 +152,7 @@ ComputeMinimumWeightMatchingWithMIP(const GraphType &graph, const NodeIndex head = graph.Head(arc); if (node < head) { variable_indices[arc] = model.variable_size(); - MPVariableProto *const arc_var = model.add_variable(); + MPVariableProto* const arc_var = model.add_variable(); arc_var->set_lower_bound(0); arc_var->set_upper_bound(1); arc_var->set_is_integer(true); @@ -161,7 +161,7 @@ ComputeMinimumWeightMatchingWithMIP(const GraphType &graph, } // Creating matching constraint: // for all node i, sum(j) arc(i,j) + sum(j) arc(j,i) = 1 - MPConstraintProto *const one_of_ct = model.add_constraint(); + MPConstraintProto* const one_of_ct = model.add_constraint(); one_of_ct->set_lower_bound(1); one_of_ct->set_upper_bound(1); } @@ -171,7 +171,7 @@ ComputeMinimumWeightMatchingWithMIP(const GraphType &graph, if (node < head) { const int arc_var = variable_indices[arc]; DCHECK_GE(arc_var, 0); - MPConstraintProto *one_of_ct = model.mutable_constraint(node); + MPConstraintProto* one_of_ct = model.mutable_constraint(node); one_of_ct->add_var_index(arc_var); one_of_ct->add_coefficient(1); one_of_ct = model.mutable_constraint(head); @@ -193,7 +193,7 @@ ComputeMinimumWeightMatchingWithMIP(const GraphType &graph, CHECK_EQ(status, MPSolver::OPTIMAL); MPSolutionResponse response; mp_solver.FillSolutionResponseProto(&response); - std::vector > matching; + std::vector> matching; for (ArcIndex arc = 0; arc < variable_indices.size(); ++arc) { const int arc_var = variable_indices[arc]; if (arc_var >= 0 && response.variable_value(arc_var) > .9) { @@ -270,7 +270,7 @@ void ChristofidesPathSolver 
reduced_graph(reduced_size); - std::vector > closure_arcs; + std::vector> closure_arcs; switch (matching_) { case MatchingAlgorithm::MINIMUM_WEIGHT_MATCHING: { closure_arcs = ComputeMinimumWeightMatching( diff --git a/ortools/graph/cliques.cc b/ortools/graph/cliques.cc index c11822d34b..9bf67c88fe 100644 --- a/ortools/graph/cliques.cc +++ b/ortools/graph/cliques.cc @@ -49,10 +49,10 @@ inline bool Connects(std::function graph, int i, int j) { // is true, the algorithm stops further exploration and returns. // TODO(user) : rewrite this algorithm without recursion. void Search(std::function graph, - std::function &)> callback, - int *input_candidates, int first_candidate_index, - int num_input_candidates, std::vector *current_clique, - bool *stop) { + std::function&)> callback, + int* input_candidates, int first_candidate_index, + int num_input_candidates, std::vector* current_clique, + bool* stop) { // The pivot is a node from input_candidates that is disconnected from the // minimal number of nodes in the actual candidates (excluding the "not" set); // the algorithm then selects only candidates that are disconnected from the @@ -187,7 +187,7 @@ void Search(std::function graph, class FindAndEliminate { public: FindAndEliminate(std::function graph, int node_count, - std::function &)> callback) + std::function&)> callback) : graph_(graph), node_count_(node_count), callback_(callback) {} bool GraphCallback(int node1, int node2) { @@ -199,7 +199,7 @@ class FindAndEliminate { return Connects(graph_, node1, node2); } - bool SolutionCallback(const std::vector &solution) { + bool SolutionCallback(const std::vector& solution) { const int size = solution.size(); if (size > 1) { for (int i = 0; i < size - 1; ++i) { @@ -216,15 +216,15 @@ class FindAndEliminate { private: std::function graph_; int node_count_; - std::function &)> callback_; - absl::flat_hash_set > visited_; + std::function&)> callback_; + absl::flat_hash_set> visited_; }; } // namespace // This method implements 
the 'version2' of the Bron-Kerbosch // algorithm to find all maximal cliques in a undirected graph. void FindCliques(std::function graph, int node_count, - std::function &)> callback) { + std::function&)> callback) { std::unique_ptr initial_candidates(new int[node_count]); std::vector actual; @@ -237,9 +237,8 @@ void FindCliques(std::function graph, int node_count, &stop); } -void CoverArcsByCliques( - std::function graph, int node_count, - std::function &)> callback) { +void CoverArcsByCliques(std::function graph, int node_count, + std::function&)> callback) { FindAndEliminate cache(graph, node_count, callback); std::unique_ptr initial_candidates(new int[node_count]); std::vector actual; @@ -247,8 +246,8 @@ void CoverArcsByCliques( std::function cached_graph = [&cache](int i, int j) { return cache.GraphCallback(i, j); }; - std::function &)> cached_callback = - [&cache](const std::vector &res) { + std::function&)> cached_callback = + [&cache](const std::vector& res) { return cache.SolutionCallback(res); }; diff --git a/ortools/graph/cliques.h b/ortools/graph/cliques.h index b3af6af911..acbbf45784 100644 --- a/ortools/graph/cliques.h +++ b/ortools/graph/cliques.h @@ -42,7 +42,7 @@ namespace operations_research { // This function takes ownership of 'callback' and deletes it after it has run. // If 'callback' returns true, then the search for cliques stops. void FindCliques(std::function graph, int node_count, - std::function &)> callback); + std::function&)> callback); // Covers the maximum number of arcs of the graph with cliques. The graph // is described by the graph callback. graph->Run(i, j) indicates if @@ -51,15 +51,14 @@ void FindCliques(std::function graph, int node_count, // It calls 'callback' upon each clique. // It ignores cliques of size 1. void CoverArcsByCliques(std::function graph, int node_count, - std::function &)> callback); + std::function&)> callback); // Possible return values of the callback for reporting cliques. 
The returned // value determines whether the algorithm will continue the search. enum class CliqueResponse { // The algorithm will continue searching for other maximal cliques. CONTINUE, - // The algorithm will stop the search immediately. The search can be - // resumed + // The algorithm will stop the search immediately. The search can be resumed // by calling BronKerboschAlgorithm::Run (resp. RunIterations) again. STOP }; @@ -158,7 +157,7 @@ class BronKerboschAlgorithm { // this clique. See the description of the values of 'CliqueResponse' for more // details. using CliqueCallback = - std::function &)>; + std::function&)>; // Initializes the Bron-Kerbosch algorithm for the given graph and clique // callback function. @@ -191,7 +190,7 @@ class BronKerboschAlgorithm { // stopped. When it returns COMPLETED any subsequent call to the method will // resume the search from the beginning. BronKerboschAlgorithmStatus RunWithTimeLimit(int64 max_num_iterations, - TimeLimit *time_limit); + TimeLimit* time_limit); // Runs the Bron-Kerbosch algorithm for at most kint64max iterations, until // the time limit is excceded or until all cliques are enumerated. In @@ -202,7 +201,7 @@ class BronKerboschAlgorithm { // again and it will resume the work where the previous call had stopped. When // it returns COMPLETED any subsequent call to the method will resume the // search from the beginning. - BronKerboschAlgorithmStatus RunWithTimeLimit(TimeLimit *time_limit) { + BronKerboschAlgorithmStatus RunWithTimeLimit(TimeLimit* time_limit) { return RunWithTimeLimit(kint64max, time_limit); } @@ -219,14 +218,14 @@ class BronKerboschAlgorithm { // BronKerboschAlgorithm::InitializeState. 
struct State { State() {} - State(const State &other) + State(const State& other) : pivot(other.pivot), num_remaining_candidates(other.num_remaining_candidates), candidates(other.candidates), first_candidate_index(other.first_candidate_index), candidate_for_recursion(other.candidate_for_recursion) {} - State &operator=(const State &other) { + State& operator=(const State& other) { pivot = other.pivot; num_remaining_candidates = other.num_remaining_candidates; candidates = other.candidates; @@ -310,7 +309,7 @@ class BronKerboschAlgorithm { // Initializes the given state. Runs the pivot selection algorithm in the // state. - void InitializeState(State *state); + void InitializeState(State* state); // Returns true if (node1, node2) is an arc in the graph or if node1 == node2. inline bool IsArc(NodeIndex node1, NodeIndex node2) const { @@ -320,10 +319,10 @@ class BronKerboschAlgorithm { // Selects the next node for recursion. The selected node is either the pivot // (if it is not in the set "not") or a node that is disconnected from the // pivot. - CandidateIndex SelectCandidateIndexForRecursion(State *state); + CandidateIndex SelectCandidateIndexForRecursion(State* state); // Returns a human-readable string representation of the clique. - std::string CliqueDebugString(const std::vector &clique); + std::string CliqueDebugString(const std::vector& clique); // The callback called when the algorithm needs to determine if (node1, node2) // is an arc in the graph. @@ -352,11 +351,11 @@ class BronKerboschAlgorithm { // The current time limit used by the solver. The time limit is assigned by // the Run methods and it can be different for each call to run. 
- TimeLimit *time_limit_; + TimeLimit* time_limit_; }; template -void BronKerboschAlgorithm::InitializeState(State *state) { +void BronKerboschAlgorithm::InitializeState(State* state) { DCHECK(state != nullptr); const int num_candidates = state->candidates.size(); int num_disconnected_candidates = num_candidates; @@ -391,7 +390,7 @@ void BronKerboschAlgorithm::InitializeState(State *state) { template typename BronKerboschAlgorithm::CandidateIndex BronKerboschAlgorithm::SelectCandidateIndexForRecursion( - State *state) { + State* state) { DCHECK(state != nullptr); CandidateIndex disconnected_node_index = std::max(state->first_candidate_index, state->candidate_for_recursion); @@ -410,7 +409,7 @@ void BronKerboschAlgorithm::Initialize() { states_.reserve(num_nodes_); states_.emplace_back(); - State *const root_state = &states_.back(); + State* const root_state = &states_.back(); root_state->first_candidate_index = 0; root_state->candidate_for_recursion = 0; root_state->candidates.resize(num_nodes_, 0); @@ -426,7 +425,7 @@ void BronKerboschAlgorithm::PopState() { DCHECK(!states_.empty()); states_.pop_back(); if (!states_.empty()) { - State *const state = &states_.back(); + State* const state = &states_.back(); current_clique_.pop_back(); state->MoveFirstCandidateToNotSet(); } @@ -434,7 +433,7 @@ void BronKerboschAlgorithm::PopState() { template std::string BronKerboschAlgorithm::CliqueDebugString( - const std::vector &clique) { + const std::vector& clique) { std::string message = "Clique: [ "; for (const NodeIndex node : clique) { absl::StrAppend(&message, node, " "); @@ -451,7 +450,7 @@ void BronKerboschAlgorithm::PushState(NodeIndex selected) { << ", selected node = " << selected; gtl::ITIVector new_candidates; - State *const previous_state = &states_.back(); + State* const previous_state = &states_.back(); const double deterministic_time = kPushStateDeterministicTimeSecondsPerCandidate * previous_state->candidates.size(); @@ -499,7 +498,7 @@ void 
BronKerboschAlgorithm::PushState(NodeIndex selected) { // vector data was re-allocated in the process). We must avoid using // previous_state below here. states_.emplace_back(); - State *const new_state = &states_.back(); + State* const new_state = &states_.back(); new_state->candidates.swap(new_candidates); new_state->first_candidate_index = new_first_candidate_index; @@ -508,7 +507,7 @@ void BronKerboschAlgorithm::PushState(NodeIndex selected) { template BronKerboschAlgorithmStatus BronKerboschAlgorithm::RunWithTimeLimit( - int64 max_num_iterations, TimeLimit *time_limit) { + int64 max_num_iterations, TimeLimit* time_limit) { CHECK(time_limit != nullptr); time_limit_ = time_limit; if (states_.empty()) { @@ -518,7 +517,7 @@ BronKerboschAlgorithmStatus BronKerboschAlgorithm::RunWithTimeLimit( !states_.empty() && num_remaining_iterations_ > 0 && !time_limit->LimitReached(); --num_remaining_iterations_) { - State *const state = &states_.back(); + State* const state = &states_.back(); DVLOG(2) << "Loop: " << states_.size() << " states, " << state->num_remaining_candidates << " candidate to explore\n" << state->DebugString(); @@ -533,8 +532,8 @@ BronKerboschAlgorithmStatus BronKerboschAlgorithm::RunWithTimeLimit( const NodeIndex selected = state->candidates[selected_index]; DVLOG(2) << "Selected candidate = " << selected; - NodeIndex &f = state->candidates[state->first_candidate_index]; - NodeIndex &s = state->candidates[selected_index]; + NodeIndex& f = state->candidates[state->first_candidate_index]; + NodeIndex& s = state->candidates[selected_index]; std::swap(f, s); PushState(selected); diff --git a/ortools/graph/connected_components.cc b/ortools/graph/connected_components.cc index f663d9c09b..86f631e835 100644 --- a/ortools/graph/connected_components.cc +++ b/ortools/graph/connected_components.cc @@ -70,7 +70,7 @@ int DenseConnectedComponentsFinder::FindRoot(int node) { return root; } -const std::vector &DenseConnectedComponentsFinder::GetComponentRoots() { +const 
std::vector& DenseConnectedComponentsFinder::GetComponentRoots() { const int num_nodes = GetNumberOfNodes(); if (num_nodes != num_nodes_at_last_get_roots_call_) { // Add potential roots for each new node that did not exist the last time @@ -84,8 +84,7 @@ const std::vector &DenseConnectedComponentsFinder::GetComponentRoots() { } // Remove the roots that have been merged with other components. Each node - // only gets removed once from the roots vector, so the cost of FindRoot() - // is + // only gets removed once from the roots vector, so the cost of FindRoot() is // amortized against adding the edge. gtl::STLEraseAllFromSequenceIf( &roots_, [&](const int node) { return node != FindRoot(node); }); @@ -150,7 +149,7 @@ std::vector DenseConnectedComponentsFinder::GetComponentIds() { std::vector component_ids(GetNumberOfNodes(), -1); int current_component = 0; for (int node = 0; node < GetNumberOfNodes(); ++node) { - int &root_component = component_ids[FindRoot(node)]; + int& root_component = component_ids[FindRoot(node)]; if (root_component < 0) { // This is the first node in a yet unseen component. root_component = current_component; diff --git a/ortools/graph/connected_components.h b/ortools/graph/connected_components.h index d75808248e..a474c2e69a 100644 --- a/ortools/graph/connected_components.h +++ b/ortools/graph/connected_components.h @@ -71,7 +71,7 @@ namespace util { // GetConnectedComponents(graph); // returns [0, 0, 1, 0, 1, 0]. 
template std::vector GetConnectedComponents(int num_nodes, - const UndirectedGraph &graph); + const UndirectedGraph& graph); } // namespace util // NOTE(user): The rest of the functions below should also be in namespace @@ -82,10 +82,10 @@ class DenseConnectedComponentsFinder { public: DenseConnectedComponentsFinder() {} - DenseConnectedComponentsFinder(const DenseConnectedComponentsFinder &) = + DenseConnectedComponentsFinder(const DenseConnectedComponentsFinder&) = delete; - DenseConnectedComponentsFinder &operator=( - const DenseConnectedComponentsFinder &) = delete; + DenseConnectedComponentsFinder& operator=( + const DenseConnectedComponentsFinder&) = delete; // The main API is the same as ConnectedComponentsFinder (below): see the // homonymous functions there. @@ -97,7 +97,7 @@ class DenseConnectedComponentsFinder { // Gets the current set of root nodes in sorted order. Runs in amortized // O(#components) time. - const std::vector &GetComponentRoots(); + const std::vector& GetComponentRoots(); // Sets the number of nodes in the graph. The graph can only grow: this // dies if "num_nodes" is lower or equal to any of the values ever given @@ -150,8 +150,8 @@ struct ConnectedComponentsTypeHelper { // like a hash functor. template struct SelectContainer< - U, absl::enable_if_t()(std::declval()))>::value> > { + U, absl::enable_if_t()( + std::declval()))>::value>> { using Set = absl::flat_hash_set; using Map = absl::flat_hash_map; }; @@ -194,14 +194,14 @@ struct ConnectedComponentsTypeHelper { // ... and so on... // Of course, in this usage, the connected components finder retains // these pointers through its lifetime (though it doesn't dereference them). -template > +template > class ConnectedComponentsFinder { public: // Constructs a connected components finder. 
ConnectedComponentsFinder() {} - ConnectedComponentsFinder(const ConnectedComponentsFinder &) = delete; - ConnectedComponentsFinder &operator=(const ConnectedComponentsFinder &) = + ConnectedComponentsFinder(const ConnectedComponentsFinder&) = delete; + ConnectedComponentsFinder& operator=(const ConnectedComponentsFinder&) = delete; // Adds a node in the graph. It is OK to add the same node more than @@ -238,21 +238,21 @@ class ConnectedComponentsFinder { // - The first one returns the result, and stores each component in a vector. // This is the preferred version. // - The second one populates the result, and stores each component in a set. - std::vector > FindConnectedComponents() { + std::vector> FindConnectedComponents() { const auto component_ids = delegate_.GetComponentIds(); - std::vector > components(delegate_.GetNumberOfComponents()); - for (const auto &elem_id : index_) { + std::vector> components(delegate_.GetNumberOfComponents()); + for (const auto& elem_id : index_) { components[component_ids[elem_id.second]].push_back(elem_id.first); } return components; } void FindConnectedComponents( std::vector::Set> *components) { + T, CompareOrHashT>::Set>* components) { const auto component_ids = delegate_.GetComponentIds(); components->clear(); components->resize(delegate_.GetNumberOfComponents()); - for (const auto &elem_id : index_) { + for (const auto& elem_id : index_) { components->at(component_ids[elem_id.second]).insert(elem_id.first); } } @@ -294,7 +294,7 @@ class ConnectedComponentsFinder { namespace util { template std::vector GetConnectedComponents(int num_nodes, - const UndirectedGraph &graph) { + const UndirectedGraph& graph) { std::vector component_of_node(num_nodes, -1); std::vector bfs_queue; int num_components = 0; diff --git a/ortools/graph/dijkstra.cc b/ortools/graph/dijkstra.cc index b26204d9a6..687d974b1e 100644 --- a/ortools/graph/dijkstra.cc +++ b/ortools/graph/dijkstra.cc @@ -27,7 +27,7 @@ namespace { // Priority queue element class 
Element { public: - bool operator<(const Element &other) const { + bool operator<(const Element& other) const { return distance_ != other.distance_ ? distance_ > other.distance_ : node_ > other.node_; } @@ -59,7 +59,7 @@ class DijkstraSP { predecessor_(new int[node_count]), elements_(node_count) {} - bool ShortestPath(int end_node, std::vector *nodes) { + bool ShortestPath(int end_node, std::vector* nodes) { Initialize(); bool found = false; while (!frontier_.IsEmpty()) { @@ -96,7 +96,7 @@ class DijkstraSP { } } - int SelectClosestNode(int64 *distance) { + int SelectClosestNode(int64* distance) { const int node = frontier_.Top()->node(); *distance = frontier_.Top()->distance(); frontier_.Pop(); @@ -106,7 +106,7 @@ class DijkstraSP { } void Update(int node) { - for (const auto &other_node : not_visited_) { + for (const auto& other_node : not_visited_) { const int64 graph_node_i = graph_(node, other_node); if (graph_node_i != disconnected_distance_) { if (added_to_the_frontier_.find(other_node) == @@ -124,7 +124,7 @@ class DijkstraSP { } } - void FindPath(int dest, std::vector *nodes) { + void FindPath(int dest, std::vector* nodes) { int j = dest; nodes->push_back(j); while (predecessor_[j] != -1) { @@ -147,8 +147,8 @@ class DijkstraSP { bool DijkstraShortestPath(int node_count, int start_node, int end_node, std::function graph, int64 disconnected_distance, - std::vector *nodes) { - DijkstraSP > bf( + std::vector* nodes) { + DijkstraSP> bf( node_count, start_node, std::move(graph), disconnected_distance); return bf.ShortestPath(end_node, nodes); } @@ -156,9 +156,9 @@ bool DijkstraShortestPath(int node_count, int start_node, int end_node, bool StableDijkstraShortestPath(int node_count, int start_node, int end_node, std::function graph, int64 disconnected_distance, - std::vector *nodes) { - DijkstraSP > bf(node_count, start_node, std::move(graph), - disconnected_distance); + std::vector* nodes) { + DijkstraSP> bf(node_count, start_node, std::move(graph), + 
disconnected_distance); return bf.ShortestPath(end_node, nodes); } } // namespace operations_research diff --git a/ortools/graph/ebert_graph.h b/ortools/graph/ebert_graph.h index a8ddffca31..cf4c341b53 100644 --- a/ortools/graph/ebert_graph.h +++ b/ortools/graph/ebert_graph.h @@ -319,7 +319,7 @@ class StarGraphBase { // Iterator class for traversing all the nodes in the graph. class NodeIterator { public: - explicit NodeIterator(const DerivedGraph &graph) + explicit NodeIterator(const DerivedGraph& graph) : graph_(graph), head_(graph_.StartNode(kFirstNode)) {} // Returns true unless all the nodes have been traversed. @@ -333,7 +333,7 @@ class StarGraphBase { private: // A reference to the current DerivedGraph considered. - const DerivedGraph &graph_; + const DerivedGraph& graph_; // The index of the current node considered. NodeIndexType head_; @@ -342,7 +342,7 @@ class StarGraphBase { // Iterator class for traversing the arcs in the graph. class ArcIterator { public: - explicit ArcIterator(const DerivedGraph &graph) + explicit ArcIterator(const DerivedGraph& graph) : graph_(graph), arc_(graph_.StartArc(kFirstArc)) {} // Returns true unless all the arcs have been traversed. @@ -356,7 +356,7 @@ class StarGraphBase { private: // A reference to the current DerivedGraph considered. - const DerivedGraph &graph_; + const DerivedGraph& graph_; // The index of the current arc considered. ArcIndexType arc_; @@ -365,7 +365,7 @@ class StarGraphBase { // Iterator class for traversing the outgoing arcs associated to a given node. class OutgoingArcIterator { public: - OutgoingArcIterator(const DerivedGraph &graph, NodeIndexType node) + OutgoingArcIterator(const DerivedGraph& graph, NodeIndexType node) : graph_(graph), node_(graph_.StartNode(node)), arc_(graph_.StartArc(graph_.FirstOutgoingArc(node))) { @@ -374,7 +374,7 @@ class StarGraphBase { // This constructor takes an arc as extra argument and makes the iterator // start at arc. 
- OutgoingArcIterator(const DerivedGraph &graph, NodeIndexType node, + OutgoingArcIterator(const DerivedGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), node_(graph_.StartNode(node)), @@ -383,7 +383,7 @@ class StarGraphBase { } // Can only assign from an iterator on the same graph. - void operator=(const OutgoingArcIterator &iterator) { + void operator=(const OutgoingArcIterator& iterator) { DCHECK(&iterator.graph_ == &graph_); node_ = iterator.node_; arc_ = iterator.arc_; @@ -413,7 +413,7 @@ class StarGraphBase { } // A reference to the current DerivedGraph considered. - const DerivedGraph &graph_; + const DerivedGraph& graph_; // The index of the node on which arcs are iterated. NodeIndexType node_; @@ -504,14 +504,14 @@ class StarGraphBase { private: // Shorthand: returns a const DerivedGraph*-typed version of our // "this" pointer. - inline const DerivedGraph *ThisAsDerived() const { - return static_cast(this); + inline const DerivedGraph* ThisAsDerived() const { + return static_cast(this); } // Shorthand: returns a DerivedGraph*-typed version of our "this" // pointer. 
- inline DerivedGraph *ThisAsDerived() { - return static_cast(this); + inline DerivedGraph* ThisAsDerived() { + return static_cast(this); } }; @@ -519,7 +519,7 @@ template class PermutationIndexComparisonByArcHead { public: explicit PermutationIndexComparisonByArcHead( - const ZVector &head) + const ZVector& head) : head_(head) {} bool operator()(ArcIndexType a, ArcIndexType b) const { @@ -527,7 +527,7 @@ class PermutationIndexComparisonByArcHead { } private: - const ZVector &head_; + const ZVector& head_; }; template @@ -573,8 +573,8 @@ class ForwardStaticGraph public: CycleHandlerForAnnotatedArcs( - PermutationCycleHandler *annotation_handler, - NodeIndexType *data) + PermutationCycleHandler* annotation_handler, + NodeIndexType* data) : ArrayIndexCycleHandler(&data[kFirstArc]), annotation_handler_(annotation_handler) {} @@ -595,7 +595,7 @@ class ForwardStaticGraph } private: - PermutationCycleHandler *annotation_handler_; + PermutationCycleHandler* annotation_handler_; DISALLOW_COPY_AND_ASSIGN(CycleHandlerForAnnotatedArcs); }; @@ -621,18 +621,18 @@ class ForwardStaticGraph ForwardStaticGraph( const NodeIndexType num_nodes, const ArcIndexType num_arcs, const bool sort_arcs_by_head, - std::vector > *client_input_arcs, + std::vector >* client_input_arcs, // TODO(user): For some reason, SWIG breaks if the // operations_research namespace is not explicit in the // following argument declaration. - operations_research::PermutationCycleHandler - *const client_cycle_handler) { + operations_research::PermutationCycleHandler* const + client_cycle_handler) { max_num_arcs_ = num_arcs; num_arcs_ = num_arcs; max_num_nodes_ = num_nodes; // A more convenient name for a parameter required by style to be // a pointer, because we modify its referent. 
- std::vector > &input_arcs = + std::vector >& input_arcs = *client_input_arcs; // We coopt the first_incident_arc_ array as a node-indexed vector @@ -1020,8 +1020,8 @@ class EbertGraphBase #if !SWIG template void GroupForwardArcsByFunctor( - const ArcIndexTypeStrictWeakOrderingFunctor &compare, - PermutationCycleHandler *annotation_handler) { + const ArcIndexTypeStrictWeakOrderingFunctor& compare, + PermutationCycleHandler* annotation_handler) { std::unique_ptr arc_permutation( new ArcIndexType[end_arc_index()]); @@ -1048,8 +1048,8 @@ class EbertGraphBase : public PermutationCycleHandler { public: CycleHandlerForAnnotatedArcs( - PermutationCycleHandler *annotation_handler, - DerivedGraph *graph) + PermutationCycleHandler* annotation_handler, + DerivedGraph* graph) : annotation_handler_(annotation_handler), graph_(graph), head_temp_(kNilNode), @@ -1084,7 +1084,7 @@ class EbertGraphBase // kNilArc value to mark entries in the array that have been // processed already. There is no need to be able to recover the // original permutation array entries once they have been seen. - void SetSeen(ArcIndexType *permutation_element) const override { + void SetSeen(ArcIndexType* permutation_element) const override { *permutation_element = kNilArc; } @@ -1095,8 +1095,8 @@ class EbertGraphBase ~CycleHandlerForAnnotatedArcs() override {} private: - PermutationCycleHandler *annotation_handler_; - DerivedGraph *graph_; + PermutationCycleHandler* annotation_handler_; + DerivedGraph* graph_; NodeIndexType head_temp_; NodeIndexType tail_temp_; @@ -1154,14 +1154,14 @@ class EbertGraphBase private: // Shorthand: returns a const DerivedGraph*-typed version of our // "this" pointer. - inline const DerivedGraph *ThisAsDerived() const { - return static_cast(this); + inline const DerivedGraph* ThisAsDerived() const { + return static_cast(this); } // Shorthand: returns a DerivedGraph*-typed version of our "this" // pointer. 
- inline DerivedGraph *ThisAsDerived() { - return static_cast(this); + inline DerivedGraph* ThisAsDerived() { + return static_cast(this); } void InitializeInternal(NodeIndexType max_num_nodes, @@ -1236,7 +1236,7 @@ class EbertGraph // graph. class OutgoingOrOppositeIncomingArcIterator { public: - OutgoingOrOppositeIncomingArcIterator(const EbertGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const EbertGraph& graph, NodeIndexType node) : graph_(graph), node_(graph_.StartNode(node)), @@ -1247,7 +1247,7 @@ class EbertGraph // This constructor takes an arc as extra argument and makes the iterator // start at arc. - OutgoingOrOppositeIncomingArcIterator(const EbertGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const EbertGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), node_(graph_.StartNode(node)), @@ -1256,7 +1256,7 @@ class EbertGraph } // Can only assign from an iterator on the same graph. - void operator=(const OutgoingOrOppositeIncomingArcIterator &iterator) { + void operator=(const OutgoingOrOppositeIncomingArcIterator& iterator) { DCHECK(&iterator.graph_ == &graph_); node_ = iterator.node_; arc_ = iterator.arc_; @@ -1285,7 +1285,7 @@ class EbertGraph return true; } // A reference to the current EbertGraph considered. - const EbertGraph &graph_; + const EbertGraph& graph_; // The index of the node on which arcs are iterated. NodeIndexType node_; @@ -1297,7 +1297,7 @@ class EbertGraph // Iterator class for traversing the incoming arcs associated to a given node. class IncomingArcIterator { public: - IncomingArcIterator(const EbertGraph &graph, NodeIndexType node) + IncomingArcIterator(const EbertGraph& graph, NodeIndexType node) : graph_(graph), node_(graph_.StartNode(node)), arc_(graph_.StartArc(graph_.FirstIncomingArc(node))) { @@ -1306,7 +1306,7 @@ class EbertGraph // This constructor takes an arc as extra argument and makes the iterator // start at arc. 
- IncomingArcIterator(const EbertGraph &graph, NodeIndexType node, + IncomingArcIterator(const EbertGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), node_(graph_.StartNode(node)), @@ -1316,7 +1316,7 @@ class EbertGraph } // Can only assign from an iterator on the same graph. - void operator=(const IncomingArcIterator &iterator) { + void operator=(const IncomingArcIterator& iterator) { DCHECK(&iterator.graph_ == &graph_); node_ = iterator.node_; arc_ = iterator.arc_; @@ -1347,7 +1347,7 @@ class EbertGraph return true; } // A reference to the current EbertGraph considered. - const EbertGraph &graph_; + const EbertGraph& graph_; // The index of the node on which arcs are iterated. NodeIndexType node_; @@ -1870,7 +1870,7 @@ namespace or_internal { // The TailArrayBuilder for graphs with reverse arcs does nothing. template struct TailArrayBuilder { - explicit TailArrayBuilder(GraphType *unused_graph) {} + explicit TailArrayBuilder(GraphType* unused_graph) {} bool BuildTailArray() const { return true; } }; @@ -1880,11 +1880,11 @@ struct TailArrayBuilder { // constructor. template struct TailArrayBuilder { - explicit TailArrayBuilder(GraphType *graph) : graph_(graph) {} + explicit TailArrayBuilder(GraphType* graph) : graph_(graph) {} bool BuildTailArray() const { return graph_->BuildTailArray(); } - GraphType *const graph_; + GraphType* const graph_; }; // The TailArrayReleaser class template is not expected to be used by @@ -1893,7 +1893,7 @@ struct TailArrayBuilder { // The TailArrayReleaser for graphs with reverse arcs does nothing. template struct TailArrayReleaser { - explicit TailArrayReleaser(GraphType *unused_graph) {} + explicit TailArrayReleaser(GraphType* unused_graph) {} void ReleaseTailArray() const {} }; @@ -1903,11 +1903,11 @@ struct TailArrayReleaser { // constructor. 
template struct TailArrayReleaser { - explicit TailArrayReleaser(GraphType *graph) : graph_(graph) {} + explicit TailArrayReleaser(GraphType* graph) : graph_(graph) {} void ReleaseTailArray() const { graph_->ReleaseTailArray(); } - GraphType *const graph_; + GraphType* const graph_; }; } // namespace or_internal @@ -1915,7 +1915,7 @@ struct TailArrayReleaser { template class TailArrayManager { public: - explicit TailArrayManager(GraphType *g) : graph_(g) {} + explicit TailArrayManager(GraphType* g) : graph_(g) {} bool BuildTailArrayFromAdjacencyListsIfForwardGraph() const { or_internal::TailArrayBuilder class ArcFunctorOrderingByTailAndHead { public: - explicit ArcFunctorOrderingByTailAndHead(const GraphType &graph) + explicit ArcFunctorOrderingByTailAndHead(const GraphType& graph) : graph_(graph) {} bool operator()(typename GraphType::ArcIndex a, @@ -1949,7 +1949,7 @@ class ArcFunctorOrderingByTailAndHead { } private: - const GraphType &graph_; + const GraphType& graph_; }; namespace or_internal { @@ -1988,9 +1988,9 @@ class GraphBuilderFromArcs { } // Builds the graph from the given arcs. 
- GraphType *Graph(PermutationCycleHandler - *client_cycle_handler) { - GraphType *graph = new GraphType(max_num_nodes_, num_arcs_, sort_arcs_, + GraphType* Graph(PermutationCycleHandler* + client_cycle_handler) { + GraphType* graph = new GraphType(max_num_nodes_, num_arcs_, sort_arcs_, &arcs_, client_cycle_handler); delete this; return graph; @@ -2039,8 +2039,8 @@ class GraphBuilderFromArcs { return graph_->AddArc(tail, head); } - GraphType *Graph(PermutationCycleHandler - *client_cycle_handler) { + GraphType* Graph(PermutationCycleHandler* + client_cycle_handler) { if (sort_arcs_) { TailArrayManager tail_array_manager(graph_); tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph(); @@ -2048,13 +2048,13 @@ class GraphBuilderFromArcs { graph_->GroupForwardArcsByFunctor(arc_ordering, client_cycle_handler); tail_array_manager.ReleaseTailArrayIfForwardGraph(); } - GraphType *result = graph_; + GraphType* result = graph_; delete this; return result; } private: - GraphType *const graph_; + GraphType* const graph_; const bool sort_arcs_; }; @@ -2085,7 +2085,7 @@ class AnnotatedGraphBuildManager // and no arcs). // Returns false on an error. template -bool BuildLineGraph(const GraphType &graph, GraphType *const line_graph) { +bool BuildLineGraph(const GraphType& graph, GraphType* const line_graph) { if (line_graph == nullptr) { LOG(DFATAL) << "line_graph must not be NULL"; return false; diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index 67b9b7be41..5234bd6c4c 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -261,17 +261,17 @@ class BaseGraph { // TODO(user): remove the public functions below. They are just here during // the transition from the old ebert_graph api to this new graph api. 
template - void GroupForwardArcsByFunctor(const A &a, B *b) { + void GroupForwardArcsByFunctor(const A& a, B* b) { LOG(FATAL) << "Not supported"; } ArcIndexType max_end_arc_index() const { return arc_capacity_; } protected: // Functions commented when defined because they are implementation details. - void ComputeCumulativeSum(std::vector *v); - void BuildStartAndForwardHead(SVector *head, - std::vector *start, - std::vector *permutation); + void ComputeCumulativeSum(std::vector* v); + void BuildStartAndForwardHead(SVector* head, + std::vector* start, + std::vector* permutation); NodeIndexType num_nodes_; NodeIndexType node_capacity_; @@ -337,7 +337,7 @@ class ListGraph : public BaseGraph { // // Note that some implementations become immutable after calling Build(). void Build() { Build(nullptr); } - void Build(std::vector *permutation); + void Build(std::vector* permutation); // Do not use directly. class OutgoingArcIterator; @@ -424,7 +424,7 @@ class StaticGraph : public BaseGraph { // This loops over the heads of the OutgoingArcs(node). It is just a more // convenient way to achieve this. Moreover this interface is used by some // graph algorithms. 
- BeginEndWrapper operator[](NodeIndexType node) const; + BeginEndWrapper operator[](NodeIndexType node) const; void ReserveNodes(NodeIndexType bound) override; void ReserveArcs(ArcIndexType bound) override; @@ -432,7 +432,7 @@ class StaticGraph : public BaseGraph { ArcIndexType AddArc(NodeIndexType tail, NodeIndexType head); void Build() { Build(nullptr); } - void Build(std::vector *permutation); + void Build(std::vector* permutation); private: ArcIndexType DirectArcLimit(NodeIndexType node) const { @@ -525,7 +525,7 @@ class ReverseArcListGraph ArcIndexType AddArc(NodeIndexType tail, NodeIndexType head); void Build() { Build(nullptr); } - void Build(std::vector *permutation); + void Build(std::vector* permutation); private: std::vector start_; @@ -592,7 +592,7 @@ class ReverseArcStaticGraph // This loops over the heads of the OutgoingArcs(node). It is just a more // convenient way to achieve this. Moreover this interface is used by some // graph algorithms. - BeginEndWrapper operator[](NodeIndexType node) const; + BeginEndWrapper operator[](NodeIndexType node) const; ArcIndexType OppositeArc(ArcIndexType arc) const; // TODO(user): support Head() and Tail() before Build(), like StaticGraph<>. @@ -604,7 +604,7 @@ class ReverseArcStaticGraph ArcIndexType AddArc(NodeIndexType tail, NodeIndexType head); void Build() { Build(nullptr); } - void Build(std::vector *permutation); + void Build(std::vector* permutation); private: ArcIndexType DirectArcLimit(NodeIndexType node) const { @@ -679,7 +679,7 @@ class ReverseArcMixedGraph // This loops over the heads of the OutgoingArcs(node). It is just a more // convenient way to achieve this. Moreover this interface is used by some // graph algorithms. - BeginEndWrapper operator[](NodeIndexType node) const; + BeginEndWrapper operator[](NodeIndexType node) const; ArcIndexType OppositeArc(ArcIndexType arc) const; // TODO(user): support Head() and Tail() before Build(), like StaticGraph<>. 
@@ -691,7 +691,7 @@ class ReverseArcMixedGraph ArcIndexType AddArc(NodeIndexType tail, NodeIndexType head); void Build() { Build(nullptr); } - void Build(std::vector *permutation); + void Build(std::vector* permutation); private: ArcIndexType DirectArcLimit(NodeIndexType node) const { @@ -721,8 +721,8 @@ class ReverseArcMixedGraph // Some compiler do not know typeof(), so we have to use this extra function // internally. template -void PermuteWithExplicitElementType(const IntVector &permutation, - Array *array_to_permute, +void PermuteWithExplicitElementType(const IntVector& permutation, + Array* array_to_permute, ElementType unused) { std::vector temp(permutation.size()); for (int i = 0; i < permutation.size(); ++i) { @@ -734,7 +734,7 @@ void PermuteWithExplicitElementType(const IntVector &permutation, } template -void Permute(const IntVector &permutation, Array *array_to_permute) { +void Permute(const IntVector& permutation, Array* array_to_permute) { if (permutation.empty()) { return; } @@ -745,8 +745,8 @@ void Permute(const IntVector &permutation, Array *array_to_permute) { // We need a specialization for vector, because the default code uses // (*array_to_permute)[0] as ElementType, which isn't 'bool' in that case. template -void Permute(const IntVector &permutation, - std::vector *array_to_permute) { +void Permute(const IntVector& permutation, + std::vector* array_to_permute) { if (permutation.empty()) { return; } @@ -776,14 +776,14 @@ class SVector { ~SVector() { clear_and_dealloc(); } // Copy constructor and assignment operator. - SVector(const SVector &other) : SVector() { *this = other; } - SVector &operator=(const SVector &other) { + SVector(const SVector& other) : SVector() { *this = other; } + SVector& operator=(const SVector& other) { if (capacity_ < other.size_) { clear_and_dealloc(); // NOTE(user): Alternatively, our capacity could inherit from the other // vector's capacity, which can be (much) greater than its size. 
capacity_ = other.size_; - base_ = static_cast(malloc(2LL * capacity_ * sizeof(T))); + base_ = static_cast(malloc(2LL * capacity_ * sizeof(T))); CHECK(base_ != nullptr); base_ += capacity_; } else { // capacity_ >= other.size @@ -798,8 +798,8 @@ class SVector { } // Move constructor and move assignment operator. - SVector(SVector &&other) : SVector() { swap(other); } - SVector &operator=(SVector &&other) { + SVector(SVector&& other) : SVector() { swap(other); } + SVector& operator=(SVector&& other) { // NOTE(user): We could just swap() and let the other's destruction take // care of the clean-up, but it is probably less bug-prone to perform the // destruction immediately. @@ -808,13 +808,13 @@ class SVector { return *this; } - T &operator[](int n) { + T& operator[](int n) { DCHECK_LT(n, size_); DCHECK_GE(n, -size_); return base_[n]; } - const T &operator[](int n) const { + const T& operator[](int n) const { DCHECK_LT(n, size_); DCHECK_GE(n, -size_); return base_[n]; @@ -839,9 +839,9 @@ class SVector { void clear() { resize(0); } - T *data() const { return base_; } + T* data() const { return base_; } - void swap(SVector &x) { + void swap(SVector& x) { std::swap(base_, x.base_); std::swap(size_, x.size_); std::swap(capacity_, x.capacity_); @@ -852,9 +852,9 @@ class SVector { DCHECK_LE(n, max_size()); if (n > capacity_) { const int new_capacity = std::min(n, max_size()); - T *new_storage = static_cast(malloc(2LL * new_capacity * sizeof(T))); + T* new_storage = static_cast(malloc(2LL * new_capacity * sizeof(T))); CHECK(new_storage != nullptr); - T *new_base = new_storage + new_capacity; + T* new_base = new_storage + new_capacity; // TODO(user): in C++17 we could use std::uninitialized_move instead // of this loop. for (int i = -size_; i < size_; ++i) { @@ -870,7 +870,7 @@ class SVector { // NOTE(user): This doesn't currently support movable-only objects, but we // could fix that. 
- void grow(const T &left = T(), const T &right = T()) { + void grow(const T& left = T(), const T& right = T()) { if (size_ == capacity_) { // We have to copy the elements because they are allowed to be element of // *this. @@ -917,7 +917,7 @@ class SVector { return capacity_ + delta; } - T *base_; // Pointer to the element of index 0. + T* base_; // Pointer to the element of index 0. int size_; // Valid index are [- size_, size_). int capacity_; // Reserved index are [- capacity_, capacity_). }; @@ -975,7 +975,7 @@ void BaseGraph void BaseGraph:: - ComputeCumulativeSum(std::vector *v) { + ComputeCumulativeSum(std::vector* v) { ArcIndexType sum = 0; for (int i = 0; i < num_nodes_; ++i) { ArcIndexType temp = (*v)[i]; @@ -992,9 +992,9 @@ void BaseGraph:: // - Update "permutation" to reflect the change, unless it is NULL. template void BaseGraph:: - BuildStartAndForwardHead(SVector *head, - std::vector *start, - std::vector *permutation) { + BuildStartAndForwardHead(SVector* head, + std::vector* start, + std::vector* permutation) { // Computes the outgoing degree of each nodes and check if we need to permute // something or not. Note that the tails are currently stored in the positive // range of the SVector head. 
@@ -1075,13 +1075,13 @@ void BaseGraph:: #define DEFINE_STL_ITERATOR_FUNCTIONS(iterator_class_name) \ using iterator_category = std::input_iterator_tag; \ using difference_type = ptrdiff_t; \ - using pointer = const ArcIndexType *; \ - using reference = const ArcIndexType &; \ + using pointer = const ArcIndexType*; \ + using reference = const ArcIndexType&; \ using value_type = ArcIndexType; \ - bool operator!=(const iterator_class_name &other) const { \ + bool operator!=(const iterator_class_name& other) const { \ return this->index_ != other.index_; \ } \ - bool operator==(const iterator_class_name &other) const { \ + bool operator==(const iterator_class_name& other) const { \ return this->index_ == other.index_; \ } \ ArcIndexType operator*() const { return this->Index(); } \ @@ -1162,7 +1162,7 @@ void ListGraph::ReserveArcs(ArcIndexType bound) { template void ListGraph::Build( - std::vector *permutation) { + std::vector* permutation) { if (permutation != nullptr) { permutation->clear(); } @@ -1171,11 +1171,11 @@ void ListGraph::Build( template class ListGraph::OutgoingArcIterator { public: - OutgoingArcIterator(const ListGraph &graph, NodeIndexType node) + OutgoingArcIterator(const ListGraph& graph, NodeIndexType node) : graph_(graph), index_(graph.start_[node]) { DCHECK(graph.IsNodeValid(node)); } - OutgoingArcIterator(const ListGraph &graph, NodeIndexType node, + OutgoingArcIterator(const ListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), index_(arc) { DCHECK(graph.IsNodeValid(node)); @@ -1191,7 +1191,7 @@ class ListGraph::OutgoingArcIterator { DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingArcIterator); private: - const ListGraph &graph_; + const ListGraph& graph_; ArcIndexType index_; }; @@ -1200,15 +1200,15 @@ class ListGraph::OutgoingHeadIterator { public: using iterator_category = std::input_iterator_tag; using difference_type = ptrdiff_t; - using pointer = const NodeIndexType *; - using reference = const NodeIndexType &; + using pointer = 
const NodeIndexType*; + using reference = const NodeIndexType&; using value_type = NodeIndexType; - OutgoingHeadIterator(const ListGraph &graph, NodeIndexType node) + OutgoingHeadIterator(const ListGraph& graph, NodeIndexType node) : graph_(graph), index_(graph.start_[node]) { DCHECK(graph.IsNodeValid(node)); } - OutgoingHeadIterator(const ListGraph &graph, NodeIndexType node, + OutgoingHeadIterator(const ListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), index_(arc) { DCHECK(graph.IsNodeValid(node)); @@ -1221,16 +1221,16 @@ class ListGraph::OutgoingHeadIterator { index_ = graph_.next_[index_]; } - bool operator!=(const typename ListGraph::OutgoingHeadIterator - &other) const { + bool operator!=( + const typename ListGraph< + NodeIndexType, ArcIndexType>::OutgoingHeadIterator& other) const { return index_ != other.index_; } NodeIndexType operator*() const { return Index(); } void operator++() { Next(); } private: - const ListGraph &graph_; + const ListGraph& graph_; ArcIndexType index_; }; @@ -1239,9 +1239,9 @@ class ListGraph::OutgoingHeadIterator { DEFINE_RANGE_BASED_ARC_ITERATION(StaticGraph, Outgoing, DirectArcLimit(node)); template -BeginEndWrapper +BeginEndWrapper StaticGraph::operator[](NodeIndexType node) const { - return BeginEndWrapper( + return BeginEndWrapper( head_.data() + start_[node], head_.data() + DirectArcLimit(node)); } @@ -1324,7 +1324,7 @@ NodeIndexType StaticGraph::Head( // chance of cache hit during the computation. 
template void StaticGraph::Build( - std::vector *permutation) { + std::vector* permutation) { DCHECK(!is_built_); if (is_built_) return; is_built_ = true; @@ -1384,9 +1384,9 @@ void StaticGraph::Build( template class StaticGraph::OutgoingArcIterator { public: - OutgoingArcIterator(const StaticGraph &graph, NodeIndexType node) + OutgoingArcIterator(const StaticGraph& graph, NodeIndexType node) : index_(graph.start_[node]), limit_(graph.DirectArcLimit(node)) {} - OutgoingArcIterator(const StaticGraph &graph, NodeIndexType node, + OutgoingArcIterator(const StaticGraph& graph, NodeIndexType node, ArcIndexType arc) : index_(arc), limit_(graph.DirectArcLimit(node)) { DCHECK_GE(arc, graph.start_[node]); @@ -1512,7 +1512,7 @@ ArcIndexType ReverseArcListGraph::AddArc( template void ReverseArcListGraph::Build( - std::vector *permutation) { + std::vector* permutation) { if (permutation != nullptr) { permutation->clear(); } @@ -1521,11 +1521,11 @@ void ReverseArcListGraph::Build( template class ReverseArcListGraph::OutgoingArcIterator { public: - OutgoingArcIterator(const ReverseArcListGraph &graph, NodeIndexType node) + OutgoingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node) : graph_(graph), index_(graph.start_[node]) { DCHECK(graph.IsNodeValid(node)); } - OutgoingArcIterator(const ReverseArcListGraph &graph, NodeIndexType node, + OutgoingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), index_(arc) { DCHECK(graph.IsNodeValid(node)); @@ -1542,7 +1542,7 @@ class ReverseArcListGraph::OutgoingArcIterator { DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingArcIterator); private: - const ReverseArcListGraph &graph_; + const ReverseArcListGraph& graph_; ArcIndexType index_; }; @@ -1550,12 +1550,12 @@ template class ReverseArcListGraph::OppositeIncomingArcIterator { public: - OppositeIncomingArcIterator(const ReverseArcListGraph &graph, + OppositeIncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node) : 
graph_(graph), index_(graph.reverse_start_[node]) { DCHECK(graph.IsNodeValid(node)); } - OppositeIncomingArcIterator(const ReverseArcListGraph &graph, + OppositeIncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), index_(arc) { DCHECK(graph.IsNodeValid(node)); @@ -1573,7 +1573,7 @@ class ReverseArcListGraph class ReverseArcListGraph::IncomingArcIterator : public OppositeIncomingArcIterator { public: - IncomingArcIterator(const ReverseArcListGraph &graph, NodeIndexType node) + IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node) : OppositeIncomingArcIterator(graph, node) {} - IncomingArcIterator(const ReverseArcListGraph &graph, NodeIndexType node, + IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node, ArcIndexType arc) : OppositeIncomingArcIterator( graph, node, @@ -1603,13 +1603,13 @@ template class ReverseArcListGraph::OutgoingOrOppositeIncomingArcIterator { public: - OutgoingOrOppositeIncomingArcIterator(const ReverseArcListGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node) : graph_(graph), index_(graph.reverse_start_[node]), node_(node) { DCHECK(graph.IsNodeValid(node)); if (index_ == Base::kNilArc) index_ = graph.start_[node]; } - OutgoingOrOppositeIncomingArcIterator(const ReverseArcListGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(graph), index_(arc), node_(node) { DCHECK(graph.IsNodeValid(node)); @@ -1633,7 +1633,7 @@ class ReverseArcListGraph class ReverseArcListGraph::OutgoingHeadIterator { public: - OutgoingHeadIterator(const ReverseArcListGraph &graph, NodeIndexType node) + OutgoingHeadIterator(const ReverseArcListGraph& graph, NodeIndexType node) : graph_(&graph), index_(graph.start_[node]) { DCHECK(graph.IsNodeValid(node)); } - OutgoingHeadIterator(const ReverseArcListGraph &graph, NodeIndexType node, + 
OutgoingHeadIterator(const ReverseArcListGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(&graph), index_(arc) { DCHECK(graph.IsNodeValid(node)); @@ -1662,7 +1662,7 @@ class ReverseArcListGraph::OutgoingHeadIterator { DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingHeadIterator); private: - const ReverseArcListGraph *graph_; + const ReverseArcListGraph* graph_; ArcIndexType index_; }; @@ -1691,10 +1691,10 @@ ArcIndexType ReverseArcStaticGraph::InDegree( } template -BeginEndWrapper +BeginEndWrapper ReverseArcStaticGraph::operator[]( NodeIndexType node) const { - return BeginEndWrapper( + return BeginEndWrapper( head_.data() + start_[node], head_.data() + DirectArcLimit(node)); } @@ -1753,7 +1753,7 @@ ArcIndexType ReverseArcStaticGraph::AddArc( template void ReverseArcStaticGraph::Build( - std::vector *permutation) { + std::vector* permutation) { DCHECK(!is_built_); if (is_built_) return; is_built_ = true; @@ -1799,9 +1799,9 @@ void ReverseArcStaticGraph::Build( template class ReverseArcStaticGraph::OutgoingArcIterator { public: - OutgoingArcIterator(const ReverseArcStaticGraph &graph, NodeIndexType node) + OutgoingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node) : index_(graph.start_[node]), limit_(graph.DirectArcLimit(node)) {} - OutgoingArcIterator(const ReverseArcStaticGraph &graph, NodeIndexType node, + OutgoingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node, ArcIndexType arc) : index_(arc), limit_(graph.DirectArcLimit(node)) { DCHECK_GE(arc, graph.start_[node]); @@ -1827,7 +1827,7 @@ template class ReverseArcStaticGraph::OppositeIncomingArcIterator { public: - OppositeIncomingArcIterator(const ReverseArcStaticGraph &graph, + OppositeIncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node) : graph_(graph), limit_(graph.ReverseArcLimit(node)), @@ -1835,7 +1835,7 @@ class ReverseArcStaticGraph class ReverseArcStaticGraph::IncomingArcIterator : public OppositeIncomingArcIterator { public: - 
IncomingArcIterator(const ReverseArcStaticGraph &graph, NodeIndexType node) + IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node) : OppositeIncomingArcIterator(graph, node) {} - IncomingArcIterator(const ReverseArcStaticGraph &graph, NodeIndexType node, + IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node, ArcIndexType arc) : OppositeIncomingArcIterator(graph, node, arc == graph.ReverseArcLimit(node) @@ -1884,7 +1884,7 @@ template class ReverseArcStaticGraph< NodeIndexType, ArcIndexType>::OutgoingOrOppositeIncomingArcIterator { public: - OutgoingOrOppositeIncomingArcIterator(const ReverseArcStaticGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node) : index_(graph.reverse_start_[node]), first_limit_(graph.ReverseArcLimit(node)), @@ -1894,7 +1894,7 @@ class ReverseArcStaticGraph< DCHECK(graph.IsNodeValid(node)); DCHECK((index_ < first_limit_) || (index_ >= next_start_)); } - OutgoingOrOppositeIncomingArcIterator(const ReverseArcStaticGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node, ArcIndexType arc) : index_(arc), first_limit_(graph.ReverseArcLimit(node)), @@ -1950,10 +1950,10 @@ ArcIndexType ReverseArcMixedGraph::InDegree( } template -BeginEndWrapper +BeginEndWrapper ReverseArcMixedGraph::operator[]( NodeIndexType node) const { - return BeginEndWrapper( + return BeginEndWrapper( head_.data() + start_[node], head_.data() + DirectArcLimit(node)); } @@ -2011,7 +2011,7 @@ ArcIndexType ReverseArcMixedGraph::AddArc( template void ReverseArcMixedGraph::Build( - std::vector *permutation) { + std::vector* permutation) { DCHECK(!is_built_); if (is_built_) return; is_built_ = true; @@ -2039,9 +2039,9 @@ void ReverseArcMixedGraph::Build( template class ReverseArcMixedGraph::OutgoingArcIterator { public: - OutgoingArcIterator(const ReverseArcMixedGraph &graph, NodeIndexType node) + OutgoingArcIterator(const 
ReverseArcMixedGraph& graph, NodeIndexType node) : index_(graph.start_[node]), limit_(graph.DirectArcLimit(node)) {} - OutgoingArcIterator(const ReverseArcMixedGraph &graph, NodeIndexType node, + OutgoingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node, ArcIndexType arc) : index_(arc), limit_(graph.DirectArcLimit(node)) { DCHECK_GE(arc, graph.start_[node]); @@ -2067,14 +2067,14 @@ template class ReverseArcMixedGraph::OppositeIncomingArcIterator { public: - OppositeIncomingArcIterator(const ReverseArcMixedGraph &graph, + OppositeIncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node) : graph_(&graph) { DCHECK(graph.is_built_); DCHECK(graph.IsNodeValid(node)); index_ = graph.reverse_start_[node]; } - OppositeIncomingArcIterator(const ReverseArcMixedGraph &graph, + OppositeIncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(&graph), index_(arc) { DCHECK(graph.is_built_); @@ -2092,7 +2092,7 @@ class ReverseArcMixedGraph class ReverseArcMixedGraph::IncomingArcIterator : public OppositeIncomingArcIterator { public: - IncomingArcIterator(const ReverseArcMixedGraph &graph, NodeIndexType node) + IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node) : OppositeIncomingArcIterator(graph, node) {} - IncomingArcIterator(const ReverseArcMixedGraph &graph, NodeIndexType node, + IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node, ArcIndexType arc) : OppositeIncomingArcIterator( graph, node, arc == Base::kNilArc ? arc : graph.OppositeArc(arc)) {} @@ -2119,7 +2119,7 @@ template class ReverseArcMixedGraph< NodeIndexType, ArcIndexType>::OutgoingOrOppositeIncomingArcIterator { public: - OutgoingOrOppositeIncomingArcIterator(const ReverseArcMixedGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node) : graph_(&graph) { limit_ = graph.DirectArcLimit(node); // also DCHECKs node and is_built_. 
@@ -2129,7 +2129,7 @@ class ReverseArcMixedGraph< index_ = restart_; } } - OutgoingOrOppositeIncomingArcIterator(const ReverseArcMixedGraph &graph, + OutgoingOrOppositeIncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node, ArcIndexType arc) : graph_(&graph) { limit_ = graph.DirectArcLimit(node); @@ -2157,7 +2157,7 @@ class ReverseArcMixedGraph< DEFINE_STL_ITERATOR_FUNCTIONS(OutgoingOrOppositeIncomingArcIterator); private: - const ReverseArcMixedGraph *graph_; + const ReverseArcMixedGraph* graph_; ArcIndexType index_; ArcIndexType restart_; ArcIndexType limit_; @@ -2277,7 +2277,7 @@ class CompleteBipartiteGraph // Deprecated interface. class OutgoingArcIterator { public: - OutgoingArcIterator(const CompleteBipartiteGraph &graph, NodeIndexType node) + OutgoingArcIterator(const CompleteBipartiteGraph& graph, NodeIndexType node) : index_(graph.right_nodes_ * node), limit_(node >= graph.left_nodes_ ? index_ : graph.right_nodes_ * (node + 1)) {} diff --git a/ortools/graph/hamiltonian_path.h b/ortools/graph/hamiltonian_path.h index e90c9b2005..e07bfe34d7 100644 --- a/ortools/graph/hamiltonian_path.h +++ b/ortools/graph/hamiltonian_path.h @@ -104,7 +104,7 @@ template class ElementIterator { public: explicit ElementIterator(Set set) : current_set_(set) {} - bool operator!=(const ElementIterator &other) const { + bool operator!=(const ElementIterator& other) const { return current_set_ != other.current_set_; } @@ -112,7 +112,7 @@ class ElementIterator { int operator*() const { return current_set_.SmallestElement(); } // Advances the iterator by removing its smallest element. 
- const ElementIterator &operator++() { + const ElementIterator& operator++() { current_set_ = current_set_.RemoveSmallestElement(); return *this; } @@ -199,7 +199,7 @@ class Set { return ElementIterator(Set(value_)); } ElementIterator end() const { return ElementIterator(Set(0)); } - bool operator!=(const Set &other) const { return value_ != other.value_; } + bool operator!=(const Set& other) const { return value_ != other.value_; } private: // The Integer representing the set. @@ -230,14 +230,14 @@ class SetRangeIterator { // STL iterator-related methods. SetType operator*() const { return current_set_; } - bool operator!=(const SetRangeIterator &other) const { + bool operator!=(const SetRangeIterator& other) const { return current_set_ != other.current_set_; } // Computes the next set with the same cardinality using Gosper's hack. // ftp://publications.ai.mit.edu/ai-publications/pdf/AIM-239.pdf ITEM 175 // Also translated in C https://www.cl.cam.ac.uk/~am21/hakmemc.html - const SetRangeIterator &operator++() { + const SetRangeIterator& operator++() { const IntegerType c = current_set_.SmallestSingleton().value(); const IntegerType a = current_set_.value(); const IntegerType r = c + current_set_.value(); @@ -342,7 +342,7 @@ class LatticeMemoryManager { int max_card_; // binomial_coefficients_[n][k] contains (n choose k). - std::vector > binomial_coefficients_; + std::vector> binomial_coefficients_; // base_offset_[card] contains the base offset for all f(set, node) with // card(set) == card. @@ -494,7 +494,7 @@ class HamiltonianPathSolver { // Deprecated API. Stores HamiltonianPath(BestHamiltonianPathEndNode()) into // *path. - void HamiltonianPath(std::vector *path); + void HamiltonianPath(std::vector* path); // Returns the cost of the TSP tour. CostType TravelingSalesmanCost(); @@ -503,7 +503,7 @@ class HamiltonianPathSolver { std::vector TravelingSalesmanPath(); // Deprecated API. 
- void TravelingSalesmanPath(std::vector *path); + void TravelingSalesmanPath(std::vector* path); // Returns true if there won't be precision issues. // This is always true for integers, but not for floating-point types. @@ -561,7 +561,7 @@ class HamiltonianPathSolver { std::vector ComputePath(CostType cost, NodeSet set, int end); // Returns true if the path covers all nodes, and its cost is equal to cost. - bool PathIsValid(const std::vector &path, CostType cost); + bool PathIsValid(const std::vector& path, CostType cost); // Cost function used to build Hamiltonian paths. MatrixOrFunction cost_; @@ -584,7 +584,7 @@ class HamiltonianPathSolver { // The vector of smallest Hamiltonian paths starting at 0, indexed by their // end nodes. - std::vector > hamiltonian_paths_; + std::vector> hamiltonian_paths_; // The end node that gives the smallest Hamiltonian path. The smallest // Hamiltonian path starting at 0 of all @@ -666,8 +666,7 @@ void HamiltonianPathSolver::Solve() { // on cardinality. for (int card = 2; card <= num_nodes_; ++card) { // Iterate on sets of same cardinality. - for (NodeSet set : - SetRangeWithCardinality >(card, num_nodes_)) { + for (NodeSet set : SetRangeWithCardinality>(card, num_nodes_)) { // Using BaseOffset and maintaining the node ranks, to reduce the // computational effort for accessing the data. 
const uint64 set_offset = mem_.BaseOffset(card, set); @@ -764,7 +763,7 @@ std::vector HamiltonianPathSolver::ComputePath( template bool HamiltonianPathSolver::PathIsValid( - const std::vector &path, CostType cost) { + const std::vector& path, CostType cost) { NodeSet coverage(0); for (int node : path) { coverage = coverage.AddElement(node); @@ -849,7 +848,7 @@ std::vector HamiltonianPathSolver::HamiltonianPath( template void HamiltonianPathSolver::HamiltonianPath( - std::vector *path) { + std::vector* path) { *path = HamiltonianPath(best_hamiltonian_path_end_node_); } @@ -869,7 +868,7 @@ HamiltonianPathSolver::TravelingSalesmanPath() { template void HamiltonianPathSolver::TravelingSalesmanPath( - std::vector *path) { + std::vector* path) { *path = TravelingSalesmanPath(); } @@ -881,8 +880,7 @@ class PruningHamiltonianSolver { // TSP cost, and stops further search if it exceeds the current best solution. // For the heuristics to determine future lower bound over visited nodeset S - // and last visited node k, the cost of minimum spanning tree of (V \ S) ∪ - // {k} + // and last visited node k, the cost of minimum spanning tree of (V \ S) ∪ {k} // is calculated and added to the current cost(S). The cost of MST is // guaranteed to be smaller than or equal to the cost of Hamiltonian path, // because Hamiltonian path is a spanning tree itself. 
@@ -952,7 +950,7 @@ void PruningHamiltonianSolver::Solve(int end_node) { mem_.Init(num_nodes_); NodeSet start_set = NodeSet::Singleton(0); - std::stack > state_stack; + std::stack> state_stack; state_stack.push(std::make_pair(start_set, 0)); while (!state_stack.empty()) { diff --git a/ortools/graph/io.h b/ortools/graph/io.h index da03846e51..40c7deebab 100644 --- a/ortools/graph/io.h +++ b/ortools/graph/io.h @@ -47,7 +47,7 @@ enum GraphToStringFormat { PRINT_GRAPH_ADJACENCY_LISTS_SORTED, }; template -std::string GraphToString(const Graph &graph, GraphToStringFormat format); +std::string GraphToString(const Graph& graph, GraphToStringFormat format); // Read a graph file in the simple ".g" format: the file should be a text file // containing only space-separated integers, whose first line is: @@ -80,9 +80,9 @@ std::string GraphToString(const Graph &graph, GraphToStringFormat format); // ... // } template -absl::StatusOr ReadGraphFile( - const std::string &filename, bool directed, - std::vector *num_nodes_with_color_or_null); +absl::StatusOr ReadGraphFile( + const std::string& filename, bool directed, + std::vector* num_nodes_with_color_or_null); // Writes a graph to the ".g" file format described above. If "directed" is // true, all arcs are written to the file. If it is false, the graph is expected @@ -97,14 +97,14 @@ absl::StatusOr ReadGraphFile( // This method is the reverse of ReadGraphFile (with the same value for // "directed"). template -absl::Status WriteGraphToFile(const Graph &graph, const std::string &filename, +absl::Status WriteGraphToFile(const Graph& graph, const std::string& filename, bool directed, - const std::vector &num_nodes_with_color); + const std::vector& num_nodes_with_color); // Implementations of the templated methods. 
template -std::string GraphToString(const Graph &graph, GraphToStringFormat format) { +std::string GraphToString(const Graph& graph, GraphToStringFormat format) { std::string out; std::vector adj; for (const typename Graph::NodeIndex node : graph.AllNodes()) { @@ -129,14 +129,14 @@ std::string GraphToString(const Graph &graph, GraphToStringFormat format) { } template -absl::StatusOr ReadGraphFile( - const std::string &filename, bool directed, - std::vector *num_nodes_with_color_or_null) { +absl::StatusOr ReadGraphFile( + const std::string& filename, bool directed, + std::vector* num_nodes_with_color_or_null) { std::unique_ptr graph; int64 num_nodes = -1; int64 num_expected_lines = -1; int64 num_lines_read = 0; - for (const std::string &line : FileLines(filename)) { + for (const std::string& line : FileLines(filename)) { ++num_lines_read; if (num_lines_read == 1) { std::vector header_ints; @@ -222,10 +222,10 @@ absl::StatusOr ReadGraphFile( } template -absl::Status WriteGraphToFile(const Graph &graph, const std::string &filename, +absl::Status WriteGraphToFile(const Graph& graph, const std::string& filename, bool directed, - const std::vector &num_nodes_with_color) { - FILE *f = fopen(filename.c_str(), "w"); + const std::vector& num_nodes_with_color) { + FILE* f = fopen(filename.c_str(), "w"); if (f == nullptr) { return absl::Status(absl::StatusCode::kInvalidArgument, "Could not open file: '" + filename + "'"); diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index dccf63d3a1..1997bcd42a 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -69,12 +69,12 @@ inline BeginEndWrapper BeginEndRange( // values) since the caller already knows the key. 
template inline BeginEndWrapper EqualRange( - MultiMap &multi_map, const typename MultiMap::key_type &key) { + MultiMap& multi_map, const typename MultiMap::key_type& key) { return BeginEndRange(multi_map.equal_range(key)); } template inline BeginEndWrapper EqualRange( - const MultiMap &multi_map, const typename MultiMap::key_type &key) { + const MultiMap& multi_map, const typename MultiMap::key_type& key) { return BeginEndRange(multi_map.equal_range(key)); } @@ -85,17 +85,17 @@ inline BeginEndWrapper EqualRange( template class BeginEndReverseIteratorWrapper { public: - explicit BeginEndReverseIteratorWrapper(const Container &c) : c_(c) {} + explicit BeginEndReverseIteratorWrapper(const Container& c) : c_(c) {} typename Container::const_reverse_iterator begin() const { return c_.rbegin(); } typename Container::const_reverse_iterator end() const { return c_.rend(); } private: - const Container &c_; + const Container& c_; }; template -BeginEndReverseIteratorWrapper Reverse(const Container &c) { +BeginEndReverseIteratorWrapper Reverse(const Container& c) { return BeginEndReverseIteratorWrapper(c); } @@ -105,21 +105,21 @@ class IntegerRangeIterator : public std::iterator { public: explicit IntegerRangeIterator(IntegerType value) : index_(value) {} - IntegerRangeIterator(const IntegerRangeIterator &other) + IntegerRangeIterator(const IntegerRangeIterator& other) : index_(other.index_) {} - IntegerRangeIterator &operator=(const IntegerRangeIterator &other) { + IntegerRangeIterator& operator=(const IntegerRangeIterator& other) { index_ = other.index_; } - bool operator!=(const IntegerRangeIterator &other) const { + bool operator!=(const IntegerRangeIterator& other) const { // This may seems weird, but using < instead of != avoid almost-infinite // loop if one use IntegerRange(1, 0) below for instance. 
return index_ < other.index_; } - bool operator==(const IntegerRangeIterator &other) const { + bool operator==(const IntegerRangeIterator& other) const { return index_ == other.index_; } IntegerType operator*() const { return index_; } - IntegerRangeIterator &operator++() { + IntegerRangeIterator& operator++() { ++index_; return *this; } @@ -143,11 +143,10 @@ class IntegerRangeIterator // for (const ArcIndex arc : graph.AllOutgoingArcs()); // for (const NodeIndex node : graph.AllNodes()); template -class IntegerRange - : public BeginEndWrapper > { +class IntegerRange : public BeginEndWrapper> { public: IntegerRange(IntegerType begin, IntegerType end) - : BeginEndWrapper >( + : BeginEndWrapper>( IntegerRangeIterator(begin), IntegerRangeIterator(end)) {} }; @@ -155,15 +154,15 @@ class IntegerRange // Allow iterating over a vector as a mutable vector. template struct MutableVectorIteration { - explicit MutableVectorIteration(std::vector *v) : v_(v) {} + explicit MutableVectorIteration(std::vector* v) : v_(v) {} struct Iterator { explicit Iterator(typename std::vector::iterator it) : it_(it) {} - T *operator*() { return &*it_; } - Iterator &operator++() { + T* operator*() { return &*it_; } + Iterator& operator++() { it_++; return *this; } - bool operator!=(const Iterator &other) const { return other.it_ != it_; } + bool operator!=(const Iterator& other) const { return other.it_ != it_; } private: typename std::vector::iterator it_; @@ -172,7 +171,7 @@ struct MutableVectorIteration { Iterator end() { return Iterator(v_->end()); } private: - std::vector *const v_; + std::vector* const v_; }; } // namespace util diff --git a/ortools/graph/linear_assignment.cc b/ortools/graph/linear_assignment.cc index 9fcef932b8..31d24d50c8 100644 --- a/ortools/graph/linear_assignment.cc +++ b/ortools/graph/linear_assignment.cc @@ -18,7 +18,7 @@ ABSL_FLAG(int64, assignment_alpha, 5, "Divisor for epsilon at each Refine " "step of LinearSumAssignment."); -ABSL_FLAG(int32, 
assignment_progress_logging_period, 5000, +ABSL_FLAG(int, assignment_progress_logging_period, 5000, "Number of relabelings to do between logging progress messages " "when verbose level is 4 or more."); ABSL_FLAG(bool, assignment_stack_order, true, diff --git a/ortools/graph/linear_assignment.h b/ortools/graph/linear_assignment.h index e6b19740ed..b9d292628b 100644 --- a/ortools/graph/linear_assignment.h +++ b/ortools/graph/linear_assignment.h @@ -215,7 +215,7 @@ #ifndef SWIG ABSL_DECLARE_FLAG(int64, assignment_alpha); -ABSL_DECLARE_FLAG(int32, assignment_progress_logging_period); +ABSL_DECLARE_FLAG(int, assignment_progress_logging_period); ABSL_DECLARE_FLAG(bool, assignment_stack_order); #endif @@ -231,7 +231,7 @@ class LinearSumAssignment { // Constructor for the case in which we will build the graph // incrementally as we discover arc costs, as might be done with any // of the dynamic graph representations such as StarGraph or ForwardStarGraph. - LinearSumAssignment(const GraphType &graph, NodeIndex num_left_nodes); + LinearSumAssignment(const GraphType& graph, NodeIndex num_left_nodes); // Constructor for the case in which the underlying graph cannot be // built until after all the arc costs are known, as is the case @@ -244,7 +244,7 @@ class LinearSumAssignment { // Sets the graph used by the LinearSumAssignment instance, for use // when the graph layout can be determined only after arc costs are // set. This happens, for example, when we use a ForwardStarStaticGraph. - void SetGraph(const GraphType *graph) { + void SetGraph(const GraphType* graph) { DCHECK(graph_ == nullptr); graph_ = graph; } @@ -259,8 +259,8 @@ class LinearSumAssignment { // // Passes ownership of the cycle handler to the caller. // - operations_research::PermutationCycleHandler - *ArcAnnotationCycleHandler(); + operations_research::PermutationCycleHandler* + ArcAnnotationCycleHandler(); // Optimizes the layout of the graph for the access pattern our // implementation will use. 
@@ -275,10 +275,10 @@ class LinearSumAssignment { // constructed such that each node's outgoing arcs are sorted by // head node index before the // LinearSumAssignment::SetGraph() method is called. - void OptimizeGraphLayout(GraphType *graph); + void OptimizeGraphLayout(GraphType* graph); // Allows tests, iterators, etc., to inspect our underlying graph. - inline const GraphType &Graph() const { return *graph_; } + inline const GraphType& Graph() const { return *graph_; } // These handy member functions make the code more compact, and we // expose them to clients so that client code that doesn't have @@ -358,10 +358,10 @@ class LinearSumAssignment { class BipartiteLeftNodeIterator { public: - BipartiteLeftNodeIterator(const GraphType &graph, NodeIndex num_left_nodes) + BipartiteLeftNodeIterator(const GraphType& graph, NodeIndex num_left_nodes) : num_left_nodes_(num_left_nodes), node_iterator_(0) {} - explicit BipartiteLeftNodeIterator(const LinearSumAssignment &assignment) + explicit BipartiteLeftNodeIterator(const LinearSumAssignment& assignment) : num_left_nodes_(assignment.NumLeftNodes()), node_iterator_(0) {} NodeIndex Index() const { return node_iterator_; } @@ -384,7 +384,7 @@ class LinearSumAssignment { relabelings_ = 0; refinements_ = 0; } - void Add(const Stats &that) { + void Add(const Stats& that) { pushes_ += that.pushes_; double_pushes_ += that.double_pushes_; relabelings_ += that.relabelings_; @@ -527,7 +527,7 @@ class LinearSumAssignment { // The graph underlying the problem definition we are given. Not // owned by *this. - const GraphType *graph_; + const GraphType* graph_; // The number of nodes on the left side of the graph we are given. NodeIndex num_left_nodes_; @@ -863,7 +863,7 @@ class LinearSumAssignment { // false if the value of the bound doesn't fit in CostValue. 
inline CostValue PriceChangeBound(CostValue old_epsilon, CostValue new_epsilon, - bool *in_range) const { + bool* in_range) const { const CostValue n = graph_->num_nodes(); // We work in double-precision floating point to determine whether // we'll overflow the integral CostValue type's range of @@ -961,7 +961,7 @@ const CostValue LinearSumAssignment::kMinEpsilon = 1; template LinearSumAssignment::LinearSumAssignment( - const GraphType &graph, const NodeIndex num_left_nodes) + const GraphType& graph, const NodeIndex num_left_nodes) : graph_(&graph), num_left_nodes_(num_left_nodes), success_(false), @@ -977,9 +977,9 @@ LinearSumAssignment::LinearSumAssignment( matched_node_(num_left_nodes, 2 * num_left_nodes - 1), scaled_arc_cost_(graph.max_end_arc_index(), 0), active_nodes_(absl::GetFlag(FLAGS_assignment_stack_order) - ? static_cast( + ? static_cast( new ActiveNodeStack()) - : static_cast( + : static_cast( new ActiveNodeQueue())) {} template @@ -1000,9 +1000,9 @@ LinearSumAssignment::LinearSumAssignment( matched_node_(num_left_nodes, 2 * num_left_nodes - 1), scaled_arc_cost_(num_arcs, 0), active_nodes_(absl::GetFlag(FLAGS_assignment_stack_order) - ? static_cast( + ? 
static_cast( new ActiveNodeStack()) - : static_cast( + : static_cast( new ActiveNodeQueue())) {} template @@ -1023,7 +1023,7 @@ void LinearSumAssignment::SetArcCost(ArcIndex arc, CostValue cost) { template class CostValueCycleHandler : public PermutationCycleHandler { public: - explicit CostValueCycleHandler(std::vector *cost) + explicit CostValueCycleHandler(std::vector* cost) : temp_(0), cost_(cost) {} void SetTempFromIndex(ArcIndexType source) override { @@ -1043,7 +1043,7 @@ class CostValueCycleHandler : public PermutationCycleHandler { private: CostValue temp_; - std::vector *const cost_; + std::vector* const cost_; DISALLOW_COPY_AND_ASSIGN(CostValueCycleHandler); }; @@ -1056,7 +1056,7 @@ class CostValueCycleHandler : public PermutationCycleHandler { template class ArcIndexOrderingByTailNode { public: - explicit ArcIndexOrderingByTailNode(const GraphType &graph) : graph_(graph) {} + explicit ArcIndexOrderingByTailNode(const GraphType& graph) : graph_(graph) {} // Says ArcIndex a is less than ArcIndex b if arc a's tail is less // than arc b's tail. If their tails are equal, orders according to @@ -1069,7 +1069,7 @@ class ArcIndexOrderingByTailNode { } private: - const GraphType &graph_; + const GraphType& graph_; // Copy and assign are allowed; they have to be for STL to work // with this functor, although it seems like a bug for STL to be @@ -1078,14 +1078,14 @@ class ArcIndexOrderingByTailNode { // Passes ownership of the cycle handler to the caller. template -PermutationCycleHandler - *LinearSumAssignment::ArcAnnotationCycleHandler() { +PermutationCycleHandler* +LinearSumAssignment::ArcAnnotationCycleHandler() { return new CostValueCycleHandler( &scaled_arc_cost_); } template -void LinearSumAssignment::OptimizeGraphLayout(GraphType *graph) { +void LinearSumAssignment::OptimizeGraphLayout(GraphType* graph) { // The graph argument is only to give us a non-const-qualified // handle on the graph we already have. Any different graph is // nonsense. 
diff --git a/ortools/graph/max_flow.cc b/ortools/graph/max_flow.cc index 3c59f6e810..3f96edd207 100644 --- a/ortools/graph/max_flow.cc +++ b/ortools/graph/max_flow.cc @@ -70,7 +70,7 @@ SimpleMaxFlow::Status SimpleMaxFlow::Solve(NodeIndex source, NodeIndex sink) { underlying_graph_->AddArc(arc_tail_[arc], arc_head_[arc]); } underlying_graph_->Build(&arc_permutation_); - underlying_max_flow_ = absl::make_unique >( + underlying_max_flow_ = absl::make_unique>( underlying_graph_.get(), source, sink); for (ArcIndex arc = 0; arc < num_arcs; ++arc) { ArcIndex permuted_arc = @@ -106,12 +106,12 @@ FlowQuantity SimpleMaxFlow::OptimalFlow() const { return optimal_flow_; } FlowQuantity SimpleMaxFlow::Flow(ArcIndex arc) const { return arc_flow_[arc]; } -void SimpleMaxFlow::GetSourceSideMinCut(std::vector *result) { +void SimpleMaxFlow::GetSourceSideMinCut(std::vector* result) { if (underlying_max_flow_ == nullptr) return; underlying_max_flow_->GetSourceSideMinCut(result); } -void SimpleMaxFlow::GetSinkSideMinCut(std::vector *result) { +void SimpleMaxFlow::GetSinkSideMinCut(std::vector* result) { if (underlying_max_flow_ == nullptr) return; underlying_max_flow_->GetSinkSideMinCut(result); } @@ -122,7 +122,7 @@ FlowModel SimpleMaxFlow::CreateFlowModelOfLastSolve() { } template -GenericMaxFlow::GenericMaxFlow(const Graph *graph, NodeIndex source, +GenericMaxFlow::GenericMaxFlow(const Graph* graph, NodeIndex source, NodeIndex sink) : graph_(graph), node_excess_(), @@ -223,12 +223,12 @@ void GenericMaxFlow::SetArcFlow(ArcIndex arc, FlowQuantity new_flow) { template void GenericMaxFlow::GetSourceSideMinCut( - std::vector *result) { + std::vector* result) { ComputeReachableNodes(source_, result); } template -void GenericMaxFlow::GetSinkSideMinCut(std::vector *result) { +void GenericMaxFlow::GetSinkSideMinCut(std::vector* result) { ComputeReachableNodes(sink_, result); } @@ -315,7 +315,7 @@ bool GenericMaxFlow::CheckRelabelPrecondition(NodeIndex node) const { } template -std::string 
GenericMaxFlow::DebugString(const std::string &context, +std::string GenericMaxFlow::DebugString(const std::string& context, ArcIndex arc) const { const NodeIndex tail = Tail(arc); const NodeIndex head = Head(arc); @@ -930,7 +930,7 @@ const FlowQuantity GenericMaxFlow::kMaxFlowQuantity = template template void GenericMaxFlow::ComputeReachableNodes( - NodeIndex start, std::vector *result) { + NodeIndex start, std::vector* result) { // If start is not a valid node index, it can reach only itself. // Note(user): This is needed because source and sink are given independently // of the graph and sometimes before it is even constructed. @@ -967,13 +967,13 @@ FlowModel GenericMaxFlow::CreateFlowModel() { FlowModel model; model.set_problem_type(FlowModel::MAX_FLOW); for (int n = 0; n < graph_->num_nodes(); ++n) { - Node *node = model.add_node(); + Node* node = model.add_node(); node->set_id(n); if (n == source_) node->set_supply(1); if (n == sink_) node->set_supply(-1); } for (int a = 0; a < graph_->num_arcs(); ++a) { - Arc *arc = model.add_arc(); + Arc* arc = model.add_arc(); arc->set_tail_node_id(graph_->Tail(a)); arc->set_head_node_id(graph_->Head(a)); arc->set_capacity(Capacity(a)); @@ -986,8 +986,8 @@ FlowModel GenericMaxFlow::CreateFlowModel() { // TODO(user): moves this code out of a .cc file and include it at the end of // the header so it can work with any graph implementation ? 
template class GenericMaxFlow; -template class GenericMaxFlow< ::util::ReverseArcListGraph<> >; -template class GenericMaxFlow< ::util::ReverseArcStaticGraph<> >; -template class GenericMaxFlow< ::util::ReverseArcMixedGraph<> >; +template class GenericMaxFlow<::util::ReverseArcListGraph<>>; +template class GenericMaxFlow<::util::ReverseArcStaticGraph<>>; +template class GenericMaxFlow<::util::ReverseArcMixedGraph<>>; } // namespace operations_research diff --git a/ortools/graph/max_flow.h b/ortools/graph/max_flow.h index d4b4b1661e..9ab0996c3a 100644 --- a/ortools/graph/max_flow.h +++ b/ortools/graph/max_flow.h @@ -109,8 +109,7 @@ // // TODO(user): an alternative would be to evaluate: // A.V. Goldberg, "The Partial Augment-Relabel Algorithm for the Maximum Flow -// Problem.” In Proceedings of Algorithms ESA, LNCS 5193:466-477, Springer -// 2008. +// Problem.” In Proceedings of Algorithms ESA, LNCS 5193:466-477, Springer 2008. // http://www.springerlink.com/index/5535k2j1mt646338.pdf // // An interesting general reference on network flows is: @@ -211,13 +210,13 @@ class SimpleMaxFlow { // Returns the nodes reachable from the source by non-saturated arcs (.i.e. // arc with Flow(arc) < Capacity(arc)), the outgoing arcs of this set form a // minimum cut. This works only if Solve() returned OPTIMAL. - void GetSourceSideMinCut(std::vector *result); + void GetSourceSideMinCut(std::vector* result); // Returns the nodes that can reach the sink by non-saturated arcs, the // outgoing arcs of this set form a minimum cut. Note that if this is the // complement set of GetNodeReachableFromSource(), then the min-cut is unique. // This works only if Solve() returned OPTIMAL. - void GetSinkSideMinCut(std::vector *result); + void GetSinkSideMinCut(std::vector* result); // Creates the protocol buffer representation of the problem used by the last // Solve() call. This is mainly useful for debugging. 
@@ -283,7 +282,7 @@ class PriorityQueueWithRestrictedPush { private: // Helper function to get the last element of a vector and pop it. - Element PopBack(std::vector > *queue); + Element PopBack(std::vector >* queue); // This is the heart of the algorithm. basically we split the elements by // parity of their priority and the precondition on the Push() ensures that @@ -332,11 +331,11 @@ class GenericMaxFlow : public MaxFlowStatusClass { // to be fully built yet, but its capacity reservation are used to initialize // the memory of this class. source and sink must also be valid node of // graph. - GenericMaxFlow(const Graph *graph, NodeIndex source, NodeIndex sink); + GenericMaxFlow(const Graph* graph, NodeIndex source, NodeIndex sink); virtual ~GenericMaxFlow() {} // Returns the graph associated to the current object. - const Graph *graph() const { return graph_; } + const Graph* graph() const { return graph_; } // Returns the status of last call to Solve(). NOT_SOLVED is returned if // Solve() has never been called or if the problem has been modified in such a @@ -384,7 +383,7 @@ class GenericMaxFlow : public MaxFlowStatusClass { // Returns the nodes reachable from the source in the residual graph, the // outgoing arcs of this set form a minimum cut. - void GetSourceSideMinCut(std::vector *result); + void GetSourceSideMinCut(std::vector* result); // Returns the nodes that can reach the sink in the residual graph, the // outgoing arcs of this set form a minimum cut. Note that if this is the @@ -393,7 +392,7 @@ class GenericMaxFlow : public MaxFlowStatusClass { // TODO(user): In the two-phases algorithm, we can get this minimum cut // without doing the second phase. Add an option for this if there is a need // to, note that the second phase is pretty fast so the gain will be small. - void GetSinkSideMinCut(std::vector *result); + void GetSinkSideMinCut(std::vector* result); // Checks the consistency of the input, i.e. 
that capacities on the arcs are // non-negative or null. @@ -452,7 +451,7 @@ class GenericMaxFlow : public MaxFlowStatusClass { // Returns context concatenated with information about arc // in a human-friendly way. - std::string DebugString(const std::string &context, ArcIndex arc) const; + std::string DebugString(const std::string& context, ArcIndex arc) const; // Initializes the container active_nodes_. void InitializeActiveNodeContainer(); @@ -466,7 +465,7 @@ class GenericMaxFlow : public MaxFlowStatusClass { } // Push element to the active node container. - void PushActiveNode(const NodeIndex &node) { + void PushActiveNode(const NodeIndex& node) { if (process_node_by_height_) { active_node_by_height_.Push(node, node_potential_[node]); } else { @@ -539,13 +538,13 @@ class GenericMaxFlow : public MaxFlowStatusClass { // Returns the set of nodes reachable from start in the residual graph or in // the reverse residual graph (if reverse is true). template - void ComputeReachableNodes(NodeIndex start, std::vector *result); + void ComputeReachableNodes(NodeIndex start, std::vector* result); // Maximum manageable flow. static const FlowQuantity kMaxFlowQuantity; // A pointer to the graph passed as argument. - const Graph *graph_; + const Graph* graph_; // An array representing the excess for each node in graph_. QuantityArray node_excess_; @@ -652,7 +651,7 @@ class GenericMaxFlow : public MaxFlowStatusClass { // TODO(user): Modify this code and remove it. 
class MaxFlow : public GenericMaxFlow { public: - MaxFlow(const StarGraph *graph, NodeIndex source, NodeIndex target) + MaxFlow(const StarGraph* graph, NodeIndex source, NodeIndex target) : GenericMaxFlow(graph, source, target) {} }; @@ -703,7 +702,7 @@ Element PriorityQueueWithRestrictedPush::Pop() { template Element PriorityQueueWithRestrictedPush::PopBack( - std::vector > *queue) { + std::vector >* queue) { DCHECK(!queue->empty()); Element element = queue->back().first; queue->pop_back(); diff --git a/ortools/graph/min_cost_flow.cc b/ortools/graph/min_cost_flow.cc index bbaa5059f5..54254c5bd9 100644 --- a/ortools/graph/min_cost_flow.cc +++ b/ortools/graph/min_cost_flow.cc @@ -45,7 +45,7 @@ namespace operations_research { template GenericMinCostFlow::GenericMinCostFlow( - const Graph *graph) + const Graph* graph) : graph_(graph), node_excess_(), node_potential_(), @@ -266,7 +266,7 @@ bool GenericMinCostFlow:: template std::string GenericMinCostFlow::DebugString( - const std::string &context, ArcIndex arc) const { + const std::string& context, ArcIndex arc) const { const NodeIndex tail = Tail(arc); const NodeIndex head = Head(arc); // Reduced cost is computed directly without calling ReducedCost to avoid @@ -290,8 +290,8 @@ GenericMinCostFlow::DebugString( template bool GenericMinCostFlow:: - CheckFeasibility(std::vector *const infeasible_supply_node, - std::vector *const infeasible_demand_node) { + CheckFeasibility(std::vector* const infeasible_supply_node, + std::vector* const infeasible_demand_node) { SCOPED_TIME_STAT(&stats_); // Create a new graph, which is a copy of graph_, with the following // modifications: @@ -982,16 +982,15 @@ bool GenericMinCostFlow::IsArcDirect( // TODO(user): Move this code out of a .cc file and include it at the end of // the header so it can work with any graph implementation? 
template class GenericMinCostFlow; -template class GenericMinCostFlow< ::util::ReverseArcListGraph<> >; -template class GenericMinCostFlow< ::util::ReverseArcStaticGraph<> >; -template class GenericMinCostFlow< ::util::ReverseArcMixedGraph<> >; -template class GenericMinCostFlow< - ::util::ReverseArcStaticGraph >; +template class GenericMinCostFlow<::util::ReverseArcListGraph<>>; +template class GenericMinCostFlow<::util::ReverseArcStaticGraph<>>; +template class GenericMinCostFlow<::util::ReverseArcMixedGraph<>>; +template class GenericMinCostFlow<::util::ReverseArcStaticGraph>; // A more memory-efficient version for large graphs. -template class GenericMinCostFlow< ::util::ReverseArcStaticGraph, - /*ArcFlowType=*/int16, - /*ArcScaledCostType=*/int32>; +template class GenericMinCostFlow<::util::ReverseArcStaticGraph, + /*ArcFlowType=*/int16, + /*ArcScaledCostType=*/int32>; SimpleMinCostFlow::SimpleMinCostFlow(NodeIndex reserve_num_nodes, ArcIndex reserve_num_arcs) { diff --git a/ortools/graph/min_cost_flow.h b/ortools/graph/min_cost_flow.h index f58e359ea5..60862465ae 100644 --- a/ortools/graph/min_cost_flow.h +++ b/ortools/graph/min_cost_flow.h @@ -117,8 +117,7 @@ // The algorithm is not able to detect the infeasibility of a problem (i.e., // when a bottleneck in the network prohibits sending all the supplies.) // Worse, it could in some cases loop forever. This is why feasibility checking -// is enabled by default -// (absl::GetFlag(FLAGS_min_cost_flow_check_feasibility)=true.) +// is enabled by default (FLAGS_min_cost_flow_check_feasibility=true.) // Feasibility checking is implemented using a max-flow, which has a much lower // complexity. The impact on performance is negligible, while the risk of being // caught in an endless loop is removed. Note that using the feasibility checker @@ -336,10 +335,10 @@ class GenericMinCostFlow : public MinCostFlowBase { // Initialize a MinCostFlow instance on the given graph. 
The graph does not // need to be fully built yet, but its capacity reservation is used to // initialize the memory of this class. - explicit GenericMinCostFlow(const Graph *graph); + explicit GenericMinCostFlow(const Graph* graph); // Returns the graph associated to the current object. - const Graph *graph() const { return graph_; } + const Graph* graph() const { return graph_; } // Returns the status of last call to Solve(). NOT_SOLVED is returned if // Solve() has never been called or if the problem has been modified in such a @@ -371,8 +370,8 @@ class GenericMinCostFlow : public MinCostFlowBase { // demands are accessible through FeasibleSupply. // Note that CheckFeasibility is called by Solve() when the flag // min_cost_flow_check_feasibility is set to true (which is the default.) - bool CheckFeasibility(std::vector *const infeasible_supply_node, - std::vector *const infeasible_demand_node); + bool CheckFeasibility(std::vector* const infeasible_supply_node, + std::vector* const infeasible_demand_node); // Makes the min-cost flow problem solvable by truncating supplies and // demands to a level acceptable by the network. There may be several ways to @@ -457,7 +456,7 @@ class GenericMinCostFlow : public MinCostFlowBase { // Returns context concatenated with information about a given arc // in a human-friendly way. - std::string DebugString(const std::string &context, ArcIndex arc) const; + std::string DebugString(const std::string& context, ArcIndex arc) const; // Resets the first_admissible_arc_ array to the first incident arc of each // node. @@ -521,7 +520,7 @@ class GenericMinCostFlow : public MinCostFlowBase { bool IsArcValid(ArcIndex arc) const; // Pointer to the graph passed as argument. - const Graph *graph_; + const Graph* graph_; // An array representing the supply (if > 0) or the demand (if < 0) // for each node in graph_. @@ -613,7 +612,7 @@ class GenericMinCostFlow : public MinCostFlowBase { // New clients should use SimpleMinCostFlow if they can. 
class MinCostFlow : public GenericMinCostFlow { public: - explicit MinCostFlow(const StarGraph *graph) : GenericMinCostFlow(graph) {} + explicit MinCostFlow(const StarGraph* graph) : GenericMinCostFlow(graph) {} }; #endif // SWIG diff --git a/ortools/graph/perfect_matching.cc b/ortools/graph/perfect_matching.cc index 361c1dc057..f163810929 100644 --- a/ortools/graph/perfect_matching.cc +++ b/ortools/graph/perfect_matching.cc @@ -165,7 +165,7 @@ bool BlossomGraph::Initialize() { // Update the slack of each edges now that nodes might have non-zero duals. // Note that we made sure that all updated slacks are non-negative. for (EdgeIndex e(0); e < edges_.size(); ++e) { - Edge &mutable_edge = edges_[e]; + Edge& mutable_edge = edges_[e]; mutable_edge.pseudo_slack -= nodes_[mutable_edge.tail].pseudo_dual + nodes_[mutable_edge.head].pseudo_dual; DCHECK_GE(mutable_edge.pseudo_slack, 0); @@ -193,7 +193,7 @@ bool BlossomGraph::Initialize() { // // TODO(user): Optimize by merging this loop with the one above? for (const EdgeIndex e : graph_[n]) { - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; if (edge.pseudo_slack != 0) continue; if (!NodeIsMatched(edge.OtherEnd(n))) { nodes_[edge.tail].type = 0; @@ -240,7 +240,7 @@ bool BlossomGraph::Initialize() { if (!unmatched_nodes_.empty()) { primal_update_edge_queue_.clear(); for (EdgeIndex e(0); e < edges_.size(); ++e) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const bool tail_is_plus = nodes_[edge.tail].IsPlus(); const bool head_is_plus = nodes_[edge.head].IsPlus(); if (tail_is_plus && head_is_plus) { @@ -260,7 +260,7 @@ CostValue BlossomGraph::ComputeMaxCommonTreeDualDeltaAndResetPrimalEdgeQueue() { // TODO(user): Avoid this linear loop. 
CostValue best_update = kMaxCostValue; for (NodeIndex n(0); n < nodes_.size(); ++n) { - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; if (node.IsBlossom() && node.IsMinus()) { best_update = std::min(best_update, Dual(node)); } @@ -292,13 +292,13 @@ CostValue BlossomGraph::ComputeMaxCommonTreeDualDeltaAndResetPrimalEdgeQueue() { primal_update_edge_queue_.clear(); if (plus_plus_slack == best_update) { plus_plus_pq_.AllTop(&tmp_all_tops_); - for (const Edge *pt : tmp_all_tops_) { + for (const Edge* pt : tmp_all_tops_) { primal_update_edge_queue_.push_back(EdgeIndex(pt - &edges_.front())); } } if (plus_free_slack == best_update) { plus_free_pq_.AllTop(&tmp_all_tops_); - for (const Edge *pt : tmp_all_tops_) { + for (const Edge* pt : tmp_all_tops_) { primal_update_edge_queue_.push_back(EdgeIndex(pt - &edges_.front())); } } @@ -319,7 +319,7 @@ void BlossomGraph::UpdateAllTrees(CostValue delta) { if (DEBUG_MODE) { for (NodeIndex n(0); n < nodes_.size(); ++n) { - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; if (node.IsPlus()) DebugUpdateNodeDual(n, delta); if (node.IsMinus()) DebugUpdateNodeDual(n, -delta); } @@ -328,13 +328,13 @@ void BlossomGraph::UpdateAllTrees(CostValue delta) { bool BlossomGraph::NodeIsMatched(NodeIndex n) const { // An unmatched node must be a tree root. - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; CHECK(node.match != n || (node.root == n && node.IsPlus())); return node.match != n; } NodeIndex BlossomGraph::Match(NodeIndex n) const { - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; if (DEBUG_MODE) { if (node.IsMinus()) CHECK_EQ(node.parent, node.match); if (node.IsPlus()) CHECK_EQ(n, node.match); @@ -346,7 +346,7 @@ NodeIndex BlossomGraph::Match(NodeIndex n) const { // do not miss any potential edges. 
void BlossomGraph::DebugCheckNoPossiblePrimalUpdates() { for (EdgeIndex e(0); e < edges_.size(); ++e) { - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; if (Head(edge) == Tail(edge)) continue; CHECK(!nodes_[Tail(edge)].is_internal); @@ -371,7 +371,7 @@ void BlossomGraph::DebugCheckNoPossiblePrimalUpdates() { } } } - for (const Node &node : nodes_) { + for (const Node& node : nodes_) { if (node.IsMinus() && node.IsBlossom() && Dual(node) == 0) { LOG(FATAL) << "Possible expand!"; } @@ -395,7 +395,7 @@ void BlossomGraph::PrimalUpdates() { // since it has been inserted in the tight edges queue. It's cheaper to // detect it here and skip it than it would be to dynamically update the // queue to only keep actually tight edges at all times. - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; if (Slack(edge) != 0) continue; NodeIndex tail = Tail(edge); @@ -416,11 +416,11 @@ void BlossomGraph::PrimalUpdates() { // Shrink all potential Blossom. for (const EdgeIndex e : possible_shrink_) { - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; const NodeIndex tail = Tail(edge); const NodeIndex head = Head(edge); - const Node &tail_node = nodes_[tail]; - const Node &head_node = nodes_[head]; + const Node& tail_node = nodes_[tail]; + const Node& head_node = nodes_[head]; if (tail_node.IsPlus() && head_node.IsPlus() && tail_node.root == head_node.root && tail != head) { Shrink(e); @@ -437,7 +437,7 @@ void BlossomGraph::PrimalUpdates() { // blossom before trying the other operations though. int num_expands = 0; for (NodeIndex n(0); n < nodes_.size(); ++n) { - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; if (node.IsMinus() && node.IsBlossom() && Dual(node) == 0) { ++num_expands; Expand(n); @@ -449,18 +449,18 @@ void BlossomGraph::PrimalUpdates() { bool BlossomGraph::DebugDualsAreFeasible() const { // The slack of all edge must be non-negative. 
- for (const Edge &edge : edges_) { + for (const Edge& edge : edges_) { if (Slack(edge) < 0) return false; } // The dual of all Blossom must be non-negative. - for (const Node &node : nodes_) { + for (const Node& node : nodes_) { if (node.IsBlossom() && Dual(node) < 0) return false; } return true; } -bool BlossomGraph::DebugEdgeIsTightAndExternal(const Edge &edge) const { +bool BlossomGraph::DebugEdgeIsTightAndExternal(const Edge& edge) const { if (Tail(edge) == Head(edge)) return false; if (nodes_[Tail(edge)].IsInternal()) return false; if (nodes_[Head(edge)].IsInternal()) return false; @@ -479,7 +479,7 @@ void BlossomGraph::Grow(EdgeIndex e, NodeIndex tail, NodeIndex head) { const NodeIndex root = nodes_[tail].root; const NodeIndex leaf = Match(head); - Node &head_node = nodes_[head]; + Node& head_node = nodes_[head]; head_node.root = root; head_node.parent = tail; head_node.type = -1; @@ -489,7 +489,7 @@ void BlossomGraph::Grow(EdgeIndex e, NodeIndex tail, NodeIndex head) { head_node.pseudo_dual += tree_dual; for (const NodeIndex subnode : SubNodes(head)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == head) continue; edge.pseudo_slack -= tree_dual; @@ -497,7 +497,7 @@ void BlossomGraph::Grow(EdgeIndex e, NodeIndex tail, NodeIndex head) { } } - Node &leaf_node = nodes_[leaf]; + Node& leaf_node = nodes_[leaf]; leaf_node.root = root; leaf_node.parent = head; leaf_node.type = +1; @@ -506,11 +506,11 @@ void BlossomGraph::Grow(EdgeIndex e, NodeIndex tail, NodeIndex head) { leaf_node.pseudo_dual -= tree_dual; for (const NodeIndex subnode : SubNodes(leaf)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == leaf) continue; edge.pseudo_slack += tree_dual; - const Node &other_node = nodes_[other_end]; + const Node& other_node = 
nodes_[other_end]; if (other_node.IsPlus()) { // The edge switch from [+] -- [0] to [+] -- [+]. DCHECK(plus_free_pq_.Contains(&edge)); @@ -536,7 +536,7 @@ void BlossomGraph::Grow(EdgeIndex e, NodeIndex tail, NodeIndex head) { } void BlossomGraph::AppendNodePathToRoot(NodeIndex n, - std::vector *path) const { + std::vector* path) const { while (true) { path->push_back(n); n = nodes_[n].parent; @@ -547,7 +547,7 @@ void BlossomGraph::AppendNodePathToRoot(NodeIndex n, void BlossomGraph::Augment(EdgeIndex e) { ++num_augments_; - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; VLOG(2) << "Augment " << Tail(edge) << " -> " << Head(edge); DCHECK(DebugEdgeIsTightAndExternal(edge)); DCHECK(nodes_[Tail(edge)].IsPlus()); @@ -581,7 +581,7 @@ void BlossomGraph::Augment(EdgeIndex e) { // will only be performed at most num_initial_unmatched_nodes / 2 times // though. for (NodeIndex n(0); n < nodes_.size(); ++n) { - Node &node = nodes_[n]; + Node& node = nodes_[n]; if (node.IsInternal()) continue; const NodeIndex root = node.root; if (root != root_a && root != root_b) continue; @@ -590,7 +590,7 @@ void BlossomGraph::Augment(EdgeIndex e) { node.pseudo_dual += delta; for (const NodeIndex subnode : SubNodes(n)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == n) continue; edge.pseudo_slack -= delta; @@ -598,7 +598,7 @@ void BlossomGraph::Augment(EdgeIndex e) { // If the other end is not in one of the two trees, and it is a plus // node, we add it the plus_free queue. All previous [+]--[0] and // [+]--[+] edges need to be removed from the queues. 
- const Node &other_node = nodes_[other_end]; + const Node& other_node = nodes_[other_end]; if (other_node.root != root_a && other_node.root != root_b && other_node.IsPlus()) { if (plus_plus_pq_.Contains(&edge)) plus_plus_pq_.Remove(&edge); @@ -650,7 +650,7 @@ int BlossomGraph::GetDepth(NodeIndex n) const { void BlossomGraph::Shrink(EdgeIndex e) { ++num_shrinks_; - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; DCHECK(DebugEdgeIsTightAndExternal(edge)); DCHECK(nodes_[Tail(edge)].IsPlus()); DCHECK(nodes_[Head(edge)].IsPlus()); @@ -696,7 +696,7 @@ void BlossomGraph::Shrink(EdgeIndex e) { lca_index = tail; VLOG(2) << "LCA " << lca_index; } - Node &lca = nodes_[lca_index]; + Node& lca = nodes_[lca_index]; DCHECK(lca.IsPlus()); // Fill the cycle. @@ -710,13 +710,13 @@ void BlossomGraph::Shrink(EdgeIndex e) { // Save all values that will be needed if we expand this Blossom later. CHECK_GT(blossom.size(), 1); - Node &backup_node = nodes_[blossom[1]]; + Node& backup_node = nodes_[blossom[1]]; #ifndef NDEBUG backup_node.saved_dual = lca.dual; #endif backup_node.saved_pseudo_dual = lca.pseudo_dual + tree_dual; -// Set the new dual of the node to zero. + // Set the new dual of the node to zero. #ifndef NDEBUG lca.dual = 0; #endif @@ -735,7 +735,7 @@ void BlossomGraph::Shrink(EdgeIndex e) { // Update the dual of all edges and the priority queueus. for (const NodeIndex n : blossom) { - Node &mutable_node = nodes_[n]; + Node& mutable_node = nodes_[n]; const bool was_minus = mutable_node.IsMinus(); const CostValue slack_adjust = mutable_node.IsMinus() ? tree_dual : -tree_dual; @@ -753,7 +753,7 @@ void BlossomGraph::Shrink(EdgeIndex e) { root_blossom_node_[subnode] = lca_index; for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); // Skip edge that are already internal. @@ -774,7 +774,7 @@ void BlossomGraph::Shrink(EdgeIndex e) { // the other node at all. 
It might be possible once we store the // parent edge instead of the parent node since then we will only need // to know if this edges point to a new-internal node or not. - Node &mutable_other_node = nodes_[other_end]; + Node& mutable_other_node = nodes_[other_end]; if (mutable_other_node.is_internal) { DCHECK(!plus_free_pq_.Contains(&edge)); if (plus_plus_pq_.Contains(&edge)) plus_plus_pq_.Remove(&edge); @@ -832,7 +832,7 @@ BlossomGraph::EdgeIndex BlossomGraph::FindTightExternalEdgeBetweenNodes( DCHECK_EQ(head, root_blossom_node_[head]); for (const NodeIndex subnode : SubNodes(tail)) { for (const EdgeIndex e : graph_[subnode]) { - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == head && Slack(edge) == 0) { return e; @@ -846,7 +846,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { ++num_expands_; VLOG(2) << "Expand " << to_expand; - Node &node_to_expand = nodes_[to_expand]; + Node& node_to_expand = nodes_[to_expand]; DCHECK(node_to_expand.IsBlossom()); DCHECK(node_to_expand.IsMinus()); DCHECK_EQ(Dual(node_to_expand), 0); @@ -857,7 +857,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { FindTightExternalEdgeBetweenNodes(to_expand, node_to_expand.parent); // First, restore the saved fields. - Node &backup_node = nodes_[node_to_expand.blossom[1]]; + Node& backup_node = nodes_[node_to_expand.blossom[1]]; #ifndef NDEBUG node_to_expand.dual = backup_node.saved_dual; #endif @@ -894,7 +894,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { // Split the cycle in two halves: nodes in [start..end] in path1, and // nodes in [end..start] in path2. Note the inclusive intervals. - const std::vector &cycle = blossom; + const std::vector& cycle = blossom; std::vector path1; std::vector path2; { @@ -915,8 +915,8 @@ void BlossomGraph::Expand(NodeIndex to_expand) { if (path1.size() % 2 == 0) path1.swap(path2); // Use better aliases than 'path1' and 'path2' in the code below. 
- std::vector &path_in_tree = path1; - const std::vector &free_pairs = path2; + std::vector& path_in_tree = path1; + const std::vector& free_pairs = path2; // Strip path2 from the start and end, which aren't needed. path2.erase(path2.begin()); @@ -965,7 +965,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { nodes_[n].pseudo_dual += adjust; for (const NodeIndex subnode : SubNodes(n)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == n) continue; @@ -985,7 +985,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { // Update edge queues. if (node_is_plus) { - const Node &other_node = nodes_[other_end]; + const Node& other_node = nodes_[other_end]; DCHECK(!plus_plus_pq_.Contains(&edge)); DCHECK(!plus_free_pq_.Contains(&edge)); if (other_node.IsPlus()) { @@ -1013,7 +1013,7 @@ void BlossomGraph::Expand(NodeIndex to_expand) { // Update edges slack and priority queue for the adjacent edges. for (const NodeIndex subnode : SubNodes(n)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == n) continue; @@ -1055,7 +1055,7 @@ void BlossomGraph::ExpandAllBlossoms() { // Queue of blossoms to expand. std::vector queue; for (NodeIndex n(0); n < nodes_.size(); ++n) { - Node &node = nodes_[n]; + Node& node = nodes_[n]; if (node.IsInternal()) continue; // When this is called, there should be no more trees. @@ -1069,7 +1069,7 @@ void BlossomGraph::ExpandAllBlossoms() { const NodeIndex to_expand = queue.back(); queue.pop_back(); - Node &node_to_expand = nodes_[to_expand]; + Node& node_to_expand = nodes_[to_expand]; DCHECK(node_to_expand.IsBlossom()); // Find the edge used to match to_expand with Match(to_expand). 
@@ -1077,7 +1077,7 @@ void BlossomGraph::ExpandAllBlossoms() { FindTightExternalEdgeBetweenNodes(to_expand, node_to_expand.match); // Restore the saved data. - Node &backup_node = nodes_[node_to_expand.blossom[1]]; + Node& backup_node = nodes_[node_to_expand.blossom[1]]; #ifndef NDEBUG node_to_expand.dual = backup_node.saved_dual; #endif @@ -1143,7 +1143,7 @@ void BlossomGraph::ExpandAllBlossoms() { } } -const std::vector &BlossomGraph::SubNodes(NodeIndex n) { +const std::vector& BlossomGraph::SubNodes(NodeIndex n) { // This should be only called on an external node. However, in Shrink() we // mark the node as internal early, so we just make sure the node as no saved // blossom field here. @@ -1153,7 +1153,7 @@ const std::vector &BlossomGraph::SubNodes(NodeIndex n) { // is in fact a blossom. subnodes_ = {n}; for (int i = 0; i < subnodes_.size(); ++i) { - const Node &node = nodes_[subnodes_[i]]; + const Node& node = nodes_[subnodes_[i]]; // Since the first node in each list is always the node above, we just // skip it to avoid listing twice the nodes. 
@@ -1175,7 +1175,7 @@ const std::vector &BlossomGraph::SubNodes(NodeIndex n) { } std::string BlossomGraph::NodeDebugString(NodeIndex n) const { - const Node &node = nodes_[n]; + const Node& node = nodes_[n]; if (node.is_internal) { return absl::StrCat("[I] #", n.value()); } @@ -1191,7 +1191,7 @@ std::string BlossomGraph::NodeDebugString(NodeIndex n) const { } std::string BlossomGraph::EdgeDebugString(EdgeIndex e) const { - const Edge &edge = edges_[e]; + const Edge& edge = edges_[e]; if (nodes_[Tail(edge)].is_internal || nodes_[Head(edge)].is_internal) { return absl::StrCat(Tail(edge).value(), "<->", Head(edge).value(), " internal "); @@ -1216,7 +1216,7 @@ void BlossomGraph::DebugUpdateNodeDual(NodeIndex n, CostValue delta) { nodes_[n].dual += delta; for (const NodeIndex subnode : SubNodes(n)) { for (const EdgeIndex e : graph_[subnode]) { - Edge &edge = edges_[e]; + Edge& edge = edges_[e]; const NodeIndex other_end = OtherEnd(edge, subnode); if (other_end == n) continue; edges_[e].slack -= delta; @@ -1225,9 +1225,9 @@ void BlossomGraph::DebugUpdateNodeDual(NodeIndex n, CostValue delta) { #endif } -CostValue BlossomGraph::Slack(const Edge &edge) const { - const Node &tail_node = nodes_[Tail(edge)]; - const Node &head_node = nodes_[Head(edge)]; +CostValue BlossomGraph::Slack(const Edge& edge) const { + const Node& tail_node = nodes_[Tail(edge)]; + const Node& head_node = nodes_[Head(edge)]; CostValue slack = edge.pseudo_slack; if (Tail(edge) == Head(edge)) return slack; // Internal... @@ -1243,7 +1243,7 @@ CostValue BlossomGraph::Slack(const Edge &edge) const { } // Returns the dual value of the given node (which might be a pseudo-node). 
-CostValue BlossomGraph::Dual(const Node &node) const { +CostValue BlossomGraph::Dual(const Node& node) const { const CostValue dual = node.pseudo_dual + node.type * nodes_[node.root].tree_dual_delta; #ifndef NDEBUG diff --git a/ortools/graph/perfect_matching.h b/ortools/graph/perfect_matching.h index 11b7832ebc..659d9948ed 100644 --- a/ortools/graph/perfect_matching.h +++ b/ortools/graph/perfect_matching.h @@ -110,7 +110,7 @@ class MinCostPerfectMatching { DCHECK(optimal_solution_found_); return matches_[node]; } - const std::vector &Matches() const { + const std::vector& Matches() const { DCHECK(optimal_solution_found_); return matches_; } @@ -151,11 +151,9 @@ class MinCostPerfectMatching { // // A possible rooted tree: [+] -- [-] ==== [+] // \ -// [-] ==== [+] ---- -// [-] === [+] +// [-] ==== [+] ---- [-] === [+] // \ -// -// [-] === [+] +// [-] === [+] // // A single unmatched node is also a tree: [+] // @@ -242,12 +240,12 @@ class BlossomGraph { // all the other nodes are internal nodes. std::vector blossom; -// This allows to store information about a new blossom node created by -// Shrink() so that we can properly restore it on Expand(). Note that we -// store the saved information on the second node of a blossom cycle (and -// not the blossom node itself) because that node will be "hidden" until the -// blossom is expanded so this way, we do not need more than one set of -// saved information per node. + // This allows to store information about a new blossom node created by + // Shrink() so that we can properly restore it on Expand(). Note that we + // store the saved information on the second node of a blossom cycle (and + // not the blossom node itself) because that node will be "hidden" until the + // blossom is expanded so this way, we do not need more than one set of + // saved information per node. #ifndef NDEBUG CostValue saved_dual; #endif @@ -276,7 +274,7 @@ class BlossomGraph { // our queues since we want the lowest pseudo_slack first. 
void SetHeapIndex(int index) { pq_position = index; } int GetHeapIndex() const { return pq_position; } - bool operator>(const Edge &other) const { + bool operator>(const Edge& other) const { return pseudo_slack > other.pseudo_slack; } @@ -362,10 +360,10 @@ class BlossomGraph { void ExpandAllBlossoms(); // Return the "slack" of the given edge. - CostValue Slack(const Edge &edge) const; + CostValue Slack(const Edge& edge) const; // Returns the dual value of the given node (which might be a pseudo-node). - CostValue Dual(const Node &node) const; + CostValue Dual(const Node& node) const; // Display to VLOG(1) some statistic about the solve. void DisplayStats() const; @@ -385,12 +383,12 @@ class BlossomGraph { // Returns true iff this is an external edge with a slack of zero. // An external edge is an edge between two external nodes. - bool DebugEdgeIsTightAndExternal(const Edge &edge) const; + bool DebugEdgeIsTightAndExternal(const Edge& edge) const; // Getters to access node/edges from outside the class. // Only used in tests. - const Edge &GetEdge(int e) const { return edges_[EdgeIndex(e)]; } - const Node &GetNode(int n) const { return nodes_[NodeIndex(n)]; } + const Edge& GetEdge(int e) const { return edges_[EdgeIndex(e)]; } + const Node& GetNode(int n) const { return nodes_[NodeIndex(n)]; } // Display information for debugging. std::string NodeDebugString(NodeIndex n) const; @@ -407,7 +405,7 @@ class BlossomGraph { EdgeIndex FindTightExternalEdgeBetweenNodes(NodeIndex tail, NodeIndex head); // Appends the path from n to the root of its tree. Used by Augment(). - void AppendNodePathToRoot(NodeIndex n, std::vector *path) const; + void AppendNodePathToRoot(NodeIndex n, std::vector* path) const; // Returns the depth of a node in its tree. Used by Shrink(). int GetDepth(NodeIndex n) const; @@ -419,23 +417,23 @@ class BlossomGraph { // up to date anymore. It is important to use these functions instead in all // the places where this can happen. 
That is basically everywhere except in // the initialization. - NodeIndex Tail(const Edge &edge) const { + NodeIndex Tail(const Edge& edge) const { return root_blossom_node_[edge.tail]; } - NodeIndex Head(const Edge &edge) const { + NodeIndex Head(const Edge& edge) const { return root_blossom_node_[edge.head]; } // Returns the Head() or Tail() that does not correspond to node. Node that // node must be one of the original index in the given edge, this is DCHECKed // by edge.OtherEnd(). - NodeIndex OtherEnd(const Edge &edge, NodeIndex node) const { + NodeIndex OtherEnd(const Edge& edge, NodeIndex node) const { return root_blossom_node_[edge.OtherEnd(node)]; } // Same as OtherEnd() but the given node should either be Tail(edge) or // Head(edge) and do not need to be one of the original node of this edge. - NodeIndex OtherEndFromExternalNode(const Edge &edge, NodeIndex node) const { + NodeIndex OtherEndFromExternalNode(const Edge& edge, NodeIndex node) const { const NodeIndex head = Head(edge); if (head != node) { DCHECK_EQ(node, Tail(edge)); @@ -447,7 +445,7 @@ class BlossomGraph { // Returns the given node and if this node is a blossom, all its internal // nodes (recursively). Note that any call to SubNodes() invalidate the // previously returned reference. - const std::vector &SubNodes(NodeIndex n); + const std::vector& SubNodes(NodeIndex n); // Just used to check that initialized is called exactly once. bool is_initialized_ = false; @@ -462,7 +460,7 @@ class BlossomGraph { // The current graph incidence. Note that one EdgeIndex should appear in // exactly two places (on its tail and head incidence list). - gtl::ITIVector > graph_; + gtl::ITIVector> graph_; // Used by SubNodes(). std::vector subnodes_; @@ -478,9 +476,9 @@ class BlossomGraph { std::vector possible_shrink_; // Priority queues of edges of a certain types. 
- AdjustablePriorityQueue > plus_plus_pq_; - AdjustablePriorityQueue > plus_free_pq_; - std::vector tmp_all_tops_; + AdjustablePriorityQueue> plus_plus_pq_; + AdjustablePriorityQueue> plus_free_pq_; + std::vector tmp_all_tops_; // The dual objective. Increase as the algorithm progress. This is a lower // bound on the min-cost of a perfect matching. diff --git a/ortools/graph/shortestpaths.cc b/ortools/graph/shortestpaths.cc index 569ee8e702..1592feb957 100644 --- a/ortools/graph/shortestpaths.cc +++ b/ortools/graph/shortestpaths.cc @@ -24,5 +24,5 @@ #include "ortools/base/logging.h" #include "ortools/base/macros.h" -ABSL_FLAG(int32, shortestpaths_disconnected_distance, 200000, +ABSL_FLAG(int, shortestpaths_disconnected_distance, 200000, "Distance returned when two node are disconnected"); diff --git a/ortools/graph/topologicalsorter.cc b/ortools/graph/topologicalsorter.cc index 9d827f075b..93a5f4335f 100644 --- a/ortools/graph/topologicalsorter.cc +++ b/ortools/graph/topologicalsorter.cc @@ -27,13 +27,13 @@ namespace internal { namespace { template -inline void PopTop(IntQueue *q, int *top) { +inline void PopTop(IntQueue* q, int* top) { *top = q->front(); q->pop(); } template -void PopTop(std::priority_queue *q, int *top) { +void PopTop(std::priority_queue* q, int* top) { *top = q->top(); q->pop(); } @@ -64,7 +64,7 @@ void DenseIntTopologicalSorterTpl::AddEdge(int from, int to) { AddNode(std::max(from, to)); - AdjacencyList &adj_list = adjacency_lists_[from]; + AdjacencyList& adj_list = adjacency_lists_[from]; const uint32 adj_list_size = adj_list.size(); if (adj_list_size <= kLazyDuplicateDetectionSizeThreshold) { for (AdjacencyList::const_iterator it = adj_list.begin(); @@ -91,7 +91,7 @@ void DenseIntTopologicalSorterTpl::AddEdge(int from, int to) { template bool DenseIntTopologicalSorterTpl::GetNext( - int *next_node_index, bool *cyclic, std::vector *output_cycle_nodes) { + int* next_node_index, bool* cyclic, std::vector* output_cycle_nodes) { if 
(!TraversalStarted()) { StartTraversal(); } @@ -144,7 +144,7 @@ void DenseIntTopologicalSorterTpl::StartTraversal() { // too many, since we removed them progressively, and it is actually // cheaper to keep them at this point. for (int from = 0; from < num_nodes; ++from) { - AdjacencyList &adj_list = adjacency_lists_[from]; + AdjacencyList& adj_list = adjacency_lists_[from]; for (AdjacencyList::const_iterator it = adj_list.begin(); it != adj_list.end(); ++it) { ++indegree_[*it]; @@ -165,7 +165,7 @@ void DenseIntTopologicalSorterTpl::StartTraversal() { // static template int DenseIntTopologicalSorterTpl::RemoveDuplicates( - std::vector *lists, int skip_lists_smaller_than) { + std::vector* lists, int skip_lists_smaller_than) { // We can always skip lists with less than 2 elements. if (skip_lists_smaller_than < 2) { skip_lists_smaller_than = 2; @@ -215,7 +215,7 @@ int DenseIntTopologicalSorterTpl::RemoveDuplicates( // but at the cost of more code complexity. template void DenseIntTopologicalSorterTpl::ExtractCycle( - std::vector *cycle_nodes) const { + std::vector* cycle_nodes) const { const int num_nodes = adjacency_lists_.size(); cycle_nodes->clear(); // To find a cycle, we start a DFS from each yet-unvisited node and @@ -241,7 +241,7 @@ void DenseIntTopologicalSorterTpl::ExtractCycle( dfs_stack.push_back(DfsState(start_node)); in_cur_stack[start_node] = true; while (!dfs_stack.empty()) { - DfsState *cur_state = &dfs_stack.back(); + DfsState* cur_state = &dfs_stack.back(); if (cur_state->adj_list_index >= adjacency_lists_[cur_state->node].size()) { no_cycle_reachable_from[cur_state->node] = true; @@ -286,13 +286,13 @@ template class DenseIntTopologicalSorterTpl; } // namespace internal std::vector FindCycleInDenseIntGraph( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { std::vector cycle; if (num_nodes < 1) { return cycle; } internal::DenseIntTopologicalSorterTpl sorter(num_nodes); - for (const auto &arc : arcs) { + for 
(const auto& arc : arcs) { sorter.AddEdge(arc.first, arc.second); } sorter.ExtractCycle(&cycle); diff --git a/ortools/graph/topologicalsorter.h b/ortools/graph/topologicalsorter.h index 9ea5a2f9ae..8a3ef82c15 100644 --- a/ortools/graph/topologicalsorter.h +++ b/ortools/graph/topologicalsorter.h @@ -77,13 +77,13 @@ namespace util { // "topological_order". Returns false if the graph is cyclic. // Works in O(num_nodes + arcs.size()), and is pretty fast. inline ABSL_MUST_USE_RESULT bool DenseIntTopologicalSort( - int num_nodes, const std::vector > &arcs, - std::vector *topological_order); + int num_nodes, const std::vector>& arcs, + std::vector* topological_order); // Like DenseIntTopologicalSort, but stable. inline ABSL_MUST_USE_RESULT bool DenseIntStableTopologicalSort( - int num_nodes, const std::vector > &arcs, - std::vector *topological_order); + int num_nodes, const std::vector>& arcs, + std::vector* topological_order); // Finds a cycle in the directed graph given as argument: nodes are dense // integers in 0..num_nodes-1, and (directed) arcs are pairs of nodes @@ -92,38 +92,38 @@ inline ABSL_MUST_USE_RESULT bool DenseIntStableTopologicalSort( // if the cycle 1->4->3->1 exists. // If the graph is acyclic, returns an empty vector. ABSL_MUST_USE_RESULT std::vector FindCycleInDenseIntGraph( - int num_nodes, const std::vector > &arcs); + int num_nodes, const std::vector>& arcs); // Like the two above, but with generic node types. The nodes must be provided. // Can be significantly slower, but still linear. 
template ABSL_MUST_USE_RESULT bool TopologicalSort( - const std::vector &nodes, const std::vector > &arcs, - std::vector *topological_order); + const std::vector& nodes, const std::vector>& arcs, + std::vector* topological_order); template ABSL_MUST_USE_RESULT bool StableTopologicalSort( - const std::vector &nodes, const std::vector > &arcs, - std::vector *topological_order); + const std::vector& nodes, const std::vector>& arcs, + std::vector* topological_order); // "OrDie()" versions of the 4 functions above. Those directly return the // topological order, which makes their API even simpler. inline std::vector DenseIntTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs); + int num_nodes, const std::vector>& arcs); inline std::vector DenseIntStableTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs); + int num_nodes, const std::vector>& arcs); template -std::vector TopologicalSortOrDie(const std::vector &nodes, - const std::vector > &arcs); +std::vector TopologicalSortOrDie(const std::vector& nodes, + const std::vector>& arcs); template std::vector StableTopologicalSortOrDie( - const std::vector &nodes, const std::vector > &arcs); + const std::vector& nodes, const std::vector>& arcs); namespace internal { // Internal wrapper around the *TopologicalSort classes. template ABSL_MUST_USE_RESULT bool RunTopologicalSorter( - Sorter *sorter, const std::vector > &arcs, - std::vector *topological_order_or_cycle); + Sorter* sorter, const std::vector>& arcs, + std::vector* topological_order_or_cycle); // Do not use the templated class directly, instead use one of the // typedefs DenseIntTopologicalSorter or DenseIntStableTopologicalSorter. @@ -173,8 +173,8 @@ class DenseIntTopologicalSorterTpl { // Performs in O(average degree) in average. If a cycle is detected // and "output_cycle_nodes" isn't NULL, it will require an additional // O(number of edges + number of nodes in the graph) time. 
- bool GetNext(int *next_node_index, bool *cyclic, - std::vector *output_cycle_nodes = NULL); + bool GetNext(int* next_node_index, bool* cyclic, + std::vector* output_cycle_nodes = NULL); int GetCurrentFringeSize() { StartTraversal(); @@ -190,11 +190,11 @@ class DenseIntTopologicalSorterTpl { // AdjacencyList of size greater or equal to skip_lists_smaller_than, // in linear time. Returns the total number of duplicates removed. // This method is exposed for unit testing purposes only. - static int RemoveDuplicates(std::vector *lists, + static int RemoveDuplicates(std::vector* lists, int skip_lists_smaller_than); // To extract a cycle. When there is no cycle, cycle_nodes will be empty. - void ExtractCycle(std::vector *cycle_nodes) const; + void ExtractCycle(std::vector* cycle_nodes) const; private: // Outgoing adjacency lists. @@ -207,8 +207,8 @@ class DenseIntTopologicalSorterTpl { typename std::conditional< stable_sort, // We use greater so that the lowest elements gets popped first. - std::priority_queue, std::greater >, - std::queue >::type nodes_with_zero_indegree_; + std::priority_queue, std::greater>, + std::queue>::type nodes_with_zero_indegree_; std::vector indegree_; // Used internally by AddEdge() to decide whether to trigger @@ -227,7 +227,8 @@ extern template class DenseIntTopologicalSorterTpl; // Recommended version for general usage. The stability makes it more // deterministic, and its behavior is guaranteed to never change. -typedef ::util::internal::DenseIntTopologicalSorterTpl +typedef ::util::internal::DenseIntTopologicalSorterTpl< + /*stable_sort=*/true> DenseIntStableTopologicalSorter; // Use this version if you are certain you don't care about the @@ -235,7 +236,8 @@ typedef ::util::internal::DenseIntTopologicalSorterTpl // performance gain can be more significant for large graphs with large // numbers of source nodes (for example 2 Million nodes with 2 Million // random edges sees a factor of 0.7 difference in completion time). 
-typedef ::util::internal::DenseIntTopologicalSorterTpl +typedef ::util::internal::DenseIntTopologicalSorterTpl< + /*stable_sort=*/false> DenseIntTopologicalSorter; // A copy of each Node is stored internally. Duplicated edges are allowed, @@ -276,13 +278,13 @@ class TopologicalSorter { // endpoints used in a call to AddEdge(). Dies with a fatal error if // called after a traversal has been started (see TraversalStarted()), // or if more than INT_MAX nodes are being added. - void AddNode(const T &node) { int_sorter_.AddNode(LookupOrInsertNode(node)); } + void AddNode(const T& node) { int_sorter_.AddNode(LookupOrInsertNode(node)); } // Adds a directed edge with the given endpoints to the graph. There // is no requirement (nor is it an error) to call AddNode() for the // endpoints. Dies with a fatal error if called after a traversal // has been started (see TraversalStarted()). - void AddEdge(const T &from, const T &to) { + void AddEdge(const T& from, const T& to) { // The lookups are not inlined into AddEdge because we need to ensure that // "from" is inserted before "to". const int from_int = LookupOrInsertNode(from); @@ -307,8 +309,8 @@ class TopologicalSorter { // // This starts a traversal (if not started already). Note that the // graph can only be traversed once. - bool GetNext(T *node, bool *cyclic_ptr, - std::vector *output_cycle_nodes = NULL) { + bool GetNext(T* node, bool* cyclic_ptr, + std::vector* output_cycle_nodes = NULL) { StartTraversal(); int node_index; if (!int_sorter_.GetNext(&node_index, cyclic_ptr, @@ -341,7 +343,7 @@ class TopologicalSorter { nodes_.resize(node_to_index_.size()); // We move elements from the absl::flat_hash_map to this vector, without // extra copy (if they are movable). 
- for (auto &node_and_index : node_to_index_) { + for (auto& node_and_index : node_to_index_) { nodes_[node_and_index.second] = std::move(node_and_index.first); } gtl::STLClearHashIfBig(&node_to_index_, 1 << 16); @@ -370,7 +372,7 @@ class TopologicalSorter { // Lookup an existing node's index, or add the node and return the // new index that was assigned to it. - int LookupOrInsertNode(const T &node) { + int LookupOrInsertNode(const T& node) { return gtl::LookupOrInsert(&node_to_index_, node, node_to_index_.size()); } @@ -382,10 +384,10 @@ namespace internal { // If not, returns false and outputs a cycle in "cycle" (if not null). template ABSL_MUST_USE_RESULT bool RunTopologicalSorter( - Sorter *sorter, const std::vector > &arcs, - std::vector *topological_order, std::vector *cycle) { + Sorter* sorter, const std::vector>& arcs, + std::vector* topological_order, std::vector* cycle) { topological_order->clear(); - for (const auto &arc : arcs) { + for (const auto& arc : arcs) { sorter->AddEdge(arc.first, arc.second); } bool cyclic = false; @@ -399,8 +401,8 @@ ABSL_MUST_USE_RESULT bool RunTopologicalSorter( template ABSL_MUST_USE_RESULT bool DenseIntTopologicalSortImpl( - int num_nodes, const std::vector > &arcs, - std::vector *topological_order) { + int num_nodes, const std::vector>& arcs, + std::vector* topological_order) { DenseIntTopologicalSorterTpl sorter(num_nodes); return RunTopologicalSorter( &sorter, arcs, topological_order, nullptr); @@ -408,10 +410,10 @@ ABSL_MUST_USE_RESULT bool DenseIntTopologicalSortImpl( template ABSL_MUST_USE_RESULT bool TopologicalSortImpl( - const std::vector &nodes, const std::vector > &arcs, - std::vector *topological_order) { + const std::vector& nodes, const std::vector>& arcs, + std::vector* topological_order) { TopologicalSorter sorter; - for (const T &node : nodes) { + for (const T& node : nodes) { sorter.AddNode(node); } return RunTopologicalSorter(&sorter, arcs, @@ -421,7 +423,7 @@ ABSL_MUST_USE_RESULT bool 
TopologicalSortImpl( // Now, the OrDie() versions, which directly return the topological order. template std::vector RunTopologicalSorterOrDie( - Sorter *sorter, const std::vector > &arcs) { + Sorter* sorter, const std::vector>& arcs) { std::vector topo_order; CHECK(RunTopologicalSorter(sorter, arcs, &topo_order, &topo_order)) << "Found cycle: " << gtl::LogContainer(topo_order); @@ -430,16 +432,16 @@ std::vector RunTopologicalSorterOrDie( template std::vector DenseIntTopologicalSortOrDieImpl( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { DenseIntTopologicalSorterTpl sorter(num_nodes); return RunTopologicalSorterOrDie(&sorter, arcs); } template std::vector TopologicalSortOrDieImpl( - const std::vector &nodes, const std::vector > &arcs) { + const std::vector& nodes, const std::vector>& arcs) { TopologicalSorter sorter; - for (const T &node : nodes) { + for (const T& node : nodes) { sorter.AddNode(node); } return RunTopologicalSorterOrDie(&sorter, arcs); @@ -448,53 +450,53 @@ std::vector TopologicalSortOrDieImpl( // Implementations of the "simple API" functions declared at the top. 
inline bool DenseIntTopologicalSort( - int num_nodes, const std::vector > &arcs, - std::vector *topological_order) { + int num_nodes, const std::vector>& arcs, + std::vector* topological_order) { return internal::DenseIntTopologicalSortImpl(num_nodes, arcs, topological_order); } inline bool DenseIntStableTopologicalSort( - int num_nodes, const std::vector > &arcs, - std::vector *topological_order) { + int num_nodes, const std::vector>& arcs, + std::vector* topological_order) { return internal::DenseIntTopologicalSortImpl(num_nodes, arcs, topological_order); } template -bool TopologicalSort(const std::vector &nodes, - const std::vector > &arcs, - std::vector *topological_order) { +bool TopologicalSort(const std::vector& nodes, + const std::vector>& arcs, + std::vector* topological_order) { return internal::TopologicalSortImpl(nodes, arcs, topological_order); } template -bool StableTopologicalSort(const std::vector &nodes, - const std::vector > &arcs, - std::vector *topological_order) { +bool StableTopologicalSort(const std::vector& nodes, + const std::vector>& arcs, + std::vector* topological_order) { return internal::TopologicalSortImpl(nodes, arcs, topological_order); } inline std::vector DenseIntTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { return internal::DenseIntTopologicalSortOrDieImpl(num_nodes, arcs); } inline std::vector DenseIntStableTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { return internal::DenseIntTopologicalSortOrDieImpl(num_nodes, arcs); } template -std::vector TopologicalSortOrDie(const std::vector &nodes, - const std::vector > &arcs) { +std::vector TopologicalSortOrDie(const std::vector& nodes, + const std::vector>& arcs) { return internal::TopologicalSortOrDieImpl(nodes, arcs); } template std::vector StableTopologicalSortOrDie( - const std::vector &nodes, const std::vector > &arcs) { + const std::vector& nodes, 
const std::vector>& arcs) { return internal::TopologicalSortOrDieImpl(nodes, arcs); } @@ -516,16 +518,16 @@ class TopologicalSorter namespace util { namespace graph { inline std::vector DenseIntTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { return ::util::DenseIntTopologicalSortOrDie(num_nodes, arcs); } inline std::vector DenseIntStableTopologicalSortOrDie( - int num_nodes, const std::vector > &arcs) { + int num_nodes, const std::vector>& arcs) { return ::util::DenseIntStableTopologicalSortOrDie(num_nodes, arcs); } template std::vector StableTopologicalSortOrDie( - const std::vector &nodes, const std::vector > &arcs) { + const std::vector& nodes, const std::vector>& arcs) { return ::util::StableTopologicalSortOrDie(nodes, arcs); } diff --git a/ortools/graph/util.cc b/ortools/graph/util.cc index f39d8eba17..7c98abe1cf 100644 --- a/ortools/graph/util.cc +++ b/ortools/graph/util.cc @@ -15,7 +15,7 @@ namespace util { -bool IsSubsetOf0N(const std::vector &v, int n) { +bool IsSubsetOf0N(const std::vector& v, int n) { std::vector mask(n, false); for (const int i : v) { if (i < 0 || i >= n || mask[i]) return false; diff --git a/ortools/graph/util.h b/ortools/graph/util.h index 9c80dbcf26..dc43e63038 100644 --- a/ortools/graph/util.h +++ b/ortools/graph/util.h @@ -49,17 +49,17 @@ namespace util { // GraphHasSelfArcs() and GraphIsWeaklyConnected() which also support // non-finalized StaticGraph<>. template -bool GraphHasSelfArcs(const Graph &graph); +bool GraphHasSelfArcs(const Graph& graph); template -bool GraphHasDuplicateArcs(const Graph &graph); +bool GraphHasDuplicateArcs(const Graph& graph); template -bool GraphIsSymmetric(const Graph &graph); +bool GraphIsSymmetric(const Graph& graph); template -bool GraphIsWeaklyConnected(const Graph &graph); +bool GraphIsWeaklyConnected(const Graph& graph); // Returns a fresh copy of a given graph. 
template -std::unique_ptr CopyGraph(const Graph &graph); +std::unique_ptr CopyGraph(const Graph& graph); // Creates a remapped copy of graph "graph", where node i becomes node // new_node_index[i]. @@ -67,8 +67,8 @@ std::unique_ptr CopyGraph(const Graph &graph); // behavior is undefined (it may die). // Note that you can call IsValidPermutation() to check it yourself. template -std::unique_ptr RemapGraph(const Graph &graph, - const std::vector &new_node_index); +std::unique_ptr RemapGraph(const Graph& graph, + const std::vector& new_node_index); // Gets the induced subgraph of "graph" restricted to the nodes in "nodes": // the resulting graph will have exactly nodes.size() nodes, and its @@ -81,8 +81,8 @@ std::unique_ptr RemapGraph(const Graph &graph, // Current complexity: O(num old nodes + num new arcs). It could easily // be done in O(num new nodes + num new arcs) but with a higher constant. template -std::unique_ptr GetSubgraphOfNodes(const Graph &graph, - const std::vector &nodes); +std::unique_ptr GetSubgraphOfNodes(const Graph& graph, + const std::vector& nodes); // This can be used to view a directed graph (that supports reverse arcs) // from graph.h as un undirected graph: operator[](node) returns a @@ -98,13 +98,13 @@ std::unique_ptr GetSubgraphOfNodes(const Graph &graph, template class UndirectedAdjacencyListsOfDirectedGraph { public: - explicit UndirectedAdjacencyListsOfDirectedGraph(const Graph &graph) + explicit UndirectedAdjacencyListsOfDirectedGraph(const Graph& graph) : graph_(graph) {} typedef typename Graph::OutgoingOrOppositeIncomingArcIterator ArcIterator; class AdjacencyListIterator : public ArcIterator { public: - explicit AdjacencyListIterator(const Graph &graph, ArcIterator &&arc_it) + explicit AdjacencyListIterator(const Graph& graph, ArcIterator&& arc_it) : ArcIterator(arc_it), graph_(graph) {} // Overwrite operator* to return the heads of the arcs. 
typename Graph::NodeIndex operator*() const { @@ -112,25 +112,25 @@ class UndirectedAdjacencyListsOfDirectedGraph { } private: - const Graph &graph_; + const Graph& graph_; }; // Returns a pseudo-container of all the nodes adjacent to "node". BeginEndWrapper operator[](int node) const { - const auto &arc_range = graph_.OutgoingOrOppositeIncomingArcs(node); + const auto& arc_range = graph_.OutgoingOrOppositeIncomingArcs(node); return {AdjacencyListIterator(graph_, arc_range.begin()), AdjacencyListIterator(graph_, arc_range.end())}; } private: - const Graph &graph_; + const Graph& graph_; }; // Computes the weakly connected components of a directed graph that // provides the OutgoingOrOppositeIncomingArcs() API, and returns them // as a mapping from node to component index. See GetConnectedComponens(). template -std::vector GetWeaklyConnectedComponents(const Graph &graph) { +std::vector GetWeaklyConnectedComponents(const Graph& graph) { return GetConnectedComponents( graph.num_nodes(), UndirectedAdjacencyListsOfDirectedGraph(graph)); } @@ -138,16 +138,16 @@ std::vector GetWeaklyConnectedComponents(const Graph &graph) { // Returns true iff the given vector is a subset of [0..n-1], i.e. // all elements i are such that 0 <= i < n and no two elements are equal. // "n" must be >= 0 or the result is undefined. -bool IsSubsetOf0N(const std::vector &v, int n); +bool IsSubsetOf0N(const std::vector& v, int n); // Returns true iff the given vector is a permutation of [0..size()-1]. -inline bool IsValidPermutation(const std::vector &v) { +inline bool IsValidPermutation(const std::vector& v) { return IsSubsetOf0N(v, v.size()); } // Returns a copy of "graph", without self-arcs and duplicate arcs. template -std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph &graph); +std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph& graph); // Given an arc path, changes it to a sub-path with the same source and // destination but without any cycle. 
Nothing happen if the path was already @@ -160,11 +160,11 @@ std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph &graph); // take some arc costs and return the cheapest path instead. Or return the // shortest path in term of number of arcs. template -void RemoveCyclesFromPath(const Graph &graph, std::vector *arc_path); +void RemoveCyclesFromPath(const Graph& graph, std::vector* arc_path); // Returns true iff the given path contains a cycle. template -bool PathHasCycle(const Graph &graph, const std::vector &arc_path); +bool PathHasCycle(const Graph& graph, const std::vector& arc_path); // Returns a vector representing a mapping from arcs to arcs such that each arc // is mapped to another arc with its (tail, head) flipped, if such an arc @@ -180,15 +180,15 @@ bool PathHasCycle(const Graph &graph, const std::vector &arc_path); // unique, hence the function name. // // PERFORMANCE: If you see this function taking too much memory and/or too much -// time, reach out to @user: one could halve the memory usage and speed it up. +// time, reach out to viger@: one could halve the memory usage and speed it up. template -std::vector ComputeOnePossibleReverseArcMapping(const Graph &graph, +std::vector ComputeOnePossibleReverseArcMapping(const Graph& graph, bool die_if_not_symmetric); // Implementations of the templated methods. 
template -bool GraphHasSelfArcs(const Graph &graph) { +bool GraphHasSelfArcs(const Graph& graph) { for (const auto arc : graph.AllForwardArcs()) { if (graph.Tail(arc) == graph.Head(arc)) return true; } @@ -196,7 +196,7 @@ bool GraphHasSelfArcs(const Graph &graph) { } template -bool GraphHasDuplicateArcs(const Graph &graph) { +bool GraphHasDuplicateArcs(const Graph& graph) { typedef typename Graph::ArcIndex ArcIndex; typedef typename Graph::NodeIndex NodeIndex; std::vector tmp_node_mask(graph.num_nodes(), false); @@ -214,7 +214,7 @@ bool GraphHasDuplicateArcs(const Graph &graph) { } template -bool GraphIsSymmetric(const Graph &graph) { +bool GraphIsSymmetric(const Graph& graph) { typedef typename Graph::NodeIndex NodeIndex; typedef typename Graph::ArcIndex ArcIndex; // Create a reverse copy of the graph. @@ -243,7 +243,7 @@ bool GraphIsSymmetric(const Graph &graph) { } template -bool GraphIsWeaklyConnected(const Graph &graph) { +bool GraphIsWeaklyConnected(const Graph& graph) { typedef typename Graph::NodeIndex NodeIndex; static_assert(std::numeric_limits::max() <= INT_MAX, "GraphIsWeaklyConnected() isn't yet implemented for graphs" @@ -259,7 +259,7 @@ bool GraphIsWeaklyConnected(const Graph &graph) { } template -std::unique_ptr CopyGraph(const Graph &graph) { +std::unique_ptr CopyGraph(const Graph& graph) { std::unique_ptr new_graph( new Graph(graph.num_nodes(), graph.num_arcs())); for (const auto node : graph.AllNodes()) { @@ -272,8 +272,8 @@ std::unique_ptr CopyGraph(const Graph &graph) { } template -std::unique_ptr RemapGraph(const Graph &old_graph, - const std::vector &new_node_index) { +std::unique_ptr RemapGraph(const Graph& old_graph, + const std::vector& new_node_index) { DCHECK(IsValidPermutation(new_node_index)) << "Invalid permutation"; const int num_nodes = old_graph.num_nodes(); CHECK_EQ(new_node_index.size(), num_nodes); @@ -291,8 +291,8 @@ std::unique_ptr RemapGraph(const Graph &old_graph, } template -std::unique_ptr GetSubgraphOfNodes(const Graph 
&old_graph, - const std::vector &nodes) { +std::unique_ptr GetSubgraphOfNodes(const Graph& old_graph, + const std::vector& nodes) { typedef typename Graph::NodeIndex NodeIndex; typedef typename Graph::ArcIndex ArcIndex; DCHECK(IsSubsetOf0N(nodes, old_graph.num_nodes())) << "Invalid subset"; @@ -325,7 +325,7 @@ std::unique_ptr GetSubgraphOfNodes(const Graph &old_graph, } template -std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph &graph) { +std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph& graph) { std::unique_ptr g(new Graph(graph.num_nodes(), graph.num_arcs())); typedef typename Graph::ArcIndex ArcIndex; typedef typename Graph::NodeIndex NodeIndex; @@ -347,7 +347,7 @@ std::unique_ptr RemoveSelfArcsAndDuplicateArcs(const Graph &graph) { } template -void RemoveCyclesFromPath(const Graph &graph, std::vector *arc_path) { +void RemoveCyclesFromPath(const Graph& graph, std::vector* arc_path) { if (arc_path->empty()) return; // This maps each node to the latest arc in the given path that leaves it. @@ -372,7 +372,7 @@ void RemoveCyclesFromPath(const Graph &graph, std::vector *arc_path) { } template -bool PathHasCycle(const Graph &graph, const std::vector &arc_path) { +bool PathHasCycle(const Graph& graph, const std::vector& arc_path) { if (arc_path.empty()) return false; std::set seen; seen.insert(graph.Tail(arc_path.front())); @@ -384,13 +384,13 @@ bool PathHasCycle(const Graph &graph, const std::vector &arc_path) { template std::vector ComputeOnePossibleReverseArcMapping( - const Graph &graph, bool die_if_not_symmetric) { + const Graph& graph, bool die_if_not_symmetric) { std::vector reverse_arc(graph.num_arcs(), -1); // We need a multi-map since a given (tail,head) may appear several times. // NOTE(user): It's free, in terms of space, to use InlinedVector // rather than std::vector. See go/inlined-vector-size. 
absl::flat_hash_map, - absl::InlinedVector > + absl::InlinedVector> arc_map; for (int arc = 0; arc < graph.num_arcs(); ++arc) { @@ -421,7 +421,7 @@ std::vector ComputeOnePossibleReverseArcMapping( // Algorithm check, for debugging. if (DEBUG_MODE) { int64 num_unmapped_arcs = 0; - for (const auto &p : arc_map) { + for (const auto& p : arc_map) { num_unmapped_arcs += p.second.size(); } DCHECK_EQ(std::count(reverse_arc.begin(), reverse_arc.end(), -1), diff --git a/ortools/gscip/gscip.cc b/ortools/gscip/gscip.cc index 19bb8d7bce..a53465877a 100644 --- a/ortools/gscip/gscip.cc +++ b/ortools/gscip/gscip.cc @@ -105,8 +105,8 @@ GScipOutput::Status ConvertStatus(const SCIP_STATUS scip_status) { } } -SCIP_PARAMEMPHASIS -ConvertEmphasis(const GScipParameters::Emphasis gscip_emphasis) { +SCIP_PARAMEMPHASIS ConvertEmphasis( + const GScipParameters::Emphasis gscip_emphasis) { switch (gscip_emphasis) { case GScipParameters::DEFAULT_EMPHASIS: return SCIP_PARAMEMPHASIS_DEFAULT; @@ -152,18 +152,18 @@ SCIP_PARAMSETTING ConvertMetaParamValue( } } // namespace -const GScipVariableOptions &DefaultGScipVariableOptions() { +const GScipVariableOptions& DefaultGScipVariableOptions() { static GScipVariableOptions var_options; return var_options; } -const GScipConstraintOptions &DefaultGScipConstraintOptions() { +const GScipConstraintOptions& DefaultGScipConstraintOptions() { static GScipConstraintOptions constraint_options; return constraint_options; } -absl::Status GScip::SetParams(const GScipParameters ¶ms, - const std::string &legacy_params) { +absl::Status GScip::SetParams(const GScipParameters& params, + const std::string& legacy_params) { SCIPsetMessagehdlrQuiet(scip_, params.silence_output()); if (!params.search_logs_filename().empty()) { SCIPsetMessagehdlrLogfile(scip_, params.search_logs_filename().c_str()); @@ -178,19 +178,19 @@ absl::Status GScip::SetParams(const GScipParameters ¶ms, scip_, ConvertMetaParamValue(params.presolve()), set_param_quiet)); 
RETURN_IF_SCIP_ERROR(SCIPsetSeparating( scip_, ConvertMetaParamValue(params.separating()), set_param_quiet)); - for (const auto &bool_param : params.bool_params()) { + for (const auto& bool_param : params.bool_params()) { RETURN_IF_SCIP_ERROR( (SCIPsetBoolParam(scip_, bool_param.first.c_str(), bool_param.second))); } - for (const auto &int_param : params.int_params()) { + for (const auto& int_param : params.int_params()) { RETURN_IF_SCIP_ERROR( (SCIPsetIntParam(scip_, int_param.first.c_str(), int_param.second))); } - for (const auto &long_param : params.long_params()) { + for (const auto& long_param : params.long_params()) { RETURN_IF_SCIP_ERROR((SCIPsetLongintParam(scip_, long_param.first.c_str(), long_param.second))); } - for (const auto &char_param : params.char_params()) { + for (const auto& char_param : params.char_params()) { if (char_param.second.size() != 1) { return absl::InvalidArgumentError( absl::StrCat("Character parameters must be single character strings, " @@ -200,11 +200,11 @@ absl::Status GScip::SetParams(const GScipParameters ¶ms, RETURN_IF_SCIP_ERROR((SCIPsetCharParam(scip_, char_param.first.c_str(), char_param.second[0]))); } - for (const auto &string_param : params.string_params()) { + for (const auto& string_param : params.string_params()) { RETURN_IF_SCIP_ERROR((SCIPsetStringParam(scip_, string_param.first.c_str(), string_param.second.c_str()))); } - for (const auto &real_param : params.real_params()) { + for (const auto& real_param : params.real_params()) { RETURN_IF_SCIP_ERROR( (SCIPsetRealParam(scip_, real_param.first.c_str(), real_param.second))); } @@ -215,9 +215,9 @@ absl::Status GScip::SetParams(const GScipParameters ¶ms, return absl::OkStatus(); } -absl::StatusOr > GScip::Create( - const std::string &problem_name) { - SCIP *scip = nullptr; +absl::StatusOr> GScip::Create( + const std::string& problem_name) { + SCIP* scip = nullptr; RETURN_IF_SCIP_ERROR(SCIPcreate(&scip)); RETURN_IF_SCIP_ERROR(SCIPincludeDefaultPlugins(scip)); 
RETURN_IF_SCIP_ERROR(SCIPcreateProbBasic(scip, problem_name.c_str())); @@ -225,7 +225,7 @@ absl::StatusOr > GScip::Create( return absl::WrapUnique(new GScip(scip)); } -GScip::GScip(SCIP *scip) : scip_(scip) {} +GScip::GScip(SCIP* scip) : scip_(scip) {} double GScip::ScipInf() { return SCIPinfinity(scip_); } @@ -248,12 +248,12 @@ bool GScip::InterruptSolve() { absl::Status GScip::CleanUp() { if (scip_ != nullptr) { - for (SCIP_VAR *variable : variables_) { + for (SCIP_VAR* variable : variables_) { if (variable != nullptr) { RETURN_IF_SCIP_ERROR(SCIPreleaseVar(scip_, &variable)); } } - for (SCIP_CONS *constraint : constraints_) { + for (SCIP_CONS* constraint : constraints_) { if (constraint != nullptr) { RETURN_IF_SCIP_ERROR(SCIPreleaseCons(scip_, &constraint)); } @@ -268,15 +268,17 @@ GScip::~GScip() { LOG_IF(DFATAL, !clean_up_status.ok()) << clean_up_status; } -absl::StatusOr GScip::AddVariable( +absl::StatusOr GScip::AddVariable( double lb, double ub, double obj_coef, GScipVarType var_type, - const std::string &var_name, const GScipVariableOptions &options) { - SCIP_VAR *var = nullptr; + const std::string& var_name, const GScipVariableOptions& options) { + SCIP_VAR* var = nullptr; lb = ScipInfClamp(lb); ub = ScipInfClamp(ub); - RETURN_IF_SCIP_ERROR(SCIPcreateVarBasic( - scip_, /*var=*/&var, /*name=*/var_name.c_str(), /*lb=*/lb, /*ub=*/ub, - /*obj=*/obj_coef, ConvertVarType(var_type))); + RETURN_IF_SCIP_ERROR(SCIPcreateVarBasic(scip_, /*var=*/&var, + /*name=*/var_name.c_str(), + /*lb=*/lb, /*ub=*/ub, + /*obj=*/obj_coef, + ConvertVarType(var_type))); RETURN_IF_SCIP_ERROR(SCIPvarSetInitial(var, options.initial)); RETURN_IF_SCIP_ERROR(SCIPvarSetRemovable(var, options.removable)); RETURN_IF_SCIP_ERROR(SCIPaddVar(scip_, var)); @@ -289,7 +291,7 @@ absl::StatusOr GScip::AddVariable( } absl::Status GScip::MaybeKeepConstraintAlive( - SCIP_CONS *constraint, const GScipConstraintOptions &options) { + SCIP_CONS* constraint, const GScipConstraintOptions& options) { if 
(options.keep_alive) { constraints_.insert(constraint); } else { @@ -298,21 +300,25 @@ absl::Status GScip::MaybeKeepConstraintAlive( return absl::OkStatus(); } -absl::StatusOr GScip::AddLinearConstraint( - const GScipLinearRange &range, const std::string &name, - const GScipConstraintOptions &options) { - SCIP_CONS *constraint = nullptr; +absl::StatusOr GScip::AddLinearConstraint( + const GScipLinearRange& range, const std::string& name, + const GScipConstraintOptions& options) { + SCIP_CONS* constraint = nullptr; RETURN_ERROR_UNLESS(range.variables.size() == range.coefficients.size()) << "Error adding constraint: " << name << "."; RETURN_IF_SCIP_ERROR(SCIPcreateConsLinear( scip_, &constraint, name.c_str(), range.variables.size(), - const_cast(range.variables.data()), - const_cast(range.coefficients.data()), + const_cast(range.variables.data()), + const_cast(range.coefficients.data()), ScipInfClamp(range.lower_bound), ScipInfClamp(range.upper_bound), - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*modifiable=*/options.modifiable, /*dynamic=*/options.dynamic, + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*modifiable=*/options.modifiable, + /*dynamic=*/options.dynamic, /*removable=*/options.removable, /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); @@ -320,10 +326,10 @@ absl::StatusOr GScip::AddLinearConstraint( return constraint; } -absl::StatusOr GScip::AddQuadraticConstraint( - const GScipQuadraticRange &range, const std::string &name, - const GScipConstraintOptions &options) { - SCIP_CONS *constraint = nullptr; +absl::StatusOr GScip::AddQuadraticConstraint( + const GScipQuadraticRange& range, const std::string& name, + const 
GScipConstraintOptions& options) { + SCIP_CONS* constraint = nullptr; const int num_lin_vars = range.linear_variables.size(); RETURN_ERROR_UNLESS(num_lin_vars == range.linear_coefficients.size()) << "Error adding quadratic constraint: " << name << " in linear term."; @@ -334,89 +340,105 @@ absl::StatusOr GScip::AddQuadraticConstraint( << "Error adding quadratic constraint: " << name << " in quadratic term."; RETURN_IF_SCIP_ERROR(SCIPcreateConsQuadratic( scip_, &constraint, name.c_str(), num_lin_vars, - const_cast(range.linear_variables.data()), - const_cast(range.linear_coefficients.data()), num_quad_vars, - const_cast(range.quadratic_variables1.data()), - const_cast(range.quadratic_variables2.data()), - const_cast(range.quadratic_coefficients.data()), + const_cast(range.linear_variables.data()), + const_cast(range.linear_coefficients.data()), num_quad_vars, + const_cast(range.quadratic_variables1.data()), + const_cast(range.quadratic_variables2.data()), + const_cast(range.quadratic_coefficients.data()), ScipInfClamp(range.lower_bound), ScipInfClamp(range.upper_bound), - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*modifiable=*/options.modifiable, /*dynamic=*/options.dynamic, + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*modifiable=*/options.modifiable, + /*dynamic=*/options.dynamic, /*removable=*/options.removable)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); return constraint; } -absl::StatusOr GScip::AddIndicatorConstraint( - const GScipIndicatorConstraint &indicator_constraint, - const std::string &name, const GScipConstraintOptions &options) { - SCIP_VAR *indicator = indicator_constraint.indicator_variable; 
+absl::StatusOr GScip::AddIndicatorConstraint( + const GScipIndicatorConstraint& indicator_constraint, + const std::string& name, const GScipConstraintOptions& options) { + SCIP_VAR* indicator = indicator_constraint.indicator_variable; RETURN_ERROR_UNLESS(indicator != nullptr) << "Error adding indicator constraint: " << name << "."; if (indicator_constraint.negate_indicator) { RETURN_IF_SCIP_ERROR(SCIPgetNegatedVar(scip_, indicator, &indicator)); } - SCIP_CONS *constraint = nullptr; + SCIP_CONS* constraint = nullptr; RETURN_ERROR_UNLESS(indicator_constraint.variables.size() == indicator_constraint.coefficients.size()) << "Error adding indicator constraint: " << name << "."; RETURN_IF_SCIP_ERROR(SCIPcreateConsIndicator( scip_, &constraint, name.c_str(), indicator, indicator_constraint.variables.size(), - const_cast(indicator_constraint.variables.data()), - const_cast(indicator_constraint.coefficients.data()), + const_cast(indicator_constraint.variables.data()), + const_cast(indicator_constraint.coefficients.data()), ScipInfClamp(indicator_constraint.upper_bound), - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*dynamic=*/options.dynamic, /*removable=*/options.removable, + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*dynamic=*/options.dynamic, + /*removable=*/options.removable, /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); return constraint; } -absl::StatusOr GScip::AddAndConstraint( - const GScipLogicalConstraintData &logical_data, const std::string &name, - const GScipConstraintOptions &options) { +absl::StatusOr GScip::AddAndConstraint( + const GScipLogicalConstraintData& 
logical_data, const std::string& name, + const GScipConstraintOptions& options) { RETURN_ERROR_UNLESS(logical_data.resultant != nullptr) << "Error adding and constraint: " << name << "."; - SCIP_CONS *constraint = nullptr; - RETURN_IF_SCIP_ERROR(SCIPcreateConsAnd( - scip_, &constraint, name.c_str(), logical_data.resultant, - logical_data.operators.size(), - const_cast(logical_data.operators.data()), - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*modifiable=*/options.modifiable, /*dynamic=*/options.dynamic, - /*removable=*/options.removable, - /*stickingatnode=*/options.sticking_at_node)); + SCIP_CONS* constraint = nullptr; + RETURN_IF_SCIP_ERROR( + SCIPcreateConsAnd(scip_, &constraint, name.c_str(), + logical_data.resultant, logical_data.operators.size(), + const_cast(logical_data.operators.data()), + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*modifiable=*/options.modifiable, + /*dynamic=*/options.dynamic, + /*removable=*/options.removable, + /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); return constraint; } -absl::StatusOr GScip::AddOrConstraint( - const GScipLogicalConstraintData &logical_data, const std::string &name, - const GScipConstraintOptions &options) { +absl::StatusOr GScip::AddOrConstraint( + const GScipLogicalConstraintData& logical_data, const std::string& name, + const GScipConstraintOptions& options) { RETURN_ERROR_UNLESS(logical_data.resultant != nullptr) << "Error adding or constraint: " << name << "."; - SCIP_CONS *constraint = nullptr; - RETURN_IF_SCIP_ERROR(SCIPcreateConsOr( - scip_, &constraint, name.c_str(), logical_data.resultant, - 
logical_data.operators.size(), - const_cast(logical_data.operators.data()), - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*modifiable=*/options.modifiable, /*dynamic=*/options.dynamic, - /*removable=*/options.removable, - /*stickingatnode=*/options.sticking_at_node)); + SCIP_CONS* constraint = nullptr; + RETURN_IF_SCIP_ERROR( + SCIPcreateConsOr(scip_, &constraint, name.c_str(), logical_data.resultant, + logical_data.operators.size(), + const_cast(logical_data.operators.data()), + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*modifiable=*/options.modifiable, + /*dynamic=*/options.dynamic, + /*removable=*/options.removable, + /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); return constraint; @@ -424,8 +446,8 @@ absl::StatusOr GScip::AddOrConstraint( namespace { -absl::Status ValidateSOSData(const GScipSOSData &sos_data, - const std::string &name) { +absl::Status ValidateSOSData(const GScipSOSData& sos_data, + const std::string& name) { RETURN_ERROR_UNLESS(!sos_data.variables.empty()) << "Error adding SOS constraint: " << name << "."; if (!sos_data.weights.empty()) { @@ -444,45 +466,53 @@ absl::Status ValidateSOSData(const GScipSOSData &sos_data, } // namespace -absl::StatusOr GScip::AddSOS1Constraint( - const GScipSOSData &sos_data, const std::string &name, - const GScipConstraintOptions &options) { +absl::StatusOr GScip::AddSOS1Constraint( + const GScipSOSData& sos_data, const std::string& name, + const GScipConstraintOptions& options) { RETURN_IF_ERROR(ValidateSOSData(sos_data, name)); - SCIP_CONS *constraint = nullptr; - double *weights = nullptr; + SCIP_CONS* constraint = 
nullptr; + double* weights = nullptr; if (!sos_data.weights.empty()) { - weights = const_cast(sos_data.weights.data()); + weights = const_cast(sos_data.weights.data()); } RETURN_IF_SCIP_ERROR(SCIPcreateConsSOS1( scip_, &constraint, name.c_str(), sos_data.variables.size(), - const_cast(sos_data.variables.data()), weights, - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - /*dynamic=*/options.dynamic, /*removable=*/options.removable, + const_cast(sos_data.variables.data()), weights, + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*dynamic=*/options.dynamic, + /*removable=*/options.removable, /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); return constraint; } -absl::StatusOr GScip::AddSOS2Constraint( - const GScipSOSData &sos_data, const std::string &name, - const GScipConstraintOptions &options) { +absl::StatusOr GScip::AddSOS2Constraint( + const GScipSOSData& sos_data, const std::string& name, + const GScipConstraintOptions& options) { RETURN_IF_ERROR(ValidateSOSData(sos_data, name)); - SCIP_CONS *constraint = nullptr; - double *weights = nullptr; + SCIP_CONS* constraint = nullptr; + double* weights = nullptr; if (!sos_data.weights.empty()) { - weights = const_cast(sos_data.weights.data()); + weights = const_cast(sos_data.weights.data()); } RETURN_IF_SCIP_ERROR(SCIPcreateConsSOS2( scip_, &constraint, name.c_str(), sos_data.variables.size(), - const_cast(sos_data.variables.data()), weights, - /*initial=*/options.initial, /*separate=*/options.separate, - /*enforce=*/options.enforce, /*check=*/options.check, - /*propagate=*/options.propagate, /*local=*/options.local, - 
/*dynamic=*/options.dynamic, /*removable=*/options.removable, + const_cast(sos_data.variables.data()), weights, + /*initial=*/options.initial, + /*separate=*/options.separate, + /*enforce=*/options.enforce, + /*check=*/options.check, + /*propagate=*/options.propagate, + /*local=*/options.local, + /*dynamic=*/options.dynamic, + /*removable=*/options.removable, /*stickingatnode=*/options.sticking_at_node)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip_, constraint)); RETURN_IF_ERROR(MaybeKeepConstraintAlive(constraint, options)); @@ -508,36 +538,36 @@ bool GScip::ObjectiveIsMaximize() { double GScip::ObjectiveOffset() { return SCIPgetOrigObjoffset(scip_); } -absl::Status GScip::SetBranchingPriority(SCIP_VAR *var, int priority) { +absl::Status GScip::SetBranchingPriority(SCIP_VAR* var, int priority) { RETURN_IF_SCIP_ERROR(SCIPchgVarBranchPriority(scip_, var, priority)); return absl::OkStatus(); } -absl::Status GScip::SetLb(SCIP_VAR *var, double lb) { +absl::Status GScip::SetLb(SCIP_VAR* var, double lb) { lb = ScipInfClamp(lb); RETURN_IF_SCIP_ERROR(SCIPchgVarLb(scip_, var, lb)); return absl::OkStatus(); } -absl::Status GScip::SetUb(SCIP_VAR *var, double ub) { +absl::Status GScip::SetUb(SCIP_VAR* var, double ub) { ub = ScipInfClamp(ub); RETURN_IF_SCIP_ERROR(SCIPchgVarUb(scip_, var, ub)); return absl::OkStatus(); } -absl::Status GScip::SetObjCoef(SCIP_VAR *var, double obj_coef) { +absl::Status GScip::SetObjCoef(SCIP_VAR* var, double obj_coef) { RETURN_IF_SCIP_ERROR(SCIPchgVarObj(scip_, var, obj_coef)); return absl::OkStatus(); } -absl::Status GScip::SetVarType(SCIP_VAR *var, GScipVarType var_type) { +absl::Status GScip::SetVarType(SCIP_VAR* var, GScipVarType var_type) { SCIP_Bool infeasible; RETURN_IF_SCIP_ERROR( SCIPchgVarType(scip_, var, ConvertVarType(var_type), &infeasible)); return absl::OkStatus(); } -absl::Status GScip::DeleteVariable(SCIP_VAR *var) { +absl::Status GScip::DeleteVariable(SCIP_VAR* var) { SCIP_Bool did_delete; RETURN_IF_SCIP_ERROR(SCIPdelVar(scip_, var, 
&did_delete)); RETURN_ERROR_UNLESS(static_cast(did_delete)) @@ -548,8 +578,8 @@ absl::Status GScip::DeleteVariable(SCIP_VAR *var) { } absl::Status GScip::CanSafeBulkDelete( - const absl::flat_hash_set &vars) { - for (SCIP_CONS *constraint : constraints_) { + const absl::flat_hash_set& vars) { + for (SCIP_CONS* constraint : constraints_) { if (!IsConstraintLinear(constraint)) { return absl::InvalidArgumentError(absl::StrCat( "Model contains nonlinear constraint: ", Name(constraint))); @@ -558,96 +588,95 @@ absl::Status GScip::CanSafeBulkDelete( return absl::OkStatus(); } -absl::Status GScip::SafeBulkDelete( - const absl::flat_hash_set &vars) { +absl::Status GScip::SafeBulkDelete(const absl::flat_hash_set& vars) { RETURN_IF_ERROR(CanSafeBulkDelete(vars)); // Now, we can assume that all constraints are linear. - for (SCIP_CONS *constraint : constraints_) { - const absl::Span nonzeros = + for (SCIP_CONS* constraint : constraints_) { + const absl::Span nonzeros = LinearConstraintVariables(constraint); - const std::vector nonzeros_copy(nonzeros.begin(), - nonzeros.end()); - for (SCIP_VAR *var : nonzeros_copy) { + const std::vector nonzeros_copy(nonzeros.begin(), + nonzeros.end()); + for (SCIP_VAR* var : nonzeros_copy) { if (vars.contains(var)) { RETURN_IF_ERROR(SetLinearConstraintCoef(constraint, var, 0.0)); } } } - for (SCIP_VAR *const var : vars) { + for (SCIP_VAR* const var : vars) { RETURN_IF_ERROR(DeleteVariable(var)); } return absl::OkStatus(); } -double GScip::Lb(SCIP_VAR *var) { +double GScip::Lb(SCIP_VAR* var) { return ScipInfUnclamp(SCIPvarGetLbOriginal(var)); } -double GScip::Ub(SCIP_VAR *var) { +double GScip::Ub(SCIP_VAR* var) { return ScipInfUnclamp(SCIPvarGetUbOriginal(var)); } -double GScip::ObjCoef(SCIP_VAR *var) { return SCIPvarGetObj(var); } +double GScip::ObjCoef(SCIP_VAR* var) { return SCIPvarGetObj(var); } -GScipVarType GScip::VarType(SCIP_VAR *var) { +GScipVarType GScip::VarType(SCIP_VAR* var) { return ConvertVarType(SCIPvarGetType(var)); } 
-absl::string_view GScip::Name(SCIP_VAR *var) { return SCIPvarGetName(var); } +absl::string_view GScip::Name(SCIP_VAR* var) { return SCIPvarGetName(var); } -absl::string_view GScip::ConstraintType(SCIP_CONS *constraint) { +absl::string_view GScip::ConstraintType(SCIP_CONS* constraint) { return absl::string_view(SCIPconshdlrGetName(SCIPconsGetHdlr(constraint))); } -bool GScip::IsConstraintLinear(SCIP_CONS *constraint) { +bool GScip::IsConstraintLinear(SCIP_CONS* constraint) { return ConstraintType(constraint) == kLinearConstraintHandlerName; } absl::Span GScip::LinearConstraintCoefficients( - SCIP_CONS *constraint) { + SCIP_CONS* constraint) { int num_vars = SCIPgetNVarsLinear(scip_, constraint); return absl::MakeConstSpan(SCIPgetValsLinear(scip_, constraint), num_vars); } -absl::Span GScip::LinearConstraintVariables( - SCIP_CONS *constraint) { +absl::Span GScip::LinearConstraintVariables( + SCIP_CONS* constraint) { int num_vars = SCIPgetNVarsLinear(scip_, constraint); return absl::MakeConstSpan(SCIPgetVarsLinear(scip_, constraint), num_vars); } -double GScip::LinearConstraintLb(SCIP_CONS *constraint) { +double GScip::LinearConstraintLb(SCIP_CONS* constraint) { return ScipInfUnclamp(SCIPgetLhsLinear(scip_, constraint)); } -double GScip::LinearConstraintUb(SCIP_CONS *constraint) { +double GScip::LinearConstraintUb(SCIP_CONS* constraint) { return ScipInfUnclamp(SCIPgetRhsLinear(scip_, constraint)); } -absl::string_view GScip::Name(SCIP_CONS *constraint) { +absl::string_view GScip::Name(SCIP_CONS* constraint) { return SCIPconsGetName(constraint); } -absl::Status GScip::SetLinearConstraintLb(SCIP_CONS *constraint, double lb) { +absl::Status GScip::SetLinearConstraintLb(SCIP_CONS* constraint, double lb) { lb = ScipInfClamp(lb); RETURN_IF_SCIP_ERROR(SCIPchgLhsLinear(scip_, constraint, lb)); return absl::OkStatus(); } -absl::Status GScip::SetLinearConstraintUb(SCIP_CONS *constraint, double ub) { +absl::Status GScip::SetLinearConstraintUb(SCIP_CONS* constraint, double ub) { 
ub = ScipInfClamp(ub); RETURN_IF_SCIP_ERROR(SCIPchgRhsLinear(scip_, constraint, ub)); return absl::OkStatus(); } -absl::Status GScip::DeleteConstraint(SCIP_CONS *constraint) { +absl::Status GScip::DeleteConstraint(SCIP_CONS* constraint) { RETURN_IF_SCIP_ERROR(SCIPdelCons(scip_, constraint)); constraints_.erase(constraint); RETURN_IF_SCIP_ERROR(SCIPreleaseCons(scip_, &constraint)); return absl::OkStatus(); } -absl::Status GScip::SetLinearConstraintCoef(SCIP_CONS *constraint, - SCIP_VAR *var, double value) { +absl::Status GScip::SetLinearConstraintCoef(SCIP_CONS* constraint, + SCIP_VAR* var, double value) { // TODO(user): this operation is slow (linear in the nnz in the constraint). // It would be better to just use a bulk operation, but there doesn't appear // to be any? @@ -656,8 +685,8 @@ absl::Status GScip::SetLinearConstraintCoef(SCIP_CONS *constraint, } absl::StatusOr GScip::SuggestHint( - const GScipSolution &partial_solution) { - SCIP_SOL *solution; + const GScipSolution& partial_solution) { + SCIP_SOL* solution; const int scip_num_vars = SCIPgetNOrigVars(scip_); const bool is_solution_partial = partial_solution.size() < scip_num_vars; if (is_solution_partial) { @@ -668,7 +697,7 @@ absl::StatusOr GScip::SuggestHint( << "Error suggesting hint."; RETURN_IF_SCIP_ERROR(SCIPcreateSol(scip_, &solution, nullptr)); } - for (const auto &var_value_pair : partial_solution) { + for (const auto& var_value_pair : partial_solution) { RETURN_IF_SCIP_ERROR(SCIPsetSolVal(scip_, solution, var_value_pair.first, var_value_pair.second)); } @@ -676,8 +705,8 @@ absl::StatusOr GScip::SuggestHint( SCIP_Bool is_feasible; RETURN_IF_SCIP_ERROR(SCIPcheckSol( scip_, solution, /*printreason=*/false, /*completely=*/true, - /*checkbounds=*/true, /*checkintegrality=*/true, - /*checklprows=*/true, &is_feasible)); + /*checkbounds=*/true, /*checkintegrality=*/true, /*checklprows=*/true, + &is_feasible)); if (!static_cast(is_feasible)) { RETURN_IF_SCIP_ERROR(SCIPfreeSol(scip_, &solution)); return 
GScipHintResult::kInfeasible; @@ -692,8 +721,8 @@ absl::StatusOr GScip::SuggestHint( } } -absl::StatusOr GScip::Solve(const GScipParameters ¶ms, - const std::string &legacy_params) { +absl::StatusOr GScip::Solve(const GScipParameters& params, + const std::string& legacy_params) { // A four step process: // 1. Apply parameters. // 2. Solve the problem. @@ -741,7 +770,7 @@ absl::StatusOr GScip::Solve(const GScipParameters ¶ms, RETURN_IF_SCIP_ERROR(SCIPprintStatistics(scip_, nullptr)); } if (!params.detailed_solving_stats_filename().empty()) { - FILE *file = fopen(params.detailed_solving_stats_filename().c_str(), "w"); + FILE* file = fopen(params.detailed_solving_stats_filename().c_str(), "w"); if (file == nullptr) { return absl::InvalidArgumentError(absl::StrCat( "Could not open file: ", params.detailed_solving_stats_filename(), @@ -759,17 +788,17 @@ absl::StatusOr GScip::Solve(const GScipParameters ¶ms, // Step 3: Extract solution information. // Some outputs are available unconditionally, and some are only ready if at // least presolve succeeded. 
- GScipSolvingStats *stats = result.gscip_output.mutable_stats(); + GScipSolvingStats* stats = result.gscip_output.mutable_stats(); const int num_scip_solutions = SCIPgetNSols(scip_); const int num_returned_solutions = std::min(num_scip_solutions, std::max(1, params.num_solutions())); - SCIP_SOL **all_solutions = SCIPgetSols(scip_); + SCIP_SOL** all_solutions = SCIPgetSols(scip_); stats->set_best_objective(ScipInfUnclamp(SCIPgetPrimalbound(scip_))); for (int i = 0; i < num_returned_solutions; ++i) { - SCIP_SOL *scip_sol = all_solutions[i]; + SCIP_SOL* scip_sol = all_solutions[i]; const double obj_value = ScipInfUnclamp(SCIPgetSolOrigObj(scip_, scip_sol)); GScipSolution solution; - for (SCIP_VAR *v : variables_) { + for (SCIP_VAR* v : variables_) { solution[v] = SCIPgetSolVal(scip_, scip_sol, v); } result.solutions.push_back(solution); @@ -777,7 +806,7 @@ absl::StatusOr GScip::Solve(const GScipParameters ¶ms, } // Can only check for primal ray if we made it past presolve. if (stage != SCIP_STAGE_PRESOLVING && SCIPhasPrimalRay(scip_)) { - for (SCIP_VAR *v : variables_) { + for (SCIP_VAR* v : variables_) { result.primal_ray[v] = SCIPgetPrimalRayVal(scip_, v); } } @@ -801,7 +830,7 @@ absl::StatusOr GScip::Solve(const GScipParameters ¶ms, } absl::StatusOr GScip::DefaultBoolParamValue( - const std::string ¶meter_name) { + const std::string& parameter_name) { SCIP_Bool default_value; RETURN_IF_SCIP_ERROR( SCIPgetBoolParam(scip_, parameter_name.c_str(), &default_value)); @@ -809,7 +838,7 @@ absl::StatusOr GScip::DefaultBoolParamValue( } absl::StatusOr GScip::DefaultIntParamValue( - const std::string ¶meter_name) { + const std::string& parameter_name) { int default_value; RETURN_IF_SCIP_ERROR( SCIPgetIntParam(scip_, parameter_name.c_str(), &default_value)); @@ -817,7 +846,7 @@ absl::StatusOr GScip::DefaultIntParamValue( } absl::StatusOr GScip::DefaultLongParamValue( - const std::string ¶meter_name) { + const std::string& parameter_name) { SCIP_Longint result; 
RETURN_IF_SCIP_ERROR( SCIPgetLongintParam(scip_, parameter_name.c_str(), &result)); @@ -825,7 +854,7 @@ absl::StatusOr GScip::DefaultLongParamValue( } absl::StatusOr GScip::DefaultRealParamValue( - const std::string ¶meter_name) { + const std::string& parameter_name) { double result; RETURN_IF_SCIP_ERROR( SCIPgetRealParam(scip_, parameter_name.c_str(), &result)); @@ -833,7 +862,7 @@ absl::StatusOr GScip::DefaultRealParamValue( } absl::StatusOr GScip::DefaultCharParamValue( - const std::string ¶meter_name) { + const std::string& parameter_name) { char result; RETURN_IF_SCIP_ERROR( SCIPgetCharParam(scip_, parameter_name.c_str(), &result)); @@ -841,8 +870,8 @@ absl::StatusOr GScip::DefaultCharParamValue( } absl::StatusOr GScip::DefaultStringParamValue( - const std::string ¶meter_name) { - char *result; + const std::string& parameter_name) { + char* result; RETURN_IF_SCIP_ERROR( SCIPgetStringParam(scip_, parameter_name.c_str(), &result)); return std::string(result); diff --git a/ortools/gscip/gscip_ext.cc b/ortools/gscip/gscip_ext.cc index f1a268bee1..6ecd183f7d 100644 --- a/ortools/gscip/gscip_ext.cc +++ b/ortools/gscip/gscip_ext.cc @@ -20,8 +20,8 @@ namespace operations_research { namespace { -std::string MaybeExtendName(const std::string &base_name, - const std::string &extension) { +std::string MaybeExtendName(const std::string& base_name, + const std::string& extension) { if (base_name.empty()) { return ""; } @@ -30,13 +30,13 @@ std::string MaybeExtendName(const std::string &base_name, } // namespace -GScipLinearExpr::GScipLinearExpr(SCIP_VAR *variable) { terms[variable] = 1.0; } +GScipLinearExpr::GScipLinearExpr(SCIP_VAR* variable) { terms[variable] = 1.0; } GScipLinearExpr::GScipLinearExpr(double offset) : offset(offset) {} -GScipLinearExpr Difference(GScipLinearExpr left, const GScipLinearExpr &right) { +GScipLinearExpr Difference(GScipLinearExpr left, const GScipLinearExpr& right) { left.offset -= right.offset; - for (const auto &term : right.terms) { + for 
(const auto& term : right.terms) { left.terms[term.first] -= term.second; } return left; @@ -44,7 +44,7 @@ GScipLinearExpr Difference(GScipLinearExpr left, const GScipLinearExpr &right) { GScipLinearExpr Negate(GScipLinearExpr expr) { expr.offset = -expr.offset; - for (auto &term : expr.terms) { + for (auto& term : expr.terms) { term.second = -term.second; } return expr; @@ -52,27 +52,27 @@ GScipLinearExpr Negate(GScipLinearExpr expr) { // Returns the range -inf <= left.terms - right.terms <= right.offset - // left.offset -GScipLinearRange Le(const GScipLinearExpr left, const GScipLinearExpr &right) { +GScipLinearRange Le(const GScipLinearExpr left, const GScipLinearExpr& right) { GScipLinearExpr diff = Difference(left, right); GScipLinearRange result; result.lower_bound = -std::numeric_limits::infinity(); result.upper_bound = -diff.offset; - for (const auto &term : diff.terms) { + for (const auto& term : diff.terms) { result.variables.push_back(term.first); result.coefficients.push_back(term.second); } return result; } -absl::Status CreateAbs(GScip *gscip, SCIP_Var *x, SCIP_Var *abs_x, - const std::string &name) { +absl::Status CreateAbs(GScip* gscip, SCIP_Var* x, SCIP_Var* abs_x, + const std::string& name) { return CreateMaximum(gscip, GScipLinearExpr(abs_x), {GScipLinearExpr(x), Negate(GScipLinearExpr(x))}, name); } -absl::Status CreateMaximum(GScip *gscip, const GScipLinearExpr &resultant, - const std::vector &terms, - const std::string &name) { +absl::Status CreateMaximum(GScip* gscip, const GScipLinearExpr& resultant, + const std::vector& terms, + const std::string& name) { // TODO(user): it may be better to write this in terms of the disjuntive // constraint, we need to support disjunctions in gscip.h to do this. 
// @@ -81,7 +81,7 @@ absl::Status CreateMaximum(GScip *gscip, const GScipLinearExpr &resultant, // x_i <= y // z_i => y <= x_i // \sum_i z_i == 1 - std::vector indicators; + std::vector indicators; for (int i = 0; i < terms.size(); ++i) { auto z = gscip->AddVariable(0.0, 1.0, 0.0, GScipVarType::kInteger, MaybeExtendName(name, absl::StrCat("z_", i))); @@ -126,21 +126,21 @@ absl::Status CreateMaximum(GScip *gscip, const GScipLinearExpr &resultant, .status(); } -absl::Status CreateMinimum(GScip *gscip, const GScipLinearExpr &resultant, - const std::vector &terms, - const std::string &name) { +absl::Status CreateMinimum(GScip* gscip, const GScipLinearExpr& resultant, + const std::vector& terms, + const std::string& name) { std::vector negated_terms; negated_terms.reserve(terms.size()); - for (const GScipLinearExpr &e : terms) { + for (const GScipLinearExpr& e : terms) { negated_terms.push_back(Negate(e)); } return CreateMaximum(gscip, Negate(resultant), negated_terms, name); } absl::Status AddQuadraticObjectiveTerm( - GScip *gscip, std::vector quadratic_variables1, - std::vector quadratic_variables2, - std::vector quadratic_coefficients, const std::string &name) { + GScip* gscip, std::vector quadratic_variables1, + std::vector quadratic_variables2, + std::vector quadratic_coefficients, const std::string& name) { constexpr double kInf = std::numeric_limits::infinity(); auto obj_term = gscip->AddVariable(-kInf, kInf, 1.0, GScipVarType::kContinuous, @@ -168,8 +168,8 @@ absl::Status AddQuadraticObjectiveTerm( } absl::Status CreateIndicatorRange( - GScip *gscip, const GScipIndicatorRangeConstraint &indicator_range, - const std::string &name, const GScipConstraintOptions &options) { + GScip* gscip, const GScipIndicatorRangeConstraint& indicator_range, + const std::string& name, const GScipConstraintOptions& options) { if (std::isfinite(indicator_range.range.upper_bound)) { GScipIndicatorConstraint ub_constraint; ub_constraint.upper_bound = indicator_range.range.upper_bound; 
diff --git a/ortools/gscip/gscip_parameters.cc b/ortools/gscip/gscip_parameters.cc index 7fcd88e27e..583257637a 100644 --- a/ortools/gscip/gscip_parameters.cc +++ b/ortools/gscip/gscip_parameters.cc @@ -28,7 +28,7 @@ constexpr absl::string_view kRandomSeedParam = "randomization/randomseedshift"; } // namespace void SetTimeLimit(const absl::Duration time_limit, - GScipParameters *parameters) { + GScipParameters* parameters) { if (time_limit < absl::Seconds(1e20) && time_limit > absl::Duration()) { (*parameters->mutable_real_params())[std::string(kLimitsTime)] = absl::ToDoubleSeconds(time_limit); @@ -37,7 +37,7 @@ void SetTimeLimit(const absl::Duration time_limit, } } -absl::Duration TimeLimit(const GScipParameters ¶meters) { +absl::Duration TimeLimit(const GScipParameters& parameters) { if (parameters.real_params().contains(std::string(kLimitsTime))) { const double scip_limit = parameters.real_params().at(std::string(kLimitsTime)); @@ -52,73 +52,73 @@ absl::Duration TimeLimit(const GScipParameters ¶meters) { return absl::InfiniteDuration(); } -bool TimeLimitSet(const GScipParameters ¶meters) { +bool TimeLimitSet(const GScipParameters& parameters) { return parameters.real_params().contains(std::string(kLimitsTime)); } -void SetMaxNumThreads(int num_threads, GScipParameters *parameters) { +void SetMaxNumThreads(int num_threads, GScipParameters* parameters) { CHECK_GE(num_threads, 1); (*parameters->mutable_int_params())[std::string(kParallelMaxNThreads)] = num_threads; } -int MaxNumThreads(const GScipParameters ¶meters) { +int MaxNumThreads(const GScipParameters& parameters) { if (parameters.int_params().contains(std::string(kParallelMaxNThreads))) { return parameters.int_params().at(std::string(kParallelMaxNThreads)); } return 1; } -bool MaxNumThreadsSet(const GScipParameters ¶meters) { +bool MaxNumThreadsSet(const GScipParameters& parameters) { return parameters.int_params().contains(std::string(kParallelMaxNThreads)); } -void SetLogLevel(GScipParameters *parameters, 
int log_level) { +void SetLogLevel(GScipParameters* parameters, int log_level) { CHECK_GE(log_level, 0); CHECK_LE(log_level, 5); (*parameters->mutable_int_params())[std::string(kDisplayVerbLevel)] = log_level; } -int LogLevel(const GScipParameters ¶meters) { +int LogLevel(const GScipParameters& parameters) { return parameters.int_params().contains(std::string(kDisplayVerbLevel)) ? parameters.int_params().at(std::string(kDisplayVerbLevel)) : 4; } -bool LogLevelSet(const GScipParameters ¶meters) { +bool LogLevelSet(const GScipParameters& parameters) { return parameters.int_params().contains(std::string(kDisplayVerbLevel)); } -void SetOutputEnabled(GScipParameters *parameters, bool output_enabled) { +void SetOutputEnabled(GScipParameters* parameters, bool output_enabled) { if (output_enabled) { parameters->mutable_int_params()->erase(std::string(kDisplayVerbLevel)); } else { (*parameters->mutable_int_params())[std::string(kDisplayVerbLevel)] = 0; } } -bool OutputEnabled(const GScipParameters ¶meters) { +bool OutputEnabled(const GScipParameters& parameters) { return !parameters.int_params().contains(std::string(kDisplayVerbLevel)) || (parameters.int_params().at(std::string(kDisplayVerbLevel)) > 0); } -bool OutputEnabledSet(const GScipParameters ¶meters) { +bool OutputEnabledSet(const GScipParameters& parameters) { return LogLevelSet(parameters); } -void SetRandomSeed(GScipParameters *parameters, int random_seed) { +void SetRandomSeed(GScipParameters* parameters, int random_seed) { random_seed = std::max(0, random_seed); (*parameters->mutable_int_params())[std::string(kRandomSeedParam)] = random_seed; } -int RandomSeed(const GScipParameters ¶meters) { +int RandomSeed(const GScipParameters& parameters) { if (RandomSeedSet(parameters)) { return parameters.int_params().at(std::string(kRandomSeedParam)); } return -1; // Unset value. 
} -bool RandomSeedSet(const GScipParameters ¶meters) { +bool RandomSeedSet(const GScipParameters& parameters) { return parameters.int_params().contains(std::string(kRandomSeedParam)); } } // namespace operations_research diff --git a/ortools/gscip/legacy_scip_params.cc b/ortools/gscip/legacy_scip_params.cc index a976b03e2d..875bfb5848 100644 --- a/ortools/gscip/legacy_scip_params.cc +++ b/ortools/gscip/legacy_scip_params.cc @@ -29,7 +29,7 @@ namespace operations_research { absl::Status LegacyScipSetSolverSpecificParameters( - const std::string ¶meters, SCIP *scip) { + const std::string& parameters, SCIP* scip) { for (const auto parameter : absl::StrSplit(parameters, absl::ByAnyChar(",\n"), absl::SkipWhitespace())) { std::vector key_value = absl::StrSplit( @@ -47,7 +47,7 @@ absl::Status LegacyScipSetSolverSpecificParameters( absl::RemoveExtraAsciiWhitespace(&value); const double infinity = SCIPinfinity(scip); - SCIP_PARAM *param = SCIPgetParam(scip, name.c_str()); + SCIP_PARAM* param = SCIPgetParam(scip, name.c_str()); if (param == nullptr) { return absl::InvalidArgumentError( absl::StrFormat("Invalid parameter name '%s'", name)); diff --git a/ortools/linear_solver/bop_interface.cc b/ortools/linear_solver/bop_interface.cc index 826b5bedbc..57175b7f89 100644 --- a/ortools/linear_solver/bop_interface.cc +++ b/ortools/linear_solver/bop_interface.cc @@ -49,11 +49,11 @@ MPSolver::ResultStatus TranslateProblemStatus(bop::BopSolveStatus status) { class BopInterface : public MPSolverInterface { public: - explicit BopInterface(MPSolver *const solver); + explicit BopInterface(MPSolver* const solver); ~BopInterface() override; // ----- Solve ----- - MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; // ----- Model modifications and extraction ----- void Reset() override; @@ -61,13 +61,13 @@ class BopInterface : public MPSolverInterface { void SetVariableBounds(int index, double lb, 
double ub) override; void SetVariableInteger(int index, bool integer) override; void SetConstraintBounds(int index, double lb, double ub) override; - void AddRowConstraint(MPConstraint *const ct) override; - void AddVariable(MPVariable *const var) override; - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void AddRowConstraint(MPConstraint* const ct) override; + void AddVariable(MPVariable* const var) override; + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; - void ClearConstraint(MPConstraint *const constraint) override; - void SetObjectiveCoefficient(const MPVariable *const variable, + void ClearConstraint(MPConstraint* const constraint) override; + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; void SetObjectiveOffset(double value) override; void ClearObjective() override; @@ -86,13 +86,13 @@ class BopInterface : public MPSolverInterface { std::string SolverVersion() const override; bool InterruptSolve() override; - void *underlying_solver() override; + void* underlying_solver() override; void ExtractNewVariables() override; void ExtractNewConstraints() override; void ExtractObjective() override; - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; void SetDualTolerance(double value) override; @@ -100,7 +100,7 @@ class BopInterface : public MPSolverInterface { void SetScalingMode(int value) override; void SetLpAlgorithm(int value) override; bool SetSolverSpecificParametersAsString( - const std::string ¶meters) override; + const std::string& parameters) override; private: void NonIncrementalChange(); @@ -114,7 +114,7 @@ class BopInterface : public MPSolverInterface { std::atomic interrupt_solver_; }; 
-BopInterface::BopInterface(MPSolver *const solver) +BopInterface::BopInterface(MPSolver* const solver) : MPSolverInterface(solver), linear_program_(), bop_solver_(), @@ -125,7 +125,7 @@ BopInterface::BopInterface(MPSolver *const solver) BopInterface::~BopInterface() {} -MPSolver::ResultStatus BopInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus BopInterface::Solve(const MPSolverParameters& param) { // Check whenever the solve has already been stopped by the user. if (interrupt_solver_) { Reset(); @@ -156,7 +156,7 @@ MPSolver::ResultStatus BopInterface::Solve(const MPSolverParameters ¶m) { << "Filling the missing positions with zeros..."; } initial_solution.assign(glop::ColIndex(num_vars), glop::Fractional(0.0)); - for (const std::pair &p : + for (const std::pair& p : solver_->solution_hint_) { initial_solution[glop::ColIndex(p.first->index())] = glop::Fractional(p.second); @@ -188,7 +188,7 @@ MPSolver::ResultStatus BopInterface::Solve(const MPSolverParameters ¶m) { const size_t num_vars = solver_->variables_.size(); column_status_.resize(num_vars, MPSolver::FREE); for (int var_id = 0; var_id < num_vars; ++var_id) { - MPVariable *const var = solver_->variables_[var_id]; + MPVariable* const var = solver_->variables_[var_id]; const glop::ColIndex lp_solver_var_id(var->index()); const glop::Fractional solution_value = bop_solver_.variable_values()[lp_solver_var_id]; @@ -225,25 +225,25 @@ void BopInterface::SetConstraintBounds(int index, double lb, double ub) { NonIncrementalChange(); } -void BopInterface::AddRowConstraint(MPConstraint *const ct) { +void BopInterface::AddRowConstraint(MPConstraint* const ct) { NonIncrementalChange(); } -void BopInterface::AddVariable(MPVariable *const var) { +void BopInterface::AddVariable(MPVariable* const var) { NonIncrementalChange(); } -void BopInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void BopInterface::SetCoefficient(MPConstraint* const constraint, + const 
MPVariable* const variable, double new_value, double old_value) { NonIncrementalChange(); } -void BopInterface::ClearConstraint(MPConstraint *const constraint) { +void BopInterface::ClearConstraint(MPConstraint* const constraint) { NonIncrementalChange(); } -void BopInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void BopInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { NonIncrementalChange(); } @@ -291,7 +291,7 @@ bool BopInterface::InterruptSolve() { return true; } -void *BopInterface::underlying_solver() { return &bop_solver_; } +void* BopInterface::underlying_solver() { return &bop_solver_; } // TODO(user): remove duplication with GlopInterface. void BopInterface::ExtractNewVariables() { @@ -300,7 +300,7 @@ void BopInterface::ExtractNewVariables() { const glop::ColIndex num_cols(solver_->variables_.size()); for (glop::ColIndex col(last_variable_index_); col < num_cols; ++col) { - MPVariable *const var = solver_->variables_[col.value()]; + MPVariable* const var = solver_->variables_[col.value()]; const glop::ColIndex new_col = linear_program_.CreateNewVariable(); DCHECK_EQ(new_col, col); set_variable_as_extracted(col.value(), true); @@ -318,7 +318,7 @@ void BopInterface::ExtractNewConstraints() { const glop::RowIndex num_rows(solver_->constraints_.size()); for (glop::RowIndex row(0); row < num_rows; ++row) { - MPConstraint *const ct = solver_->constraints_[row.value()]; + MPConstraint* const ct = solver_->constraints_[row.value()]; set_constraint_as_extracted(row.value(), true); const double lb = ct->lb(); @@ -327,7 +327,7 @@ void BopInterface::ExtractNewConstraints() { DCHECK_EQ(new_row, row); linear_program_.SetConstraintBounds(row, lb, ub); - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); DCHECK(variable_is_extracted(var_index)); const glop::ColIndex col(var_index); @@ -340,7 +340,7 @@ void 
BopInterface::ExtractNewConstraints() { // TODO(user): remove duplication with GlopInterface. void BopInterface::ExtractObjective() { linear_program_.SetObjectiveOffset(solver_->Objective().offset()); - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int var_index = entry.first->index(); const glop::ColIndex col(var_index); const double coeff = entry.second; @@ -348,7 +348,7 @@ void BopInterface::ExtractObjective() { } } -void BopInterface::SetParameters(const MPSolverParameters ¶m) { +void BopInterface::SetParameters(const MPSolverParameters& param) { parameters_.Clear(); SetCommonParameters(param); } @@ -376,7 +376,7 @@ void BopInterface::SetPresolveMode(int value) { } bool BopInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { const bool ok = google::protobuf::TextFormat::MergeFromString(parameters, ¶meters_); bop_solver_.SetParameters(parameters_); @@ -389,7 +389,7 @@ void BopInterface::NonIncrementalChange() { } // Register BOP in the global linear solver factory. -MPSolverInterface *BuildBopInterface(MPSolver *const solver) { +MPSolverInterface* BuildBopInterface(MPSolver* const solver) { return new BopInterface(solver); } diff --git a/ortools/linear_solver/cbc_interface.cc b/ortools/linear_solver/cbc_interface.cc index c0408c8aa8..b796fc01d6 100644 --- a/ortools/linear_solver/cbc_interface.cc +++ b/ortools/linear_solver/cbc_interface.cc @@ -45,7 +45,7 @@ namespace operations_research { class CBCInterface : public MPSolverInterface { public: // Constructor that takes a name for the underlying glpk solver. - explicit CBCInterface(MPSolver *const solver); + explicit CBCInterface(MPSolver* const solver); ~CBCInterface() override; // ----- Reset ----- @@ -64,7 +64,7 @@ class CBCInterface : public MPSolverInterface { // ----- Solve ----- // Solve the problem using the parameter values specified. 
- MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; // TODO(user): separate the solve from the model extraction. virtual void ExtractModel() {} @@ -80,22 +80,22 @@ class CBCInterface : public MPSolverInterface { void SetConstraintBounds(int row_index, double lb, double ub) override; // Add constraint incrementally. - void AddRowConstraint(MPConstraint *const ct) override; + void AddRowConstraint(MPConstraint* const ct) override; // Add variable incrementally. - void AddVariable(MPVariable *const var) override; + void AddVariable(MPVariable* const var) override; // Change a coefficient in a constraint. - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override { sync_status_ = MUST_RELOAD; } // Clear a constraint from all its terms. - void ClearConstraint(MPConstraint *const constraint) override { + void ClearConstraint(MPConstraint* const constraint) override { sync_status_ = MUST_RELOAD; } // Change a coefficient in the linear objective. - void SetObjectiveCoefficient(const MPVariable *const variable, + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override { sync_status_ = MUST_RELOAD; } @@ -131,7 +131,7 @@ class CBCInterface : public MPSolverInterface { // TODO(user): Maybe we should expose the CbcModel build from osi_ // instead, but a new CbcModel is built every time Solve is called, // so it is not possible right now. 
- void *underlying_solver() override { return reinterpret_cast(&osi_); } + void* underlying_solver() override { return reinterpret_cast(&osi_); } private: // Reset best objective bound to +/- infinity depending on the @@ -139,7 +139,7 @@ class CBCInterface : public MPSolverInterface { void ResetBestObjectiveBound(); // Set all parameters in the underlying solver. - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; // Set each parameter in the underlying solver. void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; @@ -161,7 +161,7 @@ class CBCInterface : public MPSolverInterface { // ----- Solver ----- // Creates a LP/MIP instance with the specified name and minimization objective. -CBCInterface::CBCInterface(MPSolver *const solver) +CBCInterface::CBCInterface(MPSolver* const solver) : MPSolverInterface(solver), iterations_(0), nodes_(0), @@ -235,17 +235,17 @@ void CBCInterface::SetConstraintBounds(int index, double lb, double ub) { } } -void CBCInterface::AddRowConstraint(MPConstraint *const ct) { +void CBCInterface::AddRowConstraint(MPConstraint* const ct) { sync_status_ = MUST_RELOAD; } -void CBCInterface::AddVariable(MPVariable *const var) { +void CBCInterface::AddVariable(MPVariable* const var) { sync_status_ = MUST_RELOAD; } // Solve the LP/MIP. Returns true only if the optimal solution was revealed. // Returns the status of the search. -MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters& param) { // CBC requires unique variable and constraint names. By using Lookup*, we // generate variable and constraint indices and ensure the duplicate name // crash will happen here with a readable error message. 
@@ -286,7 +286,7 @@ MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters ¶m) { solver_->Objective().offset(), "dummy", false); const int nb_vars = solver_->variables_.size(); for (int i = 0; i < nb_vars; ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; set_variable_as_extracted(i, true); const double obj_coeff = solver_->Objective().GetCoefficient(var); if (var->name().empty()) { @@ -301,7 +301,7 @@ MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters ¶m) { // Define constraints. int max_row_length = 0; for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; set_constraint_as_extracted(i, true); if (ct->coefficients_.size() > max_row_length) { max_row_length = ct->coefficients_.size(); @@ -311,10 +311,10 @@ MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters ¶m) { std::unique_ptr coefs(new double[max_row_length]); for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; const int size = ct->coefficients_.size(); int j = 0; - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int index = MPSolverVarIndexToCbcVarIndex(entry.first->index()); indices[j] = index; coefs[j] = entry.second; @@ -444,11 +444,11 @@ MPSolver::ResultStatus CBCInterface::Solve(const MPSolverParameters ¶m) { // Get the results objective_value_ = model.getObjValue(); VLOG(1) << "objective=" << objective_value_; - const double *const values = model.bestSolution(); + const double* const values = model.bestSolution(); if (values != nullptr) { // if optimal or feasible solution is found. 
for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; const int var_index = MPSolverVarIndexToCbcVarIndex(var->index()); const double val = values[var_index]; var->set_solution_value(val); @@ -496,7 +496,7 @@ double CBCInterface::best_objective_bound() const { // of the code. I will improve the parameter support only if there is // a relevant use case. -void CBCInterface::SetParameters(const MPSolverParameters ¶m) { +void CBCInterface::SetParameters(const MPSolverParameters& param) { SetCommonParameters(param); SetMIPParameters(param); } @@ -541,7 +541,7 @@ void CBCInterface::SetLpAlgorithm(int value) { SetUnsupportedIntegerParam(MPSolverParameters::LP_ALGORITHM); } -MPSolverInterface *BuildCBCInterface(MPSolver *const solver) { +MPSolverInterface* BuildCBCInterface(MPSolver* const solver) { return new CBCInterface(solver); } diff --git a/ortools/linear_solver/clp_interface.cc b/ortools/linear_solver/clp_interface.cc index 034f766508..6535bd12b6 100644 --- a/ortools/linear_solver/clp_interface.cc +++ b/ortools/linear_solver/clp_interface.cc @@ -42,7 +42,7 @@ namespace operations_research { class CLPInterface : public MPSolverInterface { public: // Constructor that takes a name for the underlying CLP solver. - explicit CLPInterface(MPSolver *const solver); + explicit CLPInterface(MPSolver* const solver); ~CLPInterface() override; // Sets the optimization direction (min/max). @@ -50,7 +50,7 @@ class CLPInterface : public MPSolverInterface { // ----- Solve ----- // Solve the problem using the parameter values specified. 
- MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; // ----- Model modifications and extraction ----- // Resets extracted model @@ -62,18 +62,18 @@ class CLPInterface : public MPSolverInterface { void SetConstraintBounds(int row_index, double lb, double ub) override; // Add constraint incrementally. - void AddRowConstraint(MPConstraint *const ct) override; + void AddRowConstraint(MPConstraint* const ct) override; // Add variable incrementally. - void AddVariable(MPVariable *const var) override; + void AddVariable(MPVariable* const var) override; // Change a coefficient in a constraint. - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; // Clear a constraint from all its terms. - void ClearConstraint(MPConstraint *const constraint) override; + void ClearConstraint(MPConstraint* const constraint) override; // Change a coefficient in the linear objective. - void SetObjectiveCoefficient(const MPVariable *const variable, + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; // Change the constant term in the linear objective. void SetObjectiveOffset(double offset) override; @@ -105,8 +105,8 @@ class CLPInterface : public MPSolverInterface { std::string SolverVersion() const override { return "Clp " CLP_VERSION; } - void *underlying_solver() override { - return reinterpret_cast(clp_.get()); + void* underlying_solver() override { + return reinterpret_cast(clp_.get()); } private: @@ -114,7 +114,7 @@ class CLPInterface : public MPSolverInterface { void CreateDummyVariableForEmptyConstraints(); // Set all parameters in the underlying solver. 
- void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; // Reset to their default value the parameters for which CLP has a // stateful API. To be called after the solve so that the next solve // starts from a clean parameter state. @@ -138,7 +138,7 @@ class CLPInterface : public MPSolverInterface { // ----- Solver ----- // Creates a LP/MIP instance with the specified name and minimization objective. -CLPInterface::CLPInterface(MPSolver *const solver) +CLPInterface::CLPInterface(MPSolver* const solver) : MPSolverInterface(solver), clp_(new ClpSimplex), options_(new ClpSolve) { clp_->setStrParam(ClpProbName, solver_->name_); clp_->setOptimizationDirection(1); @@ -191,8 +191,8 @@ void CLPInterface::SetConstraintBounds(int index, double lb, double ub) { } } -void CLPInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void CLPInterface::SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) { InvalidateSolutionSynchronization(); if (constraint_is_extracted(constraint->index()) && @@ -212,11 +212,11 @@ void CLPInterface::SetCoefficient(MPConstraint *const constraint, } // Not cached -void CLPInterface::ClearConstraint(MPConstraint *const constraint) { +void CLPInterface::ClearConstraint(MPConstraint* const constraint) { InvalidateSolutionSynchronization(); // Constraint may not have been extracted yet. 
if (!constraint_is_extracted(constraint->index())) return; - for (const auto &entry : constraint->coefficients_) { + for (const auto& entry : constraint->coefficients_) { DCHECK(variable_is_extracted(entry.first->index())); clp_->modifyCoefficient(constraint->index(), MPSolverVarIndexToClpVarIndex(entry.first->index()), @@ -225,7 +225,7 @@ void CLPInterface::ClearConstraint(MPConstraint *const constraint) { } // Cached -void CLPInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void CLPInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { InvalidateSolutionSynchronization(); if (variable_is_extracted(variable->index())) { @@ -248,7 +248,7 @@ void CLPInterface::SetObjectiveOffset(double offset) { void CLPInterface::ClearObjective() { InvalidateSolutionSynchronization(); // Clear linear terms - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int mpsolver_var_index = entry.first->index(); // Variable may have not been extracted yet. if (!variable_is_extracted(mpsolver_var_index)) { @@ -262,11 +262,11 @@ void CLPInterface::ClearObjective() { clp_->setObjectiveOffset(0.0); } -void CLPInterface::AddRowConstraint(MPConstraint *const ct) { +void CLPInterface::AddRowConstraint(MPConstraint* const ct) { sync_status_ = MUST_RELOAD; } -void CLPInterface::AddVariable(MPVariable *const var) { +void CLPInterface::AddVariable(MPVariable* const var) { sync_status_ = MUST_RELOAD; } @@ -290,7 +290,7 @@ void CLPInterface::ExtractNewVariables() { clp_->resize(0, total_num_vars + 1); CreateDummyVariableForEmptyConstraints(); for (int i = 0; i < total_num_vars; ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; set_variable_as_extracted(i, true); if (!var->name().empty()) { std::string name = var->name(); @@ -306,7 +306,7 @@ void CLPInterface::ExtractNewVariables() { // clp_->addColumns. 
But this is good enough for now. // Create new variables. for (int j = last_variable_index_; j < total_num_vars; ++j) { - MPVariable *const var = solver_->variables_[j]; + MPVariable* const var = solver_->variables_[j]; DCHECK(!variable_is_extracted(j)); set_variable_as_extracted(j, true); // The true objective coefficient will be set later in ExtractObjective. @@ -320,9 +320,9 @@ void CLPInterface::ExtractNewVariables() { } // Add new variables to existing constraints. for (int i = 0; i < last_constraint_index_; i++) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; const int ct_index = ct->index(); - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int mpsolver_var_index = entry.first->index(); DCHECK(variable_is_extracted(mpsolver_var_index)); if (mpsolver_var_index >= last_variable_index_) { @@ -343,7 +343,7 @@ void CLPInterface::ExtractNewConstraints() { // Find the length of the longest row. int max_row_length = 0; for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; DCHECK(!constraint_is_extracted(ct->index())); set_constraint_as_extracted(ct->index(), true); if (ct->coefficients_.size() > max_row_length) { @@ -357,7 +357,7 @@ void CLPInterface::ExtractNewConstraints() { CoinBuild build_object; // Add each new constraint. 
for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; DCHECK(constraint_is_extracted(ct->index())); int size = ct->coefficients_.size(); if (size == 0) { @@ -367,7 +367,7 @@ void CLPInterface::ExtractNewConstraints() { size = 1; } int j = 0; - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int mpsolver_var_index = entry.first->index(); DCHECK(variable_is_extracted(mpsolver_var_index)); indices[j] = MPSolverVarIndexToClpVarIndex(mpsolver_var_index); @@ -379,7 +379,7 @@ void CLPInterface::ExtractNewConstraints() { // Add and name the rows. clp_->addRows(build_object); for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; if (!ct->name().empty()) { std::string name = ct->name(); clp_->setRowName(ct->index(), name); @@ -391,7 +391,7 @@ void CLPInterface::ExtractNewConstraints() { void CLPInterface::ExtractObjective() { // Linear objective: set objective coefficients for all variables // (some might have been modified) - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { clp_->setObjectiveCoefficient( MPSolverVarIndexToClpVarIndex(entry.first->index()), entry.second); } @@ -402,7 +402,7 @@ void CLPInterface::ExtractObjective() { } // Extracts model and solve the LP/MIP. Returns the status of the search. 
-MPSolver::ResultStatus CLPInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus CLPInterface::Solve(const MPSolverParameters& param) { try { WallTimer timer; timer.Start(); @@ -479,10 +479,10 @@ MPSolver::ResultStatus CLPInterface::Solve(const MPSolverParameters ¶m) { // Get the results objective_value_ = clp_->objectiveValue(); VLOG(1) << "objective=" << objective_value_; - const double *const values = clp_->getColSolution(); - const double *const reduced_costs = clp_->getReducedCost(); + const double* const values = clp_->getColSolution(); + const double* const reduced_costs = clp_->getReducedCost(); for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; const int clp_var_index = MPSolverVarIndexToClpVarIndex(var->index()); const double val = values[clp_var_index]; var->set_solution_value(val); @@ -491,9 +491,9 @@ MPSolver::ResultStatus CLPInterface::Solve(const MPSolverParameters ¶m) { var->set_reduced_cost(reduced_cost); VLOG(4) << var->name() << ": reduced cost = " << reduced_cost; } - const double *const dual_values = clp_->getRowPrice(); + const double* const dual_values = clp_->getRowPrice(); for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; const int constraint_index = ct->index(); const double dual_value = dual_values[constraint_index]; ct->set_dual_value(dual_value); @@ -504,7 +504,7 @@ MPSolver::ResultStatus CLPInterface::Solve(const MPSolverParameters ¶m) { ResetParameters(); sync_status_ = SOLUTION_SYNCHRONIZED; return result_status_; - } catch (CoinError &e) { + } catch (CoinError& e) { LOG(WARNING) << "Caught exception in Coin LP: " << e.message(); result_status_ = MPSolver::ABNORMAL; return result_status_; @@ -567,7 +567,7 @@ MPSolver::BasisStatus CLPInterface::column_status(int variable_index) const { // ------ 
Parameters ------ -void CLPInterface::SetParameters(const MPSolverParameters ¶m) { +void CLPInterface::SetParameters(const MPSolverParameters& param) { SetCommonParameters(param); } @@ -630,7 +630,7 @@ void CLPInterface::SetLpAlgorithm(int value) { } } -MPSolverInterface *BuildCLPInterface(MPSolver *const solver) { +MPSolverInterface* BuildCLPInterface(MPSolver* const solver) { return new CLPInterface(solver); } diff --git a/ortools/linear_solver/glop_interface.cc b/ortools/linear_solver/glop_interface.cc index db257d0811..d9dd01edcd 100644 --- a/ortools/linear_solver/glop_interface.cc +++ b/ortools/linear_solver/glop_interface.cc @@ -33,11 +33,11 @@ namespace {} // Anonymous namespace class GLOPInterface : public MPSolverInterface { public: - explicit GLOPInterface(MPSolver *const solver); + explicit GLOPInterface(MPSolver* const solver); ~GLOPInterface() override; // ----- Solve ----- - MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; bool InterruptSolve() override; // ----- Model modifications and extraction ----- @@ -46,13 +46,13 @@ class GLOPInterface : public MPSolverInterface { void SetVariableBounds(int index, double lb, double ub) override; void SetVariableInteger(int index, bool integer) override; void SetConstraintBounds(int index, double lb, double ub) override; - void AddRowConstraint(MPConstraint *const ct) override; - void AddVariable(MPVariable *const var) override; - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void AddRowConstraint(MPConstraint* const ct) override; + void AddVariable(MPVariable* const var) override; + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; - void ClearConstraint(MPConstraint *const constraint) override; - void SetObjectiveCoefficient(const MPVariable *const variable, + void 
ClearConstraint(MPConstraint* const constraint) override; + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; void SetObjectiveOffset(double value) override; void ClearObjective() override; @@ -70,17 +70,17 @@ class GLOPInterface : public MPSolverInterface { bool IsMIP() const override; std::string SolverVersion() const override; - void *underlying_solver() override; + void* underlying_solver() override; void ExtractNewVariables() override; void ExtractNewConstraints() override; void ExtractObjective() override; void SetStartingLpBasis( - const std::vector &variable_statuses, - const std::vector &constraint_statuses) override; + const std::vector& variable_statuses, + const std::vector& constraint_statuses) override; - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; void SetDualTolerance(double value) override; @@ -88,7 +88,7 @@ class GLOPInterface : public MPSolverInterface { void SetScalingMode(int value) override; void SetLpAlgorithm(int value) override; bool SetSolverSpecificParametersAsString( - const std::string ¶meters) override; + const std::string& parameters) override; private: void NonIncrementalChange(); @@ -101,7 +101,7 @@ class GLOPInterface : public MPSolverInterface { std::atomic interrupt_solver_; }; -GLOPInterface::GLOPInterface(MPSolver *const solver) +GLOPInterface::GLOPInterface(MPSolver* const solver) : MPSolverInterface(solver), linear_program_(), lp_solver_(), @@ -112,7 +112,7 @@ GLOPInterface::GLOPInterface(MPSolver *const solver) GLOPInterface::~GLOPInterface() {} -MPSolver::ResultStatus GLOPInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus GLOPInterface::Solve(const MPSolverParameters& param) { // Re-extract the problem from scratch. 
We don't support modifying the // LinearProgram in sync with changes done in the MPSolver. ResetExtractionInformation(); @@ -148,7 +148,7 @@ MPSolver::ResultStatus GLOPInterface::Solve(const MPSolverParameters ¶m) { const size_t num_vars = solver_->variables_.size(); column_status_.resize(num_vars, MPSolver::FREE); for (int var_id = 0; var_id < num_vars; ++var_id) { - MPVariable *const var = solver_->variables_[var_id]; + MPVariable* const var = solver_->variables_[var_id]; const glop::ColIndex lp_solver_var_id(var->index()); const glop::Fractional solution_value = @@ -167,7 +167,7 @@ MPSolver::ResultStatus GLOPInterface::Solve(const MPSolverParameters ¶m) { const size_t num_constraints = solver_->constraints_.size(); row_status_.resize(num_constraints, MPSolver::FREE); for (int ct_id = 0; ct_id < num_constraints; ++ct_id) { - MPConstraint *const ct = solver_->constraints_[ct_id]; + MPConstraint* const ct = solver_->constraints_[ct_id]; const glop::RowIndex lp_solver_ct_id(ct->index()); const glop::Fractional dual_value = @@ -209,25 +209,25 @@ void GLOPInterface::SetConstraintBounds(int index, double lb, double ub) { NonIncrementalChange(); } -void GLOPInterface::AddRowConstraint(MPConstraint *const ct) { +void GLOPInterface::AddRowConstraint(MPConstraint* const ct) { NonIncrementalChange(); } -void GLOPInterface::AddVariable(MPVariable *const var) { +void GLOPInterface::AddVariable(MPVariable* const var) { NonIncrementalChange(); } -void GLOPInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void GLOPInterface::SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) { NonIncrementalChange(); } -void GLOPInterface::ClearConstraint(MPConstraint *const constraint) { +void GLOPInterface::ClearConstraint(MPConstraint* const constraint) { NonIncrementalChange(); } -void GLOPInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void 
GLOPInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { NonIncrementalChange(); } @@ -269,7 +269,7 @@ std::string GLOPInterface::SolverVersion() const { return "Glop-0.0"; } -void *GLOPInterface::underlying_solver() { return &lp_solver_; } +void* GLOPInterface::underlying_solver() { return &lp_solver_; } void GLOPInterface::ExtractNewVariables() { DCHECK_EQ(0, last_variable_index_); @@ -277,7 +277,7 @@ void GLOPInterface::ExtractNewVariables() { const glop::ColIndex num_cols(solver_->variables_.size()); for (glop::ColIndex col(last_variable_index_); col < num_cols; ++col) { - MPVariable *const var = solver_->variables_[col.value()]; + MPVariable* const var = solver_->variables_[col.value()]; const glop::ColIndex new_col = linear_program_.CreateNewVariable(); DCHECK_EQ(new_col, col); set_variable_as_extracted(col.value(), true); @@ -290,7 +290,7 @@ void GLOPInterface::ExtractNewConstraints() { const glop::RowIndex num_rows(solver_->constraints_.size()); for (glop::RowIndex row(0); row < num_rows; ++row) { - MPConstraint *const ct = solver_->constraints_[row.value()]; + MPConstraint* const ct = solver_->constraints_[row.value()]; set_constraint_as_extracted(row.value(), true); const double lb = ct->lb(); @@ -299,7 +299,7 @@ void GLOPInterface::ExtractNewConstraints() { DCHECK_EQ(new_row, row); linear_program_.SetConstraintBounds(row, lb, ub); - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); DCHECK(variable_is_extracted(var_index)); const glop::ColIndex col(var_index); @@ -311,7 +311,7 @@ void GLOPInterface::ExtractNewConstraints() { void GLOPInterface::ExtractObjective() { linear_program_.SetObjectiveOffset(solver_->Objective().offset()); - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int var_index = entry.first->index(); const glop::ColIndex 
col(var_index); const double coeff = entry.second; @@ -320,20 +320,20 @@ void GLOPInterface::ExtractObjective() { } void GLOPInterface::SetStartingLpBasis( - const std::vector &variable_statuses, - const std::vector &constraint_statuses) { + const std::vector& variable_statuses, + const std::vector& constraint_statuses) { glop::VariableStatusRow glop_variable_statuses; glop::ConstraintStatusColumn glop_constraint_statuses; - for (const MPSolver::BasisStatus &status : variable_statuses) { + for (const MPSolver::BasisStatus& status : variable_statuses) { glop_variable_statuses.push_back(MPSolverToGlopVariableStatus(status)); } - for (const MPSolver::BasisStatus &status : constraint_statuses) { + for (const MPSolver::BasisStatus& status : constraint_statuses) { glop_constraint_statuses.push_back(MPSolverToGlopConstraintStatus(status)); } lp_solver_.SetInitialBasis(glop_variable_statuses, glop_constraint_statuses); } -void GLOPInterface::SetParameters(const MPSolverParameters ¶m) { +void GLOPInterface::SetParameters(const MPSolverParameters& param) { parameters_.Clear(); SetCommonParameters(param); SetScalingMode(param.GetIntegerParam(MPSolverParameters::SCALING)); @@ -412,7 +412,7 @@ void GLOPInterface::SetLpAlgorithm(int value) { } bool GLOPInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { // NOTE(user): Android build uses protocol buffers in lite mode, and // parsing data from text format is not supported there. To allow solver // specific parameters from string on Android, we first need to switch to @@ -430,7 +430,7 @@ void GLOPInterface::NonIncrementalChange() { } // Register GLOP in the global linear solver factory. 
-MPSolverInterface *BuildGLOPInterface(MPSolver *const solver) { +MPSolverInterface* BuildGLOPInterface(MPSolver* const solver) { return new GLOPInterface(solver); } diff --git a/ortools/linear_solver/glpk_interface.cc b/ortools/linear_solver/glpk_interface.cc index 91471eab1d..9aa4ebfd04 100644 --- a/ortools/linear_solver/glpk_interface.cc +++ b/ortools/linear_solver/glpk_interface.cc @@ -59,10 +59,10 @@ class GLPKInformation { }; // Function to be called in the GLPK callback -void GLPKGatherInformationCallback(glp_tree *tree, void *info) { +void GLPKGatherInformationCallback(glp_tree* tree, void* info) { CHECK(tree != nullptr); CHECK(info != nullptr); - GLPKInformation *glpk_info = reinterpret_cast(info); + GLPKInformation* glpk_info = reinterpret_cast(info); switch (glp_ios_reason(tree)) { // The best bound and the number of nodes change only when GLPK // branches, generates cuts or finds an integer solution. @@ -93,7 +93,7 @@ int MPSolverIndexToGlpkIndex(int index) { return index + 1; } class GLPKInterface : public MPSolverInterface { public: // Constructor that takes a name for the underlying glpk solver. - GLPKInterface(MPSolver *const solver, bool mip); + GLPKInterface(MPSolver* const solver, bool mip); ~GLPKInterface() override; // Sets the optimization direction (min/max). @@ -101,7 +101,7 @@ class GLPKInterface : public MPSolverInterface { // ----- Solve ----- // Solve the problem using the parameter values specified. - MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; // ----- Model modifications and extraction ----- // Resets extracted model @@ -114,17 +114,17 @@ class GLPKInterface : public MPSolverInterface { double ub) override; // Add Constraint incrementally. - void AddRowConstraint(MPConstraint *const ct) override; + void AddRowConstraint(MPConstraint* const ct) override; // Add variable incrementally. 
- void AddVariable(MPVariable *const var) override; + void AddVariable(MPVariable* const var) override; // Change a coefficient in a constraint. - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; // Clear a constraint from all its terms. - void ClearConstraint(MPConstraint *const constraint) override; + void ClearConstraint(MPConstraint* const constraint) override; // Change a coefficient in the linear objective - void SetObjectiveCoefficient(const MPVariable *const variable, + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; // Change the constant term in the linear objective. void SetObjectiveOffset(double value) override; @@ -163,16 +163,16 @@ class GLPKInterface : public MPSolverInterface { return absl::StrFormat("GLPK %s", glp_version()); } - void *underlying_solver() override { return reinterpret_cast(lp_); } + void* underlying_solver() override { return reinterpret_cast(lp_); } double ComputeExactConditionNumber() const override; private: // Configure the solver's parameters. - void ConfigureGLPKParameters(const MPSolverParameters ¶m); + void ConfigureGLPKParameters(const MPSolverParameters& param); // Set all parameters in the underlying solver. - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; // Set each parameter in the underlying solver. 
void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; @@ -182,8 +182,8 @@ class GLPKInterface : public MPSolverInterface { void SetLpAlgorithm(int value) override; void ExtractOldConstraints(); - void ExtractOneConstraint(MPConstraint *const constraint, int *const indices, - double *const coefs); + void ExtractOneConstraint(MPConstraint* const constraint, int* const indices, + double* const coefs); // Transforms basis status from GLPK integer code to MPSolver::BasisStatus. MPSolver::BasisStatus TransformGLPKBasisStatus(int glpk_basis_status) const; @@ -191,17 +191,17 @@ class GLPKInterface : public MPSolverInterface { // The L1-norm |A| is defined as max_j sum_i |a_ij| // This method is available only for continuous problems. double ComputeScaledBasisL1Norm(int num_rows, int num_cols, - double *row_scaling_factor, - double *column_scaling_factor) const; + double* row_scaling_factor, + double* column_scaling_factor) const; // Computes the L1-norm of the inverse of the current scaled // basis. // This method is available only for continuous problems. double ComputeInverseScaledBasisL1Norm(int num_rows, int num_cols, - double *row_scaling_factor, - double *column_scaling_factor) const; + double* row_scaling_factor, + double* column_scaling_factor) const; - glp_prob *lp_; + glp_prob* lp_; bool mip_; // Parameters @@ -212,7 +212,7 @@ class GLPKInterface : public MPSolverInterface { }; // Creates a LP/MIP instance with the specified name and minimization objective. 
-GLPKInterface::GLPKInterface(MPSolver *const solver, bool mip) +GLPKInterface::GLPKInterface(MPSolver* const solver, bool mip) : MPSolverInterface(solver), lp_(nullptr), mip_(mip) { lp_ = glp_create_prob(); glp_set_prob_name(lp_, solver_->name_.c_str()); @@ -314,8 +314,8 @@ void GLPKInterface::SetConstraintBounds(int mpsolver_constraint_index, } } -void GLPKInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void GLPKInterface::SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) { InvalidateSolutionSynchronization(); // GLPK does not allow to modify one coefficient at a time, so we @@ -333,7 +333,7 @@ void GLPKInterface::SetCoefficient(MPConstraint *const constraint, } // Not cached -void GLPKInterface::ClearConstraint(MPConstraint *const constraint) { +void GLPKInterface::ClearConstraint(MPConstraint* const constraint) { InvalidateSolutionSynchronization(); // Constraint may have not been extracted yet. if (constraint_is_extracted(constraint->index())) { @@ -343,7 +343,7 @@ void GLPKInterface::ClearConstraint(MPConstraint *const constraint) { } // Cached -void GLPKInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void GLPKInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { sync_status_ = MUST_RELOAD; } @@ -356,7 +356,7 @@ void GLPKInterface::SetObjectiveOffset(double value) { // Clear objective of all its terms (linear) void GLPKInterface::ClearObjective() { InvalidateSolutionSynchronization(); - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int mpsolver_var_index = entry.first->index(); // Variable may have not been extracted yet. 
if (!variable_is_extracted(mpsolver_var_index)) { @@ -369,11 +369,11 @@ void GLPKInterface::ClearObjective() { glp_set_obj_coef(lp_, 0, 0.0); } -void GLPKInterface::AddRowConstraint(MPConstraint *const ct) { +void GLPKInterface::AddRowConstraint(MPConstraint* const ct) { sync_status_ = MUST_RELOAD; } -void GLPKInterface::AddVariable(MPVariable *const var) { +void GLPKInterface::AddVariable(MPVariable* const var) { sync_status_ = MUST_RELOAD; } @@ -383,7 +383,7 @@ void GLPKInterface::ExtractNewVariables() { if (total_num_vars > last_variable_index_) { glp_add_cols(lp_, total_num_vars - last_variable_index_); for (int j = last_variable_index_; j < solver_->variables_.size(); ++j) { - MPVariable *const var = solver_->variables_[j]; + MPVariable* const var = solver_->variables_[j]; set_variable_as_extracted(j, true); if (!var->name().empty()) { glp_set_col_name(lp_, MPSolverIndexToGlpkIndex(j), var->name().c_str()); @@ -410,7 +410,7 @@ void GLPKInterface::ExtractOldConstraints() { std::unique_ptr coefs(new double[max_constraint_size + 1]); for (int i = 0; i < last_constraint_index_; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; DCHECK(constraint_is_extracted(i)); const int size = ct->coefficients_.size(); if (size == 0) { @@ -426,12 +426,12 @@ void GLPKInterface::ExtractOldConstraints() { // Extract one constraint. Arrays indices and coefs must be // preallocated to have enough space to contain the constraint's // coefficients. -void GLPKInterface::ExtractOneConstraint(MPConstraint *const constraint, - int *const indices, - double *const coefs) { +void GLPKInterface::ExtractOneConstraint(MPConstraint* const constraint, + int* const indices, + double* const coefs) { // GLPK convention is to start indexing at 1. 
int k = 1; - for (const auto &entry : constraint->coefficients_) { + for (const auto& entry : constraint->coefficients_) { DCHECK(variable_is_extracted(entry.first->index())); indices[k] = MPSolverIndexToGlpkIndex(entry.first->index()); coefs[k] = entry.second; @@ -449,7 +449,7 @@ void GLPKInterface::ExtractNewConstraints() { glp_add_rows(lp_, total_num_rows - last_constraint_index_); int num_coefs = 0; for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *ct = solver_->constraints_[i]; + MPConstraint* ct = solver_->constraints_[i]; set_constraint_as_extracted(i, true); if (ct->name().empty()) { glp_set_row_name(lp_, MPSolverIndexToGlpkIndex(i), @@ -475,8 +475,8 @@ void GLPKInterface::ExtractNewConstraints() { std::unique_ptr coefs(new double[num_coefs + 1]); int k = 1; for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *ct = solver_->constraints_[i]; - for (const auto &entry : ct->coefficients_) { + MPConstraint* ct = solver_->constraints_[i]; + for (const auto& entry : ct->coefficients_) { DCHECK(variable_is_extracted(entry.first->index())); constraint_indices[k] = MPSolverIndexToGlpkIndex(ct->index()); variable_indices[k] = MPSolverIndexToGlpkIndex(entry.first->index()); @@ -506,7 +506,7 @@ void GLPKInterface::ExtractNewConstraints() { void GLPKInterface::ExtractObjective() { // Linear objective: set objective coefficients for all variables // (some might have been modified). - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { glp_set_obj_coef(lp_, MPSolverIndexToGlpkIndex(entry.first->index()), entry.second); } @@ -515,7 +515,7 @@ void GLPKInterface::ExtractObjective() { } // Solve the problem using the parameter values specified. 
-MPSolver::ResultStatus GLPKInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus GLPKInterface::Solve(const MPSolverParameters& param) { WallTimer timer; timer.Start(); @@ -572,7 +572,7 @@ MPSolver::ResultStatus GLPKInterface::Solve(const MPSolverParameters ¶m) { } VLOG(1) << "objective=" << objective_value_; for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; double val; if (mip_) { val = glp_mip_col_val(lp_, MPSolverIndexToGlpkIndex(i)); @@ -589,7 +589,7 @@ MPSolver::ResultStatus GLPKInterface::Solve(const MPSolverParameters ¶m) { } } for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; if (!mip_) { const double dual_value = glp_get_row_dual(lp_, MPSolverIndexToGlpkIndex(i)); @@ -779,8 +779,8 @@ double GLPKInterface::ComputeExactConditionNumber() const { } double GLPKInterface::ComputeScaledBasisL1Norm( - int num_rows, int num_cols, double *row_scaling_factor, - double *column_scaling_factor) const { + int num_rows, int num_cols, double* row_scaling_factor, + double* column_scaling_factor) const { double norm = 0.0; std::unique_ptr values(new double[num_rows + 1]); std::unique_ptr indices(new int[num_rows + 1]); @@ -816,8 +816,8 @@ double GLPKInterface::ComputeScaledBasisL1Norm( } double GLPKInterface::ComputeInverseScaledBasisL1Norm( - int num_rows, int num_cols, double *row_scaling_factor, - double *column_scaling_factor) const { + int num_rows, int num_cols, double* row_scaling_factor, + double* column_scaling_factor) const { // Compute the LU factorization if it doesn't exist yet. 
if (!glp_bf_exists(lp_)) { const int factorize_status = glp_factorize(lp_); @@ -891,7 +891,7 @@ double GLPKInterface::ComputeInverseScaledBasisL1Norm( // ------ Parameters ------ -void GLPKInterface::ConfigureGLPKParameters(const MPSolverParameters ¶m) { +void GLPKInterface::ConfigureGLPKParameters(const MPSolverParameters& param) { if (mip_) { glp_init_iocp(&mip_param_); // Time limit @@ -925,7 +925,7 @@ void GLPKInterface::ConfigureGLPKParameters(const MPSolverParameters ¶m) { SetParameters(param); } -void GLPKInterface::SetParameters(const MPSolverParameters ¶m) { +void GLPKInterface::SetParameters(const MPSolverParameters& param) { SetCommonParameters(param); if (mip_) { SetMIPParameters(param); @@ -988,7 +988,7 @@ void GLPKInterface::SetLpAlgorithm(int value) { } } -MPSolverInterface *BuildGLPKInterface(bool mip, MPSolver *const solver) { +MPSolverInterface* BuildGLPKInterface(bool mip, MPSolver* const solver) { return new GLPKInterface(solver, mip); } diff --git a/ortools/linear_solver/gurobi_environment.cc b/ortools/linear_solver/gurobi_environment.cc index 872e65805f..4f1bac8c63 100644 --- a/ortools/linear_solver/gurobi_environment.cc +++ b/ortools/linear_solver/gurobi_environment.cc @@ -23,7 +23,7 @@ #include "ortools/linear_solver/linear_solver.h" namespace operations_research { -absl::Status LoadGurobiEnvironment(GRBenv **env) { +absl::Status LoadGurobiEnvironment(GRBenv** env) { constexpr int GRB_OK = 0; const char kGurobiEnvErrorMsg[] = "Could not load Gurobi environment. 
Is gurobi correctly installed and " diff --git a/ortools/linear_solver/gurobi_environment.h b/ortools/linear_solver/gurobi_environment.h index 848b8c7007..162ca8fefd 100644 --- a/ortools/linear_solver/gurobi_environment.h +++ b/ortools/linear_solver/gurobi_environment.h @@ -31,7 +31,7 @@ typedef struct _GRBenv GRBenv; } namespace operations_research { -absl::Status LoadGurobiEnvironment(GRBenv **env); +absl::Status LoadGurobiEnvironment(GRBenv** env); #define CB_ARGS GRBmodel *model, void *cbdata, int where, void *usrdata extern std::function DirectlySolveProto( - const MPModelRequest &request) override; + const MPModelRequest& request) override; // Writes the model. - void Write(const std::string &filename) override; + void Write(const std::string& filename) override; // ----- Model modifications and extraction ----- // Resets extracted model @@ -97,18 +97,18 @@ class GurobiInterface : public MPSolverInterface { void SetConstraintBounds(int row_index, double lb, double ub) override; // Adds Constraint incrementally. - void AddRowConstraint(MPConstraint *const ct) override; - bool AddIndicatorConstraint(MPConstraint *const ct) override; + void AddRowConstraint(MPConstraint* const ct) override; + bool AddIndicatorConstraint(MPConstraint* const ct) override; // Adds variable incrementally. - void AddVariable(MPVariable *const var) override; + void AddVariable(MPVariable* const var) override; // Changes a coefficient in a constraint. - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; // Clears a constraint from all its terms. 
- void ClearConstraint(MPConstraint *const constraint) override; + void ClearConstraint(MPConstraint* const constraint) override; // Changes a coefficient in the linear objective - void SetObjectiveCoefficient(const MPVariable *const variable, + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; // Changes the constant term in the linear objective. void SetObjectiveOffset(double value) override; @@ -152,9 +152,7 @@ class GurobiInterface : public MPSolverInterface { return true; } - void *underlying_solver() override { - return reinterpret_cast(model_); - } + void* underlying_solver() override { return reinterpret_cast(model_); } double ComputeExactConditionNumber() const override { if (!IsContinuous()) { @@ -182,12 +180,12 @@ class GurobiInterface : public MPSolverInterface { // Iterates through the solutions in Gurobi's solution pool. bool NextSolution() override; - void SetCallback(MPCallback *mp_callback) override; + void SetCallback(MPCallback* mp_callback) override; bool SupportsCallbacks() const override { return true; } private: // Sets all parameters in the underlying solver. - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; // Sets solver-specific parameters (avoiding using files). The previous // implementations supported multi-line strings of the form: // parameter_i value_i\n @@ -200,7 +198,7 @@ class GurobiInterface : public MPSolverInterface { // extra benefit of unifying the way we handle specific parameters for both // proto-based solves and for MPModel solves. bool SetSolverSpecificParametersAsString( - const std::string ¶meters) override; + const std::string& parameters) override; // Sets each parameter in the underlying solver. 
void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; @@ -209,7 +207,7 @@ class GurobiInterface : public MPSolverInterface { void SetScalingMode(int value) override; void SetLpAlgorithm(int value) override; - bool ReadParameterFile(const std::string &filename) override; + bool ReadParameterFile(const std::string& filename) override; std::string ValidFileExtensionForParameterFile() const override; MPSolver::BasisStatus TransformGRBVarBasisStatus( @@ -220,27 +218,27 @@ class GurobiInterface : public MPSolverInterface { // See the implementation note at the top of file on incrementalism. bool ModelIsNonincremental() const; - void SetIntAttr(const char *name, int value); - int GetIntAttr(const char *name) const; - void SetDoubleAttr(const char *name, double value); - double GetDoubleAttr(const char *name) const; - void SetIntAttrElement(const char *name, int index, int value); - int GetIntAttrElement(const char *name, int index) const; - void SetDoubleAttrElement(const char *name, int index, double value); - double GetDoubleAttrElement(const char *name, int index) const; - std::vector GetDoubleAttrArray(const char *name, int elements); - void SetCharAttrElement(const char *name, int index, char value); - char GetCharAttrElement(const char *name, int index) const; + void SetIntAttr(const char* name, int value); + int GetIntAttr(const char* name) const; + void SetDoubleAttr(const char* name, double value); + double GetDoubleAttr(const char* name) const; + void SetIntAttrElement(const char* name, int index, int value); + int GetIntAttrElement(const char* name, int index) const; + void SetDoubleAttrElement(const char* name, int index, double value); + double GetDoubleAttrElement(const char* name, int index) const; + std::vector GetDoubleAttrArray(const char* name, int elements); + void SetCharAttrElement(const char* name, int index, char value); + char GetCharAttrElement(const char* name, int index) const; void CheckedGurobiCall(int 
err) const; int SolutionCount() const; - GRBmodel *model_; - GRBenv *env_; + GRBmodel* model_; + GRBenv* env_; bool mip_; int current_solution_index_; - MPCallback *callback_ = nullptr; + MPCallback* callback_ = nullptr; bool update_branching_priorities_ = false; // Has length equal to the number of MPVariables in // MPSolverInterface::solver_. Values are the index of the corresponding @@ -264,39 +262,39 @@ class GurobiInterface : public MPSolverInterface { namespace { -void CheckedGurobiCall(int err, GRBenv *const env) { +void CheckedGurobiCall(int err, GRBenv* const env) { CHECK_EQ(0, err) << "Fatal error with code " << err << ", due to " << GRBgeterrormsg(env); } // For interacting directly with the Gurobi C API for callbacks. struct GurobiInternalCallbackContext { - GRBmodel *model; - void *gurobi_internal_callback_data; + GRBmodel* model; + void* gurobi_internal_callback_data; int where; }; class GurobiMPCallbackContext : public MPCallbackContext { public: - GurobiMPCallbackContext(GRBenv *env, - const std::vector *mp_var_to_gurobi_var, + GurobiMPCallbackContext(GRBenv* env, + const std::vector* mp_var_to_gurobi_var, int num_gurobi_vars, bool might_add_cuts, bool might_add_lazy_constraints); // Implementation of the interface. MPCallbackEvent Event() override; bool CanQueryVariableValues() override; - double VariableValue(const MPVariable *variable) override; - void AddCut(const LinearRange &cutting_plane) override; - void AddLazyConstraint(const LinearRange &lazy_constraint) override; + double VariableValue(const MPVariable* variable) override; + void AddCut(const LinearRange& cutting_plane) override; + void AddLazyConstraint(const LinearRange& lazy_constraint) override; double SuggestSolution( - const absl::flat_hash_map &solution) override; + const absl::flat_hash_map& solution) override; int64 NumExploredNodes() override; // Call this method to update the internal state of the callback context // before passing it to MPCallback::RunCallback(). 
void UpdateFromGurobiState( - const GurobiInternalCallbackContext &gurobi_internal_context); + const GurobiInternalCallbackContext& gurobi_internal_context); private: // Wraps GRBcbget(), used to query the state of the solver. See @@ -304,16 +302,16 @@ class GurobiMPCallbackContext : public MPCallbackContext { // for callback_code values. template T GurobiCallbackGet( - const GurobiInternalCallbackContext &gurobi_internal_context, + const GurobiInternalCallbackContext& gurobi_internal_context, int callback_code); void CheckedGurobiCall(int gurobi_error_code) const; template - void AddGeneratedConstraint(const LinearRange &linear_range, + void AddGeneratedConstraint(const LinearRange& linear_range, GRBConstraintFunction grb_constraint_function); - GRBenv *const env_; - const std::vector *const mp_var_to_gurobi_var_; + GRBenv* const env_; + const std::vector* const mp_var_to_gurobi_var_; const int num_gurobi_vars_; const bool might_add_cuts_; @@ -330,7 +328,7 @@ void GurobiMPCallbackContext::CheckedGurobiCall(int gurobi_error_code) const { } GurobiMPCallbackContext::GurobiMPCallbackContext( - GRBenv *env, const std::vector *mp_var_to_gurobi_var, + GRBenv* env, const std::vector* mp_var_to_gurobi_var, int num_gurobi_vars, bool might_add_cuts, bool might_add_lazy_constraints) : env_(ABSL_DIE_IF_NULL(env)), mp_var_to_gurobi_var_(ABSL_DIE_IF_NULL(mp_var_to_gurobi_var)), @@ -339,7 +337,7 @@ GurobiMPCallbackContext::GurobiMPCallbackContext( might_add_lazy_constraints_(might_add_lazy_constraints) {} void GurobiMPCallbackContext::UpdateFromGurobiState( - const GurobiInternalCallbackContext &gurobi_internal_context) { + const GurobiInternalCallbackContext& gurobi_internal_context) { current_gurobi_internal_callback_context_ = gurobi_internal_context; variable_values_extracted_ = false; } @@ -361,13 +359,13 @@ int64 GurobiMPCallbackContext::NumExploredNodes() { template T GurobiMPCallbackContext::GurobiCallbackGet( - const GurobiInternalCallbackContext 
&gurobi_internal_context, + const GurobiInternalCallbackContext& gurobi_internal_context, const int callback_code) { T result = 0; CheckedGurobiCall( GRBcbget(gurobi_internal_context.gurobi_internal_callback_data, gurobi_internal_context.where, callback_code, - static_cast(&result))); + static_cast(&result))); return result; } @@ -389,9 +387,9 @@ MPCallbackEvent GurobiMPCallbackContext::Event() { return MPCallbackEvent::kMessage; case GRB_CB_BARRIER: return MPCallbackEvent::kBarrier; - // TODO(b/112427356): in Gurobi 8.0, there is a new callback location. - // case GRB_CB_MULTIOBJ: - // return MPCallbackEvent::kMultiObj; + // TODO(b/112427356): in Gurobi 8.0, there is a new callback location. + // case GRB_CB_MULTIOBJ: + // return MPCallbackEvent::kMultiObj; default: LOG_FIRST_N(ERROR, 1) << "Gurobi callback at unknown where=" << current_gurobi_internal_callback_context_.where; @@ -412,7 +410,7 @@ bool GurobiMPCallbackContext::CanQueryVariableValues() { return false; } -double GurobiMPCallbackContext::VariableValue(const MPVariable *variable) { +double GurobiMPCallbackContext::VariableValue(const MPVariable* variable) { CHECK(variable != nullptr); if (!variable_values_extracted_) { const MPCallbackEvent where = Event(); @@ -430,7 +428,7 @@ double GurobiMPCallbackContext::VariableValue(const MPVariable *variable) { CheckedGurobiCall(GRBcbget( current_gurobi_internal_callback_context_.gurobi_internal_callback_data, current_gurobi_internal_callback_context_.where, gurobi_get_var_param, - static_cast(gurobi_variable_values_.data()))); + static_cast(gurobi_variable_values_.data()))); variable_values_extracted_ = true; } return gurobi_variable_values_[mp_var_to_gurobi_var_->at(variable->index())]; @@ -438,14 +436,14 @@ double GurobiMPCallbackContext::VariableValue(const MPVariable *variable) { template void GurobiMPCallbackContext::AddGeneratedConstraint( - const LinearRange &linear_range, + const LinearRange& linear_range, GRBConstraintFunction grb_constraint_function) 
{ std::vector variable_indices; std::vector variable_coefficients; const int num_terms = linear_range.linear_expr().terms().size(); variable_indices.reserve(num_terms); variable_coefficients.reserve(num_terms); - for (const auto &var_coef_pair : linear_range.linear_expr().terms()) { + for (const auto& var_coef_pair : linear_range.linear_expr().terms()) { variable_indices.push_back( mp_var_to_gurobi_var_->at(var_coef_pair.first->index())); variable_coefficients.push_back(var_coef_pair.second); @@ -466,7 +464,7 @@ void GurobiMPCallbackContext::AddGeneratedConstraint( } } -void GurobiMPCallbackContext::AddCut(const LinearRange &cutting_plane) { +void GurobiMPCallbackContext::AddCut(const LinearRange& cutting_plane) { CHECK(might_add_cuts_); const MPCallbackEvent where = Event(); CHECK(where == MPCallbackEvent::kMipNode) @@ -476,7 +474,7 @@ void GurobiMPCallbackContext::AddCut(const LinearRange &cutting_plane) { } void GurobiMPCallbackContext::AddLazyConstraint( - const LinearRange &lazy_constraint) { + const LinearRange& lazy_constraint) { CHECK(might_add_lazy_constraints_); const MPCallbackEvent where = Event(); CHECK(where == MPCallbackEvent::kMipNode || @@ -488,7 +486,7 @@ void GurobiMPCallbackContext::AddLazyConstraint( } double GurobiMPCallbackContext::SuggestSolution( - const absl::flat_hash_map &solution) { + const absl::flat_hash_map& solution) { const MPCallbackEvent where = Event(); CHECK(where == MPCallbackEvent::kMipNode) << "Feasible solutions can only be added at MIP_NODE, tried to add " @@ -496,8 +494,8 @@ double GurobiMPCallbackContext::SuggestSolution( << ToString(where); std::vector full_solution(num_gurobi_vars_, GRB_UNDEFINED); - for (const auto &variable_value : solution) { - const MPVariable *var = variable_value.first; + for (const auto& variable_value : solution) { + const MPVariable* var = variable_value.first; full_solution[mp_var_to_gurobi_var_->at(var->index())] = variable_value.second; } @@ -511,16 +509,16 @@ double 
GurobiMPCallbackContext::SuggestSolution( } struct MPCallbackWithGurobiContext { - GurobiMPCallbackContext *context; - MPCallback *callback; + GurobiMPCallbackContext* context; + MPCallback* callback; }; // NOTE(user): This function must have this exact API, because we are passing // it to Gurobi as a callback. -int STDCALL CallbackImpl(GRBmodel *model, void *gurobi_internal_callback_data, - int where, void *raw_model_and_callback) { - MPCallbackWithGurobiContext *const callback_with_context = - static_cast(raw_model_and_callback); +int __stdcall CallbackImpl(GRBmodel* model, void* gurobi_internal_callback_data, + int where, void* raw_model_and_callback) { + MPCallbackWithGurobiContext* const callback_with_context = + static_cast(raw_model_and_callback); CHECK(callback_with_context != nullptr); CHECK(callback_with_context->context != nullptr); CHECK(callback_with_context->callback != nullptr); @@ -538,49 +536,49 @@ void GurobiInterface::CheckedGurobiCall(int err) const { ::operations_research::CheckedGurobiCall(err, env_); } -void GurobiInterface::SetIntAttr(const char *name, int value) { +void GurobiInterface::SetIntAttr(const char* name, int value) { CheckedGurobiCall(GRBsetintattr(model_, name, value)); } -int GurobiInterface::GetIntAttr(const char *name) const { +int GurobiInterface::GetIntAttr(const char* name) const { int value; CheckedGurobiCall(GRBgetintattr(model_, name, &value)); return value; } -void GurobiInterface::SetDoubleAttr(const char *name, double value) { +void GurobiInterface::SetDoubleAttr(const char* name, double value) { CheckedGurobiCall(GRBsetdblattr(model_, name, value)); } -double GurobiInterface::GetDoubleAttr(const char *name) const { +double GurobiInterface::GetDoubleAttr(const char* name) const { double value; CheckedGurobiCall(GRBgetdblattr(model_, name, &value)); return value; } -void GurobiInterface::SetIntAttrElement(const char *name, int index, +void GurobiInterface::SetIntAttrElement(const char* name, int index, int value) { 
CheckedGurobiCall(GRBsetintattrelement(model_, name, index, value)); } -int GurobiInterface::GetIntAttrElement(const char *name, int index) const { +int GurobiInterface::GetIntAttrElement(const char* name, int index) const { int value; CheckedGurobiCall(GRBgetintattrelement(model_, name, index, &value)); return value; } -void GurobiInterface::SetDoubleAttrElement(const char *name, int index, +void GurobiInterface::SetDoubleAttrElement(const char* name, int index, double value) { CheckedGurobiCall(GRBsetdblattrelement(model_, name, index, value)); } -double GurobiInterface::GetDoubleAttrElement(const char *name, +double GurobiInterface::GetDoubleAttrElement(const char* name, int index) const { double value; CheckedGurobiCall(GRBgetdblattrelement(model_, name, index, &value)); return value; } -std::vector GurobiInterface::GetDoubleAttrArray(const char *name, +std::vector GurobiInterface::GetDoubleAttrArray(const char* name, int elements) { std::vector results(elements); CheckedGurobiCall( @@ -588,18 +586,18 @@ std::vector GurobiInterface::GetDoubleAttrArray(const char *name, return results; } -void GurobiInterface::SetCharAttrElement(const char *name, int index, +void GurobiInterface::SetCharAttrElement(const char* name, int index, char value) { CheckedGurobiCall(GRBsetcharattrelement(model_, name, index, value)); } -char GurobiInterface::GetCharAttrElement(const char *name, int index) const { +char GurobiInterface::GetCharAttrElement(const char* name, int index) const { char value; CheckedGurobiCall(GRBgetcharattrelement(model_, name, index, &value)); return value; } // Creates a LP/MIP instance with the specified name and minimization objective. -GurobiInterface::GurobiInterface(MPSolver *const solver, bool mip) +GurobiInterface::GurobiInterface(MPSolver* const solver, bool mip) : MPSolverInterface(solver), model_(nullptr), env_(nullptr), @@ -690,22 +688,22 @@ void GurobiInterface::SetConstraintBounds(int index, double lb, double ub) { // constraints. 
} -void GurobiInterface::AddRowConstraint(MPConstraint *const ct) { +void GurobiInterface::AddRowConstraint(MPConstraint* const ct) { sync_status_ = MUST_RELOAD; } -bool GurobiInterface::AddIndicatorConstraint(MPConstraint *const ct) { +bool GurobiInterface::AddIndicatorConstraint(MPConstraint* const ct) { had_nonincremental_change_ = true; sync_status_ = MUST_RELOAD; return !IsContinuous(); } -void GurobiInterface::AddVariable(MPVariable *const ct) { +void GurobiInterface::AddVariable(MPVariable* const ct) { sync_status_ = MUST_RELOAD; } -void GurobiInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void GurobiInterface::SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) { InvalidateSolutionSynchronization(); if (!had_nonincremental_change_ && variable_is_extracted(variable->index()) && @@ -726,7 +724,7 @@ void GurobiInterface::SetCoefficient(MPConstraint *const constraint, } } -void GurobiInterface::ClearConstraint(MPConstraint *const constraint) { +void GurobiInterface::ClearConstraint(MPConstraint* const constraint) { had_nonincremental_change_ = true; sync_status_ = MUST_RELOAD; // TODO(user): this is difficult to make incremental, like @@ -734,7 +732,7 @@ void GurobiInterface::ClearConstraint(MPConstraint *const constraint) { // range constraints introduce. 
} -void GurobiInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void GurobiInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { InvalidateSolutionSynchronization(); if (!had_nonincremental_change_ && variable_is_extracted(variable->index())) { @@ -759,7 +757,7 @@ void GurobiInterface::ClearObjective() { InvalidateSolutionSynchronization(); if (!had_nonincremental_change_) { SetObjectiveOffset(0.0); - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { SetObjectiveCoefficient(entry.first, 0.0); } } else { @@ -922,7 +920,7 @@ void GurobiInterface::ExtractNewVariables() { if (total_num_vars > last_variable_index_) { // Define new variables. for (int j = last_variable_index_; j < total_num_vars; ++j) { - const MPVariable *const var = solver_->variables_.at(j); + const MPVariable* const var = solver_->variables_.at(j); set_variable_as_extracted(var->index(), true); CheckedGurobiCall(GRBaddvar( model_, 0, // numnz @@ -943,11 +941,11 @@ void GurobiInterface::ExtractNewVariables() { // there is an indicator constraint), we should never enter this loop, as // last_variable_index_ will be reset to zero before ExtractNewVariables() // is called. - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; const int grb_ct_idx = mp_cons_to_gurobi_linear_cons_.at(ct->index()); DCHECK_GE(grb_ct_idx, 0); DCHECK(ct->indicator_variable() == nullptr); - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); DCHECK(variable_is_extracted(var_index)); @@ -973,21 +971,21 @@ void GurobiInterface::ExtractNewConstraints() { if (last_constraint_index_ < total_num_rows) { // Add each new constraint. 
for (int row = last_constraint_index_; row < total_num_rows; ++row) { - MPConstraint *const ct = solver_->constraints_[row]; + MPConstraint* const ct = solver_->constraints_[row]; set_constraint_as_extracted(row, true); const int size = ct->coefficients_.size(); std::vector grb_vars; std::vector coefs; grb_vars.reserve(size); coefs.reserve(size); - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); CHECK(variable_is_extracted(var_index)); grb_vars.push_back(mp_var_to_gurobi_var_.at(var_index)); coefs.push_back(entry.second); } - char *const name = - ct->name().empty() ? nullptr : const_cast(ct->name().c_str()); + char* const name = + ct->name().empty() ? nullptr : const_cast(ct->name().c_str()); if (ct->indicator_variable() != nullptr) { const int grb_ind_var = mp_var_to_gurobi_var_.at(ct->indicator_variable()->index()); @@ -1042,7 +1040,7 @@ void GurobiInterface::ExtractObjective() { // ------ Parameters ----- -void GurobiInterface::SetParameters(const MPSolverParameters ¶m) { +void GurobiInterface::SetParameters(const MPSolverParameters& param) { SetCommonParameters(param); if (mip_) { SetMIPParameters(param); @@ -1050,7 +1048,7 @@ void GurobiInterface::SetParameters(const MPSolverParameters ¶m) { } bool GurobiInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { return SetSolverSpecificParameters(parameters, GRBgetenv(model_)).ok(); } @@ -1150,7 +1148,7 @@ int GurobiInterface::SolutionCount() const { } bool GurobiInterface::ModelIsNonincremental() const { - for (const MPConstraint *c : solver_->constraints()) { + for (const MPConstraint* c : solver_->constraints()) { if (c->indicator_variable() != nullptr) { return true; } @@ -1158,7 +1156,7 @@ bool GurobiInterface::ModelIsNonincremental() const { return false; } -MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus 
GurobiInterface::Solve(const MPSolverParameters& param) { WallTimer timer; timer.Start(); @@ -1179,7 +1177,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { absl::FormatDuration(timer.GetDuration())); // Set solution hints if any. - for (const std::pair &p : + for (const std::pair& p : solver_->solution_hint_) { SetDoubleAttrElement(GRB_DBL_ATTR_START, mp_var_to_gurobi_var_.at(p.first->index()), p.second); @@ -1187,7 +1185,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { // Pass branching priority annotations if at least one has been updated. if (update_branching_priorities_) { - for (const MPVariable *var : solver_->variables_) { + for (const MPVariable* var : solver_->variables_) { SetIntAttrElement(GRB_INT_ATTR_BRANCHPRIORITY, mp_var_to_gurobi_var_.at(var->index()), var->branching_priority()); @@ -1224,7 +1222,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { mp_callback_with_context.context = gurobi_context.get(); mp_callback_with_context.callback = callback_; CheckedGurobiCall(GRBsetcallbackfunc( - model_, CallbackImpl, static_cast(&mp_callback_with_context))); + model_, CallbackImpl, static_cast(&mp_callback_with_context))); gurobi_precrush = callback_->might_add_cuts(); gurobi_lazy_constraint = callback_->might_add_lazy_constraints(); } @@ -1285,7 +1283,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { const std::vector grb_variable_values = GetDoubleAttrArray(GRB_DBL_ATTR_X, num_gurobi_vars_); for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; const double val = grb_variable_values.at(mp_var_to_gurobi_var_.at(i)); var->set_solution_value(val); VLOG(3) << var->name() << ", value = " << val; @@ -1296,7 +1294,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { const std::vector grb_reduced_costs = 
GetDoubleAttrArray(GRB_DBL_ATTR_RC, num_gurobi_vars_); for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; const double rc = grb_reduced_costs.at(mp_var_to_gurobi_var_.at(i)); var->set_reduced_cost(rc); VLOG(4) << var->name() << ", reduced cost = " << rc; @@ -1307,7 +1305,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { std::vector grb_dual_values = GetDoubleAttrArray(GRB_DBL_ATTR_PI, num_gurobi_linear_cons_); for (int i = 0; i < solver_->constraints_.size(); ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; const double dual_value = grb_dual_values.at(mp_cons_to_gurobi_linear_cons_.at(i)); ct->set_dual_value(dual_value); @@ -1323,7 +1321,7 @@ MPSolver::ResultStatus GurobiInterface::Solve(const MPSolverParameters ¶m) { } absl::optional GurobiInterface::DirectlySolveProto( - const MPModelRequest &request) { + const MPModelRequest& request) { // Here we reuse the Gurobi environment to support single-use license that // forbids creating a second environment if one already exists. 
const auto status_or = GurobiSolveProto(request, env_); @@ -1363,7 +1361,7 @@ bool GurobiInterface::NextSolution() { GetDoubleAttrArray(GRB_DBL_ATTR_XN, num_gurobi_vars_); for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; var->set_solution_value( grb_variable_values.at(mp_var_to_gurobi_var_.at(i))); } @@ -1372,7 +1370,7 @@ bool GurobiInterface::NextSolution() { return true; } -void GurobiInterface::Write(const std::string &filename) { +void GurobiInterface::Write(const std::string& filename) { if (sync_status_ == MUST_RELOAD) { Reset(); } @@ -1386,7 +1384,7 @@ void GurobiInterface::Write(const std::string &filename) { } } -bool GurobiInterface::ReadParameterFile(const std::string &filename) { +bool GurobiInterface::ReadParameterFile(const std::string& filename) { // A non-zero return value indicates that a problem occurred. return GRBreadparams(GRBgetenv(model_), filename.c_str()) == 0; } @@ -1395,11 +1393,11 @@ std::string GurobiInterface::ValidFileExtensionForParameterFile() const { return ".prm"; } -MPSolverInterface *BuildGurobiInterface(bool mip, MPSolver *const solver) { +MPSolverInterface* BuildGurobiInterface(bool mip, MPSolver* const solver) { return new GurobiInterface(solver, mip); } -void GurobiInterface::SetCallback(MPCallback *mp_callback) { +void GurobiInterface::SetCallback(MPCallback* mp_callback) { callback_ = mp_callback; } diff --git a/ortools/linear_solver/gurobi_proto_solver.cc b/ortools/linear_solver/gurobi_proto_solver.cc index 84e3f6d608..8323709f8b 100644 --- a/ortools/linear_solver/gurobi_proto_solver.cc +++ b/ortools/linear_solver/gurobi_proto_solver.cc @@ -23,6 +23,7 @@ #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" +#include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/optional.h" #include "ortools/base/cleanup.h" @@ -38,25 +39,25 @@ 
namespace { constexpr int GRB_OK = 0; inline absl::Status GurobiCodeToUtilStatus(int error_code, - const char *source_file, + const char* source_file, int source_line, - const char *statement, - GRBenv *const env) { + const char* statement, + GRBenv* const env) { if (error_code == GRB_OK) return absl::OkStatus(); return absl::InvalidArgumentError(absl::StrFormat( "Gurobi error code %d (file '%s', line %d) on '%s': %s", error_code, source_file, source_line, statement, GRBgeterrormsg(env))); } -int AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model, - std::vector *tmp_variables, - std::vector *tmp_coefficients) { +int AddIndicatorConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model, + std::vector* tmp_variables, + std::vector* tmp_coefficients) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); CHECK(tmp_coefficients != nullptr); - const auto &ind_cst = gen_cst.indicator_constraint(); + const auto& ind_cst = gen_cst.indicator_constraint(); MPConstraintProto cst = ind_cst.constraint(); if (cst.lower_bound() > -std::numeric_limits::infinity()) { int status = GRBaddgenconstrIndicator( @@ -81,9 +82,9 @@ int AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, return GRB_OK; } -int AddSosConstraint(const MPSosConstraint &sos_cst, GRBmodel *gurobi_model, - std::vector *tmp_variables, - std::vector *tmp_weights) { +int AddSosConstraint(const MPSosConstraint& sos_cst, GRBmodel* gurobi_model, + std::vector* tmp_variables, + std::vector* tmp_weights) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); CHECK(tmp_weights != nullptr); @@ -107,31 +108,35 @@ int AddSosConstraint(const MPSosConstraint &sos_cst, GRBmodel *gurobi_model, ? 
GRB_SOS_TYPE1 : GRB_SOS_TYPE2}; std::vector begins = {0}; - return GRBaddsos( - gurobi_model, /*numsos=*/1, /*nummembers=*/sos_cst.var_index_size(), - /*types=*/types.data(), /*beg=*/begins.data(), - /*ind=*/tmp_variables->data(), /*weight*/ tmp_weights->data()); + return GRBaddsos(gurobi_model, /*numsos=*/1, + /*nummembers=*/sos_cst.var_index_size(), + /*types=*/types.data(), + /*beg=*/begins.data(), /*ind=*/tmp_variables->data(), + /*weight*/ tmp_weights->data()); } -int AddQuadraticConstraint(const MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model) { +int AddQuadraticConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model) { CHECK(gurobi_model != nullptr); constexpr double kInfinity = std::numeric_limits::infinity(); CHECK(gen_cst.has_quadratic_constraint()); - const MPQuadraticConstraint &quad_cst = gen_cst.quadratic_constraint(); + const MPQuadraticConstraint& quad_cst = gen_cst.quadratic_constraint(); - auto addqconstr = [](GRBmodel *gurobi_model, MPQuadraticConstraint quad_cst, - char sense, double rhs, const std::string &name) { + auto addqconstr = [](GRBmodel* gurobi_model, MPQuadraticConstraint quad_cst, + char sense, double rhs, const std::string& name) { return GRBaddqconstr( - gurobi_model, /*numlnz=*/quad_cst.var_index_size(), + gurobi_model, + /*numlnz=*/quad_cst.var_index_size(), /*lind=*/quad_cst.mutable_var_index()->mutable_data(), /*lval=*/quad_cst.mutable_coefficient()->mutable_data(), /*numqnz=*/quad_cst.qvar1_index_size(), /*qrow=*/quad_cst.mutable_qvar1_index()->mutable_data(), /*qcol=*/quad_cst.mutable_qvar2_index()->mutable_data(), /*qval=*/quad_cst.mutable_qcoefficient()->mutable_data(), - /*sense=*/sense, /*rhs=*/rhs, /*QCname=*/name.c_str()); + /*sense=*/sense, + /*rhs=*/rhs, + /*QCname=*/name.c_str()); }; if (quad_cst.has_lower_bound() && quad_cst.lower_bound() > -kInfinity) { @@ -152,39 +157,42 @@ int AddQuadraticConstraint(const MPGeneralConstraintProto &gen_cst, return GRB_OK; } -int 
AddAndConstraint(const MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model, std::vector *tmp_variables) { +int AddAndConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model, std::vector* tmp_variables) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); auto and_cst = gen_cst.and_constraint(); return GRBaddgenconstrAnd( - gurobi_model, /*name=*/gen_cst.name().c_str(), + gurobi_model, + /*name=*/gen_cst.name().c_str(), /*resvar=*/and_cst.resultant_var_index(), /*nvars=*/and_cst.var_index_size(), /*vars=*/and_cst.mutable_var_index()->mutable_data()); } -int AddOrConstraint(const MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model, std::vector *tmp_variables) { +int AddOrConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model, std::vector* tmp_variables) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); auto or_cst = gen_cst.or_constraint(); - return GRBaddgenconstrOr(gurobi_model, /*name=*/gen_cst.name().c_str(), + return GRBaddgenconstrOr(gurobi_model, + /*name=*/gen_cst.name().c_str(), /*resvar=*/or_cst.resultant_var_index(), /*nvars=*/or_cst.var_index_size(), /*vars=*/or_cst.mutable_var_index()->mutable_data()); } -int AddMinConstraint(const MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model, std::vector *tmp_variables) { +int AddMinConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model, std::vector* tmp_variables) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); auto min_cst = gen_cst.min_constraint(); return GRBaddgenconstrMin( - gurobi_model, /*name=*/gen_cst.name().c_str(), + gurobi_model, + /*name=*/gen_cst.name().c_str(), /*resvar=*/min_cst.resultant_var_index(), /*nvars=*/min_cst.var_index_size(), /*vars=*/min_cst.mutable_var_index()->mutable_data(), @@ -193,14 +201,15 @@ int AddMinConstraint(const MPGeneralConstraintProto &gen_cst, : std::numeric_limits::infinity()); } -int AddMaxConstraint(const 
MPGeneralConstraintProto &gen_cst, - GRBmodel *gurobi_model, std::vector *tmp_variables) { +int AddMaxConstraint(const MPGeneralConstraintProto& gen_cst, + GRBmodel* gurobi_model, std::vector* tmp_variables) { CHECK(gurobi_model != nullptr); CHECK(tmp_variables != nullptr); auto max_cst = gen_cst.max_constraint(); return GRBaddgenconstrMax( - gurobi_model, /*name=*/gen_cst.name().c_str(), + gurobi_model, + /*name=*/gen_cst.name().c_str(), /*resvar=*/max_cst.resultant_var_index(), /*nvars=*/max_cst.var_index_size(), /*vars=*/max_cst.mutable_var_index()->mutable_data(), @@ -210,63 +219,53 @@ int AddMaxConstraint(const MPGeneralConstraintProto &gen_cst, } } // namespace -absl::Status SetSolverSpecificParameters(const std::string ¶meters, - GRBenv *gurobi) { - std::string error_message(""); - for (const auto parameter : absl::StrSplit(parameters, absl::ByAnyChar("\n,"), - absl::SkipWhitespace())) { - // If the line is a comment, we skip it. - if (parameter.empty() || parameter[0] == '#') { - continue; - } - // This double creation of sub-strings is wasteful, but probably does not - // matter much in this context. Still better than going through a file. - std::vector key_value = absl::StrSplit( - parameter, absl::ByAnyChar("= "), absl::SkipWhitespace()); - // If one parameter fails, we keep processing the list of parameters. - if (key_value.size() != 2) { - const std::string current_message = - absl::StrCat("Cannot parse parameter '", parameter, - "'. Expected format is 'ParameterName value'"); - LOG(WARNING) << current_message; - if (error_message.empty()) { - error_message = current_message; - } else { - absl::StrAppend(&error_message, "\n", current_message); +absl::Status SetSolverSpecificParameters(const std::string& parameters, + GRBenv* gurobi) { + if (parameters.empty()) return absl::OkStatus(); + std::vector error_messages; + for (absl::string_view line : absl::StrSplit(parameters, '\n')) { + // Comment tokens end at the next new-line, or the end of the string. 
+ // The first character must be '#' + if (line[0] == '#') continue; + for (absl::string_view token : + absl::StrSplit(line, ',', absl::SkipWhitespace())) { + if (token.empty()) continue; + std::vector key_value = + absl::StrSplit(token, absl::ByAnyChar(" ="), absl::SkipWhitespace()); + // If one parameter fails, we keep processing the list of parameters. + if (key_value.size() != 2) { + const std::string current_message = + absl::StrCat("Cannot parse parameter '", token, + "'. Expected format is 'ParameterName value' or " + "'ParameterName=value'"); + error_messages.push_back(current_message); + continue; } - continue; - } - // Again, if setting one parameter fails, we notify and keep moving down - // the list. - const int gurobi_code = - GRBsetparam(gurobi, key_value[0].c_str(), key_value[1].c_str()); - if (gurobi_code != 0) { - const std::string current_message = absl::StrCat( - "Error setting parameter '", key_value[0], "' to value '", - key_value[1], "': ", GRBgeterrormsg(gurobi)); - LOG(WARNING) << current_message; - if (error_message.empty()) { - error_message = current_message; - } else { - absl::StrAppend(&error_message, "\n", current_message); + const int gurobi_code = + GRBsetparam(gurobi, key_value[0].c_str(), key_value[1].c_str()); + if (gurobi_code != GRB_OK) { + const std::string current_message = absl::StrCat( + "Error setting parameter '", key_value[0], "' to value '", + key_value[1], "': ", GRBgeterrormsg(gurobi)); + error_messages.push_back(current_message); + continue; } - continue; + VLOG(2) << absl::StrCat("Set parameter '", key_value[0], "' to value '", + key_value[1]); } - VLOG(2) << absl::StrCat("Set parameter '", key_value[0], "' to value '", - key_value[1]); } - if (error_message.empty()) return absl::OkStatus(); - return absl::InvalidArgumentError(error_message); + if (error_messages.empty()) return absl::OkStatus(); + return absl::InvalidArgumentError(absl::StrJoin(error_messages, "\n")); } absl::StatusOr GurobiSolveProto( - const 
MPModelRequest &request, GRBenv *gurobi_env) { + const MPModelRequest& request, GRBenv* gurobi_env) { MPSolutionResponse response; - const absl::optional > optional_model = + const absl::optional> optional_model = ExtractValidMPModelOrPopulateResponseStatus(request, &response); if (!optional_model) return response; - const MPModelProto &model = optional_model->get(); + const MPModelProto& model = optional_model->get(); // We set `gurobi_env` to point to a new environment if no existing one is // provided. We must make sure that we free this environment when we exit this @@ -285,7 +284,7 @@ absl::StatusOr GurobiSolveProto( RETURN_IF_ERROR(LoadGurobiEnvironment(&gurobi_env)); } - GRBmodel *gurobi_model = nullptr; + GRBmodel* gurobi_model = nullptr; auto gurobi_model_deleter = absl::MakeCleanup([&]() { const int error_code = GRBfreemodel(gurobi_model); LOG_IF(DFATAL, error_code != GRB_OK) @@ -298,17 +297,22 @@ absl::StatusOr GurobiSolveProto( RETURN_IF_ERROR( \ GurobiCodeToUtilStatus(x, __FILE__, __LINE__, #x, gurobi_env)); - RETURN_IF_GUROBI_ERROR( - GRBnewmodel(gurobi_env, &gurobi_model, model.name().c_str(), - /*numvars=*/0, /*obj=*/nullptr, /*lb=*/nullptr, - /*ub=*/nullptr, /*vtype=*/nullptr, /*varnames=*/nullptr)); + RETURN_IF_GUROBI_ERROR(GRBnewmodel(gurobi_env, &gurobi_model, + model.name().c_str(), + /*numvars=*/0, + /*obj=*/nullptr, + /*lb=*/nullptr, + /*ub=*/nullptr, + /*vtype=*/nullptr, + /*varnames=*/nullptr)); if (request.has_solver_specific_parameters()) { const auto parameters_status = SetSolverSpecificParameters( request.solver_specific_parameters(), GRBgetenv(gurobi_model)); if (!parameters_status.ok()) { response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); - response.set_status_str(std::string(parameters_status.message())); + response.set_status_str( + std::string(parameters_status.message())); // NOLINT return response; } } @@ -328,9 +332,9 @@ absl::StatusOr GurobiSolveProto( std::vector lb(variable_size); std::vector ub(variable_size); 
std::vector ctype(variable_size); - std::vector varnames(variable_size); + std::vector varnames(variable_size); for (int v = 0; v < variable_size; ++v) { - const MPVariableProto &variable = model.variable(v); + const MPVariableProto& variable = model.variable(v); obj_coeffs[v] = variable.objective_coefficient(); lb[v] = variable.lower_bound(); ub[v] = variable.upper_bound(); @@ -341,9 +345,9 @@ absl::StatusOr GurobiSolveProto( RETURN_IF_GUROBI_ERROR( GRBaddvars(gurobi_model, variable_size, 0, nullptr, nullptr, nullptr, - /*obj=*/obj_coeffs.data(), /*lb=*/lb.data(), - /*ub=*/ub.data(), /*vtype=*/ctype.data(), - /*varnames=*/const_cast(varnames.data()))); + /*obj=*/obj_coeffs.data(), + /*lb=*/lb.data(), /*ub=*/ub.data(), /*vtype=*/ctype.data(), + /*varnames=*/const_cast(varnames.data()))); // Set solution hints if any. for (int i = 0; i < model.solution_hint().var_index_size(); ++i) { @@ -357,7 +361,7 @@ absl::StatusOr GurobiSolveProto( std::vector ct_variables; std::vector ct_coefficients; for (int c = 0; c < model.constraint_size(); ++c) { - const MPConstraintProto &constraint = model.constraint(c); + const MPConstraintProto& constraint = model.constraint(c); const int size = constraint.var_index_size(); ct_variables.resize(size, 0); ct_coefficients.resize(size, 0); @@ -370,22 +374,22 @@ absl::StatusOr GurobiSolveProto( if (constraint.lower_bound() == constraint.upper_bound()) { RETURN_IF_GUROBI_ERROR(GRBaddconstr( gurobi_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - /*cval=*/ct_coefficients.data(), /*sense=*/GRB_EQUAL, - /*rhs=*/constraint.lower_bound(), + /*cval=*/ct_coefficients.data(), + /*sense=*/GRB_EQUAL, /*rhs=*/constraint.lower_bound(), /*constrname=*/constraint.name().c_str())); } else if (constraint.lower_bound() == -std::numeric_limits::infinity()) { RETURN_IF_GUROBI_ERROR(GRBaddconstr( gurobi_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - /*cval=*/ct_coefficients.data(), /*sense=*/GRB_LESS_EQUAL, - /*rhs=*/constraint.upper_bound(), + 
/*cval=*/ct_coefficients.data(), + /*sense=*/GRB_LESS_EQUAL, /*rhs=*/constraint.upper_bound(), /*constrname=*/constraint.name().c_str())); } else if (constraint.upper_bound() == std::numeric_limits::infinity()) { RETURN_IF_GUROBI_ERROR(GRBaddconstr( gurobi_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - /*cval=*/ct_coefficients.data(), /*sense=*/GRB_GREATER_EQUAL, - /*rhs=*/constraint.lower_bound(), + /*cval=*/ct_coefficients.data(), + /*sense=*/GRB_GREATER_EQUAL, /*rhs=*/constraint.lower_bound(), /*constrname=*/constraint.name().c_str())); } else { RETURN_IF_GUROBI_ERROR(GRBaddrangeconstr( @@ -397,7 +401,7 @@ absl::StatusOr GurobiSolveProto( } } - for (const auto &gen_cst : model.general_constraint()) { + for (const auto& gen_cst : model.general_constraint()) { switch (gen_cst.general_constraint_case()) { case MPGeneralConstraintProto::kIndicatorConstraint: { RETURN_IF_GUROBI_ERROR(AddIndicatorConstraint( @@ -416,7 +420,8 @@ absl::StatusOr GurobiSolveProto( } case MPGeneralConstraintProto::kAbsConstraint: { RETURN_IF_GUROBI_ERROR(GRBaddgenconstrAbs( - gurobi_model, /*name=*/gen_cst.name().c_str(), + gurobi_model, + /*name=*/gen_cst.name().c_str(), /*resvar=*/gen_cst.abs_constraint().resultant_var_index(), /*argvar=*/gen_cst.abs_constraint().var_index())); break; diff --git a/ortools/linear_solver/gurobi_proto_solver.h b/ortools/linear_solver/gurobi_proto_solver.h index 845de30c20..957787043a 100644 --- a/ortools/linear_solver/gurobi_proto_solver.h +++ b/ortools/linear_solver/gurobi_proto_solver.h @@ -30,19 +30,20 @@ namespace operations_research { // Please note though that the provided environment should not be actively used // by another thread at the same time. absl::StatusOr GurobiSolveProto( - const MPModelRequest &request, GRBenv *gurobi_env = nullptr); + const MPModelRequest& request, GRBenv* gurobi_env = nullptr); // Set parameters specified in the string. 
The format of the string is a series // of tokens separated by either '\n' or by ',' characters. // Any token whose first character is a '#' or has zero length is skiped. -// Any other token has to has the form: +// Comment tokens (i.e. those starting with #) can contain ',' characters. +// Any other token has the form: // parameter_name(separator)value // where (separator) is either '=' or ' '. // A valid string can look-like: -// "#\n# Gurobi-specific parameters\n\nThreads=1\nPresolve 2,SolutionLimit=100" -// This function will process each and every token, even if an intermediate -// token is unrecognized. -absl::Status SetSolverSpecificParameters(const std::string ¶meters, - GRBenv *gurobi); +// "#\n# Gurobi-specific parameters, still part of the +// comment\n\nThreads=1\nPresolve 2,SolutionLimit=100" This function will +// process each and every token, even if an intermediate token is unrecognized. +absl::Status SetSolverSpecificParameters(const std::string& parameters, + GRBenv* gurobi); } // namespace operations_research #endif // OR_TOOLS_LINEAR_SOLVER_GUROBI_PROTO_SOLVER_H_ diff --git a/ortools/linear_solver/linear_expr.cc b/ortools/linear_solver/linear_expr.cc index 30b90b059d..bfbcc96c77 100644 --- a/ortools/linear_solver/linear_expr.cc +++ b/ortools/linear_solver/linear_expr.cc @@ -25,32 +25,32 @@ LinearExpr::LinearExpr(double constant) : offset_(constant), terms_() {} LinearExpr::LinearExpr() : LinearExpr(0.0) {} -LinearExpr::LinearExpr(const MPVariable *var) : LinearExpr(0.0) { +LinearExpr::LinearExpr(const MPVariable* var) : LinearExpr(0.0) { terms_[var] = 1.0; } -LinearExpr &LinearExpr::operator+=(const LinearExpr &rhs) { - for (const auto &kv : rhs.terms_) { +LinearExpr& LinearExpr::operator+=(const LinearExpr& rhs) { + for (const auto& kv : rhs.terms_) { terms_[kv.first] += kv.second; } offset_ += rhs.offset_; return *this; } -LinearExpr &LinearExpr::operator-=(const LinearExpr &rhs) { - for (const auto &kv : rhs.terms_) { +LinearExpr& 
LinearExpr::operator-=(const LinearExpr& rhs) { + for (const auto& kv : rhs.terms_) { terms_[kv.first] -= kv.second; } offset_ -= rhs.offset_; return *this; } -LinearExpr &LinearExpr::operator*=(double rhs) { +LinearExpr& LinearExpr::operator*=(double rhs) { if (rhs == 0) { terms_.clear(); offset_ = 0; } else if (rhs != 1) { - for (auto &kv : terms_) { + for (auto& kv : terms_) { kv.second *= rhs; } offset_ *= rhs; @@ -58,7 +58,7 @@ LinearExpr &LinearExpr::operator*=(double rhs) { return *this; } -LinearExpr &LinearExpr::operator/=(double rhs) { +LinearExpr& LinearExpr::operator/=(double rhs) { DCHECK_NE(rhs, 0); return (*this) *= 1 / rhs; } @@ -74,7 +74,7 @@ LinearExpr LinearExpr::NotVar(LinearExpr var) { double LinearExpr::SolutionValue() const { double solution = offset_; - for (const auto &pair : terms_) { + for (const auto& pair : terms_) { solution += pair.first->solution_value() * pair.second; } return solution; @@ -82,8 +82,8 @@ double LinearExpr::SolutionValue() const { namespace { -void AppendTerm(const double coef, const std::string &var_name, - const bool is_first, std::string *s) { +void AppendTerm(const double coef, const std::string& var_name, + const bool is_first, std::string* s) { if (is_first) { if (coef == 1.0) { absl::StrAppend(s, var_name); @@ -103,7 +103,7 @@ void AppendTerm(const double coef, const std::string &var_name, } } -void AppendOffset(const double offset, const bool is_first, std::string *s) { +void AppendOffset(const double offset, const bool is_first, std::string* s) { if (is_first) { absl::StrAppend(s, offset); } else { @@ -117,17 +117,17 @@ void AppendOffset(const double offset, const bool is_first, std::string *s) { } // namespace std::string LinearExpr::ToString() const { - std::vector vars_in_order; - for (const auto &var_val_pair : terms_) { + std::vector vars_in_order; + for (const auto& var_val_pair : terms_) { vars_in_order.push_back(var_val_pair.first); } std::sort(vars_in_order.begin(), vars_in_order.end(), - [](const 
MPVariable *v, const MPVariable *u) { + [](const MPVariable* v, const MPVariable* u) { return v->index() < u->index(); }); std::string result; bool is_first = true; - for (const MPVariable *var : vars_in_order) { + for (const MPVariable* var : vars_in_order) { // MPSolver gives names to all variables, even if you don't. DCHECK(!var->name().empty()); AppendTerm(terms_.at(var), var->name(), is_first, &result); @@ -138,16 +138,16 @@ std::string LinearExpr::ToString() const { return result; } -std::ostream &operator<<(std::ostream &stream, const LinearExpr &linear_expr) { +std::ostream& operator<<(std::ostream& stream, const LinearExpr& linear_expr) { stream << linear_expr.ToString(); return stream; } -LinearExpr operator+(LinearExpr lhs, const LinearExpr &rhs) { +LinearExpr operator+(LinearExpr lhs, const LinearExpr& rhs) { lhs += rhs; return lhs; } -LinearExpr operator-(LinearExpr lhs, const LinearExpr &rhs) { +LinearExpr operator-(LinearExpr lhs, const LinearExpr& rhs) { lhs -= rhs; return lhs; } @@ -164,7 +164,7 @@ LinearExpr operator*(double lhs, LinearExpr rhs) { return rhs; } -LinearRange::LinearRange(double lower_bound, const LinearExpr &linear_expr, +LinearRange::LinearRange(double lower_bound, const LinearExpr& linear_expr, double upper_bound) : lower_bound_(lower_bound), linear_expr_(linear_expr), @@ -174,13 +174,13 @@ LinearRange::LinearRange(double lower_bound, const LinearExpr &linear_expr, linear_expr_ -= linear_expr_.offset(); } -LinearRange operator<=(const LinearExpr &lhs, const LinearExpr &rhs) { +LinearRange operator<=(const LinearExpr& lhs, const LinearExpr& rhs) { return LinearRange(-std::numeric_limits::infinity(), lhs - rhs, 0); } -LinearRange operator==(const LinearExpr &lhs, const LinearExpr &rhs) { +LinearRange operator==(const LinearExpr& lhs, const LinearExpr& rhs) { return LinearRange(0, lhs - rhs, 0); } -LinearRange operator>=(const LinearExpr &lhs, const LinearExpr &rhs) { +LinearRange operator>=(const LinearExpr& lhs, const LinearExpr& 
rhs) { return LinearRange(0, lhs - rhs, std::numeric_limits::infinity()); } diff --git a/ortools/linear_solver/linear_solver.cc b/ortools/linear_solver/linear_solver.cc index 5c90b42276..9260084e25 100644 --- a/ortools/linear_solver/linear_solver.cc +++ b/ortools/linear_solver/linear_solver.cc @@ -87,13 +87,13 @@ bool SolverTypeIsMip(MPModelRequest::SolverType solver_type) { return false; } -double MPConstraint::GetCoefficient(const MPVariable *const var) const { +double MPConstraint::GetCoefficient(const MPVariable* const var) const { DLOG_IF(DFATAL, !interface_->solver_->OwnsVariable(var)) << var; if (var == nullptr) return 0.0; return gtl::FindWithDefault(coefficients_, var, 0.0); } -void MPConstraint::SetCoefficient(const MPVariable *const var, double coeff) { +void MPConstraint::SetCoefficient(const MPVariable* const var, double coeff) { DLOG_IF(DFATAL, !interface_->solver_->OwnsVariable(var)) << var; if (var == nullptr) return; if (coeff == 0.0) { @@ -156,7 +156,7 @@ MPSolver::BasisStatus MPConstraint::basis_status() const { bool MPConstraint::ContainsNewVariables() { const int last_variable_index = interface_->last_variable_index(); - for (const auto &entry : coefficients_) { + for (const auto& entry : coefficients_) { const int variable_index = entry.first->index(); if (variable_index >= last_variable_index || !interface_->variable_is_extracted(variable_index)) { @@ -168,13 +168,13 @@ bool MPConstraint::ContainsNewVariables() { // ----- MPObjective ----- -double MPObjective::GetCoefficient(const MPVariable *const var) const { +double MPObjective::GetCoefficient(const MPVariable* const var) const { DLOG_IF(DFATAL, !interface_->solver_->OwnsVariable(var)) << var; if (var == nullptr) return 0.0; return gtl::FindWithDefault(coefficients_, var, 0.0); } -void MPObjective::SetCoefficient(const MPVariable *const var, double coeff) { +void MPObjective::SetCoefficient(const MPVariable* const var, double coeff) { DLOG_IF(DFATAL, 
!interface_->solver_->OwnsVariable(var)) << var; if (var == nullptr) return; if (coeff == 0.0) { @@ -195,7 +195,7 @@ void MPObjective::SetOffset(double value) { } namespace { -void CheckLinearExpr(const MPSolver &solver, const LinearExpr &linear_expr) { +void CheckLinearExpr(const MPSolver& solver, const LinearExpr& linear_expr) { for (auto var_value_pair : linear_expr.terms()) { CHECK(solver.OwnsVariable(var_value_pair.first)) << "Bad MPVariable* in LinearExpr, did you try adding an integer to an " @@ -204,22 +204,22 @@ void CheckLinearExpr(const MPSolver &solver, const LinearExpr &linear_expr) { } } // namespace -void MPObjective::OptimizeLinearExpr(const LinearExpr &linear_expr, +void MPObjective::OptimizeLinearExpr(const LinearExpr& linear_expr, bool is_maximization) { CheckLinearExpr(*interface_->solver_, linear_expr); interface_->ClearObjective(); coefficients_.clear(); SetOffset(linear_expr.offset()); - for (const auto &kv : linear_expr.terms()) { + for (const auto& kv : linear_expr.terms()) { SetCoefficient(kv.first, kv.second); } SetOptimizationDirection(is_maximization); } -void MPObjective::AddLinearExpr(const LinearExpr &linear_expr) { +void MPObjective::AddLinearExpr(const LinearExpr& linear_expr) { CheckLinearExpr(*interface_->solver_, linear_expr); SetOffset(offset_ + linear_expr.offset()); - for (const auto &kv : linear_expr.terms()) { + for (const auto& kv : linear_expr.terms()) { SetCoefficient(kv.first, GetCoefficient(kv.first) + kv.second); } } @@ -328,7 +328,7 @@ std::string MPSolver::SolverVersion() const { return interface_->SolverVersion(); } -void *MPSolver::underlying_solver() { return interface_->underlying_solver(); } +void* MPSolver::underlying_solver() { return interface_->underlying_solver(); } // ---- Solver-specific parameters ---- @@ -344,7 +344,7 @@ absl::Status MPSolver::SetNumThreads(int num_threads) { } bool MPSolver::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { 
solver_specific_parameter_string_ = parameters; return interface_->SetSolverSpecificParametersAsString(parameters); } @@ -352,34 +352,34 @@ bool MPSolver::SetSolverSpecificParametersAsString( // ----- Solver ----- #if defined(USE_CLP) || defined(USE_CBC) -extern MPSolverInterface *BuildCLPInterface(MPSolver *const solver); +extern MPSolverInterface* BuildCLPInterface(MPSolver* const solver); #endif #if defined(USE_CBC) -extern MPSolverInterface *BuildCBCInterface(MPSolver *const solver); +extern MPSolverInterface* BuildCBCInterface(MPSolver* const solver); #endif #if defined(USE_GLPK) -extern MPSolverInterface *BuildGLPKInterface(bool mip, MPSolver *const solver); +extern MPSolverInterface* BuildGLPKInterface(bool mip, MPSolver* const solver); #endif -extern MPSolverInterface *BuildBopInterface(MPSolver *const solver); -extern MPSolverInterface *BuildGLOPInterface(MPSolver *const solver); -extern MPSolverInterface *BuildSatInterface(MPSolver *const solver); +extern MPSolverInterface* BuildBopInterface(MPSolver* const solver); +extern MPSolverInterface* BuildGLOPInterface(MPSolver* const solver); +extern MPSolverInterface* BuildSatInterface(MPSolver* const solver); #if defined(USE_SCIP) -extern MPSolverInterface *BuildSCIPInterface(MPSolver *const solver); +extern MPSolverInterface* BuildSCIPInterface(MPSolver* const solver); #endif -extern MPSolverInterface *BuildGurobiInterface(bool mip, - MPSolver *const solver); +extern MPSolverInterface* BuildGurobiInterface(bool mip, + MPSolver* const solver); #if defined(USE_CPLEX) -extern MPSolverInterface *BuildCplexInterface(bool mip, MPSolver *const solver); +extern MPSolverInterface* BuildCplexInterface(bool mip, MPSolver* const solver); -extern MPSolverInterface *BuildGLOPInterface(MPSolver *const solver); +extern MPSolverInterface* BuildGLOPInterface(MPSolver* const solver); #endif #if defined(USE_XPRESS) -extern MPSolverInterface *BuildXpressInterface(bool mip, - MPSolver *const solver); +extern MPSolverInterface* 
BuildXpressInterface(bool mip, + MPSolver* const solver); #endif namespace { -MPSolverInterface *BuildSolverInterface(MPSolver *const solver) { +MPSolverInterface* BuildSolverInterface(MPSolver* const solver) { DCHECK(solver != nullptr); switch (solver->ProblemType()) { case MPSolver::BOP_INTEGER_PROGRAMMING: @@ -442,7 +442,7 @@ int NumDigits(int n) { } } // namespace -MPSolver::MPSolver(const std::string &name, +MPSolver::MPSolver(const std::string& name, OptimizationProblemType problem_type) : name_(name), problem_type_(problem_type), @@ -526,10 +526,11 @@ constexpr {MPSolver::KNAPSACK_MIXED_INTEGER_PROGRAMMING, "knapsack"}, {MPSolver::CPLEX_MIXED_INTEGER_PROGRAMMING, "cplex"}, {MPSolver::XPRESS_MIXED_INTEGER_PROGRAMMING, "xpress"}, + }; // static bool MPSolver::ParseSolverType(absl::string_view solver_id, - MPSolver::OptimizationProblemType *type) { + MPSolver::OptimizationProblemType* type) { // Normalize the solver id. const std::string id = absl::StrReplaceAll(absl::AsciiStrToUpper(solver_id), {{"-", "_"}}); @@ -555,7 +556,7 @@ bool MPSolver::ParseSolverType(absl::string_view solver_id, } // Reverse lookup in the kOptimizationProblemTypeNames[] array. 
- for (auto &named_solver : kOptimizationProblemTypeNames) { + for (auto& named_solver : kOptimizationProblemTypeNames) { if (named_solver.name == lower_id) { *type = named_solver.problem_type; return true; @@ -567,7 +568,7 @@ bool MPSolver::ParseSolverType(absl::string_view solver_id, const absl::string_view ToString( MPSolver::OptimizationProblemType optimization_problem_type) { - for (const auto &named_solver : kOptimizationProblemTypeNames) { + for (const auto& named_solver : kOptimizationProblemTypeNames) { if (named_solver.problem_type == optimization_problem_type) { return named_solver.name; } @@ -578,8 +579,8 @@ const absl::string_view ToString( } bool AbslParseFlag(const absl::string_view text, - MPSolver::OptimizationProblemType *solver_type, - std::string *error) { + MPSolver::OptimizationProblemType* solver_type, + std::string* error) { DCHECK(solver_type != nullptr); DCHECK(error != nullptr); const bool result = MPSolver::ParseSolverType(text, solver_type); @@ -591,14 +592,14 @@ bool AbslParseFlag(const absl::string_view text, /* static */ MPSolver::OptimizationProblemType MPSolver::ParseSolverTypeOrDie( - const std::string &solver_id) { + const std::string& solver_id) { MPSolver::OptimizationProblemType problem_type; CHECK(MPSolver::ParseSolverType(solver_id, &problem_type)) << solver_id; return problem_type; } /* static */ -MPSolver *MPSolver::CreateSolver(const std::string &solver_id) { +MPSolver* MPSolver::CreateSolver(const std::string& solver_id) { MPSolver::OptimizationProblemType problem_type; if (!MPSolver::ParseSolverType(solver_id, &problem_type)) { LOG(WARNING) << "Unrecognized solver type: " << solver_id; @@ -612,7 +613,7 @@ MPSolver *MPSolver::CreateSolver(const std::string &solver_id) { return new MPSolver("", problem_type); } -MPVariable *MPSolver::LookupVariableOrNull(const std::string &var_name) const { +MPVariable* MPSolver::LookupVariableOrNull(const std::string& var_name) const { if (!variable_name_to_index_) 
GenerateVariableNameIndex(); absl::flat_hash_map::const_iterator it = @@ -621,8 +622,8 @@ MPVariable *MPSolver::LookupVariableOrNull(const std::string &var_name) const { return variables_[it->second]; } -MPConstraint *MPSolver::LookupConstraintOrNull( - const std::string &constraint_name) const { +MPConstraint* MPSolver::LookupConstraintOrNull( + const std::string& constraint_name) const { if (!constraint_name_to_index_) GenerateConstraintNameIndex(); const auto it = constraint_name_to_index_->find(constraint_name); @@ -633,7 +634,7 @@ MPConstraint *MPSolver::LookupConstraintOrNull( // ----- Methods using protocol buffers ----- MPSolverResponseStatus MPSolver::LoadModelFromProto( - const MPModelProto &input_model, std::string *error_message) { + const MPModelProto& input_model, std::string* error_message) { // The variable and constraint names are dropped, because we allow // duplicate names in the proto (they're not considered as 'ids'), // unlike the MPSolver C++ API which crashes if there are duplicate names. @@ -644,7 +645,7 @@ MPSolverResponseStatus MPSolver::LoadModelFromProto( } MPSolverResponseStatus MPSolver::LoadModelFromProtoWithUniqueNamesOrDie( - const MPModelProto &input_model, std::string *error_message) { + const MPModelProto& input_model, std::string* error_message) { // Force variable and constraint name indexing (which CHECKs name uniqueness). 
GenerateVariableNameIndex(); GenerateConstraintNameIndex(); @@ -655,8 +656,8 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoWithUniqueNamesOrDie( } MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( - const MPModelProto &input_model, bool clear_names, - bool check_model_validity, std::string *error_message) { + const MPModelProto& input_model, bool clear_names, + bool check_model_validity, std::string* error_message) { CHECK(error_message != nullptr); if (check_model_validity) { const std::string error = FindErrorInMPModelProto(input_model); @@ -683,12 +684,12 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( return MPSOLVER_MODEL_INVALID; } - MPObjective *const objective = MutableObjective(); + MPObjective* const objective = MutableObjective(); // Passing empty names makes the MPSolver generate unique names. const std::string empty; for (int i = 0; i < input_model.variable_size(); ++i) { - const MPVariableProto &var_proto = input_model.variable(i); - MPVariable *variable = + const MPVariableProto& var_proto = input_model.variable(i); + MPVariable* variable = MakeNumVar(var_proto.lower_bound(), var_proto.upper_bound(), clear_names ? empty : var_proto.name()); variable->SetInteger(var_proto.is_integer()); @@ -698,13 +699,13 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( objective->SetCoefficient(variable, var_proto.objective_coefficient()); } - for (const MPConstraintProto &ct_proto : input_model.constraint()) { + for (const MPConstraintProto& ct_proto : input_model.constraint()) { if (ct_proto.lower_bound() == -infinity() && ct_proto.upper_bound() == infinity()) { continue; } - MPConstraint *const ct = + MPConstraint* const ct = MakeRowConstraint(ct_proto.lower_bound(), ct_proto.upper_bound(), clear_names ? 
empty : ct_proto.name()); ct->set_is_lazy(ct_proto.is_lazy()); @@ -714,11 +715,11 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( } } - for (const MPGeneralConstraintProto &general_constraint : + for (const MPGeneralConstraintProto& general_constraint : input_model.general_constraint()) { switch (general_constraint.general_constraint_case()) { case MPGeneralConstraintProto::kIndicatorConstraint: { - const auto &proto = + const auto& proto = general_constraint.indicator_constraint().constraint(); if (proto.lower_bound() == -infinity() && proto.upper_bound() == infinity()) { @@ -726,7 +727,7 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( } const int constraint_index = NumConstraints(); - MPConstraint *const constraint = new MPConstraint( + MPConstraint* const constraint = new MPConstraint( constraint_index, proto.lower_bound(), proto.upper_bound(), clear_names ? "" : proto.name(), interface_.get()); if (constraint_name_to_index_) { @@ -742,7 +743,7 @@ MPSolverResponseStatus MPSolver::LoadModelFromProtoInternal( proto.coefficient(j)); } - MPVariable *const variable = + MPVariable* const variable = variables_[general_constraint.indicator_constraint().var_index()]; constraint->indicator_variable_ = variable; constraint->indicator_value_ = @@ -802,7 +803,7 @@ MPSolverResponseStatus ResultStatusToMPSolverResponseStatus( } } // namespace -void MPSolver::FillSolutionResponseProto(MPSolutionResponse *response) const { +void MPSolver::FillSolutionResponseProto(MPSolutionResponse* response) const { CHECK(response != nullptr); response->Clear(); response->set_status( @@ -830,8 +831,8 @@ void MPSolver::FillSolutionResponseProto(MPSolutionResponse *response) const { } // static -void MPSolver::SolveWithProto(const MPModelRequest &model_request, - MPSolutionResponse *response) { +void MPSolver::SolveWithProto(const MPModelRequest& model_request, + MPSolutionResponse* response) { CHECK(response != nullptr); MPSolver 
solver(model_request.model().name(), static_cast( @@ -846,7 +847,7 @@ void MPSolver::SolveWithProto(const MPModelRequest &model_request, return; } - const absl::optional > optional_model = + const absl::optional> optional_model = ExtractValidMPModelOrPopulateResponseStatus(model_request, response); if (!optional_model) { LOG_IF(WARNING, model_request.enable_internal_solver_output()) @@ -898,15 +899,15 @@ void MPSolver::SolveWithProto(const MPModelRequest &model_request, } } -void MPSolver::ExportModelToProto(MPModelProto *output_model) const { +void MPSolver::ExportModelToProto(MPModelProto* output_model) const { DCHECK(output_model != nullptr); output_model->Clear(); // Name output_model->set_name(Name()); // Variables for (int j = 0; j < variables_.size(); ++j) { - const MPVariable *const var = variables_[j]; - MPVariableProto *const variable_proto = output_model->add_variable(); + const MPVariable* const var = variables_[j]; + MPVariableProto* const variable_proto = output_model->add_variable(); // TODO(user): Add option to avoid filling the var name to avoid overly // large protocol buffers. variable_proto->set_name(var->name()); @@ -928,20 +929,20 @@ void MPSolver::ExportModelToProto(MPModelProto *output_model) const { // This step is needed as long as the variable indices are given by the // underlying solver at the time of model extraction. // TODO(user): remove this step. 
- absl::flat_hash_map var_to_index; + absl::flat_hash_map var_to_index; for (int j = 0; j < variables_.size(); ++j) { var_to_index[variables_[j]] = j; } // Constraints for (int i = 0; i < constraints_.size(); ++i) { - MPConstraint *const constraint = constraints_[i]; - MPConstraintProto *constraint_proto; + MPConstraint* const constraint = constraints_[i]; + MPConstraintProto* constraint_proto; if (constraint->indicator_variable() != nullptr) { - MPGeneralConstraintProto *const general_constraint_proto = + MPGeneralConstraintProto* const general_constraint_proto = output_model->add_general_constraint(); general_constraint_proto->set_name(constraint->name()); - MPIndicatorConstraint *const indicator_constraint_proto = + MPIndicatorConstraint* const indicator_constraint_proto = general_constraint_proto->mutable_indicator_constraint(); indicator_constraint_proto->set_var_index( constraint->indicator_variable()->index()); @@ -956,9 +957,9 @@ void MPSolver::ExportModelToProto(MPModelProto *output_model) const { constraint_proto->set_is_lazy(constraint->is_lazy()); // Vector linear_term will contain pairs (variable index, coeff), that will // be sorted by variable index. - std::vector > linear_term; - for (const auto &entry : constraint->coefficients_) { - const MPVariable *const var = entry.first; + std::vector> linear_term; + for (const auto& entry : constraint->coefficients_) { + const MPVariable* const var = entry.first; const int var_index = gtl::FindWithDefault(var_to_index, var, -1); DCHECK_NE(-1, var_index); const double coeff = entry.second; @@ -968,7 +969,7 @@ void MPSolver::ExportModelToProto(MPModelProto *output_model) const { // few terms. std::sort(linear_term.begin(), linear_term.end()); // Now use linear term. 
- for (const std::pair &var_and_coeff : linear_term) { + for (const std::pair& var_and_coeff : linear_term) { constraint_proto->add_var_index(var_and_coeff.first); constraint_proto->add_coefficient(var_and_coeff.second); } @@ -978,16 +979,16 @@ void MPSolver::ExportModelToProto(MPModelProto *output_model) const { output_model->set_objective_offset(Objective().offset()); if (!solution_hint_.empty()) { - PartialVariableAssignment *const hint = + PartialVariableAssignment* const hint = output_model->mutable_solution_hint(); - for (const auto &var_value_pair : solution_hint_) { + for (const auto& var_value_pair : solution_hint_) { hint->add_var_index(var_value_pair.first->index()); hint->add_var_value(var_value_pair.second); } } } -absl::Status MPSolver::LoadSolutionFromProto(const MPSolutionResponse &response, +absl::Status MPSolver::LoadSolutionFromProto(const MPSolutionResponse& response, double tolerance) { interface_->result_status_ = static_cast(response.status()); if (response.status() != MPSOLVER_OPTIMAL && @@ -1015,7 +1016,7 @@ absl::Status MPSolver::LoadSolutionFromProto(const MPSolutionResponse &response, int last_offending_var = -1; for (int i = 0; i < response.variable_value_size(); ++i) { const double var_value = response.variable_value(i); - MPVariable *var = variables_[i]; + MPVariable* var = variables_[i]; // TODO(user): Use parameter when they become available in this class. 
const double lb_error = var->lb() - var_value; const double ub_error = var_value - var->ub(); @@ -1073,15 +1074,15 @@ void MPSolver::Reset() { interface_->Reset(); } bool MPSolver::InterruptSolve() { return interface_->InterruptSolve(); } void MPSolver::SetStartingLpBasis( - const std::vector &variable_statuses, - const std::vector &constraint_statuses) { + const std::vector& variable_statuses, + const std::vector& constraint_statuses) { interface_->SetStartingLpBasis(variable_statuses, constraint_statuses); } -MPVariable *MPSolver::MakeVar(double lb, double ub, bool integer, - const std::string &name) { +MPVariable* MPSolver::MakeVar(double lb, double ub, bool integer, + const std::string& name) { const int var_index = NumVariables(); - MPVariable *v = + MPVariable* v = new MPVariable(var_index, lb, ub, integer, name, interface_.get()); if (variable_name_to_index_) { gtl::InsertOrDie(&*variable_name_to_index_, v->name(), var_index); @@ -1092,23 +1093,23 @@ MPVariable *MPSolver::MakeVar(double lb, double ub, bool integer, return v; } -MPVariable *MPSolver::MakeNumVar(double lb, double ub, - const std::string &name) { +MPVariable* MPSolver::MakeNumVar(double lb, double ub, + const std::string& name) { return MakeVar(lb, ub, false, name); } -MPVariable *MPSolver::MakeIntVar(double lb, double ub, - const std::string &name) { +MPVariable* MPSolver::MakeIntVar(double lb, double ub, + const std::string& name) { return MakeVar(lb, ub, true, name); } -MPVariable *MPSolver::MakeBoolVar(const std::string &name) { +MPVariable* MPSolver::MakeBoolVar(const std::string& name) { return MakeVar(0.0, 1.0, true, name); } void MPSolver::MakeVarArray(int nb, double lb, double ub, bool integer, - const std::string &name, - std::vector *vars) { + const std::string& name, + std::vector* vars) { DCHECK_GE(nb, 0); if (nb <= 0) return; const int num_digits = NumDigits(nb); @@ -1124,34 +1125,34 @@ void MPSolver::MakeVarArray(int nb, double lb, double ub, bool integer, } void 
MPSolver::MakeNumVarArray(int nb, double lb, double ub, - const std::string &name, - std::vector *vars) { + const std::string& name, + std::vector* vars) { MakeVarArray(nb, lb, ub, false, name, vars); } void MPSolver::MakeIntVarArray(int nb, double lb, double ub, - const std::string &name, - std::vector *vars) { + const std::string& name, + std::vector* vars) { MakeVarArray(nb, lb, ub, true, name, vars); } -void MPSolver::MakeBoolVarArray(int nb, const std::string &name, - std::vector *vars) { +void MPSolver::MakeBoolVarArray(int nb, const std::string& name, + std::vector* vars) { MakeVarArray(nb, 0.0, 1.0, true, name, vars); } -MPConstraint *MPSolver::MakeRowConstraint(double lb, double ub) { +MPConstraint* MPSolver::MakeRowConstraint(double lb, double ub) { return MakeRowConstraint(lb, ub, ""); } -MPConstraint *MPSolver::MakeRowConstraint() { +MPConstraint* MPSolver::MakeRowConstraint() { return MakeRowConstraint(-infinity(), infinity(), ""); } -MPConstraint *MPSolver::MakeRowConstraint(double lb, double ub, - const std::string &name) { +MPConstraint* MPSolver::MakeRowConstraint(double lb, double ub, + const std::string& name) { const int constraint_index = NumConstraints(); - MPConstraint *const constraint = + MPConstraint* const constraint = new MPConstraint(constraint_index, lb, ub, name, interface_.get()); if (constraint_name_to_index_) { gtl::InsertOrDie(&*constraint_name_to_index_, constraint->name(), @@ -1163,20 +1164,20 @@ MPConstraint *MPSolver::MakeRowConstraint(double lb, double ub, return constraint; } -MPConstraint *MPSolver::MakeRowConstraint(const std::string &name) { +MPConstraint* MPSolver::MakeRowConstraint(const std::string& name) { return MakeRowConstraint(-infinity(), infinity(), name); } -MPConstraint *MPSolver::MakeRowConstraint(const LinearRange &range) { +MPConstraint* MPSolver::MakeRowConstraint(const LinearRange& range) { return MakeRowConstraint(range, ""); } -MPConstraint *MPSolver::MakeRowConstraint(const LinearRange &range, - const 
std::string &name) { +MPConstraint* MPSolver::MakeRowConstraint(const LinearRange& range, + const std::string& name) { CheckLinearExpr(*this, range.linear_expr()); - MPConstraint *constraint = + MPConstraint* constraint = MakeRowConstraint(range.lower_bound(), range.upper_bound(), name); - for (const auto &kv : range.linear_expr().terms()) { + for (const auto& kv : range.linear_expr().terms()) { constraint->SetCoefficient(kv.first, kv.second); } return constraint; @@ -1188,7 +1189,7 @@ int MPSolver::ComputeMaxConstraintSize(int min_constraint_index, DCHECK_GE(min_constraint_index, 0); DCHECK_LE(max_constraint_index, constraints_.size()); for (int i = min_constraint_index; i < max_constraint_index; ++i) { - MPConstraint *const ct = constraints_[i]; + MPConstraint* const ct = constraints_[i]; if (ct->coefficients_.size() > max_constraint_size) { max_constraint_size = ct->coefficients_.size(); } @@ -1211,7 +1212,7 @@ bool MPSolver::HasInfeasibleConstraints() const { } bool MPSolver::HasIntegerVariables() const { - for (const MPVariable *const variable : variables_) { + for (const MPVariable* const variable : variables_) { if (variable->integer()) return true; } return false; @@ -1222,7 +1223,7 @@ MPSolver::ResultStatus MPSolver::Solve() { return Solve(default_param); } -MPSolver::ResultStatus MPSolver::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus MPSolver::Solve(const MPSolverParameters& param) { // Special case for infeasible constraints so that all solvers have // the same behavior. 
// TODO(user): replace this by model extraction to proto + proto validation @@ -1249,12 +1250,12 @@ MPSolver::ResultStatus MPSolver::Solve(const MPSolverParameters ¶m) { return status; } -void MPSolver::Write(const std::string &file_name) { +void MPSolver::Write(const std::string& file_name) { interface_->Write(file_name); } namespace { -std::string PrettyPrintVar(const MPVariable &var) { +std::string PrettyPrintVar(const MPVariable& var) { const std::string prefix = "Variable '" + var.name() + "': domain = "; if (var.lb() >= MPSolver::infinity() || var.ub() <= -MPSolver::infinity() || var.lb() > var.ub()) { @@ -1286,7 +1287,7 @@ std::string PrettyPrintVar(const MPVariable &var) { : absl::StrFormat("%f]", var.ub())); } -std::string PrettyPrintConstraint(const MPConstraint &constraint) { +std::string PrettyPrintConstraint(const MPConstraint& constraint) { std::string prefix = "Constraint '" + constraint.name() + "': "; if (constraint.lb() >= MPSolver::infinity() || constraint.ub() <= -MPSolver::infinity() || @@ -1316,7 +1317,7 @@ std::string PrettyPrintConstraint(const MPConstraint &constraint) { absl::Status MPSolver::ClampSolutionWithinBounds() { interface_->ExtractModel(); - for (MPVariable *const variable : variables_) { + for (MPVariable* const variable : variables_) { const double value = variable->solution_value(); if (std::isnan(value)) { return absl::InvalidArgumentError( @@ -1337,9 +1338,9 @@ std::vector MPSolver::ComputeConstraintActivities() const { if (!interface_->CheckSolutionIsSynchronizedAndExists()) return {}; std::vector activities(constraints_.size(), 0.0); for (int i = 0; i < constraints_.size(); ++i) { - const MPConstraint &constraint = *constraints_[i]; + const MPConstraint& constraint = *constraints_[i]; AccurateSum sum; - for (const auto &entry : constraint.coefficients_) { + for (const auto& entry : constraint.coefficients_) { sum.Add(entry.first->solution_value() * entry.second); } activities[i] = sum.Value(); @@ -1355,7 +1356,7 @@ bool 
MPSolver::VerifySolution(double tolerance, bool log_errors) const { // Verify variables. for (int i = 0; i < variables_.size(); ++i) { - const MPVariable &var = *variables_[i]; + const MPVariable& var = *variables_[i]; const double value = var.solution_value(); // Check for NaN. if (std::isnan(value)) { @@ -1403,11 +1404,11 @@ bool MPSolver::VerifySolution(double tolerance, bool log_errors) const { // Verify constraints. const std::vector activities = ComputeConstraintActivities(); for (int i = 0; i < constraints_.size(); ++i) { - const MPConstraint &constraint = *constraints_[i]; + const MPConstraint& constraint = *constraints_[i]; const double activity = activities[i]; // Re-compute the activity with a inaccurate summing algorithm. double inaccurate_activity = 0.0; - for (const auto &entry : constraint.coefficients_) { + for (const auto& entry : constraint.coefficients_) { inaccurate_activity += entry.first->solution_value() * entry.second; } // Catch NaNs. @@ -1456,11 +1457,11 @@ bool MPSolver::VerifySolution(double tolerance, bool log_errors) const { } // Verify that the objective value wasn't reported incorrectly. 
- const MPObjective &objective = Objective(); + const MPObjective& objective = Objective(); AccurateSum objective_sum; objective_sum.Add(objective.offset()); double inaccurate_objective_value = objective.offset(); - for (const auto &entry : objective.coefficients_) { + for (const auto& entry : objective.coefficients_) { const double term = entry.first->solution_value() * entry.second; objective_sum.Add(term); inaccurate_objective_value += term; @@ -1506,7 +1507,7 @@ double MPSolver::ComputeExactConditionNumber() const { return interface_->ComputeExactConditionNumber(); } -bool MPSolver::OwnsVariable(const MPVariable *var) const { +bool MPSolver::OwnsVariable(const MPVariable* var) const { if (var == nullptr) return false; if (var->index() >= 0 && var->index() < variables_.size()) { // Then, verify that the variable with this index has the same address. @@ -1544,9 +1545,8 @@ bool MPSolver::ExportModelAsMpsFormat(bool fixed_format, bool obfuscate, return status_or.ok(); } -void MPSolver::SetHint( - std::vector > hint) { - for (const auto &var_value_pair : hint) { +void MPSolver::SetHint(std::vector> hint) { + for (const auto& var_value_pair : hint) { CHECK(OwnsVariable(var_value_pair.first)) << "hint variable does not belong to this solver"; } @@ -1556,7 +1556,7 @@ void MPSolver::SetHint( void MPSolver::GenerateVariableNameIndex() const { if (variable_name_to_index_) return; variable_name_to_index_ = absl::flat_hash_map(); - for (const MPVariable *const var : variables_) { + for (const MPVariable* const var : variables_) { gtl::InsertOrDie(&*variable_name_to_index_, var->name(), var->index()); } } @@ -1564,14 +1564,14 @@ void MPSolver::GenerateVariableNameIndex() const { void MPSolver::GenerateConstraintNameIndex() const { if (constraint_name_to_index_) return; constraint_name_to_index_ = absl::flat_hash_map(); - for (const MPConstraint *const cst : constraints_) { + for (const MPConstraint* const cst : constraints_) { gtl::InsertOrDie(&*constraint_name_to_index_, 
cst->name(), cst->index()); } } bool MPSolver::NextSolution() { return interface_->NextSolution(); } -void MPSolver::SetCallback(MPCallback *mp_callback) { +void MPSolver::SetCallback(MPCallback* mp_callback) { interface_->SetCallback(mp_callback); } @@ -1611,7 +1611,7 @@ bool MPSolverResponseStatusIsRpcError(MPSolverResponseStatus status) { const int MPSolverInterface::kDummyVariableIndex = 0; -MPSolverInterface::MPSolverInterface(MPSolver *const solver) +MPSolverInterface::MPSolverInterface(MPSolver* const solver) : solver_(solver), sync_status_(MODEL_SYNCHRONIZED), result_status_(MPSolver::NOT_SOLVED), @@ -1623,7 +1623,7 @@ MPSolverInterface::MPSolverInterface(MPSolver *const solver) MPSolverInterface::~MPSolverInterface() {} -void MPSolverInterface::Write(const std::string &filename) { +void MPSolverInterface::Write(const std::string& filename) { LOG(WARNING) << "Writing model not implemented in this solver interface."; } @@ -1722,7 +1722,7 @@ double MPSolverInterface::ComputeExactConditionNumber() const { return 0.0; } -void MPSolverInterface::SetCommonParameters(const MPSolverParameters ¶m) { +void MPSolverInterface::SetCommonParameters(const MPSolverParameters& param) { // TODO(user): Overhaul the code that sets parameters to enable changing // GLOP parameters without issuing warnings. 
// By default, we let GLOP keep its own default tolerance, much more accurate @@ -1743,7 +1743,7 @@ void MPSolverInterface::SetCommonParameters(const MPSolverParameters ¶m) { } } -void MPSolverInterface::SetMIPParameters(const MPSolverParameters ¶m) { +void MPSolverInterface::SetMIPParameters(const MPSolverParameters& param) { if (solver_->ProblemType() != MPSolver::GLOP_LINEAR_PROGRAMMING) { SetRelativeMipGap( param.GetDoubleParam(MPSolverParameters::RELATIVE_MIP_GAP)); @@ -1775,7 +1775,7 @@ absl::Status MPSolverInterface::SetNumThreads(int num_threads) { } bool MPSolverInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { // Note(user): this method needs to return a success/failure boolean // immediately, so we also perform the actual parameter parsing right away. // Some implementations will keep them forever and won't need to re-parse @@ -1812,7 +1812,7 @@ bool MPSolverInterface::SetSolverSpecificParametersAsString( return no_error_so_far; } -bool MPSolverInterface::ReadParameterFile(const std::string &filename) { +bool MPSolverInterface::ReadParameterFile(const std::string& filename) { LOG(WARNING) << "ReadParameterFile() not supported by this solver."; return false; } diff --git a/ortools/linear_solver/linear_solver.h b/ortools/linear_solver/linear_solver.h index 055348751c..a1f9f8beb6 100644 --- a/ortools/linear_solver/linear_solver.h +++ b/ortools/linear_solver/linear_solver.h @@ -217,7 +217,7 @@ class MPSolver { }; /// Create a solver with the given name and underlying solver backend. 
- MPSolver(const std::string &name, OptimizationProblemType problem_type); + MPSolver(const std::string& name, OptimizationProblemType problem_type); virtual ~MPSolver(); /** @@ -248,7 +248,7 @@ class MPSolver { * - GLPK_LINEAR_PROGRAMMING or GLPK_LP * - GLPK_MIXED_INTEGER_PROGRAMMING or GLPK or GLPK_MIP */ - static MPSolver *CreateSolver(const std::string &solver_id); + static MPSolver* CreateSolver(const std::string& solver_id); /** * Whether the given problem type is supported (this will depend on the @@ -262,19 +262,19 @@ class MPSolver { * See the documentation of CreateSolver() for the list of supported names. */ static bool ParseSolverType(absl::string_view solver_id, - OptimizationProblemType *type); + OptimizationProblemType* type); /** * Parses the name of the solver and returns the correct optimization type or * dies. Invariant: ParseSolverTypeOrDie(ToString(type)) = type. */ static OptimizationProblemType ParseSolverTypeOrDie( - const std::string &solver_id); + const std::string& solver_id); bool IsMIP() const; /// Returns the name of the model set at construction. - const std::string &Name() const { + const std::string& Name() const { return name_; // Set at construction. } @@ -297,14 +297,14 @@ class MPSolver { * Returns the array of variables handled by the MPSolver. (They are listed in * the order in which they were created.) */ - const std::vector &variables() const { return variables_; } + const std::vector& variables() const { return variables_; } /** * Looks up a variable by name, and returns nullptr if it does not exist. The * first call has a O(n) complexity, as the variable name index is lazily * created upon first use. Will crash if variable names are not unique. */ - MPVariable *LookupVariableOrNull(const std::string &var_name) const; + MPVariable* LookupVariableOrNull(const std::string& var_name) const; /** * Creates a variable with the given bounds, integrality requirement and @@ -313,17 +313,17 @@ class MPSolver { * optional. 
If you give an empty name, name() will auto-generate one for you * upon request. */ - MPVariable *MakeVar(double lb, double ub, bool integer, - const std::string &name); + MPVariable* MakeVar(double lb, double ub, bool integer, + const std::string& name); /// Creates a continuous variable. - MPVariable *MakeNumVar(double lb, double ub, const std::string &name); + MPVariable* MakeNumVar(double lb, double ub, const std::string& name); /// Creates an integer variable. - MPVariable *MakeIntVar(double lb, double ub, const std::string &name); + MPVariable* MakeIntVar(double lb, double ub, const std::string& name); /// Creates a boolean variable. - MPVariable *MakeBoolVar(const std::string &name); + MPVariable* MakeBoolVar(const std::string& name); /** * Creates an array of variables. All variables created have the same bounds @@ -340,20 +340,20 @@ class MPSolver { * @param[out] vars the vector of variables to fill with variables. */ void MakeVarArray(int nb, double lb, double ub, bool integer, - const std::string &name_prefix, - std::vector *vars); + const std::string& name_prefix, + std::vector* vars); /// Creates an array of continuous variables. - void MakeNumVarArray(int nb, double lb, double ub, const std::string &name, - std::vector *vars); + void MakeNumVarArray(int nb, double lb, double ub, const std::string& name, + std::vector* vars); /// Creates an array of integer variables. - void MakeIntVarArray(int nb, double lb, double ub, const std::string &name, - std::vector *vars); + void MakeIntVarArray(int nb, double lb, double ub, const std::string& name, + std::vector* vars); /// Creates an array of boolean variables. - void MakeBoolVarArray(int nb, const std::string &name, - std::vector *vars); + void MakeBoolVarArray(int nb, const std::string& name, + std::vector* vars); /// Returns the number of constraints. 
int NumConstraints() const { return constraints_.size(); } @@ -363,9 +363,7 @@ class MPSolver { * * They are listed in the order in which they were created. */ - const std::vector &constraints() const { - return constraints_; - } + const std::vector& constraints() const { return constraints_; } /** * Looks up a constraint by name, and returns nullptr if it does not exist. @@ -374,8 +372,8 @@ class MPSolver { * lazily created upon first use. Will crash if constraint names are not * unique. */ - MPConstraint *LookupConstraintOrNull( - const std::string &constraint_name) const; + MPConstraint* LookupConstraintOrNull( + const std::string& constraint_name) const; /** * Creates a linear constraint with given bounds. @@ -385,27 +383,27 @@ class MPSolver { * * @return a pointer to the newly created constraint. */ - MPConstraint *MakeRowConstraint(double lb, double ub); + MPConstraint* MakeRowConstraint(double lb, double ub); /// Creates a constraint with -infinity and +infinity bounds. - MPConstraint *MakeRowConstraint(); + MPConstraint* MakeRowConstraint(); /// Creates a named constraint with given bounds. - MPConstraint *MakeRowConstraint(double lb, double ub, - const std::string &name); + MPConstraint* MakeRowConstraint(double lb, double ub, + const std::string& name); /// Creates a named constraint with -infinity and +infinity bounds. - MPConstraint *MakeRowConstraint(const std::string &name); + MPConstraint* MakeRowConstraint(const std::string& name); /** * Creates a constraint owned by MPSolver enforcing: * range.lower_bound() <= range.linear_expr() <= range.upper_bound() */ - MPConstraint *MakeRowConstraint(const LinearRange &range); + MPConstraint* MakeRowConstraint(const LinearRange& range); /// As above, but also names the constraint. - MPConstraint *MakeRowConstraint(const LinearRange &range, - const std::string &name); + MPConstraint* MakeRowConstraint(const LinearRange& range, + const std::string& name); /** * Returns the objective object. 
@@ -413,10 +411,10 @@ class MPSolver { * Note that the objective is owned by the solver, and is initialized to its * default value (see the MPObjective class below) at construction. */ - const MPObjective &Objective() const { return *objective_; } + const MPObjective& Objective() const { return *objective_; } /// Returns the mutable objective object. - MPObjective *MutableObjective() { return objective_.get(); } + MPObjective* MutableObjective() { return objective_.get(); } /** * The status of solving the problem. The straightforward translation to @@ -445,13 +443,13 @@ class MPSolver { ResultStatus Solve(); /// Solves the problem using the specified parameter values. - ResultStatus Solve(const MPSolverParameters ¶m); + ResultStatus Solve(const MPSolverParameters& param); /** * Writes the model using the solver internal write function. Currently only * available for Gurobi. */ - void Write(const std::string &file_name); + void Write(const std::string& file_name); /** * Advanced usage: compute the "activities" of all constraints, which are the @@ -507,8 +505,8 @@ class MPSolver { * otherwise (currently only MPSOLVER_MODEL_INVALID and MPSOLVER_INFEASIBLE). * If the model isn't valid, populates "error_message". */ - MPSolverResponseStatus LoadModelFromProto(const MPModelProto &input_model, - std::string *error_message); + MPSolverResponseStatus LoadModelFromProto(const MPModelProto& input_model, + std::string* error_message); /** * Loads model from protocol buffer. * @@ -517,10 +515,10 @@ class MPSolver { * constraint names are unique, respectively. */ MPSolverResponseStatus LoadModelFromProtoWithUniqueNamesOrDie( - const MPModelProto &input_model, std::string *error_message); + const MPModelProto& input_model, std::string* error_message); /// Encodes the current solution in a solution response protocol buffer. 
- void FillSolutionResponseProto(MPSolutionResponse *response) const; + void FillSolutionResponseProto(MPSolutionResponse* response) const; /** * Solves the model encoded by a MPModelRequest protocol buffer and fills the @@ -531,17 +529,17 @@ class MPSolver { * solving), you should write another version of this function that creates * the MPSolver object on the heap and returns it. * - * Note(user): This attempts to first use `DirectlySolveProto()` (if + * Note(user): This attempts to first use `DirectlySolveProto()` (if * implemented). Consequently, this most likely does *not* override any of * the default parameters of the underlying solver. This behavior *differs* * from `MPSolver::Solve()` which by default sets the feasibility tolerance * and the gap limit (as of 2020/02/11, to 1e-7 and 0.0001, respectively). */ - static void SolveWithProto(const MPModelRequest &model_request, - MPSolutionResponse *response); + static void SolveWithProto(const MPModelRequest& model_request, + MPSolutionResponse* response); /// Exports model to protocol buffer. - void ExportModelToProto(MPModelProto *output_model) const; + void ExportModelToProto(MPModelProto* output_model) const; /** * Load a solution encoded in a protocol buffer onto this solver for easy * access via the usual reporting mechanisms. * @@ -575,7 +573,7 @@ class MPSolver { * that. */ absl::Status LoadSolutionFromProto( - const MPSolutionResponse &response, + const MPSolutionResponse& response, double tolerance = kDefaultPrimalTolerance); /** * @@ -615,7 +613,7 @@ class MPSolver { * The format is solver-specific and is the same as the corresponding solver * configuration file format. Returns true if the operation was successful.
*/ - bool SetSolverSpecificParametersAsString(const std::string ¶meters); + bool SetSolverSpecificParametersAsString(const std::string& parameters); std::string GetSolverSpecificParametersAsString() const { return solver_specific_parameter_string_; } @@ -633,7 +631,7 @@ class MPSolver { * try to return a solution "close" to this assignment in case of multiple * optimal solutions. */ - void SetHint(std::vector > hint); + void SetHint(std::vector > hint); /** * Advanced usage: possible basis status values for a variable and the slack @@ -659,8 +657,8 @@ class MPSolver { * likely not mean much on the presolved problem. */ void SetStartingLpBasis( - const std::vector &variable_statuses, - const std::vector &constraint_statuses); + const std::vector& variable_statuses, + const std::vector& constraint_statuses); /** * Infinity. @@ -721,7 +719,7 @@ class MPSolver { * that depends on the interface (CBC: OsiClpSolverInterface*, CLP: * ClpSimplex*, GLPK: glp_prob*, SCIP: SCIP*). */ - void *underlying_solver(); + void* underlying_solver(); /** Advanced usage: computes the exact condition number of the current scaled * basis: L1norm(B) * L1norm(inverse(B)), where B is the scaled basis. @@ -770,7 +768,7 @@ class MPSolver { // SCIP does not support suggesting a heuristic solution in the callback. // // See go/mpsolver-callbacks for additional documentation. - void SetCallback(MPCallback *mp_callback); + void SetCallback(MPCallback* mp_callback); bool SupportsCallbacks() const; // DEPRECATED: Use TimeLimit() and SetTimeLimit(absl::Duration) instead. @@ -814,7 +812,7 @@ class MPSolver { friend class KnapsackInterface; // Debugging: verify that the given MPVariable* belongs to this solver. - bool OwnsVariable(const MPVariable *var) const; + bool OwnsVariable(const MPVariable* var) const; private: // Computes the size of the constraint with the largest number of @@ -849,7 +847,7 @@ class MPSolver { std::unique_ptr interface_; // The vector of variables in the problem. 
- std::vector variables_; + std::vector variables_; // A map from a variable's name to its index in variables_. mutable absl::optional > variable_name_to_index_; @@ -857,7 +855,7 @@ class MPSolver { std::vector variable_is_extracted_; // The vector of constraints in the problem. - std::vector constraints_; + std::vector constraints_; // A map from a constraint's name to its index in constraints_. mutable absl::optional > constraint_name_to_index_; @@ -874,7 +872,7 @@ class MPSolver { // // TODO(user): replace by two vectors, a std::vector to indicate if a // hint is provided and a std::vector for the hint value. - std::vector > solution_hint_; + std::vector > solution_hint_; absl::Duration time_limit_ = absl::InfiniteDuration(); // Default = No limit. @@ -887,8 +885,8 @@ class MPSolver { std::string solver_specific_parameter_string_; MPSolverResponseStatus LoadModelFromProtoInternal( - const MPModelProto &input_model, bool clear_names, - bool check_model_validity, std::string *error_message); + const MPModelProto& input_model, bool clear_names, + bool check_model_validity, std::string* error_message); DISALLOW_COPY_AND_ASSIGN(MPSolver); }; @@ -900,21 +898,21 @@ inline bool SolverTypeIsMip(MPSolver::OptimizationProblemType solver_type) { const absl::string_view ToString( MPSolver::OptimizationProblemType optimization_problem_type); -inline std::ostream &operator<<( - std::ostream &os, +inline std::ostream& operator<<( + std::ostream& os, MPSolver::OptimizationProblemType optimization_problem_type) { return os << ToString(optimization_problem_type); } -inline std::ostream &operator<<(std::ostream &os, +inline std::ostream& operator<<(std::ostream& os, MPSolver::ResultStatus status) { return os << ProtoEnumToString( static_cast(status)); } bool AbslParseFlag(absl::string_view text, - MPSolver::OptimizationProblemType *solver_type, - std::string *error); + MPSolver::OptimizationProblemType* solver_type, + std::string* error); inline std::string AbslUnparseFlag( 
MPSolver::OptimizationProblemType solver_type) { @@ -936,21 +934,21 @@ class MPObjective { * If the variable does not belong to the solver, the function just returns, * or crashes in non-opt mode. */ - void SetCoefficient(const MPVariable *const var, double coeff); + void SetCoefficient(const MPVariable* const var, double coeff); /** * Gets the coefficient of a given variable in the objective * * It returns 0 if the variable does not appear in the objective). */ - double GetCoefficient(const MPVariable *const var) const; + double GetCoefficient(const MPVariable* const var) const; /** * Returns a map from variables to their coefficients in the objective. * * If a variable is not present in the map, then its coefficient is zero. */ - const absl::flat_hash_map &terms() const { + const absl::flat_hash_map& terms() const { return coefficients_; } @@ -964,19 +962,19 @@ class MPObjective { * Resets the current objective to take the value of linear_expr, and sets the * objective direction to maximize if "is_maximize", otherwise minimizes. */ - void OptimizeLinearExpr(const LinearExpr &linear_expr, bool is_maximization); + void OptimizeLinearExpr(const LinearExpr& linear_expr, bool is_maximization); /// Resets the current objective to maximize linear_expr. - void MaximizeLinearExpr(const LinearExpr &linear_expr) { + void MaximizeLinearExpr(const LinearExpr& linear_expr) { OptimizeLinearExpr(linear_expr, true); } /// Resets the current objective to minimize linear_expr. - void MinimizeLinearExpr(const LinearExpr &linear_expr) { + void MinimizeLinearExpr(const LinearExpr& linear_expr) { OptimizeLinearExpr(linear_expr, false); } /// Adds linear_expr to the current objective, does not change the direction. - void AddLinearExpr(const LinearExpr &linear_expr); + void AddLinearExpr(const LinearExpr& linear_expr); /// Sets the optimization direction (maximize: true or minimize: false). 
void SetOptimizationDirection(bool maximize); @@ -1035,13 +1033,13 @@ class MPObjective { // to several models. // At construction, an MPObjective has no terms (which is equivalent // on having a coefficient of 0 for all variables), and an offset of 0. - explicit MPObjective(MPSolverInterface *const interface_in) + explicit MPObjective(MPSolverInterface* const interface_in) : interface_(interface_in), coefficients_(1), offset_(0.0) {} - MPSolverInterface *const interface_; + MPSolverInterface* const interface_; // Mapping var -> coefficient. - absl::flat_hash_map coefficients_; + absl::flat_hash_map coefficients_; // Constant term. double offset_; @@ -1052,7 +1050,7 @@ class MPObjective { class MPVariable { public: /// Returns the name of the variable. - const std::string &name() const { return name_; } + const std::string& name() const { return name_; } /// Sets the integrality requirement of the variable. void SetInteger(bool integer); @@ -1143,7 +1141,7 @@ class MPVariable { // is specified in the constructor. A variable cannot belong to // several models. MPVariable(int index, double lb, double ub, bool integer, - const std::string &name, MPSolverInterface *const interface_in) + const std::string& name, MPSolverInterface* const interface_in) : index_(index), lb_(lb), ub_(ub), @@ -1165,7 +1163,7 @@ class MPVariable { double solution_value_; double reduced_cost_; int branching_priority_ = 0; - MPSolverInterface *const interface_; + MPSolverInterface* const interface_; DISALLOW_COPY_AND_ASSIGN(MPVariable); }; @@ -1177,7 +1175,7 @@ class MPVariable { class MPConstraint { public: /// Returns the name of the constraint. - const std::string &name() const { return name_; } + const std::string& name() const { return name_; } /// Clears all variables and coefficients. Does not clear the bounds. void Clear(); @@ -1188,20 +1186,20 @@ class MPConstraint { * If the variable does not belong to the solver, the function just returns, * or crashes in non-opt mode. 
*/ - void SetCoefficient(const MPVariable *const var, double coeff); + void SetCoefficient(const MPVariable* const var, double coeff); /** * Gets the coefficient of a given variable on the constraint (which is 0 if * the variable does not appear in the constraint). */ - double GetCoefficient(const MPVariable *const var) const; + double GetCoefficient(const MPVariable* const var) const; /** * Returns a map from variables to their coefficients in the constraint. * * If a variable is not present in the map, then its coefficient is zero. */ - const absl::flat_hash_map &terms() const { + const absl::flat_hash_map& terms() const { return coefficients_; } @@ -1238,7 +1236,7 @@ class MPConstraint { */ void set_is_lazy(bool laziness) { is_lazy_ = laziness; } - const MPVariable *indicator_variable() const { return indicator_variable_; } + const MPVariable* indicator_variable() const { return indicator_variable_; } bool indicator_value() const { return indicator_value_; } /// Returns the index of the constraint in the MPSolver::constraints_. @@ -1283,8 +1281,8 @@ class MPConstraint { // Constructor. A constraint points to a single MPSolverInterface // that is specified in the constructor. A constraint cannot belong // to several models. - MPConstraint(int index, double lb, double ub, const std::string &name, - MPSolverInterface *const interface_in) + MPConstraint(int index, double lb, double ub, const std::string& name, + MPSolverInterface* const interface_in) : coefficients_(1), index_(index), lb_(lb), @@ -1303,7 +1301,7 @@ class MPConstraint { bool ContainsNewVariables(); // Mapping var -> coefficient. - absl::flat_hash_map coefficients_; + absl::flat_hash_map coefficients_; const int index_; // See index(). @@ -1323,11 +1321,11 @@ class MPConstraint { // If given, this constraint is only active if `indicator_variable_`'s value // is equal to `indicator_value_`. 
- const MPVariable *indicator_variable_; + const MPVariable* indicator_variable_; bool indicator_value_; double dual_value_; - MPSolverInterface *const interface_; + MPSolverInterface* const interface_; DISALLOW_COPY_AND_ASSIGN(MPConstraint); }; @@ -1537,25 +1535,25 @@ class MPSolverInterface { // Constructor. The user will access the MPSolverInterface through the // MPSolver passed as argument. - explicit MPSolverInterface(MPSolver *const solver); + explicit MPSolverInterface(MPSolver* const solver); virtual ~MPSolverInterface(); // ----- Solve ----- // Solves problem with specified parameter values. Returns true if the // solution is optimal. - virtual MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) = 0; + virtual MPSolver::ResultStatus Solve(const MPSolverParameters& param) = 0; // Directly solves a MPModelRequest, bypassing the MPSolver data structures // entirely. Returns {} (eg. absl::nullopt) if the feature is not supported by // the underlying solver. virtual absl::optional DirectlySolveProto( - const MPModelRequest &request) { + const MPModelRequest& request) { return absl::nullopt; } // Writes the model using the solver internal write function. Currently only // available for GurobiInterface. - virtual void Write(const std::string &filename); + virtual void Write(const std::string& filename); // ----- Model modifications and extraction ----- // Resets extracted model. @@ -1574,28 +1572,28 @@ class MPSolverInterface { virtual void SetConstraintBounds(int index, double lb, double ub) = 0; // Adds a linear constraint. - virtual void AddRowConstraint(MPConstraint *const ct) = 0; + virtual void AddRowConstraint(MPConstraint* const ct) = 0; // Adds an indicator constraint. Returns true if the feature is supported by // the underlying solver. 
- virtual bool AddIndicatorConstraint(MPConstraint *const ct) { + virtual bool AddIndicatorConstraint(MPConstraint* const ct) { LOG(ERROR) << "Solver doesn't support indicator constraints."; return false; } // Add a variable. - virtual void AddVariable(MPVariable *const var) = 0; + virtual void AddVariable(MPVariable* const var) = 0; // Changes a coefficient in a constraint. - virtual void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, + virtual void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) = 0; // Clears a constraint from all its terms. - virtual void ClearConstraint(MPConstraint *const constraint) = 0; + virtual void ClearConstraint(MPConstraint* const constraint) = 0; // Changes a coefficient in the linear objective. - virtual void SetObjectiveCoefficient(const MPVariable *const variable, + virtual void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) = 0; // Changes the constant term in the linear objective. @@ -1685,7 +1683,7 @@ class MPSolverInterface { virtual std::string SolverVersion() const = 0; // Returns the underlying solver. - virtual void *underlying_solver() = 0; + virtual void* underlying_solver() = 0; // Computes exact condition number. Only available for continuous // problems and only implemented in GLPK. @@ -1693,8 +1691,8 @@ class MPSolverInterface { // See MPSolver::SetStartingLpBasis(). virtual void SetStartingLpBasis( - const std::vector &variable_statuses, - const std::vector &constraint_statuses) { + const std::vector& variable_statuses, + const std::vector& constraint_statuses) { LOG(FATAL) << "Not supported by this solver."; } @@ -1704,7 +1702,7 @@ class MPSolverInterface { virtual bool NextSolution() { return false; } // See MPSolver::SetCallback() for details. 
- virtual void SetCallback(MPCallback *mp_callback) { + virtual void SetCallback(MPCallback* mp_callback) { LOG(FATAL) << "Callbacks not supported for this solver."; } @@ -1717,7 +1715,7 @@ class MPSolverInterface { friend class MPObjective; protected: - MPSolver *const solver_; + MPSolver* const solver_; // Indicates whether the model and the solution are synchronized. SynchronizationStatus sync_status_; // Indicates whether the solve has reached optimality, @@ -1756,11 +1754,11 @@ class MPSolverInterface { void InvalidateSolutionSynchronization(); // Sets parameters common to LP and MIP in the underlying solver. - void SetCommonParameters(const MPSolverParameters ¶m); + void SetCommonParameters(const MPSolverParameters& param); // Sets MIP specific parameters in the underlying solver. - void SetMIPParameters(const MPSolverParameters ¶m); + void SetMIPParameters(const MPSolverParameters& param); // Sets all parameters in the underlying solver. - virtual void SetParameters(const MPSolverParameters ¶m) = 0; + virtual void SetParameters(const MPSolverParameters& param) = 0; // Sets an unsupported double parameter. void SetUnsupportedDoubleParam(MPSolverParameters::DoubleParam param); // Sets an unsupported integer parameter. @@ -1790,11 +1788,11 @@ class MPSolverInterface { // into the solver. Solvers that support passing the parameters directly can // override this method to skip the temporary file logic. virtual bool SetSolverSpecificParametersAsString( - const std::string ¶meters); + const std::string& parameters); // Reads a solver-specific file of parameters and set them. // Returns true if there was no errors. 
- virtual bool ReadParameterFile(const std::string &filename); + virtual bool ReadParameterFile(const std::string& filename); // Returns a file extension like ".tmp", this is needed because some solvers // require a given extension for the ReadParameterFile() filename and we need diff --git a/ortools/linear_solver/linear_solver.proto b/ortools/linear_solver/linear_solver.proto index d78c1d83ab..3d403a7cde 100644 --- a/ortools/linear_solver/linear_solver.proto +++ b/ortools/linear_solver/linear_solver.proto @@ -152,7 +152,7 @@ message MPSosConstraint { SOS1_DEFAULT = 0; // At most two consecutive variables from `var_index` can be non-zero (i.e. // for some i, var_index[i] and var_index[i+1]). See - // http://www.eudoxus.com/lp-training/5/5-6-special-ordered-sets-of-type-2 + // https://en.wikipedia.org/wiki/Special_ordered_set#Types_of_SOS SOS2 = 1; } optional Type type = 1 [default = SOS1_DEFAULT]; @@ -180,7 +180,7 @@ message MPSosConstraint { // rotated Second-Order Cones are always accepted. Other forms may or may not be // accepted depending on the underlying solver used. // See https://scip.zib.de/doc/html/cons__quadratic_8h.php and -// https://www.gurobi.com/documentation/8.1/refman/constraints.html#subsubsection:QuadraticConstraints +// https://www.gurobi.com/documentation/9.0/refman/constraints.html#subsubsection:QuadraticConstraints message MPQuadraticConstraint { // Sparse representation of linear terms in the quadratic constraint, where // term i is var_index[i] * coefficient[i]. diff --git a/ortools/linear_solver/linear_solver_callback.cc b/ortools/linear_solver/linear_solver_callback.cc index 65198a54d4..759f18de9e 100644 --- a/ortools/linear_solver/linear_solver_callback.cc +++ b/ortools/linear_solver/linear_solver_callback.cc @@ -47,8 +47,8 @@ std::string ToString(MPCallbackEvent event) { namespace { // Returns true if any of the callbacks in a list might add cuts. 
-bool CallbacksMightAddCuts(const std::vector &callbacks) { - for (MPCallback *callback : callbacks) { +bool CallbacksMightAddCuts(const std::vector& callbacks) { + for (MPCallback* callback : callbacks) { if (callback->might_add_cuts()) { return true; } @@ -58,8 +58,8 @@ bool CallbacksMightAddCuts(const std::vector &callbacks) { // Returns true if any of the callbacks in a list might add lazy constraints. bool CallbacksMightAddLazyConstraints( - const std::vector &callbacks) { - for (MPCallback *callback : callbacks) { + const std::vector& callbacks) { + for (MPCallback* callback : callbacks) { if (callback->might_add_lazy_constraints()) { return true; } @@ -69,13 +69,13 @@ bool CallbacksMightAddLazyConstraints( } // namespace -MPCallbackList::MPCallbackList(const std::vector &callbacks) +MPCallbackList::MPCallbackList(const std::vector& callbacks) : MPCallback(CallbacksMightAddCuts(callbacks), CallbacksMightAddLazyConstraints(callbacks)), callbacks_(callbacks) {} -void MPCallbackList::RunCallback(MPCallbackContext *context) { - for (MPCallback *callback : callbacks_) { +void MPCallbackList::RunCallback(MPCallbackContext* context) { + for (MPCallback* callback : callbacks_) { callback->RunCallback(context); } } diff --git a/ortools/linear_solver/linear_solver_callback.h b/ortools/linear_solver/linear_solver_callback.h index e3b22dd6fd..68982d8b0d 100644 --- a/ortools/linear_solver/linear_solver_callback.h +++ b/ortools/linear_solver/linear_solver_callback.h @@ -34,16 +34,14 @@ class LinearRange; // for details. enum class MPCallbackEvent { kUnknown, - // For regaining control of the main thread in single threaded - // applications, + // For regaining control of the main thread in single threaded applications, // not for interacting with the solver. kPolling, // The solver is currently running presolve. kPresolve, // The solver is currently running the simplex method. 
kSimplex, - // The solver is in the MIP loop (called periodically before starting a - // new + // The solver is in the MIP loop (called periodically before starting a new // node). Useful to early termination. kMip, // Called every time a new MIP incumbent is found. @@ -52,8 +50,7 @@ enum class MPCallbackEvent { kMipNode, // Called in each iterate of IPM/barrier method. kBarrier, - // The solver is about to log out a message, use this callback to capture - // it. + // The solver is about to log out a message, use this callback to capture it. kMessage, // The solver is in multi-objective optimization. kMultiObj, @@ -87,7 +84,7 @@ class MPCallbackContext { // At kMipSolution, the solution is integer feasible, while at kMipNode, the // solution solves the current node's LP relaxation (so integer variables may // be fractional). - virtual double VariableValue(const MPVariable *variable) = 0; + virtual double VariableValue(const MPVariable* variable) = 0; // Adds a constraint to the model that strengths the LP relaxation. // @@ -99,7 +96,7 @@ class MPCallbackContext { // strengthen the LP (behavior is undefined otherwise). Use // MPCallbackContext::AddLazyConstriant() if you are cutting off integer // solutions. - virtual void AddCut(const LinearRange &cutting_plane) = 0; + virtual void AddCut(const LinearRange& cutting_plane) = 0; // Adds a constraint to the model that cuts off an undesired integer solution. // @@ -117,7 +114,7 @@ class MPCallbackContext { // Warning(rander): in some solvers, e.g. Gurobi, an integer solution may not // respect a previously added lazy constraint, so you may need to add a // constraint more than once (e.g. due to threading issues). - virtual void AddLazyConstraint(const LinearRange &lazy_constraint) = 0; + virtual void AddLazyConstraint(const LinearRange& lazy_constraint) = 0; // Suggests a (potentially partial) variable assignment to the solver, to be // used as a feasible solution (or part of one). 
If the assignment is partial, @@ -127,7 +124,7 @@ class MPCallbackContext { // // Call only when the event is kMipNode. virtual double SuggestSolution( - const absl::flat_hash_map &solution) = 0; + const absl::flat_hash_map& solution) = 0; // Returns the number of nodes explored so far in the branch and bound tree, // which 0 at the root node and > 0 otherwise. @@ -153,7 +150,7 @@ class MPCallback { // Threading behavior may be solver dependent: // * Gurobi: RunCallback always runs on the same thread that you called // MPSolver::Solve() on, even when Gurobi uses multiple threads. - virtual void RunCallback(MPCallbackContext *callback_context) = 0; + virtual void RunCallback(MPCallbackContext* callback_context) = 0; bool might_add_cuts() const { return might_add_cuts_; } bool might_add_lazy_constraints() const { @@ -169,13 +166,13 @@ class MPCallback { // sequence. class MPCallbackList : public MPCallback { public: - explicit MPCallbackList(const std::vector &callbacks); + explicit MPCallbackList(const std::vector& callbacks); // Runs all callbacks from the list given at construction, in sequence. - void RunCallback(MPCallbackContext *context) override; + void RunCallback(MPCallbackContext* context) override; private: - const std::vector callbacks_; + const std::vector callbacks_; }; } // namespace operations_research diff --git a/ortools/linear_solver/model_exporter.cc b/ortools/linear_solver/model_exporter.cc index 6f06f42270..a70213f860 100644 --- a/ortools/linear_solver/model_exporter.cc +++ b/ortools/linear_solver/model_exporter.cc @@ -46,11 +46,11 @@ class LineBreaker { // - Lines are split so that their length doesn't exceed the max length; // unless a single string given to Append() exceeds that length (in which // case it will be put alone on a single unsplit line). - void Append(const std::string &s); + void Append(const std::string& s); // Returns true if string s will fit on the current line without adding a // carriage return. 
- bool WillFit(const std::string &s) { + bool WillFit(const std::string& s) { return line_size_ + s.size() < max_line_size_; } @@ -66,7 +66,7 @@ class LineBreaker { std::string output_; }; -void LineBreaker::Append(const std::string &s) { +void LineBreaker::Append(const std::string& s) { line_size_ += s.size(); if (line_size_ > max_line_size_) { line_size_ = s.size(); @@ -77,11 +77,11 @@ void LineBreaker::Append(const std::string &s) { class MPModelProtoExporter { public: - explicit MPModelProtoExporter(const MPModelProto &model); - bool ExportModelAsLpFormat(const MPModelExportOptions &options, - std::string *output); - bool ExportModelAsMpsFormat(const MPModelExportOptions &options, - std::string *output); + explicit MPModelProtoExporter(const MPModelProto& model); + bool ExportModelAsLpFormat(const MPModelExportOptions& options, + std::string* output); + bool ExportModelAsMpsFormat(const MPModelExportOptions& options, + std::string* output); private: // Computes the number of continuous, integer and binary variables. @@ -109,10 +109,10 @@ class MPModelProtoExporter { // Therefore, a name "$20<=40" for proto #3 could be "_$20__40_1". template std::vector ExtractAndProcessNames( - const ListOfProtosWithNameFields &proto, const std::string &prefix, + const ListOfProtosWithNameFields& proto, const std::string& prefix, bool obfuscate, bool log_invalid_names, - const std::string &forbidden_first_chars, - const std::string &forbidden_chars); + const std::string& forbidden_first_chars, + const std::string& forbidden_chars); // Appends a general "Comment" section with useful metadata about the model // to "output". @@ -121,65 +121,65 @@ class MPModelProtoExporter { // may be more constraints in a .lp file as in the original model as // a constraint lhs <= term <= rhs will be output as the two constraints // term >= lhs and term <= rhs. 
- void AppendComments(const std::string &separator, std::string *output) const; + void AppendComments(const std::string& separator, std::string* output) const; // Appends an MPConstraintProto to the output text. If the constraint has // both an upper and lower bound that are not equal, it splits the constraint // into two constraints, one for the left hand side (_lhs) and one for right // hand side (_rhs). - bool AppendConstraint(const MPConstraintProto &ct_proto, - const std::string &name, - const MPModelExportOptions &options, - LineBreaker &line_breaker, - std::vector &show_variable, std::string *output); + bool AppendConstraint(const MPConstraintProto& ct_proto, + const std::string& name, + const MPModelExportOptions& options, + LineBreaker& line_breaker, + std::vector& show_variable, std::string* output); // Clears "output" and writes a term to it, in "LP" format. Returns false on // error (for example, var_index is out of range). bool WriteLpTerm(int var_index, double coefficient, - std::string *output) const; + std::string* output) const; // Appends a pair name, value to "output", formatted to comply with the MPS // standard. - void AppendMpsPair(const std::string &name, double value, - std::string *output) const; + void AppendMpsPair(const std::string& name, double value, + std::string* output) const; // Appends the head of a line, consisting of an id and a name to output. - void AppendMpsLineHeader(const std::string &id, const std::string &name, - std::string *output) const; + void AppendMpsLineHeader(const std::string& id, const std::string& name, + std::string* output) const; // Same as AppendMpsLineHeader. Appends an extra new-line at the end the // string pointed to by output. - void AppendMpsLineHeaderWithNewLine(const std::string &id, - const std::string &name, - std::string *output) const; + void AppendMpsLineHeaderWithNewLine(const std::string& id, + const std::string& name, + std::string* output) const; // Appends an MPS term in various contexts. 
The term consists of a head name, // a name, and a value. If the line is not empty, then only the pair // (name, value) is appended. The number of columns, limited to 2 by the MPS // format is also taken care of. - void AppendMpsTermWithContext(const std::string &head_name, - const std::string &name, double value, - std::string *output); + void AppendMpsTermWithContext(const std::string& head_name, + const std::string& name, double value, + std::string* output); // Appends a new-line if two columns are already present on the MPS line. // Used by and in complement to AppendMpsTermWithContext. - void AppendNewLineIfTwoColumns(std::string *output); + void AppendNewLineIfTwoColumns(std::string* output); // When 'integrality' is true, appends columns corresponding to integer // variables. Appends the columns for non-integer variables otherwise. // The sparse matrix must be passed as a vector of columns ('transpose'). void AppendMpsColumns( bool integrality, - const std::vector > > &transpose, - std::string *output); + const std::vector>>& transpose, + std::string* output); // Appends a line describing the bound of a variablenew-line if two columns // are already present on the MPS line. // Used by and in complement to AppendMpsTermWithContext. - void AppendMpsBound(const std::string &bound_type, const std::string &name, - double value, std::string *output) const; + void AppendMpsBound(const std::string& bound_type, const std::string& name, + double value, std::string* output) const; - const MPModelProto &proto_; + const MPModelProto& proto_; // Vector of variable names as they will be exported. std::vector exported_variable_names_; @@ -203,8 +203,8 @@ class MPModelProtoExporter { int current_mps_column_; // Format for MPS file lines. 
- std::unique_ptr > mps_header_format_; - std::unique_ptr > mps_format_; + std::unique_ptr> mps_header_format_; + std::unique_ptr> mps_format_; DISALLOW_COPY_AND_ASSIGN(MPModelProtoExporter); }; @@ -212,8 +212,8 @@ class MPModelProtoExporter { } // namespace absl::StatusOr ExportModelAsLpFormat( - const MPModelProto &model, const MPModelExportOptions &options) { - for (const MPGeneralConstraintProto &general_constraint : + const MPModelProto& model, const MPModelExportOptions& options) { + for (const MPGeneralConstraintProto& general_constraint : model.general_constraint()) { if (!general_constraint.has_indicator_constraint()) { return absl::InvalidArgumentError( @@ -229,7 +229,7 @@ absl::StatusOr ExportModelAsLpFormat( } absl::StatusOr ExportModelAsMpsFormat( - const MPModelProto &model, const MPModelExportOptions &options) { + const MPModelProto& model, const MPModelExportOptions& options) { if (model.general_constraint_size() > 0) { return absl::InvalidArgumentError("General constraints are not supported."); } @@ -242,7 +242,7 @@ absl::StatusOr ExportModelAsMpsFormat( } namespace { -MPModelProtoExporter::MPModelProtoExporter(const MPModelProto &model) +MPModelProtoExporter::MPModelProtoExporter(const MPModelProto& model) : proto_(model), num_integer_variables_(0), num_binary_variables_(0), @@ -253,14 +253,14 @@ namespace { class NameManager { public: NameManager() : names_set_(), last_n_(1) {} - std::string MakeUniqueName(const std::string &name); + std::string MakeUniqueName(const std::string& name); private: absl::flat_hash_set names_set_; int last_n_; }; -std::string NameManager::MakeUniqueName(const std::string &name) { +std::string NameManager::MakeUniqueName(const std::string& name) { std::string result = name; // Find the 'n' so that "name_n" does not already exist. 
int n = last_n_; @@ -274,10 +274,10 @@ std::string NameManager::MakeUniqueName(const std::string &name) { return result; } -std::string MakeExportableName(const std::string &name, - const std::string &forbidden_first_chars, - const std::string &forbidden_chars, - bool *found_forbidden_char) { +std::string MakeExportableName(const std::string& name, + const std::string& forbidden_first_chars, + const std::string& forbidden_chars, + bool* found_forbidden_char) { // Prepend with "_" all the names starting with a forbidden character. *found_forbidden_char = forbidden_first_chars.find(name[0]) != std::string::npos; @@ -285,7 +285,7 @@ std::string MakeExportableName(const std::string &name, *found_forbidden_char ? absl::StrCat("_", name) : name; // Replace all the other forbidden characters with "_". - for (char &c : exportable_name) { + for (char& c : exportable_name) { if (forbidden_chars.find(c) != std::string::npos) { c = '_'; *found_forbidden_char = true; @@ -297,16 +297,16 @@ std::string MakeExportableName(const std::string &name, template std::vector MPModelProtoExporter::ExtractAndProcessNames( - const ListOfProtosWithNameFields &proto, const std::string &prefix, + const ListOfProtosWithNameFields& proto, const std::string& prefix, bool obfuscate, bool log_invalid_names, - const std::string &forbidden_first_chars, - const std::string &forbidden_chars) { + const std::string& forbidden_first_chars, + const std::string& forbidden_chars) { const int num_items = proto.size(); std::vector result(num_items); NameManager namer; const int num_digits = absl::StrCat(num_items).size(); int i = 0; - for (const auto &item : proto) { + for (const auto& item : proto) { const std::string obfuscated_name = absl::StrFormat("%s%0*d", prefix, num_digits, i); if (obfuscate || !item.has_name()) { @@ -343,9 +343,9 @@ std::vector MPModelProtoExporter::ExtractAndProcessNames( return result; } -void MPModelProtoExporter::AppendComments(const std::string &separator, - std::string *output) 
const { - const char *const sep = separator.c_str(); +void MPModelProtoExporter::AppendComments(const std::string& separator, + std::string* output) const { + const char* const sep = separator.c_str(); absl::StrAppendFormat(output, "%s Generated by MPModelProtoExporter\n", sep); absl::StrAppendFormat(output, "%s %-16s : %s\n", sep, "Name", proto_.has_name() ? proto_.name().c_str() : "NoName"); @@ -373,12 +373,12 @@ std::string DoubleToString(double d) { return absl::StrCat((d)); } } // namespace -bool MPModelProtoExporter::AppendConstraint(const MPConstraintProto &ct_proto, - const std::string &name, - const MPModelExportOptions &options, - LineBreaker &line_breaker, - std::vector &show_variable, - std::string *output) { +bool MPModelProtoExporter::AppendConstraint(const MPConstraintProto& ct_proto, + const std::string& name, + const MPModelExportOptions& options, + LineBreaker& line_breaker, + std::vector& show_variable, + std::string* output) { for (int i = 0; i < ct_proto.var_index_size(); ++i) { const int var_index = ct_proto.var_index(i); const double coeff = ct_proto.coefficient(i); @@ -426,7 +426,7 @@ bool MPModelProtoExporter::AppendConstraint(const MPConstraintProto &ct_proto, } bool MPModelProtoExporter::WriteLpTerm(int var_index, double coefficient, - std::string *output) const { + std::string* output) const { output->clear(); if (var_index < 0 || var_index >= proto_.variable_size()) { LOG(DFATAL) << "Reference to out-of-bounds variable index # " << var_index; @@ -440,16 +440,16 @@ bool MPModelProtoExporter::WriteLpTerm(int var_index, double coefficient, } namespace { -bool IsBoolean(const MPVariableProto &var) { +bool IsBoolean(const MPVariableProto& var) { return var.is_integer() && ceil(var.lower_bound()) == 0.0 && floor(var.upper_bound()) == 1.0; } -void UpdateMaxSize(const std::string &new_string, int *size) { +void UpdateMaxSize(const std::string& new_string, int* size) { if (new_string.size() > *size) *size = new_string.size(); } -void 
UpdateMaxSize(double new_number, int *size) { +void UpdateMaxSize(double new_number, int* size) { UpdateMaxSize(DoubleToString(new_number), size); } } // namespace @@ -461,7 +461,7 @@ void MPModelProtoExporter::Setup() { } num_binary_variables_ = 0; num_integer_variables_ = 0; - for (const MPVariableProto &var : proto_.variable()) { + for (const MPVariableProto& var : proto_.variable()) { if (var.is_integer()) { if (IsBoolean(var)) { ++num_binary_variables_; @@ -480,14 +480,14 @@ void MPModelProtoExporter::ComputeMpsSmartColumnWidths(bool obfuscated) { int string_field_size = 6; int number_field_size = 6; - for (const MPVariableProto &var : proto_.variable()) { + for (const MPVariableProto& var : proto_.variable()) { UpdateMaxSize(var.name(), &string_field_size); UpdateMaxSize(var.objective_coefficient(), &number_field_size); UpdateMaxSize(var.lower_bound(), &number_field_size); UpdateMaxSize(var.upper_bound(), &number_field_size); } - for (const MPConstraintProto &cst : proto_.constraint()) { + for (const MPConstraintProto& cst : proto_.constraint()) { UpdateMaxSize(cst.name(), &string_field_size); UpdateMaxSize(cst.lower_bound(), &number_field_size); UpdateMaxSize(cst.upper_bound(), &number_field_size); @@ -518,7 +518,7 @@ void MPModelProtoExporter::ComputeMpsSmartColumnWidths(bool obfuscated) { } bool MPModelProtoExporter::ExportModelAsLpFormat( - const MPModelExportOptions &options, std::string *output) { + const MPModelExportOptions& options, std::string* output) { output->clear(); Setup(); const std::string kForbiddenFirstChars = "$.0123456789"; @@ -561,8 +561,8 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( // Linear Constraints absl::StrAppend(output, obj_line_breaker.GetOutput(), "\nSubject to\n"); for (int cst_index = 0; cst_index < proto_.constraint_size(); ++cst_index) { - const MPConstraintProto &ct_proto = proto_.constraint(cst_index); - const std::string &name = exported_constraint_names_[cst_index]; + const MPConstraintProto& ct_proto = 
proto_.constraint(cst_index); + const std::string& name = exported_constraint_names_[cst_index]; LineBreaker line_breaker(options.max_line_length); const int kNumFormattingChars = 10; // Overevaluated. // Account for the size of the constraint name + possibly "_rhs" + @@ -577,9 +577,9 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( // General Constraints for (int cst_index = 0; cst_index < proto_.general_constraint_size(); ++cst_index) { - const MPGeneralConstraintProto &ct_proto = + const MPGeneralConstraintProto& ct_proto = proto_.general_constraint(cst_index); - const std::string &name = exported_general_constraint_names_[cst_index]; + const std::string& name = exported_general_constraint_names_[cst_index]; LineBreaker line_breaker(options.max_line_length); const int kNumFormattingChars = 10; // Overevaluated. // Account for the size of the constraint name + possibly "_rhs" + @@ -587,7 +587,7 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( line_breaker.Consume(kNumFormattingChars + name.size()); if (!ct_proto.has_indicator_constraint()) return false; - const MPIndicatorConstraint &indicator_ct = ct_proto.indicator_constraint(); + const MPIndicatorConstraint& indicator_ct = ct_proto.indicator_constraint(); const int binary_var_index = indicator_ct.var_index(); const int binary_var_value = indicator_ct.var_value(); if (binary_var_index < 0 || binary_var_index >= proto_.variable_size()) { @@ -609,7 +609,7 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( } for (int var_index = 0; var_index < proto_.variable_size(); ++var_index) { if (!show_variable[var_index]) continue; - const MPVariableProto &var_proto = proto_.variable(var_index); + const MPVariableProto& var_proto = proto_.variable(var_index); const double lb = var_proto.lower_bound(); const double ub = var_proto.upper_bound(); if (var_proto.is_integer() && lb == round(lb) && ub == round(ub)) { @@ -637,7 +637,7 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( absl::StrAppend(output, 
"Binaries\n"); for (int var_index = 0; var_index < proto_.variable_size(); ++var_index) { if (!show_variable[var_index]) continue; - const MPVariableProto &var_proto = proto_.variable(var_index); + const MPVariableProto& var_proto = proto_.variable(var_index); if (IsBoolean(var_proto)) { absl::StrAppendFormat(output, " %s\n", exported_variable_names_[var_index]); @@ -650,7 +650,7 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( absl::StrAppend(output, "Generals\n"); for (int var_index = 0; var_index < proto_.variable_size(); ++var_index) { if (!show_variable[var_index]) continue; - const MPVariableProto &var_proto = proto_.variable(var_index); + const MPVariableProto& var_proto = proto_.variable(var_index); if (var_proto.is_integer() && !IsBoolean(var_proto)) { absl::StrAppend(output, " ", exported_variable_names_[var_index], "\n"); } @@ -660,27 +660,27 @@ bool MPModelProtoExporter::ExportModelAsLpFormat( return true; } -void MPModelProtoExporter::AppendMpsPair(const std::string &name, double value, - std::string *output) const { +void MPModelProtoExporter::AppendMpsPair(const std::string& name, double value, + std::string* output) const { absl::StrAppendFormat(output, *mps_format_, name, DoubleToString(value)); } -void MPModelProtoExporter::AppendMpsLineHeader(const std::string &id, - const std::string &name, - std::string *output) const { +void MPModelProtoExporter::AppendMpsLineHeader(const std::string& id, + const std::string& name, + std::string* output) const { absl::StrAppendFormat(output, *mps_header_format_, id, name); } void MPModelProtoExporter::AppendMpsLineHeaderWithNewLine( - const std::string &id, const std::string &name, std::string *output) const { + const std::string& id, const std::string& name, std::string* output) const { AppendMpsLineHeader(id, name, output); absl::StripTrailingAsciiWhitespace(output); absl::StrAppend(output, "\n"); } void MPModelProtoExporter::AppendMpsTermWithContext( - const std::string &head_name, const std::string 
&name, double value, - std::string *output) { + const std::string& head_name, const std::string& name, double value, + std::string* output) { if (current_mps_column_ == 0) { AppendMpsLineHeader("", head_name, output); } @@ -688,16 +688,16 @@ void MPModelProtoExporter::AppendMpsTermWithContext( AppendNewLineIfTwoColumns(output); } -void MPModelProtoExporter::AppendMpsBound(const std::string &bound_type, - const std::string &name, double value, - std::string *output) const { +void MPModelProtoExporter::AppendMpsBound(const std::string& bound_type, + const std::string& name, double value, + std::string* output) const { AppendMpsLineHeader(bound_type, "BOUND", output); AppendMpsPair(name, value, output); absl::StripTrailingAsciiWhitespace(output); absl::StrAppend(output, "\n"); } -void MPModelProtoExporter::AppendNewLineIfTwoColumns(std::string *output) { +void MPModelProtoExporter::AppendNewLineIfTwoColumns(std::string* output) { ++current_mps_column_; if (current_mps_column_ == 2) { absl::StripTrailingAsciiWhitespace(output); @@ -708,21 +708,21 @@ void MPModelProtoExporter::AppendNewLineIfTwoColumns(std::string *output) { void MPModelProtoExporter::AppendMpsColumns( bool integrality, - const std::vector > > &transpose, - std::string *output) { + const std::vector>>& transpose, + std::string* output) { current_mps_column_ = 0; for (int var_index = 0; var_index < proto_.variable_size(); ++var_index) { - const MPVariableProto &var_proto = proto_.variable(var_index); + const MPVariableProto& var_proto = proto_.variable(var_index); if (var_proto.is_integer() != integrality) continue; - const std::string &var_name = exported_variable_names_[var_index]; + const std::string& var_name = exported_variable_names_[var_index]; current_mps_column_ = 0; if (var_proto.objective_coefficient() != 0.0) { AppendMpsTermWithContext(var_name, "COST", var_proto.objective_coefficient(), output); } - for (const std::pair &cst_index_and_coeff : + for (const std::pair& cst_index_and_coeff : 
transpose[var_index]) { - const std::string &cst_name = + const std::string& cst_name = exported_constraint_names_[cst_index_and_coeff.first]; AppendMpsTermWithContext(var_name, cst_name, cst_index_and_coeff.second, output); @@ -732,7 +732,7 @@ void MPModelProtoExporter::AppendMpsColumns( } bool MPModelProtoExporter::ExportModelAsMpsFormat( - const MPModelExportOptions &options, std::string *output) { + const MPModelExportOptions& options, std::string* output) { output->clear(); Setup(); ComputeMpsSmartColumnWidths(options.obfuscate); @@ -761,10 +761,10 @@ bool MPModelProtoExporter::ExportModelAsMpsFormat( std::string rows_section; AppendMpsLineHeaderWithNewLine("N", "COST", &rows_section); for (int cst_index = 0; cst_index < proto_.constraint_size(); ++cst_index) { - const MPConstraintProto &ct_proto = proto_.constraint(cst_index); + const MPConstraintProto& ct_proto = proto_.constraint(cst_index); const double lb = ct_proto.lower_bound(); const double ub = ct_proto.upper_bound(); - const std::string &cst_name = exported_constraint_names_[cst_index]; + const std::string& cst_name = exported_constraint_names_[cst_index]; if (lb == -kInfinity && ub == kInfinity) { AppendMpsLineHeaderWithNewLine("N", cst_name, &rows_section); } else if (lb == ub) { @@ -782,10 +782,10 @@ bool MPModelProtoExporter::ExportModelAsMpsFormat( // As the information regarding a column needs to be contiguous, we create // a vector associating a variable index to a vector containing the indices // of the constraints where this variable appears. 
- std::vector > > transpose( + std::vector>> transpose( proto_.variable_size()); for (int cst_index = 0; cst_index < proto_.constraint_size(); ++cst_index) { - const MPConstraintProto &ct_proto = proto_.constraint(cst_index); + const MPConstraintProto& ct_proto = proto_.constraint(cst_index); for (int k = 0; k < ct_proto.var_index_size(); ++k) { const int var_index = ct_proto.var_index(k); if (var_index < 0 || var_index >= proto_.variable_size()) { @@ -821,10 +821,10 @@ bool MPModelProtoExporter::ExportModelAsMpsFormat( current_mps_column_ = 0; std::string rhs_section; for (int cst_index = 0; cst_index < proto_.constraint_size(); ++cst_index) { - const MPConstraintProto &ct_proto = proto_.constraint(cst_index); + const MPConstraintProto& ct_proto = proto_.constraint(cst_index); const double lb = ct_proto.lower_bound(); const double ub = ct_proto.upper_bound(); - const std::string &cst_name = exported_constraint_names_[cst_index]; + const std::string& cst_name = exported_constraint_names_[cst_index]; if (lb != -kInfinity) { AppendMpsTermWithContext("RHS", cst_name, lb, &rhs_section); } else if (ub != +kInfinity) { @@ -840,10 +840,10 @@ bool MPModelProtoExporter::ExportModelAsMpsFormat( current_mps_column_ = 0; std::string ranges_section; for (int cst_index = 0; cst_index < proto_.constraint_size(); ++cst_index) { - const MPConstraintProto &ct_proto = proto_.constraint(cst_index); + const MPConstraintProto& ct_proto = proto_.constraint(cst_index); const double range = fabs(ct_proto.upper_bound() - ct_proto.lower_bound()); if (range != 0.0 && range != +kInfinity) { - const std::string &cst_name = exported_constraint_names_[cst_index]; + const std::string& cst_name = exported_constraint_names_[cst_index]; AppendMpsTermWithContext("RANGE", cst_name, range, &ranges_section); } } @@ -856,10 +856,10 @@ bool MPModelProtoExporter::ExportModelAsMpsFormat( current_mps_column_ = 0; std::string bounds_section; for (int var_index = 0; var_index < proto_.variable_size(); 
++var_index) { - const MPVariableProto &var_proto = proto_.variable(var_index); + const MPVariableProto& var_proto = proto_.variable(var_index); const double lb = var_proto.lower_bound(); const double ub = var_proto.upper_bound(); - const std::string &var_name = exported_variable_names_[var_index]; + const std::string& var_name = exported_variable_names_[var_index]; if (lb == -kInfinity && ub == +kInfinity) { AppendMpsLineHeader("FR", "BOUND", &bounds_section); diff --git a/ortools/linear_solver/model_validator.cc b/ortools/linear_solver/model_validator.cc index 09124a87ee..6528f12dcd 100644 --- a/ortools/linear_solver/model_validator.cc +++ b/ortools/linear_solver/model_validator.cc @@ -48,7 +48,7 @@ bool IsFinite(double value) { // Internal method to detect errors in bounds. The object passed as parameter // must have "lower_bound" and "upper_bound" fields. template -std::string FindErrorInBounds(const BoundedElement &element) { +std::string FindErrorInBounds(const BoundedElement& element) { if (std::isnan(element.lower_bound()) || std::isnan(element.upper_bound()) || element.lower_bound() >= absl::GetFlag(FLAGS_model_validator_infinity) || element.upper_bound() <= -absl::GetFlag(FLAGS_model_validator_infinity) || @@ -60,7 +60,7 @@ std::string FindErrorInBounds(const BoundedElement &element) { } // Internal method to detect errors in a single variable. -std::string FindErrorInMPVariable(const MPVariableProto &variable) { +std::string FindErrorInMPVariable(const MPVariableProto& variable) { const std::string bound_error = FindErrorInBounds(variable); if (!bound_error.empty()) return bound_error; @@ -79,8 +79,8 @@ std::string FindErrorInMPVariable(const MPVariableProto &variable) { // Returns an error message if 'var_indices' contains a duplicate index. 
template -std::string FindDuplicateVarIndex(const Iterable &var_indices, - std::vector *var_mask) { +std::string FindDuplicateVarIndex(const Iterable& var_indices, + std::vector* var_mask) { int duplicate_var_index = -1; for (const int var_index : var_indices) { if ((*var_mask)[var_index]) duplicate_var_index = var_index; @@ -100,8 +100,8 @@ std::string FindDuplicateVarIndex(const Iterable &var_indices, // Internal method to detect errors in a single constraint. // "var_mask" is a vector whose size is the number of variables in // the model, and it will be all set to false before and after the call. -std::string FindErrorInMPConstraint(const MPConstraintProto &constraint, - std::vector *var_mask) { +std::string FindErrorInMPConstraint(const MPConstraintProto& constraint, + std::vector* var_mask) { const std::string bound_error = FindErrorInBounds(constraint); if (!bound_error.empty()) return bound_error; @@ -135,7 +135,7 @@ std::string FindErrorInMPConstraint(const MPConstraintProto &constraint, return std::string(); } -std::string CroppedConstraintDebugString(const MPConstraintProto &constraint) { +std::string CroppedConstraintDebugString(const MPConstraintProto& constraint) { const int kMaxPrintedVars = 10; MPConstraintProto constraint_light = constraint; @@ -155,15 +155,15 @@ std::string CroppedConstraintDebugString(const MPConstraintProto &constraint) { ProtobufShortDebugString(constraint_light), suffix_str); } -bool IsBoolean(const MPVariableProto &variable) { +bool IsBoolean(const MPVariableProto& variable) { if (variable.lower_bound() < 0) return false; if (variable.upper_bound() > 1) return false; return variable.is_integer(); } std::string FindErrorInMPIndicatorConstraint( - const MPModelProto &model, const MPIndicatorConstraint &indicator, - std::vector *var_mask) { + const MPModelProto& model, const MPIndicatorConstraint& indicator, + std::vector* var_mask) { if (!indicator.has_var_index()) { return "var_index is required."; } @@ -178,7 +178,7 @@ 
std::string FindErrorInMPIndicatorConstraint( if (var_value < 0 || var_value > 1) { return absl::StrCat("var_value=", var_value, " must be 0 or 1."); } - const MPConstraintProto &constraint = indicator.constraint(); + const MPConstraintProto& constraint = indicator.constraint(); std::string error = FindErrorInMPConstraint(constraint, var_mask); if (!error.empty()) { // Constraint protos can be huge, theoretically. So we guard against @@ -189,9 +189,9 @@ std::string FindErrorInMPIndicatorConstraint( return ""; } -std::string FindErrorInMPSosConstraint(const MPModelProto &model, - const MPSosConstraint &sos, - std::vector *var_mask) { +std::string FindErrorInMPSosConstraint(const MPModelProto& model, + const MPSosConstraint& sos, + std::vector* var_mask) { if (sos.weight_size() != 0 && sos.weight_size() != sos.var_index_size()) { return "weight_size() > 0 and var_index_size() != weight_size()"; } @@ -216,9 +216,9 @@ std::string FindErrorInMPSosConstraint(const MPModelProto &model, return ""; } -std::string FindErrorInMPQuadraticConstraint(const MPModelProto &model, - const MPQuadraticConstraint &qcst, - std::vector *var_mask) { +std::string FindErrorInMPQuadraticConstraint(const MPModelProto& model, + const MPQuadraticConstraint& qcst, + std::vector* var_mask) { const int num_vars = model.variable_size(); if (qcst.var_index_size() != qcst.coefficient_size()) { @@ -267,8 +267,8 @@ std::string FindErrorInMPQuadraticConstraint(const MPModelProto &model, return ""; } -std::string FindErrorInMPAbsConstraint(const MPModelProto &model, - const MPAbsConstraint &abs) { +std::string FindErrorInMPAbsConstraint(const MPModelProto& model, + const MPAbsConstraint& abs) { if (!abs.has_var_index()) { return "var_index is required."; } @@ -288,8 +288,8 @@ std::string FindErrorInMPAbsConstraint(const MPModelProto &model, return ""; } -std::string FindErrorInMPAndOrConstraint(const MPModelProto &model, - const MPArrayConstraint &and_or) { +std::string FindErrorInMPAndOrConstraint(const 
MPModelProto& model, + const MPArrayConstraint& and_or) { if (and_or.var_index_size() == 0) { return "var_index cannot be empty."; } @@ -319,7 +319,7 @@ std::string FindErrorInMPAndOrConstraint(const MPModelProto &model, } std::string FindErrorInMPMinMaxConstraint( - const MPModelProto &model, const MPArrayWithConstantConstraint &min_max) { + const MPModelProto& model, const MPArrayWithConstantConstraint& min_max) { if (min_max.var_index_size() == 0) { return "var_index cannot be empty."; } @@ -346,7 +346,7 @@ std::string FindErrorInMPMinMaxConstraint( return ""; } -std::string FindErrorInQuadraticObjective(const MPQuadraticObjective &qobj, +std::string FindErrorInQuadraticObjective(const MPQuadraticObjective& qobj, int num_vars) { if (qobj.qvar1_index_size() != qobj.qvar2_index_size() || qobj.qvar1_index_size() != qobj.coefficient_size()) { @@ -373,7 +373,7 @@ std::string FindErrorInQuadraticObjective(const MPQuadraticObjective &qobj, } std::string FindErrorInSolutionHint( - const PartialVariableAssignment &solution_hint, int num_vars) { + const PartialVariableAssignment& solution_hint, int num_vars) { if (solution_hint.var_index_size() != solution_hint.var_value_size()) { return absl::StrCat("var_index_size() != var_value_size() [", solution_hint.var_index_size(), " VS ", @@ -399,7 +399,7 @@ std::string FindErrorInSolutionHint( } } // namespace -std::string FindErrorInMPModelProto(const MPModelProto &model) { +std::string FindErrorInMPModelProto(const MPModelProto& model) { // NOTE(user): Empty models are considered fine by this function, although // it is not clear whether MPSolver::Solve() will always respond in the same // way, depending on the solvers. @@ -424,7 +424,7 @@ std::string FindErrorInMPModelProto(const MPModelProto &model) { // Validate constraints. 
std::vector variable_appears(num_vars, false); for (int i = 0; i < num_cts; ++i) { - const MPConstraintProto &constraint = model.constraint(i); + const MPConstraintProto& constraint = model.constraint(i); error = FindErrorInMPConstraint(constraint, &variable_appears); if (!error.empty()) { // Constraint protos can be huge, theoretically. So we guard against that. @@ -435,7 +435,7 @@ std::string FindErrorInMPModelProto(const MPModelProto &model) { // Validate general constraints. for (int i = 0; i < model.general_constraint_size(); ++i) { - const MPGeneralConstraintProto &gen_constraint = + const MPGeneralConstraintProto& gen_constraint = model.general_constraint(i); std::string error; switch (gen_constraint.general_constraint_case()) { @@ -503,9 +503,9 @@ std::string FindErrorInMPModelProto(const MPModelProto &model) { return std::string(); } -absl::optional > -ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest &request, - MPSolutionResponse *response) { +absl::optional> +ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest& request, + MPSolutionResponse* response) { CHECK(response != nullptr); if (!request.has_model() && !request.has_model_delta()) { @@ -551,7 +551,7 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest &request, // If the baseline is valid and we have a model delta, validate the delta, // then apply it. 
if (error.empty() && request.has_model_delta()) { - const MPModelDeltaProto &delta = request.model_delta(); + const MPModelDeltaProto& delta = request.model_delta(); error = FindErrorInMPModelDeltaProto(delta, model.get()); if (error.empty()) ApplyVerifiedMPModelDelta(delta, model.get_mutable()); } @@ -582,8 +582,8 @@ ExtractValidMPModelOrPopulateResponseStatus(const MPModelRequest &request, } bool ExtractValidMPModelInPlaceOrPopulateResponseStatus( - MPModelRequest *request, MPSolutionResponse *response) { - absl::optional > lazy_copy = + MPModelRequest* request, MPSolutionResponse* response) { + absl::optional> lazy_copy = ExtractValidMPModelOrPopulateResponseStatus(*request, response); if (!lazy_copy) return false; if (lazy_copy->was_copied()) { @@ -594,7 +594,7 @@ bool ExtractValidMPModelInPlaceOrPopulateResponseStatus( // TODO(user): Add a general FindFeasibilityErrorInSolution() and factor out the // common code. -std::string FindFeasibilityErrorInSolutionHint(const MPModelProto &model, +std::string FindFeasibilityErrorInSolutionHint(const MPModelProto& model, double tolerance) { const int num_vars = model.variable_size(); @@ -633,7 +633,7 @@ std::string FindFeasibilityErrorInSolutionHint(const MPModelProto &model, // All the constraints must be satisfiable. 
for (int cst_index = 0; cst_index < model.constraint_size(); ++cst_index) { - const MPConstraintProto &constraint = model.constraint(cst_index); + const MPConstraintProto& constraint = model.constraint(cst_index); AccurateSum activity; for (int j = 0; j < constraint.var_index_size(); ++j) { activity.Add(constraint.coefficient(j) * @@ -653,17 +653,17 @@ std::string FindFeasibilityErrorInSolutionHint(const MPModelProto &model, return ""; } -std::string FindErrorInMPModelDeltaProto(const MPModelDeltaProto &delta, - const MPModelProto &model) { +std::string FindErrorInMPModelDeltaProto(const MPModelDeltaProto& delta, + const MPModelProto& model) { int num_vars = model.variable_size(); // Validate delta variables. std::string error; absl::flat_hash_set new_var_indices; int max_var_index = num_vars - 1; MPVariableProto tmp_var_proto; - for (const auto &pair : delta.variable_overrides()) { + for (const auto& pair : delta.variable_overrides()) { const int var_index = pair.first; - const MPVariableProto &var_override_proto = pair.second; + const MPVariableProto& var_override_proto = pair.second; if (var_index < 0) { error = "Invalid key"; } else if (var_index >= num_vars) { @@ -702,9 +702,9 @@ std::string FindErrorInMPModelDeltaProto(const MPModelDeltaProto &delta, const int num_constraints = model.constraint_size(); absl::flat_hash_set new_ct_indices; int max_ct_index = num_constraints - 1; - for (const auto &pair : delta.constraint_overrides()) { + for (const auto& pair : delta.constraint_overrides()) { const int ct_index = pair.first; - const MPConstraintProto &constraint_override_proto = pair.second; + const MPConstraintProto& constraint_override_proto = pair.second; if (ct_index < 0) { error = "Invalid constraint index"; } else if (ct_index >= num_constraints) { @@ -742,8 +742,8 @@ std::string FindErrorInMPModelDeltaProto(const MPModelDeltaProto &delta, return ""; } -void MergeMPConstraintProtoExceptTerms(const MPConstraintProto &from, - MPConstraintProto *to) { +void 
MergeMPConstraintProtoExceptTerms(const MPConstraintProto& from, + MPConstraintProto* to) { #define COPY_FIELD_IF_PRESENT(field) \ if (from.has_##field()) to->set_##field(from.field()) COPY_FIELD_IF_PRESENT(lower_bound); @@ -754,7 +754,7 @@ void MergeMPConstraintProtoExceptTerms(const MPConstraintProto &from, } namespace { -void PruneZeroTermsInMpConstraint(MPConstraintProto *ct) { +void PruneZeroTermsInMpConstraint(MPConstraintProto* ct) { // Optimize the fast path (when no term is pruned) by doing a first quick scan // until the first zero. int first_zero = 0; @@ -779,30 +779,30 @@ void PruneZeroTermsInMpConstraint(MPConstraintProto *ct) { // size. We don't use google::protobuf::util::Resize() because it's not // compatible with 'light' protos. template -void ExtendRepeatedPtrFieldToSize(const int size, T *repeated_messages) { +void ExtendRepeatedPtrFieldToSize(const int size, T* repeated_messages) { DCHECK_GE(size, repeated_messages->size()); while (repeated_messages->size() < size) repeated_messages->Add(); } } // namespace -void ApplyVerifiedMPModelDelta(const MPModelDeltaProto &delta, - MPModelProto *model) { +void ApplyVerifiedMPModelDelta(const MPModelDeltaProto& delta, + MPModelProto* model) { // Apply the delta to the variables: first, resize the variable array. int max_var_index = -1; - for (const auto &p : delta.variable_overrides()) { + for (const auto& p : delta.variable_overrides()) { max_var_index = std::max(max_var_index, p.first); } if (max_var_index >= model->variable_size()) { ExtendRepeatedPtrFieldToSize(max_var_index + 1, model->mutable_variable()); } // Then, apply the variable overrides. - for (const auto &p : delta.variable_overrides()) { + for (const auto& p : delta.variable_overrides()) { model->mutable_variable(p.first)->MergeFrom(p.second); } // Apply the delta to the constraints: first, resize the constraint array. 
int max_ct_index = -1; - for (const auto &p : delta.constraint_overrides()) { + for (const auto& p : delta.constraint_overrides()) { max_ct_index = std::max(max_ct_index, p.first); } const int old_num_constraints = model->constraint_size(); @@ -810,9 +810,9 @@ void ApplyVerifiedMPModelDelta(const MPModelDeltaProto &delta, ExtendRepeatedPtrFieldToSize(max_ct_index + 1, model->mutable_constraint()); } // Then, apply the constraint overrides. - for (const auto &p : delta.constraint_overrides()) { - const MPConstraintProto &override_ct = p.second; - MPConstraintProto *baseline = model->mutable_constraint(p.first); + for (const auto& p : delta.constraint_overrides()) { + const MPConstraintProto& override_ct = p.second; + MPConstraintProto* baseline = model->mutable_constraint(p.first); // Fast path for added constraints. if (p.first >= old_num_constraints) { *baseline = override_ct; @@ -847,7 +847,7 @@ void ApplyVerifiedMPModelDelta(const MPModelDeltaProto &delta, } PruneZeroTermsInMpConstraint(baseline); // Add the term overrides which haven't been used: those are added terms. - for (const auto &p : term_overrides) { + for (const auto& p : term_overrides) { if (p.second != 0.0) { baseline->add_var_index(p.first); baseline->add_coefficient(p.second); diff --git a/ortools/linear_solver/samples/bin_packing_mip.cc b/ortools/linear_solver/samples/bin_packing_mip.cc index 9449561066..241015a7ee 100644 --- a/ortools/linear_solver/samples/bin_packing_mip.cc +++ b/ortools/linear_solver/samples/bin_packing_mip.cc @@ -41,7 +41,8 @@ void BinPackingMip() { // [START solver] // Create the mip solver with the CBC backend. 
- MPSolver solver("bin_packing_mip", MPSolver::CBC_MIXED_INTEGER_PROGRAMMING); + MPSolver solver("bin_packing_mip", + MPSolver::CBC_MIXED_INTEGER_PROGRAMMING); // [END solver] // [START program_part2] diff --git a/ortools/linear_solver/sat_interface.cc b/ortools/linear_solver/sat_interface.cc index 098de7c695..7c60034fb8 100644 --- a/ortools/linear_solver/sat_interface.cc +++ b/ortools/linear_solver/sat_interface.cc @@ -40,11 +40,11 @@ using google::protobuf::Message; class SatInterface : public MPSolverInterface { public: - explicit SatInterface(MPSolver *const solver); + explicit SatInterface(MPSolver* const solver); ~SatInterface() override; // ----- Solve ----- - MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; bool InterruptSolve() override; // ----- Model modifications and extraction ----- @@ -53,18 +53,18 @@ class SatInterface : public MPSolverInterface { void SetVariableBounds(int index, double lb, double ub) override; void SetVariableInteger(int index, bool integer) override; void SetConstraintBounds(int index, double lb, double ub) override; - void AddRowConstraint(MPConstraint *const ct) override; - void AddVariable(MPVariable *const var) override; - void SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, double new_value, + void AddRowConstraint(MPConstraint* const ct) override; + void AddVariable(MPVariable* const var) override; + void SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) override; - void ClearConstraint(MPConstraint *const constraint) override; - void SetObjectiveCoefficient(const MPVariable *const variable, + void ClearConstraint(MPConstraint* const constraint) override; + void SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) override; void SetObjectiveOffset(double value) override; void ClearObjective() override; - 
bool AddIndicatorConstraint(MPConstraint *const ct) override { return true; } + bool AddIndicatorConstraint(MPConstraint* const ct) override { return true; } // ------ Query statistics on the solution and the solve ------ int64 iterations() const override; @@ -79,13 +79,13 @@ class SatInterface : public MPSolverInterface { bool IsMIP() const override; std::string SolverVersion() const override; - void *underlying_solver() override; + void* underlying_solver() override; void ExtractNewVariables() override; void ExtractNewConstraints() override; void ExtractObjective() override; - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; void SetDualTolerance(double value) override; @@ -93,7 +93,7 @@ class SatInterface : public MPSolverInterface { void SetScalingMode(int value) override; void SetLpAlgorithm(int value) override; bool SetSolverSpecificParametersAsString( - const std::string ¶meters) override; + const std::string& parameters) override; absl::Status SetNumThreads(int num_threads) override; private: @@ -105,12 +105,12 @@ class SatInterface : public MPSolverInterface { double best_objective_bound_ = 0.0; }; -SatInterface::SatInterface(MPSolver *const solver) +SatInterface::SatInterface(MPSolver* const solver) : MPSolverInterface(solver), interrupt_solve_(false) {} SatInterface::~SatInterface() {} -MPSolver::ResultStatus SatInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus SatInterface::Solve(const MPSolverParameters& param) { interrupt_solve_ = false; // Reset extraction as this interface is not incremental yet. 
@@ -152,7 +152,7 @@ MPSolver::ResultStatus SatInterface::Solve(const MPSolverParameters ¶m) { SatSolveProto(std::move(request), &interrupt_solve_); if (!status_or.ok()) return MPSolver::ABNORMAL; - const MPSolutionResponse &response = status_or.value(); + const MPSolutionResponse& response = status_or.value(); // The solution must be marked as synchronized even when no solution exists. sync_status_ = SOLUTION_SYNCHRONIZED; @@ -182,7 +182,7 @@ MPSolver::ResultStatus SatInterface::Solve(const MPSolverParameters ¶m) { best_objective_bound_ = response.best_objective_bound(); const size_t num_vars = solver_->variables_.size(); for (int var_id = 0; var_id < num_vars; ++var_id) { - MPVariable *const var = solver_->variables_[var_id]; + MPVariable* const var = solver_->variables_[var_id]; var->set_solution_value(response.variable_value(var_id)); } } @@ -213,25 +213,25 @@ void SatInterface::SetConstraintBounds(int index, double lb, double ub) { NonIncrementalChange(); } -void SatInterface::AddRowConstraint(MPConstraint *const ct) { +void SatInterface::AddRowConstraint(MPConstraint* const ct) { NonIncrementalChange(); } -void SatInterface::AddVariable(MPVariable *const var) { +void SatInterface::AddVariable(MPVariable* const var) { NonIncrementalChange(); } -void SatInterface::SetCoefficient(MPConstraint *const constraint, - const MPVariable *const variable, +void SatInterface::SetCoefficient(MPConstraint* const constraint, + const MPVariable* const variable, double new_value, double old_value) { NonIncrementalChange(); } -void SatInterface::ClearConstraint(MPConstraint *const constraint) { +void SatInterface::ClearConstraint(MPConstraint* const constraint) { NonIncrementalChange(); } -void SatInterface::SetObjectiveCoefficient(const MPVariable *const variable, +void SatInterface::SetObjectiveCoefficient(const MPVariable* const variable, double coefficient) { NonIncrementalChange(); } @@ -269,7 +269,7 @@ std::string SatInterface::SolverVersion() const { return "SAT Based MIP 
Solver"; } -void *SatInterface::underlying_solver() { return nullptr; } +void* SatInterface::underlying_solver() { return nullptr; } void SatInterface::ExtractNewVariables() { NonIncrementalChange(); } @@ -277,7 +277,7 @@ void SatInterface::ExtractNewConstraints() { NonIncrementalChange(); } void SatInterface::ExtractObjective() { NonIncrementalChange(); } -void SatInterface::SetParameters(const MPSolverParameters ¶m) { +void SatInterface::SetParameters(const MPSolverParameters& param) { // By default, we use 8 threads as it allows to try a good set of orthogonal // parameters. This can be overridden by the user. parameters_.set_num_search_workers(num_threads_); @@ -301,7 +301,7 @@ void SatInterface::SetRelativeMipGap(double value) {} void SatInterface::SetPresolveMode(int value) {} bool SatInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { return ProtobufTextFormatMergeFromString(parameters, ¶meters_); } @@ -311,7 +311,7 @@ void SatInterface::NonIncrementalChange() { } // Register Sat in the global linear solver factory. -MPSolverInterface *BuildSatInterface(MPSolver *const solver) { +MPSolverInterface* BuildSatInterface(MPSolver* const solver) { return new SatInterface(solver); } diff --git a/ortools/linear_solver/sat_proto_solver.cc b/ortools/linear_solver/sat_proto_solver.cc index bbb9acfb6f..ff7485f649 100644 --- a/ortools/linear_solver/sat_proto_solver.cc +++ b/ortools/linear_solver/sat_proto_solver.cc @@ -57,7 +57,7 @@ MPSolverResponseStatus ToMPSolverResponseStatus(sat::CpSolverStatus status, } // namespace absl::StatusOr SatSolveProto( - MPModelRequest request, std::atomic *interrupt_solve) { + MPModelRequest request, std::atomic* interrupt_solve) { // By default, we use 8 threads as it allows to try a good set of orthogonal // parameters. This can be overridden by the user. 
sat::SatParameters params; @@ -93,8 +93,8 @@ absl::StatusOr SatSolveProto( // Note(user): the LP presolvers API is a bit weird and keep a reference to // the given GlopParameters, so we need to make sure it outlive them. const glop::GlopParameters glop_params; - MPModelProto *const mp_model = request.mutable_model(); - std::vector > for_postsolve; + MPModelProto* const mp_model = request.mutable_model(); + std::vector> for_postsolve; const bool log_info = VLOG_IS_ON(1) || params.log_search_progress(); const auto status = ApplyMipPresolveSteps(log_info, glop_params, mp_model, &for_postsolve); @@ -140,7 +140,7 @@ absl::StatusOr SatSolveProto( // Copy and scale the hint if there is one. if (request.model().has_solution_hint()) { - auto *cp_model_hint = cp_model.mutable_solution_hint(); + auto* cp_model_hint = cp_model.mutable_solution_hint(); const int size = request.model().solution_hint().var_index().size(); for (int i = 0; i < size; ++i) { const int var = request.model().solution_hint().var_index(i); diff --git a/ortools/linear_solver/sat_solver_utils.cc b/ortools/linear_solver/sat_solver_utils.cc index b912044798..35efcdddc2 100644 --- a/ortools/linear_solver/sat_solver_utils.cc +++ b/ortools/linear_solver/sat_solver_utils.cc @@ -25,8 +25,8 @@ namespace operations_research { lp_preprocessors.push_back(absl::make_unique(&glop_params)); MPSolverResponseStatus ApplyMipPresolveSteps( - bool log_info, const glop::GlopParameters &glop_params, MPModelProto *model, - std::vector > *for_postsolve) { + bool log_info, const glop::GlopParameters& glop_params, MPModelProto* model, + std::vector>* for_postsolve) { CHECK(model != nullptr); // TODO(user): General constraints are currently not supported. 
@@ -52,7 +52,7 @@ MPSolverResponseStatus ApplyMipPresolveSteps( "Running basic LP presolve, initial problem dimensions: "; LOG_IF(INFO, log_info) << header << lp.GetDimensionString(); std::vector names; - std::vector > lp_preprocessors; + std::vector> lp_preprocessors; ADD_LP_PREPROCESSOR(glop::FixedVariablePreprocessor); ADD_LP_PREPROCESSOR(glop::SingletonPreprocessor); ADD_LP_PREPROCESSOR(glop::ForcingAndImpliedFreeConstraintPreprocessor); @@ -65,7 +65,7 @@ MPSolverResponseStatus ApplyMipPresolveSteps( ADD_LP_PREPROCESSOR(glop::UnconstrainedVariablePreprocessor); for (int i = 0; i < lp_preprocessors.size(); ++i) { - auto &preprocessor = lp_preprocessors[i]; + auto& preprocessor = lp_preprocessors[i]; preprocessor->UseInMipContext(); const bool need_postsolve = preprocessor->Run(&lp); names[i].resize(header.size(), ' '); // padding. diff --git a/ortools/linear_solver/scip_callback.cc b/ortools/linear_solver/scip_callback.cc index e8c1fe8920..47007995f4 100644 --- a/ortools/linear_solver/scip_callback.cc +++ b/ortools/linear_solver/scip_callback.cc @@ -50,15 +50,15 @@ struct SCIP_ConshdlrData { }; struct SCIP_ConsData { - void *data; + void* data; }; namespace operations_research { namespace { -int ScipNumVars(SCIP *scip) { return SCIPgetNOrigVars(scip); } +int ScipNumVars(SCIP* scip) { return SCIPgetNOrigVars(scip); } -SCIP_VAR *ScipGetVar(SCIP *scip, int var_index) { +SCIP_VAR* ScipGetVar(SCIP* scip, int var_index) { DCHECK_GE(var_index, 0); DCHECK_LT(var_index, ScipNumVars(scip)); return SCIPgetOrigVars(scip)[var_index]; @@ -67,13 +67,13 @@ SCIP_VAR *ScipGetVar(SCIP *scip, int var_index) { } // namespace ScipConstraintHandlerContext::ScipConstraintHandlerContext( - SCIP *scip, SCIP_SOL *solution, bool is_pseudo_solution) + SCIP* scip, SCIP_SOL* solution, bool is_pseudo_solution) : scip_(scip), solution_(solution), is_pseudo_solution_(is_pseudo_solution) {} double ScipConstraintHandlerContext::VariableValue( - const MPVariable *variable) const { + const 
MPVariable* variable) const { return SCIPgetSolVal(scip_, solution_, ScipGetVar(scip_, variable->index())); } @@ -91,10 +91,10 @@ enum class ScipSeparationResult { kDidNotFind }; -bool LinearConstraintIsViolated(const ScipConstraintHandlerContext &context, - const LinearRange &constraint) { +bool LinearConstraintIsViolated(const ScipConstraintHandlerContext& context, + const LinearRange& constraint) { double a_times_x = 0; - for (const auto &coef_pair : constraint.linear_expr().terms()) { + for (const auto& coef_pair : constraint.linear_expr().terms()) { a_times_x += coef_pair.second * context.VariableValue(coef_pair.first); } double violation = std::max(a_times_x - constraint.upper_bound(), @@ -108,14 +108,14 @@ bool LinearConstraintIsViolated(const ScipConstraintHandlerContext &context, // returns kCuttingPlaneAdded, // else: // returns kDidNotFind -ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, - const ScipConstraintHandlerContext &context, - absl::Span constraints, +ScipSeparationResult RunSeparation(internal::ScipCallbackRunner* runner, + const ScipConstraintHandlerContext& context, + absl::Span constraints, bool is_integral) { ScipSeparationResult result = ScipSeparationResult::kDidNotFind; - SCIP *scip = context.scip(); - for (SCIP_CONS *constraint : constraints) { - SCIP_CONSDATA *consdata = SCIPconsGetData(constraint); + SCIP* scip = context.scip(); + for (SCIP_CONS* constraint : constraints) { + SCIP_CONSDATA* consdata = SCIPconsGetData(constraint); CHECK(consdata != nullptr); std::vector user_suggested_constraints; if (is_integral) { @@ -126,7 +126,7 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, runner->SeparateFractionalSolution(context, consdata->data); } int num_constraints_added = 0; - for (const CallbackRangeConstraint &user_suggested_constraint : + for (const CallbackRangeConstraint& user_suggested_constraint : user_suggested_constraints) { if (!LinearConstraintIsViolated(context, 
user_suggested_constraint.range)) { @@ -135,7 +135,7 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, num_constraints_added++; // Two code paths, one for cuts, one for lazy constraints. Cuts first: if (user_suggested_constraint.is_cut) { - SCIP_ROW *row = nullptr; + SCIP_ROW* row = nullptr; constexpr bool kModifiable = false; constexpr bool kRemovable = true; CHECK_OK(SCIP_TO_STATUS(SCIPcreateEmptyRowCons( @@ -144,11 +144,11 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, user_suggested_constraint.range.upper_bound(), user_suggested_constraint.local, kModifiable, kRemovable))); CHECK_OK(SCIP_TO_STATUS(SCIPcacheRowExtensions(scip, row))); - for (const auto &coef_pair : + for (const auto& coef_pair : user_suggested_constraint.range.linear_expr().terms()) { // NOTE(user): the coefficients don't come out sorted. I don't // think this matters. - SCIP_VAR *var = ScipGetVar(scip, coef_pair.first->index()); + SCIP_VAR* var = ScipGetVar(scip, coef_pair.first->index()); const double coef = coef_pair.second; CHECK_OK(SCIP_TO_STATUS(SCIPaddVarToRow(scip, row, var, coef))); } @@ -169,9 +169,9 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, } } else { // Lazy constraint path: - std::vector vars; + std::vector vars; std::vector coefs; - for (const auto &coef_pair : + for (const auto& coef_pair : user_suggested_constraint.range.linear_expr().terms()) { // NOTE(user): the coefficients don't come out sorted. I don't // think this matters. @@ -180,7 +180,7 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, } const int num_vars = vars.size(); - SCIP_CONS *scip_cons; + SCIP_CONS* scip_cons; // TODO(user): Maybe it is better to expose more of these options, // potentially through user_suggested_constraint. 
CHECK_OK(SCIP_TO_STATUS(SCIPcreateConsLinear( @@ -206,14 +206,14 @@ ScipSeparationResult RunSeparation(internal::ScipCallbackRunner *runner, } struct CallbackSetup { - SCIP_CONSHDLRDATA *scip_handler_data; - internal::ScipCallbackRunner *callback_runner; + SCIP_CONSHDLRDATA* scip_handler_data; + internal::ScipCallbackRunner* callback_runner; ScipConstraintHandlerContext context; - absl::Span useful_constraints; - absl::Span unlikely_useful_constraints; + absl::Span useful_constraints; + absl::Span unlikely_useful_constraints; - CallbackSetup(SCIP *scip, SCIP_CONSHDLR *scip_handler, SCIP_CONS **conss, - int nconss, int nusefulconss, SCIP_SOL *sol, + CallbackSetup(SCIP* scip, SCIP_CONSHDLR* scip_handler, SCIP_CONS** conss, + int nconss, int nusefulconss, SCIP_SOL* sol, bool is_pseudo_solution) : scip_handler_data(SCIPconshdlrGetData(scip_handler)), callback_runner(scip_handler_data->runner.get()), @@ -234,7 +234,7 @@ extern "C" { static SCIP_DECL_CONSFREE(ConstraintHandlerFreeC) { VLOG(3) << "FreeC"; CHECK(scip != nullptr); - SCIP_CONSHDLRDATA *scip_handler_data = SCIPconshdlrGetData(conshdlr); + SCIP_CONSHDLRDATA* scip_handler_data = SCIPconshdlrGetData(conshdlr); CHECK(scip_handler_data != nullptr); delete scip_handler_data; SCIPconshdlrSetData(conshdlr, nullptr); @@ -341,8 +341,8 @@ static SCIP_DECL_CONSCHECK(CheckFeasibilityC) { operations_research::CallbackSetup setup(scip, conshdlr, conss, nconss, nconss, sol, false); // All constraints are "useful" for this callback. 
- for (SCIP_CONS *constraint : setup.useful_constraints) { - SCIP_CONSDATA *consdata = SCIPconsGetData(constraint); + for (SCIP_CONS* constraint : setup.useful_constraints) { + SCIP_CONSDATA* consdata = SCIPconsGetData(constraint); CHECK(consdata != nullptr); if (!setup.callback_runner->IntegerSolutionFeasible(setup.context, consdata->data)) { @@ -395,7 +395,7 @@ static SCIP_DECL_CONSLOCK(VariableRoundingLockC) { const int num_vars = operations_research::ScipNumVars(scip); for (int i = 0; i < num_vars; ++i) { - SCIP_VAR *var = operations_research::ScipGetVar(scip, i); + SCIP_VAR* var = operations_research::ScipGetVar(scip, i); SCIP_CALL(SCIPaddVarLocksType(scip, var, locktype, nlockspos + nlocksneg, nlockspos + nlocksneg)); } @@ -407,10 +407,10 @@ namespace operations_research { namespace internal { void AddConstraintHandlerImpl( - const ScipConstraintHandlerDescription &description, - std::unique_ptr runner, SCIP *scip) { - SCIP_CONSHDLR *c_scip_handler; - SCIP_CONSHDLRDATA *scip_handler_data = new SCIP_CONSHDLRDATA; + const ScipConstraintHandlerDescription& description, + std::unique_ptr runner, SCIP* scip) { + SCIP_CONSHDLR* c_scip_handler; + SCIP_CONSHDLRDATA* scip_handler_data = new SCIP_CONSHDLRDATA; scip_handler_data->runner = std::move(runner); CHECK_OK(SCIP_TO_STATUS(SCIPincludeConshdlrBasic( @@ -430,16 +430,16 @@ void AddConstraintHandlerImpl( SCIPsetConshdlrDelete(scip, c_scip_handler, ConstraintHandlerDeleteC))); } -void AddCallbackConstraintImpl(SCIP *scip, const std::string &handler_name, - const std::string &constraint_name, - void *constraint_data, - const ScipCallbackConstraintOptions &options) { - SCIP_CONSHDLR *conshdlr = SCIPfindConshdlr(scip, handler_name.c_str()); +void AddCallbackConstraintImpl(SCIP* scip, const std::string& handler_name, + const std::string& constraint_name, + void* constraint_data, + const ScipCallbackConstraintOptions& options) { + SCIP_CONSHDLR* conshdlr = SCIPfindConshdlr(scip, handler_name.c_str()); CHECK(conshdlr != 
nullptr) << "Constraint handler " << handler_name << " not registered with scip."; - SCIP_ConsData *consdata = new SCIP_ConsData; + SCIP_ConsData* consdata = new SCIP_ConsData; consdata->data = constraint_data; - SCIP_CONS *constraint = nullptr; + SCIP_CONS* constraint = nullptr; CHECK_OK(SCIP_TO_STATUS(SCIPcreateCons( scip, &constraint, constraint_name.c_str(), conshdlr, consdata, options.initial, options.separate, options.enforce, options.check, diff --git a/ortools/linear_solver/scip_interface.cc b/ortools/linear_solver/scip_interface.cc index ee8bafba06..512c0d6721 100644 --- a/ortools/linear_solver/scip_interface.cc +++ b/ortools/linear_solver/scip_interface.cc @@ -57,26 +57,26 @@ class ScipConstraintHandlerForMPCallback; class SCIPInterface : public MPSolverInterface { public: - explicit SCIPInterface(MPSolver *solver); + explicit SCIPInterface(MPSolver* solver); ~SCIPInterface() override; void SetOptimizationDirection(bool maximize) override; - MPSolver::ResultStatus Solve(const MPSolverParameters ¶m) override; + MPSolver::ResultStatus Solve(const MPSolverParameters& param) override; absl::optional DirectlySolveProto( - const MPModelRequest &request) override; + const MPModelRequest& request) override; void Reset() override; void SetVariableBounds(int var_index, double lb, double ub) override; void SetVariableInteger(int var_index, bool integer) override; void SetConstraintBounds(int row_index, double lb, double ub) override; - void AddRowConstraint(MPConstraint *ct) override; - bool AddIndicatorConstraint(MPConstraint *ct) override; - void AddVariable(MPVariable *var) override; - void SetCoefficient(MPConstraint *constraint, const MPVariable *variable, + void AddRowConstraint(MPConstraint* ct) override; + bool AddIndicatorConstraint(MPConstraint* ct) override; + void AddVariable(MPVariable* var) override; + void SetCoefficient(MPConstraint* constraint, const MPVariable* variable, double new_value, double old_value) override; - void 
ClearConstraint(MPConstraint *constraint) override; - void SetObjectiveCoefficient(const MPVariable *variable, + void ClearConstraint(MPConstraint* constraint) override; + void SetObjectiveCoefficient(const MPVariable* variable, double coefficient) override; void SetObjectiveOffset(double value) override; void ClearObjective() override; @@ -113,7 +113,7 @@ class SCIPInterface : public MPSolverInterface { return SCIPinterruptSolve(scip_) == SCIP_OKAY; } - void *underlying_solver() override { return reinterpret_cast(scip_); } + void* underlying_solver() override { return reinterpret_cast(scip_); } // MULTIPLE SOLUTIONS SUPPORT // The default behavior of scip is to store the top incidentally generated @@ -139,11 +139,11 @@ class SCIPInterface : public MPSolverInterface { // a complete documentation of this design. // MPCallback API - void SetCallback(MPCallback *mp_callback) override; + void SetCallback(MPCallback* mp_callback) override; bool SupportsCallbacks() const override { return true; } private: - void SetParameters(const MPSolverParameters ¶m) override; + void SetParameters(const MPSolverParameters& param) override; void SetRelativeMipGap(double value) override; void SetPrimalTolerance(double value) override; void SetDualTolerance(double value) override; @@ -162,7 +162,7 @@ class SCIPInterface : public MPSolverInterface { absl::Status SetNumThreads(int num_threads) override; bool SetSolverSpecificParametersAsString( - const std::string ¶meters) override; + const std::string& parameters) override; void SetUnsupportedIntegerParam( MPSolverParameters::IntegerParam param) override; @@ -171,7 +171,7 @@ class SCIPInterface : public MPSolverInterface { // How many solutions SCIP found. int SolutionCount(); // Copy sol from SCIP to MPSolver. 
- void SetSolution(SCIP_SOL *solution); + void SetSolution(SCIP_SOL* solution); absl::Status CreateSCIP(); void DeleteSCIP(); @@ -184,11 +184,11 @@ class SCIPInterface : public MPSolverInterface { // If this status isn't OK, then most operations will silently be cancelled. absl::Status status_; - SCIP *scip_; - std::vector scip_variables_; - std::vector scip_constraints_; + SCIP* scip_; + std::vector scip_variables_; + std::vector scip_constraints_; int current_solution_index_ = 0; - MPCallback *callback_ = nullptr; + MPCallback* callback_ = nullptr; std::unique_ptr scip_constraint_handler_; // See ScipConstraintHandlerForMPCallback below. EmptyStruct constraint_data_for_handler_; @@ -199,25 +199,23 @@ class SCIPInterface : public MPSolverInterface { class ScipConstraintHandlerForMPCallback : public ScipConstraintHandler { public: - explicit ScipConstraintHandlerForMPCallback(MPCallback *mp_callback); + explicit ScipConstraintHandlerForMPCallback(MPCallback* mp_callback); std::vector SeparateFractionalSolution( - const ScipConstraintHandlerContext &context, - const EmptyStruct &) override; + const ScipConstraintHandlerContext& context, const EmptyStruct&) override; std::vector SeparateIntegerSolution( - const ScipConstraintHandlerContext &context, - const EmptyStruct &) override; + const ScipConstraintHandlerContext& context, const EmptyStruct&) override; private: std::vector SeparateSolution( - const ScipConstraintHandlerContext &context, + const ScipConstraintHandlerContext& context, const bool at_integer_solution); - MPCallback *mp_callback_; + MPCallback* mp_callback_; }; -SCIPInterface::SCIPInterface(MPSolver *solver) +SCIPInterface::SCIPInterface(MPSolver* solver) : MPSolverInterface(solver), scip_(nullptr) { status_ = CreateSCIP(); } @@ -350,8 +348,8 @@ void SCIPInterface::SetConstraintBounds(int index, double lb, double ub) { } } -void SCIPInterface::SetCoefficient(MPConstraint *constraint, - const MPVariable *variable, double new_value, +void 
SCIPInterface::SetCoefficient(MPConstraint* constraint, + const MPVariable* variable, double new_value, double old_value) { RETURN_IF_ALREADY_IN_ERROR_STATE; InvalidateSolutionSynchronization(); @@ -375,18 +373,18 @@ void SCIPInterface::SetCoefficient(MPConstraint *constraint, } // Not cached -void SCIPInterface::ClearConstraint(MPConstraint *constraint) { +void SCIPInterface::ClearConstraint(MPConstraint* constraint) { RETURN_IF_ALREADY_IN_ERROR_STATE; InvalidateSolutionSynchronization(); const int constraint_index = constraint->index(); // Constraint may not have been extracted yet. if (!constraint_is_extracted(constraint_index)) return; - for (const auto &entry : constraint->coefficients_) { + for (const auto& entry : constraint->coefficients_) { const int var_index = entry.first->index(); const double old_coef_value = entry.second; DCHECK(variable_is_extracted(var_index)); RETURN_AND_STORE_IF_SCIP_ERROR(SCIPfreeTransform(scip_)); - // Set coefficient to zero by substracting the old coefficient value. + // Set coefficient to zero by subtracting the old coefficient value. RETURN_AND_STORE_IF_SCIP_ERROR( SCIPaddCoefLinear(scip_, scip_constraints_[constraint_index], scip_variables_[var_index], -old_coef_value)); @@ -394,7 +392,7 @@ void SCIPInterface::ClearConstraint(MPConstraint *constraint) { } // Cached -void SCIPInterface::SetObjectiveCoefficient(const MPVariable *variable, +void SCIPInterface::SetObjectiveCoefficient(const MPVariable* variable, double coefficient) { sync_status_ = MUST_RELOAD; } @@ -412,7 +410,7 @@ void SCIPInterface::ClearObjective() { InvalidateSolutionSynchronization(); RETURN_AND_STORE_IF_SCIP_ERROR(SCIPfreeTransform(scip_)); // Clear linear terms - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int var_index = entry.first->index(); // Variable may have not been extracted yet. 
if (!variable_is_extracted(var_index)) { @@ -441,16 +439,16 @@ void SCIPInterface::BranchingPriorityChangedForVariable(int var_index) { } } -void SCIPInterface::AddRowConstraint(MPConstraint *ct) { +void SCIPInterface::AddRowConstraint(MPConstraint* ct) { sync_status_ = MUST_RELOAD; } -bool SCIPInterface::AddIndicatorConstraint(MPConstraint *ct) { +bool SCIPInterface::AddIndicatorConstraint(MPConstraint* ct) { sync_status_ = MUST_RELOAD; return true; } -void SCIPInterface::AddVariable(MPVariable *var) { sync_status_ = MUST_RELOAD; } +void SCIPInterface::AddVariable(MPVariable* var) { sync_status_ = MUST_RELOAD; } void SCIPInterface::ExtractNewVariables() { RETURN_IF_ALREADY_IN_ERROR_STATE; @@ -459,10 +457,10 @@ void SCIPInterface::ExtractNewVariables() { RETURN_AND_STORE_IF_SCIP_ERROR(SCIPfreeTransform(scip_)); // Define new variables for (int j = last_variable_index_; j < total_num_vars; ++j) { - MPVariable *const var = solver_->variables_[j]; + MPVariable* const var = solver_->variables_[j]; DCHECK(!variable_is_extracted(j)); set_variable_as_extracted(j, true); - SCIP_VAR *scip_var = nullptr; + SCIP_VAR* scip_var = nullptr; // The true objective coefficient will be set later in ExtractObjective. double tmp_obj_coef = 0.0; RETURN_AND_STORE_IF_SCIP_ERROR(SCIPcreateVar( @@ -481,8 +479,8 @@ void SCIPInterface::ExtractNewVariables() { } // Add new variables to existing constraints. for (int i = 0; i < last_constraint_index_; i++) { - MPConstraint *const ct = solver_->constraints_[i]; - for (const auto &entry : ct->coefficients_) { + MPConstraint* const ct = solver_->constraints_[i]; + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); DCHECK(variable_is_extracted(var_index)); if (var_index >= last_variable_index_) { @@ -505,34 +503,34 @@ void SCIPInterface::ExtractNewConstraints() { // Find the length of the longest row. 
int max_row_length = 0; for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; DCHECK(!constraint_is_extracted(i)); set_constraint_as_extracted(i, true); if (ct->coefficients_.size() > max_row_length) { max_row_length = ct->coefficients_.size(); } } - std::unique_ptr vars(new SCIP_VAR *[max_row_length]); + std::unique_ptr vars(new SCIP_VAR*[max_row_length]); std::unique_ptr coeffs(new double[max_row_length]); // Add each new constraint. for (int i = last_constraint_index_; i < total_num_rows; ++i) { - MPConstraint *const ct = solver_->constraints_[i]; + MPConstraint* const ct = solver_->constraints_[i]; DCHECK(constraint_is_extracted(i)); const int size = ct->coefficients_.size(); int j = 0; - for (const auto &entry : ct->coefficients_) { + for (const auto& entry : ct->coefficients_) { const int var_index = entry.first->index(); DCHECK(variable_is_extracted(var_index)); vars[j] = scip_variables_[var_index]; coeffs[j] = entry.second; j++; } - SCIP_CONS *scip_constraint = nullptr; + SCIP_CONS* scip_constraint = nullptr; const bool is_lazy = ct->is_lazy(); if (ct->indicator_variable() != nullptr) { const int ind_index = ct->indicator_variable()->index(); DCHECK(variable_is_extracted(ind_index)); - SCIP_VAR *ind_var = scip_variables_[ind_index]; + SCIP_VAR* ind_var = scip_variables_[ind_index]; if (ct->indicator_value() == 0) { RETURN_AND_STORE_IF_SCIP_ERROR( SCIPgetNegatedVar(scip_, scip_variables_[ind_index], &ind_var)); @@ -541,10 +539,16 @@ void SCIPInterface::ExtractNewConstraints() { if (ct->ub() < std::numeric_limits::infinity()) { RETURN_AND_STORE_IF_SCIP_ERROR(SCIPcreateConsIndicator( scip_, &scip_constraint, ct->name().c_str(), ind_var, size, - vars.get(), coeffs.get(), ct->ub(), /*initial=*/!is_lazy, - /*separate=*/true, /*enforce=*/true, /*check=*/true, - /*propagate=*/true, /*local=*/false, /*dynamic=*/false, - /*removable=*/is_lazy, 
/*stickingatnode=*/false)); + vars.get(), coeffs.get(), ct->ub(), + /*initial=*/!is_lazy, + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*dynamic=*/false, + /*removable=*/is_lazy, + /*stickingatnode=*/false)); RETURN_AND_STORE_IF_SCIP_ERROR(SCIPaddCons(scip_, scip_constraint)); scip_constraints_.push_back(scip_constraint); } @@ -554,10 +558,16 @@ void SCIPInterface::ExtractNewConstraints() { } RETURN_AND_STORE_IF_SCIP_ERROR(SCIPcreateConsIndicator( scip_, &scip_constraint, ct->name().c_str(), ind_var, size, - vars.get(), coeffs.get(), -ct->lb(), /*initial=*/!is_lazy, - /*separate=*/true, /*enforce=*/true, /*check=*/true, - /*propagate=*/true, /*local=*/false, /*dynamic=*/false, - /*removable=*/is_lazy, /*stickingatnode=*/false)); + vars.get(), coeffs.get(), -ct->lb(), + /*initial=*/!is_lazy, + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*dynamic=*/false, + /*removable=*/is_lazy, + /*stickingatnode=*/false)); RETURN_AND_STORE_IF_SCIP_ERROR(SCIPaddCons(scip_, scip_constraint)); scip_constraints_.push_back(scip_constraint); } @@ -567,10 +577,16 @@ void SCIPInterface::ExtractNewConstraints() { // for an explanation of the parameters. 
RETURN_AND_STORE_IF_SCIP_ERROR(SCIPcreateConsLinear( scip_, &scip_constraint, ct->name().c_str(), size, vars.get(), - coeffs.get(), ct->lb(), ct->ub(), /*initial=*/!is_lazy, - /*separate=*/true, /*enforce=*/true, /*check=*/true, - /*propagate=*/true, /*local=*/false, /*modifiable=*/false, - /*dynamic=*/false, /*removable=*/is_lazy, + coeffs.get(), ct->lb(), ct->ub(), + /*initial=*/!is_lazy, + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*modifiable=*/false, + /*dynamic=*/false, + /*removable=*/is_lazy, /*stickingatnode=*/false)); RETURN_AND_STORE_IF_SCIP_ERROR(SCIPaddCons(scip_, scip_constraint)); scip_constraints_.push_back(scip_constraint); @@ -584,7 +600,7 @@ void SCIPInterface::ExtractObjective() { RETURN_AND_STORE_IF_SCIP_ERROR(SCIPfreeTransform(scip_)); // Linear objective: set objective coefficients for all variables (some might // have been modified). - for (const auto &entry : solver_->objective_->coefficients_) { + for (const auto& entry : solver_->objective_->coefficients_) { const int var_index = entry.first->index(); const double obj_coef = entry.second; RETURN_AND_STORE_IF_SCIP_ERROR( @@ -612,7 +628,7 @@ void SCIPInterface::ExtractObjective() { RETURN_ABNORMAL_IF_BAD_STATUS; \ } while (false); -MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { +MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters& param) { // "status_" may encode a variety of failure scenarios, many of which would // correspond to another MPResultStatus than ABNORMAL, but since SCIP is a // moving target, we use the most likely error code here (abnormalities, @@ -677,7 +693,7 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { // Use the solution hint if any. 
if (!solver_->solution_hint_.empty()) { - SCIP_SOL *solution; + SCIP_SOL* solution; bool is_solution_partial = false; const int num_vars = solver_->variables_.size(); if (solver_->solution_hint_.size() != num_vars) { @@ -691,7 +707,7 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { } // Fill the other variables from the given solution hint. - for (const std::pair &p : + for (const std::pair& p : solver_->solution_hint_) { RETURN_ABNORMAL_IF_SCIP_ERROR(SCIPsetSolVal( scip_, solution, scip_variables_[p.first->index()], p.second)); @@ -701,8 +717,8 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { SCIP_Bool is_feasible; RETURN_ABNORMAL_IF_SCIP_ERROR(SCIPcheckSol( scip_, solution, /*printreason=*/false, /*completely=*/true, - /*checkbounds=*/true, /*checkintegrality=*/true, - /*checklprows=*/true, &is_feasible)); + /*checkbounds=*/true, /*checkintegrality=*/true, /*checklprows=*/true, + &is_feasible)); VLOG(1) << "Solution hint is " << (is_feasible ? "FEASIBLE" : "INFEASIBLE"); } @@ -715,8 +731,8 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { if (!is_solution_partial && SCIPisTransformed(scip_)) { RETURN_ABNORMAL_IF_SCIP_ERROR(SCIPtrySolFree( scip_, &solution, /*printreason=*/false, /*completely=*/true, - /*checkbounds=*/true, /*checkintegrality=*/true, - /*checklprows=*/true, &is_stored)); + /*checkbounds=*/true, /*checkintegrality=*/true, /*checklprows=*/true, + &is_stored)); } else { RETURN_ABNORMAL_IF_SCIP_ERROR( SCIPaddSolFree(scip_, &solution, &is_stored)); @@ -732,7 +748,7 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { absl::FormatDuration(timer.GetDuration())); current_solution_index_ = 0; // Get the results. - SCIP_SOL *const solution = SCIPgetBestSol(scip_); + SCIP_SOL* const solution = SCIPgetBestSol(scip_); if (solution != nullptr) { // If optimal or feasible solution is found. 
SetSolution(solution); @@ -779,11 +795,11 @@ MPSolver::ResultStatus SCIPInterface::Solve(const MPSolverParameters ¶m) { return result_status_; } -void SCIPInterface::SetSolution(SCIP_SOL *solution) { +void SCIPInterface::SetSolution(SCIP_SOL* solution) { objective_value_ = SCIPgetSolOrigObj(scip_, solution); VLOG(1) << "objective=" << objective_value_; for (int i = 0; i < solver_->variables_.size(); ++i) { - MPVariable *const var = solver_->variables_[i]; + MPVariable* const var = solver_->variables_[i]; const int var_index = var->index(); const double val = SCIPgetSolVal(scip_, solution, scip_variables_[var_index]); @@ -793,7 +809,7 @@ void SCIPInterface::SetSolution(SCIP_SOL *solution) { } absl::optional SCIPInterface::DirectlySolveProto( - const MPModelRequest &request) { + const MPModelRequest& request) { // ScipSolveProto doesn't solve concurrently. if (solver_->GetNumThreads() > 1) return absl::nullopt; @@ -823,7 +839,7 @@ bool SCIPInterface::NextSolution() { return false; } current_solution_index_++; - SCIP_SOL **all_solutions = SCIPgetSols(scip_); + SCIP_SOL** all_solutions = SCIPgetSols(scip_); SetSolution(all_solutions[current_solution_index_]); return true; } @@ -857,7 +873,7 @@ double SCIPInterface::best_objective_bound() const { } } -void SCIPInterface::SetParameters(const MPSolverParameters ¶m) { +void SCIPInterface::SetParameters(const MPSolverParameters& param) { SetCommonParameters(param); SetMIPParameters(param); } @@ -980,7 +996,7 @@ absl::Status SCIPInterface::SetNumThreads(int num_threads) { } bool SCIPInterface::SetSolverSpecificParametersAsString( - const std::string ¶meters) { + const std::string& parameters) { const absl::Status s = LegacyScipSetSolverSpecificParameters(parameters, scip_); if (!s.ok()) { @@ -992,7 +1008,7 @@ bool SCIPInterface::SetSolverSpecificParametersAsString( class ScipMPCallbackContext : public MPCallbackContext { public: - ScipMPCallbackContext(const ScipConstraintHandlerContext *scip_context, + 
ScipMPCallbackContext(const ScipConstraintHandlerContext* scip_context, bool at_integer_solution) : scip_context_(scip_context), at_integer_solution_(at_integer_solution) {} @@ -1008,12 +1024,12 @@ class ScipMPCallbackContext : public MPCallbackContext { return !scip_context_->is_pseudo_solution(); } - double VariableValue(const MPVariable *variable) override { + double VariableValue(const MPVariable* variable) override { CHECK(CanQueryVariableValues()); return scip_context_->VariableValue(variable); } - void AddCut(const LinearRange &cutting_plane) override { + void AddCut(const LinearRange& cutting_plane) override { CallbackRangeConstraint constraint; constraint.is_cut = true; constraint.range = cutting_plane; @@ -1021,7 +1037,7 @@ class ScipMPCallbackContext : public MPCallbackContext { constraints_added_.push_back(std::move(constraint)); } - void AddLazyConstraint(const LinearRange &lazy_constraint) override { + void AddLazyConstraint(const LinearRange& lazy_constraint) override { CallbackRangeConstraint constraint; constraint.is_cut = false; constraint.range = lazy_constraint; @@ -1029,41 +1045,39 @@ class ScipMPCallbackContext : public MPCallbackContext { constraints_added_.push_back(std::move(constraint)); } - double SuggestSolution(const absl::flat_hash_map - &solution) override { + double SuggestSolution( + const absl::flat_hash_map& solution) override { LOG(FATAL) << "SuggestSolution() not currently supported for SCIP."; } int64 NumExploredNodes() override { // scip_context_->NumNodesProcessed() returns: - // 0 before the root node is solved, e.g. if a heuristic finds a - // solution. + // 0 before the root node is solved, e.g. if a heuristic finds a solution. // 1 at the root node // > 1 after the root node. // The NumExploredNodes spec requires that we return 0 at the root node, - // (this is consistent with gurobi). Below is a bandaid to try and make - // the + // (this is consistent with gurobi). 
Below is a bandaid to try and make the // behavior consistent, although some information is lost. return std::max(int64{0}, scip_context_->NumNodesProcessed() - 1); } - const std::vector &constraints_added() { + const std::vector& constraints_added() { return constraints_added_; } private: - const ScipConstraintHandlerContext *scip_context_; + const ScipConstraintHandlerContext* scip_context_; bool at_integer_solution_; // second value of pair is true for cuts and false for lazy constraints. std::vector constraints_added_; }; ScipConstraintHandlerForMPCallback::ScipConstraintHandlerForMPCallback( - MPCallback *mp_callback) + MPCallback* mp_callback) : ScipConstraintHandler( // MOE(begin-strip): - { /*name=*/ - "mp_solver_constraint_handler", /*description=*/ + {/*name=*/"mp_solver_constraint_handler", + /*description=*/ "A single constraint handler for all MPSolver models."} // MOE(end-strip-and-replace): ScipConstraintHandlerDescription() ), @@ -1071,33 +1085,33 @@ ScipConstraintHandlerForMPCallback::ScipConstraintHandlerForMPCallback( std::vector ScipConstraintHandlerForMPCallback::SeparateFractionalSolution( - const ScipConstraintHandlerContext &context, const EmptyStruct &) { + const ScipConstraintHandlerContext& context, const EmptyStruct&) { return SeparateSolution(context, /*at_integer_solution=*/false); } std::vector ScipConstraintHandlerForMPCallback::SeparateIntegerSolution( - const ScipConstraintHandlerContext &context, const EmptyStruct &) { + const ScipConstraintHandlerContext& context, const EmptyStruct&) { return SeparateSolution(context, /*at_integer_solution=*/true); } std::vector ScipConstraintHandlerForMPCallback::SeparateSolution( - const ScipConstraintHandlerContext &context, + const ScipConstraintHandlerContext& context, const bool at_integer_solution) { ScipMPCallbackContext mp_context(&context, at_integer_solution); mp_callback_->RunCallback(&mp_context); return mp_context.constraints_added(); } -void SCIPInterface::SetCallback(MPCallback 
*mp_callback) { +void SCIPInterface::SetCallback(MPCallback* mp_callback) { if (callback_ != nullptr) { callback_reset_ = true; } callback_ = mp_callback; } -MPSolverInterface *BuildSCIPInterface(MPSolver *const solver) { +MPSolverInterface* BuildSCIPInterface(MPSolver* const solver) { return new SCIPInterface(solver); } diff --git a/ortools/linear_solver/scip_proto_solver.cc b/ortools/linear_solver/scip_proto_solver.cc index ce33895502..a054fefb9e 100644 --- a/ortools/linear_solver/scip_proto_solver.cc +++ b/ortools/linear_solver/scip_proto_solver.cc @@ -61,12 +61,12 @@ namespace operations_research { namespace { // This function will create a new constraint if the indicator constraint has // both a lower bound and an upper bound. -absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, - SCIP *scip, SCIP_CONS **scip_cst, - std::vector *scip_variables, - std::vector *scip_constraints, - std::vector *tmp_variables, - std::vector *tmp_coefficients) { +absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto& gen_cst, + SCIP* scip, SCIP_CONS** scip_cst, + std::vector* scip_variables, + std::vector* scip_constraints, + std::vector* tmp_variables, + std::vector* tmp_coefficients) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(scip_variables != nullptr); @@ -76,10 +76,10 @@ absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, CHECK(gen_cst.has_indicator_constraint()); constexpr double kInfinity = std::numeric_limits::infinity(); - const auto &ind = gen_cst.indicator_constraint(); + const auto& ind = gen_cst.indicator_constraint(); if (!ind.has_constraint()) return absl::OkStatus(); - const MPConstraintProto &constraint = ind.constraint(); + const MPConstraintProto& constraint = ind.constraint(); const int size = constraint.var_index_size(); tmp_variables->resize(size, nullptr); tmp_coefficients->resize(size, 0); @@ -88,7 +88,7 @@ absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto 
&gen_cst, (*tmp_coefficients)[i] = constraint.coefficient(i); } - SCIP_VAR *ind_var = (*scip_variables)[ind.var_index()]; + SCIP_VAR* ind_var = (*scip_variables)[ind.var_index()]; if (ind.var_value() == 0) { RETURN_IF_SCIP_ERROR( SCIPgetNegatedVar(scip, (*scip_variables)[ind.var_index()], &ind_var)); @@ -99,10 +99,15 @@ absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, scip, scip_cst, gen_cst.name().c_str(), ind_var, size, tmp_variables->data(), tmp_coefficients->data(), ind.constraint().upper_bound(), - /*initial=*/!ind.constraint().is_lazy(), /*separate=*/true, - /*enforce=*/true, /*check=*/true, /*propagate=*/true, - /*local=*/false, /*dynamic=*/false, - /*removable=*/ind.constraint().is_lazy(), /*stickingatnode=*/false)); + /*initial=*/!ind.constraint().is_lazy(), + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*dynamic=*/false, + /*removable=*/ind.constraint().is_lazy(), + /*stickingatnode=*/false)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); scip_constraints->push_back(nullptr); scip_cst = &scip_constraints->back(); @@ -115,28 +120,33 @@ absl::Status AddIndicatorConstraint(const MPGeneralConstraintProto &gen_cst, scip, scip_cst, gen_cst.name().c_str(), ind_var, size, tmp_variables->data(), tmp_coefficients->data(), -ind.constraint().lower_bound(), - /*initial=*/!ind.constraint().is_lazy(), /*separate=*/true, - /*enforce=*/true, /*check=*/true, /*propagate=*/true, - /*local=*/false, /*dynamic=*/false, - /*removable=*/ind.constraint().is_lazy(), /*stickingatnode=*/false)); + /*initial=*/!ind.constraint().is_lazy(), + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*dynamic=*/false, + /*removable=*/ind.constraint().is_lazy(), + /*stickingatnode=*/false)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); } return absl::OkStatus(); } -absl::Status AddSosConstraint(const MPGeneralConstraintProto &gen_cst, - const 
std::vector &scip_variables, - SCIP *scip, SCIP_CONS **scip_cst, - std::vector *tmp_variables, - std::vector *tmp_weights) { +absl::Status AddSosConstraint(const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, + SCIP* scip, SCIP_CONS** scip_cst, + std::vector* tmp_variables, + std::vector* tmp_weights) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(tmp_variables != nullptr); CHECK(tmp_weights != nullptr); CHECK(gen_cst.has_sos_constraint()); - const MPSosConstraint &sos_cst = gen_cst.sos_constraint(); + const MPSosConstraint& sos_cst = gen_cst.sos_constraint(); // SOS constraints of type N indicate at most N variables are non-zero. // Constraints with N variables or less are valid, but useless. They also @@ -163,16 +173,22 @@ absl::Status AddSosConstraint(const MPGeneralConstraintProto &gen_cst, } switch (sos_cst.type()) { case MPSosConstraint::SOS1_DEFAULT: - RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicSOS1( - scip, /*cons=*/scip_cst, /*name=*/gen_cst.name().c_str(), - /*nvars=*/sos_cst.var_index_size(), /*vars=*/tmp_variables->data(), - /*weights=*/tmp_weights->data())); + RETURN_IF_SCIP_ERROR( + SCIPcreateConsBasicSOS1(scip, + /*cons=*/scip_cst, + /*name=*/gen_cst.name().c_str(), + /*nvars=*/sos_cst.var_index_size(), + /*vars=*/tmp_variables->data(), + /*weights=*/tmp_weights->data())); break; case MPSosConstraint::SOS2: - RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicSOS2( - scip, /*cons=*/scip_cst, /*name=*/gen_cst.name().c_str(), - /*nvars=*/sos_cst.var_index_size(), /*vars=*/tmp_variables->data(), - /*weights=*/tmp_weights->data())); + RETURN_IF_SCIP_ERROR( + SCIPcreateConsBasicSOS2(scip, + /*cons=*/scip_cst, + /*name=*/gen_cst.name().c_str(), + /*nvars=*/sos_cst.var_index_size(), + /*vars=*/tmp_variables->data(), + /*weights=*/tmp_weights->data())); break; } RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); @@ -180,13 +196,13 @@ absl::Status AddSosConstraint(const MPGeneralConstraintProto &gen_cst, } absl::Status 
AddQuadraticConstraint( - const MPGeneralConstraintProto &gen_cst, - const std::vector &scip_variables, SCIP *scip, - SCIP_CONS **scip_cst, std::vector *tmp_variables, - std::vector *tmp_coefficients, - std::vector *tmp_qvariables1, - std::vector *tmp_qvariables2, - std::vector *tmp_qcoefficients) { + const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, SCIP* scip, + SCIP_CONS** scip_cst, std::vector* tmp_variables, + std::vector* tmp_coefficients, + std::vector* tmp_qvariables1, + std::vector* tmp_qvariables2, + std::vector* tmp_qcoefficients) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(tmp_variables != nullptr); @@ -196,7 +212,7 @@ absl::Status AddQuadraticConstraint( CHECK(tmp_qcoefficients != nullptr); CHECK(gen_cst.has_quadratic_constraint()); - const MPQuadraticConstraint &quad_cst = gen_cst.quadratic_constraint(); + const MPQuadraticConstraint& quad_cst = gen_cst.quadratic_constraint(); // Process linear part of the constraint. const int lsize = quad_cst.var_index_size(); @@ -221,48 +237,53 @@ absl::Status AddQuadraticConstraint( (*tmp_qcoefficients)[i] = quad_cst.qcoefficient(i); } - RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicQuadratic( - scip, /*cons=*/scip_cst, /*name=*/gen_cst.name().c_str(), - /*nlinvars=*/lsize, /*linvars=*/tmp_variables->data(), - /*lincoefs=*/tmp_coefficients->data(), /*nquadterms=*/qsize, - /*quadvars1=*/tmp_qvariables1->data(), - /*quadvars2=*/tmp_qvariables2->data(), - /*quadcoefs=*/tmp_qcoefficients->data(), /*lhs=*/quad_cst.lower_bound(), - /*rhs=*/quad_cst.upper_bound())); + RETURN_IF_SCIP_ERROR( + SCIPcreateConsBasicQuadratic(scip, + /*cons=*/scip_cst, + /*name=*/gen_cst.name().c_str(), + /*nlinvars=*/lsize, + /*linvars=*/tmp_variables->data(), + /*lincoefs=*/tmp_coefficients->data(), + /*nquadterms=*/qsize, + /*quadvars1=*/tmp_qvariables1->data(), + /*quadvars2=*/tmp_qvariables2->data(), + /*quadcoefs=*/tmp_qcoefficients->data(), + /*lhs=*/quad_cst.lower_bound(), + 
/*rhs=*/quad_cst.upper_bound())); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); return absl::OkStatus(); } // Models the constraint y = |x| as y >= 0 plus one disjunction constraint: // y = x OR y = -x -absl::Status AddAbsConstraint(const MPGeneralConstraintProto &gen_cst, - const std::vector &scip_variables, - SCIP *scip, SCIP_CONS **scip_cst) { +absl::Status AddAbsConstraint(const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, + SCIP* scip, SCIP_CONS** scip_cst) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(gen_cst.has_abs_constraint()); - const auto &abs = gen_cst.abs_constraint(); - SCIP_VAR *scip_var = scip_variables[abs.var_index()]; - SCIP_VAR *scip_resultant_var = scip_variables[abs.resultant_var_index()]; + const auto& abs = gen_cst.abs_constraint(); + SCIP_VAR* scip_var = scip_variables[abs.var_index()]; + SCIP_VAR* scip_resultant_var = scip_variables[abs.resultant_var_index()]; // Set the resultant variable's lower bound to zero if it's negative. if (SCIPvarGetLbLocal(scip_resultant_var) < 0.0) { RETURN_IF_SCIP_ERROR(SCIPchgVarLb(scip, scip_resultant_var, 0.0)); } - std::vector vars; + std::vector vars; std::vector vals; - std::vector cons; + std::vector cons; auto add_abs_constraint = - [&](const std::string &name_prefix) -> absl::Status { - SCIP_CONS *scip_cons = nullptr; + [&](const std::string& name_prefix) -> absl::Status { + SCIP_CONS* scip_cons = nullptr; CHECK(vars.size() == vals.size()); const std::string name = gen_cst.has_name() ? 
absl::StrCat(gen_cst.name(), name_prefix) : ""; RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicLinear( - scip, /*cons=*/&scip_cons, /*name=*/name.c_str(), - /*nvars=*/vars.size(), /*vars=*/vars.data(), /*vals=*/vals.data(), - /*lhs=*/0.0, /*rhs=*/0.0)); + scip, /*cons=*/&scip_cons, + /*name=*/name.c_str(), /*nvars=*/vars.size(), /*vars=*/vars.data(), + /*vals=*/vals.data(), /*lhs=*/0.0, /*rhs=*/0.0)); // Note that the constraints are, by design, not added into the model using // SCIPaddCons. cons.push_back(scip_cons); @@ -282,53 +303,57 @@ absl::Status AddAbsConstraint(const MPGeneralConstraintProto &gen_cst, const std::string name = gen_cst.has_name() ? absl::StrCat(gen_cst.name(), "_disj") : ""; RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicDisjunction( - scip, /*cons=*/scip_cst, /*name=*/name.c_str(), /*nconss=*/cons.size(), - /*conss=*/cons.data(), /*relaxcons=*/nullptr)); + scip, /*cons=*/scip_cst, /*name=*/name.c_str(), + /*nconss=*/cons.size(), /*conss=*/cons.data(), /*relaxcons=*/nullptr)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); return absl::OkStatus(); } -absl::Status AddAndConstraint(const MPGeneralConstraintProto &gen_cst, - const std::vector &scip_variables, - SCIP *scip, SCIP_CONS **scip_cst, - std::vector *tmp_variables) { +absl::Status AddAndConstraint(const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, + SCIP* scip, SCIP_CONS** scip_cst, + std::vector* tmp_variables) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(tmp_variables != nullptr); CHECK(gen_cst.has_and_constraint()); - const auto &andcst = gen_cst.and_constraint(); + const auto& andcst = gen_cst.and_constraint(); tmp_variables->resize(andcst.var_index_size(), nullptr); for (int i = 0; i < andcst.var_index_size(); ++i) { (*tmp_variables)[i] = scip_variables[andcst.var_index(i)]; } RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicAnd( - scip, /*cons=*/scip_cst, /*name=*/gen_cst.name().c_str(), + scip, /*cons=*/scip_cst, + /*name=*/gen_cst.name().c_str(), 
/*resvar=*/scip_variables[andcst.resultant_var_index()], - /*nvars=*/andcst.var_index_size(), /*vars=*/tmp_variables->data())); + /*nvars=*/andcst.var_index_size(), + /*vars=*/tmp_variables->data())); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); return absl::OkStatus(); } -absl::Status AddOrConstraint(const MPGeneralConstraintProto &gen_cst, - const std::vector &scip_variables, - SCIP *scip, SCIP_CONS **scip_cst, - std::vector *tmp_variables) { +absl::Status AddOrConstraint(const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, + SCIP* scip, SCIP_CONS** scip_cst, + std::vector* tmp_variables) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(tmp_variables != nullptr); CHECK(gen_cst.has_or_constraint()); - const auto &orcst = gen_cst.or_constraint(); + const auto& orcst = gen_cst.or_constraint(); tmp_variables->resize(orcst.var_index_size(), nullptr); for (int i = 0; i < orcst.var_index_size(); ++i) { (*tmp_variables)[i] = scip_variables[orcst.var_index(i)]; } RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicOr( - scip, /*cons=*/scip_cst, /*name=*/gen_cst.name().c_str(), + scip, /*cons=*/scip_cst, + /*name=*/gen_cst.name().c_str(), /*resvar=*/scip_variables[orcst.resultant_var_index()], - /*nvars=*/orcst.var_index_size(), /*vars=*/tmp_variables->data())); + /*nvars=*/orcst.var_index_size(), + /*vars=*/tmp_variables->data())); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); return absl::OkStatus(); } @@ -338,35 +363,35 @@ absl::Status AddOrConstraint(const MPGeneralConstraintProto &gen_cst, // - one disjunction constraint among all of the possible y = x1, y = x2, ... // y = xn, y = c constraints // Does the equivalent thing for max (with y >= max(...) instead). 
-absl::Status AddMinMaxConstraint(const MPGeneralConstraintProto &gen_cst, - const std::vector &scip_variables, - SCIP *scip, SCIP_CONS **scip_cst, - std::vector *scip_constraints, - std::vector *tmp_variables) { +absl::Status AddMinMaxConstraint(const MPGeneralConstraintProto& gen_cst, + const std::vector& scip_variables, + SCIP* scip, SCIP_CONS** scip_cst, + std::vector* scip_constraints, + std::vector* tmp_variables) { CHECK(scip != nullptr); CHECK(scip_cst != nullptr); CHECK(tmp_variables != nullptr); CHECK(gen_cst.has_min_constraint() || gen_cst.has_max_constraint()); - const auto &minmax = gen_cst.has_min_constraint() ? gen_cst.min_constraint() + const auto& minmax = gen_cst.has_min_constraint() ? gen_cst.min_constraint() : gen_cst.max_constraint(); const std::set unique_var_indices(minmax.var_index().begin(), minmax.var_index().end()); - SCIP_VAR *scip_resultant_var = scip_variables[minmax.resultant_var_index()]; + SCIP_VAR* scip_resultant_var = scip_variables[minmax.resultant_var_index()]; - std::vector vars; + std::vector vars; std::vector vals; - std::vector cons; - auto add_lin_constraint = [&](const std::string &name_prefix, + std::vector cons; + auto add_lin_constraint = [&](const std::string& name_prefix, double lower_bound = 0.0, double upper_bound = 0.0) -> absl::Status { - SCIP_CONS *scip_cons = nullptr; + SCIP_CONS* scip_cons = nullptr; CHECK(vars.size() == vals.size()); const std::string name = gen_cst.has_name() ? absl::StrCat(gen_cst.name(), name_prefix) : ""; RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicLinear( - scip, /*cons=*/&scip_cons, /*name=*/name.c_str(), - /*nvars=*/vars.size(), /*vars=*/vars.data(), /*vals=*/vals.data(), - /*lhs=*/lower_bound, /*rhs=*/upper_bound)); + scip, /*cons=*/&scip_cons, + /*name=*/name.c_str(), /*nvars=*/vars.size(), /*vars=*/vars.data(), + /*vals=*/vals.data(), /*lhs=*/lower_bound, /*rhs=*/upper_bound)); // Note that the constraints are, by design, not added into the model using // SCIPaddCons. 
cons.push_back(scip_cons); @@ -392,8 +417,8 @@ absl::Status AddMinMaxConstraint(const MPGeneralConstraintProto &gen_cst, const std::string name = gen_cst.has_name() ? absl::StrCat(gen_cst.name(), "_disj") : ""; RETURN_IF_SCIP_ERROR(SCIPcreateConsBasicDisjunction( - scip, /*cons=*/scip_cst, /*name=*/name.c_str(), /*nconss=*/cons.size(), - /*conss=*/cons.data(), /*relaxcons=*/nullptr)); + scip, /*cons=*/scip_cst, /*name=*/name.c_str(), + /*nconss=*/cons.size(), /*conss=*/cons.data(), /*relaxcons=*/nullptr)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, *scip_cst)); // Add all of the inequality constraints. @@ -421,17 +446,17 @@ absl::Status AddMinMaxConstraint(const MPGeneralConstraintProto &gen_cst, minmax.constant(), kInfinity)); } } - for (SCIP_CONS *scip_cons : cons) { + for (SCIP_CONS* scip_cons : cons) { scip_constraints->push_back(scip_cons); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, scip_cons)); } return absl::OkStatus(); } -absl::Status AddQuadraticObjective(const MPQuadraticObjective &quadobj, - SCIP *scip, - std::vector *scip_variables, - std::vector *scip_constraints) { +absl::Status AddQuadraticObjective(const MPQuadraticObjective& quadobj, + SCIP* scip, + std::vector* scip_variables, + std::vector* scip_constraints) { CHECK(scip != nullptr); CHECK(scip_variables != nullptr); CHECK(scip_constraints != nullptr); @@ -444,17 +469,18 @@ absl::Status AddQuadraticObjective(const MPQuadraticObjective &quadobj, // SCIP supports quadratic objectives by adding a quadratic constraint. We // need to create an extra variable to hold this quadratic objective. 
scip_variables->push_back(nullptr); - RETURN_IF_SCIP_ERROR(SCIPcreateVarBasic( - scip, /*var=*/&scip_variables->back(), /*name=*/"quadobj", - /*lb=*/-kInfinity, /*ub=*/kInfinity, /*obj=*/1, - /*vartype=*/SCIP_VARTYPE_CONTINUOUS)); + RETURN_IF_SCIP_ERROR(SCIPcreateVarBasic(scip, /*var=*/&scip_variables->back(), + /*name=*/"quadobj", + /*lb=*/-kInfinity, /*ub=*/kInfinity, + /*obj=*/1, + /*vartype=*/SCIP_VARTYPE_CONTINUOUS)); RETURN_IF_SCIP_ERROR(SCIPaddVar(scip, scip_variables->back())); scip_constraints->push_back(nullptr); - SCIP_VAR *linvars[1] = {scip_variables->back()}; + SCIP_VAR* linvars[1] = {scip_variables->back()}; double lincoefs[1] = {-1}; - std::vector quadvars1(size, nullptr); - std::vector quadvars2(size, nullptr); + std::vector quadvars1(size, nullptr); + std::vector quadvars2(size, nullptr); std::vector quadcoefs(size, 0); for (int i = 0; i < size; ++i) { quadvars1[i] = scip_variables->at(quadobj.qvar1_index(i)); @@ -472,13 +498,13 @@ absl::Status AddQuadraticObjective(const MPQuadraticObjective &quadobj, return absl::OkStatus(); } -absl::Status AddSolutionHint(const MPModelProto &model, SCIP *scip, - const std::vector &scip_variables) { +absl::Status AddSolutionHint(const MPModelProto& model, SCIP* scip, + const std::vector& scip_variables) { CHECK(scip != nullptr); if (!model.has_solution_hint()) return absl::OkStatus(); - const PartialVariableAssignment &solution_hint = model.solution_hint(); - SCIP_SOL *solution; + const PartialVariableAssignment& solution_hint = model.solution_hint(); + SCIP_SOL* solution; bool is_solution_partial = solution_hint.var_index_size() != model.variable_size(); if (is_solution_partial) { @@ -504,12 +530,12 @@ absl::Status AddSolutionHint(const MPModelProto &model, SCIP *scip, // Returns "" iff the model seems valid for SCIP, else returns a human-readable // error message. Assumes that FindErrorInMPModelProto(model) found no error. 
-std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip) { +std::string FindErrorInMPModelForScip(const MPModelProto& model, SCIP* scip) { CHECK(scip != nullptr); const double infinity = SCIPinfinity(scip); for (int v = 0; v < model.variable_size(); ++v) { - const MPVariableProto &variable = model.variable(v); + const MPVariableProto& variable = model.variable(v); if (variable.lower_bound() >= infinity) { return absl::StrFormat( "Variable %i's lower bound is considered +infinity", v); @@ -526,7 +552,7 @@ std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip) { } for (int c = 0; c < model.constraint_size(); ++c) { - const MPConstraintProto &cst = model.constraint(c); + const MPConstraintProto& cst = model.constraint(c); if (cst.lower_bound() >= infinity) { return absl::StrFormat( "Constraint %d's lower_bound is considered +infinity", c); @@ -544,7 +570,7 @@ std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip) { } for (int c = 0; c < model.general_constraint_size(); ++c) { - const MPGeneralConstraintProto &cst = model.general_constraint(c); + const MPGeneralConstraintProto& cst = model.general_constraint(c); switch (cst.general_constraint_case()) { case MPGeneralConstraintProto::kQuadraticConstraint: if (cst.quadratic_constraint().lower_bound() >= infinity) { @@ -600,7 +626,7 @@ std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip) { } } - const MPQuadraticObjective &quad_obj = model.quadratic_objective(); + const MPQuadraticObjective& quad_obj = model.quadratic_objective(); for (int i = 0; i < quad_obj.coefficient_size(); ++i) { if (std::abs(quad_obj.coefficient(i)) >= infinity) { return absl::StrFormat( @@ -629,26 +655,26 @@ std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip) { } absl::StatusOr ScipSolveProto( - const MPModelRequest &request) { + const MPModelRequest& request) { MPSolutionResponse response; - const absl::optional > optional_model = + 
const absl::optional> optional_model = ExtractValidMPModelOrPopulateResponseStatus(request, &response); if (!optional_model) return response; - const MPModelProto &model = optional_model->get(); - SCIP *scip = nullptr; - std::vector scip_variables(model.variable_size(), nullptr); - std::vector scip_constraints( + const MPModelProto& model = optional_model->get(); + SCIP* scip = nullptr; + std::vector scip_variables(model.variable_size(), nullptr); + std::vector scip_constraints( model.constraint_size() + model.general_constraint_size(), nullptr); auto delete_scip_objects = [&]() -> absl::Status { // Release all created pointers. if (scip == nullptr) return absl::OkStatus(); - for (SCIP_VAR *variable : scip_variables) { + for (SCIP_VAR* variable : scip_variables) { if (variable != nullptr) { RETURN_IF_SCIP_ERROR(SCIPreleaseVar(scip, &variable)); } } - for (SCIP_CONS *constraint : scip_constraints) { + for (SCIP_CONS* constraint : scip_constraints) { if (constraint != nullptr) { RETURN_IF_SCIP_ERROR(SCIPreleaseCons(scip, &constraint)); } @@ -676,7 +702,8 @@ absl::StatusOr ScipSolveProto( request.solver_specific_parameters(), scip); if (!parameters_status.ok()) { response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); - response.set_status_str(std::string(parameters_status.message())); + response.set_status_str( + std::string(parameters_status.message())); // NOLINT return response; } // Default clock type. 
We use wall clock time because getting CPU user seconds @@ -700,7 +727,7 @@ absl::StatusOr ScipSolveProto( } for (int v = 0; v < model.variable_size(); ++v) { - const MPVariableProto &variable = model.variable(v); + const MPVariableProto& variable = model.variable(v); RETURN_IF_SCIP_ERROR(SCIPcreateVarBasic( scip, /*var=*/&scip_variables[v], /*name=*/variable.name().c_str(), /*lb=*/variable.lower_bound(), /*ub=*/variable.upper_bound(), @@ -711,10 +738,10 @@ absl::StatusOr ScipSolveProto( } { - std::vector ct_variables; + std::vector ct_variables; std::vector ct_coefficients; for (int c = 0; c < model.constraint_size(); ++c) { - const MPConstraintProto &constraint = model.constraint(c); + const MPConstraintProto& constraint = model.constraint(c); const int size = constraint.var_index_size(); ct_variables.resize(size, nullptr); ct_coefficients.resize(size, 0); @@ -726,22 +753,28 @@ absl::StatusOr ScipSolveProto( scip, /*cons=*/&scip_constraints[c], /*name=*/constraint.name().c_str(), /*nvars=*/constraint.var_index_size(), /*vars=*/ct_variables.data(), - /*vals=*/ct_coefficients.data(), /*lhs=*/constraint.lower_bound(), - /*rhs=*/constraint.upper_bound(), /*initial=*/!constraint.is_lazy(), - /*separate=*/true, /*enforce=*/true, /*check=*/true, - /*propagate=*/true, /*local=*/false, /*modifiable=*/false, - /*dynamic=*/false, /*removable=*/constraint.is_lazy(), + /*vals=*/ct_coefficients.data(), + /*lhs=*/constraint.lower_bound(), /*rhs=*/constraint.upper_bound(), + /*initial=*/!constraint.is_lazy(), + /*separate=*/true, + /*enforce=*/true, + /*check=*/true, + /*propagate=*/true, + /*local=*/false, + /*modifiable=*/false, + /*dynamic=*/false, + /*removable=*/constraint.is_lazy(), /*stickingatnode=*/false)); RETURN_IF_SCIP_ERROR(SCIPaddCons(scip, scip_constraints[c])); } // These extra arrays are used by quadratic constraints. 
- std::vector ct_qvariables1; - std::vector ct_qvariables2; + std::vector ct_qvariables1; + std::vector ct_qvariables2; std::vector ct_qcoefficients; const int lincst_size = model.constraint_size(); for (int c = 0; c < model.general_constraint_size(); ++c) { - const MPGeneralConstraintProto &gen_cst = model.general_constraint(c); + const MPGeneralConstraintProto& gen_cst = model.general_constraint(c); switch (gen_cst.general_constraint_case()) { case MPGeneralConstraintProto::kIndicatorConstraint: { RETURN_IF_ERROR(AddIndicatorConstraint( @@ -809,7 +842,7 @@ absl::StatusOr ScipSolveProto( } RETURN_IF_SCIP_ERROR(SCIPsolve(scip)); - SCIP_SOL *const solution = SCIPgetBestSol(scip); + SCIP_SOL* const solution = SCIPgetBestSol(scip); if (solution != nullptr) { response.set_objective_value(SCIPgetSolOrigObj(scip, solution)); response.set_best_objective_bound(SCIPgetDualbound(scip)); diff --git a/ortools/linear_solver/scip_proto_solver.h b/ortools/linear_solver/scip_proto_solver.h index bceb6f8830..75cfd51fce 100644 --- a/ortools/linear_solver/scip_proto_solver.h +++ b/ortools/linear_solver/scip_proto_solver.h @@ -25,9 +25,9 @@ namespace operations_research { // 1e-7, and the gap limit to 0.0001 (whereas SCIP defaults are 1e-6 and 0, // respectively, and they are being used here). 
absl::StatusOr ScipSolveProto( - const MPModelRequest &request); + const MPModelRequest& request); -std::string FindErrorInMPModelForScip(const MPModelProto &model, SCIP *scip); +std::string FindErrorInMPModelForScip(const MPModelProto& model, SCIP* scip); } // namespace operations_research diff --git a/ortools/lp_data/lp_data.cc b/ortools/lp_data/lp_data.cc index 31391635da..e02a318ef7 100644 --- a/ortools/lp_data/lp_data.cc +++ b/ortools/lp_data/lp_data.cc @@ -55,7 +55,7 @@ bool AreBoundsFreeOrBoxed(Fractional lower_bound, Fractional upper_bound) { } template -double Average(const gtl::ITIVector &v) { +double Average(const gtl::ITIVector& v) { const size_t size = v.size(); DCHECK_LT(0, size); double sum = 0.0; @@ -69,7 +69,7 @@ double Average(const gtl::ITIVector &v) { } template -double StandardDeviation(const gtl::ITIVector &v) { +double StandardDeviation(const gtl::ITIVector& v) { const size_t size = v.size(); double n = 0.0; // n is used in a calculation involving doubles. double sigma_square = 0.0; @@ -86,7 +86,7 @@ double StandardDeviation(const gtl::ITIVector &v) { // Returns 0 when the vector is empty. 
template -T GetMaxElement(const gtl::ITIVector &v) { +T GetMaxElement(const gtl::ITIVector& v) { const size_t size = v.size(); if (size == 0) { return T(0); @@ -174,7 +174,7 @@ ColIndex LinearProgram::CreateNewVariable() { ColIndex LinearProgram::CreateNewSlackVariable(bool is_integer_slack_variable, Fractional lower_bound, Fractional upper_bound, - const std::string &name) { + const std::string& name) { objective_coefficients_.push_back(0.0); variable_lower_bounds_.push_back(lower_bound); variable_upper_bounds_.push_back(upper_bound); @@ -200,7 +200,7 @@ RowIndex LinearProgram::CreateNewConstraint() { return row; } -ColIndex LinearProgram::FindOrCreateVariable(const std::string &variable_id) { +ColIndex LinearProgram::FindOrCreateVariable(const std::string& variable_id) { const absl::flat_hash_map::iterator it = variable_table_.find(variable_id); if (it != variable_table_.end()) { @@ -214,7 +214,7 @@ ColIndex LinearProgram::FindOrCreateVariable(const std::string &variable_id) { } RowIndex LinearProgram::FindOrCreateConstraint( - const std::string &constraint_id) { + const std::string& constraint_id) { const absl::flat_hash_map::iterator it = constraint_table_.find(constraint_id); if (it != constraint_table_.end()) { @@ -275,17 +275,17 @@ void LinearProgram::UpdateAllIntegerVariableLists() const { integer_variables_list_is_consistent_ = true; } -const std::vector &LinearProgram::IntegerVariablesList() const { +const std::vector& LinearProgram::IntegerVariablesList() const { UpdateAllIntegerVariableLists(); return integer_variables_list_; } -const std::vector &LinearProgram::BinaryVariablesList() const { +const std::vector& LinearProgram::BinaryVariablesList() const { UpdateAllIntegerVariableLists(); return binary_variables_list_; } -const std::vector &LinearProgram::NonBinaryVariablesList() const { +const std::vector& LinearProgram::NonBinaryVariablesList() const { UpdateAllIntegerVariableLists(); return non_binary_variables_list_; } @@ -371,7 +371,7 @@ 
LinearProgram::VariableType LinearProgram::GetVariableType(ColIndex col) const { return variable_types_[col]; } -const SparseMatrix &LinearProgram::GetTransposeSparseMatrix() const { +const SparseMatrix& LinearProgram::GetTransposeSparseMatrix() const { if (!transpose_matrix_is_consistent_) { transpose_matrix_.PopulateFromTranspose(matrix_); transpose_matrix_is_consistent_ = true; @@ -381,7 +381,7 @@ const SparseMatrix &LinearProgram::GetTransposeSparseMatrix() const { return transpose_matrix_; } -SparseMatrix *LinearProgram::GetMutableTransposeSparseMatrix() { +SparseMatrix* LinearProgram::GetMutableTransposeSparseMatrix() { if (!transpose_matrix_is_consistent_) { transpose_matrix_.PopulateFromTranspose(matrix_); } @@ -404,11 +404,11 @@ void LinearProgram::ClearTransposeMatrix() { transpose_matrix_is_consistent_ = false; } -const SparseColumn &LinearProgram::GetSparseColumn(ColIndex col) const { +const SparseColumn& LinearProgram::GetSparseColumn(ColIndex col) const { return matrix_.column(col); } -SparseColumn *LinearProgram::GetMutableSparseColumn(ColIndex col) { +SparseColumn* LinearProgram::GetMutableSparseColumn(ColIndex col) { columns_are_known_to_be_clean_ = false; transpose_matrix_is_consistent_ = false; return matrix_.mutable_column(col); @@ -448,7 +448,7 @@ std::string LinearProgram::GetObjectiveStatsString() const { } bool LinearProgram::SolutionIsWithinVariableBounds( - const DenseRow &solution, Fractional absolute_tolerance) const { + const DenseRow& solution, Fractional absolute_tolerance) const { DCHECK_EQ(solution.size(), num_variables()); if (solution.size() != num_variables()) return false; const ColIndex num_cols = num_variables(); @@ -463,12 +463,12 @@ bool LinearProgram::SolutionIsWithinVariableBounds( return true; } -bool LinearProgram::SolutionIsLPFeasible(const DenseRow &solution, +bool LinearProgram::SolutionIsLPFeasible(const DenseRow& solution, Fractional absolute_tolerance) const { if (!SolutionIsWithinVariableBounds(solution, 
absolute_tolerance)) { return false; } - const SparseMatrix &transpose = GetTransposeSparseMatrix(); + const SparseMatrix& transpose = GetTransposeSparseMatrix(); const RowIndex num_rows = num_constraints(); for (RowIndex row = RowIndex(0); row < num_rows; ++row) { const Fractional sum = @@ -483,7 +483,7 @@ bool LinearProgram::SolutionIsLPFeasible(const DenseRow &solution, return true; } -bool LinearProgram::SolutionIsInteger(const DenseRow &solution, +bool LinearProgram::SolutionIsInteger(const DenseRow& solution, Fractional absolute_tolerance) const { DCHECK_EQ(solution.size(), num_variables()); if (solution.size() != num_variables()) return false; @@ -495,16 +495,16 @@ bool LinearProgram::SolutionIsInteger(const DenseRow &solution, return true; } -bool LinearProgram::SolutionIsMIPFeasible(const DenseRow &solution, +bool LinearProgram::SolutionIsMIPFeasible(const DenseRow& solution, Fractional absolute_tolerance) const { return SolutionIsLPFeasible(solution, absolute_tolerance) && SolutionIsInteger(solution, absolute_tolerance); } -void LinearProgram::ComputeSlackVariableValues(DenseRow *solution) const { +void LinearProgram::ComputeSlackVariableValues(DenseRow* solution) const { CHECK(solution != nullptr); const ColIndex num_cols = GetFirstSlackVariable(); - const SparseMatrix &transpose = GetTransposeSparseMatrix(); + const SparseMatrix& transpose = GetTransposeSparseMatrix(); const RowIndex num_rows = num_constraints(); CHECK_EQ(solution->size(), num_variables()); for (RowIndex row = RowIndex(0); row < num_rows; ++row) { @@ -600,7 +600,7 @@ std::string LinearProgram::Dump() const { // Integer variables. // TODO(user): if needed provide similar output for binary variables. 
- const std::vector &integer_variables = IntegerVariablesList(); + const std::vector& integer_variables = IntegerVariablesList(); if (!integer_variables.empty()) { output += "int"; for (ColIndex col : integer_variables) { @@ -613,7 +613,7 @@ std::string LinearProgram::Dump() const { return output; } -std::string LinearProgram::DumpSolution(const DenseRow &variable_values) const { +std::string LinearProgram::DumpSolution(const DenseRow& variable_values) const { DCHECK_EQ(variable_values.size(), num_variables()); std::string output; for (ColIndex col(0); col < variable_values.size(); ++col) { @@ -683,9 +683,9 @@ void LinearProgram::AddSlackVariablesWhereNecessary( detect_integer_constraints); if (detect_integer_constraints) { for (ColIndex col(0); col < num_variables(); ++col) { - const SparseColumn &column = matrix_.column(col); + const SparseColumn& column = matrix_.column(col); const bool is_integer_variable = IsVariableInteger(col); - for (const SparseColumn::Entry &entry : column) { + for (const SparseColumn::Entry& entry : column) { const RowIndex row = entry.row(); has_integer_slack_variable[row] = has_integer_slack_variable[row] && is_integer_variable && @@ -730,8 +730,8 @@ ColIndex LinearProgram::GetSlackVariable(RowIndex row) const { return first_slack_variable_ + RowToColIndex(row); } -void LinearProgram::PopulateFromDual(const LinearProgram &dual, - RowToColMapping *duplicated_rows) { +void LinearProgram::PopulateFromDual(const LinearProgram& dual, + RowToColMapping* duplicated_rows) { const ColIndex dual_num_variables = dual.num_variables(); const RowIndex dual_num_constraints = dual.num_constraints(); Clear(); @@ -829,7 +829,7 @@ void LinearProgram::PopulateFromDual(const LinearProgram &dual, } void LinearProgram::PopulateFromLinearProgram( - const LinearProgram &linear_program) { + const LinearProgram& linear_program) { matrix_.PopulateFromSparseMatrix(linear_program.matrix_); if (linear_program.transpose_matrix_is_consistent_) { 
transpose_matrix_is_consistent_ = true; @@ -850,8 +850,8 @@ void LinearProgram::PopulateFromLinearProgram( } void LinearProgram::PopulateFromPermutedLinearProgram( - const LinearProgram &lp, const RowPermutation &row_permutation, - const ColumnPermutation &col_permutation) { + const LinearProgram& lp, const RowPermutation& row_permutation, + const ColumnPermutation& col_permutation) { DCHECK(lp.IsCleanedUp()); DCHECK_EQ(row_permutation.size(), lp.num_constraints()); DCHECK_EQ(col_permutation.size(), lp.num_variables()); @@ -902,7 +902,7 @@ void LinearProgram::PopulateFromPermutedLinearProgram( } void LinearProgram::PopulateFromLinearProgramVariables( - const LinearProgram &linear_program) { + const LinearProgram& linear_program) { matrix_.PopulateFromZero(RowIndex(0), linear_program.num_variables()); first_slack_variable_ = kInvalidCol; transpose_matrix_is_consistent_ = false; @@ -917,7 +917,7 @@ void LinearProgram::PopulateFromLinearProgramVariables( } void LinearProgram::PopulateNameObjectiveAndVariablesFromLinearProgram( - const LinearProgram &linear_program) { + const LinearProgram& linear_program) { objective_coefficients_ = linear_program.objective_coefficients_; variable_lower_bounds_ = linear_program.variable_lower_bounds_; variable_upper_bounds_ = linear_program.variable_upper_bounds_; @@ -939,9 +939,9 @@ void LinearProgram::PopulateNameObjectiveAndVariablesFromLinearProgram( } void LinearProgram::AddConstraints( - const SparseMatrix &coefficients, const DenseColumn &left_hand_sides, - const DenseColumn &right_hand_sides, - const StrictITIVector &names) { + const SparseMatrix& coefficients, const DenseColumn& left_hand_sides, + const DenseColumn& right_hand_sides, + const StrictITIVector& names) { const RowIndex num_new_constraints = coefficients.num_rows(); DCHECK_EQ(num_variables(), coefficients.num_cols()); DCHECK_EQ(num_new_constraints, left_hand_sides.size()); @@ -964,17 +964,17 @@ void LinearProgram::AddConstraints( } void 
LinearProgram::AddConstraintsWithSlackVariables( - const SparseMatrix &coefficients, const DenseColumn &left_hand_sides, - const DenseColumn &right_hand_sides, - const StrictITIVector &names, + const SparseMatrix& coefficients, const DenseColumn& left_hand_sides, + const DenseColumn& right_hand_sides, + const StrictITIVector& names, bool detect_integer_constraints_for_slack) { AddConstraints(coefficients, left_hand_sides, right_hand_sides, names); AddSlackVariablesWhereNecessary(detect_integer_constraints_for_slack); } bool LinearProgram::UpdateVariableBoundsToIntersection( - const DenseRow &variable_lower_bounds, - const DenseRow &variable_upper_bounds) { + const DenseRow& variable_lower_bounds, + const DenseRow& variable_upper_bounds) { const ColIndex num_vars = num_variables(); DCHECK_EQ(variable_lower_bounds.size(), num_vars); DCHECK_EQ(variable_upper_bounds.size(), num_vars); @@ -997,7 +997,7 @@ bool LinearProgram::UpdateVariableBoundsToIntersection( return true; } -void LinearProgram::Swap(LinearProgram *linear_program) { +void LinearProgram::Swap(LinearProgram* linear_program) { matrix_.Swap(&linear_program->matrix_); transpose_matrix_.Swap(&linear_program->transpose_matrix_); @@ -1031,7 +1031,7 @@ void LinearProgram::Swap(LinearProgram *linear_program) { std::swap(first_slack_variable_, linear_program->first_slack_variable_); } -void LinearProgram::DeleteColumns(const DenseBooleanRow &columns_to_delete) { +void LinearProgram::DeleteColumns(const DenseBooleanRow& columns_to_delete) { if (columns_to_delete.empty()) return; integer_variables_list_is_consistent_ = false; const ColIndex num_cols = num_variables(); @@ -1076,7 +1076,7 @@ void LinearProgram::DeleteColumns(const DenseBooleanRow &columns_to_delete) { if (transpose_matrix_is_consistent_) { transpose_matrix_.DeleteRows( ColToRowIndex(new_index), - reinterpret_cast(permutation)); + reinterpret_cast(permutation)); } } @@ -1086,7 +1086,7 @@ void LinearProgram::DeleteSlackVariables() { // Restore the 
bounds on the constraints corresponding to the slack variables. for (ColIndex slack_variable = first_slack_variable_; slack_variable < matrix_.num_cols(); ++slack_variable) { - const SparseColumn &column = matrix_.column(slack_variable); + const SparseColumn& column = matrix_.column(slack_variable); // Slack variables appear only in the constraints for which they were // created. We can find this constraint by looking at the (only) entry in // the columnm of the slack variable. @@ -1108,9 +1108,9 @@ namespace { // Note that we ignore zeros and infinities because they do not matter from a // scaling perspective where this function is used. template -void UpdateMinAndMaxMagnitude(const FractionalRange &range, - Fractional *min_magnitude, - Fractional *max_magnitude) { +void UpdateMinAndMaxMagnitude(const FractionalRange& range, + Fractional* min_magnitude, + Fractional* max_magnitude) { for (const Fractional value : range) { const Fractional magnitude = std::abs(value); if (magnitude == 0 || magnitude == kInfinity) continue; @@ -1119,7 +1119,7 @@ void UpdateMinAndMaxMagnitude(const FractionalRange &range, } } -Fractional GetMedianScalingFactor(const DenseRow &range) { +Fractional GetMedianScalingFactor(const DenseRow& range) { std::vector median; for (const Fractional value : range) { if (value == 0.0) continue; @@ -1130,7 +1130,7 @@ Fractional GetMedianScalingFactor(const DenseRow &range) { return median[median.size() / 2]; } -Fractional GetMeanScalingFactor(const DenseRow &range) { +Fractional GetMeanScalingFactor(const DenseRow& range) { Fractional mean = 0.0; int num_non_zeros = 0; for (const Fractional value : range) { @@ -1224,7 +1224,7 @@ Fractional LinearProgram::ScaleBounds() { return bound_scaling_factor; } -void LinearProgram::DeleteRows(const DenseBooleanColumn &rows_to_delete) { +void LinearProgram::DeleteRows(const DenseBooleanColumn& rows_to_delete) { if (rows_to_delete.empty()) return; // Deal with row-indexed data and construct the row mapping that 
will need to @@ -1267,7 +1267,7 @@ void LinearProgram::DeleteRows(const DenseBooleanColumn &rows_to_delete) { // Eventually update transpose_matrix_. if (transpose_matrix_is_consistent_) { transpose_matrix_.DeleteColumns( - reinterpret_cast(rows_to_delete)); + reinterpret_cast(rows_to_delete)); } } @@ -1402,7 +1402,7 @@ std::string LinearProgram::NonZeroStatFormatter( EntryIndex num_entries(0); const ColIndex num_cols = num_variables(); for (ColIndex col(0); col < num_cols; ++col) { - const SparseColumn &sparse_column = GetSparseColumn(col); + const SparseColumn& sparse_column = GetSparseColumn(col); num_entries += sparse_column.num_entries(); num_entries_in_column[col] = sparse_column.num_entries(); for (const SparseColumn::Entry e : sparse_column) { @@ -1471,7 +1471,7 @@ bool LinearProgram::BoundsOfIntegerConstraintsAreInteger( // Using transpose for this is faster (complexity = O(number of non zeros in // matrix)) than directly iterating through entries (complexity = O(number of // constraints * number of variables)). - const SparseMatrix &transpose = GetTransposeSparseMatrix(); + const SparseMatrix& transpose = GetTransposeSparseMatrix(); for (RowIndex row = RowIndex(0); row < num_constraints(); ++row) { bool integer_constraint = true; for (const SparseColumn::Entry var : transpose.column(RowToColIndex(row))) { diff --git a/ortools/lp_data/lp_data.h b/ortools/lp_data/lp_data.h index 517e4da2ba..4596ab160e 100644 --- a/ortools/lp_data/lp_data.h +++ b/ortools/lp_data/lp_data.h @@ -60,8 +60,7 @@ class LinearProgram { CONTINUOUS, // The variable must only take integer values. INTEGER, - // The variable is implied integer variable i.e. it was continuous - // variable + // The variable is implied integer variable i.e. it was continuous variable // in the LP and was detected to take only integer values. IMPLIED_INTEGER }; @@ -72,8 +71,8 @@ class LinearProgram { void Clear(); // Name setter and getter. 
- void SetName(const std::string &name) { name_ = name; } - const std::string &name() const { return name_; } + void SetName(const std::string& name) { name_ = name; } + const std::string& name() const { return name_; } // Creates a new variable and returns its index. // By default, the column bounds will be [0, infinity). @@ -84,7 +83,7 @@ class LinearProgram { ColIndex CreateNewSlackVariable(bool is_integer_slack_variable, Fractional lower_bound, Fractional upper_bound, - const std::string &name); + const std::string& name); // Creates a new constraint and returns its index. // By default, the constraint bounds will be [0, 0]. @@ -101,8 +100,8 @@ class LinearProgram { // LinearProgramBuilder class to simplify the code of some functions like // DeleteColumns() here and make the behavior on copy clear? or simply remove // them as it is almost as easy to maintain a hash_table on the client side. - ColIndex FindOrCreateVariable(const std::string &variable_id); - RowIndex FindOrCreateConstraint(const std::string &constraint_id); + ColIndex FindOrCreateVariable(const std::string& variable_id); + RowIndex FindOrCreateConstraint(const std::string& constraint_id); // Functions to set the name of a variable or constraint. Note that you // won't be able to find those named variables/constraints with @@ -173,8 +172,8 @@ class LinearProgram { // Returns the underlying SparseMatrix or its transpose (which may need to be // computed). - const SparseMatrix &GetSparseMatrix() const { return matrix_; } - const SparseMatrix &GetTransposeSparseMatrix() const; + const SparseMatrix& GetSparseMatrix() const { return matrix_; } + const SparseMatrix& GetTransposeSparseMatrix() const; // Some transformations are better done on the transpose representation. These // two functions are here for that. Note that calling the first function and @@ -189,7 +188,7 @@ class LinearProgram { // IMPORTANT: The matrix dimension cannot change. Otherwise this will cause // problems. 
This is checked in debug mode when calling // UseTransposeMatrixAsReference(). - SparseMatrix *GetMutableTransposeSparseMatrix(); + SparseMatrix* GetMutableTransposeSparseMatrix(); void UseTransposeMatrixAsReference(); // Release the memory used by the transpose matrix. @@ -197,10 +196,10 @@ class LinearProgram { // Gets the underlying SparseColumn with the given index. // This is the same as GetSparseMatrix().column(col); - const SparseColumn &GetSparseColumn(ColIndex col) const; + const SparseColumn& GetSparseColumn(ColIndex col) const; // Gets a pointer to the underlying SparseColumn with the given index. - SparseColumn *GetMutableSparseColumn(ColIndex col); + SparseColumn* GetMutableSparseColumn(ColIndex col); // Returns the number of variables. ColIndex num_variables() const { return matrix_.num_cols(); } @@ -213,24 +212,24 @@ class LinearProgram { // Return the lower bounds (resp. upper bounds) of constraints as a column // vector. Note that the bound values may be +/- infinity. - const DenseColumn &constraint_lower_bounds() const { + const DenseColumn& constraint_lower_bounds() const { return constraint_lower_bounds_; } - const DenseColumn &constraint_upper_bounds() const { + const DenseColumn& constraint_upper_bounds() const { return constraint_upper_bounds_; } // Returns the objective coefficients (or cost) of variables as a row vector. - const DenseRow &objective_coefficients() const { + const DenseRow& objective_coefficients() const { return objective_coefficients_; } // Return the lower bounds (resp. upper bounds) of variables as a row vector. // Note that the bound values may be +/- infinity. 
- const DenseRow &variable_lower_bounds() const { + const DenseRow& variable_lower_bounds() const { return variable_lower_bounds_; } - const DenseRow &variable_upper_bounds() const { + const DenseRow& variable_upper_bounds() const { return variable_upper_bounds_; } @@ -241,15 +240,15 @@ class LinearProgram { // Returns a list (technically a vector) of the ColIndices of the integer // variables. This vector is lazily computed. - const std::vector &IntegerVariablesList() const; + const std::vector& IntegerVariablesList() const; // Returns a list (technically a vector) of the ColIndices of the binary // integer variables. This vector is lazily computed. - const std::vector &BinaryVariablesList() const; + const std::vector& BinaryVariablesList() const; // Returns a list (technically a vector) of the ColIndices of the non-binary // integer variables. This vector is lazily computed. - const std::vector &NonBinaryVariablesList() const; + const std::vector& NonBinaryVariablesList() const; // Returns the objective coefficient (or cost) of the given variable for the // minimization version of the problem. That is, this is the same as @@ -264,28 +263,28 @@ class LinearProgram { } // Checks if each variable respects its bounds, nothing else. - bool SolutionIsWithinVariableBounds(const DenseRow &solution, + bool SolutionIsWithinVariableBounds(const DenseRow& solution, Fractional absolute_tolerance) const; // Tests if the solution is LP-feasible within the given tolerance, // i.e., satisfies all linear constraints within the absolute tolerance level. // The solution does not need to satisfy the integer constraints. - bool SolutionIsLPFeasible(const DenseRow &solution, + bool SolutionIsLPFeasible(const DenseRow& solution, Fractional absolute_tolerance) const; // Tests if the solution is integer within the given tolerance, i.e., all // integer variables have integer values within the absolute tolerance level. // The solution does not need to satisfy the linear constraints. 
- bool SolutionIsInteger(const DenseRow &solution, + bool SolutionIsInteger(const DenseRow& solution, Fractional absolute_tolerance) const; // Tests if the solution is both LP-feasible and integer within the tolerance. - bool SolutionIsMIPFeasible(const DenseRow &solution, + bool SolutionIsMIPFeasible(const DenseRow& solution, Fractional absolute_tolerance) const; // Fills the value of the slack from the other variable values. // This requires that the slack have been added. - void ComputeSlackVariableValues(DenseRow *solution) const; + void ComputeSlackVariableValues(DenseRow* solution) const; // Functions to translate the sum(solution * objective_coefficients()) to // the real objective of the problem and back. Note that these can also @@ -305,7 +304,7 @@ class LinearProgram { // Returns a string that contains the provided solution of the LP in the // format var1 = X, var2 = Y, var3 = Z, ... - std::string DumpSolution(const DenseRow &variable_values) const; + std::string DumpSolution(const DenseRow& variable_values) const; // Returns a comma-separated string of integers containing (in that order) // num_constraints_, num_variables_in_file_, num_entries_, @@ -415,24 +414,24 @@ class LinearProgram { // compute the solution of a maximization problem given as an argument. // // TODO(user): Do not interpret as a minimization problem? - void PopulateFromDual(const LinearProgram &dual, - RowToColMapping *duplicated_rows); + void PopulateFromDual(const LinearProgram& dual, + RowToColMapping* duplicated_rows); // Populates the calling object with the given LinearProgram. - void PopulateFromLinearProgram(const LinearProgram &linear_program); + void PopulateFromLinearProgram(const LinearProgram& linear_program); // Populates the calling object with the given LinearProgram while permuting // variables and constraints. This is useful mainly for testing to generate // a model with the same optimal objective value. 
void PopulateFromPermutedLinearProgram( - const LinearProgram &lp, const RowPermutation &row_permutation, - const ColumnPermutation &col_permutation); + const LinearProgram& lp, const RowPermutation& row_permutation, + const ColumnPermutation& col_permutation); // Populates the calling object with the variables of the given LinearProgram. // The function preserves the bounds, the integrality, the names of the // variables and their objective coefficients. No constraints are copied (the // matrix in the destination has 0 rows). - void PopulateFromLinearProgramVariables(const LinearProgram &linear_program); + void PopulateFromLinearProgramVariables(const LinearProgram& linear_program); // Adds constraints to the linear program. The constraints are specified using // a sparse matrix of the coefficients, and vectors that represent the @@ -441,26 +440,26 @@ class LinearProgram { // The sizes of the columns and the names must be the same as the number of // rows of the sparse matrix; the number of columns of the matrix must be // equal to the number of variables of the linear program. - void AddConstraints(const SparseMatrix &coefficients, - const DenseColumn &left_hand_sides, - const DenseColumn &right_hand_sides, - const StrictITIVector &names); + void AddConstraints(const SparseMatrix& coefficients, + const DenseColumn& left_hand_sides, + const DenseColumn& right_hand_sides, + const StrictITIVector& names); // Calls the AddConstraints method. After adding the constraints it adds slack // variables to the constraints. void AddConstraintsWithSlackVariables( - const SparseMatrix &coefficients, const DenseColumn &left_hand_sides, - const DenseColumn &right_hand_sides, - const StrictITIVector &names, + const SparseMatrix& coefficients, const DenseColumn& left_hand_sides, + const DenseColumn& right_hand_sides, + const StrictITIVector& names, bool detect_integer_constraints_for_slack); // Swaps the content of this LinearProgram with the one passed as argument. 
// Works in O(1). - void Swap(LinearProgram *linear_program); + void Swap(LinearProgram* linear_program); // Removes the given column indices from the LinearProgram. // This needs to allocate O(num_variables) memory to update variable_table_. - void DeleteColumns(const DenseBooleanRow &columns_to_delete); + void DeleteColumns(const DenseBooleanRow& columns_to_delete); // Removes slack variables from the linear program. The method restores the // bounds on constraints from the bounds of the slack variables, resets the @@ -469,7 +468,7 @@ class LinearProgram { void DeleteSlackVariables(); // Scales the problem using the given scaler. - void Scale(SparseMatrixScaler *scaler); + void Scale(SparseMatrixScaler* scaler); // While Scale() makes sure the coefficients inside the linear program matrix // are in [-1, 1], the objective coefficients, variable bounds and constraint @@ -502,7 +501,7 @@ class LinearProgram { // Removes the given row indices from the LinearProgram. // This needs to allocate O(num_variables) memory. - void DeleteRows(const DenseBooleanColumn &rows_to_delete); + void DeleteRows(const DenseBooleanColumn& rows_to_delete); // Does basic checking on the linear program: // - returns false if some coefficient are NaNs. @@ -516,8 +515,8 @@ class LinearProgram { // variable_upper_bounds. If the new bounds of all variables are non-empty, // returns true; otherwise, returns false. bool UpdateVariableBoundsToIntersection( - const DenseRow &variable_lower_bounds, - const DenseRow &variable_upper_bounds); + const DenseRow& variable_lower_bounds, + const DenseRow& variable_upper_bounds); // Returns true if the linear program is in equation form Ax = 0 and all slack // variables have been added. This is also called "computational form" in some @@ -565,7 +564,7 @@ class LinearProgram { // linear program with the data from the given linear program. The method does // not touch the data structures for storing constraints. 
void PopulateNameObjectiveAndVariablesFromLinearProgram( - const LinearProgram &linear_program); + const LinearProgram& linear_program); // Stores the linear program coefficients. SparseMatrix matrix_; @@ -635,7 +634,7 @@ class LinearProgram { // If true, checks bounds in debug mode. bool dcheck_bounds_ = true; - friend void Scale(LinearProgram *lp, SparseMatrixScaler *scaler, + friend void Scale(LinearProgram* lp, SparseMatrixScaler* scaler, GlopParameters::ScalingAlgorithm scaling_method); DISALLOW_COPY_AND_ASSIGN(LinearProgram); diff --git a/ortools/lp_data/lp_data_utils.cc b/ortools/lp_data/lp_data_utils.cc index b5d13ba378..280766354d 100644 --- a/ortools/lp_data/lp_data_utils.cc +++ b/ortools/lp_data/lp_data_utils.cc @@ -16,15 +16,15 @@ namespace operations_research { namespace glop { -void ComputeSlackVariablesValues(const LinearProgram &linear_program, - DenseRow *values) { +void ComputeSlackVariablesValues(const LinearProgram& linear_program, + DenseRow* values) { DCHECK(values); DCHECK_EQ(linear_program.num_variables(), values->size()); // If there are no slack variable, we can give up. if (linear_program.GetFirstSlackVariable() == kInvalidCol) return; - const auto &transposed_matrix = linear_program.GetTransposeSparseMatrix(); + const auto& transposed_matrix = linear_program.GetTransposeSparseMatrix(); for (RowIndex row(0); row < linear_program.num_constraints(); row++) { const ColIndex slack_variable = linear_program.GetSlackVariable(row); @@ -36,9 +36,9 @@ void ComputeSlackVariablesValues(const LinearProgram &linear_program, const RowIndex transposed_slack = ColToRowIndex(slack_variable); Fractional activation = 0.0; // Row in the initial matrix (column in the transposed). 
- const SparseColumn &sparse_row = + const SparseColumn& sparse_row = transposed_matrix.column(RowToColIndex(row)); - for (const auto &entry : sparse_row) { + for (const auto& entry : sparse_row) { if (transposed_slack == entry.index()) continue; activation += (*values)[RowToColIndex(entry.index())] * entry.coefficient(); @@ -49,7 +49,7 @@ void ComputeSlackVariablesValues(const LinearProgram &linear_program, // This is separated from the LinearProgram class because of a cyclic dependency // when scaling as an LP. -void Scale(LinearProgram *lp, SparseMatrixScaler *scaler) { +void Scale(LinearProgram* lp, SparseMatrixScaler* scaler) { // Create GlopParameters proto to get default scaling algorithm. GlopParameters params; Scale(lp, scaler, params.scaling_method()); @@ -57,22 +57,25 @@ void Scale(LinearProgram *lp, SparseMatrixScaler *scaler) { // This is separated from LinearProgram class because of a cyclic dependency // when scaling as an LP. -void Scale(LinearProgram *lp, SparseMatrixScaler *scaler, +void Scale(LinearProgram* lp, SparseMatrixScaler* scaler, GlopParameters::ScalingAlgorithm scaling_method) { scaler->Init(&lp->matrix_); scaler->Scale( scaling_method); // Compute R and C, and replace the matrix A by R.A.C - scaler->ScaleRowVector(false, &lp->objective_coefficients_); // oc = oc.C - scaler->ScaleRowVector(true, &lp->variable_upper_bounds_); // cl = cl.C^-1 - scaler->ScaleRowVector(true, &lp->variable_lower_bounds_); // cu = cu.C^-1 + scaler->ScaleRowVector(false, + &lp->objective_coefficients_); // oc = oc.C + scaler->ScaleRowVector(true, + &lp->variable_upper_bounds_); // cl = cl.C^-1 + scaler->ScaleRowVector(true, + &lp->variable_lower_bounds_); // cu = cu.C^-1 scaler->ScaleColumnVector(false, &lp->constraint_upper_bounds_); // rl = R.rl scaler->ScaleColumnVector(false, &lp->constraint_lower_bounds_); // ru = R.ru lp->transpose_matrix_is_consistent_ = false; } -void LpScalingHelper::Scale(LinearProgram *lp) { Scale(GlopParameters(), lp); } +void 
LpScalingHelper::Scale(LinearProgram* lp) { Scale(GlopParameters(), lp); } -void LpScalingHelper::Scale(const GlopParameters ¶ms, LinearProgram *lp) { +void LpScalingHelper::Scale(const GlopParameters& params, LinearProgram* lp) { scaler_.Clear(); ::operations_research::glop::Scale(lp, &scaler_, params.scaling_method()); bound_scaling_factor_ = 1.0 / lp->ScaleBounds(); @@ -116,7 +119,7 @@ Fractional LpScalingHelper::UnscaleConstraintActivity(RowIndex row, } void LpScalingHelper::UnscaleUnitRowLeftSolve( - ColIndex basis_col, ScatteredRow *left_inverse) const { + ColIndex basis_col, ScatteredRow* left_inverse) const { const Fractional global_factor = scaler_.ColUnscalingFactor(basis_col); // We have left_inverse * [RowScale * B * ColScale] = unit_row. @@ -135,8 +138,8 @@ void LpScalingHelper::UnscaleUnitRowLeftSolve( } void LpScalingHelper::UnscaleColumnRightSolve( - const RowToColMapping &basis, ColIndex col, - ScatteredColumn *right_inverse) const { + const RowToColMapping& basis, ColIndex col, + ScatteredColumn* right_inverse) const { const Fractional global_factor = scaler_.ColScalingFactor(col); // [RowScale * B * BColScale] * inverse = RowScale * column * ColScale. 
diff --git a/ortools/lp_data/lp_decomposer.cc b/ortools/lp_data/lp_decomposer.cc index 1199d116d4..c756e5ed06 100644 --- a/ortools/lp_data/lp_decomposer.cc +++ b/ortools/lp_data/lp_decomposer.cc @@ -29,19 +29,19 @@ namespace glop { LPDecomposer::LPDecomposer() : original_problem_(nullptr), clusters_(), mutex_() {} -void LPDecomposer::Decompose(const LinearProgram *linear_problem) { +void LPDecomposer::Decompose(const LinearProgram* linear_problem) { absl::MutexLock mutex_lock(&mutex_); original_problem_ = linear_problem; clusters_.clear(); - const SparseMatrix &transposed_matrix = + const SparseMatrix& transposed_matrix = original_problem_->GetTransposeSparseMatrix(); MergingPartition partition(original_problem_->num_variables().value()); // Iterate on all constraints, and merge all variables of each constraint. const ColIndex num_ct = RowToColIndex(original_problem_->num_constraints()); for (ColIndex ct(0); ct < num_ct; ++ct) { - const SparseColumn &sparse_constraint = transposed_matrix.column(ct); + const SparseColumn& sparse_constraint = transposed_matrix.column(ct); if (sparse_constraint.num_entries() > 1) { const RowIndex first_row = sparse_constraint.GetFirstRow(); for (EntryIndex e(1); e < sparse_constraint.num_entries(); ++e) { @@ -67,12 +67,12 @@ int LPDecomposer::GetNumberOfProblems() const { return clusters_.size(); } -const LinearProgram &LPDecomposer::original_problem() const { +const LinearProgram& LPDecomposer::original_problem() const { absl::MutexLock mutex_lock(&mutex_); return *original_problem_; } -void LPDecomposer::ExtractLocalProblem(int problem_index, LinearProgram *lp) { +void LPDecomposer::ExtractLocalProblem(int problem_index, LinearProgram* lp) { CHECK(lp != nullptr); CHECK_GE(problem_index, 0); CHECK_LT(problem_index, clusters_.size()); @@ -80,7 +80,7 @@ void LPDecomposer::ExtractLocalProblem(int problem_index, LinearProgram *lp) { lp->Clear(); absl::MutexLock mutex_lock(&mutex_); - const std::vector &cluster = clusters_[problem_index]; 
+ const std::vector& cluster = clusters_[problem_index]; StrictITIVector global_to_local( original_problem_->num_variables(), kInvalidCol); SparseBitset constraints_to_use( @@ -88,8 +88,8 @@ void LPDecomposer::ExtractLocalProblem(int problem_index, LinearProgram *lp) { lp->SetMaximizationProblem(original_problem_->IsMaximizationProblem()); // Create variables and get all constraints of the cluster. - const SparseMatrix &original_matrix = original_problem_->GetSparseMatrix(); - const SparseMatrix &transposed_matrix = + const SparseMatrix& original_matrix = original_problem_->GetSparseMatrix(); + const SparseMatrix& transposed_matrix = original_problem_->GetTransposeSparseMatrix(); for (int i = 0; i < cluster.size(); ++i) { const ColIndex global_col = cluster[i]; @@ -134,15 +134,15 @@ void LPDecomposer::ExtractLocalProblem(int problem_index, LinearProgram *lp) { } DenseRow LPDecomposer::AggregateAssignments( - const std::vector &assignments) const { + const std::vector& assignments) const { CHECK_EQ(assignments.size(), clusters_.size()); absl::MutexLock mutex_lock(&mutex_); DenseRow global_assignment(original_problem_->num_variables(), Fractional(0.0)); for (int problem = 0; problem < assignments.size(); ++problem) { - const DenseRow &local_assignment = assignments[problem]; - const std::vector &cluster = clusters_[problem]; + const DenseRow& local_assignment = assignments[problem]; + const std::vector& cluster = clusters_[problem]; for (int i = 0; i < local_assignment.size(); ++i) { const ColIndex global_col = cluster[i]; global_assignment[global_col] = local_assignment[ColIndex(i)]; @@ -152,13 +152,13 @@ DenseRow LPDecomposer::AggregateAssignments( } DenseRow LPDecomposer::ExtractLocalAssignment(int problem_index, - const DenseRow &assignment) { + const DenseRow& assignment) { CHECK_GE(problem_index, 0); CHECK_LT(problem_index, clusters_.size()); CHECK_EQ(assignment.size(), original_problem_->num_variables()); absl::MutexLock mutex_lock(&mutex_); - const 
std::vector &cluster = clusters_[problem_index]; + const std::vector& cluster = clusters_[problem_index]; DenseRow local_assignment(ColIndex(cluster.size()), Fractional(0.0)); for (int i = 0; i < cluster.size(); ++i) { const ColIndex global_col = cluster[i]; diff --git a/ortools/lp_data/lp_print_utils.cc b/ortools/lp_data/lp_print_utils.cc index 6f42c53237..29587249ec 100644 --- a/ortools/lp_data/lp_print_utils.cc +++ b/ortools/lp_data/lp_print_utils.cc @@ -50,7 +50,7 @@ std::string Stringify(const Fractional x, bool fraction) { // Returns a string that pretty-prints a monomial ax with coefficient // a and variable name x -std::string StringifyMonomial(const Fractional a, const std::string &x, +std::string StringifyMonomial(const Fractional a, const std::string& x, bool fraction) { if (a == 0.0) return ""; return a > 0.0 diff --git a/ortools/lp_data/lp_types.h b/ortools/lp_data/lp_types.h index 7f9b5c8c61..ef6f3edc2d 100644 --- a/ortools/lp_data/lp_types.h +++ b/ortools/lp_data/lp_types.h @@ -103,49 +103,39 @@ enum class ProblemStatus : int8 { // a feasible solution. OPTIMAL, - // The problem has been proven primal-infeasible. Note that the problem is - // not - // necessarily DUAL_UNBOUNDED (See Chvatal p.60). The solver does not have - // a + // The problem has been proven primal-infeasible. Note that the problem is not + // necessarily DUAL_UNBOUNDED (See Chvatal p.60). The solver does not have a // dual unbounded ray in this case. PRIMAL_INFEASIBLE, - // The problem has been proven dual-infeasible. Note that the problem is - // not + // The problem has been proven dual-infeasible. Note that the problem is not // necessarily PRIMAL_UNBOUNDED (See Chvatal p.60). The solver does // note have a primal unbounded ray in this case, DUAL_INFEASIBLE, // The problem is either INFEASIBLE or UNBOUNDED (this applies to both the - // primal and dual algorithms). This status is only returned by the - // presolve + // primal and dual algorithms). 
This status is only returned by the presolve // step and means that a primal or dual unbounded ray was found during - // presolve. Note that because some presolve techniques assume that a - // feasible + // presolve. Note that because some presolve techniques assume that a feasible // solution exists to simplify the problem further, it is difficult to // distinguish between infeasibility and unboundedness. // // If a client needs to distinguish, it is possible to run the primal - // algorithm on the same problem with a 0 objective function to know if - // the + // algorithm on the same problem with a 0 objective function to know if the // problem was PRIMAL_INFEASIBLE. INFEASIBLE_OR_UNBOUNDED, // The problem has been proven feasible and unbounded. That means that the - // problem is DUAL_INFEASIBLE and that the solver has a primal unbounded - // ray. + // problem is DUAL_INFEASIBLE and that the solver has a primal unbounded ray. PRIMAL_UNBOUNDED, - // The problem has been proven dual-feasible and dual-unbounded. That - // means - // the problem is PRIMAL_INFEASIBLE and that the solver has a dual - // unbounded + // The problem has been proven dual-feasible and dual-unbounded. That means + // the problem is PRIMAL_INFEASIBLE and that the solver has a dual unbounded // ray to prove it. DUAL_UNBOUNDED, // All the statuses below correspond to a case where the solver was - // interrupted. This can happen because of a timeout, an iteration limit - // or an + // interrupted. This can happen because of a timeout, an iteration limit or an // error. // The solver didn't had a chance to prove anything. @@ -155,8 +145,7 @@ enum class ProblemStatus : int8 { // PRIMAL_UNBOUNDED. PRIMAL_FEASIBLE, - // The problem has been proven dual-feasible, but may still be - // DUAL_UNBOUNDED. + // The problem has been proven dual-feasible, but may still be DUAL_UNBOUNDED. // That means that if the primal is feasible, then it has a finite optimal // solution. 
DUAL_FEASIBLE, @@ -167,8 +156,7 @@ enum class ProblemStatus : int8 { // The input problem was invalid (see LinearProgram.IsValid()). INVALID_PROBLEM, - // The problem was solved to a feasible status, but the solution checker - // found + // The problem was solved to a feasible status, but the solution checker found // the primal and/or dual infeasibilities too important for the specified // parameters. IMPRECISE, @@ -177,7 +165,7 @@ enum class ProblemStatus : int8 { // Returns the string representation of the ProblemStatus enum. std::string GetProblemStatusString(ProblemStatus problem_status); -inline std::ostream &operator<<(std::ostream &os, ProblemStatus status) { +inline std::ostream& operator<<(std::ostream& os, ProblemStatus status) { os << GetProblemStatusString(status); return os; } @@ -194,7 +182,7 @@ enum class VariableType : int8 { // Returns the string representation of the VariableType enum. std::string GetVariableTypeString(VariableType variable_type); -inline std::ostream &operator<<(std::ostream &os, VariableType type) { +inline std::ostream& operator<<(std::ostream& os, VariableType type) { os << GetVariableTypeString(type); return os; } @@ -212,10 +200,8 @@ enum class VariableStatus : int8 { // Only possible status of a FIXED_VARIABLE not in the basis. The variable // value should be exactly equal to its bounds (which are the same). FIXED_VALUE, - // Only possible statuses of a non-basic variable which is not - // UNCONSTRAINED - // or FIXED. The variable value should be at its exact specified bound - // (which + // Only possible statuses of a non-basic variable which is not UNCONSTRAINED + // or FIXED. The variable value should be at its exact specified bound (which // must be finite). AT_LOWER_BOUND, AT_UPPER_BOUND, @@ -227,7 +213,7 @@ enum class VariableStatus : int8 { // Returns the string representation of the VariableStatus enum. 
std::string GetVariableStatusString(VariableStatus status); -inline std::ostream &operator<<(std::ostream &os, VariableStatus status) { +inline std::ostream& operator<<(std::ostream& os, VariableStatus status) { os << GetVariableStatusString(status); return os; } @@ -249,7 +235,7 @@ enum class ConstraintStatus : int8 { // Returns the string representation of the ConstraintStatus enum. std::string GetConstraintStatusString(ConstraintStatus status); -inline std::ostream &operator<<(std::ostream &os, ConstraintStatus status) { +inline std::ostream& operator<<(std::ostream& os, ConstraintStatus status) { os << GetConstraintStatusString(status); return os; } @@ -275,17 +261,17 @@ class StrictITIVector : public gtl::ITIVector { #endif StrictITIVector() : ParentType() {} explicit StrictITIVector(IntType size) : ParentType(size.value()) {} - StrictITIVector(IntType size, const T &v) : ParentType(size.value(), v) {} + StrictITIVector(IntType size, const T& v) : ParentType(size.value(), v) {} template StrictITIVector(InputIteratorType first, InputIteratorType last) : ParentType(first, last) {} void resize(IntType size) { ParentType::resize(size.value()); } - void resize(IntType size, const T &v) { ParentType::resize(size.value(), v); } + void resize(IntType size, const T& v) { ParentType::resize(size.value(), v); } void reserve(IntType size) { ParentType::reserve(size.value()); } - void assign(IntType size, const T &v) { ParentType::assign(size.value(), v); } + void assign(IntType size, const T& v) { ParentType::assign(size.value(), v); } IntType size() const { return IntType(ParentType::size()); } @@ -372,19 +358,19 @@ class VectorIterator : EntryType { using Index = typename EntryType::Index; using Entry = EntryType; - VectorIterator(const Index *indices, const Fractional *coefficients, + VectorIterator(const Index* indices, const Fractional* coefficients, EntryIndex i) : EntryType(indices, coefficients, i) {} void operator++() { ++this->i_; } - bool operator!=(const 
VectorIterator &other) const { + bool operator!=(const VectorIterator& other) const { // This operator is intended for use in natural range iteration ONLY. // Therefore, we prefer to use '<' so that a buggy range iteration which // start point is *after* its end point stops immediately, instead of // iterating 2^(number of bits of EntryIndex) times. return this->i_ < other.i_; } - const Entry &operator*() const { return *this; } + const Entry& operator*() const { return *this; } }; // This is used during the deterministic time computation to convert a given diff --git a/ortools/lp_data/lp_utils.cc b/ortools/lp_data/lp_utils.cc index 91cdb0310d..14351a3408 100644 --- a/ortools/lp_data/lp_utils.cc +++ b/ortools/lp_data/lp_utils.cc @@ -19,7 +19,7 @@ namespace operations_research { namespace glop { template -Fractional SquaredNormTemplate(const SparseColumnLike &column) { +Fractional SquaredNormTemplate(const SparseColumnLike& column) { Fractional sum(0.0); for (const SparseColumn::Entry e : column) { sum += Square(e.coefficient()); @@ -27,15 +27,15 @@ Fractional SquaredNormTemplate(const SparseColumnLike &column) { return sum; } -Fractional SquaredNorm(const SparseColumn &v) { +Fractional SquaredNorm(const SparseColumn& v) { return SquaredNormTemplate(v); } -Fractional SquaredNorm(const ColumnView &v) { +Fractional SquaredNorm(const ColumnView& v) { return SquaredNormTemplate(v); } -Fractional PreciseSquaredNorm(const SparseColumn &v) { +Fractional PreciseSquaredNorm(const SparseColumn& v) { KahanSum sum; for (const SparseColumn::Entry e : v) { sum.Add(Square(e.coefficient())); @@ -43,7 +43,7 @@ Fractional PreciseSquaredNorm(const SparseColumn &v) { return sum.Value(); } -Fractional PreciseSquaredNorm(const ScatteredColumn &v) { +Fractional PreciseSquaredNorm(const ScatteredColumn& v) { if (v.ShouldUseDenseIteration()) { return PreciseSquaredNorm(v.values); } @@ -54,7 +54,7 @@ Fractional PreciseSquaredNorm(const ScatteredColumn &v) { return sum.Value(); } -Fractional 
SquaredNorm(const DenseColumn &column) { +Fractional SquaredNorm(const DenseColumn& column) { Fractional sum(0.0); RowIndex row(0); const size_t num_blocks = column.size().value() / 4; @@ -70,7 +70,7 @@ Fractional SquaredNorm(const DenseColumn &column) { return sum; } -Fractional PreciseSquaredNorm(const DenseColumn &column) { +Fractional PreciseSquaredNorm(const DenseColumn& column) { KahanSum sum; for (RowIndex row(0); row < column.size(); ++row) { sum.Add(Square(column[row])); @@ -78,7 +78,7 @@ Fractional PreciseSquaredNorm(const DenseColumn &column) { return sum.Value(); } -Fractional InfinityNorm(const DenseColumn &v) { +Fractional InfinityNorm(const DenseColumn& v) { Fractional infinity_norm = 0.0; for (RowIndex row(0); row < v.size(); ++row) { infinity_norm = std::max(infinity_norm, fabs(v[row])); @@ -87,7 +87,7 @@ Fractional InfinityNorm(const DenseColumn &v) { } template -Fractional InfinityNormTemplate(const SparseColumnLike &column) { +Fractional InfinityNormTemplate(const SparseColumnLike& column) { Fractional infinity_norm = 0.0; for (const SparseColumn::Entry e : column) { infinity_norm = std::max(infinity_norm, fabs(e.coefficient())); @@ -95,15 +95,15 @@ Fractional InfinityNormTemplate(const SparseColumnLike &column) { return infinity_norm; } -Fractional InfinityNorm(const SparseColumn &v) { +Fractional InfinityNorm(const SparseColumn& v) { return InfinityNormTemplate(v); } -Fractional InfinityNorm(const ColumnView &v) { +Fractional InfinityNorm(const ColumnView& v) { return InfinityNormTemplate(v); } -double Density(const DenseRow &row) { +double Density(const DenseRow& row) { if (row.empty()) return 0.0; int sum = 0.0; for (ColIndex col(0); col < row.size(); ++col) { @@ -112,7 +112,7 @@ double Density(const DenseRow &row) { return static_cast(sum) / row.size().value(); } -void RemoveNearZeroEntries(Fractional threshold, DenseRow *row) { +void RemoveNearZeroEntries(Fractional threshold, DenseRow* row) { if (threshold == Fractional(0.0)) return; for 
(ColIndex col(0); col < row->size(); ++col) { if (fabs((*row)[col]) < threshold) { @@ -121,7 +121,7 @@ void RemoveNearZeroEntries(Fractional threshold, DenseRow *row) { } } -void RemoveNearZeroEntries(Fractional threshold, DenseColumn *column) { +void RemoveNearZeroEntries(Fractional threshold, DenseColumn* column) { if (threshold == Fractional(0.0)) return; for (RowIndex row(0); row < column->size(); ++row) { if (fabs((*column)[row]) < threshold) { @@ -130,9 +130,9 @@ void RemoveNearZeroEntries(Fractional threshold, DenseColumn *column) { } } -Fractional RestrictedInfinityNorm(const ColumnView &column, - const DenseBooleanColumn &rows_to_consider, - RowIndex *row_index) { +Fractional RestrictedInfinityNorm(const ColumnView& column, + const DenseBooleanColumn& rows_to_consider, + RowIndex* row_index) { Fractional infinity_norm = 0.0; for (const SparseColumn::Entry e : column) { if (rows_to_consider[e.row()] && fabs(e.coefficient()) > infinity_norm) { @@ -143,7 +143,7 @@ Fractional RestrictedInfinityNorm(const ColumnView &column, return infinity_norm; } -void SetSupportToFalse(const ColumnView &column, DenseBooleanColumn *b) { +void SetSupportToFalse(const ColumnView& column, DenseBooleanColumn* b) { for (const SparseColumn::Entry e : column) { if (e.coefficient() != 0.0) { (*b)[e.row()] = false; @@ -151,7 +151,7 @@ void SetSupportToFalse(const ColumnView &column, DenseBooleanColumn *b) { } } -bool IsDominated(const ColumnView &column, const DenseColumn &radius) { +bool IsDominated(const ColumnView& column, const DenseColumn& radius) { for (const SparseColumn::Entry e : column) { DCHECK_GE(radius[e.row()], 0.0); if (fabs(e.coefficient()) > radius[e.row()]) return false; diff --git a/ortools/lp_data/lp_utils.h b/ortools/lp_data/lp_utils.h index ffc5eaf778..7d01af7f9d 100644 --- a/ortools/lp_data/lp_utils.h +++ b/ortools/lp_data/lp_utils.h @@ -44,8 +44,8 @@ static inline Fractional Fractionality(Fractional f) { // Returns the scalar product between u and v. 
// The precise versions use KahanSum and are about two times slower. template -Fractional ScalarProduct(const DenseRowOrColumn1 &u, - const DenseRowOrColumn2 &v) { +Fractional ScalarProduct(const DenseRowOrColumn1& u, + const DenseRowOrColumn2& v) { DCHECK_EQ(u.size().value(), v.size().value()); Fractional sum(0.0); typename DenseRowOrColumn1::IndexType i(0); @@ -79,9 +79,9 @@ Fractional ScalarProduct(const DenseRowOrColumn1 &u, // option is to skip the u[col] that are 0.0 rather than fetching the coeff // and doing a Fractional multiplication. template -Fractional ScalarProduct(const DenseRowOrColumn &u, const SparseColumn &v) { +Fractional ScalarProduct(const DenseRowOrColumn& u, const SparseColumn& v) { Fractional sum(0.0); - for (const SparseColumn::Entry &e : v) { + for (const SparseColumn::Entry e : v) { sum += u[typename DenseRowOrColumn::IndexType(e.row().value())] * e.coefficient(); } @@ -89,8 +89,8 @@ Fractional ScalarProduct(const DenseRowOrColumn &u, const SparseColumn &v) { } template -Fractional PreciseScalarProduct(const DenseRowOrColumn &u, - const DenseRowOrColumn2 &v) { +Fractional PreciseScalarProduct(const DenseRowOrColumn& u, + const DenseRowOrColumn2& v) { DCHECK_EQ(u.size().value(), v.size().value()); KahanSum sum; for (typename DenseRowOrColumn::IndexType i(0); i < u.size(); ++i) { @@ -100,10 +100,10 @@ Fractional PreciseScalarProduct(const DenseRowOrColumn &u, } template -Fractional PreciseScalarProduct(const DenseRowOrColumn &u, - const SparseColumn &v) { +Fractional PreciseScalarProduct(const DenseRowOrColumn& u, + const SparseColumn& v) { KahanSum sum; - for (const SparseColumn::Entry &e : v) { + for (const SparseColumn::Entry e : v) { sum.Add(u[typename DenseRowOrColumn::IndexType(e.row().value())] * e.coefficient()); } @@ -111,8 +111,8 @@ Fractional PreciseScalarProduct(const DenseRowOrColumn &u, } template -Fractional PreciseScalarProduct(const DenseRowOrColumn &u, - const ScatteredColumn &v) { +Fractional PreciseScalarProduct(const 
DenseRowOrColumn& u, + const ScatteredColumn& v) { DCHECK_EQ(u.size().value(), v.values.size().value()); if (v.ShouldUseDenseIteration()) { return PreciseScalarProduct(u, v.values); @@ -127,8 +127,8 @@ Fractional PreciseScalarProduct(const DenseRowOrColumn &u, // Computes a scalar product for entries with index not greater than max_index. template -Fractional PartialScalarProduct(const DenseRowOrColumn &u, - const SparseColumn &v, int max_index) { +Fractional PartialScalarProduct(const DenseRowOrColumn& u, + const SparseColumn& v, int max_index) { Fractional sum(0.0); for (const SparseColumn::Entry e : v) { if (e.row().value() >= max_index) { @@ -142,63 +142,63 @@ Fractional PartialScalarProduct(const DenseRowOrColumn &u, // Returns the norm^2 (sum of the square of the entries) of the given column. // The precise version uses KahanSum and are about two times slower. -Fractional SquaredNorm(const SparseColumn &v); -Fractional SquaredNorm(const DenseColumn &column); -Fractional SquaredNorm(const ColumnView &v); -Fractional PreciseSquaredNorm(const SparseColumn &v); -Fractional PreciseSquaredNorm(const DenseColumn &column); -Fractional PreciseSquaredNorm(const ScatteredColumn &v); +Fractional SquaredNorm(const SparseColumn& v); +Fractional SquaredNorm(const DenseColumn& column); +Fractional SquaredNorm(const ColumnView& v); +Fractional PreciseSquaredNorm(const SparseColumn& v); +Fractional PreciseSquaredNorm(const DenseColumn& column); +Fractional PreciseSquaredNorm(const ScatteredColumn& v); // Returns the maximum of the |coefficients| of 'v'. -Fractional InfinityNorm(const DenseColumn &v); -Fractional InfinityNorm(const SparseColumn &v); -Fractional InfinityNorm(const ColumnView &v); +Fractional InfinityNorm(const DenseColumn& v); +Fractional InfinityNorm(const SparseColumn& v); +Fractional InfinityNorm(const ColumnView& v); // Returns the fraction of non-zero entries of the given row. // // TODO(user): Take a Scattered row/col instead. 
This is only used to report // stats, but we should still have a sparse version to do it faster. -double Density(const DenseRow &row); +double Density(const DenseRow& row); // Sets to 0.0 all entries of the given row whose fabs() is lower than the given // threshold. -void RemoveNearZeroEntries(Fractional threshold, DenseRow *row); -void RemoveNearZeroEntries(Fractional threshold, DenseColumn *column); +void RemoveNearZeroEntries(Fractional threshold, DenseRow* row); +void RemoveNearZeroEntries(Fractional threshold, DenseColumn* column); // Transposition functions implemented below with a cast so it should actually // have no complexity cost. -const DenseRow &Transpose(const DenseColumn &col); -const DenseColumn &Transpose(const DenseRow &row); +const DenseRow& Transpose(const DenseColumn& col); +const DenseColumn& Transpose(const DenseRow& row); // Returns the maximum of the |coefficients| of the given column restricted // to the rows_to_consider. Also returns the first RowIndex 'row' that attains // this maximum. If the maximum is 0.0, then row_index is left untouched. -Fractional RestrictedInfinityNorm(const ColumnView &column, - const DenseBooleanColumn &rows_to_consider, - RowIndex *row_index); +Fractional RestrictedInfinityNorm(const ColumnView& column, + const DenseBooleanColumn& rows_to_consider, + RowIndex* row_index); // Sets to false the entry b[row] if column[row] is non null. // Note that if 'b' was true only on the non-zero position of column, this can // be used as a fast way to clear 'b'. -void SetSupportToFalse(const ColumnView &column, DenseBooleanColumn *b); +void SetSupportToFalse(const ColumnView& column, DenseBooleanColumn* b); // Returns true iff for all 'row' we have '|column[row]| <= radius[row]'. 
-bool IsDominated(const ColumnView &column, const DenseColumn &radius); +bool IsDominated(const ColumnView& column, const DenseColumn& radius); // This cast based implementation should be safe, as long as DenseRow and // DenseColumn are implemented by the same underlying type. // We still do some DCHECK to be sure it works as expected in addition to the // unit tests. -inline const DenseRow &Transpose(const DenseColumn &col) { - const DenseRow &row = reinterpret_cast(col); +inline const DenseRow& Transpose(const DenseColumn& col) { + const DenseRow& row = reinterpret_cast(col); DCHECK_EQ(col.size(), ColToRowIndex(row.size())); DCHECK(col.empty() || (&(col[RowIndex(0)]) == &(row[ColIndex(0)]))); return row; } // Similar comment as the other Transpose() implementation above. -inline const DenseColumn &Transpose(const DenseRow &row) { - const DenseColumn &col = reinterpret_cast(row); +inline const DenseColumn& Transpose(const DenseRow& row) { + const DenseColumn& col = reinterpret_cast(row); DCHECK_EQ(col.size(), ColToRowIndex(row.size())); DCHECK(col.empty() || (&(col[RowIndex(0)]) == &(row[ColIndex(0)]))); return col; @@ -206,8 +206,8 @@ inline const DenseColumn &Transpose(const DenseRow &row) { // Computes the positions of the non-zeros of a dense vector. template -inline void ComputeNonZeros(const StrictITIVector &input, - std::vector *non_zeros) { +inline void ComputeNonZeros(const StrictITIVector& input, + std::vector* non_zeros) { non_zeros->clear(); const IndexType end = input.size(); for (IndexType index(0); index < end; ++index) { @@ -219,7 +219,7 @@ inline void ComputeNonZeros(const StrictITIVector &input, // Returns true if the given Fractional container is all zeros. 
template -inline bool IsAllZero(const Container &input) { +inline bool IsAllZero(const Container& input) { for (Fractional value : input) { if (value != 0.0) return false; } @@ -228,16 +228,16 @@ inline bool IsAllZero(const Container &input) { // Returns true if the given vector of bool is all false. template -bool IsAllFalse(const BoolVector &v) { +bool IsAllFalse(const BoolVector& v) { return std::all_of(v.begin(), v.end(), [](bool value) { return !value; }); } // Permutes the given dense vector. It uses for this an all zero scratchpad. template inline void PermuteWithScratchpad( - const Permutation &permutation, - StrictITIVector *zero_scratchpad, - StrictITIVector *input_output) { + const Permutation& permutation, + StrictITIVector* zero_scratchpad, + StrictITIVector* input_output) { DCHECK(IsAllZero(*zero_scratchpad)); const IndexType size = input_output->size(); zero_scratchpad->swap(*input_output); @@ -257,14 +257,14 @@ inline void PermuteWithScratchpad( // non-zeros are the initial non-zeros positions of output. template inline void PermuteWithKnownNonZeros( - const Permutation &permutation, - StrictITIVector *zero_scratchpad, - StrictITIVector *output, - std::vector *non_zeros) { + const Permutation& permutation, + StrictITIVector* zero_scratchpad, + StrictITIVector* output, + std::vector* non_zeros) { DCHECK(IsAllZero(*zero_scratchpad)); zero_scratchpad->swap(*output); output->resize(zero_scratchpad->size(), 0.0); - for (IndexType &index_ref : *non_zeros) { + for (IndexType& index_ref : *non_zeros) { const Fractional value = (*zero_scratchpad)[index_ref]; (*zero_scratchpad)[index_ref] = 0.0; const IndexType permuted_index(permutation[index_ref]); @@ -276,7 +276,7 @@ inline void PermuteWithKnownNonZeros( // Sets a dense vector for which the non zeros are known to be non_zeros. 
template inline void ClearAndResizeVectorWithNonZeros(IndexType size, - ScatteredRowOrCol *v) { + ScatteredRowOrCol* v) { // Only use the sparse version if there is less than 5% non-zeros positions // compared to the wanted size. Note that in most cases the vector will // already be of the correct size. @@ -297,7 +297,7 @@ inline void ClearAndResizeVectorWithNonZeros(IndexType size, // Changes the sign of all the entries in the given vector. template -inline void ChangeSign(StrictITIVector *data) { +inline void ChangeSign(StrictITIVector* data) { const IndexType end = data->size(); for (IndexType i(0); i < end; ++i) { (*data)[i] = -(*data)[i]; diff --git a/ortools/lp_data/matrix_scaler.cc b/ortools/lp_data/matrix_scaler.cc index 2f475a0bed..2defd3a6c2 100644 --- a/ortools/lp_data/matrix_scaler.cc +++ b/ortools/lp_data/matrix_scaler.cc @@ -30,7 +30,7 @@ namespace glop { SparseMatrixScaler::SparseMatrixScaler() : matrix_(nullptr), row_scale_(), col_scale_() {} -void SparseMatrixScaler::Init(SparseMatrix *matrix) { +void SparseMatrixScaler::Init(SparseMatrix* matrix) { DCHECK(matrix != nullptr); matrix_ = matrix; row_scale_.resize(matrix_->num_rows(), 1.0); @@ -64,7 +64,7 @@ Fractional SparseMatrixScaler::ColScalingFactor(ColIndex col) const { std::string SparseMatrixScaler::DebugInformationString() const { // Note that some computations are redundant with the computations made in // some callees, but we do not care as this function is supposed to be called - // with absl::GetFlag(FLAGS_v) set to 1. + // with FLAGS_v set to 1. 
DCHECK(!row_scale_.empty()); DCHECK(!col_scale_.empty()); Fractional max_magnitude; @@ -141,8 +141,8 @@ void SparseMatrixScaler::Scale(GlopParameters::ScalingAlgorithm method) { namespace { template -void ScaleVector(const gtl::ITIVector &scale, bool up, - gtl::ITIVector *vector_to_scale) { +void ScaleVector(const gtl::ITIVector& scale, bool up, + gtl::ITIVector* vector_to_scale) { RETURN_IF_NULL(vector_to_scale); const I size(std::min(scale.size(), vector_to_scale->size())); if (up) { @@ -158,8 +158,8 @@ void ScaleVector(const gtl::ITIVector &scale, bool up, template ColIndex CreateOrGetScaleIndex( - InputIndexType num, LinearProgram *lp, - gtl::ITIVector *scale_var_indices) { + InputIndexType num, LinearProgram* lp, + gtl::ITIVector* scale_var_indices) { if ((*scale_var_indices)[num] == -1) { (*scale_var_indices)[num] = lp->CreateNewVariable(); } @@ -167,13 +167,13 @@ ColIndex CreateOrGetScaleIndex( } } // anonymous namespace -void SparseMatrixScaler::ScaleRowVector(bool up, DenseRow *row_vector) const { +void SparseMatrixScaler::ScaleRowVector(bool up, DenseRow* row_vector) const { DCHECK(row_vector != nullptr); ScaleVector(col_scale_, up, row_vector); } void SparseMatrixScaler::ScaleColumnVector(bool up, - DenseColumn *column_vector) const { + DenseColumn* column_vector) const { DCHECK(column_vector != nullptr); ScaleVector(row_scale_, up, column_vector); } @@ -300,7 +300,7 @@ ColIndex SparseMatrixScaler::EquilibrateColumns() { return num_cols_scaled; } -RowIndex SparseMatrixScaler::ScaleMatrixRows(const DenseColumn &factors) { +RowIndex SparseMatrixScaler::ScaleMatrixRows(const DenseColumn& factors) { // Matrix rows are scaled by dividing their coefficients by factors[row]. 
DCHECK(matrix_ != nullptr); const RowIndex num_rows = matrix_->num_rows(); @@ -317,7 +317,7 @@ RowIndex SparseMatrixScaler::ScaleMatrixRows(const DenseColumn &factors) { const ColIndex num_cols = matrix_->num_cols(); for (ColIndex col(0); col < num_cols; ++col) { - SparseColumn *const column = matrix_->mutable_column(col); + SparseColumn* const column = matrix_->mutable_column(col); if (column != nullptr) { column->ComponentWiseDivide(factors); } @@ -332,7 +332,7 @@ void SparseMatrixScaler::ScaleMatrixColumn(ColIndex col, Fractional factor) { col_scale_[col] *= factor; DCHECK_NE(0.0, factor); - SparseColumn *const column = matrix_->mutable_column(col); + SparseColumn* const column = matrix_->mutable_column(col); if (column != nullptr) { column->DivideByConstant(factor); } @@ -346,7 +346,7 @@ void SparseMatrixScaler::Unscale() { const Fractional column_scale = col_scale_[col]; DCHECK_NE(0.0, column_scale); - SparseColumn *const column = matrix_->mutable_column(col); + SparseColumn* const column = matrix_->mutable_column(col); if (column != nullptr) { column->MultiplyByConstant(column_scale); column->ComponentWiseMultiply(row_scale_); @@ -391,7 +391,7 @@ Status SparseMatrixScaler::LPScale() { matrix_->CleanUp(); const ColIndex num_cols = matrix_->num_cols(); for (ColIndex col(0); col < num_cols; ++col) { - SparseColumn *const column = matrix_->mutable_column(col); + SparseColumn* const column = matrix_->mutable_column(col); // This is the variable representing the log of the scale factor for col. 
const ColIndex column_scale = CreateOrGetScaleIndex( col, linear_program.get(), &col_scale_var_indices); @@ -408,10 +408,8 @@ Status SparseMatrixScaler::LPScale() { // This is derived from the formulation in // min β // Subject to: - // ∀ c∈C, v∈V, p_{c,v} ≠ 0.0, w_{c,v} + s^{var}_v + s^{comb}_c + - // β ≥ 0.0 - // ∀ c∈C, v∈V, p_{c,v} ≠ 0.0, w_{c,v} + s^{var}_v + s^{comb}_c - // ≤ β + // ∀ c∈C, v∈V, p_{c,v} ≠ 0.0, w_{c,v} + s^{var}_v + s^{comb}_c + β ≥ 0.0 + // ∀ c∈C, v∈V, p_{c,v} ≠ 0.0, w_{c,v} + s^{var}_v + s^{comb}_c ≤ β // If a variable is integer, its scale factor is zero. // Start with the constraint w_cv + s_c + s_v + beta >= 0. diff --git a/ortools/lp_data/matrix_utils.cc b/ortools/lp_data/matrix_utils.cc index 79c55a21fe..953a0cb33d 100644 --- a/ortools/lp_data/matrix_utils.cc +++ b/ortools/lp_data/matrix_utils.cc @@ -27,7 +27,7 @@ namespace { // // See the header comment on FindProportionalColumns() for the exact definition // of two proportional columns with a given tolerance. -bool AreColumnsProportional(const SparseColumn &a, const SparseColumn &b, +bool AreColumnsProportional(const SparseColumn& a, const SparseColumn& b, Fractional tolerance) { DCHECK(a.IsCleanedUp()); DCHECK(b.IsCleanedUp()); @@ -64,7 +64,7 @@ struct ColumnFingerprint { // two given columns, then in a sorted list of columns // AreProportionalCandidates() will be true for all the pairs of columns // between the two given ones (included). - bool operator<(const ColumnFingerprint &other) const { + bool operator<(const ColumnFingerprint& other) const { if (hash == other.hash) { return value < other.value; } @@ -85,7 +85,7 @@ bool AreProportionalCandidates(ColumnFingerprint a, ColumnFingerprint b, // - A hash value of the column non-zero pattern. // - A double value which should be the same for two proportional columns // modulo numerical errors. 
-ColumnFingerprint ComputeFingerprint(ColIndex col, const SparseColumn &column) { +ColumnFingerprint ComputeFingerprint(ColIndex col, const SparseColumn& column) { int64 non_zero_pattern_hash = 0; Fractional min_abs = std::numeric_limits::max(); Fractional max_abs = 0.0; @@ -112,7 +112,7 @@ ColumnFingerprint ComputeFingerprint(ColIndex col, const SparseColumn &column) { } // namespace -ColMapping FindProportionalColumns(const SparseMatrix &matrix, +ColMapping FindProportionalColumns(const SparseMatrix& matrix, Fractional tolerance) { const ColIndex num_cols = matrix.num_cols(); ColMapping mapping(num_cols, kInvalidCol); @@ -169,7 +169,7 @@ ColMapping FindProportionalColumns(const SparseMatrix &matrix, } ColMapping FindProportionalColumnsUsingSimpleAlgorithm( - const SparseMatrix &matrix, Fractional tolerance) { + const SparseMatrix& matrix, Fractional tolerance) { const ColIndex num_cols = matrix.num_cols(); ColMapping mapping(num_cols, kInvalidCol); for (ColIndex col_a(0); col_a < num_cols; ++col_a) { @@ -188,8 +188,8 @@ ColMapping FindProportionalColumnsUsingSimpleAlgorithm( } bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, - const SparseMatrix &matrix_a, - const CompactSparseMatrix &matrix_b) { + const SparseMatrix& matrix_a, + const CompactSparseMatrix& matrix_b) { // TODO(user): Also DCHECK() that matrix_b is ordered by rows. 
DCHECK(matrix_a.IsCleanedUp()); if (num_rows > matrix_a.num_rows() || num_rows > matrix_b.num_rows() || @@ -197,8 +197,8 @@ bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, return false; } for (ColIndex col(0); col < num_cols; ++col) { - const SparseColumn &col_a = matrix_a.column(col); - const ColumnView &col_b = matrix_b.column(col); + const SparseColumn& col_a = matrix_a.column(col); + const ColumnView& col_b = matrix_b.column(col); const EntryIndex end = std::min(col_a.num_entries(), col_b.num_entries()); if (end < col_a.num_entries() && col_a.EntryRow(end) < num_rows) { return false; @@ -228,13 +228,13 @@ bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, return true; } -bool IsRightMostSquareMatrixIdentity(const SparseMatrix &matrix) { +bool IsRightMostSquareMatrixIdentity(const SparseMatrix& matrix) { DCHECK(matrix.IsCleanedUp()); if (matrix.num_rows().value() > matrix.num_cols().value()) return false; const ColIndex first_identity_col = matrix.num_cols() - RowToColIndex(matrix.num_rows()); for (ColIndex col = first_identity_col; col < matrix.num_cols(); ++col) { - const SparseColumn &column = matrix.column(col); + const SparseColumn& column = matrix.column(col); if (column.num_entries() != 1 || column.EntryCoefficient(EntryIndex(0)) != 1.0) { return false; diff --git a/ortools/lp_data/model_reader.cc b/ortools/lp_data/model_reader.cc index 6ffee483ca..4a61ee3e3a 100644 --- a/ortools/lp_data/model_reader.cc +++ b/ortools/lp_data/model_reader.cc @@ -21,8 +21,8 @@ namespace operations_research { namespace glop { -bool LoadMPModelProtoFromModelOrRequest(const std::string &input_file_path, - MPModelProto *model) { +bool LoadMPModelProtoFromModelOrRequest(const std::string& input_file_path, + MPModelProto* model) { MPModelProto model_proto; MPModelRequest request_proto; ReadFileToProto(input_file_path, &model_proto); @@ -53,8 +53,8 @@ bool LoadMPModelProtoFromModelOrRequest(const std::string 
&input_file_path, return true; } -bool LoadLinearProgramFromModelOrRequest(const std::string &input_file_path, - LinearProgram *linear_program) { +bool LoadLinearProgramFromModelOrRequest(const std::string& input_file_path, + LinearProgram* linear_program) { MPModelProto model_proto; if (LoadMPModelProtoFromModelOrRequest(input_file_path, &model_proto)) { MPModelProtoToLinearProgram(model_proto, linear_program); diff --git a/ortools/lp_data/mps_reader.cc b/ortools/lp_data/mps_reader.cc index e8cbaefee0..e9002f711b 100644 --- a/ortools/lp_data/mps_reader.cc +++ b/ortools/lp_data/mps_reader.cc @@ -30,7 +30,7 @@ class MPSReaderImpl { // Parses instance from a file. We currently support LinearProgram and // MpModelProto for the Data type, but it should be easy to add more. template - absl::Status ParseFile(const std::string &file_name, Data *data, + absl::Status ParseFile(const std::string& file_name, Data* data, MPSReader::Form form); private: @@ -66,7 +66,7 @@ class MPSReaderImpl { bool IsCommentOrBlank() const; // Helper function that returns fields_[offset + index]. - const std::string &GetField(int offset, int index) const { + const std::string& GetField(int offset, int index) const { return fields_[offset + index]; } @@ -79,43 +79,43 @@ class MPSReaderImpl { // Line processor. template - absl::Status ProcessLine(const std::string &line, DataWrapper *data); + absl::Status ProcessLine(const std::string& line, DataWrapper* data); // Process section OBJSENSE in MPS file. template - absl::Status ProcessObjectiveSenseSection(DataWrapper *data); + absl::Status ProcessObjectiveSenseSection(DataWrapper* data); // Process section ROWS in the MPS file. template - absl::Status ProcessRowsSection(bool is_lazy, DataWrapper *data); + absl::Status ProcessRowsSection(bool is_lazy, DataWrapper* data); // Process section COLUMNS in the MPS file. 
template - absl::Status ProcessColumnsSection(DataWrapper *data); + absl::Status ProcessColumnsSection(DataWrapper* data); // Process section RHS in the MPS file. template - absl::Status ProcessRhsSection(DataWrapper *data); + absl::Status ProcessRhsSection(DataWrapper* data); // Process section RANGES in the MPS file. template - absl::Status ProcessRangesSection(DataWrapper *data); + absl::Status ProcessRangesSection(DataWrapper* data); // Process section BOUNDS in the MPS file. template - absl::Status ProcessBoundsSection(DataWrapper *data); + absl::Status ProcessBoundsSection(DataWrapper* data); // Process section INDICATORS in the MPS file. template - absl::Status ProcessIndicatorsSection(DataWrapper *data); + absl::Status ProcessIndicatorsSection(DataWrapper* data); // Process section SOS in the MPS file. absl::Status ProcessSosSection(); // Safely converts a string to a numerical type. Returns an error if the // string passed as parameter is ill-formed. - absl::StatusOr GetDoubleFromString(const std::string &str); - absl::StatusOr GetBoolFromString(const std::string &str); + absl::StatusOr GetDoubleFromString(const std::string& str); + absl::StatusOr GetBoolFromString(const std::string& str); // Different types of variables, as defined in the MPS file specification. // Note these are more precise than the ones in PrimalSimplex. @@ -142,34 +142,34 @@ class MPSReaderImpl { // Stores a bound value of a given type, for a given column name. template - absl::Status StoreBound(const std::string &bound_type_mnemonic, - const std::string &column_name, - const std::string &bound_value, DataWrapper *data); + absl::Status StoreBound(const std::string& bound_type_mnemonic, + const std::string& column_name, + const std::string& bound_value, DataWrapper* data); // Stores a coefficient value for a column number and a row name. 
template - absl::Status StoreCoefficient(int col, const std::string &row_name, - const std::string &row_value, - DataWrapper *data); + absl::Status StoreCoefficient(int col, const std::string& row_name, + const std::string& row_value, + DataWrapper* data); // Stores a right-hand-side value for a row name. template - absl::Status StoreRightHandSide(const std::string &row_name, - const std::string &row_value, - DataWrapper *data); + absl::Status StoreRightHandSide(const std::string& row_name, + const std::string& row_value, + DataWrapper* data); // Stores a range constraint of value row_value for a row name. template - absl::Status StoreRange(const std::string &row_name, - const std::string &range_value, DataWrapper *data); + absl::Status StoreRange(const std::string& row_name, + const std::string& range_value, DataWrapper* data); // Returns an InvalidArgumentError with the given error message, postfixed by // the current line of the .mps file (number and contents). - absl::Status InvalidArgumentError(const std::string &error_message); + absl::Status InvalidArgumentError(const std::string& error_message); // Appends the current line of the .mps file (number and contents) to the // status if it's an error message. - absl::Status AppendLineToError(const absl::Status &status); + absl::Status AppendLineToError(const absl::Status& status); // Boolean set to true if the reader expects a free-form MPS file. 
bool free_form_; @@ -243,20 +243,20 @@ class DataWrapper {}; template <> class DataWrapper { public: - explicit DataWrapper(LinearProgram *data) { data_ = data; } + explicit DataWrapper(LinearProgram* data) { data_ = data; } void SetUp() { data_->SetDcheckBounds(false); data_->Clear(); } - void SetName(const std::string &name) { data_->SetName(name); } + void SetName(const std::string& name) { data_->SetName(name); } void SetObjectiveDirection(bool maximize) { data_->SetMaximizationProblem(maximize); } - int FindOrCreateConstraint(const std::string &name) { + int FindOrCreateConstraint(const std::string& name) { return data_->FindOrCreateConstraint(name).value(); } void SetConstraintBounds(int index, double lower_bound, double upper_bound) { @@ -279,7 +279,7 @@ class DataWrapper { return data_->constraint_upper_bounds()[RowIndex(row_index)]; } - int FindOrCreateVariable(const std::string &name) { + int FindOrCreateVariable(const std::string& name) { return data_->FindOrCreateVariable(name).value(); } void SetVariableTypeToInteger(int index) { @@ -311,26 +311,26 @@ class DataWrapper { void CleanUp() { data_->CleanUp(); } private: - LinearProgram *data_; + LinearProgram* data_; }; template <> class DataWrapper { public: - explicit DataWrapper(MPModelProto *data) { data_ = data; } + explicit DataWrapper(MPModelProto* data) { data_ = data; } void SetUp() { data_->Clear(); } - void SetName(const std::string &name) { data_->set_name(name); } + void SetName(const std::string& name) { data_->set_name(name); } void SetObjectiveDirection(bool maximize) { data_->set_maximize(maximize); } - int FindOrCreateConstraint(const std::string &name) { + int FindOrCreateConstraint(const std::string& name) { const auto it = constraint_indices_by_name_.find(name); if (it != constraint_indices_by_name_.end()) return it->second; const int index = data_->constraint_size(); - MPConstraintProto *const constraint = data_->add_constraint(); + MPConstraintProto* const constraint = 
data_->add_constraint(); constraint->set_lower_bound(0.0); constraint->set_upper_bound(0.0); constraint->set_name(name); @@ -347,7 +347,7 @@ class DataWrapper { // there is, we will just add more than one entry from the same variable in // a constraint, and we let any program that ingests an MPModelProto handle // it. - MPConstraintProto *const constraint = data_->mutable_constraint(row_index); + MPConstraintProto* const constraint = data_->mutable_constraint(row_index); constraint->add_var_index(col_index); constraint->add_coefficient(coefficient); } @@ -361,12 +361,12 @@ class DataWrapper { return data_->constraint(row_index).upper_bound(); } - int FindOrCreateVariable(const std::string &name) { + int FindOrCreateVariable(const std::string& name) { const auto it = variable_indices_by_name_.find(name); if (it != variable_indices_by_name_.end()) return it->second; const int index = data_->variable_size(); - MPVariableProto *const variable = data_->add_variable(); + MPVariableProto* const variable = data_->add_variable(); variable->set_lower_bound(0.0); variable->set_name(name); variable_indices_by_name_[name] = index; @@ -401,11 +401,11 @@ class DataWrapper { } const int cst_index = it->second; - MPGeneralConstraintProto *const constraint = + MPGeneralConstraintProto* const constraint = data_->add_general_constraint(); constraint->set_name( absl::StrCat("ind_", data_->constraint(cst_index).name())); - MPIndicatorConstraint *const indicator = + MPIndicatorConstraint* const indicator = constraint->mutable_indicator_constraint(); *indicator->mutable_constraint() = data_->constraint(cst_index); indicator->set_var_index(var_index); @@ -421,7 +421,7 @@ class DataWrapper { } private: - MPModelProto *data_; + MPModelProto* data_; absl::flat_hash_map variable_indices_by_name_; absl::flat_hash_map constraint_indices_by_name_; @@ -429,7 +429,7 @@ class DataWrapper { }; template -absl::Status MPSReaderImpl::ParseFile(const std::string &file_name, Data *data, +absl::Status 
MPSReaderImpl::ParseFile(const std::string& file_name, Data* data, MPSReader::Form form) { if (data == nullptr) { return absl::InvalidArgumentError("NULL pointer passed as argument."); @@ -447,7 +447,7 @@ absl::Status MPSReaderImpl::ParseFile(const std::string &file_name, Data *data, Reset(); DataWrapper data_wrapper(data); data_wrapper.SetUp(); - for (const std::string &line : + for (const std::string& line : FileLines(file_name, FileLineIterator::REMOVE_INLINE_CR)) { RETURN_IF_ERROR(ProcessLine(line, &data_wrapper)); } @@ -457,8 +457,8 @@ absl::Status MPSReaderImpl::ParseFile(const std::string &file_name, Data *data, } template -absl::Status MPSReaderImpl::ProcessLine(const std::string &line, - DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessLine(const std::string& line, + DataWrapper* data) { ++line_num_; line_ = line; if (IsCommentOrBlank()) { @@ -540,7 +540,7 @@ absl::Status MPSReaderImpl::ProcessLine(const std::string &line, } template -absl::Status MPSReaderImpl::ProcessObjectiveSenseSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessObjectiveSenseSection(DataWrapper* data) { if (fields_.size() != 1 && fields_[0] != "MIN" && fields_[0] != "MAX") { return InvalidArgumentError("Expected objective sense (MAX or MIN)."); } @@ -550,7 +550,7 @@ absl::Status MPSReaderImpl::ProcessObjectiveSenseSection(DataWrapper *data) { template absl::Status MPSReaderImpl::ProcessRowsSection(bool is_lazy, - DataWrapper *data) { + DataWrapper* data) { if (fields_.size() < 2) { return InvalidArgumentError("Not enough fields in ROWS section."); } @@ -596,7 +596,7 @@ absl::Status MPSReaderImpl::ProcessRowsSection(bool is_lazy, } template -absl::Status MPSReaderImpl::ProcessColumnsSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessColumnsSection(DataWrapper* data) { // Take into account the INTORG and INTEND markers. 
if (absl::StrContains(line_, "'MARKER'")) { if (absl::StrContains(line_, "'INTORG'")) { @@ -619,9 +619,9 @@ absl::Status MPSReaderImpl::ProcessColumnsSection(DataWrapper *data) { if (fields_.size() < start_index + 3) { return InvalidArgumentError("Not enough fields in COLUMNS section."); } - const std::string &column_name = GetField(start_index, 0); - const std::string &row1_name = GetField(start_index, 1); - const std::string &row1_value = GetField(start_index, 2); + const std::string& column_name = GetField(start_index, 0); + const std::string& row1_name = GetField(start_index, 1); + const std::string& row1_value = GetField(start_index, 2); const int col = data->FindOrCreateVariable(column_name); is_binary_by_default_.resize(col + 1, false); if (in_integer_section_) { @@ -637,53 +637,53 @@ absl::Status MPSReaderImpl::ProcessColumnsSection(DataWrapper *data) { return InvalidArgumentError("Unexpected number of fields."); } if (fields_.size() - start_index > 4) { - const std::string &row2_name = GetField(start_index, 3); - const std::string &row2_value = GetField(start_index, 4); + const std::string& row2_name = GetField(start_index, 3); + const std::string& row2_value = GetField(start_index, 4); RETURN_IF_ERROR(StoreCoefficient(col, row2_name, row2_value, data)); } return absl::OkStatus(); } template -absl::Status MPSReaderImpl::ProcessRhsSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessRhsSection(DataWrapper* data) { const int start_index = free_form_ ? 
0 : 2; const int offset = start_index + GetFieldOffset(); if (fields_.size() < offset + 2) { return InvalidArgumentError("Not enough fields in RHS section."); } // const std::string& rhs_name = fields_[0]; is not used - const std::string &row1_name = GetField(offset, 0); - const std::string &row1_value = GetField(offset, 1); + const std::string& row1_name = GetField(offset, 0); + const std::string& row1_value = GetField(offset, 1); RETURN_IF_ERROR(StoreRightHandSide(row1_name, row1_value, data)); if (fields_.size() - start_index >= 4) { - const std::string &row2_name = GetField(offset, 2); - const std::string &row2_value = GetField(offset, 3); + const std::string& row2_name = GetField(offset, 2); + const std::string& row2_value = GetField(offset, 3); RETURN_IF_ERROR(StoreRightHandSide(row2_name, row2_value, data)); } return absl::OkStatus(); } template -absl::Status MPSReaderImpl::ProcessRangesSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessRangesSection(DataWrapper* data) { const int start_index = free_form_ ? 
0 : 2; const int offset = start_index + GetFieldOffset(); if (fields_.size() < offset + 2) { return InvalidArgumentError("Not enough fields in RHS section."); } // const std::string& range_name = fields_[0]; is not used - const std::string &row1_name = GetField(offset, 0); - const std::string &row1_value = GetField(offset, 1); + const std::string& row1_name = GetField(offset, 0); + const std::string& row1_value = GetField(offset, 1); RETURN_IF_ERROR(StoreRange(row1_name, row1_value, data)); if (fields_.size() - start_index >= 4) { - const std::string &row2_name = GetField(offset, 2); - const std::string &row2_value = GetField(offset, 3); + const std::string& row2_name = GetField(offset, 2); + const std::string& row2_value = GetField(offset, 3); RETURN_IF_ERROR(StoreRange(row2_name, row2_value, data)); } return absl::OkStatus(); } template -absl::Status MPSReaderImpl::ProcessBoundsSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessBoundsSection(DataWrapper* data) { if (fields_.size() < 3) { return InvalidArgumentError("Not enough fields in BOUNDS section."); } @@ -698,7 +698,7 @@ absl::Status MPSReaderImpl::ProcessBoundsSection(DataWrapper *data) { } template -absl::Status MPSReaderImpl::ProcessIndicatorsSection(DataWrapper *data) { +absl::Status MPSReaderImpl::ProcessIndicatorsSection(DataWrapper* data) { // TODO(user): Enforce section order. This section must come after // anything related to constraints, or we'll have partial data inside the // indicator constraints. 
@@ -732,9 +732,9 @@ absl::Status MPSReaderImpl::ProcessIndicatorsSection(DataWrapper *data) { template absl::Status MPSReaderImpl::StoreCoefficient(int col, - const std::string &row_name, - const std::string &row_value, - DataWrapper *data) { + const std::string& row_name, + const std::string& row_value, + DataWrapper* data) { if (row_name.empty() || row_name == "$") { return absl::OkStatus(); } @@ -755,9 +755,9 @@ absl::Status MPSReaderImpl::StoreCoefficient(int col, } template -absl::Status MPSReaderImpl::StoreRightHandSide(const std::string &row_name, - const std::string &row_value, - DataWrapper *data) { +absl::Status MPSReaderImpl::StoreRightHandSide(const std::string& row_name, + const std::string& row_value, + DataWrapper* data) { if (row_name.empty()) return absl::OkStatus(); if (row_name != objective_name_) { @@ -778,9 +778,9 @@ absl::Status MPSReaderImpl::StoreRightHandSide(const std::string &row_name, } template -absl::Status MPSReaderImpl::StoreRange(const std::string &row_name, - const std::string &range_value, - DataWrapper *data) { +absl::Status MPSReaderImpl::StoreRange(const std::string& row_name, + const std::string& range_value, + DataWrapper* data) { if (row_name.empty()) return absl::OkStatus(); const int row = data->FindOrCreateConstraint(row_name); @@ -807,10 +807,10 @@ absl::Status MPSReaderImpl::StoreRange(const std::string &row_name, } template -absl::Status MPSReaderImpl::StoreBound(const std::string &bound_type_mnemonic, - const std::string &column_name, - const std::string &bound_value, - DataWrapper *data) { +absl::Status MPSReaderImpl::StoreBound(const std::string& bound_type_mnemonic, + const std::string& column_name, + const std::string& bound_value, + DataWrapper* data) { const BoundTypeId bound_type_id = gtl::FindWithDefault( bound_name_to_id_map_, bound_type_mnemonic, UNKNOWN_BOUND_TYPE); if (bound_type_id == UNKNOWN_BOUND_TYPE) { @@ -986,7 +986,7 @@ std::string MPSReaderImpl::GetFirstWord() const { } bool 
MPSReaderImpl::IsCommentOrBlank() const { - const char *line = line_.c_str(); + const char* line = line_.c_str(); if (*line == '*') { return true; } @@ -999,7 +999,7 @@ bool MPSReaderImpl::IsCommentOrBlank() const { } absl::StatusOr MPSReaderImpl::GetDoubleFromString( - const std::string &str) { + const std::string& str) { double result; if (!absl::SimpleAtod(str, &result)) { return InvalidArgumentError( @@ -1011,7 +1011,7 @@ absl::StatusOr MPSReaderImpl::GetDoubleFromString( return result; } -absl::StatusOr MPSReaderImpl::GetBoolFromString(const std::string &str) { +absl::StatusOr MPSReaderImpl::GetBoolFromString(const std::string& str) { int result; if (!absl::SimpleAtoi(str, &result) || result < 0 || result > 1) { return InvalidArgumentError( @@ -1025,23 +1025,23 @@ absl::Status MPSReaderImpl::ProcessSosSection() { } absl::Status MPSReaderImpl::InvalidArgumentError( - const std::string &error_message) { + const std::string& error_message) { return AppendLineToError(absl::InvalidArgumentError(error_message)); } -absl::Status MPSReaderImpl::AppendLineToError(const absl::Status &status) { +absl::Status MPSReaderImpl::AppendLineToError(const absl::Status& status) { return util::StatusBuilder(status).SetAppend() << " Line " << line_num_ << ": \"" << line_ << "\"."; } // Parses instance from a file. 
-absl::Status MPSReader::ParseFile(const std::string &file_name, - LinearProgram *data, Form form) { +absl::Status MPSReader::ParseFile(const std::string& file_name, + LinearProgram* data, Form form) { return MPSReaderImpl().ParseFile(file_name, data, form); } -absl::Status MPSReader::ParseFile(const std::string &file_name, - MPModelProto *data, Form form) { +absl::Status MPSReader::ParseFile(const std::string& file_name, + MPModelProto* data, Form form) { return MPSReaderImpl().ParseFile(file_name, data, form); } diff --git a/ortools/lp_data/proto_utils.cc b/ortools/lp_data/proto_utils.cc index 6613bd3bac..a3863e5a0c 100644 --- a/ortools/lp_data/proto_utils.cc +++ b/ortools/lp_data/proto_utils.cc @@ -17,14 +17,14 @@ namespace operations_research { namespace glop { // Converts a LinearProgram to a MPModelProto. -void LinearProgramToMPModelProto(const LinearProgram &input, - MPModelProto *output) { +void LinearProgramToMPModelProto(const LinearProgram& input, + MPModelProto* output) { output->Clear(); output->set_name(input.name()); output->set_maximize(input.IsMaximizationProblem()); output->set_objective_offset(input.objective_offset()); for (ColIndex col(0); col < input.num_variables(); ++col) { - MPVariableProto *variable = output->add_variable(); + MPVariableProto* variable = output->add_variable(); variable->set_lower_bound(input.variable_lower_bounds()[col]); variable->set_upper_bound(input.variable_upper_bounds()[col]); variable->set_name(input.GetVariableName(col)); @@ -36,7 +36,7 @@ void LinearProgramToMPModelProto(const LinearProgram &input, SparseMatrix transpose; transpose.PopulateFromTranspose(input.GetSparseMatrix()); for (RowIndex row(0); row < input.num_constraints(); ++row) { - MPConstraintProto *constraint = output->add_constraint(); + MPConstraintProto* constraint = output->add_constraint(); constraint->set_lower_bound(input.constraint_lower_bounds()[row]); constraint->set_upper_bound(input.constraint_upper_bounds()[row]); 
constraint->set_name(input.GetConstraintName(row)); @@ -48,15 +48,15 @@ void LinearProgramToMPModelProto(const LinearProgram &input, } // Converts a MPModelProto to a LinearProgram. -void MPModelProtoToLinearProgram(const MPModelProto &input, - LinearProgram *output) { +void MPModelProtoToLinearProgram(const MPModelProto& input, + LinearProgram* output) { output->Clear(); output->SetName(input.name()); output->SetMaximizationProblem(input.maximize()); output->SetObjectiveOffset(input.objective_offset()); // TODO(user,user): clean up loops to use natural range iteration. for (int i = 0; i < input.variable_size(); ++i) { - const MPVariableProto &var = input.variable(i); + const MPVariableProto& var = input.variable(i); const ColIndex col = output->CreateNewVariable(); output->SetVariableName(col, var.name()); output->SetVariableBounds(col, var.lower_bound(), var.upper_bound()); @@ -66,7 +66,7 @@ void MPModelProtoToLinearProgram(const MPModelProto &input, } } for (int j = 0; j < input.constraint_size(); ++j) { - const MPConstraintProto &cst = input.constraint(j); + const MPConstraintProto& cst = input.constraint(j); const RowIndex row = output->CreateNewConstraint(); output->SetConstraintName(row, cst.name()); output->SetConstraintBounds(row, cst.lower_bound(), cst.upper_bound()); diff --git a/ortools/lp_data/sparse.cc b/ortools/lp_data/sparse.cc index 486e881889..f7d859a6cc 100644 --- a/ortools/lp_data/sparse.cc +++ b/ortools/lp_data/sparse.cc @@ -28,7 +28,7 @@ namespace { using ::util::Reverse; template -EntryIndex ComputeNumEntries(const Matrix &matrix) { +EntryIndex ComputeNumEntries(const Matrix& matrix) { EntryIndex num_entries(0); const ColIndex num_cols(matrix.num_cols()); for (ColIndex col(0); col < num_cols; ++col) { @@ -41,7 +41,7 @@ EntryIndex ComputeNumEntries(const Matrix &matrix) { // The 1-norm |A| is defined as max_j sum_i |a_ij| or // max_col sum_row |a(row,col)|. 
template -Fractional ComputeOneNormTemplate(const Matrix &matrix) { +Fractional ComputeOneNormTemplate(const Matrix& matrix) { Fractional norm(0.0); const ColIndex num_cols(matrix.num_cols()); for (ColIndex col(0); col < num_cols; ++col) { @@ -60,7 +60,7 @@ Fractional ComputeOneNormTemplate(const Matrix &matrix) { // The oo-norm |A| is defined as max_i sum_j |a_ij| or // max_row sum_col |a(row,col)|. template -Fractional ComputeInfinityNormTemplate(const Matrix &matrix) { +Fractional ComputeInfinityNormTemplate(const Matrix& matrix) { DenseColumn row_sum(matrix.num_rows(), 0.0); const ColIndex num_cols(matrix.num_cols()); for (ColIndex col(0); col < num_cols; ++col) { @@ -88,7 +88,7 @@ SparseMatrix::SparseMatrix() : columns_(), num_rows_(0) {} #if (!defined(_MSC_VER) || (_MSC_VER >= 1800)) SparseMatrix::SparseMatrix( - std::initializer_list > init_list) { + std::initializer_list> init_list) { ColIndex num_cols(0); num_rows_ = RowIndex(init_list.size()); RowIndex row(0); @@ -155,7 +155,7 @@ void SparseMatrix::AppendUnitVector(RowIndex row, Fractional value) { columns_.push_back(std::move(new_col)); } -void SparseMatrix::Swap(SparseMatrix *matrix) { +void SparseMatrix::Swap(SparseMatrix* matrix) { // We do not need to swap the different mutable scratchpads we use. columns_.swap(matrix->columns_); std::swap(num_rows_, matrix->num_rows_); @@ -178,7 +178,7 @@ void SparseMatrix::PopulateFromIdentity(ColIndex num_cols) { } template -void SparseMatrix::PopulateFromTranspose(const Matrix &input) { +void SparseMatrix::PopulateFromTranspose(const Matrix& input) { Reset(RowToColIndex(input.num_rows()), ColToRowIndex(input.num_cols())); // We do a first pass on the input matrix to resize the new columns properly. 
@@ -203,15 +203,15 @@ void SparseMatrix::PopulateFromTranspose(const Matrix &input) { DCHECK(IsCleanedUp()); } -void SparseMatrix::PopulateFromSparseMatrix(const SparseMatrix &matrix) { +void SparseMatrix::PopulateFromSparseMatrix(const SparseMatrix& matrix) { Reset(ColIndex(0), matrix.num_rows_); columns_ = matrix.columns_; } template void SparseMatrix::PopulateFromPermutedMatrix( - const Matrix &a, const RowPermutation &row_perm, - const ColumnPermutation &inverse_col_perm) { + const Matrix& a, const RowPermutation& row_perm, + const ColumnPermutation& inverse_col_perm) { const ColIndex num_cols = a.num_cols(); Reset(num_cols, a.num_rows()); for (ColIndex col(0); col < num_cols; ++col) { @@ -223,9 +223,9 @@ void SparseMatrix::PopulateFromPermutedMatrix( } void SparseMatrix::PopulateFromLinearCombination(Fractional alpha, - const SparseMatrix &a, + const SparseMatrix& a, Fractional beta, - const SparseMatrix &b) { + const SparseMatrix& b) { DCHECK_EQ(a.num_cols(), b.num_cols()); DCHECK_EQ(a.num_rows(), b.num_rows()); @@ -247,8 +247,8 @@ void SparseMatrix::PopulateFromLinearCombination(Fractional alpha, } } -void SparseMatrix::PopulateFromProduct(const SparseMatrix &a, - const SparseMatrix &b) { +void SparseMatrix::PopulateFromProduct(const SparseMatrix& a, + const SparseMatrix& b) { const ColIndex num_cols = b.num_cols(); const RowIndex num_rows = a.num_rows(); Reset(num_cols, num_rows); @@ -273,7 +273,7 @@ void SparseMatrix::PopulateFromProduct(const SparseMatrix &a, } } -void SparseMatrix::DeleteColumns(const DenseBooleanRow &columns_to_delete) { +void SparseMatrix::DeleteColumns(const DenseBooleanRow& columns_to_delete) { if (columns_to_delete.empty()) return; ColIndex new_index(0); const ColIndex num_cols = columns_.size(); @@ -287,7 +287,7 @@ void SparseMatrix::DeleteColumns(const DenseBooleanRow &columns_to_delete) { } void SparseMatrix::DeleteRows(RowIndex new_num_rows, - const RowPermutation &permutation) { + const RowPermutation& permutation) { 
DCHECK_EQ(num_rows_, permutation.size()); for (RowIndex row(0); row < num_rows_; ++row) { DCHECK_LT(permutation[row], new_num_rows); @@ -299,21 +299,21 @@ void SparseMatrix::DeleteRows(RowIndex new_num_rows, SetNumRows(new_num_rows); } -bool SparseMatrix::AppendRowsFromSparseMatrix(const SparseMatrix &matrix) { +bool SparseMatrix::AppendRowsFromSparseMatrix(const SparseMatrix& matrix) { const ColIndex end = num_cols(); if (end != matrix.num_cols()) { return false; } const RowIndex offset = num_rows(); for (ColIndex col(0); col < end; ++col) { - const SparseColumn &source_column = matrix.columns_[col]; + const SparseColumn& source_column = matrix.columns_[col]; columns_[col].AppendEntriesWithOffset(source_column, offset); } SetNumRows(offset + matrix.num_rows()); return true; } -void SparseMatrix::ApplyRowPermutation(const RowPermutation &row_perm) { +void SparseMatrix::ApplyRowPermutation(const RowPermutation& row_perm) { const ColIndex num_cols(columns_.size()); for (ColIndex col(0); col < num_cols; ++col) { columns_[col].ApplyRowPermutation(row_perm); @@ -324,7 +324,7 @@ Fractional SparseMatrix::LookUpValue(RowIndex row, ColIndex col) const { return columns_[col].LookUpCoefficient(row); } -bool SparseMatrix::Equals(const SparseMatrix &a, Fractional tolerance) const { +bool SparseMatrix::Equals(const SparseMatrix& a, Fractional tolerance) const { if (num_cols() != a.num_cols() || num_rows() != a.num_rows()) { return false; } @@ -366,8 +366,8 @@ bool SparseMatrix::Equals(const SparseMatrix &a, Fractional tolerance) const { return true; } -void SparseMatrix::ComputeMinAndMaxMagnitudes(Fractional *min_magnitude, - Fractional *max_magnitude) const { +void SparseMatrix::ComputeMinAndMaxMagnitudes(Fractional* min_magnitude, + Fractional* max_magnitude) const { RETURN_IF_NULL(min_magnitude); RETURN_IF_NULL(max_magnitude); *min_magnitude = kInfinity; @@ -426,15 +426,15 @@ Fractional MatrixView::ComputeInfinityNorm() const { // Instantiate needed templates. 
template void SparseMatrix::PopulateFromTranspose( - const SparseMatrix &input); + const SparseMatrix& input); template void SparseMatrix::PopulateFromPermutedMatrix( - const SparseMatrix &a, const RowPermutation &row_perm, - const ColumnPermutation &inverse_col_perm); + const SparseMatrix& a, const RowPermutation& row_perm, + const ColumnPermutation& inverse_col_perm); template void SparseMatrix::PopulateFromPermutedMatrix( - const CompactSparseMatrixView &a, const RowPermutation &row_perm, - const ColumnPermutation &inverse_col_perm); + const CompactSparseMatrixView& a, const RowPermutation& row_perm, + const ColumnPermutation& inverse_col_perm); -void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView &input) { +void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView& input) { num_cols_ = input.num_cols(); num_rows_ = input.num_rows(); const EntryIndex num_entries = input.num_entries(); @@ -454,7 +454,7 @@ void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView &input) { } void CompactSparseMatrix::PopulateFromTranspose( - const CompactSparseMatrix &input) { + const CompactSparseMatrix& input) { num_cols_ = RowToColIndex(input.num_rows()); num_rows_ = ColToRowIndex(input.num_cols()); @@ -488,7 +488,7 @@ void CompactSparseMatrix::PopulateFromTranspose( DCHECK_EQ(starts_.back(), rows_.size()); } -void TriangularMatrix::PopulateFromTranspose(const TriangularMatrix &input) { +void TriangularMatrix::PopulateFromTranspose(const TriangularMatrix& input) { CompactSparseMatrix::PopulateFromTranspose(input); // This takes care of the triangular special case. 
@@ -533,12 +533,12 @@ void TriangularMatrix::Reset(RowIndex num_rows, ColIndex col_capacity) { starts_[ColIndex(0)] = 0; } -ColIndex CompactSparseMatrix::AddDenseColumn(const DenseColumn &dense_column) { +ColIndex CompactSparseMatrix::AddDenseColumn(const DenseColumn& dense_column) { return AddDenseColumnPrefix(dense_column, RowIndex(0)); } ColIndex CompactSparseMatrix::AddDenseColumnPrefix( - const DenseColumn &dense_column, RowIndex start) { + const DenseColumn& dense_column, RowIndex start) { const RowIndex num_rows(dense_column.size()); for (RowIndex row(start); row < num_rows; ++row) { if (dense_column[row] != 0.0) { @@ -552,7 +552,7 @@ ColIndex CompactSparseMatrix::AddDenseColumnPrefix( } ColIndex CompactSparseMatrix::AddDenseColumnWithNonZeros( - const DenseColumn &dense_column, const std::vector &non_zeros) { + const DenseColumn& dense_column, const std::vector& non_zeros) { if (non_zeros.empty()) return AddDenseColumn(dense_column); for (const RowIndex row : non_zeros) { const Fractional value = dense_column[row]; @@ -567,7 +567,7 @@ ColIndex CompactSparseMatrix::AddDenseColumnWithNonZeros( } ColIndex CompactSparseMatrix::AddAndClearColumnWithNonZeros( - DenseColumn *column, std::vector *non_zeros) { + DenseColumn* column, std::vector* non_zeros) { for (const RowIndex row : *non_zeros) { const Fractional value = (*column)[row]; if (value != 0.0) { @@ -582,7 +582,7 @@ ColIndex CompactSparseMatrix::AddAndClearColumnWithNonZeros( return num_cols_ - 1; } -void CompactSparseMatrix::Swap(CompactSparseMatrix *other) { +void CompactSparseMatrix::Swap(CompactSparseMatrix* other) { std::swap(num_rows_, other->num_rows_); std::swap(num_cols_, other->num_cols_); coefficients_.swap(other->coefficients_); @@ -590,7 +590,7 @@ void CompactSparseMatrix::Swap(CompactSparseMatrix *other) { starts_.swap(other->starts_); } -void TriangularMatrix::Swap(TriangularMatrix *other) { +void TriangularMatrix::Swap(TriangularMatrix* other) { CompactSparseMatrix::Swap(other); 
diagonal_coefficients_.swap(other->diagonal_coefficients_); std::swap(first_non_identity_column_, other->first_non_identity_column_); @@ -637,7 +637,7 @@ void TriangularMatrix::AddDiagonalOnlyColumn(Fractional diagonal_value) { CloseCurrentColumn(diagonal_value); } -void TriangularMatrix::AddTriangularColumn(const ColumnView &column, +void TriangularMatrix::AddTriangularColumn(const ColumnView& column, RowIndex diagonal_row) { Fractional diagonal_value = 0.0; for (const SparseColumn::Entry e : column) { @@ -653,7 +653,7 @@ void TriangularMatrix::AddTriangularColumn(const ColumnView &column, } void TriangularMatrix::AddAndNormalizeTriangularColumn( - const SparseColumn &column, RowIndex diagonal_row, + const SparseColumn& column, RowIndex diagonal_row, Fractional diagonal_coefficient) { // TODO(user): use division by a constant using multiplication. for (const SparseColumn::Entry e : column) { @@ -670,7 +670,7 @@ void TriangularMatrix::AddAndNormalizeTriangularColumn( } void TriangularMatrix::AddTriangularColumnWithGivenDiagonalEntry( - const SparseColumn &column, RowIndex diagonal_row, + const SparseColumn& column, RowIndex diagonal_row, Fractional diagonal_value) { for (SparseColumn::Entry e : column) { DCHECK_NE(e.row(), diagonal_row); @@ -681,7 +681,7 @@ void TriangularMatrix::AddTriangularColumnWithGivenDiagonalEntry( } void TriangularMatrix::PopulateFromTriangularSparseMatrix( - const SparseMatrix &input) { + const SparseMatrix& input) { Reset(input.num_rows(), input.num_cols()); for (ColIndex col(0); col < input.num_cols(); ++col) { AddTriangularColumn(ColumnView(input.column(col)), ColToRowIndex(col)); @@ -710,7 +710,7 @@ bool TriangularMatrix::IsUpperTriangular() const { } void TriangularMatrix::ApplyRowPermutationToNonDiagonalEntries( - const RowPermutation &row_perm) { + const RowPermutation& row_perm) { EntryIndex num_entries = rows_.size(); for (EntryIndex i(0); i < num_entries; ++i) { rows_[i] = row_perm[rows_[i]]; @@ -718,7 +718,7 @@ void 
TriangularMatrix::ApplyRowPermutationToNonDiagonalEntries( } void TriangularMatrix::CopyColumnToSparseColumn(ColIndex col, - SparseColumn *output) const { + SparseColumn* output) const { output->Clear(); for (const EntryIndex i : Column(col)) { output->SetCoefficient(EntryRow(i), EntryCoefficient(i)); @@ -727,19 +727,19 @@ void TriangularMatrix::CopyColumnToSparseColumn(ColIndex col, output->CleanUp(); } -void TriangularMatrix::CopyToSparseMatrix(SparseMatrix *output) const { +void TriangularMatrix::CopyToSparseMatrix(SparseMatrix* output) const { output->PopulateFromZero(num_rows_, num_cols_); for (ColIndex col(0); col < num_cols_; ++col) { CopyColumnToSparseColumn(col, output->mutable_column(col)); } } -void TriangularMatrix::LowerSolve(DenseColumn *rhs) const { +void TriangularMatrix::LowerSolve(DenseColumn* rhs) const { LowerSolveStartingAt(ColIndex(0), rhs); } void TriangularMatrix::LowerSolveStartingAt(ColIndex start, - DenseColumn *rhs) const { + DenseColumn* rhs) const { if (all_diagonal_coefficients_are_one_) { LowerSolveStartingAtInternal(start, rhs); } else { @@ -749,7 +749,7 @@ void TriangularMatrix::LowerSolveStartingAt(ColIndex start, template void TriangularMatrix::LowerSolveStartingAtInternal(ColIndex start, - DenseColumn *rhs) const { + DenseColumn* rhs) const { RETURN_IF_NULL(rhs); const ColIndex begin = std::max(start, first_non_identity_column_); const ColIndex end = diagonal_coefficients_.size(); @@ -767,7 +767,7 @@ void TriangularMatrix::LowerSolveStartingAtInternal(ColIndex start, } } -void TriangularMatrix::UpperSolve(DenseColumn *rhs) const { +void TriangularMatrix::UpperSolve(DenseColumn* rhs) const { if (all_diagonal_coefficients_are_one_) { UpperSolveInternal(rhs); } else { @@ -776,7 +776,7 @@ void TriangularMatrix::UpperSolve(DenseColumn *rhs) const { } template -void TriangularMatrix::UpperSolveInternal(DenseColumn *rhs) const { +void TriangularMatrix::UpperSolveInternal(DenseColumn* rhs) const { RETURN_IF_NULL(rhs); const ColIndex end 
= first_non_identity_column_; for (ColIndex col(diagonal_coefficients_.size() - 1); col >= end; --col) { @@ -798,7 +798,7 @@ void TriangularMatrix::UpperSolveInternal(DenseColumn *rhs) const { } } -void TriangularMatrix::TransposeUpperSolve(DenseColumn *rhs) const { +void TriangularMatrix::TransposeUpperSolve(DenseColumn* rhs) const { if (all_diagonal_coefficients_are_one_) { TransposeUpperSolveInternal(rhs); } else { @@ -807,7 +807,7 @@ void TriangularMatrix::TransposeUpperSolve(DenseColumn *rhs) const { } template -void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn *rhs) const { +void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn* rhs) const { RETURN_IF_NULL(rhs); const ColIndex end = num_cols_; EntryIndex i = starts_[first_non_identity_column_]; @@ -828,7 +828,7 @@ void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn *rhs) const { } } -void TriangularMatrix::TransposeLowerSolve(DenseColumn *rhs) const { +void TriangularMatrix::TransposeLowerSolve(DenseColumn* rhs) const { if (all_diagonal_coefficients_are_one_) { TransposeLowerSolveInternal(rhs); } else { @@ -837,7 +837,7 @@ void TriangularMatrix::TransposeLowerSolve(DenseColumn *rhs) const { } template -void TriangularMatrix::TransposeLowerSolveInternal(DenseColumn *rhs) const { +void TriangularMatrix::TransposeLowerSolveInternal(DenseColumn* rhs) const { RETURN_IF_NULL(rhs); const ColIndex end = first_non_identity_column_; @@ -866,8 +866,8 @@ void TriangularMatrix::TransposeLowerSolveInternal(DenseColumn *rhs) const { } } -void TriangularMatrix::HyperSparseSolve(DenseColumn *rhs, - RowIndexVector *non_zero_rows) const { +void TriangularMatrix::HyperSparseSolve(DenseColumn* rhs, + RowIndexVector* non_zero_rows) const { if (all_diagonal_coefficients_are_one_) { HyperSparseSolveInternal(rhs, non_zero_rows); } else { @@ -877,7 +877,7 @@ void TriangularMatrix::HyperSparseSolve(DenseColumn *rhs, template void TriangularMatrix::HyperSparseSolveInternal( - DenseColumn *rhs, 
RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { RETURN_IF_NULL(rhs); int new_size = 0; for (const RowIndex row : *non_zero_rows) { @@ -897,7 +897,7 @@ void TriangularMatrix::HyperSparseSolveInternal( } void TriangularMatrix::HyperSparseSolveWithReversedNonZeros( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { if (all_diagonal_coefficients_are_one_) { HyperSparseSolveWithReversedNonZerosInternal(rhs, non_zero_rows); } else { @@ -907,7 +907,7 @@ void TriangularMatrix::HyperSparseSolveWithReversedNonZeros( template void TriangularMatrix::HyperSparseSolveWithReversedNonZerosInternal( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { RETURN_IF_NULL(rhs); int new_start = non_zero_rows->size(); for (const RowIndex row : Reverse(*non_zero_rows)) { @@ -928,7 +928,7 @@ void TriangularMatrix::HyperSparseSolveWithReversedNonZerosInternal( } void TriangularMatrix::TransposeHyperSparseSolve( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { if (all_diagonal_coefficients_are_one_) { TransposeHyperSparseSolveInternal(rhs, non_zero_rows); } else { @@ -938,7 +938,7 @@ void TriangularMatrix::TransposeHyperSparseSolve( template void TriangularMatrix::TransposeHyperSparseSolveInternal( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { RETURN_IF_NULL(rhs); int new_size = 0; for (const RowIndex row : *non_zero_rows) { @@ -958,7 +958,7 @@ void TriangularMatrix::TransposeHyperSparseSolveInternal( } void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZeros( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { if (all_diagonal_coefficients_are_one_) { 
TransposeHyperSparseSolveWithReversedNonZerosInternal(rhs, non_zero_rows); @@ -970,7 +970,7 @@ void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZeros( template void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZerosInternal( - DenseColumn *rhs, RowIndexVector *non_zero_rows) const { + DenseColumn* rhs, RowIndexVector* non_zero_rows) const { RETURN_IF_NULL(rhs); int new_start = non_zero_rows->size(); for (const RowIndex row : Reverse(*non_zero_rows)) { @@ -996,9 +996,9 @@ void TriangularMatrix::TransposeHyperSparseSolveWithReversedNonZerosInternal( } void TriangularMatrix::PermutedLowerSolve( - const SparseColumn &rhs, const RowPermutation &row_perm, - const RowMapping &partial_inverse_row_perm, SparseColumn *lower, - SparseColumn *upper) const { + const SparseColumn& rhs, const RowPermutation& row_perm, + const RowMapping& partial_inverse_row_perm, SparseColumn* lower, + SparseColumn* upper) const { DCHECK(all_diagonal_coefficients_are_one_); RETURN_IF_NULL(lower); RETURN_IF_NULL(upper); @@ -1035,10 +1035,10 @@ void TriangularMatrix::PermutedLowerSolve( DCHECK(lower->CheckNoDuplicates()); } -void TriangularMatrix::PermutedLowerSparseSolve(const ColumnView &rhs, - const RowPermutation &row_perm, - SparseColumn *lower_column, - SparseColumn *upper_column) { +void TriangularMatrix::PermutedLowerSparseSolve(const ColumnView& rhs, + const RowPermutation& row_perm, + SparseColumn* lower_column, + SparseColumn* upper_column) { DCHECK(all_diagonal_coefficients_are_one_); RETURN_IF_NULL(lower_column); RETURN_IF_NULL(upper_column); @@ -1114,8 +1114,8 @@ void TriangularMatrix::PermutedLowerSparseSolve(const ColumnView &rhs, // will be given by upper_column_rows_ and it will be populated in reverse // order. 
void TriangularMatrix::PermutedComputeRowsToConsider( - const ColumnView &rhs, const RowPermutation &row_perm, - RowIndexVector *lower_column_rows, RowIndexVector *upper_column_rows) { + const ColumnView& rhs, const RowPermutation& row_perm, + RowIndexVector* lower_column_rows, RowIndexVector* upper_column_rows) { stored_.resize(num_rows_, false); marked_.resize(num_rows_, false); lower_column_rows->clear(); @@ -1227,7 +1227,7 @@ void TriangularMatrix::PermutedComputeRowsToConsider( } void TriangularMatrix::ComputeRowsToConsiderWithDfs( - RowIndexVector *non_zero_rows) const { + RowIndexVector* non_zero_rows) const { if (non_zero_rows->empty()) return; // We don't start the DFS if the initial number of non-zeros is under the @@ -1304,7 +1304,7 @@ void TriangularMatrix::ComputeRowsToConsiderWithDfs( } void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( - RowIndexVector *non_zero_rows) const { + RowIndexVector* non_zero_rows) const { static const Fractional kDefaultSparsityRatio = 0.025; static const Fractional kDefaultNumOpsRatio = 0.05; ComputeRowsToConsiderInSortedOrder(non_zero_rows, kDefaultSparsityRatio, @@ -1312,7 +1312,7 @@ void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( } void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( - RowIndexVector *non_zero_rows, Fractional sparsity_ratio, + RowIndexVector* non_zero_rows, Fractional sparsity_ratio, Fractional num_ops_ratio) const { if (non_zero_rows->empty()) return; diff --git a/ortools/lp_data/sparse_column.cc b/ortools/lp_data/sparse_column.cc index ed86614058..2bfb15cb2f 100644 --- a/ortools/lp_data/sparse_column.cc +++ b/ortools/lp_data/sparse_column.cc @@ -47,7 +47,7 @@ void RandomAccessSparseColumn::Resize(RowIndex num_rows) { } void RandomAccessSparseColumn::PopulateFromSparseColumn( - const SparseColumn &sparse_column) { + const SparseColumn& sparse_column) { Clear(); for (const SparseColumn::Entry e : sparse_column) { SetCoefficient(e.row(), e.coefficient()); @@ -55,7 +55,7 @@ void 
RandomAccessSparseColumn::PopulateFromSparseColumn( } void RandomAccessSparseColumn::PopulateSparseColumn( - SparseColumn *sparse_column) const { + SparseColumn* sparse_column) const { RETURN_IF_NULL(sparse_column); sparse_column->Clear(); diff --git a/ortools/lp_data/sparse_vector.h b/ortools/lp_data/sparse_vector.h index 0962d2bf0d..c2a295c09d 100644 --- a/ortools/lp_data/sparse_vector.h +++ b/ortools/lp_data/sparse_vector.h @@ -79,8 +79,7 @@ class SparseVectorEntry; // TODO(user): un-expose this type to client; by getting rid of the // index-based APIs and leveraging iterator-based APIs; if possible. template > > + typename IteratorType = VectorIterator>> class SparseVector { public: typedef IndexType Index; @@ -98,14 +97,14 @@ class SparseVector { // as noexcept. However, the noexcept annotation is banned by the style guide, // and the only way to get it is by using the default move constructor and // assignment operator generated by the compiler. - SparseVector(const SparseVector &other); + SparseVector(const SparseVector& other); #if !defined(_MSC_VER) - SparseVector(SparseVector &&other) = default; + SparseVector(SparseVector&& other) = default; #endif - SparseVector &operator=(const SparseVector &other); + SparseVector& operator=(const SparseVector& other); #if !defined(_MSC_VER) - SparseVector &operator=(SparseVector &&other) = default; + SparseVector& operator=(SparseVector&& other) = default; #endif // Read-only API for a given SparseVector entry. The typical way for a @@ -150,15 +149,15 @@ class SparseVector { // Swaps the content of this sparse vector with the one passed as argument. // Works in O(1). - void Swap(SparseVector *other); + void Swap(SparseVector* other); // Populates the current vector from sparse_vector. // Runs in O(num_entries). - void PopulateFromSparseVector(const SparseVector &sparse_vector); + void PopulateFromSparseVector(const SparseVector& sparse_vector); // Populates the current vector from dense_vector. 
// Runs in O(num_indices_in_dense_vector). - void PopulateFromDenseVector(const DenseVector &dense_vector); + void PopulateFromDenseVector(const DenseVector& dense_vector); // Appends all entries from sparse_vector to the current vector; the indices // of the appended entries are increased by offset. If the current vector @@ -166,7 +165,7 @@ class SparseVector { // overwritten with the value from sparse_vector. // Note that while offset may be negative itself, the indices of all entries // after applying the offset must be non-negative. - void AppendEntriesWithOffset(const SparseVector &sparse_vector, Index offset); + void AppendEntriesWithOffset(const SparseVector& sparse_vector, Index offset); // Returns true when the vector contains no duplicates. Runs in // O(max_index + num_entries), max_index being the largest index in entry. @@ -179,7 +178,7 @@ class SparseVector { // Note that boolean_vector should be initialized to false before calling this // method; It will remain equal to false after calls to CheckNoDuplicates(). // Note that we use a mutable Boolean to make subsequent call runs in O(1). - bool CheckNoDuplicates(StrictITIVector *boolean_vector) const; + bool CheckNoDuplicates(StrictITIVector* boolean_vector) const; // Defines the coefficient at index, i.e. vector[index] = value; void SetCoefficient(Index index, Fractional value); @@ -195,7 +194,7 @@ class SparseVector { // Same as RemoveNearZeroEntries, but the entry magnitude of each row is // multiplied by weights[row] before being compared with threshold. void RemoveNearZeroEntriesWithWeights(Fractional threshold, - const DenseVector &weights); + const DenseVector& weights); // Moves the entry with given Index to the first position in the vector. If // the entry is not present, nothing happens. @@ -211,7 +210,7 @@ class SparseVector { // Multiplies all entries by its corresponding factor, // i.e. entry.coefficient *= factors[entry.index]. 
- void ComponentWiseMultiply(const DenseVector &factors); + void ComponentWiseMultiply(const DenseVector& factors); // Divides all entries by factor. // i.e. entry.coefficient /= factor. @@ -219,22 +218,22 @@ class SparseVector { // Divides all entries by its corresponding factor, // i.e. entry.coefficient /= factors[entry.index]. - void ComponentWiseDivide(const DenseVector &factors); + void ComponentWiseDivide(const DenseVector& factors); // Populates a dense vector from the sparse vector. // Runs in O(num_indices) as the dense vector values have to be reset to 0.0. - void CopyToDenseVector(Index num_indices, DenseVector *dense_vector) const; + void CopyToDenseVector(Index num_indices, DenseVector* dense_vector) const; // Populates a dense vector from the permuted sparse vector. // Runs in O(num_indices) as the dense vector values have to be reset to 0.0. - void PermutedCopyToDenseVector(const IndexPermutation &index_perm, + void PermutedCopyToDenseVector(const IndexPermutation& index_perm, Index num_indices, - DenseVector *dense_vector) const; + DenseVector* dense_vector) const; // Performs the operation dense_vector += multiplier * this. // This is known as multiply-accumulate or (fused) multiply-add. void AddMultipleToDenseVector(Fractional multiplier, - DenseVector *dense_vector) const; + DenseVector* dense_vector) const; // WARNING: BOTH vectors (the current and the destination) MUST be "clean", // i.e. sorted and without duplicates. @@ -243,25 +242,25 @@ class SparseVector { // absolute value are under the given drop_tolerance. void AddMultipleToSparseVectorAndDeleteCommonIndex( Fractional multiplier, Index removed_common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const; + Fractional drop_tolerance, SparseVector* accumulator_vector) const; // Same as AddMultipleToSparseVectorAndDeleteCommonIndex() but instead of // deleting the common index, leave it unchanged. 
void AddMultipleToSparseVectorAndIgnoreCommonIndex( Fractional multiplier, Index removed_common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const; + Fractional drop_tolerance, SparseVector* accumulator_vector) const; // Applies the index permutation to all entries: index = index_perm[index]; - void ApplyIndexPermutation(const IndexPermutation &index_perm); + void ApplyIndexPermutation(const IndexPermutation& index_perm); // Same as ApplyIndexPermutation but deletes the index if index_perm[index] // is negative. - void ApplyPartialIndexPermutation(const IndexPermutation &index_perm); + void ApplyPartialIndexPermutation(const IndexPermutation& index_perm); // Removes the entries for which index_perm[index] is non-negative and appends // them to output. Note that the index of the entries are NOT permuted. - void MoveTaggedEntriesTo(const IndexPermutation &index_perm, - SparseVector *output); + void MoveTaggedEntriesTo(const IndexPermutation& index_perm, + SparseVector* output); // Returns the coefficient at position index. // Call with care: runs in O(number-of-entries) as entries may not be sorted. @@ -306,7 +305,7 @@ class SparseVector { // Returns true if this vector is exactly equal to the given one, i.e. all its // index indices and coefficients appear in the same order and are equal. - bool IsEqualTo(const SparseVector &other) const; + bool IsEqualTo(const SparseVector& other) const; // An exhaustive, pretty-printed listing of the entries, in their // internal order. a.DebugString() == b.DebugString() iff a.IsEqualTo(b). @@ -355,12 +354,12 @@ class SparseVector { // Mutable access to the indices and coefficients of the entries of the sparse // vector. 
- Index &MutableIndex(EntryIndex i) { + Index& MutableIndex(EntryIndex i) { DCHECK_GE(i, 0); DCHECK_LT(i, num_entries_); return index_[i.value()]; } - Fractional &MutableCoefficient(EntryIndex i) { + Fractional& MutableCoefficient(EntryIndex i) { DCHECK_GE(i, 0); DCHECK_LT(i, num_entries_); return coefficient_[i.value()]; @@ -382,8 +381,8 @@ class SparseVector { EntryIndex capacity_; // Pointers to the first elements of the index and coefficient arrays. - Index *index_; - Fractional *coefficient_; + Index* index_; + Fractional* coefficient_; // This is here to speed up the CheckNoDuplicates() methods and is mutable // so we can perform checks on const argument. @@ -394,7 +393,7 @@ class SparseVector { // and AddMultipleToSparseVectorAndIgnoreCommonIndex() which is shared. void AddMultipleToSparseVectorInternal( bool delete_common_index, Fractional multiplier, Index common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const; + Fractional drop_tolerance, SparseVector* accumulator_vector) const; }; // -------------------------------------------------------- @@ -425,7 +424,7 @@ class SparseVectorEntry { // Note that the constructor is intentionally made protected, so that the // entry can be created only as a part of the construction of an iterator over // a sparse data structure. - SparseVectorEntry(const Index *indices, const Fractional *coefficients, + SparseVectorEntry(const Index* indices, const Fractional* coefficients, EntryIndex i) : i_(i), index_(indices), coefficient_(coefficients) {} @@ -439,8 +438,8 @@ class SparseVectorEntry { // increment both when moving the iterator. // 3. keep a pointer to the sparse vector object and the index of the current // entry. 
- const Index *index_; - const Fractional *coefficient_; + const Index* index_; + const Fractional* coefficient_; }; template @@ -465,13 +464,13 @@ SparseVector::SparseVector() may_contain_duplicates_(false) {} template -SparseVector::SparseVector(const SparseVector &other) { +SparseVector::SparseVector(const SparseVector& other) { PopulateFromSparseVector(other); } template -SparseVector & -SparseVector::operator=(const SparseVector &other) { +SparseVector& +SparseVector::operator=(const SparseVector& other) { PopulateFromSparseVector(other); return *this; } @@ -506,9 +505,9 @@ void SparseVector::Reserve(EntryIndex new_capacity) { const size_t value_buffer_size = new_capacity.value() * sizeof(Fractional); const size_t new_buffer_size = index_buffer_size + value_buffer_size; std::unique_ptr new_buffer(new char[new_buffer_size]); - IndexType *const new_index = reinterpret_cast(new_buffer.get()); - Fractional *const new_coefficient = - reinterpret_cast(new_index + new_capacity.value()); + IndexType* const new_index = reinterpret_cast(new_buffer.get()); + Fractional* const new_coefficient = + reinterpret_cast(new_index + new_capacity.value()); // Avoid copying the data if the vector is empty. if (num_entries_ > 0) { @@ -532,7 +531,7 @@ bool SparseVector::IsEmpty() const { } template -void SparseVector::Swap(SparseVector *other) { +void SparseVector::Swap(SparseVector* other) { std::swap(buffer_, other->buffer_); std::swap(num_entries_, other->num_entries_); std::swap(capacity_, other->capacity_); @@ -553,15 +552,15 @@ void SparseVector::CleanUp() { // Implementing in-place search will require either implementing a custom // sorting code, or custom iterators that abstract away the internal // representation. 
- std::vector > entries; + std::vector> entries; entries.reserve(num_entries_.value()); for (EntryIndex i(0); i < num_entries_; ++i) { entries.emplace_back(GetIndex(i), GetCoefficient(i)); } std::stable_sort( entries.begin(), entries.end(), - [](const std::pair &a, - const std::pair &b) { return a.first < b.first; }); + [](const std::pair& a, + const std::pair& b) { return a.first < b.first; }); EntryIndex new_size(0); for (int i = 0; i < num_entries_; ++i) { @@ -591,7 +590,7 @@ bool SparseVector::IsCleanedUp() const { template void SparseVector::PopulateFromSparseVector( - const SparseVector &sparse_vector) { + const SparseVector& sparse_vector) { // Clear the sparse vector before reserving the new capacity. If we didn't do // this, Reserve would have to copy the current contents of the vector if it // allocated a new buffer. This would be wasteful, since we overwrite it in @@ -612,7 +611,7 @@ void SparseVector::PopulateFromSparseVector( template void SparseVector::PopulateFromDenseVector( - const DenseVector &dense_vector) { + const DenseVector& dense_vector) { Clear(); const Index num_indices(dense_vector.size()); for (Index index(0); index < num_indices; ++index) { @@ -625,7 +624,7 @@ void SparseVector::PopulateFromDenseVector( template void SparseVector::AppendEntriesWithOffset( - const SparseVector &sparse_vector, Index offset) { + const SparseVector& sparse_vector, Index offset) { for (const EntryIndex i : sparse_vector.AllEntryIndices()) { const Index new_index = offset + sparse_vector.GetIndex(i); DCHECK_GE(new_index, 0); @@ -636,7 +635,7 @@ void SparseVector::AppendEntriesWithOffset( template bool SparseVector::CheckNoDuplicates( - StrictITIVector *boolean_vector) const { + StrictITIVector* boolean_vector) const { RETURN_VALUE_IF_NULL(boolean_vector, false); // Note(user): Using num_entries() or any function that call // CheckNoDuplicates() again will cause an infinite loop! 
@@ -719,7 +718,7 @@ void SparseVector::RemoveNearZeroEntries( template void SparseVector::RemoveNearZeroEntriesWithWeights( - Fractional threshold, const DenseVector &weights) { + Fractional threshold, const DenseVector& weights) { DCHECK(CheckNoDuplicates()); EntryIndex new_index(0); for (const EntryIndex i : AllEntryIndices()) { @@ -769,7 +768,7 @@ void SparseVector::MultiplyByConstant( template void SparseVector::ComponentWiseMultiply( - const DenseVector &factors) { + const DenseVector& factors) { for (const EntryIndex i : AllEntryIndices()) { MutableCoefficient(i) *= factors[GetIndex(i)]; } @@ -785,7 +784,7 @@ void SparseVector::DivideByConstant( template void SparseVector::ComponentWiseDivide( - const DenseVector &factors) { + const DenseVector& factors) { for (const EntryIndex i : AllEntryIndices()) { MutableCoefficient(i) /= factors[GetIndex(i)]; } @@ -793,7 +792,7 @@ void SparseVector::ComponentWiseDivide( template void SparseVector::CopyToDenseVector( - Index num_indices, DenseVector *dense_vector) const { + Index num_indices, DenseVector* dense_vector) const { RETURN_IF_NULL(dense_vector); dense_vector->AssignToZero(num_indices); for (const EntryIndex i : AllEntryIndices()) { @@ -803,8 +802,8 @@ void SparseVector::CopyToDenseVector( template void SparseVector::PermutedCopyToDenseVector( - const IndexPermutation &index_perm, Index num_indices, - DenseVector *dense_vector) const { + const IndexPermutation& index_perm, Index num_indices, + DenseVector* dense_vector) const { RETURN_IF_NULL(dense_vector); dense_vector->AssignToZero(num_indices); for (const EntryIndex i : AllEntryIndices()) { @@ -814,7 +813,7 @@ void SparseVector::PermutedCopyToDenseVector( template void SparseVector::AddMultipleToDenseVector( - Fractional multiplier, DenseVector *dense_vector) const { + Fractional multiplier, DenseVector* dense_vector) const { RETURN_IF_NULL(dense_vector); if (multiplier == 0.0) return; for (const EntryIndex i : AllEntryIndices()) { @@ -826,7 +825,7 @@ 
template void SparseVector:: AddMultipleToSparseVectorAndDeleteCommonIndex( Fractional multiplier, Index removed_common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const { + Fractional drop_tolerance, SparseVector* accumulator_vector) const { AddMultipleToSparseVectorInternal(true, multiplier, removed_common_index, drop_tolerance, accumulator_vector); } @@ -835,7 +834,7 @@ template void SparseVector:: AddMultipleToSparseVectorAndIgnoreCommonIndex( Fractional multiplier, Index removed_common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const { + Fractional drop_tolerance, SparseVector* accumulator_vector) const { AddMultipleToSparseVectorInternal(false, multiplier, removed_common_index, drop_tolerance, accumulator_vector); } @@ -843,7 +842,7 @@ void SparseVector:: template void SparseVector::AddMultipleToSparseVectorInternal( bool delete_common_index, Fractional multiplier, Index common_index, - Fractional drop_tolerance, SparseVector *accumulator_vector) const { + Fractional drop_tolerance, SparseVector* accumulator_vector) const { // DCHECK that the input is correct. DCHECK(IsCleanedUp()); DCHECK(accumulator_vector->IsCleanedUp()); @@ -857,8 +856,8 @@ void SparseVector::AddMultipleToSparseVectorInternal( // be multiplied by "multiplier"), and "b" the second vector (which will be // swapped with "c" at the end to hold the result). // We incrementally build c as: a * multiplier + b. - const SparseVector &a = *this; - const SparseVector &b = *accumulator_vector; + const SparseVector& a = *this; + const SparseVector& b = *accumulator_vector; SparseVector c; EntryIndex ia(0); // Index in the vector "a" EntryIndex ib(0); // ... 
and "b" @@ -925,7 +924,7 @@ void SparseVector::AddMultipleToSparseVectorInternal( template void SparseVector::ApplyIndexPermutation( - const IndexPermutation &index_perm) { + const IndexPermutation& index_perm) { for (const EntryIndex i : AllEntryIndices()) { MutableIndex(i) = index_perm[GetIndex(i)]; } @@ -933,7 +932,7 @@ void SparseVector::ApplyIndexPermutation( template void SparseVector::ApplyPartialIndexPermutation( - const IndexPermutation &index_perm) { + const IndexPermutation& index_perm) { EntryIndex new_index(0); for (const EntryIndex i : AllEntryIndices()) { const Index index = GetIndex(i); @@ -948,7 +947,7 @@ void SparseVector::ApplyPartialIndexPermutation( template void SparseVector::MoveTaggedEntriesTo( - const IndexPermutation &index_perm, SparseVector *output) { + const IndexPermutation& index_perm, SparseVector* output) { // Note that this function is called many times, so performance does matter // and it is why we optimized the "nothing to do" case. const EntryIndex end(num_entries_); @@ -994,7 +993,7 @@ Fractional SparseVector::LookUpCoefficient( template bool SparseVector::IsEqualTo( - const SparseVector &other) const { + const SparseVector& other) const { // We do not take into account the mutable value may_contain_duplicates_. 
if (num_entries() != other.num_entries()) return false; for (const EntryIndex i : AllEntryIndices()) { diff --git a/ortools/port/file_nonport.cc b/ortools/port/file_nonport.cc index f01b7ed124..e04a3aa3b8 100644 --- a/ortools/port/file_nonport.cc +++ b/ortools/port/file_nonport.cc @@ -30,20 +30,20 @@ namespace operations_research { } ::absl::Status PortableFileGetContents(absl::string_view file_name, - std::string *output) { + std::string* output) { return file::GetContents(file_name, output, file::Defaults()); } -bool PortableTemporaryFile(const char *directory_prefix, - std::string *filename_out) { +bool PortableTemporaryFile(const char* directory_prefix, + std::string* filename_out) { #if defined(__linux) int32 tid = static_cast(pthread_self()); -#else // defined(__linux__) +#else // defined(__linux__) int32 tid = 123; #endif // defined(__linux__) #if !defined(_MSC_VER) int32 pid = static_cast(getpid()); -#else // _MSC_VER +#else // _MSC_VER int32 pid = 456; #endif // _MSC_VER int64 now = absl::GetCurrentTimeNanos(); diff --git a/ortools/sat/BUILD b/ortools/sat/BUILD index 0d3ec3e485..a6c2a411fb 100644 --- a/ortools/sat/BUILD +++ b/ortools/sat/BUILD @@ -309,6 +309,7 @@ cc_library( ":sat_base", ":sat_parameters_cc_proto", ":simplification", + ":var_domination", "//ortools/base", "//ortools/base:hash", "//ortools/base:map_util", @@ -561,6 +562,23 @@ cc_library( ], ) +cc_library( + name = "var_domination", + srcs = ["var_domination.cc"], + hdrs = ["var_domination.h"], + deps = [ + ":cp_model_cc_proto", + ":cp_model_utils", + ":integer", + ":presolve_context", + "//ortools/base", + "@com_google_absl//absl/types:span", + "//ortools/base:int_type", + "//ortools/base:int_type_indexed_vector", + "//ortools/algorithms:dynamic_partition", + ], +) + cc_library( name = "integer", srcs = ["integer.cc"], diff --git a/ortools/sat/all_different.cc b/ortools/sat/all_different.cc index 619bc39e76..7196ba4c89 100644 --- a/ortools/sat/all_different.cc +++ 
b/ortools/sat/all_different.cc @@ -28,24 +28,24 @@ namespace operations_research { namespace sat { -std::function AllDifferentBinary( - const std::vector &vars) { - return [=](Model *model) { +std::function AllDifferentBinary( + const std::vector& vars) { + return [=](Model* model) { // Fully encode all the given variables and construct a mapping value -> // List of literal each indicating that a given variable takes this value. // // Note that we use a map to always add the constraints in the same order. - std::map > value_to_literals; - IntegerEncoder *encoder = model->GetOrCreate(); + std::map> value_to_literals; + IntegerEncoder* encoder = model->GetOrCreate(); for (const IntegerVariable var : vars) { model->Add(FullyEncodeVariable(var)); - for (const auto &entry : encoder->FullDomainEncoding(var)) { + for (const auto& entry : encoder->FullDomainEncoding(var)) { value_to_literals[entry.value].push_back(entry.literal); } } // Add an at most one constraint for each value. - for (const auto &entry : value_to_literals) { + for (const auto& entry : value_to_literals) { if (entry.second.size() > 1) { model->Add(AtMostOneConstraint(entry.second)); } @@ -55,30 +55,30 @@ std::function AllDifferentBinary( // a permutation. We can add a bool_or for each literals attached to a // value. 
if (value_to_literals.size() == vars.size()) { - for (const auto &entry : value_to_literals) { + for (const auto& entry : value_to_literals) { model->Add(ClauseConstraint(entry.second)); } } }; } -std::function AllDifferentOnBounds( - const std::vector &vars) { - return [=](Model *model) { +std::function AllDifferentOnBounds( + const std::vector& vars) { + return [=](Model* model) { if (vars.empty()) return; - auto *constraint = new AllDifferentBoundsPropagator( + auto* constraint = new AllDifferentBoundsPropagator( vars, model->GetOrCreate()); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); }; } -std::function AllDifferentAC( - const std::vector &variables) { - return [=](Model *model) { +std::function AllDifferentAC( + const std::vector& variables) { + return [=](Model* model) { if (variables.size() < 3) return; - AllDifferentConstraint *constraint = new AllDifferentConstraint( + AllDifferentConstraint* constraint = new AllDifferentConstraint( variables, model->GetOrCreate(), model->GetOrCreate(), model->GetOrCreate()); constraint->RegisterWith(model->GetOrCreate()); @@ -87,8 +87,8 @@ std::function AllDifferentAC( } AllDifferentConstraint::AllDifferentConstraint( - std::vector variables, IntegerEncoder *encoder, - Trail *trail, IntegerTrail *integer_trail) + std::vector variables, IntegerEncoder* encoder, + Trail* trail, IntegerTrail* integer_trail) : num_variables_(variables.size()), variables_(std::move(variables)), trail_(trail), @@ -124,7 +124,7 @@ AllDifferentConstraint::AllDifferentConstraint( // Fill cache with literals, default value is kFalseLiteralIndex. int64 size = variable_max_value_[x] - variable_min_value_[x] + 1; variable_literal_index_[x].resize(size, kFalseLiteralIndex); - for (const auto &entry : encoder->FullDomainEncoding(variables_[x])) { + for (const auto& entry : encoder->FullDomainEncoding(variables_[x])) { int64 value = entry.value.value(); // Can happen because of initial propagation! 
if (value < variable_min_value_[x] || variable_max_value_[x] < value) { @@ -145,10 +145,10 @@ AllDifferentConstraint::AllDifferentConstraint( component_number_.resize(num_variables_ + num_all_values_ + 1); } -void AllDifferentConstraint::RegisterWith(GenericLiteralWatcher *watcher) { +void AllDifferentConstraint::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->SetPropagatorPriority(id, 2); - for (const auto &literal_indices : variable_literal_index_) { + for (const auto& literal_indices : variable_literal_index_) { for (const LiteralIndex li : literal_indices) { // Watch only unbound literals. if (li >= 0 && @@ -300,7 +300,7 @@ bool AllDifferentConstraint::Propagate() { // MakeAugmentingPath from increasing the matching size. if (x < num_variables_) { // For now explain all forbidden arcs. - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); conflict->clear(); for (int y = 0; y < num_variables_; y++) { if (!variable_visited_[y]) continue; @@ -349,15 +349,15 @@ bool AllDifferentConstraint::Propagate() { // Compute SCCs, make node -> component map. struct SccOutput { - explicit SccOutput(std::vector *c) : components(c) {} - void emplace_back(int const *b, int const *e) { - for (int const *it = b; it < e; ++it) { + explicit SccOutput(std::vector* c) : components(c) {} + void emplace_back(int const* b, int const* e) { + for (int const* it = b; it < e; ++it) { (*components)[*it] = num_components; } ++num_components; } int num_components = 0; - std::vector *components; + std::vector* components; }; SccOutput scc_output(&component_number_); FindStronglyConnectedComponents( @@ -390,7 +390,7 @@ bool AllDifferentConstraint::Propagate() { MakeAugmentingPath(old_variable); DCHECK_EQ(variable_to_value_[old_variable], -1); // No reassignment. 
- std::vector *reason = trail_->GetEmptyVectorToStoreReason(); + std::vector* reason = trail_->GetEmptyVectorToStoreReason(); for (int y = 0; y < num_variables_; y++) { if (!variable_visited_[y]) continue; for (int value = variable_min_value_[y]; @@ -416,7 +416,7 @@ bool AllDifferentConstraint::Propagate() { } AllDifferentBoundsPropagator::AllDifferentBoundsPropagator( - const std::vector &vars, IntegerTrail *integer_trail) + const std::vector& vars, IntegerTrail* integer_trail) : integer_trail_(integer_trail) { CHECK(!vars.empty()); @@ -475,7 +475,7 @@ int AllDifferentBoundsPropagator::FindStartIndexAndCompressPath(int index) { bool AllDifferentBoundsPropagator::PropagateLowerBounds() { // Start by filling the cached bounds and sorting by increasing lb. - for (VarValue &entry : vars_) { + for (VarValue& entry : vars_) { entry.lb = integer_trail_->LowerBound(entry.var); entry.ub = integer_trail_->UpperBound(entry.var); } @@ -633,7 +633,7 @@ bool AllDifferentBoundsPropagator::PropagateLowerBoundsInternal( } void AllDifferentBoundsPropagator::RegisterWith( - GenericLiteralWatcher *watcher) { + GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); for (const VarValue entry : vars_) { watcher->WatchIntegerVariable(entry.var, id); diff --git a/ortools/sat/boolean_problem.cc b/ortools/sat/boolean_problem.cc index b53ce63d79..c0c69bb16d 100644 --- a/ortools/sat/boolean_problem.cc +++ b/ortools/sat/boolean_problem.cc @@ -47,8 +47,8 @@ namespace sat { using util::RemapGraph; -void ExtractAssignment(const LinearBooleanProblem &problem, - const SatSolver &solver, std::vector *assignment) { +void ExtractAssignment(const LinearBooleanProblem& problem, + const SatSolver& solver, std::vector* assignment) { assignment->clear(); for (int i = 0; i < problem.num_variables(); ++i) { assignment->push_back( @@ -63,8 +63,8 @@ namespace { // // A non-empty string indicates an error. 
template -std::string ValidateLinearTerms(const LinearTerms &terms, - std::vector *variable_seen) { +std::string ValidateLinearTerms(const LinearTerms& terms, + std::vector* variable_seen) { // variable_seen already has all items false and is reset before return. std::string err_str; int num_errs = 0; @@ -116,7 +116,7 @@ std::string ValidateLinearTerms(const LinearTerms &terms, // of LiteralWithCoeff. template std::vector ConvertLinearExpression( - const ProtoFormat &input) { + const ProtoFormat& input) { std::vector cst; cst.reserve(input.literals_size()); for (int i = 0; i < input.literals_size(); ++i) { @@ -128,10 +128,10 @@ std::vector ConvertLinearExpression( } // namespace -absl::Status ValidateBooleanProblem(const LinearBooleanProblem &problem) { +absl::Status ValidateBooleanProblem(const LinearBooleanProblem& problem) { std::vector variable_seen(problem.num_variables(), false); for (int i = 0; i < problem.constraints_size(); ++i) { - const LinearBooleanConstraint &constraint = problem.constraints(i); + const LinearBooleanConstraint& constraint = problem.constraints(i); const std::string error = ValidateLinearTerms(constraint, &variable_seen); if (!error.empty()) { return absl::Status( @@ -148,20 +148,20 @@ absl::Status ValidateBooleanProblem(const LinearBooleanProblem &problem) { return ::absl::OkStatus(); } -CpModelProto BooleanProblemToCpModelproto(const LinearBooleanProblem &problem) { +CpModelProto BooleanProblemToCpModelproto(const LinearBooleanProblem& problem) { CpModelProto result; for (int i = 0; i < problem.num_variables(); ++i) { - IntegerVariableProto *var = result.add_variables(); + IntegerVariableProto* var = result.add_variables(); if (problem.var_names_size() > i) { var->set_name(problem.var_names(i)); } var->add_domain(0); var->add_domain(1); } - for (const LinearBooleanConstraint &constraint : problem.constraints()) { - ConstraintProto *ct = result.add_constraints(); + for (const LinearBooleanConstraint& constraint : problem.constraints()) 
{ + ConstraintProto* ct = result.add_constraints(); ct->set_name(constraint.name()); - LinearConstraintProto *linear = ct->mutable_linear(); + LinearConstraintProto* linear = ct->mutable_linear(); int64 offset = 0; for (int i = 0; i < constraint.literals_size(); ++i) { // Note that the new format is slightly different. @@ -185,7 +185,7 @@ CpModelProto BooleanProblemToCpModelproto(const LinearBooleanProblem &problem) { : kint32max + offset); } if (problem.has_objective()) { - CpObjectiveProto *objective = result.mutable_objective(); + CpObjectiveProto* objective = result.mutable_objective(); int64 offset = 0; for (int i = 0; i < problem.objective().literals_size(); ++i) { const int lit = problem.objective().literals(i); @@ -205,19 +205,19 @@ CpModelProto BooleanProblemToCpModelproto(const LinearBooleanProblem &problem) { return result; } -void ChangeOptimizationDirection(LinearBooleanProblem *problem) { - LinearObjective *objective = problem->mutable_objective(); +void ChangeOptimizationDirection(LinearBooleanProblem* problem) { + LinearObjective* objective = problem->mutable_objective(); objective->set_scaling_factor(-objective->scaling_factor()); objective->set_offset(-objective->offset()); // We need 'auto' here to keep the open-source compilation happy // (it uses the public protobuf release). - for (auto &coefficients_ref : *objective->mutable_coefficients()) { + for (auto& coefficients_ref : *objective->mutable_coefficients()) { coefficients_ref = -coefficients_ref; } } -bool LoadBooleanProblem(const LinearBooleanProblem &problem, - SatSolver *solver) { +bool LoadBooleanProblem(const LinearBooleanProblem& problem, + SatSolver* solver) { // TODO(user): Currently, the sat solver can load without any issue // constraints with duplicate variables, so we just output a warning if the // problem is not "valid". 
Make this a strong check once we have some @@ -236,7 +236,7 @@ bool LoadBooleanProblem(const LinearBooleanProblem &problem, std::vector cst; int64 num_terms = 0; int num_constraints = 0; - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { num_terms += constraint.literals_size(); cst = ConvertLinearExpression(constraint); if (!solver->AddLinearConstraint( @@ -256,8 +256,8 @@ bool LoadBooleanProblem(const LinearBooleanProblem &problem, return true; } -bool LoadAndConsumeBooleanProblem(LinearBooleanProblem *problem, - SatSolver *solver) { +bool LoadAndConsumeBooleanProblem(LinearBooleanProblem* problem, + SatSolver* solver) { const absl::Status status = ValidateBooleanProblem(*problem); if (!status.ok()) { LOG(WARNING) << "The given problem is invalid! " << status.message(); @@ -281,7 +281,7 @@ bool LoadAndConsumeBooleanProblem(LinearBooleanProblem *problem, std::reverse(problem->mutable_constraints()->begin(), problem->mutable_constraints()->end()); for (int i = problem->constraints_size() - 1; i >= 0; --i) { - const LinearBooleanConstraint &constraint = problem->constraints(i); + const LinearBooleanConstraint& constraint = problem->constraints(i); num_terms += constraint.literals_size(); cst = ConvertLinearExpression(constraint); if (!solver->AddLinearConstraint( @@ -304,9 +304,9 @@ bool LoadAndConsumeBooleanProblem(LinearBooleanProblem *problem, return true; } -void UseObjectiveForSatAssignmentPreference(const LinearBooleanProblem &problem, - SatSolver *solver) { - const LinearObjective &objective = problem.objective(); +void UseObjectiveForSatAssignmentPreference(const LinearBooleanProblem& problem, + SatSolver* solver) { + const LinearObjective& objective = problem.objective(); CHECK_EQ(objective.literals_size(), objective.coefficients_size()); int64 max_abs_weight = 0; for (const int64 coefficient : objective.coefficients()) { @@ -325,29 +325,29 @@ void 
UseObjectiveForSatAssignmentPreference(const LinearBooleanProblem &problem, } } -bool AddObjectiveUpperBound(const LinearBooleanProblem &problem, - Coefficient upper_bound, SatSolver *solver) { +bool AddObjectiveUpperBound(const LinearBooleanProblem& problem, + Coefficient upper_bound, SatSolver* solver) { std::vector cst = ConvertLinearExpression(problem.objective()); return solver->AddLinearConstraint(false, Coefficient(0), true, upper_bound, &cst); } -bool AddObjectiveConstraint(const LinearBooleanProblem &problem, +bool AddObjectiveConstraint(const LinearBooleanProblem& problem, bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, Coefficient upper_bound, - SatSolver *solver) { + SatSolver* solver) { std::vector cst = ConvertLinearExpression(problem.objective()); return solver->AddLinearConstraint(use_lower_bound, lower_bound, use_upper_bound, upper_bound, &cst); } -Coefficient ComputeObjectiveValue(const LinearBooleanProblem &problem, - const std::vector &assignment) { +Coefficient ComputeObjectiveValue(const LinearBooleanProblem& problem, + const std::vector& assignment) { CHECK_EQ(assignment.size(), problem.num_variables()); Coefficient sum(0); - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); for (int i = 0; i < objective.literals_size(); ++i) { const Literal literal(objective.literals(i)); if (assignment[literal.Variable().value()] == literal.IsPositive()) { @@ -357,12 +357,12 @@ Coefficient ComputeObjectiveValue(const LinearBooleanProblem &problem, return sum; } -bool IsAssignmentValid(const LinearBooleanProblem &problem, - const std::vector &assignment) { +bool IsAssignmentValid(const LinearBooleanProblem& problem, + const std::vector& assignment) { CHECK_EQ(assignment.size(), problem.num_variables()); // Check that all constraints are satisfied. 
- for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { Coefficient sum(0); for (int i = 0; i < constraint.literals_size(); ++i) { const Literal literal(constraint.literals(i)); @@ -388,10 +388,10 @@ bool IsAssignmentValid(const LinearBooleanProblem &problem, // given LinearBooleanProblem. All constraint coefficients must be 1 (and of the // form >= 1) and all objective weights must be strictly positive. std::string LinearBooleanProblemToCnfString( - const LinearBooleanProblem &problem) { + const LinearBooleanProblem& problem) { std::string output; const bool is_wcnf = (problem.objective().coefficients_size() > 0); - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); // Hack: We know that all the variables with index greater than this have been // created "artificially" in order to encode a max-sat problem into our @@ -401,7 +401,7 @@ std::string LinearBooleanProblemToCnfString( // This will contains the objective. absl::flat_hash_map literal_to_weight; - std::vector > non_slack_objective; + std::vector> non_slack_objective; // This will be the weight of the "hard" clauses in the wcnf format. It must // be greater than the sum of the weight of all the soft clauses, so we will @@ -440,7 +440,7 @@ std::string LinearBooleanProblemToCnfString( } std::string constraint_output; - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { if (constraint.literals_size() == 0) return ""; // Assumption. 
constraint_output.clear(); int64 weight = hard_weight; @@ -472,8 +472,8 @@ std::string LinearBooleanProblemToCnfString( return output; } -void StoreAssignment(const VariablesAssignment &assignment, - BooleanAssignment *output) { +void StoreAssignment(const VariablesAssignment& assignment, + BooleanAssignment* output) { output->clear_literals(); for (BooleanVariable var(0); var < assignment.NumberOfVariables(); ++var) { if (assignment.VariableIsAssigned(var)) { @@ -483,9 +483,9 @@ void StoreAssignment(const VariablesAssignment &assignment, } } -void ExtractSubproblem(const LinearBooleanProblem &problem, - const std::vector &constraint_indices, - LinearBooleanProblem *subproblem) { +void ExtractSubproblem(const LinearBooleanProblem& problem, + const std::vector& constraint_indices, + LinearBooleanProblem* subproblem) { *subproblem = problem; subproblem->set_name("Subproblem of " + problem.name()); subproblem->clear_constraints(); @@ -529,14 +529,14 @@ class IdGenerator { // in [0, num_classes) and any symmetry will only map nodes with the same class // between each other. template -Graph *GenerateGraphForSymmetryDetection( - const LinearBooleanProblem &problem, - std::vector *initial_equivalence_classes) { +Graph* GenerateGraphForSymmetryDetection( + const LinearBooleanProblem& problem, + std::vector* initial_equivalence_classes) { // First, we convert the problem to its canonical representation. const int num_variables = problem.num_variables(); CanonicalBooleanLinearProblem canonical_problem; std::vector cst; - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { cst = ConvertLinearExpression(constraint); CHECK(canonical_problem.AddLinearConstraint( constraint.has_lower_bound(), Coefficient(constraint.lower_bound()), @@ -546,7 +546,7 @@ Graph *GenerateGraphForSymmetryDetection( // TODO(user): reserve the memory for the graph? 
not sure it is worthwhile // since it would require some linear scan of the problem though. - Graph *graph = new Graph(); + Graph* graph = new Graph(); initial_equivalence_classes->clear(); // We will construct a graph with 3 different types of node that must be @@ -632,9 +632,9 @@ Graph *GenerateGraphForSymmetryDetection( return graph; } -void MakeAllLiteralsPositive(LinearBooleanProblem *problem) { +void MakeAllLiteralsPositive(LinearBooleanProblem* problem) { // Objective. - LinearObjective *mutable_objective = problem->mutable_objective(); + LinearObjective* mutable_objective = problem->mutable_objective(); int64 objective_offset = 0; for (int i = 0; i < mutable_objective->literals_size(); ++i) { const int signed_literal = mutable_objective->literals(i); @@ -648,7 +648,7 @@ void MakeAllLiteralsPositive(LinearBooleanProblem *problem) { mutable_objective->set_offset(mutable_objective->offset() + objective_offset); // Constraints. - for (LinearBooleanConstraint &constraint : + for (LinearBooleanConstraint& constraint : *(problem->mutable_constraints())) { int64 sum = 0; for (int i = 0; i < constraint.literals_size(); ++i) { @@ -668,8 +668,8 @@ void MakeAllLiteralsPositive(LinearBooleanProblem *problem) { } void FindLinearBooleanProblemSymmetries( - const LinearBooleanProblem &problem, - std::vector > *generators) { + const LinearBooleanProblem& problem, + std::vector>* generators) { typedef GraphSymmetryFinder::Graph Graph; std::vector equivalence_classes; std::unique_ptr graph( @@ -700,7 +700,8 @@ void FindLinearBooleanProblemSymmetries( } } #endif // __PORTABLE_PLATFORM__ - GraphSymmetryFinder symmetry_finder(*graph, /*is_undirected=*/true); + GraphSymmetryFinder symmetry_finder(*graph, + /*is_undirected=*/true); std::vector factorized_automorphism_group_size; // TODO(user): inject the appropriate time limit here. 
CHECK_OK(symmetry_finder.FindSymmetries( @@ -713,7 +714,7 @@ void FindLinearBooleanProblemSymmetries( double average_support_size = 0.0; int num_generators = 0; for (int i = 0; i < generators->size(); ++i) { - SparsePermutation *permutation = (*generators)[i].get(); + SparsePermutation* permutation = (*generators)[i].get(); std::vector to_delete; for (int j = 0; j < permutation->NumCycles(); ++j) { if (*(permutation->Cycle(j).begin()) >= 2 * problem.num_variables()) { @@ -740,8 +741,8 @@ void FindLinearBooleanProblemSymmetries( } void ApplyLiteralMappingToBooleanProblem( - const gtl::ITIVector &mapping, - LinearBooleanProblem *problem) { + const gtl::ITIVector& mapping, + LinearBooleanProblem* problem) { Coefficient bound_shift; Coefficient max_value; std::vector cst; @@ -749,18 +750,18 @@ void ApplyLiteralMappingToBooleanProblem( // First the objective. cst = ConvertLinearExpression(problem->objective()); ApplyLiteralMapping(mapping, &cst, &bound_shift, &max_value); - LinearObjective *mutable_objective = problem->mutable_objective(); + LinearObjective* mutable_objective = problem->mutable_objective(); mutable_objective->clear_literals(); mutable_objective->clear_coefficients(); mutable_objective->set_offset(mutable_objective->offset() - bound_shift.value()); - for (const LiteralWithCoeff &entry : cst) { + for (const LiteralWithCoeff& entry : cst) { mutable_objective->add_literals(entry.literal.SignedValue()); mutable_objective->add_coefficients(entry.coefficient.value()); } // Now the clauses. - for (LinearBooleanConstraint &constraint : *problem->mutable_constraints()) { + for (LinearBooleanConstraint& constraint : *problem->mutable_constraints()) { cst = ConvertLinearExpression(constraint); constraint.clear_literals(); constraint.clear_coefficients(); @@ -785,7 +786,7 @@ void ApplyLiteralMappingToBooleanProblem( // If the constraint is always true, we just leave it empty. 
if (constraint.has_lower_bound() || constraint.has_upper_bound()) { - for (const LiteralWithCoeff &entry : cst) { + for (const LiteralWithCoeff& entry : cst) { constraint.add_literals(entry.literal.SignedValue()); constraint.add_coefficients(entry.coefficient.value()); } @@ -821,8 +822,8 @@ void ApplyLiteralMappingToBooleanProblem( // A simple preprocessing step that does basic probing and removes the // equivalent literals. -void ProbeAndSimplifyProblem(SatPostsolver *postsolver, - LinearBooleanProblem *problem) { +void ProbeAndSimplifyProblem(SatPostsolver* postsolver, + LinearBooleanProblem* problem) { // TODO(user): expose the number of iterations as a parameter. for (int iter = 0; iter < 6; ++iter) { SatSolver solver; diff --git a/ortools/sat/circuit.cc b/ortools/sat/circuit.cc index 2b4a3bcd42..0d79d11032 100644 --- a/ortools/sat/circuit.cc +++ b/ortools/sat/circuit.cc @@ -23,10 +23,10 @@ namespace operations_research { namespace sat { CircuitPropagator::CircuitPropagator(const int num_nodes, - const std::vector &tails, - const std::vector &heads, - const std::vector &literals, - Options options, Model *model) + const std::vector& tails, + const std::vector& heads, + const std::vector& literals, + Options options, Model* model) : num_nodes_(num_nodes), options_(options), trail_(model->GetOrCreate()), @@ -89,7 +89,7 @@ CircuitPropagator::CircuitPropagator(const int num_nodes, } } -void CircuitPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void CircuitPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); for (int w = 0; w < watch_index_to_literal_.size(); ++w) { watcher->WatchLiteral(watch_index_to_literal_[w], id, w); @@ -125,7 +125,7 @@ void CircuitPropagator::SetLevel(int level) { } void CircuitPropagator::FillReasonForPath(int start_node, - std::vector *reason) const { + std::vector* reason) const { CHECK_NE(start_node, -1); reason->clear(); int node = start_node; @@ -151,7 +151,7 @@ void 
CircuitPropagator::AddArc(int tail, int head, LiteralIndex literal_index) { } bool CircuitPropagator::IncrementalPropagate( - const std::vector &watch_indices) { + const std::vector& watch_indices) { for (const int w : watch_indices) { const Literal literal = watch_index_to_literal_[w]; for (const Arc arc : watch_index_to_arcs_[w]) { @@ -164,7 +164,7 @@ bool CircuitPropagator::IncrementalPropagate( // Get rid of the trivial conflicts: At most one incoming and one outgoing // arc for each nodes. if (next_[arc.tail] != -1) { - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); if (next_literal_[arc.tail] != kNoLiteralIndex) { *conflict = {Literal(next_literal_[arc.tail]).Negated(), literal.Negated()}; @@ -174,7 +174,7 @@ bool CircuitPropagator::IncrementalPropagate( return false; } if (prev_[arc.head] != -1) { - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); if (next_literal_[prev_[arc.head]] != kNoLiteralIndex) { *conflict = {Literal(next_literal_[prev_[arc.head]]).Negated(), literal.Negated()}; @@ -257,7 +257,7 @@ bool CircuitPropagator::Propagate() { const Literal literal = it->second; if (assignment_.LiteralIsFalse(literal)) continue; - std::vector *reason = trail_->GetEmptyVectorToStoreReason(); + std::vector* reason = trail_->GetEmptyVectorToStoreReason(); FillReasonForPath(start_node, reason); if (extra_reason != kFalseLiteralIndex) { reason->push_back(Literal(extra_reason)); @@ -306,8 +306,8 @@ bool CircuitPropagator::Propagate() { } CircuitCoveringPropagator::CircuitCoveringPropagator( - std::vector > graph, - const std::vector &distinguished_nodes, Model *model) + std::vector> graph, + const std::vector& distinguished_nodes, Model* model) : graph_(std::move(graph)), num_nodes_(graph_.size()), trail_(model->GetOrCreate()) { @@ -317,7 +317,7 @@ CircuitCoveringPropagator::CircuitCoveringPropagator( } } -void 
CircuitCoveringPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void CircuitCoveringPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int watcher_id = watcher->Register(this); // Fill fixed_arcs_ with arcs that are initially fixed to true, @@ -351,16 +351,16 @@ void CircuitCoveringPropagator::SetLevel(int level) { } bool CircuitCoveringPropagator::IncrementalPropagate( - const std::vector &watch_indices) { + const std::vector& watch_indices) { for (const int w : watch_indices) { - const auto &arc = watch_index_to_arc_[w]; + const auto& arc = watch_index_to_arc_[w]; fixed_arcs_.push_back(arc); } return Propagate(); } void CircuitCoveringPropagator::FillFixedPathInReason( - int start, int end, std::vector *reason) { + int start, int end, std::vector* reason) { reason->clear(); int current = start; do { @@ -375,7 +375,7 @@ bool CircuitCoveringPropagator::Propagate() { // Gather next_ and prev_ from fixed arcs. next_.assign(num_nodes_, -1); prev_.assign(num_nodes_, -1); - for (const auto &arc : fixed_arcs_) { + for (const auto& arc : fixed_arcs_) { // Two arcs go out of arc.first, forbidden. if (next_[arc.first] != -1) { *trail_->MutableConflict() = { @@ -439,7 +439,7 @@ bool CircuitCoveringPropagator::Propagate() { // Path with no distinguished node: forbid to close it. 
if (current == -1 && distinguished == -1 && !trail_->Assignment().LiteralIsFalse(graph_[end][start])) { - auto *reason = trail_->GetEmptyVectorToStoreReason(); + auto* reason = trail_->GetEmptyVectorToStoreReason(); FillFixedPathInReason(start, end, reason); const bool ok = trail_->EnqueueWithStoredReason(graph_[end][start].Negated()); @@ -449,9 +449,9 @@ bool CircuitCoveringPropagator::Propagate() { return true; } -std::function ExactlyOnePerRowAndPerColumn( - const std::vector > &graph) { - return [=](Model *model) { +std::function ExactlyOnePerRowAndPerColumn( + const std::vector>& graph) { + return [=](Model* model) { const int n = graph.size(); std::vector exactly_one_constraint; exactly_one_constraint.reserve(n); @@ -468,8 +468,8 @@ std::function ExactlyOnePerRowAndPerColumn( }; } -int ReindexArcs(std::vector *tails, std::vector *heads, - std::vector *literals) { +int ReindexArcs(std::vector* tails, std::vector* heads, + std::vector* literals) { const int num_arcs = tails->size(); if (num_arcs == 0) return 0; @@ -495,11 +495,11 @@ int ReindexArcs(std::vector *tails, std::vector *heads, return nodes.size(); } -std::function SubcircuitConstraint( - int num_nodes, const std::vector &tails, const std::vector &heads, - const std::vector &literals, +std::function SubcircuitConstraint( + int num_nodes, const std::vector& tails, const std::vector& heads, + const std::vector& literals, bool multiple_subcircuit_through_zero) { - return [=](Model *model) { + return [=](Model* model) { const int num_arcs = tails.size(); CHECK_GT(num_arcs, 0); CHECK_EQ(heads.size(), num_arcs); @@ -509,8 +509,8 @@ std::function SubcircuitConstraint( // as soon as we add the corresponding ExactlyOneConstraint(). 
auto sat_solver = model->GetOrCreate(); - std::vector > exactly_one_incoming(num_nodes); - std::vector > exactly_one_outgoing(num_nodes); + std::vector> exactly_one_incoming(num_nodes); + std::vector> exactly_one_outgoing(num_nodes); for (int arc = 0; arc < num_arcs; arc++) { const int tail = tails[arc]; const int head = heads[arc]; @@ -530,18 +530,18 @@ std::function SubcircuitConstraint( CircuitPropagator::Options options; options.multiple_subcircuit_through_zero = multiple_subcircuit_through_zero; - CircuitPropagator *constraint = new CircuitPropagator( + CircuitPropagator* constraint = new CircuitPropagator( num_nodes, tails, heads, literals, options, model); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); }; } -std::function CircuitCovering( - const std::vector > &graph, - const std::vector &distinguished_nodes) { - return [=](Model *model) { - CircuitCoveringPropagator *constraint = +std::function CircuitCovering( + const std::vector>& graph, + const std::vector& distinguished_nodes) { + return [=](Model* model) { + CircuitCoveringPropagator* constraint = new CircuitCoveringPropagator(graph, distinguished_nodes, model); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); diff --git a/ortools/sat/clause.cc b/ortools/sat/clause.cc index 4787f0eb53..e13bc85455 100644 --- a/ortools/sat/clause.cc +++ b/ortools/sat/clause.cc @@ -31,9 +31,9 @@ namespace { // Returns true if the given watcher list contains the given clause. 
template -bool WatcherListContains(const std::vector &list, - const SatClause &candidate) { - for (const Watcher &watcher : list) { +bool WatcherListContains(const std::vector& list, + const SatClause& candidate) { + for (const Watcher& watcher : list) { if (watcher.clause == &candidate) return true; } return false; @@ -49,7 +49,7 @@ void RemoveIf(Container c, Predicate p) { // ----- LiteralWatchers ----- -LiteralWatchers::LiteralWatchers(Model *model) +LiteralWatchers::LiteralWatchers(Model* model) : SatPropagator("LiteralWatchers"), implication_graph_(model->GetOrCreate()), trail_(model->GetOrCreate()), @@ -75,7 +75,7 @@ void LiteralWatchers::Resize(int num_variables) { // Note that this is the only place where we add Watcher so the DCHECK // guarantees that there are no duplicates. void LiteralWatchers::AttachOnFalse(Literal literal, Literal blocking_literal, - SatClause *clause) { + SatClause* clause) { SCOPED_TIME_STAT(&stats_); DCHECK(is_clean_); DCHECK(!WatcherListContains(watchers_on_false_[literal.Index()], *clause)); @@ -83,11 +83,11 @@ void LiteralWatchers::AttachOnFalse(Literal literal, Literal blocking_literal, Watcher(clause, blocking_literal)); } -bool LiteralWatchers::PropagateOnFalse(Literal false_literal, Trail *trail) { +bool LiteralWatchers::PropagateOnFalse(Literal false_literal, Trail* trail) { SCOPED_TIME_STAT(&stats_); DCHECK(is_clean_); - std::vector &watchers = watchers_on_false_[false_literal.Index()]; - const VariablesAssignment &assignment = trail->Assignment(); + std::vector& watchers = watchers_on_false_[false_literal.Index()]; + const VariablesAssignment& assignment = trail->Assignment(); // Note(user): It sounds better to inspect the list in order, this is because // small clauses like binary or ternary clauses will often propagate and thus @@ -108,7 +108,7 @@ bool LiteralWatchers::PropagateOnFalse(Literal false_literal, Trail *trail) { // If the other watched literal is true, just change the blocking literal. 
// Note that we use the fact that the first two literals of the clause are // the ones currently watched. - Literal *literals = it->clause->literals(); + Literal* literals = it->clause->literals(); const Literal other_watched_literal( LiteralIndex(literals[0].Index().value() ^ literals[1].Index().value() ^ false_literal.Index().value())); @@ -179,7 +179,7 @@ bool LiteralWatchers::PropagateOnFalse(Literal false_literal, Trail *trail) { return true; } -bool LiteralWatchers::Propagate(Trail *trail) { +bool LiteralWatchers::Propagate(Trail* trail) { const int old_index = trail->Index(); while (trail->Index() == old_index && propagation_trail_index_ < old_index) { const Literal literal = (*trail)[propagation_trail_index_++]; @@ -188,12 +188,12 @@ bool LiteralWatchers::Propagate(Trail *trail) { return true; } -absl::Span LiteralWatchers::Reason(const Trail &trail, +absl::Span LiteralWatchers::Reason(const Trail& trail, int trail_index) const { return reasons_[trail_index]->PropagationReason(); } -SatClause *LiteralWatchers::ReasonClause(int trail_index) const { +SatClause* LiteralWatchers::ReasonClause(int trail_index) const { return reasons_[trail_index]; } @@ -202,15 +202,15 @@ bool LiteralWatchers::AddClause(absl::Span literals) { } bool LiteralWatchers::AddClause(absl::Span literals, - Trail *trail) { - SatClause *clause = SatClause::Create(literals); + Trail* trail) { + SatClause* clause = SatClause::Create(literals); clauses_.push_back(clause); return AttachAndPropagate(clause, trail); } -SatClause *LiteralWatchers::AddRemovableClause( - const std::vector &literals, Trail *trail) { - SatClause *clause = SatClause::Create(literals); +SatClause* LiteralWatchers::AddRemovableClause( + const std::vector& literals, Trail* trail) { + SatClause* clause = SatClause::Create(literals); clauses_.push_back(clause); CHECK(AttachAndPropagate(clause, trail)); return clause; @@ -221,11 +221,11 @@ SatClause *LiteralWatchers::AddRemovableClause( // false. 
It returns false if the clause only contains literals assigned to // false. If only one literals is not false, it propagates it to true if it // is not already assigned. -bool LiteralWatchers::AttachAndPropagate(SatClause *clause, Trail *trail) { +bool LiteralWatchers::AttachAndPropagate(SatClause* clause, Trail* trail) { SCOPED_TIME_STAT(&stats_); const int size = clause->size(); - Literal *literals = clause->literals(); + Literal* literals = clause->literals(); // Select the first two literals that are not assigned to false and put them // on position 0 and 1. @@ -270,8 +270,8 @@ bool LiteralWatchers::AttachAndPropagate(SatClause *clause, Trail *trail) { return true; } -void LiteralWatchers::Attach(SatClause *clause, Trail *trail) { - Literal *literals = clause->literals(); +void LiteralWatchers::Attach(SatClause* clause, Trail* trail) { + Literal* literals = clause->literals(); CHECK(!trail->Assignment().LiteralIsAssigned(literals[0])); CHECK(!trail->Assignment().LiteralIsAssigned(literals[1])); @@ -280,7 +280,7 @@ void LiteralWatchers::Attach(SatClause *clause, Trail *trail) { AttachOnFalse(literals[1], literals[0], clause); } -void LiteralWatchers::InternalDetach(SatClause *clause) { +void LiteralWatchers::InternalDetach(SatClause* clause) { --num_watched_clauses_; const size_t size = clause->size(); if (drat_proof_handler_ != nullptr && size > 2) { @@ -290,18 +290,18 @@ void LiteralWatchers::InternalDetach(SatClause *clause) { clause->Clear(); } -void LiteralWatchers::LazyDetach(SatClause *clause) { +void LiteralWatchers::LazyDetach(SatClause* clause) { InternalDetach(clause); is_clean_ = false; needs_cleaning_.Set(clause->FirstLiteral().Index()); needs_cleaning_.Set(clause->SecondLiteral().Index()); } -void LiteralWatchers::Detach(SatClause *clause) { +void LiteralWatchers::Detach(SatClause* clause) { InternalDetach(clause); for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) { needs_cleaning_.Clear(l.Index()); - 
RemoveIf(&(watchers_on_false_[l.Index()]), [](const Watcher &watcher) { + RemoveIf(&(watchers_on_false_[l.Index()]), [](const Watcher& watcher) { return !watcher.clause->IsAttached(); }); } @@ -326,7 +326,7 @@ void LiteralWatchers::AttachAllClauses() { watchers_on_false_.resize(needs_cleaning_.size().value()); DeleteRemovedClauses(); - for (SatClause *clause : clauses_) { + for (SatClause* clause : clauses_) { ++num_watched_clauses_; CHECK_GE(clause->size(), 2); AttachOnFalse(clause->FirstLiteral(), clause->SecondLiteral(), clause); @@ -354,7 +354,7 @@ bool LiteralWatchers::InprocessingFixLiteral(Literal true_literal) { // TODO(user): We could do something slower if the clauses are attached like // we do for InprocessingRewriteClause(). -void LiteralWatchers::InprocessingRemoveClause(SatClause *clause) { +void LiteralWatchers::InprocessingRemoveClause(SatClause* clause) { CHECK(!all_clauses_are_attached_); if (drat_proof_handler_ != nullptr) { drat_proof_handler_->DeleteClause(clause->AsSpan()); @@ -364,7 +364,7 @@ void LiteralWatchers::InprocessingRemoveClause(SatClause *clause) { } bool LiteralWatchers::InprocessingRewriteClause( - SatClause *clause, absl::Span new_clause) { + SatClause* clause, absl::Span new_clause) { if (new_clause.empty()) return false; // UNSAT. 
if (DEBUG_MODE) { @@ -398,7 +398,7 @@ bool LiteralWatchers::InprocessingRewriteClause( clause->Clear(); for (const Literal l : {clause->FirstLiteral(), clause->SecondLiteral()}) { needs_cleaning_.Clear(l.Index()); - RemoveIf(&(watchers_on_false_[l.Index()]), [](const Watcher &watcher) { + RemoveIf(&(watchers_on_false_[l.Index()]), [](const Watcher& watcher) { return !watcher.clause->IsAttached(); }); } @@ -411,7 +411,7 @@ bool LiteralWatchers::InprocessingRewriteClause( return true; } -SatClause *LiteralWatchers::InprocessingAddClause( +SatClause* LiteralWatchers::InprocessingAddClause( absl::Span new_clause) { CHECK(!new_clause.empty()); CHECK(!all_clauses_are_attached_); @@ -432,7 +432,7 @@ SatClause *LiteralWatchers::InprocessingAddClause( return nullptr; } - SatClause *clause = SatClause::Create(new_clause); + SatClause* clause = SatClause::Create(new_clause); clauses_.push_back(clause); return clause; } @@ -441,7 +441,7 @@ void LiteralWatchers::CleanUpWatchers() { SCOPED_TIME_STAT(&stats_); for (LiteralIndex index : needs_cleaning_.PositionsSetAtLeastOnce()) { DCHECK(needs_cleaning_[index]); - RemoveIf(&(watchers_on_false_[index]), [](const Watcher &watcher) { + RemoveIf(&(watchers_on_false_[index]), [](const Watcher& watcher) { return !watcher.clause->IsAttached(); }); needs_cleaning_.Clear(index); @@ -460,13 +460,13 @@ void LiteralWatchers::DeleteRemovedClauses() { to_minimize_index_ = std::stable_partition(clauses_.begin(), clauses_.begin() + to_minimize_index_, - [](SatClause *a) { return a->IsAttached(); }) - + [](SatClause* a) { return a->IsAttached(); }) - clauses_.begin(); // Do the proper deletion. 
- std::vector::iterator iter = + std::vector::iterator iter = std::stable_partition(clauses_.begin(), clauses_.end(), - [](SatClause *a) { return a->IsAttached(); }); + [](SatClause* a) { return a->IsAttached(); }); gtl::STLDeleteContainerPointers(iter, clauses_.end()); clauses_.erase(iter, clauses_.end()); } @@ -492,8 +492,7 @@ void BinaryImplicationGraph::AddBinaryClause(Literal a, Literal b) { if (drat_proof_handler_ != nullptr) { // TODO(user): Like this we will duplicate all binary clause from the // problem. However this leads to a simpler API (since we don't need to - // special case the loading of the original clauses) and we mainly use - // drat + // special case the loading of the original clauses) and we mainly use drat // proof for testing anyway. drat_proof_handler_->AddClause({a, b}); } @@ -510,7 +509,7 @@ bool BinaryImplicationGraph::AddBinaryClauseDuringSearch(Literal a, Literal b) { if (num_implications_ == 0) propagation_trail_index_ = trail_->Index(); AddBinaryClause(a, b); - const auto &assignment = trail_->Assignment(); + const auto& assignment = trail_->Assignment(); if (assignment.LiteralIsFalse(a)) { if (assignment.LiteralIsAssigned(b)) { if (assignment.LiteralIsFalse(b)) return false; @@ -563,7 +562,7 @@ bool BinaryImplicationGraph::FixLiteral(Literal true_literal) { // cleaning/copying the at most ones on the fly to the beginning of the same // buffer. 
bool BinaryImplicationGraph::CleanUpAndAddAtMostOnes(const int base_index) { - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); int local_end = base_index; const int buffer_size = at_most_one_buffer_.size(); for (int i = base_index; i < buffer_size; ++i) { @@ -658,10 +657,10 @@ bool BinaryImplicationGraph::CleanUpAndAddAtMostOnes(const int base_index) { } bool BinaryImplicationGraph::PropagateOnTrue(Literal true_literal, - Trail *trail) { + Trail* trail) { SCOPED_TIME_STAT(&stats_); - const VariablesAssignment &assignment = trail->Assignment(); + const VariablesAssignment& assignment = trail->Assignment(); DCHECK(assignment.LiteralIsTrue(true_literal)); // Note(user): This update is not exactly correct because in case of conflict @@ -727,7 +726,7 @@ bool BinaryImplicationGraph::PropagateOnTrue(Literal true_literal, return true; } -bool BinaryImplicationGraph::Propagate(Trail *trail) { +bool BinaryImplicationGraph::Propagate(Trail* trail) { if (IsEmpty()) { propagation_trail_index_ = trail->Index(); return true; @@ -740,7 +739,7 @@ bool BinaryImplicationGraph::Propagate(Trail *trail) { } absl::Span BinaryImplicationGraph::Reason( - const Trail &trail, int trail_index) const { + const Trail& trail, int trail_index) const { return {&reasons_[trail_index], 1}; } @@ -752,7 +751,7 @@ absl::Span BinaryImplicationGraph::Reason( // // TODO(user): Also consider at most one? void BinaryImplicationGraph::MinimizeConflictWithReachability( - std::vector *conflict) { + std::vector* conflict) { SCOPED_TIME_STAT(&stats_); dfs_stack_.clear(); @@ -773,7 +772,7 @@ void BinaryImplicationGraph::MinimizeConflictWithReachability( // We treat the direct implications differently so we can also remove the // redundant implications from this list at the same time. 
- auto &direct_implications = implications_[root_literal_index]; + auto& direct_implications = implications_[root_literal_index]; for (const Literal l : direct_implications) { if (is_marked_[l.Index()]) continue; dfs_stack_.push_back(l); @@ -829,8 +828,8 @@ void BinaryImplicationGraph::MinimizeConflictWithReachability( // will be implied if the 1-UIP literal is assigned to false, and the classic // minimization algorithm can take advantage of that. void BinaryImplicationGraph::MinimizeConflictFirst( - const Trail &trail, std::vector *conflict, - SparseBitset *marked) { + const Trail& trail, std::vector* conflict, + SparseBitset* marked) { SCOPED_TIME_STAT(&stats_); CHECK(!conflict->empty()); is_marked_.ClearAndResize(LiteralIndex(implications_.size())); @@ -847,15 +846,15 @@ void BinaryImplicationGraph::MinimizeConflictFirst( // computation to remove redundant implication in the implication list of the // first UIP conflict. void BinaryImplicationGraph::MinimizeConflictFirstWithTransitiveReduction( - const Trail &trail, std::vector *conflict, - SparseBitset *marked, random_engine_t *random) { + const Trail& trail, std::vector* conflict, + SparseBitset* marked, random_engine_t* random) { SCOPED_TIME_STAT(&stats_); const LiteralIndex root_literal_index = conflict->front().NegatedIndex(); is_marked_.ClearAndResize(LiteralIndex(implications_.size())); is_marked_.Set(root_literal_index); int new_size = 0; - auto &direct_implications = implications_[root_literal_index]; + auto& direct_implications = implications_[root_literal_index]; // The randomization allow to find more redundant implication since to find // a => b and remove b, a must be before b in direct_implications. 
Note that @@ -891,7 +890,7 @@ void BinaryImplicationGraph::MinimizeConflictFirstWithTransitiveReduction( } void BinaryImplicationGraph::RemoveRedundantLiterals( - std::vector *conflict) { + std::vector* conflict) { SCOPED_TIME_STAT(&stats_); int new_index = 1; for (int i = 1; i < conflict->size(); ++i) { @@ -909,7 +908,7 @@ void BinaryImplicationGraph::RemoveRedundantLiterals( // TODO(user): Also consider at most one? void BinaryImplicationGraph::MinimizeConflictExperimental( - const Trail &trail, std::vector *conflict) { + const Trail& trail, std::vector* conflict) { SCOPED_TIME_STAT(&stats_); is_marked_.ClearAndResize(LiteralIndex(implications_.size())); is_simplified_.ClearAndResize(LiteralIndex(implications_.size())); @@ -967,7 +966,7 @@ void BinaryImplicationGraph::RemoveFixedVariables() { const int new_num_fixed = trail_->Index(); if (num_processed_fixed_variables_ == new_num_fixed) return; - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); is_marked_.ClearAndResize(LiteralIndex(implications_.size())); for (; num_processed_fixed_variables_ < new_num_fixed; ++num_processed_fixed_variables_) { @@ -1002,7 +1001,7 @@ void BinaryImplicationGraph::RemoveFixedVariables() { } } for (const LiteralIndex i : is_marked_.PositionsSetAtLeastOnce()) { - RemoveIf(&implications_[i], [&assignment](const Literal &lit) { + RemoveIf(&implications_[i], [&assignment](const Literal& lit) { return assignment.LiteralIsTrue(lit); }); } @@ -1016,22 +1015,21 @@ void BinaryImplicationGraph::RemoveFixedVariables() { class SccGraph { public: using Implication = - gtl::ITIVector >; - using AtMostOne = - gtl::ITIVector >; + gtl::ITIVector>; + using AtMostOne = gtl::ITIVector>; using SccFinder = StronglyConnectedComponentsFinder > >; + std::vector>>; - explicit SccGraph(SccFinder *finder, Implication *graph, - AtMostOne *at_most_ones, - std::vector *at_most_one_buffer) + explicit SccGraph(SccFinder* finder, 
Implication* graph, + AtMostOne* at_most_ones, + std::vector* at_most_one_buffer) : finder_(*finder), implications_(*graph), at_most_ones_(*at_most_ones), at_most_one_buffer_(*at_most_one_buffer) {} - const std::vector &operator[](int32 node) const { + const std::vector& operator[](int32 node) const { tmp_.clear(); for (const Literal l : implications_[LiteralIndex(node)]) { tmp_.push_back(l.Index().value()); @@ -1109,10 +1107,10 @@ class SccGraph { mutable int64 work_done_ = 0; private: - const SccFinder &finder_; - const Implication &implications_; - const AtMostOne &at_most_ones_; - const std::vector &at_most_one_buffer_; + const SccFinder& finder_; + const Implication& implications_; + const AtMostOne& at_most_ones_; + const std::vector& at_most_one_buffer_; mutable std::vector tmp_; @@ -1132,12 +1130,12 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { // Lets remove all fixed variables first. if (!Propagate(trail_)) return false; RemoveFixedVariables(); - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); // TODO(user): We could just do it directly though. int num_fixed_during_scc = 0; const int32 size(implications_.size()); - std::vector > scc; + std::vector> scc; double dtime = 0.0; { SccGraph::SccFinder finder; @@ -1160,7 +1158,7 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { int num_equivalences = 0; reverse_topological_order_.clear(); - for (std::vector &component : scc) { + for (std::vector& component : scc) { // If one is fixed then all must be fixed. Note that the reason why the // propagation didn't already do that and we don't always get fixed // component of size 1 is because of the potential newly fixed literals @@ -1215,8 +1213,8 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { // Note that because we process list in reverse topological order, this // is only needed if there is any equivalence before this point. 
if (num_equivalences > 0) { - auto &representative_list = implications_[representative]; - for (Literal &ref : representative_list) { + auto& representative_list = implications_[representative]; + for (Literal& ref : representative_list) { const LiteralIndex rep = representative_of_[ref.Index()]; if (rep == representative) continue; if (rep == kNoLiteralIndex) continue; @@ -1246,7 +1244,7 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { // Merge all the lists in implications_[representative]. // Note that we do not want representative in its own list. - auto &representative_list = implications_[representative]; + auto& representative_list = implications_[representative]; int new_size = 0; for (const Literal l : representative_list) { const Literal rep = RepresentativeOf(l); @@ -1256,7 +1254,7 @@ bool BinaryImplicationGraph::DetectEquivalences(bool log_info) { representative_list.resize(new_size); for (int i = 1; i < component.size(); ++i) { const Literal literal = Literal(LiteralIndex(component[i])); - auto &ref = implications_[literal.Index()]; + auto& ref = implications_[literal.Index()]; for (const Literal l : ref) { const Literal rep = RepresentativeOf(l); if (rep.Index() != representative) representative_list.push_back(rep); @@ -1354,7 +1352,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { if (is_redundant_[root]) continue; if (trail_->Assignment().LiteralIsAssigned(Literal(root))) continue; - auto &direct_implications = implications_[root]; + auto& direct_implications = implications_[root]; if (direct_implications.empty()) continue; // This is a "poor" version of the tree look stuff, but it does show good @@ -1405,7 +1403,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { // Failed literal probing. If both x and not(x) are marked then root must be // false. Note that because we process "roots" in reverse topological order, // we will fix the LCA of x and not(x) first. 
- const auto &marked_positions = is_marked_.PositionsSetAtLeastOnce(); + const auto& marked_positions = is_marked_.PositionsSetAtLeastOnce(); for (; marked_index < marked_positions.size(); ++marked_index) { const LiteralIndex i = marked_positions[marked_index]; if (is_marked_[Literal(i).NegatedIndex()]) { @@ -1468,7 +1466,7 @@ bool BinaryImplicationGraph::ComputeTransitiveReduction(bool log_info) { namespace { -bool IntersectionIsEmpty(const std::vector &a, const std::vector &b) { +bool IntersectionIsEmpty(const std::vector& a, const std::vector& b) { DCHECK(std::is_sorted(a.begin(), a.end())); DCHECK(std::is_sorted(b.begin(), b.end())); int i = 0; @@ -1486,7 +1484,7 @@ bool IntersectionIsEmpty(const std::vector &a, const std::vector &b) { // Used by TransformIntoMaxCliques(). struct VectorHash { - std::size_t operator()(const std::vector &at_most_one) const { + std::size_t operator()(const std::vector& at_most_one) const { size_t hash = 0; for (Literal literal : at_most_one) { hash = util_hash::Hash(literal.Index().value(), hash); @@ -1498,7 +1496,7 @@ struct VectorHash { } // namespace bool BinaryImplicationGraph::TransformIntoMaxCliques( - std::vector > *at_most_ones, + std::vector>* at_most_ones, int64 max_num_explored_nodes) { // The code below assumes a DAG. if (!DetectEquivalences()) return false; @@ -1509,7 +1507,7 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( int num_added = 0; absl::flat_hash_set, VectorHash> max_cliques; - gtl::ITIVector > max_cliques_containing( + gtl::ITIVector> max_cliques_containing( implications_.size()); // We starts by processing larger constraints first. @@ -1517,7 +1515,7 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( [](const std::vector a, const std::vector b) { return a.size() > b.size(); }); - for (std::vector &clique : *at_most_ones) { + for (std::vector& clique : *at_most_ones) { const int old_size = clique.size(); // Remap the clique to only use representative. 
@@ -1526,7 +1524,7 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( // indices as representative, this make sure that if possible, we express // the clique in term of user provided variable (that are always created // first). - for (Literal &ref : clique) { + for (Literal& ref : clique) { DCHECK_LT(ref.Index(), representative_of_.size()); const LiteralIndex rep = representative_of_[ref.Index()]; if (rep == kNoLiteralIndex) continue; @@ -1577,8 +1575,8 @@ bool BinaryImplicationGraph::TransformIntoMaxCliques( std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( const absl::Span at_most_one, - const gtl::ITIVector &can_be_included, - const gtl::ITIVector &expanded_lp_values) { + const gtl::ITIVector& can_be_included, + const gtl::ITIVector& expanded_lp_values) { std::vector clique(at_most_one.begin(), at_most_one.end()); std::vector intersection; double clique_weight = 0.0; @@ -1641,10 +1639,10 @@ std::vector BinaryImplicationGraph::ExpandAtMostOneWithWeight( return clique; } -const std::vector > - &BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( - const std::vector &literals, - const std::vector &lp_values) { +const std::vector>& +BinaryImplicationGraph::GenerateAtMostOnesWithLargeWeight( + const std::vector& literals, + const std::vector& lp_values) { // We only want to generate a cut with literals from the LP, not extra ones. const int num_literals = implications_.size(); gtl::ITIVector can_be_included(num_literals, false); @@ -1665,7 +1663,7 @@ const std::vector > Literal a; Literal b; double sum; - bool operator<(const Candidate &other) const { return sum > other.sum; } + bool operator<(const Candidate& other) const { return sum > other.sum; } }; std::vector candidates; @@ -1717,7 +1715,7 @@ const std::vector > // Note that we only expand using literal from the LP. 
tmp_cuts_.clear(); std::vector at_most_one; - for (const Candidate &candidate : candidates) { + for (const Candidate& candidate : candidates) { at_most_one = ExpandAtMostOneWithWeight( {candidate.a, candidate.b}, can_be_included, expanded_lp_values); if (!at_most_one.empty()) tmp_cuts_.push_back(at_most_one); @@ -1795,7 +1793,7 @@ std::vector BinaryImplicationGraph::ExpandAtMostOne( // TODO(user): lazy cleanup the lists on is_removed_? // TODO(user): Mark fixed variable as is_removed_ for faster iteration? -const std::vector &BinaryImplicationGraph::DirectImplications( +const std::vector& BinaryImplicationGraph::DirectImplications( Literal literal) { CHECK(!is_removed_[literal.Index()]); @@ -1806,7 +1804,7 @@ const std::vector &BinaryImplicationGraph::DirectImplications( direct_implications_.clear(); // Fill new state. - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); CHECK(!assignment.LiteralIsAssigned(literal)); for (const Literal l : implications_[literal.Index()]) { if (l == literal) continue; @@ -1839,11 +1837,11 @@ const std::vector &BinaryImplicationGraph::DirectImplications( } bool BinaryImplicationGraph::FindFailedLiteralAroundVar(BooleanVariable var, - bool *is_unsat) { + bool* is_unsat) { const int saved_index = propagation_trail_index_; CHECK_EQ(propagation_trail_index_, trail_->Index()); // Propagation done. - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); if (assignment.VariableIsAssigned(var)) return false; const Literal literal(var, true); @@ -1884,7 +1882,7 @@ int64 BinaryImplicationGraph::NumImplicationOnVariableRemoval( // For all possible a => var => b, add a => b. 
void BinaryImplicationGraph::RemoveBooleanVariable( - BooleanVariable var, std::deque > *postsolve_clauses) { + BooleanVariable var, std::deque>* postsolve_clauses) { const Literal literal(var, true); direct_implications_of_negated_literal_ = DirectImplications(literal.Negated()); @@ -1914,8 +1912,7 @@ void BinaryImplicationGraph::RemoveBooleanVariable( postsolve_clauses->push_back({Literal(var, true), a_negated}); } - // We need to remove any occurrence of var in our implication lists, this - // will + // We need to remove any occurrence of var in our implication lists, this will // be delayed to the CleanupAllRemovedVariables() call. for (LiteralIndex index : {literal.Index(), literal.NegatedIndex()}) { is_removed_[index] = true; @@ -1928,7 +1925,7 @@ void BinaryImplicationGraph::RemoveBooleanVariable( } void BinaryImplicationGraph::CleanupAllRemovedVariables() { - for (auto &implication : implications_) { + for (auto& implication : implications_) { int new_size = 0; for (const Literal l : implication) { if (!is_removed_[l.Index()]) implication[new_size++] = l; @@ -1944,9 +1941,9 @@ void BinaryImplicationGraph::CleanupAllRemovedVariables() { // ----- SatClause ----- // static -SatClause *SatClause::Create(absl::Span literals) { +SatClause* SatClause::Create(absl::Span literals) { CHECK_GE(literals.size(), 2); - SatClause *clause = reinterpret_cast( + SatClause* clause = reinterpret_cast( ::operator new(sizeof(SatClause) + literals.size() * sizeof(Literal))); clause->size_ = literals.size(); for (int i = 0; i < literals.size(); ++i) { @@ -1958,7 +1955,7 @@ SatClause *SatClause::Create(absl::Span literals) { // Note that for an attached clause, removing fixed literal is okay because if // any of the watched literal is assigned, then the clause is necessarily true. 
bool SatClause::RemoveFixedLiteralsAndTestIfTrue( - const VariablesAssignment &assignment) { + const VariablesAssignment& assignment) { DCHECK(IsAttached()); if (assignment.VariableIsAssigned(literals_[0].Variable()) || assignment.VariableIsAssigned(literals_[1].Variable())) { @@ -1981,7 +1978,7 @@ bool SatClause::RemoveFixedLiteralsAndTestIfTrue( return false; } -bool SatClause::IsSatisfied(const VariablesAssignment &assignment) const { +bool SatClause::IsSatisfied(const VariablesAssignment& assignment) const { for (const Literal literal : *this) { if (assignment.LiteralIsTrue(literal)) return true; } diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index df0fd3a12a..e02218b6e3 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -54,10 +54,10 @@ class SatClause { // treated separatly and never constructed. In practice, we do use // BinaryImplicationGraph for the clause of size 2, so this is mainly used for // size at least 3. - static SatClause *Create(absl::Span literals); + static SatClause* Create(absl::Span literals); // Non-sized delete because this is a tail-padded class. - void operator delete(void *p) { + void operator delete(void* p) { ::operator delete(p); // non-sized delete } @@ -66,8 +66,8 @@ class SatClause { int empty() const { return size_ == 0; } // Allows for range based iteration: for (Literal literal : clause) {}. - const Literal *const begin() const { return &(literals_[0]); } - const Literal *const end() const { return &(literals_[size_]); } + const Literal* const begin() const { return &(literals_[0]); } + const Literal* const end() const { return &(literals_[size_]); } // Returns the first and second literals. These are always the watched // literals if the clause is attached in the LiteralWatchers. @@ -97,12 +97,12 @@ class SatClause { // // Note that the removed literal can still be accessed in the portion [size, // old_size) of literals(). 
- bool RemoveFixedLiteralsAndTestIfTrue(const VariablesAssignment &assignment); + bool RemoveFixedLiteralsAndTestIfTrue(const VariablesAssignment& assignment); // Returns true if the clause is satisfied for the given assignment. Note that // the assignment may be partial, so false does not mean that the clause can't // be satisfied by completing the assignment. - bool IsSatisfied(const VariablesAssignment &assignment) const; + bool IsSatisfied(const VariablesAssignment& assignment) const; // Returns true if the clause is attached to a LiteralWatchers. bool IsAttached() const { return size_ > 0; } @@ -114,7 +114,7 @@ class SatClause { // call Clear()/Rewrite. friend class LiteralWatchers; - Literal *literals() { return &(literals_[0]); } + Literal* literals() { return &(literals_[0]); } // Marks the clause so that the next call to CleanUpWatchers() can identify it // and actually detach it. We use size_ = 0 for this since the clause will @@ -159,37 +159,37 @@ class BinaryImplicationGraph; // clauses and is the place where all the clauses are stored. class LiteralWatchers : public SatPropagator { public: - explicit LiteralWatchers(Model *model); + explicit LiteralWatchers(Model* model); ~LiteralWatchers() override; // Must be called before adding clauses refering to such variables. void Resize(int num_variables); // SatPropagator API. - bool Propagate(Trail *trail) final; - absl::Span Reason(const Trail &trail, + bool Propagate(Trail* trail) final; + absl::Span Reason(const Trail& trail, int trail_index) const final; // Returns the reason of the variable at given trail_index. This only works // for variable propagated by this class and is almost the same as Reason() // with a different return format. - SatClause *ReasonClause(int trail_index) const; + SatClause* ReasonClause(int trail_index) const; // Adds a new clause and perform initial propagation for this clause only. 
- bool AddClause(absl::Span literals, Trail *trail); + bool AddClause(absl::Span literals, Trail* trail); bool AddClause(absl::Span literals); // Same as AddClause() for a removable clause. This is only called on learned // conflict, so this should never have all its literal at false (CHECKED). - SatClause *AddRemovableClause(const std::vector &literals, - Trail *trail); + SatClause* AddRemovableClause(const std::vector& literals, + Trail* trail); // Lazily detach the given clause. The deletion will actually occur when // CleanUpWatchers() is called. The later needs to be called before any other // function in this class can be called. This is DCHECKed. // // Note that we remove the clause from clauses_info_ right away. - void LazyDetach(SatClause *clause); + void LazyDetach(SatClause* clause); void CleanUpWatchers(); // Detaches the given clause right away. @@ -197,18 +197,18 @@ class LiteralWatchers : public SatPropagator { // TODO(user): It might be better to have a "slower" mode in // PropagateOnFalse() that deal with detached clauses in the watcher list and // is activated until the next CleanUpWatchers() calls. - void Detach(SatClause *clause); + void Detach(SatClause* clause); // Attaches the given clause. The first two literal of the clause must // be unassigned and the clause must not be already attached. - void Attach(SatClause *clause, Trail *trail); + void Attach(SatClause* clause, Trail* trail); // Reclaims the memory of the lazily removed clauses (their size was set to // zero) and remove them from AllClausesInCreationOrder() this work in // O(num_clauses()). void DeleteRemovedClauses(); int64 num_clauses() const { return clauses_.size(); } - const std::vector &AllClausesInCreationOrder() const { + const std::vector& AllClausesInCreationOrder() const { return clauses_; } @@ -216,11 +216,11 @@ class LiteralWatchers : public SatPropagator { // This is the case for clauses that were learned during search. 
Note however // that some learned clause are kept forever (heuristics) and do not appear // here. - bool IsRemovable(SatClause *const clause) const { + bool IsRemovable(SatClause* const clause) const { return gtl::ContainsKey(clauses_info_, clause); } int64 num_removable_clauses() const { return clauses_info_.size(); } - absl::flat_hash_map *mutable_clauses_info() { + absl::flat_hash_map* mutable_clauses_info() { return &clauses_info_; } @@ -236,14 +236,14 @@ class LiteralWatchers : public SatPropagator { // Number of clauses currently watched. int64 num_watched_clauses() const { return num_watched_clauses_; } - void SetDratProofHandler(DratProofHandler *drat_proof_handler) { + void SetDratProofHandler(DratProofHandler* drat_proof_handler) { drat_proof_handler_ = drat_proof_handler; } // Really basic algorithm to return a clause to try to minimize. We simply // loop over the clause that we keep forever, in creation order. This starts // by the problem clauses and then the learned one that we keep forever. - SatClause *NextClauseToMinimize() { + SatClause* NextClauseToMinimize() { for (; to_minimize_index_ < clauses_.size(); ++to_minimize_index_) { if (!clauses_[to_minimize_index_]->IsAttached()) continue; if (!IsRemovable(clauses_[to_minimize_index_])) { @@ -272,21 +272,21 @@ class LiteralWatchers : public SatPropagator { void AttachAllClauses(); // These must only be called between [Detach/Attach]AllClauses() calls. - void InprocessingRemoveClause(SatClause *clause); + void InprocessingRemoveClause(SatClause* clause); ABSL_MUST_USE_RESULT bool InprocessingFixLiteral(Literal true_literal); ABSL_MUST_USE_RESULT bool InprocessingRewriteClause( - SatClause *clause, absl::Span new_clause); + SatClause* clause, absl::Span new_clause); // This can return nullptr if new_clause was of size one or two as these are // treated differently. Note that none of the variable should be fixed in the // given new clause. 
- SatClause *InprocessingAddClause(absl::Span new_clause); + SatClause* InprocessingAddClause(absl::Span new_clause); // Contains, for each literal, the list of clauses that need to be inspected // when the corresponding literal becomes false. struct Watcher { Watcher() {} - Watcher(SatClause *c, Literal b, int i = 2) + Watcher(SatClause* c, Literal b, int i = 2) : blocking_literal(b), start_index(i), clause(c) {} // Optimization. A literal from the clause that sometimes allow to not even @@ -304,45 +304,45 @@ class LiteralWatchers : public SatPropagator { // because of the struct alignment, we store it here instead. int32 start_index; - SatClause *clause; + SatClause* clause; }; // This is exposed since some inprocessing code can heuristically exploit the // currently watched literal and blocking literal to do some simplification. - const std::vector &WatcherListOnFalse(Literal false_literal) const { + const std::vector& WatcherListOnFalse(Literal false_literal) const { return watchers_on_false_[false_literal.Index()]; } private: // Attaches the given clause. This eventually propagates a literal which is // enqueued on the trail. Returns false if a contradiction was encountered. - bool AttachAndPropagate(SatClause *clause, Trail *trail); + bool AttachAndPropagate(SatClause* clause, Trail* trail); // Launches all propagation when the given literal becomes false. // Returns false if a contradiction was encountered. - bool PropagateOnFalse(Literal false_literal, Trail *trail); + bool PropagateOnFalse(Literal false_literal, Trail* trail); // Attaches the given clause to the event: the given literal becomes false. // The blocking_literal can be any literal from the clause, it is used to // speed up PropagateOnFalse() by skipping the clause if it is true. void AttachOnFalse(Literal literal, Literal blocking_literal, - SatClause *clause); + SatClause* clause); // Common code between LazyDetach() and Detach(). 
- void InternalDetach(SatClause *clause); + void InternalDetach(SatClause* clause); - gtl::ITIVector > watchers_on_false_; + gtl::ITIVector> watchers_on_false_; // SatClause reasons by trail_index. - std::vector reasons_; + std::vector reasons_; // Indicates if the corresponding watchers_on_false_ list need to be // cleaned. The boolean is_clean_ is just used in DCHECKs. SparseBitset needs_cleaning_; bool is_clean_ = true; - BinaryImplicationGraph *implication_graph_; - Trail *trail_; + BinaryImplicationGraph* implication_graph_; + Trail* trail_; int64 num_inspected_clauses_; int64 num_inspected_clause_literals_; @@ -359,14 +359,14 @@ class LiteralWatchers : public SatPropagator { // Note that the unit clauses are not kept here and if the parameter // treat_binary_clauses_separately is true, the binary clause are not kept // here either. - std::vector clauses_; + std::vector clauses_; int to_minimize_index_ = 0; // Only contains removable clause. - absl::flat_hash_map clauses_info_; + absl::flat_hash_map clauses_info_; - DratProofHandler *drat_proof_handler_ = nullptr; + DratProofHandler* drat_proof_handler_ = nullptr; DISALLOW_COPY_AND_ASSIGN(LiteralWatchers); }; @@ -400,11 +400,11 @@ class BinaryClauseManager { } // Returns the newly added BinaryClause since the last ClearNewlyAdded() call. 
- const std::vector &newly_added() const { return newly_added_; } + const std::vector& newly_added() const { return newly_added_; } void ClearNewlyAdded() { newly_added_.clear(); } private: - absl::flat_hash_set > set_; + absl::flat_hash_set> set_; std::vector newly_added_; DISALLOW_COPY_AND_ASSIGN(BinaryClauseManager); }; @@ -455,7 +455,7 @@ class BinaryClauseManager { // http://www.cs.helsinki.fi/u/mjarvisa/papers/heule-jarvisalo-biere.sat11.pdf class BinaryImplicationGraph : public SatPropagator { public: - explicit BinaryImplicationGraph(Model *model) + explicit BinaryImplicationGraph(Model* model) : SatPropagator("BinaryImplicationGraph"), stats_("BinaryImplicationGraph"), time_limit_(model->GetOrCreate()), @@ -472,8 +472,8 @@ class BinaryImplicationGraph : public SatPropagator { } // SatPropagator interface. - bool Propagate(Trail *trail) final; - absl::Span Reason(const Trail &trail, + bool Propagate(Trail* trail) final; + absl::Span Reason(const Trail& trail, int trail_index) const final; // Resizes the data structure. @@ -512,14 +512,14 @@ class BinaryImplicationGraph : public SatPropagator { // we use different heuristics/algorithms to do this minimization. // See the binary_minimization_algorithm SAT parameter and the .cc for more // details about the different algorithms. 
- void MinimizeConflictWithReachability(std::vector *c); - void MinimizeConflictExperimental(const Trail &trail, - std::vector *c); - void MinimizeConflictFirst(const Trail &trail, std::vector *c, - SparseBitset *marked); + void MinimizeConflictWithReachability(std::vector* c); + void MinimizeConflictExperimental(const Trail& trail, + std::vector* c); + void MinimizeConflictFirst(const Trail& trail, std::vector* c, + SparseBitset* marked); void MinimizeConflictFirstWithTransitiveReduction( - const Trail &trail, std::vector *c, - SparseBitset *marked, random_engine_t *random); + const Trail& trail, std::vector* c, + SparseBitset* marked, random_engine_t* random); // This must only be called at decision level 0 after all the possible // propagations. It: @@ -544,14 +544,14 @@ class BinaryImplicationGraph : public SatPropagator { // One must call DetectEquivalences() first, this is CHECKed. // Returns a list so that if x => y, then x is after y. - const std::vector &ReverseTopologicalOrder() const { + const std::vector& ReverseTopologicalOrder() const { CHECK(is_dag_); return reverse_topological_order_; } // Returns the list of literal "directly" implied by l. Beware that this can // easily change behind your back if you modify the solver state. - const absl::InlinedVector &Implications(Literal l) const { + const absl::InlinedVector& Implications(Literal l) const { return implications_[l.Index()]; } @@ -585,7 +585,7 @@ class BinaryImplicationGraph : public SatPropagator { // // Returns false if the model is detected to be UNSAT (this needs to call // DetectEquivalences() if not already done). - bool TransformIntoMaxCliques(std::vector > *at_most_ones, + bool TransformIntoMaxCliques(std::vector>* at_most_ones, int64 max_num_explored_nodes = 1e8); // LP clique cut heuristic. Returns a set of "at most one" constraints on the @@ -597,9 +597,9 @@ class BinaryImplicationGraph : public SatPropagator { // only generate clique with these literals or their negation. 
// // TODO(user): Refine the heuristic and unit test! - const std::vector > &GenerateAtMostOnesWithLargeWeight( - const std::vector &literals, - const std::vector &lp_values); + const std::vector>& GenerateAtMostOnesWithLargeWeight( + const std::vector& literals, + const std::vector& lp_values); // Number of literal propagated by this class (including conflicts). int64 num_propagations() const { return num_propagations_; } @@ -643,17 +643,16 @@ class BinaryImplicationGraph : public SatPropagator { // TODO(user): When extracting to cp_model.proto we could be more efficient // by extracting bool_and constraint with many lhs terms. template - void ExtractAllBinaryClauses(Output *out) const { + void ExtractAllBinaryClauses(Output* out) const { // TODO(user): Ideally we should just never have duplicate clauses in this // class. But it seems we do in some corner cases, so lets not output them // twice. - absl::flat_hash_set > + absl::flat_hash_set> duplicate_detection; for (LiteralIndex i(0); i < implications_.size(); ++i) { const Literal a = Literal(i).Negated(); for (const Literal b : implications_[i]) { - // Note(user): We almost always have both a => b and not(b) => not(a) - // in + // Note(user): We almost always have both a => b and not(b) => not(a) in // our implications_ database. Except if ComputeTransitiveReduction() // was aborted early, but in this case, if only one is present, the // other could be removed, so we shouldn't need to output it. @@ -665,7 +664,7 @@ class BinaryImplicationGraph : public SatPropagator { } } - void SetDratProofHandler(DratProofHandler *drat_proof_handler) { + void SetDratProofHandler(DratProofHandler* drat_proof_handler) { drat_proof_handler_ = drat_proof_handler; } @@ -685,7 +684,7 @@ class BinaryImplicationGraph : public SatPropagator { // // When doing blocked clause elimination of bounded variable elimination, one // only need to consider this list and not the full reachability. 
- const std::vector &DirectImplications(Literal literal); + const std::vector& DirectImplications(Literal literal); // A proxy for DirectImplications().size(), However we currently do not // maintain it perfectly. It is exact each time DirectImplications() is @@ -702,11 +701,10 @@ class BinaryImplicationGraph : public SatPropagator { // be removed if one run DetectEquivalences() before this. Similarly, if a => // var => not(a) then a must be false and this is detected and dealt with by // FindFailedLiteralAroundVar(). - bool FindFailedLiteralAroundVar(BooleanVariable var, bool *is_unsat); + bool FindFailedLiteralAroundVar(BooleanVariable var, bool* is_unsat); int64 NumImplicationOnVariableRemoval(BooleanVariable var); void RemoveBooleanVariable( - BooleanVariable var, - std::deque > *postsolve_clauses); + BooleanVariable var, std::deque>* postsolve_clauses); bool IsRemoved(Literal l) const { return is_removed_[l.Index()]; } // TODO(user): consider at most ones. @@ -721,10 +719,10 @@ class BinaryImplicationGraph : public SatPropagator { // Returns false if a conflict was encountered, in which case // trail->SetFailingClause() will be called with the correct size 2 clause. // This calls trail->Enqueue() on the newly assigned literals. - bool PropagateOnTrue(Literal true_literal, Trail *trail); + bool PropagateOnTrue(Literal true_literal, Trail* trail); // Remove any literal whose negation is marked (except the first one). - void RemoveRedundantLiterals(std::vector *conflict); + void RemoveRedundantLiterals(std::vector* conflict); // Fill is_marked_ with all the descendant of root. // Note that this also use dfs_stack_. @@ -740,8 +738,8 @@ class BinaryImplicationGraph : public SatPropagator { // Same as ExpandAtMostOne() but try to maximize the weight in the clique. 
std::vector ExpandAtMostOneWithWeight( const absl::Span at_most_one, - const gtl::ITIVector &can_be_included, - const gtl::ITIVector &expanded_lp_values); + const gtl::ITIVector& can_be_included, + const gtl::ITIVector& expanded_lp_values); // Process all at most one constraints starting at or after base_index in // at_most_one_buffer_. This replace literal by their representative, remove @@ -750,10 +748,10 @@ class BinaryImplicationGraph : public SatPropagator { bool CleanUpAndAddAtMostOnes(const int base_index); mutable StatsGroup stats_; - TimeLimit *time_limit_; - ModelRandomGenerator *random_; - Trail *trail_; - DratProofHandler *drat_proof_handler_ = nullptr; + TimeLimit* time_limit_; + ModelRandomGenerator* random_; + Trail* trail_; + DratProofHandler* drat_proof_handler_ = nullptr; // Binary reasons by trail_index. We need a deque because we kept pointers to // elements of this array and this can dynamically change size. @@ -769,7 +767,7 @@ class BinaryImplicationGraph : public SatPropagator { // // TODO(user): We could be even more efficient since a size of int32 is enough // for us and we could store in common the inlined/not-inlined size. - gtl::ITIVector > implications_; + gtl::ITIVector> implications_; int64 num_implications_ = 0; // Internal representation of at_most_one constraints. Each entry point to the @@ -779,11 +777,11 @@ class BinaryImplicationGraph : public SatPropagator { // // TODO(user): We could be more cache efficient by combining this with // implications_ in some way. Do some propagation speed benchmark. - gtl::ITIVector > at_most_ones_; + gtl::ITIVector> at_most_ones_; std::vector at_most_one_buffer_; // Used by GenerateAtMostOnesWithLargeWeight(). - std::vector > tmp_cuts_; + std::vector> tmp_cuts_; // Some stats. 
int64 num_propagations_ = 0; diff --git a/ortools/sat/cp_constraints.cc b/ortools/sat/cp_constraints.cc index 04f6409f32..d23dd01360 100644 --- a/ortools/sat/cp_constraints.cc +++ b/ortools/sat/cp_constraints.cc @@ -58,7 +58,7 @@ bool BooleanXorPropagator::Propagate() { if (sum == value_) return true; // Conflict. - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); conflict->clear(); for (int i = 0; i < literals_.size(); ++i) { const Literal l = literals_[i]; @@ -68,9 +68,9 @@ bool BooleanXorPropagator::Propagate() { return false; } -void BooleanXorPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void BooleanXorPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); - for (const Literal &l : literals_) { + for (const Literal& l : literals_) { watcher->WatchLiteral(l, id); watcher->WatchLiteral(l.Negated(), id); } @@ -80,7 +80,7 @@ GreaterThanAtLeastOneOfPropagator::GreaterThanAtLeastOneOfPropagator( IntegerVariable target_var, const absl::Span vars, const absl::Span offsets, const absl::Span selectors, - const absl::Span enforcements, Model *model) + const absl::Span enforcements, Model* model) : target_var_(target_var), vars_(vars.begin(), vars.end()), offsets_(offsets.begin(), offsets.end()), @@ -135,7 +135,7 @@ bool GreaterThanAtLeastOneOfPropagator::Propagate() { } void GreaterThanAtLeastOneOfPropagator::RegisterWith( - GenericLiteralWatcher *watcher) { + GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); for (const Literal l : selectors_) watcher->WatchLiteral(l.Negated(), id); for (const Literal l : enforcements_) watcher->WatchLiteral(l, id); diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 14c35f05b8..23647f63df 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -25,10 +25,10 @@ namespace sat { BoolVar::BoolVar() : cp_model_(nullptr), index_(0) {} -BoolVar::BoolVar(int index, CpModelProto 
*cp_model) +BoolVar::BoolVar(int index, CpModelProto* cp_model) : cp_model_(cp_model), index_(index) {} -BoolVar BoolVar::WithName(const std::string &name) { +BoolVar BoolVar::WithName(const std::string& name) { cp_model_->mutable_variables(index_)->set_name(name); return *this; } @@ -38,7 +38,7 @@ std::string BoolVar::DebugString() const { return absl::StrFormat("Not(%s)", Not().DebugString()); } else { std::string output; - const IntegerVariableProto &var_proto = cp_model_->variables(index_); + const IntegerVariableProto& var_proto = cp_model_->variables(index_); // Special case for constant variables without names. if (var_proto.name().empty() && var_proto.domain_size() == 2 && var_proto.domain(0) == var_proto.domain(1)) { @@ -62,24 +62,24 @@ std::string BoolVar::DebugString() const { BoolVar Not(BoolVar x) { return x.Not(); } -std::ostream &operator<<(std::ostream &os, const BoolVar &var) { +std::ostream& operator<<(std::ostream& os, const BoolVar& var) { os << var.DebugString(); return os; } IntVar::IntVar() : cp_model_(nullptr), index_(0) {} -IntVar::IntVar(int index, CpModelProto *cp_model) +IntVar::IntVar(int index, CpModelProto* cp_model) : cp_model_(cp_model), index_(index) { CHECK(RefIsPositive(index)); } -IntVar IntVar::WithName(const std::string &name) { +IntVar IntVar::WithName(const std::string& name) { cp_model_->mutable_variables(index_)->set_name(name); return *this; } -IntVar::IntVar(const BoolVar &var) { +IntVar::IntVar(const BoolVar& var) { cp_model_ = var.cp_model_; index_ = var.index_; } @@ -103,7 +103,7 @@ std::string IntVar::DebugString() const { return absl::StrFormat("Not(%s)", IntVar(NegatedRef(index_), cp_model_).DebugString()); } - const IntegerVariableProto &var_proto = cp_model_->variables(index_); + const IntegerVariableProto& var_proto = cp_model_->variables(index_); // Special case for constant variables without names. 
if (var_proto.name().empty() && var_proto.domain_size() == 2 && var_proto.domain(0) == var_proto.domain(1)) { @@ -127,7 +127,7 @@ std::string IntVar::DebugString() const { } } -std::ostream &operator<<(std::ostream &os, const IntVar &var) { +std::ostream& operator<<(std::ostream& os, const IntVar& var) { os << var.DebugString(); return os; } @@ -142,7 +142,7 @@ LinearExpr::LinearExpr(int64 constant) { constant_ = constant; } LinearExpr LinearExpr::Sum(absl::Span vars) { LinearExpr result; - for (const IntVar &var : vars) { + for (const IntVar& var : vars) { result.AddVar(var); } return result; @@ -166,7 +166,7 @@ LinearExpr LinearExpr::Term(IntVar var, int64 coefficient) { LinearExpr LinearExpr::BooleanSum(absl::Span vars) { LinearExpr result; - for (const IntVar &var : vars) { + for (const IntVar& var : vars) { result.AddVar(var); } return result; @@ -182,7 +182,7 @@ LinearExpr LinearExpr::BooleanScalProd(absl::Span vars, return result; } -LinearExpr &LinearExpr::AddConstant(int64 value) { +LinearExpr& LinearExpr::AddConstant(int64 value) { constant_ += value; return *this; } @@ -201,17 +201,17 @@ void LinearExpr::AddTerm(IntVar var, int64 coeff) { } } -Constraint::Constraint(ConstraintProto *proto) : proto_(proto) {} +Constraint::Constraint(ConstraintProto* proto) : proto_(proto) {} -Constraint Constraint::WithName(const std::string &name) { +Constraint Constraint::WithName(const std::string& name) { proto_->set_name(name); return *this; } -const std::string &Constraint::Name() const { return proto_->name(); } +const std::string& Constraint::Name() const { return proto_->name(); } Constraint Constraint::OnlyEnforceIf(absl::Span literals) { - for (const BoolVar &var : literals) { + for (const BoolVar& var : literals) { proto_->add_enforcement_literal(var.index_); } return *this; @@ -241,8 +241,8 @@ void TableConstraint::AddTuple(absl::Span tuple) { } } -ReservoirConstraint::ReservoirConstraint(ConstraintProto *proto, - CpModelBuilder *builder) 
+ReservoirConstraint::ReservoirConstraint(ConstraintProto* proto, + CpModelBuilder* builder) : Constraint(proto), builder_(builder) {} void ReservoirConstraint::AddEvent(IntVar time, int64 demand) { @@ -273,8 +273,8 @@ void NoOverlap2DConstraint::AddRectangle(IntervalVar x_coordinate, proto_->mutable_no_overlap_2d()->add_y_intervals(y_coordinate.index_); } -CumulativeConstraint::CumulativeConstraint(ConstraintProto *proto, - CpModelBuilder *builder) +CumulativeConstraint::CumulativeConstraint(ConstraintProto* proto, + CpModelBuilder* builder) : Constraint(proto), builder_(builder) {} void CumulativeConstraint::AddDemand(IntervalVar interval, IntVar demand) { @@ -285,10 +285,10 @@ void CumulativeConstraint::AddDemand(IntervalVar interval, IntVar demand) { IntervalVar::IntervalVar() : cp_model_(nullptr), index_() {} -IntervalVar::IntervalVar(int index, CpModelProto *cp_model) +IntervalVar::IntervalVar(int index, CpModelProto* cp_model) : cp_model_(cp_model), index_(index) {} -IntervalVar IntervalVar::WithName(const std::string &name) { +IntervalVar IntervalVar::WithName(const std::string& name) { cp_model_->mutable_constraints(index_)->set_name(name); return *this; } @@ -314,7 +314,7 @@ std::string IntervalVar::Name() const { std::string IntervalVar::DebugString() const { CHECK_GE(index_, 0); - const ConstraintProto &ct_proto = cp_model_->constraints(index_); + const ConstraintProto& ct_proto = cp_model_->constraints(index_); std::string output; if (ct_proto.name().empty()) { absl::StrAppend(&output, "IntervalVar", index_, "("); @@ -327,7 +327,7 @@ std::string IntervalVar::DebugString() const { return output; } -std::ostream &operator<<(std::ostream &os, const IntervalVar &var) { +std::ostream& operator<<(std::ostream& os, const IntervalVar& var) { os << var.DebugString(); return os; } @@ -335,7 +335,7 @@ std::ostream &operator<<(std::ostream &os, const IntervalVar &var) { int CpModelBuilder::IndexFromConstant(int64 value) { if 
(!gtl::ContainsKey(constant_to_index_map_, value)) { const int index = cp_model_.variables_size(); - IntegerVariableProto *const var_proto = cp_model_.add_variables(); + IntegerVariableProto* const var_proto = cp_model_.add_variables(); var_proto->add_domain(value); var_proto->add_domain(value); constant_to_index_map_[value] = index; @@ -349,9 +349,9 @@ int CpModelBuilder::GetOrCreateIntegerIndex(int index) { } if (!gtl::ContainsKey(bool_to_integer_index_map_, index)) { const int ref = PositiveRef(index); - const IntegerVariableProto &old_var = cp_model_.variables(ref); + const IntegerVariableProto& old_var = cp_model_.variables(ref); const int new_index = cp_model_.variables_size(); - IntegerVariableProto *const new_var = cp_model_.add_variables(); + IntegerVariableProto* const new_var = cp_model_.add_variables(); new_var->add_domain(0); new_var->add_domain(1); if (!old_var.name().empty()) { @@ -364,10 +364,10 @@ int CpModelBuilder::GetOrCreateIntegerIndex(int index) { return bool_to_integer_index_map_[index]; } -IntVar CpModelBuilder::NewIntVar(const Domain &domain) { +IntVar CpModelBuilder::NewIntVar(const Domain& domain) { const int index = cp_model_.variables_size(); - IntegerVariableProto *const var_proto = cp_model_.add_variables(); - for (const auto &interval : domain) { + IntegerVariableProto* const var_proto = cp_model_.add_variables(); + for (const auto& interval : domain) { var_proto->add_domain(interval.start); var_proto->add_domain(interval.end); } @@ -376,7 +376,7 @@ IntVar CpModelBuilder::NewIntVar(const Domain &domain) { BoolVar CpModelBuilder::NewBoolVar() { const int index = cp_model_.variables_size(); - IntegerVariableProto *const var_proto = cp_model_.add_variables(); + IntegerVariableProto* const var_proto = cp_model_.add_variables(); var_proto->add_domain(0); var_proto->add_domain(1); return BoolVar(index, &cp_model_); @@ -403,9 +403,9 @@ IntervalVar CpModelBuilder::NewOptionalIntervalVar(IntVar start, IntVar size, IntVar end, BoolVar 
presence) { const int index = cp_model_.constraints_size(); - ConstraintProto *const ct = cp_model_.add_constraints(); + ConstraintProto* const ct = cp_model_.add_constraints(); ct->add_enforcement_literal(presence.index_); - IntervalConstraintProto *const interval = ct->mutable_interval(); + IntervalConstraintProto* const interval = ct->mutable_interval(); interval->set_start(GetOrCreateIntegerIndex(start.index_)); interval->set_size(GetOrCreateIntegerIndex(size.index_)); interval->set_end(GetOrCreateIntegerIndex(end.index_)); @@ -413,39 +413,39 @@ IntervalVar CpModelBuilder::NewOptionalIntervalVar(IntVar start, IntVar size, } Constraint CpModelBuilder::AddBoolOr(absl::Span literals) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const BoolVar &lit : literals) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const BoolVar& lit : literals) { proto->mutable_bool_or()->add_literals(lit.index_); } return Constraint(proto); } Constraint CpModelBuilder::AddBoolAnd(absl::Span literals) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const BoolVar &lit : literals) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const BoolVar& lit : literals) { proto->mutable_bool_and()->add_literals(lit.index_); } return Constraint(proto); } Constraint CpModelBuilder::AddBoolXor(absl::Span literals) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const BoolVar &lit : literals) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const BoolVar& lit : literals) { proto->mutable_bool_xor()->add_literals(lit.index_); } return Constraint(proto); } -void CpModelBuilder::FillLinearTerms(const LinearExpr &left, - const LinearExpr &right, - LinearConstraintProto *proto) { - for (const IntVar &x : left.variables()) { +void CpModelBuilder::FillLinearTerms(const LinearExpr& left, + const LinearExpr& right, + LinearConstraintProto* proto) { + for (const IntVar& x : 
left.variables()) { proto->add_vars(x.index_); } for (const int64 coeff : left.coefficients()) { proto->add_coeffs(coeff); } - for (const IntVar &x : right.variables()) { + for (const IntVar& x : right.variables()) { proto->add_vars(x.index_); } for (const int64 coeff : right.coefficients()) { @@ -453,9 +453,9 @@ void CpModelBuilder::FillLinearTerms(const LinearExpr &left, } } -Constraint CpModelBuilder::AddEquality(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddEquality(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); proto->mutable_linear()->add_domain(rhs); @@ -463,9 +463,9 @@ Constraint CpModelBuilder::AddEquality(const LinearExpr &left, return Constraint(proto); } -Constraint CpModelBuilder::AddGreaterOrEqual(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddGreaterOrEqual(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); proto->mutable_linear()->add_domain(rhs); @@ -473,9 +473,9 @@ Constraint CpModelBuilder::AddGreaterOrEqual(const LinearExpr &left, return Constraint(proto); } -Constraint CpModelBuilder::AddLessOrEqual(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddLessOrEqual(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); 
proto->mutable_linear()->add_domain(kint64min); @@ -483,9 +483,9 @@ Constraint CpModelBuilder::AddLessOrEqual(const LinearExpr &left, return Constraint(proto); } -Constraint CpModelBuilder::AddGreaterThan(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddGreaterThan(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); proto->mutable_linear()->add_domain(rhs + 1); @@ -493,9 +493,9 @@ Constraint CpModelBuilder::AddGreaterThan(const LinearExpr &left, return Constraint(proto); } -Constraint CpModelBuilder::AddLessThan(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddLessThan(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); proto->mutable_linear()->add_domain(kint64min); @@ -503,26 +503,26 @@ Constraint CpModelBuilder::AddLessThan(const LinearExpr &left, return Constraint(proto); } -Constraint CpModelBuilder::AddLinearConstraint(const LinearExpr &expr, - const Domain &domain) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &x : expr.variables()) { +Constraint CpModelBuilder::AddLinearConstraint(const LinearExpr& expr, + const Domain& domain) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& x : expr.variables()) { proto->mutable_linear()->add_vars(x.index_); } for (const int64 coeff : expr.coefficients()) { proto->mutable_linear()->add_coeffs(coeff); } const int64 cst = expr.constant(); - for (const auto &i : domain) { + for (const auto& i : domain) { 
proto->mutable_linear()->add_domain(i.start - cst); proto->mutable_linear()->add_domain(i.end - cst); } return Constraint(proto); } -Constraint CpModelBuilder::AddNotEqual(const LinearExpr &left, - const LinearExpr &right) { - ConstraintProto *const proto = cp_model_.add_constraints(); +Constraint CpModelBuilder::AddNotEqual(const LinearExpr& left, + const LinearExpr& right) { + ConstraintProto* const proto = cp_model_.add_constraints(); FillLinearTerms(left, right, proto->mutable_linear()); const int64 rhs = right.constant() - left.constant(); proto->mutable_linear()->add_domain(kint64min); @@ -533,8 +533,8 @@ Constraint CpModelBuilder::AddNotEqual(const LinearExpr &left, } Constraint CpModelBuilder::AddAllDifferent(absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &var : vars) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& var : vars) { proto->mutable_all_diff()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return Constraint(proto); @@ -542,10 +542,10 @@ Constraint CpModelBuilder::AddAllDifferent(absl::Span vars) { Constraint CpModelBuilder::AddVariableElement( IntVar index, absl::Span variables, IntVar target) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_element()->set_index(GetOrCreateIntegerIndex(index.index_)); proto->mutable_element()->set_target(GetOrCreateIntegerIndex(target.index_)); - for (const IntVar &var : variables) { + for (const IntVar& var : variables) { proto->mutable_element()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return Constraint(proto); @@ -554,7 +554,7 @@ Constraint CpModelBuilder::AddVariableElement( Constraint CpModelBuilder::AddElement(IntVar index, absl::Span values, IntVar target) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); 
proto->mutable_element()->set_index(GetOrCreateIntegerIndex(index.index_)); proto->mutable_element()->set_target(GetOrCreateIntegerIndex(target.index_)); for (int64 value : values) { @@ -573,8 +573,8 @@ MultipleCircuitConstraint CpModelBuilder::AddMultipleCircuitConstraint() { TableConstraint CpModelBuilder::AddAllowedAssignments( absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &var : vars) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& var : vars) { proto->mutable_table()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return TableConstraint(proto); @@ -582,8 +582,8 @@ TableConstraint CpModelBuilder::AddAllowedAssignments( TableConstraint CpModelBuilder::AddForbiddenAssignments( absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &var : vars) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& var : vars) { proto->mutable_table()->add_vars(GetOrCreateIntegerIndex(var.index_)); } proto->mutable_table()->set_negated(true); @@ -593,11 +593,11 @@ TableConstraint CpModelBuilder::AddForbiddenAssignments( Constraint CpModelBuilder::AddInverseConstraint( absl::Span variables, absl::Span inverse_variables) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &var : variables) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& var : variables) { proto->mutable_inverse()->add_f_direct(GetOrCreateIntegerIndex(var.index_)); } - for (const IntVar &var : inverse_variables) { + for (const IntVar& var : inverse_variables) { proto->mutable_inverse()->add_f_inverse( GetOrCreateIntegerIndex(var.index_)); } @@ -606,7 +606,7 @@ Constraint CpModelBuilder::AddInverseConstraint( ReservoirConstraint CpModelBuilder::AddReservoirConstraint(int64 min_level, int64 max_level) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* 
const proto = cp_model_.add_constraints(); proto->mutable_reservoir()->set_min_level(min_level); proto->mutable_reservoir()->set_max_level(max_level); return ReservoirConstraint(proto, this); @@ -615,8 +615,8 @@ ReservoirConstraint CpModelBuilder::AddReservoirConstraint(int64 min_level, AutomatonConstraint CpModelBuilder::AddAutomaton( absl::Span transition_variables, int starting_state, absl::Span final_states) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntVar &var : transition_variables) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntVar& var : transition_variables) { proto->mutable_automaton()->add_vars(GetOrCreateIntegerIndex(var.index_)); } proto->mutable_automaton()->set_starting_state(starting_state); @@ -628,16 +628,16 @@ AutomatonConstraint CpModelBuilder::AddAutomaton( Constraint CpModelBuilder::AddMinEquality(IntVar target, absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_min()->set_target(GetOrCreateIntegerIndex(target.index_)); - for (const IntVar &var : vars) { + for (const IntVar& var : vars) { proto->mutable_int_min()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return Constraint(proto); } -void CpModelBuilder::LinearExprToProto(const LinearExpr &expr, - LinearExpressionProto *expr_proto) { +void CpModelBuilder::LinearExprToProto(const LinearExpr& expr, + LinearExpressionProto* expr_proto) { for (const IntVar var : expr.variables()) { expr_proto->add_vars(GetOrCreateIntegerIndex(var.index_)); } @@ -648,11 +648,11 @@ void CpModelBuilder::LinearExprToProto(const LinearExpr &expr, } Constraint CpModelBuilder::AddLinMinEquality( - const LinearExpr &target, absl::Span exprs) { - ConstraintProto *const proto = cp_model_.add_constraints(); + const LinearExpr& target, absl::Span exprs) { + ConstraintProto* const proto = cp_model_.add_constraints(); LinearExprToProto(target, 
proto->mutable_lin_min()->mutable_target()); - for (const LinearExpr &expr : exprs) { - LinearExpressionProto *expr_proto = proto->mutable_lin_min()->add_exprs(); + for (const LinearExpr& expr : exprs) { + LinearExpressionProto* expr_proto = proto->mutable_lin_min()->add_exprs(); LinearExprToProto(expr, expr_proto); } return Constraint(proto); @@ -660,20 +660,20 @@ Constraint CpModelBuilder::AddLinMinEquality( Constraint CpModelBuilder::AddMaxEquality(IntVar target, absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_max()->set_target(GetOrCreateIntegerIndex(target.index_)); - for (const IntVar &var : vars) { + for (const IntVar& var : vars) { proto->mutable_int_max()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return Constraint(proto); } Constraint CpModelBuilder::AddLinMaxEquality( - const LinearExpr &target, absl::Span exprs) { - ConstraintProto *const proto = cp_model_.add_constraints(); + const LinearExpr& target, absl::Span exprs) { + ConstraintProto* const proto = cp_model_.add_constraints(); LinearExprToProto(target, proto->mutable_lin_max()->mutable_target()); - for (const LinearExpr &expr : exprs) { - LinearExpressionProto *expr_proto = proto->mutable_lin_max()->add_exprs(); + for (const LinearExpr& expr : exprs) { + LinearExpressionProto* expr_proto = proto->mutable_lin_max()->add_exprs(); LinearExprToProto(expr, expr_proto); } return Constraint(proto); @@ -681,7 +681,7 @@ Constraint CpModelBuilder::AddLinMaxEquality( Constraint CpModelBuilder::AddDivisionEquality(IntVar target, IntVar numerator, IntVar denominator) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_div()->set_target(GetOrCreateIntegerIndex(target.index_)); proto->mutable_int_div()->add_vars(GetOrCreateIntegerIndex(numerator.index_)); proto->mutable_int_div()->add_vars( @@ -690,7 
+690,7 @@ Constraint CpModelBuilder::AddDivisionEquality(IntVar target, IntVar numerator, } Constraint CpModelBuilder::AddAbsEquality(IntVar target, IntVar var) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_max()->set_target(GetOrCreateIntegerIndex(target.index_)); proto->mutable_int_max()->add_vars(GetOrCreateIntegerIndex(var.index_)); proto->mutable_int_max()->add_vars( @@ -700,7 +700,7 @@ Constraint CpModelBuilder::AddAbsEquality(IntVar target, IntVar var) { Constraint CpModelBuilder::AddModuloEquality(IntVar target, IntVar var, IntVar mod) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_mod()->set_target(GetOrCreateIntegerIndex(target.index_)); proto->mutable_int_mod()->add_vars(GetOrCreateIntegerIndex(var.index_)); proto->mutable_int_mod()->add_vars(GetOrCreateIntegerIndex(mod.index_)); @@ -709,17 +709,17 @@ Constraint CpModelBuilder::AddModuloEquality(IntVar target, IntVar var, Constraint CpModelBuilder::AddProductEquality(IntVar target, absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_int_prod()->set_target(GetOrCreateIntegerIndex(target.index_)); - for (const IntVar &var : vars) { + for (const IntVar& var : vars) { proto->mutable_int_prod()->add_vars(GetOrCreateIntegerIndex(var.index_)); } return Constraint(proto); } Constraint CpModelBuilder::AddNoOverlap(absl::Span vars) { - ConstraintProto *const proto = cp_model_.add_constraints(); - for (const IntervalVar &var : vars) { + ConstraintProto* const proto = cp_model_.add_constraints(); + for (const IntervalVar& var : vars) { proto->mutable_no_overlap()->add_intervals( GetOrCreateIntegerIndex(var.index_)); } @@ -731,15 +731,15 @@ NoOverlap2DConstraint CpModelBuilder::AddNoOverlap2D() { } CumulativeConstraint 
CpModelBuilder::AddCumulative(IntVar capacity) { - ConstraintProto *const proto = cp_model_.add_constraints(); + ConstraintProto* const proto = cp_model_.add_constraints(); proto->mutable_cumulative()->set_capacity( GetOrCreateIntegerIndex(capacity.index_)); return CumulativeConstraint(proto, this); } -void CpModelBuilder::Minimize(const LinearExpr &expr) { +void CpModelBuilder::Minimize(const LinearExpr& expr) { cp_model_.mutable_objective()->Clear(); - for (const IntVar &x : expr.variables()) { + for (const IntVar& x : expr.variables()) { cp_model_.mutable_objective()->add_vars(x.index_); } for (const int64 coeff : expr.coefficients()) { @@ -748,9 +748,9 @@ void CpModelBuilder::Minimize(const LinearExpr &expr) { cp_model_.mutable_objective()->set_offset(expr.constant()); } -void CpModelBuilder::Maximize(const LinearExpr &expr) { +void CpModelBuilder::Maximize(const LinearExpr& expr) { cp_model_.mutable_objective()->Clear(); - for (const IntVar &x : expr.variables()) { + for (const IntVar& x : expr.variables()) { cp_model_.mutable_objective()->add_vars(x.index_); } for (const int64 coeff : expr.coefficients()) { @@ -770,8 +770,8 @@ void CpModelBuilder::AddDecisionStrategy( absl::Span variables, DecisionStrategyProto::VariableSelectionStrategy var_strategy, DecisionStrategyProto::DomainReductionStrategy domain_strategy) { - DecisionStrategyProto *const proto = cp_model_.add_search_strategy(); - for (const IntVar &var : variables) { + DecisionStrategyProto* const proto = cp_model_.add_search_strategy(); + for (const IntVar& var : variables) { proto->add_variables(var.index_); } proto->set_variable_selection_strategy(var_strategy); @@ -782,8 +782,8 @@ void CpModelBuilder::AddDecisionStrategy( absl::Span variables, DecisionStrategyProto::VariableSelectionStrategy var_strategy, DecisionStrategyProto::DomainReductionStrategy domain_strategy) { - DecisionStrategyProto *const proto = cp_model_.add_search_strategy(); - for (const BoolVar &var : variables) { + 
DecisionStrategyProto* const proto = cp_model_.add_search_strategy(); + for (const BoolVar& var : variables) { proto->add_variables(var.index_); } proto->set_variable_selection_strategy(var_strategy); @@ -796,7 +796,7 @@ void CpModelBuilder::AddHint(IntVar var, int64 value) { cp_model_.mutable_solution_hint()->add_values(value); } -int64 SolutionIntegerValue(const CpSolverResponse &r, const LinearExpr &expr) { +int64 SolutionIntegerValue(const CpSolverResponse& r, const LinearExpr& expr) { int64 result = expr.constant(); for (int i = 0; i < expr.variables().size(); ++i) { result += r.solution(expr.variables()[i].index_) * expr.coefficients()[i]; @@ -804,7 +804,7 @@ int64 SolutionIntegerValue(const CpSolverResponse &r, const LinearExpr &expr) { return result; } -int64 SolutionIntegerMin(const CpSolverResponse &r, IntVar x) { +int64 SolutionIntegerMin(const CpSolverResponse& r, IntVar x) { if (r.solution_size() > 0) { return r.solution(x.index_); } else { @@ -812,7 +812,7 @@ int64 SolutionIntegerMin(const CpSolverResponse &r, IntVar x) { } } -int64 SolutionIntegerMax(const CpSolverResponse &r, IntVar x) { +int64 SolutionIntegerMax(const CpSolverResponse& r, IntVar x) { if (r.solution_size() > 0) { return r.solution(x.index_); } else { @@ -820,7 +820,7 @@ int64 SolutionIntegerMax(const CpSolverResponse &r, IntVar x) { } } -bool SolutionBooleanValue(const CpSolverResponse &r, BoolVar x) { +bool SolutionBooleanValue(const CpSolverResponse& r, BoolVar x) { const int ref = x.index_; if (RefIsPositive(ref)) { return r.solution(ref) == 1; diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc index 7cc33496a5..4dfe308875 100644 --- a/ortools/sat/cp_model_checker.cc +++ b/ortools/sat/cp_model_checker.cc @@ -45,7 +45,7 @@ namespace { } while (false) template -bool DomainInProtoIsValid(const ProtoWithDomain &proto) { +bool DomainInProtoIsValid(const ProtoWithDomain& proto) { if (proto.domain().size() % 2) return false; std::vector domain; for (int i = 
0; i < proto.domain_size(); i += 2) { @@ -55,22 +55,22 @@ bool DomainInProtoIsValid(const ProtoWithDomain &proto) { return IntervalsAreSortedAndNonAdjacent(domain); } -bool VariableReferenceIsValid(const CpModelProto &model, int reference) { +bool VariableReferenceIsValid(const CpModelProto& model, int reference) { // We do it this way to avoid overflow if reference is kint64min for instance. if (reference >= model.variables_size()) return false; return reference >= -static_cast(model.variables_size()); } -bool LiteralReferenceIsValid(const CpModelProto &model, int reference) { +bool LiteralReferenceIsValid(const CpModelProto& model, int reference) { if (!VariableReferenceIsValid(model, reference)) return false; - const auto &var_proto = model.variables(PositiveRef(reference)); + const auto& var_proto = model.variables(PositiveRef(reference)); const int64 min_domain = var_proto.domain(0); const int64 max_domain = var_proto.domain(var_proto.domain_size() - 1); return min_domain >= 0 && max_domain <= 1; } -std::string ValidateIntegerVariable(const CpModelProto &model, int v) { - const IntegerVariableProto &proto = model.variables(v); +std::string ValidateIntegerVariable(const CpModelProto& model, int v) { + const IntegerVariableProto& proto = model.variables(v); if (proto.domain_size() == 0) { return absl::StrCat("var #", v, " has no domain(): ", ProtobufShortDebugString(proto)); @@ -107,9 +107,9 @@ std::string ValidateIntegerVariable(const CpModelProto &model, int v) { return ""; } -std::string ValidateArgumentReferencesInConstraint(const CpModelProto &model, +std::string ValidateArgumentReferencesInConstraint(const CpModelProto& model, int c) { - const ConstraintProto &ct = model.constraints(c); + const ConstraintProto& ct = model.constraints(c); IndexReferences references = GetReferencesUsedByConstraint(ct); for (const int v : references.variables) { if (!VariableReferenceIsValid(model, v)) { @@ -148,13 +148,13 @@ std::string 
ValidateArgumentReferencesInConstraint(const CpModelProto &model, } template -bool PossibleIntegerOverflow(const CpModelProto &model, - const LinearExpressionProto &proto) { +bool PossibleIntegerOverflow(const CpModelProto& model, + const LinearExpressionProto& proto) { int64 sum_min = 0; int64 sum_max = 0; for (int i = 0; i < proto.vars_size(); ++i) { const int ref = proto.vars(i); - const auto &var_proto = model.variables(PositiveRef(ref)); + const auto& var_proto = model.variables(PositiveRef(ref)); const int64 min_domain = var_proto.domain(0); const int64 max_domain = var_proto.domain(var_proto.domain_size() - 1); const int64 coeff = RefIsPositive(ref) ? proto.coeffs(i) : -proto.coeffs(i); @@ -179,11 +179,11 @@ bool PossibleIntegerOverflow(const CpModelProto &model, return false; } -std::string ValidateIntervalConstraint(const CpModelProto &model, - const ConstraintProto &ct) { - const IntervalConstraintProto &arg = ct.interval(); +std::string ValidateIntervalConstraint(const CpModelProto& model, + const ConstraintProto& ct) { + const IntervalConstraintProto& arg = ct.interval(); if (arg.size() < 0) { - const IntegerVariableProto &size_var_proto = + const IntegerVariableProto& size_var_proto = model.variables(NegatedRef(arg.size())); if (size_var_proto.domain(size_var_proto.domain_size() - 1) > 0) { return absl::StrCat( @@ -191,7 +191,7 @@ std::string ValidateIntervalConstraint(const CpModelProto &model, "negation of size var: ", ProtobufDebugString(size_var_proto)); } } else { - const IntegerVariableProto &size_var_proto = model.variables(arg.size()); + const IntegerVariableProto& size_var_proto = model.variables(arg.size()); if (size_var_proto.domain(0) < 0) { return absl::StrCat( "Negative value in interval size domain: ", ProtobufDebugString(ct), @@ -201,9 +201,9 @@ std::string ValidateIntervalConstraint(const CpModelProto &model, return ""; } -std::string ValidateLinearConstraint(const CpModelProto &model, - const ConstraintProto &ct) { - const 
LinearConstraintProto &arg = ct.linear(); +std::string ValidateLinearConstraint(const CpModelProto& model, + const ConstraintProto& ct) { + const LinearConstraintProto& arg = ct.linear(); if (PossibleIntegerOverflow(model, arg)) { return "Possible integer overflow in constraint: " + ProtobufDebugString(ct); @@ -211,8 +211,8 @@ std::string ValidateLinearConstraint(const CpModelProto &model, return ""; } -std::string ValidateLinearExpression(const CpModelProto &model, - const LinearExpressionProto &expr) { +std::string ValidateLinearExpression(const CpModelProto& model, + const LinearExpressionProto& expr) { if (expr.coeffs_size() != expr.vars_size()) { return absl::StrCat("coeffs_size() != vars_size() in linear expression: ", ProtobufShortDebugString(expr)); @@ -224,8 +224,8 @@ std::string ValidateLinearExpression(const CpModelProto &model, return ""; } -std::string ValidateCircuitConstraint(const CpModelProto &model, - const ConstraintProto &ct) { +std::string ValidateCircuitConstraint(const CpModelProto& model, + const ConstraintProto& ct) { const int size = ct.circuit().tails().size(); if (ct.circuit().heads().size() != size || ct.circuit().literals().size() != size) { @@ -235,8 +235,8 @@ std::string ValidateCircuitConstraint(const CpModelProto &model, return ""; } -std::string ValidateRoutesConstraint(const CpModelProto &model, - const ConstraintProto &ct) { +std::string ValidateRoutesConstraint(const CpModelProto& model, + const ConstraintProto& ct) { const int size = ct.routes().tails().size(); if (ct.routes().heads().size() != size || ct.routes().literals().size() != size) { @@ -246,8 +246,8 @@ std::string ValidateRoutesConstraint(const CpModelProto &model, return ""; } -std::string ValidateReservoirConstraint(const CpModelProto &model, - const ConstraintProto &ct) { +std::string ValidateReservoirConstraint(const CpModelProto& model, + const ConstraintProto& ct) { if (ct.enforcement_literal_size() > 0) { return "Reservoir does not support enforcement 
literals."; } @@ -256,7 +256,7 @@ std::string ValidateReservoirConstraint(const CpModelProto &model, ProtobufShortDebugString(ct)); } for (const int t : ct.reservoir().times()) { - const IntegerVariableProto &time = model.variables(t); + const IntegerVariableProto& time = model.variables(t); for (const int64 bound : time.domain()) { if (bound < 0) { return absl::StrCat("Time variables must be >= 0 in constraint ", @@ -283,7 +283,7 @@ std::string ValidateReservoirConstraint(const CpModelProto &model, return ""; } -std::string ValidateCircuitCoveringConstraint(const ConstraintProto &ct) { +std::string ValidateCircuitCoveringConstraint(const ConstraintProto& ct) { const int num_nodes = ct.circuit_covering().nexts_size(); for (const int d : ct.circuit_covering().distinguished_nodes()) { if (d < 0 || d >= num_nodes) { @@ -295,13 +295,13 @@ std::string ValidateCircuitCoveringConstraint(const ConstraintProto &ct) { return ""; } -std::string ValidateIntModConstraint(const CpModelProto &model, - const ConstraintProto &ct) { +std::string ValidateIntModConstraint(const CpModelProto& model, + const ConstraintProto& ct) { if (ct.int_mod().vars().size() != 2) { return absl::StrCat("An int_mod constraint should have exactly 2 terms: ", ProtobufShortDebugString(ct)); } - const IntegerVariableProto &mod_proto = model.variables(ct.int_mod().vars(1)); + const IntegerVariableProto& mod_proto = model.variables(ct.int_mod().vars(1)); if (mod_proto.domain(0) <= 0) { return absl::StrCat( "An int_mod must have a strictly positive modulo argument: ", @@ -310,8 +310,8 @@ std::string ValidateIntModConstraint(const CpModelProto &model, return ""; } -std::string ValidateObjective(const CpModelProto &model, - const CpObjectiveProto &obj) { +std::string ValidateObjective(const CpModelProto& model, + const CpObjectiveProto& obj) { if (!DomainInProtoIsValid(obj)) { return absl::StrCat("The objective has and invalid domain() format: ", ProtobufShortDebugString(obj)); @@ -333,15 +333,15 @@ 
std::string ValidateObjective(const CpModelProto &model, return ""; } -std::string ValidateSearchStrategies(const CpModelProto &model) { - for (const DecisionStrategyProto &strategy : model.search_strategy()) { +std::string ValidateSearchStrategies(const CpModelProto& model) { + for (const DecisionStrategyProto& strategy : model.search_strategy()) { for (const int ref : strategy.variables()) { if (!VariableReferenceIsValid(model, ref)) { return absl::StrCat("Invalid variable reference in strategy: ", ProtobufShortDebugString(strategy)); } } - for (const auto &transformation : strategy.transformations()) { + for (const auto& transformation : strategy.transformations()) { if (transformation.positive_coeff() <= 0) { return absl::StrCat("Affine transformation coeff should be positive: ", ProtobufShortDebugString(transformation)); @@ -356,9 +356,9 @@ std::string ValidateSearchStrategies(const CpModelProto &model) { return ""; } -std::string ValidateSolutionHint(const CpModelProto &model) { +std::string ValidateSolutionHint(const CpModelProto& model) { if (!model.has_solution_hint()) return ""; - const auto &hint = model.solution_hint(); + const auto& hint = model.solution_hint(); if (hint.vars().size() != hint.values().size()) { return "Invalid solution hint: vars and values do not have the same size."; } @@ -372,7 +372,7 @@ std::string ValidateSolutionHint(const CpModelProto &model) { } // namespace -std::string ValidateCpModel(const CpModelProto &model) { +std::string ValidateCpModel(const CpModelProto& model) { for (int v = 0; v < model.variables_size(); ++v) { RETURN_IF_NOT_EMPTY(ValidateIntegerVariable(model, v)); } @@ -385,7 +385,7 @@ std::string ValidateCpModel(const CpModelProto &model) { // Other non-generic validations. // TODO(user): validate all constraints. 
- const ConstraintProto &ct = model.constraints(c); + const ConstraintProto& ct = model.constraints(c); const ConstraintProto::ConstraintCase type = ct.constraint_case(); switch (type) { case ConstraintProto::ConstraintCase::kIntDiv: @@ -512,7 +512,7 @@ namespace { class ConstraintChecker { public: - explicit ConstraintChecker(const std::vector &variable_values) + explicit ConstraintChecker(const std::vector& variable_values) : variable_values_(variable_values) {} bool LiteralIsTrue(int l) const { @@ -527,28 +527,28 @@ class ConstraintChecker { return -variable_values_[-var - 1]; } - bool ConstraintIsEnforced(const ConstraintProto &ct) { + bool ConstraintIsEnforced(const ConstraintProto& ct) { for (const int lit : ct.enforcement_literal()) { if (LiteralIsFalse(lit)) return false; } return true; } - bool BoolOrConstraintIsFeasible(const ConstraintProto &ct) { + bool BoolOrConstraintIsFeasible(const ConstraintProto& ct) { for (const int lit : ct.bool_or().literals()) { if (LiteralIsTrue(lit)) return true; } return false; } - bool BoolAndConstraintIsFeasible(const ConstraintProto &ct) { + bool BoolAndConstraintIsFeasible(const ConstraintProto& ct) { for (const int lit : ct.bool_and().literals()) { if (LiteralIsFalse(lit)) return false; } return true; } - bool AtMostOneConstraintIsFeasible(const ConstraintProto &ct) { + bool AtMostOneConstraintIsFeasible(const ConstraintProto& ct) { int num_true_literals = 0; for (const int lit : ct.at_most_one().literals()) { if (LiteralIsTrue(lit)) ++num_true_literals; @@ -556,7 +556,7 @@ class ConstraintChecker { return num_true_literals <= 1; } - bool BoolXorConstraintIsFeasible(const ConstraintProto &ct) { + bool BoolXorConstraintIsFeasible(const ConstraintProto& ct) { int sum = 0; for (const int lit : ct.bool_xor().literals()) { sum ^= LiteralIsTrue(lit) ? 
1 : 0; @@ -564,7 +564,7 @@ class ConstraintChecker { return sum == 1; } - bool LinearConstraintIsFeasible(const ConstraintProto &ct) { + bool LinearConstraintIsFeasible(const ConstraintProto& ct) { int64 sum = 0; const int num_variables = ct.linear().coeffs_size(); for (int i = 0; i < num_variables; ++i) { @@ -573,7 +573,7 @@ class ConstraintChecker { return DomainInProtoContains(ct.linear(), sum); } - bool IntMaxConstraintIsFeasible(const ConstraintProto &ct) { + bool IntMaxConstraintIsFeasible(const ConstraintProto& ct) { const int64 max = Value(ct.int_max().target()); int64 actual_max = kint64min; for (int i = 0; i < ct.int_max().vars_size(); ++i) { @@ -582,7 +582,7 @@ class ConstraintChecker { return max == actual_max; } - int64 LinearExpressionValue(const LinearExpressionProto &expr) { + int64 LinearExpressionValue(const LinearExpressionProto& expr) { int64 sum = expr.offset(); const int num_variables = expr.vars_size(); for (int i = 0; i < num_variables; ++i) { @@ -591,7 +591,7 @@ class ConstraintChecker { return sum; } - bool LinMaxConstraintIsFeasible(const ConstraintProto &ct) { + bool LinMaxConstraintIsFeasible(const ConstraintProto& ct) { const int64 max = LinearExpressionValue(ct.lin_max().target()); int64 actual_max = kint64min; for (int i = 0; i < ct.lin_max().exprs_size(); ++i) { @@ -601,7 +601,7 @@ class ConstraintChecker { return max == actual_max; } - bool IntProdConstraintIsFeasible(const ConstraintProto &ct) { + bool IntProdConstraintIsFeasible(const ConstraintProto& ct) { const int64 prod = Value(ct.int_prod().target()); int64 actual_prod = 1; for (int i = 0; i < ct.int_prod().vars_size(); ++i) { @@ -610,17 +610,17 @@ class ConstraintChecker { return prod == actual_prod; } - bool IntDivConstraintIsFeasible(const ConstraintProto &ct) { + bool IntDivConstraintIsFeasible(const ConstraintProto& ct) { return Value(ct.int_div().target()) == Value(ct.int_div().vars(0)) / Value(ct.int_div().vars(1)); } - bool IntModConstraintIsFeasible(const 
ConstraintProto &ct) { + bool IntModConstraintIsFeasible(const ConstraintProto& ct) { return Value(ct.int_mod().target()) == Value(ct.int_mod().vars(0)) % Value(ct.int_mod().vars(1)); } - bool IntMinConstraintIsFeasible(const ConstraintProto &ct) { + bool IntMinConstraintIsFeasible(const ConstraintProto& ct) { const int64 min = Value(ct.int_min().target()); int64 actual_min = kint64max; for (int i = 0; i < ct.int_min().vars_size(); ++i) { @@ -629,7 +629,7 @@ class ConstraintChecker { return min == actual_min; } - bool LinMinConstraintIsFeasible(const ConstraintProto &ct) { + bool LinMinConstraintIsFeasible(const ConstraintProto& ct) { const int64 min = LinearExpressionValue(ct.lin_min().target()); int64 actual_min = kint64max; for (int i = 0; i < ct.lin_min().exprs_size(); ++i) { @@ -639,7 +639,7 @@ class ConstraintChecker { return min == actual_min; } - bool AllDiffConstraintIsFeasible(const ConstraintProto &ct) { + bool AllDiffConstraintIsFeasible(const ConstraintProto& ct) { absl::flat_hash_set values; for (const int v : ct.all_diff().vars()) { if (gtl::ContainsKey(values, Value(v))) return false; @@ -648,19 +648,19 @@ class ConstraintChecker { return true; } - bool IntervalConstraintIsFeasible(const ConstraintProto &ct) { + bool IntervalConstraintIsFeasible(const ConstraintProto& ct) { const int64 size = Value(ct.interval().size()); if (size < 0) return false; return Value(ct.interval().start()) + size == Value(ct.interval().end()); } - bool NoOverlapConstraintIsFeasible(const CpModelProto &model, - const ConstraintProto &ct) { - std::vector > start_durations_pairs; + bool NoOverlapConstraintIsFeasible(const CpModelProto& model, + const ConstraintProto& ct) { + std::vector> start_durations_pairs; for (const int i : ct.no_overlap().intervals()) { - const ConstraintProto &interval_constraint = model.constraints(i); + const ConstraintProto& interval_constraint = model.constraints(i); if (ConstraintIsEnforced(interval_constraint)) { - const IntervalConstraintProto 
&interval = + const IntervalConstraintProto& interval = interval_constraint.interval(); start_durations_pairs.push_back( {Value(interval.start()), Value(interval.size())}); @@ -675,30 +675,30 @@ class ConstraintChecker { return true; } - bool IntervalsAreDisjoint(const IntervalConstraintProto &interval1, - const IntervalConstraintProto &interval2) { + bool IntervalsAreDisjoint(const IntervalConstraintProto& interval1, + const IntervalConstraintProto& interval2) { return Value(interval1.end()) <= Value(interval2.start()) || Value(interval2.end()) <= Value(interval1.start()); } - bool IntervalIsEmpty(const IntervalConstraintProto &interval) { + bool IntervalIsEmpty(const IntervalConstraintProto& interval) { return Value(interval.start()) == Value(interval.end()); } - bool NoOverlap2DConstraintIsFeasible(const CpModelProto &model, - const ConstraintProto &ct) { - const auto &arg = ct.no_overlap_2d(); + bool NoOverlap2DConstraintIsFeasible(const CpModelProto& model, + const ConstraintProto& ct) { + const auto& arg = ct.no_overlap_2d(); // Those intervals from arg.x_intervals and arg.y_intervals where both // the x and y intervals are enforced. 
- std::vector > + std::vector> enforced_intervals_xy; { const int num_intervals = arg.x_intervals_size(); CHECK_EQ(arg.y_intervals_size(), num_intervals); for (int i = 0; i < num_intervals; ++i) { - const ConstraintProto &x = model.constraints(arg.x_intervals(i)); - const ConstraintProto &y = model.constraints(arg.y_intervals(i)); + const ConstraintProto& x = model.constraints(arg.x_intervals(i)); + const ConstraintProto& y = model.constraints(arg.y_intervals(i)); if (ConstraintIsEnforced(x) && ConstraintIsEnforced(y) && (!arg.boxes_with_null_area_can_overlap() || (!IntervalIsEmpty(x.interval()) && @@ -710,10 +710,10 @@ class ConstraintChecker { const int num_enforced_intervals = enforced_intervals_xy.size(); for (int i = 0; i < num_enforced_intervals; ++i) { for (int j = i + 1; j < num_enforced_intervals; ++j) { - const auto &xi = *enforced_intervals_xy[i].first; - const auto &yi = *enforced_intervals_xy[i].second; - const auto &xj = *enforced_intervals_xy[j].first; - const auto &yj = *enforced_intervals_xy[j].second; + const auto& xi = *enforced_intervals_xy[i].first; + const auto& yi = *enforced_intervals_xy[i].second; + const auto& xj = *enforced_intervals_xy[j].first; + const auto& yj = *enforced_intervals_xy[j].second; if (!IntervalsAreDisjoint(xi, xj) && !IntervalsAreDisjoint(yi, yj) && !IntervalIsEmpty(xi) && !IntervalIsEmpty(xj) && !IntervalIsEmpty(yi) && !IntervalIsEmpty(yj)) { @@ -730,17 +730,17 @@ class ConstraintChecker { return true; } - bool CumulativeConstraintIsFeasible(const CpModelProto &model, - const ConstraintProto &ct) { + bool CumulativeConstraintIsFeasible(const CpModelProto& model, + const ConstraintProto& ct) { // TODO(user,user): Improve complexity for large durations. 
const int64 capacity = Value(ct.cumulative().capacity()); const int num_intervals = ct.cumulative().intervals_size(); absl::flat_hash_map usage; for (int i = 0; i < num_intervals; ++i) { - const ConstraintProto &interval_constraint = + const ConstraintProto& interval_constraint = model.constraints(ct.cumulative().intervals(i)); if (ConstraintIsEnforced(interval_constraint)) { - const IntervalConstraintProto &interval = + const IntervalConstraintProto& interval = interval_constraint.interval(); const int64 start = Value(interval.start()); const int64 duration = Value(interval.size()); @@ -754,12 +754,12 @@ class ConstraintChecker { return true; } - bool ElementConstraintIsFeasible(const ConstraintProto &ct) { + bool ElementConstraintIsFeasible(const ConstraintProto& ct) { const int index = Value(ct.element().index()); return Value(ct.element().vars(index)) == Value(ct.element().target()); } - bool TableConstraintIsFeasible(const ConstraintProto &ct) { + bool TableConstraintIsFeasible(const ConstraintProto& ct) { const int size = ct.table().vars_size(); if (size == 0) return true; for (int row_start = 0; row_start < ct.table().values_size(); @@ -773,7 +773,7 @@ class ConstraintChecker { return ct.table().negated(); } - bool AutomatonConstraintIsFeasible(const ConstraintProto &ct) { + bool AutomatonConstraintIsFeasible(const ConstraintProto& ct) { // Build the transition table {tail, label} -> head. absl::flat_hash_map, int64> transition_map; const int num_transitions = ct.automaton().transition_tail().size(); @@ -802,7 +802,7 @@ class ConstraintChecker { return false; } - bool CircuitConstraintIsFeasible(const ConstraintProto &ct) { + bool CircuitConstraintIsFeasible(const ConstraintProto& ct) { // Compute the set of relevant nodes for the constraint and set the next of // each of them. This also detects duplicate nexts. 
const int num_arcs = ct.circuit().tails_size(); @@ -843,7 +843,7 @@ class ConstraintChecker { return num_visited == cycle_size; // Another cycle somewhere if false. } - bool RoutesConstraintIsFeasible(const ConstraintProto &ct) { + bool RoutesConstraintIsFeasible(const ConstraintProto& ct) { const int num_arcs = ct.routes().tails_size(); int num_used_arcs = 0; int num_self_arcs = 0; @@ -903,7 +903,7 @@ class ConstraintChecker { return true; } - bool CircuitCoveringConstraintIsFeasible(const ConstraintProto &ct) { + bool CircuitCoveringConstraintIsFeasible(const ConstraintProto& ct) { const int num_nodes = ct.circuit_covering().nexts_size(); std::vector distinguished(num_nodes, false); std::vector visited(num_nodes, false); @@ -934,7 +934,7 @@ class ConstraintChecker { return true; } - bool InverseConstraintIsFeasible(const ConstraintProto &ct) { + bool InverseConstraintIsFeasible(const ConstraintProto& ct) { const int num_variables = ct.inverse().f_direct_size(); if (num_variables != ct.inverse().f_inverse_size()) return false; // Check that f_inverse(f_direct(i)) == i; this is sufficient. 
@@ -946,7 +946,7 @@ class ConstraintChecker { return true; } - bool ReservoirConstraintIsFeasible(const ConstraintProto &ct) { + bool ReservoirConstraintIsFeasible(const ConstraintProto& ct) { const int num_variables = ct.reservoir().times_size(); const int64 min_level = ct.reservoir().min_level(); const int64 max_level = ct.reservoir().max_level(); @@ -964,7 +964,7 @@ class ConstraintChecker { } } int64 current_level = 0; - for (const auto &delta : deltas) { + for (const auto& delta : deltas) { current_level += delta.second; if (current_level < min_level || current_level > max_level) { VLOG(1) << "Reservoir level " << current_level @@ -981,10 +981,10 @@ class ConstraintChecker { } // namespace -bool SolutionIsFeasible(const CpModelProto &model, - const std::vector &variable_values, - const CpModelProto *mapping_proto, - const std::vector *postsolve_mapping) { +bool SolutionIsFeasible(const CpModelProto& model, + const std::vector& variable_values, + const CpModelProto* mapping_proto, + const std::vector* postsolve_mapping) { if (variable_values.size() != model.variables_size()) { VLOG(1) << "Wrong number of variables in the solution vector"; return false; @@ -1004,7 +1004,7 @@ bool SolutionIsFeasible(const CpModelProto &model, ConstraintChecker checker(variable_values); for (int c = 0; c < model.constraints_size(); ++c) { - const ConstraintProto &ct = model.constraints(c); + const ConstraintProto& ct = model.constraints(c); if (!checker.ConstraintIsEnforced(ct)) continue; diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 8fd8ef4389..3cd4a81d0a 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -30,15 +30,15 @@ namespace operations_research { namespace sat { namespace { -void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { +void ExpandReservoir(ConstraintProto* ct, PresolveContext* context) { if (ct->reservoir().min_level() > ct->reservoir().max_level()) { VLOG(1) << "Empty level 
domain in reservoir constraint."; return (void)context->NotifyThatModelIsUnsat(); } // TODO(user): Support sharing constraints in the model across constraints. - absl::flat_hash_map, int> precedence_cache; - const ReservoirConstraintProto &reservoir = ct->reservoir(); + absl::flat_hash_map, int> precedence_cache; + const ReservoirConstraintProto& reservoir = ct->reservoir(); const int num_events = reservoir.times_size(); const int true_literal = context->GetOrCreateConstantVar(1); @@ -52,7 +52,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { const auto add_reified_precedence = [&context](int x_lesseq_y, int x, int y, int l_x, int l_y) { // x_lesseq_y => (x <= y) && l_x is true && l_y is true. - ConstraintProto *const lesseq = context->working_model->add_constraints(); + ConstraintProto* const lesseq = context->working_model->add_constraints(); lesseq->add_enforcement_literal(x_lesseq_y); lesseq->mutable_linear()->add_vars(x); lesseq->mutable_linear()->add_vars(y); @@ -68,7 +68,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { } // Not(x_lesseq_y) && l_x && l_y => (x > y) - ConstraintProto *const greater = context->working_model->add_constraints(); + ConstraintProto* const greater = context->working_model->add_constraints(); greater->mutable_linear()->add_vars(x); greater->mutable_linear()->add_vars(y); greater->mutable_linear()->add_coeffs(-1); @@ -104,8 +104,10 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { if (context->LiteralIsFalse(active_j)) continue; const int time_j = reservoir.times(j); - const std::pair p = std::make_pair(time_i, time_j); - const std::pair rev_p = std::make_pair(time_j, time_i); + const std::tuple p = + std::make_tuple(time_i, time_j, active_i, active_j); + const std::tuple rev_p = + std::make_tuple(time_j, time_i, active_j, active_i); if (gtl::ContainsKey(precedence_cache, p)) continue; const int i_lesseq_j = context->NewBoolVar(); @@ -121,7 +123,7 @@ void 
ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { add_reified_precedence(j_lesseq_i, time_j, time_i, active_j, active_i); // Consistency. This is redundant but should improves performance. - auto *const bool_or = + auto* const bool_or = context->working_model->add_constraints()->mutable_bool_or(); bool_or->add_literals(i_lesseq_j); bool_or->add_literals(j_lesseq_i); @@ -140,7 +142,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { const int time_i = reservoir.times(i); // Accumulates demands of all predecessors. - ConstraintProto *const level = context->working_model->add_constraints(); + ConstraintProto* const level = context->working_model->add_constraints(); level->add_enforcement_literal(active_i); // Add contributions from previous events. @@ -151,7 +153,8 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { const int time_j = reservoir.times(j); level->mutable_linear()->add_vars(gtl::FindOrDieNoPrint( - precedence_cache, std::make_pair(time_j, time_i))); + precedence_cache, + std::make_tuple(time_j, time_i, active_j, active_i))); level->mutable_linear()->add_coeffs(reservoir.demands(j)); } @@ -165,7 +168,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { } else { // If all demands have the same sign, we do not care about the order, just // the sum. - auto *const sum = + auto* const sum = context->working_model->add_constraints()->mutable_linear(); for (int i = 0; i < num_events; ++i) { sum->add_vars(is_active_literal(i)); @@ -179,7 +182,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { // We need to do it only if 0 is not in [min_level..max_level]. // Otherwise, the regular propagation will already check it. 
if (reservoir.min_level() > 0 || reservoir.max_level() < 0) { - auto *const sum_at_zero = + auto* const sum_at_zero = context->working_model->add_constraints()->mutable_linear(); for (int i = 0; i < num_events; ++i) { const int active_i = is_active_literal(i); @@ -189,7 +192,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { const int lesseq_0 = context->NewBoolVar(); // lesseq_0 => (time_i <= 0) && active_i is true - ConstraintProto *const lesseq = context->working_model->add_constraints(); + ConstraintProto* const lesseq = context->working_model->add_constraints(); lesseq->add_enforcement_literal(lesseq_0); lesseq->mutable_linear()->add_vars(time_i); lesseq->mutable_linear()->add_coeffs(1); @@ -201,7 +204,7 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { } // Not(lesseq_0) && active_i => (time_i >= 1) - ConstraintProto *const greater = + ConstraintProto* const greater = context->working_model->add_constraints(); greater->add_enforcement_literal(NegatedRef(lesseq_0)); greater->add_enforcement_literal(active_i); @@ -222,8 +225,8 @@ void ExpandReservoir(ConstraintProto *ct, PresolveContext *context) { context->UpdateRuleStats("reservoir: expanded"); } -void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { - const IntegerArgumentProto &int_mod = ct->int_mod(); +void ExpandIntMod(ConstraintProto* ct, PresolveContext* context) { + const IntegerArgumentProto& int_mod = ct->int_mod(); const int var = int_mod.vars(0); const int mod_var = int_mod.vars(1); const int target_var = int_mod.target(); @@ -242,13 +245,13 @@ void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { auto add_enforcement_literal_if_needed = [&]() { if (ct->enforcement_literal_size() == 0) return; const int literal = ct->enforcement_literal(0); - ConstraintProto *const last = context->working_model->mutable_constraints( + ConstraintProto* const last = context->working_model->mutable_constraints( 
context->working_model->constraints_size() - 1); last->add_enforcement_literal(literal); }; // div = var / mod. - IntegerArgumentProto *const div_proto = + IntegerArgumentProto* const div_proto = context->working_model->add_constraints()->mutable_int_div(); div_proto->set_target(div_var); div_proto->add_vars(var); @@ -258,7 +261,7 @@ void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { // Checks if mod is constant. if (mod_lb == mod_ub) { // var - div_var * mod = target. - LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context->working_model->add_constraints()->mutable_linear(); lin->add_vars(int_mod.vars(0)); lin->add_coeffs(1); @@ -273,7 +276,7 @@ void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { // Create prod_var = div_var * mod. const int prod_var = context->NewIntVar( Domain(var_lb * mod_lb / mod_ub, var_ub * mod_ub / mod_lb)); - IntegerArgumentProto *const int_prod = + IntegerArgumentProto* const int_prod = context->working_model->add_constraints()->mutable_int_prod(); int_prod->set_target(prod_var); int_prod->add_vars(div_var); @@ -281,7 +284,7 @@ void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { add_enforcement_literal_if_needed(); // var - prod_var = target. 
- LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context->working_model->add_constraints()->mutable_linear(); lin->add_vars(var); lin->add_coeffs(1); @@ -299,8 +302,8 @@ void ExpandIntMod(ConstraintProto *ct, PresolveContext *context) { } void ExpandIntProdWithBoolean(int bool_ref, int int_ref, int product_ref, - PresolveContext *context) { - ConstraintProto *const one = context->working_model->add_constraints(); + PresolveContext* context) { + ConstraintProto* const one = context->working_model->add_constraints(); one->add_enforcement_literal(bool_ref); one->mutable_linear()->add_vars(int_ref); one->mutable_linear()->add_coeffs(1); @@ -309,7 +312,7 @@ void ExpandIntProdWithBoolean(int bool_ref, int int_ref, int product_ref, one->mutable_linear()->add_domain(0); one->mutable_linear()->add_domain(0); - ConstraintProto *const zero = context->working_model->add_constraints(); + ConstraintProto* const zero = context->working_model->add_constraints(); zero->add_enforcement_literal(NegatedRef(bool_ref)); zero->mutable_linear()->add_vars(product_ref); zero->mutable_linear()->add_coeffs(1); @@ -318,8 +321,8 @@ void ExpandIntProdWithBoolean(int bool_ref, int int_ref, int product_ref, } void AddXEqualYOrXEqualZero(int x_eq_y, int x, int y, - PresolveContext *context) { - ConstraintProto *equality = context->working_model->add_constraints(); + PresolveContext* context) { + ConstraintProto* equality = context->working_model->add_constraints(); equality->add_enforcement_literal(x_eq_y); equality->mutable_linear()->add_vars(x); equality->mutable_linear()->add_coeffs(1); @@ -332,7 +335,7 @@ void AddXEqualYOrXEqualZero(int x_eq_y, int x, int y, // a_ref spans across 0, b_ref does not. 
void ExpandIntProdWithOneAcrossZero(int a_ref, int b_ref, int product_ref, - PresolveContext *context) { + PresolveContext* context) { DCHECK_LT(context->MinOf(a_ref), 0); DCHECK_GT(context->MaxOf(a_ref), 0); DCHECK(context->MinOf(b_ref) >= 0 || context->MaxOf(b_ref) <= 0); @@ -353,7 +356,7 @@ void ExpandIntProdWithOneAcrossZero(int a_ref, int b_ref, int product_ref, b_is_positive ? Domain({0, context->MaxOf(product_ref)}) : Domain({context->MinOf(product_ref), 0}); const int pos_a_product = context->NewIntVar(pos_a_product_domain); - IntegerArgumentProto *pos_product = + IntegerArgumentProto* pos_product = context->working_model->add_constraints()->mutable_int_prod(); pos_product->set_target(pos_a_product); pos_product->add_vars(pos_a_ref); @@ -364,14 +367,14 @@ void ExpandIntProdWithOneAcrossZero(int a_ref, int b_ref, int product_ref, b_is_positive ? Domain({context->MinOf(product_ref), 0}) : Domain({0, context->MaxOf(product_ref)}); const int neg_a_product = context->NewIntVar(neg_a_product_domain); - IntegerArgumentProto *neg_product = + IntegerArgumentProto* neg_product = context->working_model->add_constraints()->mutable_int_prod(); neg_product->set_target(neg_a_product); neg_product->add_vars(neg_a_ref); neg_product->add_vars(b_ref); // Link back to the original product. - LinearConstraintProto *lin = + LinearConstraintProto* lin = context->working_model->add_constraints()->mutable_linear(); lin->add_vars(product_ref); lin->add_coeffs(-1); @@ -384,7 +387,7 @@ void ExpandIntProdWithOneAcrossZero(int a_ref, int b_ref, int product_ref, } void ExpandIntProdWithTwoAcrossZero(int a_ref, int b_ref, int product_ref, - PresolveContext *context) { + PresolveContext* context) { // Split a_ref domain in two, controlled by a new literal. 
const int a_is_positive = context->NewBoolVar(); context->AddImplyInDomain(a_is_positive, a_ref, {0, kint64max}); @@ -407,7 +410,7 @@ void ExpandIntProdWithTwoAcrossZero(int a_ref, int b_ref, int product_ref, ExpandIntProdWithOneAcrossZero(b_ref, neg_a_ref, neg_product_ref, context); // Link back to the original product. - LinearConstraintProto *lin = + LinearConstraintProto* lin = context->working_model->add_constraints()->mutable_linear(); lin->add_vars(product_ref); lin->add_coeffs(-1); @@ -419,8 +422,8 @@ void ExpandIntProdWithTwoAcrossZero(int a_ref, int b_ref, int product_ref, lin->add_domain(0); } -void ExpandIntProd(ConstraintProto *ct, PresolveContext *context) { - const IntegerArgumentProto &int_prod = ct->int_prod(); +void ExpandIntProd(ConstraintProto* ct, PresolveContext* context) { + const IntegerArgumentProto& int_prod = ct->int_prod(); if (int_prod.vars_size() != 2) return; const int a = int_prod.vars(0); const int b = int_prod.vars(1); @@ -472,7 +475,7 @@ void ExpandIntProd(ConstraintProto *ct, PresolveContext *context) { } } -void ExpandInverse(ConstraintProto *ct, PresolveContext *context) { +void ExpandInverse(ConstraintProto* ct, PresolveContext* context) { const int size = ct->inverse().f_direct().size(); CHECK_EQ(size, ct->inverse().f_inverse().size()); @@ -500,14 +503,14 @@ void ExpandInverse(ConstraintProto *ct, PresolveContext *context) { // Note this reaches the fixpoint as there is a one to one mapping between // (variable-value) pairs in each vector. const auto filter_inverse_domain = [context, size, &possible_values]( - const auto &direct, - const auto &inverse) { + const auto& direct, + const auto& inverse) { // Propagate for the inverse vector to the direct vector. 
for (int i = 0; i < size; ++i) { possible_values.clear(); const Domain domain = context->DomainOf(direct[i]); bool removed_value = false; - for (const ClosedInterval &interval : domain) { + for (const ClosedInterval& interval : domain) { for (int64 j = interval.start; j <= interval.end; ++j) { if (context->DomainOf(inverse[j]).Contains(i)) { possible_values.push_back(j); @@ -542,7 +545,7 @@ void ExpandInverse(ConstraintProto *ct, PresolveContext *context) { for (int i = 0; i < size; ++i) { const int f_i = ct->inverse().f_direct(i); const Domain domain = context->DomainOf(f_i); - for (const ClosedInterval &interval : domain) { + for (const ClosedInterval& interval : domain) { for (int64 j = interval.start; j <= interval.end; ++j) { // We have f[i] == j <=> r[j] == i; const int r_j = ct->inverse().f_inverse(j); @@ -561,8 +564,8 @@ void ExpandInverse(ConstraintProto *ct, PresolveContext *context) { context->UpdateRuleStats("inverse: expanded"); } -void ExpandElement(ConstraintProto *ct, PresolveContext *context) { - const ElementConstraintProto &element = ct->element(); +void ExpandElement(ConstraintProto* ct, PresolveContext* context) { + const ElementConstraintProto& element = ct->element(); const int index_ref = element.index(); const int target_ref = element.target(); const int size = element.vars_size(); @@ -578,7 +581,7 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { std::vector invalid_indices; Domain index_domain = context->DomainOf(index_ref); Domain target_domain = context->DomainOf(target_ref); - for (const ClosedInterval &interval : index_domain) { + for (const ClosedInterval& interval : index_domain) { for (int64 v = interval.start; v <= interval.end; ++v) { const int var = element.vars(v); const Domain var_domain = context->DomainOf(var); @@ -614,7 +617,7 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { // no longer valid for the target variable. 
They are created only for values // that have multiples literals supporting them. // Order is not important. - absl::flat_hash_map supports; + absl::flat_hash_map supports; if (all_constants && target_ref != index_ref) { if (!context->IntersectDomainWith( target_ref, Domain::FromValues(constant_var_values))) { @@ -629,12 +632,12 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { return; } - for (const ClosedInterval &interval : target_domain) { + for (const ClosedInterval& interval : target_domain) { for (int64 v = interval.start; v <= interval.end; ++v) { const int usage = gtl::FindOrDie(constant_var_values_usage, v); if (usage > 1) { const int lit = context->GetOrCreateVarValueEncoding(target_ref, v); - BoolArgumentProto *const support = + BoolArgumentProto* const support = context->working_model->add_constraints()->mutable_bool_or(); supports[v] = support; support->add_literals(NegatedRef(lit)); @@ -645,9 +648,9 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { // While this is not stricly needed since all value in the index will be // covered, it allows to easily detect this fact in the presolve. 
- auto *bool_or = context->working_model->add_constraints()->mutable_bool_or(); + auto* bool_or = context->working_model->add_constraints()->mutable_bool_or(); - for (const ClosedInterval &interval : index_domain) { + for (const ClosedInterval& interval : index_domain) { for (int64 v = interval.start; v <= interval.end; ++v) { const int var = element.vars(v); const int index_lit = context->GetOrCreateVarValueEncoding(index_ref, v); @@ -681,7 +684,7 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { context->AddImplyInDomain(index_lit, target_ref, var_domain); } } else { - ConstraintProto *const ct = context->working_model->add_constraints(); + ConstraintProto* const ct = context->working_model->add_constraints(); ct->add_enforcement_literal(index_lit); ct->mutable_linear()->add_vars(var); ct->mutable_linear()->add_coeffs(1); @@ -721,12 +724,12 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { << " over " << var_min << " among " << size << " values."; } - LinearConstraintProto *const linear = + LinearConstraintProto* const linear = context->working_model->add_constraints()->mutable_linear(); int64 rhs = -base; linear->add_vars(target_ref); linear->add_coeffs(-1); - for (const ClosedInterval &interval : index_domain) { + for (const ClosedInterval& interval : index_domain) { for (int64 v = interval.start; v <= interval.end; ++v) { const int ref = element.vars(v); const int index_lit = @@ -755,14 +758,14 @@ void ExpandElement(ConstraintProto *ct, PresolveContext *context) { // Adds clauses so that literals[i] true <=> encoding[value[i]] true. // This also implicitly use the fact that exactly one alternative is true. 
void LinkLiteralsAndValues( - const std::vector &value_literals, const std::vector &values, - const absl::flat_hash_map &target_encoding, - PresolveContext *context) { + const std::vector& value_literals, const std::vector& values, + const absl::flat_hash_map& target_encoding, + PresolveContext* context) { CHECK_EQ(value_literals.size(), values.size()); // TODO(user): Make sure this does not appear in the profile. // We use a map to make this method deterministic. - std::map > value_literals_per_target_literal; + std::map> value_literals_per_target_literal; // If a value is false (i.e not possible), then the tuple with this // value is false too (i.e not possible). Conversely, if the tuple is @@ -776,7 +779,7 @@ void LinkLiteralsAndValues( // If all tuples supporting a value are false, then this value must be // false. - for (const auto &it : value_literals_per_target_literal) { + for (const auto& it : value_literals_per_target_literal) { const int target_literal = it.first; switch (it.second.size()) { case 0: { @@ -791,7 +794,7 @@ void LinkLiteralsAndValues( break; } default: { - BoolArgumentProto *const bool_or = + BoolArgumentProto* const bool_or = context->working_model->add_constraints()->mutable_bool_or(); bool_or->add_literals(NegatedRef(target_literal)); for (const int value_literal : it.second) { @@ -803,8 +806,8 @@ void LinkLiteralsAndValues( } } -void ExpandAutomaton(ConstraintProto *ct, PresolveContext *context) { - AutomatonConstraintProto &proto = *ct->mutable_automaton(); +void ExpandAutomaton(ConstraintProto* ct, PresolveContext* context) { + AutomatonConstraintProto& proto = *ct->mutable_automaton(); if (proto.vars_size() == 0) { const int64 initial_state = proto.starting_state(); @@ -828,7 +831,7 @@ void ExpandAutomaton(ConstraintProto *ct, PresolveContext *context) { // Compute the set of reachable state at each time point. 
const absl::flat_hash_set final_states( {proto.final_states().begin(), proto.final_states().end()}); - std::vector > reachable_states(n + 1); + std::vector> reachable_states(n + 1); reachable_states[0].insert(proto.starting_state()); // Forward pass. @@ -917,7 +920,7 @@ void ExpandAutomaton(ConstraintProto *ct, PresolveContext *context) { // Note that we do not need the ExactlyOneConstraint(tuple_literals) // because it is already implicitly encoded since we have exactly one // transition value. - LinearConstraintProto *const exactly_one = + LinearConstraintProto* const exactly_one = context->working_model->add_constraints()->mutable_linear(); exactly_one->add_domain(1); exactly_one->add_domain(1); @@ -941,7 +944,7 @@ void ExpandAutomaton(ConstraintProto *ct, PresolveContext *context) { } // Fully encode the variable. - for (const ClosedInterval &interval : context->DomainOf(vars[time])) { + for (const ClosedInterval& interval : context->DomainOf(vars[time])) { for (int64 v = interval.start; v <= interval.end; ++v) { encoding[v] = context->GetOrCreateVarValueEncoding(vars[time], v); } @@ -986,11 +989,11 @@ void ExpandAutomaton(ConstraintProto *ct, PresolveContext *context) { ct->Clear(); } -void ExpandNegativeTable(ConstraintProto *ct, PresolveContext *context) { - TableConstraintProto &table = *ct->mutable_table(); +void ExpandNegativeTable(ConstraintProto* ct, PresolveContext* context) { + TableConstraintProto& table = *ct->mutable_table(); const int num_vars = table.vars_size(); const int num_original_tuples = table.values_size() / num_vars; - std::vector > tuples(num_original_tuples); + std::vector> tuples(num_original_tuples); int count = 0; for (int i = 0; i < num_original_tuples; ++i) { for (int j = 0; j < num_vars; ++j) { @@ -1014,7 +1017,7 @@ void ExpandNegativeTable(ConstraintProto *ct, PresolveContext *context) { // For each tuple, forbid the variables values to be this tuple. 
std::vector clause; - for (const std::vector &tuple : tuples) { + for (const std::vector& tuple : tuples) { clause.clear(); for (int i = 0; i < num_vars; ++i) { const int64 value = tuple[i]; @@ -1025,7 +1028,7 @@ void ExpandNegativeTable(ConstraintProto *ct, PresolveContext *context) { clause.push_back(NegatedRef(literal)); } if (!clause.empty()) { - BoolArgumentProto *bool_or = + BoolArgumentProto* bool_or = context->working_model->add_constraints()->mutable_bool_or(); for (const int lit : clause) { bool_or->add_literals(lit); @@ -1036,8 +1039,8 @@ void ExpandNegativeTable(ConstraintProto *ct, PresolveContext *context) { ct->Clear(); } -void ExpandLinMin(ConstraintProto *ct, PresolveContext *context) { - ConstraintProto *const lin_max = context->working_model->add_constraints(); +void ExpandLinMin(ConstraintProto* ct, PresolveContext* context) { + ConstraintProto* const lin_max = context->working_model->add_constraints(); for (int i = 0; i < ct->enforcement_literal_size(); ++i) { lin_max->add_enforcement_literal(ct->enforcement_literal(i)); } @@ -1047,7 +1050,7 @@ void ExpandLinMin(ConstraintProto *ct, PresolveContext *context) { lin_max->mutable_lin_max()->mutable_target()); for (int i = 0; i < ct->lin_min().exprs_size(); ++i) { - LinearExpressionProto *const expr = lin_max->mutable_lin_max()->add_exprs(); + LinearExpressionProto* const expr = lin_max->mutable_lin_max()->add_exprs(); SetToNegatedLinearExpression(ct->lin_min().exprs(i), expr); } ct->Clear(); @@ -1058,15 +1061,15 @@ void ExpandLinMin(ConstraintProto *ct, PresolveContext *context) { // (tuple_literals, values) contains all valid projected tuples. The // tuples_with_any vector provides a list of tuple_literals that will support // any value. 
-void ProcessOneVariable(const std::vector &tuple_literals, - const std::vector &values, int variable, - const std::vector &tuples_with_any, - PresolveContext *context) { +void ProcessOneVariable(const std::vector& tuple_literals, + const std::vector& values, int variable, + const std::vector& tuples_with_any, + PresolveContext* context) { VLOG(2) << "Process var(" << variable << ") with domain " << context->DomainOf(variable) << " and " << values.size() << " active tuples, and " << tuples_with_any.size() << " any tuples"; CHECK_EQ(tuple_literals.size(), values.size()); - std::vector > pairs; + std::vector> pairs; // Collect pairs of value-literal. for (int i = 0; i < values.size(); ++i) { @@ -1092,7 +1095,7 @@ void ProcessOneVariable(const std::vector &tuple_literals, } else { const int value_literal = context->GetOrCreateVarValueEncoding(variable, value); - BoolArgumentProto *no_support = + BoolArgumentProto* no_support = context->working_model->add_constraints()->mutable_bool_or(); for (const int lit : selected) { no_support->add_literals(lit); @@ -1110,10 +1113,9 @@ void ProcessOneVariable(const std::vector &tuple_literals, // Simpler encoding for table constraints with 2 variables. 
void AddSizeTwoTable( - const std::vector &vars, - const std::vector > &tuples, - const std::vector > &values_per_var, - PresolveContext *context) { + const std::vector& vars, const std::vector>& tuples, + const std::vector>& values_per_var, + PresolveContext* context) { CHECK_EQ(vars.size(), 2); const int left_var = vars[0]; const int right_var = vars[1]; @@ -1124,10 +1126,10 @@ void AddSizeTwoTable( return; } - std::map > left_to_right; - std::map > right_to_left; + std::map> left_to_right; + std::map> right_to_left; - for (const auto &tuple : tuples) { + for (const auto& tuple : tuples) { const int64 left_value(tuple[0]); const int64 right_value(tuple[1]); CHECK(context->DomainContains(left_var, left_value)); @@ -1146,14 +1148,14 @@ void AddSizeTwoTable( int num_large_clause_added = 0; auto add_support_constraint = [context, &num_clause_added, &num_large_clause_added, &num_implications]( - int lit, const std::vector &support_literals, + int lit, const std::vector& support_literals, int max_support_size) { if (support_literals.size() == max_support_size) return; if (support_literals.size() == 1) { context->AddImplication(lit, support_literals.front()); num_implications++; } else { - BoolArgumentProto *bool_or = + BoolArgumentProto* bool_or = context->working_model->add_constraints()->mutable_bool_or(); for (const int support_literal : support_literals) { bool_or->add_literals(support_literal); @@ -1166,10 +1168,10 @@ void AddSizeTwoTable( } }; - for (const auto &it : left_to_right) { + for (const auto& it : left_to_right) { add_support_constraint(it.first, it.second, values_per_var[1].size()); } - for (const auto &it : right_to_left) { + for (const auto& it : right_to_left) { add_support_constraint(it.first, it.second, values_per_var[0].size()); } VLOG(2) << "Table: 2 variables, " << tuples.size() << " tuples encoded using " @@ -1178,14 +1180,14 @@ void AddSizeTwoTable( << " implications"; } -void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) 
{ - const TableConstraintProto &table = ct->table(); +void ExpandPositiveTable(ConstraintProto* ct, PresolveContext* context) { + const TableConstraintProto& table = ct->table(); const std::vector vars(table.vars().begin(), table.vars().end()); const int num_vars = table.vars_size(); const int num_original_tuples = table.values_size() / num_vars; // Read tuples flat array and recreate the vector of tuples. - std::vector > tuples(num_original_tuples); + std::vector> tuples(num_original_tuples); int count = 0; for (int tuple_index = 0; tuple_index < num_original_tuples; ++tuple_index) { for (int var_index = 0; var_index < num_vars; ++var_index) { @@ -1195,7 +1197,7 @@ void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) { // Compute the set of possible values for each variable (from the table). // Remove invalid tuples along the way. - std::vector > values_per_var(num_vars); + std::vector> values_per_var(num_vars); int new_size = 0; for (int tuple_index = 0; tuple_index < num_original_tuples; ++tuple_index) { bool keep = true; @@ -1259,8 +1261,8 @@ void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) { // tuples. int num_prefix_tuples = 0; { - absl::flat_hash_set > prefixes; - for (const std::vector &tuple : tuples) { + absl::flat_hash_set> prefixes; + for (const std::vector& tuple : tuples) { prefixes.insert(absl::MakeSpan(tuple.data(), num_vars - 1)); } num_prefix_tuples = prefixes.size(); @@ -1342,7 +1344,7 @@ void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) { // var = sum(tuple_literals[i] * values[i]) // It could be done here or along the deductions grouping. 
std::vector tuple_literals(num_compressed_tuples); - BoolArgumentProto *at_least_one_tuple = + BoolArgumentProto* at_least_one_tuple = context->working_model->add_constraints()->mutable_bool_or(); // If we want to enumerate all solutions, we should not add new variables that @@ -1356,7 +1358,7 @@ void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) { // // TODO(user): We use keep_all_feasible_solutions as a proxy for enumerate // all solution, but the concept are slightly different though. - BoolArgumentProto *at_most_one_tuple = nullptr; + BoolArgumentProto* at_most_one_tuple = nullptr; if (context->keep_all_feasible_solutions) { at_most_one_tuple = context->working_model->add_constraints()->mutable_at_most_one(); @@ -1402,9 +1404,9 @@ void ExpandPositiveTable(ConstraintProto *ct, PresolveContext *context) { ct->Clear(); } -void ExpandAllDiff(bool expand_non_permutations, ConstraintProto *ct, - PresolveContext *context) { - AllDifferentConstraintProto &proto = *ct->mutable_all_diff(); +void ExpandAllDiff(bool expand_non_permutations, ConstraintProto* ct, + PresolveContext* context) { + AllDifferentConstraintProto& proto = *ct->mutable_all_diff(); if (proto.vars_size() <= 2) return; const int num_vars = proto.vars_size(); @@ -1422,7 +1424,7 @@ void ExpandAllDiff(bool expand_non_permutations, ConstraintProto *ct, // Collect all possible variables that can take each value, and add one linear // equation per value stating that this value can be assigned at most once, or // exactly once in case of permutation. - for (const ClosedInterval &interval : union_of_domains) { + for (const ClosedInterval& interval : union_of_domains) { for (int64 v = interval.start; v <= interval.end; ++v) { // Collect references which domain contains v. 
std::vector possible_refs; @@ -1449,7 +1451,7 @@ void ExpandAllDiff(bool expand_non_permutations, ConstraintProto *ct, } } - LinearConstraintProto *at_most_or_equal_one = + LinearConstraintProto* at_most_or_equal_one = context->working_model->add_constraints()->mutable_linear(); int lb = is_permutation ? 1 : 0; int ub = 1; @@ -1481,7 +1483,7 @@ void ExpandAllDiff(bool expand_non_permutations, ConstraintProto *ct, } // namespace -void ExpandCpModel(PresolveOptions options, PresolveContext *context) { +void ExpandCpModel(PresolveOptions options, PresolveContext* context) { if (context->ModelIsUnsat()) return; // Make sure all domains are initialized. @@ -1489,7 +1491,7 @@ void ExpandCpModel(PresolveOptions options, PresolveContext *context) { const int num_constraints = context->working_model->constraints_size(); for (int i = 0; i < num_constraints; ++i) { - ConstraintProto *const ct = context->working_model->mutable_constraints(i); + ConstraintProto* const ct = context->working_model->mutable_constraints(i); bool skip = false; switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kReservoir: diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index c3c63dd1d4..1a38922430 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -31,9 +31,9 @@ namespace operations_research { namespace sat { NeighborhoodGeneratorHelper::NeighborhoodGeneratorHelper( - CpModelProto const *model_proto, SatParameters const *parameters, - SharedResponseManager *shared_response, SharedTimeLimit *shared_time_limit, - SharedBoundsManager *shared_bounds) + CpModelProto const* model_proto, SatParameters const* parameters, + SharedResponseManager* shared_response, SharedTimeLimit* shared_time_limit, + SharedBoundsManager* shared_bounds) : SubSolver(""), parameters_(*parameters), model_proto_(*model_proto), @@ -64,7 +64,7 @@ void NeighborhoodGeneratorHelper::Synchronize() { const int64 new_lb = new_lower_bounds[i]; const int64 new_ub = 
new_upper_bounds[i]; if (VLOG_IS_ON(3)) { - const auto &domain = + const auto& domain = model_proto_with_only_variables_.variables(var).domain(); const int64 old_lb = domain.Get(0); const int64 old_ub = domain.Get(domain.size() - 1); @@ -137,7 +137,7 @@ void NeighborhoodGeneratorHelper::RecomputeHelperData() { active_variables_set_.assign(model_proto_.variables_size(), false); if (parameters_.lns_focus_on_decision_variables()) { - for (const auto &search_strategy : model_proto_.search_strategy()) { + for (const auto& search_strategy : model_proto_.search_strategy()) { for (const int var : search_strategy.variables()) { const int pos_var = PositiveRef(var); if (!active_variables_set_[pos_var] && !IsConstant(pos_var)) { @@ -181,8 +181,8 @@ Neighborhood NeighborhoodGeneratorHelper::FullNeighborhood() const { } Neighborhood NeighborhoodGeneratorHelper::FixGivenVariables( - const CpSolverResponse &initial_solution, - const std::vector &variables_to_fix) const { + const CpSolverResponse& initial_solution, + const std::vector& variables_to_fix) const { // TODO(user,user): Do not include constraint with all fixed variables to // save memory and speed-up LNS presolving. Neighborhood neighborhood = FullNeighborhood(); @@ -215,7 +215,7 @@ Neighborhood NeighborhoodGeneratorHelper::FixGivenVariables( } Neighborhood NeighborhoodGeneratorHelper::RemoveMarkedConstraints( - const std::vector &constraints_to_remove) const { + const std::vector& constraints_to_remove) const { // TODO(user,user): Do not include constraint with all fixed variables to // save memory and speed-up LNS presolving. 
Neighborhood neighborhood = FullNeighborhood(); @@ -230,8 +230,8 @@ Neighborhood NeighborhoodGeneratorHelper::RemoveMarkedConstraints( } Neighborhood NeighborhoodGeneratorHelper::RelaxGivenVariables( - const CpSolverResponse &initial_solution, - const std::vector &relaxed_variables) const { + const CpSolverResponse& initial_solution, + const std::vector& relaxed_variables) const { std::vector relaxed_variables_set(model_proto_.variables_size(), false); for (const int var : relaxed_variables) relaxed_variables_set[var] = true; std::vector fixed_variables; @@ -244,7 +244,7 @@ Neighborhood NeighborhoodGeneratorHelper::RelaxGivenVariables( } Neighborhood NeighborhoodGeneratorHelper::FixAllVariables( - const CpSolverResponse &initial_solution) const { + const CpSolverResponse& initial_solution) const { std::vector fixed_variables; for (const int i : active_variables_) { fixed_variables.push_back(i); @@ -274,7 +274,7 @@ void NeighborhoodGenerator::Synchronize() { int num_fully_solved_in_batch = 0; int num_not_fully_solved_in_batch = 0; - for (const SolveData &data : solve_data_) { + for (const SolveData& data : solve_data_) { AdditionalProcessingOnSynchronize(data); ++num_calls_; @@ -346,8 +346,8 @@ void NeighborhoodGenerator::Synchronize() { namespace { template -void GetRandomSubset(double relative_size, std::vector *base, - Random *random) { +void GetRandomSubset(double relative_size, std::vector* base, + Random* random) { // TODO(user): we could generate this more efficiently than using random // shuffle. 
std::shuffle(base->begin(), base->end(), *random); @@ -358,16 +358,16 @@ void GetRandomSubset(double relative_size, std::vector *base, } // namespace Neighborhood SimpleNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { std::vector fixed_variables = helper_.ActiveVariables(); GetRandomSubset(1.0 - difficulty, &fixed_variables, random); return helper_.FixGivenVariables(initial_solution, fixed_variables); } Neighborhood VariableGraphNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { const int num_active_vars = helper_.ActiveVariables().size(); const int num_model_vars = helper_.ModelProto().variables_size(); const int target_size = std::ceil(difficulty * num_active_vars); @@ -418,8 +418,8 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate( } Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { const int num_active_vars = helper_.ActiveVariables().size(); const int num_model_vars = helper_.ModelProto().variables_size(); const int target_size = std::ceil(difficulty * num_active_vars); @@ -474,8 +474,8 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( Neighborhood GenerateSchedulingNeighborhoodForRelaxation( const absl::Span intervals_to_relax, - const CpSolverResponse &initial_solution, - const NeighborhoodGeneratorHelper &helper) { + const CpSolverResponse& initial_solution, + const NeighborhoodGeneratorHelper& helper) { Neighborhood neighborhood = helper.FullNeighborhood(); neighborhood.is_reduced = (intervals_to_relax.size() < 
@@ -489,7 +489,7 @@ Neighborhood GenerateSchedulingNeighborhoodForRelaxation( for (const int i : helper.TypeToConstraints(ConstraintProto::kInterval)) { if (ignored_intervals.count(i)) continue; - const ConstraintProto &interval_ct = neighborhood.cp_model.constraints(i); + const ConstraintProto& interval_ct = neighborhood.cp_model.constraints(i); if (interval_ct.enforcement_literal().empty()) continue; CHECK_EQ(interval_ct.enforcement_literal().size(), 1); @@ -511,11 +511,11 @@ Neighborhood GenerateSchedulingNeighborhoodForRelaxation( for (const int c : helper.TypeToConstraints(ConstraintProto::kNoOverlap)) { // Sort all non-relaxed intervals of this constraint by current start time. - std::vector > start_interval_pairs; + std::vector> start_interval_pairs; for (const int i : neighborhood.cp_model.constraints(c).no_overlap().intervals()) { if (ignored_intervals.count(i)) continue; - const ConstraintProto &interval_ct = neighborhood.cp_model.constraints(i); + const ConstraintProto& interval_ct = neighborhood.cp_model.constraints(i); // TODO(user): we ignore size zero for now. 
const int size_var = interval_ct.interval().size(); @@ -540,7 +540,7 @@ Neighborhood GenerateSchedulingNeighborhoodForRelaxation( CHECK_LE(initial_solution.solution(before_var), initial_solution.solution(after_var)); - LinearConstraintProto *linear = + LinearConstraintProto* linear = neighborhood.cp_model.add_constraints()->mutable_linear(); linear->add_domain(kint64min); linear->add_domain(0); @@ -566,8 +566,8 @@ Neighborhood GenerateSchedulingNeighborhoodForRelaxation( } Neighborhood SchedulingNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { const auto span = helper_.TypeToConstraints(ConstraintProto::kInterval); std::vector intervals_to_relax(span.begin(), span.end()); GetRandomSubset(difficulty, &intervals_to_relax, random); @@ -577,11 +577,11 @@ Neighborhood SchedulingNeighborhoodGenerator::Generate( } Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { - std::vector > start_interval_pairs; + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { + std::vector> start_interval_pairs; for (const int i : helper_.TypeToConstraints(ConstraintProto::kInterval)) { - const ConstraintProto &interval_ct = helper_.ModelProto().constraints(i); + const ConstraintProto& interval_ct = helper_.ModelProto().constraints(i); const int start_var = interval_ct.interval().start(); const int64 start_value = initial_solution.solution(start_var); @@ -628,8 +628,8 @@ bool RelaxationInducedNeighborhoodGenerator::ReadyToGenerate() const { } Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { 
Neighborhood neighborhood = helper_.FullNeighborhood(); neighborhood.is_generated = false; @@ -660,15 +660,16 @@ Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( : lp_solution_available; if (use_lp_relaxation) { rins_neighborhood = - GetRINSNeighborhood(response_manager_, /*relaxation_solutions=*/nullptr, - lp_solutions_, incomplete_solutions_, random); + GetRINSNeighborhood(response_manager_, + /*relaxation_solutions=*/nullptr, lp_solutions_, + incomplete_solutions_, random); neighborhood.source_info = incomplete_solution_available ? "incomplete" : "lp"; } else { CHECK(relaxation_solution_available || incomplete_solution_available); rins_neighborhood = GetRINSNeighborhood( - response_manager_, relaxation_solutions_, /*lp_solutions=*/nullptr, - incomplete_solutions_, random); + response_manager_, relaxation_solutions_, + /*lp_solutions=*/nullptr, incomplete_solutions_, random); neighborhood.source_info = incomplete_solution_available ? "incomplete" : "relaxation"; } @@ -699,7 +700,7 @@ Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( neighborhood.is_reduced = true; } - for (const std::pair > + for (const std::pair> reduced_var : rins_neighborhood.reduced_domain_vars) { const int var = reduced_var.first; const int64 lb = reduced_var.second.first; @@ -720,8 +721,8 @@ Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( } Neighborhood ConsecutiveConstraintsRelaxationNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { std::vector removable_constraints; const int num_constraints = helper_.ModelProto().constraints_size(); removable_constraints.reserve(num_constraints); @@ -756,7 +757,7 @@ Neighborhood ConsecutiveConstraintsRelaxationNeighborhoodGenerator::Generate( WeightedRandomRelaxationNeighborhoodGenerator:: WeightedRandomRelaxationNeighborhoodGenerator( - 
NeighborhoodGeneratorHelper const *helper, const std::string &name) + NeighborhoodGeneratorHelper const* helper, const std::string& name) : NeighborhoodGenerator(name, helper) { std::vector removable_constraints; const int num_constraints = helper_.ModelProto().constraints_size(); @@ -808,11 +809,11 @@ WeightedRandomRelaxationNeighborhoodGenerator:: } void WeightedRandomRelaxationNeighborhoodGenerator:: - AdditionalProcessingOnSynchronize(const SolveData &solve_data) { + AdditionalProcessingOnSynchronize(const SolveData& solve_data) { const IntegerValue best_objective_improvement = solve_data.new_objective_bound - solve_data.initial_best_objective_bound; - const std::vector &removed_constraints = + const std::vector& removed_constraints = removed_constraints_[solve_data.neighborhood_id]; // Heuristic: We change the weights of the removed constraints if the @@ -849,8 +850,8 @@ void WeightedRandomRelaxationNeighborhoodGenerator:: } Neighborhood WeightedRandomRelaxationNeighborhoodGenerator::Generate( - const CpSolverResponse &initial_solution, double difficulty, - random_engine_t *random) { + const CpSolverResponse& initial_solution, double difficulty, + random_engine_t* random) { const int target_size = std::round((1.0 - difficulty) * num_removable_constraints_); @@ -859,7 +860,7 @@ Neighborhood WeightedRandomRelaxationNeighborhoodGenerator::Generate( // Generate a random number between (0,1) = u[i] and use score[i] = // u[i]^(1/w[i]) and then select top k items with largest scores. 
// Reference: https://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf - std::vector > constraint_removal_scores; + std::vector> constraint_removal_scores; std::uniform_real_distribution random_var(0.0, 1.0); for (int c = 0; c < constraint_weights_.size(); ++c) { if (constraint_weights_[c] <= 0) continue; diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index 6b289e8af5..b43589c16b 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -55,13 +55,13 @@ namespace sat { namespace { template -std::vector ValuesFromProto(const Values &values) { +std::vector ValuesFromProto(const Values& values) { return std::vector(values.begin(), values.end()); } -void ComputeLinearBounds(const LinearConstraintProto &proto, - CpModelMapping *mapping, IntegerTrail *integer_trail, - int64 *sum_min, int64 *sum_max) { +void ComputeLinearBounds(const LinearConstraintProto& proto, + CpModelMapping* mapping, IntegerTrail* integer_trail, + int64* sum_min, int64* sum_max) { *sum_min = 0; *sum_max = 0; @@ -81,14 +81,14 @@ void ComputeLinearBounds(const LinearConstraintProto &proto, } // We check if the constraint is a sum(ax * xi) == value. -bool ConstraintIsEq(const LinearConstraintProto &proto) { +bool ConstraintIsEq(const LinearConstraintProto& proto) { return proto.domain_size() == 2 && proto.domain(0) == proto.domain(1); } // We check if the constraint is a sum(ax * xi) != value. 
-bool ConstraintIsNEq(const LinearConstraintProto &proto, - CpModelMapping *mapping, IntegerTrail *integer_trail, - int64 *single_value) { +bool ConstraintIsNEq(const LinearConstraintProto& proto, + CpModelMapping* mapping, IntegerTrail* integer_trail, + int64* single_value) { int64 sum_min = 0; int64 sum_max = 0; ComputeLinearBounds(proto, mapping, integer_trail, &sum_min, &sum_max); @@ -110,15 +110,15 @@ bool ConstraintIsNEq(const LinearConstraintProto &proto, } // namespace -void CpModelMapping::CreateVariables(const CpModelProto &model_proto, +void CpModelMapping::CreateVariables(const CpModelProto& model_proto, bool view_all_booleans_as_integers, - Model *m) { + Model* m) { const int num_proto_variables = model_proto.variables_size(); // All [0, 1] variables always have a corresponding Boolean, even if it is // fixed to 0 (domain == [0,0]) or fixed to 1 (domain == [1,1]). { - auto *sat_solver = m->GetOrCreate(); + auto* sat_solver = m->GetOrCreate(); CHECK_EQ(sat_solver->NumVariables(), 0); BooleanVariable new_var(0); @@ -128,7 +128,7 @@ void CpModelMapping::CreateVariables(const CpModelProto &model_proto, booleans_.resize(num_proto_variables, kNoBooleanVariable); reverse_boolean_map_.resize(num_proto_variables, -1); for (int i = 0; i < num_proto_variables; ++i) { - const auto &domain = model_proto.variables(i).domain(); + const auto& domain = model_proto.variables(i).domain(); if (domain.size() != 2) continue; if (domain[0] >= 0 && domain[1] <= 1) { booleans_[i] = new_var; @@ -165,7 +165,7 @@ void CpModelMapping::CreateVariables(const CpModelProto &model_proto, IndexReferences refs; for (int c = 0; c < model_proto.constraints_size(); ++c) { - const ConstraintProto &ct = model_proto.constraints(c); + const ConstraintProto& ct = model_proto.constraints(c); refs = GetReferencesUsedByConstraint(ct); for (const int ref : refs.variables) { used_variables.insert(PositiveRef(ref)); @@ -179,7 +179,7 @@ void CpModelMapping::CreateVariables(const CpModelProto 
&model_proto, used_variables.insert(PositiveRef(obj_var)); } } - for (const DecisionStrategyProto &strategy : + for (const DecisionStrategyProto& strategy : model_proto.search_strategy()) { for (const int var : strategy.variables()) { used_variables.insert(PositiveRef(var)); @@ -201,19 +201,19 @@ void CpModelMapping::CreateVariables(const CpModelProto &model_proto, } integers_.resize(num_proto_variables, kNoIntegerVariable); - auto *integer_trail = m->GetOrCreate(); + auto* integer_trail = m->GetOrCreate(); integer_trail->ReserveSpaceForNumVariables( var_to_instantiate_as_integer.size()); reverse_integer_map_.resize(2 * var_to_instantiate_as_integer.size(), -1); for (const int i : var_to_instantiate_as_integer) { - const auto &var_proto = model_proto.variables(i); + const auto& var_proto = model_proto.variables(i); integers_[i] = integer_trail->AddIntegerVariable(ReadDomainFromProto(var_proto)); DCHECK_LT(integers_[i], reverse_integer_map_.size()); reverse_integer_map_[integers_[i]] = i; } - auto *encoder = m->GetOrCreate(); + auto* encoder = m->GetOrCreate(); // Link any variable that has both views. for (int i = 0; i < num_proto_variables; ++i) { @@ -228,7 +228,7 @@ void CpModelMapping::CreateVariables(const CpModelProto &model_proto, // Create the interval variables. intervals_.resize(model_proto.constraints_size(), kNoIntervalVariable); for (int c = 0; c < model_proto.constraints_size(); ++c) { - const ConstraintProto &ct = model_proto.constraints(c); + const ConstraintProto& ct = model_proto.constraints(c); if (ct.constraint_case() != ConstraintProto::ConstraintCase::kInterval) { continue; } @@ -256,11 +256,11 @@ void CpModelMapping::CreateVariables(const CpModelProto &model_proto, // // TODO(user): Regroup/presolve two encoding like b => x > 2 and the same // Boolean b => x > 5. These shouldn't happen if we merge linear constraints. 
-void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, - Model *m) { - auto *encoder = m->GetOrCreate(); - auto *integer_trail = m->GetOrCreate(); - auto *sat_solver = m->GetOrCreate(); +void CpModelMapping::ExtractEncoding(const CpModelProto& model_proto, + Model* m) { + auto* encoder = m->GetOrCreate(); + auto* integer_trail = m->GetOrCreate(); + auto* sat_solver = m->GetOrCreate(); // TODO(user): Debug what makes it unsat at this point. if (sat_solver->IsModelUnsat()) return; @@ -269,12 +269,12 @@ void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, // half-reified constraint lit => equality or lit => inequality for a given // variable, and we will later sort them to detect equivalence. struct EqualityDetectionHelper { - const ConstraintProto *ct; + const ConstraintProto* ct; sat::Literal literal; int64 value; bool is_equality; // false if != instead. - bool operator<(const EqualityDetectionHelper &o) const { + bool operator<(const EqualityDetectionHelper& o) const { if (literal.Variable() == o.literal.Variable()) { if (value == o.value) return is_equality && !o.is_equality; return value < o.value; @@ -282,24 +282,24 @@ void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, return literal.Variable() < o.literal.Variable(); } }; - std::vector > var_to_equalities( + std::vector> var_to_equalities( model_proto.variables_size()); // TODO(user): We will re-add the same implied bounds during probing, so // it might not be necessary to do that here. Also, it might be too early // if some of the literal view used in the LP are created later, but that // should be fixable via calls to implied_bounds->NotifyNewIntegerView(). - auto *implied_bounds = m->GetOrCreate(); + auto* implied_bounds = m->GetOrCreate(); // Detection of literal equivalent to (i_var >= bound). We also collect // all the half-refied part and we will sort the vector for detection of the // equivalence. 
struct InequalityDetectionHelper { - const ConstraintProto *ct; + const ConstraintProto* ct; sat::Literal literal; IntegerLiteral i_lit; - bool operator<(const InequalityDetectionHelper &o) const { + bool operator<(const InequalityDetectionHelper& o) const { if (literal.Variable() == o.literal.Variable()) { return i_lit.var < o.i_lit.var; } @@ -309,7 +309,7 @@ void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, std::vector inequalities; // Loop over all contraints and fill var_to_equalities and inequalities. - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { if (ct.constraint_case() != ConstraintProto::ConstraintCase::kLinear) { continue; } @@ -433,7 +433,7 @@ void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, int num_fully_encoded = 0; int num_partially_encoded = 0; for (int i = 0; i < var_to_equalities.size(); ++i) { - std::vector &encoding = var_to_equalities[i]; + std::vector& encoding = var_to_equalities[i]; std::sort(encoding.begin(), encoding.end()); if (encoding.empty()) continue; num_constraints += encoding.size(); @@ -504,14 +504,14 @@ void CpModelMapping::ExtractEncoding(const CpModelProto &model_proto, } void CpModelMapping::PropagateEncodingFromEquivalenceRelations( - const CpModelProto &model_proto, Model *m) { - auto *encoder = m->GetOrCreate(); - auto *sat_solver = m->GetOrCreate(); + const CpModelProto& model_proto, Model* m) { + auto* encoder = m->GetOrCreate(); + auto* sat_solver = m->GetOrCreate(); // Loop over all contraints and find affine ones. 
int64 num_associations = 0; int64 num_set_to_false = 0; - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { if (!ct.enforcement_literal().empty()) continue; if (ct.constraint_case() != ConstraintProto::kLinear) continue; if (ct.linear().vars_size() != 2) continue; @@ -586,9 +586,9 @@ void CpModelMapping::PropagateEncodingFromEquivalenceRelations( } } -void CpModelMapping::DetectOptionalVariables(const CpModelProto &model_proto, - Model *m) { - const SatParameters ¶meters = *(m->GetOrCreate()); +void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto, + Model* m) { + const SatParameters& parameters = *(m->GetOrCreate()); if (!parameters.use_optional_variables()) return; if (parameters.enumerate_all_solutions()) return; @@ -609,10 +609,10 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto &model_proto, // appear to false. This can be done with a LCA computation in the tree of // Boolean implication (once the presolve remove cycles). Not sure if we can // properly exploit that afterwards though. Do some research! - std::vector > enforcement_intersection(num_proto_variables); + std::vector> enforcement_intersection(num_proto_variables); std::set literals_set; for (int c = 0; c < model_proto.constraints_size(); ++c) { - const ConstraintProto &ct = model_proto.constraints(c); + const ConstraintProto& ct = model_proto.constraints(c); if (ct.enforcement_literal().empty()) { for (const int var : UsedVariables(ct)) { already_seen[var] = true; @@ -628,7 +628,7 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto &model_proto, ct.enforcement_literal().end()); } else { // Take the intersection. 
- std::vector &vector_ref = enforcement_intersection[var]; + std::vector& vector_ref = enforcement_intersection[var]; int new_size = 0; for (const int literal : vector_ref) { if (gtl::ContainsKey(literals_set, literal)) { @@ -644,9 +644,9 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto &model_proto, // Auto-detect optional variables. int num_optionals = 0; - auto *integer_trail = m->GetOrCreate(); + auto* integer_trail = m->GetOrCreate(); for (int var = 0; var < num_proto_variables; ++var) { - const IntegerVariableProto &var_proto = model_proto.variables(var); + const IntegerVariableProto& var_proto = model_proto.variables(var); const int64 min = var_proto.domain(0); const int64 max = var_proto.domain(var_proto.domain().size() - 1); if (min == max) continue; @@ -667,7 +667,7 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto &model_proto, class FullEncodingFixedPointComputer { public: - FullEncodingFixedPointComputer(const CpModelProto &model_proto, Model *model) + FullEncodingFixedPointComputer(const CpModelProto& model_proto, Model* model) : model_proto_(model_proto), parameters_(*(model->GetOrCreate())), model_(model), @@ -733,22 +733,22 @@ class FullEncodingFixedPointComputer { bool ProcessAutomaton(ConstraintIndex ct_index); bool ProcessLinear(ConstraintIndex ct_index); - const CpModelProto &model_proto_; - const SatParameters ¶meters_; + const CpModelProto& model_proto_; + const SatParameters& parameters_; - Model *model_; - CpModelMapping *mapping_; - IntegerEncoder *integer_encoder_; - IntegerTrail *integer_trail_; + Model* model_; + CpModelMapping* mapping_; + IntegerEncoder* integer_encoder_; + IntegerTrail* integer_trail_; std::vector variable_was_added_in_to_propagate_; std::vector variables_to_propagate_; - std::vector > variable_watchers_; + std::vector> variable_watchers_; gtl::ITIVector constraint_is_finished_; gtl::ITIVector constraint_is_registered_; - absl::flat_hash_map > + absl::flat_hash_map> 
variables_to_equal_or_diff_variables_; }; @@ -775,7 +775,7 @@ void FullEncodingFixedPointComputer::ComputeFixedPoint() { int num_variables_fully_encoded_by_heuristics = 0; for (int var = 0; var < num_vars; ++var) { if (!mapping_->IsInteger(var) || IsFullyEncoded(var)) continue; - const IntegerVariableProto &int_var_proto = model_proto_.variables(var); + const IntegerVariableProto& int_var_proto = model_proto_.variables(var); const Domain domain = ReadDomainFromProto(int_var_proto); int64 domain_size = domain.Size(); int64 num_diff_or_equal_var_constraints = 0; @@ -783,7 +783,7 @@ void FullEncodingFixedPointComputer::ComputeFixedPoint() { if (domain_size <= 2) continue; - const absl::flat_hash_set &value_set = + const absl::flat_hash_set& value_set = mapping_->PotentialEncodedValues(var); for (const int value : value_set) { if (value > domain.Min() && value < domain.Max() && @@ -792,7 +792,7 @@ void FullEncodingFixedPointComputer::ComputeFixedPoint() { } } - const auto &it = variables_to_equal_or_diff_variables_.find(var); + const auto& it = variables_to_equal_or_diff_variables_.find(var); if (it != variables_to_equal_or_diff_variables_.end()) { num_diff_or_equal_var_constraints = it->second.size(); } @@ -835,7 +835,7 @@ void FullEncodingFixedPointComputer::ComputeFixedPoint() { // Returns true if the constraint has finished encoding what it wants. 
bool FullEncodingFixedPointComputer::ProcessConstraint( ConstraintIndex ct_index) { - const ConstraintProto &ct = model_proto_.constraints(ct_index.value()); + const ConstraintProto& ct = model_proto_.constraints(ct_index.value()); switch (ct.constraint_case()) { case ConstraintProto::ConstraintProto::kElement: return ProcessElement(ct_index); @@ -851,7 +851,7 @@ bool FullEncodingFixedPointComputer::ProcessConstraint( } bool FullEncodingFixedPointComputer::ProcessElement(ConstraintIndex ct_index) { - const ConstraintProto &ct = model_proto_.constraints(ct_index.value()); + const ConstraintProto& ct = model_proto_.constraints(ct_index.value()); // Index must always be full encoded. FullyEncode(ct.element().index()); @@ -889,7 +889,7 @@ bool FullEncodingFixedPointComputer::ProcessElement(ConstraintIndex ct_index) { } bool FullEncodingFixedPointComputer::ProcessTable(ConstraintIndex ct_index) { - const ConstraintProto &ct = model_proto_.constraints(ct_index.value()); + const ConstraintProto& ct = model_proto_.constraints(ct_index.value()); if (ct.table().negated()) return true; @@ -902,7 +902,7 @@ bool FullEncodingFixedPointComputer::ProcessTable(ConstraintIndex ct_index) { bool FullEncodingFixedPointComputer::ProcessAutomaton( ConstraintIndex ct_index) { - const ConstraintProto &ct = model_proto_.constraints(ct_index.value()); + const ConstraintProto& ct = model_proto_.constraints(ct_index.value()); for (const int variable : ct.automaton().vars()) { FullyEncode(variable); } @@ -912,7 +912,7 @@ bool FullEncodingFixedPointComputer::ProcessAutomaton( bool FullEncodingFixedPointComputer::ProcessLinear(ConstraintIndex ct_index) { // We are only interested in linear equations of the form: // [b =>] a1 * x1 + a2 * x2 ==|!= value - const ConstraintProto &ct = model_proto_.constraints(ct_index.value()); + const ConstraintProto& ct = model_proto_.constraints(ct_index.value()); if (parameters_.boolean_encoding_level() == 0 || ct.linear().vars_size() != 2) { return true; @@ 
-934,7 +934,7 @@ bool FullEncodingFixedPointComputer::ProcessLinear(ConstraintIndex ct_index) { return true; } -void MaybeFullyEncodeMoreVariables(const CpModelProto &model_proto, Model *m) { +void MaybeFullyEncodeMoreVariables(const CpModelProto& model_proto, Model* m) { FullEncodingFixedPointComputer fixpoint(model_proto, m); fixpoint.ComputeFixedPoint(); } @@ -943,8 +943,8 @@ void MaybeFullyEncodeMoreVariables(const CpModelProto &model_proto, Model *m) { // Constraint loading functions. // ============================================================================ -void LoadBoolOrConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadBoolOrConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); std::vector literals = mapping->Literals(ct.bool_or().literals()); for (const int ref : ct.enforcement_literal()) { literals.push_back(mapping->Literal(ref).Negated()); @@ -952,13 +952,13 @@ void LoadBoolOrConstraint(const ConstraintProto &ct, Model *m) { m->Add(ClauseConstraint(literals)); } -void LoadBoolAndConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadBoolAndConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); std::vector literals; for (const int ref : ct.enforcement_literal()) { literals.push_back(mapping->Literal(ref).Negated()); } - auto *sat_solver = m->GetOrCreate(); + auto* sat_solver = m->GetOrCreate(); for (const Literal literal : mapping->Literals(ct.bool_and().literals())) { literals.push_back(literal); sat_solver->AddProblemClause(literals); @@ -966,14 +966,14 @@ void LoadBoolAndConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadAtMostOneConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadAtMostOneConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); CHECK(!HasEnforcementLiteral(ct)) << "Not supported."; 
m->Add(AtMostOneConstraint(mapping->Literals(ct.at_most_one().literals()))); } -void LoadBoolXorConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadBoolXorConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); CHECK(!HasEnforcementLiteral(ct)) << "Not supported."; m->Add(LiteralXorIs(mapping->Literals(ct.bool_xor().literals()), true)); } @@ -985,8 +985,8 @@ namespace { void LoadEquivalenceAC(const std::vector enforcement_literal, IntegerValue coeff1, IntegerVariable var1, IntegerValue coeff2, IntegerVariable var2, - const IntegerValue rhs, Model *m) { - auto *encoder = m->GetOrCreate(); + const IntegerValue rhs, Model* m) { + auto* encoder = m->GetOrCreate(); CHECK(encoder->VariableIsFullyEncoded(var1)); CHECK(encoder->VariableIsFullyEncoded(var2)); absl::flat_hash_map term1_value_to_literal; @@ -1029,8 +1029,8 @@ void LoadEquivalenceAC(const std::vector enforcement_literal, void LoadEquivalenceNeqAC(const std::vector enforcement_literal, IntegerValue coeff1, IntegerVariable var1, IntegerValue coeff2, IntegerVariable var2, - const IntegerValue rhs, Model *m) { - auto *encoder = m->GetOrCreate(); + const IntegerValue rhs, Model* m) { + auto* encoder = m->GetOrCreate(); CHECK(encoder->VariableIsFullyEncoded(var1)); CHECK(encoder->VariableIsFullyEncoded(var2)); absl::flat_hash_map term1_value_to_literal; @@ -1040,7 +1040,7 @@ void LoadEquivalenceNeqAC(const std::vector enforcement_literal, } for (const auto value_literal : encoder->FullDomainEncoding(var2)) { const IntegerValue target_value = rhs - value_literal.value * coeff2; - const auto &it = term1_value_to_literal.find(target_value); + const auto& it = term1_value_to_literal.find(target_value); if (it != term1_value_to_literal.end()) { const Literal target_literal = it->second; m->Add(EnforcedClause( @@ -1052,8 +1052,8 @@ void LoadEquivalenceNeqAC(const std::vector enforcement_literal, } // namespace -void LoadLinearConstraint(const 
ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); if (ct.linear().vars().empty()) { const Domain rhs = ReadDomainFromProto(ct.linear()); @@ -1071,7 +1071,7 @@ void LoadLinearConstraint(const ConstraintProto &ct, Model *m) { return; } - auto *integer_trail = m->GetOrCreate(); + auto* integer_trail = m->GetOrCreate(); const std::vector vars = mapping->Integers(ct.linear().vars()); const std::vector coeffs = ValuesFromProto(ct.linear().coeffs()); @@ -1099,8 +1099,8 @@ void LoadLinearConstraint(const ConstraintProto &ct, Model *m) { if (ct.linear().vars_size() == 2 && !integer_trail->IsFixed(vars[0]) && !integer_trail->IsFixed(vars[1]) && max_domain_size < 16) { - const SatParameters ¶ms = *m->GetOrCreate(); - auto *encoder = m->GetOrCreate(); + const SatParameters& params = *m->GetOrCreate(); + auto* encoder = m->GetOrCreate(); if (params.boolean_encoding_level() > 0 && ConstraintIsEq(ct.linear()) && ct.linear().domain(0) != min_sum && ct.linear().domain(0) != max_sum && encoder->VariableIsFullyEncoded(vars[0]) && @@ -1199,14 +1199,14 @@ void LoadLinearConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadAllDiffConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadAllDiffConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const std::vector vars = mapping->Integers(ct.all_diff().vars()); // If all variables are fully encoded and domains are not too large, use // arc-consistent reasoning. Otherwise, use bounds-consistent reasoning. 
- IntegerTrail *integer_trail = m->GetOrCreate(); - IntegerEncoder *encoder = m->GetOrCreate(); + IntegerTrail* integer_trail = m->GetOrCreate(); + IntegerEncoder* encoder = m->GetOrCreate(); int num_fully_encoded = 0; int64 max_domain_size = 0; for (const IntegerVariable variable : vars) { @@ -1226,8 +1226,8 @@ void LoadAllDiffConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadIntProdConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadIntProdConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable prod = mapping->Integer(ct.int_prod().target()); const std::vector vars = mapping->Integers(ct.int_prod().vars()); @@ -1235,8 +1235,8 @@ void LoadIntProdConstraint(const ConstraintProto &ct, Model *m) { m->Add(ProductConstraint(vars[0], vars[1], prod)); } -void LoadIntDivConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadIntDivConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable div = mapping->Integer(ct.int_div().target()); const std::vector vars = mapping->Integers(ct.int_div().vars()); @@ -1252,16 +1252,16 @@ void LoadIntDivConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadIntMinConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadIntMinConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable min = mapping->Integer(ct.int_min().target()); const std::vector vars = mapping->Integers(ct.int_min().vars()); m->Add(IsEqualToMinOf(min, vars)); } -LinearExpression GetExprFromProto(const LinearExpressionProto &expr_proto, - const CpModelMapping &mapping) { +LinearExpression GetExprFromProto(const LinearExpressionProto& expr_proto, + const CpModelMapping& mapping) { LinearExpression expr; expr.vars = mapping.Integers(expr_proto.vars()); for (int j = 0; j < 
expr_proto.coeffs_size(); ++j) { @@ -1271,8 +1271,8 @@ LinearExpression GetExprFromProto(const LinearExpressionProto &expr_proto, return CanonicalizeExpr(expr); } -void LoadLinMaxConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadLinMaxConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const LinearExpression max = GetExprFromProto(ct.lin_max().target(), *mapping); std::vector negated_exprs; @@ -1285,22 +1285,22 @@ void LoadLinMaxConstraint(const ConstraintProto &ct, Model *m) { m->Add(IsEqualToMinOf(NegationOf(max), negated_exprs)); } -void LoadIntMaxConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadIntMaxConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable max = mapping->Integer(ct.int_max().target()); const std::vector vars = mapping->Integers(ct.int_max().vars()); m->Add(IsEqualToMaxOf(max, vars)); } -void LoadNoOverlapConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadNoOverlapConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); m->Add(Disjunctive(mapping->Intervals(ct.no_overlap().intervals()))); } -void LoadNoOverlap2dConstraint(const ConstraintProto &ct, Model *m) { +void LoadNoOverlap2dConstraint(const ConstraintProto& ct, Model* m) { if (ct.no_overlap_2d().x_intervals().empty()) return; - auto *mapping = m->GetOrCreate(); + auto* mapping = m->GetOrCreate(); const std::vector x_intervals = mapping->Intervals(ct.no_overlap_2d().x_intervals()); const std::vector y_intervals = @@ -1310,8 +1310,8 @@ void LoadNoOverlap2dConstraint(const ConstraintProto &ct, Model *m) { !ct.no_overlap_2d().boxes_with_null_area_can_overlap())); } -void LoadCumulativeConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadCumulativeConstraint(const ConstraintProto& ct, Model* m) { + auto* 
mapping = m->GetOrCreate(); const std::vector intervals = mapping->Intervals(ct.cumulative().intervals()); const AffineExpression capacity(mapping->Integer(ct.cumulative().capacity())); @@ -1326,11 +1326,11 @@ void LoadCumulativeConstraint(const ConstraintProto &ct, Model *m) { // If a variable is constant and its value appear in no other variable domains, // then the literal encoding the index and the one encoding the target at this // value are equivalent. -bool DetectEquivalencesInElementConstraint(const ConstraintProto &ct, - Model *m) { - auto *mapping = m->GetOrCreate(); - IntegerEncoder *encoder = m->GetOrCreate(); - IntegerTrail *integer_trail = m->GetOrCreate(); +bool DetectEquivalencesInElementConstraint(const ConstraintProto& ct, + Model* m) { + auto* mapping = m->GetOrCreate(); + IntegerEncoder* encoder = m->GetOrCreate(); + IntegerTrail* integer_trail = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); const IntegerVariable target = mapping->Integer(ct.element().target()); @@ -1384,8 +1384,8 @@ bool DetectEquivalencesInElementConstraint(const ConstraintProto &ct, // TODO(user): Be more efficient when the element().vars() are constants. // Ideally we should avoid creating them as integer variable since we don't // use them. -void LoadElementConstraintBounds(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadElementConstraintBounds(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); const IntegerVariable target = mapping->Integer(ct.element().target()); const std::vector vars = @@ -1436,8 +1436,8 @@ void LoadElementConstraintBounds(const ConstraintProto &ct, Model *m) { // value). Rules 1 and 2 are enforced by target == value <=> \Or_{i} // selected[i][value]. Rule 3 is enforced by index == i <=> \Or_{value} // selected[i][value]. 
-void LoadElementConstraintAC(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadElementConstraintAC(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); const IntegerVariable target = mapping->Integer(ct.element().target()); const std::vector vars = @@ -1453,9 +1453,9 @@ void LoadElementConstraintAC(const ConstraintProto &ct, Model *m) { // For i \in index and value in vars[i], make (index == i /\ vars[i] == value) // literals and store them by value in vectors. - absl::flat_hash_map > value_to_literals; + absl::flat_hash_map> value_to_literals; const auto index_encoding = m->Add(FullyEncodeVariable(index)); - IntegerTrail *integer_trail = m->GetOrCreate(); + IntegerTrail* integer_trail = m->GetOrCreate(); for (const auto literal_value : index_encoding) { const int i = literal_value.value.value(); const Literal i_lit = literal_value.literal; @@ -1490,7 +1490,7 @@ void LoadElementConstraintAC(const ConstraintProto &ct, Model *m) { } // target == value <=> \Or_{i \in index} (vars[i] == value /\ index == i). - for (const auto &entry : target_map) { + for (const auto& entry : target_map) { const IntegerValue value = entry.first; const Literal target_is_value = entry.second; @@ -1510,8 +1510,8 @@ namespace { // solver will easily learn an AC encoding... // // The advantage is that this does not introduce extra BooleanVariables. 
-void LoadElementConstraintHalfAC(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadElementConstraintHalfAC(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); const IntegerVariable target = mapping->Integer(ct.element().target()); const std::vector vars = @@ -1528,8 +1528,8 @@ void LoadElementConstraintHalfAC(const ConstraintProto &ct, Model *m) { } } -void LoadBooleanElement(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadBooleanElement(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); const std::vector literals = mapping->Literals(ct.element().vars()); const Literal target = mapping->Literal(ct.element().target()); @@ -1558,8 +1558,8 @@ void LoadBooleanElement(const ConstraintProto &ct, Model *m) { } // namespace -void LoadElementConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadElementConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const IntegerVariable index = mapping->Integer(ct.element().index()); bool boolean_array = true; @@ -1610,7 +1610,7 @@ void LoadElementConstraint(const ConstraintProto &ct, Model *m) { return LoadElementConstraintBounds(ct, m); } - IntegerEncoder *encoder = m->GetOrCreate(); + IntegerEncoder* encoder = m->GetOrCreate(); const bool target_is_AC = encoder->VariableIsFullyEncoded(target); int num_AC_variables = 0; @@ -1622,7 +1622,7 @@ void LoadElementConstraint(const ConstraintProto &ct, Model *m) { if (is_full) num_AC_variables++; } - const SatParameters ¶ms = *m->GetOrCreate(); + const SatParameters& params = *m->GetOrCreate(); if (params.boolean_encoding_level() > 0 && (target_is_AC || num_AC_variables >= num_vars - 1)) { if (params.boolean_encoding_level() > 1) { @@ -1635,14 +1635,14 @@ void 
LoadElementConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadTableConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadTableConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const std::vector vars = mapping->Integers(ct.table().vars()); const std::vector values = ValuesFromProto(ct.table().values()); const int num_vars = vars.size(); const int num_tuples = values.size() / num_vars; - std::vector > tuples(num_tuples); + std::vector> tuples(num_tuples); int count = 0; for (int i = 0; i < num_tuples; ++i) { for (int j = 0; j < num_vars; ++j) { @@ -1656,13 +1656,13 @@ void LoadTableConstraint(const ConstraintProto &ct, Model *m) { } } -void LoadAutomatonConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadAutomatonConstraint(const ConstraintProto& ct, Model* m) { + auto* mapping = m->GetOrCreate(); const std::vector vars = mapping->Integers(ct.automaton().vars()); const int num_transitions = ct.automaton().transition_tail_size(); - std::vector > transitions; + std::vector> transitions; transitions.reserve(num_transitions); for (int i = 0; i < num_transitions; ++i) { transitions.push_back({ct.automaton().transition_tail(i), @@ -1678,14 +1678,14 @@ void LoadAutomatonConstraint(const ConstraintProto &ct, Model *m) { // From vector of n IntegerVariables, returns an n x n matrix of Literal // such that matrix[i][j] is the Literal corresponding to vars[i] == j. 
-std::vector > GetSquareMatrixFromIntegerVariables( - const std::vector &vars, Model *m) { +std::vector> GetSquareMatrixFromIntegerVariables( + const std::vector& vars, Model* m) { const int n = vars.size(); const Literal kTrueLiteral = m->GetOrCreate()->GetTrueLiteral(); const Literal kFalseLiteral = m->GetOrCreate()->GetFalseLiteral(); - std::vector > matrix( + std::vector> matrix( n, std::vector(n, kFalseLiteral)); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { @@ -1696,7 +1696,7 @@ std::vector > GetSquareMatrixFromIntegerVariables( matrix[i][value] = kTrueLiteral; } else { const auto encoding = m->Add(FullyEncodeVariable(vars[i])); - for (const auto &entry : encoding) { + for (const auto& entry : encoding) { const int value = entry.value.value(); DCHECK_LE(0, value); DCHECK_LT(value, n); @@ -1708,8 +1708,8 @@ std::vector > GetSquareMatrixFromIntegerVariables( return matrix; } -void LoadCircuitConstraint(const ConstraintProto &ct, Model *m) { - const auto &circuit = ct.circuit(); +void LoadCircuitConstraint(const ConstraintProto& ct, Model* m) { + const auto& circuit = ct.circuit(); if (circuit.tails().empty()) return; std::vector tails(circuit.tails().begin(), circuit.tails().end()); @@ -1720,8 +1720,8 @@ void LoadCircuitConstraint(const ConstraintProto &ct, Model *m) { m->Add(SubcircuitConstraint(num_nodes, tails, heads, literals)); } -void LoadRoutesConstraint(const ConstraintProto &ct, Model *m) { - const auto &routes = ct.routes(); +void LoadRoutesConstraint(const ConstraintProto& ct, Model* m) { + const auto& routes = ct.routes(); if (routes.tails().empty()) return; std::vector tails(routes.tails().begin(), routes.tails().end()); @@ -1733,11 +1733,11 @@ void LoadRoutesConstraint(const ConstraintProto &ct, Model *m) { /*multiple_subcircuit_through_zero=*/true)); } -void LoadCircuitCoveringConstraint(const ConstraintProto &ct, Model *m) { - auto *mapping = m->GetOrCreate(); +void LoadCircuitCoveringConstraint(const ConstraintProto& ct, Model* m) 
{ + auto* mapping = m->GetOrCreate(); const std::vector nexts = mapping->Integers(ct.circuit_covering().nexts()); - const std::vector > graph = + const std::vector> graph = GetSquareMatrixFromIntegerVariables(nexts, m); const std::vector distinguished( ct.circuit_covering().distinguished_nodes().begin(), @@ -1746,7 +1746,7 @@ void LoadCircuitCoveringConstraint(const ConstraintProto &ct, Model *m) { m->Add(CircuitCovering(graph, distinguished)); } -bool LoadConstraint(const ConstraintProto &ct, Model *m) { +bool LoadConstraint(const ConstraintProto& ct, Model* m) { switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::CONSTRAINT_NOT_SET: return true; diff --git a/ortools/sat/cp_model_objective.cc b/ortools/sat/cp_model_objective.cc index b1397484ea..6f686ecda5 100644 --- a/ortools/sat/cp_model_objective.cc +++ b/ortools/sat/cp_model_objective.cc @@ -18,7 +18,7 @@ namespace operations_research { namespace sat { -void EncodeObjectiveAsSingleVariable(CpModelProto *cp_model) { +void EncodeObjectiveAsSingleVariable(CpModelProto* cp_model) { if (!cp_model->has_objective()) return; if (cp_model->objective().vars_size() == 1) { @@ -66,7 +66,7 @@ void EncodeObjectiveAsSingleVariable(CpModelProto *cp_model) { // Create the new objective var. const int obj_ref = cp_model->variables_size(); { - IntegerVariableProto *obj = cp_model->add_variables(); + IntegerVariableProto* obj = cp_model->add_variables(); Domain obj_domain(min_obj, max_obj); if (!cp_model->objective().domain().empty()) { obj_domain = obj_domain.IntersectionWith( @@ -76,7 +76,7 @@ void EncodeObjectiveAsSingleVariable(CpModelProto *cp_model) { } // Add the linear constraint. 
- LinearConstraintProto *ct = cp_model->add_constraints()->mutable_linear(); + LinearConstraintProto* ct = cp_model->add_constraints()->mutable_linear(); ct->add_domain(0); ct->add_domain(0); *(ct->mutable_vars()) = cp_model->objective().vars(); diff --git a/ortools/sat/cp_model_postsolve.cc b/ortools/sat/cp_model_postsolve.cc index b1465ba498..05df4be2fd 100644 --- a/ortools/sat/cp_model_postsolve.cc +++ b/ortools/sat/cp_model_postsolve.cc @@ -24,7 +24,7 @@ namespace sat { // // Also, any "free" Boolean should be fixed to some value for the subsequent // postsolve steps. -void PostsolveClause(const ConstraintProto &ct, std::vector *domains) { +void PostsolveClause(const ConstraintProto& ct, std::vector* domains) { const int size = ct.bool_or().literals_size(); CHECK_NE(size, 0); bool satisfied = false; @@ -49,9 +49,9 @@ void PostsolveClause(const ConstraintProto &ct, std::vector *domains) { // Here we simply assign all non-fixed variable to a feasible value. Which // should always exists by construction. -void PostsolveLinear(const ConstraintProto &ct, - const std::vector &prefer_lower_value, - std::vector *domains) { +void PostsolveLinear(const ConstraintProto& ct, + const std::vector& prefer_lower_value, + std::vector* domains) { int64 fixed_activity = 0; const int size = ct.linear().vars().size(); std::vector free_vars; @@ -115,7 +115,7 @@ void PostsolveLinear(const ConstraintProto &ct, // We assign any non fixed lhs variables to their minimum value. Then we assign // the target to the max. This should always be feasible. -void PostsolveIntMax(const ConstraintProto &ct, std::vector *domains) { +void PostsolveIntMax(const ConstraintProto& ct, std::vector* domains) { int64 m = kint64min; for (const int ref : ct.int_max().vars()) { const int var = PositiveRef(ref); @@ -141,7 +141,7 @@ void PostsolveIntMax(const ConstraintProto &ct, std::vector *domains) { } // We only support 3 cases in the presolve currently. 
-void PostsolveElement(const ConstraintProto &ct, std::vector *domains) { +void PostsolveElement(const ConstraintProto& ct, std::vector* domains) { const int index_ref = ct.element().index(); const int index_var = PositiveRef(index_ref); const int target_ref = ct.element().target(); @@ -205,9 +205,9 @@ void PostsolveElement(const ConstraintProto &ct, std::vector *domains) { } void PostsolveResponse(const int64 num_variables_in_original_model, - const CpModelProto &mapping_proto, - const std::vector &postsolve_mapping, - CpSolverResponse *response) { + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + CpSolverResponse* response) { // Abort if no solution or something is wrong. if (response->status() != CpSolverStatus::FEASIBLE && response->status() != CpSolverStatus::OPTIMAL) { @@ -251,7 +251,7 @@ void PostsolveResponse(const int64 num_variables_in_original_model, // Process the constraints in reverse order. const int num_constraints = mapping_proto.constraints_size(); for (int i = num_constraints - 1; i >= 0; i--) { - const ConstraintProto &ct = mapping_proto.constraints(i); + const ConstraintProto& ct = mapping_proto.constraints(i); // We should only encounter assigned enforcement literal. 
bool enforced = true; diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index d009529428..a1a70bb918 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -48,11 +48,12 @@ #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/sat/simplification.h" +#include "ortools/sat/var_domination.h" namespace operations_research { namespace sat { -bool CpModelPresolver::RemoveConstraint(ConstraintProto *ct) { +bool CpModelPresolver::RemoveConstraint(ConstraintProto* ct) { ct->Clear(); return true; } @@ -76,10 +77,10 @@ void CpModelPresolver::RemoveEmptyConstraints() { } context_->working_model->mutable_constraints()->DeleteSubrange( new_num_constraints, old_num_non_empty_constraints - new_num_constraints); - for (ConstraintProto &ct_ref : + for (ConstraintProto& ct_ref : *context_->working_model->mutable_constraints()) { ApplyToAllIntervalIndices( - [&interval_mapping](int *ref) { + [&interval_mapping](int* ref) { *ref = interval_mapping[*ref]; CHECK_NE(-1, *ref); }, @@ -87,7 +88,7 @@ void CpModelPresolver::RemoveEmptyConstraints() { } } -bool CpModelPresolver::PresolveEnforcementLiteral(ConstraintProto *ct) { +bool CpModelPresolver::PresolveEnforcementLiteral(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (!HasEnforcementLiteral(*ct)) return false; @@ -132,7 +133,7 @@ bool CpModelPresolver::PresolveEnforcementLiteral(ConstraintProto *ct) { return new_size != old_size; } -bool CpModelPresolver::PresolveBoolXor(ConstraintProto *ct) { +bool CpModelPresolver::PresolveBoolXor(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; @@ -180,7 +181,7 @@ bool CpModelPresolver::PresolveBoolXor(ConstraintProto *ct) { return changed; } -bool CpModelPresolver::PresolveBoolOr(ConstraintProto *ct) { +bool CpModelPresolver::PresolveBoolOr(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; // 
Move the enforcement literal inside the clause if any. Note that we do not @@ -257,7 +258,7 @@ bool CpModelPresolver::PresolveBoolOr(ConstraintProto *ct) { } ABSL_MUST_USE_RESULT bool CpModelPresolver::MarkConstraintAsFalse( - ConstraintProto *ct) { + ConstraintProto* ct) { if (HasEnforcementLiteral(*ct)) { // Change the constraint to a bool_or. ct->mutable_bool_or()->clear_literals(); @@ -272,7 +273,7 @@ ABSL_MUST_USE_RESULT bool CpModelPresolver::MarkConstraintAsFalse( } } -bool CpModelPresolver::PresolveBoolAnd(ConstraintProto *ct) { +bool CpModelPresolver::PresolveBoolAnd(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (!HasEnforcementLiteral(*ct)) { @@ -317,10 +318,16 @@ bool CpModelPresolver::PresolveBoolAnd(ConstraintProto *ct) { return changed; } -bool CpModelPresolver::PresolveAtMostOne(ConstraintProto *ct) { +bool CpModelPresolver::PresolveAtMostOne(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; CHECK(!HasEnforcementLiteral(*ct)); + // An at most one with just one literal is always satisfied. + if (ct->at_most_one().literals_size() == 1) { + context_->UpdateRuleStats("at_most_one: size one"); + return RemoveConstraint(ct); + } + // Fix to false any duplicate literals. 
std::sort(ct->mutable_at_most_one()->mutable_literals()->begin(), ct->mutable_at_most_one()->mutable_literals()->end()); @@ -368,7 +375,7 @@ bool CpModelPresolver::PresolveAtMostOne(ConstraintProto *ct) { return changed; } -bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { +bool CpModelPresolver::PresolveIntMax(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (ct->int_max().vars().empty()) { context_->UpdateRuleStats("int_max: no variables!"); @@ -402,9 +409,9 @@ bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { context_->UpdateRuleStats("int_max: x = max(x, ...)"); for (const int ref : ct->int_max().vars()) { if (ref == target_ref) continue; - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); *new_ct->mutable_enforcement_literal() = ct->enforcement_literal(); - auto *arg = new_ct->mutable_linear(); + auto* arg = new_ct->mutable_linear(); arg->add_vars(target_ref); arg->add_coeffs(1); arg->add_vars(ref); @@ -435,7 +442,7 @@ bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { // infered_domain ∩ [kint64min, target_ub] ⊂ target_domain // then the constraint is really max(...) <= target_ub and we can simplify it. if (context_->VariableIsUniqueAndRemovable(target_ref)) { - const Domain &target_domain = context_->DomainOf(target_ref); + const Domain& target_domain = context_->DomainOf(target_ref); if (infered_domain.IntersectionWith(Domain(kint64min, target_domain.Max())) .IsIncludedIn(target_domain)) { if (infered_domain.Max() <= target_domain.Max()) { @@ -455,7 +462,7 @@ bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { // enforcement => [var_i <= target_domain.Max()]. 
context_->UpdateRuleStats("int_max: reified lower than constant"); for (const int ref : ct->int_max().vars()) { - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); *(new_ct->mutable_enforcement_literal()) = ct->enforcement_literal(); ct->mutable_linear()->add_vars(ref); ct->mutable_linear()->add_coeffs(1); @@ -505,9 +512,9 @@ bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { // Convert to an equality. Note that we create a new constraint otherwise it // might not be processed again. context_->UpdateRuleStats("int_max: converted to equality"); - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); *new_ct = *ct; // copy name and potential reification. - auto *arg = new_ct->mutable_linear(); + auto* arg = new_ct->mutable_linear(); arg->add_vars(target_ref); arg->add_coeffs(1); arg->add_vars(ct->int_max().vars(0)); @@ -520,20 +527,20 @@ bool CpModelPresolver::PresolveIntMax(ConstraintProto *ct) { return modified; } -bool CpModelPresolver::PresolveLinMin(ConstraintProto *ct) { +bool CpModelPresolver::PresolveLinMin(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; // Convert to lin_max and presolve lin_max. 
const auto copy = ct->lin_min(); SetToNegatedLinearExpression(copy.target(), ct->mutable_lin_max()->mutable_target()); - for (const LinearExpressionProto &expr : copy.exprs()) { - LinearExpressionProto *const new_expr = ct->mutable_lin_max()->add_exprs(); + for (const LinearExpressionProto& expr : copy.exprs()) { + LinearExpressionProto* const new_expr = ct->mutable_lin_max()->add_exprs(); SetToNegatedLinearExpression(expr, new_expr); } return PresolveLinMax(ct); } -bool CpModelPresolver::PresolveLinMax(ConstraintProto *ct) { +bool CpModelPresolver::PresolveLinMax(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (ct->lin_max().exprs().empty()) { context_->UpdateRuleStats("lin_max: no exprs"); @@ -544,7 +551,7 @@ bool CpModelPresolver::PresolveLinMax(ConstraintProto *ct) { // Pass 1, Compute the infered min of the target. int64 infered_min = context_->MinOf(ct->lin_max().target()); - for (const LinearExpressionProto &expr : ct->lin_max().exprs()) { + for (const LinearExpressionProto& expr : ct->lin_max().exprs()) { // TODO(user): Check if the expressions contain target. // TODO(user): Check if the negated expression is already present and @@ -556,7 +563,7 @@ bool CpModelPresolver::PresolveLinMax(ConstraintProto *ct) { // Pass 2, Filter the expressions which are smaller than inferred min. 
int new_size = 0; for (int i = 0; i < ct->lin_max().exprs_size(); ++i) { - const LinearExpressionProto &expr = ct->lin_max().exprs(i); + const LinearExpressionProto& expr = ct->lin_max().exprs(i); if (context_->MaxOf(expr) >= infered_min) { *ct->mutable_lin_max()->mutable_exprs(new_size) = expr; new_size++; @@ -573,7 +580,7 @@ bool CpModelPresolver::PresolveLinMax(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveIntAbs(ConstraintProto *ct) { +bool CpModelPresolver::PresolveIntAbs(ConstraintProto* ct) { CHECK_EQ(ct->enforcement_literal_size(), 0); if (context_->ModelIsUnsat()) return false; const int target_ref = ct->int_max().target(); @@ -603,9 +610,9 @@ bool CpModelPresolver::PresolveIntAbs(ConstraintProto *ct) { if (context_->MinOf(var) >= 0 && !context_->IsFixed(var)) { context_->UpdateRuleStats("int_abs: converted to equality"); - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); new_ct->set_name(ct->name()); - auto *arg = new_ct->mutable_linear(); + auto* arg = new_ct->mutable_linear(); arg->add_vars(target_ref); arg->add_coeffs(1); arg->add_vars(var); @@ -618,9 +625,9 @@ bool CpModelPresolver::PresolveIntAbs(ConstraintProto *ct) { if (context_->MaxOf(var) <= 0 && !context_->IsFixed(var)) { context_->UpdateRuleStats("int_abs: converted to equality"); - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); new_ct->set_name(ct->name()); - auto *arg = new_ct->mutable_linear(); + auto* arg = new_ct->mutable_linear(); arg->add_vars(target_ref); arg->add_coeffs(1); arg->add_vars(var); @@ -650,7 +657,7 @@ bool CpModelPresolver::PresolveIntAbs(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveIntMin(ConstraintProto *ct) { +bool CpModelPresolver::PresolveIntMin(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; const auto copy = 
ct->int_min(); @@ -661,7 +668,7 @@ bool CpModelPresolver::PresolveIntMin(ConstraintProto *ct) { return PresolveIntMax(ct); } -bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { +bool CpModelPresolver::PresolveIntProd(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; @@ -672,7 +679,7 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { int64 constant = 1; for (int i = 0; i < ct->int_prod().vars().size(); ++i) { const int ref = ct->int_prod().vars(i); - const AffineRelation::Relation &r = context_->GetAffineRelation(ref); + const AffineRelation::Relation& r = context_->GetAffineRelation(ref); if (r.representative != ref && r.offset == 0) { changed = true; ct->mutable_int_prod()->set_vars(i, r.representative); @@ -693,7 +700,7 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { const int old_target = ct->int_prod().target(); const int new_target = context_->working_model->variables_size(); - IntegerVariableProto *var_proto = context_->working_model->add_variables(); + IntegerVariableProto* var_proto = context_->working_model->add_variables(); FillDomainInProto( context_->DomainOf(old_target).InverseMultiplicationBy(constant), var_proto); @@ -713,8 +720,8 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { // We cannot store the affine relation because the old target seems // to already be in affine relation with another variable. This is rare // and we need to add a new constraint in that case. 
- ConstraintProto *new_ct = context_->working_model->add_constraints(); - LinearConstraintProto *lin = new_ct->mutable_linear(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); + LinearConstraintProto* lin = new_ct->mutable_linear(); lin->add_vars(old_target); lin->add_coeffs(1); lin->add_vars(new_target); @@ -748,7 +755,7 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { if (context_->IsFixed(b)) std::swap(a, b); if (context_->IsFixed(a)) { if (b != product) { - ConstraintProto *const lin = context_->working_model->add_constraints(); + ConstraintProto* const lin = context_->working_model->add_constraints(); lin->mutable_linear()->add_vars(b); lin->mutable_linear()->add_coeffs(context_->MinOf(a)); lin->mutable_linear()->add_vars(product); @@ -794,16 +801,16 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { } context_->UpdateRuleStats("int_prod: all Boolean."); { - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); new_ct->add_enforcement_literal(target_ref); - auto *arg = new_ct->mutable_bool_and(); + auto* arg = new_ct->mutable_bool_and(); for (const int var : ct->int_prod().vars()) { arg->add_literals(var); } } { - ConstraintProto *new_ct = context_->working_model->add_constraints(); - auto *arg = new_ct->mutable_bool_or(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); + auto* arg = new_ct->mutable_bool_or(); arg->add_literals(target_ref); for (const int var : ct->int_prod().vars()) { arg->add_literals(NegatedRef(var)); @@ -813,7 +820,7 @@ bool CpModelPresolver::PresolveIntProd(ConstraintProto *ct) { return RemoveConstraint(ct); } -bool CpModelPresolver::PresolveIntDiv(ConstraintProto *ct) { +bool CpModelPresolver::PresolveIntDiv(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; // For now, we only presolve the case where the divisor is constant. 
@@ -828,7 +835,7 @@ bool CpModelPresolver::PresolveIntDiv(ConstraintProto *ct) { const int64 divisor = context_->MinOf(ref_div); if (divisor == 1) { - LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context_->working_model->add_constraints()->mutable_linear(); lin->add_vars(ref_x); lin->add_coeffs(1); @@ -859,7 +866,7 @@ bool CpModelPresolver::PresolveIntDiv(ConstraintProto *ct) { if (context_->MinOf(target) >= 0 && context_->MinOf(ref_x) >= 0 && divisor > 1) { - LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context_->working_model->add_constraints()->mutable_linear(); lin->add_vars(ref_x); lin->add_coeffs(1); @@ -878,14 +885,14 @@ bool CpModelPresolver::PresolveIntDiv(ConstraintProto *ct) { return false; } -bool CpModelPresolver::ExploitEquivalenceRelations(int c, ConstraintProto *ct) { +bool CpModelPresolver::ExploitEquivalenceRelations(int c, ConstraintProto* ct) { bool changed = false; // Optim: Special case for the linear constraint. We just remap the // enforcement literals, the normal variables will be replaced by their // representative in CanonicalizeLinear(). if (ct->constraint_case() == ConstraintProto::ConstraintCase::kLinear) { - for (int &ref : *ct->mutable_enforcement_literal()) { + for (int& ref : *ct->mutable_enforcement_literal()) { const int rep = this->context_->GetLiteralRepresentative(ref); if (rep != ref) { changed = true; @@ -909,7 +916,7 @@ bool CpModelPresolver::ExploitEquivalenceRelations(int c, ConstraintProto *ct) { // Remap equal and negated variables to their representative. ApplyToAllVariableIndices( - [&changed, this](int *ref) { + [&changed, this](int* ref) { const int rep = context_->GetVariableRepresentative(*ref); if (rep != *ref) { changed = true; @@ -920,7 +927,7 @@ bool CpModelPresolver::ExploitEquivalenceRelations(int c, ConstraintProto *ct) { // Remap literal and negated literal to their representative. 
ApplyToAllLiteralIndices( - [&changed, this](int *ref) { + [&changed, this](int* ref) { const int rep = this->context_->GetLiteralRepresentative(*ref); if (rep != *ref) { changed = true; @@ -931,7 +938,7 @@ bool CpModelPresolver::ExploitEquivalenceRelations(int c, ConstraintProto *ct) { return changed; } -void CpModelPresolver::DivideLinearByGcd(ConstraintProto *ct) { +void CpModelPresolver::DivideLinearByGcd(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return; // Compute the GCD of all coefficients. @@ -955,7 +962,7 @@ void CpModelPresolver::DivideLinearByGcd(ConstraintProto *ct) { } } -bool CpModelPresolver::CanonicalizeLinear(ConstraintProto *ct) { +bool CpModelPresolver::CanonicalizeLinear(ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return false; @@ -1054,7 +1061,7 @@ bool CpModelPresolver::CanonicalizeLinear(ConstraintProto *ct) { return var_constraint_graph_changed; } -bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto *ct) { +bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return false; @@ -1202,7 +1209,7 @@ bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto *ct) { return true; } -bool CpModelPresolver::PresolveSmallLinear(ConstraintProto *ct) { +bool CpModelPresolver::PresolveSmallLinear(ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return false; @@ -1245,12 +1252,12 @@ bool CpModelPresolver::PresolveSmallLinear(ConstraintProto *ct) { return MarkConstraintAsFalse(ct); } - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); new_ct->set_name(ct->name()); for (const int literal : ct->enforcement_literal()) { new_ct->add_enforcement_literal(literal); } - auto *arg = 
new_ct->mutable_linear(); + auto* arg = new_ct->mutable_linear(); arg->add_vars(abs_arg); arg->add_coeffs(1); FillDomainInProto(new_abs_var_domain, new_ct->mutable_linear()); @@ -1265,7 +1272,7 @@ bool CpModelPresolver::PresolveSmallLinear(ConstraintProto *ct) { } const int literal = ct->enforcement_literal(0); - const LinearConstraintProto &linear = ct->linear(); + const LinearConstraintProto& linear = ct->linear(); const int ref = linear.vars(0); const int var = PositiveRef(ref); const int64 coeff = @@ -1317,7 +1324,7 @@ bool CpModelPresolver::PresolveSmallLinear(ConstraintProto *ct) { // // TODO(user): it might be better to first add only the affine relation with // a coefficient of magnitude 1, and later the one with larger coeffs. - const LinearConstraintProto &arg = ct->linear(); + const LinearConstraintProto& arg = ct->linear(); if (arg.vars_size() == 2) { const Domain rhs = ReadDomainFromProto(ct->linear()); const int64 rhs_min = rhs.Min(); @@ -1347,13 +1354,13 @@ bool CpModelPresolver::PresolveSmallLinear(ConstraintProto *ct) { namespace { // Return true if the given domain only restrict the values with an upper bound. -bool IsLeConstraint(const Domain &domain, const Domain &all_values) { +bool IsLeConstraint(const Domain& domain, const Domain& all_values) { return all_values.IntersectionWith(Domain(kint64min, domain.Max())) .IsIncludedIn(domain); } // Same as IsLeConstraint() but in the other direction. -bool IsGeConstraint(const Domain &domain, const Domain &all_values) { +bool IsGeConstraint(const Domain& domain, const Domain& all_values) { return all_values.IntersectionWith(Domain(domain.Min(), kint64max)) .IsIncludedIn(domain); } @@ -1361,8 +1368,8 @@ bool IsGeConstraint(const Domain &domain, const Domain &all_values) { // In the equation terms + coeff * var_domain \included rhs, returns true if can // we always fix rhs to its min value for any value in terms. It is okay to // not be as generic as possible here. 
-bool RhsCanBeFixedToMin(int64 coeff, const Domain &var_domain, - const Domain &terms, const Domain &rhs) { +bool RhsCanBeFixedToMin(int64 coeff, const Domain& var_domain, + const Domain& terms, const Domain& rhs) { if (var_domain.NumIntervals() != 1) return false; if (std::abs(coeff) != 1) return false; @@ -1381,8 +1388,8 @@ bool RhsCanBeFixedToMin(int64 coeff, const Domain &var_domain, return false; } -bool RhsCanBeFixedToMax(int64 coeff, const Domain &var_domain, - const Domain &terms, const Domain &rhs) { +bool RhsCanBeFixedToMax(int64 coeff, const Domain& var_domain, + const Domain& terms, const Domain& rhs) { if (var_domain.NumIntervals() != 1) return false; if (std::abs(coeff) != 1) return false; @@ -1396,8 +1403,8 @@ bool RhsCanBeFixedToMax(int64 coeff, const Domain &var_domain, } // Remove from to_clear any entry not in current. -void TakeIntersectionWith(const absl::flat_hash_set ¤t, - absl::flat_hash_set *to_clear) { +void TakeIntersectionWith(const absl::flat_hash_set& current, + absl::flat_hash_set* to_clear) { std::vector new_set; for (const int c : *to_clear) { if (current.contains(c)) new_set.push_back(c); @@ -1408,15 +1415,15 @@ void TakeIntersectionWith(const absl::flat_hash_set ¤t, } // namespace -bool CpModelPresolver::PropagateDomainsInLinear(int c, ConstraintProto *ct) { +bool CpModelPresolver::PropagateDomainsInLinear(int c, ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return false; } // Compute the implied rhs bounds from the variable ones. 
- auto &term_domains = context_->tmp_term_domains; - auto &left_domains = context_->tmp_left_domains; + auto& term_domains = context_->tmp_term_domains; + auto& left_domains = context_->tmp_left_domains; const int num_vars = ct->linear().vars_size(); term_domains.resize(num_vars + 1); left_domains.resize(num_vars + 1); @@ -1429,7 +1436,7 @@ bool CpModelPresolver::PropagateDomainsInLinear(int c, ConstraintProto *ct) { left_domains[i + 1] = left_domains[i].AdditionWith(term_domains[i]).RelaxIfTooComplex(); } - const Domain &implied_rhs = left_domains[num_vars]; + const Domain& implied_rhs = left_domains[num_vars]; // Abort if trivial. const Domain old_rhs = ReadDomainFromProto(ct->linear()); @@ -1719,18 +1726,17 @@ bool CpModelPresolver::PropagateDomainsInLinear(int c, ConstraintProto *ct) { // true or false. Moves such literal to the constraint enforcement literals // list. // -// This operation is similar to coefficient strengthening in the MIP world. +// We also generalize this to integer variable at one of their bound. // -// TODO(user): For non-binary variable, we should also reduce large coefficient -// by using the same logic (i.e. real coefficient strengthening). +// This operation is similar to coefficient strengthening in the MIP world. void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint( - ConstraintProto *ct) { + int ct_index, ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return; } - const LinearConstraintProto &arg = ct->linear(); + const LinearConstraintProto& arg = ct->linear(); const int num_vars = arg.vars_size(); // No need to process size one constraints, they will be presolved separately. @@ -1751,13 +1757,16 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint( } // We can only extract enforcement literals if the maximum coefficient - // magnitude is greater or equal to max_sum - rhs_domain.Max() or - // rhs_domain.Min() - min_sum. 
- Domain rhs_domain = ReadDomainFromProto(ct->linear()); - if (max_coeff_magnitude < - std::max(max_sum - rhs_domain.Max(), rhs_domain.Min() - min_sum)) { - return; - } + // magnitude is large enough. Note that we handle complex domain. + // + // TODO(user): Depending on how we split below, the threshold are not the + // same. This is maybe not too important, we just don't split as often as we + // could, but it is still unclear if splitting is good. + const auto& domain = ct->linear().domain(); + const int64 ub_threshold = domain[domain.size() - 2] - min_sum; + const int64 lb_threshold = max_sum - domain[1]; + const Domain rhs_domain = ReadDomainFromProto(ct->linear()); + if (max_coeff_magnitude < std::max(ub_threshold, lb_threshold)) return; // We need the constraint to be only bounded on one side in order to extract // enforcement literal. @@ -1781,7 +1790,7 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint( if (!lower_bounded && !upper_bounded) return; if (lower_bounded && upper_bounded) { context_->UpdateRuleStats("linear: split boxed constraint"); - ConstraintProto *new_ct1 = context_->working_model->add_constraints(); + ConstraintProto* new_ct1 = context_->working_model->add_constraints(); *new_ct1 = *ct; if (!ct->name().empty()) { new_ct1->set_name(absl::StrCat(ct->name(), " (part 1)")); @@ -1789,7 +1798,7 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint( FillDomainInProto(Domain(min_sum, rhs_domain.Max()), new_ct1->mutable_linear()); - ConstraintProto *new_ct2 = context_->working_model->add_constraints(); + ConstraintProto* new_ct2 = context_->working_model->add_constraints(); *new_ct2 = *ct; if (!ct->name().empty()) { new_ct2->set_name(absl::StrCat(ct->name(), " (part 2)")); @@ -1801,76 +1810,75 @@ void CpModelPresolver::ExtractEnforcementLiteralFromLinearConstraint( return (void)RemoveConstraint(ct); } + // Any coefficient greater than this will cause the constraint to be trivially + // satisfied when the 
variable move away from its bound. Note that as we + // remove coefficient, the threshold do not change! + const int64 threshold = lower_bounded ? ub_threshold : lb_threshold; + + // Do we only extract Booleans? + const bool only_booleans = + !options_.parameters.presolve_extract_integer_enforcement(); + // To avoid a quadratic loop, we will rewrite the linear expression at the // same time as we extract enforcement literals. int new_size = 0; - LinearConstraintProto *mutable_arg = ct->mutable_linear(); + int64 rhs_offset = 0; + bool some_integer_encoding_were_extracted = false; + LinearConstraintProto* mutable_arg = ct->mutable_linear(); for (int i = 0; i < arg.vars_size(); ++i) { - // We currently only process binary variables. - const int ref = arg.vars(i); - if (context_->MinOf(ref) == 0 && context_->MaxOf(ref) == 1) { - const int64 coeff = arg.coeffs(i); - if (!lower_bounded) { - if (max_sum - std::abs(coeff) <= rhs_domain.front().end) { - if (coeff > 0) { - // Fix the variable to 1 in the constraint and add it as enforcement - // literal. - rhs_domain = rhs_domain.AdditionWith(Domain(-coeff)); - ct->add_enforcement_literal(ref); - // 'min_sum' remains unaffected. - max_sum -= coeff; - } else { - // Fix the variable to 0 in the constraint and add its negation as - // enforcement literal. - ct->add_enforcement_literal(NegatedRef(ref)); - // 'max_sum' remains unaffected. - min_sum -= coeff; - } - context_->UpdateRuleStats( - "linear: extracted enforcement literal from constraint"); - continue; - } - } else { - DCHECK(!upper_bounded); - if (min_sum + std::abs(coeff) >= rhs_domain.back().start) { - if (coeff > 0) { - // Fix the variable to 0 in the constraint and add its negation as - // enforcement literal. - ct->add_enforcement_literal(NegatedRef(ref)); - // 'min_sum' remains unaffected. - max_sum -= coeff; - } else { - // Fix the variable to 1 in the constraint and add it as enforcement - // literal. 
- rhs_domain = rhs_domain.AdditionWith(Domain(-coeff)); - ct->add_enforcement_literal(ref); - // 'max_sum' remains unaffected. - min_sum -= coeff; - } - context_->UpdateRuleStats( - "linear: extracted enforcement literal from constraint"); - continue; - } - } + int ref = arg.vars(i); + int64 coeff = arg.coeffs(i); + if (coeff < 0) { + ref = NegatedRef(ref); + coeff = -coeff; } - // We keep this term. - mutable_arg->set_vars(new_size, mutable_arg->vars(i)); - mutable_arg->set_coeffs(new_size, mutable_arg->coeffs(i)); - ++new_size; - } + const bool is_boolean = context_->CanBeUsedAsLiteral(ref); + if (context_->IsFixed(ref) || coeff < threshold || + (only_booleans && !is_boolean)) { + // We keep this term. + mutable_arg->set_vars(new_size, mutable_arg->vars(i)); + mutable_arg->set_coeffs(new_size, mutable_arg->coeffs(i)); + ++new_size; + continue; + } + if (is_boolean) { + context_->UpdateRuleStats("linear: extracted enforcement literal"); + } else { + some_integer_encoding_were_extracted = true; + context_->UpdateRuleStats( + "linear: extracted integer enforcement literal"); + } + if (lower_bounded) { + ct->add_enforcement_literal(is_boolean + ? NegatedRef(ref) + : context_->GetOrCreateVarValueEncoding( + ref, context_->MinOf(ref))); + rhs_offset -= coeff * context_->MinOf(ref); + } else { + ct->add_enforcement_literal(is_boolean + ? 
ref + : context_->GetOrCreateVarValueEncoding( + ref, context_->MaxOf(ref))); + rhs_offset -= coeff * context_->MaxOf(ref); + } + } mutable_arg->mutable_vars()->Truncate(new_size); mutable_arg->mutable_coeffs()->Truncate(new_size); - FillDomainInProto(rhs_domain, mutable_arg); + FillDomainInProto(rhs_domain.AdditionWith(Domain(rhs_offset)), mutable_arg); + if (some_integer_encoding_were_extracted) { + context_->UpdateNewConstraintsVariableUsage(); + context_->UpdateConstraintVariableUsage(ct_index); + } } -void CpModelPresolver::ExtractAtMostOneFromLinear(ConstraintProto *ct) { +void CpModelPresolver::ExtractAtMostOneFromLinear(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return; if (HasEnforcementLiteral(*ct)) return; const Domain rhs = ReadDomainFromProto(ct->linear()); - const LinearConstraintProto &arg = ct->linear(); + const LinearConstraintProto& arg = ct->linear(); const int num_vars = arg.vars_size(); int64 min_sum = 0; int64 max_sum = 0; @@ -1908,7 +1916,7 @@ void CpModelPresolver::ExtractAtMostOneFromLinear(ConstraintProto *ct) { } else { context_->UpdateRuleStats("linear: extracted at most one (min)."); } - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); new_ct->set_name(ct->name()); for (const int ref : at_most_one) { new_ct->mutable_at_most_one()->add_literals(ref); @@ -1920,13 +1928,13 @@ void CpModelPresolver::ExtractAtMostOneFromLinear(ConstraintProto *ct) { // Convert some linear constraint involving only Booleans to their Boolean // form. 
-bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto *ct) { +bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto* ct) { if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear || context_->ModelIsUnsat()) { return false; } - const LinearConstraintProto &arg = ct->linear(); + const LinearConstraintProto& arg = ct->linear(); const int num_vars = arg.vars_size(); int64 min_coeff = kint64max; int64 max_coeff = 0; @@ -2052,8 +2060,8 @@ bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto *ct) { min_sum + 2 * min_coeff > rhs_domain.Max() && min_sum + max_coeff <= rhs_domain.Max()) { context_->UpdateRuleStats("linear: positive equal one"); - ConstraintProto *at_least_one = context_->working_model->add_constraints(); - ConstraintProto *at_most_one = context_->working_model->add_constraints(); + ConstraintProto* at_least_one = context_->working_model->add_constraints(); + ConstraintProto* at_most_one = context_->working_model->add_constraints(); at_least_one->set_name(ct->name()); at_most_one->set_name(ct->name()); for (int i = 0; i < num_vars; ++i) { @@ -2070,8 +2078,8 @@ bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto *ct) { max_sum - 2 * min_coeff < rhs_domain.Min() && max_sum - max_coeff >= rhs_domain.Min()) { context_->UpdateRuleStats("linear: negative equal one"); - ConstraintProto *at_least_one = context_->working_model->add_constraints(); - ConstraintProto *at_most_one = context_->working_model->add_constraints(); + ConstraintProto* at_least_one = context_->working_model->add_constraints(); + ConstraintProto* at_most_one = context_->working_model->add_constraints(); at_least_one->set_name(ct->name()); at_most_one->set_name(ct->name()); for (int i = 0; i < num_vars; ++i) { @@ -2102,8 +2110,8 @@ bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto *ct) { if (rhs_domain.Contains(value)) continue; // Add a new clause to exclude this bad assignment. 
- ConstraintProto *new_ct = context_->working_model->add_constraints(); - auto *new_arg = new_ct->mutable_bool_or(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); + auto* new_arg = new_ct->mutable_bool_or(); if (HasEnforcementLiteral(*ct)) { *new_ct->mutable_enforcement_literal() = ct->enforcement_literal(); } @@ -2117,7 +2125,7 @@ bool CpModelPresolver::PresolveLinearOnBooleans(ConstraintProto *ct) { return RemoveConstraint(ct); } -bool CpModelPresolver::PresolveInterval(int c, ConstraintProto *ct) { +bool CpModelPresolver::PresolveInterval(int c, ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; const int start = ct->interval().start(); @@ -2153,7 +2161,7 @@ bool CpModelPresolver::PresolveInterval(int c, ConstraintProto *ct) { if (context_->IntervalUsage(c) == 0) { // Convert to linear. - ConstraintProto *new_ct = context_->working_model->add_constraints(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); *(new_ct->mutable_enforcement_literal()) = ct->enforcement_literal(); new_ct->mutable_linear()->add_domain(0); new_ct->mutable_linear()->add_domain(0); @@ -2184,7 +2192,7 @@ bool CpModelPresolver::PresolveInterval(int c, ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { +bool CpModelPresolver::PresolveElement(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; const int index_ref = ct->element().index(); @@ -2209,14 +2217,14 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { // Filter possible index values. Accumulate variable domains to build // a possible target domain. 
Domain infered_domain; - const Domain &initial_index_domain = context_->DomainOf(index_ref); - const Domain &target_domain = context_->DomainOf(target_ref); + const Domain& initial_index_domain = context_->DomainOf(index_ref); + const Domain& target_domain = context_->DomainOf(target_ref); for (const ClosedInterval interval : initial_index_domain) { for (int value = interval.start; value <= interval.end; ++value) { CHECK_GE(value, 0); CHECK_LT(value, ct->element().vars_size()); const int ref = ct->element().vars(value); - const Domain &domain = context_->DomainOf(ref); + const Domain& domain = context_->DomainOf(ref); if (domain.IntersectionWith(target_domain).IsEmpty()) { bool domain_modified = false; if (!context_->IntersectDomainWith( @@ -2255,7 +2263,7 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { if (context_->IsFixed(index_ref)) { const int var = ct->element().vars(context_->MinOf(index_ref)); if (var != target_ref) { - LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context_->working_model->add_constraints()->mutable_linear(); lin->add_vars(var); lin->add_coeffs(-1); @@ -2285,7 +2293,7 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { const int64 v0 = context_->MinOf(ct->element().vars(0)); const int64 v1 = context_->MinOf(ct->element().vars(1)); - LinearConstraintProto *const lin = + LinearConstraintProto* const lin = context_->working_model->add_constraints()->mutable_linear(); lin->add_vars(target_ref); lin->add_coeffs(1); @@ -2318,7 +2326,7 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { r_index.offset + r_max * r_index.coeff >= 0 && r_index.offset + r_max * r_index.coeff < array_size) { // This will happen eventually when domains are synchronized. 
- ElementConstraintProto *const element = + ElementConstraintProto* const element = context_->working_model->add_constraints()->mutable_element(); for (int64 v = 0; v <= r_max; ++v) { const int64 scaled_index = v * r_index.coeff + r_index.offset; @@ -2364,8 +2372,8 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { if (target_ref == index_ref) { // Filter impossible index values. std::vector possible_indices; - const Domain &index_domain = context_->DomainOf(index_ref); - for (const ClosedInterval &interval : index_domain) { + const Domain& index_domain = context_->DomainOf(index_ref); + for (const ClosedInterval& interval : index_domain) { for (int64 value = interval.start; value <= interval.end; ++value) { const int ref = ct->element().vars(value); if (context_->DomainContains(ref, value)) { @@ -2395,7 +2403,7 @@ bool CpModelPresolver::PresolveElement(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { +bool CpModelPresolver::PresolveTable(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; if (ct->table().vars().empty()) { @@ -2409,9 +2417,9 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { const int num_vars = ct->table().vars_size(); const int num_tuples = ct->table().values_size() / num_vars; std::vector tuple(num_vars); - std::vector > new_tuples; + std::vector> new_tuples; new_tuples.reserve(num_tuples); - std::vector > new_domains(num_vars); + std::vector> new_domains(num_vars); std::vector affine_relations; absl::flat_hash_set visited; @@ -2439,7 +2447,7 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { for (int j = 0; j < num_vars; ++j) { const int ref = ct->table().vars(j); int64 v = ct->table().values(i * num_vars + j); - const AffineRelation::Relation &r = affine_relations[j]; + const AffineRelation::Relation& r = affine_relations[j]; if (r.representative != ref) { const int64 inverse_value = (v - 
r.offset) / r.coeff; if (inverse_value * r.coeff + r.offset != v) { @@ -2467,7 +2475,7 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { // Update the list of tuples if needed. if (new_tuples.size() < num_tuples || modified_variables) { ct->mutable_table()->clear_values(); - for (const std::vector &t : new_tuples) { + for (const std::vector& t : new_tuples) { for (const int64 v : t) { ct->mutable_table()->add_values(v); } @@ -2479,7 +2487,7 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { if (modified_variables) { for (int j = 0; j < num_vars; ++j) { - const AffineRelation::Relation &r = affine_relations[j]; + const AffineRelation::Relation& r = affine_relations[j]; if (r.representative != ct->table().vars(j)) { ct->mutable_table()->set_vars(j, r.representative); } @@ -2525,11 +2533,11 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { // it could. if (new_tuples.size() > 0.7 * prod) { // Enumerate all tuples. - std::vector > var_to_values(num_vars); + std::vector> var_to_values(num_vars); for (int j = 0; j < num_vars; ++j) { var_to_values[j].assign(new_domains[j].begin(), new_domains[j].end()); } - std::vector > all_tuples(prod); + std::vector> all_tuples(prod); for (int i = 0; i < prod; ++i) { all_tuples[i].resize(num_vars); int index = i; @@ -2541,14 +2549,14 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { gtl::STLSortAndRemoveDuplicates(&all_tuples); // Compute the complement of new_tuples. - std::vector > diff(prod - new_tuples.size()); + std::vector> diff(prod - new_tuples.size()); std::set_difference(all_tuples.begin(), all_tuples.end(), new_tuples.begin(), new_tuples.end(), diff.begin()); // Negate the constraint. 
ct->mutable_table()->set_negated(!ct->table().negated()); ct->mutable_table()->clear_values(); - for (const std::vector &t : diff) { + for (const std::vector& t : diff) { for (const int64 v : t) ct->mutable_table()->add_values(v); } context_->UpdateRuleStats("table: negated"); @@ -2556,11 +2564,11 @@ bool CpModelPresolver::PresolveTable(ConstraintProto *ct) { return modified_variables; } -bool CpModelPresolver::PresolveAllDiff(ConstraintProto *ct) { +bool CpModelPresolver::PresolveAllDiff(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; - AllDifferentConstraintProto &all_diff = *ct->mutable_all_diff(); + AllDifferentConstraintProto& all_diff = *ct->mutable_all_diff(); bool constraint_has_changed = false; for (;;) { @@ -2626,16 +2634,16 @@ bool CpModelPresolver::PresolveAllDiff(ConstraintProto *ct) { domain = domain.UnionWith(context_->DomainOf(all_diff.vars(i))); } if (all_diff.vars_size() == domain.Size()) { - absl::flat_hash_map > value_to_refs; + absl::flat_hash_map> value_to_refs; for (const int ref : all_diff.vars()) { - for (const ClosedInterval &interval : context_->DomainOf(ref)) { + for (const ClosedInterval& interval : context_->DomainOf(ref)) { for (int64 v = interval.start; v <= interval.end; ++v) { value_to_refs[v].push_back(ref); } } } bool propagated = false; - for (const auto &it : value_to_refs) { + for (const auto& it : value_to_refs) { if (it.second.size() == 1 && context_->DomainOf(it.second.front()).Size() > 1) { const int ref = it.second.front(); @@ -2661,7 +2669,7 @@ namespace { // Returns the sorted list of literals for given bool_or or at_most_one // constraint. 
-std::vector GetLiteralsFromSetPPCConstraint(ConstraintProto *ct) { +std::vector GetLiteralsFromSetPPCConstraint(ConstraintProto* ct) { std::vector sorted_literals; if (ct->constraint_case() == ConstraintProto::ConstraintCase::kAtMostOne) { for (const int literal : ct->at_most_one().literals()) { @@ -2679,8 +2687,8 @@ std::vector GetLiteralsFromSetPPCConstraint(ConstraintProto *ct) { // Add the constraint (lhs => rhs) to the given proto. The hash map lhs -> // bool_and constraint index is used to merge implications with the same lhs. -void AddImplication(int lhs, int rhs, CpModelProto *proto, - absl::flat_hash_map *ref_to_bool_and) { +void AddImplication(int lhs, int rhs, CpModelProto* proto, + absl::flat_hash_map* ref_to_bool_and) { if (ref_to_bool_and->contains(lhs)) { const int ct_index = (*ref_to_bool_and)[lhs]; proto->mutable_constraints(ct_index)->mutable_bool_and()->add_literals(rhs); @@ -2690,15 +2698,15 @@ void AddImplication(int lhs, int rhs, CpModelProto *proto, NegatedRef(lhs)); } else { (*ref_to_bool_and)[lhs] = proto->constraints_size(); - ConstraintProto *ct = proto->add_constraints(); + ConstraintProto* ct = proto->add_constraints(); ct->add_enforcement_literal(lhs); ct->mutable_bool_and()->add_literals(rhs); } } template -void ExtractClauses(bool use_bool_and, const ClauseContainer &container, - CpModelProto *proto) { +void ExtractClauses(bool use_bool_and, const ClauseContainer& container, + CpModelProto* proto) { // We regroup the "implication" into bool_and to have a more consise proto and // also for nicer information about the number of binary clauses. // @@ -2707,7 +2715,7 @@ void ExtractClauses(bool use_bool_and, const ClauseContainer &container, // how we perform the postsolve. absl::flat_hash_map ref_to_bool_and; for (int i = 0; i < container.NumClauses(); ++i) { - const std::vector &clause = container.Clause(i); + const std::vector& clause = container.Clause(i); if (clause.empty()) continue; // bool_and. 
@@ -2723,7 +2731,7 @@ void ExtractClauses(bool use_bool_and, const ClauseContainer &container, } // bool_or. - ConstraintProto *ct = proto->add_constraints(); + ConstraintProto* ct = proto->add_constraints(); for (const Literal l : clause) { if (l.IsPositive()) { ct->mutable_bool_or()->add_literals(l.Variable().value()); @@ -2736,10 +2744,10 @@ void ExtractClauses(bool use_bool_and, const ClauseContainer &container, } // namespace -bool CpModelPresolver::PresolveNoOverlap(ConstraintProto *ct) { +bool CpModelPresolver::PresolveNoOverlap(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; - const NoOverlapConstraintProto &proto = ct->no_overlap(); + const NoOverlapConstraintProto& proto = ct->no_overlap(); // Filter absent intervals. int new_size = 0; @@ -2774,7 +2782,7 @@ bool CpModelPresolver::PresolveNoOverlap(ConstraintProto *ct) { new_size = 0; for (int i = 0; i < proto.intervals_size(); ++i) { const int interval_index = proto.intervals(i); - const IntervalConstraintProto &interval = + const IntervalConstraintProto& interval = context_->working_model->constraints(interval_index).interval(); const int64 end_max_of_previous_intervals = end_max_so_far; end_max_so_far = std::max(end_max_so_far, context_->MaxOf(interval.end())); @@ -2803,10 +2811,10 @@ bool CpModelPresolver::PresolveNoOverlap(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveCumulative(ConstraintProto *ct) { +bool CpModelPresolver::PresolveCumulative(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; - const CumulativeConstraintProto &proto = ct->cumulative(); + const CumulativeConstraintProto& proto = ct->cumulative(); // Filter absent intervals. int new_size = 0; @@ -2858,10 +2866,10 @@ bool CpModelPresolver::PresolveCumulative(ConstraintProto *ct) { bool has_optional_interval = false; for (int i = 0; i < size; ++i) { // TODO(user): adapt in the presence of optional intervals. 
- const ConstraintProto &ct = + const ConstraintProto& ct = context_->working_model->constraints(proto.intervals(i)); if (!ct.enforcement_literal().empty()) has_optional_interval = true; - const IntervalConstraintProto &interval = ct.interval(); + const IntervalConstraintProto& interval = ct.interval(); start_indices[i] = interval.start(); const int duration_ref = interval.size(); const int demand_ref = proto.demands(i); @@ -2909,8 +2917,8 @@ bool CpModelPresolver::PresolveCumulative(ConstraintProto *ct) { if (num_greater_half_capacity == size) { if (num_duration_one == size && !has_optional_interval) { context_->UpdateRuleStats("cumulative: convert to all_different"); - ConstraintProto *new_ct = context_->working_model->add_constraints(); - auto *arg = new_ct->mutable_all_diff(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); + auto* arg = new_ct->mutable_all_diff(); for (const int var : start_indices) { arg->add_vars(var); } @@ -2918,8 +2926,8 @@ bool CpModelPresolver::PresolveCumulative(ConstraintProto *ct) { return RemoveConstraint(ct); } else { context_->UpdateRuleStats("cumulative: convert to no_overlap"); - ConstraintProto *new_ct = context_->working_model->add_constraints(); - auto *arg = new_ct->mutable_no_overlap(); + ConstraintProto* new_ct = context_->working_model->add_constraints(); + auto* arg = new_ct->mutable_no_overlap(); for (const int interval : proto.intervals()) { arg->add_intervals(interval); } @@ -2931,10 +2939,10 @@ bool CpModelPresolver::PresolveCumulative(ConstraintProto *ct) { return changed; } -bool CpModelPresolver::PresolveRoutes(ConstraintProto *ct) { +bool CpModelPresolver::PresolveRoutes(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; - RoutesConstraintProto &proto = *ct->mutable_routes(); + RoutesConstraintProto& proto = *ct->mutable_routes(); int new_size = 0; const int num_arcs = proto.literals_size(); @@ -2960,15 +2968,15 @@ bool 
CpModelPresolver::PresolveRoutes(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveCircuit(ConstraintProto *ct) { +bool CpModelPresolver::PresolveCircuit(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; - CircuitConstraintProto &proto = *ct->mutable_circuit(); + CircuitConstraintProto& proto = *ct->mutable_circuit(); // Convert the flat structure to a graph, note that we includes all the arcs // here (even if they are at false). - std::vector > incoming_arcs; - std::vector > outgoing_arcs; + std::vector> incoming_arcs; + std::vector> outgoing_arcs; int num_nodes = 0; const int num_arcs = proto.literals_size(); for (int i = 0; i < num_arcs; ++i) { @@ -2993,8 +3001,8 @@ bool CpModelPresolver::PresolveCircuit(ConstraintProto *ct) { int num_fixed_at_true = 0; while (loop_again) { loop_again = false; - for (const auto *node_to_refs : {&incoming_arcs, &outgoing_arcs}) { - for (const std::vector &refs : *node_to_refs) { + for (const auto* node_to_refs : {&incoming_arcs, &outgoing_arcs}) { + for (const std::vector& refs : *node_to_refs) { if (refs.size() == 1) { if (!context_->LiteralIsTrue(refs.front())) { ++num_fixed_at_true; @@ -3110,7 +3118,7 @@ bool CpModelPresolver::PresolveCircuit(ConstraintProto *ct) { // Look for in/out-degree of two, this will imply that one of the indicator // Boolean is equal to the negation of the other. 
for (int i = 0; i < num_nodes; ++i) { - for (const std::vector *arc_literals : + for (const std::vector* arc_literals : {&incoming_arcs[i], &outgoing_arcs[i]}) { std::vector literals; for (const int ref : *arc_literals) { @@ -3140,10 +3148,10 @@ bool CpModelPresolver::PresolveCircuit(ConstraintProto *ct) { return false; } -bool CpModelPresolver::PresolveAutomaton(ConstraintProto *ct) { +bool CpModelPresolver::PresolveAutomaton(ConstraintProto* ct) { if (context_->ModelIsUnsat()) return false; if (HasEnforcementLiteral(*ct)) return false; - AutomatonConstraintProto &proto = *ct->mutable_automaton(); + AutomatonConstraintProto& proto = *ct->mutable_automaton(); if (proto.vars_size() == 0 || proto.transition_label_size() == 0) { return false; } @@ -3222,7 +3230,7 @@ bool CpModelPresolver::PresolveAutomaton(ConstraintProto *ct) { const std::vector vars = {proto.vars().begin(), proto.vars().end()}; // Compute the set of reachable state at each time point. - std::vector > reachable_states(n + 1); + std::vector> reachable_states(n + 1); reachable_states[0].insert(proto.starting_state()); reachable_states[n] = {proto.final_states().begin(), proto.final_states().end()}; @@ -3242,7 +3250,7 @@ bool CpModelPresolver::PresolveAutomaton(ConstraintProto *ct) { } } - std::vector > reached_values(n); + std::vector> reached_values(n); // Backward. 
for (int time = n - 1; time >= 0; --time) { @@ -3285,7 +3293,7 @@ void CpModelPresolver::ExtractBoolAnd() { const int num_constraints = context_->working_model->constraints_size(); std::vector to_remove; for (int c = 0; c < num_constraints; ++c) { - const ConstraintProto &ct = context_->working_model->constraints(c); + const ConstraintProto& ct = context_->working_model->constraints(c); if (HasEnforcementLiteral(ct)) continue; if (ct.constraint_case() == ConstraintProto::ConstraintCase::kBoolOr && @@ -3309,7 +3317,7 @@ void CpModelPresolver::ExtractBoolAnd() { context_->UpdateNewConstraintsVariableUsage(); for (const int c : to_remove) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); CHECK(RemoveConstraint(ct)); context_->UpdateConstraintVariableUsage(c); } @@ -3323,7 +3331,7 @@ void CpModelPresolver::Probe() { FillDomainInProto(context_->DomainOf(i), context_->working_model->mutable_variables(i)); } - const CpModelProto &model_proto = *(context_->working_model); + const CpModelProto& model_proto = *(context_->working_model); // Load the constraints in a local model. // @@ -3335,19 +3343,19 @@ void CpModelPresolver::Probe() { Model model; // Adapt some of the parameters during this probing phase. 
- auto *local_param = model.GetOrCreate(); + auto* local_param = model.GetOrCreate(); *local_param = options_.parameters; local_param->set_use_implied_bounds(false); model.GetOrCreate()->MergeWithGlobalTimeLimit(options_.time_limit); - auto *encoder = model.GetOrCreate(); + auto* encoder = model.GetOrCreate(); encoder->DisableImplicationBetweenLiteral(); - auto *mapping = model.GetOrCreate(); + auto* mapping = model.GetOrCreate(); mapping->CreateVariables(model_proto, false, &model); mapping->DetectOptionalVariables(model_proto, &model); mapping->ExtractEncoding(model_proto, &model); - auto *sat_solver = model.GetOrCreate(); - for (const ConstraintProto &ct : model_proto.constraints()) { + auto* sat_solver = model.GetOrCreate(); + for (const ConstraintProto& ct : model_proto.constraints()) { if (mapping->ConstraintIsAlreadyLoaded(&ct)) continue; CHECK(LoadConstraint(ct, &model)); if (sat_solver->IsModelUnsat()) { @@ -3363,7 +3371,7 @@ void CpModelPresolver::Probe() { // // TODO(user): Compute the transitive reduction instead of just the // equivalences, and use the newly learned binary clauses? - auto *implication_graph = model.GetOrCreate(); + auto* implication_graph = model.GetOrCreate(); ProbeBooleanVariables(/*deterministic_time_limit=*/1.0, &model); if (options_.time_limit != nullptr) { options_.time_limit->AdvanceDeterministicTime( @@ -3385,7 +3393,7 @@ void CpModelPresolver::Probe() { } const int num_variables = context_->working_model->variables().size(); - auto *integer_trail = model.GetOrCreate(); + auto* integer_trail = model.GetOrCreate(); for (int var = 0; var < num_variables; ++var) { // Restrict IntegerVariable domain. // Note that Boolean are already dealt with above. @@ -3456,7 +3464,7 @@ void CpModelPresolver::PresolvePureSatPart() { // TODO(user): Be a bit more efficient, and enforce this invariant before we // reach this point? 
for (int c = 0; c < context_->working_model->constraints_size(); ++c) { - const ConstraintProto &ct = context_->working_model->constraints(c); + const ConstraintProto& ct = context_->working_model->constraints(c); if (ct.constraint_case() == ConstraintProto::ConstraintCase::kBoolOr || ct.constraint_case() == ConstraintProto::ConstraintCase::kBoolAnd) { if (PresolveOneConstraint(c)) { @@ -3480,7 +3488,7 @@ void CpModelPresolver::PresolvePureSatPart() { std::vector clause; int num_removed_constraints = 0; for (int i = 0; i < context_->working_model->constraints_size(); ++i) { - const ConstraintProto &ct = context_->working_model->constraints(i); + const ConstraintProto& ct = context_->working_model->constraints(i); if (ct.constraint_case() == ConstraintProto::ConstraintCase::kBoolOr) { ++num_removed_constraints; @@ -3564,7 +3572,7 @@ void CpModelPresolver::PresolvePureSatPart() { VLOG(1) << "New variables added by the SAT presolver."; for (int i = context_->working_model->variables_size(); i < new_num_variables; ++i) { - IntegerVariableProto *var_proto = + IntegerVariableProto* var_proto = context_->working_model->add_variables(); var_proto->add_domain(0); var_proto->add_domain(1); @@ -3619,7 +3627,7 @@ void CpModelPresolver::ExpandObjective() { absl::flat_hash_set relevant_constraints; std::vector var_to_num_relevant_constraints(num_variables, 0); for (int ct_index = 0; ct_index < num_constraints; ++ct_index) { - const ConstraintProto &ct = context_->working_model->constraints(ct_index); + const ConstraintProto& ct = context_->working_model->constraints(ct_index); // Skip everything that is not a linear equality constraint. 
if (!ct.enforcement_literal().empty() || ct.constraint_case() != ConstraintProto::ConstraintCase::kLinear || @@ -3676,7 +3684,7 @@ void CpModelPresolver::ExpandObjective() { int expanded_linear_index = -1; int64 objective_coeff_in_expanded_constraint; int64 size_of_expanded_constraint = 0; - const auto &non_deterministic_list = + const auto& non_deterministic_list = context_->VarToConstraints(objective_var); std::vector constraints_with_objective(non_deterministic_list.begin(), non_deterministic_list.end()); @@ -3684,7 +3692,7 @@ void CpModelPresolver::ExpandObjective() { constraints_with_objective.end()); for (const int ct_index : constraints_with_objective) { if (relevant_constraints.count(ct_index) == 0) continue; - const ConstraintProto &ct = + const ConstraintProto& ct = context_->working_model->constraints(ct_index); // This constraint is relevant now, but it will never be later because @@ -3739,7 +3747,7 @@ void CpModelPresolver::ExpandObjective() { // Update the objective map. Note that the division is possible because // currently we only expand with coeff with a magnitude of 1. CHECK_EQ(std::abs(objective_coeff_in_expanded_constraint), 1); - const ConstraintProto &ct = + const ConstraintProto& ct = context_->working_model->constraints(expanded_linear_index); context_->SubstituteVariableInObjective( objective_var, objective_coeff_in_expanded_constraint, ct, @@ -3795,7 +3803,7 @@ void CpModelPresolver::ExpandObjective() { unique_expanded_constraint != -1) { context_->UpdateRuleStats( "objective: removed unique objective constraint."); - ConstraintProto *mutable_ct = context_->working_model->mutable_constraints( + ConstraintProto* mutable_ct = context_->working_model->mutable_constraints( unique_expanded_constraint); *(context_->mapping_model->add_constraints()) = *mutable_ct; mutable_ct->Clear(); @@ -3819,9 +3827,9 @@ void CpModelPresolver::MergeNoOverlapConstraints() { // Extract the no-overlap constraints. 
std::vector disjunctive_index; - std::vector > cliques; + std::vector> cliques; for (int c = 0; c < num_constraints; ++c) { - const ConstraintProto &ct = context_->working_model->constraints(c); + const ConstraintProto& ct = context_->working_model->constraints(c); if (ct.constraint_case() != ConstraintProto::ConstraintCase::kNoOverlap) { continue; } @@ -3840,9 +3848,9 @@ void CpModelPresolver::MergeNoOverlapConstraints() { // We reuse the max-clique code from sat. Model local_model; local_model.GetOrCreate()->Resize(num_constraints); - auto *graph = local_model.GetOrCreate(); + auto* graph = local_model.GetOrCreate(); graph->Resize(num_constraints); - for (const std::vector &clique : cliques) { + for (const std::vector& clique : cliques) { // All variables at false is always a valid solution of the local model, // so this should never return UNSAT. CHECK(graph->AddAtMostOne(clique)); @@ -3856,7 +3864,7 @@ void CpModelPresolver::MergeNoOverlapConstraints() { int new_num_intervals = 0; for (int i = 0; i < cliques.size(); ++i) { const int ct_index = disjunctive_index[i]; - ConstraintProto *ct = + ConstraintProto* ct = context_->working_model->mutable_constraints(ct_index); ct->Clear(); if (cliques[i].empty()) continue; @@ -3887,10 +3895,10 @@ void CpModelPresolver::TransformIntoMaxCliques() { const int num_constraints = context_->working_model->constraints_size(); // Extract the bool_and and at_most_one constraints. 
- std::vector > cliques; + std::vector> cliques; for (int c = 0; c < num_constraints; ++c) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); if (ct->constraint_case() == ConstraintProto::ConstraintCase::kAtMostOne) { std::vector clique; for (const int ref : ct->at_most_one().literals()) { @@ -3919,9 +3927,9 @@ void CpModelPresolver::TransformIntoMaxCliques() { Model local_model; const int num_variables = context_->working_model->variables().size(); local_model.GetOrCreate()->Resize(num_variables); - auto *graph = local_model.GetOrCreate(); + auto* graph = local_model.GetOrCreate(); graph->Resize(num_variables); - for (const std::vector &clique : cliques) { + for (const std::vector& clique : cliques) { if (!graph->AddAtMostOne(clique)) { return (void)context_->NotifyThatModelIsUnsat(); } @@ -3946,10 +3954,10 @@ void CpModelPresolver::TransformIntoMaxCliques() { } int num_new_cliques = 0; - for (const std::vector &clique : cliques) { + for (const std::vector& clique : cliques) { if (clique.empty()) continue; num_new_cliques++; - ConstraintProto *ct = context_->working_model->add_constraints(); + ConstraintProto* ct = context_->working_model->add_constraints(); for (const Literal literal : clique) { if (literal.IsPositive()) { ct->mutable_at_most_one()->add_literals(literal.Variable().value()); @@ -3972,7 +3980,7 @@ void CpModelPresolver::TransformIntoMaxCliques() { bool CpModelPresolver::PresolveOneConstraint(int c) { if (context_->ModelIsUnsat()) return false; - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); // Generic presolve to exploit variable/literal equivalence. 
if (ExploitEquivalenceRelations(c, ct)) { @@ -4039,7 +4047,7 @@ bool CpModelPresolver::PresolveOneConstraint(int c) { } if (ct->constraint_case() == ConstraintProto::ConstraintCase::kLinear) { const int old_num_enforcement_literals = ct->enforcement_literal_size(); - ExtractEnforcementLiteralFromLinearConstraint(ct); + ExtractEnforcementLiteralFromLinearConstraint(c, ct); if (ct->constraint_case() == ConstraintProto::ConstraintCase::CONSTRAINT_NOT_SET) { context_->UpdateConstraintVariableUsage(c); @@ -4076,15 +4084,15 @@ bool CpModelPresolver::PresolveOneConstraint(int c) { } bool CpModelPresolver::ProcessSetPPCSubset( - int c1, int c2, const std::vector &c2_minus_c1, - const std::vector &original_constraint_index, - std::vector *marked_for_removal) { + int c1, int c2, const std::vector& c2_minus_c1, + const std::vector& original_constraint_index, + std::vector* marked_for_removal) { if (context_->ModelIsUnsat()) return false; CHECK(!(*marked_for_removal)[c1]); CHECK(!(*marked_for_removal)[c2]); - ConstraintProto *ct1 = context_->working_model->mutable_constraints( + ConstraintProto* ct1 = context_->working_model->mutable_constraints( original_constraint_index[c1]); - ConstraintProto *ct2 = context_->working_model->mutable_constraints( + ConstraintProto* ct2 = context_->working_model->mutable_constraints( original_constraint_index[c2]); if (ct1->constraint_case() == ConstraintProto::ConstraintCase::kBoolOr && ct2->constraint_case() == ConstraintProto::ConstraintCase::kAtMostOne) { @@ -4123,11 +4131,11 @@ bool CpModelPresolver::ProcessSetPPC() { // Graph of constraints to literals. constraint_literals[c] contains all the // literals in constraint indexed by 'c' in sorted order. - std::vector > constraint_literals; + std::vector> constraint_literals; // Graph of literals to constraints. literals_to_constraints[l] contains the // vector of constraint indices in which literal 'l' or 'neg(l)' appears. 
- std::vector > literals_to_constraints; + std::vector> literals_to_constraints; // vector of booleans indicating if the constraint is marked for removal. Note // that we don't remove constraints while processing them but remove all the @@ -4142,7 +4150,7 @@ bool CpModelPresolver::ProcessSetPPC() { // initialize other containers defined above. int num_setppc_constraints = 0; for (int c = 0; c < num_constraints; ++c) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); if (ct->constraint_case() == ConstraintProto::ConstraintCase::kBoolOr || ct->constraint_case() == ConstraintProto::ConstraintCase::kAtMostOne) { // Because TransformIntoMaxCliques() can detect literal equivalence @@ -4177,8 +4185,8 @@ bool CpModelPresolver::ProcessSetPPC() { VLOG(1) << "#setppc constraints: " << num_setppc_constraints; // Set of constraint pairs which are already compared. - absl::flat_hash_set > compared_constraints; - for (const std::vector &literal_to_constraints : + absl::flat_hash_set> compared_constraints; + for (const std::vector& literal_to_constraints : literals_to_constraints) { for (int index1 = 0; index1 < literal_to_constraints.size(); ++index1) { if (options_.time_limit != nullptr && @@ -4187,8 +4195,8 @@ bool CpModelPresolver::ProcessSetPPC() { } const int c1 = literal_to_constraints[index1]; if (marked_for_removal[c1]) continue; - const std::vector &c1_literals = constraint_literals[c1]; - ConstraintProto *ct1 = context_->working_model->mutable_constraints( + const std::vector& c1_literals = constraint_literals[c1]; + ConstraintProto* ct1 = context_->working_model->mutable_constraints( original_constraint_index[c1]); for (int index2 = index1 + 1; index2 < literal_to_constraints.size(); ++index2) { @@ -4217,8 +4225,8 @@ bool CpModelPresolver::ProcessSetPPC() { } // Check if literals in c1 is subset of literals in c2 or vice versa. 
- const std::vector &c2_literals = constraint_literals[c2]; - ConstraintProto *ct2 = context_->working_model->mutable_constraints( + const std::vector& c2_literals = constraint_literals[c2]; + ConstraintProto* ct2 = context_->working_model->mutable_constraints( original_constraint_index[c2]); // TODO(user): Try avoiding computation of set differences if // possible. @@ -4256,7 +4264,7 @@ bool CpModelPresolver::ProcessSetPPC() { } for (int c = 0; c < num_setppc_constraints; ++c) { if (marked_for_removal[c]) { - ConstraintProto *ct = context_->working_model->mutable_constraints( + ConstraintProto* ct = context_->working_model->mutable_constraints( original_constraint_index[c]); changed = RemoveConstraint(ct); context_->UpdateConstraintVariableUsage(original_constraint_index[c]); @@ -4286,7 +4294,7 @@ void CpModelPresolver::TryToSimplifyDomain(int var) { } // Only process discrete domain. - const Domain &domain = context_->DomainOf(var); + const Domain& domain = context_->DomainOf(var); // Special case for non-Boolean domain of size 2. 
if (domain.Size() == 2 && (domain.Min() != 0 || domain.Max() != 1)) { @@ -4299,7 +4307,7 @@ void CpModelPresolver::TryToSimplifyDomain(int var) { const int64 var_min = domain.Min(); int64 gcd = domain[1].start - var_min; for (int index = 2; index < domain.NumIntervals(); ++index) { - const ClosedInterval &i = domain[index]; + const ClosedInterval& i = domain[index]; CHECK_EQ(i.start, i.end); const int64 shifted_value = i.start - var_min; CHECK_GE(shifted_value, 0); @@ -4313,7 +4321,7 @@ void CpModelPresolver::TryToSimplifyDomain(int var) { { std::vector scaled_values; for (int index = 0; index < domain.NumIntervals(); ++index) { - const ClosedInterval &i = domain[index]; + const ClosedInterval& i = domain[index]; CHECK_EQ(i.start, i.end); const int64 shifted_value = i.start - var_min; scaled_values.push_back(shifted_value / gcd); @@ -4347,8 +4355,8 @@ void CpModelPresolver::EncodeAllAffineRelations() { } ++num_added; - ConstraintProto *ct = context_->working_model->add_constraints(); - auto *arg = ct->mutable_linear(); + ConstraintProto* ct = context_->working_model->add_constraints(); + auto* arg = ct->mutable_linear(); arg->add_vars(var); arg->add_coeffs(1); arg->add_vars(r.representative); @@ -4390,8 +4398,8 @@ bool CpModelPresolver::PresolveAffineRelationIfAny(int var) { // any value of the representative. if (context_->VariableIsUniqueAndRemovable(var)) { // Add relation with current representative to the mapping model. - ConstraintProto *ct = context_->mapping_model->add_constraints(); - auto *arg = ct->mutable_linear(); + ConstraintProto* ct = context_->mapping_model->add_constraints(); + auto* arg = ct->mutable_linear(); arg->add_vars(var); arg->add_coeffs(1); arg->add_vars(r.representative); @@ -4415,9 +4423,9 @@ void CpModelPresolver::PresolveToFixPoint() { // This is used for constraint having unique variables in them (i.e. not // appearing anywhere else) to not call the presolve more than once for this // reason. 
- absl::flat_hash_set > var_constraint_pair_already_called; + absl::flat_hash_set> var_constraint_pair_already_called; - TimeLimit *time_limit = options_.time_limit; + TimeLimit* time_limit = options_.time_limit; // The queue of "active" constraints, initialized to the non-empty ones. std::vector in_queue(context_->working_model->constraints_size(), @@ -4433,8 +4441,7 @@ void CpModelPresolver::PresolveToFixPoint() { // When thinking about how the presolve works, it seems like a good idea to // process the "simple" constraints first in order to be more efficient. - // In September 2019, experiment on the flatzinc problems shows no changes - // in + // In September 2019, experiment on the flatzinc problems shows no changes in // the results. We should actually count the number of rules triggered. std::sort(queue.begin(), queue.end(), [this](int a, int b) { const int score_a = context_->ConstraintToVars(a).size(); @@ -4518,7 +4525,7 @@ void CpModelPresolver::PresolveToFixPoint() { // once. const int num_vars = context_->working_model->variables_size(); for (int v = 0; v < num_vars; ++v) { - const auto &constraints = context_->VarToConstraints(v); + const auto& constraints = context_->VarToConstraints(v); if (constraints.size() != 1) continue; const int c = *constraints.begin(); if (c < 0) continue; @@ -4546,6 +4553,20 @@ void CpModelPresolver::PresolveToFixPoint() { if (context_->ModelIsUnsat()) return; + // Detect & exploit dominance between variables, or variables that can move + // freely in one direction. + // + // TODO(user): We can support assumptions but we need to not cut them out of + // the feasible region. 
+ if (!context_->keep_all_feasible_solutions && + context_->working_model->assumptions().empty()) { + VarDomination var_dom; + DualBoundStrengthening dual_bound_strengthening; + DetectDominanceRelations(*context_, &var_dom, &dual_bound_strengthening); + if (!dual_bound_strengthening.Strengthen(context_)) return; + if (!ExploitDominanceRelations(var_dom, context_)) return; + } + // Second "pass" for transformation better done after all of the above and // that do not need a fix-point loop. // @@ -4556,7 +4577,7 @@ void CpModelPresolver::PresolveToFixPoint() { // maintain such list. const int num_constraints = context_->working_model->constraints_size(); for (int c = 0; c < num_constraints; ++c) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kNoOverlap: // Filter out absent intervals. @@ -4577,7 +4598,7 @@ void CpModelPresolver::PresolveToFixPoint() { case ConstraintProto::ConstraintCase::kBoolOr: { // Try to infer domain reductions from clauses and the saved "implies in // domain" relations. 
- for (const auto &pair : + for (const auto& pair : context_->deductions.ProcessClause(ct->bool_or().literals())) { bool modified = false; if (!context_->IntersectDomainWith(pair.first, pair.second, @@ -4598,14 +4619,14 @@ void CpModelPresolver::PresolveToFixPoint() { context_->deductions.MarkProcessingAsDoneForNow(); } -void LogInfoFromContext(const PresolveContext *context) { +void LogInfoFromContext(const PresolveContext* context) { LOG(INFO) << "- " << context->NumAffineRelations() << " affine relations were detected."; LOG(INFO) << "- " << context->NumEquivRelations() << " variable equivalence relations were detected."; std::map sorted_rules(context->stats_by_rule_name.begin(), context->stats_by_rule_name.end()); - for (const auto &entry : sorted_rules) { + for (const auto& entry : sorted_rules) { if (entry.second == 1) { LOG(INFO) << "- rule '" << entry.first << "' was applied 1 time."; } else { @@ -4619,25 +4640,26 @@ void LogInfoFromContext(const PresolveContext *context) { // Public API. 
// ============================================================================= -bool PresolveCpModel(const PresolveOptions &options, PresolveContext *context, - std::vector *postsolve_mapping) { +bool PresolveCpModel(const PresolveOptions& options, PresolveContext* context, + std::vector* postsolve_mapping) { CpModelPresolver presolver(options, context, postsolve_mapping); return presolver.Presolve(); } -CpModelPresolver::CpModelPresolver(const PresolveOptions &options, - PresolveContext *context, - std::vector *postsolve_mapping) +CpModelPresolver::CpModelPresolver(const PresolveOptions& options, + PresolveContext* context, + std::vector* postsolve_mapping) : options_(options), postsolve_mapping_(postsolve_mapping), context_(context) { context_->keep_all_feasible_solutions = + options.parameters.keep_all_feasible_solutions_in_presolve() || options.parameters.enumerate_all_solutions() || options.parameters.fill_tightened_domains_in_response() || !options.parameters.cp_model_presolve(); // We copy the search strategy to the mapping_model. - for (const auto &decision_strategy : + for (const auto& decision_strategy : context_->working_model->search_strategy()) { *(context_->mapping_model->add_search_strategy()) = decision_strategy; } @@ -4687,7 +4709,7 @@ bool CpModelPresolver::Presolve() { // context_->ConstraintVariableGraphIsUpToDate() before doing anything that // depends on the graph. 
for (int c = 0; c < context_->working_model->constraints_size(); ++c) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); PresolveEnforcementLiteral(ct); switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: @@ -4765,7 +4787,7 @@ bool CpModelPresolver::Presolve() { if (!context_->ModelIsUnsat() && iter == 0) { const int old_size = context_->working_model->constraints_size(); for (int c = 0; c < old_size; ++c) { - ConstraintProto *ct = context_->working_model->mutable_constraints(c); + ConstraintProto* ct = context_->working_model->mutable_constraints(c); if (ct->constraint_case() != ConstraintProto::ConstraintCase::kLinear) { continue; } @@ -4850,7 +4872,7 @@ bool CpModelPresolver::Presolve() { // variable order is not CHOOSE_FIRST, then we also encode the associated // affine transformation in order to preserve the order. absl::flat_hash_set used_variables; - for (DecisionStrategyProto &strategy : + for (DecisionStrategyProto& strategy : *context_->working_model->mutable_search_strategy()) { DecisionStrategyProto copy = strategy; strategy.clear_variables(); @@ -4874,7 +4896,7 @@ bool CpModelPresolver::Presolve() { strategy.add_variables(rep); if (strategy.variable_selection_strategy() != DecisionStrategyProto::CHOOSE_FIRST) { - DecisionStrategyProto::AffineTransformation *t = + DecisionStrategyProto::AffineTransformation* t = strategy.add_transformations(); t->set_var(rep); t->set_offset(r.offset); @@ -4954,37 +4976,37 @@ bool CpModelPresolver::Presolve() { return true; } -void ApplyVariableMapping(const std::vector &mapping, - const PresolveContext &context) { - CpModelProto *proto = context.working_model; +void ApplyVariableMapping(const std::vector& mapping, + const PresolveContext& context) { + CpModelProto* proto = context.working_model; // Remap all the variable/literal references in the constraints and the // enforcement literals in the 
variables. - auto mapping_function = [&mapping](int *ref) { + auto mapping_function = [&mapping](int* ref) { const int image = mapping[PositiveRef(*ref)]; CHECK_GE(image, 0); *ref = RefIsPositive(*ref) ? image : NegatedRef(image); }; - for (ConstraintProto &ct_ref : *proto->mutable_constraints()) { + for (ConstraintProto& ct_ref : *proto->mutable_constraints()) { ApplyToAllVariableIndices(mapping_function, &ct_ref); ApplyToAllLiteralIndices(mapping_function, &ct_ref); } // Remap the objective variables. if (proto->has_objective()) { - for (int &mutable_ref : *proto->mutable_objective()->mutable_vars()) { + for (int& mutable_ref : *proto->mutable_objective()->mutable_vars()) { mapping_function(&mutable_ref); } } // Remap the assumptions. - for (int &mutable_ref : *proto->mutable_assumptions()) { + for (int& mutable_ref : *proto->mutable_assumptions()) { mapping_function(&mutable_ref); } // Remap the search decision heuristic. // Note that we delete any heuristic related to a removed variable. - for (DecisionStrategyProto &strategy : *proto->mutable_search_strategy()) { + for (DecisionStrategyProto& strategy : *proto->mutable_search_strategy()) { DecisionStrategyProto copy = strategy; strategy.clear_variables(); for (const int ref : copy.variables()) { @@ -4994,11 +5016,11 @@ void ApplyVariableMapping(const std::vector &mapping, } } strategy.clear_transformations(); - for (const auto &transform : copy.transformations()) { + for (const auto& transform : copy.transformations()) { const int ref = transform.var(); const int image = mapping[PositiveRef(ref)]; if (image >= 0) { - auto *new_transform = strategy.add_transformations(); + auto* new_transform = strategy.add_transformations(); *new_transform = transform; new_transform->set_var(RefIsPositive(ref) ? image : NegatedRef(image)); } @@ -5007,7 +5029,7 @@ void ApplyVariableMapping(const std::vector &mapping, // Remap the solution hint. 
if (proto->has_solution_hint()) { - auto *mutable_hint = proto->mutable_solution_hint(); + auto* mutable_hint = proto->mutable_solution_hint(); int new_size = 0; for (int i = 0; i < mutable_hint->vars_size(); ++i) { const int old_ref = mutable_hint->vars(i); @@ -5045,17 +5067,17 @@ void ApplyVariableMapping(const std::vector &mapping, new_variables[image].Swap(proto->mutable_variables(i)); } proto->clear_variables(); - for (IntegerVariableProto &proto_ref : new_variables) { + for (IntegerVariableProto& proto_ref : new_variables) { proto->add_variables()->Swap(&proto_ref); } // Check that all variables are used. - for (const IntegerVariableProto &v : proto->variables()) { + for (const IntegerVariableProto& v : proto->variables()) { CHECK_GT(v.domain_size(), 0); } } -std::vector FindDuplicateConstraints(const CpModelProto &model_proto) { +std::vector FindDuplicateConstraints(const CpModelProto& model_proto) { std::vector result; // We use a map hash: serialized_constraint_proto -> constraint index. diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 54d35c018d..58e76edb31 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -36,8 +36,8 @@ namespace sat { // // The image of the mapping should be dense in [0, new_num_variables), this is // also CHECKed. -void ApplyVariableMapping(const std::vector &mapping, - const PresolveContext &context); +void ApplyVariableMapping(const std::vector& mapping, + const PresolveContext& context); // Presolves the initial content of presolved_model. // @@ -62,8 +62,8 @@ void ApplyVariableMapping(const std::vector &mapping, // inside the model. 
We can add a IntegerVariableProto::initial_index; class CpModelPresolver { public: - CpModelPresolver(const PresolveOptions &options, PresolveContext *context, - std::vector *postsolve_mapping); + CpModelPresolver(const PresolveOptions& options, PresolveContext* context, + std::vector* postsolve_mapping); // Returns false if a non-recoverable error was encountered. // @@ -94,34 +94,34 @@ class CpModelPresolver { // the current code. This way we shouldn't keep doing computation on an // inconsistent state. // TODO(user,user): Make these public and unit test. - bool PresolveAutomaton(ConstraintProto *ct); - bool PresolveCircuit(ConstraintProto *ct); - bool PresolveRoutes(ConstraintProto *ct); - bool PresolveCumulative(ConstraintProto *ct); - bool PresolveNoOverlap(ConstraintProto *ct); - bool PresolveAllDiff(ConstraintProto *ct); - bool PresolveTable(ConstraintProto *ct); - bool PresolveElement(ConstraintProto *ct); - bool PresolveInterval(int c, ConstraintProto *ct); - bool PresolveIntDiv(ConstraintProto *ct); - bool PresolveIntProd(ConstraintProto *ct); - bool PresolveIntMin(ConstraintProto *ct); - bool PresolveIntMax(ConstraintProto *ct); - bool PresolveLinMin(ConstraintProto *ct); - bool PresolveLinMax(ConstraintProto *ct); - bool PresolveIntAbs(ConstraintProto *ct); - bool PresolveBoolXor(ConstraintProto *ct); - bool PresolveAtMostOne(ConstraintProto *ct); - bool PresolveBoolAnd(ConstraintProto *ct); - bool PresolveBoolOr(ConstraintProto *ct); - bool PresolveEnforcementLiteral(ConstraintProto *ct); + bool PresolveAutomaton(ConstraintProto* ct); + bool PresolveCircuit(ConstraintProto* ct); + bool PresolveRoutes(ConstraintProto* ct); + bool PresolveCumulative(ConstraintProto* ct); + bool PresolveNoOverlap(ConstraintProto* ct); + bool PresolveAllDiff(ConstraintProto* ct); + bool PresolveTable(ConstraintProto* ct); + bool PresolveElement(ConstraintProto* ct); + bool PresolveInterval(int c, ConstraintProto* ct); + bool PresolveIntDiv(ConstraintProto* ct); + bool 
PresolveIntProd(ConstraintProto* ct); + bool PresolveIntMin(ConstraintProto* ct); + bool PresolveIntMax(ConstraintProto* ct); + bool PresolveLinMin(ConstraintProto* ct); + bool PresolveLinMax(ConstraintProto* ct); + bool PresolveIntAbs(ConstraintProto* ct); + bool PresolveBoolXor(ConstraintProto* ct); + bool PresolveAtMostOne(ConstraintProto* ct); + bool PresolveBoolAnd(ConstraintProto* ct); + bool PresolveBoolOr(ConstraintProto* ct); + bool PresolveEnforcementLiteral(ConstraintProto* ct); // For the linear constraints, we have more than one function. - bool CanonicalizeLinear(ConstraintProto *ct); - bool PropagateDomainsInLinear(int c, ConstraintProto *ct); - bool RemoveSingletonInLinear(ConstraintProto *ct); - bool PresolveSmallLinear(ConstraintProto *ct); - bool PresolveLinearOnBooleans(ConstraintProto *ct); + bool CanonicalizeLinear(ConstraintProto* ct); + bool PropagateDomainsInLinear(int c, ConstraintProto* ct); + bool RemoveSingletonInLinear(ConstraintProto* ct); + bool PresolveSmallLinear(ConstraintProto* ct); + bool PresolveLinearOnBooleans(ConstraintProto* ct); // SetPPC is short for set packing, partitioning and covering constraints. // These are sum of booleans <=, = and >= 1 respectively. @@ -130,17 +130,19 @@ class CpModelPresolver { // Removes dominated constraints or fixes some variables for given pair of // setppc constraints. This assumes that literals in constraint c1 is subset // of literals in constraint c2. - bool ProcessSetPPCSubset(int c1, int c2, const std::vector &c2_minus_c1, - const std::vector &original_constraint_index, - std::vector *marked_for_removal); + bool ProcessSetPPCSubset(int c1, int c2, const std::vector& c2_minus_c1, + const std::vector& original_constraint_index, + std::vector* marked_for_removal); void PresolvePureSatPart(); // Extracts AtMostOne constraint from Linear constraint. 
- void ExtractAtMostOneFromLinear(ConstraintProto *ct); + void ExtractAtMostOneFromLinear(ConstraintProto* ct); - void DivideLinearByGcd(ConstraintProto *ct); - void ExtractEnforcementLiteralFromLinearConstraint(ConstraintProto *ct); + void DivideLinearByGcd(ConstraintProto* ct); + + void ExtractEnforcementLiteralFromLinearConstraint(int ct_index, + ConstraintProto* ct); // Extracts cliques from bool_and and small at_most_one constraints and // transforms them into maximal cliques. @@ -160,25 +162,25 @@ class CpModelPresolver { void EncodeAllAffineRelations(); bool PresolveAffineRelationIfAny(int var); - bool IntervalsCanIntersect(const IntervalConstraintProto &interval1, - const IntervalConstraintProto &interval2); + bool IntervalsCanIntersect(const IntervalConstraintProto& interval1, + const IntervalConstraintProto& interval2); - bool ExploitEquivalenceRelations(int c, ConstraintProto *ct); + bool ExploitEquivalenceRelations(int c, ConstraintProto* ct); - ABSL_MUST_USE_RESULT bool RemoveConstraint(ConstraintProto *ct); - ABSL_MUST_USE_RESULT bool MarkConstraintAsFalse(ConstraintProto *ct); + ABSL_MUST_USE_RESULT bool RemoveConstraint(ConstraintProto* ct); + ABSL_MUST_USE_RESULT bool MarkConstraintAsFalse(ConstraintProto* ct); - const PresolveOptions &options_; - std::vector *postsolve_mapping_; - PresolveContext *context_; + const PresolveOptions& options_; + std::vector* postsolve_mapping_; + PresolveContext* context_; // Used by CanonicalizeLinear(). - std::vector > tmp_terms_; + std::vector> tmp_terms_; }; // Convenient wrapper to call the full presolve. -bool PresolveCpModel(const PresolveOptions &options, PresolveContext *context, - std::vector *postsolve_mapping); +bool PresolveCpModel(const PresolveOptions& options, PresolveContext* context, + std::vector* postsolve_mapping); // Returns the index of exact duplicate constraints in the given proto. 
That // is, all returned constraints will have an identical constraint before it in @@ -189,7 +191,7 @@ bool PresolveCpModel(const PresolveOptions &options, PresolveContext *context, // // TODO(user): Ignore names? canonicalize constraint further by sorting // enforcement literal list for instance... -std::vector FindDuplicateConstraints(const CpModelProto &model_proto); +std::vector FindDuplicateConstraints(const CpModelProto& model_proto); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index f6d19a6f22..d205470231 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -40,19 +40,19 @@ struct VarValue { }; const std::function ConstructSearchStrategyInternal( - const absl::flat_hash_map > - &var_to_coeff_offset_pair, - const std::vector &strategies, Model *model) { - IntegerEncoder *const integer_encoder = model->GetOrCreate(); - IntegerTrail *const integer_trail = model->GetOrCreate(); + const absl::flat_hash_map>& + var_to_coeff_offset_pair, + const std::vector& strategies, Model* model) { + IntegerEncoder* const integer_encoder = model->GetOrCreate(); + IntegerTrail* const integer_trail = model->GetOrCreate(); // Note that we copy strategies to keep the return function validity // independently of the life of the passed vector. 
return [integer_encoder, integer_trail, strategies, var_to_coeff_offset_pair, model]() { - const SatParameters *const parameters = model->GetOrCreate(); + const SatParameters* const parameters = model->GetOrCreate(); - for (const Strategy &strategy : strategies) { + for (const Strategy& strategy : strategies) { IntegerVariable candidate = kNoIntegerVariable; IntegerValue candidate_value = kMaxIntegerValue; IntegerValue candidate_lb; @@ -125,7 +125,7 @@ const std::function ConstructSearchStrategyInternal( CHECK(!active_vars.empty()); const IntegerValue threshold( candidate_value + parameters->search_randomization_tolerance()); - auto is_above_tolerance = [threshold](const VarValue &entry) { + auto is_above_tolerance = [threshold](const VarValue& entry) { return entry.value > threshold; }; // Remove all values above tolerance. @@ -171,9 +171,9 @@ const std::function ConstructSearchStrategyInternal( } std::function ConstructSearchStrategy( - const CpModelProto &cp_model_proto, - const std::vector &variable_mapping, - IntegerVariable objective_var, Model *model) { + const CpModelProto& cp_model_proto, + const std::vector& variable_mapping, + IntegerVariable objective_var, Model* model) { // Default strategy is to instantiate the IntegerVariable in order. std::function default_search_strategy = nullptr; const bool instantiate_all_variables = @@ -196,10 +196,10 @@ std::function ConstructSearchStrategy( } std::vector strategies; - absl::flat_hash_map > var_to_coeff_offset_pair; - for (const DecisionStrategyProto &proto : cp_model_proto.search_strategy()) { + absl::flat_hash_map> var_to_coeff_offset_pair; + for (const DecisionStrategyProto& proto : cp_model_proto.search_strategy()) { strategies.push_back(Strategy()); - Strategy &strategy = strategies.back(); + Strategy& strategy = strategies.back(); for (const int ref : proto.variables()) { strategy.variables.push_back( RefIsPositive(ref) ? 
variable_mapping[ref] @@ -207,7 +207,7 @@ std::function ConstructSearchStrategy( } strategy.var_strategy = proto.variable_selection_strategy(); strategy.domain_strategy = proto.domain_reduction_strategy(); - for (const auto &transform : proto.transformations()) { + for (const auto& transform : proto.transformations()) { const int ref = transform.var(); const IntegerVariable var = RefIsPositive(ref) ? variable_mapping[ref] @@ -229,10 +229,10 @@ std::function ConstructSearchStrategy( } std::function InstrumentSearchStrategy( - const CpModelProto &cp_model_proto, - const std::vector &variable_mapping, - const std::function &instrumented_strategy, - Model *model) { + const CpModelProto& cp_model_proto, + const std::vector& variable_mapping, + const std::function& instrumented_strategy, + Model* model) { std::vector ref_to_display; for (int i = 0; i < cp_model_proto.variables_size(); ++i) { if (variable_mapping[i] == kNoIntegerVariable) continue; @@ -244,7 +244,7 @@ std::function InstrumentSearchStrategy( cp_model_proto.variables(j).name(); }); - std::vector > old_domains(variable_mapping.size()); + std::vector> old_domains(variable_mapping.size()); return [instrumented_strategy, model, variable_mapping, cp_model_proto, old_domains, ref_to_display]() mutable { const BooleanOrIntegerLiteral decision = instrumented_strategy(); @@ -263,7 +263,7 @@ std::function InstrumentSearchStrategy( const int level = model->Get()->CurrentDecisionLevel(); std::string to_display = absl::StrCat("Diff since last call, level=", level, "\n"); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); for (const int ref : ref_to_display) { const IntegerVariable var = variable_mapping[ref]; const std::pair new_domain( @@ -290,7 +290,7 @@ std::function InstrumentSearchStrategy( // - Fast restart in randomized search // - Different propatation levels for scheduling constraints std::vector GetDiverseSetOfParameters( - const SatParameters 
&base_params, const CpModelProto &cp_model, + const SatParameters& base_params, const CpModelProto& cp_model, const int num_workers) { // Defines a set of named strategies so it is easier to read in one place // the one that are used. See below. @@ -399,7 +399,7 @@ std::vector GetDiverseSetOfParameters( // Creates the diverse set of parameters with names and seed. We remove the // last ones if needed below. std::vector result; - for (const std::string &name : names) { + for (const std::string& name : names) { SatParameters new_params = strategies.at(name); new_params.set_name(name); new_params.set_random_seed(result.size() + 1); diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 64482b7831..824fb7267d 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -42,6 +42,7 @@ #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/synchronization/mutex.h" +#include "ortools/base/vlog_is_on.h" #include "ortools/base/commandlineflags.h" #include "ortools/base/int_type.h" #include "ortools/base/int_type_indexed_vector.h" @@ -50,7 +51,6 @@ #include "ortools/base/map_util.h" #include "ortools/base/threadpool.h" #include "ortools/base/timer.h" -#include "ortools/base/vlog_is_on.h" #include "ortools/graph/connected_components.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/circuit.h" @@ -95,19 +95,17 @@ ABSL_FLAG(std::string, cp_model_dump_prefix, "/tmp/", ABSL_FLAG(bool, cp_model_dump_models, false, "DEBUG ONLY. When set to true, SolveCpModel() will dump its model " "protos (original model, presolved model, mapping model) in text " - "format to " - "'absl::GetFlag(FLAGS_cp_model_dump_prefix)'{model|presolved_model|" + "format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|" "mapping_model}.pbtxt."); ABSL_FLAG(bool, cp_model_dump_lns, false, "DEBUG ONLY. 
When set to true, solve will dump all " "lns models proto in text format to " - "'absl::GetFlag(FLAGS_cp_model_dump_prefix)'lns_xxx.pbtxt."); + "'FLAGS_cp_model_dump_prefix'lns_xxx.pbtxt."); -ABSL_FLAG( - bool, cp_model_dump_response, false, - "DEBUG ONLY. If true, the final response of each solve will be " - "dumped to 'absl::GetFlag(FLAGS_cp_model_dump_prefix)'response.pbtxt"); +ABSL_FLAG(bool, cp_model_dump_response, false, + "DEBUG ONLY. If true, the final response of each solve will be " + "dumped to 'FLAGS_cp_model_dump_prefix'response.pbtxt"); ABSL_FLAG(std::string, cp_model_params, "", "This is interpreted as a text SatParameters proto. The " @@ -137,7 +135,7 @@ namespace sat { namespace { // Makes the string fit in one line by cutting it in the middle if necessary. -std::string Summarize(const std::string &input) { +std::string Summarize(const std::string& input) { if (input.size() < 105) return input; const int half = 50; return absl::StrCat(input.substr(0, half), " ... ", @@ -150,12 +148,12 @@ std::string Summarize(const std::string &input) { // Public API. 
// ============================================================================= -std::string CpModelStats(const CpModelProto &model_proto) { +std::string CpModelStats(const CpModelProto& model_proto) { std::map num_constraints_by_name; std::map num_reif_constraints_by_name; std::map name_to_num_literals; std::map name_to_num_terms; - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { std::string name = ConstraintCaseName(ct.constraint_case()); // We split the linear constraints into 3 buckets has it gives more insight @@ -193,7 +191,7 @@ std::string CpModelStats(const CpModelProto &model_proto) { int num_constants = 0; std::set constant_values; std::map num_vars_per_domains; - for (const IntegerVariableProto &var : model_proto.variables()) { + for (const IntegerVariableProto& var : model_proto.variables()) { if (var.domain_size() == 2 && var.domain(0) == var.domain(1)) { ++num_constants; constant_values.insert(var.domain(0)); @@ -211,7 +209,7 @@ std::string CpModelStats(const CpModelProto &model_proto) { "':\n"); } - for (const DecisionStrategyProto &strategy : model_proto.search_strategy()) { + for (const DecisionStrategyProto& strategy : model_proto.search_strategy()) { absl::StrAppend( &result, "Search strategy: on ", strategy.variables_size(), " variables, ", @@ -231,7 +229,7 @@ std::string CpModelStats(const CpModelProto &model_proto) { absl::StrAppend(&result, "#Variables: ", model_proto.variables_size(), objective_string, "\n"); if (num_vars_per_domains.size() < 100) { - for (const auto &entry : num_vars_per_domains) { + for (const auto& entry : num_vars_per_domains) { const std::string temp = absl::StrCat(" - ", entry.second, " in ", entry.first.ToString(), "\n"); absl::StrAppend(&result, Summarize(temp)); @@ -240,7 +238,7 @@ std::string CpModelStats(const CpModelProto &model_proto) { int64 max_complexity = 0; int64 min = kint64max; int64 max = kint64min; - for (const auto &entry : 
num_vars_per_domains) { + for (const auto& entry : num_vars_per_domains) { min = std::min(min, entry.first.Min()); max = std::max(max, entry.first.Max()); max_complexity = std::max(max_complexity, @@ -260,8 +258,8 @@ std::string CpModelStats(const CpModelProto &model_proto) { std::vector constraints; constraints.reserve(num_constraints_by_name.size()); - for (const auto &entry : num_constraints_by_name) { - const std::string &name = entry.first; + for (const auto& entry : num_constraints_by_name) { + const std::string& name = entry.first; constraints.push_back(absl::StrCat("#", name, ": ", entry.second)); if (gtl::ContainsKey(num_reif_constraints_by_name, name)) { absl::StrAppend(&constraints.back(), @@ -282,7 +280,7 @@ std::string CpModelStats(const CpModelProto &model_proto) { return result; } -std::string CpSolverResponseStats(const CpSolverResponse &response, +std::string CpSolverResponseStats(const CpSolverResponse& response, bool has_objective) { std::string result; absl::StrAppend(&result, "CpSolverResponse:"); @@ -320,15 +318,15 @@ std::string CpSolverResponseStats(const CpSolverResponse &response, namespace { -void FillSolutionInResponse(const CpModelProto &model_proto, const Model &model, - CpSolverResponse *response) { +void FillSolutionInResponse(const CpModelProto& model_proto, const Model& model, + CpSolverResponse* response) { response->clear_solution(); response->clear_solution_lower_bounds(); response->clear_solution_upper_bounds(); - auto *mapping = model.Get(); - auto *trail = model.Get(); - auto *integer_trail = model.Get(); + auto* mapping = model.Get(); + auto* trail = model.Get(); + auto* integer_trail = model.Get(); std::vector solution; for (int i = 0; i < model_proto.variables_size(); ++i) { @@ -368,7 +366,7 @@ void FillSolutionInResponse(const CpModelProto &model_proto, const Model &model, } else { // Not all variables are fixed. // We fill instead the lb/ub of each variables. 
- const auto &assignment = trail->Assignment(); + const auto& assignment = trail->Assignment(); for (int i = 0; i < model_proto.variables_size(); ++i) { if (mapping->IsBoolean(i)) { if (assignment.VariableIsAssigned(mapping->Literal(i).Variable())) { @@ -392,8 +390,7 @@ void FillSolutionInResponse(const CpModelProto &model_proto, const Model &model, namespace { IntegerVariable GetOrCreateVariableWithTightBound( - const std::vector > &terms, - Model *model) { + const std::vector>& terms, Model* model) { if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); if (terms.size() == 1 && terms.front().second == 1) { return terms.front().first; @@ -417,8 +414,7 @@ IntegerVariable GetOrCreateVariableWithTightBound( } IntegerVariable GetOrCreateVariableGreaterOrEqualToSumOf( - const std::vector > &terms, - Model *model) { + const std::vector>& terms, Model* model) { if (terms.empty()) return model->Add(ConstantIntegerVariable(0)); if (terms.size() == 1 && terms.front().second == 1) { return terms.front().first; @@ -432,7 +428,7 @@ IntegerVariable GetOrCreateVariableGreaterOrEqualToSumOf( GetOrCreateVariableWithTightBound(terms, model); std::vector vars; std::vector coeffs; - for (const auto &term : terms) { + for (const auto& term : terms) { vars.push_back(term.first); coeffs.push_back(term.second); } @@ -442,12 +438,12 @@ IntegerVariable GetOrCreateVariableGreaterOrEqualToSumOf( return new_var; } -void TryToAddCutGenerators(const CpModelProto &model_proto, - const ConstraintProto &ct, Model *m, - LinearRelaxation *relaxation) { +void TryToAddCutGenerators(const CpModelProto& model_proto, + const ConstraintProto& ct, Model* m, + LinearRelaxation* relaxation) { const int linearization_level = m->GetOrCreate()->linearization_level(); - auto *mapping = m->GetOrCreate(); + auto* mapping = m->GetOrCreate(); if (ct.constraint_case() == ConstraintProto::ConstraintCase::kCircuit && linearization_level > 1) { std::vector tails(ct.circuit().tails().begin(), @@ -496,7 +492,7 
@@ void TryToAddCutGenerators(const CpModelProto &model_proto, IntegerVariable x = mapping->Integer(ct.int_prod().vars(0)); IntegerVariable y = mapping->Integer(ct.int_prod().vars(1)); - IntegerTrail *const integer_trail = m->GetOrCreate(); + IntegerTrail* const integer_trail = m->GetOrCreate(); IntegerValue x_lb = integer_trail->LowerBound(x); IntegerValue x_ub = integer_trail->UpperBound(x); IntegerValue y_lb = integer_trail->LowerBound(y); @@ -605,17 +601,17 @@ void TryToAddCutGenerators(const CpModelProto &model_proto, } // namespace -LinearRelaxation ComputeLinearRelaxation(const CpModelProto &model_proto, - int linearization_level, Model *m) { +LinearRelaxation ComputeLinearRelaxation(const CpModelProto& model_proto, + int linearization_level, Model* m) { LinearRelaxation relaxation; // Linearize the constraints. absl::flat_hash_set used_integer_variable; - auto *mapping = m->GetOrCreate(); - auto *encoder = m->GetOrCreate(); - auto *trail = m->GetOrCreate(); - for (const auto &ct : model_proto.constraints()) { + auto* mapping = m->GetOrCreate(); + auto* encoder = m->GetOrCreate(); + auto* trail = m->GetOrCreate(); + for (const auto& ct : model_proto.constraints()) { // Make sure the literals from a circuit constraint always have a view. if (ct.constraint_case() == ConstraintProto::ConstraintCase::kCircuit) { for (const int ref : ct.circuit().literals()) { @@ -694,7 +690,7 @@ LinearRelaxation ComputeLinearRelaxation(const CpModelProto &model_proto, // into maximum "at most one" first and we removes redundant ones. 
m->GetOrCreate()->TransformIntoMaxCliques( &relaxation.at_most_ones); - for (const std::vector &at_most_one : relaxation.at_most_ones) { + for (const std::vector& at_most_one : relaxation.at_most_ones) { if (at_most_one.empty()) continue; LinearConstraintBuilder lc(m, kMinIntegerValue, IntegerValue(1)); @@ -732,8 +728,8 @@ LinearRelaxation ComputeLinearRelaxation(const CpModelProto &model_proto, } // Adds one LinearProgrammingConstraint per connected component of the model. -IntegerVariable AddLPConstraints(const CpModelProto &model_proto, - int linearization_level, Model *m) { +IntegerVariable AddLPConstraints(const CpModelProto& model_proto, + int linearization_level, Model* m) { const LinearRelaxation relaxation = ComputeLinearRelaxation(model_proto, linearization_level, m); @@ -773,7 +769,7 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, // // TODO(user): Because we currently add every at_most_ones (and we clear it) // this code is unused outside of experiments. - for (const std::vector &at_most_one : relaxation.at_most_ones) { + for (const std::vector& at_most_one : relaxation.at_most_ones) { LinearConstraintBuilder builder(m, kMinIntegerValue, IntegerValue(1)); for (const Literal literal : at_most_one) { // Note that it is okay to simply ignore the literal if it has no @@ -802,7 +798,7 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, // as much as possible the objective bound by using any bounds the LP give // us on one of its components. This is critical on the zephyrus problems for // instance. - auto *mapping = m->GetOrCreate(); + auto* mapping = m->GetOrCreate(); for (int i = 0; i < model_proto.objective().coeffs_size(); ++i) { const IntegerVariable var = mapping->Integer(model_proto.objective().vars(i)); @@ -810,9 +806,9 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, } // Dispatch every constraint to its LinearProgrammingConstraint. 
- std::vector lp_constraints(num_components, - nullptr); - std::vector > component_to_constraints( + std::vector lp_constraints(num_components, + nullptr); + std::vector> component_to_constraints( num_components); for (int i = 0; i < num_lp_constraints; i++) { const int c = index_to_component[get_constraint_index(i)]; @@ -835,9 +831,9 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, } // Register "generic" clique (i.e. at most one) cut generator. - const SatParameters ¶ms = *(m->GetOrCreate()); + const SatParameters& params = *(m->GetOrCreate()); if (params.add_clique_cuts() && params.linearization_level() > 1) { - for (LinearProgrammingConstraint *lp : lp_constraints) { + for (LinearProgrammingConstraint* lp : lp_constraints) { if (lp == nullptr) continue; lp->AddCutGenerator(CreateCliqueCutGenerator(lp->integer_variables(), m)); } @@ -853,9 +849,9 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, } // Add the objective. - std::vector > > + std::vector>> component_to_cp_terms(num_components); - std::vector > top_level_cp_terms; + std::vector> top_level_cp_terms; int num_components_containing_objective = 0; if (model_proto.has_objective()) { // First pass: set objective coefficients on the lp constraints, and store @@ -891,7 +887,7 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, // Register LP constraints. Note that this needs to be done after all the // constraints have been added. - for (LinearProgrammingConstraint *lp_constraint : lp_constraints) { + for (LinearProgrammingConstraint* lp_constraint : lp_constraints) { if (lp_constraint == nullptr) continue; lp_constraint->RegisterWith(m); VLOG(3) << "LP constraint: " << lp_constraint->DimensionString() << "."; @@ -907,21 +903,21 @@ IntegerVariable AddLPConstraints(const CpModelProto &model_proto, // Used by NewFeasibleSolutionObserver to register observers. 
struct SolutionObservers { - explicit SolutionObservers(Model *model) {} - std::vector > observers; + explicit SolutionObservers(Model* model) {} + std::vector> observers; }; -std::function NewFeasibleSolutionObserver( - const std::function &observer) { - return [=](Model *model) { +std::function NewFeasibleSolutionObserver( + const std::function& observer) { + return [=](Model* model) { model->GetOrCreate()->observers.push_back(observer); }; } #if !defined(__PORTABLE_PLATFORM__) // TODO(user): Support it on android. -std::function NewSatParameters( - const std::string ¶ms) { +std::function NewSatParameters( + const std::string& params) { sat::SatParameters parameters; if (!params.empty()) { CHECK(google::protobuf::TextFormat::ParseFromString(params, ¶meters)) @@ -931,9 +927,9 @@ std::function NewSatParameters( } #endif // __PORTABLE_PLATFORM__ -std::function NewSatParameters( - const sat::SatParameters ¶meters) { - return [=](Model *model) { +std::function NewSatParameters( + const sat::SatParameters& parameters) { + return [=](Model* model) { // Tricky: It is important to initialize the model parameters before any // of the solver object are created, so that by default they use the given // parameters. @@ -948,14 +944,14 @@ namespace { // Registers a callback that will export variables bounds fixed at level 0 of // the search. This should not be registered to a LNS search. 
void RegisterVariableBoundsLevelZeroExport( - const CpModelProto &model_proto, SharedBoundsManager *shared_bounds_manager, - Model *model) { + const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager, + Model* model) { CHECK(shared_bounds_manager != nullptr); int saved_trail_index = 0; const auto broadcast_level_zero_bounds = [&model_proto, saved_trail_index, model, shared_bounds_manager]( - const std::vector &modified_vars) mutable { - CpModelMapping *const mapping = model->GetOrCreate(); + const std::vector& modified_vars) mutable { + CpModelMapping* const mapping = model->GetOrCreate(); std::vector model_variables; std::vector new_lower_bounds; @@ -963,8 +959,8 @@ void RegisterVariableBoundsLevelZeroExport( absl::flat_hash_set visited_variables; // Inspect the modified IntegerVariables. - auto *integer_trail = model->Get(); - for (const IntegerVariable &var : modified_vars) { + auto* integer_trail = model->Get(); + for (const IntegerVariable& var : modified_vars) { const IntegerVariable positive_var = PositiveVariable(var); const int model_var = mapping->GetProtoVariableFromIntegerVariable(positive_var); @@ -988,7 +984,7 @@ void RegisterVariableBoundsLevelZeroExport( } // Inspect the newly modified Booleans. - auto *trail = model->Get(); + auto* trail = model->Get(); for (; saved_trail_index < trail->Index(); ++saved_trail_index) { const Literal fixed_literal = (*trail)[saved_trail_index]; const int model_var = mapping->GetProtoVariableFromBooleanVariable( @@ -1031,14 +1027,14 @@ void RegisterVariableBoundsLevelZeroExport( // shared_bounds_manager. These bounds are imported at level 0 of the search // in the linear scan minimize function. 
void RegisterVariableBoundsLevelZeroImport( - const CpModelProto &model_proto, SharedBoundsManager *shared_bounds_manager, - Model *model) { + const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager, + Model* model) { CHECK(shared_bounds_manager != nullptr); - auto *integer_trail = model->GetOrCreate(); - CpModelMapping *const mapping = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + CpModelMapping* const mapping = model->GetOrCreate(); const int id = shared_bounds_manager->RegisterNewId(); - const auto &import_level_zero_bounds = [&model_proto, shared_bounds_manager, + const auto& import_level_zero_bounds = [&model_proto, shared_bounds_manager, model, integer_trail, id, mapping]() { std::vector model_variables; std::vector new_lower_bounds; @@ -1062,9 +1058,9 @@ void RegisterVariableBoundsLevelZeroImport( new_bounds_have_been_imported = true; if (VLOG_IS_ON(3)) { - const IntegerVariableProto &var_proto = + const IntegerVariableProto& var_proto = model_proto.variables(model_var); - const std::string &var_name = + const std::string& var_name = var_proto.name().empty() ? absl::StrCat("anonymous_var(", model_var, ")") : var_proto.name(); @@ -1098,11 +1094,11 @@ void RegisterVariableBoundsLevelZeroImport( // It will be called each time new objective bound are propagated at level zero. 
void RegisterObjectiveBestBoundExport( IntegerVariable objective_var, - SharedResponseManager *shared_response_manager, Model *model) { - auto *integer_trail = model->Get(); + SharedResponseManager* shared_response_manager, Model* model) { + auto* integer_trail = model->Get(); const auto broadcast_objective_lower_bound = [objective_var, integer_trail, shared_response_manager, - model](const std::vector &unused) { + model](const std::vector& unused) { shared_response_manager->UpdateInnerObjectiveBounds( model->Name(), integer_trail->LevelZeroLowerBound(objective_var), integer_trail->LevelZeroUpperBound(objective_var)); @@ -1120,10 +1116,10 @@ void RegisterObjectiveBestBoundExport( // time the search main loop is back to level zero. Note that it the presence of // assumptions, this will not happen until the set of assumptions is changed. void RegisterObjectiveBoundsImport( - SharedResponseManager *shared_response_manager, Model *model) { - auto *solver = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); - auto *objective = model->GetOrCreate(); + SharedResponseManager* shared_response_manager, Model* model) { + auto* solver = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* objective = model->GetOrCreate(); const std::string name = model->Name(); const auto import_objective_bounds = [name, solver, integer_trail, objective, shared_response_manager]() { @@ -1171,11 +1167,11 @@ void RegisterObjectiveBoundsImport( import_objective_bounds); } -void LoadBaseModel(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - Model *model) { +void LoadBaseModel(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + Model* model) { CHECK(shared_response_manager != nullptr); - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); // Simple function for the few places where we do "return unsat()". 
const auto unsat = [shared_response_manager, sat_solver, model] { @@ -1187,8 +1183,8 @@ void LoadBaseModel(const CpModelProto &model_proto, // We will add them all at once after model_proto is loaded. model->GetOrCreate()->DisableImplicationBetweenLiteral(); - auto *mapping = model->GetOrCreate(); - const SatParameters ¶meters = *(model->GetOrCreate()); + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); const bool view_all_booleans_as_integers = (parameters.linearization_level() >= 2) || (parameters.search_branching() == SatParameters::FIXED_SEARCH && @@ -1207,7 +1203,7 @@ void LoadBaseModel(const CpModelProto &model_proto, // Load the constraints. std::set unsupported_types; int num_ignored_constraints = 0; - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { if (mapping->ConstraintIsAlreadyLoaded(&ct)) { ++num_ignored_constraints; continue; @@ -1225,7 +1221,7 @@ void LoadBaseModel(const CpModelProto &model_proto, // certain types of problems with millions of constraints. 
if (DEBUG_MODE) { if (sat_solver->FinishPropagation()) { - Trail *trail = model->GetOrCreate(); + Trail* trail = model->GetOrCreate(); const int old_num_fixed = trail->Index(); if (trail->Index() > old_num_fixed) { VLOG(3) << "Constraint fixed " << trail->Index() - old_num_fixed @@ -1245,7 +1241,7 @@ void LoadBaseModel(const CpModelProto &model_proto, } if (!unsupported_types.empty()) { VLOG(1) << "There is unsupported constraints types in this model: "; - for (const std::string &type : unsupported_types) { + for (const std::string& type : unsupported_types) { VLOG(1) << " - " << type; } return unsat(); @@ -1256,15 +1252,15 @@ void LoadBaseModel(const CpModelProto &model_proto, if (!sat_solver->FinishPropagation()) return unsat(); } -void LoadFeasibilityPump(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - Model *model) { +void LoadFeasibilityPump(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + Model* model) { CHECK(shared_response_manager != nullptr); LoadBaseModel(model_proto, shared_response_manager, model); - auto *mapping = model->GetOrCreate(); - const SatParameters ¶meters = *(model->GetOrCreate()); + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); if (parameters.linearization_level() == 0) return; // Add linear constraints to Feasibility Pump. @@ -1272,7 +1268,7 @@ void LoadFeasibilityPump(const CpModelProto &model_proto, model_proto, parameters.linearization_level(), model); const int num_lp_constraints = relaxation.linear_constraints.size(); if (num_lp_constraints == 0) return; - auto *feasibility_pump = model->GetOrCreate(); + auto* feasibility_pump = model->GetOrCreate(); for (int i = 0; i < num_lp_constraints; i++) { feasibility_pump->AddLinearConstraint(relaxation.linear_constraints[i]); } @@ -1291,10 +1287,10 @@ void LoadFeasibilityPump(const CpModelProto &model_proto, // This should only be called once on a given 'Model' class. 
// // TODO(user): move to cp_model_loader.h/.cc -void LoadCpModel(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, Model *model) { +void LoadCpModel(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, Model* model) { CHECK(shared_response_manager != nullptr); - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); LoadBaseModel(model_proto, shared_response_manager, model); @@ -1305,8 +1301,8 @@ void LoadCpModel(const CpModelProto &model_proto, absl::StrCat(model->Name(), " [loading]")); }; - auto *mapping = model->GetOrCreate(); - const SatParameters ¶meters = *(model->GetOrCreate()); + auto* mapping = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); // Auto detect "at least one of" constraints in the PrecedencesPropagator. // Note that we do that before we finish loading the problem (objective and @@ -1341,8 +1337,8 @@ void LoadCpModel(const CpModelProto &model_proto, objective_var = AddLPConstraints(model_proto, parameters.linearization_level(), model); } else if (model_proto.has_objective()) { - const CpObjectiveProto &obj = model_proto.objective(); - std::vector > terms; + const CpObjectiveProto& obj = model_proto.objective(); + std::vector> terms; terms.reserve(obj.vars_size()); for (int i = 0; i < obj.vars_size(); ++i) { terms.push_back( @@ -1358,8 +1354,8 @@ void LoadCpModel(const CpModelProto &model_proto, // Create the objective definition inside the Model so that it can be accessed // by the heuristics than needs it. 
if (objective_var != kNoIntegerVariable) { - const CpObjectiveProto &objective_proto = model_proto.objective(); - auto *objective_definition = model->GetOrCreate(); + const CpObjectiveProto& objective_proto = model_proto.objective(); + auto* objective_definition = model->GetOrCreate(); objective_definition->scaling_factor = objective_proto.scaling_factor(); if (objective_definition->scaling_factor == 0.0) { @@ -1420,7 +1416,7 @@ void LoadCpModel(const CpModelProto &model_proto, if (!automatic_domain.IsIncludedIn(user_domain)) { std::vector vars; std::vector coeffs; - const CpObjectiveProto &obj = model_proto.objective(); + const CpObjectiveProto& obj = model_proto.objective(); for (int i = 0; i < obj.vars_size(); ++i) { vars.push_back(mapping->Integer(obj.vars(i))); coeffs.push_back(obj.coeffs(i)); @@ -1437,7 +1433,7 @@ void LoadCpModel(const CpModelProto &model_proto, if (model_proto.has_objective()) { // Report the initial objective variable bounds. - auto *integer_trail = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); shared_response_manager->UpdateInnerObjectiveBounds( "init", integer_trail->LowerBound(objective_var), integer_trail->UpperBound(objective_var)); @@ -1456,9 +1452,9 @@ void LoadCpModel(const CpModelProto &model_proto, // Cache the links between model vars, IntegerVariables and lp constraints. // TODO(user): Cache this only if it is actually used. - auto *integer_trail = model->GetOrCreate(); - auto *lp_dispatcher = model->GetOrCreate(); - auto *lp_vars = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* lp_dispatcher = model->GetOrCreate(); + auto* lp_vars = model->GetOrCreate(); IntegerVariable size = integer_trail->NumIntegerVariables(); for (IntegerVariable positive_var(0); positive_var < size; positive_var += 2) { @@ -1476,7 +1472,7 @@ void LoadCpModel(const CpModelProto &model_proto, } // Initialize the fixed_search strategy. 
- auto *search_heuristics = model->GetOrCreate(); + auto* search_heuristics = model->GetOrCreate(); search_heuristics->fixed_search = ConstructSearchStrategy( model_proto, mapping->GetVariableMapping(), objective_var, model); if (VLOG_IS_ON(3)) { @@ -1515,8 +1511,8 @@ void LoadCpModel(const CpModelProto &model_proto, shared_response_manager->NewSolution(response, model); }; - const auto &objective = *model->GetOrCreate(); - CoreBasedOptimizer *core = + const auto& objective = *model->GetOrCreate(); + CoreBasedOptimizer* core = new CoreBasedOptimizer(objective_var, objective.vars, objective.coeffs, solution_observer, model); model->Register(core); @@ -1530,12 +1526,12 @@ void LoadCpModel(const CpModelProto &model_proto, // TODO(user): This should be transformed so that it can be called many times // and resume from the last search state as if it wasn't interuped. That would // allow use to easily interleave different heuristics in the same thread. -void SolveLoadedCpModel(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - Model *model) { +void SolveLoadedCpModel(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + Model* model) { if (shared_response_manager->ProblemIsSolved()) return; - const std::string &solution_info = model->Name(); + const std::string& solution_info = model->Name(); const auto solution_observer = [&model_proto, &model, &solution_info, &shared_response_manager]() { CpSolverResponse response; @@ -1547,9 +1543,9 @@ void SolveLoadedCpModel(const CpModelProto &model_proto, // Reconfigure search heuristic if it was changed. 
ConfigureSearchHeuristics(model); - const auto &mapping = *model->GetOrCreate(); + const auto& mapping = *model->GetOrCreate(); SatSolver::Status status; - const SatParameters ¶meters = *model->GetOrCreate(); + const SatParameters& parameters = *model->GetOrCreate(); if (!model_proto.has_objective()) { while (true) { status = ResetAndSolveIntegerProblem( @@ -1568,7 +1564,7 @@ void SolveLoadedCpModel(const CpModelProto &model_proto, solution_info); // Extract a good subset of assumptions and add it to the response. - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); std::vector core = sat_solver->GetLastIncompatibleDecisions(); MinimizeCoreWithPropagation(sat_solver, &core); std::vector core_in_proto_format; @@ -1583,7 +1579,7 @@ void SolveLoadedCpModel(const CpModelProto &model_proto, } } else { // Optimization problem. - const auto &objective = *model->GetOrCreate(); + const auto& objective = *model->GetOrCreate(); const IntegerVariable objective_var = objective.objective_var; CHECK_NE(objective_var, kNoIntegerVariable); @@ -1624,14 +1620,14 @@ void SolveLoadedCpModel(const CpModelProto &model_proto, // Try to find a solution by following the hint and using a low conflict limit. // The CpModelProto must already be loaded in the Model. -void QuickSolveWithHint(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - Model *model) { +void QuickSolveWithHint(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + Model* model) { if (!model_proto.has_solution_hint()) return; if (shared_response_manager->ProblemIsSolved()) return; // Temporarily change the parameters. 
- auto *parameters = model->GetOrCreate(); + auto* parameters = model->GetOrCreate(); const SatParameters saved_params = *parameters; parameters->set_max_number_of_conflicts(parameters->hint_conflict_limit()); parameters->set_search_branching(SatParameters::HINT_SEARCH); @@ -1641,11 +1637,11 @@ void QuickSolveWithHint(const CpModelProto &model_proto, // Solve decision problem. ConfigureSearchHeuristics(model); - const auto &mapping = *model->GetOrCreate(); + const auto& mapping = *model->GetOrCreate(); const SatSolver::Status status = ResetAndSolveIntegerProblem( mapping.Literals(model_proto.assumptions()), model); - const std::string &solution_info = model->Name(); + const std::string& solution_info = model->Name(); if (status == SatSolver::Status::FEASIBLE) { CpSolverResponse response; FillSolutionInResponse(model_proto, *model, &response); @@ -1661,7 +1657,7 @@ void QuickSolveWithHint(const CpModelProto &model_proto, const IntegerVariable objective_var = model->GetOrCreate()->objective_var; model->GetOrCreate()->Backtrack(0); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); if (!integer_trail->Enqueue( IntegerLiteral::LowerOrEqual( objective_var, @@ -1679,16 +1675,16 @@ void QuickSolveWithHint(const CpModelProto &model_proto, // Solve a model with a different objective consisting of minimizing the L1 // distance with the provided hint. Note that this method creates an in-memory // copy of the model and loads a local Model object from the copied model. 
-void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - WallTimer *wall_timer, - SharedTimeLimit *shared_time_limit, - Model *model) { +void MinimizeL1DistanceWithHint(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + WallTimer* wall_timer, + SharedTimeLimit* shared_time_limit, + Model* model) { Model local_model; if (!model_proto.has_solution_hint()) return; if (shared_response_manager->ProblemIsSolved()) return; - auto *parameters = local_model.GetOrCreate(); + auto* parameters = local_model.GetOrCreate(); // TODO(user): As of now the repair hint doesn't support when // enumerate_all_solutions is set since the solution is created on a different // model. @@ -1711,7 +1707,7 @@ void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, // Add a new var to represent the difference between var and value. const int new_var_index = updated_model_proto.variables_size(); - IntegerVariableProto *var_proto = updated_model_proto.add_variables(); + IntegerVariableProto* var_proto = updated_model_proto.add_variables(); const int64 min_domain = model_proto.variables(var).domain(0) - value; const int64 max_domain = model_proto.variables(var).domain( model_proto.variables(var).domain_size() - 1) - @@ -1720,9 +1716,9 @@ void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, var_proto->add_domain(max_domain); // new_var = var - value. - ConstraintProto *const linear_constraint_proto = + ConstraintProto* const linear_constraint_proto = updated_model_proto.add_constraints(); - LinearConstraintProto *linear = linear_constraint_proto->mutable_linear(); + LinearConstraintProto* linear = linear_constraint_proto->mutable_linear(); linear->add_vars(new_var_index); linear->add_coeffs(1); linear->add_vars(var); @@ -1732,13 +1728,13 @@ void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, // abs_var = abs(new_var). 
const int abs_var_index = updated_model_proto.variables_size(); - IntegerVariableProto *abs_var_proto = updated_model_proto.add_variables(); + IntegerVariableProto* abs_var_proto = updated_model_proto.add_variables(); const int64 abs_min_domain = 0; const int64 abs_max_domain = std::max(std::abs(min_domain), std::abs(max_domain)); abs_var_proto->add_domain(abs_min_domain); abs_var_proto->add_domain(abs_max_domain); - ConstraintProto *const abs_constraint_proto = + ConstraintProto* const abs_constraint_proto = updated_model_proto.add_constraints(); abs_constraint_proto->mutable_int_max()->set_target(abs_var_index); abs_constraint_proto->mutable_int_max()->add_vars(new_var_index); @@ -1759,11 +1755,11 @@ void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, LoadCpModel(updated_model_proto, &local_response_manager, &local_model); ConfigureSearchHeuristics(&local_model); - const auto &mapping = *local_model.GetOrCreate(); + const auto& mapping = *local_model.GetOrCreate(); const SatSolver::Status status = ResetAndSolveIntegerProblem( mapping.Literals(updated_model_proto.assumptions()), &local_model); - const std::string &solution_info = model->Name(); + const std::string& solution_info = model->Name(); if (status == SatSolver::Status::FEASIBLE) { CpSolverResponse response; FillSolutionInResponse(model_proto, local_model, &response); @@ -1786,8 +1782,8 @@ void MinimizeL1DistanceWithHint(const CpModelProto &model_proto, // the model before presolve. void PostsolveResponseWithFullSolver( const int64 num_variables_in_original_model, CpModelProto mapping_proto, - const std::vector &postsolve_mapping, WallTimer *wall_timer, - CpSolverResponse *response) { + const std::vector& postsolve_mapping, WallTimer* wall_timer, + CpSolverResponse* response) { if (response->status() != CpSolverStatus::FEASIBLE && response->status() != CpSolverStatus::OPTIMAL) { return; @@ -1800,13 +1796,13 @@ void PostsolveResponseWithFullSolver( // Postsolve. 
for (int i = 0; i < response->solution_size(); ++i) { - auto *var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); + auto* var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); var_proto->clear_domain(); var_proto->add_domain(response->solution(i)); var_proto->add_domain(response->solution(i)); } for (int i = 0; i < response->solution_lower_bounds_size(); ++i) { - auto *var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); + auto* var_proto = mapping_proto.mutable_variables(postsolve_mapping[i]); FillDomainInProto( ReadDomainFromProto(*var_proto) .IntersectionWith({response->solution_lower_bounds(i), @@ -1855,12 +1851,12 @@ void PostsolveResponseWithFullSolver( } } -void PostsolveResponseWrapper(const SatParameters ¶ms, +void PostsolveResponseWrapper(const SatParameters& params, const int64 num_variables_in_original_model, - const CpModelProto &mapping_proto, - const std::vector &postsolve_mapping, - WallTimer *wall_timer, - CpSolverResponse *response) { + const CpModelProto& mapping_proto, + const std::vector& postsolve_mapping, + WallTimer* wall_timer, + CpSolverResponse* response) { if (params.cp_model_postsolve_with_full_solver()) { PostsolveResponseWithFullSolver(num_variables_in_original_model, mapping_proto, postsolve_mapping, @@ -1872,8 +1868,8 @@ void PostsolveResponseWrapper(const SatParameters ¶ms, } // TODO(user): Uniformize this function with the other one. 
-CpSolverResponse SolvePureSatModel(const CpModelProto &model_proto, - WallTimer *wall_timer, Model *model) { +CpSolverResponse SolvePureSatModel(const CpModelProto& model_proto, + WallTimer* wall_timer, Model* model) { std::unique_ptr solver(new SatSolver()); SatParameters parameters = *model->GetOrCreate(); solver->SetParameters(parameters); @@ -1885,7 +1881,7 @@ CpSolverResponse SolvePureSatModel(const CpModelProto &model_proto, if (!absl::GetFlag(FLAGS_drat_output).empty() || absl::GetFlag(FLAGS_drat_check)) { if (!absl::GetFlag(FLAGS_drat_output).empty()) { - File *output; + File* output; CHECK_OK(file::Open(absl::GetFlag(FLAGS_drat_output), "w", &output, file::Defaults())); drat_proof_handler = absl::make_unique( @@ -1918,7 +1914,7 @@ CpSolverResponse SolvePureSatModel(const CpModelProto &model_proto, drat_proof_handler->AddProblemClause({ref_literal}); } } - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::kBoolAnd: { if (ct.enforcement_literal_size() == 0) { @@ -1948,7 +1944,7 @@ CpSolverResponse SolvePureSatModel(const CpModelProto &model_proto, } } - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::kBoolAnd: { if (ct.enforcement_literal_size() == 0) { @@ -2079,14 +2075,14 @@ CpSolverResponse SolvePureSatModel(const CpModelProto &model_proto, // Small wrapper to simplify the constructions of the two SubSolver below. 
struct SharedClasses { - CpModelProto const *model_proto; - WallTimer *wall_timer; - SharedTimeLimit *time_limit; - SharedBoundsManager *bounds; - SharedResponseManager *response; - SharedRelaxationSolutionRepository *relaxation_solutions; - SharedLPSolutionRepository *lp_solutions; - SharedIncompleteSolutionManager *incomplete_solutions; + CpModelProto const* model_proto; + WallTimer* wall_timer; + SharedTimeLimit* time_limit; + SharedBoundsManager* bounds; + SharedResponseManager* response; + SharedRelaxationSolutionRepository* relaxation_solutions; + SharedLPSolutionRepository* lp_solutions; + SharedIncompleteSolutionManager* incomplete_solutions; bool SearchIsDone() { if (response->ProblemIsSolved()) return true; @@ -2098,9 +2094,9 @@ struct SharedClasses { // Encapsulate a full CP-SAT solve without presolve in the SubSolver API. class FullProblemSolver : public SubSolver { public: - FullProblemSolver(const std::string &name, - const SatParameters &local_parameters, bool split_in_chunks, - SharedClasses *shared) + FullProblemSolver(const std::string& name, + const SatParameters& local_parameters, bool split_in_chunks, + SharedClasses* shared) : SubSolver(name), shared_(shared), split_in_chunks_(split_in_chunks), @@ -2173,11 +2169,11 @@ class FullProblemSolver : public SubSolver { } } - auto *time_limit = local_model_->GetOrCreate(); + auto* time_limit = local_model_->GetOrCreate(); if (split_in_chunks_) { // Configure time limit for chunk solving. Note that we do not want // to do that for the hint search for now. - auto *params = local_model_->GetOrCreate(); + auto* params = local_model_->GetOrCreate(); params->set_max_deterministic_time(1); time_limit->ResetLimitFromParameters(*params); shared_->time_limit->UpdateLocalLimit(time_limit); @@ -2213,10 +2209,8 @@ class FullProblemSolver : public SubSolver { }; } - // TODO(user): A few of the information sharing we do between threads does - // not - // happen here (bound sharing, RINS neighborhood, objective). 
Fix that so - // we + // TODO(user): A few of the information sharing we do between threads does not + // happen here (bound sharing, RINS neighborhood, objective). Fix that so we // can have a deterministic parallel mode. void Synchronize() override { absl::MutexLock mutex_lock(&mutex_); @@ -2227,7 +2221,7 @@ class FullProblemSolver : public SubSolver { } private: - SharedClasses *shared_; + SharedClasses* shared_; const bool split_in_chunks_; std::unique_ptr local_model_; @@ -2243,8 +2237,8 @@ class FullProblemSolver : public SubSolver { class FeasibilityPumpSolver : public SubSolver { public: - FeasibilityPumpSolver(const SatParameters &local_parameters, - SharedClasses *shared) + FeasibilityPumpSolver(const SatParameters& local_parameters, + SharedClasses* shared) : SubSolver("feasibility_pump"), shared_(shared), local_model_(absl::make_unique(name_)) { @@ -2306,9 +2300,9 @@ class FeasibilityPumpSolver : public SubSolver { } } - auto *time_limit = local_model_->GetOrCreate(); + auto* time_limit = local_model_->GetOrCreate(); const double saved_dtime = time_limit->GetElapsedDeterministicTime(); - auto *feasibility_pump = local_model_->Mutable(); + auto* feasibility_pump = local_model_->Mutable(); if (!feasibility_pump->Solve()) { shared_->response->NotifyThatImprovingProblemIsInfeasible(name_); } @@ -2339,7 +2333,7 @@ class FeasibilityPumpSolver : public SubSolver { } private: - SharedClasses *shared_; + SharedClasses* shared_; std::unique_ptr local_model_; absl::Mutex mutex_; @@ -2357,8 +2351,8 @@ class FeasibilityPumpSolver : public SubSolver { class LnsSolver : public SubSolver { public: LnsSolver(std::unique_ptr generator, - const SatParameters ¶meters, - NeighborhoodGeneratorHelper *helper, SharedClasses *shared) + const SatParameters& parameters, + NeighborhoodGeneratorHelper* helper, SharedClasses* shared) : SubSolver(generator->name()), generator_(std::move(generator)), helper_(helper), @@ -2389,7 +2383,7 @@ class LnsSolver : public SubSolver { // Choose 
a base solution for this neighborhood. CpSolverResponse base_response; { - const SharedSolutionRepository &repo = + const SharedSolutionRepository& repo = shared_->response->SolutionsRepository(); if (repo.NumSolutions() > 0) { base_response.set_status(CpSolverStatus::FEASIBLE); @@ -2454,7 +2448,7 @@ class LnsSolver : public SubSolver { Model local_model; local_model.Add(NewSatParameters(local_params)); - TimeLimit *local_time_limit = local_model.GetOrCreate(); + TimeLimit* local_time_limit = local_model.GetOrCreate(); shared_->time_limit->UpdateLocalLimit(local_time_limit); const int64 num_neighborhood_model_vars = @@ -2544,7 +2538,8 @@ class LnsSolver : public SubSolver { *shared_->model_proto, std::vector(local_response.solution().begin(), local_response.solution().end()))) { - shared_->response->NewSolution(local_response, /*model=*/nullptr); + shared_->response->NewSolution(local_response, + /*model=*/nullptr); // Mark the solution optimal if the relaxation status is optimal. if (local_response.status() == CpSolverStatus::OPTIMAL) { @@ -2575,7 +2570,8 @@ class LnsSolver : public SubSolver { // Report any feasible solution we have. 
if (local_response.status() == CpSolverStatus::OPTIMAL || local_response.status() == CpSolverStatus::FEASIBLE) { - shared_->response->NewSolution(local_response, /*model=*/nullptr); + shared_->response->NewSolution(local_response, + /*model=*/nullptr); } if (!neighborhood.is_reduced && (local_response.status() == CpSolverStatus::OPTIMAL || @@ -2600,6 +2596,7 @@ class LnsSolver : public SubSolver { << ", p: " << fully_solved_proportion << "]"; }; } + void Synchronize() override { generator_->Synchronize(); const double old = deterministic_time_; @@ -2609,17 +2606,17 @@ class LnsSolver : public SubSolver { private: std::unique_ptr generator_; - NeighborhoodGeneratorHelper *helper_; + NeighborhoodGeneratorHelper* helper_; const SatParameters parameters_; - SharedClasses *shared_; + SharedClasses* shared_; }; -void SolveCpModelParallel(const CpModelProto &model_proto, - SharedResponseManager *shared_response_manager, - SharedTimeLimit *shared_time_limit, - WallTimer *wall_timer, Model *global_model) { +void SolveCpModelParallel(const CpModelProto& model_proto, + SharedResponseManager* shared_response_manager, + SharedTimeLimit* shared_time_limit, + WallTimer* wall_timer, Model* global_model) { CHECK(shared_response_manager != nullptr); - const SatParameters ¶meters = *global_model->GetOrCreate(); + const SatParameters& parameters = *global_model->GetOrCreate(); const int num_search_workers = parameters.num_search_workers(); const bool log_search = parameters.log_search_progress() || VLOG_IS_ON(1); CHECK(!parameters.enumerate_all_solutions()) @@ -2669,7 +2666,7 @@ void SolveCpModelParallel(const CpModelProto &model_proto, shared.incomplete_solutions = shared_incomplete_solutions.get(); // The list of all the SubSolver that will be used in this parallel search. - std::vector > subsolvers; + std::vector> subsolvers; // Add a synchronization point for the shared classes. 
subsolvers.push_back(absl::make_unique( @@ -2696,9 +2693,10 @@ void SolveCpModelParallel(const CpModelProto &model_proto, local_params.set_stop_after_first_solution(true); local_params.set_linearization_level(0); subsolvers.push_back(absl::make_unique( - "first_solution", local_params, /*split_in_chunks=*/false, &shared)); + "first_solution", local_params, + /*split_in_chunks=*/false, &shared)); } else { - for (const SatParameters &local_params : GetDiverseSetOfParameters( + for (const SatParameters& local_params : GetDiverseSetOfParameters( parameters, model_proto, num_search_workers)) { // TODO(user): This is currently not supported here. if (parameters.optimize_with_max_hs()) continue; @@ -2722,13 +2720,13 @@ void SolveCpModelParallel(const CpModelProto &model_proto, auto unique_helper = absl::make_unique( &model_proto, ¶meters, shared_response_manager, shared_time_limit, shared_bounds_manager.get()); - NeighborhoodGeneratorHelper *helper = unique_helper.get(); + NeighborhoodGeneratorHelper* helper = unique_helper.get(); subsolvers.push_back(std::move(unique_helper)); const int num_lns_strategies = parameters.diversify_lns_params() ? 6 : 1; - const std::vector &lns_params = + const std::vector& lns_params = GetDiverseSetOfParameters(parameters, model_proto, num_lns_strategies); - for (const SatParameters &local_params : lns_params) { + for (const SatParameters& local_params : lns_params) { // Only register following LNS SubSolver if there is an objective. if (model_proto.has_objective()) { // Enqueue all the possible LNS neighborhood subsolvers. @@ -2810,12 +2808,11 @@ void SolveCpModelParallel(const CpModelProto &model_proto, // Log the name of all our SubSolvers. 
if (log_search) { std::vector names; - for (const auto &subsolver : subsolvers) { + for (const auto& subsolver : subsolvers) { if (!subsolver->name().empty()) names.push_back(subsolver->name()); } LOG(INFO) << absl::StrFormat( - "*** starting Search at %.2fs with %i workers " - "and subsolvers: [ %s ]", + "*** starting Search at %.2fs with %i workers and subsolvers: [ %s ]", wall_timer->Get(), num_search_workers, absl::StrJoin(names, ", ")); } @@ -2833,12 +2830,12 @@ void SolveCpModelParallel(const CpModelProto &model_proto, // If the option use_sat_inprocessing is true, then before postsolving a // solution, we need to make sure we add any new clause required for postsolving // to the mapping_model. -void AddPostsolveClauses(const std::vector &postsolve_mapping, - Model *model, CpModelProto *mapping_proto) { - auto *mapping = model->GetOrCreate(); - auto *postsolve = model->GetOrCreate(); - for (const auto &clause : postsolve->clauses) { - auto *ct = mapping_proto->add_constraints()->mutable_bool_or(); +void AddPostsolveClauses(const std::vector& postsolve_mapping, + Model* model, CpModelProto* mapping_proto) { + auto* mapping = model->GetOrCreate(); + auto* postsolve = model->GetOrCreate(); + for (const auto& clause : postsolve->clauses) { + auto* ct = mapping_proto->add_constraints()->mutable_bool_or(); for (const Literal l : clause) { int var = mapping->GetProtoVariableFromBooleanVariable(l.Variable()); CHECK_NE(var, -1); @@ -2851,7 +2848,7 @@ void AddPostsolveClauses(const std::vector &postsolve_mapping, } // namespace -CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { +CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { WallTimer wall_timer; UserTimer user_timer; wall_timer.Start(); @@ -2868,7 +2865,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { CHECK_OK(file::SetTextProto(file, model_proto, file::Defaults())); } - absl::Cleanup > dump_response_cleanup; + 
absl::Cleanup> dump_response_cleanup; if (absl::GetFlag(FLAGS_cp_model_dump_response)) { dump_response_cleanup = absl::MakeCleanup([&final_response] { const std::string file = absl::StrCat( @@ -2895,12 +2892,12 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { } #endif // __PORTABLE_PLATFORM__ - const SatParameters ¶ms = *model->GetOrCreate(); + const SatParameters& params = *model->GetOrCreate(); const bool log_search = params.log_search_progress() || VLOG_IS_ON(1); LOG_IF(INFO, log_search) << "Parameters: " << params.ShortDebugString(); // Always display the final response stats if requested. - absl::Cleanup > display_response_cleanup; + absl::Cleanup> display_response_cleanup; if (log_search) { display_response_cleanup = absl::MakeCleanup([&final_response, &model_proto] { @@ -2930,14 +2927,14 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { !params.use_lns_only() && params.num_search_workers() <= 1 && model_proto.assumptions().empty()) { bool is_pure_sat = true; - for (const IntegerVariableProto &var : model_proto.variables()) { + for (const IntegerVariableProto& var : model_proto.variables()) { if (var.domain_size() != 2 || var.domain(0) < 0 || var.domain(1) > 1) { is_pure_sat = false; break; } } if (is_pure_sat) { - for (const ConstraintProto &ct : model_proto.constraints()) { + for (const ConstraintProto& ct : model_proto.constraints()) { if (ct.constraint_case() != ConstraintProto::ConstraintCase::kBoolOr && ct.constraint_case() != ConstraintProto::ConstraintCase::kBoolAnd) { is_pure_sat = false; @@ -2953,7 +2950,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { final_response.set_user_time(user_timer.Get()); final_response.set_deterministic_time( shared_time_limit.GetElapsedDeterministicTime()); - const SatParameters ¶ms = *model->GetOrCreate(); + const SatParameters& params = *model->GetOrCreate(); if (params.fill_tightened_domains_in_response()) { 
*final_response.mutable_tightened_variables() = model_proto.variables(); } @@ -3009,7 +3006,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { if (params.cp_model_presolve()) { postprocess_solution = [&model_proto, ¶ms, &mapping_proto, &shared_time_limit, &postsolve_mapping, &wall_timer, - &user_timer, model](CpSolverResponse *response) { + &user_timer, model](CpSolverResponse* response) { AddPostsolveClauses(postsolve_mapping, model, &mapping_proto); PostsolveResponseWrapper(params, model_proto.variables_size(), mapping_proto, postsolve_mapping, &wall_timer, @@ -3039,7 +3036,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { } else { postprocess_solution = [&model_proto, ¶ms, &wall_timer, &shared_time_limit, - &user_timer](CpSolverResponse *response) { + &user_timer](CpSolverResponse* response) { // Truncate the solution in case model expansion added more variables. const int initial_size = model_proto.variables_size(); if (response->solution_size() > 0) { @@ -3068,12 +3065,12 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { absl::GetFlag(FLAGS_cp_model_dump_prefix)); shared_response_manager.SetGapLimitsFromParameters(params); model->Register(&shared_response_manager); - const auto &observers = model->GetOrCreate()->observers; + const auto& observers = model->GetOrCreate()->observers; if (!observers.empty()) { shared_response_manager.AddSolutionCallback( [&model_proto, &observers, &wall_timer, &user_timer, &postprocess_solution, &shared_time_limit]( - const CpSolverResponse &response_of_presolved_problem) { + const CpSolverResponse& response_of_presolved_problem) { CpSolverResponse response = response_of_presolved_problem; postprocess_solution(&response); if (!response.solution().empty()) { @@ -3085,7 +3082,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { } } - for (const auto &observer : observers) { + for (const auto& observer : 
observers) { observer(response); } }); @@ -3109,7 +3106,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { if (params.stop_after_presolve() || shared_time_limit.LimitReached()) { int64 num_terms = 0; - for (const ConstraintProto &ct : new_cp_model_proto.constraints()) { + for (const ConstraintProto& ct : new_cp_model_proto.constraints()) { num_terms += UsedVariables(ct).size(); } LOG_IF(INFO, log_search) @@ -3130,7 +3127,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { if (params.stop_after_first_solution()) { shared_response_manager.AddSolutionCallback( [&shared_time_limit]( - const CpSolverResponse &response_of_presolved_problem) { + const CpSolverResponse& response_of_presolved_problem) { shared_time_limit.Stop(); }); } @@ -3138,7 +3135,7 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { #if defined(__PORTABLE_PLATFORM__) if (/* DISABLES CODE */ (false)) { // We ignore the multithreading parameter in this case. 
-#else // __PORTABLE_PLATFORM__ +#else // __PORTABLE_PLATFORM__ if (params.num_search_workers() > 1 || params.interleave_search()) { SolveCpModelParallel(new_cp_model_proto, &shared_response_manager, &shared_time_limit, &wall_timer, model); @@ -3181,21 +3178,21 @@ CpSolverResponse SolveCpModel(const CpModelProto &model_proto, Model *model) { return final_response; } -CpSolverResponse Solve(const CpModelProto &model_proto) { +CpSolverResponse Solve(const CpModelProto& model_proto) { Model model; return SolveCpModel(model_proto, &model); } -CpSolverResponse SolveWithParameters(const CpModelProto &model_proto, - const SatParameters ¶ms) { +CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, + const SatParameters& params) { Model model; model.Add(NewSatParameters(params)); return SolveCpModel(model_proto, &model); } #if !defined(__PORTABLE_PLATFORM__) -CpSolverResponse SolveWithParameters(const CpModelProto &model_proto, - const std::string ¶ms) { +CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, + const std::string& params) { Model model; model.Add(NewSatParameters(params)); return SolveCpModel(model_proto, &model); diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index cc7ff8bb10..7c53e2a21e 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -28,7 +28,7 @@ namespace sat { namespace { struct VectorHash { - std::size_t operator()(const std::vector &values) const { + std::size_t operator()(const std::vector& values) const { size_t hash = 0; for (const int64 value : values) { hash = util_hash::Hash(value, hash); @@ -45,7 +45,7 @@ class IdGenerator { // If the key was never seen before, then generate a new id, otherwise return // the previously generated id. 
- int GetId(const std::vector &key) { + int GetId(const std::vector& key) { return gtl::LookupOrInsert(&id_map_, key, id_map_.size()); } @@ -58,8 +58,8 @@ class IdGenerator { // We use a template as proto int64 != C++ int64 in open source. template void Append( - const google::protobuf::RepeatedField &repeated_field, - std::vector *vector) { + const google::protobuf::RepeatedField& repeated_field, + std::vector* vector) { CHECK(vector != nullptr); for (const FieldInt64Type value : repeated_field) { vector->push_back(value); @@ -82,8 +82,8 @@ void Append( // between each other. template std::unique_ptr GenerateGraphForSymmetryDetection( - const CpModelProto &problem, - std::vector *initial_equivalence_classes) { + const CpModelProto& problem, + std::vector* initial_equivalence_classes) { CHECK(initial_equivalence_classes != nullptr); const int num_variables = problem.variables_size(); @@ -106,7 +106,7 @@ std::unique_ptr GenerateGraphForSymmetryDetection( } auto new_node = [&initial_equivalence_classes, - &id_generator](const std::vector &key) { + &id_generator](const std::vector& key) { // Since we add nodes one by one, initial_equivalence_classes->size() gives // the number of nodes at any point, which we use as the next node index. const int node = initial_equivalence_classes->size(); @@ -115,7 +115,7 @@ std::unique_ptr GenerateGraphForSymmetryDetection( }; for (int v = 0; v < num_variables; ++v) { - const IntegerVariableProto &variable = problem.variables(v); + const IntegerVariableProto& variable = problem.variables(v); std::vector key = {VARIABLE_NODE, objective_by_var[v]}; Append(variable.domain(), &key); CHECK_EQ(v, new_node(key)); @@ -141,7 +141,7 @@ std::unique_ptr GenerateGraphForSymmetryDetection( }; // Add constraints to the graph. 
- for (const ConstraintProto &constraint : problem.constraints()) { + for (const ConstraintProto& constraint : problem.constraints()) { const int constraint_node = initial_equivalence_classes->size(); std::vector key = {CONSTRAINT_NODE, constraint.constraint_case()}; @@ -241,8 +241,8 @@ std::unique_ptr GenerateGraphForSymmetryDetection( } // namespace void FindCpModelSymmetries( - const CpModelProto &problem, - std::vector > *generators, + const CpModelProto& problem, + std::vector>* generators, double time_limit_seconds) { CHECK(generators != nullptr); generators->clear(); @@ -269,7 +269,7 @@ void FindCpModelSymmetries( double average_support_size = 0.0; int num_generators = 0; for (int i = 0; i < generators->size(); ++i) { - SparsePermutation *permutation = (*generators)[i].get(); + SparsePermutation* permutation = (*generators)[i].get(); std::vector to_delete; for (int j = 0; j < permutation->NumCycles(); ++j) { // Because variable nodes are in a separate equivalence class than any diff --git a/ortools/sat/cp_model_utils.cc b/ortools/sat/cp_model_utils.cc index b766b85316..e58f3931da 100644 --- a/ortools/sat/cp_model_utils.cc +++ b/ortools/sat/cp_model_utils.cc @@ -22,19 +22,19 @@ namespace sat { namespace { template -void AddIndices(const IntList &indices, absl::flat_hash_set *output) { +void AddIndices(const IntList& indices, absl::flat_hash_set* output) { output->insert(indices.begin(), indices.end()); } template -void AddIndices(const IntList &indices, std::vector *output) { +void AddIndices(const IntList& indices, std::vector* output) { output->insert(output->end(), indices.begin(), indices.end()); } } // namespace -void SetToNegatedLinearExpression(const LinearExpressionProto &input_expr, - LinearExpressionProto *output_negated_expr) { +void SetToNegatedLinearExpression(const LinearExpressionProto& input_expr, + LinearExpressionProto* output_negated_expr) { output_negated_expr->Clear(); for (int i = 0; i < input_expr.vars_size(); ++i) { 
output_negated_expr->add_vars(NegatedRef(input_expr.vars(i))); @@ -43,7 +43,7 @@ void SetToNegatedLinearExpression(const LinearExpressionProto &input_expr, output_negated_expr->set_offset(-input_expr.offset()); } -IndexReferences GetReferencesUsedByConstraint(const ConstraintProto &ct) { +IndexReferences GetReferencesUsedByConstraint(const ConstraintProto& ct) { IndexReferences output; switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: @@ -154,12 +154,12 @@ IndexReferences GetReferencesUsedByConstraint(const ConstraintProto &ct) { #define APPLY_TO_REPEATED_FIELD(ct_name, field_name) \ { \ - for (int &r : *ct->mutable_##ct_name()->mutable_##field_name()) f(&r); \ + for (int& r : *ct->mutable_##ct_name()->mutable_##field_name()) f(&r); \ } -void ApplyToAllLiteralIndices(const std::function &f, - ConstraintProto *ct) { - for (int &r : *ct->mutable_enforcement_literal()) f(&r); +void ApplyToAllLiteralIndices(const std::function& f, + ConstraintProto* ct) { + for (int& r : *ct->mutable_enforcement_literal()) f(&r); switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: APPLY_TO_REPEATED_FIELD(bool_or, literals); @@ -223,8 +223,8 @@ void ApplyToAllLiteralIndices(const std::function &f, } } -void ApplyToAllVariableIndices(const std::function &f, - ConstraintProto *ct) { +void ApplyToAllVariableIndices(const std::function& f, + ConstraintProto* ct) { switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: break; @@ -315,8 +315,8 @@ void ApplyToAllVariableIndices(const std::function &f, } } -void ApplyToAllIntervalIndices(const std::function &f, - ConstraintProto *ct) { +void ApplyToAllIntervalIndices(const std::function& f, + ConstraintProto* ct) { switch (ct->constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: break; @@ -438,9 +438,9 @@ std::string ConstraintCaseName( } } -std::vector UsedVariables(const ConstraintProto &ct) { +std::vector UsedVariables(const ConstraintProto& ct) 
{ IndexReferences references = GetReferencesUsedByConstraint(ct); - for (int &ref : references.variables) { + for (int& ref : references.variables) { ref = PositiveRef(ref); } for (const int lit : references.literals) { @@ -453,7 +453,7 @@ std::vector UsedVariables(const ConstraintProto &ct) { return references.variables; } -std::vector UsedIntervals(const ConstraintProto &ct) { +std::vector UsedIntervals(const ConstraintProto& ct) { std::vector used_intervals; switch (ct.constraint_case()) { case ConstraintProto::ConstraintCase::kBoolOr: @@ -517,10 +517,10 @@ std::vector UsedIntervals(const ConstraintProto &ct) { return used_intervals; } -int64 ComputeInnerObjective(const CpObjectiveProto &objective, - const CpSolverResponse &response) { +int64 ComputeInnerObjective(const CpObjectiveProto& objective, + const CpSolverResponse& response) { int64 objective_value = 0; - auto &repeated_field_values = response.solution().empty() + auto& repeated_field_values = response.solution().empty() ? response.solution_lower_bounds() : response.solution(); for (int i = 0; i < objective.vars_size(); ++i) { diff --git a/ortools/sat/cumulative.cc b/ortools/sat/cumulative.cc index 91416a78b9..d6248c0a92 100644 --- a/ortools/sat/cumulative.cc +++ b/ortools/sat/cumulative.cc @@ -32,17 +32,17 @@ namespace operations_research { namespace sat { -std::function Cumulative( - const std::vector &vars, - const std::vector &demands, AffineExpression capacity, - SchedulingConstraintHelper *helper) { - return [=](Model *model) mutable { +std::function Cumulative( + const std::vector& vars, + const std::vector& demands, AffineExpression capacity, + SchedulingConstraintHelper* helper) { + return [=](Model* model) mutable { if (vars.empty()) return; - auto *intervals = model->GetOrCreate(); - auto *encoder = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); - auto *watcher = model->GetOrCreate(); + auto* intervals = model->GetOrCreate(); + auto* encoder = model->GetOrCreate(); + 
auto* integer_trail = model->GetOrCreate(); + auto* watcher = model->GetOrCreate(); // Redundant constraints to ensure that the resource capacity is high enough // for each task. Also ensure that no task consumes more resource than what @@ -79,7 +79,7 @@ std::function Cumulative( if (vars.size() == 1) return; - const SatParameters ¶meters = *(model->GetOrCreate()); + const SatParameters& parameters = *(model->GetOrCreate()); // Detect a subset of intervals that needs to be in disjunction and add a // Disjunctive() constraint over them. @@ -128,7 +128,7 @@ std::function Cumulative( // Propagator responsible for applying Timetabling filtering rule. It // increases the minimum of the start variables, decrease the maximum of the // end variables, and increase the minimum of the capacity variable. - TimeTablingPerTask *time_tabling = + TimeTablingPerTask* time_tabling = new TimeTablingPerTask(demands, capacity, integer_trail, helper); time_tabling->RegisterWith(watcher); model->TakeOwnership(time_tabling); @@ -143,7 +143,7 @@ std::function Cumulative( // rule. 
It increases the minimum of the start variables and decreases the // maximum of the end variables, if (parameters.use_timetable_edge_finding_in_cumulative_constraint()) { - TimeTableEdgeFinding *time_table_edge_finding = + TimeTableEdgeFinding* time_table_edge_finding = new TimeTableEdgeFinding(demands, capacity, helper, integer_trail); time_table_edge_finding->RegisterWith(watcher); model->TakeOwnership(time_table_edge_finding); @@ -151,22 +151,22 @@ std::function Cumulative( }; } -std::function CumulativeTimeDecomposition( - const std::vector &vars, - const std::vector &demands, AffineExpression capacity, - SchedulingConstraintHelper *helper) { - return [=](Model *model) { +std::function CumulativeTimeDecomposition( + const std::vector& vars, + const std::vector& demands, AffineExpression capacity, + SchedulingConstraintHelper* helper) { + return [=](Model* model) { if (vars.empty()) return; - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); CHECK(integer_trail->IsFixed(capacity)); const Coefficient fixed_capacity( integer_trail->UpperBound(capacity).value()); const int num_tasks = vars.size(); - SatSolver *sat_solver = model->GetOrCreate(); - IntegerEncoder *encoder = model->GetOrCreate(); - IntervalsRepository *intervals = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); + IntervalsRepository* intervals = model->GetOrCreate(); std::vector start_vars; std::vector end_vars; diff --git a/ortools/sat/cumulative_energy.cc b/ortools/sat/cumulative_energy.cc index 6172cba855..ae8430555d 100644 --- a/ortools/sat/cumulative_energy.cc +++ b/ortools/sat/cumulative_energy.cc @@ -26,23 +26,23 @@ namespace sat { void AddCumulativeEnergyConstraint(std::vector energies, AffineExpression capacity, - SchedulingConstraintHelper *helper, - Model *model) { - auto *watcher = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); + 
SchedulingConstraintHelper* helper, + Model* model) { + auto* watcher = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); - CumulativeEnergyConstraint *constraint = new CumulativeEnergyConstraint( + CumulativeEnergyConstraint* constraint = new CumulativeEnergyConstraint( std::move(energies), capacity, integer_trail, helper); constraint->RegisterWith(watcher); model->TakeOwnership(constraint); } -void AddCumulativeOverloadChecker(const std::vector &demands, +void AddCumulativeOverloadChecker(const std::vector& demands, AffineExpression capacity, - SchedulingConstraintHelper *helper, - Model *model) { - auto *watcher = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); + SchedulingConstraintHelper* helper, + Model* model) { + auto* watcher = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); std::vector energies; const int num_tasks = helper->NumTasks(); @@ -82,7 +82,7 @@ void AddCumulativeOverloadChecker(const std::vector &demands, } } - CumulativeEnergyConstraint *constraint = + CumulativeEnergyConstraint* constraint = new CumulativeEnergyConstraint(energies, capacity, integer_trail, helper); constraint->RegisterWith(watcher); model->TakeOwnership(constraint); @@ -90,7 +90,7 @@ void AddCumulativeOverloadChecker(const std::vector &demands, CumulativeEnergyConstraint::CumulativeEnergyConstraint( std::vector energies, AffineExpression capacity, - IntegerTrail *integer_trail, SchedulingConstraintHelper *helper) + IntegerTrail* integer_trail, SchedulingConstraintHelper* helper) : energies_(std::move(energies)), capacity_(capacity), integer_trail_(integer_trail), @@ -101,7 +101,7 @@ CumulativeEnergyConstraint::CumulativeEnergyConstraint( task_to_start_event_.resize(num_tasks); } -void CumulativeEnergyConstraint::RegisterWith(GenericLiteralWatcher *watcher) { +void CumulativeEnergyConstraint::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_->WatchAllTasks(id, watcher); 
watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); diff --git a/ortools/sat/cuts.cc b/ortools/sat/cuts.cc index 3a1e2161b5..0eeebb36c4 100644 --- a/ortools/sat/cuts.cc +++ b/ortools/sat/cuts.cc @@ -40,8 +40,8 @@ const double kMinCutViolation = 1e-4; // Returns the lp value of a Literal. double GetLiteralLpValue( - const Literal lit, const gtl::ITIVector &lp_values, - const IntegerEncoder *encoder) { + const Literal lit, const gtl::ITIVector& lp_values, + const IntegerEncoder* encoder) { const IntegerVariable direct_view = encoder->GetLiteralView(lit); if (direct_view != kNoIntegerVariable) { return lp_values[direct_view]; @@ -55,9 +55,9 @@ double GetLiteralLpValue( // upper bound. The arguments must form a non-trival constraint of the form // sum terms (coeff * var) <= upper_bound. LinearConstraint GenerateKnapsackCutForCover( - const std::vector &vars, - const std::vector &coeffs, const IntegerValue upper_bound, - const IntegerTrail &integer_trail) { + const std::vector& vars, + const std::vector& coeffs, const IntegerValue upper_bound, + const IntegerTrail& integer_trail) { CHECK_EQ(vars.size(), coeffs.size()); CHECK_GT(vars.size(), 0); LinearConstraint cut; @@ -83,8 +83,8 @@ LinearConstraint GenerateKnapsackCutForCover( } bool SolutionSatisfiesConstraint( - const LinearConstraint &constraint, - const gtl::ITIVector &lp_values) { + const LinearConstraint& constraint, + const gtl::ITIVector& lp_values) { const double activity = ComputeActivity(constraint, lp_values); const double tolerance = 1e-6; return (activity <= constraint.ub.value() + tolerance && @@ -94,7 +94,7 @@ bool SolutionSatisfiesConstraint( } bool SmallRangeAndAllCoefficientsMagnitudeAreTheSame( - const LinearConstraint &constraint, IntegerTrail *integer_trail) { + const LinearConstraint& constraint, IntegerTrail* integer_trail) { if (constraint.vars.empty()) return true; const int64 magnitude = std::abs(constraint.coeffs[0].value()); @@ -114,7 +114,7 @@ bool 
SmallRangeAndAllCoefficientsMagnitudeAreTheSame( bool AllVarsTakeIntegerValue( const std::vector vars, - const gtl::ITIVector &lp_values) { + const gtl::ITIVector& lp_values) { for (IntegerVariable var : vars) { if (std::abs(lp_values[var] - std::round(lp_values[var])) > 1e-6) { return false; @@ -132,8 +132,8 @@ bool AllVarsTakeIntegerValue( // 3. Add terms in cover until term sum is smaller or equal to upper bound. // 4. Add the last item which violates the upper bound. This forms the smallest // cover. Return the size of this cover. -int GetSmallestCoverSize(const LinearConstraint &constraint, - const IntegerTrail &integer_trail) { +int GetSmallestCoverSize(const LinearConstraint& constraint, + const IntegerTrail& integer_trail) { IntegerValue ub = constraint.ub; std::vector sorted_terms; for (int i = 0; i < constraint.vars.size(); ++i) { @@ -155,8 +155,8 @@ int GetSmallestCoverSize(const LinearConstraint &constraint, return smallest_cover_size; } -bool ConstraintIsEligibleForLifting(const LinearConstraint &constraint, - const IntegerTrail &integer_trail) { +bool ConstraintIsEligibleForLifting(const LinearConstraint& constraint, + const IntegerTrail& integer_trail) { for (const IntegerVariable var : constraint.vars) { if (integer_trail.LevelZeroLowerBound(var) != IntegerValue(0) || integer_trail.LevelZeroUpperBound(var) != IntegerValue(1)) { @@ -168,18 +168,18 @@ bool ConstraintIsEligibleForLifting(const LinearConstraint &constraint, } // namespace bool LiftKnapsackCut( - const LinearConstraint &constraint, - const gtl::ITIVector &lp_values, - const std::vector &cut_vars_original_coefficients, - const IntegerTrail &integer_trail, TimeLimit *time_limit, - LinearConstraint *cut) { + const LinearConstraint& constraint, + const gtl::ITIVector& lp_values, + const std::vector& cut_vars_original_coefficients, + const IntegerTrail& integer_trail, TimeLimit* time_limit, + LinearConstraint* cut) { std::set vars_in_cut; for (IntegerVariable var : cut->vars) { 
vars_in_cut.insert(var); } - std::vector > non_zero_vars; - std::vector > zero_vars; + std::vector> non_zero_vars; + std::vector> zero_vars; for (int i = 0; i < constraint.vars.size(); ++i) { const IntegerVariable var = constraint.vars[i]; if (integer_trail.LevelZeroLowerBound(var) != IntegerValue(0) || @@ -200,7 +200,7 @@ bool LiftKnapsackCut( std::sort(non_zero_vars.rbegin(), non_zero_vars.rend()); std::sort(zero_vars.rbegin(), zero_vars.rend()); - std::vector > lifting_sequence( + std::vector> lifting_sequence( std::move(non_zero_vars)); lifting_sequence.insert(lifting_sequence.end(), zero_vars.begin(), @@ -246,9 +246,9 @@ bool LiftKnapsackCut( } LinearConstraint GetPreprocessedLinearConstraint( - const LinearConstraint &constraint, - const gtl::ITIVector &lp_values, - const IntegerTrail &integer_trail) { + const LinearConstraint& constraint, + const gtl::ITIVector& lp_values, + const IntegerTrail& integer_trail) { IntegerValue ub = constraint.ub; LinearConstraint constraint_with_left_vars; for (int i = 0; i < constraint.vars.size(); ++i) { @@ -269,8 +269,8 @@ LinearConstraint GetPreprocessedLinearConstraint( return constraint_with_left_vars; } -bool ConstraintIsTriviallyTrue(const LinearConstraint &constraint, - const IntegerTrail &integer_trail) { +bool ConstraintIsTriviallyTrue(const LinearConstraint& constraint, + const IntegerTrail& integer_trail) { IntegerValue term_sum = IntegerValue(0); for (int i = 0; i < constraint.vars.size(); ++i) { const IntegerVariable var = constraint.vars[i]; @@ -286,9 +286,9 @@ bool ConstraintIsTriviallyTrue(const LinearConstraint &constraint, } bool CanBeFilteredUsingCutLowerBound( - const LinearConstraint &preprocessed_constraint, - const gtl::ITIVector &lp_values, - const IntegerTrail &integer_trail) { + const LinearConstraint& preprocessed_constraint, + const gtl::ITIVector& lp_values, + const IntegerTrail& integer_trail) { std::vector variable_upper_bound_distances; for (const IntegerVariable var : 
preprocessed_constraint.vars) { const IntegerValue var_ub = integer_trail.LevelZeroUpperBound(var); @@ -332,9 +332,9 @@ double GetKnapsackUpperBound(std::vector items, } bool CanBeFilteredUsingKnapsackUpperBound( - const LinearConstraint &constraint, - const gtl::ITIVector &lp_values, - const IntegerTrail &integer_trail) { + const LinearConstraint& constraint, + const gtl::ITIVector& lp_values, + const IntegerTrail& integer_trail) { std::vector items; double capacity = -constraint.ub.value() - 1.0; double sum_variable_profit = 0; @@ -366,9 +366,9 @@ bool CanBeFilteredUsingKnapsackUpperBound( } bool CanFormValidKnapsackCover( - const LinearConstraint &preprocessed_constraint, - const gtl::ITIVector &lp_values, - const IntegerTrail &integer_trail) { + const LinearConstraint& preprocessed_constraint, + const gtl::ITIVector& lp_values, + const IntegerTrail& integer_trail) { if (ConstraintIsTriviallyTrue(preprocessed_constraint, integer_trail)) { return false; } @@ -383,9 +383,9 @@ bool CanFormValidKnapsackCover( return true; } -void ConvertToKnapsackForm(const LinearConstraint &constraint, - std::vector *knapsack_constraints, - IntegerTrail *integer_trail) { +void ConvertToKnapsackForm(const LinearConstraint& constraint, + std::vector* knapsack_constraints, + IntegerTrail* integer_trail) { // If all coefficient are the same, the generated knapsack cuts cannot be // stronger than the constraint itself. However, when we substitute variables // using the implication graph, this is not longer true. So we only skip @@ -433,14 +433,14 @@ void ConvertToKnapsackForm(const LinearConstraint &constraint, // TODO(user): Move the cut generator into a class and reuse variables. 
CutGenerator CreateKnapsackCoverCutGenerator( - const std::vector &base_constraints, - const std::vector &vars, Model *model) { + const std::vector& base_constraints, + const std::vector& vars, Model* model) { CutGenerator result; result.vars = vars; - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); std::vector knapsack_constraints; - for (const LinearConstraint &constraint : base_constraints) { + for (const LinearConstraint& constraint : base_constraints) { // There is often a lot of small linear base constraints and it doesn't seem // super useful to generate cuts for constraints of size 2. Any valid cut // of size 1 should be already infered by the propagation. @@ -463,9 +463,9 @@ CutGenerator CreateKnapsackCoverCutGenerator( // TODO(user): do not add generator if there are no knapsack constraints. result.generate_cuts = [implied_bounds_processor, knapsack_constraints, vars, model, integer_trail]( - const gtl::ITIVector - &lp_values, - LinearConstraintManager *manager) mutable { + const gtl::ITIVector& + lp_values, + LinearConstraintManager* manager) mutable { // TODO(user): When we use implied-bound substitution, we might still infer // an interesting cut even if all variables are integer. See if we still // want to skip all such constraints. @@ -478,7 +478,7 @@ CutGenerator CreateKnapsackCoverCutGenerator( // Iterate through all knapsack constraints. implied_bounds_processor.ClearCache(); - for (const LinearConstraint &constraint : knapsack_constraints) { + for (const LinearConstraint& constraint : knapsack_constraints) { if (model->GetOrCreate()->LimitReached()) break; VLOG(2) << "Processing constraint: " << constraint.DebugString(); @@ -703,10 +703,10 @@ std::function GetSuperAdditiveRoundingFunction( // as it still takes around 25% percent of the run time when all the cuts are on // for the opm*mps.gz problems and others. 
void IntegerRoundingCutHelper::ComputeCut( - RoundingOptions options, const std::vector &lp_values, - const std::vector &lower_bounds, - const std::vector &upper_bounds, - ImpliedBoundsProcessor *ib_processor, LinearConstraint *cut) { + RoundingOptions options, const std::vector& lp_values, + const std::vector& lower_bounds, + const std::vector& upper_bounds, + ImpliedBoundsProcessor* ib_processor, LinearConstraint* cut) { const int size = lp_values.size(); if (size == 0) return; CHECK_EQ(lower_bounds.size(), size); @@ -1151,9 +1151,9 @@ void IntegerRoundingCutHelper::ComputeCut( } bool CoverCutHelper::TrySimpleKnapsack( - const LinearConstraint base_ct, const std::vector &lp_values, - const std::vector &lower_bounds, - const std::vector &upper_bounds) { + const LinearConstraint base_ct, const std::vector& lp_values, + const std::vector& lower_bounds, + const std::vector& upper_bounds) { const int base_size = lp_values.size(); // Fill terms with a rewrite of the base constraint where all coeffs & @@ -1187,7 +1187,7 @@ bool CoverCutHelper::TrySimpleKnapsack( // Look for violated CUT of the form: sum (UB - X) or (X - LB) >= 1. double activity = 0.0; int new_size = 0; - std::sort(terms_.begin(), terms_.end(), [](const Term &a, const Term &b) { + std::sort(terms_.begin(), terms_.end(), [](const Term& a, const Term& b) { if (a.dist_to_max_value == b.dist_to_max_value) { // Prefer low coefficients if the distance is the same. return a.positive_coeff < b.positive_coeff; @@ -1195,7 +1195,7 @@ bool CoverCutHelper::TrySimpleKnapsack( return a.dist_to_max_value < b.dist_to_max_value; }); for (int i = 0; i < terms_.size(); ++i) { - const Term &term = terms_[i]; + const Term& term = terms_[i]; activity += term.dist_to_max_value; // As an heuristic we select all the term so that the sum of distance @@ -1230,7 +1230,7 @@ bool CoverCutHelper::TrySimpleKnapsack( // // We compute the cut at the same time. 
terms_.resize(new_size); - std::sort(terms_.begin(), terms_.end(), [](const Term &a, const Term &b) { + std::sort(terms_.begin(), terms_.end(), [](const Term& a, const Term& b) { if (a.positive_coeff == b.positive_coeff) { return a.dist_to_max_value > b.dist_to_max_value; } @@ -1325,15 +1325,15 @@ bool CoverCutHelper::TrySimpleKnapsack( CutGenerator CreatePositiveMultiplicationCutGenerator(IntegerVariable z, IntegerVariable x, IntegerVariable y, - Model *model) { + Model* model) { CutGenerator result; result.vars = {z, x, y}; - IntegerTrail *const integer_trail = model->GetOrCreate(); + IntegerTrail* const integer_trail = model->GetOrCreate(); result.generate_cuts = [z, x, y, integer_trail]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { const int64 x_lb = integer_trail->LevelZeroLowerBound(x).value(); const int64 x_ub = integer_trail->LevelZeroUpperBound(x).value(); const int64 y_lb = integer_trail->LevelZeroLowerBound(y).value(); @@ -1419,15 +1419,15 @@ CutGenerator CreatePositiveMultiplicationCutGenerator(IntegerVariable z, } CutGenerator CreateSquareCutGenerator(IntegerVariable y, IntegerVariable x, - Model *model) { + Model* model) { CutGenerator result; result.vars = {y, x}; - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); result.generate_cuts = [y, x, integer_trail]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { const int64 x_ub = integer_trail->LevelZeroUpperBound(x).value(); const int64 x_lb = integer_trail->LevelZeroLowerBound(x).value(); @@ -1486,8 +1486,8 @@ CutGenerator CreateSquareCutGenerator(IntegerVariable y, IntegerVariable x, } void ImpliedBoundsProcessor::ProcessUpperBoundedConstraint( - const gtl::ITIVector &lp_values, - LinearConstraint *cut) { + const gtl::ITIVector& lp_values, + 
LinearConstraint* cut) { ProcessUpperBoundedConstraintWithSlackCreation( /*substitute_only_inner_variables=*/false, IntegerVariable(0), lp_values, cut, nullptr); @@ -1503,12 +1503,12 @@ ImpliedBoundsProcessor::GetCachedImpliedBoundInfo(IntegerVariable var) { ImpliedBoundsProcessor::BestImpliedBoundInfo ImpliedBoundsProcessor::ComputeBestImpliedBound( IntegerVariable var, - const gtl::ITIVector &lp_values) { + const gtl::ITIVector& lp_values) { auto it = cache_.find(var); if (it != cache_.end()) return it->second; BestImpliedBoundInfo result; const IntegerValue lb = integer_trail_->LevelZeroLowerBound(var); - for (const ImpliedBoundEntry &entry : + for (const ImpliedBoundEntry& entry : implied_bounds_->GetImpliedBounds(var)) { // Only process entries with a Boolean variable currently part of the LP // we are considering for this cut. @@ -1534,7 +1534,7 @@ ImpliedBoundsProcessor::ComputeBestImpliedBound( if (slack_lp_value < -1e-4) { LinearConstraint ib_cut; ib_cut.lb = kMinIntegerValue; - std::vector > terms; + std::vector> terms; if (entry.is_positive) { // X >= Indicator * (bound - lb) + lb terms.push_back({entry.literal_view, diff}); @@ -1570,7 +1570,7 @@ ImpliedBoundsProcessor::ComputeBestImpliedBound( // TODO(user): restrict to a subset of the variables to not spend too much time. 
void ImpliedBoundsProcessor::SeparateSomeImpliedBoundCuts( - const gtl::ITIVector &lp_values) { + const gtl::ITIVector& lp_values) { for (const IntegerVariable var : implied_bounds_->VariablesWithImpliedBounds()) { if (!lp_vars_.contains(PositiveVariable(var))) continue; @@ -1580,8 +1580,8 @@ void ImpliedBoundsProcessor::SeparateSomeImpliedBoundCuts( void ImpliedBoundsProcessor::ProcessUpperBoundedConstraintWithSlackCreation( bool substitute_only_inner_variables, IntegerVariable first_slack, - const gtl::ITIVector &lp_values, - LinearConstraint *cut, std::vector *slack_infos) { + const gtl::ITIVector& lp_values, + LinearConstraint* cut, std::vector* slack_infos) { tmp_terms_.clear(); IntegerValue new_ub = cut->ub; bool changed = false; @@ -1720,9 +1720,9 @@ void ImpliedBoundsProcessor::ProcessUpperBoundedConstraintWithSlackCreation( } bool ImpliedBoundsProcessor::DebugSlack(IntegerVariable first_slack, - const LinearConstraint &initial_cut, - const LinearConstraint &cut, - const std::vector &info) { + const LinearConstraint& initial_cut, + const LinearConstraint& cut, + const std::vector& info) { tmp_terms_.clear(); IntegerValue new_ub = cut.ub; for (int i = 0; i < cut.vars.size(); ++i) { @@ -1735,7 +1735,7 @@ bool ImpliedBoundsProcessor::DebugSlack(IntegerVariable first_slack, // Replace slack by its definition. 
const IntegerValue multiplier = cut.coeffs[i]; const int index = (cut.vars[i].value() - first_slack.value()) / 2; - for (const std::pair &term : + for (const std::pair& term : info[index].terms) { tmp_terms_.push_back({term.first, term.second * multiplier}); } @@ -1774,10 +1774,10 @@ bool ImpliedBoundsProcessor::DebugSlack(IntegerVariable first_slack, namespace { void TryToGenerateAllDiffCut( - const std::vector > &sorted_vars_lp, - const IntegerTrail &integer_trail, - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const std::vector>& sorted_vars_lp, + const IntegerTrail& integer_trail, + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { Domain current_union; std::vector current_set_vars; double sum = 0.0; @@ -1813,20 +1813,20 @@ void TryToGenerateAllDiffCut( } // namespace CutGenerator CreateAllDifferentCutGenerator( - const std::vector &vars, Model *model) { + const std::vector& vars, Model* model) { CutGenerator result; result.vars = vars; - IntegerTrail *integer_trail = model->GetOrCreate(); - Trail *trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); + Trail* trail = model->GetOrCreate(); result.generate_cuts = [vars, integer_trail, trail]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { // These cuts work at all levels but the generator adds too many cuts on // some instances and degrade the performance so we only use it at level // 0. 
if (trail->CurrentDecisionLevel() > 0) return; - std::vector > sorted_vars; + std::vector> sorted_vars; for (const IntegerVariable var : vars) { if (integer_trail->LevelZeroLowerBound(var) == integer_trail->LevelZeroUpperBound(var)) { @@ -1851,7 +1851,7 @@ namespace { IntegerValue MaxCornerDifference(const IntegerVariable var, const IntegerValue w1_i, const IntegerValue w2_i, - const IntegerTrail &integer_trail) { + const IntegerTrail& integer_trail) { const IntegerValue lb = integer_trail.LevelZeroLowerBound(var); const IntegerValue ub = integer_trail.LevelZeroUpperBound(var); return std::max((w2_i - w1_i) * lb, (w2_i - w1_i) * ub); @@ -1864,10 +1864,10 @@ IntegerValue MaxCornerDifference(const IntegerVariable var, // target expr I(i), max expr k. // The coefficient of zk is Sum(i=1..n)(MPlusCoefficient_ki) + bk IntegerValue MPlusCoefficient( - const std::vector &x_vars, - const std::vector &exprs, - const gtl::ITIVector &variable_partition, - const int max_index, const IntegerTrail &integer_trail) { + const std::vector& x_vars, + const std::vector& exprs, + const gtl::ITIVector& variable_partition, + const int max_index, const IntegerTrail& integer_trail) { IntegerValue coeff = exprs[max_index].offset; // TODO(user): This algo is quadratic since GetCoefficientOfPositiveVar() // is linear. This can be optimized (better complexity) if needed. @@ -1886,19 +1886,19 @@ IntegerValue MPlusCoefficient( // rhs = wI(i)i * xi + Sum(k=1..d)(MPlusCoefficient_ki * zk) // for variable xi for given target index I(i). 
double ComputeContribution( - const IntegerVariable xi_var, const std::vector &z_vars, - const std::vector &exprs, - const gtl::ITIVector &lp_values, - const IntegerTrail &integer_trail, const int target_index) { + const IntegerVariable xi_var, const std::vector& z_vars, + const std::vector& exprs, + const gtl::ITIVector& lp_values, + const IntegerTrail& integer_trail, const int target_index) { CHECK_GE(target_index, 0); CHECK_LT(target_index, exprs.size()); - const LinearExpression &target_expr = exprs[target_index]; + const LinearExpression& target_expr = exprs[target_index]; const double xi_value = lp_values[xi_var]; const IntegerValue wt_i = GetCoefficientOfPositiveVar(xi_var, target_expr); double contrib = wt_i.value() * xi_value; for (int expr_index = 0; expr_index < exprs.size(); ++expr_index) { if (expr_index == target_index) continue; - const LinearExpression &max_expr = exprs[expr_index]; + const LinearExpression& max_expr = exprs[expr_index]; const double z_max_value = lp_values[z_vars[expr_index]]; const IntegerValue corner_value = MaxCornerDifference( xi_var, wt_i, GetCoefficientOfPositiveVar(xi_var, max_expr), @@ -1910,8 +1910,8 @@ double ComputeContribution( } // namespace CutGenerator CreateLinMaxCutGenerator( - const IntegerVariable target, const std::vector &exprs, - const std::vector &z_vars, Model *model) { + const IntegerVariable target, const std::vector& exprs, + const std::vector& z_vars, Model* model) { CutGenerator result; std::vector x_vars; result.vars = {target}; @@ -1927,11 +1927,11 @@ CutGenerator CreateLinMaxCutGenerator( })); result.vars.insert(result.vars.end(), x_vars.begin(), x_vars.end()); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); result.generate_cuts = [x_vars, z_vars, target, num_exprs, exprs, integer_trail, model]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { 
gtl::ITIVector variable_partition( lp_values.size(), -1); gtl::ITIVector variable_partition_contrib( @@ -1955,7 +1955,7 @@ CutGenerator CreateLinMaxCutGenerator( for (const IntegerVariable xi_var : x_vars) { const int input_index = variable_partition[xi_var]; - const LinearExpression &expr = exprs[input_index]; + const LinearExpression& expr = exprs[input_index]; const IntegerValue coeff = GetCoefficientOfPositiveVar(xi_var, expr); if (coeff != IntegerValue(0)) { cut.AddTerm(xi_var, coeff); @@ -1978,16 +1978,16 @@ CutGenerator CreateLinMaxCutGenerator( return result; } -void AddIntegerVariableFromIntervals(SchedulingConstraintHelper *helper, - Model *model, - std::vector *vars) { +void AddIntegerVariableFromIntervals(SchedulingConstraintHelper* helper, + Model* model, + std::vector* vars) { vars->insert(vars->end(), helper->StartVars().begin(), helper->StartVars().end()); vars->insert(vars->end(), helper->SizeVars().begin(), helper->SizeVars().end()); vars->insert(vars->end(), helper->EndVars().begin(), helper->EndVars().end()); - IntegerEncoder *encoder = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); for (int t = 0; t < helper->NumTasks(); ++t) { if (helper->IsOptional(t) && !helper->IsAbsent(t) && !helper->IsPresent(t)) { @@ -2007,19 +2007,19 @@ void AddIntegerVariableFromIntervals(SchedulingConstraintHelper *helper, } } -std::function &, - LinearConstraintManager *)> -GenerateCumulativeCut(const std::string &cut_name, - SchedulingConstraintHelper *helper, - const std::vector &demands, - AffineExpression capacity, Model *model) { - Trail *trail = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); - IntegerEncoder *encoder = model->GetOrCreate(); +std::function&, + LinearConstraintManager*)> +GenerateCumulativeCut(const std::string& cut_name, + SchedulingConstraintHelper* helper, + const std::vector& demands, + AffineExpression capacity, Model* model) { + Trail* trail = model->GetOrCreate(); + IntegerTrail* 
integer_trail = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); return [capacity, demands, trail, integer_trail, helper, model, cut_name, - encoder](const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + encoder](const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { if (trail->CurrentDecisionLevel() > 0) return; const auto demand_is_fixed = [integer_trail, &demands](int i) { @@ -2189,12 +2189,12 @@ GenerateCumulativeCut(const std::string &cut_name, } CutGenerator CreateCumulativeCutGenerator( - const std::vector &intervals, - const IntegerVariable capacity, const std::vector &demands, - Model *model) { + const std::vector& intervals, + const IntegerVariable capacity, const std::vector& demands, + Model* model) { CutGenerator result; - SchedulingConstraintHelper *helper = + SchedulingConstraintHelper* helper = new SchedulingConstraintHelper(intervals, model); model->TakeOwnership(helper); @@ -2208,12 +2208,12 @@ CutGenerator CreateCumulativeCutGenerator( } CutGenerator CreateOverlappingCumulativeCutGenerator( - const std::vector &intervals, - const IntegerVariable capacity, const std::vector &demands, - Model *model) { + const std::vector& intervals, + const IntegerVariable capacity, const std::vector& demands, + Model* model) { CutGenerator result; - SchedulingConstraintHelper *helper = + SchedulingConstraintHelper* helper = new SchedulingConstraintHelper(intervals, model); model->TakeOwnership(helper); @@ -2228,13 +2228,13 @@ CutGenerator CreateOverlappingCumulativeCutGenerator( IntegerVariable demand; }; - Trail *trail = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); + Trail* trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); result.generate_cuts = [helper, capacity, demands, trail, integer_trail, model]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* 
manager) { if (trail->CurrentDecisionLevel() > 0) return; std::vector events; @@ -2277,7 +2277,7 @@ CutGenerator CreateOverlappingCumulativeCutGenerator( std::vector cut_events; bool added_positive_event = false; - for (const Event &e : events) { + for (const Event& e : events) { if (e.positive) { added_positive_event = true; cut_events.push_back(e); @@ -2289,7 +2289,7 @@ CutGenerator CreateOverlappingCumulativeCutGenerator( LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0)); cut.AddTerm(capacity, IntegerValue(-1)); - for (const Event &cut_event : cut_events) { + for (const Event& cut_event : cut_events) { if (helper->IsPresent(cut_event.interval_index)) { cut.AddTerm(cut_event.demand, IntegerValue(1)); } else { @@ -2322,37 +2322,38 @@ CutGenerator CreateOverlappingCumulativeCutGenerator( } CutGenerator CreateNoOverlapCutGenerator( - const std::vector &intervals, Model *model) { + const std::vector& intervals, Model* model) { CutGenerator result; - SchedulingConstraintHelper *helper = + SchedulingConstraintHelper* helper = new SchedulingConstraintHelper(intervals, model); model->TakeOwnership(helper); AddIntegerVariableFromIntervals(helper, model, &result.vars); result.generate_cuts = GenerateCumulativeCut( - "NoOverlapEnergy", helper, /*demands=*/{}, + "NoOverlapEnergy", helper, + /*demands=*/{}, /*capacity=*/AffineExpression(IntegerValue(1)), model); return result; } CutGenerator CreateNoOverlapPrecedenceCutGenerator( - const std::vector &intervals, Model *model) { + const std::vector& intervals, Model* model) { CutGenerator result; - SchedulingConstraintHelper *helper = + SchedulingConstraintHelper* helper = new SchedulingConstraintHelper(intervals, model); model->TakeOwnership(helper); AddIntegerVariableFromIntervals(helper, model, &result.vars); - Trail *trail = model->GetOrCreate(); + Trail* trail = model->GetOrCreate(); result.generate_cuts = [trail, helper, model]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + 
const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { if (trail->CurrentDecisionLevel() > 0) return; // TODO(user): We can do much better in term of complexity: @@ -2401,15 +2402,15 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator( } CutGenerator CreateCliqueCutGenerator( - const std::vector &base_variables, Model *model) { + const std::vector& base_variables, Model* model) { // Filter base_variables to only keep the one with a literal view, and // do the conversion. std::vector variables; std::vector literals; absl::flat_hash_map positive_map; absl::flat_hash_map negative_map; - auto *integer_trail = model->GetOrCreate(); - auto *encoder = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* encoder = model->GetOrCreate(); for (const IntegerVariable var : base_variables) { if (integer_trail->LowerBound(var) != IntegerValue(0)) continue; if (integer_trail->UpperBound(var) != IntegerValue(1)) continue; @@ -2424,20 +2425,20 @@ CutGenerator CreateCliqueCutGenerator( } CutGenerator result; result.vars = variables; - auto *implication_graph = model->GetOrCreate(); + auto* implication_graph = model->GetOrCreate(); result.generate_cuts = [variables, literals, implication_graph, positive_map, negative_map, - model](const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + model](const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { std::vector packed_values; for (int i = 0; i < literals.size(); ++i) { packed_values.push_back(lp_values[variables[i]]); } - const std::vector > at_most_ones = + const std::vector> at_most_ones = implication_graph->GenerateAtMostOnesWithLargeWeight(literals, packed_values); - for (const std::vector &at_most_one : at_most_ones) { + for (const std::vector& at_most_one : at_most_ones) { // We need to express such "at most one" in term of the initial // variables, so we do not use the // LinearConstraintBuilder::AddLiteralTerm() here. 
diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index e7d8c22cbc..0af293e572 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -30,10 +30,10 @@ namespace operations_research { namespace sat { -void AddCumulativeRelaxation(const std::vector &x_intervals, - SchedulingConstraintHelper *x, - SchedulingConstraintHelper *y, Model *model) { - auto *integer_trail = model->GetOrCreate(); +void AddCumulativeRelaxation(const std::vector& x_intervals, + SchedulingConstraintHelper* x, + SchedulingConstraintHelper* y, Model* model) { + auto* integer_trail = model->GetOrCreate(); std::vector sizes; int64 min_starts = kint64max; @@ -95,9 +95,9 @@ IntegerValue FindCanonicalValue(IntegerValue lb, IntegerValue ub) { return candidate; } -void SplitDisjointBoxes(const SchedulingConstraintHelper &x, +void SplitDisjointBoxes(const SchedulingConstraintHelper& x, absl::Span boxes, - std::vector > *result) { + std::vector>* result) { result->clear(); std::sort(boxes.begin(), boxes.end(), [&x](int a, int b) { return x.StartMin(a) < x.StartMin(b); }); @@ -148,7 +148,7 @@ bool NonOverlappingRectanglesEnergyPropagator::Propagate() { if (cached_areas_[box] == 0) continue; // TODO(user): Also consider shifted end max. 
- Dimension &dimension = cached_dimensions_[box]; + Dimension& dimension = cached_dimensions_[box]; dimension.x_min = x_.ShiftedStartMin(box); dimension.x_max = x_.EndMax(box); dimension.y_min = y_.ShiftedStartMin(box); @@ -177,7 +177,7 @@ bool NonOverlappingRectanglesEnergyPropagator::Propagate() { } int NonOverlappingRectanglesEnergyPropagator::RegisterWith( - GenericLiteralWatcher *watcher) { + GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); x_.WatchAllTasks(id, watcher, /*watch_start_max=*/false, /*watch_end_max=*/true); @@ -189,12 +189,12 @@ int NonOverlappingRectanglesEnergyPropagator::RegisterWith( void NonOverlappingRectanglesEnergyPropagator::SortBoxesIntoNeighbors( int box, absl::Span local_boxes, IntegerValue total_sum_of_areas) { - const Dimension &box_dim = cached_dimensions_[box]; + const Dimension& box_dim = cached_dimensions_[box]; neighbors_.clear(); for (const int other_box : local_boxes) { if (other_box == box) continue; - const Dimension &other_dim = cached_dimensions_[other_box]; + const Dimension& other_dim = cached_dimensions_[other_box]; const IntegerValue span_x = std::max(box_dim.x_max, other_dim.x_max) - std::min(box_dim.x_min, other_dim.x_min); const IntegerValue span_y = std::max(box_dim.y_max, other_dim.y_max) - @@ -256,9 +256,9 @@ bool NonOverlappingRectanglesEnergyPropagator::FailWhenEnergyIsTooLarge( // to the disjunctive propagators. 
NonOverlappingRectanglesDisjunctivePropagator:: NonOverlappingRectanglesDisjunctivePropagator(bool strict, - SchedulingConstraintHelper *x, - SchedulingConstraintHelper *y, - Model *model) + SchedulingConstraintHelper* x, + SchedulingConstraintHelper* y, + Model* model) : global_x_(*x), global_y_(*y), x_(x->NumTasks(), model), @@ -291,8 +291,8 @@ void NonOverlappingRectanglesDisjunctivePropagator::Register( bool NonOverlappingRectanglesDisjunctivePropagator:: FindBoxesThatMustOverlapAHorizontalLineAndPropagate( - const SchedulingConstraintHelper &x, - const SchedulingConstraintHelper &y, + const SchedulingConstraintHelper& x, + const SchedulingConstraintHelper& y, std::function inner_propagate) { // Compute relevant events (line in the y dimension). active_boxes_.clear(); @@ -339,12 +339,12 @@ bool NonOverlappingRectanglesDisjunctivePropagator:: // the next. This save a bit more than 1%. int new_size = 0; { - for (std::vector &overlapping_boxes : events_overlapping_boxes_) { + for (std::vector& overlapping_boxes : events_overlapping_boxes_) { if (overlapping_boxes.size() < 2) { continue; // Remove current event. } if (new_size > 0) { - const std::vector &previous_overlapping_boxes = + const std::vector& previous_overlapping_boxes = events_overlapping_boxes_[new_size - 1]; // If the previous set of boxes is included in the current one, replace @@ -375,7 +375,7 @@ bool NonOverlappingRectanglesDisjunctivePropagator:: // Boxes are sorted in a stable manner in the Split method. // Note that we do not use reduced_overlapping_boxes_ directly so that // the order of iteration is deterministic. 
- const auto &insertion = reduced_overlapping_boxes_.insert(sub_boxes); + const auto& insertion = reduced_overlapping_boxes_.insert(sub_boxes); if (insertion.second) boxes_to_propagate_.push_back(sub_boxes); } } diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index e5a86bdfb8..8d091d726f 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -27,11 +27,11 @@ namespace operations_research { namespace sat { -std::function Disjunctive( - const std::vector &vars) { - return [=](Model *model) { +std::function Disjunctive( + const std::vector& vars) { + return [=](Model* model) { bool is_all_different = true; - IntervalsRepository *repository = model->GetOrCreate(); + IntervalsRepository* repository = model->GetOrCreate(); for (const IntervalVariable var : vars) { if (repository->IsOptional(var) || repository->MinSize(var) != 1 || repository->MaxSize(var) != 1) { @@ -49,15 +49,15 @@ std::function Disjunctive( return; } - auto *watcher = model->GetOrCreate(); - const auto &sat_parameters = *model->GetOrCreate(); + auto* watcher = model->GetOrCreate(); + const auto& sat_parameters = *model->GetOrCreate(); if (vars.size() > 2 && sat_parameters.use_combined_no_overlap()) { - model->GetOrCreate >()->AddNoOverlap(vars); - model->GetOrCreate >()->AddNoOverlap(vars); + model->GetOrCreate>()->AddNoOverlap(vars); + model->GetOrCreate>()->AddNoOverlap(vars); return; } - SchedulingConstraintHelper *helper = + SchedulingConstraintHelper* helper = new SchedulingConstraintHelper(vars, model); model->TakeOwnership(helper); @@ -65,7 +65,7 @@ std::function Disjunctive( if (/*DISABLES_CODE*/ (false)) { const AffineExpression one(IntegerValue(1)); std::vector demands(vars.size(), one); - TimeTablingPerTask *timetable = new TimeTablingPerTask( + TimeTablingPerTask* timetable = new TimeTablingPerTask( demands, one, model->GetOrCreate(), helper); timetable->RegisterWith(watcher); model->TakeOwnership(timetable); @@ -73,7 +73,7 @@ std::function 
Disjunctive( } if (vars.size() == 2) { - DisjunctiveWithTwoItems *propagator = new DisjunctiveWithTwoItems(helper); + DisjunctiveWithTwoItems* propagator = new DisjunctiveWithTwoItems(helper); propagator->RegisterWith(watcher); model->TakeOwnership(propagator); } else { @@ -81,28 +81,28 @@ std::function Disjunctive( // shouldn't matter much because of the different priorities used. { // Only one direction is needed by this one. - DisjunctiveOverloadChecker *overload_checker = + DisjunctiveOverloadChecker* overload_checker = new DisjunctiveOverloadChecker(helper); const int id = overload_checker->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 1); model->TakeOwnership(overload_checker); } for (const bool time_direction : {true, false}) { - DisjunctiveDetectablePrecedences *detectable_precedences = + DisjunctiveDetectablePrecedences* detectable_precedences = new DisjunctiveDetectablePrecedences(time_direction, helper); const int id = detectable_precedences->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 2); model->TakeOwnership(detectable_precedences); } for (const bool time_direction : {true, false}) { - DisjunctiveNotLast *not_last = + DisjunctiveNotLast* not_last = new DisjunctiveNotLast(time_direction, helper); const int id = not_last->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 3); model->TakeOwnership(not_last); } for (const bool time_direction : {true, false}) { - DisjunctiveEdgeFinding *edge_finding = + DisjunctiveEdgeFinding* edge_finding = new DisjunctiveEdgeFinding(time_direction, helper); const int id = edge_finding->RegisterWith(watcher); watcher->SetPropagatorPriority(id, 4); @@ -116,7 +116,7 @@ std::function Disjunctive( if (sat_parameters.use_precedences_in_disjunctive_constraint() && !sat_parameters.use_combined_no_overlap()) { for (const bool time_direction : {true, false}) { - DisjunctivePrecedences *precedences = new DisjunctivePrecedences( + DisjunctivePrecedences* precedences = new DisjunctivePrecedences( 
time_direction, helper, model->GetOrCreate(), model->GetOrCreate()); const int id = precedences->RegisterWith(watcher); @@ -127,12 +127,12 @@ std::function Disjunctive( }; } -std::function DisjunctiveWithBooleanPrecedencesOnly( - const std::vector &vars) { - return [=](Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); - IntervalsRepository *repository = model->GetOrCreate(); - PrecedencesPropagator *precedences = +std::function DisjunctiveWithBooleanPrecedencesOnly( + const std::vector& vars) { + return [=](Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); + IntervalsRepository* repository = model->GetOrCreate(); + PrecedencesPropagator* precedences = model->GetOrCreate(); for (int i = 0; i < vars.size(); ++i) { for (int j = 0; j < i; ++j) { @@ -150,15 +150,15 @@ std::function DisjunctiveWithBooleanPrecedencesOnly( }; } -std::function DisjunctiveWithBooleanPrecedences( - const std::vector &vars) { - return [=](Model *model) { +std::function DisjunctiveWithBooleanPrecedences( + const std::vector& vars) { + return [=](Model* model) { model->Add(DisjunctiveWithBooleanPrecedencesOnly(vars)); model->Add(Disjunctive(vars)); }; } -void TaskSet::AddEntry(const Entry &e) { +void TaskSet::AddEntry(const Entry& e) { int j = sorted_tasks_.size(); sorted_tasks_.push_back(e); while (j > 0 && sorted_tasks_[j - 1].start_min > e.start_min) { @@ -173,13 +173,13 @@ void TaskSet::AddEntry(const Entry &e) { if (j <= optimized_restart_) optimized_restart_ = 0; } -void TaskSet::AddShiftedStartMinEntry(const SchedulingConstraintHelper &helper, +void TaskSet::AddShiftedStartMinEntry(const SchedulingConstraintHelper& helper, int t) { const IntegerValue dmin = helper.SizeMin(t); AddEntry({t, std::max(helper.StartMin(t), helper.EndMin(t) - dmin), dmin}); } -void TaskSet::NotifyEntryIsNowLastIfPresent(const Entry &e) { +void TaskSet::NotifyEntryIsNowLastIfPresent(const Entry& e) { const int size = sorted_tasks_.size(); for (int i = 0;; ++i) { if (i == size) return; 
@@ -204,7 +204,7 @@ IntegerValue TaskSet::ComputeEndMin() const { const int size = sorted_tasks_.size(); IntegerValue end_min = kMinIntegerValue; for (int i = optimized_restart_; i < size; ++i) { - const Entry &e = sorted_tasks_[i]; + const Entry& e = sorted_tasks_[i]; if (e.start_min >= end_min) { optimized_restart_ = i; end_min = e.start_min + e.size_min; @@ -216,7 +216,7 @@ IntegerValue TaskSet::ComputeEndMin() const { } IntegerValue TaskSet::ComputeEndMin(int task_to_ignore, - int *critical_index) const { + int* critical_index) const { // The order in which we process tasks with the same start-min doesn't matter. DCHECK(std::is_sorted(sorted_tasks_.begin(), sorted_tasks_.end())); bool ignored = false; @@ -231,7 +231,7 @@ IntegerValue TaskSet::ComputeEndMin(int task_to_ignore, } for (int i = optimized_restart_; i < size; ++i) { - const Entry &e = sorted_tasks_[i]; + const Entry& e = sorted_tasks_[i]; if (e.task == task_to_ignore) { ignored = true; continue; @@ -307,7 +307,7 @@ bool DisjunctiveWithTwoItems::Propagate() { return true; } -int DisjunctiveWithTwoItems::RegisterWith(GenericLiteralWatcher *watcher) { +int DisjunctiveWithTwoItems::RegisterWith(GenericLiteralWatcher* watcher) { // This propagator reach the fix point in one pass. 
const int id = watcher->Register(this); helper_->WatchAllTasks(id, watcher); @@ -315,12 +315,12 @@ int DisjunctiveWithTwoItems::RegisterWith(GenericLiteralWatcher *watcher) { } template -CombinedDisjunctive::CombinedDisjunctive(Model *model) +CombinedDisjunctive::CombinedDisjunctive(Model* model) : helper_(model->GetOrCreate()) { helper_->SetTimeDirection(time_direction); task_to_disjunctives_.resize(helper_->NumTasks()); - auto *watcher = model->GetOrCreate(); + auto* watcher = model->GetOrCreate(); const int id = watcher->Register(this); helper_->WatchAllTasks(id, watcher, /*watch_start_max=*/true, /*watch_end_max=*/false); @@ -329,7 +329,7 @@ CombinedDisjunctive::CombinedDisjunctive(Model *model) template void CombinedDisjunctive::AddNoOverlap( - const std::vector &vars) { + const std::vector& vars) { const int index = task_sets_.size(); task_sets_.emplace_back(vars.size()); end_mins_.push_back(kMinIntegerValue); @@ -341,11 +341,11 @@ void CombinedDisjunctive::AddNoOverlap( template bool CombinedDisjunctive::Propagate() { helper_->SetTimeDirection(time_direction); - const auto &task_by_increasing_end_min = helper_->TaskByIncreasingEndMin(); - const auto &task_by_decreasing_start_max = + const auto& task_by_increasing_end_min = helper_->TaskByIncreasingEndMin(); + const auto& task_by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); - for (auto &task_set : task_sets_) task_set.Clear(); + for (auto& task_set : task_sets_) task_set.Clear(); end_mins_.assign(end_mins_.size(), kMinIntegerValue); IntegerValue max_of_end_min = kMinIntegerValue; @@ -421,7 +421,7 @@ bool CombinedDisjunctive::Propagate() { // TODO(user): Maybe factor out the code? It does require a function with a // lot of arguments though. 
helper_->ClearReason(); - const std::vector &sorted_tasks = + const std::vector& sorted_tasks = task_sets_[best_d_index].SortedTasks(); const IntegerValue window_start = sorted_tasks[best_critical_index].start_min; @@ -626,7 +626,7 @@ bool DisjunctiveOverloadChecker::PropagateSubwindow( return true; } -int DisjunctiveOverloadChecker::RegisterWith(GenericLiteralWatcher *watcher) { +int DisjunctiveOverloadChecker::RegisterWith(GenericLiteralWatcher* watcher) { // This propagator reach the fix point in one pass. const int id = watcher->Register(this); helper_->SetTimeDirection(/*is_forward=*/true); @@ -807,7 +807,7 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { // Note that this works as well when IsPresent(t) is false. if (task_set_end_min > helper_->StartMin(t)) { const int critical_index = task_set_.GetCriticalIndex(); - const std::vector &sorted_tasks = + const std::vector& sorted_tasks = task_set_.SortedTasks(); helper_->ClearReason(); @@ -854,7 +854,7 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() { } int DisjunctiveDetectablePrecedences::RegisterWith( - GenericLiteralWatcher *watcher) { + GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_->SetTimeDirection(time_direction_); helper_->WatchAllTasks(id, watcher, /*watch_start_max=*/true, @@ -925,7 +925,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // more general computation to find by how much we can push var? const IntegerValue new_lb = task_set_.ComputeEndMin() + min_offset; if (new_lb > integer_trail_->LowerBound(var)) { - const std::vector &sorted_tasks = task_set_.SortedTasks(); + const std::vector& sorted_tasks = task_set_.SortedTasks(); helper_->ClearReason(); // Fill task_to_arc_index_ since we need it for the reason. 
@@ -958,7 +958,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { return true; } -int DisjunctivePrecedences::RegisterWith(GenericLiteralWatcher *watcher) { +int DisjunctivePrecedences::RegisterWith(GenericLiteralWatcher* watcher) { // This propagator reach the fixed point in one go. const int id = watcher->Register(this); helper_->SetTimeDirection(time_direction_); @@ -970,9 +970,9 @@ int DisjunctivePrecedences::RegisterWith(GenericLiteralWatcher *watcher) { bool DisjunctiveNotLast::Propagate() { helper_->SetTimeDirection(time_direction_); - const auto &task_by_decreasing_start_max = + const auto& task_by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); - const auto &task_by_increasing_shifted_start_min = + const auto& task_by_increasing_shifted_start_min = helper_->TaskByIncreasingShiftedStartMin(); // Split problem into independent part. @@ -1034,15 +1034,15 @@ bool DisjunctiveNotLast::Propagate() { } bool DisjunctiveNotLast::PropagateSubwindow() { - auto &task_by_increasing_end_max = start_max_window_; - for (TaskTime &entry : task_by_increasing_end_max) { + auto& task_by_increasing_end_max = start_max_window_; + for (TaskTime& entry : task_by_increasing_end_max) { entry.time = helper_->EndMax(entry.task_index); } IncrementalSort(task_by_increasing_end_max.begin(), task_by_increasing_end_max.end()); const IntegerValue threshold = task_by_increasing_end_max.back().time; - auto &task_by_increasing_start_max = start_min_window_; + auto& task_by_increasing_start_max = start_min_window_; int queue_size = 0; for (const TaskTime entry : task_by_increasing_start_max) { const int task = entry.task_index; @@ -1101,7 +1101,7 @@ bool DisjunctiveNotLast::PropagateSubwindow() { // Find the largest start-max of the critical tasks (excluding t). The // end-max for t need to be smaller than or equal to this. 
IntegerValue largest_ct_start_max = kMinIntegerValue; - const std::vector &sorted_tasks = task_set_.SortedTasks(); + const std::vector& sorted_tasks = task_set_.SortedTasks(); const int sorted_tasks_size = sorted_tasks.size(); for (int i = critical_index; i < sorted_tasks_size; ++i) { const int ct = sorted_tasks[i].task; @@ -1140,7 +1140,7 @@ bool DisjunctiveNotLast::PropagateSubwindow() { return true; } -int DisjunctiveNotLast::RegisterWith(GenericLiteralWatcher *watcher) { +int DisjunctiveNotLast::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_->WatchAllTasks(id, watcher); watcher->NotifyThatPropagatorMayNotReachFixedPointInOnePass(id); @@ -1361,7 +1361,7 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { return true; } -int DisjunctiveEdgeFinding::RegisterWith(GenericLiteralWatcher *watcher) { +int DisjunctiveEdgeFinding::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_->SetTimeDirection(time_direction_); helper_->WatchAllTasks(id, watcher, /*watch_start_max=*/false, diff --git a/ortools/sat/drat_checker.cc b/ortools/sat/drat_checker.cc index 6c9584f452..492aadfa57 100644 --- a/ortools/sat/drat_checker.cc +++ b/ortools/sat/drat_checker.cc @@ -112,7 +112,7 @@ void DratChecker::DeleteClause(absl::Span clause) { // Temporarily add 'clause' to find if it has been previously added. 
const auto it = clause_set_.find(AddClause(clause)); if (it != clause_set_.end()) { - Clause &existing_clause = clauses_[*it]; + Clause& existing_clause = clauses_[*it]; existing_clause.num_copies -= 1; if (existing_clause.num_copies == 0) { DCHECK(existing_clause.deleted_index == std::numeric_limits::max()); @@ -159,7 +159,7 @@ DratChecker::Status DratChecker::Check(double max_time_in_seconds) { if (time_limit.LimitReached()) { return Status::UNKNOWN; } - const Clause &clause = clauses_[i]; + const Clause& clause = clauses_[i]; // Start watching the literals of the clauses that were deleted just after // this one, and which are now no longer deleted. for (const ClauseIndex j : clause.deleted_clauses) { @@ -205,22 +205,22 @@ DratChecker::Status DratChecker::Check(double max_time_in_seconds) { return Status::VALID; } -std::vector > DratChecker::GetUnsatSubProblem() const { +std::vector> DratChecker::GetUnsatSubProblem() const { return GetClausesNeededForProof(ClauseIndex(0), first_infered_clause_index_); } -std::vector > DratChecker::GetOptimizedProof() const { +std::vector> DratChecker::GetOptimizedProof() const { return GetClausesNeededForProof(first_infered_clause_index_, ClauseIndex(clauses_.size())); } -std::vector > DratChecker::GetClausesNeededForProof( +std::vector> DratChecker::GetClausesNeededForProof( ClauseIndex begin, ClauseIndex end) const { - std::vector > result; + std::vector> result; for (ClauseIndex i = begin; i < end; ++i) { - const Clause &clause = clauses_[i]; + const Clause& clause = clauses_[i]; if (clause.is_needed_for_proof) { - const absl::Span &literals = Literals(clause); + const absl::Span& literals = Literals(clause); result.emplace_back(literals.begin(), literals.end()); if (clause.rat_literal_index != kNoLiteralIndex) { const int rat_literal_clause_index = @@ -234,7 +234,7 @@ std::vector > DratChecker::GetClausesNeededForProof( return result; } -absl::Span DratChecker::Literals(const Clause &clause) const { +absl::Span 
DratChecker::Literals(const Clause& clause) const { return absl::Span( literals_.data() + clause.first_literal_index, clause.num_literals); } @@ -254,7 +254,7 @@ void DratChecker::Init() { for (ClauseIndex clause_index(0); clause_index < clauses_.size(); ++clause_index) { - Clause &clause = clauses_[clause_index]; + Clause& clause = clauses_[clause_index]; if (clause.num_literals >= 2) { // Don't watch the literals of the deleted clauses right away, instead // watch them when these clauses become 'undeleted' in backward checking. @@ -268,7 +268,7 @@ void DratChecker::Init() { } void DratChecker::WatchClause(ClauseIndex clause_index) { - const Literal *clause_literals = + const Literal* clause_literals = literals_.data() + clauses_[clause_index].first_literal_index; watched_literals_[clause_literals[0].Index()].push_back(clause_index); watched_literals_[clause_literals[1].Index()].push_back(clause_index); @@ -286,7 +286,7 @@ bool DratChecker::HasRupProperty(ClauseIndex num_clauses, } for (const ClauseIndex clause_index : single_literal_clauses_) { - const Clause &clause = clauses_[clause_index]; + const Clause& clause = clauses_[clause_index]; // TODO(user): consider ignoring the deletion of single literal clauses // as done in drat-trim. if (clause_index < num_clauses && !clause.IsDeleted(num_clauses)) { @@ -303,7 +303,7 @@ bool DratChecker::HasRupProperty(ClauseIndex num_clauses, while (!(high_priority_literals_to_assign_.empty() && low_priority_literals_to_assign_.empty()) && conflict == kNoClauseIndex) { - std::vector &stack = + std::vector& stack = high_priority_literals_to_assign_.empty() ? 
low_priority_literals_to_assign_ : high_priority_literals_to_assign_; @@ -347,7 +347,7 @@ ClauseIndex DratChecker::AssignAndPropagate(ClauseIndex num_clauses, assignment_source_[literal.Variable()] = source_clause_index; const Literal false_literal = literal.Negated(); - std::vector &watched = watched_literals_[false_literal.Index()]; + std::vector& watched = watched_literals_[false_literal.Index()]; int new_watched_size = 0; ClauseIndex conflict_index = kNoClauseIndex; for (const ClauseIndex clause_index : watched) { @@ -356,14 +356,14 @@ ClauseIndex DratChecker::AssignAndPropagate(ClauseIndex num_clauses, // necessary to check the rest of the proof. continue; } - Clause &clause = clauses_[clause_index]; + Clause& clause = clauses_[clause_index]; DCHECK(!clause.IsDeleted(num_clauses)); if (conflict_index != kNoClauseIndex) { watched[new_watched_size++] = clause_index; continue; } - Literal *clause_literals = literals_.data() + clause.first_literal_index; + Literal* clause_literals = literals_.data() + clause.first_literal_index; const Literal other_watched_literal(LiteralIndex( clause_literals[0].Index().value() ^ clause_literals[1].Index().value() ^ false_literal.Index().value())); @@ -411,8 +411,8 @@ ClauseIndex DratChecker::AssignAndPropagate(ClauseIndex num_clauses, return conflict_index; } -void DratChecker::MarkAsNeededForProof(Clause *clause) { - const auto mark_clause_and_sources = [&](Clause *clause) { +void DratChecker::MarkAsNeededForProof(Clause* clause) { + const auto mark_clause_and_sources = [&](Clause* clause) { clause->is_needed_for_proof = true; for (const Literal literal : Literals(*clause)) { const ClauseIndex source_clause_index = @@ -424,7 +424,7 @@ void DratChecker::MarkAsNeededForProof(Clause *clause) { }; mark_clause_and_sources(clause); for (int i = unit_stack_.size() - 1; i >= 0; --i) { - Clause &unit_clause = clauses_[unit_stack_[i]]; + Clause& unit_clause = clauses_[unit_stack_[i]]; if (unit_clause.tmp_is_needed_for_proof_step) { 
mark_clause_and_sources(&unit_clause); // We can clean this flag here without risking missing clauses needed for @@ -463,8 +463,8 @@ bool ContainsLiteral(absl::Span clause, Literal literal) { bool Resolve(absl::Span clause, absl::Span other_clause, - Literal complementary_literal, VariablesAssignment *assignment, - std::vector *resolvent) { + Literal complementary_literal, VariablesAssignment* assignment, + std::vector* resolvent) { DCHECK(ContainsLiteral(clause, complementary_literal)); DCHECK(ContainsLiteral(other_clause, complementary_literal.Negated())); resolvent->clear(); @@ -498,8 +498,8 @@ bool Resolve(absl::Span clause, return result; } -bool AddProblemClauses(const std::string &file_path, - DratChecker *drat_checker) { +bool AddProblemClauses(const std::string& file_path, + DratChecker* drat_checker) { int line_number = 0; int num_variables = 0; int num_clauses = 0; @@ -547,8 +547,8 @@ bool AddProblemClauses(const std::string &file_path, return result; } -bool AddInferedAndDeletedClauses(const std::string &file_path, - DratChecker *drat_checker) { +bool AddInferedAndDeletedClauses(const std::string& file_path, + DratChecker* drat_checker) { int line_number = 0; bool ends_with_empty_clause = false; std::vector literals; @@ -589,14 +589,14 @@ bool AddInferedAndDeletedClauses(const std::string &file_path, return result; } -bool PrintClauses(const std::string &file_path, SatFormat format, - const std::vector > &clauses, +bool PrintClauses(const std::string& file_path, SatFormat format, + const std::vector>& clauses, int num_variables) { std::ofstream output_stream(file_path, std::ofstream::out); if (format == DIMACS) { output_stream << "p cnf " << num_variables << " " << clauses.size() << "\n"; } - for (const auto &clause : clauses) { + for (const auto& clause : clauses) { for (Literal literal : clause) { output_stream << literal.SignedValue() << " "; } diff --git a/ortools/sat/drat_proof_handler.cc b/ortools/sat/drat_proof_handler.cc index 
1f6e0ce129..08b5ba7075 100644 --- a/ortools/sat/drat_proof_handler.cc +++ b/ortools/sat/drat_proof_handler.cc @@ -25,7 +25,7 @@ namespace sat { DratProofHandler::DratProofHandler() : variable_index_(0), drat_checker_(new DratChecker()) {} -DratProofHandler::DratProofHandler(bool in_binary_format, File *output, +DratProofHandler::DratProofHandler(bool in_binary_format, File* output, bool check) : variable_index_(0), drat_writer_(new DratWriter(in_binary_format, output)) { @@ -35,7 +35,7 @@ DratProofHandler::DratProofHandler(bool in_binary_format, File *output, } void DratProofHandler::ApplyMapping( - const gtl::ITIVector &mapping) { + const gtl::ITIVector& mapping) { gtl::ITIVector new_mapping; for (BooleanVariable v(0); v < mapping.size(); ++v) { const BooleanVariable image = mapping[v]; @@ -106,8 +106,7 @@ void DratProofHandler::MapClause(absl::Span clause) { values_.push_back(original_literal); } - // The sorting is such that new variables appear first. This is important - // for + // The sorting is such that new variables appear first. This is important for // BVA since DRAT-trim only check the RAT property with respect to the first // variable of the clause. 
std::sort(values_.begin(), values_.end(), [](Literal a, Literal b) { diff --git a/ortools/sat/encoding.cc b/ortools/sat/encoding.cc index 3fdd7ec389..66fb67d8e6 100644 --- a/ortools/sat/encoding.cc +++ b/ortools/sat/encoding.cc @@ -32,8 +32,8 @@ EncodingNode::EncodingNode(Literal l) child_b_(nullptr), literals_(1, l) {} -void EncodingNode::InitializeFullNode(int n, EncodingNode *a, EncodingNode *b, - SatSolver *solver) { +void EncodingNode::InitializeFullNode(int n, EncodingNode* a, EncodingNode* b, + SatSolver* solver) { CHECK(literals_.empty()) << "Already initialized"; CHECK_GT(n, 0); const BooleanVariable first_var_index(solver->NumVariables()); @@ -52,8 +52,8 @@ void EncodingNode::InitializeFullNode(int n, EncodingNode *a, EncodingNode *b, for_sorting_ = first_var_index; } -void EncodingNode::InitializeLazyNode(EncodingNode *a, EncodingNode *b, - SatSolver *solver) { +void EncodingNode::InitializeLazyNode(EncodingNode* a, EncodingNode* b, + SatSolver* solver) { CHECK(literals_.empty()) << "Already initialized"; const BooleanVariable first_var_index(solver->NumVariables()); solver->SetNumVariables(solver->NumVariables() + 1); @@ -68,7 +68,7 @@ void EncodingNode::InitializeLazyNode(EncodingNode *a, EncodingNode *b, for_sorting_ = std::min(a->for_sorting_, b->for_sorting_); } -bool EncodingNode::IncreaseCurrentUB(SatSolver *solver) { +bool EncodingNode::IncreaseCurrentUB(SatSolver* solver) { CHECK(!literals_.empty()); if (current_ub() == ub_) return false; literals_.emplace_back(BooleanVariable(solver->NumVariables()), true); @@ -78,7 +78,7 @@ bool EncodingNode::IncreaseCurrentUB(SatSolver *solver) { return true; } -int EncodingNode::Reduce(const SatSolver &solver) { +int EncodingNode::Reduce(const SatSolver& solver) { int i = 0; while (i < literals_.size() && solver.Assignment().LiteralIsTrue(literals_[i])) { @@ -94,7 +94,7 @@ int EncodingNode::Reduce(const SatSolver &solver) { return i; } -void EncodingNode::ApplyUpperBound(int64 upper_bound, SatSolver *solver) 
{ +void EncodingNode::ApplyUpperBound(int64 upper_bound, SatSolver* solver) { if (size() <= upper_bound) return; for (int i = upper_bound; i < size(); ++i) { solver->AddUnitClause(literal(i).Negated()); @@ -103,7 +103,7 @@ void EncodingNode::ApplyUpperBound(int64 upper_bound, SatSolver *solver) { ub_ = lb_ + literals_.size(); } -EncodingNode LazyMerge(EncodingNode *a, EncodingNode *b, SatSolver *solver) { +EncodingNode LazyMerge(EncodingNode* a, EncodingNode* b, SatSolver* solver) { EncodingNode n; n.InitializeLazyNode(a, b, solver); solver->AddBinaryClause(a->literal(0).Negated(), n.literal(0)); @@ -113,9 +113,9 @@ EncodingNode LazyMerge(EncodingNode *a, EncodingNode *b, SatSolver *solver) { return n; } -void IncreaseNodeSize(EncodingNode *node, SatSolver *solver) { +void IncreaseNodeSize(EncodingNode* node, SatSolver* solver) { if (!node->IncreaseCurrentUB(solver)) return; - std::vector to_process; + std::vector to_process; to_process.push_back(node); // Only one side of the constraint is mandatory (the one propagating the ones @@ -126,9 +126,9 @@ void IncreaseNodeSize(EncodingNode *node, SatSolver *solver) { const bool complete_encoding = false; while (!to_process.empty()) { - EncodingNode *n = to_process.back(); - EncodingNode *a = n->child_a(); - EncodingNode *b = n->child_b(); + EncodingNode* n = to_process.back(); + EncodingNode* a = n->child_a(); + EncodingNode* b = n->child_b(); to_process.pop_back(); // Note that since we were able to increase its size, n must have children. 
@@ -209,8 +209,8 @@ void IncreaseNodeSize(EncodingNode *node, SatSolver *solver) { } } -EncodingNode FullMerge(Coefficient upper_bound, EncodingNode *a, - EncodingNode *b, SatSolver *solver) { +EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a, + EncodingNode* b, SatSolver* solver) { EncodingNode n; const int size = std::min(Coefficient(a->size() + b->size()), upper_bound).value(); @@ -260,15 +260,15 @@ EncodingNode FullMerge(Coefficient upper_bound, EncodingNode *a, return n; } -EncodingNode *MergeAllNodesWithDeque(Coefficient upper_bound, - const std::vector &nodes, - SatSolver *solver, - std::deque *repository) { - std::deque dq(nodes.begin(), nodes.end()); +EncodingNode* MergeAllNodesWithDeque(Coefficient upper_bound, + const std::vector& nodes, + SatSolver* solver, + std::deque* repository) { + std::deque dq(nodes.begin(), nodes.end()); while (dq.size() > 1) { - EncodingNode *a = dq.front(); + EncodingNode* a = dq.front(); dq.pop_front(); - EncodingNode *b = dq.front(); + EncodingNode* b = dq.front(); dq.pop_front(); repository->push_back(FullMerge(upper_bound, a, b, solver)); dq.push_back(&repository->back()); @@ -278,20 +278,20 @@ EncodingNode *MergeAllNodesWithDeque(Coefficient upper_bound, namespace { struct SortEncodingNodePointers { - bool operator()(EncodingNode *a, EncodingNode *b) const { return *a < *b; } + bool operator()(EncodingNode* a, EncodingNode* b) const { return *a < *b; } }; } // namespace -EncodingNode *LazyMergeAllNodeWithPQ(const std::vector &nodes, - SatSolver *solver, - std::deque *repository) { - std::priority_queue, +EncodingNode* LazyMergeAllNodeWithPQ(const std::vector& nodes, + SatSolver* solver, + std::deque* repository) { + std::priority_queue, SortEncodingNodePointers> pq(nodes.begin(), nodes.end()); while (pq.size() > 1) { - EncodingNode *a = pq.top(); + EncodingNode* a = pq.top(); pq.pop(); - EncodingNode *b = pq.top(); + EncodingNode* b = pq.top(); pq.pop(); repository->push_back(LazyMerge(a, b, solver)); 
pq.push(&repository->back()); @@ -299,13 +299,13 @@ EncodingNode *LazyMergeAllNodeWithPQ(const std::vector &nodes, return pq.top(); } -std::vector CreateInitialEncodingNodes( - const std::vector &literals, - const std::vector &coeffs, Coefficient *offset, - std::deque *repository) { +std::vector CreateInitialEncodingNodes( + const std::vector& literals, + const std::vector& coeffs, Coefficient* offset, + std::deque* repository) { CHECK_EQ(literals.size(), coeffs.size()); *offset = 0; - std::vector nodes; + std::vector nodes; for (int i = 0; i < literals.size(); ++i) { // We want to maximize the cost when this literal is true. if (coeffs[i] > 0) { @@ -324,11 +324,11 @@ std::vector CreateInitialEncodingNodes( return nodes; } -std::vector CreateInitialEncodingNodes( - const LinearObjective &objective_proto, Coefficient *offset, - std::deque *repository) { +std::vector CreateInitialEncodingNodes( + const LinearObjective& objective_proto, Coefficient* offset, + std::deque* repository) { *offset = 0; - std::vector nodes; + std::vector nodes; for (int i = 0; i < objective_proto.literals_size(); ++i) { const Literal literal(objective_proto.literals(i)); @@ -351,27 +351,27 @@ std::vector CreateInitialEncodingNodes( namespace { -bool EncodingNodeByWeight(const EncodingNode *a, const EncodingNode *b) { +bool EncodingNodeByWeight(const EncodingNode* a, const EncodingNode* b) { return a->weight() < b->weight(); } -bool EncodingNodeByDepth(const EncodingNode *a, const EncodingNode *b) { +bool EncodingNodeByDepth(const EncodingNode* a, const EncodingNode* b) { return a->depth() < b->depth(); } -bool EmptyEncodingNode(const EncodingNode *a) { return a->size() == 0; } +bool EmptyEncodingNode(const EncodingNode* a) { return a->size() == 0; } } // namespace std::vector ReduceNodesAndExtractAssumptions( Coefficient upper_bound, Coefficient stratified_lower_bound, - Coefficient *lower_bound, std::vector *nodes, - SatSolver *solver) { + Coefficient* lower_bound, std::vector* nodes, + 
SatSolver* solver) { // Remove the left-most variables fixed to one from each node. // Also update the lower_bound. Note that Reduce() needs the solver to be // at the root node in order to work. solver->Backtrack(0); - for (EncodingNode *n : *nodes) { + for (EncodingNode* n : *nodes) { *lower_bound += n->Reduce(*solver) * n->weight(); } @@ -379,7 +379,7 @@ std::vector ReduceNodesAndExtractAssumptions( if (upper_bound != kCoefficientMax) { const Coefficient gap = upper_bound - *lower_bound; if (gap <= 0) return {}; - for (EncodingNode *n : *nodes) { + for (EncodingNode* n : *nodes) { n->ApplyUpperBound((gap / n->weight()).value(), solver); } } @@ -407,7 +407,7 @@ std::vector ReduceNodesAndExtractAssumptions( // Extract the assumptions from the nodes. std::vector assumptions; - for (EncodingNode *n : *nodes) { + for (EncodingNode* n : *nodes) { if (n->weight() >= stratified_lower_bound) { assumptions.push_back(n->literal(0).Negated()); } @@ -415,8 +415,8 @@ std::vector ReduceNodesAndExtractAssumptions( return assumptions; } -Coefficient ComputeCoreMinWeight(const std::vector &nodes, - const std::vector &core) { +Coefficient ComputeCoreMinWeight(const std::vector& nodes, + const std::vector& core) { Coefficient min_weight = kCoefficientMax; int index = 0; for (int i = 0; i < core.size(); ++i) { @@ -430,10 +430,10 @@ Coefficient ComputeCoreMinWeight(const std::vector &nodes, return min_weight; } -Coefficient MaxNodeWeightSmallerThan(const std::vector &nodes, +Coefficient MaxNodeWeightSmallerThan(const std::vector& nodes, Coefficient upper_bound) { Coefficient result(0); - for (EncodingNode *n : nodes) { + for (EncodingNode* n : nodes) { CHECK_GT(n->weight(), 0); if (n->weight() < upper_bound) { result = std::max(result, n->weight()); @@ -442,9 +442,9 @@ Coefficient MaxNodeWeightSmallerThan(const std::vector &nodes, return result; } -void ProcessCore(const std::vector &core, Coefficient min_weight, - std::deque *repository, - std::vector *nodes, SatSolver *solver) { 
+void ProcessCore(const std::vector& core, Coefficient min_weight, + std::deque* repository, + std::vector* nodes, SatSolver* solver) { // Backtrack to be able to add new constraints. solver->Backtrack(0); @@ -452,7 +452,7 @@ void ProcessCore(const std::vector &core, Coefficient min_weight, // The core will be reduced at the beginning of the next loop. // Find the associated node, and call IncreaseNodeSize() on it. CHECK(solver->Assignment().LiteralIsFalse(core[0])); - for (EncodingNode *n : *nodes) { + for (EncodingNode* n : *nodes) { if (n->literal(0).Negated() == core[0]) { IncreaseNodeSize(n, solver); return; @@ -465,7 +465,7 @@ void ProcessCore(const std::vector &core, Coefficient min_weight, // resulting EncodingNode at the back. int index = 0; int new_node_index = 0; - std::vector to_merge; + std::vector to_merge; for (int i = 0; i < core.size(); ++i) { // Since the nodes appear in order in the core, we can find the // relevant "objective" variable efficiently with a simple linear scan diff --git a/ortools/sat/encoding.h b/ortools/sat/encoding.h index 42330b3a9f..14e99a3c06 100644 --- a/ortools/sat/encoding.h +++ b/ortools/sat/encoding.h @@ -61,13 +61,13 @@ class EncodingNode { // beeing in [lb, ub = lb + n). The variables are added to the given solver // with the basic implications linking them: // literal(0) >= ... >= literal(n-1) - void InitializeFullNode(int n, EncodingNode *a, EncodingNode *b, - SatSolver *solver); + void InitializeFullNode(int n, EncodingNode* a, EncodingNode* b, + SatSolver* solver); // Creates a "lazy" encoding node representing the sum of a and b. // Only one literals will be created by this operation. Note that no clauses // linking it with a or b are added by this function. - void InitializeLazyNode(EncodingNode *a, EncodingNode *b, SatSolver *solver); + void InitializeLazyNode(EncodingNode* a, EncodingNode* b, SatSolver* solver); // Returns a literal with the meaning 'this node number is > i'. 
// The given i must be in [lb_, current_ub). @@ -83,24 +83,24 @@ class EncodingNode { // Sort by decreasing depth first and then by increasing variable index. // This is meant to be used by the priority queue in MergeAllNodesWithPQ(). - bool operator<(const EncodingNode &other) const { + bool operator<(const EncodingNode& other) const { return depth_ > other.depth_ || (depth_ == other.depth_ && other.for_sorting_ > for_sorting_); } // Creates a new literals and increases current_ub. // Returns false if we were already at the upper bound for this node. - bool IncreaseCurrentUB(SatSolver *solver); + bool IncreaseCurrentUB(SatSolver* solver); // Removes the left-side literals fixed to 1 and returns the number of // literals removed this way. Note that this increases lb_ and reduces the // number of active literals. It also removes any right-side literals fixed to // 0. If such a literal exists, ub is updated accordingly. - int Reduce(const SatSolver &solver); + int Reduce(const SatSolver& solver); // Fix the right-side variables with indices >= to the given upper_bound to // false. - void ApplyUpperBound(int64 upper_bound, SatSolver *solver); + void ApplyUpperBound(int64 upper_bound, SatSolver* solver); void set_weight(Coefficient w) { weight_ = w; } Coefficient weight() const { return weight_; } @@ -109,8 +109,8 @@ class EncodingNode { int lb() const { return lb_; } int current_ub() const { return lb_ + literals_.size(); } int ub() const { return ub_; } - EncodingNode *child_a() const { return child_a_; } - EncodingNode *child_b() const { return child_b_; } + EncodingNode* child_a() const { return child_a_; } + EncodingNode* child_b() const { return child_b_; } private: int depth_; @@ -119,17 +119,18 @@ class EncodingNode { BooleanVariable for_sorting_; Coefficient weight_; - EncodingNode *child_a_; - EncodingNode *child_b_; + EncodingNode* child_a_; + EncodingNode* child_b_; // The literals of this node in order. 
std::vector literals_; }; // Note that we use <= because on 32 bits architecture, the size will actually -// be smaller than 64 bytes. +// be smaller than 64 bytes. One exception is with visual studio on windows, in +// debug mode, where the struct is bigger. #if defined(_M_X64) && defined(_DEBUG) -// In debug std::Vector is 32 +// In debug, with msvc, std::Vector is 32 static_assert(sizeof(EncodingNode) == 72, "ERROR_EncodingNode_is_not_well_compacted"); #else @@ -142,69 +143,69 @@ static_assert(sizeof(EncodingNode) <= 64, // Merges the two given EncodingNodes by creating a new node that corresponds to // the sum of the two given ones. Only the left-most binary variable is created // for the parent node, the other ones will be created later when needed. -EncodingNode LazyMerge(EncodingNode *a, EncodingNode *b, SatSolver *solver); +EncodingNode LazyMerge(EncodingNode* a, EncodingNode* b, SatSolver* solver); // Increases the size of the given node by one. To keep all the needed relations // with its children, we also need to increase their size by one, and so on // recursively. Also adds all the necessary clauses linking the newly added // literals. -void IncreaseNodeSize(EncodingNode *node, SatSolver *solver); +void IncreaseNodeSize(EncodingNode* node, SatSolver* solver); // Merges the two given EncodingNode by creating a new node that corresponds to // the sum of the two given ones. The given upper_bound is interpreted as a // bound on this sum, and allows creating fewer binary variables. -EncodingNode FullMerge(Coefficient upper_bound, EncodingNode *a, - EncodingNode *b, SatSolver *solver); +EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a, + EncodingNode* b, SatSolver* solver); // Merges all the given nodes two by two until there is only one left. Returns // the final node which encodes the sum of all the given nodes. 
-EncodingNode *MergeAllNodesWithDeque(Coefficient upper_bound, - const std::vector &nodes, - SatSolver *solver, - std::deque *repository); +EncodingNode* MergeAllNodesWithDeque(Coefficient upper_bound, + const std::vector& nodes, + SatSolver* solver, + std::deque* repository); // Same as MergeAllNodesWithDeque() but use a priority queue to merge in // priority nodes with smaller sizes. -EncodingNode *LazyMergeAllNodeWithPQ(const std::vector &nodes, - SatSolver *solver, - std::deque *repository); +EncodingNode* LazyMergeAllNodeWithPQ(const std::vector& nodes, + SatSolver* solver, + std::deque* repository); // Returns a vector with one new EncodingNode by variable in the given // objective. Sets the offset to the negated sum of the negative coefficient, // because in this case we negate the literals to have only positive // coefficients. -std::vector CreateInitialEncodingNodes( - const std::vector &literals, - const std::vector &coeffs, Coefficient *offset, - std::deque *repository); -std::vector CreateInitialEncodingNodes( - const LinearObjective &objective_proto, Coefficient *offset, - std::deque *repository); +std::vector CreateInitialEncodingNodes( + const std::vector& literals, + const std::vector& coeffs, Coefficient* offset, + std::deque* repository); +std::vector CreateInitialEncodingNodes( + const LinearObjective& objective_proto, Coefficient* offset, + std::deque* repository); // Reduces the nodes using the now fixed literals, update the lower-bound, and // returns the set of assumptions for the next round of the core-based // algorithm. Returns an empty set of assumptions if everything is fixed. std::vector ReduceNodesAndExtractAssumptions( Coefficient upper_bound, Coefficient stratified_lower_bound, - Coefficient *lower_bound, std::vector *nodes, - SatSolver *solver); + Coefficient* lower_bound, std::vector* nodes, + SatSolver* solver); // Returns the minimum weight of the nodes in the core. 
Note that the literal in // the core must appear in the same order as the one in nodes. -Coefficient ComputeCoreMinWeight(const std::vector &nodes, - const std::vector &core); +Coefficient ComputeCoreMinWeight(const std::vector& nodes, + const std::vector& core); // Returns the maximum node weight under the given upper_bound. Returns zero if // no such weight exist (note that a node weight is strictly positive, so this // make sense). -Coefficient MaxNodeWeightSmallerThan(const std::vector &nodes, +Coefficient MaxNodeWeightSmallerThan(const std::vector& nodes, Coefficient upper_bound); // Updates the encoding using the given core. The literals in the core must // match the order in nodes. -void ProcessCore(const std::vector &core, Coefficient min_weight, - std::deque *repository, - std::vector *nodes, SatSolver *solver); +void ProcessCore(const std::vector& core, Coefficient min_weight, + std::deque* repository, + std::vector* nodes, SatSolver* solver); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/feasibility_pump.cc b/ortools/sat/feasibility_pump.cc index ea6f5f311f..e6a0f89556 100644 --- a/ortools/sat/feasibility_pump.cc +++ b/ortools/sat/feasibility_pump.cc @@ -34,7 +34,7 @@ using glop::RowIndex; const double FeasibilityPump::kCpEpsilon = 1e-4; -FeasibilityPump::FeasibilityPump(Model *model) +FeasibilityPump::FeasibilityPump(Model* model) : sat_parameters_(*(model->GetOrCreate())), time_limit_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), @@ -61,14 +61,14 @@ FeasibilityPump::~FeasibilityPump() { << total_num_simplex_iterations_; } -void FeasibilityPump::AddLinearConstraint(const LinearConstraint &ct) { +void FeasibilityPump::AddLinearConstraint(const LinearConstraint& ct) { // We still create the mirror variable right away though. 
for (const IntegerVariable var : ct.vars) { GetOrCreateMirrorVariable(PositiveVariable(var)); } integer_lp_.push_back(LinearConstraintInternal()); - LinearConstraintInternal &new_ct = integer_lp_.back(); + LinearConstraintInternal& new_ct = integer_lp_.back(); new_ct.lb = ct.lb; new_ct.ub = ct.ub; const int size = ct.vars.size(); @@ -152,7 +152,7 @@ bool FeasibilityPump::Solve() { for (ColIndex col(0); col < lp_data_.num_variables(); ++col) { lp_data_.SetObjectiveCoefficient(col, 0.0); } - for (const auto &term : integer_objective_) { + for (const auto& term : integer_objective_) { lp_data_.SetObjectiveCoefficient(term.first, ToDouble(term.second)); } @@ -220,16 +220,16 @@ void FeasibilityPump::InitializeWorkingLP() { } // Add constraints. - for (const LinearConstraintInternal &ct : integer_lp_) { + for (const LinearConstraintInternal& ct : integer_lp_) { const ConstraintIndex row = lp_data_.CreateNewConstraint(); lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub)); - for (const auto &term : ct.terms) { + for (const auto& term : ct.terms) { lp_data_.SetCoefficient(row, term.first, ToDouble(term.second)); } } // Add objective. - for (const auto &term : integer_objective_) { + for (const auto& term : integer_objective_) { lp_data_.SetObjectiveCoefficient(term.first, ToDouble(term.second)); } @@ -321,10 +321,10 @@ void FeasibilityPump::L1DistanceMinimize() { (1 - 2 * integer_solution_[col.value()]); new_obj_coeffs[col.value()] = objective_coefficient; } else { // The variable is integer. - // Update the bounds of the constraints added in - // InitializeIntegerVariables() (see there for more details): - // d_i - x_i >= -round(x'_i) - // d_i + x_i >= +round(x'_i) + // Update the bounds of the constraints added in + // InitializeIntegerVariables() (see there for more details): + // d_i - x_i >= -round(x'_i) + // d_i + x_i >= +round(x'_i) // TODO(user): We change both the objective and the bounds, thus // breaking the incrementality. 
Handle integer variables differently, @@ -394,7 +394,7 @@ bool FeasibilityPump::SolveLp() { // Compute the objective value. lp_objective_ = 0; - for (const auto &term : integer_objective_) { + for (const auto& term : integer_objective_) { lp_objective_ += lp_solution_[term.first.value()] * term.second.value(); } lp_solution_is_integer_ = lp_solution_fractionality_ < kCpEpsilon; @@ -551,8 +551,8 @@ bool FeasibilityPump::PropagationRounding() { // Compute an order in which we will fix variables and do the propagation. std::vector rounding_order; { - std::vector > binary_fractionality_vars; - std::vector > general_fractionality_vars; + std::vector> binary_fractionality_vars; + std::vector> general_fractionality_vars; for (int i = 0; i < lp_solution_.size(); ++i) { const double fractionality = std::abs(std::round(lp_solution_[i]) - lp_solution_[i]); @@ -579,7 +579,7 @@ bool FeasibilityPump::PropagationRounding() { if (time_limit_->LimitReached()) return false; // Get the bounds of the variable. const IntegerVariable var = integer_variables_[var_index]; - const Domain &domain = (*domains_)[var]; + const Domain& domain = (*domains_)[var]; const IntegerValue lb = integer_trail_->LowerBound(var); const IntegerValue ub = integer_trail_->UpperBound(var); @@ -675,7 +675,7 @@ bool FeasibilityPump::PropagationRounding() { void FeasibilityPump::FillIntegerSolutionStats() { // Compute the objective value. 
integer_solution_objective_ = 0; - for (const auto &term : integer_objective_) { + for (const auto& term : integer_objective_) { integer_solution_objective_ += integer_solution_[term.first.value()] * term.second.value(); } @@ -685,7 +685,7 @@ void FeasibilityPump::FillIntegerSolutionStats() { integer_solution_infeasibility_ = 0; for (RowIndex i(0); i < integer_lp_.size(); ++i) { int64 activity = 0; - for (const auto &term : integer_lp_[i].terms) { + for (const auto& term : integer_lp_[i].terms) { const int64 prod = CapProd(integer_solution_[term.first.value()], term.second.value()); if (prod <= kint64min || prod >= kint64max) { diff --git a/ortools/sat/feasibility_pump.h b/ortools/sat/feasibility_pump.h index e28ed9835c..8721d65461 100644 --- a/ortools/sat/feasibility_pump.h +++ b/ortools/sat/feasibility_pump.h @@ -30,7 +30,7 @@ namespace sat { class FeasibilityPump { public: - explicit FeasibilityPump(Model *model); + explicit FeasibilityPump(Model* model); ~FeasibilityPump(); typedef glop::RowIndex ConstraintIndex; @@ -40,7 +40,7 @@ class FeasibilityPump { } // Add a new linear constraint to this LP. - void AddLinearConstraint(const LinearConstraint &ct); + void AddLinearConstraint(const LinearConstraint& ct); // Set the coefficient of the variable in the objective. Calling it twice will // overwrite the previous value. Note that this doesn't set the objective @@ -129,8 +129,7 @@ class FeasibilityPump { double GetVariableValueAtCpScale(glop::ColIndex var); // Shortcut for an integer linear expression type. - using LinearExpression = - std::vector >; + using LinearExpression = std::vector>; // Gets or creates an LP variable that mirrors a model variable. // The variable should be a positive reference. @@ -191,15 +190,15 @@ class FeasibilityPump { bool objective_is_defined_ = false; // Singletons from Model. 
- const SatParameters &sat_parameters_; - TimeLimit *time_limit_; - IntegerTrail *integer_trail_; - Trail *trail_; - IntegerEncoder *integer_encoder_; - SharedIncompleteSolutionManager *incomplete_solutions_; - SatSolver *sat_solver_; - IntegerDomains *domains_; - const CpModelMapping *mapping_; + const SatParameters& sat_parameters_; + TimeLimit* time_limit_; + IntegerTrail* integer_trail_; + Trail* trail_; + IntegerEncoder* integer_encoder_; + SharedIncompleteSolutionManager* incomplete_solutions_; + SatSolver* sat_solver_; + IntegerDomains* domains_; + const CpModelMapping* mapping_; // Last OPTIMAL/Feasible solution found by a call to the underlying LP solver. bool lp_solution_is_set_ = false; diff --git a/ortools/sat/implied_bounds.cc b/ortools/sat/implied_bounds.cc index caf2a70287..4c3068f053 100644 --- a/ortools/sat/implied_bounds.cc +++ b/ortools/sat/implied_bounds.cc @@ -144,7 +144,7 @@ void ImpliedBounds::Add(Literal literal, IntegerLiteral integer_literal) { } } -const std::vector &ImpliedBounds::GetImpliedBounds( +const std::vector& ImpliedBounds::GetImpliedBounds( IntegerVariable var) { if (var >= var_to_bounds_.size()) return empty_implied_bounds_; @@ -153,11 +153,11 @@ const std::vector &ImpliedBounds::GetImpliedBounds( // TODO(user): Check no duplicate and remove old entry if the enforcement // is tighter. 
int new_size = 0; - std::vector &ref = var_to_bounds_[var]; + std::vector& ref = var_to_bounds_[var]; const IntegerValue level_zero_lb = std::max( level_zero_lower_bounds_[var], integer_trail_->LevelZeroLowerBound(var)); level_zero_lower_bounds_[var] = level_zero_lb; - for (const ImpliedBoundEntry &entry : ref) { + for (const ImpliedBoundEntry& entry : ref) { if (entry.lower_bound <= level_zero_lb) continue; ref[new_size++] = entry; } diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 01a6f4f969..ce4c881b62 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -40,7 +40,7 @@ IntegerLiteral AffineExpression::LowerOrEqual(IntegerValue bound) const { } std::vector NegationOf( - const std::vector &vars) { + const std::vector& vars) { std::vector result(vars.size()); for (int i = 0; i < vars.size(); ++i) { result[i] = NegationOf(vars[i]); @@ -102,7 +102,7 @@ bool IntegerEncoder::VariableIsFullyEncoded(IntegerVariable var) const { // TODO(user): Comparing the size might be enough, but we want to be always // valid even if either (*domains_[var]) or PartialDomainEncoding(var) are // not properly synced because the propagation is not finished. 
- const auto &ref = equality_by_var_[index]; + const auto& ref = equality_by_var_[index]; int i = 0; for (const ClosedInterval interval : (*domains_)[var]) { for (int64 v = interval.start; v <= interval.end; ++v) { @@ -130,7 +130,7 @@ IntegerEncoder::PartialDomainEncoding(IntegerVariable var) const { if (index >= equality_by_var_.size()) return {}; int new_size = 0; - std::vector &ref = equality_by_var_[index]; + std::vector& ref = equality_by_var_[index]; for (int i = 0; i < ref.size(); ++i) { const ValueLiteralPair pair = ref[i]; if (sat_solver_->Assignment().LiteralIsFalse(pair.literal)) continue; @@ -148,7 +148,7 @@ IntegerEncoder::PartialDomainEncoding(IntegerVariable var) const { std::vector result = ref; if (!VariableIsPositive(var)) { std::reverse(result.begin(), result.end()); - for (ValueLiteralPair &ref : result) ref.value = -ref.value; + for (ValueLiteralPair& ref : result) ref.value = -ref.value; } return result; } @@ -157,7 +157,7 @@ IntegerEncoder::PartialDomainEncoding(IntegerVariable var) const { // use twice as much implication (2 by literals) instead of only one between // consecutive literals. 
void IntegerEncoder::AddImplications( - const std::map &map, + const std::map& map, std::map::const_iterator it, Literal associated_lit) { if (!add_implications_) return; @@ -183,7 +183,7 @@ void IntegerEncoder::AddImplications( void IntegerEncoder::AddAllImplicationsBetweenAssociatedLiterals() { CHECK_EQ(0, sat_solver_->CurrentDecisionLevel()); add_implications_ = true; - for (const std::map &encoding : encoding_by_var_) { + for (const std::map& encoding : encoding_by_var_) { LiteralIndex previous = kNoLiteralIndex; for (const auto value_literal : encoding) { const Literal lit = value_literal.second; @@ -204,7 +204,7 @@ std::pair IntegerEncoder::Canonicalize( CHECK_GE(before, (*domains_)[var].Min()); CHECK_LE(after, (*domains_)[var].Max()); int64 previous = kint64min; - for (const ClosedInterval &interval : (*domains_)[var]) { + for (const ClosedInterval& interval : (*domains_)[var]) { if (before > previous && before < interval.start) before = previous; if (after > previous && after < interval.start) after = interval.start; if (after <= interval.end) break; @@ -272,7 +272,7 @@ Literal IntegerEncoder::GetOrCreateLiteralAssociatedToEquality( // Check for trivial true/false literal to avoid creating variable for no // reasons. - const Domain &domain = (*domains_)[var]; + const Domain& domain = (*domains_)[var]; if (!domain.Contains(value.value())) return GetFalseLiteral(); if (value == domain.Min() && value == domain.Max()) { AssociateToIntegerEqualValue(GetTrueLiteral(), var, value); @@ -296,7 +296,7 @@ Literal IntegerEncoder::GetOrCreateLiteralAssociatedToEquality( void IntegerEncoder::AssociateToIntegerLiteral(Literal literal, IntegerLiteral i_lit) { - const auto &domain = (*domains_)[i_lit.var]; + const auto& domain = (*domains_)[i_lit.var]; const IntegerValue min(domain.Min()); const IntegerValue max(domain.Max()); if (i_lit.bound <= min) { @@ -326,7 +326,7 @@ void IntegerEncoder::AssociateToIntegerEqualValue(Literal literal, // Detect literal view. 
Note that the same literal can be associated to more // than one variable, and thus already have a view. We don't change it in // this case. - const Domain &domain = (*domains_)[var]; + const Domain& domain = (*domains_)[var]; if (value == 1 && domain.Min() >= 0 && domain.Max() <= 1) { if (literal.Index() >= literal_view_.size()) { literal_view_.resize(literal.Index().value() + 1, kNoIntegerVariable); @@ -435,7 +435,7 @@ void IntegerEncoder::HalfAssociateGivenLiteral(IntegerLiteral i_lit, if (i_lit.var >= encoding_by_var_.size()) { encoding_by_var_.resize(i_lit.var.value() + 1); } - auto &var_encoding = encoding_by_var_[i_lit.var]; + auto& var_encoding = encoding_by_var_[i_lit.var]; auto insert_result = var_encoding.insert({i_lit.bound, literal}); if (insert_result.second) { // New item. AddImplications(var_encoding, insert_result.first, literal); @@ -460,24 +460,24 @@ void IntegerEncoder::HalfAssociateGivenLiteral(IntegerLiteral i_lit, bool IntegerEncoder::LiteralIsAssociated(IntegerLiteral i) const { if (i.var >= encoding_by_var_.size()) return false; - const std::map &encoding = encoding_by_var_[i.var]; + const std::map& encoding = encoding_by_var_[i.var]; return encoding.find(i.bound) != encoding.end(); } LiteralIndex IntegerEncoder::GetAssociatedLiteral(IntegerLiteral i) const { if (i.var >= encoding_by_var_.size()) return kNoLiteralIndex; - const std::map &encoding = encoding_by_var_[i.var]; + const std::map& encoding = encoding_by_var_[i.var]; const auto result = encoding.find(i.bound); if (result == encoding.end()) return kNoLiteralIndex; return result->second.Index(); } LiteralIndex IntegerEncoder::SearchForLiteralAtOrBefore( - IntegerLiteral i, IntegerValue *bound) const { + IntegerLiteral i, IntegerValue* bound) const { // We take the element before the upper_bound() which is either the encoding // of i if it already exists, or the encoding just before it. 
if (i.var >= encoding_by_var_.size()) return kNoLiteralIndex; - const std::map &encoding = encoding_by_var_[i.var]; + const std::map& encoding = encoding_by_var_[i.var]; auto after_it = encoding.upper_bound(i.bound); if (after_it == encoding.begin()) return kNoLiteralIndex; --after_it; @@ -492,9 +492,9 @@ IntegerTrail::~IntegerTrail() { } } -bool IntegerTrail::Propagate(Trail *trail) { +bool IntegerTrail::Propagate(Trail* trail) { const int level = trail->CurrentDecisionLevel(); - for (ReversibleInterface *rev : reversible_classes_) rev->SetLevel(level); + for (ReversibleInterface* rev : reversible_classes_) rev->SetLevel(level); // Make sure that our internal "integer_search_levels_" size matches the // sat decision levels. At the level zero, integer_search_levels_ should @@ -542,10 +542,10 @@ bool IntegerTrail::Propagate(Trail *trail) { return true; } -void IntegerTrail::Untrail(const Trail &trail, int literal_trail_index) { +void IntegerTrail::Untrail(const Trail& trail, int literal_trail_index) { ++num_untrails_; const int level = trail.CurrentDecisionLevel(); - for (ReversibleInterface *rev : reversible_classes_) rev->SetLevel(level); + for (ReversibleInterface* rev : reversible_classes_) rev->SetLevel(level); var_to_current_lb_interval_index_.SetLevel(level); propagation_trail_index_ = std::min(propagation_trail_index_, literal_trail_index); @@ -563,7 +563,7 @@ void IntegerTrail::Untrail(const Trail &trail, int literal_trail_index) { CHECK_LE(target, integer_trail_.size()); for (int index = integer_trail_.size() - 1; index >= target; --index) { - const TrailEntry &entry = integer_trail_[index]; + const TrailEntry& entry = integer_trail_[index]; if (entry.var < 0) continue; // entry used by EnqueueLiteral(). 
vars_[entry.var].current_trail_index = entry.prev_trail_index; vars_[entry.var].current_bound = @@ -626,13 +626,13 @@ IntegerVariable IntegerTrail::AddIntegerVariable(IntegerValue lower_bound, var_trail_index_cache_.resize(vars_.size(), integer_trail_.size()); tmp_var_to_trail_index_in_queue_.resize(vars_.size(), 0); - for (SparseBitset *w : watchers_) { + for (SparseBitset* w : watchers_) { w->Resize(NumIntegerVariables()); } return i; } -IntegerVariable IntegerTrail::AddIntegerVariable(const Domain &domain) { +IntegerVariable IntegerTrail::AddIntegerVariable(const Domain& domain) { CHECK(!domain.IsEmpty()); const IntegerVariable var = AddIntegerVariable(IntegerValue(domain.Min()), IntegerValue(domain.Max())); @@ -640,14 +640,14 @@ IntegerVariable IntegerTrail::AddIntegerVariable(const Domain &domain) { return var; } -const Domain &IntegerTrail::InitialVariableDomain(IntegerVariable var) const { +const Domain& IntegerTrail::InitialVariableDomain(IntegerVariable var) const { return (*domains_)[var]; } bool IntegerTrail::UpdateInitialDomain(IntegerVariable var, Domain domain) { CHECK_EQ(trail_->CurrentDecisionLevel(), 0); - const Domain &old_domain = InitialVariableDomain(var); + const Domain& old_domain = InitialVariableDomain(var); domain = domain.IntersectionWith(old_domain); if (old_domain == domain) return true; @@ -760,7 +760,7 @@ int IntegerTrail::FindLowestTrailIndexThatExplainBound( { const int cached_index = var_trail_index_cache_[i_lit.var]; if (cached_index < trail_index) { - const TrailEntry &entry = integer_trail_[cached_index]; + const TrailEntry& entry = integer_trail_[cached_index]; if (entry.var == i_lit.var && entry.bound >= i_lit.bound) { trail_index = cached_index; } @@ -772,7 +772,7 @@ int IntegerTrail::FindLowestTrailIndexThatExplainBound( if (trail_index >= var_trail_index_cache_threshold_) { var_trail_index_cache_[i_lit.var] = trail_index; } - const TrailEntry &entry = integer_trail_[trail_index]; + const TrailEntry& entry = 
integer_trail_[trail_index]; if (entry.bound == i_lit.bound) return trail_index; if (entry.bound < i_lit.bound) return prev_trail_index; prev_trail_index = trail_index; @@ -783,7 +783,7 @@ int IntegerTrail::FindLowestTrailIndexThatExplainBound( // TODO(user): Get rid of this function and only keep the trail index one? void IntegerTrail::RelaxLinearReason( IntegerValue slack, absl::Span coeffs, - std::vector *reason) const { + std::vector* reason) const { CHECK_GE(slack, 0); if (slack == 0) return; const int size = reason->size(); @@ -806,7 +806,7 @@ void IntegerTrail::RelaxLinearReason( void IntegerTrail::AppendRelaxedLinearReason( IntegerValue slack, absl::Span coeffs, absl::Span vars, - std::vector *reason) const { + std::vector* reason) const { tmp_indices_.clear(); for (const IntegerVariable var : vars) { tmp_indices_.push_back(vars_[var].current_trail_index); @@ -820,7 +820,7 @@ void IntegerTrail::AppendRelaxedLinearReason( void IntegerTrail::RelaxLinearReason(IntegerValue slack, absl::Span coeffs, - std::vector *trail_indices) const { + std::vector* trail_indices) const { DCHECK_GT(slack, 0); DCHECK(relax_heap_.empty()); @@ -847,7 +847,7 @@ void IntegerTrail::RelaxLinearReason(IntegerValue slack, // This is a bit hacky, but when it is used from MergeReasonIntoInternal(), // we never relax a reason that will not be expanded because it is already // part of the current conflict. - const TrailEntry &entry = integer_trail_[index]; + const TrailEntry& entry = integer_trail_[index]; if (entry.var != kNoIntegerVariable && index <= tmp_var_to_trail_index_in_queue_[entry.var]) { (*trail_indices)[new_size++] = index; @@ -855,7 +855,7 @@ void IntegerTrail::RelaxLinearReason(IntegerValue slack, } // Note that both terms of the product are positive. 
- const TrailEntry &previous_entry = integer_trail_[entry.prev_trail_index]; + const TrailEntry& previous_entry = integer_trail_[entry.prev_trail_index]; const int64 diff = CapProd(coeff.value(), (entry.bound - previous_entry.bound).value()); if (diff > slack) { @@ -889,14 +889,14 @@ void IntegerTrail::RelaxLinearReason(IntegerValue slack, trail_indices->push_back(index); continue; } - const TrailEntry &entry = integer_trail_[index]; + const TrailEntry& entry = integer_trail_[index]; if (entry.var != kNoIntegerVariable && index <= tmp_var_to_trail_index_in_queue_[entry.var]) { trail_indices->push_back(index); continue; } - const TrailEntry &previous_entry = integer_trail_[entry.prev_trail_index]; + const TrailEntry& previous_entry = integer_trail_[entry.prev_trail_index]; const int64 diff = CapProd(heap_entry.coeff.value(), (entry.bound - previous_entry.bound).value()); if (diff > slack) { @@ -909,14 +909,14 @@ void IntegerTrail::RelaxLinearReason(IntegerValue slack, // If we aborted early because of the slack, we need to push all remaining // indices back into the reason. 
- for (const RelaxHeapEntry &entry : relax_heap_) { + for (const RelaxHeapEntry& entry : relax_heap_) { trail_indices->push_back(entry.index); } relax_heap_.clear(); } void IntegerTrail::RemoveLevelZeroBounds( - std::vector *reason) const { + std::vector* reason) const { int new_size = 0; for (const IntegerLiteral literal : *reason) { if (literal.bound <= LevelZeroLowerBound(literal.var)) continue; @@ -925,16 +925,16 @@ void IntegerTrail::RemoveLevelZeroBounds( reason->resize(new_size); } -std::vector *IntegerTrail::InitializeConflict( - IntegerLiteral integer_literal, const LazyReasonFunction &lazy_reason, +std::vector* IntegerTrail::InitializeConflict( + IntegerLiteral integer_literal, const LazyReasonFunction& lazy_reason, absl::Span literals_reason, absl::Span bounds_reason) { DCHECK(tmp_queue_.empty()); - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); if (lazy_reason == nullptr) { conflict->assign(literals_reason.begin(), literals_reason.end()); const int num_vars = vars_.size(); - for (const IntegerLiteral &literal : bounds_reason) { + for (const IntegerLiteral& literal : bounds_reason) { const int trail_index = FindLowestTrailIndexThatExplainBound(literal); if (trail_index >= num_vars) tmp_queue_.push_back(trail_index); } @@ -1008,7 +1008,7 @@ bool IntegerTrail::Enqueue(IntegerLiteral i_lit, bool IntegerTrail::ReasonIsValid( absl::Span literal_reason, absl::Span integer_reason) { - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); for (const Literal lit : literal_reason) { if (!assignment.LiteralIsFalse(lit)) return false; } @@ -1097,8 +1097,8 @@ void IntegerTrail::EnqueueLiteralInternal( integer_reason.begin(), integer_reason.end()); } - integer_trail_.push_back({/*bound=*/ - IntegerValue(0), /*var=*/kNoIntegerVariable, + integer_trail_.push_back({/*bound=*/IntegerValue(0), + /*var=*/kNoIntegerVariable, 
/*prev_trail_index=*/-1, /*reason_index=*/reason_index}); @@ -1185,7 +1185,7 @@ bool IntegerTrail::EnqueueInternal( // Note: The literals in the reason are not necessarily canonical, but then // we always map these to enqueued literals during conflict resolution. if ((*domains_)[var].NumIntervals() > 1) { - const auto &domain = (*domains_)[var]; + const auto& domain = (*domains_)[var]; int index = var_to_current_lb_interval_index_.FindOrDie(var); const int size = domain.NumIntervals(); while (index < size && i_lit.bound > domain[index].end) { @@ -1208,7 +1208,7 @@ bool IntegerTrail::EnqueueInternal( Literal(is_ignored_literals_[var]))) { // Note that we want only one call to MergeReasonIntoInternal() for // efficiency and a potential smaller reason. - auto *conflict = InitializeConflict(i_lit, lazy_reason, literal_reason, + auto* conflict = InitializeConflict(i_lit, lazy_reason, literal_reason, integer_reason); if (IsOptional(var)) { conflict->push_back(Literal(is_ignored_literals_[var])); @@ -1238,7 +1238,7 @@ bool IntegerTrail::EnqueueInternal( &lazy_reason_trail_indices_); std::vector temp; for (const int trail_index : lazy_reason_trail_indices_) { - const TrailEntry &entry = integer_trail_[trail_index]; + const TrailEntry& entry = integer_trail_[trail_index]; temp.push_back(IntegerLiteral(entry.var, entry.bound)); } EnqueueLiteral(is_ignored, lazy_reason_literals_, temp); @@ -1276,7 +1276,7 @@ bool IntegerTrail::EnqueueInternal( } // Notify the watchers. 
- for (SparseBitset *bitset : watchers_) { + for (SparseBitset* bitset : watchers_) { bitset->Set(i_lit.var); } @@ -1298,7 +1298,7 @@ bool IntegerTrail::EnqueueInternal( if (literal_index != kNoLiteralIndex) { const Literal to_enqueue = Literal(literal_index); if (trail_->Assignment().LiteralIsFalse(to_enqueue)) { - auto *conflict = InitializeConflict(i_lit, lazy_reason, literal_reason, + auto* conflict = InitializeConflict(i_lit, lazy_reason, literal_reason, integer_reason); conflict->push_back(to_enqueue); MergeReasonIntoInternal(conflict); @@ -1376,8 +1376,8 @@ bool IntegerTrail::EnqueueInternal( } const int prev_trail_index = vars_[i_lit.var].current_trail_index; - integer_trail_.push_back({/*bound=*/ - i_lit.bound, /*var=*/i_lit.var, + integer_trail_.push_back({/*bound=*/i_lit.bound, + /*var=*/i_lit.var, /*prev_trail_index=*/prev_trail_index, /*reason_index=*/reason_index}); @@ -1404,7 +1404,7 @@ bool IntegerTrail::EnqueueAssociatedIntegerLiteral(IntegerLiteral i_lit, } // Notify the watchers. 
- for (SparseBitset *bitset : watchers_) { + for (SparseBitset* bitset : watchers_) { bitset->Set(i_lit.var); } @@ -1429,8 +1429,8 @@ bool IntegerTrail::EnqueueAssociatedIntegerLiteral(IntegerLiteral i_lit, literals_reason_buffer_.push_back(literal_reason.Negated()); const int prev_trail_index = vars_[i_lit.var].current_trail_index; - integer_trail_.push_back({/*bound=*/ - i_lit.bound, /*var=*/i_lit.var, + integer_trail_.push_back({/*bound=*/i_lit.bound, + /*var=*/i_lit.var, /*prev_trail_index=*/prev_trail_index, /*reason_index=*/reason_index}); @@ -1442,7 +1442,7 @@ bool IntegerTrail::EnqueueAssociatedIntegerLiteral(IntegerLiteral i_lit, void IntegerTrail::ComputeLazyReasonIfNeeded(int trail_index) const { const int reason_index = integer_trail_[trail_index].reason_index; if (reason_index == -1) { - const TrailEntry &entry = integer_trail_[trail_index]; + const TrailEntry& entry = integer_trail_[trail_index]; const IntegerLiteral literal(entry.var, entry.bound); lazy_reasons_[trail_index](literal, trail_index, &lazy_reason_literals_, &lazy_reason_trail_indices_); @@ -1491,7 +1491,7 @@ absl::Span IntegerTrail::Dependencies(int trail_index) const { } void IntegerTrail::AppendLiteralsReason(int trail_index, - std::vector *output) const { + std::vector* output) const { CHECK_GE(trail_index, vars_.size()); const int reason_index = integer_trail_[trail_index].reason_index; if (reason_index == -1) { @@ -1526,10 +1526,10 @@ std::vector IntegerTrail::ReasonFor(IntegerLiteral literal) const { // TODO(user): If this is called many time on the same variables, it could be // made faster by using some caching mecanism. 
void IntegerTrail::MergeReasonInto(absl::Span literals, - std::vector *output) const { + std::vector* output) const { DCHECK(tmp_queue_.empty()); const int num_vars = vars_.size(); - for (const IntegerLiteral &literal : literals) { + for (const IntegerLiteral& literal : literals) { const int trail_index = FindLowestTrailIndexThatExplainBound(literal); // Any indices lower than that means that there is no reason needed. @@ -1541,7 +1541,7 @@ void IntegerTrail::MergeReasonInto(absl::Span literals, // This will expand the reason of the IntegerLiteral already in tmp_queue_ until // everything is explained in term of Literal. -void IntegerTrail::MergeReasonIntoInternal(std::vector *output) const { +void IntegerTrail::MergeReasonIntoInternal(std::vector* output) const { // All relevant trail indices will be >= vars_.size(), so we can safely use // zero to means that no literal refering to this variable is in the queue. DCHECK(std::all_of(tmp_var_to_trail_index_in_queue_.begin(), @@ -1558,7 +1558,7 @@ void IntegerTrail::MergeReasonIntoInternal(std::vector *output) const { for (const int trail_index : tmp_queue_) { DCHECK_GE(trail_index, vars_.size()); DCHECK_LT(trail_index, integer_trail_.size()); - const TrailEntry &entry = integer_trail_[trail_index]; + const TrailEntry& entry = integer_trail_[trail_index]; tmp_var_to_trail_index_in_queue_[entry.var] = std::max(tmp_var_to_trail_index_in_queue_[entry.var], trail_index); } @@ -1573,7 +1573,7 @@ void IntegerTrail::MergeReasonIntoInternal(std::vector *output) const { tmp_to_clear_.clear(); while (!tmp_queue_.empty()) { const int trail_index = tmp_queue_.front(); - const TrailEntry &entry = integer_trail_[trail_index]; + const TrailEntry& entry = integer_trail_[trail_index]; std::pop_heap(tmp_queue_.begin(), tmp_queue_.end()); tmp_queue_.pop_back(); @@ -1634,7 +1634,7 @@ void IntegerTrail::MergeReasonIntoInternal(std::vector *output) const { for (const int next_trail_index : Dependencies(trail_index)) { if (next_trail_index 
< 0) break; DCHECK_LT(next_trail_index, trail_index); - const TrailEntry &next_entry = integer_trail_[next_trail_index]; + const TrailEntry& next_entry = integer_trail_[next_trail_index]; // Only add literals that are not "implied" by the ones already present. // For instance, do not add (x >= 4) if we already have (x >= 7). This @@ -1663,10 +1663,10 @@ void IntegerTrail::MergeReasonIntoInternal(std::vector *output) const { } } -absl::Span IntegerTrail::Reason(const Trail &trail, +absl::Span IntegerTrail::Reason(const Trail& trail, int trail_index) const { const int index = boolean_trail_index_to_integer_one_[trail_index]; - std::vector *reason = trail.GetEmptyVectorToStoreReason(trail_index); + std::vector* reason = trail.GetEmptyVectorToStoreReason(trail_index); added_variables_.ClearAndResize(BooleanVariable(trail_->NumVariables())); ComputeLazyReasonIfNeeded(index); @@ -1683,13 +1683,13 @@ absl::Span IntegerTrail::Reason(const Trail &trail, // TODO(user): Implement a dense version if there is more trail entries // than variables! -void IntegerTrail::AppendNewBounds(std::vector *output) const { +void IntegerTrail::AppendNewBounds(std::vector* output) const { tmp_marked_.ClearAndResize(IntegerVariable(vars_.size())); // In order to push the best bound for each variable, we loop backward. 
const int end = vars_.size(); for (int i = integer_trail_.size(); --i >= end;) { - const TrailEntry &entry = integer_trail_[i]; + const TrailEntry& entry = integer_trail_[i]; if (entry.var == kNoIntegerVariable) continue; if (tmp_marked_[entry.var]) continue; @@ -1698,7 +1698,7 @@ void IntegerTrail::AppendNewBounds(std::vector *output) const { } } -GenericLiteralWatcher::GenericLiteralWatcher(Model *model) +GenericLiteralWatcher::GenericLiteralWatcher(Model* model) : SatPropagator("GenericLiteralWatcher"), time_limit_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), @@ -1715,7 +1715,7 @@ GenericLiteralWatcher::GenericLiteralWatcher(Model *model) queue_by_priority_.resize(2); // Because default priority is 1. } -void GenericLiteralWatcher::UpdateCallingNeeds(Trail *trail) { +void GenericLiteralWatcher::UpdateCallingNeeds(Trail* trail) { // Process any new Literal on the trail. while (propagation_trail_index_ < trail->Index()) { const Literal literal = (*trail)[propagation_trail_index_++]; @@ -1746,9 +1746,9 @@ void GenericLiteralWatcher::UpdateCallingNeeds(Trail *trail) { } if (trail->CurrentDecisionLevel() == 0) { - const std::vector &modified_vars = + const std::vector& modified_vars = modified_vars_.PositionsSetAtLeastOnce(); - for (const auto &callback : level_zero_modified_variable_callback_) { + for (const auto& callback : level_zero_modified_variable_callback_) { callback(modified_vars); } } @@ -1756,7 +1756,7 @@ void GenericLiteralWatcher::UpdateCallingNeeds(Trail *trail) { modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables()); } -bool GenericLiteralWatcher::Propagate(Trail *trail) { +bool GenericLiteralWatcher::Propagate(Trail* trail) { // Only once per call to Propagate(), if we are at level zero, we might want // to call propagators even if the bounds didn't change. 
const int level = trail->CurrentDecisionLevel(); @@ -1785,7 +1785,7 @@ bool GenericLiteralWatcher::Propagate(Trail *trail) { if (time_limit_->LimitReached()) break; } - std::deque &queue = queue_by_priority_[priority]; + std::deque& queue = queue_by_priority_[priority]; while (!queue.empty()) { const int id = queue.front(); current_id_ = id; @@ -1801,11 +1801,11 @@ bool GenericLiteralWatcher::Propagate(Trail *trail) { id_to_level_at_last_call_[id] = level; id_to_greatest_common_level_since_last_call_.MutableRef(IdType(id)) = level; - for (ReversibleInterface *rev : id_to_reversible_classes_[id]) { + for (ReversibleInterface* rev : id_to_reversible_classes_[id]) { if (low < high) rev->SetLevel(low); if (level > low) rev->SetLevel(level); } - for (int *rev_int : id_to_reversible_ints_[id]) { + for (int* rev_int : id_to_reversible_ints_[id]) { rev_int_repository_->SaveState(rev_int); } } @@ -1816,7 +1816,7 @@ bool GenericLiteralWatcher::Propagate(Trail *trail) { const int64 old_boolean_timestamp = trail->Index(); // TODO(user): Maybe just provide one function Propagate(watch_indices) ? - std::vector &watch_indices_ref = id_to_watch_indices_[id]; + std::vector& watch_indices_ref = id_to_watch_indices_[id]; const bool result = watch_indices_ref.empty() ? watchers_[id]->Propagate() @@ -1870,7 +1870,7 @@ bool GenericLiteralWatcher::Propagate(Trail *trail) { return true; } -void GenericLiteralWatcher::Untrail(const Trail &trail, int trail_index) { +void GenericLiteralWatcher::Untrail(const Trail& trail, int trail_index) { if (propagation_trail_index_ <= trail_index) { // Nothing to do since we found a conflict before Propagate() was called. CHECK_EQ(propagation_trail_index_, trail_index); @@ -1878,7 +1878,7 @@ void GenericLiteralWatcher::Untrail(const Trail &trail, int trail_index) { } // We need to clear the watch indices on untrail. 
- for (std::deque &queue : queue_by_priority_) { + for (std::deque& queue : queue_by_priority_) { for (const int id : queue) { id_to_watch_indices_[id].clear(); } @@ -1894,13 +1894,13 @@ void GenericLiteralWatcher::Untrail(const Trail &trail, int trail_index) { } // Registers a propagator and returns its unique ids. -int GenericLiteralWatcher::Register(PropagatorInterface *propagator) { +int GenericLiteralWatcher::Register(PropagatorInterface* propagator) { const int id = watchers_.size(); watchers_.push_back(propagator); id_to_level_at_last_call_.push_back(0); id_to_greatest_common_level_since_last_call_.GrowByOne(); - id_to_reversible_classes_.push_back(std::vector()); - id_to_reversible_ints_.push_back(std::vector()); + id_to_reversible_classes_.push_back(std::vector()); + id_to_reversible_ints_.push_back(std::vector()); id_to_watch_indices_.push_back(std::vector()); id_to_priority_.push_back(1); id_to_idempotence_.push_back(true); @@ -1934,21 +1934,21 @@ void GenericLiteralWatcher::AlwaysCallAtLevelZero(int id) { } void GenericLiteralWatcher::RegisterReversibleClass(int id, - ReversibleInterface *rev) { + ReversibleInterface* rev) { id_to_reversible_classes_[id].push_back(rev); } -void GenericLiteralWatcher::RegisterReversibleInt(int id, int *rev) { +void GenericLiteralWatcher::RegisterReversibleInt(int id, int* rev) { id_to_reversible_ints_[id].push_back(rev); } // This is really close to ExcludeCurrentSolutionAndBacktrack(). 
-std::function +std::function ExcludeCurrentSolutionWithoutIgnoredVariableAndBacktrack() { - return [=](Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); - IntegerEncoder *encoder = model->GetOrCreate(); + return [=](Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); const int current_level = sat_solver->CurrentDecisionLevel(); std::vector clause_to_exclude_solution; @@ -1959,7 +1959,7 @@ ExcludeCurrentSolutionWithoutIgnoredVariableAndBacktrack() { // Tests if this decision is associated to a bound of an ignored variable // in the current assignment. - const InlinedIntegerLiteralVector &associated_literals = + const InlinedIntegerLiteralVector& associated_literals = encoder->GetIntegerLiterals(decision); for (const IntegerLiteral bound : associated_literals) { if (integer_trail->IsCurrentlyIgnored(bound.var)) { diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index a495426644..e8df2485ce 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -107,7 +107,7 @@ inline IntegerValue PositiveRemainder(IntegerValue dividend, } // Computes result += a * b, and return false iff there is an overflow. -inline bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue *result) { +inline bool AddProductTo(IntegerValue a, IntegerValue b, IntegerValue* result) { const int64 prod = CapProd(a.value(), b.value()); if (prod == kint64min || prod == kint64max) return false; const int64 add = CapAdd(prod, result->value()); @@ -143,7 +143,7 @@ inline PositiveOnlyIndex GetPositiveOnlyIndex(IntegerVariable var) { // Returns the vector of the negated variables. std::vector NegationOf( - const std::vector &vars); + const std::vector& vars); // The integer equivalent of a literal. // It represents an IntegerVariable and an upper/lower bound on it. 
@@ -190,7 +190,7 @@ struct IntegerLiteral { IntegerValue bound = IntegerValue(0); }; -inline std::ostream &operator<<(std::ostream &os, IntegerLiteral i_lit) { +inline std::ostream& operator<<(std::ostream& os, IntegerLiteral i_lit) { os << i_lit.DebugString(); return os; } @@ -231,14 +231,14 @@ struct AffineExpression { // A singleton that holds the INITIAL integer variable domains. struct IntegerDomains : public gtl::ITIVector { - explicit IntegerDomains(Model *model) {} + explicit IntegerDomains(Model* model) {} }; // A singleton used for debugging. If this is set in the model, then we can // check that various derived constraint do not exclude this solution (if it is // a known optimal solution for instance). struct DebugSolution : public gtl::ITIVector { - explicit DebugSolution(Model *model) {} + explicit DebugSolution(Model* model) {} }; // Each integer variable x will be associated with a set of literals encoding @@ -261,7 +261,7 @@ struct DebugSolution : public gtl::ITIVector { // though. class IntegerEncoder { public: - explicit IntegerEncoder(Model *model) + explicit IntegerEncoder(Model* model) : sat_solver_(model->GetOrCreate()), domains_(model->GetOrCreate()), num_created_variables_(0) {} @@ -303,10 +303,10 @@ class IntegerEncoder { ValueLiteralPair() {} ValueLiteralPair(IntegerValue v, Literal l) : value(v), literal(l) {} - bool operator==(const ValueLiteralPair &o) const { + bool operator==(const ValueLiteralPair& o) const { return value == o.value && literal == o.literal; } - bool operator<(const ValueLiteralPair &o) const { return value < o.value; } + bool operator<(const ValueLiteralPair& o) const { return value < o.value; } IntegerValue value; Literal literal; }; @@ -373,7 +373,7 @@ class IntegerEncoder { void AddAllImplicationsBetweenAssociatedLiterals(); // Returns the IntegerLiterals that were associated with the given Literal. 
- const InlinedIntegerLiteralVector &GetIntegerLiterals(Literal lit) const { + const InlinedIntegerLiteralVector& GetIntegerLiterals(Literal lit) const { if (lit.Index() >= reverse_encoding_.size()) { return empty_integer_literal_vector_; } @@ -383,7 +383,7 @@ class IntegerEncoder { // Same as GetIntegerLiterals(), but in addition, if the literal was // associated to an integer == value, then the returned list will contain both // (integer >= value) and (integer <= value). - const InlinedIntegerLiteralVector &GetAllIntegerLiterals(Literal lit) const { + const InlinedIntegerLiteralVector& GetAllIntegerLiterals(Literal lit) const { if (lit.Index() >= full_reverse_encoding_.size()) { return empty_integer_literal_vector_; } @@ -417,7 +417,7 @@ class IntegerEncoder { // (x >= 2) but not to (x >= 3), we will return the literal associated with // (x >= 2). LiteralIndex SearchForLiteralAtOrBefore(IntegerLiteral i, - IntegerValue *bound) const; + IntegerValue* bound) const; // Gets the literal always set to true, make it if it does not exist. Literal GetTrueLiteral() { @@ -456,12 +456,12 @@ class IntegerEncoder { // slight optimization. // - 'it' is the current position of associated_lit in map, i.e we must have // it->second == associated_lit. - void AddImplications(const std::map &map, + void AddImplications(const std::map& map, std::map::const_iterator it, Literal associated_lit); - SatSolver *sat_solver_; - IntegerDomains *domains_; + SatSolver* sat_solver_; + IntegerDomains* domains_; bool add_implications_ = true; int64 num_created_variables_ = 0; @@ -472,7 +472,7 @@ class IntegerEncoder { // // TODO(user): Remove the entry no longer needed because of level zero // propagations. - gtl::ITIVector > + gtl::ITIVector> encoding_by_var_; // Store for a given LiteralIndex the list of its associated IntegerLiterals. @@ -496,7 +496,7 @@ class IntegerEncoder { equality_to_associated_literal_; // Mutable because this is lazily cleaned-up by PartialDomainEncoding(). 
- mutable gtl::ITIVector > + mutable gtl::ITIVector> equality_by_var_; // Variables that are fully encoded. @@ -517,7 +517,7 @@ class IntegerEncoder { // to maintain the reason for each propagation. class IntegerTrail : public SatPropagator { public: - explicit IntegerTrail(Model *model) + explicit IntegerTrail(Model* model) : SatPropagator("IntegerTrail"), domains_(model->GetOrCreate()), encoder_(model->GetOrCreate()), @@ -531,9 +531,9 @@ class IntegerTrail : public SatPropagator { // information is in sync with the current solver literal trail. Any // class/propagator using this class must make sure it is synced to the // correct state before calling any of its functions. - bool Propagate(Trail *trail) final; - void Untrail(const Trail &trail, int literal_trail_index) final; - absl::Span Reason(const Trail &trail, + bool Propagate(Trail* trail) final; + void Untrail(const Trail& trail, int literal_trail_index) final; + absl::Span Reason(const Trail& trail, int trail_index) const final; // Returns the number of created integer variables. @@ -561,11 +561,11 @@ class IntegerTrail : public SatPropagator { // Same as above but for a more complex domain specified as a sorted list of // disjoint intervals. See the Domain class. - IntegerVariable AddIntegerVariable(const Domain &domain); + IntegerVariable AddIntegerVariable(const Domain& domain); // Returns the initial domain of the given variable. Note that the min/max // are updated with level zero propagation, but not holes. - const Domain &InitialVariableDomain(IntegerVariable var) const; + const Domain& InitialVariableDomain(IntegerVariable var) const; // Takes the intersection with the current initial variable domain. // @@ -671,22 +671,22 @@ class IntegerTrail : public SatPropagator { // TODO(user): Test that the code work in the presence of integer overflow. 
void RelaxLinearReason(IntegerValue slack, absl::Span coeffs, - std::vector *reason) const; + std::vector* reason) const; // Same as above but take in IntegerVariables instead of IntegerLiterals. void AppendRelaxedLinearReason(IntegerValue slack, absl::Span coeffs, absl::Span vars, - std::vector *reason) const; + std::vector* reason) const; // Same as above but relax the given trail indices. void RelaxLinearReason(IntegerValue slack, absl::Span coeffs, - std::vector *trail_indices) const; + std::vector* trail_indices) const; // Removes from the reasons the literal that are always true. // This is mainly useful for experiments/testing. - void RemoveLevelZeroBounds(std::vector *reason) const; + void RemoveLevelZeroBounds(std::vector* reason) const; // Enqueue new information about a variable bound. Calling this with a less // restrictive bound than the current one will have no effect. @@ -734,7 +734,7 @@ class IntegerTrail : public SatPropagator { // yet in integer_literal[trail_index_of_literal]. using LazyReasonFunction = std::function *literals, std::vector *dependencies)>; + std::vector* literals, std::vector* dependencies)>; ABSL_MUST_USE_RESULT bool Enqueue(IntegerLiteral i_lit, LazyReasonFunction lazy_reason); @@ -751,7 +751,7 @@ class IntegerTrail : public SatPropagator { // Appends the reason for the given integer literals to the output and call // STLSortAndRemoveDuplicates() on it. void MergeReasonInto(absl::Span literals, - std::vector *output) const; + std::vector* output) const; // Returns the number of enqueues that changed a variable bounds. We don't // count enqueues called with a less restrictive bound than the current one. @@ -768,7 +768,7 @@ class IntegerTrail : public SatPropagator { // All the registered bitsets will be set to one each time a LbVar is // modified. It is up to the client to clear it if it wants to be notified // with the newly modified variables. 
- void RegisterWatcher(SparseBitset *p) { + void RegisterWatcher(SparseBitset* p) { p->ClearAndResize(NumIntegerVariables()); watchers_.push_back(p); } @@ -778,14 +778,14 @@ class IntegerTrail : public SatPropagator { bool ReportConflict(absl::Span literal_reason, absl::Span integer_reason) { DCHECK(ReasonIsValid(literal_reason, integer_reason)); - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); conflict->assign(literal_reason.begin(), literal_reason.end()); MergeReasonInto(integer_reason, conflict); return false; } bool ReportConflict(absl::Span integer_reason) { DCHECK(ReasonIsValid({}, integer_reason)); - std::vector *conflict = trail_->MutableConflict(); + std::vector* conflict = trail_->MutableConflict(); conflict->clear(); MergeReasonInto(integer_reason, conflict); return false; @@ -798,7 +798,7 @@ class IntegerTrail : public SatPropagator { // Registers a reversible class. This class will always be synced with the // correct decision level. - void RegisterReversibleClass(ReversibleInterface *rev) { + void RegisterReversibleClass(ReversibleInterface* rev) { reversible_classes_.push_back(rev); } @@ -807,7 +807,7 @@ class IntegerTrail : public SatPropagator { // Inspects the trail and output all the non-level zero bounds (one per // variables) to the output. The algo is sparse if there is only a few // propagations on the trail. - void AppendNewBounds(std::vector *output) const; + void AppendNewBounds(std::vector* output) const; // Returns the trail index < threshold of a TrailEntry about var. Returns -1 // if there is no such entry (at a positive decision level). This is basically @@ -837,8 +837,8 @@ class IntegerTrail : public SatPropagator { // Called by the Enqueue() functions that detected a conflict. This does some // common conflict initialization that must terminate by a call to // MergeReasonIntoInternal(conflict) where conflict is the returned vector. 
- std::vector *InitializeConflict( - IntegerLiteral integer_literal, const LazyReasonFunction &lazy_reason, + std::vector* InitializeConflict( + IntegerLiteral integer_literal, const LazyReasonFunction& lazy_reason, absl::Span literals_reason, absl::Span bounds_reason); @@ -861,7 +861,7 @@ class IntegerTrail : public SatPropagator { IntegerLiteral i_lit, Literal literal_reason); // Does the work of MergeReasonInto() when queue_ is already initialized. - void MergeReasonIntoInternal(std::vector *output) const; + void MergeReasonIntoInternal(std::vector* output) const; // Returns the lowest trail index of a TrailEntry that can be used to explain // the given IntegerLiteral. The literal must be currently true (CHECKed). @@ -885,7 +885,7 @@ class IntegerTrail : public SatPropagator { // Note that looking at literal.Variable() is enough since all the literals // of a reason must be false. void AppendLiteralsReason(int trail_index, - std::vector *output) const; + std::vector* output) const; // Returns some debugging info. std::string DebugString(); @@ -961,7 +961,7 @@ class IntegerTrail : public SatPropagator { // // TODO(user): Avoid using hash_map here, a simple vector should be more // efficient, but we need the "rev" aspect. - RevMap > + RevMap> var_to_current_lb_interval_index_; // Temporary data used by MergeReasonInto(). 
@@ -976,7 +976,7 @@ class IntegerTrail : public SatPropagator { int index; IntegerValue coeff; int64 diff; - bool operator<(const RelaxHeapEntry &o) const { return index < o.index; } + bool operator<(const RelaxHeapEntry& o) const { return index < o.index; } }; mutable std::vector relax_heap_; mutable std::vector tmp_indices_; @@ -999,13 +999,13 @@ class IntegerTrail : public SatPropagator { int64 num_level_zero_enqueues_ = 0; mutable int64 num_decisions_to_break_loop_ = 0; - std::vector *> watchers_; - std::vector reversible_classes_; + std::vector*> watchers_; + std::vector reversible_classes_; - IntegerDomains *domains_; - IntegerEncoder *encoder_; - Trail *trail_; - const SatParameters ¶meters_; + IntegerDomains* domains_; + IntegerEncoder* encoder_; + Trail* trail_; + const SatParameters& parameters_; DISALLOW_COPY_AND_ASSIGN(IntegerTrail); }; @@ -1034,7 +1034,7 @@ class PropagatorInterface { // - At level zero, it will not contain any indices associated with literals // that were already fixed when the propagator was registered. Only the // indices of the literals modified after the registration will be present. - virtual bool IncrementalPropagate(const std::vector &watch_indices) { + virtual bool IncrementalPropagate(const std::vector& watch_indices) { LOG(FATAL) << "Not implemented."; return false; // Remove warning in Windows } @@ -1044,13 +1044,13 @@ class PropagatorInterface { // accessed with model->GetOrCreate<>() and properly registered at creation. 
class RevIntRepository : public RevRepository { public: - explicit RevIntRepository(Model *model) { + explicit RevIntRepository(Model* model) { model->GetOrCreate()->RegisterReversibleClass(this); } }; class RevIntegerValueRepository : public RevRepository { public: - explicit RevIntegerValueRepository(Model *model) { + explicit RevIntegerValueRepository(Model* model) { model->GetOrCreate()->RegisterReversibleClass(this); } }; @@ -1061,17 +1061,17 @@ class RevIntegerValueRepository : public RevRepository { // TODO(user): Move this to its own file. Add unit tests! class GenericLiteralWatcher : public SatPropagator { public: - explicit GenericLiteralWatcher(Model *model); + explicit GenericLiteralWatcher(Model* model); ~GenericLiteralWatcher() final {} // On propagate, the registered propagators will be called if they need to // until a fixed point is reached. Propagators with low ids will tend to be // called first, but it ultimately depends on their "waking" order. - bool Propagate(Trail *trail) final; - void Untrail(const Trail &trail, int literal_trail_index) final; + bool Propagate(Trail* trail) final; + void Untrail(const Trail& trail, int literal_trail_index) final; // Registers a propagator and returns its unique ids. - int Register(PropagatorInterface *propagator); + int Register(PropagatorInterface* propagator); // Changes the priority of the propagator with given id. The priority is a // non-negative integer. Propagators with a lower priority will always be @@ -1113,7 +1113,7 @@ class GenericLiteralWatcher : public SatPropagator { // Doing it just before should minimize cache-misses and bundle as much as // possible the "backtracking" together. Many propagators only watches a // few variables and will not be called at each decision levels. - void RegisterReversibleClass(int id, ReversibleInterface *rev); + void RegisterReversibleClass(int id, ReversibleInterface* rev); // Registers a reversible int with a given propagator. 
The int will be changed // to its correct value just before Propagate() is called. @@ -1127,7 +1127,7 @@ class GenericLiteralWatcher : public SatPropagator { // a call to model.Get<>(), and use SaveWithStamp() before each modification // to have just a slight overhead per int updates. This later option is what // is usually done in a CP solver at the cost of a sligthly more complex API. - void RegisterReversibleInt(int id, int *rev); + void RegisterReversibleInt(int id, int* rev); // Returns the number of registered propagators. int NumPropagators() const { return in_queue_.size(); } @@ -1142,7 +1142,7 @@ class GenericLiteralWatcher : public SatPropagator { // if no IntegerVariable are changed, so the passed vector to the function // might be empty. void RegisterLevelZeroModifiedVariablesCallback( - const std::function &)> cb) { + const std::function&)> cb) { level_zero_modified_variable_callback_.push_back(cb); } @@ -1154,33 +1154,33 @@ class GenericLiteralWatcher : public SatPropagator { private: // Updates queue_ and in_queue_ with the propagator ids that need to be // called. - void UpdateCallingNeeds(Trail *trail); + void UpdateCallingNeeds(Trail* trail); - TimeLimit *time_limit_; - IntegerTrail *integer_trail_; - RevIntRepository *rev_int_repository_; + TimeLimit* time_limit_; + IntegerTrail* integer_trail_; + RevIntRepository* rev_int_repository_; struct WatchData { int id; int watch_index; }; - gtl::ITIVector > literal_to_watcher_; - gtl::ITIVector > var_to_watcher_; - std::vector watchers_; + gtl::ITIVector> literal_to_watcher_; + gtl::ITIVector> var_to_watcher_; + std::vector watchers_; SparseBitset modified_vars_; // Propagator ids that needs to be called. There is one queue per priority but // just one Boolean to indicate if a propagator is in one of them. - std::vector > queue_by_priority_; + std::vector> queue_by_priority_; std::vector in_queue_; // Data for each propagator. 
DEFINE_INT_TYPE(IdType, int32); std::vector id_to_level_at_last_call_; RevVector id_to_greatest_common_level_since_last_call_; - std::vector > id_to_reversible_classes_; - std::vector > id_to_reversible_ints_; - std::vector > id_to_watch_indices_; + std::vector> id_to_reversible_classes_; + std::vector> id_to_reversible_ints_; + std::vector> id_to_watch_indices_; std::vector id_to_priority_; std::vector id_to_idempotence_; @@ -1190,7 +1190,7 @@ class GenericLiteralWatcher : public SatPropagator { // The id of the propagator we just called. int current_id_; - std::vector &)> > + std::vector&)>> level_zero_modified_variable_callback_; DISALLOW_COPY_AND_ASSIGN(GenericLiteralWatcher); @@ -1323,47 +1323,47 @@ inline void GenericLiteralWatcher::WatchIntegerVariable(IntegerVariable i, // IntegerValue which is typechecked. // ============================================================================ -inline std::function NewBooleanVariable() { - return [=](Model *model) { +inline std::function NewBooleanVariable() { + return [=](Model* model) { return model->GetOrCreate()->NewBooleanVariable(); }; } -inline std::function ConstantIntegerVariable( +inline std::function ConstantIntegerVariable( int64 value) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate() ->GetOrCreateConstantIntegerVariable(IntegerValue(value)); }; } -inline std::function NewIntegerVariable(int64 lb, - int64 ub) { - return [=](Model *model) { +inline std::function NewIntegerVariable(int64 lb, + int64 ub) { + return [=](Model* model) { CHECK_LE(lb, ub); return model->GetOrCreate()->AddIntegerVariable( IntegerValue(lb), IntegerValue(ub)); }; } -inline std::function NewIntegerVariable( - const Domain &domain) { - return [=](Model *model) { +inline std::function NewIntegerVariable( + const Domain& domain) { + return [=](Model* model) { return model->GetOrCreate()->AddIntegerVariable(domain); }; } // Creates a 0-1 integer variable "view" of the given literal. 
It will have a // value of 1 when the literal is true, and 0 when the literal is false. -inline std::function NewIntegerVariableFromLiteral( +inline std::function NewIntegerVariableFromLiteral( Literal lit) { - return [=](Model *model) { - auto *encoder = model->GetOrCreate(); + return [=](Model* model) { + auto* encoder = model->GetOrCreate(); const IntegerVariable candidate = encoder->GetLiteralView(lit); if (candidate != kNoIntegerVariable) return candidate; IntegerVariable var; - const auto &assignment = model->GetOrCreate()->Assignment(); + const auto& assignment = model->GetOrCreate()->Assignment(); if (assignment.LiteralIsTrue(lit)) { var = model->Add(ConstantIntegerVariable(1)); } else if (assignment.LiteralIsFalse(lit)) { @@ -1378,37 +1378,36 @@ inline std::function NewIntegerVariableFromLiteral( }; } -inline std::function LowerBound(IntegerVariable v) { - return [=](const Model &model) { +inline std::function LowerBound(IntegerVariable v) { + return [=](const Model& model) { return model.Get()->LowerBound(v).value(); }; } -inline std::function UpperBound(IntegerVariable v) { - return [=](const Model &model) { +inline std::function UpperBound(IntegerVariable v) { + return [=](const Model& model) { return model.Get()->UpperBound(v).value(); }; } -inline std::function IsFixed(IntegerVariable v) { - return [=](const Model &model) { - const IntegerTrail *trail = model.Get(); +inline std::function IsFixed(IntegerVariable v) { + return [=](const Model& model) { + const IntegerTrail* trail = model.Get(); return trail->LowerBound(v) == trail->UpperBound(v); }; } // This checks that the variable is fixed. 
-inline std::function Value(IntegerVariable v) { - return [=](const Model &model) { - const IntegerTrail *trail = model.Get(); +inline std::function Value(IntegerVariable v) { + return [=](const Model& model) { + const IntegerTrail* trail = model.Get(); CHECK_EQ(trail->LowerBound(v), trail->UpperBound(v)) << v; return trail->LowerBound(v).value(); }; } -inline std::function GreaterOrEqual(IntegerVariable v, - int64 lb) { - return [=](Model *model) { +inline std::function GreaterOrEqual(IntegerVariable v, int64 lb) { + return [=](Model* model) { if (!model->GetOrCreate()->Enqueue( IntegerLiteral::GreaterOrEqual(v, IntegerValue(lb)), std::vector(), std::vector())) { @@ -1421,8 +1420,8 @@ inline std::function GreaterOrEqual(IntegerVariable v, }; } -inline std::function LowerOrEqual(IntegerVariable v, int64 ub) { - return [=](Model *model) { +inline std::function LowerOrEqual(IntegerVariable v, int64 ub) { + return [=](Model* model) { if (!model->GetOrCreate()->Enqueue( IntegerLiteral::LowerOrEqual(v, IntegerValue(ub)), std::vector(), std::vector())) { @@ -1436,8 +1435,8 @@ inline std::function LowerOrEqual(IntegerVariable v, int64 ub) { } // Fix v to a given value. -inline std::function Equality(IntegerVariable v, int64 value) { - return [=](Model *model) { +inline std::function Equality(IntegerVariable v, int64 value) { + return [=](Model* model) { model->Add(LowerOrEqual(v, value)); model->Add(GreaterOrEqual(v, value)); }; @@ -1449,10 +1448,10 @@ inline std::function Equality(IntegerVariable v, int64 value) { // direction integer-bound => literal, but just literal => integer-bound? This // is the same as using different underlying variable for an integer literal and // its negation. 
-inline std::function Implication( - const std::vector &enforcement_literals, IntegerLiteral i) { - return [=](Model *model) { - IntegerTrail *integer_trail = model->GetOrCreate(); +inline std::function Implication( + const std::vector& enforcement_literals, IntegerLiteral i) { + return [=](Model* model) { + IntegerTrail* integer_trail = model->GetOrCreate(); if (i.bound <= integer_trail->LowerBound(i.var)) { // Always true! nothing to do. } else if (i.bound > integer_trail->UpperBound(i.var)) { @@ -1465,7 +1464,7 @@ inline std::function Implication( } else { // TODO(user): Double check what happen when we associate a trivially // true or false literal. - IntegerEncoder *encoder = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); std::vector clause{encoder->GetOrCreateAssociatedLiteral(i)}; for (const Literal literal : enforcement_literals) { clause.push_back(literal.Negated()); @@ -1476,12 +1475,12 @@ inline std::function Implication( } // in_interval => v in [lb, ub]. -inline std::function ImpliesInInterval(Literal in_interval, - IntegerVariable v, - int64 lb, int64 ub) { - return [=](Model *model) { +inline std::function ImpliesInInterval(Literal in_interval, + IntegerVariable v, + int64 lb, int64 ub) { + return [=](Model* model) { if (lb == ub) { - IntegerEncoder *encoder = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); model->Add(Implication({in_interval}, encoder->GetOrCreateLiteralAssociatedToEquality( v, IntegerValue(lb)))); @@ -1498,10 +1497,10 @@ inline std::function ImpliesInInterval(Literal in_interval, // in the domain of var (if not already done), and wire everything correctly. // This also returns the full encoding, see the FullDomainEncoding() method of // the IntegerEncoder class. 
-inline std::function(Model *)> +inline std::function(Model*)> FullyEncodeVariable(IntegerVariable var) { - return [=](Model *model) { - IntegerEncoder *encoder = model->GetOrCreate(); + return [=](Model* model) { + IntegerEncoder* encoder = model->GetOrCreate(); if (!encoder->VariableIsFullyEncoded(var)) { encoder->FullyEncodeVariable(var); } @@ -1514,7 +1513,7 @@ FullyEncodeVariable(IntegerVariable var) { // variable that is ignored can basically take any value, and we don't really // want to enumerate them. This function should exclude all solutions where // only the ignored variable values change. -std::function +std::function ExcludeCurrentSolutionWithoutIgnoredVariableAndBacktrack(); } // namespace sat diff --git a/ortools/sat/integer_expr.cc b/ortools/sat/integer_expr.cc index 6513eda5da..917a055d63 100644 --- a/ortools/sat/integer_expr.cc +++ b/ortools/sat/integer_expr.cc @@ -27,10 +27,10 @@ namespace operations_research { namespace sat { -IntegerSumLE::IntegerSumLE(const std::vector &enforcement_literals, - const std::vector &vars, - const std::vector &coeffs, - IntegerValue upper, Model *model) +IntegerSumLE::IntegerSumLE(const std::vector& enforcement_literals, + const std::vector& vars, + const std::vector& coeffs, + IntegerValue upper, Model* model) : enforcement_literals_(enforcement_literals), upper_bound_(upper), trail_(model->GetOrCreate()), @@ -156,8 +156,8 @@ bool IntegerSumLE::Propagate() { IntegerLiteral::LowerOrEqual(var, new_ub), /*lazy_reason=*/[this, propagation_slack]( IntegerLiteral i_lit, int trail_index, - std::vector *literal_reason, - std::vector *trail_indices_reason) { + std::vector* literal_reason, + std::vector* trail_indices_reason) { *literal_reason = literal_reason_; trail_indices_reason->clear(); reason_coeffs_.clear(); @@ -188,10 +188,10 @@ bool IntegerSumLE::Propagate() { return true; } -void IntegerSumLE::RegisterWith(GenericLiteralWatcher *watcher) { +void IntegerSumLE::RegisterWith(GenericLiteralWatcher* watcher) { 
is_registered_ = true; const int id = watcher->Register(this); - for (const IntegerVariable &var : vars_) { + for (const IntegerVariable& var : vars_) { watcher->WatchLowerBound(var, id); } for (const Literal literal : enforcement_literals_) { @@ -205,19 +205,19 @@ void IntegerSumLE::RegisterWith(GenericLiteralWatcher *watcher) { } LevelZeroEquality::LevelZeroEquality(IntegerVariable target, - const std::vector &vars, - const std::vector &coeffs, - Model *model) + const std::vector& vars, + const std::vector& coeffs, + Model* model) : target_(target), vars_(vars), coeffs_(coeffs), trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()) { - auto *watcher = model->GetOrCreate(); + auto* watcher = model->GetOrCreate(); const int id = watcher->Register(this); watcher->SetPropagatorPriority(id, 2); watcher->WatchIntegerVariable(target, id); - for (const IntegerVariable &var : vars_) { + for (const IntegerVariable& var : vars_) { watcher->WatchIntegerVariable(var, id); } } @@ -273,9 +273,9 @@ bool LevelZeroEquality::Propagate() { return true; } -MinPropagator::MinPropagator(const std::vector &vars, +MinPropagator::MinPropagator(const std::vector& vars, IntegerVariable min_var, - IntegerTrail *integer_trail) + IntegerTrail* integer_trail) : vars_(vars), min_var_(min_var), integer_trail_(integer_trail) {} bool MinPropagator::Propagate() { @@ -356,24 +356,24 @@ bool MinPropagator::Propagate() { return true; } -void MinPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void MinPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); - for (const IntegerVariable &var : vars_) { + for (const IntegerVariable& var : vars_) { watcher->WatchLowerBound(var, id); } watcher->WatchUpperBound(min_var_, id); } -LinMinPropagator::LinMinPropagator(const std::vector &exprs, - IntegerVariable min_var, Model *model) +LinMinPropagator::LinMinPropagator(const std::vector& exprs, + IntegerVariable min_var, Model* model) : 
exprs_(exprs), min_var_(min_var), model_(model), integer_trail_(model_->GetOrCreate()) {} bool LinMinPropagator::PropagateLinearUpperBound( - const std::vector &vars, - const std::vector &coeffs, const IntegerValue upper_bound) { + const std::vector& vars, + const std::vector& coeffs, const IntegerValue upper_bound) { IntegerValue sum_lb = IntegerValue(0); const int num_vars = vars.size(); std::vector max_variations; @@ -428,8 +428,8 @@ bool LinMinPropagator::PropagateLinearUpperBound( IntegerLiteral::LowerOrEqual(var, new_ub), /*lazy_reason=*/[this, &vars, &coeffs, propagation_slack]( IntegerLiteral i_lit, int trail_index, - std::vector *literal_reason, - std::vector *trail_indices_reason) { + std::vector* literal_reason, + std::vector* trail_indices_reason) { literal_reason->clear(); trail_indices_reason->clear(); std::vector reason_coeffs; @@ -554,11 +554,11 @@ bool LinMinPropagator::Propagate() { return true; } -void LinMinPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void LinMinPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); - for (const LinearExpression &expr : exprs_) { + for (const LinearExpression& expr : exprs_) { for (int i = 0; i < expr.vars.size(); ++i) { - const IntegerVariable &var = expr.vars[i]; + const IntegerVariable& var = expr.vars[i]; const IntegerValue coeff = expr.coeffs[i]; if (coeff > 0) { watcher->WatchLowerBound(var, id); @@ -573,7 +573,7 @@ void LinMinPropagator::RegisterWith(GenericLiteralWatcher *watcher) { PositiveProductPropagator::PositiveProductPropagator( IntegerVariable a, IntegerVariable b, IntegerVariable p, - IntegerTrail *integer_trail) + IntegerTrail* integer_trail) : a_(a), b_(b), p_(p), integer_trail_(integer_trail) { // Note that we assume this is true at level zero, and so we never include // that fact in the reasons we compute. 
@@ -636,7 +636,7 @@ bool PositiveProductPropagator::Propagate() { return true; } -void PositiveProductPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void PositiveProductPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->WatchIntegerVariable(a_, id); watcher->WatchIntegerVariable(b_, id); @@ -665,7 +665,7 @@ IntegerValue CeilSquareRoot(IntegerValue a) { } // namespace SquarePropagator::SquarePropagator(IntegerVariable x, IntegerVariable s, - IntegerTrail *integer_trail) + IntegerTrail* integer_trail) : x_(x), s_(s), integer_trail_(integer_trail) { CHECK_GE(integer_trail->LevelZeroLowerBound(x), 0); } @@ -713,7 +713,7 @@ bool SquarePropagator::Propagate() { return true; } -void SquarePropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void SquarePropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->WatchIntegerVariable(x_, id); watcher->WatchIntegerVariable(s_, id); @@ -722,7 +722,7 @@ void SquarePropagator::RegisterWith(GenericLiteralWatcher *watcher) { DivisionPropagator::DivisionPropagator(IntegerVariable a, IntegerVariable b, IntegerVariable c, - IntegerTrail *integer_trail) + IntegerTrail* integer_trail) : a_(a), b_(b), c_(c), integer_trail_(integer_trail) { // TODO(user): support these cases. 
CHECK_GE(integer_trail->LevelZeroLowerBound(a), 0); @@ -760,7 +760,7 @@ bool DivisionPropagator::Propagate() { return true; } -void DivisionPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void DivisionPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->WatchIntegerVariable(a_, id); watcher->WatchIntegerVariable(b_, id); @@ -771,7 +771,7 @@ void DivisionPropagator::RegisterWith(GenericLiteralWatcher *watcher) { FixedDivisionPropagator::FixedDivisionPropagator(IntegerVariable a, IntegerValue b, IntegerVariable c, - IntegerTrail *integer_trail) + IntegerTrail* integer_trail) : a_(a), b_(b), c_(c), integer_trail_(integer_trail) {} bool FixedDivisionPropagator::Propagate() { @@ -821,23 +821,23 @@ bool FixedDivisionPropagator::Propagate() { return true; } -void FixedDivisionPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +void FixedDivisionPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->WatchIntegerVariable(a_, id); watcher->WatchIntegerVariable(c_, id); } -std::function IsOneOf(IntegerVariable var, - const std::vector &selectors, - const std::vector &values) { - return [=](Model *model) { - IntegerTrail *integer_trail = model->GetOrCreate(); - IntegerEncoder *encoder = model->GetOrCreate(); +std::function IsOneOf(IntegerVariable var, + const std::vector& selectors, + const std::vector& values) { + return [=](Model* model) { + IntegerTrail* integer_trail = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); CHECK(!values.empty()); CHECK_EQ(values.size(), selectors.size()); std::vector unique_values; - absl::flat_hash_map > value_to_selector; + absl::flat_hash_map> value_to_selector; for (int i = 0; i < values.size(); ++i) { unique_values.push_back(values[i].value()); value_to_selector[values[i].value()].push_back(selectors[i]); @@ -853,7 +853,7 @@ std::function IsOneOf(IntegerVariable var, // Note that it is more 
efficient to call AssociateToIntegerEqualValue() // with the values ordered, like we do here. for (const int64 v : unique_values) { - const std::vector &selectors = value_to_selector[v]; + const std::vector& selectors = value_to_selector[v]; if (selectors.size() == 1) { encoder->AssociateToIntegerEqualValue(selectors[0], var, IntegerValue(v)); diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index dd3ec7a2bf..dc9997be90 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -34,7 +34,7 @@ namespace operations_research { namespace sat { -IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail *integer_trail) { +IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail* integer_trail) { DCHECK(!integer_trail->IsCurrentlyIgnored(var)); const IntegerValue lb = integer_trail->LowerBound(var); DCHECK_LE(lb, integer_trail->UpperBound(var)); @@ -42,10 +42,10 @@ IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail *integer_trail) { return IntegerLiteral::LowerOrEqual(var, lb); } -IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model *model) { - const auto &variables = +IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model* model) { + const auto& variables = model->GetOrCreate()->objective_impacting_variables; - auto *integer_trail = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); if (variables.contains(var)) { return AtMinValue(var, integer_trail); } else if (variables.contains(NegationOf(var))) { @@ -55,7 +55,7 @@ IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model *model) { } IntegerLiteral GreaterOrEqualToMiddleValue(IntegerVariable var, - IntegerTrail *integer_trail) { + IntegerTrail* integer_trail) { const IntegerValue var_lb = integer_trail->LowerBound(var); const IntegerValue var_ub = integer_trail->UpperBound(var); CHECK_LT(var_lb, var_ub); @@ -66,12 +66,12 @@ IntegerLiteral GreaterOrEqualToMiddleValue(IntegerVariable var, } IntegerLiteral 
SplitAroundGivenValue(IntegerVariable var, IntegerValue value, - Model *model) { - auto *integer_trail = model->GetOrCreate(); + Model* model) { + auto* integer_trail = model->GetOrCreate(); const IntegerValue lb = integer_trail->LowerBound(var); const IntegerValue ub = integer_trail->UpperBound(var); - const absl::flat_hash_set &variables = + const absl::flat_hash_set& variables = model->GetOrCreate()->objective_impacting_variables; // Heuristic: Prefer the objective direction first. Reference: Conflict-Driven @@ -93,14 +93,14 @@ IntegerLiteral SplitAroundGivenValue(IntegerVariable var, IntegerValue value, return IntegerLiteral(); } -IntegerLiteral SplitAroundLpValue(IntegerVariable var, Model *model) { - auto *parameters = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); - auto *lp_dispatcher = model->GetOrCreate(); +IntegerLiteral SplitAroundLpValue(IntegerVariable var, Model* model) { + auto* parameters = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* lp_dispatcher = model->GetOrCreate(); DCHECK(!integer_trail->IsCurrentlyIgnored(var)); const IntegerVariable positive_var = PositiveVariable(var); - const LinearProgrammingConstraint *lp = + const LinearProgrammingConstraint* lp = gtl::FindWithDefault(*lp_dispatcher, positive_var, nullptr); // We only use this if the sub-lp has a solution, and depending on the value @@ -120,8 +120,8 @@ IntegerLiteral SplitAroundLpValue(IntegerVariable var, Model *model) { } IntegerLiteral SplitUsingBestSolutionValueInRepository( - IntegerVariable var, const SharedSolutionRepository &solution_repo, - Model *model) { + IntegerVariable var, const SharedSolutionRepository& solution_repo, + Model* model) { if (solution_repo.NumSolutions() == 0) { return IntegerLiteral(); } @@ -146,8 +146,8 @@ IntegerLiteral SplitUsingBestSolutionValueInRepository( // not executed often, but otherwise it is done for each search decision, // which seems expensive. Improve. 
std::function FirstUnassignedVarAtItsMinHeuristic( - const std::vector &vars, Model *model) { - auto *integer_trail = model->GetOrCreate(); + const std::vector& vars, Model* model) { + auto* integer_trail = model->GetOrCreate(); return [/*copy*/ vars, integer_trail]() { for (const IntegerVariable var : vars) { // Note that there is no point trying to fix a currently ignored variable. @@ -161,8 +161,8 @@ std::function FirstUnassignedVarAtItsMinHeuristic( std::function UnassignedVarWithLowestMinAtItsMinHeuristic( - const std::vector &vars, Model *model) { - auto *integer_trail = model->GetOrCreate(); + const std::vector& vars, Model* model) { + auto* integer_trail = model->GetOrCreate(); return [/*copy */ vars, integer_trail]() { IntegerVariable candidate = kNoIntegerVariable; IntegerValue candidate_lb; @@ -181,9 +181,9 @@ UnassignedVarWithLowestMinAtItsMinHeuristic( } std::function SequentialSearch( - std::vector > heuristics) { + std::vector> heuristics) { return [heuristics]() { - for (const auto &h : heuristics) { + for (const auto& h : heuristics) { const BooleanOrIntegerLiteral decision = h(); if (decision.HasValue()) return decision; } @@ -192,12 +192,12 @@ std::function SequentialSearch( } std::function SequentialValueSelection( - std::vector > + std::vector> value_selection_heuristics, std::function var_selection_heuristic, - Model *model) { - auto *encoder = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); + Model* model) { + auto* encoder = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); return [=]() { // Get the current decision. const BooleanOrIntegerLiteral current_decision = var_selection_heuristic(); @@ -205,7 +205,7 @@ std::function SequentialValueSelection( // IntegerLiteral case. 
if (current_decision.boolean_literal_index == kNoLiteralIndex) { - for (const auto &value_heuristic : value_selection_heuristics) { + for (const auto& value_heuristic : value_selection_heuristics) { const IntegerLiteral decision = value_heuristic(current_decision.integer_literal.var); if (decision.IsValid()) return BooleanOrIntegerLiteral(decision); @@ -220,7 +220,7 @@ std::function SequentialValueSelection( if (integer_trail->IsCurrentlyIgnored(l.var)) continue; // Sequentially try the value selection heuristics. - for (const auto &value_heuristic : value_selection_heuristics) { + for (const auto& value_heuristic : value_selection_heuristics) { const IntegerLiteral decision = value_heuristic(l.var); if (decision.IsValid()) return BooleanOrIntegerLiteral(decision); } @@ -231,11 +231,11 @@ std::function SequentialValueSelection( }; } -bool LinearizedPartIsLarge(Model *model) { - auto *lp_constraints = +bool LinearizedPartIsLarge(Model* model) { + auto* lp_constraints = model->GetOrCreate(); int num_lp_variables = 0; - for (LinearProgrammingConstraint *lp : *lp_constraints) { + for (LinearProgrammingConstraint* lp : *lp_constraints) { num_lp_variables += lp->NumVariables(); } const int num_integer_variables = @@ -246,9 +246,9 @@ bool LinearizedPartIsLarge(Model *model) { // TODO(user): Experiment more with value selection heuristics. std::function IntegerValueSelectionHeuristic( std::function var_selection_heuristic, - Model *model) { - const SatParameters ¶meters = *(model->GetOrCreate()); - std::vector > + Model* model) { + const SatParameters& parameters = *(model->GetOrCreate()); + std::vector> value_selection_heuristics; // LP based value. @@ -266,7 +266,7 @@ std::function IntegerValueSelectionHeuristic( // Solution based value. 
if (parameters.exploit_best_solution()) { - auto *response_manager = model->Get(); + auto* response_manager = model->Get(); if (response_manager != nullptr) { VLOG(1) << "Using best solution value selection heuristic."; value_selection_heuristics.push_back( @@ -279,7 +279,7 @@ std::function IntegerValueSelectionHeuristic( // Relaxation Solution based value. if (parameters.exploit_relaxation_solution()) { - auto *relaxation_solutions = + auto* relaxation_solutions = model->Get(); if (relaxation_solutions != nullptr) { value_selection_heuristics.push_back( @@ -303,10 +303,10 @@ std::function IntegerValueSelectionHeuristic( var_selection_heuristic, model); } -std::function SatSolverHeuristic(Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); - Trail *trail = model->GetOrCreate(); - SatDecisionPolicy *decision_policy = model->GetOrCreate(); +std::function SatSolverHeuristic(Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); + Trail* trail = model->GetOrCreate(); + SatDecisionPolicy* decision_policy = model->GetOrCreate(); return [sat_solver, trail, decision_policy] { const bool all_assigned = trail->Index() == sat_solver->NumVariables(); if (all_assigned) return BooleanOrIntegerLiteral(); @@ -316,16 +316,16 @@ std::function SatSolverHeuristic(Model *model) { }; } -std::function PseudoCost(Model *model) { - auto *objective = model->Get(); +std::function PseudoCost(Model* model) { + auto* objective = model->Get(); const bool has_objective = objective != nullptr && objective->objective_var != kNoIntegerVariable; if (!has_objective) { return []() { return BooleanOrIntegerLiteral(); }; } - auto *pseudo_costs = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); + auto* pseudo_costs = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); return [pseudo_costs, integer_trail]() { const IntegerVariable chosen_var = pseudo_costs->GetBestDecisionVar(); if (chosen_var == kNoIntegerVariable) return BooleanOrIntegerLiteral(); 
@@ -335,24 +335,22 @@ std::function PseudoCost(Model *model) { } std::function RandomizeOnRestartHeuristic( - Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); - SatDecisionPolicy *decision_policy = model->GetOrCreate(); + Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); + SatDecisionPolicy* decision_policy = model->GetOrCreate(); // TODO(user): Add other policy and perform more experiments. std::function sat_policy = SatSolverHeuristic(model); - std::vector > policies{ + std::vector> policies{ sat_policy, SequentialSearch({PseudoCost(model), sat_policy})}; // The higher weight for the sat policy is because this policy actually // contains a lot of variation as we randomize the sat parameters. // TODO(user,user): Do more experiments to find better distribution. - std::discrete_distribution var_dist{ - 3 /*sat_policy*/, 1 /*Pseudo cost*/ - }; + std::discrete_distribution var_dist{3 /*sat_policy*/, 1 /*Pseudo cost*/}; // Value selection. - std::vector > + std::vector> value_selection_heuristics; std::vector value_selection_weight; @@ -363,7 +361,7 @@ std::function RandomizeOnRestartHeuristic( value_selection_weight.push_back(8); // Solution based value. - auto *response_manager = model->Get(); + auto* response_manager = model->Get(); if (response_manager != nullptr) { value_selection_heuristics.push_back( [model, response_manager](IntegerVariable var) { @@ -374,7 +372,7 @@ std::function RandomizeOnRestartHeuristic( } // Relaxation solution based value. - auto *relaxation_solutions = model->Get(); + auto* relaxation_solutions = model->Get(); if (relaxation_solutions != nullptr) { value_selection_heuristics.push_back( [model, relaxation_solutions](IntegerVariable var) { @@ -385,7 +383,7 @@ std::function RandomizeOnRestartHeuristic( } // Middle value. 
- auto *integer_trail = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); value_selection_heuristics.push_back([integer_trail](IntegerVariable var) { return GreaterOrEqualToMiddleValue(var, integer_trail); }); @@ -407,10 +405,10 @@ std::function RandomizeOnRestartHeuristic( int policy_index = 0; int val_policy_index = 0; - auto *encoder = model->GetOrCreate(); + auto* encoder = model->GetOrCreate(); return [=]() mutable { if (sat_solver->CurrentDecisionLevel() == 0) { - auto *random = model->GetOrCreate(); + auto* random = model->GetOrCreate(); RandomizeDecisionHeuristic(random, model->GetOrCreate()); decision_policy->ResetDecisionHeuristic(); @@ -456,10 +454,10 @@ std::function RandomizeOnRestartHeuristic( // TODO(user): Avoid the quadratic algorithm!! std::function FollowHint( - const std::vector &vars, - const std::vector &values, Model *model) { - const Trail *trail = model->GetOrCreate(); - const IntegerTrail *integer_trail = model->GetOrCreate(); + const std::vector& vars, + const std::vector& values, Model* model) { + const Trail* trail = model->GetOrCreate(); + const IntegerTrail* integer_trail = model->GetOrCreate(); return [=] { // copy for (int i = 0; i < vars.size(); ++i) { const IntegerValue value = values[i]; @@ -485,7 +483,7 @@ std::function FollowHint( }; } -std::function RestartEveryKFailures(int k, SatSolver *solver) { +std::function RestartEveryKFailures(int k, SatSolver* solver) { bool reset_at_next_call = true; int next_num_failures = 0; return [=]() mutable { @@ -499,7 +497,7 @@ std::function RestartEveryKFailures(int k, SatSolver *solver) { }; } -std::function SatSolverRestartPolicy(Model *model) { +std::function SatSolverRestartPolicy(Model* model) { auto policy = model->GetOrCreate(); return [policy]() { return policy->ShouldRestart(); }; } @@ -513,14 +511,14 @@ std::function WrapIntegerLiteralHeuristic( } // namespace -void ConfigureSearchHeuristics(Model *model) { - SearchHeuristics &heuristics = *model->GetOrCreate(); 
+void ConfigureSearchHeuristics(Model* model) { + SearchHeuristics& heuristics = *model->GetOrCreate(); CHECK(heuristics.fixed_search != nullptr); heuristics.policy_index = 0; heuristics.decision_policies.clear(); heuristics.restart_policies.clear(); - const SatParameters ¶meters = *(model->GetOrCreate()); + const SatParameters& parameters = *(model->GetOrCreate()); switch (parameters.search_branching()) { case SatParameters::AUTOMATIC_SEARCH: { std::function decision_policy; @@ -566,9 +564,9 @@ void ConfigureSearchHeuristics(Model *model) { // TODO(user): This is not used in any of our default config. remove? // It make also no sense to choose a value in the LP heuristic and then // override it with IntegerValueSelectionHeuristic(), clean that up. - std::vector > base_heuristics; + std::vector> base_heuristics; base_heuristics.push_back(heuristics.fixed_search); - for (const auto &ct : + for (const auto& ct : *(model->GetOrCreate())) { base_heuristics.push_back(WrapIntegerLiteralHeuristic( ct->HeuristicLpReducedCostBinary(model))); @@ -578,7 +576,7 @@ void ConfigureSearchHeuristics(Model *model) { heuristics.decision_policies = CompleteHeuristics( base_heuristics, SequentialSearch({SatSolverHeuristic(model), heuristics.fixed_search})); - for (auto &ref : heuristics.decision_policies) { + for (auto& ref : heuristics.decision_policies) { ref = IntegerValueSelectionHeuristic(ref, model); } heuristics.restart_policies.assign(heuristics.decision_policies.size(), @@ -586,8 +584,8 @@ void ConfigureSearchHeuristics(Model *model) { return; } case SatParameters::LP_SEARCH: { - std::vector > lp_heuristics; - for (const auto &ct : + std::vector> lp_heuristics; + for (const auto& ct : *(model->GetOrCreate())) { lp_heuristics.push_back(WrapIntegerLiteralHeuristic( ct->HeuristicLpReducedCostAverageBranching())); @@ -625,24 +623,24 @@ void ConfigureSearchHeuristics(Model *model) { } } -std::vector > CompleteHeuristics( - const std::vector > - &incomplete_heuristics, - const 
std::function &completion_heuristic) { - std::vector > complete_heuristics; +std::vector> CompleteHeuristics( + const std::vector>& + incomplete_heuristics, + const std::function& completion_heuristic) { + std::vector> complete_heuristics; complete_heuristics.reserve(incomplete_heuristics.size()); - for (const auto &incomplete : incomplete_heuristics) { + for (const auto& incomplete : incomplete_heuristics) { complete_heuristics.push_back( SequentialSearch({incomplete, completion_heuristic})); } return complete_heuristics; } -SatSolver::Status SolveIntegerProblem(Model *model) { - TimeLimit *time_limit = model->GetOrCreate(); +SatSolver::Status SolveIntegerProblem(Model* model) { + TimeLimit* time_limit = model->GetOrCreate(); if (time_limit->LimitReached()) return SatSolver::LIMIT_REACHED; - SearchHeuristics &heuristics = *model->GetOrCreate(); + SearchHeuristics& heuristics = *model->GetOrCreate(); const int num_policies = heuristics.decision_policies.size(); CHECK_NE(num_policies, 0); CHECK_EQ(num_policies, heuristics.restart_policies.size()); @@ -650,14 +648,14 @@ SatSolver::Status SolveIntegerProblem(Model *model) { // This is needed for recording the pseudo-costs. IntegerVariable objective_var = kNoIntegerVariable; { - const ObjectiveDefinition *objective = model->Get(); + const ObjectiveDefinition* objective = model->Get(); if (objective != nullptr) objective_var = objective->objective_var; } // Note that it is important to do the level-zero propagation if it wasn't // already done because EnqueueDecisionAndBackjumpOnConflict() assumes that // the solver is in a "propagated" state. - SatSolver *const sat_solver = model->GetOrCreate(); + SatSolver* const sat_solver = model->GetOrCreate(); // TODO(user): We have the issue that at level zero. calling the propagation // loop more than once can propagate more! This is because we call the LP @@ -670,13 +668,13 @@ SatSolver::Status SolveIntegerProblem(Model *model) { // Create and initialize pseudo costs. 
// TODO(user): If this ever shows up in a cpu profile, find a way to not // execute the code when pseudo costs are not needed. - PseudoCosts *pseudo_costs = model->GetOrCreate(); + PseudoCosts* pseudo_costs = model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); - auto *encoder = model->GetOrCreate(); - auto *implied_bounds = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* encoder = model->GetOrCreate(); + auto* implied_bounds = model->GetOrCreate(); - const SatParameters &sat_parameters = *(model->GetOrCreate()); + const SatParameters& sat_parameters = *(model->GetOrCreate()); // Main search loop. const int64 old_num_conflicts = sat_solver->num_failures(); @@ -698,9 +696,9 @@ SatSolver::Status SolveIntegerProblem(Model *model) { return SatSolver::INFEASIBLE; } - auto *level_zero_callbacks = + auto* level_zero_callbacks = model->GetOrCreate(); - for (const auto &cb : level_zero_callbacks->callbacks) { + for (const auto& cb : level_zero_callbacks->callbacks) { if (!cb()) { return SatSolver::INFEASIBLE; } @@ -794,8 +792,8 @@ SatSolver::Status SolveIntegerProblem(Model *model) { // submission to the maxSAT 2018 competition by Emir Demirovic and Peter // Stuckey where they show it is a good idea and provide more references. if (model->GetOrCreate()->use_optimization_hints()) { - auto *sat_decision = model->GetOrCreate(); - const auto &trail = *model->GetOrCreate(); + auto* sat_decision = model->GetOrCreate(); + const auto& trail = *model->GetOrCreate(); for (int i = 0; i < trail.Index(); ++i) { sat_decision->SetAssignmentPreference(trail[i], 0.0); } @@ -862,13 +860,13 @@ SatSolver::Status SolveIntegerProblem(Model *model) { } SatSolver::Status ResetAndSolveIntegerProblem( - const std::vector &assumptions, Model *model) { - SatSolver *const solver = model->GetOrCreate(); + const std::vector& assumptions, Model* model) { + SatSolver* const solver = model->GetOrCreate(); // Sync the bound first. 
if (!solver->ResetToLevelZero()) return solver->UnsatStatus(); - auto *level_zero_callbacks = model->GetOrCreate(); - for (const auto &cb : level_zero_callbacks->callbacks) { + auto* level_zero_callbacks = model->GetOrCreate(); + for (const auto& cb : level_zero_callbacks->callbacks) { if (!cb()) return SatSolver::INFEASIBLE; } @@ -879,7 +877,7 @@ SatSolver::Status ResetAndSolveIntegerProblem( return SolveIntegerProblem(model); } -SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model *model) { +SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model* model) { const IntegerVariable num_vars = model->GetOrCreate()->NumIntegerVariables(); std::vector all_variables; @@ -887,7 +885,7 @@ SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model *model) { all_variables.push_back(var); } - SearchHeuristics &heuristics = *model->GetOrCreate(); + SearchHeuristics& heuristics = *model->GetOrCreate(); heuristics.policy_index = 0; heuristics.decision_policies = {SequentialSearch( {SatSolverHeuristic(model), diff --git a/ortools/sat/integer_search.h b/ortools/sat/integer_search.h index 1bf7c49775..f61d84b5ad 100644 --- a/ortools/sat/integer_search.h +++ b/ortools/sat/integer_search.h @@ -61,8 +61,8 @@ struct SearchHeuristics { // Decision and restart heuristics. The two vectors must be of the same size // and restart_policies[i] will always be used in conjunction with // decision_policies[i]. - std::vector > decision_policies; - std::vector > restart_policies; + std::vector> decision_policies; + std::vector> restart_policies; // Index in the vectors above that indicate the current configuration. int policy_index; @@ -77,12 +77,12 @@ struct SearchHeuristics { // order integer variables are lazily instantiated (and at what value), this // uses the current solver parameters to set the SearchHeuristics class in the // given model. 
-void ConfigureSearchHeuristics(Model *model); +void ConfigureSearchHeuristics(Model* model); // Callbacks that will be called when the search goes back to level 0. // Callbacks should return false if the propagation fails. struct LevelZeroCallbackHelper { - std::vector > callbacks; + std::vector> callbacks; }; // Tries to find a feasible solution to the current model. @@ -94,47 +94,47 @@ struct LevelZeroCallbackHelper { // // Each time a restart happen, this increment the policy index modulo the number // of heuristics to act as a portfolio search. -SatSolver::Status SolveIntegerProblem(Model *model); +SatSolver::Status SolveIntegerProblem(Model* model); // Resets the solver to the given assumptions before calling // SolveIntegerProblem(). SatSolver::Status ResetAndSolveIntegerProblem( - const std::vector &assumptions, Model *model); + const std::vector& assumptions, Model* model); // Only used in tests. Move to a test utility file. // // This configures the model SearchHeuristics with a simple default heuristic // and then call ResetAndSolveIntegerProblem() without any assumptions. -SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model *model); +SatSolver::Status SolveIntegerProblemWithLazyEncoding(Model* model); // Returns decision corresponding to var at its lower bound. // Returns an invalid literal if the variable is fixed. -IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail *integer_trail); +IntegerLiteral AtMinValue(IntegerVariable var, IntegerTrail* integer_trail); // If a variable appear in the objective, branch on its best objective value. -IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model *model); +IntegerLiteral ChooseBestObjectiveValue(IntegerVariable var, Model* model); // Returns decision corresponding to var >= lb + max(1, (ub - lb) / 2). It also // CHECKs that the variable is not fixed. 
IntegerLiteral GreaterOrEqualToMiddleValue(IntegerVariable var, - IntegerTrail *integer_trail); + IntegerTrail* integer_trail); // This method first tries var <= value. If this does not reduce the domain it // tries var >= value. If that also does not reduce the domain then returns // an invalid literal. IntegerLiteral SplitAroundGivenValue(IntegerVariable var, IntegerValue value, - Model *model); + Model* model); // Returns decision corresponding to var <= round(lp_value). If the variable // does not appear in the LP, this method returns an invalid literal. -IntegerLiteral SplitAroundLpValue(IntegerVariable var, Model *model); +IntegerLiteral SplitAroundLpValue(IntegerVariable var, Model* model); // Returns decision corresponding to var <= best_solution[var]. If no solution // has been found, this method returns a literal with kNoIntegerVariable. This // was suggested in paper: "Solution-Based Phase Saving for CP" (2018) by Emir // Demirovic, Geoffrey Chu, and Peter J. Stuckey. IntegerLiteral SplitDomainUsingBestSolutionValue(IntegerVariable var, - Model *model); + Model* model); // Decision heuristic for SolveIntegerProblemWithLazyEncoding(). Returns a // function that will return the literal corresponding to the fact that the @@ -143,7 +143,7 @@ IntegerLiteral SplitDomainUsingBestSolutionValue(IntegerVariable var, // // Note that this function will create the associated literal if needed. std::function FirstUnassignedVarAtItsMinHeuristic( - const std::vector &vars, Model *model); + const std::vector& vars, Model* model); // Decision heuristic for SolveIntegerProblemWithLazyEncoding(). Like // FirstUnassignedVarAtItsMinHeuristic() but the function will return the @@ -151,7 +151,7 @@ std::function FirstUnassignedVarAtItsMinHeuristic( // with the lowest min has a value <= this min. 
std::function UnassignedVarWithLowestMinAtItsMinHeuristic( - const std::vector &vars, Model *model); + const std::vector& vars, Model* model); // Set the first unassigned Literal/Variable to its value. // @@ -163,14 +163,14 @@ struct BooleanOrIntegerVariable { IntegerVariable int_var = kNoIntegerVariable; }; std::function FollowHint( - const std::vector &vars, - const std::vector &values, Model *model); + const std::vector& vars, + const std::vector& values, Model* model); // Combines search heuristics in order: if the i-th one returns kNoLiteralIndex, // ask the (i+1)-th. If every heuristic returned kNoLiteralIndex, // returns kNoLiteralIndex. std::function SequentialSearch( - std::vector > heuristics); + std::vector> heuristics); // Changes the value of the given decision by 'var_selection_heuristic'. We try // to see if the decision is "associated" with an IntegerVariable, and if it is @@ -178,41 +178,41 @@ std::function SequentialSearch( // that is applicable. If none of the heuristics are applicable then the given // decision by 'var_selection_heuristic' is returned. std::function SequentialValueSelection( - std::vector > + std::vector> value_selection_heuristics, std::function var_selection_heuristic, - Model *model); + Model* model); // Changes the value of the given decision by 'var_selection_heuristic' // according to various value selection heuristics. Looks at the code to know // exactly what heuristic we use. std::function IntegerValueSelectionHeuristic( std::function var_selection_heuristic, - Model *model); + Model* model); // Returns the BooleanOrIntegerLiteral advised by the underliying SAT solver. -std::function SatSolverHeuristic(Model *model); +std::function SatSolverHeuristic(Model* model); // Gets the branching variable using pseudo costs and combines it with a value // for branching. 
-std::function PseudoCost(Model *model); +std::function PseudoCost(Model* model); // Returns true if the number of variables in the linearized part represent // a large enough proportion of all the problem variables. -bool LinearizedPartIsLarge(Model *model); +bool LinearizedPartIsLarge(Model* model); // A restart policy that restarts every k failures. -std::function RestartEveryKFailures(int k, SatSolver *solver); +std::function RestartEveryKFailures(int k, SatSolver* solver); // A restart policy that uses the underlying sat solver's policy. -std::function SatSolverRestartPolicy(Model *model); +std::function SatSolverRestartPolicy(Model* model); // Concatenates each input_heuristic with a default heuristic that instantiate // all the problem's Boolean variables, into a new vector. -std::vector > CompleteHeuristics( - const std::vector > - &incomplete_heuristics, - const std::function &completion_heuristic); +std::vector> CompleteHeuristics( + const std::vector>& + incomplete_heuristics, + const std::function& completion_heuristic); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index 91beee7887..6f288e1dbd 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -51,11 +51,11 @@ IntervalVariable IntervalsRepository::CreateInterval(IntegerVariable start, } SchedulingConstraintHelper::SchedulingConstraintHelper( - const std::vector &tasks, Model *model) + const std::vector& tasks, Model* model) : trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), precedences_(model->GetOrCreate()) { - auto *repository = model->GetOrCreate(); + auto* repository = model->GetOrCreate(); start_vars_.clear(); end_vars_.clear(); minus_end_vars_.clear(); @@ -86,7 +86,7 @@ SchedulingConstraintHelper::SchedulingConstraintHelper( } SchedulingConstraintHelper::SchedulingConstraintHelper(int num_tasks, - Model *model) + Model* model) : trail_(model->GetOrCreate()), 
integer_trail_(model->GetOrCreate()), precedences_(model->GetOrCreate()) { @@ -95,7 +95,7 @@ SchedulingConstraintHelper::SchedulingConstraintHelper(int num_tasks, } void SchedulingConstraintHelper::ResetFromSubset( - const SchedulingConstraintHelper &other, absl::Span tasks) { + const SchedulingConstraintHelper& other, absl::Span tasks) { current_time_direction_ = other.current_time_direction_; const int num_tasks = tasks.size(); @@ -153,11 +153,11 @@ void SchedulingConstraintHelper::SetTimeDirection(bool is_forward) { std::swap(shifted_start_min_timestamp_, negated_shifted_end_max_timestamp_); } -const std::vector - &SchedulingConstraintHelper::TaskByIncreasingStartMin() { +const std::vector& +SchedulingConstraintHelper::TaskByIncreasingStartMin() { const int num_tasks = NumTasks(); for (int i = 0; i < num_tasks; ++i) { - TaskTime &ref = task_by_increasing_start_min_[i]; + TaskTime& ref = task_by_increasing_start_min_[i]; ref.time = StartMin(ref.task_index); } IncrementalSort(task_by_increasing_start_min_.begin(), @@ -165,11 +165,11 @@ const std::vector return task_by_increasing_start_min_; } -const std::vector - &SchedulingConstraintHelper::TaskByIncreasingEndMin() { +const std::vector& +SchedulingConstraintHelper::TaskByIncreasingEndMin() { const int num_tasks = NumTasks(); for (int i = 0; i < num_tasks; ++i) { - TaskTime &ref = task_by_increasing_end_min_[i]; + TaskTime& ref = task_by_increasing_end_min_[i]; ref.time = EndMin(ref.task_index); } IncrementalSort(task_by_increasing_end_min_.begin(), @@ -177,11 +177,11 @@ const std::vector return task_by_increasing_end_min_; } -const std::vector - &SchedulingConstraintHelper::TaskByDecreasingStartMax() { +const std::vector& +SchedulingConstraintHelper::TaskByDecreasingStartMax() { const int num_tasks = NumTasks(); for (int i = 0; i < num_tasks; ++i) { - TaskTime &ref = task_by_decreasing_start_max_[i]; + TaskTime& ref = task_by_decreasing_start_max_[i]; ref.time = StartMax(ref.task_index); } 
IncrementalSort(task_by_decreasing_start_max_.begin(), @@ -190,11 +190,11 @@ const std::vector return task_by_decreasing_start_max_; } -const std::vector - &SchedulingConstraintHelper::TaskByDecreasingEndMax() { +const std::vector& +SchedulingConstraintHelper::TaskByDecreasingEndMax() { const int num_tasks = NumTasks(); for (int i = 0; i < num_tasks; ++i) { - TaskTime &ref = task_by_decreasing_end_max_[i]; + TaskTime& ref = task_by_decreasing_end_max_[i]; ref.time = EndMax(ref.task_index); } IncrementalSort(task_by_decreasing_end_max_.begin(), @@ -202,8 +202,8 @@ const std::vector return task_by_decreasing_end_max_; } -const std::vector - &SchedulingConstraintHelper::TaskByIncreasingShiftedStartMin() { +const std::vector& +SchedulingConstraintHelper::TaskByIncreasingShiftedStartMin() { const int64 new_timestamp = integer_trail_->timestamp(); if (new_timestamp > shifted_start_min_timestamp_) { shifted_start_min_timestamp_ = new_timestamp; @@ -211,7 +211,7 @@ const std::vector bool is_sorted = true; IntegerValue previous = kMinIntegerValue; for (int i = 0; i < num_tasks; ++i) { - TaskTime &ref = task_by_increasing_shifted_start_min_[i]; + TaskTime& ref = task_by_increasing_shifted_start_min_[i]; ref.time = ShiftedStartMin(ref.task_index); is_sorted = is_sorted && ref.time >= previous; previous = ref.time; @@ -358,7 +358,7 @@ bool SchedulingConstraintHelper::ReportConflict() { } void SchedulingConstraintHelper::WatchAllTasks(int id, - GenericLiteralWatcher *watcher, + GenericLiteralWatcher* watcher, bool watch_start_max, bool watch_end_max) const { const int num_tasks = start_vars_.size(); @@ -392,7 +392,7 @@ void SchedulingConstraintHelper::ImportOtherReasons() { } void SchedulingConstraintHelper::ImportOtherReasons( - const SchedulingConstraintHelper &other_helper) { + const SchedulingConstraintHelper& other_helper) { literal_reason_.insert(literal_reason_.end(), other_helper.literal_reason_.begin(), other_helper.literal_reason_.end()); diff --git 
a/ortools/sat/intervals.h b/ortools/sat/intervals.h index f48c529b23..3b52e1e2fd 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -44,7 +44,7 @@ const IntervalVariable kNoIntervalVariable(-1); // provides many helper functions to add precedences relation between intervals. class IntervalsRepository { public: - explicit IntervalsRepository(Model *model) + explicit IntervalsRepository(Model* model) : integer_trail_(model->GetOrCreate()), precedences_(model->GetOrCreate()) {} @@ -103,8 +103,8 @@ class IntervalsRepository { private: // External classes needed. - IntegerTrail *integer_trail_; - PrecedencesPropagator *precedences_; + IntegerTrail* integer_trail_; + PrecedencesPropagator* precedences_; // Literal indicating if the tasks is executed. Tasks that are always executed // will have a kNoLiteralIndex entry in this vector. @@ -138,19 +138,19 @@ class SchedulingConstraintHelper { public: // All the functions below refer to a task by its index t in the tasks // vector given at construction. - SchedulingConstraintHelper(const std::vector &tasks, - Model *model); + SchedulingConstraintHelper(const std::vector& tasks, + Model* model); // Temporary constructor. // The class will not be usable until ResetFromSubset() is called. // // TODO(user): Remove this. It is a hack because the disjunctive class needs // to fetch the maximum possible number of task at construction. - SchedulingConstraintHelper(int num_tasks, Model *model); + SchedulingConstraintHelper(int num_tasks, Model* model); // Resets the class to the same state as if it was constructed with // the given subset of tasks from other. - void ResetFromSubset(const SchedulingConstraintHelper &other, + void ResetFromSubset(const SchedulingConstraintHelper& other, absl::Span tasks); // Returns the number of task. @@ -210,11 +210,11 @@ class SchedulingConstraintHelper { // // TODO(user): we could merge the first loop of IncrementalSort() with the // loop that fill TaskTime.time at each call. 
- const std::vector &TaskByIncreasingStartMin(); - const std::vector &TaskByIncreasingEndMin(); - const std::vector &TaskByDecreasingStartMax(); - const std::vector &TaskByDecreasingEndMax(); - const std::vector &TaskByIncreasingShiftedStartMin(); + const std::vector& TaskByIncreasingStartMin(); + const std::vector& TaskByIncreasingEndMin(); + const std::vector& TaskByDecreasingStartMax(); + const std::vector& TaskByDecreasingEndMax(); + const std::vector& TaskByIncreasingShiftedStartMin(); // Functions to clear and then set the current reason. void ClearReason(); @@ -234,8 +234,8 @@ class SchedulingConstraintHelper { // It is also possible to directly manipulates the underlying reason vectors // that will be used when pushing something. - std::vector *MutableLiteralReason() { return &literal_reason_; } - std::vector *MutableIntegerReason() { + std::vector* MutableLiteralReason() { return &literal_reason_; } + std::vector* MutableIntegerReason() { return &integer_reason_; } @@ -258,9 +258,9 @@ class SchedulingConstraintHelper { IntegerLiteral lit); // Returns the underlying integer variables. - const std::vector &StartVars() const { return start_vars_; } - const std::vector &EndVars() const { return end_vars_; } - const std::vector &SizeVars() const { return size_vars_; } + const std::vector& StartVars() const { return start_vars_; } + const std::vector& EndVars() const { return end_vars_; } + const std::vector& SizeVars() const { return size_vars_; } IntegerVariable SizeVar(int index) const { return size_vars_[index]; } Literal PresenceLiteral(int index) const { DCHECK(IsOptional(index)); @@ -269,7 +269,7 @@ class SchedulingConstraintHelper { // Registers the given propagator id to be called if any of the tasks // in this class change. Note that we do not watch size max though. 
- void WatchAllTasks(int id, GenericLiteralWatcher *watcher, + void WatchAllTasks(int id, GenericLiteralWatcher* watcher, bool watch_start_max = true, bool watch_end_max = true) const; @@ -278,7 +278,7 @@ class SchedulingConstraintHelper { // For each interval appearing in a reason on this helper, another reason // will be added. This other reason specifies that on the other helper, the // corresponding interval overlaps 'event'. - void SetOtherHelper(SchedulingConstraintHelper *other_helper, + void SetOtherHelper(SchedulingConstraintHelper* other_helper, IntegerValue event) { CHECK(other_helper != nullptr); other_helper_ = other_helper; @@ -291,7 +291,7 @@ class SchedulingConstraintHelper { // This checks that other_helper_ is null. // // This is used in the 2D energetic reasoning in the diffn constraint. - void ImportOtherReasons(const SchedulingConstraintHelper &other_helper); + void ImportOtherReasons(const SchedulingConstraintHelper& other_helper); private: void InitSortedVectors(); @@ -308,9 +308,9 @@ class SchedulingConstraintHelper { // Import the reasons on the other helper into this helper. void ImportOtherReasons(); - Trail *trail_; - IntegerTrail *integer_trail_; - PrecedencesPropagator *precedences_; + Trail* trail_; + IntegerTrail* integer_trail_; + PrecedencesPropagator* precedences_; // The current direction of time, true for forward, false for backward. bool current_time_direction_ = true; @@ -344,7 +344,7 @@ class SchedulingConstraintHelper { std::vector integer_reason_; // Optional 'slave' helper used in the diffn constraint. - SchedulingConstraintHelper *other_helper_ = nullptr; + SchedulingConstraintHelper* other_helper_ = nullptr; IntegerValue event_for_other_helper_; std::vector already_added_to_other_reasons_; }; @@ -521,56 +521,55 @@ inline void SchedulingConstraintHelper::AddEnergyAfterReason( // Model based functions. 
// ============================================================================= -inline std::function StartVar( +inline std::function StartVar( IntervalVariable v) { - return [=](const Model &model) { + return [=](const Model& model) { return model.Get()->StartVar(v); }; } -inline std::function EndVar( - IntervalVariable v) { - return [=](const Model &model) { +inline std::function EndVar(IntervalVariable v) { + return [=](const Model& model) { return model.Get()->EndVar(v); }; } -inline std::function SizeVar( +inline std::function SizeVar( IntervalVariable v) { - return [=](const Model &model) { + return [=](const Model& model) { return model.Get()->SizeVar(v); }; } -inline std::function MinSize(IntervalVariable v) { - return [=](const Model &model) { +inline std::function MinSize(IntervalVariable v) { + return [=](const Model& model) { return model.Get()->MinSize(v).value(); }; } -inline std::function MaxSize(IntervalVariable v) { - return [=](const Model &model) { +inline std::function MaxSize(IntervalVariable v) { + return [=](const Model& model) { return model.Get()->MaxSize(v).value(); }; } -inline std::function IsOptional(IntervalVariable v) { - return [=](const Model &model) { +inline std::function IsOptional(IntervalVariable v) { + return [=](const Model& model) { return model.Get()->IsOptional(v); }; } -inline std::function IsPresentLiteral( +inline std::function IsPresentLiteral( IntervalVariable v) { - return [=](const Model &model) { + return [=](const Model& model) { return model.Get()->IsPresentLiteral(v); }; } -inline std::function NewInterval(int64 min_start, - int64 max_end, - int64 size) { - return [=](Model *model) { +inline std::function NewInterval(int64 min_start, + int64 max_end, + int64 size) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( model->Add(NewIntegerVariable(min_start, max_end)), model->Add(NewIntegerVariable(min_start, max_end)), kNoIntegerVariable, @@ -578,17 +577,17 @@ inline std::function 
NewInterval(int64 min_start, }; } -inline std::function NewInterval( +inline std::function NewInterval( IntegerVariable start, IntegerVariable end, IntegerVariable size) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( start, end, size, IntegerValue(0), kNoLiteralIndex); }; } -inline std::function NewIntervalWithVariableSize( +inline std::function NewIntervalWithVariableSize( int64 min_start, int64 max_end, int64 min_size, int64 max_size) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( model->Add(NewIntegerVariable(min_start, max_end)), model->Add(NewIntegerVariable(min_start, max_end)), @@ -597,9 +596,9 @@ inline std::function NewIntervalWithVariableSize( }; } -inline std::function NewOptionalInterval( +inline std::function NewOptionalInterval( int64 min_start, int64 max_end, int64 size, Literal is_present) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( model->Add(NewIntegerVariable(min_start, max_end)), model->Add(NewIntegerVariable(min_start, max_end)), kNoIntegerVariable, @@ -607,16 +606,16 @@ inline std::function NewOptionalInterval( }; } -inline std::function +inline std::function NewOptionalIntervalWithOptionalVariables(int64 min_start, int64 max_end, int64 size, Literal is_present) { - return [=](Model *model) { + return [=](Model* model) { // Note that we need to mark the optionality first. 
const IntegerVariable start = model->Add(NewIntegerVariable(min_start, max_end)); const IntegerVariable end = model->Add(NewIntegerVariable(min_start, max_end)); - auto *integer_trail = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); integer_trail->MarkIntegerVariableAsOptional(start, is_present); integer_trail->MarkIntegerVariableAsOptional(end, is_present); return model->GetOrCreate()->CreateInterval( @@ -624,20 +623,20 @@ NewOptionalIntervalWithOptionalVariables(int64 min_start, int64 max_end, }; } -inline std::function NewOptionalInterval( +inline std::function NewOptionalInterval( IntegerVariable start, IntegerVariable end, IntegerVariable size, Literal is_present) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( start, end, size, IntegerValue(0), is_present.Index()); }; } -inline std::function +inline std::function NewOptionalIntervalWithVariableSize(int64 min_start, int64 max_end, int64 min_size, int64 max_size, Literal is_present) { - return [=](Model *model) { + return [=](Model* model) { return model->GetOrCreate()->CreateInterval( model->Add(NewIntegerVariable(min_start, max_end)), model->Add(NewIntegerVariable(min_start, max_end)), @@ -647,10 +646,10 @@ NewOptionalIntervalWithVariableSize(int64 min_start, int64 max_end, } // This requires that all the alternatives are optional tasks. 
-inline std::function IntervalWithAlternatives( - IntervalVariable master, const std::vector &members) { - return [=](Model *model) { - IntervalsRepository *intervals = model->GetOrCreate(); +inline std::function IntervalWithAlternatives( + IntervalVariable master, const std::vector& members) { + return [=](Model* model) { + IntervalsRepository* intervals = model->GetOrCreate(); std::vector presences; std::vector sizes; diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index ef0258ed31..108f79bc0e 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -80,8 +80,8 @@ ABSL_MUST_USE_RESULT bool LinearConstraintBuilder::AddLiteralTerm( } void CleanTermsAndFillConstraint( - std::vector > *terms, - LinearConstraint *constraint) { + std::vector>* terms, + LinearConstraint* constraint) { constraint->vars.clear(); constraint->coeffs.clear(); @@ -118,8 +118,8 @@ LinearConstraint LinearConstraintBuilder::Build() { return result; } -double ComputeActivity(const LinearConstraint &constraint, - const gtl::ITIVector &values) { +double ComputeActivity(const LinearConstraint& constraint, + const gtl::ITIVector& values) { double activity = 0; for (int i = 0; i < constraint.vars.size(); ++i) { const IntegerVariable var = constraint.vars[i]; @@ -129,7 +129,7 @@ double ComputeActivity(const LinearConstraint &constraint, return activity; } -double ComputeL2Norm(const LinearConstraint &constraint) { +double ComputeL2Norm(const LinearConstraint& constraint) { double sum = 0.0; for (const IntegerValue coeff : constraint.coeffs) { sum += ToDouble(coeff) * ToDouble(coeff); @@ -137,7 +137,7 @@ double ComputeL2Norm(const LinearConstraint &constraint) { return std::sqrt(sum); } -IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint) { +IntegerValue ComputeInfinityNorm(const LinearConstraint& constraint) { IntegerValue result(0); for (const IntegerValue coeff : constraint.coeffs) { result = std::max(result, 
IntTypeAbs(coeff)); @@ -145,8 +145,8 @@ IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint) { return result; } -double ScalarProduct(const LinearConstraint &constraint1, - const LinearConstraint &constraint2) { +double ScalarProduct(const LinearConstraint& constraint1, + const LinearConstraint& constraint2) { DCHECK(std::is_sorted(constraint1.vars.begin(), constraint1.vars.end())); DCHECK(std::is_sorted(constraint2.vars.begin(), constraint2.vars.end())); double scalar_product = 0.0; @@ -171,7 +171,7 @@ double ScalarProduct(const LinearConstraint &constraint1, namespace { // TODO(user): Template for any integer type and expose this? -IntegerValue ComputeGcd(const std::vector &values) { +IntegerValue ComputeGcd(const std::vector& values) { if (values.empty()) return IntegerValue(1); int64 gcd = 0; for (const IntegerValue value : values) { @@ -184,7 +184,7 @@ IntegerValue ComputeGcd(const std::vector &values) { } // namespace -void DivideByGCD(LinearConstraint *constraint) { +void DivideByGCD(LinearConstraint* constraint) { if (constraint->coeffs.empty()) return; const IntegerValue gcd = ComputeGcd(constraint->coeffs); if (gcd == 1) return; @@ -195,10 +195,10 @@ void DivideByGCD(LinearConstraint *constraint) { if (constraint->ub < kMaxIntegerValue) { constraint->ub = FloorRatio(constraint->ub, gcd); } - for (IntegerValue &coeff : constraint->coeffs) coeff /= gcd; + for (IntegerValue& coeff : constraint->coeffs) coeff /= gcd; } -void RemoveZeroTerms(LinearConstraint *constraint) { +void RemoveZeroTerms(LinearConstraint* constraint) { int new_size = 0; const int size = constraint->vars.size(); for (int i = 0; i < size; ++i) { @@ -211,7 +211,7 @@ void RemoveZeroTerms(LinearConstraint *constraint) { constraint->coeffs.resize(new_size); } -void MakeAllCoefficientsPositive(LinearConstraint *constraint) { +void MakeAllCoefficientsPositive(LinearConstraint* constraint) { const int size = constraint->vars.size(); for (int i = 0; i < size; ++i) { const 
IntegerValue coeff = constraint->coeffs[i]; @@ -222,7 +222,7 @@ void MakeAllCoefficientsPositive(LinearConstraint *constraint) { } } -void MakeAllVariablesPositive(LinearConstraint *constraint) { +void MakeAllVariablesPositive(LinearConstraint* constraint) { const int size = constraint->vars.size(); for (int i = 0; i < size; ++i) { const IntegerVariable var = constraint->vars[i]; @@ -239,8 +239,8 @@ void MakeAllVariablesPositive(LinearConstraint *constraint) { // TODO(user): This is really similar to CleanTermsAndFillConstraint(), maybe // we should just make the later switch negative variable to positive ones to // avoid an extra linear scan on each new cuts. -void CanonicalizeConstraint(LinearConstraint *ct) { - std::vector > terms; +void CanonicalizeConstraint(LinearConstraint* ct) { + std::vector> terms; const int size = ct->vars.size(); for (int i = 0; i < size; ++i) { @@ -254,13 +254,13 @@ void CanonicalizeConstraint(LinearConstraint *ct) { ct->vars.clear(); ct->coeffs.clear(); - for (const auto &term : terms) { + for (const auto& term : terms) { ct->vars.push_back(term.first); ct->coeffs.push_back(term.second); } } -bool NoDuplicateVariable(const LinearConstraint &ct) { +bool NoDuplicateVariable(const LinearConstraint& ct) { absl::flat_hash_set seen_variables; const int size = ct.vars.size(); for (int i = 0; i < size; ++i) { @@ -273,7 +273,7 @@ bool NoDuplicateVariable(const LinearConstraint &ct) { return true; } -LinearExpression CanonicalizeExpr(const LinearExpression &expr) { +LinearExpression CanonicalizeExpr(const LinearExpression& expr) { LinearExpression canonical_expr; canonical_expr.offset = expr.offset; for (int i = 0; i < expr.vars.size(); ++i) { @@ -288,8 +288,8 @@ LinearExpression CanonicalizeExpr(const LinearExpression &expr) { return canonical_expr; } -IntegerValue LinExprLowerBound(const LinearExpression &expr, - const IntegerTrail &integer_trail) { +IntegerValue LinExprLowerBound(const LinearExpression& expr, + const IntegerTrail& 
integer_trail) { IntegerValue lower_bound = expr.offset; for (int i = 0; i < expr.vars.size(); ++i) { DCHECK_GE(expr.coeffs[i], 0) << "The expression is not canonicalized"; @@ -298,8 +298,8 @@ IntegerValue LinExprLowerBound(const LinearExpression &expr, return lower_bound; } -IntegerValue LinExprUpperBound(const LinearExpression &expr, - const IntegerTrail &integer_trail) { +IntegerValue LinExprUpperBound(const LinearExpression& expr, + const IntegerTrail& integer_trail) { IntegerValue upper_bound = expr.offset; for (int i = 0; i < expr.vars.size(); ++i) { DCHECK_GE(expr.coeffs[i], 0) << "The expression is not canonicalized"; @@ -308,7 +308,7 @@ IntegerValue LinExprUpperBound(const LinearExpression &expr, return upper_bound; } -LinearExpression NegationOf(const LinearExpression &expr) { +LinearExpression NegationOf(const LinearExpression& expr) { LinearExpression result; result.vars = NegationOf(expr.vars); result.coeffs = expr.coeffs; @@ -316,7 +316,7 @@ LinearExpression NegationOf(const LinearExpression &expr) { return result; } -LinearExpression PositiveVarExpr(const LinearExpression &expr) { +LinearExpression PositiveVarExpr(const LinearExpression& expr) { LinearExpression result; result.offset = expr.offset; for (int i = 0; i < expr.vars.size(); ++i) { @@ -332,7 +332,7 @@ LinearExpression PositiveVarExpr(const LinearExpression &expr) { } IntegerValue GetCoefficient(const IntegerVariable var, - const LinearExpression &expr) { + const LinearExpression& expr) { for (int i = 0; i < expr.vars.size(); ++i) { if (expr.vars[i] == var) { return expr.coeffs[i]; @@ -344,7 +344,7 @@ IntegerValue GetCoefficient(const IntegerVariable var, } IntegerValue GetCoefficientOfPositiveVar(const IntegerVariable var, - const LinearExpression &expr) { + const LinearExpression& expr) { CHECK(VariableIsPositive(var)); for (int i = 0; i < expr.vars.size(); ++i) { if (expr.vars[i] == var) { diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc 
index 1c8ed336e3..a0e3db7503 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -29,7 +29,7 @@ namespace { const LinearConstraintManager::ConstraintIndex kInvalidConstraintIndex(-1); -size_t ComputeHashOfTerms(const LinearConstraint &ct) { +size_t ComputeHashOfTerms(const LinearConstraint& ct) { DCHECK(std::is_sorted(ct.vars.begin(), ct.vars.end())); size_t hash = 0; const int num_terms = ct.vars.size(); @@ -60,7 +60,7 @@ LinearConstraintManager::~LinearConstraintManager() { << num_add_cut_calls_ << " calls) worker: '" << model_->Name() << "'"; LOG(INFO) << "Num simplifications: " << num_simplifications_; - for (const auto &entry : type_to_num_cuts_) { + for (const auto& entry : type_to_num_cuts_) { LOG(INFO) << "Added " << entry.second << " cuts of type '" << entry.first << "'."; } @@ -76,7 +76,7 @@ void LinearConstraintManager::RescaleActiveCounts(const double scaling_factor) { } bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints( - glop::BasisState *solution_state) { + glop::BasisState* solution_state) { if (solution_state->IsEmpty()) return false; // Mainly to simplify tests. const glop::RowIndex num_rows(lp_constraints_.size()); const glop::ColIndex num_cols = @@ -122,7 +122,7 @@ bool LinearConstraintManager::MaybeRemoveSomeInactiveConstraints( // to detect duplicate constraints and merge bounds. This is also relevant if // we regenerate identical cuts for some reason. 
LinearConstraintManager::ConstraintIndex LinearConstraintManager::Add( - LinearConstraint ct, bool *added) { + LinearConstraint ct, bool* added) { CHECK(!ct.vars.empty()); DCHECK(NoDuplicateVariable(ct)); SimplifyConstraint(&ct); @@ -184,7 +184,7 @@ void LinearConstraintManager::ComputeObjectiveParallelism( return; } - const LinearConstraint &lc = constraint_infos_[ct_index].constraint; + const LinearConstraint& lc = constraint_infos_[ct_index].constraint; double unscaled_objective_parallelism = 0.0; for (int i = 0; i < lc.vars.size(); ++i) { const IntegerVariable var = lc.vars[i]; @@ -205,7 +205,7 @@ void LinearConstraintManager::ComputeObjectiveParallelism( // Cuts are also handled slightly differently than normal constraints. bool LinearConstraintManager::AddCut( LinearConstraint ct, std::string type_name, - const gtl::ITIVector &lp_solution, + const gtl::ITIVector& lp_solution, std::string extra_info) { ++num_add_cut_calls_; if (ct.vars.empty()) return false; @@ -312,7 +312,7 @@ void LinearConstraintManager::SetObjectiveCoefficient(IntegerVariable var, dense_objective_coeffs_[var] = ToDouble(coeff); } -bool LinearConstraintManager::SimplifyConstraint(LinearConstraint *ct) { +bool LinearConstraintManager::SimplifyConstraint(LinearConstraint* ct) { bool term_changed = false; IntegerValue min_sum(0); @@ -438,8 +438,8 @@ bool LinearConstraintManager::SimplifyConstraint(LinearConstraint *ct) { } bool LinearConstraintManager::ChangeLp( - const gtl::ITIVector &lp_solution, - glop::BasisState *solution_state) { + const gtl::ITIVector& lp_solution, + glop::BasisState* solution_state) { VLOG(3) << "Enter ChangeLP, scan " << constraint_infos_.size() << " constraints"; const double saved_dtime = dtime_; @@ -688,9 +688,9 @@ void LinearConstraintManager::AddAllConstraintsToLp() { } bool LinearConstraintManager::DebugCheckConstraint( - const LinearConstraint &cut) { + const LinearConstraint& cut) { if (model_->Get() == nullptr) return true; - const auto &debug_solution = 
*(model_->Get()); + const auto& debug_solution = *(model_->Get()); if (debug_solution.empty()) return true; IntegerValue activity(0); @@ -708,8 +708,8 @@ bool LinearConstraintManager::DebugCheckConstraint( } void TopNCuts::AddCut( - LinearConstraint ct, const std::string &name, - const gtl::ITIVector &lp_solution) { + LinearConstraint ct, const std::string& name, + const gtl::ITIVector& lp_solution) { if (ct.vars.empty()) return; const double activity = ComputeActivity(ct, lp_solution); const double violation = @@ -719,9 +719,9 @@ void TopNCuts::AddCut( } void TopNCuts::TransferToManager( - const gtl::ITIVector &lp_solution, - LinearConstraintManager *manager) { - for (const CutCandidate &candidate : cuts_.UnorderedElements()) { + const gtl::ITIVector& lp_solution, + LinearConstraintManager* manager) { + for (const CutCandidate& candidate : cuts_.UnorderedElements()) { manager->AddCut(candidate.cut, candidate.name, lp_solution); } cuts_.Clear(); diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 86b952a1fc..d0aa40f5c4 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -80,7 +80,7 @@ bool ScatteredIntegerVector::Add(glop::ColIndex col, IntegerValue value) { bool ScatteredIntegerVector::AddLinearExpressionMultiple( IntegerValue multiplier, - const std::vector > &terms) { + const std::vector>& terms) { const double threshold = 0.1 * static_cast(dense_vector_.size()); if (is_sparse_ && static_cast(terms.size()) < threshold) { for (const std::pair term : terms) { @@ -107,8 +107,8 @@ bool ScatteredIntegerVector::AddLinearExpressionMultiple( } void ScatteredIntegerVector::ConvertToLinearConstraint( - const std::vector &integer_variables, - IntegerValue upper_bound, LinearConstraint *result) { + const std::vector& integer_variables, + IntegerValue upper_bound, LinearConstraint* result) { result->vars.clear(); result->coeffs.clear(); if (is_sparse_) { @@ 
-132,9 +132,9 @@ void ScatteredIntegerVector::ConvertToLinearConstraint( result->ub = upper_bound; } -std::vector > +std::vector> ScatteredIntegerVector::GetTerms() { - std::vector > result; + std::vector> result; if (is_sparse_) { std::sort(non_zeros_.begin(), non_zeros_.end()); for (const glop::ColIndex col : non_zeros_) { @@ -153,7 +153,7 @@ ScatteredIntegerVector::GetTerms() { // TODO(user): make SatParameters singleton too, otherwise changing them after // a constraint was added will have no effect on this class. -LinearProgrammingConstraint::LinearProgrammingConstraint(Model *model) +LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model) : constraint_manager_(model), sat_parameters_(*(model->GetOrCreate())), model_(model), @@ -186,7 +186,7 @@ LinearProgrammingConstraint::~LinearProgrammingConstraint() { } void LinearProgrammingConstraint::AddLinearConstraint( - const LinearConstraint &ct) { + const LinearConstraint& ct) { DCHECK(!lp_constraint_is_registered_); constraint_manager_.Add(ct); @@ -254,12 +254,12 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { // Fill integer_lp_. 
integer_lp_.clear(); infinity_norms_.clear(); - const auto &all_constraints = constraint_manager_.AllConstraints(); + const auto& all_constraints = constraint_manager_.AllConstraints(); for (const auto index : constraint_manager_.LpConstraints()) { - const LinearConstraint &ct = all_constraints[index].constraint; + const LinearConstraint& ct = all_constraints[index].constraint; integer_lp_.push_back(LinearConstraintInternal()); - LinearConstraintInternal &new_ct = integer_lp_.back(); + LinearConstraintInternal& new_ct = integer_lp_.back(); new_ct.lb = ct.lb; new_ct.ub = ct.ub; const int size = ct.vars.size(); @@ -299,10 +299,10 @@ bool LinearProgrammingConstraint::CreateLpFromConstraintManager() { for (const auto entry : integer_objective_) { lp_data_.SetObjectiveCoefficient(entry.first, ToDouble(entry.second)); } - for (const LinearConstraintInternal &ct : integer_lp_) { + for (const LinearConstraintInternal& ct : integer_lp_) { const ConstraintIndex row = lp_data_.CreateNewConstraint(); lp_data_.SetConstraintBounds(row, ToDouble(ct.lb), ToDouble(ct.ub)); - for (const auto &term : ct.terms) { + for (const auto& term : ct.terms) { lp_data_.SetCoefficient(row, term.first, ToDouble(term.second)); } } @@ -362,8 +362,8 @@ LPSolveInfo LinearProgrammingConstraint::SolveLpForBranching() { } void LinearProgrammingConstraint::FillReducedCostReasonIn( - const glop::DenseRow &reduced_costs, - std::vector *integer_reason) { + const glop::DenseRow& reduced_costs, + std::vector* integer_reason) { integer_reason->clear(); const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; i++) { @@ -481,7 +481,7 @@ bool LinearProgrammingConstraint::BranchOnVar(IntegerVariable positive_var) { return true; } -void LinearProgrammingConstraint::RegisterWith(Model *model) { +void LinearProgrammingConstraint::RegisterWith(Model* model) { DCHECK(!lp_constraint_is_registered_); lp_constraint_is_registered_ = true; model->GetOrCreate()->push_back(this); @@ -499,7 +499,7 @@ void 
LinearProgrammingConstraint::RegisterWith(Model *model) { return; } - GenericLiteralWatcher *watcher = model->GetOrCreate(); + GenericLiteralWatcher* watcher = model->GetOrCreate(); const int watcher_id = watcher->Register(this); const int num_vars = integer_variables_.size(); for (int i = 0; i < num_vars; i++) { @@ -548,7 +548,7 @@ void LinearProgrammingConstraint::AddCutGenerator(CutGenerator generator) { } bool LinearProgrammingConstraint::IncrementalPropagate( - const std::vector &watch_indices) { + const std::vector& watch_indices) { if (!lp_solution_is_set_) return Propagate(); // At level zero, if there is still a chance to add cuts or lazy constraints, @@ -644,9 +644,8 @@ bool LinearProgrammingConstraint::SolveLp() { } bool LinearProgrammingConstraint::AddCutFromConstraints( - const std::string &name, - const std::vector > - &integer_multipliers) { + const std::string& name, + const std::vector>& integer_multipliers) { // This is initialized to a valid linear contraint (by taking linear // combination of the LP rows) and will be transformed into a cut if // possible. @@ -723,7 +722,7 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( for (const IntegerVariable var : cut_.vars) { if (var >= first_new_var) { CHECK(VariableIsPositive(var)); - const auto &info = + const auto& info = ib_slack_infos[(var.value() - first_new_var.value()) / 2]; tmp_lp_values_.push_back(info.lp_value); tmp_var_lbs_.push_back(info.lb); @@ -795,10 +794,10 @@ bool LinearProgrammingConstraint::AddCutFromConstraints( } bool LinearProgrammingConstraint::PostprocessAndAddCut( - const std::string &name, const std::string &info, + const std::string& name, const std::string& info, IntegerVariable first_new_var, IntegerVariable first_slack, - const std::vector &ib_slack_infos, - LinearConstraint *cut) { + const std::vector& ib_slack_infos, + LinearConstraint* cut) { // Compute the activity. Warning: the cut no longer have the same size so we // cannot use tmp_lp_values_. 
Note that the substitution below shouldn't // change the activity by definition. @@ -843,8 +842,8 @@ bool LinearProgrammingConstraint::PostprocessAndAddCut( const int index = (var.value() - first_new_var.value()) / 2; CHECK_LT(index, ib_slack_infos.size()); - std::vector > terms; - for (const std::pair &term : + std::vector> terms; + for (const std::pair& term : ib_slack_infos[index].terms) { terms.push_back( {gtl::FindOrDie(mirror_lp_variable_, @@ -925,7 +924,7 @@ void LinearProgrammingConstraint::AddCGCuts() { if (time_limit_->LimitReached()) break; - const glop::ScatteredRow &lambda = simplex_.GetUnitRowLeftInverse(row); + const glop::ScatteredRow& lambda = simplex_.GetUnitRowLeftInverse(row); glop::DenseColumn lp_multipliers(num_rows, 0.0); double magnitude = 0.0; int num_non_zeros = 0; @@ -963,10 +962,9 @@ void LinearProgrammingConstraint::AddCGCuts() { // TODO(user): We use a lower value here otherwise we might run into // overflow while computing the cut. This should be fixable. - const std::vector > - integer_multipliers = - ScaleLpMultiplier(/*take_objective_into_account=*/false, - lp_multipliers, &scaling, /*max_pow=*/52); + const std::vector> integer_multipliers = + ScaleLpMultiplier(/*take_objective_into_account=*/false, + lp_multipliers, &scaling, /*max_pow=*/52); AddCutFromConstraints("CG", integer_multipliers); } } @@ -975,9 +973,9 @@ void LinearProgrammingConstraint::AddCGCuts() { namespace { // For each element of a, adds a random one in b and append the pair to output. 
-void RandomPick(const std::vector &a, const std::vector &b, - ModelRandomGenerator *random, - std::vector > *output) { +void RandomPick(const std::vector& a, const std::vector& b, + ModelRandomGenerator* random, + std::vector>* output) { if (a.empty() || b.empty()) return; for (const RowIndex row : a) { const RowIndex other = b[absl::Uniform(*random, 0, b.size())]; @@ -988,8 +986,8 @@ void RandomPick(const std::vector &a, const std::vector &b, } template -IntegerValue GetCoeff(ColIndex col, const ListOfTerms &terms) { - for (const auto &term : terms) { +IntegerValue GetCoeff(ColIndex col, const ListOfTerms& terms) { + for (const auto& term : terms) { if (term.first == col) return term.second; } return IntegerValue(0); @@ -1021,7 +1019,7 @@ void LinearProgrammingConstraint::AddMirCuts() { // We compute all the rows that are tight, these will be used as the base row // for the MIR_n procedure below. const RowIndex num_rows = lp_data_.num_constraints(); - std::vector > base_rows; + std::vector> base_rows; gtl::ITIVector row_weights(num_rows.value(), 0.0); for (RowIndex row(0); row < num_rows; ++row) { const auto status = simplex_.GetConstraintStatus(row); @@ -1057,8 +1055,8 @@ void LinearProgrammingConstraint::AddMirCuts() { std::vector weights; gtl::ITIVector used_rows; - std::vector > integer_multipliers; - for (const std::pair &entry : base_rows) { + std::vector> integer_multipliers; + for (const std::pair& entry : base_rows) { if (time_limit_->LimitReached()) break; // First try to generate a cut directly from this base row (MIR1). @@ -1199,7 +1197,7 @@ void LinearProgrammingConstraint::AddMirCuts() { // // TODO(user): do that in the possible_rows selection? only problem is // that we do not have the integer coefficient there... 
- for (std::pair &entry : integer_multipliers) { + for (std::pair& entry : integer_multipliers) { max_magnitude = std::max(max_magnitude, entry.second); } if (CapAdd(CapProd(max_magnitude.value(), std::abs(mult1.value())), @@ -1208,7 +1206,7 @@ void LinearProgrammingConstraint::AddMirCuts() { break; } - for (std::pair &entry : integer_multipliers) { + for (std::pair& entry : integer_multipliers) { entry.second *= mult1; } integer_multipliers.push_back({row_to_combine, mult2}); @@ -1263,7 +1261,7 @@ void LinearProgrammingConstraint::AddZeroHalfCuts() { zero_half_cut_helper_.AddOneConstraint( row, integer_lp_[row].terms, integer_lp_[row].lb, integer_lp_[row].ub); } - for (const std::vector > &multipliers : + for (const std::vector>& multipliers : zero_half_cut_helper_.InterestingCandidates(random_)) { if (time_limit_->LimitReached()) break; @@ -1376,7 +1374,7 @@ bool LinearProgrammingConstraint::Propagate() { if (!cut_generators_.empty() && (trail_->CurrentDecisionLevel() == 0 || !sat_parameters_.only_add_cuts_at_level_zero())) { - for (const CutGenerator &generator : cut_generators_) { + for (const CutGenerator& generator : cut_generators_) { generator.generate_cuts(expanded_lp_solution_, &constraint_manager_); } } @@ -1518,7 +1516,7 @@ bool LinearProgrammingConstraint::Propagate() { // Strong branching on top max_num_branches variable. 
const int max_num_branches = 3; const int num_vars = integer_variables_.size(); - std::vector > branching_vars; + std::vector> branching_vars; for (int i = 0; i < num_vars; ++i) { const IntegerVariable var = integer_variables_[i]; const IntegerVariable positive_var = PositiveVariable(var); @@ -1551,7 +1549,7 @@ bool LinearProgrammingConstraint::Propagate() { } } - for (const std::pair &branching_var : + for (const std::pair& branching_var : branching_vars) { const IntegerVariable positive_var = branching_var.second; VLOG(2) << "Branching on: " << positive_var; @@ -1573,7 +1571,7 @@ bool LinearProgrammingConstraint::Propagate() { // // TODO(user): Because of PreventOverflow(), this should actually never happen. IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound( - const LinearConstraint &terms) const { + const LinearConstraint& terms) const { IntegerValue lower_bound(0); const int size = terms.vars.size(); for (int i = 0; i < size; ++i) { @@ -1588,7 +1586,7 @@ IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound( } bool LinearProgrammingConstraint::PossibleOverflow( - const LinearConstraint &constraint) { + const LinearConstraint& constraint) { IntegerValue lower_bound(0); const int size = constraint.vars.size(); for (int i = 0; i < size; ++i) { @@ -1619,7 +1617,7 @@ absl::int128 FloorRatio128(absl::int128 x, IntegerValue positive_div) { } // namespace -void LinearProgrammingConstraint::PreventOverflow(LinearConstraint *constraint, +void LinearProgrammingConstraint::PreventOverflow(LinearConstraint* constraint, int max_pow) { // Compute the min/max possible partial sum. // @@ -1684,7 +1682,7 @@ void LinearProgrammingConstraint::PreventOverflow(LinearConstraint *constraint, // TODO(user): combine this with RelaxLinearReason() to avoid the extra // magnitude vector and the weird precondition of RelaxLinearReason(). 
void LinearProgrammingConstraint::SetImpliedLowerBoundReason( - const LinearConstraint &terms, IntegerValue slack) { + const LinearConstraint& terms, IntegerValue slack) { integer_reason_.clear(); std::vector magnitudes; const int size = terms.vars.size(); @@ -1708,13 +1706,13 @@ void LinearProgrammingConstraint::SetImpliedLowerBoundReason( } // TODO(user): Provide a sparse interface. -std::vector > +std::vector> LinearProgrammingConstraint::ScaleLpMultiplier( bool take_objective_into_account, - const glop::DenseColumn &dense_lp_multipliers, Fractional *scaling, + const glop::DenseColumn& dense_lp_multipliers, Fractional* scaling, int max_pow) const { double max_sum = 0.0; - std::vector > cp_multipliers; + std::vector> cp_multipliers; for (RowIndex row(0); row < dense_lp_multipliers.size(); ++row) { const Fractional lp_multi = dense_lp_multipliers[row]; @@ -1747,7 +1745,7 @@ LinearProgrammingConstraint::ScaleLpMultiplier( } *scaling = 1.0; - std::vector > integer_multipliers; + std::vector> integer_multipliers; if (max_sum == 0.0) { // Empty linear combinaison. return integer_multipliers; @@ -1774,8 +1772,8 @@ LinearProgrammingConstraint::ScaleLpMultiplier( } bool LinearProgrammingConstraint::ComputeNewLinearConstraint( - const std::vector > &integer_multipliers, - ScatteredIntegerVector *scattered_vector, IntegerValue *upper_bound) const { + const std::vector>& integer_multipliers, + ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const { // Initialize the new constraint. *upper_bound = 0; scattered_vector->ClearAndResize(integer_variables_.size()); @@ -1804,10 +1802,10 @@ bool LinearProgrammingConstraint::ComputeNewLinearConstraint( // TODO(user): no need to update the multipliers. 
void LinearProgrammingConstraint::AdjustNewLinearConstraint( - std::vector > *integer_multipliers, - ScatteredIntegerVector *scattered_vector, IntegerValue *upper_bound) const { + std::vector>* integer_multipliers, + ScatteredIntegerVector* scattered_vector, IntegerValue* upper_bound) const { const IntegerValue kMaxWantedCoeff(1e18); - for (std::pair &term : *integer_multipliers) { + for (std::pair& term : *integer_multipliers) { const RowIndex row = term.first; const IntegerValue multiplier = term.second; if (multiplier == 0) continue; @@ -1978,7 +1976,7 @@ bool LinearProgrammingConstraint::ExactLpReasonning() { } Fractional scaling; - std::vector > integer_multipliers = + std::vector> integer_multipliers = ScaleLpMultiplier(/*take_objective_into_account=*/true, lp_multipliers, &scaling); @@ -2013,7 +2011,7 @@ bool LinearProgrammingConstraint::ExactLpReasonning() { DCHECK(!PossibleOverflow(new_constraint)); DCHECK(constraint_manager_.DebugCheckConstraint(new_constraint)); - IntegerSumLE *cp_constraint = + IntegerSumLE* cp_constraint = new IntegerSumLE({}, new_constraint.vars, new_constraint.coeffs, new_constraint.ub, model_); if (trail_->CurrentDecisionLevel() == 0) { @@ -2028,7 +2026,7 @@ bool LinearProgrammingConstraint::ExactLpReasonning() { bool LinearProgrammingConstraint::FillExactDualRayReason() { Fractional scaling; - std::vector > integer_multipliers = + std::vector> integer_multipliers = ScaleLpMultiplier(/*take_objective_into_account=*/false, simplex_.GetDualRay(), &scaling); @@ -2129,14 +2127,14 @@ namespace { // of flow conservation on these problems, the outgoing flow is always the same // as the incoming flow, so adding this extra cut doesn't seem relevant. 
void AddOutgoingCut(int num_nodes, int subset_size, - const std::vector &in_subset, - const std::vector &tails, - const std::vector &heads, - const std::vector &literals, - const std::vector &literal_lp_values, + const std::vector& in_subset, + const std::vector& tails, + const std::vector& heads, + const std::vector& literals, + const std::vector& literal_lp_values, int64 rhs_lower_bound, - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager, Model *model) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager, Model* model) { // A node is said to be optional if it can be excluded from the subcircuit, // in which case there is a self-loop on that node. // If there are optional nodes, use extended formula: @@ -2226,11 +2224,11 @@ void AddOutgoingCut(int num_nodes, int subset_size, // Note that this is mainly a "symmetric" case algo, but it does still work for // the asymmetric case. void SeparateSubtourInequalities( - int num_nodes, const std::vector &tails, const std::vector &heads, - const std::vector &literals, - const gtl::ITIVector &lp_values, + int num_nodes, const std::vector& tails, const std::vector& heads, + const std::vector& literals, + const gtl::ITIVector& lp_values, absl::Span demands, int64 capacity, - LinearConstraintManager *manager, Model *model) { + LinearConstraintManager* manager, Model* model) { if (num_nodes <= 2) return; // We will collect only the arcs with a positive lp_values to speed up some @@ -2244,8 +2242,8 @@ void SeparateSubtourInequalities( // Sort the arcs by non-increasing lp_values. 
std::vector literal_lp_values(literals.size()); - std::vector > arc_by_decreasing_lp_values; - auto *encoder = model->GetOrCreate(); + std::vector> arc_by_decreasing_lp_values; + auto* encoder = model->GetOrCreate(); for (int i = 0; i < literals.size(); ++i) { double lp_value; const IntegerVariable direct_view = encoder->GetLiteralView(literals[i]); @@ -2263,7 +2261,7 @@ void SeparateSubtourInequalities( } std::sort(arc_by_decreasing_lp_values.begin(), arc_by_decreasing_lp_values.end(), - std::greater >()); + std::greater>()); // We will do a union-find by adding one by one the arc of the lp solution // in the order above. Every intermediate set during this construction will @@ -2318,9 +2316,9 @@ void SeparateSubtourInequalities( // the pre_order vector, it is why we initialize it once and for all. int new_size = 0; std::vector pre_order(num_nodes); - std::vector > subsets; + std::vector> subsets; { - std::vector > graph(parent.size()); + std::vector> graph(parent.size()); for (int i = 0; i < parent.size(); ++i) { if (parent[i] != i) graph[parent[i]].push_back(i); } @@ -2446,8 +2444,8 @@ namespace { // Returns for each literal its integer view, or the view of its negation. std::vector GetAssociatedVariables( - const std::vector &literals, Model *model) { - auto *encoder = model->GetOrCreate(); + const std::vector& literals, Model* model) { + auto* encoder = model->GetOrCreate(); std::vector result; for (const Literal l : literals) { const IntegerVariable direct_view = encoder->GetLiteralView(l); @@ -2467,33 +2465,33 @@ std::vector GetAssociatedVariables( // rest of the graph in the LP solution, and add cuts to force some arcs to // enter and leave this component from outside. 
CutGenerator CreateStronglyConnectedGraphCutGenerator( - int num_nodes, const std::vector &tails, const std::vector &heads, - const std::vector &literals, Model *model) { + int num_nodes, const std::vector& tails, const std::vector& heads, + const std::vector& literals, Model* model) { CutGenerator result; result.vars = GetAssociatedVariables(literals, model); result.generate_cuts = [num_nodes, tails, heads, literals, model]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { - SeparateSubtourInequalities(num_nodes, tails, heads, literals, - lp_values, /*demands=*/{}, - /*capacity=*/0, manager, model); + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { + SeparateSubtourInequalities( + num_nodes, tails, heads, literals, lp_values, + /*demands=*/{}, /*capacity=*/0, manager, model); }; return result; } CutGenerator CreateCVRPCutGenerator(int num_nodes, - const std::vector &tails, - const std::vector &heads, - const std::vector &literals, - const std::vector &demands, - int64 capacity, Model *model) { + const std::vector& tails, + const std::vector& heads, + const std::vector& literals, + const std::vector& demands, + int64 capacity, Model* model) { CutGenerator result; result.vars = GetAssociatedVariables(literals, model); result.generate_cuts = [num_nodes, tails, heads, demands, capacity, literals, model]( - const gtl::ITIVector &lp_values, - LinearConstraintManager *manager) { + const gtl::ITIVector& lp_values, + LinearConstraintManager* manager) { SeparateSubtourInequalities(num_nodes, tails, heads, literals, lp_values, demands, capacity, manager, model); @@ -2502,7 +2500,7 @@ CutGenerator CreateCVRPCutGenerator(int num_nodes, } std::function -LinearProgrammingConstraint::HeuristicLpMostInfeasibleBinary(Model *model) { +LinearProgrammingConstraint::HeuristicLpMostInfeasibleBinary(Model* model) { // Gather all 0-1 variables that appear in this LP. 
std::vector variables; for (IntegerVariable var : integer_variables_) { @@ -2548,7 +2546,7 @@ LinearProgrammingConstraint::HeuristicLpMostInfeasibleBinary(Model *model) { } std::function -LinearProgrammingConstraint::HeuristicLpReducedCostBinary(Model *model) { +LinearProgrammingConstraint::HeuristicLpReducedCostBinary(Model* model) { // Gather all 0-1 variables that appear in this LP. std::vector variables; for (IntegerVariable var : integer_variables_) { diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index 7a210da7db..98dd095e9a 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -74,7 +74,7 @@ class ScatteredIntegerVector { // Returns false in case of overflow. bool AddLinearExpressionMultiple( IntegerValue multiplier, - const std::vector > &terms); + const std::vector>& terms); // This is not const only because non_zeros is sorted. Note that sorting the // non-zeros make the result deterministic whether or not we were in sparse @@ -83,11 +83,11 @@ class ScatteredIntegerVector { // TODO(user): Ideally we should convert to IntegerVariable as late as // possible. Prefer to use GetTerms(). void ConvertToLinearConstraint( - const std::vector &integer_variables, - IntegerValue upper_bound, LinearConstraint *result); + const std::vector& integer_variables, + IntegerValue upper_bound, LinearConstraint* result); // Similar to ConvertToLinearConstraint(). - std::vector > GetTerms(); + std::vector> GetTerms(); // We only provide the const []. IntegerValue operator[](glop::ColIndex col) const { @@ -129,11 +129,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, public: typedef glop::RowIndex ConstraintIndex; - explicit LinearProgrammingConstraint(Model *model); + explicit LinearProgrammingConstraint(Model* model); ~LinearProgrammingConstraint() override; // Add a new linear constraint to this LP. 
- void AddLinearConstraint(const LinearConstraint &ct); + void AddLinearConstraint(const LinearConstraint& ct); // Set the coefficient of the variable in the objective. Calling it twice will // overwrite the previous value. @@ -159,14 +159,14 @@ class LinearProgrammingConstraint : public PropagatorInterface, // PropagatorInterface API. bool Propagate() override; - bool IncrementalPropagate(const std::vector &watch_indices) override; - void RegisterWith(Model *model); + bool IncrementalPropagate(const std::vector& watch_indices) override; + void RegisterWith(Model* model); // ReversibleInterface API. void SetLevel(int level) override; int NumVariables() const { return integer_variables_.size(); } - const std::vector &integer_variables() const { + const std::vector& integer_variables() const { return integer_variables_; } std::string DimensionString() const { return lp_data_.GetDimensionString(); } @@ -181,7 +181,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // TODO(user): This fixes to 1, but for some problems fixing to 0 // or to the std::round(support value) might work better. When this is the // case, change behaviour automatically? - std::function HeuristicLpMostInfeasibleBinary(Model *model); + std::function HeuristicLpMostInfeasibleBinary(Model* model); // Returns a IntegerLiteral guided by the underlying LP constraints. // @@ -200,7 +200,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // does BFS. This might depend on the model, more trials are necessary. We // could also do exponential smoothing instead of decaying every N calls, i.e. // pseudo = a * pseudo + (1-a) reduced. - std::function HeuristicLpReducedCostBinary(Model *model); + std::function HeuristicLpReducedCostBinary(Model* model); // Returns a IntegerLiteral guided by the underlying LP constraints. 
// @@ -224,8 +224,8 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Generates a set of IntegerLiterals explaining why the best solution can not // be improved using reduced costs. This is used to generate explanations for // both infeasibility and bounds deductions. - void FillReducedCostReasonIn(const glop::DenseRow &reduced_costs, - std::vector *integer_reason); + void FillReducedCostReasonIn(const glop::DenseRow& reduced_costs, + std::vector* integer_reason); // Reinitialize the LP from a potentially new set of constraints. // This fills all data structure and properly rescale the underlying LP. @@ -243,16 +243,16 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // Return true if a new cut was added to the cut manager. bool AddCutFromConstraints( - const std::string &name, - const std::vector > - &integer_multipliers); + const std::string& name, + const std::vector>& + integer_multipliers); // Second half of AddCutFromConstraints(). bool PostprocessAndAddCut( - const std::string &name, const std::string &info, + const std::string& name, const std::string& info, IntegerVariable first_new_var, IntegerVariable first_slack, - const std::vector &ib_slack_infos, - LinearConstraint *cut); + const std::vector& ib_slack_infos, + LinearConstraint* cut); // Computes and adds the corresponding type of cuts. // This can currently only be called at the root node. @@ -282,9 +282,9 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // Note that this will loose some precision, but our subsequent computation // will still be exact as it will work for any set of multiplier. 
- std::vector > ScaleLpMultiplier( + std::vector> ScaleLpMultiplier( bool take_objective_into_account, - const glop::DenseColumn &dense_lp_multipliers, glop::Fractional *scaling, + const glop::DenseColumn& dense_lp_multipliers, glop::Fractional* scaling, int max_pow = 62) const; // Computes from an integer linear combination of the integer rows of the LP a @@ -293,37 +293,35 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // Returns false if we encountered any integer overflow. bool ComputeNewLinearConstraint( - const std::vector > - &integer_multipliers, - ScatteredIntegerVector *scattered_vector, - IntegerValue *upper_bound) const; + const std::vector>& + integer_multipliers, + ScatteredIntegerVector* scattered_vector, + IntegerValue* upper_bound) const; // Simple heuristic to try to minimize |upper_bound - ImpliedLB(terms)|. This // should make the new constraint tighter and correct a bit the imprecision // introduced by rounding the floating points values. void AdjustNewLinearConstraint( - std::vector > - *integer_multipliers, - ScatteredIntegerVector *scattered_vector, - IntegerValue *upper_bound) const; + std::vector>* integer_multipliers, + ScatteredIntegerVector* scattered_vector, + IntegerValue* upper_bound) const; // Shortcut for an integer linear expression type. - using LinearExpression = - std::vector >; + using LinearExpression = std::vector>; // Converts a dense represenation of a linear constraint to a sparse one // expressed in terms of IntegerVariable. void ConvertToLinearConstraint( - const gtl::ITIVector &dense_vector, - IntegerValue upper_bound, LinearConstraint *result); + const gtl::ITIVector& dense_vector, + IntegerValue upper_bound, LinearConstraint* result); // Compute the implied lower bound of the given linear expression using the // current variable bound. Return kMinIntegerValue in case of overflow. 
- IntegerValue GetImpliedLowerBound(const LinearConstraint &terms) const; + IntegerValue GetImpliedLowerBound(const LinearConstraint& terms) const; // Tests for possible overflow in the propagation of the given linear // constraint. - bool PossibleOverflow(const LinearConstraint &constraint); + bool PossibleOverflow(const LinearConstraint& constraint); // Reduce the coefficient of the constraint so that we cannot have overflow // in the propagation of the given linear constraint. Note that we may loose @@ -331,11 +329,11 @@ class LinearProgrammingConstraint : public PropagatorInterface, // // We make sure that any partial sum involving any variable value in their // domain do not exceed 2 ^ max_pow. - void PreventOverflow(LinearConstraint *constraint, int max_pow = 62); + void PreventOverflow(LinearConstraint* constraint, int max_pow = 62); // Fills integer_reason_ with the reason for the implied lower bound of the // given linear expression. We relax the reason if we have some slack. - void SetImpliedLowerBoundReason(const LinearConstraint &terms, + void SetImpliedLowerBoundReason(const LinearConstraint& terms, IntegerValue slack); // Fills the deductions vector with reduced cost deductions that can be made @@ -427,20 +425,20 @@ class LinearProgrammingConstraint : public PropagatorInterface, IntegerVariable objective_cp_; // Singletons from Model. - const SatParameters &sat_parameters_; - Model *model_; - TimeLimit *time_limit_; - IntegerTrail *integer_trail_; - Trail *trail_; - IntegerEncoder *integer_encoder_; - ModelRandomGenerator *random_; + const SatParameters& sat_parameters_; + Model* model_; + TimeLimit* time_limit_; + IntegerTrail* integer_trail_; + Trail* trail_; + IntegerEncoder* integer_encoder_; + ModelRandomGenerator* random_; // Used while deriving cuts. ImpliedBoundsProcessor implied_bounds_processor_; // The dispatcher for all LP propagators of the model, allows to find which // LinearProgrammingConstraint has a given IntegerVariable. 
- LinearProgrammingDispatcher *dispatcher_; + LinearProgrammingDispatcher* dispatcher_; std::vector integer_reason_; std::vector deductions_; @@ -452,7 +450,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // both improve the objective lower bound but also perform reduced cost // fixing. int rev_optimal_constraints_size_ = 0; - std::vector > optimal_constraints_; + std::vector> optimal_constraints_; // Last OPTIMAL solution found by a call to the underlying LP solver. // On IncrementalPropagate(), if the bound updates do not invalidate this @@ -474,7 +472,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, bool lp_at_level_zero_is_final_ = false; // Same as lp_solution_ but this vector is indexed differently. - LinearProgrammingConstraintLpSolution &expanded_lp_solution_; + LinearProgrammingConstraintLpSolution& expanded_lp_solution_; // Linear constraints cannot be created or modified after this is registered. bool lp_constraint_is_registered_ = false; @@ -494,7 +492,7 @@ class LinearProgrammingConstraint : public PropagatorInterface, // to fixed variables and can be ignored. int rev_rc_start_ = 0; RevRepository rc_rev_int_repository_; - std::vector > positions_by_decreasing_rc_score_; + std::vector> positions_by_decreasing_rc_score_; // Defined as average number of nonbasic variables with zero reduced costs. IncrementalAverage average_degeneracy_; @@ -516,14 +514,14 @@ class LinearProgrammingConstraint : public PropagatorInterface, // Important: only positive variable do appear here. class LinearProgrammingDispatcher : public absl::flat_hash_map { + LinearProgrammingConstraint*> { public: - explicit LinearProgrammingDispatcher(Model *model) {} + explicit LinearProgrammingDispatcher(Model* model) {} }; // A class that stores the collection of all LP constraints in a model. 
class LinearProgrammingConstraintCollection - : public std::vector { + : public std::vector { public: LinearProgrammingConstraintCollection() {} }; @@ -536,19 +534,19 @@ class LinearProgrammingConstraintCollection // connected. Note that we already assume basic constraint to be in the lp, so // we do not add any cuts for components of size 1. CutGenerator CreateStronglyConnectedGraphCutGenerator( - int num_nodes, const std::vector &tails, const std::vector &heads, - const std::vector &literals, Model *model); + int num_nodes, const std::vector& tails, const std::vector& heads, + const std::vector& literals, Model* model); // Almost the same as CreateStronglyConnectedGraphCutGenerator() but for each // components, computes the demand needed to serves it, and depending on whether // it contains the depot (node zero) or not, compute the minimum number of // vehicle that needs to cross the component border. CutGenerator CreateCVRPCutGenerator(int num_nodes, - const std::vector &tails, - const std::vector &heads, - const std::vector &literals, - const std::vector &demands, - int64 capacity, Model *model); + const std::vector& tails, + const std::vector& heads, + const std::vector& literals, + const std::vector& demands, + int64 capacity, Model* model); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/linear_relaxation.cc b/ortools/sat/linear_relaxation.cc index db2ef7d2cf..ee42c6fc6b 100644 --- a/ortools/sat/linear_relaxation.cc +++ b/ortools/sat/linear_relaxation.cc @@ -30,13 +30,13 @@ namespace operations_research { namespace sat { -bool AppendFullEncodingRelaxation(IntegerVariable var, const Model &model, - LinearRelaxation *relaxation) { - const auto *encoder = model.Get(); +bool AppendFullEncodingRelaxation(IntegerVariable var, const Model& model, + LinearRelaxation* relaxation) { + const auto* encoder = model.Get(); if (encoder == nullptr) return false; if (!encoder->VariableIsFullyEncoded(var)) return false; - const auto &encoding = 
encoder->FullDomainEncoding(var); + const auto& encoding = encoder->FullDomainEncoding(var); const IntegerValue var_min = model.Get()->LowerBound(var); LinearConstraintBuilder at_least_one(&model, IntegerValue(1), @@ -69,9 +69,9 @@ namespace { // TODO(user): Not super efficient. std::pair GetMinAndMaxNotEncoded( IntegerVariable var, - const absl::flat_hash_set &encoded_values, - const Model &model) { - const auto *domains = model.Get(); + const absl::flat_hash_set& encoded_values, + const Model& model) { + const auto* domains = model.Get(); if (domains == nullptr || var >= domains->size()) { return {kMaxIntegerValue, kMinIntegerValue}; } @@ -90,7 +90,7 @@ std::pair GetMinAndMaxNotEncoded( } IntegerValue max = kMinIntegerValue; - const auto &domain = (*domains)[var]; + const auto& domain = (*domains)[var]; for (int i = domain.NumIntervals() - 1; i >= 0; --i) { const ClosedInterval interval = domain[i]; for (IntegerValue v(interval.end); v >= interval.start; --v) { @@ -107,13 +107,13 @@ std::pair GetMinAndMaxNotEncoded( } // namespace -void AppendPartialEncodingRelaxation(IntegerVariable var, const Model &model, - LinearRelaxation *relaxation) { - const auto *encoder = model.Get(); - const auto *integer_trail = model.Get(); +void AppendPartialEncodingRelaxation(IntegerVariable var, const Model& model, + LinearRelaxation* relaxation) { + const auto* encoder = model.Get(); + const auto* integer_trail = model.Get(); if (encoder == nullptr || integer_trail == nullptr) return; - const std::vector &encoding = + const std::vector& encoding = encoder->PartialDomainEncoding(var); if (encoding.empty()) return; @@ -183,13 +183,13 @@ void AppendPartialEncodingRelaxation(IntegerVariable var, const Model &model, } void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var, - const Model &model, - LinearRelaxation *relaxation) { - const auto *integer_trail = model.Get(); - const auto *encoder = model.Get(); + const Model& model, + LinearRelaxation* relaxation) { + const 
auto* integer_trail = model.Get(); + const auto* encoder = model.Get(); if (integer_trail == nullptr || encoder == nullptr) return; - const std::map &greater_than_encoding = + const std::map& greater_than_encoding = encoder->PartialGreaterThanEncoding(var); if (greater_than_encoding.empty()) return; @@ -244,9 +244,9 @@ namespace { // Adds enforcing_lit => target <= bounding_var to relaxation. void AppendEnforcedUpperBound(const Literal enforcing_lit, const IntegerVariable target, - const IntegerVariable bounding_var, Model *model, - LinearRelaxation *relaxation) { - IntegerTrail *integer_trail = model->GetOrCreate(); + const IntegerVariable bounding_var, Model* model, + LinearRelaxation* relaxation) { + IntegerTrail* integer_trail = model->GetOrCreate(); const IntegerValue max_target_value = integer_trail->UpperBound(target); const IntegerValue min_var_value = integer_trail->LowerBound(bounding_var); const IntegerValue max_term_value = max_target_value - min_var_value; @@ -260,13 +260,13 @@ void AppendEnforcedUpperBound(const Literal enforcing_lit, // Adds {enforcing_lits} => rhs_domain_min <= expr <= rhs_domain_max. // Requires expr offset to be 0. 
void AppendEnforcedLinearExpression( - const std::vector &enforcing_literals, - const LinearExpression &expr, const IntegerValue rhs_domain_min, - const IntegerValue rhs_domain_max, const Model &model, - LinearRelaxation *relaxation) { + const std::vector& enforcing_literals, + const LinearExpression& expr, const IntegerValue rhs_domain_min, + const IntegerValue rhs_domain_max, const Model& model, + LinearRelaxation* relaxation) { CHECK_EQ(expr.offset, IntegerValue(0)); const LinearExpression canonical_expr = CanonicalizeExpr(expr); - const IntegerTrail *integer_trail = model.Get(); + const IntegerTrail* integer_trail = model.Get(); const IntegerValue min_expr_value = LinExprLowerBound(canonical_expr, *integer_trail); @@ -275,7 +275,7 @@ void AppendEnforcedLinearExpression( // <=> Sum_i (~ei * (rhs_domain_min - min_expr_value)) + terms >= // rhs_domain_min LinearConstraintBuilder lc(&model, rhs_domain_min, kMaxIntegerValue); - for (const Literal &literal : enforcing_literals) { + for (const Literal& literal : enforcing_literals) { CHECK(lc.AddLiteralTerm(literal.Negated(), rhs_domain_min - min_expr_value)); } @@ -291,7 +291,7 @@ void AppendEnforcedLinearExpression( // <=> Sum_i (~ei * (rhs_domain_max - max_expr_value)) + terms <= // rhs_domain_max LinearConstraintBuilder lc(&model, kMinIntegerValue, rhs_domain_max); - for (const Literal &literal : enforcing_literals) { + for (const Literal& literal : enforcing_literals) { CHECK(lc.AddLiteralTerm(literal.Negated(), rhs_domain_max - max_expr_value)); } @@ -312,13 +312,13 @@ void AppendEnforcedLinearExpression( // // TODO(user): In full generality, we could encode all the constraint as an LP. // TODO(user,user): Add unit tests for this method. 
-void TryToLinearizeConstraint(const CpModelProto &model_proto, - const ConstraintProto &ct, Model *model, +void TryToLinearizeConstraint(const CpModelProto& model_proto, + const ConstraintProto& ct, Model* model, int linearization_level, - LinearRelaxation *relaxation) { + LinearRelaxation* relaxation) { CHECK_EQ(model->GetOrCreate()->CurrentDecisionLevel(), 0); DCHECK_GT(linearization_level, 0); - auto *mapping = model->GetOrCreate(); + auto* mapping = model->GetOrCreate(); if (ct.constraint_case() == ConstraintProto::ConstraintCase::kBoolOr) { if (linearization_level < 2) return; LinearConstraintBuilder lc(model, IntegerValue(1), kMaxIntegerValue); @@ -394,8 +394,8 @@ void TryToLinearizeConstraint(const CpModelProto &model_proto, // Each node must have exactly one incoming and one outgoing arc (note that // it can be the unique self-arc of this node too). - std::map > incoming_arc_constraints; - std::map > outgoing_arc_constraints; + std::map> incoming_arc_constraints; + std::map> outgoing_arc_constraints; for (int i = 0; i < num_arcs; i++) { const Literal arc = mapping->Literal(ct.circuit().literals(i)); const int tail = ct.circuit().tails(i); @@ -406,10 +406,10 @@ void TryToLinearizeConstraint(const CpModelProto &model_proto, outgoing_arc_constraints[tail].push_back(arc); incoming_arc_constraints[head].push_back(arc); } - for (const auto *node_map : + for (const auto* node_map : {&outgoing_arc_constraints, &incoming_arc_constraints}) { - for (const auto &entry : *node_map) { - const std::vector &exactly_one = entry.second; + for (const auto& entry : *node_map) { + const std::vector& exactly_one = entry.second; if (exactly_one.size() > 1) { LinearConstraintBuilder at_least_one_lc(model, IntegerValue(1), kMaxIntegerValue); @@ -434,7 +434,7 @@ void TryToLinearizeConstraint(const CpModelProto &model_proto, // target = sum (index == i) * fixed_vars[i]. 
LinearConstraintBuilder constraint(model, IntegerValue(0), IntegerValue(0)); constraint.AddTerm(target, IntegerValue(-1)); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); for (const auto literal_value : model->Add(FullyEncodeVariable((index)))) { const IntegerVariable var = vars[literal_value.value.value()]; if (!model->Get(IsFixed(var))) return; @@ -452,7 +452,7 @@ void TryToLinearizeConstraint(const CpModelProto &model_proto, const IntegerVariable start = mapping->Integer(ct.interval().start()); const IntegerVariable size = mapping->Integer(ct.interval().size()); const IntegerVariable end = mapping->Integer(ct.interval().end()); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); const bool size_is_fixed = integer_trail->IsFixed(size); const IntegerValue rhs = size_is_fixed ? -integer_trail->LowerBound(size) : IntegerValue(0); @@ -484,14 +484,14 @@ void TryToLinearizeConstraint(const CpModelProto &model_proto, } } -void AddCumulativeCut(const std::vector &intervals, - const std::vector &demands, - IntegerValue capacity_lower_bound, Model *model, - LinearRelaxation *relaxation) { +void AddCumulativeCut(const std::vector& intervals, + const std::vector& demands, + IntegerValue capacity_lower_bound, Model* model, + LinearRelaxation* relaxation) { SchedulingConstraintHelper helper(intervals, model); const int num_intervals = helper.NumTasks(); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); IntegerValue min_of_starts = kMaxIntegerValue; IntegerValue max_of_ends = kMinIntegerValue; @@ -573,15 +573,15 @@ void AddCumulativeCut(const std::vector &intervals, relaxation->linear_constraints.push_back(lc.Build()); } -void AppendCumulativeRelaxation(const CpModelProto &model_proto, - const ConstraintProto &ct, - int linearization_level, Model *model, - LinearRelaxation *relaxation) { +void 
AppendCumulativeRelaxation(const CpModelProto& model_proto, + const ConstraintProto& ct, + int linearization_level, Model* model, + LinearRelaxation* relaxation) { CHECK(ct.has_cumulative()); if (linearization_level < 2) return; if (HasEnforcementLiteral(ct)) return; - auto *mapping = model->GetOrCreate(); + auto* mapping = model->GetOrCreate(); const std::vector demands = mapping->Integers(ct.cumulative().demands()); std::vector intervals = @@ -592,15 +592,15 @@ void AppendCumulativeRelaxation(const CpModelProto &model_proto, AddCumulativeCut(intervals, demands, capacity_lower_bound, model, relaxation); } -void AppendNoOverlapRelaxation(const CpModelProto &model_proto, - const ConstraintProto &ct, - int linearization_level, Model *model, - LinearRelaxation *relaxation) { +void AppendNoOverlapRelaxation(const CpModelProto& model_proto, + const ConstraintProto& ct, + int linearization_level, Model* model, + LinearRelaxation* relaxation) { CHECK(ct.has_no_overlap()); if (linearization_level < 2) return; if (HasEnforcementLiteral(ct)) return; - auto *mapping = model->GetOrCreate(); + auto* mapping = model->GetOrCreate(); std::vector intervals = mapping->Intervals(ct.no_overlap().intervals()); AddCumulativeCut(intervals, /*demands=*/{}, @@ -608,9 +608,9 @@ void AppendNoOverlapRelaxation(const CpModelProto &model_proto, } void AppendMaxRelaxation(IntegerVariable target, - const std::vector &vars, - int linearization_level, Model *model, - LinearRelaxation *relaxation) { + const std::vector& vars, + int linearization_level, Model* model, + LinearRelaxation* relaxation) { // Case X = max(X_1, X_2, ..., X_N) // Part 1: Encode X >= max(X_1, X_2, ..., X_N) for (const IntegerVariable var : vars) { @@ -625,9 +625,9 @@ void AppendMaxRelaxation(IntegerVariable target, // Part 2: Encode upper bound on X. 
if (linearization_level < 2) return; - GenericLiteralWatcher *watcher = model->GetOrCreate(); + GenericLiteralWatcher* watcher = model->GetOrCreate(); // For size = 2, we do this with 1 less variable. - IntegerEncoder *encoder = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); if (vars.size() == 2) { IntegerVariable y = model->Add(NewIntegerVariable(0, 1)); const Literal y_lit = @@ -636,14 +636,14 @@ void AppendMaxRelaxation(IntegerVariable target, // TODO(user,user): It makes more sense to use ConditionalLowerOrEqual() // here, but that degrades perf on the road*.fzn problem. Understand why. - IntegerSumLE *upper_bound1 = new IntegerSumLE( + IntegerSumLE* upper_bound1 = new IntegerSumLE( {y_lit}, {target, vars[0]}, {IntegerValue(1), IntegerValue(-1)}, IntegerValue(0), model); upper_bound1->RegisterWith(watcher); model->TakeOwnership(upper_bound1); AppendEnforcedUpperBound(y_lit.Negated(), target, vars[1], model, relaxation); - IntegerSumLE *upper_bound2 = new IntegerSumLE( + IntegerSumLE* upper_bound2 = new IntegerSumLE( {y_lit.Negated()}, {target, vars[1]}, {IntegerValue(1), IntegerValue(-1)}, IntegerValue(0), model); upper_bound2->RegisterWith(watcher); @@ -668,7 +668,7 @@ void AppendMaxRelaxation(IntegerVariable target, encoder->GetOrCreateLiteralAssociatedToEquality(y, IntegerValue(1)); AppendEnforcedUpperBound(y_lit, target, var, model, relaxation); - IntegerSumLE *upper_bound_constraint = new IntegerSumLE( + IntegerSumLE* upper_bound_constraint = new IntegerSumLE( {y_lit}, {target, var}, {IntegerValue(1), IntegerValue(-1)}, IntegerValue(0), model); upper_bound_constraint->RegisterWith(watcher); @@ -682,11 +682,11 @@ void AppendMaxRelaxation(IntegerVariable target, } std::vector AppendLinMaxRelaxation( - IntegerVariable target, const std::vector &exprs, - Model *model, LinearRelaxation *relaxation) { + IntegerVariable target, const std::vector& exprs, + Model* model, LinearRelaxation* relaxation) { // We want to linearize X = 
max(exprs[1], exprs[2], ..., exprs[d]). // Part 1: Encode X >= max(exprs[1], exprs[2], ..., exprs[d]) - for (const LinearExpression &expr : exprs) { + for (const LinearExpression& expr : exprs) { LinearConstraintBuilder lc(model, kMinIntegerValue, -expr.offset); for (int i = 0; i < expr.vars.size(); ++i) { lc.AddTerm(expr.vars[i], expr.coeffs[i]); @@ -700,8 +700,8 @@ std::vector AppendLinMaxRelaxation( // Add linking constraint to the CP solver // sum zi = 1 and for all i, zi => max = expr_i. const int num_exprs = exprs.size(); - IntegerEncoder *encoder = model->GetOrCreate(); - GenericLiteralWatcher *watcher = model->GetOrCreate(); + IntegerEncoder* encoder = model->GetOrCreate(); + GenericLiteralWatcher* watcher = model->GetOrCreate(); // TODO(user): For the case where num_exprs = 2, Create only one z var. std::vector z_vars; @@ -722,7 +722,7 @@ std::vector AppendLinMaxRelaxation( local_expr.vars.push_back(target); local_expr.coeffs = exprs[i].coeffs; local_expr.coeffs.push_back(IntegerValue(1)); - IntegerSumLE *upper_bound = new IntegerSumLE( + IntegerSumLE* upper_bound = new IntegerSumLE( {z_lit}, local_expr.vars, local_expr.coeffs, exprs[i].offset, model); upper_bound->RegisterWith(watcher); model->TakeOwnership(upper_bound); @@ -745,10 +745,10 @@ std::vector AppendLinMaxRelaxation( return VariableIsPositive(var); })); - std::vector > sum_of_max_corner_diff( + std::vector> sum_of_max_corner_diff( num_exprs, std::vector(num_exprs, IntegerValue(0))); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); for (int i = 0; i < num_exprs; ++i) { for (int j = 0; j < num_exprs; ++j) { if (i == j) continue; @@ -778,11 +778,11 @@ std::vector AppendLinMaxRelaxation( return z_vars; } -void AppendLinearConstraintRelaxation(const ConstraintProto &constraint_proto, +void AppendLinearConstraintRelaxation(const ConstraintProto& constraint_proto, const int linearization_level, - const Model &model, - LinearRelaxation 
*relaxation) { - auto *mapping = model.Get(); + const Model& model, + LinearRelaxation* relaxation) { + auto* mapping = model.Get(); // Note that we ignore the holes in the domain. // diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index a62d1853ec..75bb10dab8 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -49,8 +49,8 @@ using operations_research::MPVariableProto; namespace { -void ScaleConstraint(const std::vector &var_scaling, - MPConstraintProto *mp_constraint) { +void ScaleConstraint(const std::vector& var_scaling, + MPConstraintProto* mp_constraint) { const int num_terms = mp_constraint->coefficient_size(); for (int i = 0; i < num_terms; ++i) { const int var_index = mp_constraint->var_index(i); @@ -60,11 +60,11 @@ void ScaleConstraint(const std::vector &var_scaling, } void ApplyVarScaling(const std::vector var_scaling, - MPModelProto *mp_model) { + MPModelProto* mp_model) { const int num_variables = mp_model->variable_size(); for (int i = 0; i < num_variables; ++i) { const double scaling = var_scaling[i]; - const MPVariableProto &mp_var = mp_model->variable(i); + const MPVariableProto& mp_var = mp_model->variable(i); const double old_lb = mp_var.lower_bound(); const double old_ub = mp_var.upper_bound(); const double old_obj = mp_var.objective_coefficient(); @@ -72,10 +72,10 @@ void ApplyVarScaling(const std::vector var_scaling, mp_model->mutable_variable(i)->set_upper_bound(old_ub * scaling); mp_model->mutable_variable(i)->set_objective_coefficient(old_obj / scaling); } - for (MPConstraintProto &mp_constraint : *mp_model->mutable_constraint()) { + for (MPConstraintProto& mp_constraint : *mp_model->mutable_constraint()) { ScaleConstraint(var_scaling, &mp_constraint); } - for (MPGeneralConstraintProto &general_constraint : + for (MPGeneralConstraintProto& general_constraint : *mp_model->mutable_general_constraint()) { switch (general_constraint.general_constraint_case()) { case MPGeneralConstraintProto::kIndicatorConstraint: 
@@ -98,7 +98,7 @@ void ApplyVarScaling(const std::vector var_scaling, } // namespace std::vector ScaleContinuousVariables(double scaling, double max_bound, - MPModelProto *mp_model) { + MPModelProto* mp_model) { const int num_variables = mp_model->variable_size(); std::vector var_scaling(num_variables, 1.0); for (int i = 0; i < num_variables; ++i) { @@ -141,11 +141,11 @@ namespace { // satisfy the given constraint. Return 0.0 if we didn't find such factor. // // Precondition: var must be the only non-integer in the given constraint. -double GetIntegralityMultiplier(const MPModelProto &mp_model, - const std::vector &var_scaling, int var, +double GetIntegralityMultiplier(const MPModelProto& mp_model, + const std::vector& var_scaling, int var, int ct_index, double tolerance) { DCHECK(!mp_model.variable(var).is_integer()); - const MPConstraintProto &ct = mp_model.constraint(ct_index); + const MPConstraintProto& ct = mp_model.constraint(ct_index); double multiplier = 1.0; double var_coeff = 0.0; const double max_multiplier = 1e4; @@ -180,7 +180,7 @@ double GetIntegralityMultiplier(const MPModelProto &mp_model, } // namespace std::vector DetectImpliedIntegers(bool log_info, - MPModelProto *mp_model) { + MPModelProto* mp_model) { const int num_variables = mp_model->variable_size(); std::vector var_scaling(num_variables, 1.0); @@ -196,9 +196,9 @@ std::vector DetectImpliedIntegers(bool log_info, const int num_constraints = mp_model->constraint_size(); std::vector constraint_to_num_non_integer(num_constraints, 0); - std::vector > var_to_constraints(num_variables); + std::vector> var_to_constraints(num_variables); for (int i = 0; i < num_constraints; ++i) { - const MPConstraintProto &mp_constraint = mp_model->constraint(i); + const MPConstraintProto& mp_constraint = mp_model->constraint(i); for (const int var : mp_constraint.var_index()) { if (!mp_model->variable(var).is_integer()) { @@ -252,7 +252,7 @@ std::vector DetectImpliedIntegers(bool log_info, if 
(constraint_to_num_non_integer[top_ct_index] == 0) continue; // Ignore non-equality here. - const MPConstraintProto &ct = mp_model->constraint(top_ct_index); + const MPConstraintProto& ct = mp_model->constraint(top_ct_index); if (ct.lower_bound() + tolerance < ct.upper_bound()) continue; ++num_processed_constraints; @@ -313,7 +313,7 @@ std::vector DetectImpliedIntegers(bool log_info, if (constraint_to_num_non_integer[ct_index] != 1) continue; // Ignore non-equality here. - const MPConstraintProto &ct = mp_model->constraint(top_ct_index); + const MPConstraintProto& ct = mp_model->constraint(top_ct_index); if (ct.lower_bound() + tolerance < ct.upper_bound()) continue; const double multiplier = GetIntegralityMultiplier( @@ -430,9 +430,9 @@ namespace { // We use a class to reuse the temporay memory. struct ConstraintScaler { // Scales an individual constraint. - ConstraintProto *AddConstraint(const MPModelProto &mp_model, - const MPConstraintProto &mp_constraint, - CpModelProto *cp_model); + ConstraintProto* AddConstraint(const MPModelProto& mp_model, + const MPConstraintProto& mp_constraint, + CpModelProto* cp_model); double max_relative_coeff_error = 0.0; double max_relative_rhs_error = 0.0; @@ -448,7 +448,7 @@ struct ConstraintScaler { namespace { -double FindFractionalScaling(const std::vector &coefficients, +double FindFractionalScaling(const std::vector& coefficients, double tolerance) { double multiplier = 1.0; for (const double coeff : coefficients) { @@ -461,17 +461,17 @@ double FindFractionalScaling(const std::vector &coefficients, } // namespace -ConstraintProto *ConstraintScaler::AddConstraint( - const MPModelProto &mp_model, const MPConstraintProto &mp_constraint, - CpModelProto *cp_model) { +ConstraintProto* ConstraintScaler::AddConstraint( + const MPModelProto& mp_model, const MPConstraintProto& mp_constraint, + CpModelProto* cp_model) { if (mp_constraint.lower_bound() == -kInfinity && mp_constraint.upper_bound() == kInfinity) { return nullptr; } - auto 
*constraint = cp_model->add_constraints(); + auto* constraint = cp_model->add_constraints(); constraint->set_name(mp_constraint.name()); - auto *arg = constraint->mutable_linear(); + auto* arg = constraint->mutable_linear(); // First scale the coefficients of the constraints so that the constraint // sum can always be computed without integer overflow. @@ -481,7 +481,7 @@ ConstraintProto *ConstraintScaler::AddConstraint( upper_bounds.clear(); const int num_coeffs = mp_constraint.coefficient_size(); for (int i = 0; i < num_coeffs; ++i) { - const auto &var_proto = cp_model->variables(mp_constraint.var_index(i)); + const auto& var_proto = cp_model->variables(mp_constraint.var_index(i)); const int64 lb = var_proto.domain(0); const int64 ub = var_proto.domain(var_proto.domain_size() - 1); if (lb == 0 && ub == 0) continue; @@ -587,9 +587,9 @@ ConstraintProto *ConstraintScaler::AddConstraint( } // namespace -bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, - const MPModelProto &mp_model, - CpModelProto *cp_model) { +bool ConvertMPModelProtoToCpModelProto(const SatParameters& params, + const MPModelProto& mp_model, + CpModelProto* cp_model) { CHECK(cp_model != nullptr); cp_model->Clear(); cp_model->set_name(mp_model.name()); @@ -616,8 +616,8 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, // Add the variables. const int num_variables = mp_model.variable_size(); for (int i = 0; i < num_variables; ++i) { - const MPVariableProto &mp_var = mp_model.variable(i); - IntegerVariableProto *cp_var = cp_model->add_variables(); + const MPVariableProto& mp_var = mp_model.variable(i); + IntegerVariableProto* cp_var = cp_model->add_variables(); cp_var->set_name(mp_var.name()); // Deal with the corner case of a domain far away from zero. @@ -683,18 +683,18 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, scaler.scaling_target = kScalingTarget; // Add the constraints. We scale each of them individually. 
- for (const MPConstraintProto &mp_constraint : mp_model.constraint()) { + for (const MPConstraintProto& mp_constraint : mp_model.constraint()) { scaler.AddConstraint(mp_model, mp_constraint, cp_model); } - for (const MPGeneralConstraintProto &general_constraint : + for (const MPGeneralConstraintProto& general_constraint : mp_model.general_constraint()) { switch (general_constraint.general_constraint_case()) { case MPGeneralConstraintProto::kIndicatorConstraint: { - const auto &indicator_constraint = + const auto& indicator_constraint = general_constraint.indicator_constraint(); - const MPConstraintProto &mp_constraint = + const MPConstraintProto& mp_constraint = indicator_constraint.constraint(); - ConstraintProto *ct = + ConstraintProto* ct = scaler.AddConstraint(mp_model, mp_constraint, cp_model); if (ct == nullptr) continue; @@ -705,16 +705,16 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, break; } case MPGeneralConstraintProto::kAndConstraint: { - const auto &and_constraint = general_constraint.and_constraint(); - const std::string &name = general_constraint.name(); + const auto& and_constraint = general_constraint.and_constraint(); + const std::string& name = general_constraint.name(); - ConstraintProto *ct_pos = cp_model->add_constraints(); + ConstraintProto* ct_pos = cp_model->add_constraints(); ct_pos->set_name(name.empty() ? "" : absl::StrCat(name, "_pos")); ct_pos->add_enforcement_literal(and_constraint.resultant_var_index()); *ct_pos->mutable_bool_and()->mutable_literals() = and_constraint.var_index(); - ConstraintProto *ct_neg = cp_model->add_constraints(); + ConstraintProto* ct_neg = cp_model->add_constraints(); ct_neg->set_name(name.empty() ? 
"" : absl::StrCat(name, "_neg")); ct_neg->add_enforcement_literal( NegatedRef(and_constraint.resultant_var_index())); @@ -724,16 +724,16 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, break; } case MPGeneralConstraintProto::kOrConstraint: { - const auto &or_constraint = general_constraint.or_constraint(); - const std::string &name = general_constraint.name(); + const auto& or_constraint = general_constraint.or_constraint(); + const std::string& name = general_constraint.name(); - ConstraintProto *ct_pos = cp_model->add_constraints(); + ConstraintProto* ct_pos = cp_model->add_constraints(); ct_pos->set_name(name.empty() ? "" : absl::StrCat(name, "_pos")); ct_pos->add_enforcement_literal(or_constraint.resultant_var_index()); *ct_pos->mutable_bool_or()->mutable_literals() = or_constraint.var_index(); - ConstraintProto *ct_neg = cp_model->add_constraints(); + ConstraintProto* ct_neg = cp_model->add_constraints(); ct_neg->set_name(name.empty() ? "" : absl::StrCat(name, "_neg")); ct_neg->add_enforcement_literal( NegatedRef(or_constraint.resultant_var_index())); @@ -772,10 +772,10 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, double max_magnitude = 0.0; double l1_norm = 0.0; for (int i = 0; i < num_variables; ++i) { - const MPVariableProto &mp_var = mp_model.variable(i); + const MPVariableProto& mp_var = mp_model.variable(i); if (mp_var.objective_coefficient() == 0.0) continue; - const auto &var_proto = cp_model->variables(i); + const auto& var_proto = cp_model->variables(i); const int64 lb = var_proto.domain(0); const int64 ub = var_proto.domain(var_proto.domain_size() - 1); if (lb == 0 && ub == 0) continue; @@ -840,7 +840,7 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, // Note that here we set the scaling factor for the inverse operation of // getting the "true" objective value from the scaled one. Hence the // inverse. 
- auto *objective = cp_model->mutable_objective(); + auto* objective = cp_model->mutable_objective(); const int mult = mp_model.maximize() ? -1 : 1; objective->set_offset(mp_model.objective_offset() * scaling_factor / gcd * mult); @@ -859,8 +859,8 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters ¶ms, return true; } -bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, - LinearBooleanProblem *problem) { +bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto& mp_model, + LinearBooleanProblem* problem) { CHECK(problem != nullptr); problem->Clear(); problem->set_name(mp_model.name()); @@ -870,7 +870,7 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, // Test if the variables are binary variables. // Add constraints for the fixed variables. for (int var_id(0); var_id < num_variables; ++var_id) { - const MPVariableProto &mp_var = mp_model.variable(var_id); + const MPVariableProto& mp_var = mp_model.variable(var_id); problem->add_var_names(mp_var.name()); // This will be changed to false as soon as we detect the variable to be @@ -888,14 +888,14 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, // Binary variable. Ok. } else if (lb <= 1.0 && ub >= 1.0) { // Fixed variable at 1. - LinearBooleanConstraint *constraint = problem->add_constraints(); + LinearBooleanConstraint* constraint = problem->add_constraints(); constraint->set_lower_bound(1); constraint->set_upper_bound(1); constraint->add_literals(var_id + 1); constraint->add_coefficients(1); } else if (lb <= 0.0 && ub >= 0.0) { // Fixed variable at 0. 
- LinearBooleanConstraint *constraint = problem->add_constraints(); + LinearBooleanConstraint* constraint = problem->add_constraints(); constraint->set_lower_bound(0); constraint->set_upper_bound(0); constraint->add_literals(var_id + 1); @@ -925,8 +925,8 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, std::vector coefficients; // Add all constraints. - for (const MPConstraintProto &mp_constraint : mp_model.constraint()) { - LinearBooleanConstraint *constraint = problem->add_constraints(); + for (const MPConstraintProto& mp_constraint : mp_model.constraint()) { + LinearBooleanConstraint* constraint = problem->add_constraints(); constraint->set_name(mp_constraint.name()); // First scale the coefficients of the constraints. @@ -992,7 +992,7 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, // Add the objective. coefficients.clear(); for (int var_id = 0; var_id < num_variables; ++var_id) { - const MPVariableProto &mp_var = mp_model.variable(var_id); + const MPVariableProto& mp_var = mp_model.variable(var_id); coefficients.push_back(mp_var.objective_coefficient()); } GetBestScalingOfDoublesToInt64(coefficients, kInt64Max, &scaling_factor, @@ -1004,14 +1004,14 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, LOG(INFO) << "objective relative error: " << relative_error; LOG(INFO) << "objective scaling factor: " << scaling_factor / gcd; - LinearObjective *objective = problem->mutable_objective(); + LinearObjective* objective = problem->mutable_objective(); objective->set_offset(mp_model.objective_offset() * scaling_factor / gcd); // Note that here we set the scaling factor for the inverse operation of // getting the "true" objective value from the scaled one. Hence the inverse. 
objective->set_scaling_factor(1.0 / scaling_factor * gcd); for (int var_id = 0; var_id < num_variables; ++var_id) { - const MPVariableProto &mp_var = mp_model.variable(var_id); + const MPVariableProto& mp_var = mp_model.variable(var_id); const int64 value = static_cast(round( mp_var.objective_coefficient() * scaling_factor)) / gcd; @@ -1034,8 +1034,8 @@ bool ConvertBinaryMPModelProtoToBooleanProblem(const MPModelProto &mp_model, return true; } -void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem &problem, - glop::LinearProgram *lp) { +void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem& problem, + glop::LinearProgram* lp) { lp->Clear(); for (int i = 0; i < problem.num_variables(); ++i) { const ColIndex col = lp->CreateNewVariable(); @@ -1051,7 +1051,7 @@ void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem &problem, } } - for (const LinearBooleanConstraint &constraint : problem.constraints()) { + for (const LinearBooleanConstraint& constraint : problem.constraints()) { const RowIndex constraint_index = lp->CreateNewConstraint(); lp->SetConstraintName(constraint_index, constraint.name()); double sum = 0.0; @@ -1077,7 +1077,7 @@ void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem &problem, // Objective. 
{ double sum = 0.0; - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); const double scaling_factor = objective.scaling_factor(); for (int i = 0; i < objective.literals_size(); ++i) { const int literal = objective.literals(i); @@ -1098,9 +1098,9 @@ void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem &problem, lp->CleanUp(); } -int FixVariablesFromSat(const SatSolver &solver, glop::LinearProgram *lp) { +int FixVariablesFromSat(const SatSolver& solver, glop::LinearProgram* lp) { int num_fixed_variables = 0; - const Trail &trail = solver.LiteralTrail(); + const Trail& trail = solver.LiteralTrail(); for (int i = 0; i < trail.Index(); ++i) { const BooleanVariable var = trail[i].Variable(); const int value = trail[i].IsPositive() ? 1.0 : 0.0; @@ -1113,20 +1113,20 @@ int FixVariablesFromSat(const SatSolver &solver, glop::LinearProgram *lp) { } bool SolveLpAndUseSolutionForSatAssignmentPreference( - const glop::LinearProgram &lp, SatSolver *sat_solver, + const glop::LinearProgram& lp, SatSolver* sat_solver, double max_time_in_seconds) { glop::LPSolver solver; glop::GlopParameters glop_parameters; glop_parameters.set_max_time_in_seconds(max_time_in_seconds); solver.SetParameters(glop_parameters); - const glop::ProblemStatus &status = solver.Solve(lp); + const glop::ProblemStatus& status = solver.Solve(lp); if (status != glop::ProblemStatus::OPTIMAL && status != glop::ProblemStatus::IMPRECISE && status != glop::ProblemStatus::PRIMAL_FEASIBLE) { return false; } for (ColIndex col(0); col < lp.num_variables(); ++col) { - const Fractional &value = solver.variable_values()[col]; + const Fractional& value = solver.variable_values()[col]; sat_solver->SetAssignmentPreference( Literal(BooleanVariable(col.value()), round(value) == 1), 1 - std::abs(value - round(value))); @@ -1134,27 +1134,27 @@ bool SolveLpAndUseSolutionForSatAssignmentPreference( return true; } -bool 
SolveLpAndUseIntegerVariableToStartLNS(const glop::LinearProgram &lp, - LinearBooleanProblem *problem) { +bool SolveLpAndUseIntegerVariableToStartLNS(const glop::LinearProgram& lp, + LinearBooleanProblem* problem) { glop::LPSolver solver; - const glop::ProblemStatus &status = solver.Solve(lp); + const glop::ProblemStatus& status = solver.Solve(lp); if (status != glop::ProblemStatus::OPTIMAL && status != glop::ProblemStatus::PRIMAL_FEASIBLE) return false; int num_variable_fixed = 0; for (ColIndex col(0); col < lp.num_variables(); ++col) { const Fractional tolerance = 1e-5; - const Fractional &value = solver.variable_values()[col]; + const Fractional& value = solver.variable_values()[col]; if (value > 1 - tolerance) { ++num_variable_fixed; - LinearBooleanConstraint *constraint = problem->add_constraints(); + LinearBooleanConstraint* constraint = problem->add_constraints(); constraint->set_lower_bound(1); constraint->set_upper_bound(1); constraint->add_coefficients(1); constraint->add_literals(col.value() + 1); } else if (value < tolerance) { ++num_variable_fixed; - LinearBooleanConstraint *constraint = problem->add_constraints(); + LinearBooleanConstraint* constraint = problem->add_constraints(); constraint->set_lower_bound(0); constraint->set_upper_bound(0); constraint->add_coefficients(1); diff --git a/ortools/sat/optimization.cc b/ortools/sat/optimization.cc index f19e8e3e0f..c08baba19b 100644 --- a/ortools/sat/optimization.cc +++ b/ortools/sat/optimization.cc @@ -58,7 +58,7 @@ namespace { class Logger { public: explicit Logger(LogBehavior v) : use_stdout_(v == STDOUT_LOG) {} - void Log(const std::string &message) { + void Log(const std::string& message) { if (use_stdout_) { absl::PrintF("%s\n", message); } else { @@ -72,7 +72,7 @@ class Logger { // Outputs the current objective value in the cnf output format. // Note that this function scale the given objective. 
-std::string CnfObjectiveLine(const LinearBooleanProblem &problem, +std::string CnfObjectiveLine(const LinearBooleanProblem& problem, Coefficient objective) { const double scaled_objective = AddOffsetAndScaleObjectiveValue(problem, objective); @@ -89,7 +89,7 @@ struct LiteralWithCoreIndex { // increasing order. The order of the non-deleted entries in the vector is // preserved. template -void DeleteVectorIndices(const std::vector &indices, Vector *v) { +void DeleteVectorIndices(const std::vector& indices, Vector* v) { int new_size = 0; int indices_index = 0; for (int i = 0; i < v->size(); ++i) { @@ -187,7 +187,7 @@ class FuMalikSymmetryBreaker { } // Deletes the given assumption indices. - void DeleteIndices(const std::vector &indices) { + void DeleteIndices(const std::vector& indices) { DeleteVectorIndices(indices, &info_by_assumption_index_); } @@ -207,16 +207,16 @@ class FuMalikSymmetryBreaker { } private: - std::vector > info_by_assumption_index_; - std::vector > literal_by_core_; + std::vector> info_by_assumption_index_; + std::vector> literal_by_core_; DISALLOW_COPY_AND_ASSIGN(FuMalikSymmetryBreaker); }; } // namespace -void MinimizeCoreWithPropagation(SatSolver *solver, - std::vector *core) { +void MinimizeCoreWithPropagation(SatSolver* solver, + std::vector* core) { if (solver->IsModelUnsat()) return; std::set moved_last; std::vector candidate(core->begin(), core->end()); @@ -266,9 +266,9 @@ void MinimizeCoreWithPropagation(SatSolver *solver, // and relax in each step some of these fixed variables until the problem // becomes satisfiable. 
SatSolver::Status SolveWithFuMalik(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, - std::vector *solution) { + const LinearBooleanProblem& problem, + SatSolver* solver, + std::vector* solution) { Logger logger(log); FuMalikSymmetryBreaker symmetry; @@ -291,11 +291,11 @@ SatSolver::Status SolveWithFuMalik(LogBehavior log, // // ex: If a variable "x" as a cost of 3, its cost contribution is smaller when // it is set to false (since it will contribute to zero instead of 3). - std::vector > blocking_clauses; + std::vector> blocking_clauses; std::vector assumptions; // Initialize blocking_clauses and assumptions. - const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); CHECK_GT(objective.coefficients_size(), 0); const Coefficient unique_objective_coeff(std::abs(objective.coefficients(0))); for (int i = 0; i < objective.literals_size(); ++i) { @@ -463,9 +463,9 @@ SatSolver::Status SolveWithFuMalik(LogBehavior log, } SatSolver::Status SolveWithWPM1(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, - std::vector *solution) { + const LinearBooleanProblem& problem, + SatSolver* solver, + std::vector* solution) { Logger logger(log); FuMalikSymmetryBreaker symmetry; @@ -480,7 +480,7 @@ SatSolver::Status SolveWithWPM1(LogBehavior log, std::vector reference; // Initialization. 
- const LinearObjective &objective = problem.objective(); + const LinearObjective& objective = problem.objective(); CHECK_GT(objective.coefficients_size(), 0); for (int i = 0; i < objective.literals_size(); ++i) { const Literal literal(objective.literals(i)); @@ -760,9 +760,9 @@ SatSolver::Status SolveWithWPM1(LogBehavior log, } SatSolver::Status SolveWithRandomParameters(LogBehavior log, - const LinearBooleanProblem &problem, - int num_times, SatSolver *solver, - std::vector *solution) { + const LinearBooleanProblem& problem, + int num_times, SatSolver* solver, + std::vector* solution) { Logger logger(log); const SatParameters initial_parameters = solver->parameters(); @@ -840,9 +840,9 @@ SatSolver::Status SolveWithRandomParameters(LogBehavior log, } SatSolver::Status SolveWithLinearScan(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, - std::vector *solution) { + const LinearBooleanProblem& problem, + SatSolver* solver, + std::vector* solution) { Logger logger(log); // This has a big positive impact on most problems. @@ -886,20 +886,20 @@ SatSolver::Status SolveWithLinearScan(LogBehavior log, } SatSolver::Status SolveWithCardinalityEncoding( - LogBehavior log, const LinearBooleanProblem &problem, SatSolver *solver, - std::vector *solution) { + LogBehavior log, const LinearBooleanProblem& problem, SatSolver* solver, + std::vector* solution) { Logger logger(log); std::deque repository; // Create one initial node per variables with cost. Coefficient offset(0); - std::vector nodes = + std::vector nodes = CreateInitialEncodingNodes(problem.objective(), &offset, &repository); // This algorithm only work with weights of the same magnitude. CHECK(!nodes.empty()); const Coefficient reference = nodes.front()->weight(); - for (const EncodingNode *n : nodes) CHECK_EQ(n->weight(), reference); + for (const EncodingNode* n : nodes) CHECK_EQ(n->weight(), reference); // Initialize the current objective. 
Coefficient objective = kCoefficientMax; @@ -917,7 +917,7 @@ SatSolver::Status SolveWithCardinalityEncoding( // Create the sorter network. solver->Backtrack(0); - EncodingNode *root = + EncodingNode* root = MergeAllNodesWithDeque(upper_bound, nodes, solver, &repository); logger.Log(absl::StrFormat("c encoding depth:%d", root->depth())); @@ -954,15 +954,15 @@ SatSolver::Status SolveWithCardinalityEncoding( } SatSolver::Status SolveWithCardinalityEncodingAndCore( - LogBehavior log, const LinearBooleanProblem &problem, SatSolver *solver, - std::vector *solution) { + LogBehavior log, const LinearBooleanProblem& problem, SatSolver* solver, + std::vector* solution) { Logger logger(log); SatParameters parameters = solver->parameters(); // Create one initial nodes per variables with cost. Coefficient offset(0); std::deque repository; - std::vector nodes = + std::vector nodes = CreateInitialEncodingNodes(problem.objective(), &offset, &repository); // Initialize the bounds. @@ -984,7 +984,7 @@ SatSolver::Status SolveWithCardinalityEncodingAndCore( if (parameters.max_sat_stratification() == SatParameters::STRATIFICATION_DESCENT) { // In this case, we initialize it to the maximum assumption weights. 
- for (EncodingNode *n : nodes) { + for (EncodingNode* n : nodes) { stratified_lower_bound = std::max(stratified_lower_bound, n->weight()); } } @@ -1057,10 +1057,10 @@ SatSolver::Status SolveWithCardinalityEncodingAndCore( SatSolver::Status MinimizeIntegerVariableWithLinearScanAndLazyEncoding( IntegerVariable objective_var, - const std::function &feasible_solution_observer, Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); - const SatParameters ¶meters = *(model->GetOrCreate()); + const std::function& feasible_solution_observer, Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); + const SatParameters& parameters = *(model->GetOrCreate()); // Simple linear scan algorithm to find the optimal. while (true) { @@ -1090,11 +1090,11 @@ SatSolver::Status MinimizeIntegerVariableWithLinearScanAndLazyEncoding( void RestrictObjectiveDomainWithBinarySearch( IntegerVariable objective_var, - const std::function &feasible_solution_observer, Model *model) { + const std::function& feasible_solution_observer, Model* model) { const SatParameters old_params = *model->GetOrCreate(); - SatSolver *sat_solver = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); - IntegerEncoder *integer_encoder = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); + IntegerEncoder* integer_encoder = model->GetOrCreate(); // Set the requested conflict limit. { @@ -1207,11 +1207,11 @@ namespace { // cores. 
SatSolver::Status FindCores(std::vector assumptions, std::vector assumption_weights, - IntegerValue stratified_threshold, Model *model, - std::vector > *cores) { + IntegerValue stratified_threshold, Model* model, + std::vector>* cores) { cores->clear(); - SatSolver *sat_solver = model->GetOrCreate(); - TimeLimit *limit = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); + TimeLimit* limit = model->GetOrCreate(); do { if (limit->LimitReached()) return SatSolver::LIMIT_REACHED; @@ -1269,11 +1269,11 @@ SatSolver::Status FindCores(std::vector assumptions, // Slightly different algo than FindCores() which aim to extract more cores, but // not necessarily non-overlaping ones. SatSolver::Status FindMultipleCoresForMaxHs( - std::vector assumptions, Model *model, - std::vector > *cores) { + std::vector assumptions, Model* model, + std::vector>* cores) { cores->clear(); - SatSolver *sat_solver = model->GetOrCreate(); - TimeLimit *limit = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); + TimeLimit* limit = model->GetOrCreate(); do { if (limit->LimitReached()) return SatSolver::LIMIT_REACHED; @@ -1291,7 +1291,7 @@ SatSolver::Status FindMultipleCoresForMaxHs( // Pick a random literal from the core and remove it from the set of // assumptions. 
CHECK(!core.empty()); - auto *random = model->GetOrCreate(); + auto* random = model->GetOrCreate(); const Literal random_literal = core[absl::Uniform(*random, 0, core.size())]; for (int i = 0; i < assumptions.size(); ++i) { @@ -1309,9 +1309,9 @@ SatSolver::Status FindMultipleCoresForMaxHs( CoreBasedOptimizer::CoreBasedOptimizer( IntegerVariable objective_var, - const std::vector &variables, - const std::vector &coefficients, - std::function feasible_solution_observer, Model *model) + const std::vector& variables, + const std::vector& coefficients, + std::function feasible_solution_observer, Model* model) : parameters_(model->GetOrCreate()), sat_solver_(model->GetOrCreate()), time_limit_(model->GetOrCreate()), @@ -1345,7 +1345,7 @@ bool CoreBasedOptimizer::ProcessSolution() { // We don't assume that objective_var is linked with its linear term, so // we recompute the objective here. IntegerValue objective(0); - for (ObjectiveTerm &term : terms_) { + for (ObjectiveTerm& term : terms_) { const IntegerValue value = integer_trail_->LowerBound(term.var); objective += term.weight * value; @@ -1386,7 +1386,7 @@ bool CoreBasedOptimizer::PropagateObjectiveBounds() { // Compute implied lb. 
IntegerValue implied_objective_lb(0); - for (ObjectiveTerm &term : terms_) { + for (ObjectiveTerm& term : terms_) { const IntegerValue var_lb = integer_trail_->LowerBound(term.var); term.old_var_lb = var_lb; implied_objective_lb += term.weight * var_lb.value(); @@ -1412,7 +1412,7 @@ bool CoreBasedOptimizer::PropagateObjectiveBounds() { const IntegerValue gap = integer_trail_->UpperBound(objective_var_) - implied_objective_lb; - for (const ObjectiveTerm &term : terms_) { + for (const ObjectiveTerm& term : terms_) { if (term.weight == 0) continue; const IntegerValue var_lb = integer_trail_->LowerBound(term.var); const IntegerValue var_ub = integer_trail_->UpperBound(term.var); @@ -1449,7 +1449,7 @@ bool CoreBasedOptimizer::PropagateObjectiveBounds() { // didn't have the time to properly compare them. void CoreBasedOptimizer::ComputeNextStratificationThreshold() { std::vector weights; - for (ObjectiveTerm &term : terms_) { + for (ObjectiveTerm& term : terms_) { if (term.weight >= stratification_threshold_) continue; if (term.weight == 0) continue; @@ -1479,7 +1479,7 @@ bool CoreBasedOptimizer::CoverOptimization() { parameters_->set_max_deterministic_time(old_time_limit); }); - for (const ObjectiveTerm &term : terms_) { + for (const ObjectiveTerm& term : terms_) { // We currently skip the initial objective terms as there could be many // of them. TODO(user): provide an option to cover-optimize them? I // fear that this will slow down the solver too much though. @@ -1624,7 +1624,7 @@ SatSolver::Status CoreBasedOptimizer::Optimize() { // Display the progress. 
if (VLOG_IS_ON(1)) { int max_depth = 0; - for (const ObjectiveTerm &term : terms_) { + for (const ObjectiveTerm& term : terms_) { max_depth = std::max(max_depth, term.depth); } const int64 lb = integer_trail_->LowerBound(objective_var_).value(); @@ -1663,7 +1663,7 @@ SatSolver::Status CoreBasedOptimizer::Optimize() { // TODO(user): If the "search" is interupted while computing cores, we // currently do not resume it flawlessly. We however add any cores we found // before aborting. - std::vector > cores; + std::vector> cores; const SatSolver::Status result = FindCores(assumptions, assumption_weights, stratification_threshold_, model_, &cores); @@ -1681,7 +1681,7 @@ SatSolver::Status CoreBasedOptimizer::Optimize() { // Process the cores by creating new variables and transferring the minimum // weight of each core to it. if (!sat_solver_->ResetToLevelZero()) return SatSolver::INFEASIBLE; - for (const std::vector &core : cores) { + for (const std::vector& core : cores) { // This just increase the lower-bound of the corresponding node, which // should already be done by the solver. 
if (core.size() == 1) continue; @@ -1755,17 +1755,17 @@ SatSolver::Status CoreBasedOptimizer::Optimize() { // // TODO(user): remove code duplication with MinimizeWithCoreAndLazyEncoding(); SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( - const ObjectiveDefinition &objective_definition, - const std::function &feasible_solution_observer, Model *model) { + const ObjectiveDefinition& objective_definition, + const std::function& feasible_solution_observer, Model* model) { #if !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP) IntegerVariable objective_var = objective_definition.objective_var; std::vector variables = objective_definition.vars; std::vector coefficients = objective_definition.coeffs; - SatSolver *sat_solver = model->GetOrCreate(); - IntegerTrail *integer_trail = model->GetOrCreate(); - IntegerEncoder *integer_encoder = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); + IntegerEncoder* integer_encoder = model->GetOrCreate(); // This will be called each time a feasible solution is found. 
const auto process_solution = [&]() { @@ -1800,7 +1800,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( request.set_solver_specific_parameters("limits/gap = 0"); request.set_solver_type(MPModelRequest::SCIP_MIXED_INTEGER_PROGRAMMING); - MPModelProto &hs_model = *request.mutable_model(); + MPModelProto& hs_model = *request.mutable_model(); const int num_variables_in_objective = variables.size(); for (int i = 0; i < num_variables_in_objective; ++i) { if (coefficients[i] < 0) { @@ -1808,7 +1808,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( coefficients[i] = -coefficients[i]; } const IntegerVariable var = variables[i]; - MPVariableProto *var_proto = hs_model.add_variable(); + MPVariableProto* var_proto = hs_model.add_variable(); var_proto->set_lower_bound(integer_trail->LowerBound(var).value()); var_proto->set_upper_bound(integer_trail->UpperBound(var).value()); var_proto->set_objective_coefficient(coefficients[i].value()); @@ -1825,12 +1825,12 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( // TODO(user): The core is returned in the same order as the assumptions, // so we don't really need this map, we could just do a linear scan to // recover which node are part of the core. - std::map > assumption_to_indices; + std::map> assumption_to_indices; // New Booleans variable in the MIP model to represent X >= cte. std::map, int> created_var; - const SatParameters ¶meters = *(model->GetOrCreate()); + const SatParameters& parameters = *(model->GetOrCreate()); // Start the algorithm. SatSolver::Status result; @@ -1895,7 +1895,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( // cores for only one MIP solve. // // TODO(user): Use the real weights and exploit the extra cores. 
- std::vector > cores; + std::vector> cores; result = FindMultipleCoresForMaxHs(assumptions, model, &cores); if (result == SatSolver::FEASIBLE) { if (!process_solution()) return SatSolver::INFEASIBLE; @@ -1916,7 +1916,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( sat_solver->Backtrack(0); sat_solver->SetAssumptionLevel(0); - for (const std::vector &core : cores) { + for (const std::vector& core : cores) { if (core.size() == 1) { for (const int index : gtl::FindOrDie(assumption_to_indices, core.front().Index())) { @@ -1927,7 +1927,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( } // Add the corresponding constraint to hs_model. - MPConstraintProto *ct = hs_model.add_constraint(); + MPConstraintProto* ct = hs_model.add_constraint(); ct->set_lower_bound(1.0); for (const Literal lit : core) { for (const int index : @@ -1944,14 +1944,14 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( const int new_var_index = hs_model.variable_size(); created_var[key] = new_var_index; - MPVariableProto *new_var = hs_model.add_variable(); + MPVariableProto* new_var = hs_model.add_variable(); new_var->set_lower_bound(0); new_var->set_upper_bound(1); new_var->set_is_integer(true); // (new_var == 1) => x > hs_value. // (x - lb) - (hs_value - lb + 1) * new_var >= 0. 
- MPConstraintProto *implication = hs_model.add_constraint(); + MPConstraintProto* implication = hs_model.add_constraint(); implication->set_lower_bound(lb); implication->add_var_index(index); implication->add_coefficient(1.0); @@ -1967,7 +1967,7 @@ SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( } return result; -#else // !__PORTABLE_PLATFORM__ && USE_SCIP +#else // !__PORTABLE_PLATFORM__ && USE_SCIP LOG(FATAL) << "Not supported."; #endif // !__PORTABLE_PLATFORM__ && USE_SCIP } diff --git a/ortools/sat/optimization.h b/ortools/sat/optimization.h index e1f14fa811..288cd1de11 100644 --- a/ortools/sat/optimization.h +++ b/ortools/sat/optimization.h @@ -35,7 +35,7 @@ namespace sat { // removed. // // Note that this function doest NOT preserve the order of Literal in the core. -void MinimizeCoreWithPropagation(SatSolver *solver, std::vector *core); +void MinimizeCoreWithPropagation(SatSolver* solver, std::vector* core); // Because the Solve*() functions below are also used in scripts that requires a // special output format, we use this to tell them whether or not to use the @@ -62,9 +62,9 @@ enum LogBehavior { DEFAULT_LOG, STDOUT_LOG }; // TODO(user): double-check the correctness if the objective coefficients are // negative. SatSolver::Status SolveWithFuMalik(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, - std::vector *solution); + const LinearBooleanProblem& problem, + SatSolver* solver, + std::vector* solution); // The WPM1 algorithm is a generalization of the Fu & Malik algorithm to // weighted problems. Note that if all objective weights are the same, this is @@ -72,21 +72,20 @@ SatSolver::Status SolveWithFuMalik(LogBehavior log, // slightly different. // // Ansotegui, C., Bonet, M.L., Levy, J.: Solving (weighted) partial MaxSAT -// through satisfiability testing. In: Proc. of the 12th Int. Conf. on Theory -// and +// through satisfiability testing. In: Proc. of the 12th Int. Conf. 
on Theory and // Applications of Satisfiability Testing (SAT’09). pp. 427-440 (2009) SatSolver::Status SolveWithWPM1(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, std::vector *solution); + const LinearBooleanProblem& problem, + SatSolver* solver, std::vector* solution); // Solves num_times the decision version of the given problem with different // random parameters. Keep the best solution (regarding the objective) and // returns it in solution. The problem is assumed to be already loaded into the // given solver. SatSolver::Status SolveWithRandomParameters(LogBehavior log, - const LinearBooleanProblem &problem, - int num_times, SatSolver *solver, - std::vector *solution); + const LinearBooleanProblem& problem, + int num_times, SatSolver* solver, + std::vector* solution); // Starts by solving the decision version of the given LinearBooleanProblem and // then simply add a constraint to find a lower objective that the current best @@ -96,22 +95,22 @@ SatSolver::Status SolveWithRandomParameters(LogBehavior log, // solution is initially a feasible solution, the search will starts from there. // solution will be updated with the best solution found so far. SatSolver::Status SolveWithLinearScan(LogBehavior log, - const LinearBooleanProblem &problem, - SatSolver *solver, - std::vector *solution); + const LinearBooleanProblem& problem, + SatSolver* solver, + std::vector* solution); // Similar algorithm as the one used by qmaxsat, this is a linear scan with the // at-most k constraint encoded in SAT. This only works on problems with // constant weights. SatSolver::Status SolveWithCardinalityEncoding( - LogBehavior log, const LinearBooleanProblem &problem, SatSolver *solver, - std::vector *solution); + LogBehavior log, const LinearBooleanProblem& problem, SatSolver* solver, + std::vector* solution); // This is an original algorithm. It is a mix between the cardinality encoding // and the Fu & Malik algorithm. 
It also works on general weighted instances. SatSolver::Status SolveWithCardinalityEncodingAndCore( - LogBehavior log, const LinearBooleanProblem &problem, SatSolver *solver, - std::vector *solution); + LogBehavior log, const LinearBooleanProblem& problem, SatSolver* solver, + std::vector* solution); // Model-based API, for now we just provide a basic algorithm that minimizes a // given IntegerVariable by solving a sequence of decision problem by using @@ -125,13 +124,13 @@ SatSolver::Status SolveWithCardinalityEncodingAndCore( // solver, and it is up to the client to backtrack to the root node if needed. SatSolver::Status MinimizeIntegerVariableWithLinearScanAndLazyEncoding( IntegerVariable objective_var, - const std::function &feasible_solution_observer, Model *model); + const std::function& feasible_solution_observer, Model* model); // Use a low conflict limit and performs a binary search to try to restrict the // domain of objective_var. void RestrictObjectiveDomainWithBinarySearch( IntegerVariable objective_var, - const std::function &feasible_solution_observer, Model *model); + const std::function& feasible_solution_observer, Model* model); // Same as MinimizeIntegerVariableWithLinearScanAndLazyEncoding() but use // a core-based approach instead. Note that the given objective_var is just used @@ -144,10 +143,10 @@ void RestrictObjectiveDomainWithBinarySearch( class CoreBasedOptimizer { public: CoreBasedOptimizer(IntegerVariable objective_var, - const std::vector &variables, - const std::vector &coefficients, + const std::vector& variables, + const std::vector& coefficients, std::function feasible_solution_observer, - Model *model); + Model* model); // TODO(user): Change the algo slighlty to allow resuming from the last // aborted position. 
Currently, the search is "resumable", but it will restart @@ -155,8 +154,8 @@ class CoreBasedOptimizer { SatSolver::Status Optimize(); private: - CoreBasedOptimizer(const CoreBasedOptimizer &) = delete; - CoreBasedOptimizer &operator=(const CoreBasedOptimizer &) = delete; + CoreBasedOptimizer(const CoreBasedOptimizer&) = delete; + CoreBasedOptimizer& operator=(const CoreBasedOptimizer&) = delete; struct ObjectiveTerm { IntegerVariable var; @@ -186,12 +185,12 @@ class CoreBasedOptimizer { // Sets it to zero if all the assumptions where already considered. void ComputeNextStratificationThreshold(); - SatParameters *parameters_; - SatSolver *sat_solver_; - TimeLimit *time_limit_; - IntegerTrail *integer_trail_; - IntegerEncoder *integer_encoder_; - Model *model_; // TODO(user): remove this one. + SatParameters* parameters_; + SatSolver* sat_solver_; + TimeLimit* time_limit_; + IntegerTrail* integer_trail_; + IntegerEncoder* integer_encoder_; + Model* model_; // TODO(user): remove this one. IntegerVariable objective_var_; std::vector terms_; @@ -225,8 +224,8 @@ class CoreBasedOptimizer { // TODO(user): This function brings dependency to the SCIP MIP solver which is // quite big, maybe we should find a way not to do that. 
SatSolver::Status MinimizeWithHittingSetAndLazyEncoding( - const ObjectiveDefinition &objective_definition, - const std::function &feasible_solution_observer, Model *model); + const ObjectiveDefinition& objective_definition, + const std::function& feasible_solution_observer, Model* model); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/pb_constraint.cc b/ortools/sat/pb_constraint.cc index 704c9fbea2..f37556b274 100644 --- a/ortools/sat/pb_constraint.cc +++ b/ortools/sat/pb_constraint.cc @@ -24,11 +24,11 @@ namespace sat { namespace { -bool LiteralComparator(const LiteralWithCoeff &a, const LiteralWithCoeff &b) { +bool LiteralComparator(const LiteralWithCoeff& a, const LiteralWithCoeff& b) { return a.literal.Index() < b.literal.Index(); } -bool CoeffComparator(const LiteralWithCoeff &a, const LiteralWithCoeff &b) { +bool CoeffComparator(const LiteralWithCoeff& a, const LiteralWithCoeff& b) { if (a.coefficient == b.coefficient) { return a.literal.Index() < b.literal.Index(); } @@ -38,8 +38,8 @@ bool CoeffComparator(const LiteralWithCoeff &a, const LiteralWithCoeff &b) { } // namespace bool ComputeBooleanLinearExpressionCanonicalForm( - std::vector *cst, Coefficient *bound_shift, - Coefficient *max_value) { + std::vector* cst, Coefficient* bound_shift, + Coefficient* max_value) { // Note(user): For some reason, the IntType checking doesn't work here ?! that // is a bit worrying, but the code seems to behave correctly. *bound_shift = 0; @@ -49,7 +49,7 @@ bool ComputeBooleanLinearExpressionCanonicalForm( // This also remove term with a zero coefficient. 
std::sort(cst->begin(), cst->end(), LiteralComparator); int index = 0; - LiteralWithCoeff *representative = nullptr; + LiteralWithCoeff* representative = nullptr; for (int i = 0; i < cst->size(); ++i) { const LiteralWithCoeff current = (*cst)[i]; if (current.coefficient == 0) continue; @@ -100,12 +100,12 @@ bool ComputeBooleanLinearExpressionCanonicalForm( } bool ApplyLiteralMapping( - const gtl::ITIVector &mapping, - std::vector *cst, Coefficient *bound_shift, - Coefficient *max_value) { + const gtl::ITIVector& mapping, + std::vector* cst, Coefficient* bound_shift, + Coefficient* max_value) { int index = 0; Coefficient shift_due_to_fixed_variables(0); - for (const LiteralWithCoeff &entry : *cst) { + for (const LiteralWithCoeff& entry : *cst) { if (mapping[entry.literal.Index()] >= 0) { (*cst)[index] = LiteralWithCoeff(Literal(mapping[entry.literal.Index()]), entry.coefficient); @@ -133,7 +133,7 @@ bool ApplyLiteralMapping( // TODO(user): Also check for no duplicates literals + unit tests. bool BooleanLinearExpressionIsCanonical( - const std::vector &cst) { + const std::vector& cst) { Coefficient previous(1); for (LiteralWithCoeff term : cst) { if (term.coefficient < previous) return false; @@ -145,13 +145,13 @@ bool BooleanLinearExpressionIsCanonical( // TODO(user): Use more complex simplification like dividing by the gcd of // everyone and using less different coefficients if possible. void SimplifyCanonicalBooleanLinearConstraint( - std::vector *cst, Coefficient *rhs) { + std::vector* cst, Coefficient* rhs) { // Replace all coefficient >= rhs by rhs + 1 (these literal must actually be // false). Note that the linear sum of literals remains canonical. // // TODO(user): It is probably better to remove these literals and have other // constraint setting them to false from the symmetry finder perspective. 
- for (LiteralWithCoeff &x : *cst) { + for (LiteralWithCoeff& x : *cst) { if (x.coefficient > *rhs) x.coefficient = *rhs + 1; } } @@ -199,7 +199,7 @@ Coefficient ComputeNegatedCanonicalRhs(Coefficient lower_bound, bool CanonicalBooleanLinearProblem::AddLinearConstraint( bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, - Coefficient upper_bound, std::vector *cst) { + Coefficient upper_bound, std::vector* cst) { // Canonicalize the linear expression of the constraint. Coefficient bound_shift; Coefficient max_value; @@ -225,7 +225,7 @@ bool CanonicalBooleanLinearProblem::AddLinearConstraint( } bool CanonicalBooleanLinearProblem::AddConstraint( - const std::vector &cst, Coefficient max_value, + const std::vector& cst, Coefficient max_value, Coefficient rhs) { if (rhs < 0) return false; // Trivially unsatisfiable. if (rhs >= max_value) return true; // Trivially satisfiable. @@ -287,7 +287,7 @@ std::string MutableUpperBoundedLinearConstraint::DebugString() { // TODO(user): Keep this for DCHECK(), but maintain the slack incrementally // instead of recomputing it. 
Coefficient MutableUpperBoundedLinearConstraint::ComputeSlackForTrailPrefix( - const Trail &trail, int trail_index) const { + const Trail& trail, int trail_index) const { Coefficient activity(0); for (BooleanVariable var : PossibleNonZeros()) { if (GetCoefficient(var) == 0) continue; @@ -300,7 +300,7 @@ Coefficient MutableUpperBoundedLinearConstraint::ComputeSlackForTrailPrefix( } Coefficient MutableUpperBoundedLinearConstraint:: - ReduceCoefficientsAndComputeSlackForTrailPrefix(const Trail &trail, + ReduceCoefficientsAndComputeSlackForTrailPrefix(const Trail& trail, int trail_index) { Coefficient activity(0); Coefficient removed_sum(0); @@ -330,7 +330,7 @@ Coefficient MutableUpperBoundedLinearConstraint:: } void MutableUpperBoundedLinearConstraint::ReduceSlackTo( - const Trail &trail, int trail_index, Coefficient initial_slack, + const Trail& trail, int trail_index, Coefficient initial_slack, Coefficient target) { // Positive slack. const Coefficient slack = initial_slack; @@ -367,7 +367,7 @@ void MutableUpperBoundedLinearConstraint::ReduceSlackTo( } void MutableUpperBoundedLinearConstraint::CopyIntoVector( - std::vector *output) { + std::vector* output) { output->clear(); for (BooleanVariable var : non_zeros_.PositionsSetAtLeastOnce()) { const Coefficient coeff = GetCoefficient(var); @@ -387,7 +387,7 @@ Coefficient MutableUpperBoundedLinearConstraint::ComputeMaxSum() const { } UpperBoundedLinearConstraint::UpperBoundedLinearConstraint( - const std::vector &cst) + const std::vector& cst) : is_marked_for_deletion_(false), is_learned_(false), first_reason_trail_index_(-1), @@ -423,12 +423,12 @@ UpperBoundedLinearConstraint::UpperBoundedLinearConstraint( // Sentinel. 
starts_.push_back(literals_.size()); - hash_ = ThoroughHash(reinterpret_cast(cst.data()), + hash_ = ThoroughHash(reinterpret_cast(cst.data()), cst.size() * sizeof(LiteralWithCoeff)); } void UpperBoundedLinearConstraint::AddToConflict( - MutableUpperBoundedLinearConstraint *conflict) { + MutableUpperBoundedLinearConstraint* conflict) { int literal_index = 0; int coeff_index = 0; for (Literal literal : literals_) { @@ -440,7 +440,7 @@ void UpperBoundedLinearConstraint::AddToConflict( } bool UpperBoundedLinearConstraint::HasIdenticalTerms( - const std::vector &cst) { + const std::vector& cst) { if (cst.size() != literals_.size()) return false; int literal_index = 0; int coeff_index = 0; @@ -456,8 +456,8 @@ bool UpperBoundedLinearConstraint::HasIdenticalTerms( } bool UpperBoundedLinearConstraint::InitializeRhs( - Coefficient rhs, int trail_index, Coefficient *threshold, Trail *trail, - PbConstraintsEnqueueHelper *helper) { + Coefficient rhs, int trail_index, Coefficient* threshold, Trail* trail, + PbConstraintsEnqueueHelper* helper) { // Compute the slack from the assigned variables with a trail index // smaller than the given trail_index. The variable at trail_index has not // yet been propagated. 
@@ -530,8 +530,8 @@ bool UpperBoundedLinearConstraint::InitializeRhs( } bool UpperBoundedLinearConstraint::Propagate( - int trail_index, Coefficient *threshold, Trail *trail, - PbConstraintsEnqueueHelper *helper) { + int trail_index, Coefficient* threshold, Trail* trail, + PbConstraintsEnqueueHelper* helper) { DCHECK_LT(*threshold, 0); const Coefficient slack = GetSlackFromThreshold(*threshold); DCHECK_GE(slack, 0) << "The constraint is already a conflict!"; @@ -573,8 +573,8 @@ bool UpperBoundedLinearConstraint::Propagate( } void UpperBoundedLinearConstraint::FillReason( - const Trail &trail, int source_trail_index, - BooleanVariable propagated_variable, std::vector *reason) { + const Trail& trail, int source_trail_index, + BooleanVariable propagated_variable, std::vector* reason) { reason->clear(); // Optimization for an "at most one" constraint. @@ -644,8 +644,8 @@ void UpperBoundedLinearConstraint::FillReason( } Coefficient UpperBoundedLinearConstraint::ComputeCancelation( - const Trail &trail, int trail_index, - const MutableUpperBoundedLinearConstraint &conflict) { + const Trail& trail, int trail_index, + const MutableUpperBoundedLinearConstraint& conflict) { Coefficient result(0); int literal_index = 0; int coeff_index = 0; @@ -661,9 +661,9 @@ Coefficient UpperBoundedLinearConstraint::ComputeCancelation( } void UpperBoundedLinearConstraint::ResolvePBConflict( - const Trail &trail, BooleanVariable var, - MutableUpperBoundedLinearConstraint *conflict, - Coefficient *conflict_slack) { + const Trail& trail, BooleanVariable var, + MutableUpperBoundedLinearConstraint* conflict, + Coefficient* conflict_slack) { const int limit_trail_index = trail.Info(var).trail_index; // Compute the constraint activity at the time and the coefficient of the @@ -808,7 +808,7 @@ void UpperBoundedLinearConstraint::ResolvePBConflict( conflict->AddToRhs(rhs_ - diff); } -void UpperBoundedLinearConstraint::Untrail(Coefficient *threshold, +void 
UpperBoundedLinearConstraint::Untrail(Coefficient* threshold, int trail_index) { const Coefficient slack = GetSlackFromThreshold(*threshold); while (index_ + 1 < coeffs_.size() && coeffs_[index_ + 1] <= slack) ++index_; @@ -820,8 +820,8 @@ void UpperBoundedLinearConstraint::Untrail(Coefficient *threshold, // TODO(user): This is relatively slow. Take the "transpose" all at once, and // maybe put small constraints first on the to_update_ lists. -bool PbConstraints::AddConstraint(const std::vector &cst, - Coefficient rhs, Trail *trail) { +bool PbConstraints::AddConstraint(const std::vector& cst, + Coefficient rhs, Trail* trail) { SCOPED_TIME_STAT(&stats_); DCHECK(!cst.empty()); DCHECK(std::is_sorted(cst.begin(), cst.end(), CoeffComparator)); @@ -836,11 +836,11 @@ bool PbConstraints::AddConstraint(const std::vector &cst, std::unique_ptr c( new UpperBoundedLinearConstraint(cst)); - std::vector &duplicate_candidates = + std::vector& duplicate_candidates = possible_duplicates_[c->hash()]; // Optimization if the constraint terms are duplicates. 
- for (UpperBoundedLinearConstraint *candidate : duplicate_candidates) { + for (UpperBoundedLinearConstraint* candidate : duplicate_candidates) { if (candidate->HasIdenticalTerms(cst)) { if (rhs < candidate->Rhs()) { // TODO(user): the index is needed to give the correct thresholds_ entry @@ -882,7 +882,7 @@ bool PbConstraints::AddConstraint(const std::vector &cst, } bool PbConstraints::AddLearnedConstraint( - const std::vector &cst, Coefficient rhs, Trail *trail) { + const std::vector& cst, Coefficient rhs, Trail* trail) { DeleteSomeLearnedConstraintIfNeeded(); const int old_num_constraints = constraints_.size(); const bool result = AddConstraint(cst, rhs, trail); @@ -895,7 +895,7 @@ bool PbConstraints::AddLearnedConstraint( return result; } -bool PbConstraints::PropagateNext(Trail *trail) { +bool PbConstraints::PropagateNext(Trail* trail) { SCOPED_TIME_STAT(&stats_); const int source_trail_index = propagation_trail_index_; const Literal true_literal = (*trail)[propagation_trail_index_]; @@ -905,12 +905,12 @@ bool PbConstraints::PropagateNext(Trail *trail) { // synchronized. 
bool conflict = false; num_threshold_updates_ += to_update_[true_literal.Index()].size(); - for (ConstraintIndexWithCoeff &update : to_update_[true_literal.Index()]) { + for (ConstraintIndexWithCoeff& update : to_update_[true_literal.Index()]) { const Coefficient threshold = thresholds_[update.index] - update.coefficient; thresholds_[update.index] = threshold; if (threshold < 0 && !conflict) { - UpperBoundedLinearConstraint *const cst = + UpperBoundedLinearConstraint* const cst = constraints_[update.index.value()].get(); update.need_untrail_inspection = true; ++num_constraint_lookups_; @@ -931,7 +931,7 @@ bool PbConstraints::PropagateNext(Trail *trail) { return !conflict; } -bool PbConstraints::Propagate(Trail *trail) { +bool PbConstraints::Propagate(Trail* trail) { const int old_index = trail->Index(); while (trail->Index() == old_index && propagation_trail_index_ < old_index) { if (!PropagateNext(trail)) return false; @@ -939,13 +939,13 @@ bool PbConstraints::Propagate(Trail *trail) { return true; } -void PbConstraints::Untrail(const Trail &trail, int trail_index) { +void PbConstraints::Untrail(const Trail& trail, int trail_index) { SCOPED_TIME_STAT(&stats_); to_untrail_.ClearAndResize(ConstraintIndex(constraints_.size())); while (propagation_trail_index_ > trail_index) { --propagation_trail_index_; const Literal literal = trail[propagation_trail_index_]; - for (ConstraintIndexWithCoeff &update : to_update_[literal.Index()]) { + for (ConstraintIndexWithCoeff& update : to_update_[literal.Index()]) { thresholds_[update.index] += update.coefficient; // Only the constraints which were inspected during Propagate() need @@ -962,20 +962,20 @@ void PbConstraints::Untrail(const Trail &trail, int trail_index) { } } -absl::Span PbConstraints::Reason(const Trail &trail, +absl::Span PbConstraints::Reason(const Trail& trail, int trail_index) const { SCOPED_TIME_STAT(&stats_); - const PbConstraintsEnqueueHelper::ReasonInfo &reason_info = + const 
PbConstraintsEnqueueHelper::ReasonInfo& reason_info = enqueue_helper_.reasons[trail_index]; - std::vector *reason = trail.GetEmptyVectorToStoreReason(trail_index); + std::vector* reason = trail.GetEmptyVectorToStoreReason(trail_index); reason_info.pb_constraint->FillReason(trail, reason_info.source_trail_index, trail[trail_index].Variable(), reason); return *reason; } -UpperBoundedLinearConstraint *PbConstraints::ReasonPbConstraint( +UpperBoundedLinearConstraint* PbConstraints::ReasonPbConstraint( int trail_index) const { - const PbConstraintsEnqueueHelper::ReasonInfo &reason_info = + const PbConstraintsEnqueueHelper::ReasonInfo& reason_info = enqueue_helper_.reasons[trail_index]; return reason_info.pb_constraint; } @@ -1007,7 +1007,7 @@ void PbConstraints::DeleteSomeLearnedConstraintIfNeeded() { // We do that in two pass, first we extract the activities. std::vector activities; for (int i = 0; i < constraints_.size(); ++i) { - const UpperBoundedLinearConstraint &constraint = *(constraints_[i].get()); + const UpperBoundedLinearConstraint& constraint = *(constraints_[i].get()); if (constraint.is_learned() && !constraint.is_used_as_a_reason()) { activities.push_back(constraint.activity()); } @@ -1023,7 +1023,7 @@ void PbConstraints::DeleteSomeLearnedConstraintIfNeeded() { // Unlikely, but may happen, so in this case, we just delete all the // constraint that can possibly be deleted for (int i = 0; i < constraints_.size(); ++i) { - UpperBoundedLinearConstraint &constraint = *(constraints_[i].get()); + UpperBoundedLinearConstraint& constraint = *(constraints_[i].get()); if (constraint.is_learned() && !constraint.is_used_as_a_reason()) { constraint.MarkForDeletion(); } @@ -1044,7 +1044,7 @@ void PbConstraints::DeleteSomeLearnedConstraintIfNeeded() { // exactly equal ot limit_activity, it is why the loop is in the reverse // order. 
for (int i = constraints_.size() - 1; i >= 0; --i) { - UpperBoundedLinearConstraint &constraint = *(constraints_[i].get()); + UpperBoundedLinearConstraint& constraint = *(constraints_[i].get()); if (constraint.is_learned() && !constraint.is_used_as_a_reason()) { if (constraint.activity() <= limit_activity) { if (constraint.activity() == limit_activity && @@ -1063,7 +1063,7 @@ void PbConstraints::DeleteSomeLearnedConstraintIfNeeded() { ComputeNewLearnedConstraintLimit(); } -void PbConstraints::BumpActivity(UpperBoundedLinearConstraint *constraint) { +void PbConstraints::BumpActivity(UpperBoundedLinearConstraint* constraint) { if (!constraint->is_learned()) return; const double max_activity = parameters_->max_clause_activity_value(); constraint->set_activity(constraint->activity() + @@ -1099,8 +1099,8 @@ void PbConstraints::DeleteConstraintMarkedForDeletion() { ++new_index; } else { // Remove it from possible_duplicates_. - UpperBoundedLinearConstraint *c = constraints_[i.value()].get(); - std::vector &ref = + UpperBoundedLinearConstraint* c = constraints_[i.value()].get(); + std::vector& ref = possible_duplicates_[c->hash()]; for (int i = 0; i < ref.size(); ++i) { if (ref[i] == c) { @@ -1117,7 +1117,7 @@ void PbConstraints::DeleteConstraintMarkedForDeletion() { // This is the slow part, we need to remap all the ConstraintIndex to the // new ones. 
for (LiteralIndex lit(0); lit < to_update_.size(); ++lit) { - std::vector &updates = to_update_[lit]; + std::vector& updates = to_update_[lit]; int new_index = 0; for (int i = 0; i < updates.size(); ++i) { const ConstraintIndex m = index_mapping[updates[i].index]; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index bf64ef1d72..1e02ed3197 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -28,8 +28,8 @@ namespace sat { namespace { void AppendLowerBoundReasonIfValid(IntegerVariable var, - const IntegerTrail &i_trail, - std::vector *reason) { + const IntegerTrail& i_trail, + std::vector* reason) { if (var != kNoIntegerVariable) { reason->push_back(i_trail.LowerBoundAsLiteral(var)); } @@ -37,7 +37,7 @@ void AppendLowerBoundReasonIfValid(IntegerVariable var, } // namespace -bool PrecedencesPropagator::Propagate(Trail *trail) { return Propagate(); } +bool PrecedencesPropagator::Propagate(Trail* trail) { return Propagate(); } bool PrecedencesPropagator::Propagate() { while (propagation_trail_index_ < trail_->Index()) { @@ -49,7 +49,7 @@ bool PrecedencesPropagator::Propagate() { for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[literal.Index()]) { if (--arc_counts_[arc_index] == 0) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; impacted_arcs_[arc.tail_var].push_back(arc_index); } } @@ -59,7 +59,7 @@ bool PrecedencesPropagator::Propagate() { for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[literal.Index()]) { if (arc_counts_[arc_index] > 0) continue; - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue; const IntegerValue new_head_lb = integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc); @@ -94,7 +94,7 @@ bool PrecedencesPropagator::Propagate() { bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) { for (const ArcIndex arc_index : impacted_arcs_[var]) 
{ - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue; const IntegerValue new_head_lb = integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc); @@ -105,7 +105,7 @@ bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) { return true; } -void PrecedencesPropagator::Untrail(const Trail &trail, int trail_index) { +void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { if (propagation_trail_index_ > trail_index) { // This means that we already propagated all there is to propagate // at the level trail_index, so we can safely clear modified_vars_ in case @@ -118,7 +118,7 @@ void PrecedencesPropagator::Untrail(const Trail &trail, int trail_index) { for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[literal.Index()]) { if (arc_counts_[arc_index]++ == 0) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; impacted_arcs_[arc.tail_var].pop_back(); } } @@ -130,8 +130,8 @@ void PrecedencesPropagator::Untrail(const Trail &trail, int trail_index) { // by first computing how many times they appear and then apply the sorting // permutation. 
void PrecedencesPropagator::ComputePrecedences( - const std::vector &vars, - std::vector *output) { + const std::vector& vars, + std::vector* output) { tmp_sorted_vars_.clear(); tmp_precedences_.clear(); for (int index = 0; index < vars.size(); ++index) { @@ -139,7 +139,7 @@ void PrecedencesPropagator::ComputePrecedences( CHECK_NE(kNoIntegerVariable, var); if (var >= impacted_arcs_.size()) continue; for (const ArcIndex arc_index : impacted_arcs_[var]) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue; IntegerValue offset = arc.offset; @@ -194,7 +194,7 @@ void PrecedencesPropagator::ComputePrecedences( } } output->resize(start); - for (const IntegerPrecedences &precedence : tmp_precedences_) { + for (const IntegerPrecedences& precedence : tmp_precedences_) { if (var_to_degree_[precedence.var] < 0) continue; (*output)[var_to_degree_[precedence.var]++] = precedence; } @@ -208,9 +208,9 @@ void PrecedencesPropagator::ComputePrecedences( void PrecedencesPropagator::AddPrecedenceReason( int arc_index, IntegerValue min_offset, - std::vector *literal_reason, - std::vector *integer_reason) const { - const ArcInfo &arc = arcs_[ArcIndex(arc_index)]; + std::vector* literal_reason, + std::vector* integer_reason) const { + const ArcInfo& arc = arcs_[ArcIndex(arc_index)]; for (const Literal l : arc.presence_literals) { literal_reason->push_back(l.Negated()); } @@ -353,7 +353,7 @@ void PrecedencesPropagator::AddArc( const ArcIndex arc_index(arcs_.size()); arcs_.push_back( {a.tail_var, a.head_var, offset, a.offset_var, enforcement_literals}); - auto &presence_literals = arcs_.back().presence_literals; + auto& presence_literals = arcs_.back().presence_literals; if (integer_trail_->IsOptional(a.head_var)) { // TODO(user): More generally, we can remove any literal that is implied // by to_remove. 
@@ -383,14 +383,15 @@ void PrecedencesPropagator::AddArc( // This is because, for each lower bound changed, we inspect 500 arcs even // though they will never be propagated because the other bound is still at the // horizon. Find an even sparser algorithm? -void PrecedencesPropagator::PropagateOptionalArcs(Trail *trail) { +void PrecedencesPropagator::PropagateOptionalArcs(Trail* trail) { for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) { - if (var >= impacted_potential_arcs_.size()) break; + // The variables are not in increasing order, so we need to continue. + if (var >= impacted_potential_arcs_.size()) continue; // Note that we can currently check the same ArcInfo up to 3 times, one for // each of the arc variables: tail, NegationOf(head) and offset_var. for (const OptionalArcIndex arc_index : impacted_potential_arcs_[var]) { - const ArcInfo &arc = potential_arcs_[arc_index]; + const ArcInfo& arc = potential_arcs_[arc_index]; int num_not_true = 0; Literal to_propagate; for (const Literal l : arc.presence_literals) { @@ -425,15 +426,15 @@ void PrecedencesPropagator::PropagateOptionalArcs(Trail *trail) { } } -IntegerValue PrecedencesPropagator::ArcOffset(const ArcInfo &arc) const { +IntegerValue PrecedencesPropagator::ArcOffset(const ArcInfo& arc) const { return arc.offset + (arc.offset_var == kNoIntegerVariable ? IntegerValue(0) : integer_trail_->LowerBound(arc.offset_var)); } -bool PrecedencesPropagator::EnqueueAndCheck(const ArcInfo &arc, +bool PrecedencesPropagator::EnqueueAndCheck(const ArcInfo& arc, IntegerValue new_head_lb, - Trail *trail) { + Trail* trail) { DCHECK_GT(new_head_lb, integer_trail_->LowerBound(arc.head_var)); // Compute the reason for new_head_lb. 
@@ -487,11 +488,11 @@ bool PrecedencesPropagator::EnqueueAndCheck(const ArcInfo &arc, literal_reason_, integer_reason_); } -bool PrecedencesPropagator::NoPropagationLeft(const Trail &trail) const { +bool PrecedencesPropagator::NoPropagationLeft(const Trail& trail) const { const int num_nodes = impacted_arcs_.size(); for (IntegerVariable var(0); var < num_nodes; ++var) { for (const ArcIndex arc_index : impacted_arcs_[var]) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue; if (integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc) > integer_trail_->LowerBound(arc.head_var)) { @@ -538,7 +539,7 @@ void PrecedencesPropagator::CleanUpMarkedArcsAndParents() { } bool PrecedencesPropagator::DisassembleSubtree( - int source, int target, std::vector *can_be_skipped) { + int source, int target, std::vector* can_be_skipped) { // Note that we explore a tree, so we can do it in any order, and the one // below seems to be the fastest. tmp_vector_.clear(); @@ -547,7 +548,7 @@ bool PrecedencesPropagator::DisassembleSubtree( const int tail = tmp_vector_.back(); tmp_vector_.pop_back(); for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(tail)]) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (arc.is_marked) { arc.is_marked = false; // mutable. 
if (arc.head_var.value() == target) return true; @@ -561,9 +562,9 @@ bool PrecedencesPropagator::DisassembleSubtree( } void PrecedencesPropagator::AnalyzePositiveCycle( - ArcIndex first_arc, Trail *trail, std::vector *must_be_all_true, - std::vector *literal_reason, - std::vector *integer_reason) { + ArcIndex first_arc, Trail* trail, std::vector* must_be_all_true, + std::vector* literal_reason, + std::vector* integer_reason) { must_be_all_true->clear(); literal_reason->clear(); integer_reason->clear(); @@ -580,7 +581,7 @@ void PrecedencesPropagator::AnalyzePositiveCycle( const int num_nodes = impacted_arcs_.size(); while (arc_on_cycle.size() <= num_nodes) { arc_on_cycle.push_back(arc_index); - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (arc.tail_var == first_arc_head) break; arc_index = bf_parent_arc_of_[arc.tail_var.value()]; CHECK_NE(arc_index, ArcIndex(-1)); @@ -590,7 +591,7 @@ void PrecedencesPropagator::AnalyzePositiveCycle( // Compute the reason for this cycle. IntegerValue sum(0); for (const ArcIndex arc_index : arc_on_cycle) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; sum += ArcOffset(arc); AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_, integer_reason); @@ -619,7 +620,7 @@ void PrecedencesPropagator::AnalyzePositiveCycle( // // TODO(user): The current algorithm is quite efficient, but there is probably // still room for improvments. -bool PrecedencesPropagator::BellmanFordTarjan(Trail *trail) { +bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) { const int num_nodes = impacted_arcs_.size(); // These vector are reset by CleanUpMarkedArcsAndParents() so resize is ok. 
@@ -651,7 +652,7 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail *trail) { const IntegerValue tail_lb = integer_trail_->LowerBound(IntegerVariable(node)); for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(node)]) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; DCHECK_EQ(arc.tail_var, node); const IntegerValue candidate = tail_lb + ArcOffset(arc); if (candidate > integer_trail_->LowerBound(arc.head_var)) { @@ -730,7 +731,7 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail *trail) { } int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause( - const absl::Span clause, Model *model) { + const absl::Span clause, Model* model) { CHECK_EQ(model->GetOrCreate()->CurrentDecisionLevel(), 0); if (clause.size() < 2) return 0; @@ -739,7 +740,7 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause( for (const Literal l : clause) { if (l.Index() >= literal_to_new_impacted_arcs_.size()) continue; for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[l.Index()]) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; if (arc.presence_literals.size() != 1) continue; // TODO(user): Support variable offset. @@ -752,13 +753,13 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause( // Stable sort by head_var so that for a same head_var, the entry are sorted // by Literal as they appear in clause. std::stable_sort(infos.begin(), infos.end(), - [](const ArcInfo &a, const ArcInfo &b) { + [](const ArcInfo& a, const ArcInfo& b) { return a.head_var < b.head_var; }); // We process ArcInfo with the same head_var toghether. 
int num_added_constraints = 0; - auto *solver = model->GetOrCreate(); + auto* solver = model->GetOrCreate(); for (int i = 0; i < infos.size();) { const int start = i; const IntegerVariable head_var = infos[start].head_var; @@ -812,14 +813,14 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause( } int PrecedencesPropagator:: - AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(Model *model) { - auto *time_limit = model->GetOrCreate(); - auto *solver = model->GetOrCreate(); + AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(Model* model) { + auto* time_limit = model->GetOrCreate(); + auto* solver = model->GetOrCreate(); // Fill the set of incoming conditional arcs for each variables. - gtl::ITIVector > incoming_arcs_; + gtl::ITIVector> incoming_arcs_; for (ArcIndex arc_index(0); arc_index < arcs_.size(); ++arc_index) { - const ArcInfo &arc = arcs_[arc_index]; + const ArcInfo& arc = arcs_[arc_index]; // Only keep arc that have a fixed offset and a single presence_literals. if (arc.offset_var != kNoIntegerVariable) continue; @@ -888,11 +889,11 @@ int PrecedencesPropagator:: return num_added_constraints; } -int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model *model) { +int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model* model) { VLOG(1) << "Detecting GreaterThanAtLeastOneOf() constraints..."; - auto *time_limit = model->GetOrCreate(); - auto *solver = model->GetOrCreate(); - auto *clauses = model->GetOrCreate(); + auto* time_limit = model->GetOrCreate(); + auto* solver = model->GetOrCreate(); + auto* clauses = model->GetOrCreate(); int num_added_constraints = 0; // We have two possible approaches. For now, we prefer the first one except if @@ -909,7 +910,7 @@ int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model *model) { // them. we need to experiments. // - The automatic clause detection might be a better approach and it // could be combined with probing. 
- for (const SatClause *clause : clauses->AllClausesInCreationOrder()) { + for (const SatClause* clause : clauses->AllClausesInCreationOrder()) { if (time_limit->LimitReached()) return num_added_constraints; if (solver->IsModelUnsat()) return num_added_constraints; num_added_constraints += AddGreaterThanAtLeastOneOfConstraintsFromClause( diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 0aa9a4ce4a..61f289a69d 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -50,7 +50,7 @@ namespace sat { // propagator and the overhead of supporting coefficient should not be too bad. class PrecedencesPropagator : public SatPropagator, PropagatorInterface { public: - explicit PrecedencesPropagator(Model *model) + explicit PrecedencesPropagator(Model* model) : SatPropagator("PrecedencesPropagator"), trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), @@ -62,8 +62,8 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { } bool Propagate() final; - bool Propagate(Trail *trail) final; - void Untrail(const Trail &trail, int trail_index) final; + bool Propagate(Trail* trail) final; + void Untrail(const Trail& trail, int trail_index) final; // Propagates all the outgoing arcs of the given variable (and only those). It // is more efficient to do all these propagation in one go by calling @@ -114,11 +114,11 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { int arc_index; // Used by AddPrecedenceReason(). IntegerValue offset; // we have: vars[index] + offset <= var }; - void ComputePrecedences(const std::vector &vars, - std::vector *output); + void ComputePrecedences(const std::vector& vars, + std::vector* output); void AddPrecedenceReason(int arc_index, IntegerValue min_offset, - std::vector *literal_reason, - std::vector *integer_reason) const; + std::vector* literal_reason, + std::vector* integer_reason) const; // Advanced usage. 
To be called once all the constraints have been added to // the model. This will loop over all "node" in this class, and if one of its @@ -128,7 +128,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // // TODO(user): This can be quite slow, add some kind of deterministic limit // so that we can use it all the time. - int AddGreaterThanAtLeastOneOfConstraints(Model *model); + int AddGreaterThanAtLeastOneOfConstraints(Model* model); private: DEFINE_INT_TYPE(ArcIndex, int); @@ -138,14 +138,14 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // least one of" type of constraints. Returns the number of such constraint // added. int AddGreaterThanAtLeastOneOfConstraintsFromClause( - const absl::Span clause, Model *model); + const absl::Span clause, Model* model); // Another approach for AddGreaterThanAtLeastOneOfConstraints(), this one // might be a bit slow as it relies on the propagation engine to detect // clauses between incoming arcs presence literals. // Returns the number of added constraints. int AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection( - Model *model); + Model* model); // Information about an individual arc. struct ArcInfo { @@ -175,13 +175,13 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // Enqueue a new lower bound for the variable arc.head_lb that was deduced // from the current value of arc.tail_lb and the offset of this arc. - bool EnqueueAndCheck(const ArcInfo &arc, IntegerValue new_head_lb, - Trail *trail); - IntegerValue ArcOffset(const ArcInfo &arc) const; + bool EnqueueAndCheck(const ArcInfo& arc, IntegerValue new_head_lb, + Trail* trail); + IntegerValue ArcOffset(const ArcInfo& arc) const; // Inspect all the optional arcs that needs inspection (to stay sparse) and // check if their presence literal can be propagated to false. 
- void PropagateOptionalArcs(Trail *trail); + void PropagateOptionalArcs(Trail* trail); // The core algorithm implementation is split in these functions. One must // first call InitializeBFQueueWithModifiedNodes() that will push all the @@ -200,24 +200,24 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // algorithms", Boris V. Cherkassky, Andrew V. Goldberg, 1996, // http://people.cs.nctu.edu.tw/~tjshen/doc/ne.pdf void InitializeBFQueueWithModifiedNodes(); - bool BellmanFordTarjan(Trail *trail); + bool BellmanFordTarjan(Trail* trail); bool DisassembleSubtree(int source, int target, - std::vector *can_be_skipped); - void AnalyzePositiveCycle(ArcIndex first_arc, Trail *trail, - std::vector *must_be_all_true, - std::vector *literal_reason, - std::vector *integer_reason); + std::vector* can_be_skipped); + void AnalyzePositiveCycle(ArcIndex first_arc, Trail* trail, + std::vector* must_be_all_true, + std::vector* literal_reason, + std::vector* integer_reason); void CleanUpMarkedArcsAndParents(); // Loops over all the arcs and verify that there is no propagation left. // This is only meant to be used in a DCHECK() and is not optimized. - bool NoPropagationLeft(const Trail &trail) const; + bool NoPropagationLeft(const Trail& trail) const; // External class needed to get the IntegerVariable lower bounds and Enqueue // new ones. - Trail *trail_; - IntegerTrail *integer_trail_; - GenericLiteralWatcher *watcher_; + Trail* trail_; + IntegerTrail* integer_trail_; + GenericLiteralWatcher* watcher_; int watcher_id_; // The key to our incrementality. This will be cleared once the propagation @@ -234,7 +234,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // consecutive like in StaticGraph should have a big performance impact. // // TODO(user): We do not need to store ArcInfo.tail_var here. 
- gtl::ITIVector > + gtl::ITIVector> impacted_arcs_; gtl::ITIVector arcs_; @@ -242,7 +242,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // one of the presence literals when the arc cannot be present. An arc needs // to appear only once in potential_arcs_, but it will be referenced by // all its variable in impacted_potential_arcs_. - gtl::ITIVector > + gtl::ITIVector> impacted_potential_arcs_; gtl::ITIVector potential_arcs_; @@ -252,7 +252,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { struct SortedVar { IntegerVariable var; IntegerValue lower_bound; - bool operator<(const SortedVar &other) const { + bool operator<(const SortedVar& other) const { return lower_bound < other.lower_bound; } }; @@ -266,7 +266,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // // TODO(user): Try a one-watcher approach instead. Note that in most cases // arc should be controlled by 1 or 2 literals, so not sure it is worth it. 
- gtl::ITIVector > + gtl::ITIVector> literal_to_new_impacted_arcs_; gtl::ITIVector arc_counts_; @@ -294,8 +294,8 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { inline void PrecedencesPropagator::AddPrecedence(IntegerVariable i1, IntegerVariable i2) { - AddArc(i1, i2, /*offset=*/IntegerValue(0), - /*offset_var=*/kNoIntegerVariable, {}); + AddArc(i1, i2, /*offset=*/IntegerValue(0), /*offset_var=*/kNoIntegerVariable, + {}); } inline void PrecedencesPropagator::AddPrecedenceWithOffset( @@ -306,8 +306,8 @@ inline void PrecedencesPropagator::AddPrecedenceWithOffset( inline void PrecedencesPropagator::AddConditionalPrecedence(IntegerVariable i1, IntegerVariable i2, Literal l) { - AddArc(i1, i2, /*offset=*/IntegerValue(0), - /*offset_var=*/kNoIntegerVariable, {l}); + AddArc(i1, i2, /*offset=*/IntegerValue(0), /*offset_var=*/kNoIntegerVariable, + {l}); } inline void PrecedencesPropagator::AddConditionalPrecedenceWithOffset( @@ -331,121 +331,121 @@ inline void PrecedencesPropagator::AddPrecedenceWithAllOptions( // ============================================================================= // a <= b. -inline std::function LowerOrEqual(IntegerVariable a, - IntegerVariable b) { - return [=](Model *model) { +inline std::function LowerOrEqual(IntegerVariable a, + IntegerVariable b) { + return [=](Model* model) { return model->GetOrCreate()->AddPrecedence(a, b); }; } // a + offset <= b. -inline std::function LowerOrEqualWithOffset(IntegerVariable a, - IntegerVariable b, - int64 offset) { - return [=](Model *model) { +inline std::function LowerOrEqualWithOffset(IntegerVariable a, + IntegerVariable b, + int64 offset) { + return [=](Model* model) { return model->GetOrCreate()->AddPrecedenceWithOffset( a, b, IntegerValue(offset)); }; } // a + b <= ub. 
-inline std::function Sum2LowerOrEqual(IntegerVariable a, - IntegerVariable b, - int64 ub) { +inline std::function Sum2LowerOrEqual(IntegerVariable a, + IntegerVariable b, + int64 ub) { return LowerOrEqualWithOffset(a, NegationOf(b), -ub); } // l => (a + b <= ub). -inline std::function ConditionalSum2LowerOrEqual( +inline std::function ConditionalSum2LowerOrEqual( IntegerVariable a, IntegerVariable b, int64 ub, - const std::vector &enforcement_literals) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); + const std::vector& enforcement_literals) { + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddPrecedenceWithAllOptions(a, NegationOf(b), IntegerValue(-ub), kNoIntegerVariable, enforcement_literals); }; } // a + b + c <= ub. -inline std::function Sum3LowerOrEqual(IntegerVariable a, - IntegerVariable b, - IntegerVariable c, - int64 ub) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); +inline std::function Sum3LowerOrEqual(IntegerVariable a, + IntegerVariable b, + IntegerVariable c, + int64 ub) { + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddPrecedenceWithAllOptions(a, NegationOf(c), IntegerValue(-ub), b, {}); }; } // l => (a + b + c <= ub). -inline std::function ConditionalSum3LowerOrEqual( +inline std::function ConditionalSum3LowerOrEqual( IntegerVariable a, IntegerVariable b, IntegerVariable c, int64 ub, - const std::vector &enforcement_literals) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); + const std::vector& enforcement_literals) { + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddPrecedenceWithAllOptions(a, NegationOf(c), IntegerValue(-ub), b, enforcement_literals); }; } // a >= b. 
-inline std::function GreaterOrEqual(IntegerVariable a, - IntegerVariable b) { - return [=](Model *model) { +inline std::function GreaterOrEqual(IntegerVariable a, + IntegerVariable b) { + return [=](Model* model) { return model->GetOrCreate()->AddPrecedence(b, a); }; } // a == b. -inline std::function Equality(IntegerVariable a, - IntegerVariable b) { - return [=](Model *model) { +inline std::function Equality(IntegerVariable a, + IntegerVariable b) { + return [=](Model* model) { model->Add(LowerOrEqual(a, b)); model->Add(LowerOrEqual(b, a)); }; } // a + offset == b. -inline std::function EqualityWithOffset(IntegerVariable a, - IntegerVariable b, - int64 offset) { - return [=](Model *model) { +inline std::function EqualityWithOffset(IntegerVariable a, + IntegerVariable b, + int64 offset) { + return [=](Model* model) { model->Add(LowerOrEqualWithOffset(a, b, offset)); model->Add(LowerOrEqualWithOffset(b, a, -offset)); }; } // is_le => (a + offset <= b). -inline std::function ConditionalLowerOrEqualWithOffset( +inline std::function ConditionalLowerOrEqualWithOffset( IntegerVariable a, IntegerVariable b, int64 offset, Literal is_le) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddConditionalPrecedenceWithOffset(a, b, IntegerValue(offset), is_le); }; } // is_le => (a <= b). -inline std::function ConditionalLowerOrEqual(IntegerVariable a, - IntegerVariable b, - Literal is_le) { +inline std::function ConditionalLowerOrEqual(IntegerVariable a, + IntegerVariable b, + Literal is_le) { return ConditionalLowerOrEqualWithOffset(a, b, 0, is_le); } // literals => (a <= b). 
-inline std::function ConditionalLowerOrEqual( +inline std::function ConditionalLowerOrEqual( IntegerVariable a, IntegerVariable b, absl::Span literals) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddPrecedenceWithAllOptions(a, b, IntegerValue(0), /*offset_var*/ kNoIntegerVariable, literals); }; } // is_le <=> (a + offset <= b). -inline std::function ReifiedLowerOrEqualWithOffset( +inline std::function ReifiedLowerOrEqualWithOffset( IntegerVariable a, IntegerVariable b, int64 offset, Literal is_le) { - return [=](Model *model) { - PrecedencesPropagator *p = model->GetOrCreate(); + return [=](Model* model) { + PrecedencesPropagator* p = model->GetOrCreate(); p->AddConditionalPrecedenceWithOffset(a, b, IntegerValue(offset), is_le); // The negation of (a + offset <= b) is (a + offset > b) which can be @@ -456,10 +456,10 @@ inline std::function ReifiedLowerOrEqualWithOffset( } // is_eq <=> (a == b). -inline std::function ReifiedEquality(IntegerVariable a, - IntegerVariable b, - Literal is_eq) { - return [=](Model *model) { +inline std::function ReifiedEquality(IntegerVariable a, + IntegerVariable b, + Literal is_eq) { + return [=](Model* model) { // We creates two extra Boolean variables in this case. // // TODO(user): Avoid creating them if we already have some literal that @@ -474,11 +474,11 @@ inline std::function ReifiedEquality(IntegerVariable a, } // is_eq <=> (a + offset == b). -inline std::function ReifiedEqualityWithOffset(IntegerVariable a, - IntegerVariable b, - int64 offset, - Literal is_eq) { - return [=](Model *model) { +inline std::function ReifiedEqualityWithOffset(IntegerVariable a, + IntegerVariable b, + int64 offset, + Literal is_eq) { + return [=](Model* model) { // We creates two extra Boolean variables in this case. 
// // TODO(user): Avoid creating them if we already have some literal that @@ -493,9 +493,9 @@ inline std::function ReifiedEqualityWithOffset(IntegerVariable a, } // a != b. -inline std::function NotEqual(IntegerVariable a, - IntegerVariable b) { - return [=](Model *model) { +inline std::function NotEqual(IntegerVariable a, + IntegerVariable b) { + return [=](Model* model) { // We have two options (is_gt or is_lt) and one must be true. const Literal is_lt = Literal(model->Add(NewBooleanVariable()), true); const Literal is_gt = is_lt.Negated(); diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 992844d164..24b61a3f9b 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -20,18 +20,18 @@ namespace operations_research { namespace sat { -int SavedLiteral::Get(PresolveContext *context) const { +int SavedLiteral::Get(PresolveContext* context) const { return context->GetLiteralRepresentative(ref_); } -int SavedVariable::Get(PresolveContext *context) const { +int SavedVariable::Get(PresolveContext* context) const { return context->GetVariableRepresentative(ref_); } void PresolveContext::ClearStats() { stats_by_rule_name.clear(); } -int PresolveContext::NewIntVar(const Domain &domain) { - IntegerVariableProto *const var = working_model->add_variables(); +int PresolveContext::NewIntVar(const Domain& domain) { + IntegerVariableProto* const var = working_model->add_variables(); FillDomainInProto(domain, var); InitializeNewDomains(); return working_model->variables_size() - 1; @@ -42,7 +42,7 @@ int PresolveContext::NewBoolVar() { return NewIntVar(Domain(0, 1)); } int PresolveContext::GetOrCreateConstantVar(int64 cst) { if (!gtl::ContainsKey(constant_to_ref_, cst)) { constant_to_ref_[cst] = SavedVariable(working_model->variables_size()); - IntegerVariableProto *const var_proto = working_model->add_variables(); + IntegerVariableProto* const var_proto = working_model->add_variables(); var_proto->add_domain(cst); 
var_proto->add_domain(cst); InitializeNewDomains(); @@ -52,19 +52,19 @@ int PresolveContext::GetOrCreateConstantVar(int64 cst) { // a => b. void PresolveContext::AddImplication(int a, int b) { - ConstraintProto *const ct = working_model->add_constraints(); + ConstraintProto* const ct = working_model->add_constraints(); ct->add_enforcement_literal(a); ct->mutable_bool_and()->add_literals(b); } // b => x in [lb, ub]. -void PresolveContext::AddImplyInDomain(int b, int x, const Domain &domain) { - ConstraintProto *const imply = working_model->add_constraints(); +void PresolveContext::AddImplyInDomain(int b, int x, const Domain& domain) { + ConstraintProto* const imply = working_model->add_constraints(); // Doing it like this seems to use slightly less memory. // TODO(user): Find the best way to create such small proto. imply->mutable_enforcement_literal()->Resize(1, b); - LinearConstraintProto *mutable_linear = imply->mutable_linear(); + LinearConstraintProto* mutable_linear = imply->mutable_linear(); mutable_linear->mutable_vars()->Resize(1, x); mutable_linear->mutable_coeffs()->Resize(1, 1); FillDomainInProto(domain, mutable_linear); @@ -115,7 +115,7 @@ int64 PresolveContext::MaxOf(int ref) const { : -domains[PositiveRef(ref)].Min(); } -int64 PresolveContext::MinOf(const LinearExpressionProto &expr) const { +int64 PresolveContext::MinOf(const LinearExpressionProto& expr) const { int64 result = expr.offset(); for (int i = 0; i < expr.vars_size(); ++i) { const int64 coeff = expr.coeffs(i); @@ -128,7 +128,7 @@ int64 PresolveContext::MinOf(const LinearExpressionProto &expr) const { return result; } -int64 PresolveContext::MaxOf(const LinearExpressionProto &expr) const { +int64 PresolveContext::MaxOf(const LinearExpressionProto& expr) const { int64 result = expr.offset(); for (int i = 0; i < expr.vars_size(); ++i) { const int64 coeff = expr.coeffs(i); @@ -236,7 +236,7 @@ bool PresolveContext::DomainContains(int ref, int64 value) const { } ABSL_MUST_USE_RESULT bool 
PresolveContext::IntersectDomainWith( - int ref, const Domain &domain, bool *domain_modified) { + int ref, const Domain& domain, bool* domain_modified) { DCHECK(!DomainIsEmpty(ref)); const int var = PositiveRef(ref); @@ -282,7 +282,7 @@ ABSL_MUST_USE_RESULT bool PresolveContext::SetLiteralToTrue(int lit) { return SetLiteralToFalse(NegatedRef(lit)); } -void PresolveContext::UpdateRuleStats(const std::string &name) { +void PresolveContext::UpdateRuleStats(const std::string& name) { if (enable_stats) { VLOG(1) << num_presolve_operations << " : " << name; stats_by_rule_name[name]++; @@ -290,7 +290,7 @@ void PresolveContext::UpdateRuleStats(const std::string &name) { num_presolve_operations++; } -void PresolveContext::UpdateLinear1Usage(const ConstraintProto &ct, int c) { +void PresolveContext::UpdateLinear1Usage(const ConstraintProto& ct, int c) { const int old_var = constraint_to_linear1_var_[c]; if (old_var >= 0) { var_to_num_linear1_[old_var]--; @@ -304,7 +304,7 @@ void PresolveContext::UpdateLinear1Usage(const ConstraintProto &ct, int c) { } void PresolveContext::AddVariableUsage(int c) { - const ConstraintProto &ct = working_model->constraints(c); + const ConstraintProto& ct = working_model->constraints(c); constraint_to_vars_[c] = UsedVariables(ct); constraint_to_intervals_[c] = UsedIntervals(ct); for (const int v : constraint_to_vars_[c]) { @@ -318,7 +318,7 @@ void PresolveContext::AddVariableUsage(int c) { void PresolveContext::UpdateConstraintVariableUsage(int c) { if (is_unsat) return; DCHECK_EQ(constraint_to_vars_.size(), working_model->constraints_size()); - const ConstraintProto &ct = working_model->constraints(c); + const ConstraintProto& ct = working_model->constraints(c); // We don't optimize the interval usage as this is not super frequent. 
for (const int i : constraint_to_intervals_[c]) interval_usage_[i]--; @@ -328,7 +328,7 @@ void PresolveContext::UpdateConstraintVariableUsage(int c) { // For the variables, we avoid an erase() followed by an insert() for the // variables that didn't change. tmp_new_usage_ = UsedVariables(ct); - const std::vector &old_usage = constraint_to_vars_[c]; + const std::vector& old_usage = constraint_to_vars_[c]; const int old_size = old_usage.size(); int i = 0; for (const int var : tmp_new_usage_) { @@ -413,7 +413,7 @@ bool PresolveContext::ConstraintVariableUsageIsConsistent() { // become usable as boolean, then we have a bug. Because of that, the code // for GetLiteralRepresentative() is not as simple as it should be. bool PresolveContext::AddRelation(int x, int y, int64 c, int64 o, - AffineRelation *repo) { + AffineRelation* repo) { // When the coefficient is larger than one, then if later one variable becomes // Boolean, it must be the representative. if (std::abs(c) != 1) return repo->TryAdd(x, y, c, o); @@ -491,7 +491,7 @@ bool PresolveContext::PropagateAffineRelation(int ref) { } void PresolveContext::RemoveAllVariablesFromAffineRelationConstraint() { - for (auto &ref_map : var_to_constraints_) { + for (auto& ref_map : var_to_constraints_) { ref_map.erase(kAffineRelationConstraint); } } @@ -687,7 +687,7 @@ bool PresolveContext::StoreAbsRelation(int target_ref, int ref) { return true; } -bool PresolveContext::GetAbsRelation(int target_ref, int *ref) { +bool PresolveContext::GetAbsRelation(int target_ref, int* ref) { auto it = abs_relations_.find(target_ref); if (it == abs_relations_.end()) return false; @@ -796,8 +796,8 @@ bool PresolveContext::RemapEncodingMaps() { // Encoding. 
{ - const absl::flat_hash_map &var_map = encoding_[var]; - for (const auto &entry : var_map) { + const absl::flat_hash_map& var_map = encoding_[var]; + for (const auto& entry : var_map) { const int lit = entry.second.Get(this); if (removed_variables_.contains(PositiveRef(lit))) continue; if ((entry.first - r.offset) % r.coeff != 0) continue; @@ -812,9 +812,9 @@ bool PresolveContext::RemapEncodingMaps() { // Eq half encoding. { - const absl::flat_hash_map > &var_map = + const absl::flat_hash_map>& var_map = eq_half_encoding_[var]; - for (const auto &entry : var_map) { + for (const auto& entry : var_map) { if ((entry.first - r.offset) % r.coeff != 0) continue; const int64 rep_value = (entry.first - r.offset) / r.coeff; for (int literal : entry.second) { @@ -830,9 +830,9 @@ bool PresolveContext::RemapEncodingMaps() { // Neq half encoding. { - const absl::flat_hash_map > &var_map = + const absl::flat_hash_map>& var_map = neq_half_encoding_[var]; - for (const auto &entry : var_map) { + for (const auto& entry : var_map) { if ((entry.first - r.offset) % r.coeff != 0) continue; const int64 rep_value = (entry.first - r.offset) / r.coeff; for (int literal : entry.second) { @@ -863,7 +863,7 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { if (is_unsat) return; - absl::flat_hash_map &var_map = encoding_[var]; + absl::flat_hash_map& var_map = encoding_[var]; // Find encoding for min if present. auto min_it = var_map.find(var_min); @@ -946,7 +946,7 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, bool add_constraints) { CHECK(!VariableWasRemoved(literal)); CHECK(!VariableWasRemoved(var)); - absl::flat_hash_map &var_map = encoding_[var]; + absl::flat_hash_map& var_map = encoding_[var]; // Ticky and rare: I have only observed this on the LNS of // radiation_m18_12_05_sat.fzn. 
The value was encoded, but maybe we never @@ -965,10 +965,11 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, // If an encoding already exist, make the two Boolean equals. if (!insert.second) { - UpdateRuleStats("variables: merge equivalent var value encoding literals"); const int previous_literal = insert.first->second.Get(this); CHECK(!VariableWasRemoved(previous_literal)); if (literal != previous_literal) { + UpdateRuleStats( + "variables: merge equivalent var value encoding literals"); StoreBooleanEqualityRelation(literal, previous_literal); } return; @@ -979,8 +980,8 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, } else { VLOG(2) << "Insert lit(" << literal << ") <=> var(" << var << ") == " << value; - // eq_half_encoding_[var][value].insert(literal); - // neq_half_encoding_[var][value].insert(NegatedRef(literal)); + eq_half_encoding_[var][value].insert(literal); + neq_half_encoding_[var][value].insert(NegatedRef(literal)); if (add_constraints) { UpdateRuleStats("variables: add encoding constraint"); AddImplyInDomain(literal, var, Domain(value)); @@ -996,7 +997,7 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Creates the linking sets on demand. // Insert the enforcement literal in the half encoding map. - auto &direct_set = + auto& direct_set = imply_eq ? eq_half_encoding_[var][value] : neq_half_encoding_[var][value]; if (!direct_set.insert(literal).second) return false; // Already there. @@ -1006,7 +1007,7 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, // Note(user): We don't expect a lot of literals in these sets, so doing // a scan should be okay. - auto &other_set = + auto& other_set = imply_eq ? 
neq_half_encoding_[var][value] : eq_half_encoding_[var][value]; for (const int other : other_set) { if (GetLiteralRepresentative(other) != NegatedRef(literal)) continue; @@ -1021,7 +1022,7 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, return true; } -bool PresolveContext::CanonicalizeEncoding(int *ref, int64 *value) { +bool PresolveContext::CanonicalizeEncoding(int* ref, int64* value) { const AffineRelation::Relation r = GetAffineRelation(*ref); if ((*value - r.offset) % r.coeff != 0) return false; *ref = r.representative; @@ -1034,8 +1035,7 @@ void PresolveContext::InsertVarValueEncoding(int literal, int ref, if (!RemapEncodingMaps()) return; if (!CanonicalizeEncoding(&ref, &value)) return; literal = GetLiteralRepresentative(literal); - InsertVarValueEncodingInternal(literal, ref, value, - /*add_constraints=*/true); + InsertVarValueEncodingInternal(literal, ref, value, /*add_constraints=*/true); } bool PresolveContext::StoreLiteralImpliesVarEqValue(int literal, int var, @@ -1054,10 +1054,10 @@ bool PresolveContext::StoreLiteralImpliesVarNEqValue(int literal, int var, return InsertHalfVarValueEncoding(literal, var, value, /*imply_eq=*/false); } -bool PresolveContext::HasVarValueEncoding(int ref, int64 value, int *literal) { +bool PresolveContext::HasVarValueEncoding(int ref, int64 value, int* literal) { if (!RemapEncodingMaps()) return false; if (!CanonicalizeEncoding(&ref, &value)) return false; - const absl::flat_hash_map &var_map = encoding_[ref]; + const absl::flat_hash_map& var_map = encoding_[ref]; const auto it = var_map.find(value); if (it != var_map.end()) { if (literal != nullptr) { @@ -1081,7 +1081,7 @@ int PresolveContext::GetOrCreateVarValueEncoding(int ref, int64 value) { } // Returns the associated literal if already present. 
- absl::flat_hash_map &var_map = encoding_[var]; + absl::flat_hash_map& var_map = encoding_[var]; auto it = var_map.find(value); if (it != var_map.end()) { return it->second.Get(this); @@ -1128,7 +1128,7 @@ int PresolveContext::GetOrCreateVarValueEncoding(int ref, int64 value) { } void PresolveContext::ReadObjectiveFromProto() { - const CpObjectiveProto &obj = working_model->objective(); + const CpObjectiveProto& obj = working_model->objective(); objective_offset = obj.offset(); objective_scaling_factor = obj.scaling_factor(); @@ -1170,14 +1170,14 @@ bool PresolveContext::CanonicalizeObjective() { // one the map while modifying it, it is safer to do a copy rather than to // try to handle that in one pass. tmp_entries.clear(); - for (const auto &entry : objective_map) { + for (const auto& entry : objective_map) { tmp_entries.push_back(entry); } // TODO(user): This is a bit duplicated with the presolve linear code. // We also do not propagate back any domain restriction from the objective to // the variables if any. - for (const auto &entry : tmp_entries) { + for (const auto& entry : tmp_entries) { const int var = entry.first; const auto it = objective_map.find(var); if (it == objective_map.end()) continue; @@ -1238,11 +1238,11 @@ bool PresolveContext::CanonicalizeObjective() { // We need to sort the entries to be deterministic. tmp_entries.clear(); - for (const auto &entry : objective_map) { + for (const auto& entry : objective_map) { tmp_entries.push_back(entry); } std::sort(tmp_entries.begin(), tmp_entries.end()); - for (const auto &entry : tmp_entries) { + for (const auto& entry : tmp_entries) { const int var = entry.first; const int64 coeff = entry.second; gcd = MathUtil::GCD64(gcd, std::abs(coeff)); @@ -1263,7 +1263,7 @@ bool PresolveContext::CanonicalizeObjective() { // Maybe divide by GCD. 
if (gcd > 1) { - for (auto &entry : objective_map) { + for (auto& entry : objective_map) { entry.second /= gcd; } objective_domain = objective_domain.InverseMultiplicationBy(gcd); @@ -1285,7 +1285,7 @@ bool PresolveContext::CanonicalizeObjective() { void PresolveContext::SubstituteVariableInObjective( int var_in_equality, int64 coeff_in_equality, - const ConstraintProto &equality, std::vector *new_vars_in_objective) { + const ConstraintProto& equality, std::vector* new_vars_in_objective) { CHECK(equality.enforcement_literal().empty()); CHECK(RefIsPositive(var_in_equality)); @@ -1308,7 +1308,7 @@ void PresolveContext::SubstituteVariableInObjective( } if (var == var_in_equality) continue; - int64 &map_ref = objective_map[var]; + int64& map_ref = objective_map[var]; if (map_ref == 0 && new_vars_in_objective != nullptr) { new_vars_in_objective->push_back(var); } @@ -1340,27 +1340,27 @@ void PresolveContext::SubstituteVariableInObjective( // (otherwise it would have been removed), the objective domain should be now // constraining. objective_domain_is_constraining = true; -} -void PresolveContext::WriteObjectiveToProto() { if (objective_domain.IsEmpty()) { return (void)NotifyThatModelIsUnsat(); } +} +void PresolveContext::WriteObjectiveToProto() const { // We need to sort the entries to be deterministic. 
- std::vector > entries; - for (const auto &entry : objective_map) { + std::vector> entries; + for (const auto& entry : objective_map) { entries.push_back(entry); } std::sort(entries.begin(), entries.end()); - CpObjectiveProto *mutable_obj = working_model->mutable_objective(); + CpObjectiveProto* mutable_obj = working_model->mutable_objective(); mutable_obj->set_offset(objective_offset); mutable_obj->set_scaling_factor(objective_scaling_factor); FillDomainInProto(objective_domain, mutable_obj); mutable_obj->clear_vars(); mutable_obj->clear_coeffs(); - for (const auto &entry : entries) { + for (const auto& entry : entries) { mutable_obj->add_vars(entry.first); mutable_obj->add_coeffs(entry.second); } diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 39b04d7eb6..f087c95346 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -37,7 +37,7 @@ constexpr int kAssumptionsConstraint = -3; struct PresolveOptions { bool log_info = true; SatParameters parameters; - TimeLimit *time_limit = nullptr; + TimeLimit* time_limit = nullptr; }; class PresolveContext; @@ -50,7 +50,7 @@ class SavedLiteral { public: SavedLiteral() {} explicit SavedLiteral(int ref) : ref_(ref) {} - int Get(PresolveContext *context) const; + int Get(PresolveContext* context) const; private: int ref_ = 0; @@ -61,7 +61,7 @@ class SavedVariable { public: SavedVariable() {} explicit SavedVariable(int ref) : ref_(ref) {} - int Get(PresolveContext *context) const; + int Get(PresolveContext* context) const; private: int ref_ = 0; @@ -71,11 +71,11 @@ class SavedVariable { // in-memory domain of each variables and the constraint variable graph. class PresolveContext { public: - explicit PresolveContext(CpModelProto *model, CpModelProto *mapping) + explicit PresolveContext(CpModelProto* model, CpModelProto* mapping) : working_model(model), mapping_model(mapping) {} // Helpers to adds new variables to the presolved model. 
- int NewIntVar(const Domain &domain); + int NewIntVar(const Domain& domain); int NewBoolVar(); int GetOrCreateConstantVar(int64 cst); @@ -83,7 +83,7 @@ class PresolveContext { void AddImplication(int a, int b); // b => x in [lb, ub]. - void AddImplyInDomain(int b, int x, const Domain &domain); + void AddImplyInDomain(int b, int x, const Domain& domain); // Helpers to query the current domain of a variable. bool DomainIsEmpty(int ref) const; @@ -99,11 +99,11 @@ class PresolveContext { // Helpers to query the current domain of a linear expression. // This doesn't check for integer overflow, but our linear expression // should be such that this cannot happen (tested at validation). - int64 MinOf(const LinearExpressionProto &expr) const; - int64 MaxOf(const LinearExpressionProto &expr) const; + int64 MinOf(const LinearExpressionProto& expr) const; + int64 MaxOf(const LinearExpressionProto& expr) const; // This function takes a positive variable reference. - bool DomainOfVarIsIncludedIn(int var, const Domain &domain) { + bool DomainOfVarIsIncludedIn(int var, const Domain& domain) { return domains[var].IsIncludedIn(domain); } @@ -131,7 +131,7 @@ class PresolveContext { // Returns false if the new domain is empty. Sets 'domain_modified' (if // provided) to true iff the domain is modified otherwise does not change it. ABSL_MUST_USE_RESULT bool IntersectDomainWith( - int ref, const Domain &domain, bool *domain_modified = nullptr); + int ref, const Domain& domain, bool* domain_modified = nullptr); // Returns false if the 'lit' doesn't have the desired value in the domain. ABSL_MUST_USE_RESULT bool SetLiteralToFalse(int lit); @@ -140,7 +140,7 @@ class PresolveContext { // This function always return false. It is just a way to make a little bit // more sure that we abort right away when infeasibility is detected. 
ABSL_MUST_USE_RESULT bool NotifyThatModelIsUnsat( - const std::string &message = "") { + const std::string& message = "") { // TODO(user): Report any explanation for the client in a nicer way? VLOG(1) << "INFEASIBLE: " << message; DCHECK(!is_unsat); @@ -151,7 +151,7 @@ class PresolveContext { // Stores a description of a rule that was just applied to have a summary of // what the presolve did at the end. - void UpdateRuleStats(const std::string &name); + void UpdateRuleStats(const std::string& name); // Updates the constraints <-> variables graph. This needs to be called each // time a constraint is modified. @@ -198,7 +198,7 @@ class PresolveContext { // Stores/Get the relation target_ref = abs(ref); The first function returns // false if it already exist and the second false if it is not present. bool StoreAbsRelation(int target_ref, int ref); - bool GetAbsRelation(int target_ref, int *ref); + bool GetAbsRelation(int target_ref, int* ref); // Returns the representative of a literal. int GetLiteralRepresentative(int ref) const; @@ -250,7 +250,7 @@ class PresolveContext { // Returns true if a literal attached to ref == var exists. // It assigns the corresponding to `literal` if non null. - bool HasVarValueEncoding(int ref, int64 value, int *literal = nullptr); + bool HasVarValueEncoding(int ref, int64 value, int* literal = nullptr); // Stores the fact that literal implies var == value. // It returns true if that information is new. @@ -275,7 +275,7 @@ class PresolveContext { // anything with that variable since it appear in at least two constraints. void ReadObjectiveFromProto(); ABSL_MUST_USE_RESULT bool CanonicalizeObjective(); - void WriteObjectiveToProto(); + void WriteObjectiveToProto() const; // Given a variable defined by the given inequality that also appear in the // objective, remove it from the objective by transferring its cost to other @@ -286,12 +286,12 @@ class PresolveContext { // substitution. 
void SubstituteVariableInObjective( int var_in_equality, int64 coeff_in_equality, - const ConstraintProto &equality, - std::vector *new_vars_in_objective = nullptr); + const ConstraintProto& equality, + std::vector* new_vars_in_objective = nullptr); // Objective getters. - const Domain &ObjectiveDomain() const { return objective_domain; } - const absl::flat_hash_map &ObjectiveMap() const { + const Domain& ObjectiveDomain() const { return objective_domain; } + const absl::flat_hash_map& ObjectiveMap() const { return objective_map; } bool ObjectiveDomainIsConstraining() const { @@ -309,11 +309,11 @@ class PresolveContext { // Important: To properly handle the objective, var_to_constraints[objective] // contains -1 so that if the objective appear in only one constraint, the // constraint cannot be simplified. - const std::vector &ConstraintToVars(int c) const { + const std::vector& ConstraintToVars(int c) const { DCHECK(ConstraintVariableGraphIsUpToDate()); return constraint_to_vars_[c]; } - const absl::flat_hash_set &VarToConstraints(int var) const { + const absl::flat_hash_set& VarToConstraints(int var) const { DCHECK(ConstraintVariableGraphIsUpToDate()); return var_to_constraints_[var]; } @@ -338,11 +338,11 @@ class PresolveContext { // TODO(user): Keeping these extra vector of hash_set seems inefficient. Come // up with a better way to detect if a variable is only constrainted in one // direction. - std::vector > var_to_ub_only_constraints; - std::vector > var_to_lb_only_constraints; + std::vector> var_to_ub_only_constraints; + std::vector> var_to_lb_only_constraints; - CpModelProto *working_model = nullptr; - CpModelProto *mapping_model = nullptr; + CpModelProto* working_model = nullptr; + CpModelProto* mapping_model = nullptr; // Indicate if we are allowed to remove irrelevant feasible solution from the // set of feasible solution. 
For example, if a variable is unused, can we fix @@ -379,10 +379,10 @@ class PresolveContext { private: // Helper to add an affine relation x = c.y + o to the given repository. - bool AddRelation(int x, int y, int64 c, int64 o, AffineRelation *repo); + bool AddRelation(int x, int y, int64 c, int64 o, AffineRelation* repo); void AddVariableUsage(int c); - void UpdateLinear1Usage(const ConstraintProto &ct, int c); + void UpdateLinear1Usage(const ConstraintProto& ct, int c); // Returns true iff the variable is not the representative of an equivalence // class of size at least 2. @@ -396,7 +396,7 @@ class PresolveContext { // // Returns false if ref cannot take the given value (it might not have been // propagated yed). - bool CanonicalizeEncoding(int *ref, int64 *value); + bool CanonicalizeEncoding(int* ref, int64* value); // Inserts an half reified var value encoding (literal => var ==/!= value). // It returns true if the new state is different from the old state. @@ -424,22 +424,22 @@ class PresolveContext { // on large problems (also because the objective is often dense). At the end // we re-convert it to its proto form. absl::flat_hash_map objective_map; - std::vector > tmp_entries; + std::vector> tmp_entries; bool objective_domain_is_constraining = false; Domain objective_domain; double objective_offset; double objective_scaling_factor; // Constraints <-> Variables graph. - std::vector > constraint_to_vars_; - std::vector > var_to_constraints_; + std::vector> constraint_to_vars_; + std::vector> var_to_constraints_; // Number of constraints of the form [lit =>] var in domain. std::vector constraint_to_linear1_var_; std::vector var_to_num_linear1_; // We maintain how many time each interval is used. - std::vector > constraint_to_intervals_; + std::vector> constraint_to_intervals_; std::vector interval_usage_; // Contains abs relation (key = abs(saved_variable)). 
@@ -457,17 +457,15 @@ class PresolveContext { // Contains variables with some encoded value: encoding_[i][v] points // to the literal attached to the value v of the variable i. - absl::flat_hash_map > encoding_; + absl::flat_hash_map> encoding_; // Contains the currently collected half value encodings: // i.e.: literal => var ==/!= value // The state is accumulated (adding x => var == value then !x => var != value) // will deduce that x equivalent to var == value. - absl::flat_hash_map > > + absl::flat_hash_map>> eq_half_encoding_; - absl::flat_hash_map > > + absl::flat_hash_map>> neq_half_encoding_; // This regroups all the affine relations between variables. Note that the diff --git a/ortools/sat/presolve_util.cc b/ortools/sat/presolve_util.cc index 24eabdc7ee..3be0bd6c69 100644 --- a/ortools/sat/presolve_util.cc +++ b/ortools/sat/presolve_util.cc @@ -36,7 +36,7 @@ void DomainDeductions::AddDeduction(int literal_ref, int var, Domain domain) { enforcement_to_vars_[index].push_back(var); } else { // Existing element. - const Domain &old_domain = insert.first->second; + const Domain& old_domain = insert.first->second; if (!old_domain.IsIncludedIn(domain)) { insert.first->second = domain.IntersectionWith(old_domain); something_changed_.Set(index); @@ -44,9 +44,9 @@ void DomainDeductions::AddDeduction(int literal_ref, int var, Domain domain) { } } -std::vector > DomainDeductions::ProcessClause( +std::vector> DomainDeductions::ProcessClause( absl::Span clause) { - std::vector > result; + std::vector> result; // We only need to process this clause if something changed since last time. bool abort = true; @@ -101,8 +101,8 @@ namespace { // 'proto' and copies other terms in 'terms'. 
template int64 GetVarCoeffAndCopyOtherTerms(const int var, - const ProtoWithVarsAndCoeffs &proto, - std::vector > *terms) { + const ProtoWithVarsAndCoeffs& proto, + std::vector>* terms) { bool found = false; int64 var_coeff = 0; const int size = proto.vars().size(); @@ -130,8 +130,8 @@ int64 GetVarCoeffAndCopyOtherTerms(const int var, // Helper method for variable substituion. Sorts and merges the terms in 'terms' // and adds them to 'proto'. template -void SortAndMergeTerms(std::vector > *terms, - ProtoWithVarsAndCoeffs *proto) { +void SortAndMergeTerms(std::vector>* terms, + ProtoWithVarsAndCoeffs* proto) { proto->clear_vars(); proto->clear_coeffs(); std::sort(terms->begin(), terms->end()); @@ -159,8 +159,8 @@ void SortAndMergeTerms(std::vector > *terms, // Adds all the terms from the var definition constraint with given var // coefficient. void AddTermsFromVarDefinition(const int var, const int64 var_coeff, - const ConstraintProto &definition, - std::vector > *terms) { + const ConstraintProto& definition, + std::vector>* terms) { const int definition_size = definition.linear().vars().size(); for (int i = 0; i < definition_size; ++i) { int ref = definition.linear().vars(i); @@ -180,13 +180,13 @@ void AddTermsFromVarDefinition(const int var, const int64 var_coeff, } // namespace void SubstituteVariable(int var, int64 var_coeff_in_definition, - const ConstraintProto &definition, - ConstraintProto *ct) { + const ConstraintProto& definition, + ConstraintProto* ct) { CHECK(RefIsPositive(var)); CHECK_EQ(std::abs(var_coeff_in_definition), 1); // Copy all the terms (except the one refering to var). 
- std::vector > terms; + std::vector> terms; int64 var_coeff = GetVarCoeffAndCopyOtherTerms(var, ct->linear(), &terms); if (var_coeff_in_definition < 0) var_coeff *= -1; diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index 860ed93d34..4d6d07cd4c 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -27,11 +27,11 @@ namespace operations_research { namespace sat { -bool ProbeBooleanVariables(const double deterministic_time_limit, Model *model, +bool ProbeBooleanVariables(const double deterministic_time_limit, Model* model, bool log_info) { - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); const int num_variables = sat_solver->NumVariables(); - auto *implication_graph = model->GetOrCreate(); + auto* implication_graph = model->GetOrCreate(); std::vector bool_vars; for (BooleanVariable b(0); b < num_variables; ++b) { const Literal literal(b, true); @@ -46,18 +46,18 @@ bool ProbeBooleanVariables(const double deterministic_time_limit, Model *model, bool ProbeBooleanVariables(const double deterministic_time_limit, absl::Span bool_vars, - Model *model, bool log_info) { + Model* model, bool log_info) { log_info |= VLOG_IS_ON(1); WallTimer wall_timer; wall_timer.Start(); // Reset the solver in case it was already used. - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); sat_solver->SetAssumptionLevel(0); if (!sat_solver->RestoreSolverToAssumptionLevel()) return false; - auto *time_limit = model->GetOrCreate(); - const auto &assignment = sat_solver->LiteralTrail().Assignment(); + auto* time_limit = model->GetOrCreate(); + const auto& assignment = sat_solver->LiteralTrail().Assignment(); const int initial_num_fixed = sat_solver->LiteralTrail().Index(); const double initial_deterministic_time = @@ -66,15 +66,15 @@ bool ProbeBooleanVariables(const double deterministic_time_limit, // For the new direct implication detected. 
int64 num_new_binary = 0; - std::vector > new_binary_clauses; - auto *implication_graph = model->GetOrCreate(); + std::vector> new_binary_clauses; + auto* implication_graph = model->GetOrCreate(); const int id = implication_graph->PropagatorId(); // This is used to tighten the integer variable bounds. int num_new_holes = 0; int num_new_integer_bounds = 0; - auto *integer_trail = model->Mutable(); - ImpliedBounds *implied_bounds = nullptr; + auto* integer_trail = model->Mutable(); + ImpliedBounds* implied_bounds = nullptr; if (integer_trail != nullptr) { implied_bounds = model->GetOrCreate(); } @@ -89,7 +89,7 @@ bool ProbeBooleanVariables(const double deterministic_time_limit, bool limit_reached = false; int num_probed = 0; - const auto &trail = *(model->Get()); + const auto& trail = *(model->Get()); for (const BooleanVariable b : bool_vars) { const Literal literal(b, true); if (implication_graph->RepresentativeOf(literal) != literal) { @@ -158,20 +158,17 @@ bool ProbeBooleanVariables(const double deterministic_time_limit, if (!sat_solver->FinishPropagation()) return false; } - // We have at most two lower bounds for each variables (one for b==0 and - // one + // We have at most two lower bounds for each variables (one for b==0 and one // for b==1), so the min of the two is a valid level zero bound! More // generally, the domain of a variable can be intersected with the union // of the two propagated domains. This also allow to detect "holes". // // TODO(user): More generally, for any clauses (b or not(b) is one), we - // could probe all the literal inside, and for any integer variable, we - // can + // could probe all the literal inside, and for any integer variable, we can // take the union of the propagated domain as a new domain. // // TODO(user): fix binary variable in the same way? It might not be as - // useful since probing on such variable will also fix it. But then we - // might + // useful since probing on such variable will also fix it. 
But then we might // abort probing early, so it might still be good. std::sort(new_integer_bounds.begin(), new_integer_bounds.end(), [](IntegerLiteral a, IntegerLiteral b) { return a.var < b.var; }); @@ -258,18 +255,18 @@ bool ProbeBooleanVariables(const double deterministic_time_limit, return true; } -bool LookForTrivialSatSolution(double deterministic_time_limit, Model *model, +bool LookForTrivialSatSolution(double deterministic_time_limit, Model* model, bool log_info) { log_info |= VLOG_IS_ON(1); WallTimer wall_timer; wall_timer.Start(); // Reset the solver in case it was already used. - auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); sat_solver->SetAssumptionLevel(0); if (!sat_solver->RestoreSolverToAssumptionLevel()) return false; - auto *time_limit = model->GetOrCreate(); + auto* time_limit = model->GetOrCreate(); const int initial_num_fixed = sat_solver->LiteralTrail().Index(); // Note that this code do not care about the non-Boolean part and just try to @@ -284,7 +281,7 @@ bool LookForTrivialSatSolution(double deterministic_time_limit, Model *model, const int num_times = 1000; bool limit_reached = false; - auto *random = model->GetOrCreate(); + auto* random = model->GetOrCreate(); for (int i = 0; i < num_times; ++i) { if (time_limit->LimitReached() || elapsed_dtime > deterministic_time_limit) { @@ -338,24 +335,24 @@ bool LookForTrivialSatSolution(double deterministic_time_limit, Model *model, return sat_solver->FinishPropagation(); } -bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { +bool FailedLiteralProbingRound(ProbingOptions options, Model* model) { WallTimer wall_timer; wall_timer.Start(); options.log_info |= VLOG_IS_ON(1); // Reset the solver in case it was already used. 
- auto *sat_solver = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); sat_solver->SetAssumptionLevel(0); if (!sat_solver->RestoreSolverToAssumptionLevel()) return false; // When called from Inprocessing, the implication graph should already be a // DAG, so these two calls should return right away. But we do need them to // get the topological order if this is used in isolation. - auto *implication_graph = model->GetOrCreate(); + auto* implication_graph = model->GetOrCreate(); if (!implication_graph->DetectEquivalences()) return false; if (!sat_solver->FinishPropagation()) return false; - auto *time_limit = model->GetOrCreate(); + auto* time_limit = model->GetOrCreate(); const int initial_num_fixed = sat_solver->LiteralTrail().Index(); const double initial_deterministic_time = time_limit->GetElapsedDeterministicTime(); @@ -370,9 +367,9 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { int64 num_new_binary = 0; int64 num_subsumed = 0; - const auto &trail = *(model->Get()); - const auto &assignment = trail.Assignment(); - auto *clause_manager = model->GetOrCreate(); + const auto& trail = *(model->Get()); + const auto& assignment = trail.Assignment(); + auto* clause_manager = model->GetOrCreate(); const int id = implication_graph->PropagatorId(); const int clause_id = clause_manager->PropagatorId(); @@ -381,7 +378,7 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { LiteralIndex literal_index; // kNoLiteralIndex if we need to backtrack. int rank; // Cached position_in_order, we prefer lower positions. 
- bool operator<(const SavedNextLiteral &o) const { return rank < o.rank; } + bool operator<(const SavedNextLiteral& o) const { return rank < o.rank; } }; std::vector queue; gtl::ITIVector position_in_order; @@ -433,7 +430,7 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { const Literal prev_decision = sat_solver->Decisions()[sat_solver->CurrentDecisionLevel() - 1] .literal; - const auto &list = + const auto& list = implication_graph->Implications(prev_decision.Negated()); const int saved_queue_size = queue.size(); for (const Literal l : list) { @@ -499,7 +496,7 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { } else if (next_decision == kNoLiteralIndex) { const int level = sat_solver->CurrentDecisionLevel(); const Literal prev_decision = sat_solver->Decisions()[level - 1].literal; - const auto &list = + const auto& list = implication_graph->Implications(prev_decision.Negated()); // Probe a literal that implies previous decision. @@ -662,7 +659,7 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { // true. So after many propagations, we hope to have such configuration // which is quite cheap to test here. 
if (options.subsume_with_binary_clause) { - for (const auto &w : + for (const auto& w : clause_manager->WatcherListOnFalse(last_decision.Negated())) { if (assignment.LiteralIsTrue(w.blocking_literal)) { if (w.clause->empty()) continue; @@ -681,7 +678,7 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model *model) { implication_graph->AddBinaryClause(last_decision.Negated(), w.blocking_literal); - const auto &info = trail.Info(w.blocking_literal.Variable()); + const auto& info = trail.Info(w.blocking_literal.Variable()); if (info.level > 0) { const Literal d = sat_solver->Decisions()[info.level - 1].literal; if (d != w.blocking_literal) { diff --git a/ortools/sat/pseudo_costs.cc b/ortools/sat/pseudo_costs.cc index 69ac9f54fc..904ed2fca7 100644 --- a/ortools/sat/pseudo_costs.cc +++ b/ortools/sat/pseudo_costs.cc @@ -24,7 +24,7 @@ namespace operations_research { namespace sat { -PseudoCosts::PseudoCosts(Model *model) +PseudoCosts::PseudoCosts(Model* model) : integer_trail_(*model->GetOrCreate()), parameters_(*model->GetOrCreate()) { const int num_vars = integer_trail_.NumIntegerVariables().value(); @@ -51,12 +51,12 @@ void PseudoCosts::UpdateCostForVar(IntegerVariable var, double new_cost) { } void PseudoCosts::UpdateCost( - const std::vector &bound_changes, + const std::vector& bound_changes, const IntegerValue obj_bound_improvement) { DCHECK_GE(obj_bound_improvement, 0); if (obj_bound_improvement == IntegerValue(0)) return; - for (const VariableBoundChange &decision : bound_changes) { + for (const VariableBoundChange& decision : bound_changes) { if (integer_trail_.IsCurrentlyIgnored(decision.var)) continue; if (decision.lower_bound_change > IntegerValue(0)) { @@ -118,11 +118,11 @@ IntegerVariable PseudoCosts::GetBestDecisionVar() { } std::vector GetBoundChanges( - LiteralIndex decision, Model *model) { + LiteralIndex decision, Model* model) { std::vector bound_changes; if (decision == kNoLiteralIndex) return bound_changes; - auto *encoder = 
model->GetOrCreate(); - auto *integer_trail = model->GetOrCreate(); + auto* encoder = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); // NOTE: We ignore negation of equality decisions. for (const IntegerLiteral l : encoder->GetAllIntegerLiterals(Literal(decision))) { diff --git a/ortools/sat/restart.cc b/ortools/sat/restart.cc index 6239c09822..a530bc39fc 100644 --- a/ortools/sat/restart.cc +++ b/ortools/sat/restart.cc @@ -45,7 +45,7 @@ void RestartPolicy::Reset() { if (strategies_.empty()) { const std::vector string_values = absl::StrSplit( parameters_.default_restart_algorithms(), ',', absl::SkipEmpty()); - for (const std::string &string_value : string_values) { + for (const std::string& string_value : string_values) { SatParameters::RestartAlgorithm tmp; #if defined(__PORTABLE_PLATFORM__) if (string_value == "NO_RESTART") { @@ -63,7 +63,7 @@ void RestartPolicy::Reset() { << string_value << "'."; continue; } -#else // __PORTABLE_PLATFORM__ +#else // __PORTABLE_PLATFORM__ if (!SatParameters::RestartAlgorithm_Parse(string_value, &tmp)) { LOG(WARNING) << "Couldn't parse the RestartAlgorithm name: '" << string_value << "'."; diff --git a/ortools/sat/rins.cc b/ortools/sat/rins.cc index a539005155..a1f854f420 100644 --- a/ortools/sat/rins.cc +++ b/ortools/sat/rins.cc @@ -22,20 +22,20 @@ namespace operations_research { namespace sat { -void RecordLPRelaxationValues(Model *model) { - auto *lp_solutions = model->Mutable(); +void RecordLPRelaxationValues(Model* model) { + auto* lp_solutions = model->Mutable(); if (lp_solutions == nullptr) return; - const LPVariables &lp_vars = *model->GetOrCreate(); + const LPVariables& lp_vars = *model->GetOrCreate(); std::vector relaxation_values( lp_vars.model_vars_size, std::numeric_limits::infinity()); - auto *integer_trail = model->GetOrCreate(); - for (const LPVariable &lp_var : lp_vars.vars) { + auto* integer_trail = model->GetOrCreate(); + for (const LPVariable& lp_var : lp_vars.vars) { const IntegerVariable 
positive_var = lp_var.positive_var; if (integer_trail->IsCurrentlyIgnored(positive_var)) continue; - LinearProgrammingConstraint *lp = lp_var.lp; + LinearProgrammingConstraint* lp = lp_var.lp; if (lp == nullptr || !lp->HasSolution()) continue; relaxation_values[lp_var.model_var] = lp->GetSolutionValue(positive_var); @@ -46,7 +46,7 @@ void RecordLPRelaxationValues(Model *model) { namespace { std::vector GetLPRelaxationValues( - const SharedLPSolutionRepository *lp_solutions, random_engine_t *random) { + const SharedLPSolutionRepository* lp_solutions, random_engine_t* random) { std::vector relaxation_values; if (lp_solutions == nullptr || lp_solutions->NumSolutions() == 0) { @@ -65,8 +65,8 @@ std::vector GetLPRelaxationValues( } std::vector GetGeneralRelaxationValues( - const SharedRelaxationSolutionRepository *relaxation_solutions, - random_engine_t *random) { + const SharedRelaxationSolutionRepository* relaxation_solutions, + random_engine_t* random) { std::vector relaxation_values; if (relaxation_solutions == nullptr || @@ -84,7 +84,7 @@ std::vector GetGeneralRelaxationValues( } std::vector GetIncompleteSolutionValues( - SharedIncompleteSolutionManager *incomplete_solutions) { + SharedIncompleteSolutionManager* incomplete_solutions) { std::vector empty_solution_values; if (incomplete_solutions == nullptr || @@ -97,11 +97,11 @@ std::vector GetIncompleteSolutionValues( } // namespace RINSNeighborhood GetRINSNeighborhood( - const SharedResponseManager *response_manager, - const SharedRelaxationSolutionRepository *relaxation_solutions, - const SharedLPSolutionRepository *lp_solutions, - SharedIncompleteSolutionManager *incomplete_solutions, - random_engine_t *random) { + const SharedResponseManager* response_manager, + const SharedRelaxationSolutionRepository* relaxation_solutions, + const SharedLPSolutionRepository* lp_solutions, + SharedIncompleteSolutionManager* incomplete_solutions, + random_engine_t* random) { RINSNeighborhood rins_neighborhood; const bool 
use_only_relaxation_values = diff --git a/ortools/sat/rins.h b/ortools/sat/rins.h index d504796d0c..4131714ea2 100644 --- a/ortools/sat/rins.h +++ b/ortools/sat/rins.h @@ -30,7 +30,7 @@ namespace sat { // Links IntegerVariable with model variable and its lp constraint if any. struct LPVariable { IntegerVariable positive_var = kNoIntegerVariable; - LinearProgrammingConstraint *lp = nullptr; + LinearProgrammingConstraint* lp = nullptr; int model_var; bool operator==(const LPVariable other) const { @@ -54,9 +54,8 @@ struct LPVariables { // relaxation ignore those. struct RINSNeighborhood { // A variable will appear only once and not in both vectors. - std::vector > fixed_vars; - std::vector< - std::pair > > + std::vector> fixed_vars; + std::vector>> reduced_domain_vars; }; @@ -74,14 +73,14 @@ struct RINSNeighborhood { // relaxation value is integer, then we fix the domain of the variable to that // value. RINSNeighborhood GetRINSNeighborhood( - const SharedResponseManager *response_manager, - const SharedRelaxationSolutionRepository *relaxation_solutions, - const SharedLPSolutionRepository *lp_solutions, - SharedIncompleteSolutionManager *incomplete_solutions, - random_engine_t *random); + const SharedResponseManager* response_manager, + const SharedRelaxationSolutionRepository* relaxation_solutions, + const SharedLPSolutionRepository* lp_solutions, + SharedIncompleteSolutionManager* incomplete_solutions, + random_engine_t* random); // Adds the current LP solution to the pool. 
-void RecordLPRelaxationValues(Model *model); +void RecordLPRelaxationValues(Model* model); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/samples/multiple_knapsack_sat.cc b/ortools/sat/samples/multiple_knapsack_sat.cc index 5c7aa732de..bfdbe4fb97 100644 --- a/ortools/sat/samples/multiple_knapsack_sat.cc +++ b/ortools/sat/samples/multiple_knapsack_sat.cc @@ -40,7 +40,7 @@ struct DataModel { const std::vector values = { {10, 30, 25, 50, 35, 30, 15, 40, 30, 35, 45, 10, 20, 30, 25}}; const int num_items = weights.size(); - const int total_value = accumulate(values.begin(), values.end(), 0); + const int total_value = std::accumulate(values.begin(), values.end(), 0); const std::vector kBinCapacities = {{100, 100, 100, 100, 100}}; const int kNumBins = 5; }; diff --git a/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc b/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc index 87468bf30b..49d2875dc3 100644 --- a/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc +++ b/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+ #include #include "ortools/sat/cp_model.h" diff --git a/ortools/sat/sat_decision.cc b/ortools/sat/sat_decision.cc index 92ad3ac88f..45964746bf 100644 --- a/ortools/sat/sat_decision.cc +++ b/ortools/sat/sat_decision.cc @@ -18,7 +18,7 @@ namespace operations_research { namespace sat { -SatDecisionPolicy::SatDecisionPolicy(Model *model) +SatDecisionPolicy::SatDecisionPolicy(Model* model) : parameters_(*(model->GetOrCreate())), trail_(*model->GetOrCreate()), random_(model->GetOrCreate()) {} @@ -260,9 +260,9 @@ void SatDecisionPolicy::SetAssignmentPreference(Literal literal, var_ordering_is_initialized_ = false; } -std::vector > SatDecisionPolicy::AllPreferences() +std::vector> SatDecisionPolicy::AllPreferences() const { - std::vector > prefs; + std::vector> prefs; for (BooleanVariable var(0); var < var_polarity_.size(); ++var) { // TODO(user): we currently assume that if the tie_breaker is zero then // no preference was set (which is not 100% correct). Fix that. @@ -275,8 +275,8 @@ std::vector > SatDecisionPolicy::AllPreferences() } void SatDecisionPolicy::UpdateWeightedSign( - const std::vector &terms, Coefficient rhs) { - for (const LiteralWithCoeff &term : terms) { + const std::vector& terms, Coefficient rhs) { + for (const LiteralWithCoeff& term : terms) { const double weight = static_cast(term.coefficient.value()) / static_cast(rhs.value()); weighted_sign_[term.literal.Variable()] += @@ -285,7 +285,7 @@ void SatDecisionPolicy::UpdateWeightedSign( } void SatDecisionPolicy::BumpVariableActivities( - const std::vector &literals) { + const std::vector& literals) { if (parameters_.use_erwa_heuristic()) { for (const Literal literal : literals) { // Note that we don't really need to bump level 0 variables since they diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index 5493420e7b..15e32eda47 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -288,7 +288,7 @@ bool 
Inprocessing::RemoveFixedAndEquivalentVariables(bool log_info) { clause_manager_->DeleteRemovedClauses(); clause_manager_->DetachAllClauses(); - for (SatClause *clause : clause_manager_->AllClausesInCreationOrder()) { + for (SatClause* clause : clause_manager_->AllClausesInCreationOrder()) { bool removed = false; bool need_rewrite = false; @@ -378,10 +378,10 @@ bool Inprocessing::SubsumeAndStrenghtenRound(bool log_info) { // Process clause by increasing sizes. // TODO(user): probably faster without the size indirection. - std::vector clauses = + std::vector clauses = clause_manager_->AllClausesInCreationOrder(); std::sort(clauses.begin(), clauses.end(), - [](SatClause *a, SatClause *b) { return a->size() < b->size(); }); + [](SatClause* a, SatClause* b) { return a->size() < b->size(); }); // Used to mark clause literals. const LiteralIndex num_literals(sat_solver_->NumVariables() * 2); @@ -389,7 +389,7 @@ bool Inprocessing::SubsumeAndStrenghtenRound(bool log_info) { // Clause index in clauses. // TODO(user): Storing signatures here might be faster? - gtl::ITIVector > one_watcher( + gtl::ITIVector> one_watcher( num_literals.value()); // Clause signatures in the same order as clauses. @@ -397,7 +397,7 @@ bool Inprocessing::SubsumeAndStrenghtenRound(bool log_info) { std::vector candidates_for_removal; for (int clause_index = 0; clause_index < clauses.size(); ++clause_index) { - SatClause *clause = clauses[clause_index]; + SatClause* clause = clauses[clause_index]; // TODO(user): Better abort limit. We could also limit the watcher sizes and // never look at really long clauses. Note that for an easier @@ -647,7 +647,7 @@ void StampingSimplifier::SampleTreeAndFillParent() { // TODO(user): More generally, we could sample a parent while probing so // that we consider all hyper binary implications (in the case we don't add // them to the implication graph already). 
- const auto &children_of_not_l = + const auto& children_of_not_l = implication_graph_->DirectImplications(Literal(i).Negated()); if (children_of_not_l.empty()) continue; for (int num_tries = 0; num_tries < 10; ++num_tries) { @@ -756,14 +756,14 @@ bool StampingSimplifier::ProcessClauses() { bool is_negated; // Correspond to clause[i] or clause[i].Negated(); int start; // Note that all start stamps are different. int end; - bool operator<(const Entry &o) const { return start < o.start; } + bool operator<(const Entry& o) const { return start < o.start; } }; std::vector to_remove; std::vector new_clause; std::vector entries; clause_manager_->DeleteRemovedClauses(); clause_manager_->DetachAllClauses(); - for (SatClause *clause : clause_manager_->AllClausesInCreationOrder()) { + for (SatClause* clause : clause_manager_->AllClausesInCreationOrder()) { const auto span = clause->AsSpan(); if (span.empty()) continue; @@ -797,7 +797,7 @@ bool StampingSimplifier::ProcessClauses() { Entry top_entry; top_entry.end = -1; // Sentinel. to_remove.clear(); - for (const Entry &e : entries) { + for (const Entry& e : entries) { if (e.end < top_entry.end) { // We found an implication: top_entry => this entry. const Literal lhs = top_entry.is_negated ? span[top_entry.i].Negated() @@ -917,7 +917,7 @@ void BlockedClauseSimplifier::InitializeForNewRound() { clauses_.clear(); clause_manager_->DeleteRemovedClauses(); clause_manager_->DetachAllClauses(); - for (SatClause *c : clause_manager_->AllClausesInCreationOrder()) { + for (SatClause* c : clause_manager_->AllClausesInCreationOrder()) { // We ignore redundant clause. This shouldn't cause any validity issue. if (clause_manager_->IsRemovable(c)) continue; @@ -965,7 +965,7 @@ void BlockedClauseSimplifier::ProcessLiteral(Literal current_literal) { // // TODO(user): Make this work in the presence of at most ones. 
int num_binary = 0; - const std::vector &implications = + const std::vector& implications = implication_graph_->DirectImplications(current_literal); for (const Literal l : implications) { if (l == current_literal) continue; @@ -1085,7 +1085,7 @@ bool BoundedVariableElimination::DoOneRound(bool log_info) { clauses_.clear(); clause_manager_->DeleteRemovedClauses(); clause_manager_->DetachAllClauses(); - for (SatClause *c : clause_manager_->AllClausesInCreationOrder()) { + for (SatClause* c : clause_manager_->AllClausesInCreationOrder()) { // We ignore redundant clause. This shouldn't cause any validity issue. // TODO(user): but we shouldn't keep clauses containing removed literals. // It is still valid to do so, but it should be less efficient. @@ -1158,7 +1158,7 @@ bool BoundedVariableElimination::DoOneRound(bool log_info) { // Remove all redundant clause containing a removed literal. This avoid to // re-introduce a removed literal via conflict learning. - for (SatClause *c : clause_manager_->AllClausesInCreationOrder()) { + for (SatClause* c : clause_manager_->AllClausesInCreationOrder()) { if (!clause_manager_->IsRemovable(c)) continue; bool remove = false; for (const Literal l : c->AsSpan()) { @@ -1191,7 +1191,7 @@ bool BoundedVariableElimination::DoOneRound(bool log_info) { } bool BoundedVariableElimination::RemoveLiteralFromClause( - Literal lit, SatClause *sat_clause) { + Literal lit, SatClause* sat_clause) { num_literals_diff_ -= sat_clause->size(); resolvant_.clear(); for (const Literal l : sat_clause->AsSpan()) { @@ -1259,7 +1259,7 @@ void BoundedVariableElimination::UpdatePriorityQueue(BooleanVariable var) { } } -void BoundedVariableElimination::DeleteClause(SatClause *sat_clause) { +void BoundedVariableElimination::DeleteClause(SatClause* sat_clause) { const auto clause = sat_clause->AsSpan(); num_clauses_diff_--; @@ -1289,7 +1289,7 @@ void BoundedVariableElimination::DeleteAllClausesContaining(Literal literal) { } void 
BoundedVariableElimination::AddClause(absl::Span clause) { - SatClause *pt = clause_manager_->InprocessingAddClause(clause); + SatClause* pt = clause_manager_->InprocessingAddClause(clause); if (pt == nullptr) return; num_clauses_diff_++; @@ -1311,9 +1311,9 @@ template bool BoundedVariableElimination::ResolveAllClauseContaining(Literal lit) { const int clause_weight = parameters_.presolve_bve_clause_weight(); - const std::vector &implications = + const std::vector& implications = implication_graph_->DirectImplications(lit); - auto &clause_containing_lit = literal_to_clauses_[lit.Index()]; + auto& clause_containing_lit = literal_to_clauses_[lit.Index()]; for (int i = 0; i < clause_containing_lit.size(); ++i) { const ClauseIndex clause_index = clause_containing_lit[i]; const auto clause = clauses_[clause_index]->AsSpan(); @@ -1351,7 +1351,7 @@ bool BoundedVariableElimination::ResolveAllClauseContaining(Literal lit) { // Resolution with non-binary clauses. if (!with_binary_only && !clause_can_be_simplified) { - auto &clause_containing_not_lit = literal_to_clauses_[lit.NegatedIndex()]; + auto& clause_containing_not_lit = literal_to_clauses_[lit.NegatedIndex()]; for (int j = 0; j < clause_containing_not_lit.size(); ++j) { if (score_only && new_score_ > score_threshold_) break; const ClauseIndex other_index = clause_containing_not_lit[j]; diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 938a6ce279..4b95d4186a 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -21,7 +21,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 173 +// NEXT TAG: 175 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -469,6 +469,14 @@ message SatParameters { // off and any positive value performs substitution. 
optional int32 presolve_substitution_level = 147 [default = 1]; + // If true, we will extract from linear constraints, enforcement literals of + // the form "integer variable at bound => simplified constraint". This should + // always be beneficial except that we don't always handle them as efficiently + // as we could for now. This causes problem on manna81.mps (LP relaxation not + // as tight it seems) and on neos-3354841-apure.mps.gz (too many literals + // created this way). + optional bool presolve_extract_integer_enforcement = 174 [default = false]; + // ========================================================================== // Max-sat parameters // ========================================================================== @@ -777,10 +785,20 @@ message SatParameters { // because the presolve rules only guarantee the existence of one feasible // solution to the presolved problem. // - // TODO(user): Activate the presolve but with just the rules that do not - // change the set of feasible solutions. + // TODO(user): Do not disable the presolve and let the user choose what + // behavior is best by setting keep_all_feasible_solutions_in_presolve. optional bool enumerate_all_solutions = 87 [default = false]; + // If true, we disable the presolve reductions that remove feasible solutions + // from the search space. Such solution are usually dominated by a "better" + // solution that is kept, but depending on the situation, we might want to + // keep all solutions. + // + // A trivial example is when a variable is unused. If this is true, then the + // presolve will not fix it to an arbitrary value and it will stay in the + // search space. + optional bool keep_all_feasible_solutions_in_presolve = 173 [default = false]; + // If true, add information about the derived variable domains to the // CpSolverResponse. 
It is an option because it makes the response slighly // bigger and there is a bit more work involved during the postsolve to diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 64569d3bb8..4dda0e820f 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -39,7 +39,7 @@ SatSolver::SatSolver() : SatSolver(new Model()) { model_->Register(this); } -SatSolver::SatSolver(Model *model) +SatSolver::SatSolver(Model* model) : model_(model), binary_implication_graph_(model->GetOrCreate()), clauses_propagator_(model->GetOrCreate()), @@ -105,12 +105,12 @@ double SatSolver::deterministic_time() const { 1.0 * pb_constraints_->num_inspected_constraint_literals()); } -const SatParameters &SatSolver::parameters() const { +const SatParameters& SatSolver::parameters() const { SCOPED_TIME_STAT(&stats_); return *parameters_; } -void SatSolver::SetParameters(const SatParameters ¶meters) { +void SatSolver::SetParameters(const SatParameters& parameters) { SCOPED_TIME_STAT(&stats_); *parameters_ = parameters; restart_->Reset(); @@ -166,8 +166,7 @@ bool SatSolver::AddUnitClause(Literal true_literal) { if (trail_->Assignment().LiteralIsFalse(true_literal)) return SetModelUnsat(); if (trail_->Assignment().LiteralIsTrue(true_literal)) return true; if (drat_proof_handler_ != nullptr) { - // Note that we will output problem unit clauses twice, but that is a - // small + // Note that we will output problem unit clauses twice, but that is a small // price to pay for having a single variable fixing API. 
drat_proof_handler_->AddClause({true_literal}); } @@ -241,7 +240,7 @@ bool SatSolver::AddProblemClauseInternal(absl::Span literals) { } bool SatSolver::AddLinearConstraintInternal( - const std::vector &cst, Coefficient rhs, + const std::vector& cst, Coefficient rhs, Coefficient max_value) { SCOPED_TIME_STAT(&stats_); DCHECK(BooleanLinearExpressionIsCanonical(cst)); @@ -261,7 +260,7 @@ bool SatSolver::AddLinearConstraintInternal( if (max_value - min_coeff <= rhs) { // This constraint is actually a clause. It is faster to treat it as one. literals_scratchpad_.clear(); - for (const LiteralWithCoeff &term : cst) { + for (const LiteralWithCoeff& term : cst) { literals_scratchpad_.push_back(term.literal.Negated()); } return AddProblemClauseInternal(literals_scratchpad_); @@ -273,7 +272,7 @@ bool SatSolver::AddLinearConstraintInternal( !parameters_->use_pb_resolution() && max_coeff <= rhs && 2 * min_coeff > rhs) { literals_scratchpad_.clear(); - for (const LiteralWithCoeff &term : cst) { + for (const LiteralWithCoeff& term : cst) { literals_scratchpad_.push_back(term.literal); } if (!binary_implication_graph_->AddAtMostOne(literals_scratchpad_)) { @@ -299,7 +298,7 @@ bool SatSolver::AddLinearConstraint(bool use_lower_bound, Coefficient lower_bound, bool use_upper_bound, Coefficient upper_bound, - std::vector *cst) { + std::vector* cst) { SCOPED_TIME_STAT(&stats_); CHECK_EQ(CurrentDecisionLevel(), 0); if (model_is_unsat_) return false; @@ -308,7 +307,7 @@ bool SatSolver::AddLinearConstraint(bool use_lower_bound, Coefficient fixed_variable_shift(0); { int index = 0; - for (const LiteralWithCoeff &term : *cst) { + for (const LiteralWithCoeff& term : *cst) { if (trail_->Assignment().LiteralIsFalse(term.literal)) continue; if (trail_->Assignment().LiteralIsTrue(term.literal)) { CHECK(SafeAddInto(-term.coefficient, &fixed_variable_shift)); @@ -358,7 +357,7 @@ bool SatSolver::AddLinearConstraint(bool use_lower_bound, } int SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( - 
const std::vector &literals, bool is_redundant) { + const std::vector& literals, bool is_redundant) { SCOPED_TIME_STAT(&stats_); if (literals.size() == 1) { @@ -388,7 +387,7 @@ int SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( if (is_redundant && lbd > parameters_->clause_cleanup_lbd_bound()) { --num_learned_clause_before_cleanup_; - SatClause *clause = + SatClause* clause = clauses_propagator_->AddRemovableClause(literals, trail_); // BumpClauseActivity() must be called after clauses_info_[clause] has @@ -401,7 +400,7 @@ int SatSolver::AddLearnedClauseAndEnqueueUnitPropagation( return lbd; } -void SatSolver::AddPropagator(SatPropagator *propagator) { +void SatSolver::AddPropagator(SatPropagator* propagator) { CHECK_EQ(CurrentDecisionLevel(), 0); problem_is_pure_sat_ = false; trail_->RegisterPropagator(propagator); @@ -409,7 +408,7 @@ void SatSolver::AddPropagator(SatPropagator *propagator) { InitializePropagators(); } -void SatSolver::AddLastPropagator(SatPropagator *propagator) { +void SatSolver::AddLastPropagator(SatPropagator* propagator) { CHECK_EQ(CurrentDecisionLevel(), 0); CHECK(last_propagator_ == nullptr); problem_is_pure_sat_ = false; @@ -418,20 +417,20 @@ void SatSolver::AddLastPropagator(SatPropagator *propagator) { InitializePropagators(); } -UpperBoundedLinearConstraint *SatSolver::ReasonPbConstraintOrNull( +UpperBoundedLinearConstraint* SatSolver::ReasonPbConstraintOrNull( BooleanVariable var) const { // It is important to deal properly with "SameReasonAs" variables here. 
var = trail_->ReferenceVarWithSameReason(var); - const AssignmentInfo &info = trail_->Info(var); + const AssignmentInfo& info = trail_->Info(var); if (trail_->AssignmentType(var) == pb_constraints_->PropagatorId()) { return pb_constraints_->ReasonPbConstraint(info.trail_index); } return nullptr; } -SatClause *SatSolver::ReasonClauseOrNull(BooleanVariable var) const { +SatClause* SatSolver::ReasonClauseOrNull(BooleanVariable var) const { DCHECK(trail_->Assignment().VariableIsAssigned(var)); - const AssignmentInfo &info = trail_->Info(var); + const AssignmentInfo& info = trail_->Info(var); if (trail_->AssignmentType(var) == clauses_propagator_->PropagatorId()) { return clauses_propagator_->ReasonClause(info.trail_index); } @@ -456,7 +455,7 @@ void SatSolver::AddBinaryClauseInternal(Literal a, Literal b) { } bool SatSolver::ClauseIsValidUnderDebugAssignement( - const std::vector &clause) const { + const std::vector& clause) const { for (Literal l : clause) { if (l.Variable() >= debug_assignment_.NumberOfVariables() || debug_assignment_.LiteralIsTrue(l)) { @@ -467,7 +466,7 @@ bool SatSolver::ClauseIsValidUnderDebugAssignement( } bool SatSolver::PBConstraintIsValidUnderDebugAssignment( - const std::vector &cst, const Coefficient rhs) const { + const std::vector& cst, const Coefficient rhs) const { Coefficient sum(0.0); for (LiteralWithCoeff term : cst) { if (term.literal.Variable() >= debug_assignment_.NumberOfVariables()) { @@ -484,7 +483,7 @@ namespace { // Returns true iff 'b' is subsumed by 'a' (i.e 'a' is included in 'b'). // This is slow and only meant to be used in DCHECKs. 
-bool ClauseSubsumption(const std::vector &a, SatClause *b) { +bool ClauseSubsumption(const std::vector& a, SatClause* b) { std::vector superset(b->begin(), b->end()); std::vector subset(a.begin(), a.end()); std::sort(superset.begin(), superset.end()); @@ -533,7 +532,7 @@ bool SatSolver::ResetToLevelZero() { } bool SatSolver::ResetWithGivenAssumptions( - const std::vector &assumptions) { + const std::vector& assumptions) { if (!ResetToLevelZero()) return false; // Assuming there is no duplicate in assumptions, but they can be a literal @@ -795,7 +794,7 @@ bool SatSolver::PropagateAndStopAfterOneConflictResolution() { bool is_redundant = true; if (!subsumed_clauses_.empty() && parameters_->subsumption_during_conflict_analysis()) { - for (SatClause *clause : subsumed_clauses_) { + for (SatClause* clause : subsumed_clauses_) { DCHECK(ClauseSubsumption(learned_conflict_, clause)); if (!clauses_propagator_->IsRemovable(clause)) { is_redundant = false; @@ -815,7 +814,7 @@ bool SatSolver::PropagateAndStopAfterOneConflictResolution() { } SatSolver::Status SatSolver::ReapplyDecisionsUpTo( - int max_level, int *first_propagation_index) { + int max_level, int* first_propagation_index) { SCOPED_TIME_STAT(&stats_); int decision_index = current_decision_level_; while (decision_index <= max_level) { @@ -911,7 +910,7 @@ void SatSolver::Backtrack(int target_level) { last_decision_or_backtrack_trail_index_ = trail_->Index(); } -bool SatSolver::AddBinaryClauses(const std::vector &clauses) { +bool SatSolver::AddBinaryClauses(const std::vector& clauses) { SCOPED_TIME_STAT(&stats_); CHECK_EQ(CurrentDecisionLevel(), 0); for (BinaryClause c : clauses) { @@ -925,7 +924,7 @@ bool SatSolver::AddBinaryClauses(const std::vector &clauses) { return true; } -const std::vector &SatSolver::NewlyAddedBinaryClauses() { +const std::vector& SatSolver::NewlyAddedBinaryClauses() { return binary_clauses_.newly_added(); } @@ -941,7 +940,7 @@ int64 NextMultipleOf(int64 value, int64 interval) { } // 
namespace SatSolver::Status SatSolver::ResetAndSolveWithGivenAssumptions( - const std::vector &assumptions) { + const std::vector& assumptions) { SCOPED_TIME_STAT(&stats_); if (!ResetWithGivenAssumptions(assumptions)) return UnsatStatus(); return SolveInternal(time_limit_); @@ -961,7 +960,7 @@ void SatSolver::SetAssumptionLevel(int assumption_level) { assumption_level_ = assumption_level; } -SatSolver::Status SatSolver::SolveWithTimeLimit(TimeLimit *time_limit) { +SatSolver::Status SatSolver::SolveWithTimeLimit(TimeLimit* time_limit) { return SolveInternal(time_limit == nullptr ? time_limit_ : time_limit); } @@ -980,12 +979,12 @@ void SatSolver::KeepAllClauseUsedToInfer(BooleanVariable variable) { --num; const BooleanVariable var = (*trail_)[trail_index].Variable(); - SatClause *clause = ReasonClauseOrNull(var); + SatClause* clause = ReasonClauseOrNull(var); if (clause != nullptr) { clauses_propagator_->mutable_clauses_info()->erase(clause); } for (const Literal l : trail_->Reason(var)) { - const AssignmentInfo &info = trail_->Info(l.Variable()); + const AssignmentInfo& info = trail_->Info(l.Variable()); if (info.level == 0) continue; if (!is_marked[info.trail_index]) { is_marked[info.trail_index] = true; @@ -998,7 +997,7 @@ void SatSolver::KeepAllClauseUsedToInfer(BooleanVariable variable) { // TODO(user): this is really an in-processing stuff and should be moved out // of here. I think the name for that (or similar) technique is called vivify. // Ideally this should be scheduled after other faster in-processing technique. 
-void SatSolver::TryToMinimizeClause(SatClause *clause) { +void SatSolver::TryToMinimizeClause(SatClause* clause) { CHECK_EQ(CurrentDecisionLevel(), 0); ++counters_.minimization_num_clauses; @@ -1111,7 +1110,7 @@ void SatSolver::TryToMinimizeClause(SatClause *clause) { } } -SatSolver::Status SatSolver::SolveInternal(TimeLimit *time_limit) { +SatSolver::Status SatSolver::SolveInternal(TimeLimit* time_limit) { SCOPED_TIME_STAT(&stats_); if (model_is_unsat_) return INFEASIBLE; @@ -1248,7 +1247,7 @@ void SatSolver::MinimizeSomeClauses(int decisions_budget) { const int64 target_num_branches = counters_.num_branches + decisions_budget; while (counters_.num_branches < target_num_branches && (time_limit_ == nullptr || !time_limit_->LimitReached())) { - SatClause *to_minimize = clauses_propagator_->NextClauseToMinimize(); + SatClause* to_minimize = clauses_propagator_->NextClauseToMinimize(); if (to_minimize != nullptr) { TryToMinimizeClause(to_minimize); if (model_is_unsat_) return; @@ -1328,16 +1327,16 @@ std::vector SatSolver::GetLastIncompatibleDecisions() { return unsat_assumptions; } -void SatSolver::BumpReasonActivities(const std::vector &literals) { +void SatSolver::BumpReasonActivities(const std::vector& literals) { SCOPED_TIME_STAT(&stats_); for (const Literal literal : literals) { const BooleanVariable var = literal.Variable(); if (DecisionLevel(var) > 0) { - SatClause *clause = ReasonClauseOrNull(var); + SatClause* clause = ReasonClauseOrNull(var); if (clause != nullptr) { BumpClauseActivity(clause); } else { - UpperBoundedLinearConstraint *pb_constraint = + UpperBoundedLinearConstraint* pb_constraint = ReasonPbConstraintOrNull(var); if (pb_constraint != nullptr) { // TODO(user): Because one pb constraint may propagate many literals, @@ -1349,7 +1348,7 @@ void SatSolver::BumpReasonActivities(const std::vector &literals) { } } -void SatSolver::BumpClauseActivity(SatClause *clause) { +void SatSolver::BumpClauseActivity(SatClause* clause) { // We only bump the 
activity of the clauses that have some info. So if we know // that we will keep a clause forever, we don't need to create its Info. More // than the speed, this allows to limit as much as possible the activity @@ -1394,7 +1393,7 @@ void SatSolver::BumpClauseActivity(SatClause *clause) { void SatSolver::RescaleClauseActivities(double scaling_factor) { SCOPED_TIME_STAT(&stats_); clause_activity_increment_ *= scaling_factor; - for (auto &entry : *clauses_propagator_->mutable_clauses_info()) { + for (auto& entry : *clauses_propagator_->mutable_clauses_info()) { entry.second.activity *= scaling_factor; } } @@ -1404,7 +1403,7 @@ void SatSolver::UpdateClauseActivityIncrement() { clause_activity_increment_ *= 1.0 / parameters_->clause_activity_decay(); } -bool SatSolver::IsConflictValid(const std::vector &literals) { +bool SatSolver::IsConflictValid(const std::vector& literals) { SCOPED_TIME_STAT(&stats_); if (literals.empty()) return false; const int highest_level = DecisionLevel(literals[0].Variable()); @@ -1415,7 +1414,7 @@ bool SatSolver::IsConflictValid(const std::vector &literals) { return true; } -int SatSolver::ComputeBacktrackLevel(const std::vector &literals) { +int SatSolver::ComputeBacktrackLevel(const std::vector& literals) { SCOPED_TIME_STAT(&stats_); DCHECK_GT(CurrentDecisionLevel(), 0); @@ -1439,7 +1438,7 @@ int SatSolver::ComputeBacktrackLevel(const std::vector &literals) { } template -int SatSolver::ComputeLbd(const LiteralList &literals) { +int SatSolver::ComputeLbd(const LiteralList& literals) { SCOPED_TIME_STAT(&stats_); const int limit = parameters_->count_assumption_levels_in_lbd() ? 0 : assumption_level_; @@ -1567,7 +1566,7 @@ void SatSolver::ProcessNewlyFixedVariables() { // We remove the clauses that are always true and the fixed literals from the // others. Note that none of the clause should be all false because we should // have detected a conflict before this is called. 
- for (SatClause *clause : clauses_propagator_->AllClausesInCreationOrder()) { + for (SatClause* clause : clauses_propagator_->AllClausesInCreationOrder()) { if (!clause->IsAttached()) continue; const size_t old_size = clause->size(); @@ -1626,7 +1625,7 @@ bool SatSolver::Propagate() { // and that its Propagate() functions will not abort on the first // propagation to be slightly more efficient. const int old_index = trail_->Index(); - for (SatPropagator *propagator : propagators_) { + for (SatPropagator* propagator : propagators_) { DCHECK(propagator->PropagatePreconditionsAreSatisfied(*trail_)); if (!propagator->Propagate(trail_)) return false; if (trail_->Index() > old_index) break; @@ -1664,22 +1663,22 @@ void SatSolver::InitializePropagators() { } bool SatSolver::PropagationIsDone() const { - for (SatPropagator *propagator : propagators_) { + for (SatPropagator* propagator : propagators_) { if (!propagator->PropagationIsDone(*trail_)) return false; } return true; } bool SatSolver::ResolvePBConflict(BooleanVariable var, - MutableUpperBoundedLinearConstraint *conflict, - Coefficient *slack) { + MutableUpperBoundedLinearConstraint* conflict, + Coefficient* slack) { const int trail_index = trail_->Info(var).trail_index; // This is the slack of the conflict < trail_index DCHECK_EQ(*slack, conflict->ComputeSlackForTrailPrefix(*trail_, trail_index)); // Pseudo-Boolean case. 
- UpperBoundedLinearConstraint *pb_reason = ReasonPbConstraintOrNull(var); + UpperBoundedLinearConstraint* pb_reason = ReasonPbConstraintOrNull(var); if (pb_reason != nullptr) { pb_reason->ResolvePBConflict(*trail_, var, conflict, slack); return false; @@ -1754,14 +1753,14 @@ void SatSolver::EnqueueNewDecision(Literal literal) { void SatSolver::Untrail(int target_trail_index) { SCOPED_TIME_STAT(&stats_); DCHECK_LT(target_trail_index, trail_->Index()); - for (SatPropagator *propagator : propagators_) { + for (SatPropagator* propagator : propagators_) { propagator->Untrail(*trail_, target_trail_index); } decision_policy_->Untrail(target_trail_index); trail_->Untrail(target_trail_index); } -std::string SatSolver::DebugString(const SatClause &clause) const { +std::string SatSolver::DebugString(const SatClause& clause) const { std::string result; for (const Literal literal : clause) { if (!result.empty()) { @@ -1791,9 +1790,9 @@ int SatSolver::ComputeMaxTrailIndex(absl::Span clause) const { // http://www.cs.tau.ac.il/~msagiv/courses/ATP/iccad2001_final.pdf // http://gauss.ececs.uc.edu/SAT/articles/FAIA185-0131.pdf void SatSolver::ComputeFirstUIPConflict( - int max_trail_index, std::vector *conflict, - std::vector *reason_used_to_infer_the_conflict, - std::vector *subsumed_clauses) { + int max_trail_index, std::vector* conflict, + std::vector* reason_used_to_infer_the_conflict, + std::vector* subsumed_clauses) { SCOPED_TIME_STAT(&stats_); // This will be used to mark all the literals inspected while we process the @@ -1829,7 +1828,7 @@ void SatSolver::ComputeFirstUIPConflict( // This last literal will be the first UIP because by definition all the // propagation done at the current level will pass though it at some point. 
absl::Span clause_to_expand = trail_->FailingClause(); - SatClause *sat_clause = trail_->FailingSatClause(); + SatClause* sat_clause = trail_->FailingSatClause(); DCHECK(!clause_to_expand.empty()); int num_literal_at_highest_level_that_needs_to_be_processed = 0; while (true) { @@ -1913,8 +1912,8 @@ void SatSolver::ComputeFirstUIPConflict( } } -void SatSolver::ComputeUnionOfReasons(const std::vector &input, - std::vector *literals) { +void SatSolver::ComputeUnionOfReasons(const std::vector& input, + std::vector* literals) { tmp_mark_.ClearAndResize(num_variables_); literals->clear(); for (const Literal l : input) tmp_mark_.Set(l.Variable()); @@ -1933,8 +1932,8 @@ void SatSolver::ComputeUnionOfReasons(const std::vector &input, // TODO(user): Remove the literals assigned at level 0. void SatSolver::ComputePBConflict(int max_trail_index, Coefficient initial_slack, - MutableUpperBoundedLinearConstraint *conflict, - int *pb_backjump_level) { + MutableUpperBoundedLinearConstraint* conflict, + int* pb_backjump_level) { SCOPED_TIME_STAT(&stats_); int trail_index = max_trail_index; @@ -2089,8 +2088,8 @@ void SatSolver::ComputePBConflict(int max_trail_index, } void SatSolver::MinimizeConflict( - std::vector *conflict, - std::vector *reason_used_to_infer_the_conflict) { + std::vector* conflict, + std::vector* reason_used_to_infer_the_conflict) { SCOPED_TIME_STAT(&stats_); const int old_size = conflict->size(); @@ -2123,7 +2122,7 @@ void SatSolver::MinimizeConflict( // Note that because of the assignement structure, there is no need to process // the literals of the conflict in order. While exploring the reason for a // literal assignement, there will be no cycles. 
-void SatSolver::MinimizeConflictSimple(std::vector *conflict) { +void SatSolver::MinimizeConflictSimple(std::vector* conflict) { SCOPED_TIME_STAT(&stats_); const int current_level = CurrentDecisionLevel(); @@ -2161,7 +2160,7 @@ void SatSolver::MinimizeConflictSimple(std::vector *conflict) { // can be infered from the conflict variables alone, or if we show that this is // not the case. The result of any variable expension will be cached in order // not to be expended again. -void SatSolver::MinimizeConflictRecursively(std::vector *conflict) { +void SatSolver::MinimizeConflictRecursively(std::vector* conflict) { SCOPED_TIME_STAT(&stats_); // is_marked_ will contains all the conflict literals plus the literals that @@ -2350,8 +2349,8 @@ struct WeightedVariable { // Lexical order, by larger weight, then by smaller variable number // to break ties struct VariableWithLargerWeightFirst { - bool operator()(const WeightedVariable &wv1, - const WeightedVariable &wv2) const { + bool operator()(const WeightedVariable& wv1, + const WeightedVariable& wv2) const { return (wv1.weight > wv2.weight || (wv1.weight == wv2.weight && wv1.var < wv2.var)); } @@ -2369,7 +2368,7 @@ struct VariableWithLargerWeightFirst { // they just do MinimizeConflictRecursively() with a different implementation. // Note that their behavior also make more sense with the way they (and we) bump // the variable activities. -void SatSolver::MinimizeConflictExperimental(std::vector *conflict) { +void SatSolver::MinimizeConflictExperimental(std::vector* conflict) { SCOPED_TIME_STAT(&stats_); // First, sort the variables in the conflict by decreasing decision levels. @@ -2453,10 +2452,10 @@ void SatSolver::CleanClauseDatabaseIfNeeded() { // Creates a list of clauses that can be deleted. Note that only the clauses // that appear in clauses_info can potentially be removed. 
- typedef std::pair Entry; + typedef std::pair Entry; std::vector entries; - auto &clauses_info = *(clauses_propagator_->mutable_clauses_info()); - for (auto &entry : clauses_info) { + auto& clauses_info = *(clauses_propagator_->mutable_clauses_info()); + for (auto& entry : clauses_info) { if (ClauseIsUsedAsReason(entry.first)) continue; if (entry.second.protected_during_next_cleanup) { entry.second.protected_during_next_cleanup = false; @@ -2469,7 +2468,7 @@ void SatSolver::CleanClauseDatabaseIfNeeded() { if (parameters_->clause_cleanup_ordering() == SatParameters::CLAUSE_LBD) { // Order the clauses by decreasing LBD and then increasing activity. std::sort(entries.begin(), entries.end(), - [](const Entry &a, const Entry &b) { + [](const Entry& a, const Entry& b) { if (a.second.lbd == b.second.lbd) { return a.second.activity < b.second.activity; } @@ -2478,7 +2477,7 @@ void SatSolver::CleanClauseDatabaseIfNeeded() { } else { // Order the clauses by increasing activity and then decreasing LBD. std::sort(entries.begin(), entries.end(), - [](const Entry &a, const Entry &b) { + [](const Entry& a, const Entry& b) { if (a.second.activity == b.second.activity) { return a.second.lbd > b.second.lbd; } @@ -2495,16 +2494,16 @@ void SatSolver::CleanClauseDatabaseIfNeeded() { // deterministic (pointer keys), we also keep all the clauses which have the // same LBD and activity as the last one so the behavior is deterministic. 
while (num_deleted_clauses > 0) { - const ClauseInfo &a = entries[num_deleted_clauses].second; - const ClauseInfo &b = entries[num_deleted_clauses - 1].second; + const ClauseInfo& a = entries[num_deleted_clauses].second; + const ClauseInfo& b = entries[num_deleted_clauses - 1].second; if (a.activity != b.activity || a.lbd != b.lbd) break; --num_deleted_clauses; ++num_kept_clauses; } if (num_deleted_clauses > 0) { entries.resize(num_deleted_clauses); - for (const Entry &entry : entries) { - SatClause *clause = entry.first; + for (const Entry& entry : entries) { + SatClause* clause = entry.first; counters_.num_literals_forgotten += clause->size(); clauses_propagator_->LazyDetach(clause); } @@ -2540,7 +2539,7 @@ std::string SatStatusString(SatSolver::Status status) { return "UNKNOWN"; } -void MinimizeCore(SatSolver *solver, std::vector *core) { +void MinimizeCore(SatSolver* solver, std::vector* core) { std::vector temp = *core; std::reverse(temp.begin(), temp.end()); solver->Backtrack(0); diff --git a/ortools/sat/scheduling_constraints.cc b/ortools/sat/scheduling_constraints.cc index 102f26f2d9..a162ac9bcd 100644 --- a/ortools/sat/scheduling_constraints.cc +++ b/ortools/sat/scheduling_constraints.cc @@ -23,9 +23,9 @@ class SelectedMinPropagator : public PropagatorInterface { public: explicit SelectedMinPropagator(Literal enforcement_literal, IntegerVariable target, - const std::vector &vars, - const std::vector &selectors, - Model *model) + const std::vector& vars, + const std::vector& selectors, + Model* model) : enforcement_literal_(enforcement_literal), target_(target), vars_(vars), @@ -35,16 +35,16 @@ class SelectedMinPropagator : public PropagatorInterface { precedences_(model->GetOrCreate()), true_literal_(model->GetOrCreate()->GetTrueLiteral()) {} bool Propagate() final; - int RegisterWith(GenericLiteralWatcher *watcher); + int RegisterWith(GenericLiteralWatcher* watcher); private: const Literal enforcement_literal_; const IntegerVariable target_; const 
std::vector vars_; const std::vector selectors_; - Trail *trail_; - IntegerTrail *integer_trail_; - PrecedencesPropagator *precedences_; + Trail* trail_; + IntegerTrail* integer_trail_; + PrecedencesPropagator* precedences_; const Literal true_literal_; std::vector literal_reason_; @@ -54,7 +54,7 @@ class SelectedMinPropagator : public PropagatorInterface { }; bool SelectedMinPropagator::Propagate() { - const VariablesAssignment &assignment = trail_->Assignment(); + const VariablesAssignment& assignment = trail_->Assignment(); // helpers. const auto add_var_non_selection_to_reason = [&](int i) { @@ -253,7 +253,7 @@ bool SelectedMinPropagator::Propagate() { return true; } -int SelectedMinPropagator::RegisterWith(GenericLiteralWatcher *watcher) { +int SelectedMinPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); for (int t = 0; t < vars_.size(); ++t) { watcher->WatchLowerBound(vars_[t], id); @@ -266,12 +266,12 @@ int SelectedMinPropagator::RegisterWith(GenericLiteralWatcher *watcher) { return id; } -std::function EqualMinOfSelectedVariables( +std::function EqualMinOfSelectedVariables( Literal enforcement_literal, IntegerVariable target, - const std::vector &vars, - const std::vector &selectors) { + const std::vector& vars, + const std::vector& selectors) { CHECK_EQ(vars.size(), selectors.size()); - return [=](Model *model) { + return [=](Model* model) { // If both a variable is selected and the enforcement literal is true, then // the var is always greater than the target. for (int i = 0; i < vars.size(); ++i) { @@ -281,19 +281,19 @@ std::function EqualMinOfSelectedVariables( } // Add the dedicated propagator. 
- SelectedMinPropagator *constraint = new SelectedMinPropagator( + SelectedMinPropagator* constraint = new SelectedMinPropagator( enforcement_literal, target, vars, selectors, model); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); }; } -std::function EqualMaxOfSelectedVariables( +std::function EqualMaxOfSelectedVariables( Literal enforcement_literal, IntegerVariable target, - const std::vector &vars, - const std::vector &selectors) { + const std::vector& vars, + const std::vector& selectors) { CHECK_EQ(vars.size(), selectors.size()); - return [=](Model *model) { + return [=](Model* model) { std::vector negations; for (const IntegerVariable var : vars) { negations.push_back(NegationOf(var)); @@ -303,10 +303,10 @@ std::function EqualMaxOfSelectedVariables( }; } -std::function SpanOfIntervals( - IntervalVariable span, const std::vector &intervals) { - return [=](Model *model) { - SatSolver *sat_solver = model->GetOrCreate(); +std::function SpanOfIntervals( + IntervalVariable span, const std::vector& intervals) { + return [=](Model* model) { + SatSolver* sat_solver = model->GetOrCreate(); SchedulingConstraintHelper task_helper(intervals, model); SchedulingConstraintHelper target_helper({span}, model); diff --git a/ortools/sat/simplification.cc b/ortools/sat/simplification.cc index a05f0e8087..8d120dad59 100644 --- a/ortools/sat/simplification.cc +++ b/ortools/sat/simplification.cc @@ -48,7 +48,7 @@ void SatPostsolver::Add(Literal x, absl::Span clause) { DCHECK(std::find(clause.begin(), clause.end(), x) != clause.end()); associated_literal_.push_back(ApplyReverseMapping(x)); clauses_start_.push_back(clauses_literals_.size()); - for (const Literal &l : clause) { + for (const Literal& l : clause) { clauses_literals_.push_back(ApplyReverseMapping(l)); } } @@ -59,7 +59,7 @@ void SatPostsolver::FixVariable(Literal x) { } void SatPostsolver::ApplyMapping( - const gtl::ITIVector &mapping) { + const gtl::ITIVector& mapping) { gtl::ITIVector 
new_mapping; if (reverse_mapping_.size() < mapping.size()) { // We have new variables. @@ -95,7 +95,7 @@ Literal SatPostsolver::ApplyReverseMapping(Literal l) { return result; } -void SatPostsolver::Postsolve(VariablesAssignment *assignment) const { +void SatPostsolver::Postsolve(VariablesAssignment* assignment) const { // First, we set all unassigned variable to true. // This will be a valid assignment of the presolved problem. for (BooleanVariable var(0); var < assignment->NumberOfVariables(); ++var) { @@ -126,7 +126,7 @@ void SatPostsolver::Postsolve(VariablesAssignment *assignment) const { } std::vector SatPostsolver::ExtractAndPostsolveSolution( - const SatSolver &solver) { + const SatSolver& solver) { std::vector solution(solver.NumVariables()); for (BooleanVariable var(0); var < solver.NumVariables(); ++var) { DCHECK(solver.Assignment().VariableIsAssigned(var)); @@ -137,7 +137,7 @@ std::vector SatPostsolver::ExtractAndPostsolveSolution( } std::vector SatPostsolver::PostsolveSolution( - const std::vector &solution) { + const std::vector& solution) { for (BooleanVariable var(0); var < solution.size(); ++var) { DCHECK_LT(var, reverse_mapping_.size()); DCHECK_NE(reverse_mapping_[var], kNoBooleanVariable); @@ -165,7 +165,7 @@ void SatPresolver::AddClause(absl::Span clause) { clause_to_process_.push_back(ci); bool changed = false; - std::vector &clause_ref = clauses_.back(); + std::vector& clause_ref = clauses_.back(); if (!equiv_mapping_.empty()) { for (int i = 0; i < clause_ref.size(); ++i) { const Literal old_literal = clause_ref[i]; @@ -220,7 +220,7 @@ void SatPresolver::SetNumVariables(int num_variables) { } } -void SatPresolver::AddClauseInternal(std::vector *clause) { +void SatPresolver::AddClauseInternal(std::vector* clause) { if (drat_proof_handler_ != nullptr) drat_proof_handler_->AddClause(*clause); DCHECK(std::is_sorted(clause->begin(), clause->end())); @@ -257,7 +257,7 @@ gtl::ITIVector SatPresolver::VariableMapping() return result; } -void 
SatPresolver::LoadProblemIntoSatSolver(SatSolver *solver) { +void SatPresolver::LoadProblemIntoSatSolver(SatSolver* solver) { // Cleanup some memory that is not needed anymore. Note that we do need // literal_to_clause_sizes_ for VariableMapping() to work. var_pq_.Clear(); @@ -276,7 +276,7 @@ void SatPresolver::LoadProblemIntoSatSolver(SatSolver *solver) { std::vector temp; solver->SetNumVariables(new_size); - for (std::vector &clause_ref : clauses_) { + for (std::vector& clause_ref : clauses_) { temp.clear(); for (Literal l : clause_ref) { DCHECK_NE(mapping[l.Variable()], kNoBooleanVariable); @@ -322,7 +322,7 @@ bool SatPresolver::Presolve() { return Presolve(can_be_removed); } -bool SatPresolver::Presolve(const std::vector &can_be_removed, +bool SatPresolver::Presolve(const std::vector& can_be_removed, bool log_info) { log_info |= VLOG_IS_ON(1); @@ -397,7 +397,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) { flattened_p_.clear(); for (const ClauseIndex c : m_cls_) { - const std::vector &clause = clauses_[c]; + const std::vector& clause = clauses_[c]; if (clause.empty()) continue; // It has been deleted. // Find a literal different from l that occur in the less number of @@ -485,7 +485,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) { for (const ClauseIndex ci : m_cls_) { tmp_new_clause_ = clauses_[ci]; DCHECK(!tmp_new_clause_.empty()); - for (Literal &ref : tmp_new_clause_) { + for (Literal& ref : tmp_new_clause_) { if (ref.Index() == l) { ref = Literal(x_false); break; @@ -506,7 +506,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) { // reduction occur, whereas the start of this function occur all the time, so // we want it to be as fast as possible. 
for (const ClauseIndex c : m_cls_) { - const std::vector &clause = clauses_[c]; + const std::vector& clause = clauses_[c]; DCHECK(!clause.empty()); const LiteralIndex l_min = FindLiteralWithShortestOccurrenceListExcluding(clause, Literal(l)); @@ -549,7 +549,7 @@ uint64 SatPresolver::ComputeSignatureOfClauseVariables(ClauseIndex ci) { // clauses_[clause_index] negated. bool SatPresolver::ProcessClauseToSimplifyOthersUsingLiteral( ClauseIndex clause_index, Literal lit) { - const std::vector &clause = clauses_[clause_index]; + const std::vector& clause = clauses_[clause_index]; const uint64 clause_signature = signatures_[clause_index]; LiteralIndex opposite_literal; @@ -607,7 +607,7 @@ bool SatPresolver::ProcessClauseToSimplifyOthersUsingLiteral( if (need_cleaning) { int new_index = 0; - auto &occurrence_list_ref = literal_to_clauses_[lit.Index()]; + auto& occurrence_list_ref = literal_to_clauses_[lit.Index()]; for (const ClauseIndex ci : occurrence_list_ref) { if (signatures_[ci] != 0) occurrence_list_ref[new_index++] = ci; } @@ -622,7 +622,7 @@ bool SatPresolver::ProcessClauseToSimplifyOthersUsingLiteral( // more efficiently for them. For instance, we could just take the intersection // of two sorted lists to get the simplified clauses. bool SatPresolver::ProcessClauseToSimplifyOthers(ClauseIndex clause_index) { - const std::vector &clause = clauses_[clause_index]; + const std::vector& clause = clauses_[clause_index]; if (clause.empty()) return true; DCHECK(std::is_sorted(clause.begin(), clause.end())); @@ -646,7 +646,7 @@ bool SatPresolver::ProcessClauseToSimplifyOthers(ClauseIndex clause_index) { // Treat the clauses containing lit.Negated(). 
int new_index = 0; bool something_removed = false; - auto &occurrence_list_ref = literal_to_clauses_[lit.NegatedIndex()]; + auto& occurrence_list_ref = literal_to_clauses_[lit.NegatedIndex()]; const uint64 clause_signature = signatures_[clause_index]; for (const ClauseIndex ci : occurrence_list_ref) { const uint64 ci_signature = signatures_[ci]; @@ -807,7 +807,7 @@ void SatPresolver::RemoveAndRegisterForPostsolve(ClauseIndex ci, Literal x) { } Literal SatPresolver::FindLiteralWithShortestOccurrenceList( - const std::vector &clause) { + const std::vector& clause) { DCHECK(!clause.empty()); Literal result = clause.front(); int best_size = literal_to_clause_sizes_[result.Index()]; @@ -822,7 +822,7 @@ Literal SatPresolver::FindLiteralWithShortestOccurrenceList( } LiteralIndex SatPresolver::FindLiteralWithShortestOccurrenceListExcluding( - const std::vector &clause, Literal to_exclude) { + const std::vector& clause, Literal to_exclude) { DCHECK(!clause.empty()); LiteralIndex result = kNoLiteralIndex; int num_occurrences = std::numeric_limits::max(); @@ -838,7 +838,7 @@ LiteralIndex SatPresolver::FindLiteralWithShortestOccurrenceListExcluding( void SatPresolver::UpdatePriorityQueue(BooleanVariable var) { if (var_pq_elements_.empty()) return; // not initialized. 
- PQElement *element = &var_pq_elements_[var]; + PQElement* element = &var_pq_elements_[var]; element->weight = literal_to_clause_sizes_[Literal(var, true).Index()] + literal_to_clause_sizes_[Literal(var, false).Index()]; if (var_pq_.Contains(element)) { @@ -852,7 +852,7 @@ void SatPresolver::InitializePriorityQueue() { const int num_vars = NumVariables(); var_pq_elements_.resize(num_vars); for (BooleanVariable var(0); var < num_vars; ++var) { - PQElement *element = &var_pq_elements_[var]; + PQElement* element = &var_pq_elements_[var]; element->variable = var; element->weight = literal_to_clause_sizes_[Literal(var, true).Index()] + literal_to_clause_sizes_[Literal(var, false).Index()]; @@ -863,7 +863,7 @@ void SatPresolver::InitializePriorityQueue() { void SatPresolver::UpdateBvaPriorityQueue(LiteralIndex lit) { if (bva_pq_elements_.empty()) return; // not initialized. DCHECK_LT(lit, bva_pq_elements_.size()); - BvaPqElement *element = &bva_pq_elements_[lit.value()]; + BvaPqElement* element = &bva_pq_elements_[lit.value()]; element->weight = literal_to_clause_sizes_[lit]; if (bva_pq_.Contains(element)) { bva_pq_.NoteChangedPriority(element); @@ -873,7 +873,7 @@ void SatPresolver::UpdateBvaPriorityQueue(LiteralIndex lit) { void SatPresolver::AddToBvaPriorityQueue(LiteralIndex lit) { if (bva_pq_elements_.empty()) return; // not initialized. 
DCHECK_LT(lit, bva_pq_elements_.size()); - BvaPqElement *element = &bva_pq_elements_[lit.value()]; + BvaPqElement* element = &bva_pq_elements_[lit.value()]; element->weight = literal_to_clause_sizes_[lit]; DCHECK(!bva_pq_.Contains(element)); if (element->weight > 2) bva_pq_.Add(element); @@ -884,7 +884,7 @@ void SatPresolver::InitializeBvaPriorityQueue() { bva_pq_.Clear(); bva_pq_elements_.assign(num_literals, BvaPqElement()); for (LiteralIndex lit(0); lit < num_literals; ++lit) { - BvaPqElement *element = &bva_pq_elements_[lit.value()]; + BvaPqElement* element = &bva_pq_elements_[lit.value()]; element->literal = lit; element->weight = literal_to_clause_sizes_[lit]; @@ -898,7 +898,7 @@ void SatPresolver::DisplayStats(double elapsed_seconds) { int num_literals = 0; int num_clauses = 0; int num_singleton_clauses = 0; - for (const std::vector &c : clauses_) { + for (const std::vector& c : clauses_) { if (!c.empty()) { if (c.size() == 1) ++num_singleton_clauses; ++num_clauses; @@ -927,9 +927,9 @@ void SatPresolver::DisplayStats(double elapsed_seconds) { << " singleton_clauses:" << num_singleton_clauses; } -bool SimplifyClause(const std::vector &a, std::vector *b, - LiteralIndex *opposite_literal, - int64 *num_inspected_literals) { +bool SimplifyClause(const std::vector& a, std::vector* b, + LiteralIndex* opposite_literal, + int64* num_inspected_literals) { if (b->size() < a.size()) return false; DCHECK(std::is_sorted(a.begin(), a.end())); DCHECK(std::is_sorted(b->begin(), b->end())); @@ -975,8 +975,8 @@ bool SimplifyClause(const std::vector &a, std::vector *b, return true; } -LiteralIndex DifferAtGivenLiteral(const std::vector &a, - const std::vector &b, Literal l) { +LiteralIndex DifferAtGivenLiteral(const std::vector& a, + const std::vector& b, Literal l) { DCHECK_EQ(b.size(), a.size()); DCHECK(std::is_sorted(a.begin(), a.end())); DCHECK(std::is_sorted(b.begin(), b.end())); @@ -1009,9 +1009,9 @@ LiteralIndex DifferAtGivenLiteral(const std::vector &a, return result; } 
-bool ComputeResolvant(Literal x, const std::vector &a, - const std::vector &b, - std::vector *out) { +bool ComputeResolvant(Literal x, const std::vector& a, + const std::vector& b, + std::vector* out) { DCHECK(std::is_sorted(a.begin(), a.end())); DCHECK(std::is_sorted(b.begin(), b.end())); @@ -1044,8 +1044,8 @@ bool ComputeResolvant(Literal x, const std::vector &a, } // Note that this function takes a big chunk of the presolve running time. -int ComputeResolvantSize(Literal x, const std::vector &a, - const std::vector &b) { +int ComputeResolvantSize(Literal x, const std::vector& a, + const std::vector& b) { DCHECK(std::is_sorted(a.begin(), a.end())); DCHECK(std::is_sorted(b.begin(), b.end())); @@ -1081,7 +1081,7 @@ int ComputeResolvantSize(Literal x, const std::vector &a, // for a deterministic time limit. class PropagationGraph { public: - PropagationGraph(double deterministic_time_limit, SatSolver *solver) + PropagationGraph(double deterministic_time_limit, SatSolver* solver) : solver_(solver), deterministic_time_limit(solver->deterministic_time() + deterministic_time_limit) {} @@ -1089,7 +1089,7 @@ class PropagationGraph { // Returns the set of node adjacent to the given one. // Interface needed by FindStronglyConnectedComponents(), note that it needs // to be const. 
- const std::vector &operator[](int32 index) const { + const std::vector& operator[](int32 index) const { scratchpad_.clear(); solver_->Backtrack(0); @@ -1119,16 +1119,16 @@ class PropagationGraph { private: mutable std::vector scratchpad_; - SatSolver *const solver_; + SatSolver* const solver_; const double deterministic_time_limit; DISALLOW_COPY_AND_ASSIGN(PropagationGraph); }; void ProbeAndFindEquivalentLiteral( - SatSolver *solver, SatPostsolver *postsolver, - DratProofHandler *drat_proof_handler, - gtl::ITIVector *mapping) { + SatSolver* solver, SatPostsolver* postsolver, + DratProofHandler* drat_proof_handler, + gtl::ITIVector* mapping) { WallTimer timer; timer.Start(); @@ -1139,7 +1139,7 @@ void ProbeAndFindEquivalentLiteral( PropagationGraph graph( solver->parameters().presolve_probing_deterministic_time_limit(), solver); const int32 size = solver->NumVariables() * 2; - std::vector > scc; + std::vector> scc; FindStronglyConnectedComponents(size, graph, &scc); // We have no guarantee that the cycle of x and not(x) touch the same @@ -1152,7 +1152,7 @@ void ProbeAndFindEquivalentLiteral( // // Because of this, we "merge" the cycles. MergingPartition partition(size); - for (const std::vector &component : scc) { + for (const std::vector& component : scc) { if (component.size() > 1) { if (mapping->empty()) mapping->resize(size, LiteralIndex(-1)); const Literal representative((LiteralIndex(component[0]))); @@ -1187,7 +1187,7 @@ void ProbeAndFindEquivalentLiteral( // // TODO(user): Fixing a variable might fix more of them by propagation, so // we might not fix everything possible with these loops. 
- const VariablesAssignment &assignment = solver->Assignment(); + const VariablesAssignment& assignment = solver->Assignment(); for (LiteralIndex i(0); i < size; ++i) { const LiteralIndex rep(partition.GetRootAndCompressPath(i.value())); if (assignment.LiteralIsAssigned(Literal(i)) && @@ -1243,10 +1243,10 @@ void ProbeAndFindEquivalentLiteral( << solver->NumVariables() << " wtime: " << timer.Get(); } -SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, - TimeLimit *time_limit, - std::vector *solution, - DratProofHandler *drat_proof_handler) { +SatSolver::Status SolveWithPresolve(std::unique_ptr* solver, + TimeLimit* time_limit, + std::vector* solution, + DratProofHandler* drat_proof_handler) { // We save the initial parameters. const SatParameters parameters = (*solver)->parameters(); SatPostsolver postsolver((*solver)->NumVariables()); @@ -1258,7 +1258,7 @@ SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, // is possible that the presolve lose this "lucky" ordering. This is in // particular the case on the SAT14.crafted.complete-xxx-... problems. { - Model *model = (*solver)->model(); + Model* model = (*solver)->model(); const double dtime = std::min(1.0, time_limit->GetDeterministicTimeLeft()); if (!LookForTrivialSatSolution(dtime, model, log_info)) { VLOG(1) << "UNSAT during probing."; @@ -1288,7 +1288,7 @@ SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, // "probing" code afterwards even if it will not fix more literals, but it // will do one pass of proper equivalence detection. 
{ - Model *model = (*solver)->model(); + Model* model = (*solver)->model(); model->GetOrCreate()->MergeWithGlobalTimeLimit(time_limit); SatPresolveOptions options; options.log_info = log_info; @@ -1301,7 +1301,7 @@ SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, VLOG(1) << "UNSAT during probing."; return SatSolver::INFEASIBLE; } - for (const auto &c : model->GetOrCreate()->clauses) { + for (const auto& c : model->GetOrCreate()->clauses) { postsolver.Add(c[0], c); } } @@ -1380,7 +1380,7 @@ SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, // using binary ones. Or if/when we support at most one better in pure SAT // solving and presolve. { - Model *model = (*solver)->model(); + Model* model = (*solver)->model(); model->GetOrCreate()->MergeWithGlobalTimeLimit(time_limit); SatPresolveOptions options; options.log_info = log_info; @@ -1392,7 +1392,7 @@ SatSolver::Status SolveWithPresolve(std::unique_ptr *solver, if (!model->GetOrCreate()->PresolveLoop(options)) { return SatSolver::INFEASIBLE; } - for (const auto &c : model->GetOrCreate()->clauses) { + for (const auto& c : model->GetOrCreate()->clauses) { postsolver.Add(c[0], c); } } diff --git a/ortools/sat/subsolver.cc b/ortools/sat/subsolver.cc index b301eed8df..5172f8e65f 100644 --- a/ortools/sat/subsolver.cc +++ b/ortools/sat/subsolver.cc @@ -31,8 +31,8 @@ namespace { // // For now we use a really basic logic: call the least frequently called. 
int NextSubsolverToSchedule( - const std::vector > &subsolvers, - const std::vector &num_generated_tasks) { + const std::vector>& subsolvers, + const std::vector& num_generated_tasks) { int best = -1; for (int i = 0; i < subsolvers.size(); ++i) { if (subsolvers[i]->TaskIsAvailable()) { @@ -45,15 +45,13 @@ int NextSubsolverToSchedule( return best; } -void SynchronizeAll( - const std::vector > &subsolvers) { - for (const auto &subsolver : subsolvers) subsolver->Synchronize(); +void SynchronizeAll(const std::vector>& subsolvers) { + for (const auto& subsolver : subsolvers) subsolver->Synchronize(); } } // namespace -void SequentialLoop( - const std::vector > &subsolvers) { +void SequentialLoop(const std::vector>& subsolvers) { int64 task_id = 0; std::vector num_generated_tasks(subsolvers.size(), 0); while (true) { @@ -70,13 +68,13 @@ void SequentialLoop( // On portable platform, we don't support multi-threading for now. void NonDeterministicLoop( - const std::vector > &subsolvers, + const std::vector>& subsolvers, int num_threads) { SequentialLoop(subsolvers); } void DeterministicLoop( - const std::vector > &subsolvers, int num_threads, + const std::vector>& subsolvers, int num_threads, int batch_size) { SequentialLoop(subsolvers); } @@ -84,7 +82,7 @@ void DeterministicLoop( #else // __PORTABLE_PLATFORM__ void DeterministicLoop( - const std::vector > &subsolvers, int num_threads, + const std::vector>& subsolvers, int num_threads, int batch_size) { CHECK_GT(num_threads, 0); CHECK_GT(batch_size, 0); @@ -116,7 +114,7 @@ void DeterministicLoop( } void NonDeterministicLoop( - const std::vector > &subsolvers, + const std::vector>& subsolvers, int num_threads) { CHECK_GT(num_threads, 0); if (num_threads == 1) { diff --git a/ortools/sat/subsolver.h b/ortools/sat/subsolver.h index ff08c05e58..8f4e9c783e 100644 --- a/ortools/sat/subsolver.h +++ b/ortools/sat/subsolver.h @@ -40,7 +40,7 @@ namespace sat { // tasks generated by GenerateTask() are executed in parallel in a 
threadpool. class SubSolver { public: - explicit SubSolver(const std::string &name) : name_(name) {} + explicit SubSolver(const std::string& name) : name_(name) {} virtual ~SubSolver() {} // Returns true iff GenerateTask() can be called. @@ -114,8 +114,7 @@ class SynchronizationPoint : public SubSolver { // any tasks. This can be used to synchronize classes used by many subsolvers // just once for instance. void NonDeterministicLoop( - const std::vector > &subsolvers, - int num_threads); + const std::vector>& subsolvers, int num_threads); // Similar to NonDeterministicLoop() except this should result in a // deterministic solver provided that all SubSolver respect the Synchronize() @@ -128,14 +127,14 @@ void NonDeterministicLoop( // 3/ wait for all task to finish. // 4/ repeat until no task can be generated in step 2. void DeterministicLoop( - const std::vector > &subsolvers, int num_threads, + const std::vector>& subsolvers, int num_threads, int batch_size); // Same as above, but specialized implementation for the case num_threads=1. // This avoids using a Threadpool altogether. It should have the same behavior // than the functions above with num_threads=1 and batch_size=1. Note that an // higher batch size will not behave in the same way, even if num_threads=1. 
-void SequentialLoop(const std::vector > &subsolvers); +void SequentialLoop(const std::vector>& subsolvers); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/symmetry.cc b/ortools/sat/symmetry.cc index 3bd5c5ca96..93ac3e842a 100644 --- a/ortools/sat/symmetry.cc +++ b/ortools/sat/symmetry.cc @@ -57,17 +57,17 @@ void SymmetryPropagator::AddSymmetry( permutations_.emplace_back(permutation.release()); } -bool SymmetryPropagator::PropagateNext(Trail *trail) { +bool SymmetryPropagator::PropagateNext(Trail* trail) { SCOPED_TIME_STAT(&stats_); const Literal true_literal = (*trail)[propagation_trail_index_]; if (true_literal.Index() < images_.size()) { - const std::vector &images = images_[true_literal.Index()]; + const std::vector& images = images_[true_literal.Index()]; for (int image_index = 0; image_index < images.size(); ++image_index) { const int p_index = images[image_index].permutation_index; // TODO(user): some optim ideas: no need to enqueue if a decision image is // already assigned to false. But then the Untrail() is more involved. - std::vector *p_trail = + std::vector* p_trail = &(permutation_trails_[p_index]); if (Enqueue(*trail, true_literal, images[image_index].image, p_trail)) { continue; @@ -76,14 +76,14 @@ bool SymmetryPropagator::PropagateNext(Trail *trail) { // We have a non-symmetric literal and its image is not already assigned // to // true. - const AssignedLiteralInfo &non_symmetric = + const AssignedLiteralInfo& non_symmetric = (*p_trail)[p_trail->back().first_non_symmetric_info_index_so_far]; // If the first non-symmetric literal is a decision, then we can't deduce // anything. Otherwise, it is either a conflict or a propagation. 
const BooleanVariable non_symmetric_var = non_symmetric.literal.Variable(); - const AssignmentInfo &assignment_info = trail->Info(non_symmetric_var); + const AssignmentInfo& assignment_info = trail->Info(non_symmetric_var); if (trail->AssignmentType(non_symmetric_var) == AssignmentType::kSearchDecision) { continue; @@ -94,7 +94,7 @@ bool SymmetryPropagator::PropagateNext(Trail *trail) { // Set the conflict on the trail. // Note that we need to fetch a reason for this. - std::vector *conflict = trail->MutableConflict(); + std::vector* conflict = trail->MutableConflict(); const absl::Span initial_reason = trail->Reason(non_symmetric.literal.Variable()); Permute(p_index, initial_reason, conflict); @@ -123,7 +123,7 @@ bool SymmetryPropagator::PropagateNext(Trail *trail) { return true; } -bool SymmetryPropagator::Propagate(Trail *trail) { +bool SymmetryPropagator::Propagate(Trail* trail) { const int old_index = trail->Index(); while (trail->Index() == old_index && propagation_trail_index_ < old_index) { if (!PropagateNext(trail)) return false; @@ -131,33 +131,33 @@ bool SymmetryPropagator::Propagate(Trail *trail) { return true; } -void SymmetryPropagator::Untrail(const Trail &trail, int trail_index) { +void SymmetryPropagator::Untrail(const Trail& trail, int trail_index) { SCOPED_TIME_STAT(&stats_); while (propagation_trail_index_ > trail_index) { --propagation_trail_index_; const Literal true_literal = trail[propagation_trail_index_]; if (true_literal.Index() < images_.size()) { - for (ImageInfo &info : images_[true_literal.Index()]) { + for (ImageInfo& info : images_[true_literal.Index()]) { permutation_trails_[info.permutation_index].pop_back(); } } } } -absl::Span SymmetryPropagator::Reason(const Trail &trail, +absl::Span SymmetryPropagator::Reason(const Trail& trail, int trail_index) const { SCOPED_TIME_STAT(&stats_); - const ReasonInfo &reason_info = reasons_[trail_index]; - std::vector *reason = trail.GetEmptyVectorToStoreReason(trail_index); + const ReasonInfo& 
reason_info = reasons_[trail_index]; + std::vector* reason = trail.GetEmptyVectorToStoreReason(trail_index); Permute(reason_info.symmetry_index, trail.Reason(trail[reason_info.source_trail_index].Variable()), reason); return *reason; } -bool SymmetryPropagator::Enqueue(const Trail &trail, Literal literal, +bool SymmetryPropagator::Enqueue(const Trail& trail, Literal literal, Literal image, - std::vector *p_trail) { + std::vector* p_trail) { // Small optimization to get the trail index of literal. const int literal_trail_index = propagation_trail_index_; DCHECK_EQ(literal_trail_index, trail.Info(literal.Variable()).trail_index); @@ -171,7 +171,7 @@ bool SymmetryPropagator::Enqueue(const Trail &trail, Literal literal, p_trail->empty() ? 0 : p_trail->back().first_non_symmetric_info_index_so_far)); - int *index = &(p_trail->back().first_non_symmetric_info_index_so_far); + int* index = &(p_trail->back().first_non_symmetric_info_index_so_far); // Compute first_non_symmetric_info_index_so_far. while (*index < p_trail->size() && @@ -190,11 +190,11 @@ bool SymmetryPropagator::Enqueue(const Trail &trail, Literal literal, } void SymmetryPropagator::Permute(int index, absl::Span input, - std::vector *output) const { + std::vector* output) const { SCOPED_TIME_STAT(&stats_); // Initialize tmp_literal_mapping_ (resize it if needed). - const SparsePermutation &permutation = *(permutations_[index].get()); + const SparsePermutation& permutation = *(permutations_[index].get()); if (permutation.Size() > tmp_literal_mapping_.size()) { tmp_literal_mapping_.resize(permutation.Size()); for (LiteralIndex i(0); i < tmp_literal_mapping_.size(); ++i) { diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index ac731665cd..2ee3e320e3 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -31,8 +31,8 @@ ABSL_FLAG(bool, cp_model_dump_solutions, false, "DEBUG ONLY. 
If true, all the intermediate solution will be dumped " - "under '\"absl::GetFlag(FLAGS_cp_model_dump_prefix)\" + " - "\"solution_xxx.pb.txt\"'."); + "under '\"FLAGS_cp_model_dump_prefix\" + \"solution_xxx.pb.txt\"'."); + ABSL_FLAG( std::string, cp_model_load_debug_solution, "", "DEBUG ONLY. When this is set to a non-empty file name, " @@ -43,7 +43,7 @@ namespace operations_research { namespace sat { void SharedRelaxationSolutionRepository::NewRelaxationSolution( - const CpSolverResponse &response) { + const CpSolverResponse& response) { // Note that the Add() method already applies mutex lock. So we don't need it // here. if (response.solution().empty()) return; @@ -92,7 +92,7 @@ std::vector SharedIncompleteSolutionManager::GetNewSolution() { } void SharedIncompleteSolutionManager::AddNewSolution( - const std::vector &lp_solution) { + const std::vector& lp_solution) { absl::MutexLock mutex_lock(&mutex_); solutions_.push_back(lp_solution); } @@ -100,9 +100,9 @@ void SharedIncompleteSolutionManager::AddNewSolution( // TODO(user): Experiments and play with the num_solutions_to_keep parameter. 
SharedResponseManager::SharedResponseManager(bool log_updates, bool enumerate_all_solutions, - const CpModelProto *proto, - const WallTimer *wall_timer, - SharedTimeLimit *shared_time_limit) + const CpModelProto* proto, + const WallTimer* wall_timer, + SharedTimeLimit* shared_time_limit) : log_updates_(log_updates), enumerate_all_solutions_(enumerate_all_solutions), model_proto_(*proto), @@ -112,9 +112,9 @@ SharedResponseManager::SharedResponseManager(bool log_updates, namespace { -void LogNewSolution(const std::string &event_or_solution_count, +void LogNewSolution(const std::string& event_or_solution_count, double time_in_seconds, double obj_best, double obj_lb, - double obj_ub, const std::string &solution_info) { + double obj_ub, const std::string& solution_info) { const std::string obj_next = absl::StrFormat("next:[%.9g,%.9g]", obj_lb, obj_ub); LOG(INFO) << absl::StrFormat("#%-5s %6.2fs best:%-5.9g %-15s %s", @@ -122,9 +122,9 @@ void LogNewSolution(const std::string &event_or_solution_count, obj_best, obj_next, solution_info); } -void LogNewSatSolution(const std::string &event_or_solution_count, +void LogNewSatSolution(const std::string& event_or_solution_count, double time_in_seconds, - const std::string &solution_info) { + const std::string& solution_info) { LOG(INFO) << absl::StrFormat("#%-5s %6.2fs %s", event_or_solution_count, time_in_seconds, solution_info); } @@ -144,7 +144,7 @@ void SharedResponseManager::UpdatePrimalIntegral() { // Using the log should count no solution as just log(2*64) = 18, and // otherwise just compare order of magnitude which seems nice. Also, It is // more easy to compare the primal integral with the total time. - const CpObjectiveProto &obj = model_proto_.objective(); + const CpObjectiveProto& obj = model_proto_.objective(); const double factor = obj.scaling_factor() != 0.0 ? 
std::abs(obj.scaling_factor()) : 1.0; const double bounds_delta = std::log( @@ -154,7 +154,7 @@ void SharedResponseManager::UpdatePrimalIntegral() { } void SharedResponseManager::SetGapLimitsFromParameters( - const SatParameters ¶meters) { + const SatParameters& parameters) { absl::MutexLock mutex_lock(&mutex_); if (!model_proto_.has_objective()) return; absolute_gap_limit_ = parameters.absolute_gap_limit(); @@ -166,7 +166,7 @@ void SharedResponseManager::TestGapLimitsIfNeeded() { if (best_solution_objective_value_ >= kMaxIntegerValue) return; if (inner_objective_lower_bound_ <= kMinIntegerValue) return; - const CpObjectiveProto &obj = model_proto_.objective(); + const CpObjectiveProto& obj = model_proto_.objective(); const double user_best = ScaleObjectiveValue(obj, best_solution_objective_value_); const double user_bound = @@ -193,7 +193,7 @@ void SharedResponseManager::TestGapLimitsIfNeeded() { } void SharedResponseManager::UpdateInnerObjectiveBounds( - const std::string &worker_info, IntegerValue lb, IntegerValue ub) { + const std::string& worker_info, IntegerValue lb, IntegerValue ub) { absl::MutexLock mutex_lock(&mutex_); CHECK(model_proto_.has_objective()); @@ -231,7 +231,7 @@ void SharedResponseManager::UpdateInnerObjectiveBounds( return; } if (log_updates_ && change) { - const CpObjectiveProto &obj = model_proto_.objective(); + const CpObjectiveProto& obj = model_proto_.objective(); const double best = ScaleObjectiveValue(obj, best_solution_objective_value_); double new_lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_); @@ -249,7 +249,7 @@ void SharedResponseManager::UpdateInnerObjectiveBounds( // UNKNOWN -> FEASIBLE -> OPTIMAL // UNKNOWN -> INFEASIBLE void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible( - const std::string &worker_info) { + const std::string& worker_info) { absl::MutexLock mutex_lock(&mutex_); if (best_response_.status() == CpSolverStatus::FEASIBLE || best_response_.status() == CpSolverStatus::OPTIMAL) { @@ -270,7 
+270,7 @@ void SharedResponseManager::NotifyThatImprovingProblemIsInfeasible( if (log_updates_) LogNewSatSolution("Done", wall_timer_.Get(), worker_info); } -void SharedResponseManager::AddUnsatCore(const std::vector &core) { +void SharedResponseManager::AddUnsatCore(const std::vector& core) { absl::MutexLock mutex_lock(&mutex_); best_response_.clear_sufficient_assumptions_for_infeasibility(); for (const int ref : core) { @@ -317,7 +317,7 @@ double SharedResponseManager::PrimalIntegral() const { } int SharedResponseManager::AddSolutionCallback( - std::function callback) { + std::function callback) { absl::MutexLock mutex_lock(&mutex_); const int id = next_callback_id_++; callbacks_.emplace_back(id, std::move(callback)); @@ -343,7 +343,7 @@ CpSolverResponse SharedResponseManager::GetResponse() { void SharedResponseManager::FillObjectiveValuesInBestResponse() { if (!model_proto_.has_objective()) return; - const CpObjectiveProto &obj = model_proto_.objective(); + const CpObjectiveProto& obj = model_proto_.objective(); if (best_response_.status() == CpSolverStatus::INFEASIBLE) { best_response_.clear_objective_value(); @@ -369,8 +369,8 @@ void SharedResponseManager::FillObjectiveValuesInBestResponse() { best_response_.set_primal_integral(primal_integral_); } -void SharedResponseManager::NewSolution(const CpSolverResponse &response, - Model *model) { +void SharedResponseManager::NewSolution(const CpSolverResponse& response, + Model* model) { absl::MutexLock mutex_lock(&mutex_); if (model_proto_.has_objective()) { @@ -433,7 +433,7 @@ void SharedResponseManager::NewSolution(const CpSolverResponse &response, } if (model_proto_.has_objective()) { - const CpObjectiveProto &obj = model_proto_.objective(); + const CpObjectiveProto& obj = model_proto_.objective(); const double best = ScaleObjectiveValue(obj, best_solution_objective_value_); double lb = ScaleObjectiveValue(obj, inner_objective_lower_bound_); @@ -455,7 +455,7 @@ void SharedResponseManager::NewSolution(const 
CpSolverResponse &response, if (!callbacks_.empty()) { FillObjectiveValuesInBestResponse(); SetStatsFromModelInternal(model); - for (const auto &pair : callbacks_) { + for (const auto& pair : callbacks_) { pair.second(best_response_); } } @@ -472,7 +472,7 @@ void SharedResponseManager::NewSolution(const CpSolverResponse &response, #endif // __PORTABLE_PLATFORM__ } -void SharedResponseManager::LoadDebugSolution(Model *model) { +void SharedResponseManager::LoadDebugSolution(Model* model) { #if !defined(__PORTABLE_PLATFORM__) if (absl::GetFlag(FLAGS_cp_model_load_debug_solution).empty()) return; if (model->Get() != nullptr) return; // Already loaded. @@ -483,8 +483,8 @@ void SharedResponseManager::LoadDebugSolution(Model *model) { CHECK_OK(file::GetTextProto(absl::GetFlag(FLAGS_cp_model_load_debug_solution), &response, file::Defaults())); - const auto &mapping = *model->GetOrCreate(); - auto &debug_solution = *model->GetOrCreate(); + const auto& mapping = *model->GetOrCreate(); + auto& debug_solution = *model->GetOrCreate(); debug_solution.resize( model->GetOrCreate()->NumIntegerVariables().value()); for (int i = 0; i < response.solution().size(); ++i) { @@ -496,7 +496,7 @@ void SharedResponseManager::LoadDebugSolution(Model *model) { // The objective variable is usually not part of the proto, but it is still // nice to have it, so we recompute it here. 
- auto *objective_def = model->Get(); + auto* objective_def = model->Get(); if (objective_def == nullptr) return; const IntegerVariable objective_var = objective_def->objective_var; @@ -507,22 +507,22 @@ void SharedResponseManager::LoadDebugSolution(Model *model) { #endif // __PORTABLE_PLATFORM__ } -void SharedResponseManager::SetStatsFromModel(Model *model) { +void SharedResponseManager::SetStatsFromModel(Model* model) { absl::MutexLock mutex_lock(&mutex_); SetStatsFromModelInternal(model); } -void SharedResponseManager::SetStatsFromModelInternal(Model *model) { +void SharedResponseManager::SetStatsFromModelInternal(Model* model) { if (model == nullptr) return; - auto *sat_solver = model->Get(); - auto *integer_trail = model->Get(); + auto* sat_solver = model->Get(); + auto* integer_trail = model->Get(); best_response_.set_num_booleans(sat_solver->NumVariables()); best_response_.set_num_branches(sat_solver->num_branches()); best_response_.set_num_conflicts(sat_solver->num_failures()); best_response_.set_num_binary_propagations(sat_solver->num_propagations()); best_response_.set_num_integer_propagations( integer_trail == nullptr ? 
0 : integer_trail->num_enqueues()); - auto *time_limit = model->Get(); + auto* time_limit = model->Get(); best_response_.set_wall_time(time_limit->GetElapsedTime()); best_response_.set_deterministic_time( time_limit->GetElapsedDeterministicTime()); @@ -534,7 +534,7 @@ bool SharedResponseManager::ProblemIsSolved() const { best_response_.status() == CpSolverStatus::INFEASIBLE; } -SharedBoundsManager::SharedBoundsManager(const CpModelProto &model_proto) +SharedBoundsManager::SharedBoundsManager(const CpModelProto& model_proto) : num_variables_(model_proto.variables_size()), model_proto_(model_proto), lower_bounds_(num_variables_, kint64min), @@ -552,10 +552,10 @@ SharedBoundsManager::SharedBoundsManager(const CpModelProto &model_proto) } void SharedBoundsManager::ReportPotentialNewBounds( - const CpModelProto &model_proto, const std::string &worker_name, - const std::vector &variables, - const std::vector &new_lower_bounds, - const std::vector &new_upper_bounds) { + const CpModelProto& model_proto, const std::string& worker_name, + const std::vector& variables, + const std::vector& new_lower_bounds, + const std::vector& new_upper_bounds) { CHECK_EQ(variables.size(), new_lower_bounds.size()); CHECK_EQ(variables.size(), new_upper_bounds.size()); @@ -613,8 +613,8 @@ int SharedBoundsManager::RegisterNewId() { } void SharedBoundsManager::GetChangedBounds( - int id, std::vector *variables, std::vector *new_lower_bounds, - std::vector *new_upper_bounds) { + int id, std::vector* variables, std::vector* new_lower_bounds, + std::vector* new_upper_bounds) { variables->clear(); new_lower_bounds->clear(); new_upper_bounds->clear(); diff --git a/ortools/sat/table.cc b/ortools/sat/table.cc index 5930a64f00..0135599fa9 100644 --- a/ortools/sat/table.cc +++ b/ortools/sat/table.cc @@ -38,10 +38,10 @@ namespace { // Converts the vector representation returned by FullDomainEncoding() to a map. 
absl::flat_hash_map GetEncoding(IntegerVariable var, - Model *model) { + Model* model) { absl::flat_hash_map encoding; - IntegerEncoder *encoder = model->GetOrCreate(); - for (const auto &entry : encoder->FullDomainEncoding(var)) { + IntegerEncoder* encoder = model->GetOrCreate(); + for (const auto& entry : encoder->FullDomainEncoding(var)) { encoding[entry.value] = entry.literal; } return encoding; @@ -53,12 +53,12 @@ absl::flat_hash_map GetEncoding(IntegerVariable var, // map. Thew tuples_with_any vector provides a list of line_literals that will // support any value. void ProcessOneColumn( - const std::vector &line_literals, - const std::vector &values, - const absl::flat_hash_map &encoding, - const std::vector &tuples_with_any, Model *model) { + const std::vector& line_literals, + const std::vector& values, + const absl::flat_hash_map& encoding, + const std::vector& tuples_with_any, Model* model) { CHECK_EQ(line_literals.size(), values.size()); - std::vector > pairs; + std::vector> pairs; // If a value is false (i.e not possible), then the tuple with this value // is false too (i.e not possible). Conversely, if the tuple is selected, @@ -95,14 +95,14 @@ void ProcessOneColumn( // Simpler encoding for table constraints with 2 variables. void AddSizeTwoTable( absl::Span vars, - const std::vector > &tuples, - const std::vector > &values_per_var, - Model *model) { + const std::vector>& tuples, + const std::vector>& values_per_var, + Model* model) { const int n = vars.size(); CHECK_EQ(n, 2); - IntegerTrail *const integer_trail = model->GetOrCreate(); + IntegerTrail* const integer_trail = model->GetOrCreate(); - std::vector > encodings(n); + std::vector> encodings(n); for (int i = 0; i < n; ++i) { const std::vector reached_values(values_per_var[i].begin(), values_per_var[i].end()); @@ -117,10 +117,10 @@ void AddSizeTwoTable( // One variable is fixed. Propagation is complete. 
if (values_per_var[0].size() == 1 || values_per_var[1].size() == 1) return; - std::map > left_to_right; - std::map > right_to_left; + std::map> left_to_right; + std::map> right_to_left; - for (const auto &tuple : tuples) { + for (const auto& tuple : tuples) { const IntegerValue left_value(tuple[0]); const IntegerValue right_value(tuple[1]); if (!encodings[0].contains(left_value) || @@ -140,7 +140,7 @@ void AddSizeTwoTable( std::vector clause; auto add_support_constraint = [model, &num_clause_added, &num_large_clause_added, &num_implications, - &clause](LiteralIndex lit, const std::vector &supports, + &clause](LiteralIndex lit, const std::vector& supports, int max_support_size) { if (supports.size() == max_support_size) return; if (supports.size() == 1) { @@ -157,10 +157,10 @@ void AddSizeTwoTable( } }; - for (const auto &it : left_to_right) { + for (const auto& it : left_to_right) { add_support_constraint(it.first, it.second, values_per_var[1].size()); } - for (const auto &it : right_to_left) { + for (const auto& it : right_to_left) { add_support_constraint(it.first, it.second, values_per_var[0].size()); } VLOG(2) << "Table: 2 variables, " << tuples.size() << " tuples encoded using " @@ -175,9 +175,9 @@ void AddSizeTwoTable( // In that case, it creates the complement of the projected tuples and add that // as a forbidden assignment constraint. void ExploreSubsetOfVariablesAndAddNegatedTables( - const std::vector > &tuples, - const std::vector > &var_domains, - absl::Span vars, Model *model) { + const std::vector>& tuples, + const std::vector>& var_domains, + absl::Span vars, Model* model) { const int num_vars = var_domains.size(); for (int start = 0; start < num_vars; ++start) { const int limit = start == 0 ? num_vars : std::min(num_vars, start + 3); @@ -198,9 +198,9 @@ void ExploreSubsetOfVariablesAndAddNegatedTables( // Abort early. 
if (max_num_prefix_tuples > 2 * tuples.size()) break; - absl::flat_hash_set > prefixes; + absl::flat_hash_set> prefixes; bool skip = false; - for (const std::vector &tuple : tuples) { + for (const std::vector& tuple : tuples) { prefixes.insert(absl::MakeSpan(&tuple[start], end - start + 1)); if (prefixes.size() == max_num_prefix_tuples) { // Nothing to add with this range [start..end]. @@ -211,7 +211,7 @@ void ExploreSubsetOfVariablesAndAddNegatedTables( if (skip) continue; const int num_prefix_tuples = prefixes.size(); - std::vector > negated_tuples; + std::vector> negated_tuples; int created = 0; if (num_prefix_tuples < max_num_prefix_tuples && @@ -246,14 +246,14 @@ void ExploreSubsetOfVariablesAndAddNegatedTables( // the decomposition uses clauses corresponding to the equivalence: // (\/_{row | tuples[row][col] = val} tuple_literals[row]) <=> (vars[col] = val) void AddTableConstraint(absl::Span vars, - std::vector > tuples, Model *model) { + std::vector> tuples, Model* model) { const int n = vars.size(); - IntegerTrail *integer_trail = model->GetOrCreate(); + IntegerTrail* integer_trail = model->GetOrCreate(); const int num_original_tuples = tuples.size(); // Compute the set of possible values for each variable (from the table). // Remove invalid tuples along the way. - std::vector > values_per_var(n); + std::vector> values_per_var(n); int index = 0; for (int tuple_index = 0; tuple_index < num_original_tuples; ++tuple_index) { bool keep = true; @@ -290,14 +290,14 @@ void AddTableConstraint(absl::Span vars, // tuples. 
int num_prefix_tuples = 0; { - absl::flat_hash_set > prefixes; - for (const std::vector &tuple : tuples) { + absl::flat_hash_set> prefixes; + for (const std::vector& tuple : tuples) { prefixes.insert(absl::MakeSpan(tuple.data(), n - 1)); } num_prefix_tuples = prefixes.size(); } - std::vector > var_domains(n); + std::vector> var_domains(n); for (int j = 0; j < n; ++j) { var_domains[j].assign(values_per_var[j].begin(), values_per_var[j].end()); std::sort(var_domains[j].begin(), var_domains[j].end()); @@ -310,7 +310,7 @@ void AddTableConstraint(absl::Span vars, // UpdateInitialDomain(), the domain of other variable could become more // restricted that values_per_var. For now, we do not try to reach a fixed // point here. - std::vector > encodings(n); + std::vector> encodings(n); for (int i = 0; i < n; ++i) { const std::vector reached_values(values_per_var[i].begin(), values_per_var[i].end()); @@ -455,11 +455,11 @@ void AddTableConstraint(absl::Span vars, } void AddNegatedTableConstraint(absl::Span vars, - std::vector > tuples, - Model *model) { + std::vector> tuples, + Model* model) { const int n = vars.size(); - auto *integer_trail = model->GetOrCreate(); - auto *integer_encoder = model->GetOrCreate(); + auto* integer_trail = model->GetOrCreate(); + auto* integer_encoder = model->GetOrCreate(); // Remove unreachable tuples. int index = 0; @@ -494,7 +494,7 @@ void AddNegatedTableConstraint(absl::Span vars, CompressTuples(domain_sizes, any_value, &tuples); // Collect all relevant var == value literal. - std::vector > mapping(n); + std::vector> mapping(n); for (int i = 0; i < n; ++i) { for (const auto pair : integer_encoder->PartialDomainEncoding(vars[i])) { mapping[i][pair.value.value()] = pair.literal; @@ -503,7 +503,7 @@ void AddNegatedTableConstraint(absl::Span vars, // For each tuple, forbid the variables values to be this tuple. 
std::vector clause; - for (const std::vector &tuple : tuples) { + for (const std::vector& tuple : tuples) { bool add_tuple = true; clause.clear(); for (int i = 0; i < n; ++i) { @@ -541,10 +541,10 @@ void AddNegatedTableConstraint(absl::Span vars, } } -std::function LiteralTableConstraint( - const std::vector > &literal_tuples, - const std::vector &line_literals) { - return [=](Model *model) { +std::function LiteralTableConstraint( + const std::vector>& literal_tuples, + const std::vector& line_literals) { + return [=](Model* model) { CHECK_EQ(literal_tuples.size(), line_literals.size()); const int num_tuples = line_literals.size(); if (num_tuples == 0) return; @@ -554,7 +554,7 @@ std::function LiteralTableConstraint( CHECK_EQ(tuple_size, literal_tuples[i].size()); } - absl::flat_hash_map > + absl::flat_hash_map> line_literals_per_literal; for (int i = 0; i < num_tuples; ++i) { const LiteralIndex selected_index = line_literals[i].Index(); @@ -577,9 +577,9 @@ std::function LiteralTableConstraint( // If all selected literals of the lines containing a literal are false, // then the literal is false. 
- for (const auto &p : line_literals_per_literal) { + for (const auto& p : line_literals_per_literal) { std::vector clause; - for (const auto &index : p.second) { + for (const auto& index : p.second) { clause.push_back(Literal(index)); } clause.push_back(Literal(p.first).Negated()); @@ -588,19 +588,19 @@ std::function LiteralTableConstraint( }; } -std::function TransitionConstraint( - const std::vector &vars, - const std::vector > &automaton, int64 initial_state, - const std::vector &final_states) { - return [=](Model *model) { - IntegerTrail *integer_trail = model->GetOrCreate(); +std::function TransitionConstraint( + const std::vector& vars, + const std::vector>& automaton, int64 initial_state, + const std::vector& final_states) { + return [=](Model* model) { + IntegerTrail* integer_trail = model->GetOrCreate(); const int n = vars.size(); CHECK_GT(n, 0) << "No variables in TransitionConstraint()."; // Test precondition. { - std::set > unique_transition_checker; - for (const std::vector &transition : automaton) { + std::set> unique_transition_checker; + for (const std::vector& transition : automaton) { CHECK_EQ(transition.size(), 3); const std::pair p{transition[0], transition[1]}; CHECK(!gtl::ContainsKey(unique_transition_checker, p)) @@ -611,10 +611,10 @@ std::function TransitionConstraint( } // Construct a table with the possible values of each vars. - std::vector > possible_values(n); + std::vector> possible_values(n); for (int time = 0; time < n; ++time) { const auto domain = integer_trail->InitialVariableDomain(vars[time]); - for (const std::vector &transition : automaton) { + for (const std::vector& transition : automaton) { // TODO(user): quadratic algo, improve! if (domain.Contains(transition[1])) { possible_values[time].insert(transition[1]); @@ -623,7 +623,7 @@ std::function TransitionConstraint( } // Compute the set of reachable state at each time point. 
- std::vector > reachable_states(n + 1); + std::vector> reachable_states(n + 1); reachable_states[0].insert(initial_state); reachable_states[n] = {final_states.begin(), final_states.end()}; @@ -632,7 +632,7 @@ std::function TransitionConstraint( // TODO(user): filter using the domain of vars[time] that may not contain // all the possible transitions. for (int time = 0; time + 1 < n; ++time) { - for (const std::vector &transition : automaton) { + for (const std::vector& transition : automaton) { if (!gtl::ContainsKey(reachable_states[time], transition[0])) continue; if (!gtl::ContainsKey(possible_values[time], transition[1])) continue; reachable_states[time + 1].insert(transition[2]); @@ -642,7 +642,7 @@ std::function TransitionConstraint( // Backward. for (int time = n - 1; time > 0; --time) { std::set new_set; - for (const std::vector &transition : automaton) { + for (const std::vector& transition : automaton) { if (!gtl::ContainsKey(reachable_states[time], transition[0])) continue; if (!gtl::ContainsKey(possible_values[time], transition[1])) continue; if (!gtl::ContainsKey(reachable_states[time + 1], transition[2])) @@ -668,7 +668,7 @@ std::function TransitionConstraint( std::vector in_states; std::vector transition_values; std::vector out_states; - for (const std::vector &transition : automaton) { + for (const std::vector& transition : automaton) { if (!gtl::ContainsKey(reachable_states[time], transition[0])) continue; if (!gtl::ContainsKey(possible_values[time], transition[1])) continue; if (!gtl::ContainsKey(reachable_states[time + 1], transition[2])) diff --git a/ortools/sat/theta_tree.cc b/ortools/sat/theta_tree.cc index f8d8300a81..2b0a5300ad 100644 --- a/ortools/sat/theta_tree.cc +++ b/ortools/sat/theta_tree.cc @@ -187,8 +187,8 @@ int ThetaLambdaTree::GetMaxEventWithEnvelopeGreaterThan( template void ThetaLambdaTree::GetEventsWithOptionalEnvelopeGreaterThan( - IntegerType target_envelope, int *critical_event, int *optional_event, - IntegerType 
*available_energy) const { + IntegerType target_envelope, int* critical_event, int* optional_event, + IntegerType* available_energy) const { DCHECK(!leaf_nodes_have_delayed_operations_); int critical_leaf; int optional_leaf; @@ -222,7 +222,7 @@ void ThetaLambdaTree::RefreshNode(int node) { template int ThetaLambdaTree::GetMaxLeafWithEnvelopeGreaterThan( - int node, IntegerType target_envelope, IntegerType *extra) const { + int node, IntegerType target_envelope, IntegerType* extra) const { DCHECK(!leaf_nodes_have_delayed_operations_); DCHECK_LT(target_envelope, tree_[node].envelope); while (node < num_leaves_) { @@ -261,8 +261,8 @@ int ThetaLambdaTree::GetLeafWithMaxEnergyDelta(int node) const { template void ThetaLambdaTree::GetLeavesWithOptionalEnvelopeGreaterThan( - IntegerType target_envelope, int *critical_leaf, int *optional_leaf, - IntegerType *available_energy) const { + IntegerType target_envelope, int* critical_leaf, int* optional_leaf, + IntegerType* available_energy) const { DCHECK(!leaf_nodes_have_delayed_operations_); DCHECK_LT(target_envelope, tree_[1].envelope_opt); int node = 1; diff --git a/ortools/sat/timetable.cc b/ortools/sat/timetable.cc index 5e9328e423..c1012d8f76 100644 --- a/ortools/sat/timetable.cc +++ b/ortools/sat/timetable.cc @@ -25,8 +25,8 @@ namespace operations_research { namespace sat { TimeTablingPerTask::TimeTablingPerTask( - const std::vector &demands, AffineExpression capacity, - IntegerTrail *integer_trail, SchedulingConstraintHelper *helper) + const std::vector& demands, AffineExpression capacity, + IntegerTrail* integer_trail, SchedulingConstraintHelper* helper) : num_tasks_(helper->NumTasks()), demands_(demands), capacity_(capacity), @@ -58,7 +58,7 @@ TimeTablingPerTask::TimeTablingPerTask( } } -void TimeTablingPerTask::RegisterWith(GenericLiteralWatcher *watcher) { +void TimeTablingPerTask::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_->WatchAllTasks(id, watcher); 
watcher->WatchUpperBound(capacity_.var, id); @@ -107,8 +107,8 @@ bool TimeTablingPerTask::BuildProfile() { } } - const auto &by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); - const auto &by_end_min = helper_->TaskByIncreasingEndMin(); + const auto& by_decreasing_start_max = helper_->TaskByDecreasingStartMax(); + const auto& by_end_min = helper_->TaskByIncreasingEndMin(); // Build the profile. // ------------------ @@ -189,9 +189,9 @@ bool TimeTablingPerTask::SweepAllTasks(bool is_forward) { CapSub(CapacityMax().value(), profile_max_height_.value())); // Select the correct members depending on the direction. - int &num_tasks = + int& num_tasks = is_forward ? forward_num_tasks_to_sweep_ : backward_num_tasks_to_sweep_; - std::vector &tasks = + std::vector& tasks = is_forward ? forward_tasks_to_sweep_ : backward_tasks_to_sweep_; // TODO(user): On some problem, a big chunk of the time is spend just checking @@ -248,7 +248,7 @@ bool TimeTablingPerTask::SweepTask(int task_id) { DCHECK(std::is_sorted(profile_.begin(), profile_.end())); int rec_id = std::upper_bound(profile_.begin(), profile_.end(), new_start_min, - [&](IntegerValue value, const ProfileRectangle &rect) { + [&](IntegerValue value, const ProfileRectangle& rect) { return value < rect.start; }) - profile_.begin(); diff --git a/ortools/sat/timetable_edgefinding.cc b/ortools/sat/timetable_edgefinding.cc index a289adbbc9..b0b3c98929 100644 --- a/ortools/sat/timetable_edgefinding.cc +++ b/ortools/sat/timetable_edgefinding.cc @@ -28,8 +28,8 @@ namespace operations_research { namespace sat { TimeTableEdgeFinding::TimeTableEdgeFinding( - const std::vector &demands, AffineExpression capacity, - SchedulingConstraintHelper *helper, IntegerTrail *integer_trail) + const std::vector& demands, AffineExpression capacity, + SchedulingConstraintHelper* helper, IntegerTrail* integer_trail) : num_tasks_(helper->NumTasks()), demands_(demands), capacity_(capacity), @@ -44,7 +44,7 @@ 
TimeTableEdgeFinding::TimeTableEdgeFinding( energy_free_.resize(num_tasks_); } -void TimeTableEdgeFinding::RegisterWith(GenericLiteralWatcher *watcher) { +void TimeTableEdgeFinding::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->WatchUpperBound(capacity_.var, id); helper_->WatchAllTasks(id, watcher); @@ -94,9 +94,9 @@ void TimeTableEdgeFinding::BuildTimeTable() { DCHECK_EQ(scp_.size(), ecp_.size()); - const std::vector &by_decreasing_end_max = + const std::vector& by_decreasing_end_max = helper_->TaskByDecreasingEndMax(); - const std::vector &by_start_min = + const std::vector& by_start_min = helper_->TaskByIncreasingStartMin(); IntegerValue height = IntegerValue(0); @@ -173,7 +173,7 @@ bool TimeTableEdgeFinding::TimeTableEdgeFindingPass() { } BuildTimeTable(); - const auto &by_start_min = helper_->TaskByIncreasingStartMin(); + const auto& by_start_min = helper_->TaskByIncreasingStartMin(); IntegerValue previous_end = kMaxIntegerValue; @@ -306,7 +306,7 @@ bool TimeTableEdgeFinding::IncreaseStartMin(IntegerValue begin, IntegerValue end, int task_index, IntegerValue new_start) { helper_->ClearReason(); - std::vector *mutable_reason = helper_->MutableIntegerReason(); + std::vector* mutable_reason = helper_->MutableIntegerReason(); // Capacity of the resource. 
if (capacity_.var != kNoIntegerVariable) { diff --git a/ortools/sat/util.cc b/ortools/sat/util.cc index 90ffa1dcbf..07d2c0ba48 100644 --- a/ortools/sat/util.cc +++ b/ortools/sat/util.cc @@ -21,9 +21,9 @@ namespace operations_research { namespace sat { -int MoveOneUnprocessedLiteralLast(const std::set &processed, +int MoveOneUnprocessedLiteralLast(const std::set& processed, int relevant_prefix_size, - std::vector *literals) { + std::vector* literals) { if (literals->empty()) return -1; if (!gtl::ContainsKey(processed, literals->back().Index())) { return std::min(relevant_prefix_size, literals->size()); @@ -110,7 +110,7 @@ double Percentile::GetPercentile(double percent) { } void CompressTuples(absl::Span domain_sizes, int64 any_value, - std::vector > *tuples) { + std::vector>* tuples) { if (tuples->empty()) return; // Remove duplicates if any. @@ -123,7 +123,7 @@ void CompressTuples(absl::Span domain_sizes, int64 any_value, for (int i = 0; i < num_vars; ++i) { const int domain_size = domain_sizes[i]; if (domain_size == 1) continue; - absl::flat_hash_map, std::vector > + absl::flat_hash_map, std::vector> masked_tuples_to_indices; for (int t = 0; t < tuples->size(); ++t) { int out = 0; @@ -134,7 +134,7 @@ void CompressTuples(absl::Span domain_sizes, int64 any_value, masked_tuples_to_indices[tuple_minus_var_i].push_back(t); } to_remove.clear(); - for (const auto &it : masked_tuples_to_indices) { + for (const auto& it : masked_tuples_to_indices) { if (it.second.size() != domain_size) continue; (*tuples)[it.second.front()][i] = any_value; to_remove.insert(to_remove.end(), it.second.begin() + 1, it.second.end()); diff --git a/ortools/sat/var_domination.cc b/ortools/sat/var_domination.cc new file mode 100644 index 0000000000..68b82f28dd --- /dev/null +++ b/ortools/sat/var_domination.cc @@ -0,0 +1,1094 @@ +// Copyright 2010-2018 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/var_domination.h" + +namespace operations_research { +namespace sat { + +void VarDomination::Reset(int num_variables) { + phase_ = 0; + num_vars_with_negation_ = 2 * num_variables; + partition_ = absl::make_unique(num_vars_with_negation_); + + can_freely_decrease_.assign(num_vars_with_negation_, true); + + shared_buffer_.clear(); + initial_candidates_.assign(num_vars_with_negation_, IntegerVariableSpan()); + + buffer_.clear(); + dominating_vars_.assign(num_vars_with_negation_, IntegerVariableSpan()); +} + +void VarDomination::RefinePartition(std::vector* vars) { + if (vars->empty()) return; + partition_->Refine(*vars); + for (int& var : *vars) { + const IntegerVariable wrapped(var); + can_freely_decrease_[wrapped] = false; + can_freely_decrease_[NegationOf(wrapped)] = false; + var = NegationOf(wrapped).value(); + } + partition_->Refine(*vars); +} + +void VarDomination::CanOnlyDominateEachOther(absl::Span refs) { + if (phase_ != 0) return; + tmp_vars_.clear(); + for (const int ref : refs) { + tmp_vars_.push_back(RefToIntegerVariable(ref).value()); + } + RefinePartition(&tmp_vars_); + tmp_vars_.clear(); +} + +void VarDomination::ActivityShouldNotChange(absl::Span refs, + absl::Span coeffs) { + if (phase_ != 0) return; + FillTempRanks(/*reverse_references=*/false, /*enforcements=*/{}, refs, + coeffs); + tmp_vars_.clear(); + for (int i = 0; i < tmp_ranks_.size(); ++i) { + if (i > 0 && tmp_ranks_[i].rank != tmp_ranks_[i - 1].rank) { + RefinePartition(&tmp_vars_); + tmp_vars_.clear(); + } + 
tmp_vars_.push_back(tmp_ranks_[i].var.value()); + } + RefinePartition(&tmp_vars_); + tmp_vars_.clear(); +} + +// This correspond to a lower bounded constraint. +void VarDomination::ProcessTempRanks() { + if (phase_ == 0) { + // We actually "split" tmp_ranks_ according to the current partition and + // process each resulting list independently for a faster algo. + for (IntegerVariableWithRank& entry : tmp_ranks_) { + can_freely_decrease_[entry.var] = false; + entry.part = partition_->PartOf(entry.var.value()); + } + std::stable_sort( + tmp_ranks_.begin(), tmp_ranks_.end(), + [](const IntegerVariableWithRank& a, const IntegerVariableWithRank& b) { + return a.part < b.part; + }); + int start = 0; + for (int i = 1; i < tmp_ranks_.size(); ++i) { + if (tmp_ranks_[i].part != tmp_ranks_[start].part) { + Initialize({&tmp_ranks_[start], static_cast(i - start)}); + start = i; + } + } + if (start < tmp_ranks_.size()) { + Initialize({&tmp_ranks_[start], tmp_ranks_.size() - start}); + } + } else if (phase_ == 1) { + FilterUsingTempRanks(); + } else { + // This is only used for debugging, and we shouldn't reach here in prod. 
+ CheckUsingTempRanks(); + } +} + +void VarDomination::ActivityShouldNotDecrease( + absl::Span enforcements, absl::Span refs, + absl::Span coeffs) { + FillTempRanks(/*reverse_references=*/false, enforcements, refs, coeffs); + ProcessTempRanks(); +} + +void VarDomination::ActivityShouldNotIncrease( + absl::Span enforcements, absl::Span refs, + absl::Span coeffs) { + FillTempRanks(/*reverse_references=*/true, enforcements, refs, coeffs); + ProcessTempRanks(); +} + +void VarDomination::MakeRankEqualToStartOfPart( + absl::Span span) { + const int size = span.size(); + int start = 0; + int previous_value = 0; + for (int i = 0; i < size; ++i) { + const int64 value = span[i].rank; + if (value != previous_value) { + previous_value = value; + start = i; + } + span[i].rank = start; + } +} + +void VarDomination::Initialize(absl::Span span) { + // The rank can be wrong and need to be recomputed because of how we splitted + // tmp_ranks_ into spans. + MakeRankEqualToStartOfPart(span); + + const int future_start = shared_buffer_.size(); + int first_start = -1; + + // This is mainly to avoid corner case and hopefully, it should be big enough + // to not matter too much. + const int kSizeThreshold = 1000; + const int size = span.size(); + for (int i = std::max(0, size - kSizeThreshold); i < size; ++i) { + const IntegerVariableWithRank entry = span[i]; + const int num_candidates = size - entry.rank; + if (num_candidates >= kSizeThreshold) continue; + + // Compute size to beat. + int size_threshold = kSizeThreshold; + + // Take into account the current partition size. + const int var_part = partition_->PartOf(entry.var.value()); + const int part_size = partition_->SizeOfPart(var_part); + size_threshold = std::min(size_threshold, part_size); + + // Take into account our current best candidate if there is one. 
+ const int current_num_candidates = initial_candidates_[entry.var].size; + if (current_num_candidates != 0) { + size_threshold = std::min(size_threshold, current_num_candidates); + } + + if (num_candidates < size_threshold) { + if (first_start == -1) first_start = entry.rank; + initial_candidates_[entry.var] = { + future_start - first_start + static_cast(entry.rank), + num_candidates}; + } + } + + // Only store what is necessary. + if (first_start == -1) return; + for (int i = first_start; i < size; ++i) { + shared_buffer_.push_back(span[i].var); + } +} + +// TODO(user): Use more heuristics to not miss as much dominance relation when +// we crop initial lists. +void VarDomination::EndFirstPhase() { + CHECK_EQ(phase_, 0); + phase_ = 1; + + // Some initial lists ar too long and will be cropped to this size. + // We will handle them slightly differently. + // + // TODO(user): Tune the initial size, 50 might be a bit large, since our + // complexity is borned by this number times the number of entries in the + // constraints. Still we should in most situation be a lot lower than that. + const int kMaxInitialSize = 50; + std::vector cropped_lists; + gtl::ITIVector is_cropped(num_vars_with_negation_, + false); + + // Fill the initial domination candidates. + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + if (can_freely_decrease_[var]) continue; + const int part = partition_->PartOf(var.value()); + const int part_size = partition_->SizeOfPart(part); + + const int start = buffer_.size(); + int new_size = 0; + + const int stored_size = initial_candidates_[var].size; + if (stored_size == 0 || part_size < stored_size) { + // We start with the partition part. + // Note that all constraint will be filtered again in the second pass. 
+ for (const int value : partition_->ElementsInPart(part)) { + const IntegerVariable c = IntegerVariable(value); + if (PositiveVariable(c) == PositiveVariable(var)) continue; + if (can_freely_decrease_[NegationOf(c)]) continue; + ++new_size; + buffer_.push_back(c); + + // We do not want too many candidates per variables. + // TODO(user): randomize? + if (new_size > kMaxInitialSize) { + is_cropped[var] = true; + cropped_lists.push_back(var); + break; + } + } + } else { + // Copy the one that are in the same partition_ part. + // + // TODO(user): This can be too long maybe? even if we have list of at + // most 1000 at this point, see InitializeUsingTempRanks(). + for (const IntegerVariable c : InitialDominatingCandidates(var)) { + if (PositiveVariable(c) == PositiveVariable(var)) continue; + if (can_freely_decrease_[NegationOf(c)]) continue; + if (partition_->PartOf(c.value()) != part) continue; + ++new_size; + buffer_.push_back(c); + + // We do not want too many candidates per variables. + // TODO(user): randomize? + if (new_size > kMaxInitialSize) { + is_cropped[var] = true; + cropped_lists.push_back(var); + break; + } + } + } + + dominating_vars_[var] = {start, new_size}; + } + + // Heuristic: To try not to remove domination relations corresponding to short + // lists during transposition (see EndSecondPhase()), we fill half of the + // cropped list with the transpose of the short list relations. This helps + // finding more relation in the presence of cropped lists. + for (const IntegerVariable var : cropped_lists) { + dominating_vars_[var].size = kMaxInitialSize / 2; // Restrict. + } + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + for (const IntegerVariable dom : DominatingVariables(var)) { + if (!is_cropped[NegationOf(dom)]) continue; + IntegerVariableSpan& s = dominating_vars_[NegationOf(dom)]; + if (s.size >= kMaxInitialSize) continue; + buffer_[s.start + s.size++] = NegationOf(var); + } + } + + // Remove any duplicates. 
+ // + // TODO(user): Maybe we should do that with all lists in case the + // input function are called with duplicates too. + for (const IntegerVariable var : cropped_lists) { + if (!is_cropped[var]) continue; + IntegerVariableSpan& s = dominating_vars_[var]; + std::sort(&buffer_[s.start], &buffer_[s.start + s.size]); + const auto p = std::unique(&buffer_[s.start], &buffer_[s.start + s.size]); + s.size = p - &buffer_[s.start]; + } + + // We no longer need the first phase memory. + VLOG(1) << "Num initial list that where cropped: " << cropped_lists.size(); + VLOG(1) << "Shared buffer size: " << shared_buffer_.size(); + VLOG(1) << "Buffer size: " << buffer_.size(); + gtl::STLClearObject(&initial_candidates_); + gtl::STLClearObject(&shared_buffer_); +} + +void VarDomination::EndSecondPhase() { + CHECK_EQ(phase_, 1); + phase_ = 2; + + // Perform intersection with transpose. + shared_buffer_.clear(); + initial_candidates_.assign(num_vars_with_negation_, IntegerVariableSpan()); + + // Pass 1: count. + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + for (const IntegerVariable dom : DominatingVariables(var)) { + ++initial_candidates_[NegationOf(dom)].size; + } + } + + // Pass 2: compute starts. + int start = 0; + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + initial_candidates_[var].start = start; + start += initial_candidates_[var].size; + initial_candidates_[var].size = 0; + } + shared_buffer_.resize(start); + + // Pass 3: transpose. + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + for (const IntegerVariable dom : DominatingVariables(var)) { + IntegerVariableSpan& span = initial_candidates_[NegationOf(dom)]; + shared_buffer_[span.start + span.size++] = NegationOf(var); + } + } + + // Pass 4: intersect. 
+ int num_removed = 0; + tmp_var_to_rank_.resize(num_vars_with_negation_, -1); + for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + for (const IntegerVariable dom : InitialDominatingCandidates(var)) { + tmp_var_to_rank_[dom] = 1; + } + + int new_size = 0; + IntegerVariableSpan& span = dominating_vars_[var]; + for (const IntegerVariable dom : DominatingVariables(var)) { + if (tmp_var_to_rank_[dom] != 1) { + ++num_removed; + continue; + } + buffer_[span.start + new_size++] = dom; + } + span.size = new_size; + + for (const IntegerVariable dom : InitialDominatingCandidates(var)) { + tmp_var_to_rank_[dom] = -1; + } + } + + VLOG(1) << "Transpose removed " << num_removed; + gtl::STLClearObject(&initial_candidates_); + gtl::STLClearObject(&shared_buffer_); +} + +void VarDomination::FillTempRanks(bool reverse_references, + absl::Span enforcements, + absl::Span refs, + absl::Span coeffs) { + tmp_ranks_.clear(); + if (coeffs.empty()) { + // Simple case: all coefficients are assumed to be the same. + for (const int ref : refs) { + const IntegerVariable var = + RefToIntegerVariable(reverse_references ? NegatedRef(ref) : ref); + tmp_ranks_.push_back({var, 0, 0}); + } + } else { + // Complex case: different coefficients. + for (int i = 0; i < refs.size(); ++i) { + if (coeffs[i] == 0) continue; + const IntegerVariable var = RefToIntegerVariable( + reverse_references ? NegatedRef(refs[i]) : refs[i]); + if (coeffs[i] > 0) { + tmp_ranks_.push_back({var, 0, coeffs[i]}); + } else { + tmp_ranks_.push_back({NegationOf(var), 0, -coeffs[i]}); + } + } + std::sort(tmp_ranks_.begin(), tmp_ranks_.end()); + MakeRankEqualToStartOfPart({&tmp_ranks_[0], tmp_ranks_.size()}); + } + + // Add the enforcement last with a new rank. We add their negation since + // we want the activity to not decrease, and we want to allow any + // enforcement-- to dominate a variable in the constraint. 
+ const int enforcement_rank = tmp_ranks_.size(); + for (const int ref : enforcements) { + tmp_ranks_.push_back( + {RefToIntegerVariable(NegatedRef(ref)), 0, enforcement_rank}); + } +} + +// We take the intersection of the current dominating candidate with the +// restriction imposed by the current content of tmp_ranks_. +void VarDomination::FilterUsingTempRanks() { + // Expand ranks in temp vector. + tmp_var_to_rank_.resize(num_vars_with_negation_, -1); + for (const IntegerVariableWithRank entry : tmp_ranks_) { + tmp_var_to_rank_[entry.var] = entry.rank; + } + + // The activity of the variable in tmp_rank must not decrease. + for (const IntegerVariableWithRank entry : tmp_ranks_) { + // The only variables that can be paired with a var-- in the constriants are + // the var++ in the constraints with the same rank or higher. + // + // Note that we only filter the var-- domination lists here, we do not + // remove the var-- appearing in all the lists corresponding to wrong var++. + // This is left to the tranpose operation in EndSecondPhase(). + { + IntegerVariableSpan& span = dominating_vars_[entry.var]; + if (span.size == 0) continue; + int new_size = 0; + for (const IntegerVariable candidate : DominatingVariables(entry.var)) { + if (tmp_var_to_rank_[candidate] < entry.rank) continue; + buffer_[span.start + new_size++] = candidate; + } + span.size = new_size; + } + } + + // Reset temporary vector to all -1. + for (const IntegerVariableWithRank entry : tmp_ranks_) { + tmp_var_to_rank_[entry.var] = -1; + } +} + +// Slow: This is for debugging only. +void VarDomination::CheckUsingTempRanks() { + tmp_var_to_rank_.resize(num_vars_with_negation_, -1); + for (const IntegerVariableWithRank entry : tmp_ranks_) { + tmp_var_to_rank_[entry.var] = entry.rank; + } + + // The activity of the variable in tmp_rank must not decrease. 
+ for (IntegerVariable var(0); var < num_vars_with_negation_; ++var) { + const int var_rank = tmp_var_to_rank_[var]; + const int negated_var_rank = tmp_var_to_rank_[NegationOf(var)]; + for (const IntegerVariable dom : DominatingVariables(var)) { + CHECK(!can_freely_decrease_[NegationOf(dom)]); + + // Doing X--, Y++ is compatible if the rank[X] <= rank[Y]. But we also + // need to check if doing Not(Y)-- is compatible with Not(X)++. + CHECK_LE(var_rank, tmp_var_to_rank_[dom]); + CHECK_LE(tmp_var_to_rank_[NegationOf(dom)], negated_var_rank); + } + } + + for (const IntegerVariableWithRank entry : tmp_ranks_) { + tmp_var_to_rank_[entry.var] = -1; + } +} + +bool VarDomination::CanFreelyDecrease(int ref) const { + return CanFreelyDecrease(RefToIntegerVariable(ref)); +} + +bool VarDomination::CanFreelyDecrease(IntegerVariable var) const { + return can_freely_decrease_[var]; +} + +absl::Span VarDomination::InitialDominatingCandidates( + IntegerVariable var) const { + const IntegerVariableSpan span = initial_candidates_[var]; + if (span.size == 0) return absl::Span(); + return absl::Span(&shared_buffer_[span.start], + span.size); +} + +absl::Span VarDomination::DominatingVariables( + int ref) const { + return DominatingVariables(RefToIntegerVariable(ref)); +} + +absl::Span VarDomination::DominatingVariables( + IntegerVariable var) const { + const IntegerVariableSpan span = dominating_vars_[var]; + if (span.size == 0) return absl::Span(); + return absl::Span(&buffer_[span.start], span.size); +} + +std::string VarDomination::DominationDebugString(IntegerVariable var) const { + const int ref = IntegerVariableToRef(var); + std::string result = + absl::StrCat(PositiveRef(ref), RefIsPositive(ref) ? "--" : "++", " : "); + for (const IntegerVariable dom : DominatingVariables(var)) { + const int dom_ref = IntegerVariableToRef(dom); + absl::StrAppend(&result, PositiveRef(dom_ref), + RefIsPositive(dom_ref) ? 
"++" : "--", " "); + } + return result; +} + +void DualBoundStrengthening::CannotDecrease(absl::Span refs) { + for (const int ref : refs) { + const IntegerVariable var = RefToIntegerVariable(ref); + can_freely_decrease_until_[var] = kMaxIntegerValue; + } +} + +void DualBoundStrengthening::CannotIncrease(absl::Span refs) { + for (const int ref : refs) { + const IntegerVariable var = RefToIntegerVariable(ref); + can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; + } +} + +void DualBoundStrengthening::CannotMove(absl::Span refs) { + for (const int ref : refs) { + const IntegerVariable var = RefToIntegerVariable(ref); + can_freely_decrease_until_[var] = kMaxIntegerValue; + can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; + } +} + +template +void DualBoundStrengthening::ProcessLinearConstraint( + bool is_objective, const PresolveContext& context, + const LinearProto& linear, int64 min_activity, int64 max_activity) { + const int64 lb_limit = linear.domain(linear.domain_size() - 2); + const int64 ub_limit = linear.domain(1); + const int num_terms = linear.vars_size(); + for (int i = 0; i < num_terms; ++i) { + int ref = linear.vars(i); + int64 coeff = linear.coeffs(i); + if (coeff < 0) { + ref = NegatedRef(ref); + coeff = -coeff; + } + + const int64 min_term = coeff * context.MinOf(ref); + const int64 max_term = coeff * context.MaxOf(ref); + const int64 term_diff = max_term - min_term; + const IntegerVariable var = RefToIntegerVariable(ref); + + // lb side. + if (min_activity < lb_limit) { + if (min_activity + term_diff < lb_limit) { + can_freely_decrease_until_[var] = kMaxIntegerValue; + } else { + const IntegerValue slack(lb_limit - min_activity); + const IntegerValue var_diff = + CeilRatio(IntegerValue(slack), IntegerValue(coeff)); + can_freely_decrease_until_[var] = + std::max(can_freely_decrease_until_[var], + IntegerValue(context.MinOf(ref)) + var_diff); + } + } + + if (is_objective) { + // We never want to increase the objective value. 
+ can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; + continue; + } + + // ub side. + if (max_activity > ub_limit) { + if (max_activity - term_diff > ub_limit) { + can_freely_decrease_until_[NegationOf(var)] = kMaxIntegerValue; + } else { + const IntegerValue slack(max_activity - ub_limit); + const IntegerValue var_diff = + CeilRatio(IntegerValue(slack), IntegerValue(coeff)); + can_freely_decrease_until_[NegationOf(var)] = + std::max(can_freely_decrease_until_[NegationOf(var)], + -IntegerValue(context.MaxOf(ref)) + var_diff); + } + } + } +} + +bool DualBoundStrengthening::Strengthen(PresolveContext* context) { + const CpModelProto& cp_model = *context->working_model; + const int num_vars = cp_model.variables_size(); + for (int var = 0; var < num_vars; ++var) { + if (context->IsFixed(var)) continue; + + // Fix to lb? + const int64 lb = context->MinOf(var); + const int64 ub_limit = std::max(lb, CanFreelyDecreaseUntil(var)); + if (ub_limit == lb) { + context->UpdateRuleStats("dual: fix variable"); + CHECK(context->IntersectDomainWith(var, Domain(lb))); + continue; + } + + // Fix to ub? + const int64 ub = context->MaxOf(var); + const int64 lb_limit = + std::min(ub, -CanFreelyDecreaseUntil(NegatedRef(var))); + if (lb_limit == ub) { + context->UpdateRuleStats("dual: fix variable"); + CHECK(context->IntersectDomainWith(var, Domain(ub))); + continue; + } + + // Here we can fix to any value in [ub_limit, lb_limit] that is compatible + // with the current domain. We prefer zero or the lowest possible magnitude. + if (lb_limit > ub_limit) { + const Domain domain = + context->DomainOf(var).IntersectionWith(Domain(ub_limit, lb_limit)); + if (!domain.IsEmpty()) { + int64 value = domain.Contains(0) ? 
0 : domain.Min(); + if (value != 0) { + for (const int64 bound : domain.FlattenedIntervals()) { + if (std::abs(bound) < std::abs(value)) value = bound; + } + } + context->UpdateRuleStats("dual: fix variable with multiple choices"); + CHECK(context->IntersectDomainWith(var, Domain(value))); + continue; + } + } + + // Here we can reduce the domain, but we must be careful when the domain + // has holes. + if (lb_limit > lb || ub_limit < ub) { + const int64 new_ub = + ub_limit < ub ? context->DomainOf(var) + .IntersectionWith(Domain(ub_limit, kint64max)) + .Min() + : ub; + const int64 new_lb = + lb_limit > lb ? context->DomainOf(var) + .IntersectionWith(Domain(kint64min, lb_limit)) + .Max() + : lb; + context->UpdateRuleStats("dual: reduced domain"); + CHECK(context->IntersectDomainWith(var, Domain(new_lb, new_ub))); + } + } + return true; +} + +namespace { + +// TODO(user): Maybe we should avoid recomputing that here. +template +void FillMinMaxActivity(const PresolveContext& context, + const LinearExprProto& proto, int64* min_activity, + int64* max_activity) { + *min_activity = 0; + *max_activity = 0; + const int num_vars = proto.vars().size(); + for (int i = 0; i < num_vars; ++i) { + const int64 a = proto.coeffs(i) * context.MinOf(proto.vars(i)); + const int64 b = proto.coeffs(i) * context.MaxOf(proto.vars(i)); + *min_activity += std::min(a, b); + *max_activity += std::max(a, b); + } +} + +} // namespace + +void DetectDominanceRelations( + const PresolveContext& context, VarDomination* var_domination, + DualBoundStrengthening* dual_bound_strengthening) { + const CpModelProto& cp_model = *context.working_model; + const int num_vars = cp_model.variables().size(); + var_domination->Reset(num_vars); + dual_bound_strengthening->Reset(num_vars); + + int64 min_activity = kint64min; + int64 max_activity = kint64max; + + for (int var = 0; var < num_vars; ++var) { + // Deal with the affine relations that are not part of the proto. 
+ // Those only need to be processed in the first pass. + // + // TODO(user): This is not ideal since if only the representative is still + // used, we shouldn't restrict any dominance relation involving it. + const AffineRelation::Relation r = context.GetAffineRelation(var); + if (r.representative != var) { + dual_bound_strengthening->CannotMove({var, r.representative}); + if (r.coeff == 1) { + var_domination->CanOnlyDominateEachOther( + {NegatedRef(var), r.representative}); + } else if (r.coeff == -1) { + var_domination->CanOnlyDominateEachOther({var, r.representative}); + } else { + var_domination->CanOnlyDominateEachOther({var}); + var_domination->CanOnlyDominateEachOther({r.representative}); + } + } + + // Also ignore variables that have been substitued already or are unused. + if (context.IsFixed(var) || context.VariableWasRemoved(var) || + context.VariableIsNotUsedAnymore(var)) { + dual_bound_strengthening->CannotMove({var}); + var_domination->CanOnlyDominateEachOther({var}); + } + } + + // TODO(user): Benchmark and experiment with 3 phases algo: + // - Only ActivityShouldNotChange()/CanOnlyDominateEachOther(). + // - The other cases once. + // - EndFirstPhase() and then the other cases a second time. + std::vector tmp; + const int num_constraints = cp_model.constraints_size(); + for (int phase = 0; phase < 2; phase++) { + for (int c = 0; c < num_constraints; ++c) { + const ConstraintProto& ct = cp_model.constraints(c); + if (phase == 0) { + dual_bound_strengthening->CannotIncrease(ct.enforcement_literal()); + } + switch (ct.constraint_case()) { + case ConstraintProto::kBoolOr: + if (phase == 0) { + dual_bound_strengthening->CannotDecrease(ct.bool_or().literals()); + } + var_domination->ActivityShouldNotDecrease(ct.enforcement_literal(), + ct.bool_or().literals(), + /*coeffs=*/{}); + break; + case ConstraintProto::kBoolAnd: + if (phase == 0) { + dual_bound_strengthening->CannotDecrease(ct.bool_and().literals()); + } + + // We process it like n clauses. 
+ // + // TODO(user): the way we process that is a bit restrictive. By + // working on the implication graph we could detect more dominance + // relations. Since if a => b we say that a++ can only be paired with + // b--, but it could actually be paired with any variables that when + // dereased implies b = 0. This is a bit mitigated by the fact that + // we regroup when we can such implications into big at most ones. + tmp.clear(); + for (const int ref : ct.enforcement_literal()) { + tmp.push_back(NegatedRef(ref)); + } + for (const int ref : ct.bool_and().literals()) { + tmp.push_back(ref); + var_domination->ActivityShouldNotDecrease(/*enforcements=*/{}, tmp, + /*coeffs=*/{}); + tmp.pop_back(); + } + break; + case ConstraintProto::kAtMostOne: + if (phase == 0) { + dual_bound_strengthening->CannotIncrease( + ct.at_most_one().literals()); + } + var_domination->ActivityShouldNotIncrease(ct.enforcement_literal(), + ct.at_most_one().literals(), + /*coeffs=*/{}); + break; + case ConstraintProto::kLinear: { + FillMinMaxActivity(context, ct.linear(), &min_activity, + &max_activity); + if (phase == 0) { + dual_bound_strengthening->ProcessLinearConstraint( + false, context, ct.linear(), min_activity, max_activity); + } + const bool domain_is_simple = ct.linear().domain().size() == 2; + const bool free_to_increase = + domain_is_simple && ct.linear().domain(1) >= max_activity; + const bool free_to_decrease = + domain_is_simple && ct.linear().domain(0) <= min_activity; + if (free_to_decrease && free_to_increase) break; + if (free_to_increase) { + var_domination->ActivityShouldNotDecrease(ct.enforcement_literal(), + ct.linear().vars(), + ct.linear().coeffs()); + } else if (free_to_decrease) { + var_domination->ActivityShouldNotIncrease(ct.enforcement_literal(), + ct.linear().vars(), + ct.linear().coeffs()); + } else { + // TODO(user): Handle enforcement better here. 
+ if (!ct.enforcement_literal().empty()) { + var_domination->ActivityShouldNotIncrease( + /*enforcements=*/{}, ct.enforcement_literal(), /*coeffs=*/{}); + } + var_domination->ActivityShouldNotChange(ct.linear().vars(), + ct.linear().coeffs()); + } + break; + } + default: + // We cannot infer anything if we don't know the constraint. + // TODO(user): Handle enforcement better here. + if (phase == 0) { + dual_bound_strengthening->CannotMove(context.ConstraintToVars(c)); + } + for (const int var : context.ConstraintToVars(c)) { + var_domination->CanOnlyDominateEachOther({var}); + } + break; + } + } + + // The objective is handled like a <= constraints, or an == constraint if + // there is a non-trivial domain. + if (cp_model.has_objective()) { + // WARNING: The proto objective might not be up to date, so we need to + // write it first. + if (phase == 0) context.WriteObjectiveToProto(); + FillMinMaxActivity(context, cp_model.objective(), &min_activity, + &max_activity); + dual_bound_strengthening->ProcessLinearConstraint( + true, context, cp_model.objective(), min_activity, max_activity); + const auto& domain = cp_model.objective().domain(); + if (domain.empty() || (domain.size() == 2 && domain[0] <= min_activity)) { + var_domination->ActivityShouldNotIncrease( + /*enforcements=*/{}, cp_model.objective().vars(), + cp_model.objective().coeffs()); + } else { + var_domination->ActivityShouldNotChange(cp_model.objective().vars(), + cp_model.objective().coeffs()); + } + } + + if (phase == 0) var_domination->EndFirstPhase(); + if (phase == 1) var_domination->EndSecondPhase(); + } + + // Some statistics. 
+ int64 num_unconstrained_refs = 0; + int64 num_dominated_refs = 0; + int64 num_dominance_relations = 0; + for (int var = 0; var < num_vars; ++var) { + if (context.IsFixed(var)) continue; + + for (const int ref : {var, NegatedRef(var)}) { + if (var_domination->CanFreelyDecrease(ref)) { + num_unconstrained_refs++; + } else if (!var_domination->DominatingVariables(ref).empty()) { + num_dominated_refs++; + num_dominance_relations += + var_domination->DominatingVariables(ref).size(); + } + } + } + if (num_unconstrained_refs == 0 && num_dominated_refs == 0) return; + VLOG(1) << "Dominance:" + << " num_unconstrained_refs=" << num_unconstrained_refs + << " num_dominated_refs=" << num_dominated_refs + << " num_dominance_relations=" << num_dominance_relations; +} + +bool ExploitDominanceRelations(const VarDomination& var_domination, + PresolveContext* context) { + const CpModelProto& cp_model = *context->working_model; + const int num_vars = cp_model.variables_size(); + + // Abort early if there is nothing to do. + bool work_to_do = false; + for (int var = 0; var < num_vars; ++var) { + if (context->IsFixed(var)) continue; + if (!var_domination.DominatingVariables(var).empty() || + !var_domination.DominatingVariables(NegatedRef(var)).empty()) { + work_to_do = true; + break; + } + } + if (!work_to_do) return true; + + gtl::ITIVector var_lb_to_ub_diff(num_vars * 2, 0); + gtl::ITIVector in_constraints(num_vars * 2, false); + + const int num_constraints = cp_model.constraints_size(); + for (int c = 0; c < num_constraints; ++c) { + const ConstraintProto& ct = cp_model.constraints(c); + + if (ct.constraint_case() == ConstraintProto::kBoolAnd) { + if (ct.enforcement_literal().size() != 1) continue; + const int a = ct.enforcement_literal(0); + if (context->IsFixed(a)) continue; + for (const int b : ct.bool_and().literals()) { + if (context->IsFixed(b)) continue; + + // If (a--, b--) is valid, we can always set a to false. 
+ for (const IntegerVariable ivar : + var_domination.DominatingVariables(a)) { + const int ref = VarDomination::IntegerVariableToRef(ivar); + if (ref == NegatedRef(b)) { + context->UpdateRuleStats("domination: in implication"); + if (!context->SetLiteralToFalse(a)) return false; + break; + } + } + if (context->IsFixed(a)) break; + + // If (b++, a++) is valid, then we can always set b to true. + for (const IntegerVariable ivar : + var_domination.DominatingVariables(NegatedRef(b))) { + const int ref = VarDomination::IntegerVariableToRef(ivar); + if (ref == a) { + context->UpdateRuleStats("domination: in implication"); + if (!context->SetLiteralToTrue(b)) return false; + break; + } + } + } + continue; + } + + if (!ct.enforcement_literal().empty()) continue; + + if (ct.constraint_case() == ConstraintProto::kAtMostOne) { + for (const int ref : ct.at_most_one().literals()) { + in_constraints[VarDomination::RefToIntegerVariable(ref)] = true; + } + for (const int ref : ct.at_most_one().literals()) { + if (context->IsFixed(ref)) continue; + + const auto dominating_ivars = var_domination.DominatingVariables(ref); + if (dominating_ivars.empty()) continue; + for (const IntegerVariable ivar : dominating_ivars) { + if (!in_constraints[ivar]) continue; + if (context->IsFixed(VarDomination::IntegerVariableToRef(ivar))) { + continue; + } + + // We can set the dominated variable to false. 
+ context->UpdateRuleStats("domination: in at most one"); + if (!context->SetLiteralToFalse(ref)) return false; + break; + } + } + for (const int ref : ct.at_most_one().literals()) { + in_constraints[VarDomination::RefToIntegerVariable(ref)] = false; + } + } + + if (ct.constraint_case() != ConstraintProto::kLinear) continue; + + int num_dominated = 0; + for (const int var : context->ConstraintToVars(c)) { + if (!var_domination.DominatingVariables(var).empty()) ++num_dominated; + if (!var_domination.DominatingVariables(NegatedRef(var)).empty()) { + ++num_dominated; + } + } + if (num_dominated == 0) continue; + + // Precompute. + int64 min_activity = 0; + int64 max_activity = 0; + const int num_terms = ct.linear().vars_size(); + for (int i = 0; i < num_terms; ++i) { + int ref = ct.linear().vars(i); + int64 coeff = ct.linear().coeffs(i); + if (coeff < 0) { + ref = NegatedRef(ref); + coeff = -coeff; + } + const int64 min_term = coeff * context->MinOf(ref); + const int64 max_term = coeff * context->MaxOf(ref); + min_activity += min_term; + max_activity += max_term; + const IntegerVariable ivar = VarDomination::RefToIntegerVariable(ref); + var_lb_to_ub_diff[ivar] = max_term - min_term; + var_lb_to_ub_diff[NegationOf(ivar)] = min_term - max_term; + } + const int64 rhs_lb = ct.linear().domain(0); + const int64 rhs_ub = ct.linear().domain(ct.linear().domain_size() - 1); + if (max_activity < rhs_lb || min_activity > rhs_ub) { + return context->NotifyThatModelIsUnsat("linear equation unsat."); + } + + // Look for dominated var. 
+ for (int i = 0; i < num_terms; ++i) { + const int ref = ct.linear().vars(i); + const int64 coeff = ct.linear().coeffs(i); + const int64 coeff_magnitude = std::abs(coeff); + if (context->IsFixed(ref)) continue; + + for (const int current_ref : {ref, NegatedRef(ref)}) { + const absl::Span dominated_by = + var_domination.DominatingVariables(current_ref); + if (dominated_by.empty()) continue; + + const bool ub_side = (coeff > 0) == (current_ref == ref); + if (ub_side) { + if (max_activity <= rhs_ub) continue; + } else { + if (min_activity >= rhs_lb) continue; + } + const int64 slack = + ub_side ? rhs_ub - min_activity : max_activity - rhs_lb; + + // Compute the delta in activity if all dominating var moves to their + // other bound. + int64 delta = 0; + for (const IntegerVariable ivar : dominated_by) { + if (ub_side) { + delta += std::max(int64{0}, var_lb_to_ub_diff[ivar]); + } else { + delta += std::max(int64{0}, -var_lb_to_ub_diff[ivar]); + } + } + + const int64 lb = context->MinOf(current_ref); + if (delta + coeff_magnitude > slack) { + context->UpdateRuleStats("domination: fixed to lb."); + if (!context->IntersectDomainWith(current_ref, Domain(lb))) { + return false; + } + + // We need to update the precomputed quantities. 
+ const IntegerVariable current_var = + VarDomination::RefToIntegerVariable(current_ref); + if (ub_side) { + CHECK_GE(var_lb_to_ub_diff[current_var], 0); + max_activity -= var_lb_to_ub_diff[current_var]; + } else { + CHECK_LE(var_lb_to_ub_diff[current_var], 0); + min_activity -= var_lb_to_ub_diff[current_var]; + } + var_lb_to_ub_diff[current_var] = 0; + var_lb_to_ub_diff[NegationOf(current_var)] = 0; + + continue; + } + + const IntegerValue diff = FloorRatio(IntegerValue(slack - delta), + IntegerValue(coeff_magnitude)); + const int64 new_ub = lb + diff.value(); + if (new_ub < context->MaxOf(current_ref)) { + context->UpdateRuleStats("domination: reduced ub."); + if (!context->IntersectDomainWith(current_ref, Domain(lb, new_ub))) { + return false; + } + + // We need to update the precomputed quantities. + const IntegerVariable current_var = + VarDomination::RefToIntegerVariable(current_ref); + if (ub_side) { + CHECK_GE(var_lb_to_ub_diff[current_var], 0); + max_activity -= var_lb_to_ub_diff[current_var]; + } else { + CHECK_LE(var_lb_to_ub_diff[current_var], 0); + min_activity -= var_lb_to_ub_diff[current_var]; + } + const int64 new_diff = std::abs(coeff_magnitude * (new_ub - lb)); + if (ub_side) { + var_lb_to_ub_diff[current_var] = new_diff; + var_lb_to_ub_diff[NegationOf(current_var)] = -new_diff; + max_activity += new_diff; + } else { + var_lb_to_ub_diff[current_var] = -new_diff; + var_lb_to_ub_diff[NegationOf(current_var)] = +new_diff; + min_activity -= new_diff; + } + } + } + } + + // Restore. + for (const int ref : ct.linear().vars()) { + const IntegerVariable ivar = VarDomination::RefToIntegerVariable(ref); + var_lb_to_ub_diff[ivar] = 0; + var_lb_to_ub_diff[NegationOf(ivar)] = 0; + } + } + + // For any dominance relation still left (i.e. between non-fixed vars), if + // the variable are Boolean and X is dominated by Y, we can add + // (X == 1) => (Y = 1). But, as soon as we do that, we break some symmetry + // and cannot add any incompatible relations. 
+ // + // EX: It is possible that X dominate Y and Y dominate X if they are both + // appearing in exactly the same constraint with the same coefficient. + // + // TODO(user): generalize to non Booleans? + // TODO(user): We always keep adding the same relations. Do that only once! + int num_added = 0; + gtl::ITIVector increase_is_forbidden(2 * num_vars, + false); + for (int positive_ref = 0; positive_ref < num_vars; ++positive_ref) { + if (context->IsFixed(positive_ref)) continue; + if (!context->CanBeUsedAsLiteral(positive_ref)) continue; + for (const int ref : {positive_ref, NegatedRef(positive_ref)}) { + const IntegerVariable var = VarDomination::RefToIntegerVariable(ref); + if (increase_is_forbidden[NegationOf(var)]) continue; + for (const IntegerVariable dom : + var_domination.DominatingVariables(ref)) { + if (increase_is_forbidden[dom]) continue; + const int dom_ref = VarDomination::IntegerVariableToRef(dom); + if (context->IsFixed(dom_ref)) continue; + if (!context->CanBeUsedAsLiteral(dom_ref)) continue; + ++num_added; + context->AddImplication(ref, dom_ref); + + // dom-- or var++ are now forbidden. + increase_is_forbidden[var] = true; + increase_is_forbidden[NegationOf(dom)] = true; + } + } + } + if (num_added > 0) { + VLOG(1) << "Added " << num_added << " domination implications."; + context->UpdateNewConstraintsVariableUsage(); + context->UpdateRuleStats("domination: added implications."); + } + + return true; +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/var_domination.h b/ortools/sat/var_domination.h new file mode 100644 index 0000000000..cfe24d4f5a --- /dev/null +++ b/ortools/sat/var_domination.h @@ -0,0 +1,263 @@ +// Copyright 2010-2018 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef OR_TOOLS_SAT_VAR_DOMINATION_H_
+#define OR_TOOLS_SAT_VAR_DOMINATION_H_
+
+#include "ortools/algorithms/dynamic_partition.h"
+#include "ortools/base/stl_util.h"
+#include "ortools/sat/cp_model_utils.h"
+#include "ortools/sat/integer.h"
+#include "ortools/sat/presolve_context.h"
+
+namespace operations_research {
+namespace sat {
+
+// A variable X is said to dominate a variable Y if, from any feasible solution,
+// doing X++ and Y-- is also feasible (modulo the domain of X and Y) and has the
+// same or a better objective value.
+//
+// Note that we also look for dominance between the negation of the variables.
+// So we detect all (X++, Y++), (X--, Y--), (X++, Y--) and (X--, Y++) cases.
+// We reuse both ref / Negated(ref) and translate that to IntegerVariable for
+// indexing vectors.
+//
+// Once detected, dominance relations can lead to more propagation. Note however,
+// that we will lose feasible solutions that are dominated by better solutions.
+// In particular, in a linear constraint sum coeff * Xi <= rhs with positive
+// coeff, if an X is dominated by a set of other variables in the constraint,
+// then its upper bound can be propagated assuming the dominating variables are
+// at their upper bound. This can in many cases result in X being fixed to its
+// lower bound.
+//
+// TODO(user): We have a lot of benchmarks and tests that show that we don't
+// report wrong relations, but we lack unit tests that make sure we don't miss
+// any. Try to improve the situation. 
+class VarDomination { + public: + VarDomination() {} + + // This is the translation used from "ref" to IntegerVariable. The API + // understands the cp_model.proto ref, but internally we only store + // IntegerVariable. + static IntegerVariable RefToIntegerVariable(int ref) { + return RefIsPositive(ref) ? IntegerVariable(2 * ref) + : IntegerVariable(2 * NegatedRef(ref) + 1); + } + static int IntegerVariableToRef(IntegerVariable var) { + return VariableIsPositive(var) ? var.value() / 2 + : NegatedRef(var.value() / 2); + } + + // Reset the class to a clean state. + // At the beginning, we assume that there is no constraint. + void Reset(int num_variables); + + // These functions are used to encode all of our constraints. + // The algorithm works in two passes, so one should do: + // - 1/ Convert all problem constraints to one or more calls + // - 2/ Call EndFirstPhase() + // - 3/ Redo 1. Only the one-sided constraints need to be processed again. But + // calling the others will just do nothing, so it is fine too. + // - 4/ Call EndSecondPhase() + // + // The names are pretty self-explanatory. A few linear constraint examples: + // - To encode terms = cte, one should call ActivityShouldNotChange() + // - To encode terms >= cte, one should call ActivityShouldNotDecrease() + // - To encode terms <= cte, one should call ActivityShouldNotIncrease() + // + // The coeffs vector can be left empty, in which case all variables are assumed + // to have the same coefficients. CanOnlyDominateEachOther() is basically the + // same as ActivityShouldNotChange() without any coefficients. + // + // Note(user): It is better complexity-wise to first refine the underlying + // partition as much as possible, and then process all + // ActivityShouldNotIncrease() and ActivityShouldNotDecrease() in two passes. + // Experiment with it, it might require changing the API slightly since the + // increase / decrease functions also refine the partition. 
+ void CanOnlyDominateEachOther(absl::Span refs); + void ActivityShouldNotChange(absl::Span refs, + absl::Span coeffs); + void ActivityShouldNotDecrease(absl::Span enforcements, + absl::Span refs, + absl::Span coeffs); + void ActivityShouldNotIncrease(absl::Span enforcements, + absl::Span refs, + absl::Span coeffs); + + // EndFirstPhase() must be called once all constraints have been processed + // once. One then needs to redo the calls to ActivityShouldNotIncrease() and + // ActivityShouldNotDecrease(). And finally call EndSecondPhase() before + // querying the domination information. + void EndFirstPhase(); + void EndSecondPhase(); + + // This is true if this variable was never restricted by any call. We can thus + // fix it to its lower bound. + bool CanFreelyDecrease(int ref) const; + bool CanFreelyDecrease(IntegerVariable var) const; + + // Returns a set of variable dominating the given ones. Note that to keep the + // algo efficient, this might not include all the possible dominations. + // + // Note: we never include as part of the dominating candidate variables that + // can freely increase. + absl::Span DominatingVariables(int ref) const; + absl::Span DominatingVariables( + IntegerVariable var) const; + + // Returns readable string with the possible valid combinations of the form + // (var++/--, dom++/--) to facilitate debugging. + std::string DominationDebugString(IntegerVariable var) const; + + private: + struct IntegerVariableWithRank { + IntegerVariable var; + int part; + int64 rank; + + bool operator<(const IntegerVariableWithRank& o) const { + return rank < o.rank; + } + }; + + // This refine the partition can_dominate_partition_ with the given set. + void RefinePartition(std::vector* vars); + + // Convert the input from the public API into tmp_ranks_. + void MakeRankEqualToStartOfPart(absl::Span span); + void FillTempRanks(bool reverse_references, + absl::Span enforcements, + absl::Span refs, + absl::Span coeffs); + + // First phase functions. 
We will keep for each variable a list of possible + // candidates which is as short as possible. + absl::Span InitialDominatingCandidates( + IntegerVariable var) const; + void ProcessTempRanks(); + void Initialize(absl::Span span); + + // Second phase function to filter the current candidate lists. + void FilterUsingTempRanks(); + + // Debug function. + void CheckUsingTempRanks(); + + // Starts at zero on Reset(), moves to one on EndFirstPhase() and to 2 on + // EndSecondPhase(). This is used for debug checks and to control what happens + // in the constraint processing functions. + int phase_ = 0; + + // The variables will be sorted by non-decreasing rank. The rank is also the + // start of the first variable in tmp_ranks_ with this rank. + // + // Note that the rank should be int, but to reuse the same vector when we + // construct it, we need int64. See FillTempRanks(). + std::vector tmp_ranks_; + + // This does not change after EndFirstPhase(). + // + // We will add to the dynamic partition a set of subsets S, each meaning that + // any variable in S can only dominate or be dominated by another variable in + // S. + std::vector tmp_vars_; + std::unique_ptr partition_; + gtl::ITIVector can_freely_decrease_; + + // Used by FilterUsingTempRanks(). + int num_vars_with_negation_; + gtl::ITIVector tmp_var_to_rank_; + + // We don't use absl::Span() because the underlying buffer can be resized. + // This however serves the same purpose. + struct IntegerVariableSpan { + int start = 0; + int size = 0; + }; + + // This holds the first phase best candidates. + // Warning, the initial candidates span can overlap in the shared_buffer_. + std::vector shared_buffer_; + gtl::ITIVector initial_candidates_; + + // This will hold the final result. + // Buffer with independent content for each var. 
+ std::vector buffer_; + gtl::ITIVector dominating_vars_; +}; + +// This detects variables that can move freely in one direction, or that can +// move freely as long as their values do not cross a bound. +// +// TODO(user): This is actually an important step to do before scaling as it can +// usually reduce really large bounds! +class DualBoundStrengthening { + public: + // Reset the class to a clean state. + // This must be called before processing the constraints. + void Reset(int num_variables) { + can_freely_decrease_until_.assign(2 * num_variables, kMinIntegerValue); + } + + // All constraints should be mapped to one or more calls to these functions. + void CannotDecrease(absl::Span refs); + void CannotIncrease(absl::Span refs); + void CannotMove(absl::Span refs); + + // Most of the logic here deals with linear constraints. + template + void ProcessLinearConstraint(bool is_objective, + const PresolveContext& context, + const LinearProto& linear, int64 min_activity, + int64 max_activity); + + // Once ALL constraints have been processed, call this to fix variables or + // reduce their domain if possible. + bool Strengthen(PresolveContext* context); + + // The given ref can always freely decrease until the returned value. + // Note that this does not take into account the domain of the variable. + int64 CanFreelyDecreaseUntil(int ref) const { + return can_freely_decrease_until_[RefToIntegerVariable(ref)].value(); + } + + private: + // We encode proto ref as IntegerVariable for indexing vectors. + static IntegerVariable RefToIntegerVariable(int ref) { + return RefIsPositive(ref) ? IntegerVariable(2 * ref) + : IntegerVariable(2 * NegatedRef(ref) + 1); + } + + // Starts at kMinIntegerValue (see Reset()) and is updated as constraints are processed. + gtl::ITIVector can_freely_decrease_until_; +}; + +// Detect the variable dominance relations within the given model. Note that +// to avoid doing too much work, we might miss some relations. This does two +// full scans of the model. 
+void DetectDominanceRelations(const PresolveContext& context, + VarDomination* var_domination, + DualBoundStrengthening* dual_bound_strengthening); + +// Once detected, exploit the dominance relations that appear in the same +// constraint. This does a full scan of the model. +// +// Return false if the problem is infeasible. +bool ExploitDominanceRelations(const VarDomination& var_domination, + PresolveContext* context); + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_VAR_DOMINATION_H_ diff --git a/ortools/sat/zero_half_cuts.cc b/ortools/sat/zero_half_cuts.cc index e290de423c..b1998556e2 100644 --- a/ortools/sat/zero_half_cuts.cc +++ b/ortools/sat/zero_half_cuts.cc @@ -26,9 +26,9 @@ void ZeroHalfCutHelper::Reset(int size) { } void ZeroHalfCutHelper::ProcessVariables( - const std::vector &lp_values, - const std::vector &lower_bounds, - const std::vector &upper_bounds) { + const std::vector& lp_values, + const std::vector& lower_bounds, + const std::vector& upper_bounds) { Reset(lp_values.size()); // Shift all variables to their closest bound. @@ -46,7 +46,7 @@ void ZeroHalfCutHelper::ProcessVariables( } } -void ZeroHalfCutHelper::AddBinaryRow(const CombinationOfRows &binary_row) { +void ZeroHalfCutHelper::AddBinaryRow(const CombinationOfRows& binary_row) { // No point pushing an all zero row with a zero rhs. 
if (binary_row.cols.empty() && !binary_row.rhs_parity) return; for (const int col : binary_row.cols) { @@ -57,7 +57,7 @@ void ZeroHalfCutHelper::AddBinaryRow(const CombinationOfRows &binary_row) { void ZeroHalfCutHelper::AddOneConstraint( const glop::RowIndex row, - const std::vector > &terms, + const std::vector>& terms, IntegerValue lb, IntegerValue ub) { if (terms.size() > kMaxInputConstraintSize) return; @@ -65,7 +65,7 @@ void ZeroHalfCutHelper::AddOneConstraint( IntegerValue magnitude(0); CombinationOfRows binary_row; int rhs_adjust = 0; - for (const auto &term : terms) { + for (const auto& term : terms) { const int col = term.first.value(); activity += ToDouble(term.second) * lp_values_[col]; magnitude = std::max(magnitude, IntTypeAbs(term.second)); @@ -109,8 +109,8 @@ void ZeroHalfCutHelper::AddOneConstraint( } void ZeroHalfCutHelper::SymmetricDifference( - std::function extra_condition, const std::vector &a, - std::vector *b) { + std::function extra_condition, const std::vector& a, + std::vector* b) { for (const int v : *b) tmp_marked_[v] = true; for (const int v : a) { if (tmp_marked_[v]) { @@ -142,7 +142,7 @@ void ZeroHalfCutHelper::ProcessSingletonColumns() { CHECK_EQ(col_to_rows_[singleton_col].size(), 1); const int row = col_to_rows_[singleton_col][0]; int new_size = 0; - auto &mutable_cols = rows_[row].cols; + auto& mutable_cols = rows_[row].cols; for (const int col : mutable_cols) { if (col == singleton_col) continue; mutable_cols[new_size++] = col; @@ -179,13 +179,13 @@ void ZeroHalfCutHelper::EliminateVarUsingRow(int eliminated_col, // Update the multipliers the same way. 
{ - auto &mutable_multipliers = rows_[other_row].multipliers; + auto& mutable_multipliers = rows_[other_row].multipliers; mutable_multipliers.insert(mutable_multipliers.end(), rows_[eliminated_row].multipliers.begin(), rows_[eliminated_row].multipliers.end()); std::sort(mutable_multipliers.begin(), mutable_multipliers.end()); int new_size = 0; - for (const auto &entry : mutable_multipliers) { + for (const auto& entry : mutable_multipliers) { if (new_size > 0 && entry == mutable_multipliers[new_size - 1]) { // Cancel both. --new_size; @@ -223,9 +223,9 @@ void ZeroHalfCutHelper::EliminateVarUsingRow(int eliminated_col, rows_[eliminated_row].slack += shifted_lp_values_[eliminated_col]; } -std::vector > > -ZeroHalfCutHelper::InterestingCandidates(ModelRandomGenerator *random) { - std::vector > > result; +std::vector>> +ZeroHalfCutHelper::InterestingCandidates(ModelRandomGenerator* random) { + std::vector>> result; // Initialize singleton_cols_. singleton_cols_.clear(); @@ -264,7 +264,7 @@ ZeroHalfCutHelper::InterestingCandidates(ModelRandomGenerator *random) { // As an heuristic, we just try to add zero rows with an odd rhs and a low // enough slack. - for (const auto &row : rows_) { + for (const auto& row : rows_) { if (row.cols.empty() && row.rhs_parity && row.slack < kSlackThreshold) { result.push_back(row.multipliers); } diff --git a/ortools/util/file_util.cc b/ortools/util/file_util.cc index dae1eda53c..f76c96712a 100644 --- a/ortools/util/file_util.cc +++ b/ortools/util/file_util.cc @@ -35,7 +35,7 @@ absl::StatusOr ReadFileToString(absl::string_view filename) { } bool ReadFileToProto(absl::string_view filename, - google::protobuf::Message *proto) { + google::protobuf::Message* proto) { std::string data; CHECK_OK(file::GetContents(filename, &data, file::Defaults())); // Note that gzipped files are currently not supported. 
@@ -75,7 +75,7 @@ bool ReadFileToProto(absl::string_view filename, } bool WriteProtoToFile(absl::string_view filename, - const google::protobuf::Message &proto, + const google::protobuf::Message& proto, ProtoWriteFormat proto_write_format, bool gzipped, bool append_extension_to_file_name) { // Note that gzipped files are currently not supported. diff --git a/ortools/util/fp_utils.cc b/ortools/util/fp_utils.cc index 2130c322d0..e6876e52e2 100644 --- a/ortools/util/fp_utils.cc +++ b/ortools/util/fp_utils.cc @@ -21,18 +21,18 @@ namespace operations_research { namespace { -void ReorderAndCapTerms(double *min, double *max) { +void ReorderAndCapTerms(double* min, double* max) { if (*min > *max) std::swap(*min, *max); if (*min > 0.0) *min = 0.0; if (*max < 0.0) *max = 0.0; } template -void ComputeScalingErrors(const std::vector &input, - const std::vector &lb, - const std::vector &ub, double scaling_factor, - double *max_relative_coeff_error, - double *max_scaled_sum_error) { +void ComputeScalingErrors(const std::vector& input, + const std::vector& lb, + const std::vector& ub, double scaling_factor, + double* max_relative_coeff_error, + double* max_scaled_sum_error) { double max_error = 0.0; double min_error = 0.0; *max_relative_coeff_error = 0.0; @@ -59,11 +59,11 @@ void ComputeScalingErrors(const std::vector &input, } template -void GetBestScalingOfDoublesToInt64(const std::vector &input, - const std::vector &lb, - const std::vector &ub, +void GetBestScalingOfDoublesToInt64(const std::vector& input, + const std::vector& lb, + const std::vector& ub, int64 max_absolute_sum, - double *scaling_factor) { + double* scaling_factor) { const double kInfinity = std::numeric_limits::infinity(); // We start by initializing the returns value to the "error" state. 
@@ -156,18 +156,18 @@ void GetBestScalingOfDoublesToInt64(const std::vector &input, } // namespace -void ComputeScalingErrors(const std::vector &input, - const std::vector &lb, - const std::vector &ub, double scaling_factor, - double *max_relative_coeff_error, - double *max_scaled_sum_error) { +void ComputeScalingErrors(const std::vector& input, + const std::vector& lb, + const std::vector& ub, double scaling_factor, + double* max_relative_coeff_error, + double* max_scaled_sum_error) { ComputeScalingErrors(input, lb, ub, scaling_factor, max_relative_coeff_error, max_scaled_sum_error); } -double GetBestScalingOfDoublesToInt64(const std::vector &input, - const std::vector &lb, - const std::vector &ub, +double GetBestScalingOfDoublesToInt64(const std::vector& input, + const std::vector& lb, + const std::vector& ub, int64 max_absolute_sum) { double scaling_factor; GetBestScalingOfDoublesToInt64(input, lb, ub, max_absolute_sum, @@ -175,10 +175,10 @@ double GetBestScalingOfDoublesToInt64(const std::vector &input, return scaling_factor; } -void GetBestScalingOfDoublesToInt64(const std::vector &input, +void GetBestScalingOfDoublesToInt64(const std::vector& input, int64 max_absolute_sum, - double *scaling_factor, - double *max_relative_coeff_error) { + double* scaling_factor, + double* max_relative_coeff_error) { double max_scaled_sum_error; GetBestScalingOfDoublesToInt64(input, {}, {}, max_absolute_sum, scaling_factor); @@ -186,7 +186,7 @@ void GetBestScalingOfDoublesToInt64(const std::vector &input, max_relative_coeff_error, &max_scaled_sum_error); } -int64 ComputeGcdOfRoundedDoubles(const std::vector &x, +int64 ComputeGcdOfRoundedDoubles(const std::vector& x, double scaling_factor) { int64 gcd = 0; for (int i = 0; i < x.size() && gcd != 1; ++i) { diff --git a/ortools/util/graph_export.cc b/ortools/util/graph_export.cc index d6e40fc754..8f9a462e33 100644 --- a/ortools/util/graph_export.cc +++ b/ortools/util/graph_export.cc @@ -31,15 +31,15 @@ class GraphSyntax { virtual 
~GraphSyntax() {} // Node in the right syntax. - virtual std::string Node(const std::string &name, const std::string &label, - const std::string &shape, - const std::string &color) = 0; + virtual std::string Node(const std::string& name, const std::string& label, + const std::string& shape, + const std::string& color) = 0; // Adds one link in the generated graph. - virtual std::string Link(const std::string &source, - const std::string &destination, - const std::string &label) = 0; + virtual std::string Link(const std::string& source, + const std::string& destination, + const std::string& label) = 0; // File header. - virtual std::string Header(const std::string &name) = 0; + virtual std::string Header(const std::string& name) = 0; // File footer. virtual std::string Footer() = 0; @@ -49,21 +49,21 @@ class DotSyntax : public GraphSyntax { public: ~DotSyntax() override {} - std::string Node(const std::string &name, const std::string &label, - const std::string &shape, - const std::string &color) override { + std::string Node(const std::string& name, const std::string& label, + const std::string& shape, + const std::string& color) override { return absl::StrFormat("%s [shape=%s label=\"%s\" color=%s]\n", name, shape, label, color); } // Adds one link in the generated graph. - std::string Link(const std::string &source, const std::string &destination, - const std::string &label) override { + std::string Link(const std::string& source, const std::string& destination, + const std::string& label) override { return absl::StrFormat("%s -> %s [label=%s]\n", source, destination, label); } // File header. 
- std::string Header(const std::string &name) override { + std::string Header(const std::string& name) override { return absl::StrFormat("graph %s {\n", name); } @@ -75,9 +75,9 @@ class GmlSyntax : public GraphSyntax { public: ~GmlSyntax() override {} - std::string Node(const std::string &name, const std::string &label, - const std::string &shape, - const std::string &color) override { + std::string Node(const std::string& name, const std::string& label, + const std::string& shape, + const std::string& color) override { return absl::StrFormat( " node [\n" " name \"%s\"\n" @@ -91,8 +91,8 @@ class GmlSyntax : public GraphSyntax { } // Adds one link in the generated graph. - std::string Link(const std::string &source, const std::string &destination, - const std::string &label) override { + std::string Link(const std::string& source, const std::string& destination, + const std::string& label) override { return absl::StrFormat( " edge [\n" " label \"%s\"\n" @@ -103,7 +103,7 @@ class GmlSyntax : public GraphSyntax { } // File header. - std::string Header(const std::string &name) override { + std::string Header(const std::string& name) override { return absl::StrFormat( "graph [\n" " name \"%s\"\n", @@ -118,42 +118,42 @@ class GmlSyntax : public GraphSyntax { // Takes ownership of the GraphSyntax parameter. class FileGraphExporter : public GraphExporter { public: - FileGraphExporter(File *const file, GraphSyntax *const syntax) + FileGraphExporter(File* const file, GraphSyntax* const syntax) : file_(file), syntax_(syntax) {} ~FileGraphExporter() override {} // Write node in GML or DOT format. - void WriteNode(const std::string &name, const std::string &label, - const std::string &shape, const std::string &color) override { + void WriteNode(const std::string& name, const std::string& label, + const std::string& shape, const std::string& color) override { Append(syntax_->Node(name, label, shape, color)); } // Adds one link in the generated graph. 
- void WriteLink(const std::string &source, const std::string &destination, - const std::string &label) override { + void WriteLink(const std::string& source, const std::string& destination, + const std::string& label) override { Append(syntax_->Link(source, destination, label)); } - void WriteHeader(const std::string &name) override { + void WriteHeader(const std::string& name) override { Append(syntax_->Header(name)); } void WriteFooter() override { Append(syntax_->Footer()); } private: - void Append(const std::string &str) { + void Append(const std::string& str) { file::WriteString(file_, str, file::Defaults()).IgnoreError(); } - File *const file_; + File* const file_; std::unique_ptr syntax_; }; } // namespace -GraphExporter *GraphExporter::MakeFileExporter( - File *const file, GraphExporter::GraphFormat format) { - GraphSyntax *syntax = nullptr; +GraphExporter* GraphExporter::MakeFileExporter( + File* const file, GraphExporter::GraphFormat format) { + GraphSyntax* syntax = nullptr; switch (format) { case GraphExporter::DOT_FORMAT: { syntax = new DotSyntax(); diff --git a/ortools/util/piecewise_linear_function.cc b/ortools/util/piecewise_linear_function.cc index 3ef5df4a9c..d30ffc115e 100644 --- a/ortools/util/piecewise_linear_function.cc +++ b/ortools/util/piecewise_linear_function.cc @@ -29,7 +29,7 @@ namespace { // the index of the right segment. If the x value is not in the function's // domain, it returns the index of the previous segment or kNotFound if x // is before the first segment's start. -int FindSegmentIndex(const std::vector &segments, int64 x) { +int FindSegmentIndex(const std::vector& segments, int64 x) { if (segments.empty() || segments.front().start_x() > x) { return PiecewiseLinearFunction::kNotFound; } @@ -56,8 +56,8 @@ inline bool PointInsideRange(int64 point, int64 range_start, int64 range_end) { // Checks whether two segments form a convex pair, i.e. they are continuous and // the slope of the right is bigger than the slope of the left. 
-inline bool FormConvexPair(const PiecewiseSegment &left, - const PiecewiseSegment &right) { +inline bool FormConvexPair(const PiecewiseSegment& left, + const PiecewiseSegment& right) { return right.slope() >= left.slope() && right.start_x() == left.end_x() && right.start_y() == left.end_y(); } @@ -223,13 +223,13 @@ int64 PiecewiseSegment::SafeValuePreReference(int64 x) const { } } -bool PiecewiseSegment::SortComparator(const PiecewiseSegment &segment1, - const PiecewiseSegment &segment2) { +bool PiecewiseSegment::SortComparator(const PiecewiseSegment& segment1, + const PiecewiseSegment& segment2) { return segment1.start_x_ < segment2.start_x_; } bool PiecewiseSegment::FindComparator(int64 point, - const PiecewiseSegment &segment) { + const PiecewiseSegment& segment) { return point == kint64min || point < segment.start_x(); } @@ -282,12 +282,12 @@ PiecewiseLinearFunction::PiecewiseLinearFunction( } } // Construct the piecewise linear function. - for (const auto &segment : segments) { + for (const auto& segment : segments) { InsertSegment(segment); } } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreatePiecewiseLinearFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreatePiecewiseLinearFunction( std::vector points_x, std::vector points_y, std::vector slopes, std::vector other_points_x) { CHECK_EQ(points_x.size(), points_y.size()); @@ -304,7 +304,7 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreatePiecewiseLinearFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreateStepFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateStepFunction( std::vector points_x, std::vector points_y, std::vector other_points_x) { CHECK_EQ(points_x.size(), points_y.size()); @@ -320,7 +320,7 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreateStepFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction 
*PiecewiseLinearFunction::CreateFullDomainFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateFullDomainFunction( int64 initial_level, std::vector points_x, std::vector slopes) { CHECK_EQ(points_x.size(), slopes.size() - 1); @@ -344,7 +344,7 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreateFullDomainFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreateOneSegmentFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateOneSegmentFunction( int64 point_x, int64 point_y, int64 slope, int64 other_point_x) { // Visual studio 2013: We cannot inline the vector in the // PiecewiseLinearFunction ctor. @@ -353,21 +353,21 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreateOneSegmentFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreateRightRayFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateRightRayFunction( int64 point_x, int64 point_y, int64 slope) { std::vector segments = { PiecewiseSegment(point_x, point_y, slope, kint64max)}; return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreateLeftRayFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateLeftRayFunction( int64 point_x, int64 point_y, int64 slope) { std::vector segments = { PiecewiseSegment(point_x, point_y, slope, kint64min)}; return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction *PiecewiseLinearFunction::CreateFixedChargeFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateFixedChargeFunction( int64 slope, int64 value) { std::vector segments = { PiecewiseSegment(0, 0, 0, kint64min), @@ -377,7 +377,7 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreateFixedChargeFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction 
*PiecewiseLinearFunction::CreateEarlyTardyFunction( +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateEarlyTardyFunction( int64 reference, int64 earliness_slope, int64 tardiness_slope) { std::vector segments = { PiecewiseSegment(reference, 0, -earliness_slope, kint64min), @@ -387,7 +387,7 @@ PiecewiseLinearFunction *PiecewiseLinearFunction::CreateEarlyTardyFunction( return new PiecewiseLinearFunction(std::move(segments)); } -PiecewiseLinearFunction * +PiecewiseLinearFunction* PiecewiseLinearFunction::CreateEarlyTardyFunctionWithSlack( int64 early_slack, int64 late_slack, int64 earliness_slope, int64 tardiness_slope) { @@ -413,17 +413,17 @@ bool PiecewiseLinearFunction::InDomain(int64 x) const { } bool PiecewiseLinearFunction::IsConvex() const { - const_cast(this)->UpdateStatus(); + const_cast(this)->UpdateStatus(); return is_convex_; } bool PiecewiseLinearFunction::IsNonDecreasing() const { - const_cast(this)->UpdateStatus(); + const_cast(this)->UpdateStatus(); return is_non_decreasing_; } bool PiecewiseLinearFunction::IsNonIncreasing() const { - const_cast(this)->UpdateStatus(); + const_cast(this)->UpdateStatus(); return is_non_increasing_; } @@ -597,7 +597,7 @@ std::pair PiecewiseLinearFunction::GetSmallestRangeInValueRange( return {reduced_range_start, reduced_range_end}; } for (int i = std::max(0, start_segment); i <= end_segment; ++i) { - const auto &segment = segments_[i]; + const auto& segment = segments_[i]; const int64 start_x = std::max(range_start, segment.start_x()); const int64 end_x = std::min(range_end, segment.end_x()); const int64 start_y = segment.Value(start_x); @@ -624,31 +624,31 @@ void PiecewiseLinearFunction::AddConstantToY(int64 constant) { } } -void PiecewiseLinearFunction::Add(const PiecewiseLinearFunction &other) { +void PiecewiseLinearFunction::Add(const PiecewiseLinearFunction& other) { Operation(other, [](int64 a, int64 b) { return CapAdd(a, b); }); } -void PiecewiseLinearFunction::Subtract(const PiecewiseLinearFunction &other) 
{ +void PiecewiseLinearFunction::Subtract(const PiecewiseLinearFunction& other) { Operation(other, [](int64 a, int64 b) { return CapSub(a, b); }); } -std::vector +std::vector PiecewiseLinearFunction::DecomposeToConvexFunctions() const { CHECK_GE(segments_.size(), 1); if (IsConvex()) { return {new PiecewiseLinearFunction(segments_)}; } - std::vector convex_functions; + std::vector convex_functions; std::vector convex_segments; - for (const PiecewiseSegment &segment : segments_) { + for (const PiecewiseSegment& segment : segments_) { if (convex_segments.empty()) { convex_segments.push_back(segment); continue; } - const PiecewiseSegment &last = convex_segments.back(); + const PiecewiseSegment& last = convex_segments.back(); if (FormConvexPair(last, segment)) { // The segment belongs to the convex sub-function formulated up to now. convex_segments.push_back(segment); @@ -675,7 +675,7 @@ std::string PiecewiseLinearFunction::DebugString() const { return result; } -void PiecewiseLinearFunction::InsertSegment(const PiecewiseSegment &segment) { +void PiecewiseLinearFunction::InsertSegment(const PiecewiseSegment& segment) { is_modified_ = true; // No intersection. 
if (segments_.empty() || segments_.back().end_x() < segment.start_x()) { @@ -695,11 +695,11 @@ void PiecewiseLinearFunction::InsertSegment(const PiecewiseSegment &segment) { } void PiecewiseLinearFunction::Operation( - const PiecewiseLinearFunction &other, - const std::function &operation) { + const PiecewiseLinearFunction& other, + const std::function& operation) { is_modified_ = true; std::vector own_segments; - const std::vector &other_segments = other.segments(); + const std::vector& other_segments = other.segments(); own_segments.swap(segments_); std::set start_x_points; @@ -714,8 +714,8 @@ void PiecewiseLinearFunction::Operation( const int own_index = FindSegmentIndex(own_segments, start_x); const int other_index = FindSegmentIndex(other_segments, start_x); if (own_index >= 0 && other_index >= 0) { - const PiecewiseSegment &own_segment = own_segments[own_index]; - const PiecewiseSegment &other_segment = other_segments[other_index]; + const PiecewiseSegment& own_segment = own_segments[own_index]; + const PiecewiseSegment& other_segment = other_segments[other_index]; const int64 end_x = std::min(own_segment.end_x(), other_segment.end_x()); const int64 start_y = @@ -740,8 +740,8 @@ void PiecewiseLinearFunction::Operation( } bool PiecewiseLinearFunction::FindSegmentIndicesFromRange( - int64 range_start, int64 range_end, int *start_segment, - int *end_segment) const { + int64 range_start, int64 range_end, int* start_segment, + int* end_segment) const { *start_segment = FindSegmentIndex(segments_, range_start); *end_segment = FindSegmentIndex(segments_, range_end); if (*start_segment == *end_segment) { @@ -768,7 +768,7 @@ bool PiecewiseLinearFunction::IsConvexInternal() const { bool PiecewiseLinearFunction::IsNonDecreasingInternal() const { int64 value = kint64min; - for (const auto &segment : segments_) { + for (const auto& segment : segments_) { const int64 start_y = segment.start_y(); const int64 end_y = segment.end_y(); if (end_y < start_y || start_y < value) 
return false; @@ -779,7 +779,7 @@ bool PiecewiseLinearFunction::IsNonDecreasingInternal() const { bool PiecewiseLinearFunction::IsNonIncreasingInternal() const { int64 value = kint64max; - for (const auto &segment : segments_) { + for (const auto& segment : segments_) { const int64 start_y = segment.start_y(); const int64 end_y = segment.end_y(); if (end_y > start_y || start_y > value) return false; diff --git a/ortools/util/proto_tools.cc b/ortools/util/proto_tools.cc index c2a1872cbc..a59a67e2ed 100644 --- a/ortools/util/proto_tools.cc +++ b/ortools/util/proto_tools.cc @@ -26,14 +26,14 @@ using ::google::protobuf::Reflection; using ::google::protobuf::TextFormat; namespace { -void WriteFullProtocolMessage(const google::protobuf::Message &message, - int indent_level, std::string *out) { +void WriteFullProtocolMessage(const google::protobuf::Message& message, + int indent_level, std::string* out) { std::string temp_string; const std::string indent(indent_level * 2, ' '); - const Descriptor *desc = message.GetDescriptor(); - const Reflection *refl = message.GetReflection(); + const Descriptor* desc = message.GetDescriptor(); + const Reflection* refl = message.GetReflection(); for (int i = 0; i < desc->field_count(); ++i) { - const FieldDescriptor *fd = desc->field(i); + const FieldDescriptor* fd = desc->field(i); const bool repeated = fd->is_repeated(); const int start = repeated ? 0 : -1; const int limit = repeated ? refl->FieldSize(message, fd) : 0; @@ -41,7 +41,7 @@ void WriteFullProtocolMessage(const google::protobuf::Message &message, absl::StrAppend(out, indent, fd->name()); if (fd->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) { absl::StrAppend(out, " {\n"); - const google::protobuf::Message &nested_message = + const google::protobuf::Message& nested_message = repeated ? 
refl->GetRepeatedMessage(message, fd, j) : refl->GetMessage(message, fd); WriteFullProtocolMessage(nested_message, indent_level + 1, out); @@ -56,7 +56,7 @@ void WriteFullProtocolMessage(const google::protobuf::Message &message, } // namespace std::string FullProtocolMessageAsString( - const google::protobuf::Message &message, int indent_level) { + const google::protobuf::Message& message, int indent_level) { std::string message_str; WriteFullProtocolMessage(message, indent_level, &message_str); return message_str; diff --git a/ortools/util/range_query_function.cc b/ortools/util/range_query_function.cc index 9f953952f5..09d2f9c1c7 100644 --- a/ortools/util/range_query_function.cc +++ b/ortools/util/range_query_function.cc @@ -87,7 +87,7 @@ class LinearRangeIntToIntFunction : public RangeIntToIntFunction { DISALLOW_COPY_AND_ASSIGN(LinearRangeIntToIntFunction); }; -std::vector FunctionToVector(const std::function &f, +std::vector FunctionToVector(const std::function& f, int64 domain_start, int64 domain_end) { CHECK_LT(domain_start, domain_end); std::vector output(domain_end - domain_start, 0); @@ -103,7 +103,7 @@ std::vector FunctionToVector(const std::function &f, // 2. It creates a data structure for quick answer to range queries. 
class CachedRangeIntToIntFunction : public RangeIntToIntFunction { public: - CachedRangeIntToIntFunction(const std::function &base_function, + CachedRangeIntToIntFunction(const std::function& base_function, int64 domain_start, int64 domain_end) : domain_start_(domain_start), rmq_min_(FunctionToVector(base_function, domain_start, domain_end)), @@ -162,18 +162,18 @@ class CachedRangeIntToIntFunction : public RangeIntToIntFunction { } private: - const std::vector &array() const { return rmq_min_.array(); } + const std::vector& array() const { return rmq_min_.array(); } const int64 domain_start_; - const RangeMinimumQuery > rmq_min_; - const RangeMinimumQuery > rmq_max_; + const RangeMinimumQuery> rmq_min_; + const RangeMinimumQuery> rmq_max_; DISALLOW_COPY_AND_ASSIGN(CachedRangeIntToIntFunction); }; class CachedRangeMinMaxIndexFunction : public RangeMinMaxIndexFunction { public: - CachedRangeMinMaxIndexFunction(const std::function &f, + CachedRangeMinMaxIndexFunction(const std::function& f, int64 domain_start, int64 domain_end) : domain_start_(domain_start), domain_end_(domain_end), @@ -202,25 +202,25 @@ class CachedRangeMinMaxIndexFunction : public RangeMinMaxIndexFunction { private: const int64 domain_start_; const int64 domain_end_; - const RangeMinimumIndexQuery > index_rmq_min_; - const RangeMinimumIndexQuery > index_rmq_max_; + const RangeMinimumIndexQuery> index_rmq_min_; + const RangeMinimumIndexQuery> index_rmq_max_; DISALLOW_COPY_AND_ASSIGN(CachedRangeMinMaxIndexFunction); }; } // namespace -RangeIntToIntFunction *MakeBareIntToIntFunction(std::function f) { +RangeIntToIntFunction* MakeBareIntToIntFunction(std::function f) { return new LinearRangeIntToIntFunction(std::move(f)); } -RangeIntToIntFunction *MakeCachedIntToIntFunction( - const std::function &f, int64 domain_start, +RangeIntToIntFunction* MakeCachedIntToIntFunction( + const std::function& f, int64 domain_start, int64 domain_end) { return new CachedRangeIntToIntFunction(f, domain_start, domain_end); 
} -RangeMinMaxIndexFunction *MakeCachedRangeMinMaxIndexFunction( - const std::function &f, int64 domain_start, +RangeMinMaxIndexFunction* MakeCachedRangeMinMaxIndexFunction( + const std::function& f, int64 domain_start, int64 domain_end) { return new CachedRangeMinMaxIndexFunction(f, domain_start, domain_end); } diff --git a/ortools/util/saturated_arithmetic.h b/ortools/util/saturated_arithmetic.h index 9b3b8c88e0..7999ea4477 100644 --- a/ortools/util/saturated_arithmetic.h +++ b/ortools/util/saturated_arithmetic.h @@ -84,7 +84,7 @@ inline int64 SubOverflows(int64 x, int64 y) { // Performs *b += a and returns false iff the addition overflow or underflow. // This function only works for typed integer type (IntType<>). template -bool SafeAddInto(IntegerType a, IntegerType *b) { +bool SafeAddInto(IntegerType a, IntegerType* b) { const int64 x = a.value(); const int64 y = b->value(); const int64 sum = TwosComplementAddition(x, y); @@ -110,14 +110,12 @@ inline int64 CapAddFast(int64 x, int64 y) { const int64 cap = CapWithSignOf(x); int64 result = x; // clang-format off - asm volatile( // 'volatile': ask compiler optimizer "keep as is". - "\t" - "addq %[y],%[result]" - "\n\t" - "cmovoq %[cap],%[result]" // Conditional move if overflow. - : [result] "=r"(result) // Output - : "[result]"(result), [y] "r"(y), [cap] "r"(cap) // Input. - : "cc" /* Clobbered registers */); + asm volatile( // 'volatile': ask compiler optimizer "keep as is". + "\t" "addq %[y],%[result]" + "\n\t" "cmovoq %[cap],%[result]" // Conditional move if overflow. + : [result] "=r"(result) // Output + : "[result]" (result), [y] "r"(y), [cap] "r"(cap) // Input. + : "cc" /* Clobbered registers */ ); // clang-format on return result; } @@ -142,14 +140,12 @@ inline int64 CapSubFast(int64 x, int64 y) { const int64 cap = CapWithSignOf(x); int64 result = x; // clang-format off - asm volatile( // 'volatile': ask compiler optimizer "keep as is". 
- "\t" - "subq %[y],%[result]" - "\n\t" - "cmovoq %[cap],%[result]" // Conditional move if overflow. - : [result] "=r"(result) // Output - : "[result]"(result), [y] "r"(y), [cap] "r"(cap) // Input. - : "cc" /* Clobbered registers */); + asm volatile( // 'volatile': ask compiler optimizer "keep as is". + "\t" "subq %[y],%[result]" + "\n\t" "cmovoq %[cap],%[result]" // Conditional move if overflow. + : [result] "=r"(result) // Output + : "[result]" (result), [y] "r"(y), [cap] "r"(cap) // Input. + : "cc" /* Clobbered registers */ ); // clang-format on return result; } @@ -221,13 +217,11 @@ inline int64 CapProdFast(int64 x, int64 y) { // the carry flag if 64 bits were not enough. // We therefore use cmovc to return cap if the carry was set. // clang-format off - asm volatile( // 'volatile': ask compiler optimizer "keep as is". - "\n\t" - "imulq %[y],%[result]" - "\n\t" - "cmovcq %[cap],%[result]" // Conditional move if carry. - : [result] "=r"(result) // Output - : "[result]"(result), [y] "r"(y), [cap] "r"(cap) // Input. + asm volatile( // 'volatile': ask compiler optimizer "keep as is". + "\n\t" "imulq %[y],%[result]" + "\n\t" "cmovcq %[cap],%[result]" // Conditional move if carry. + : [result] "=r"(result) // Output + : "[result]" (result), [y] "r"(y), [cap] "r"(cap) // Input. 
: "cc" /* Clobbered registers */); // clang-format on return result; diff --git a/ortools/util/sigint.cc b/ortools/util/sigint.cc index 9303b49555..70975cdd1f 100644 --- a/ortools/util/sigint.cc +++ b/ortools/util/sigint.cc @@ -19,7 +19,7 @@ namespace operations_research { -void SigintHandler::Register(const std::function &f) { +void SigintHandler::Register(const std::function& f) { handler_ = [this, f]() -> void { ++num_sigint_calls_; if (num_sigint_calls_ >= 3) { diff --git a/ortools/util/sorted_interval_list.cc b/ortools/util/sorted_interval_list.cc index 6faa2c41a9..8ce5cdc829 100644 --- a/ortools/util/sorted_interval_list.cc +++ b/ortools/util/sorted_interval_list.cc @@ -51,7 +51,7 @@ bool IntervalsAreSortedAndNonAdjacent( namespace { template -std::string IntervalsAsString(const Intervals &intervals) { +std::string IntervalsAsString(const Intervals& intervals) { std::string result; for (ClosedInterval interval : intervals) { result += interval.DebugString(); @@ -61,10 +61,10 @@ std::string IntervalsAsString(const Intervals &intervals) { // Transforms a sorted list of intervals in a sorted DISJOINT list for which // IntervalsAreSortedAndNonAdjacent() would return true. 
-void UnionOfSortedIntervals(absl::InlinedVector *intervals) { +void UnionOfSortedIntervals(absl::InlinedVector* intervals) { DCHECK(std::is_sorted(intervals->begin(), intervals->end())); int new_size = 0; - for (const ClosedInterval &i : *intervals) { + for (const ClosedInterval& i : *intervals) { if (new_size > 0 && i.start <= CapAdd((*intervals)[new_size - 1].end, 1)) { (*intervals)[new_size - 1].end = std::max(i.end, (*intervals)[new_size - 1].end); @@ -97,16 +97,16 @@ int64 FloorRatio(int64 value, int64 positive_coeff) { return result - adjust; } -std::ostream &operator<<(std::ostream &out, const ClosedInterval &interval) { +std::ostream& operator<<(std::ostream& out, const ClosedInterval& interval) { return out << interval.DebugString(); } -std::ostream &operator<<(std::ostream &out, - const std::vector &intervals) { +std::ostream& operator<<(std::ostream& out, + const std::vector& intervals) { return out << IntervalsAsString(intervals); } -std::ostream &operator<<(std::ostream &out, const Domain &domain) { +std::ostream& operator<<(std::ostream& out, const Domain& domain) { return out << IntervalsAsString(domain); } @@ -167,14 +167,14 @@ Domain Domain::FromFlatSpanOfIntervals(absl::Span flat_intervals) { return result; } -Domain Domain::FromFlatIntervals(const std::vector &flat_intervals) { +Domain Domain::FromFlatIntervals(const std::vector& flat_intervals) { return FromFlatSpanOfIntervals(absl::MakeSpan(flat_intervals)); } Domain Domain::FromVectorIntervals( - const std::vector > &intervals) { + const std::vector>& intervals) { Domain result; - for (const std::vector &interval : intervals) { + for (const std::vector& interval : intervals) { if (interval.size() == 1) { result.intervals_.push_back({interval[0], interval[0]}); } else { @@ -229,9 +229,9 @@ bool Domain::Contains(int64 value) const { return value <= it->end; } -bool Domain::IsIncludedIn(const Domain &domain) const { +bool Domain::IsIncludedIn(const Domain& domain) const { int i = 0; - const auto 
&others = domain.intervals_; + const auto& others = domain.intervals_; for (const ClosedInterval interval : intervals_) { // Find the unique interval in others that contains interval if any. for (; i < others.size() && interval.end > others[i].end; ++i) { @@ -246,7 +246,7 @@ Domain Domain::Complement() const { Domain result; int64 next_start = kint64min; result.intervals_.reserve(intervals_.size() + 1); - for (const ClosedInterval &interval : intervals_) { + for (const ClosedInterval& interval : intervals_) { if (interval.start != kint64min) { result.intervals_.push_back({next_start, interval.start - 1}); } @@ -271,7 +271,7 @@ void Domain::NegateInPlace() { // corner-case intervals_.pop_back(); } - for (ClosedInterval &ref : intervals_) { + for (ClosedInterval& ref : intervals_) { std::swap(ref.start, ref.end); ref.start = ref.start == kint64min ? kint64max : -ref.start; ref.end = ref.end == kint64min ? kint64max : -ref.end; @@ -279,10 +279,10 @@ void Domain::NegateInPlace() { DCHECK(IntervalsAreSortedAndNonAdjacent(intervals_)); } -Domain Domain::IntersectionWith(const Domain &domain) const { +Domain Domain::IntersectionWith(const Domain& domain) const { Domain result; - const auto &a = intervals_; - const auto &b = domain.intervals_; + const auto& a = intervals_; + const auto& b = domain.intervals_; for (int i = 0, j = 0; i < a.size() && j < b.size();) { if (a[i].start <= b[j].start) { if (a[i].end < b[j].start) { @@ -300,7 +300,7 @@ Domain Domain::IntersectionWith(const Domain &domain) const { } } } else { // a[i].start > b[i].start. - // We do the exact same thing as above, but swapping a and b. + // We do the exact same thing as above, but swapping a and b. 
if (b[j].end < a[i].start) { ++j; } else { // b[j].end >= a[i].start @@ -318,10 +318,10 @@ Domain Domain::IntersectionWith(const Domain &domain) const { return result; } -Domain Domain::UnionWith(const Domain &domain) const { +Domain Domain::UnionWith(const Domain& domain) const { Domain result; - const auto &a = intervals_; - const auto &b = domain.intervals_; + const auto& a = intervals_; + const auto& b = domain.intervals_; result.intervals_.resize(a.size() + b.size()); std::merge(a.begin(), a.end(), b.begin(), b.end(), result.intervals_.begin()); UnionOfSortedIntervals(&result.intervals_); @@ -329,14 +329,14 @@ Domain Domain::UnionWith(const Domain &domain) const { } // TODO(user): Use a better algorithm. -Domain Domain::AdditionWith(const Domain &domain) const { +Domain Domain::AdditionWith(const Domain& domain) const { Domain result; - const auto &a = intervals_; - const auto &b = domain.intervals_; + const auto& a = intervals_; + const auto& b = domain.intervals_; result.intervals_.reserve(a.size() * b.size()); - for (const ClosedInterval &i : a) { - for (const ClosedInterval &j : b) { + for (const ClosedInterval& i : a) { + for (const ClosedInterval& j : b) { result.intervals_.push_back( {CapAdd(i.start, j.start), CapAdd(i.end, j.end)}); } @@ -358,7 +358,7 @@ Domain Domain::RelaxIfTooComplex() const { } } -Domain Domain::MultiplicationBy(int64 coeff, bool *exact) const { +Domain Domain::MultiplicationBy(int64 coeff, bool* exact) const { if (exact != nullptr) *exact = true; if (intervals_.empty() || coeff == 0) return {}; @@ -372,7 +372,7 @@ Domain Domain::MultiplicationBy(int64 coeff, bool *exact) const { Domain result; if (abs_coeff > 1) { result.intervals_.reserve(size_if_non_trivial); - for (const ClosedInterval &i : intervals_) { + for (const ClosedInterval& i : intervals_) { for (int v = i.start; v <= i.end; ++v) { // Because abs_coeff > 1, all new values are disjoint. 
const int64 new_value = CapProd(v, abs_coeff); @@ -389,7 +389,7 @@ Domain Domain::MultiplicationBy(int64 coeff, bool *exact) const { Domain Domain::ContinuousMultiplicationBy(int64 coeff) const { Domain result = *this; const int64 abs_coeff = std::abs(coeff); - for (ClosedInterval &i : result.intervals_) { + for (ClosedInterval& i : result.intervals_) { i.start = CapProd(i.start, abs_coeff); i.end = CapProd(i.end, abs_coeff); } @@ -398,10 +398,10 @@ Domain Domain::ContinuousMultiplicationBy(int64 coeff) const { return result; } -Domain Domain::ContinuousMultiplicationBy(const Domain &domain) const { +Domain Domain::ContinuousMultiplicationBy(const Domain& domain) const { Domain result; - for (const ClosedInterval &i : this->intervals_) { - for (const ClosedInterval &j : domain.intervals_) { + for (const ClosedInterval& i : this->intervals_) { + for (const ClosedInterval& j : domain.intervals_) { ClosedInterval new_interval; const int64 a = CapProd(i.start, j.start); const int64 b = CapProd(i.end, j.end); @@ -421,7 +421,7 @@ Domain Domain::DivisionBy(int64 coeff) const { CHECK_NE(coeff, 0); Domain result = *this; const int64 abs_coeff = std::abs(coeff); - for (ClosedInterval &i : result.intervals_) { + for (ClosedInterval& i : result.intervals_) { i.start = i.start / abs_coeff; i.end = i.end / abs_coeff; } @@ -437,7 +437,7 @@ Domain Domain::InverseMultiplicationBy(const int64 coeff) const { Domain result = *this; int new_size = 0; const int64 abs_coeff = std::abs(coeff); - for (const ClosedInterval &i : result.intervals_) { + for (const ClosedInterval& i : result.intervals_) { const int64 start = CeilRatio(i.start, abs_coeff); const int64 end = FloorRatio(i.end, abs_coeff); if (start > end) continue; @@ -458,7 +458,7 @@ Domain Domain::InverseMultiplicationBy(const int64 coeff) const { // for all interval in this.UnionWith(implied_domain.Complement())): // - Take the two extreme points (min and max) in interval \inter implied. 
// - Append to result [min, max] if these points exists. -Domain Domain::SimplifyUsingImpliedDomain(const Domain &implied_domain) const { +Domain Domain::SimplifyUsingImpliedDomain(const Domain& implied_domain) const { Domain result; if (implied_domain.IsEmpty()) return result; @@ -508,20 +508,20 @@ Domain Domain::SimplifyUsingImpliedDomain(const Domain &implied_domain) const { std::vector Domain::FlattenedIntervals() const { std::vector result; - for (const ClosedInterval &interval : intervals_) { + for (const ClosedInterval& interval : intervals_) { result.push_back(interval.start); result.push_back(interval.end); } return result; } -bool Domain::operator<(const Domain &other) const { - const auto &d1 = intervals_; - const auto &d2 = other.intervals_; +bool Domain::operator<(const Domain& other) const { + const auto& d1 = intervals_; + const auto& d2 = other.intervals_; const int common_size = std::min(d1.size(), d2.size()); for (int i = 0; i < common_size; ++i) { - const ClosedInterval &i1 = d1[i]; - const ClosedInterval &i2 = d2[i]; + const ClosedInterval& i1 = d1[i]; + const ClosedInterval& i2 = d2[i]; if (i1.start < i2.start) return true; if (i1.start > i2.start) return false; if (i1.end < i2.end) return true; @@ -532,7 +532,7 @@ bool Domain::operator<(const Domain &other) const { std::string Domain::ToString() const { return IntervalsAsString(intervals_); } -int64 SumOfKMinValueInDomain(const Domain &domain, int k) { +int64 SumOfKMinValueInDomain(const Domain& domain, int k) { int64 current_sum = 0.0; int current_index = 0; for (const ClosedInterval interval : domain) { @@ -546,24 +546,24 @@ int64 SumOfKMinValueInDomain(const Domain &domain, int k) { return current_sum; } -int64 SumOfKMaxValueInDomain(const Domain &domain, int k) { +int64 SumOfKMaxValueInDomain(const Domain& domain, int k) { return -SumOfKMinValueInDomain(domain.Negation(), k); } SortedDisjointIntervalList::SortedDisjointIntervalList() {} 
SortedDisjointIntervalList::SortedDisjointIntervalList( - const std::vector &starts, const std::vector &ends) { + const std::vector& starts, const std::vector& ends) { InsertIntervals(starts, ends); } SortedDisjointIntervalList::SortedDisjointIntervalList( - const std::vector &starts, const std::vector &ends) { + const std::vector& starts, const std::vector& ends) { InsertIntervals(starts, ends); } SortedDisjointIntervalList::SortedDisjointIntervalList( - const std::vector &intervals) { + const std::vector& intervals) { for (ClosedInterval interval : intervals) { InsertInterval(interval.start, interval.end); } @@ -574,7 +574,7 @@ SortedDisjointIntervalList::BuildComplementOnInterval(int64 start, int64 end) { SortedDisjointIntervalList interval_list; int64 next_start = start; for (auto it = FirstIntervalGreaterOrEqual(start); it != this->end(); ++it) { - const ClosedInterval &interval = *it; + const ClosedInterval& interval = *it; const int64 next_end = CapSub(interval.start, 1); if (next_end > end) break; if (next_start <= next_end) { @@ -647,13 +647,13 @@ SortedDisjointIntervalList::Iterator SortedDisjointIntervalList::InsertInterval( // because if one alters a set element's value, then it collapses the set // property! But in this very special case, we know that we can just overwrite // it->start, so we do it. - const_cast(&(*it))->start = new_start; - const_cast(&(*it))->end = new_end; + const_cast(&(*it))->start = new_start; + const_cast(&(*it))->end = new_end; return it; } SortedDisjointIntervalList::Iterator SortedDisjointIntervalList::GrowRightByOne( - int64 value, int64 *newly_covered) { + int64 value, int64* newly_covered) { auto it = intervals_.upper_bound({value, kint64max}); auto it_prev = it; @@ -672,7 +672,7 @@ SortedDisjointIntervalList::Iterator SortedDisjointIntervalList::GrowRightByOne( // interval on the left, since there were no interval adjacent to "value" // on the left. 
DCHECK_EQ(it->start, value + 1); - const_cast(&(*it))->start = value; + const_cast(&(*it))->start = value; return it; } } @@ -686,28 +686,28 @@ SortedDisjointIntervalList::Iterator SortedDisjointIntervalList::GrowRightByOne( *newly_covered = it_prev->end + 1; if (it != end() && it_prev->end + 2 == it->start) { // We need to merge it_prev with 'it'. - const_cast(&(*it_prev))->end = it->end; + const_cast(&(*it_prev))->end = it->end; intervals_.erase(it); } else { - const_cast(&(*it_prev))->end = it_prev->end + 1; + const_cast(&(*it_prev))->end = it_prev->end + 1; } return it_prev; } template -void SortedDisjointIntervalList::InsertAll(const std::vector &starts, - const std::vector &ends) { +void SortedDisjointIntervalList::InsertAll(const std::vector& starts, + const std::vector& ends) { CHECK_EQ(starts.size(), ends.size()); for (int i = 0; i < starts.size(); ++i) InsertInterval(starts[i], ends[i]); } void SortedDisjointIntervalList::InsertIntervals( - const std::vector &starts, const std::vector &ends) { + const std::vector& starts, const std::vector& ends) { InsertAll(starts, ends); } -void SortedDisjointIntervalList::InsertIntervals(const std::vector &starts, - const std::vector &ends) { +void SortedDisjointIntervalList::InsertIntervals(const std::vector& starts, + const std::vector& ends) { // TODO(user): treat kint32min and kint32max as their kint64 variants. 
InsertAll(starts, ends); } @@ -733,7 +733,7 @@ SortedDisjointIntervalList::LastIntervalLessOrEqual(int64 value) const { std::string SortedDisjointIntervalList::DebugString() const { std::string str; - for (const ClosedInterval &interval : intervals_) { + for (const ClosedInterval& interval : intervals_) { str += interval.DebugString(); } return str; diff --git a/ortools/util/sorted_interval_list.h b/ortools/util/sorted_interval_list.h index 159f51ee25..5863a84f3f 100644 --- a/ortools/util/sorted_interval_list.h +++ b/ortools/util/sorted_interval_list.h @@ -38,14 +38,14 @@ struct ClosedInterval { } std::string DebugString() const; - bool operator==(const ClosedInterval &other) const { + bool operator==(const ClosedInterval& other) const { return start == other.start && end == other.end; } // Because we mainly manipulate vector of disjoint intervals, we only need to // sort by the start. We do not care about the order in which interval with // the same start appear since they will always be merged into one interval. - bool operator<(const ClosedInterval &other) const { + bool operator<(const ClosedInterval& other) const { return start < other.start; } @@ -53,9 +53,9 @@ struct ClosedInterval { int64 end = 0; // Inclusive. }; -std::ostream &operator<<(std::ostream &out, const ClosedInterval &interval); -std::ostream &operator<<(std::ostream &out, - const std::vector &intervals); +std::ostream& operator<<(std::ostream& out, const ClosedInterval& interval); +std::ostream& operator<<(std::ostream& out, + const std::vector& intervals); /** * Returns true iff we have: @@ -85,19 +85,19 @@ class Domain { #if !defined(SWIG) /// Copy constructor (mandatory as we define the move constructor). - Domain(const Domain &other) : intervals_(other.intervals_) {} + Domain(const Domain& other) : intervals_(other.intervals_) {} /// Copy operator (mandatory as we define the move operator). 
- Domain &operator=(const Domain &other) { + Domain& operator=(const Domain& other) { intervals_ = other.intervals_; return *this; } /// Move constructor. - Domain(Domain &&other) : intervals_(std::move(other.intervals_)) {} + Domain(Domain&& other) : intervals_(std::move(other.intervals_)) {} /// Move operator. - Domain &operator=(Domain &&other) { + Domain& operator=(Domain&& other) { intervals_ = std::move(other.intervals_); return *this; } @@ -140,14 +140,14 @@ class Domain { * .NET, [[0, 2], [5, 5], [8, 10]] in python). */ static Domain FromVectorIntervals( - const std::vector > &intervals); + const std::vector >& intervals); /** * This method is available in Python, Java and .NET. It allows * building a Domain object from a flattened list of intervals * (long[] in Java and .NET, [0, 2, 5, 5, 8, 10] in python). */ - static Domain FromFlatIntervals(const std::vector &flat_intervals); + static Domain FromFlatIntervals(const std::vector& flat_intervals); /** * This method returns the flattened list of interval bounds of the domain. @@ -201,7 +201,7 @@ class Domain { /** * Returns true iff D is included in the given domain. */ - bool IsIncludedIn(const Domain &domain) const; + bool IsIncludedIn(const Domain& domain) const; /** * Returns the set Int64 ∖ D. @@ -219,17 +219,17 @@ class Domain { /** * Returns the intersection of D and domain. */ - Domain IntersectionWith(const Domain &domain) const; + Domain IntersectionWith(const Domain& domain) const; /** * Returns the union of D and domain. */ - Domain UnionWith(const Domain &domain) const; + Domain UnionWith(const Domain& domain) const; /** * Returns {x ∈ Int64, ∃ a ∈ D, ∃ b ∈ domain, x = a + b}. */ - Domain AdditionWith(const Domain &domain) const; + Domain AdditionWith(const Domain& domain) const; /** * Returns {x ∈ Int64, ∃ e ∈ D, x = e * coeff}. @@ -239,7 +239,7 @@ class Domain { * larger than a fixed constant, exact will be set to false and the result * will be set to ContinuousMultiplicationBy(coeff). 
*/ - Domain MultiplicationBy(int64 coeff, bool *exact = nullptr) const; + Domain MultiplicationBy(int64 coeff, bool* exact = nullptr) const; /** * If NumIntervals() is too large, this return a superset of the domain. @@ -255,8 +255,7 @@ class Domain { * For instance, [1, 100] * 2 will be transformed in [2, 200] and not in * [2][4][6]...[200] like in MultiplicationBy(). Note that this would be * similar to a InverseDivisionBy(), but not quite the same because if we - * look for {x ∈ Int64, ∃ e ∈ D, x / coeff = e}, then we will get [2, - * 201] in + * look for {x ∈ Int64, ∃ e ∈ D, x / coeff = e}, then we will get [2, 201] in * the case above. */ Domain ContinuousMultiplicationBy(int64 coeff) const; @@ -270,11 +269,10 @@ class Domain { * For instance, [1, 100] * 2 will be transformed in [2, 200] and not in * [2][4][6]...[200] like in MultiplicationBy(). Note that this would be * similar to a InverseDivisionBy(), but not quite the same because if we - * look for {x ∈ Int64, ∃ e ∈ D, x / coeff = e}, then we will get [2, - * 201] in + * look for {x ∈ Int64, ∃ e ∈ D, x / coeff = e}, then we will get [2, 201] in * the case above. */ - Domain ContinuousMultiplicationBy(const Domain &domain) const; + Domain ContinuousMultiplicationBy(const Domain& domain) const; /** * Returns {x ∈ Int64, ∃ e ∈ D, x = e / coeff}. @@ -310,7 +308,7 @@ class Domain { * [domain.Min(), domain.Max()]. This is meant to be applied to the right-hand * side of a constraint to make its propagation more efficient. */ - Domain SimplifyUsingImpliedDomain(const Domain &implied_domain) const; + Domain SimplifyUsingImpliedDomain(const Domain& implied_domain) const; /** * Returns a compact string of a vector of intervals like "[1,4][6][10,20]". @@ -320,13 +318,13 @@ class Domain { /** * Lexicographic order on the intervals() representation. 
*/ - bool operator<(const Domain &other) const; + bool operator<(const Domain& other) const; - bool operator==(const Domain &other) const { + bool operator==(const Domain& other) const { return intervals_ == other.intervals_; } - bool operator!=(const Domain &other) const { + bool operator!=(const Domain& other) const { return intervals_ != other.intervals_; } @@ -370,13 +368,13 @@ class Domain { absl::InlinedVector intervals_; }; -std::ostream &operator<<(std::ostream &out, const Domain &domain); +std::ostream& operator<<(std::ostream& out, const Domain& domain); // Returns the sum of smallest k values in the domain. -int64 SumOfKMinValueInDomain(const Domain &domain, int k); +int64 SumOfKMinValueInDomain(const Domain& domain, int k); // Returns the sum of largest k values in the domain. -int64 SumOfKMaxValueInDomain(const Domain &domain, int k); +int64 SumOfKMaxValueInDomain(const Domain& domain, int k); /** * This class represents a sorted list of disjoint, closed intervals. When an @@ -389,7 +387,7 @@ int64 SumOfKMaxValueInDomain(const Domain &domain, int k); class SortedDisjointIntervalList { public: struct IntervalComparator { - bool operator()(const ClosedInterval &a, const ClosedInterval &b) const { + bool operator()(const ClosedInterval& a, const ClosedInterval& b) const { return a.start != b.start ? a.start < b.start : a.end < b.end; } }; @@ -399,7 +397,7 @@ class SortedDisjointIntervalList { SortedDisjointIntervalList(); explicit SortedDisjointIntervalList( - const std::vector &intervals); + const std::vector& intervals); /** * Creates a SortedDisjointIntervalList and fills it with intervals @@ -408,10 +406,10 @@ class SortedDisjointIntervalList { */ // TODO(user): Explain why we favored this API to the more natural // input std::vector or std::vector>. 
- SortedDisjointIntervalList(const std::vector &starts, - const std::vector &ends); - SortedDisjointIntervalList(const std::vector &starts, - const std::vector &ends); + SortedDisjointIntervalList(const std::vector& starts, + const std::vector& ends); + SortedDisjointIntervalList(const std::vector& starts, + const std::vector& ends); /** * Builds the complement of the interval list on the interval [start, end]. @@ -438,7 +436,7 @@ class SortedDisjointIntervalList { * If this causes an interval ending at kint64max to grow, it will die with a * CHECK fail. */ - Iterator GrowRightByOne(int64 value, int64 *newly_covered); + Iterator GrowRightByOne(int64 value, int64* newly_covered); /** * Adds all intervals [starts[i]..ends[i]]. @@ -446,10 +444,10 @@ class SortedDisjointIntervalList { * Same behavior as InsertInterval() upon invalid intervals. There's a version * with int64 and int32. */ - void InsertIntervals(const std::vector &starts, - const std::vector &ends); - void InsertIntervals(const std::vector &starts, - const std::vector &ends); + void InsertIntervals(const std::vector& starts, + const std::vector& ends); + void InsertIntervals(const std::vector& starts, + const std::vector& ends); /** * Returns the number of disjoint intervals in the list. @@ -485,16 +483,16 @@ class SortedDisjointIntervalList { /** * Returns a const& to the last interval. The list must not be empty. 
*/ - const ClosedInterval &last() const { return *intervals_.rbegin(); } + const ClosedInterval& last() const { return *intervals_.rbegin(); } void clear() { intervals_.clear(); } - void swap(SortedDisjointIntervalList &other) { + void swap(SortedDisjointIntervalList& other) { intervals_.swap(other.intervals_); } private: template - void InsertAll(const std::vector &starts, const std::vector &ends); + void InsertAll(const std::vector& starts, const std::vector& ends); IntervalSet intervals_; }; diff --git a/ortools/util/stats.cc b/ortools/util/stats.cc index a463810877..66ca89fb79 100644 --- a/ortools/util/stats.cc +++ b/ortools/util/stats.cc @@ -39,7 +39,7 @@ std::string MemoryUsage() { } } -Stat::Stat(const std::string &name, StatsGroup *group) : name_(name) { +Stat::Stat(const std::string& name, StatsGroup* group) : name_(name) { group->Register(this); } @@ -47,7 +47,7 @@ std::string Stat::StatString() const { return name_ + ": " + ValueAsString(); } StatsGroup::~StatsGroup() { gtl::STLDeleteValues(&time_distributions_); } -void StatsGroup::Register(Stat *stat) { stats_.push_back(stat); } +void StatsGroup::Register(Stat* stat) { stats_.push_back(stat); } void StatsGroup::Reset() { for (int i = 0; i < stats_.size(); ++i) { @@ -57,7 +57,7 @@ void StatsGroup::Reset() { namespace { -bool CompareStatPointers(const Stat *s1, const Stat *s2) { +bool CompareStatPointers(const Stat* s1, const Stat* s2) { if (s1->Priority() == s2->Priority()) { if (s1->Sum() == s2->Sum()) return s1->Name() < s2->Name(); return (s1->Sum() > s2->Sum()); @@ -72,7 +72,7 @@ std::string StatsGroup::StatString() const { // Computes the longest name of all the stats we want to display. // Also create a temporary vector so we can sort the stats by names. int longest_name_size = 0; - std::vector sorted_stats; + std::vector sorted_stats; for (int i = 0; i < stats_.size(); ++i) { if (!stats_[i]->WorthPrinting()) continue; // We support UTF8 characters in the stat names. 
@@ -86,7 +86,7 @@ std::string StatsGroup::StatString() const { break; case SORT_BY_NAME: std::sort(sorted_stats.begin(), sorted_stats.end(), - [](const Stat *s1, const Stat *s2) -> bool { + [](const Stat* s1, const Stat* s2) -> bool { return s1->Name() < s2->Name(); }); break; @@ -111,8 +111,8 @@ std::string StatsGroup::StatString() const { return result; } -TimeDistribution *StatsGroup::LookupOrCreateTimeDistribution(std::string name) { - TimeDistribution *&ref = time_distributions_[name]; +TimeDistribution* StatsGroup::LookupOrCreateTimeDistribution(std::string name) { + TimeDistribution*& ref = time_distributions_[name]; if (ref == nullptr) { ref = new TimeDistribution(name); Register(ref); @@ -120,7 +120,7 @@ TimeDistribution *StatsGroup::LookupOrCreateTimeDistribution(std::string name) { return ref; } -DistributionStat::DistributionStat(const std::string &name) +DistributionStat::DistributionStat(const std::string& name) : Stat(name), sum_(0.0), average_(0.0), @@ -129,7 +129,7 @@ DistributionStat::DistributionStat(const std::string &name) max_(0.0), num_(0) {} -DistributionStat::DistributionStat(const std::string &name, StatsGroup *group) +DistributionStat::DistributionStat(const std::string& name, StatsGroup* group) : Stat(name, group), sum_(0.0), average_(0.0), @@ -237,7 +237,7 @@ std::string IntegerDistribution::ValueAsString() const { #ifdef HAS_PERF_SUBSYSTEM EnabledScopedInstructionCounter::EnabledScopedInstructionCounter( - const std::string &name, TimeLimit *time_limit) + const std::string& name, TimeLimit* time_limit) : time_limit_(time_limit), name_(name) { starting_count_ = time_limit_ != nullptr ? time_limit_->ReadInstructionCounter() : 0; diff --git a/ortools/util/time_limit.cc b/ortools/util/time_limit.cc index b14c6aa951..68bb53eeac 100644 --- a/ortools/util/time_limit.cc +++ b/ortools/util/time_limit.cc @@ -19,7 +19,7 @@ ABSL_FLAG(bool, time_limit_use_usertime, false, "If true, rely on the user time in the TimeLimit class. 
This is " "only recommended for benchmarking on a non-isolated environment."); -ABSL_FLAG(bool, ime_limit_use_instruction_count, false, +ABSL_FLAG(bool, time_limit_use_instruction_count, false, "If true, measures the number of instructions executed"); namespace operations_research { @@ -35,8 +35,8 @@ std::string TimeLimit::DebugString() const { "\nElapsed time: ", (GetElapsedTime()), "\nElapsed deterministic time: ", (GetElapsedDeterministicTime())); #ifndef NDEBUG - for (const auto &counter : deterministic_counters_) { - const std::string &counter_name = counter.first; + for (const auto& counter : deterministic_counters_) { + const std::string& counter_name = counter.first; const double counter_value = counter.second; absl::StrAppend(&buffer, "\n", counter_name, ": ", (counter_value)); } @@ -44,7 +44,7 @@ std::string TimeLimit::DebugString() const { return buffer; } -NestedTimeLimit::NestedTimeLimit(TimeLimit *base_time_limit, +NestedTimeLimit::NestedTimeLimit(TimeLimit* base_time_limit, double limit_in_seconds, double deterministic_limit) : base_time_limit_(ABSL_DIE_IF_NULL(base_time_limit)), diff --git a/ortools/util/time_limit.h b/ortools/util/time_limit.h index e552832f2d..e683e06af5 100644 --- a/ortools/util/time_limit.h +++ b/ortools/util/time_limit.h @@ -66,8 +66,7 @@ namespace operations_research { * unless the time_limit_use_instruction_count flag is set. * * The limit is very conservative: it returns true (i.e. the limit is reached) - * when current_time + max(T, ε) >= limit_time, where ε is a small constant - * (see + * when current_time + max(T, ε) >= limit_time, where ε is a small constant (see * TimeLimit::kSafetyBufferSeconds), and T is the maximum measured time interval * between two consecutive calls to LimitReached() over the last kHistorySize * calls (so that we only consider "recent" history). 
@@ -125,8 +124,8 @@ class TimeLimit { double instruction_limit = std::numeric_limits::infinity()); TimeLimit() : TimeLimit(std::numeric_limits::infinity()) {} - TimeLimit(const TimeLimit &) = delete; - TimeLimit &operator=(const TimeLimit &) = delete; + TimeLimit(const TimeLimit&) = delete; + TimeLimit& operator=(const TimeLimit&) = delete; /** * Creates a time limit object that uses infinite time for wall time, @@ -158,7 +157,7 @@ class TimeLimit { // TODO(user): Support adding instruction count limit from parameters. template static std::unique_ptr FromParameters( - const Parameters ¶meters) { + const Parameters& parameters) { return absl::make_unique( parameters.max_time_in_seconds(), parameters.max_deterministic_time(), std::numeric_limits::infinity()); @@ -239,7 +238,7 @@ class TimeLimit { * \c AdvanceDeterministicTime(double). */ inline void AdvanceDeterministicTime(double deterministic_duration, - const char *counter_name) { + const char* counter_name) { AdvanceDeterministicTime(deterministic_duration); #ifndef NDEBUG deterministic_counters_[counter_name] += deterministic_duration; @@ -271,14 +270,14 @@ class TimeLimit { * Note : The external_boolean_as_limit can be modified during solve. */ void RegisterExternalBooleanAsLimit( - std::atomic *external_boolean_as_limit) { + std::atomic* external_boolean_as_limit) { external_boolean_as_limit_ = external_boolean_as_limit; } /** * Returns the current external Boolean limit. */ - std::atomic *ExternalBooleanAsLimit() const { + std::atomic* ExternalBooleanAsLimit() const { return external_boolean_as_limit_; } @@ -287,8 +286,8 @@ class TimeLimit { * any registered external Boolean. */ template - void ResetLimitFromParameters(const Parameters ¶meters); - void MergeWithGlobalTimeLimit(TimeLimit *other); + void ResetLimitFromParameters(const Parameters& parameters); + void MergeWithGlobalTimeLimit(TimeLimit* other); /** * Returns information about the time limit object in a human-readable form. 
@@ -310,20 +309,20 @@ class TimeLimit { const int64 safety_buffer_ns_; RunningMax running_max_; - // Only used when absl::GetFlag(FLAGS_time_limit_use_usertime) is true. + // Only used when FLAGS_time_limit_use_usertime is true. UserTimer user_timer_; double limit_in_seconds_; double deterministic_limit_; double elapsed_deterministic_time_; - std::atomic *external_boolean_as_limit_; + std::atomic* external_boolean_as_limit_; #ifdef HAS_PERF_SUBSYSTEM // PMU counter to help count the instructions. exegesis::PerfSubsystem perf_subsystem_; #endif // HAS_PERF_SUBSYSTEM - // Given limit in terms of number of instructions. + // Given limit in terms of number of instructions. double instruction_limit_; #ifndef NDEBUG @@ -338,7 +337,7 @@ class TimeLimit { // Wrapper around TimeLimit to make it thread safe and add Stop() support. class SharedTimeLimit { public: - explicit SharedTimeLimit(TimeLimit *time_limit) + explicit SharedTimeLimit(TimeLimit* time_limit) : time_limit_(time_limit), stopped_boolean_(false) { // We use the one already registered if present or ours otherwise. stopped_ = time_limit->ExternalBooleanAsLimit(); @@ -366,7 +365,7 @@ class SharedTimeLimit { *stopped_ = true; } - void UpdateLocalLimit(TimeLimit *local_limit) { + void UpdateLocalLimit(TimeLimit* local_limit) { absl::MutexLock lock(&mutex_); local_limit->MergeWithGlobalTimeLimit(time_limit_); } @@ -388,9 +387,9 @@ class SharedTimeLimit { private: mutable absl::Mutex mutex_; - TimeLimit *time_limit_ ABSL_GUARDED_BY(mutex_); + TimeLimit* time_limit_ ABSL_GUARDED_BY(mutex_); std::atomic stopped_boolean_ ABSL_GUARDED_BY(mutex_); - std::atomic *stopped_ ABSL_GUARDED_BY(mutex_); + std::atomic* stopped_ ABSL_GUARDED_BY(mutex_); }; /** @@ -429,7 +428,7 @@ class NestedTimeLimit { * Creates the nested time limit. Note that 'base_time_limit' must remain * valid for the whole lifetime of the nested time limit object. 
*/ - NestedTimeLimit(TimeLimit *base_time_limit, double limit_in_seconds, + NestedTimeLimit(TimeLimit* base_time_limit, double limit_in_seconds, double deterministic_limit); /** @@ -446,7 +445,7 @@ class NestedTimeLimit { */ template static std::unique_ptr FromBaseTimeLimitAndParameters( - TimeLimit *time_limit, const Parameters ¶meters) { + TimeLimit* time_limit, const Parameters& parameters) { return absl::make_unique( time_limit, parameters.max_time_in_seconds(), parameters.max_deterministic_time()); @@ -458,10 +457,10 @@ class NestedTimeLimit { * is owned by the nested time limit object that returns it, and it will * remain valid until the nested time limit object is destroyed. */ - TimeLimit *GetTimeLimit() { return &time_limit_; } + TimeLimit* GetTimeLimit() { return &time_limit_; } private: - TimeLimit *const base_time_limit_; + TimeLimit* const base_time_limit_; TimeLimit time_limit_; DISALLOW_COPY_AND_ASSIGN(NestedTimeLimit); @@ -503,13 +502,13 @@ inline void TimeLimit::ResetTimers(double limit_in_seconds, } template -inline void TimeLimit::ResetLimitFromParameters(const Parameters ¶meters) { +inline void TimeLimit::ResetLimitFromParameters(const Parameters& parameters) { ResetTimers(parameters.max_time_in_seconds(), parameters.max_deterministic_time(), std::numeric_limits::infinity()); } -inline void TimeLimit::MergeWithGlobalTimeLimit(TimeLimit *other) { +inline void TimeLimit::MergeWithGlobalTimeLimit(TimeLimit* other) { if (other == nullptr) return; ResetTimers( std::min(GetTimeLeft(), other->GetTimeLeft()),