diff --git a/ortools/sat/2d_orthogonal_packing.cc b/ortools/sat/2d_orthogonal_packing.cc
index 7f5f88c889..b433283297 100644
--- a/ortools/sat/2d_orthogonal_packing.cc
+++ b/ortools/sat/2d_orthogonal_packing.cc
@@ -561,9 +561,14 @@ OrthogonalPackingInfeasibilityDetector::CheckFeasibilityWithDualFunction2(
 
 bool OrthogonalPackingInfeasibilityDetector::RelaxConflictWithBruteForce(
     OrthogonalPackingResult& result,
-    std::pair<IntegerValue, IntegerValue> bounding_box_size) {
+    std::pair<IntegerValue, IntegerValue> bounding_box_size,
+    int brute_force_threshold) {
   const int num_items_originally =
       result.items_participating_on_conflict_.size();
+  if (num_items_originally > 2 * brute_force_threshold) {
+    // Don't even try on problems that are too big.
+    return false;
+  }
   std::vector<IntegerValue> sizes_x;
   std::vector<IntegerValue> sizes_y;
   std::vector<int> indexes;
@@ -582,9 +587,9 @@ bool OrthogonalPackingInfeasibilityDetector::RelaxConflictWithBruteForce(
     sizes_x.push_back(result.items_participating_on_conflict_[j].size_x);
     sizes_y.push_back(result.items_participating_on_conflict_[j].size_y);
   }
-  const auto solution =
-      BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size);
-  if (solution.empty()) {
+  const auto solution = BruteForceOrthogonalPacking(
+      sizes_x, sizes_y, bounding_box_size, brute_force_threshold);
+  if (solution.status == BruteForceResult::Status::kNoSolutionExists) {
     // We still have a conflict if we remove the i-th item!
     to_be_removed[i] = true;
   }
@@ -791,28 +796,26 @@ OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl(
     }
   }
 
-  if (result.result_ == OrthogonalPackingResult::Status::UNKNOWN &&
-      num_items <= options.brute_force_threshold) {
-    num_brute_force_calls_++;
-    auto solution =
-        BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size);
-    if (solution.empty()) {
+  if (result.result_ == OrthogonalPackingResult::Status::UNKNOWN) {
+    auto solution = BruteForceOrthogonalPacking(
+        sizes_x, sizes_y, bounding_box_size, options.brute_force_threshold);
+    num_brute_force_calls_ +=
+        (solution.status != BruteForceResult::Status::kTooBig);
+    if (solution.status == BruteForceResult::Status::kNoSolutionExists) {
       result.conflict_type_ = ConflictType::BRUTE_FORCE;
       result.result_ = OrthogonalPackingResult::Status::INFEASIBLE;
       result.items_participating_on_conflict_.resize(num_items);
       for (int i = 0; i < num_items; i++) {
         result.items_participating_on_conflict_[i] = make_item(i);
       }
-    } else {
+    } else if (solution.status == BruteForceResult::Status::kFoundSolution) {
       result.result_ = OrthogonalPackingResult::Status::FEASIBLE;
     }
   }
 
-  if (result.result_ == OrthogonalPackingResult::Status::INFEASIBLE &&
-      result.items_participating_on_conflict_.size() <=
-          options.brute_force_threshold) {
-    num_brute_force_relaxation_ +=
-        RelaxConflictWithBruteForce(result, bounding_box_size);
+  if (result.result_ == OrthogonalPackingResult::Status::INFEASIBLE) {
+    num_brute_force_relaxation_ += RelaxConflictWithBruteForce(
+        result, bounding_box_size, options.brute_force_threshold);
   }
 
   return result;
diff --git a/ortools/sat/2d_orthogonal_packing.h b/ortools/sat/2d_orthogonal_packing.h
index 40e296e0b0..d591af85b2 100644
--- a/ortools/sat/2d_orthogonal_packing.h
+++ b/ortools/sat/2d_orthogonal_packing.h
@@ -148,7 +148,8 @@ class OrthogonalPackingInfeasibilityDetector {
  private:
   bool RelaxConflictWithBruteForce(
       OrthogonalPackingResult& result,
-      std::pair<IntegerValue, IntegerValue> bounding_box_size);
+      std::pair<IntegerValue, IntegerValue> bounding_box_size,
+      int brute_force_threshold);
 
   OrthogonalPackingResult TestFeasibilityImpl(
       absl::Span<const IntegerValue> sizes_x,
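A note on the detector change above: `RelaxConflictWithBruteForce()` shrinks an infeasible set of items by checking, item by item, whether the conflict survives that item's removal, and the new `brute_force_threshold` bounds how large a subset it will even attempt. The standalone program below sketches that general deletion-filter idea with a made-up predicate; it is illustrative only and is not the library code (in the library, the predicate is the brute-force packing check itself).

```cpp
// Sketch of a deletion filter: try to drop each element; keep the drop
// whenever the remaining set is still "infeasible". Names and the toy
// predicate are made up for illustration.
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

std::vector<int> RelaxConflict(
    std::vector<int> items,
    const std::function<bool(const std::vector<int>&)>& still_infeasible) {
  for (int i = 0; i < static_cast<int>(items.size());) {
    std::vector<int> without = items;
    without.erase(without.begin() + i);
    if (still_infeasible(without)) {
      items = std::move(without);  // Item i is not needed in the conflict.
    } else {
      ++i;  // Item i is essential; keep it and move on.
    }
  }
  return items;
}

int main() {
  // Toy stand-in for "these items cannot be packed": infeasible while the
  // total size exceeds 10.
  const auto still_infeasible = [](const std::vector<int>& v) {
    int sum = 0;
    for (const int x : v) sum += x;
    return sum > 10;
  };
  for (const int x : RelaxConflict({6, 1, 2, 7, 3}, still_infeasible)) {
    std::cout << x << " ";  // Prints a minimal infeasible subset: "2 7 3".
  }
  std::cout << "\n";
}
```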
diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc
index 3e92d3a6dc..d2aa9f0196 100644
--- a/ortools/sat/2d_packing_brute_force.cc
+++ b/ortools/sat/2d_packing_brute_force.cc
@@ -21,6 +21,7 @@
 
 #include "absl/container/inlined_vector.h"
 #include "absl/log/check.h"
+#include "absl/strings/str_cat.h"
 #include "absl/types/span.h"
 #include "ortools/base/logging.h"
 #include "ortools/sat/diffn_util.h"
@@ -350,15 +351,14 @@ bool BruteForceOrthogonalPackingImpl(
   return !has_unplaced_item;
 }
 
-}  // namespace
-
-std::vector<Rectangle> BruteForceOrthogonalPacking(
+bool BruteForceOrthogonalPackingNoPreprocessing(
     absl::Span<const IntegerValue> sizes_x,
     absl::Span<const IntegerValue> sizes_y,
-    std::pair<IntegerValue, IntegerValue> bounding_box_size) {
+    const std::pair<IntegerValue, IntegerValue> bounding_box_size,
+    absl::Span<Rectangle> result) {
   IntegerValue smallest_x = std::numeric_limits<IntegerValue>::max();
   IntegerValue smallest_y = std::numeric_limits<IntegerValue>::max();
-  int num_items = sizes_x.size();
+  const int num_items = sizes_x.size();
   CHECK_LE(num_items, kMaxProblemSize);
   std::vector<int> item_index_sorted_by_area_desc(num_items);
   std::array<absl::InlinedVector<PotentialPositionForItem, 16>, kMaxProblemSize>
@@ -366,11 +366,16 @@ std::vector<Rectangle> BruteForceOrthogonalPacking(
   absl::Span<absl::InlinedVector<PotentialPositionForItem, 16>>
       potential_item_positions(potential_item_positions_storage.data(),
                                num_items);
+
   for (int i = 0; i < num_items; ++i) {
     smallest_x = std::min(smallest_x, sizes_x[i]);
    smallest_y = std::min(smallest_y, sizes_y[i]);
     item_index_sorted_by_area_desc[i] = i;
     potential_item_positions[i].push_back({0, 0, false});
+    if (sizes_x[i] > bounding_box_size.first ||
+        sizes_y[i] > bounding_box_size.second) {
+      return false;
+    }
   }
   std::sort(item_index_sorted_by_area_desc.begin(),
             item_index_sorted_by_area_desc.end(),
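The next hunk adds a `Preprocess()` pass that fixes items nothing else can sit next to: if the widest item plus the narrowest item exceed the bounding-box width, the widest item's rows belong to it alone, so it can be pinned against one edge and the box shrunk by its height (and symmetrically for the tallest item). The toy program below, with made-up numbers, is only a sketch of that reduction, not the library code.

```cpp
// Sketch of the strip reduction performed by the Preprocess() pass added in
// the hunk below, on a toy instance. Items and box sizes are made up.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  std::pair<int64_t, int64_t> box = {10, 8};  // (width, height).
  // Items as (size_x, size_y).
  std::vector<std::pair<int64_t, int64_t>> items = {{7, 3}, {4, 4}, {5, 2}};

  int widest = 0;
  int64_t smallest_x = box.first;
  for (int i = 0; i < static_cast<int>(items.size()); ++i) {
    if (items[i].first > items[widest].first) widest = i;
    smallest_x = std::min(smallest_x, items[i].first);
  }
  // Here 7 + 4 > 10: no other item fits beside the widest one, so its rows
  // belong to it alone. Fix it against the top edge and keep only a
  // 10 x (8 - 3) = 10 x 5 box for the remaining items.
  if (items[widest].first + smallest_x > box.first) {
    box.second -= items[widest].second;
    std::cout << "fixed item " << widest << "; remaining box is " << box.first
              << " x " << box.second << "\n";
    items.erase(items.begin() + widest);
  }
  std::cout << items.size() << " items left to pack\n";
}
```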
@@ -398,15 +403,153 @@
       new_sizes_x, new_sizes_y, bounding_box_size, smallest_x, smallest_y,
       item_positions, placed_item_indexes, potential_item_positions, slack);
   if (!found_solution) {
-    return {};
+    return false;
   }
-  std::vector<Rectangle> result(num_items);
   for (int i = 0; i < num_items; ++i) {
     result[item_index_sorted_by_area_desc[i]] = item_positions[i];
   }
-  VLOG_EVERY_N_SEC(2, 3) << "Found a feasible packing by brute force. Dot:\n "
+  return true;
+}
+
+// Try to find an equivalent smaller OPP problem by fixing large items.
+// The API is a bit unusual: it takes a reference to a mutable Span of sizes
+// and rectangles. When this function finds an item that can be fixed, it
+// first adds its fixed position to `result`, then reorders `sizes_x`,
+// `sizes_y`, `result_index_map` and `result` to put that item at the end of
+// the span, and finally resizes the span so it contains only non-fixed items.
+bool Preprocess(absl::Span<IntegerValue>& sizes_x,
+                absl::Span<IntegerValue>& sizes_y,
+                std::pair<IntegerValue, IntegerValue>& bounding_box_size,
+                absl::Span<int>& result_index_map,
+                absl::Span<Rectangle>& result) {
+  const int num_items = sizes_x.size();
+  if (num_items == 1) {
+    return false;
+  }
+  IntegerValue smallest_x = std::numeric_limits<IntegerValue>::max();
+  IntegerValue largest_x = std::numeric_limits<IntegerValue>::min();
+  IntegerValue smallest_y = std::numeric_limits<IntegerValue>::max();
+  IntegerValue largest_y = std::numeric_limits<IntegerValue>::min();
+  int largest_x_idx = -1;
+  int largest_y_idx = -1;
+  for (int i = 0; i < num_items; ++i) {
+    if (sizes_x[i] > largest_x) {
+      largest_x = sizes_x[i];
+      largest_x_idx = i;
+    }
+    if (sizes_y[i] > largest_y) {
+      largest_y = sizes_y[i];
+      largest_y_idx = i;
+    }
+    smallest_x = std::min(smallest_x, sizes_x[i]);
+    smallest_y = std::min(smallest_y, sizes_y[i]);
+  }
+
+  if (largest_x > bounding_box_size.first ||
+      largest_y > bounding_box_size.second) {
+    // No point in optimizing an obviously infeasible instance.
+    return false;
+  }
+  const auto remove_item = [&sizes_x, &sizes_y,
+                            &result_index_map](int index_to_remove) {
+    std::swap(sizes_x[index_to_remove], sizes_x.back());
+    sizes_x.remove_suffix(1);
+    std::swap(sizes_y[index_to_remove], sizes_y.back());
+    sizes_y.remove_suffix(1);
+    std::swap(result_index_map[index_to_remove], result_index_map.back());
+    result_index_map.remove_suffix(1);
+  };
+  if (largest_x + smallest_x > bounding_box_size.first) {
+    // No item (not even the narrowest one) fits alongside the widest item. So
+    // we care only about fitting the remaining items in the remaining space.
+    bounding_box_size.second -= sizes_y[largest_x_idx];
+    result.back() = {
+        .x_min = 0,
+        .x_max = largest_x,
+        .y_min = bounding_box_size.second,
+        .y_max = bounding_box_size.second + sizes_y[largest_x_idx]};
+    result.remove_suffix(1);
+    remove_item(largest_x_idx);
+    Preprocess(sizes_x, sizes_y, bounding_box_size, result_index_map, result);
+    return true;
+  }
+  if (largest_y + smallest_y > bounding_box_size.second) {
+    bounding_box_size.first -= sizes_x[largest_y_idx];
+    result.back() = {.x_min = bounding_box_size.first,
+                     .x_max = bounding_box_size.first + sizes_x[largest_y_idx],
+                     .y_min = 0,
+                     .y_max = largest_y};
+    result.remove_suffix(1);
+    remove_item(largest_y_idx);
+    Preprocess(sizes_x, sizes_y, bounding_box_size, result_index_map, result);
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+BruteForceResult BruteForceOrthogonalPacking(
+    absl::Span<const IntegerValue> sizes_x,
+    absl::Span<const IntegerValue> sizes_y,
+    std::pair<IntegerValue, IntegerValue> bounding_box_size,
+    int max_complexity) {
+  const int num_items = sizes_x.size();
+
+  if (num_items > 2 * max_complexity) {
+    // It is unlikely that preprocessing will remove half of the items, so
+    // don't waste time trying.
+    return {.status = BruteForceResult::Status::kTooBig};
+  }
+  CHECK_LE(num_items, kMaxProblemSize);
+  std::vector<int> preprocessing_order(num_items);
+  std::array<IntegerValue, kMaxProblemSize> new_sizes_x_storage,
+      new_sizes_y_storage;
+  // We need a mutable array of sizes for preprocessing.
+  absl::Span<IntegerValue> new_sizes_x(new_sizes_x_storage.data(), num_items);
+  absl::Span<IntegerValue> new_sizes_y(new_sizes_y_storage.data(), num_items);
+  for (int i = 0; i < num_items; ++i) {
+    preprocessing_order[i] = i;
+    new_sizes_x[i] = sizes_x[i];
+    new_sizes_y[i] = sizes_y[i];
+  }
+  std::array<Rectangle, kMaxProblemSize> unordered_result_storage;
+  absl::Span<Rectangle> unordered_result(unordered_result_storage.data(),
+                                         num_items);
+  absl::Span<IntegerValue> post_processed_sizes_x = new_sizes_x;
+  absl::Span<IntegerValue> post_processed_sizes_y = new_sizes_y;
+  absl::Span<Rectangle> result_after_preprocessing = unordered_result;
+  std::pair<IntegerValue, IntegerValue> post_processed_bounding_box_size =
+      bounding_box_size;
+  absl::Span<int> permutation = absl::MakeSpan(preprocessing_order);
+  const bool post_processed =
+      Preprocess(post_processed_sizes_x, post_processed_sizes_y,
+                 post_processed_bounding_box_size, permutation,
+                 result_after_preprocessing);
+  DCHECK_EQ(result_after_preprocessing.size(), post_processed_sizes_x.size());
+  if (result_after_preprocessing.size() > max_complexity) {
+    return {.status = BruteForceResult::Status::kTooBig};
+  }
+  const bool is_feasible = BruteForceOrthogonalPackingNoPreprocessing(
+      post_processed_sizes_x, post_processed_sizes_y,
+      post_processed_bounding_box_size, result_after_preprocessing);
+  VLOG_EVERY_N_SEC(2, 3)
+      << "Solved by brute force a problem of " << num_items << " items"
+      << (post_processed ? absl::StrCat(" (", post_processed_sizes_x.size(),
+                                        " after preprocessing)")
+                         : "")
+      << ": solution " << (is_feasible ? "found" : "not found") << ".";
+  if (!is_feasible) {
+    return {.status = BruteForceResult::Status::kNoSolutionExists};
+  }
+  std::vector<Rectangle> result(num_items);
+  for (int i = 0; i < num_items; ++i) {
+    result[preprocessing_order[i]] = unordered_result[i];
+  }
+  VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. Dot:\n "
                          << RenderDot(bounding_box_size, result);
-  return result;
+  return {.status = BruteForceResult::Status::kFoundSolution,
+          .positions_for_solution = result};
 }
 
 }  // namespace sat
diff --git a/ortools/sat/2d_packing_brute_force.h b/ortools/sat/2d_packing_brute_force.h
index a5ef0ad5f0..b3c9d68148 100644
--- a/ortools/sat/2d_packing_brute_force.h
+++ b/ortools/sat/2d_packing_brute_force.h
@@ -25,14 +25,28 @@ namespace operations_research {
 namespace sat {
 // Try to solve the Orthogonal Packing Problem by enumeration of all possible
-// solutions. Returns an empty vector if the problem is infeasible, otherwise
-// returns the items in the positions they appear in the solution in the same
-// order as the input arguments.
-// Warning: do not call this with too many item as it will run forever.
-std::vector<Rectangle> BruteForceOrthogonalPacking(
+// solutions. It will try to preprocess the problem into a smaller one, and
+// will only attempt to solve it if the reduced problem has `max_complexity`
+// or fewer items.
+// Warning: do not call this with too many items and a large value of
+// `max_complexity`, or it will run forever.
+struct BruteForceResult {
+  enum class Status {
+    kFoundSolution,
+    kNoSolutionExists,
+    kTooBig,
+  };
+
+  Status status;
+  // Only non-empty if status == kFoundSolution.
+  std::vector<Rectangle> positions_for_solution;
+};
+
+BruteForceResult BruteForceOrthogonalPacking(
     absl::Span<const IntegerValue> sizes_x,
    absl::Span<const IntegerValue> sizes_y,
-    std::pair<IntegerValue, IntegerValue> bounding_box_size);
+    std::pair<IntegerValue, IntegerValue> bounding_box_size,
+    int max_complexity);
 
 }  // namespace sat
 }  // namespace operations_research
diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel
index 4b11f8bd0a..ee5c8adf07 100644
--- a/ortools/sat/BUILD.bazel
+++ b/ortools/sat/BUILD.bazel
@@ -1856,6 +1856,7 @@ cc_library(
         "@com_google_absl//absl/container:inlined_vector",
         "@com_google_absl//absl/log",
         "@com_google_absl//absl/log:check",
+        "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:span",
     ],
 )
diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md
index d8a820cbf3..d4b1ca7c0a 100644
--- a/ortools/sat/docs/scheduling.md
+++ b/ortools/sat/docs/scheduling.md
@@ -859,15 +859,15 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        start_hour  max_load
                 0         0
                 2         0
-                4         1
-                6         3
-                8         6
+                4         3
+                6         6
+                8         8
                10        12
                12         8
                14        12
                16        10
-               18         4
-               20         2
+               18         6
+               20         4
                22         0
     """
 
@@ -927,6 +927,43 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
     return max_load_df, min_load_df, tasks_df
 
 
+def check_solution(
+    tasks: list[tuple[int, int, int]],
+    min_load_df: pd.DataFrame,
+    max_load_df: pd.DataFrame,
+) -> bool:
+    """Checks the solution validity against the min and max load constraints."""
+    period_length: int = 120
+    horizon: int = 24 * 60
+    actual_load_profile = [0 for _ in range(horizon)]
+    min_load_profile = [0 for _ in range(horizon)]
+    max_load_profile = [0 for _ in range(horizon)]
+    for task in tasks:
+        for t in range(task[1]):
+            actual_load_profile[task[0] + t] += task[2]
+    for row in max_load_df.itertuples():
+        for t in range(period_length):
+            max_load_profile[row.start_hour * 60 + t] = row.max_load
+    for row in min_load_df.itertuples():
+        for t in range(period_length):
+            min_load_profile[row.start_hour * 60 + t] = row.min_load
+
+    for time in range(horizon):
+        if actual_load_profile[time] > max_load_profile[time]:
+            print(
+                f"actual load {actual_load_profile[time]} at time {time} is greater"
+                f" than max load {max_load_profile[time]}"
+            )
+            return False
+        if actual_load_profile[time] < min_load_profile[time]:
+            print(
+                f"actual load {actual_load_profile[time]} at time {time} is"
+                f" less than min load {min_load_profile[time]}"
+            )
+            return False
+    return True
+
+
 def main(_) -> None:
     """Create the model and solves it."""
     max_load_df, min_load_df, tasks_df = create_data_model()
@@ -944,7 +981,10 @@ def main(_) -> None:
 
     # Variables
     starts = model.new_int_var_series(
-        name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index
+        name="starts",
+        lower_bounds=0,
+        upper_bounds=horizon - tasks_df.duration,
+        index=tasks_df.index,
     )
     performed = model.new_bool_var_series(name="performed", index=tasks_df.index)
 
@@ -956,7 +996,7 @@
         are_present=performed,
     )
 
-    # Set up complement intervals (from 0 to start, and from start + size to
+    # Set up complemented intervals (from 0 to start, and from start + size to
     # horizon).
     prefix_intervals = model.new_optional_interval_var_series(
         name="prefix_intervals",
@@ -993,9 +1033,12 @@
         max_load,
     )
 
-    # Set up the min profile. We use complement intervals to maintain the
+    # Set up the min profile. We use complemented intervals to maintain the
     # complement of the work load, and fixed intervals to enforce the min
     # number of active workers per time period.
+    #
+    # Note that this works only if the max load cumulative is also added to the
+    # model.
     time_period_min_intervals = model.new_fixed_size_interval_var_series(
         name="time_period_min_intervals",
         index=min_load_df.index,
     )
     time_period_min_heights = min_load_df.min_load
 
+    # We take into account optional intervals. The actual capacity of the min load
+    # cumulative is the sum of all the active demands.
     sum_of_demands = sum(tasks_df.load)
     complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity")
     model.add(complement_capacity == performed.dot(tasks_df.load))
@@ -1026,7 +1071,7 @@
 
     # Create the solver and solve the model.
     solver = cp_model.CpSolver()
-    solver.parameters.log_search_progress = True
+    solver.parameters.log_search_progress = False
     solver.parameters.num_workers = 16
     solver.parameters.max_time_in_seconds = 30.0
     status = solver.solve(model)
 
@@ -1034,11 +1079,19 @@
     if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
         start_values = solver.values(starts)
         performed_values = solver.boolean_values(performed)
+        tasks: list[tuple[int, int, int]] = []
         for task in tasks_df.index:
             if performed_values[task]:
-                print(f"task {task} starts at {start_values[task]}")
+                print(
+                    f'task {task} duration={tasks_df["duration"][task]} '
+                    f'load={tasks_df["load"][task]} starts at {start_values[task]}'
+                )
+                tasks.append(
+                    (start_values[task], tasks_df.duration[task], tasks_df.load[task])
+                )
             else:
                 print(f"task {task} is not performed")
+        assert check_solution(tasks, min_load_df, max_load_df)
     elif status == cp_model.INFEASIBLE:
         print("No solution found")
     else:
diff --git a/ortools/sat/samples/cumulative_variable_profile_sample_sat.py b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py
index 8e6df0a3b5..f6c88822c0 100644
--- a/ortools/sat/samples/cumulative_variable_profile_sample_sat.py
+++ b/ortools/sat/samples/cumulative_variable_profile_sample_sat.py
@@ -34,15 +34,15 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        start_hour  max_load
                 0         0
                 2         0
-                4         1
-                6         3
-                8         6
+                4         3
+                6         6
+                8         8
                10        12
                12         8
                14        12
                16        10
-               18         4
-               20         2
+               18         6
+               20         4
                22         0
     """
 
@@ -103,6 +103,43 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
     # [END data_model]
 
+
+def check_solution(
+    tasks: list[tuple[int, int, int]],
+    min_load_df: pd.DataFrame,
+    max_load_df: pd.DataFrame,
+) -> bool:
+    """Checks the solution validity against the min and max load constraints."""
+    period_length: int = 120
+    horizon: int = 24 * 60
+    actual_load_profile = [0 for _ in range(horizon)]
+    min_load_profile = [0 for _ in range(horizon)]
+    max_load_profile = [0 for _ in range(horizon)]
+    for task in tasks:
+        for t in range(task[1]):
+            actual_load_profile[task[0] + t] += task[2]
+    for row in max_load_df.itertuples():
+        for t in range(period_length):
+            max_load_profile[row.start_hour * 60 + t] = row.max_load
+    for row in min_load_df.itertuples():
+        for t in range(period_length):
+            min_load_profile[row.start_hour * 60 + t] = row.min_load
+
+    for time in range(horizon):
+        if actual_load_profile[time] > max_load_profile[time]:
+            print(
+                f"actual load {actual_load_profile[time]} at time {time} is greater"
+                f" than max load {max_load_profile[time]}"
+            )
+            return False
+        if actual_load_profile[time] < min_load_profile[time]:
+            print(
+                f"actual load {actual_load_profile[time]} at time {time} is"
+                f" less than min load {min_load_profile[time]}"
+            )
+            return False
+    return True
+
+
 def main(_) -> None:
     """Create the model and solves it."""
     # [START data]
     max_load_df, min_load_df, tasks_df = create_data_model()
@@ -127,7 +164,10 @@ def main(_) -> None:
 
     # [START variables]
     # Variables
     starts = model.new_int_var_series(
-        name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index
+        name="starts",
+        lower_bounds=0,
+        upper_bounds=horizon - tasks_df.duration,
+        index=tasks_df.index,
     )
     performed = model.new_bool_var_series(name="performed", index=tasks_df.index)
 
@@ -139,7 +179,7 @@ def main(_) -> None:
         are_present=performed,
     )
 
-    # Set up complement intervals (from 0 to start, and from start + size to
+    # Set up complemented intervals (from 0 to start, and from start + size to
     # horizon).
     prefix_intervals = model.new_optional_interval_var_series(
         name="prefix_intervals",
@@ -178,9 +218,12 @@ def main(_) -> None:
         max_load,
     )
 
-    # Set up the min profile. We use complement intervals to maintain the
+    # Set up the min profile. We use complemented intervals to maintain the
     # complement of the work load, and fixed intervals to enforce the min
     # number of active workers per time period.
+    #
+    # Note that this works only if the max load cumulative is also added to the
+    # model.
     time_period_min_intervals = model.new_fixed_size_interval_var_series(
         name="time_period_min_intervals",
         index=min_load_df.index,
@@ -189,6 +232,8 @@
     )
     time_period_min_heights = min_load_df.min_load
 
+    # We take into account optional intervals. The actual capacity of the min load
+    # cumulative is the sum of all the active demands.
     sum_of_demands = sum(tasks_df.load)
     complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity")
     model.add(complement_capacity == performed.dot(tasks_df.load))
@@ -215,7 +260,7 @@
     # [START solve]
     # Create the solver and solve the model.
     solver = cp_model.CpSolver()
-    solver.parameters.log_search_progress = True
+    solver.parameters.log_search_progress = False
     solver.parameters.num_workers = 16
     solver.parameters.max_time_in_seconds = 30.0
     status = solver.solve(model)
@@ -225,11 +270,19 @@
     if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
         start_values = solver.values(starts)
         performed_values = solver.boolean_values(performed)
+        tasks: list[tuple[int, int, int]] = []
         for task in tasks_df.index:
             if performed_values[task]:
-                print(f"task {task} starts at {start_values[task]}")
+                print(
+                    f'task {task} duration={tasks_df["duration"][task]} '
+                    f'load={tasks_df["load"][task]} starts at {start_values[task]}'
+                )
+                tasks.append(
+                    (start_values[task], tasks_df.duration[task], tasks_df.load[task])
+                )
             else:
                 print(f"task {task} is not performed")
+        assert check_solution(tasks, min_load_df, max_load_df)
     elif status == cp_model.INFEASIBLE:
         print("No solution found")
     else:
diff --git a/ortools/util/fp_utils.cc b/ortools/util/fp_utils.cc
index e44d20cd56..8ca04d1432 100644
--- a/ortools/util/fp_utils.cc
+++ b/ortools/util/fp_utils.cc
@@ -174,18 +174,18 @@ void GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
 
 }  // namespace
 
-void ComputeScalingErrors(const std::vector<double>& input,
-                          const std::vector<double>& lb,
-                          const std::vector<double>& ub, double scaling_factor,
+void ComputeScalingErrors(absl::Span<const double> input,
+                          absl::Span<const double> lb,
+                          absl::Span<const double> ub, double scaling_factor,
                           double* max_relative_coeff_error,
                           double* max_scaled_sum_error) {
   ComputeScalingErrors(input, lb, ub, scaling_factor, max_relative_coeff_error,
                        max_scaled_sum_error);
 }
 
-double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
-                                      const std::vector<double>& lb,
-                                      const std::vector<double>& ub,
+double GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
+                                      absl::Span<const double> lb,
+                                      absl::Span<const double> ub,
                                       int64_t max_absolute_sum) {
   double scaling_factor;
   GetBestScalingOfDoublesToInt64(input, lb, ub, max_absolute_sum,
diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h
index 00eceef8ab..2a943040d9 100644
--- a/ortools/util/fp_utils.h
+++ b/ortools/util/fp_utils.h
@@ -215,9 +215,9 @@ void GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
 // - The sum over i of min(0, round(factor * x[i])) >= -max_sum.
 // - The sum over i of max(0, round(factor * x[i])) <= max_sum.
 // For any possible values of the x[i] such that x[i] is in [lb[i], ub[i]].
-double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
-                                      const std::vector<double>& lb,
-                                      const std::vector<double>& ub,
+double GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
+                                      absl::Span<const double> lb,
+                                      absl::Span<const double> ub,
                                       int64_t max_absolute_sum);
 // This computes:
 //
@@ -227,9 +227,9 @@ double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
 // The max_scaled_sum_error which is a bound on the maximum difference between
 // the exact scaled sum and the rounded one. One needs to divide this by
 // scaling_factor to have the maximum absolute error on the original sum.
-void ComputeScalingErrors(const std::vector<double>& input,
-                          const std::vector<double>& lb,
-                          const std::vector<double>& ub, double scaling_factor,
+void ComputeScalingErrors(absl::Span<const double> input,
+                          absl::Span<const double> lb,
+                          absl::Span<const double> ub, double scaling_factor,
                           double* max_relative_coeff_error,
                           double* max_scaled_sum_error);
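A side note on the `fp_utils` change above: widening the parameters from `const std::vector<double>&` to `absl::Span<const double>` keeps every existing call site valid while also accepting arrays and sub-ranges without copying. The snippet below is a generic illustration of that binding behaviour (the function name is made up), not code from the patch.

```cpp
#include <array>
#include <iostream>
#include <vector>

#include "absl/types/span.h"

// A span parameter accepts any contiguous range of doubles.
double SumOf(absl::Span<const double> values) {
  double sum = 0.0;
  for (const double v : values) sum += v;
  return sum;
}

int main() {
  const std::vector<double> from_vector = {1.0, 2.0, 3.0};
  const std::array<double, 2> from_array = {4.0, 5.0};
  const double from_c_array[] = {6.0, 7.0};
  std::cout << SumOf(from_vector) << " "                             // 6
            << SumOf(from_array) << " "                              // 9
            << SumOf(from_c_array) << " "                            // 13
            << SumOf(absl::MakeSpan(from_vector).subspan(1)) << "\n";  // 5
}
```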