[CP-SAT] improve 2d packing; fix cumulative with variable profile sample

This commit is contained in:
Laurent Perron
2024-04-15 15:22:37 +02:00
committed by Corentin Le Molgat
parent fa2473affe
commit 16907575a6
9 changed files with 332 additions and 64 deletions

View File

@@ -561,9 +561,14 @@ OrthogonalPackingInfeasibilityDetector::CheckFeasibilityWithDualFunction2(
bool OrthogonalPackingInfeasibilityDetector::RelaxConflictWithBruteForce(
OrthogonalPackingResult& result,
std::pair<IntegerValue, IntegerValue> bounding_box_size) {
std::pair<IntegerValue, IntegerValue> bounding_box_size,
int brute_force_threshold) {
const int num_items_originally =
result.items_participating_on_conflict_.size();
if (num_items_originally > 2 * brute_force_threshold) {
// Don't even try on problems too big.
return false;
}
std::vector<IntegerValue> sizes_x;
std::vector<IntegerValue> sizes_y;
std::vector<int> indexes;
@@ -582,9 +587,9 @@ bool OrthogonalPackingInfeasibilityDetector::RelaxConflictWithBruteForce(
sizes_x.push_back(result.items_participating_on_conflict_[j].size_x);
sizes_y.push_back(result.items_participating_on_conflict_[j].size_y);
}
const auto solution =
BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size);
if (solution.empty()) {
const auto solution = BruteForceOrthogonalPacking(
sizes_x, sizes_y, bounding_box_size, brute_force_threshold);
if (solution.status == BruteForceResult::Status::kNoSolutionExists) {
// We still have a conflict if we remove the i-th item!
to_be_removed[i] = true;
}
@@ -791,28 +796,26 @@ OrthogonalPackingInfeasibilityDetector::TestFeasibilityImpl(
}
}
if (result.result_ == OrthogonalPackingResult::Status::UNKNOWN &&
num_items <= options.brute_force_threshold) {
num_brute_force_calls_++;
auto solution =
BruteForceOrthogonalPacking(sizes_x, sizes_y, bounding_box_size);
if (solution.empty()) {
if (result.result_ == OrthogonalPackingResult::Status::UNKNOWN) {
auto solution = BruteForceOrthogonalPacking(
sizes_x, sizes_y, bounding_box_size, options.brute_force_threshold);
num_brute_force_calls_ +=
(solution.status != BruteForceResult::Status::kTooBig);
if (solution.status == BruteForceResult::Status::kNoSolutionExists) {
result.conflict_type_ = ConflictType::BRUTE_FORCE;
result.result_ = OrthogonalPackingResult::Status::INFEASIBLE;
result.items_participating_on_conflict_.resize(num_items);
for (int i = 0; i < num_items; i++) {
result.items_participating_on_conflict_[i] = make_item(i);
}
} else {
} else if (solution.status == BruteForceResult::Status::kFoundSolution) {
result.result_ = OrthogonalPackingResult::Status::FEASIBLE;
}
}
if (result.result_ == OrthogonalPackingResult::Status::INFEASIBLE &&
result.items_participating_on_conflict_.size() <=
options.brute_force_threshold) {
num_brute_force_relaxation_ +=
RelaxConflictWithBruteForce(result, bounding_box_size);
if (result.result_ == OrthogonalPackingResult::Status::INFEASIBLE) {
num_brute_force_relaxation_ += RelaxConflictWithBruteForce(
result, bounding_box_size, options.brute_force_threshold);
}
return result;

View File

@@ -148,7 +148,8 @@ class OrthogonalPackingInfeasibilityDetector {
private:
bool RelaxConflictWithBruteForce(
OrthogonalPackingResult& result,
std::pair<IntegerValue, IntegerValue> bounding_box_size);
std::pair<IntegerValue, IntegerValue> bounding_box_size,
int brute_force_threshold);
OrthogonalPackingResult TestFeasibilityImpl(
absl::Span<const IntegerValue> sizes_x,

View File

@@ -21,6 +21,7 @@
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/sat/diffn_util.h"
@@ -350,15 +351,14 @@ bool BruteForceOrthogonalPackingImpl(
return !has_unplaced_item;
}
} // namespace
std::vector<Rectangle> BruteForceOrthogonalPacking(
bool BruteForceOrthogonalPackingNoPreprocessing(
absl::Span<const IntegerValue> sizes_x,
absl::Span<const IntegerValue> sizes_y,
std::pair<IntegerValue, IntegerValue> bounding_box_size) {
const std::pair<IntegerValue, IntegerValue> bounding_box_size,
absl::Span<Rectangle> result) {
IntegerValue smallest_x = std::numeric_limits<IntegerValue>::max();
IntegerValue smallest_y = std::numeric_limits<IntegerValue>::max();
int num_items = sizes_x.size();
const int num_items = sizes_x.size();
CHECK_LE(num_items, kMaxProblemSize);
std::vector<int> item_index_sorted_by_area_desc(num_items);
std::array<absl::InlinedVector<PotentialPositionForItem, 16>, kMaxProblemSize>
@@ -366,11 +366,16 @@ std::vector<Rectangle> BruteForceOrthogonalPacking(
absl::Span<absl::InlinedVector<PotentialPositionForItem, 16>>
potential_item_positions(potential_item_positions_storage.data(),
num_items);
for (int i = 0; i < num_items; ++i) {
smallest_x = std::min(smallest_x, sizes_x[i]);
smallest_y = std::min(smallest_y, sizes_y[i]);
item_index_sorted_by_area_desc[i] = i;
potential_item_positions[i].push_back({0, 0, false});
if (sizes_x[i] > bounding_box_size.first ||
sizes_y[i] > bounding_box_size.second) {
return false;
}
}
std::sort(item_index_sorted_by_area_desc.begin(),
item_index_sorted_by_area_desc.end(),
@@ -398,15 +403,153 @@ std::vector<Rectangle> BruteForceOrthogonalPacking(
new_sizes_x, new_sizes_y, bounding_box_size, smallest_x, smallest_y,
item_positions, placed_item_indexes, potential_item_positions, slack);
if (!found_solution) {
return {};
return false;
}
std::vector<Rectangle> result(num_items);
for (int i = 0; i < num_items; ++i) {
result[item_index_sorted_by_area_desc[i]] = item_positions[i];
}
VLOG_EVERY_N_SEC(2, 3) << "Found a feasible packing by brute force. Dot:\n "
return true;
}
// Try to find an equivalent smaller OPP problem by fixing large items.
// The API is a bit unusual: it takes a reference to a mutable Span of sizes and
// rectangles. When this function finds an item that can be fixed, it first adds
// its fixed position to `result`, then reorders `sizes_x`, `sizes_y`,
// `result_index_map` and `result` to put that item at the end of the span, and
// then resizes the span so it contains only non-fixed items.
bool Preprocess(absl::Span<IntegerValue>& sizes_x,
absl::Span<IntegerValue>& sizes_y,
std::pair<IntegerValue, IntegerValue>& bounding_box_size,
absl::Span<int>& result_index_map,
absl::Span<Rectangle>& result) {
const int num_items = sizes_x.size();
if (num_items == 1) {
return false;
}
IntegerValue smallest_x = std::numeric_limits<IntegerValue>::max();
IntegerValue largest_x = std::numeric_limits<IntegerValue>::min();
IntegerValue smallest_y = std::numeric_limits<IntegerValue>::max();
IntegerValue largest_y = std::numeric_limits<IntegerValue>::min();
int largest_x_idx = -1;
int largest_y_idx = -1;
for (int i = 0; i < num_items; ++i) {
if (sizes_x[i] > largest_x) {
largest_x = sizes_x[i];
largest_x_idx = i;
}
if (sizes_y[i] > largest_y) {
largest_y = sizes_y[i];
largest_y_idx = i;
}
smallest_x = std::min(smallest_x, sizes_x[i]);
smallest_y = std::min(smallest_y, sizes_y[i]);
}
if (largest_x > bounding_box_size.first ||
largest_y > bounding_box_size.second) {
// No point in optimizing obviously infeasible instance.
return false;
}
const auto remove_item = [&sizes_x, &sizes_y,
&result_index_map](int index_to_remove) {
std::swap(sizes_x[index_to_remove], sizes_x.back());
sizes_x.remove_suffix(1);
std::swap(sizes_y[index_to_remove], sizes_y.back());
sizes_y.remove_suffix(1);
std::swap(result_index_map[index_to_remove], result_index_map.back());
result_index_map.remove_suffix(1);
};
if (largest_x + smallest_x > bounding_box_size.first) {
// No item (not even the narrowest one) fits alongside the widest item. So we
// care only about fitting the remaining items in the remaining space.
bounding_box_size.second -= sizes_y[largest_x_idx];
result.back() = {
.x_min = 0,
.x_max = largest_x,
.y_min = bounding_box_size.second,
.y_max = bounding_box_size.second + sizes_y[largest_x_idx]};
result.remove_suffix(1);
remove_item(largest_x_idx);
Preprocess(sizes_x, sizes_y, bounding_box_size, result_index_map, result);
return true;
}
if (largest_y + smallest_y > bounding_box_size.second) {
bounding_box_size.first -= sizes_x[largest_y_idx];
result.back() = {.x_min = bounding_box_size.first,
.x_max = bounding_box_size.first + sizes_x[largest_y_idx],
.y_min = 0,
.y_max = largest_y};
result.remove_suffix(1);
remove_item(largest_y_idx);
Preprocess(sizes_x, sizes_y, bounding_box_size, result_index_map, result);
return true;
}
return false;
}
} // namespace
BruteForceResult BruteForceOrthogonalPacking(
absl::Span<const IntegerValue> sizes_x,
absl::Span<const IntegerValue> sizes_y,
std::pair<IntegerValue, IntegerValue> bounding_box_size,
int max_complexity) {
const int num_items = sizes_x.size();
if (num_items > 2 * max_complexity) {
// It is unlikely that preprocessing will remove half of the items, so don't
// lose time trying.
return {.status = BruteForceResult::Status::kTooBig};
}
CHECK_LE(num_items, kMaxProblemSize);
std::vector<int> preprocessing_order(num_items);
std::array<IntegerValue, kMaxProblemSize> new_sizes_x_storage,
new_sizes_y_storage;
// We need a mutable array of sizes for preprocessing.
absl::Span<IntegerValue> new_sizes_x(new_sizes_x_storage.data(), num_items);
absl::Span<IntegerValue> new_sizes_y(new_sizes_y_storage.data(), num_items);
for (int i = 0; i < num_items; ++i) {
preprocessing_order[i] = i;
new_sizes_x[i] = sizes_x[i];
new_sizes_y[i] = sizes_y[i];
}
std::array<Rectangle, kMaxProblemSize> unordered_result_storage;
absl::Span<Rectangle> unordered_result(unordered_result_storage.data(),
num_items);
absl::Span<IntegerValue> post_processed_sizes_x = new_sizes_x;
absl::Span<IntegerValue> post_processed_sizes_y = new_sizes_y;
absl::Span<Rectangle> result_after_preprocessing = unordered_result;
std::pair<IntegerValue, IntegerValue> post_processed_bounding_box_size =
bounding_box_size;
absl::Span<int> permutation = absl::MakeSpan(preprocessing_order);
const bool post_processed =
Preprocess(post_processed_sizes_x, post_processed_sizes_y,
post_processed_bounding_box_size, permutation,
result_after_preprocessing);
DCHECK_EQ(result_after_preprocessing.size(), post_processed_sizes_x.size());
if (result_after_preprocessing.size() > max_complexity) {
return {.status = BruteForceResult::Status::kTooBig};
}
const bool is_feasible = BruteForceOrthogonalPackingNoPreprocessing(
post_processed_sizes_x, post_processed_sizes_y,
post_processed_bounding_box_size, result_after_preprocessing);
VLOG_EVERY_N_SEC(2, 3)
<< "Solved by brute force a problem of " << num_items << " items"
<< (post_processed ? absl::StrCat(" (", post_processed_sizes_x.size(),
" after preprocessing)")
: "")
<< ": solution " << (is_feasible ? "found" : "not found") << ".";
if (!is_feasible) {
return {.status = BruteForceResult::Status::kNoSolutionExists};
}
std::vector<Rectangle> result(num_items);
for (int i = 0; i < num_items; ++i) {
result[preprocessing_order[i]] = unordered_result[i];
}
VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. Dot:\n "
<< RenderDot(bounding_box_size, result);
return result;
return {.status = BruteForceResult::Status::kFoundSolution,
.positions_for_solution = result};
}
} // namespace sat

View File

@@ -25,14 +25,28 @@ namespace operations_research {
namespace sat {
// Try to solve the Orthogonal Packing Problem by enumeration of all possible
// solutions. Returns an empty vector if the problem is infeasible, otherwise
// returns the items in the positions they appear in the solution in the same
// order as the input arguments.
// Warning: do not call this with too many item as it will run forever.
std::vector<Rectangle> BruteForceOrthogonalPacking(
// solutions. It will try to preprocess the problem into a smaller one and will
// only try to solve it if the reduced problem has `max_complexity` or fewer
// items.
// Warning: do not call this with too many items and a large value of
// `max_complexity` or it will run forever.
struct BruteForceResult {
enum class Status {
kFoundSolution,
kNoSolutionExists,
kTooBig,
};
Status status;
// Only non-empty if status==kFoundSolution.
std::vector<Rectangle> positions_for_solution;
};
BruteForceResult BruteForceOrthogonalPacking(
absl::Span<const IntegerValue> sizes_x,
absl::Span<const IntegerValue> sizes_y,
std::pair<IntegerValue, IntegerValue> bounding_box_size);
std::pair<IntegerValue, IntegerValue> bounding_box_size,
int max_complexity);
} // namespace sat
} // namespace operations_research

View File

@@ -1856,6 +1856,7 @@ cc_library(
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
],
)

View File

@@ -859,15 +859,15 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
start_hour max_load
0 0
2 0
4 1
6 3
8 6
4 3
6 6
8 8
10 12
12 8
14 12
16 10
18 4
20 2
18 6
20 4
22 0
"""
@@ -927,6 +927,43 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
return max_load_df, min_load_df, tasks_df
def check_solution(
tasks: list[tuple[int, int, int]],
min_load_df: pd.DataFrame,
max_load_df: pd.DataFrame,
) -> bool:
"""Checks the solution validity against the min and max load constraints."""
period_length: int = 120
horizon: int = 24 * 60
actual_load_profile = [0 for _ in range(horizon)]
min_load_profile = [0 for _ in range(horizon)]
max_load_profile = [0 for _ in range(horizon)]
for task in tasks:
for t in range(task[1]):
actual_load_profile[task[0] + t] += task[2]
for row in max_load_df.itertuples():
for t in range(period_length):
max_load_profile[row.start_hour * 60 + t] = row.max_load
for row in min_load_df.itertuples():
for t in range(period_length):
min_load_profile[row.start_hour * 60 + t] = row.min_load
for time in range(horizon):
if actual_load_profile[time] > max_load_profile[time]:
print(
f"actual load {actual_load_profile[time]} at time {time} is greater"
f" than max load {max_load_profile[time]}"
)
return False
if actual_load_profile[time] < min_load_profile[time]:
print(
f"actual load {actual_load_profile[time]} at time {time} is"
f" less than min load {min_load_profile[time]}"
)
return False
return True
def main(_) -> None:
"""Create the model and solves it."""
max_load_df, min_load_df, tasks_df = create_data_model()
@@ -944,7 +981,10 @@ def main(_) -> None:
# Variables
starts = model.new_int_var_series(
name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index
name="starts",
lower_bounds=0,
upper_bounds=horizon - tasks_df.duration,
index=tasks_df.index,
)
performed = model.new_bool_var_series(name="performed", index=tasks_df.index)
@@ -956,7 +996,7 @@ def main(_) -> None:
are_present=performed,
)
# Set up complement intervals (from 0 to start, and from start + size to
# Set up complemented intervals (from 0 to start, and from start + size to
# horizon).
prefix_intervals = model.new_optional_interval_var_series(
name="prefix_intervals",
@@ -993,9 +1033,12 @@ def main(_) -> None:
max_load,
)
# Set up the min profile. We use complement intervals to maintain the
# Set up the min profile. We use complemented intervals to maintain the
# complement of the work load, and fixed intervals to enforce the min
# number of active workers per time period.
#
# Note that this works only if the max load cumulative is also added to the
# model.
time_period_min_intervals = model.new_fixed_size_interval_var_series(
name="time_period_min_intervals",
index=min_load_df.index,
@@ -1004,6 +1047,8 @@ def main(_) -> None:
)
time_period_min_heights = min_load_df.min_load
# We take into account optional intervals. The actual capacity of the min load
# cumulative is the sum of all the active demands.
sum_of_demands = sum(tasks_df.load)
complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity")
model.add(complement_capacity == performed.dot(tasks_df.load))
@@ -1026,7 +1071,7 @@ def main(_) -> None:
# Create the solver and solve the model.
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
solver.parameters.log_search_progress = False
solver.parameters.num_workers = 16
solver.parameters.max_time_in_seconds = 30.0
status = solver.solve(model)
@@ -1034,11 +1079,19 @@ def main(_) -> None:
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
start_values = solver.values(starts)
performed_values = solver.boolean_values(performed)
tasks: list[tuple[int, int, int]] = []
for task in tasks_df.index:
if performed_values[task]:
print(f"task {task} starts at {start_values[task]}")
print(
f'task {task} duration={tasks_df["duration"][task]} '
f'load={tasks_df["load"][task]} starts at {start_values[task]}'
)
tasks.append(
(start_values[task], tasks_df.duration[task], tasks_df.load[task])
)
else:
print(f"task {task} is not performed")
assert check_solution(tasks, min_load_df, max_load_df)
elif status == cp_model.INFEASIBLE:
print("No solution found")
else:

View File

@@ -34,15 +34,15 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
start_hour max_load
0 0
2 0
4 1
6 3
8 6
4 3
6 6
8 8
10 12
12 8
14 12
16 10
18 4
20 2
18 6
20 4
22 0
"""
@@ -103,6 +103,43 @@ def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
# [END data_model]
def check_solution(
tasks: list[tuple[int, int, int]],
min_load_df: pd.DataFrame,
max_load_df: pd.DataFrame,
) -> bool:
"""Checks the solution validity against the min and max load constraints."""
period_length: int = 120
horizon: int = 24 * 60
actual_load_profile = [0 for _ in range(horizon)]
min_load_profile = [0 for _ in range(horizon)]
max_load_profile = [0 for _ in range(horizon)]
for task in tasks:
for t in range(task[1]):
actual_load_profile[task[0] + t] += task[2]
for row in max_load_df.itertuples():
for t in range(period_length):
max_load_profile[row.start_hour * 60 + t] = row.max_load
for row in min_load_df.itertuples():
for t in range(period_length):
min_load_profile[row.start_hour * 60 + t] = row.min_load
for time in range(horizon):
if actual_load_profile[time] > max_load_profile[time]:
print(
f"actual load {actual_load_profile[time]} at time {time} is greater"
f" than max load {max_load_profile[time]}"
)
return False
if actual_load_profile[time] < min_load_profile[time]:
print(
f"actual load {actual_load_profile[time]} at time {time} is"
f" less than min load {min_load_profile[time]}"
)
return False
return True
def main(_) -> None:
"""Create the model and solves it."""
# [START data]
@@ -127,7 +164,10 @@ def main(_) -> None:
# [START variables]
# Variables
starts = model.new_int_var_series(
name="starts", lower_bounds=0, upper_bounds=horizon, index=tasks_df.index
name="starts",
lower_bounds=0,
upper_bounds=horizon - tasks_df.duration,
index=tasks_df.index,
)
performed = model.new_bool_var_series(name="performed", index=tasks_df.index)
@@ -139,7 +179,7 @@ def main(_) -> None:
are_present=performed,
)
# Set up complement intervals (from 0 to start, and from start + size to
# Set up complemented intervals (from 0 to start, and from start + size to
# horizon).
prefix_intervals = model.new_optional_interval_var_series(
name="prefix_intervals",
@@ -178,9 +218,12 @@ def main(_) -> None:
max_load,
)
# Set up the min profile. We use complement intervals to maintain the
# Set up the min profile. We use complemented intervals to maintain the
# complement of the work load, and fixed intervals to enforce the min
# number of active workers per time period.
#
# Note that this works only if the max load cumulative is also added to the
# model.
time_period_min_intervals = model.new_fixed_size_interval_var_series(
name="time_period_min_intervals",
index=min_load_df.index,
@@ -189,6 +232,8 @@ def main(_) -> None:
)
time_period_min_heights = min_load_df.min_load
# We take into account optional intervals. The actual capacity of the min load
# cumulative is the sum of all the active demands.
sum_of_demands = sum(tasks_df.load)
complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity")
model.add(complement_capacity == performed.dot(tasks_df.load))
@@ -215,7 +260,7 @@ def main(_) -> None:
# [START solve]
# Create the solver and solve the model.
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
solver.parameters.log_search_progress = False
solver.parameters.num_workers = 16
solver.parameters.max_time_in_seconds = 30.0
status = solver.solve(model)
@@ -225,11 +270,19 @@ def main(_) -> None:
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
start_values = solver.values(starts)
performed_values = solver.boolean_values(performed)
tasks: list[tuple[int, int, int]] = []
for task in tasks_df.index:
if performed_values[task]:
print(f"task {task} starts at {start_values[task]}")
print(
f'task {task} duration={tasks_df["duration"][task]} '
f'load={tasks_df["load"][task]} starts at {start_values[task]}'
)
tasks.append(
(start_values[task], tasks_df.duration[task], tasks_df.load[task])
)
else:
print(f"task {task} is not performed")
assert check_solution(tasks, min_load_df, max_load_df)
elif status == cp_model.INFEASIBLE:
print("No solution found")
else:

View File

@@ -174,18 +174,18 @@ void GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
} // namespace
void ComputeScalingErrors(const std::vector<double>& input,
const std::vector<double>& lb,
const std::vector<double>& ub, double scaling_factor,
void ComputeScalingErrors(absl::Span<const double> input,
absl::Span<const double> lb,
absl::Span<const double> ub, double scaling_factor,
double* max_relative_coeff_error,
double* max_scaled_sum_error) {
ComputeScalingErrors<true>(input, lb, ub, scaling_factor,
max_relative_coeff_error, max_scaled_sum_error);
}
double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
const std::vector<double>& lb,
const std::vector<double>& ub,
double GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
absl::Span<const double> lb,
absl::Span<const double> ub,
int64_t max_absolute_sum) {
double scaling_factor;
GetBestScalingOfDoublesToInt64<true>(input, lb, ub, max_absolute_sum,

View File

@@ -215,9 +215,9 @@ void GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
// - The sum over i of min(0, round(factor * x[i])) >= -max_sum.
// - The sum over i of max(0, round(factor * x[i])) <= max_sum.
// For any possible values of the x[i] such that x[i] is in [lb[i], ub[i]].
double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
const std::vector<double>& lb,
const std::vector<double>& ub,
double GetBestScalingOfDoublesToInt64(absl::Span<const double> input,
absl::Span<const double> lb,
absl::Span<const double> ub,
int64_t max_absolute_sum);
// This computes:
//
@@ -227,9 +227,9 @@ double GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
// The max_scaled_sum_error which is a bound on the maximum difference between
// the exact scaled sum and the rounded one. One needs to divide this by
// scaling_factor to have the maximum absolute error on the original sum.
void ComputeScalingErrors(const std::vector<double>& input,
const std::vector<double>& lb,
const std::vector<double>& ub, double scaling_factor,
void ComputeScalingErrors(absl::Span<const double> input,
absl::Span<const double> lb,
absl::Span<const double> ub, double scaling_factor,
double* max_relative_coeff_error,
double* max_scaled_sum_error);