27#include "Eigen/SparseCore"
28#include "absl/algorithm/container.h"
29#include "absl/random/distributions.h"
30#include "absl/types/optional.h"
// +Inf shorthand used for unbounded variable/constraint bounds in this file.
39constexpr double kInfinity = std::numeric_limits<double>::infinity();
// Eigen aliases used throughout the sharded QP utilities below.
40using ::Eigen::ColMajor;
41using ::Eigen::SparseMatrix;
42using ::Eigen::VectorXd;
43using ::Eigen::VectorXi;
// Fragment of a weighted running-average update (matches the
// ShardedWeightedAverage::Add(datapoint, weight) declaration): blends
// `datapoint` into `average_` in place, weighted by `weight` relative to the
// accumulated `sum_weights_`.
// NOTE(review): the enclosing function signature and surrounding lines are
// missing from this extraction — confirm against upstream before editing.
58  CHECK_EQ(datapoint.size(), average_.size());
59  const double weight_ratio =
weight / (sum_weights_ +
weight);
// Incremental mean update: avg += r * (x - avg), applied per shard.
61    shard(average_) += weight_ratio * (shard(datapoint) - shard(average_));
// Returns the larger of |v1| and |v2|, ignoring any value whose magnitude is
// at least `infinite_bound_threshold` (such values are treated as infinite
// bounds and excluded from the combination). Returns 0.0 when both values are
// at or beyond the threshold.
// NOTE(review): reconstructed from a gappy extraction — the two threshold
// guards were visible but their branch bodies were not; confirm against
// upstream.
double CombineBounds(const double v1, const double v2,
                     const double infinite_bound_threshold) {
  double combined = 0.0;
  if (std::abs(v1) < infinite_bound_threshold) {
    combined = std::abs(v1);
  }
  if (std::abs(v2) < infinite_bound_threshold) {
    combined = std::max(combined, std::abs(v2));
  }
  return combined;
}
// Returns the maximum coefficient of `vec`, or 0.0 for an empty vector (used
// so statistics over empty shards/problems are well-defined).
// NOTE(review): the return statements of the empty and isinf branches are
// missing from this extraction; presumably the empty branch returns 0.0 —
// confirm against upstream.
116double MaxOrZero(
const VectorXd& vec) {
117  if (vec.size() == 0) {
119  }
else if (std::isinf(vec.maxCoeff())) {
122  return vec.maxCoeff();
// Returns the minimum coefficient of `vec`, or 0.0 for an empty vector —
// mirror of MaxOrZero above.
// NOTE(review): the return statements of the empty and isinf branches are
// missing from this extraction; presumably the empty branch returns 0.0 —
// confirm against upstream.
126double MinOrZero(
const VectorXd& vec) {
127  if (vec.size() == 0) {
129  }
else if (std::isinf(vec.minCoeff())) {
132  return vec.minCoeff();
// Computes elementwise statistics of |vec| (largest, smallest, average,
// L2 norm, nonzero count), accumulating per-shard partial results in
// parallel and reducing them into a VectorInfo at the end.
// NOTE(review): several original lines are missing from this extraction
// (the nonzero test guarding the `+= 1`, the lambda/brace closers, and the
// `return VectorInfo{` opener) — confirm against upstream before editing.
136VectorInfo ComputeVectorInfo(
const Eigen::Ref<const VectorXd>& vec,
137    const Sharder& sharder) {
// One slot per shard for the parallel partial reductions.
138  VectorXd local_max(sharder.NumShards());
139  VectorXd local_min(sharder.NumShards());
140  VectorXd local_sum(sharder.NumShards());
141  VectorXd local_sum_squared(sharder.NumShards());
142  std::vector<int64_t> local_num_nonzero(sharder.NumShards());
143  sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
144    const VectorXd shard_abs = shard(vec).cwiseAbs();
145    local_max[shard.Index()] = shard_abs.maxCoeff();
146    local_min[shard.Index()] = shard_abs.minCoeff();
147    local_sum[shard.Index()] = shard_abs.sum();
148    local_sum_squared[shard.Index()] = shard_abs.squaredNorm();
149    for (
double element : shard_abs) {
// Counts nonzero entries; the guarding condition is missing from this view.
151      local_num_nonzero[shard.Index()] += 1;
155  const int64_t num_elements = vec.size();
157      .num_finite = num_elements,
158      .num_nonzero = std::accumulate(local_num_nonzero.begin(),
159                                     local_num_nonzero.end(), int64_t{0}),
160      .largest = MaxOrZero(local_max),
161      .smallest = MinOrZero(local_min),
// NAN signals "undefined average" for an empty vector.
162      .average = (num_elements > 0) ? local_sum.sum() / num_elements : NAN,
163      .l2_norm = (num_elements > 0) ? std::sqrt(local_sum_squared.sum()) : 0.0};
// Computes statistics of the gaps between variable upper and lower bounds:
// per-shard max/min/sum over finite gaps, plus counts of finite and nonzero
// gaps, reduced into a VectorInfo.
// NOTE(review): many original lines are missing from this extraction (the
// upper_bounds parameter, the gap_shard computation, loop bodies, the
// closing of the lambda, and the return statement) — confirm upstream.
166VectorInfo VariableBoundGapInfo(
const VectorXd&
lower_bounds,
168    const Sharder& sharder) {
// One slot per shard for the parallel partial reductions.
169  VectorXd local_max(sharder.NumShards());
170  VectorXd local_min(sharder.NumShards());
171  VectorXd local_sum(sharder.NumShards());
172  std::vector<int64_t> local_num_finite(sharder.NumShards());
173  std::vector<int64_t> local_num_nonzero(sharder.NumShards());
174  sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
// Only finite gaps contribute to the max/min/sum statistics.
181    for (int64_t i = 0; i < gap_shard.size(); ++i) {
182      if (std::isfinite(gap_shard[i])) {
187        if (gap_shard[i] != 0) {
192    local_max[shard.Index()] =
max;
193    local_min[shard.Index()] =
min;
194    local_sum[shard.Index()] = sum;
201      local_num_finite.begin(), local_num_finite.end(), int64_t{0});
204      .num_nonzero = std::accumulate(local_num_nonzero.begin(),
205                                     local_num_nonzero.end(), int64_t{0}),
206      .largest = MaxOrZero(local_max),
207      .smallest = MinOrZero(local_min),
// Computes statistics of the absolute values of the nonzero entries of a
// sparse matrix: iterates each shard's columns with InnerIterator,
// accumulating per-shard max/min/sum, then reduces into a VectorInfo keyed
// by the matrix's nonzero count.
// NOTE(review): interior lines are missing from this extraction (the
// max/min/sum initializers, the inner-loop body updating max/min, the
// lambda/loop closers, and the `return VectorInfo{` opener) — confirm
// upstream before editing.
211VectorInfo MatrixAbsElementInfo(
212    const SparseMatrix<double, ColMajor, int64_t>& matrix,
213    const Sharder& sharder) {
// One slot per shard for the parallel partial reductions.
214  VectorXd local_max(sharder.NumShards());
215  VectorXd local_min(sharder.NumShards());
216  VectorXd local_sum(sharder.NumShards());
217  sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
218    const auto matrix_shard = shard(matrix);
// Column-major traversal: outer dimension is columns.
222    for (int64_t col_idx = 0; col_idx < matrix_shard.outerSize(); ++col_idx) {
223      for (
decltype(matrix_shard)::InnerIterator it(matrix_shard, col_idx); it;
227        sum += std::abs(it.value());
230    local_max[shard.Index()] =
max;
231    local_min[shard.Index()] =
min;
232    local_sum[shard.Index()] = sum;
234  const int64_t num_nonzeros = matrix.nonZeros();
236      .num_finite = num_nonzeros,
237      .largest = MaxOrZero(local_max),
238      .smallest = MinOrZero(local_min),
// NAN signals "undefined average" for an all-zero matrix.
239      .average = (num_nonzeros > 0) ? local_sum.sum() / num_nonzeros : NAN};
// Computes statistics over the "combined" right-hand-side bounds: for each
// constraint, CombineBounds() merges the upper and lower bound (ignoring
// values at or past `infinite_bound_threshold`), then per-shard
// max/min/sum/sum-of-squares are reduced into a VectorInfo.
// NOTE(review): interior lines are missing from this extraction (max/min/sum
// initializers, max/min updates inside the loop, loop/lambda closers, and
// the `return VectorInfo{` opener) — confirm upstream before editing.
242VectorInfo CombinedBoundsInfo(
const VectorXd& rhs_upper_bounds,
243    const VectorXd& rhs_lower_bounds,
244    const Sharder& sharder,
// Default threshold treats only true +/-inf as infinite.
245    const double infinite_bound_threshold =
246        std::numeric_limits<double>::infinity()) {
// One slot per shard for the parallel partial reductions.
247  VectorXd local_max(sharder.NumShards());
248  VectorXd local_min(sharder.NumShards());
249  VectorXd local_sum(sharder.NumShards());
250  VectorXd local_sum_squared(sharder.NumShards());
251  sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
252    const auto lb_shard = shard(rhs_lower_bounds);
253    const auto ub_shard = shard(rhs_upper_bounds);
257    double sum_squared = 0.0;
258    for (int64_t i = 0; i < lb_shard.size(); ++i) {
259      const double combined =
260          CombineBounds(ub_shard[i], lb_shard[i], infinite_bound_threshold);
264      sum_squared += combined * combined;
266    local_max[shard.Index()] =
max;
267    local_min[shard.Index()] =
min;
268    local_sum[shard.Index()] = sum;
269    local_sum_squared[shard.Index()] = sum_squared;
271  const int num_constraints = rhs_lower_bounds.size();
273      .num_finite = num_constraints,
274      .largest = MaxOrZero(local_max),
275      .smallest = MinOrZero(local_min),
// NAN signals "undefined average" when there are no constraints.
277          (num_constraints > 0) ? local_sum.sum() / num_constraints : NAN,
279          (num_constraints > 0) ? std::sqrt(local_sum_squared.sum()) : 0.0};
// Computes the max/min L-infinity norms of the rows and columns of the
// constraint matrix. Column norms come from the matrix itself; row norms
// come from the transpose (whose columns are the matrix's rows). Uses
// all-ones scaling vectors so the norms are unscaled.
// NOTE(review): the lines declaring `col_norms`/`row_norms` and the calls
// producing them (presumably ScaledColLInfNorm) are missing from this
// extraction — confirm upstream before editing.
282InfNormInfo ConstraintMatrixRowColInfo(
283    const SparseMatrix<double, ColMajor, int64_t>& constraint_matrix,
284    const SparseMatrix<double, ColMajor, int64_t>& constraint_matrix_transpose,
285    const Sharder& matrix_sharder,
const Sharder& matrix_transpose_sharder) {
288      VectorXd::Ones(constraint_matrix.rows()),
289      VectorXd::Ones(constraint_matrix.cols()),
293      VectorXd::Ones(constraint_matrix_transpose.rows()),
294      VectorXd::Ones(constraint_matrix_transpose.cols()),
295      matrix_transpose_sharder);
296  return InfNormInfo{.max_col_norm = MaxOrZero(col_norms),
297                     .min_col_norm = MinOrZero(col_norms),
298                     .max_row_norm = MaxOrZero(row_norms),
299                     .min_row_norm = MinOrZero(row_norms)};
// Assembles a QuadraticProgramStats proto for `qp`: constraint-matrix
// row/column norms and element stats, combined-bounds stats, objective
// vector stats, variable-bound-gap stats, and objective-matrix stats (zeros
// and NAN average when the program has no quadratic objective).
// NOTE(review): the function opener and several argument lists are missing
// from this extraction (the matrix/sharder arguments to the helper calls,
// the branch testing for a quadratic objective) — confirm upstream.
305    const double infinite_constraint_bound_threshold) {
308  InfNormInfo cons_matrix_norm_info = ConstraintMatrixRowColInfo(
311  VectorInfo cons_matrix_info = MatrixAbsElementInfo(
313  VectorInfo combined_bounds_info = CombinedBoundsInfo(
315      qp.
DualSharder(), infinite_constraint_bound_threshold);
316  VectorInfo obj_vec_info =
318  VectorInfo gaps_info =
// Copy each computed statistic into the output proto.
321  QuadraticProgramStats program_stats;
322  program_stats.set_num_variables(qp.
PrimalSize());
323  program_stats.set_num_constraints(qp.
DualSize());
324  program_stats.set_constraint_matrix_col_min_l_inf_norm(
325      cons_matrix_norm_info.min_col_norm);
326  program_stats.set_constraint_matrix_row_min_l_inf_norm(
327      cons_matrix_norm_info.min_row_norm);
328  program_stats.set_constraint_matrix_num_nonzeros(cons_matrix_info.num_finite);
329  program_stats.set_constraint_matrix_abs_max(cons_matrix_info.largest);
330  program_stats.set_constraint_matrix_abs_min(cons_matrix_info.smallest);
331  program_stats.set_constraint_matrix_abs_avg(cons_matrix_info.average);
332  program_stats.set_combined_bounds_max(combined_bounds_info.largest);
333  program_stats.set_combined_bounds_min(combined_bounds_info.smallest);
334  program_stats.set_combined_bounds_avg(combined_bounds_info.average);
335  program_stats.set_combined_bounds_l2_norm(combined_bounds_info.l2_norm);
336  program_stats.set_variable_bound_gaps_num_finite(gaps_info.num_finite);
337  program_stats.set_variable_bound_gaps_max(gaps_info.largest);
338  program_stats.set_variable_bound_gaps_min(gaps_info.smallest);
339  program_stats.set_variable_bound_gaps_avg(gaps_info.average);
340  program_stats.set_objective_vector_abs_max(obj_vec_info.largest);
341  program_stats.set_objective_vector_abs_min(obj_vec_info.smallest);
342  program_stats.set_objective_vector_abs_avg(obj_vec_info.average);
343  program_stats.set_objective_vector_l2_norm(obj_vec_info.l2_norm);
// Linear-program case: no quadratic objective, so the matrix stats are
// trivially zero with an undefined (NAN) average.
345    program_stats.set_objective_matrix_num_nonzeros(0);
346    program_stats.set_objective_matrix_abs_max(0);
347    program_stats.set_objective_matrix_abs_min(0);
348    program_stats.set_objective_matrix_abs_avg(NAN);
// Quadratic case: stats of the (diagonal) objective matrix as a vector.
350    VectorInfo obj_matrix_info = ComputeVectorInfo(
352    program_stats.set_objective_matrix_num_nonzeros(
353        obj_matrix_info.num_nonzero);
354    program_stats.set_objective_matrix_abs_max(obj_matrix_info.largest);
355    program_stats.set_objective_matrix_abs_min(obj_matrix_info.smallest);
356    program_stats.set_objective_matrix_abs_avg(obj_matrix_info.average);
358  return program_stats;
363enum class ScalingNorm { kL2, kLInf };
370void DivideBySquareRootOfDivisor(
const VectorXd& divisor,
371 const Sharder& sharder, VectorXd& vector) {
372 sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
373 auto vec_shard = shard(vector);
374 auto divisor_shard = shard(divisor);
376 if (divisor_shard[
index] != 0) {
377 vec_shard[
index] /= std::sqrt(divisor_shard[
index]);
// Runs `num_iterations` of norm-equilibration on the constraint matrix:
// each iteration computes scaled column norms of the matrix (column scaling)
// and of its transpose (row scaling) under the chosen norm, then divides the
// scaling vectors by the square roots of those norms. Both scaling vectors
// are updated in place.
// NOTE(review): several lines are missing from this extraction (the switch
// opener, the `col_norm`/`row_norm` declarations and the helper-call names
// producing them, `break`s, closing braces, and the second argument of the
// final DivideBySquareRootOfDivisor calls) — confirm upstream before editing.
383void ApplyScalingIterationsForNorm(
const ShardedQuadraticProgram& sharded_qp,
384    const int num_iterations,
385    const ScalingNorm norm,
386    VectorXd& row_scaling_vec,
387    VectorXd& col_scaling_vec) {
388  const QuadraticProgram& qp = sharded_qp.Qp();
389  const int64_t num_col = qp.constraint_matrix.cols();
390  const int64_t num_row = qp.constraint_matrix.rows();
// Scaling vectors must already be sized to match the matrix.
391  CHECK_EQ(num_col, col_scaling_vec.size());
392  CHECK_EQ(num_row, row_scaling_vec.size());
393  for (
int i = 0; i < num_iterations; ++i) {
397      case ScalingNorm::kL2: {
400            sharded_qp.ConstraintMatrixSharder());
// Row norms are column norms of the transpose.
402            sharded_qp.TransposedConstraintMatrix(), col_scaling_vec,
403            row_scaling_vec, sharded_qp.TransposedConstraintMatrixSharder());
406      case ScalingNorm::kLInf: {
409            sharded_qp.ConstraintMatrixSharder());
411            sharded_qp.TransposedConstraintMatrix(), col_scaling_vec,
412            row_scaling_vec, sharded_qp.TransposedConstraintMatrixSharder());
// Dividing by sqrt(norm) drives the scaled norms toward 1.
416    DivideBySquareRootOfDivisor(col_norm, sharded_qp.PrimalSharder(),
418    DivideBySquareRootOfDivisor(row_norm, sharded_qp.DualSharder(),
426 const int num_iterations, VectorXd& row_scaling_vec,
427 VectorXd& col_scaling_vec) {
428 ApplyScalingIterationsForNorm(sharded_qp, num_iterations, ScalingNorm::kLInf,
429 row_scaling_vec, col_scaling_vec);
433 VectorXd& row_scaling_vec, VectorXd& col_scaling_vec) {
434 ApplyScalingIterationsForNorm(sharded_qp, 1,
435 ScalingNorm::kL2, row_scaling_vec,
// Fragment of ApplyRescaling (see the declared interface): initializes
// all-ones scaling vectors, conditionally applies the configured rescaling
// passes, and rescales the quadratic program in place.
// NOTE(review): most of this function is missing from the extraction (the
// opener, the row_scaling_vec initializer, the option checks, and the
// rescaling calls' names) — confirm upstream before editing.
443      .col_scaling_vec = VectorXd::Ones(sharded_qp.
PrimalSize())};
// Tracks whether any rescaling option actually fired.
444  bool do_rescale =
false;
448                      scaling.row_scaling_vec, scaling.col_scaling_vec);
453                    scaling.col_scaling_vec);
457                          scaling.row_scaling_vec);
// Fragment of ComputePrimalGradient (see the declared interface): computes
// the gradient of the Lagrangian with respect to the primal variables and
// the corresponding primal objective contribution, shard-parallel, summing
// per-shard dot products into `result.value`.
// NOTE(review): many lines are missing from the extraction (the opener, the
// linear/quadratic branch structure, and the lambda closers); the two
// visible paths appear to be the linear case and the quadratic case
// (which subtracts 0.5 * objective_product) — confirm upstream.
463    const VectorXd& primal_solution,
464    const VectorXd& dual_product) {
471        shard(result.gradient) =
473        value_parts[shard.
Index()] =
474            shard(primal_solution).dot(shard(result.gradient));
479        const VectorXd objective_product =
482            objective_product - shard(dual_product);
483        value_parts[shard.Index()] =
484            shard(primal_solution)
485                .dot(shard(result.gradient) - 0.5 * objective_product);
// Reduce the per-shard contributions into the Lagrangian value.
488  result.value = value_parts.sum();
// Returns the constraint-bound term used in the dual subgradient for one
// constraint:
//   - dual < 0: the upper bound is active.
//   - dual > 0: the lower bound is active.
//   - dual == 0: the primal product clamped to [lower, upper] when both are
//     finite; otherwise whichever bound is finite; otherwise 0.0.
// NOTE(review): the opener and the `dual < 0.0` guard were missing from the
// extraction but are forced by the visible else-chain; the final
// both-infinite fallback of 0.0 is reconstructed — confirm upstream.
double DualSubgradientCoefficient(const double constraint_lower_bound,
                                  const double constraint_upper_bound,
                                  const double dual,
                                  const double primal_product) {
  if (dual < 0.0) {
    return constraint_upper_bound;
  } else if (dual > 0.0) {
    return constraint_lower_bound;
  } else if (std::isfinite(constraint_lower_bound) &&
             std::isfinite(constraint_upper_bound)) {
    if (primal_product < constraint_lower_bound) {
      return constraint_lower_bound;
    } else if (primal_product > constraint_upper_bound) {
      return constraint_upper_bound;
    } else {
      return primal_product;
    }
  } else if (std::isfinite(constraint_lower_bound)) {
    return constraint_lower_bound;
  } else if (std::isfinite(constraint_upper_bound)) {
    return constraint_upper_bound;
  } else {
    return 0.0;
  }
}
// Fragment of ComputeDualGradient (see the declared interface): per shard,
// fills `result.gradient` using DualSubgradientCoefficient for each
// constraint, accumulates the dual objective contribution, then subtracts
// the primal product; the per-shard values are summed into `result.value`.
// NOTE(review): the opener, the `result`/`value_parts` declarations, the
// assignment into dual_gradient_shard[i], and the lambda closers are
// missing from this extraction — confirm upstream before editing.
519    const Eigen::VectorXd& dual_solution,
520    const Eigen::VectorXd& primal_product) {
528        auto dual_solution_shard = shard(dual_solution);
529        auto dual_gradient_shard = shard(result.gradient);
530        auto primal_product_shard = shard(primal_product);
531        double value_sum = 0.0;
532        for (int64_t i = 0; i < dual_gradient_shard.size(); ++i) {
534              constraint_lower_bounds[i], constraint_upper_bounds[i],
535              dual_solution_shard[i], primal_product_shard[i]);
536          value_sum += dual_gradient_shard[i] * dual_solution_shard[i];
538        value_parts[shard.
Index()] = value_sum;
// Gradient of the dual Lagrangian includes -A*x.
539        dual_gradient_shard -= primal_product_shard;
// Reduce the per-shard contributions into the Lagrangian value.
541  result.value = value_parts.sum();
547using ::Eigen::ColMajor;
548using ::Eigen::SparseMatrix;
552double NormalizeVector(
const Sharder& sharder, VectorXd& vector) {
553 const double norm =
Norm(vector, sharder);
555 sharder.ParallelForEachShard(
556 [&](
const Sharder::Shard& shard) { shard(vector) /= norm; });
// Returns an upper bound on the probability that `k` iterations of the
// power method, started from a random vector, fail to estimate the largest
// eigenvalue of a `dimension`-sized matrix to within relative error
// `epsilon`. Returns 1.0 (no guarantee) when k < 2 or epsilon <= 0, where
// the bound does not apply.
// NOTE(review): the early-return body was missing from the extraction; the
// formula appears to follow a power-method convergence bound (cf.
// Kuczynski & Wozniakowski) — confirm the citation upstream.
double PowerMethodFailureProbability(int64_t dimension, double epsilon,
                                     int k) {
  if (k < 2 || epsilon <= 0.0) {
    // The probabilistic bound below needs at least two iterations and a
    // positive error tolerance.
    return 1.0;
  }
  return std::min(0.824, 0.354 / (epsilon * (k - 1))) * std::sqrt(dimension) *
         std::pow(1.0 - epsilon, k - 0.5);
}
// Estimates the maximum singular value of `matrix` by the power method on
// the product matrix^T * matrix, optionally restricting to active-set
// coordinates, iterating until the failure-probability bound drops below
// `failure_probability`. A special (visible but incomplete) fast path
// returns a norm-based bound computed per shard.
// NOTE(review): many lines are missing from this extraction (the fast-path
// condition, the per-shard norm expression, the coefficient-wise
// active-set products, the dual_eigenvector declaration, loop increments,
// and closing braces) — confirm upstream before editing.
575SingularValueAndIterations EstimateMaximumSingularValue(
576    const SparseMatrix<double, ColMajor, int64_t>& matrix,
577    const SparseMatrix<double, ColMajor, int64_t>& matrix_transpose,
578    const absl::optional<VectorXd>& active_set_indicator,
579    const absl::optional<VectorXd>& transpose_active_set_indicator,
580    const Sharder& matrix_sharder,
const Sharder& matrix_transpose_sharder,
581    const Sharder& primal_vector_sharder,
const Sharder& dual_vector_sharder,
582    const double desired_relative_error,
const double failure_probability,
583    std::mt19937& mt_generator) {
// Fast path: per-shard norm bound (guarding condition not visible here).
586    VectorXd local_max(matrix_sharder.NumShards());
587    matrix_sharder.ParallelForEachShard([&](
const Sharder::Shard& shard) {
588      const auto matrix_shard = shard(matrix);
589      local_max[shard.Index()] =
591              VectorXd::Ones(matrix_sharder.ShardSize(shard.Index())))
592              .lpNorm<Eigen::Infinity>();
594    return {.singular_value = local_max.lpNorm<Eigen::Infinity>(),
596            .estimated_relative_error = 0.0};
// Power method: start from a random Gaussian vector.
598  const int64_t dimension = matrix.cols();
599  VectorXd eigenvector(dimension);
602  for (
double& entry : eigenvector) {
603    entry = absl::Gaussian<double>(mt_generator);
// Restrict the start vector to the active set when one is supplied.
605  if (active_set_indicator.has_value()) {
609  NormalizeVector(primal_vector_sharder, eigenvector);
610  double eigenvalue_estimate = 0.0;
612  int num_iterations = 0;
// Relative error of the singular value maps to this eigenvalue epsilon
// because singular values are square roots of eigenvalues of A^T A.
617  const double epsilon = 1.0 -
MathUtil::Square(1.0 - desired_relative_error);
// Iterate until the failure-probability bound is small enough.
618  while (PowerMethodFailureProbability(dimension, epsilon, num_iterations) >
619         failure_probability) {
621        matrix_transpose, eigenvector, matrix_transpose_sharder);
622    if (transpose_active_set_indicator.has_value()) {
624                                    dual_vector_sharder, dual_eigenvector);
626    VectorXd next_eigenvector =
628    if (active_set_indicator.has_value()) {
630                                    primal_vector_sharder, next_eigenvector);
// Rayleigh-quotient style estimate: v . (A^T A v).
632    eigenvalue_estimate =
633        Dot(eigenvector, next_eigenvector, primal_vector_sharder);
634    eigenvector = std::move(next_eigenvector);
636    const double primal_norm =
637        NormalizeVector(primal_vector_sharder, eigenvector);
639    VLOG(1) <<
"Iteration " << num_iterations <<
" singular value estimate "
640            << std::sqrt(eigenvalue_estimate) <<
" primal norm " << primal_norm;
642  return SingularValueAndIterations{
643      .singular_value = std::sqrt(eigenvalue_estimate),
644      .num_iterations = num_iterations,
645      .estimated_relative_error = desired_relative_error};
650VectorXd ComputePrimalActiveSetIndicator(
651 const ShardedQuadraticProgram& sharded_qp,
652 const VectorXd& primal_solution) {
653 VectorXd indicator(sharded_qp.PrimalSize());
654 sharded_qp.PrimalSharder().ParallelForEachShard(
655 [&](
const Sharder::Shard& shard) {
656 const auto lower_bound_shard =
657 shard(sharded_qp.Qp().variable_lower_bounds);
658 const auto upper_bound_shard =
659 shard(sharded_qp.Qp().variable_upper_bounds);
660 const auto primal_solution_shard = shard(primal_solution);
661 auto indicator_shard = shard(indicator);
662 const int64_t shard_size =
663 sharded_qp.PrimalSharder().ShardSize(shard.Index());
664 for (int64_t i = 0; i < shard_size; ++i) {
665 if ((primal_solution_shard[i] == lower_bound_shard[i]) ||
666 (primal_solution_shard[i] == upper_bound_shard[i])) {
667 indicator_shard[i] = 0.0;
669 indicator_shard[i] = 1.0;
678VectorXd ComputeDualActiveSetIndicator(
679 const ShardedQuadraticProgram& sharded_qp,
const VectorXd& dual_solution) {
680 VectorXd indicator(sharded_qp.DualSize());
681 sharded_qp.DualSharder().ParallelForEachShard(
682 [&](
const Sharder::Shard& shard) {
683 const auto lower_bound_shard =
684 shard(sharded_qp.Qp().constraint_lower_bounds);
685 const auto upper_bound_shard =
686 shard(sharded_qp.Qp().constraint_upper_bounds);
687 const auto dual_solution_shard = shard(dual_solution);
688 auto indicator_shard = shard(indicator);
689 const int64_t shard_size =
690 sharded_qp.DualSharder().ShardSize(shard.Index());
691 for (int64_t i = 0; i < shard_size; ++i) {
692 if (dual_solution_shard[i] == 0.0 &&
693 (std::isinf(lower_bound_shard[i]) ||
694 std::isinf(upper_bound_shard[i]))) {
695 indicator_shard[i] = 0.0;
697 indicator_shard[i] = 1.0;
// Fragment of EstimateMaximumSingularValueOfConstraintMatrix (see the
// declared interface): builds optional primal/dual active-set indicators
// from the given solutions, then delegates to EstimateMaximumSingularValue.
// NOTE(review): the opener and the argument list of the delegated call
// (matrix, transpose, sharders) are missing from this extraction — confirm
// upstream before editing.
708    const absl::optional<VectorXd>& primal_solution,
709    const absl::optional<VectorXd>& dual_solution,
710    const double desired_relative_error,
const double failure_probability,
711    std::mt19937& mt_generator) {
712  absl::optional<VectorXd> primal_active_set_indicator;
713  absl::optional<VectorXd> dual_active_set_indicator;
714  if (primal_solution.has_value()) {
715    primal_active_set_indicator =
716        ComputePrimalActiveSetIndicator(sharded_qp, *primal_solution);
718  if (dual_solution.has_value()) {
719    dual_active_set_indicator =
720        ComputeDualActiveSetIndicator(sharded_qp, *dual_solution);
722  return EstimateMaximumSingularValue(
728      desired_relative_error, failure_probability, mt_generator);
// Fragment of HasValidBounds (see the declared interface): a program's
// bounds are valid when both the constraint bounds and variable bounds pass
// their (not-visible-here) per-shard validity checks.
// NOTE(review): the validity expressions themselves are missing from this
// extraction — confirm upstream before editing.
733  const bool constraint_bounds_valid =
740  const bool variable_bounds_valid =
748  return constraint_bounds_valid && variable_bounds_valid;
756 shard(primal) = shard(primal)
// Fragment of ProjectToDualVariableBounds (see the declared interface):
// per shard, clips each dual value to the sign permitted by its constraint
// bounds — a constraint with no finite upper bound forces dual >= 0, and
// one with no finite lower bound forces dual <= 0.
// NOTE(review): the enclosing function opener, the bound-shard
// declarations, and closing braces are missing from this extraction —
// confirm upstream before editing.
769        auto dual_shard = shard(dual);
771        for (int64_t i = 0; i < dual_shard.size(); ++i) {
772          if (!std::isfinite(upper_bound_shard[i])) {
773            dual_shard[i] =
std::max(dual_shard[i], 0.0);
775          if (!std::isfinite(lower_bound_shard[i])) {
776            dual_shard[i] =
std::min(dual_shard[i], 0.0);
#define CHECK_EQ(val1, val2)
#define CHECK_GE(val1, val2)
#define VLOG(verboselevel)
static T Square(const T x)
const Sharder & DualSharder() const
void RescaleQuadraticProgram(const Eigen::VectorXd &col_scaling_vec, const Eigen::VectorXd &row_scaling_vec)
const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > & TransposedConstraintMatrix() const
const Sharder & TransposedConstraintMatrixSharder() const
const Sharder & PrimalSharder() const
int64_t PrimalSize() const
const QuadraticProgram & Qp() const
const Sharder & ConstraintMatrixSharder() const
ShardedWeightedAverage(const Sharder *sharder)
void Add(const Eigen::VectorXd &datapoint, double weight)
Eigen::VectorXd ComputeAverage() const
void ParallelForEachShard(const std::function< void(const Shard &)> &func) const
bool ParallelTrueForAllShards(const std::function< bool(const Shard &)> &func) const
int64_t NumElements() const
double Dot(const VectorXd &v1, const VectorXd &v2, const Sharder &sharder)
LagrangianPart ComputePrimalGradient(const ShardedQuadraticProgram &sharded_qp, const VectorXd &primal_solution, const VectorXd &dual_product)
VectorXd TransposedMatrixVectorProduct(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &vector, const Sharder &sharder)
void LInfRuizRescaling(const ShardedQuadraticProgram &sharded_qp, const int num_iterations, VectorXd &row_scaling_vec, VectorXd &col_scaling_vec)
LagrangianPart ComputeDualGradient(const ShardedQuadraticProgram &sharded_qp, const Eigen::VectorXd &dual_solution, const Eigen::VectorXd &primal_product)
constexpr double kInfinity
VectorXd ScaledColLInfNorm(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &row_scaling_vec, const VectorXd &col_scaling_vec, const Sharder &sharder)
double DualSubgradientCoefficient(const double constraint_lower_bound, const double constraint_upper_bound, const double dual, const double primal_product)
bool HasValidBounds(const QuadraticProgram &qp)
bool IsLinearProgram(const QuadraticProgram &qp)
SingularValueAndIterations EstimateMaximumSingularValueOfConstraintMatrix(const ShardedQuadraticProgram &sharded_qp, const absl::optional< VectorXd > &primal_solution, const absl::optional< VectorXd > &dual_solution, const double desired_relative_error, const double failure_probability, std::mt19937 &mt_generator)
void ProjectToDualVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &dual)
void CoefficientWiseProductInPlace(const VectorXd &scale, const Sharder &sharder, VectorXd &dest)
void L2NormRescaling(const ShardedQuadraticProgram &sharded_qp, VectorXd &row_scaling_vec, VectorXd &col_scaling_vec)
VectorXd ScaledColL2Norm(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &row_scaling_vec, const VectorXd &col_scaling_vec, const Sharder &sharder)
ScalingVectors ApplyRescaling(const RescalingOptions &rescaling_options, ShardedQuadraticProgram &sharded_qp)
QuadraticProgramStats ComputeStats(const ShardedQuadraticProgram &qp, const double infinite_constraint_bound_threshold)
bool IsDiagonal(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const Sharder &sharder)
void ProjectToPrimalVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &primal)
double Norm(const VectorXd &vector, const Sharder &sharder)
void AssignVector(const VectorXd &vec, const Sharder &sharder, VectorXd &dest)
std::vector< double > lower_bounds
std::vector< double > upper_bounds
Eigen::VectorXd variable_upper_bounds
Eigen::VectorXd variable_lower_bounds
Eigen::VectorXd constraint_lower_bounds
Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > constraint_matrix
std::optional< Eigen::DiagonalMatrix< double, Eigen::Dynamic > > objective_matrix
Eigen::VectorXd constraint_upper_bounds
Eigen::VectorXd objective_vector
int l_inf_ruiz_iterations
Eigen::VectorXd row_scaling_vec