OR-Tools  9.3
primal_dual_hybrid_gradient.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <atomic>
18#include <cmath>
19#include <cstdint>
20#include <functional>
21#include <limits>
22#include <random>
23#include <string>
24#include <utility>
25#include <vector>
26
27#include "Eigen/Core"
28#include "Eigen/SparseCore"
29#include "absl/status/status.h"
30#include "absl/status/statusor.h"
31#include "absl/strings/str_cat.h"
32#include "absl/strings/str_format.h"
33#include "absl/strings/string_view.h"
34#include "absl/types/optional.h"
37#include "ortools/base/timer.h"
38#include "ortools/glop/parameters.pb.h"
40#include "ortools/linear_solver/linear_solver.pb.h"
49#include "ortools/pdlp/solve_log.pb.h"
50#include "ortools/pdlp/solvers.pb.h"
54
56
57namespace {
58
59using ::Eigen::VectorXd;
60
61using IterationStatsCallback =
62 std::function<void(const IterationCallbackInfo&)>;
63
64// Returns infinity norm of the given matrix viewed as a vector.
65double MaxAbsCoefficient(
66 const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix) {
67 // Note: matrix.coeffs().lpNorm<Eigen::Infinity>() gives a link error.
68 return matrix.nonZeros() ? matrix.coeffs().cwiseAbs().maxCoeff() : 0;
69}
70
71// If `num_shards' is positive, returns it. Otherwise returns a reasonable
72// number of shards to use with ShardedQuadraticProgram for the given number of
73// threads.
// Returns `num_shards` when it is positive; otherwise picks a reasonable
// shard count for ShardedQuadraticProgram given `num_threads` (1 shard for
// single-threaded runs, 4 shards per thread otherwise).
int NumShards(const int num_shards, const int num_threads) {
  if (num_shards > 0) return num_shards;
  if (num_threads == 1) return 1;
  return 4 * num_threads;
}
78
79std::string ToString(const ConvergenceInformation& convergence_information,
80 const RelativeConvergenceInformation& relative_information,
81 const OptimalityNorm residual_norm) {
82 constexpr absl::string_view kFormatStr =
83 "%#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g %#12.6g | %#12.6g %#12.6g | "
84 "%#12.6g %#12.6g";
85 switch (residual_norm) {
86 case OPTIMALITY_NORM_L2:
87 return absl::StrFormat(kFormatStr,
88 relative_information.relative_l2_primal_residual,
89 relative_information.relative_l2_dual_residual,
90 relative_information.relative_optimality_gap,
91 convergence_information.l2_primal_residual(),
92 convergence_information.l2_dual_residual(),
93 convergence_information.primal_objective() -
94 convergence_information.dual_objective(),
95 convergence_information.primal_objective(),
96 convergence_information.dual_objective(),
97 convergence_information.l2_primal_variable(),
98 convergence_information.l2_dual_variable());
99 case OPTIMALITY_NORM_L_INF:
100 return absl::StrFormat(
101 kFormatStr, relative_information.relative_l_inf_primal_residual,
102 relative_information.relative_l_inf_dual_residual,
103 relative_information.relative_optimality_gap,
104 convergence_information.l_inf_primal_residual(),
105 convergence_information.l_inf_dual_residual(),
106 convergence_information.primal_objective() -
107 convergence_information.dual_objective(),
108 convergence_information.primal_objective(),
109 convergence_information.dual_objective(),
110 convergence_information.l2_primal_variable(),
111 convergence_information.l2_dual_variable());
112 case OPTIMALITY_NORM_UNSPECIFIED:
113 LOG(FATAL) << "Invalid residual norm.";
114 }
115}
116
117std::string ToShortString(
118 const ConvergenceInformation& convergence_information,
119 const RelativeConvergenceInformation& relative_information,
120 const OptimalityNorm residual_norm) {
121 constexpr absl::string_view kFormatStr =
122 "%#10.4g %#10.4g %#10.4g | %#10.4g %#10.4g";
123 switch (residual_norm) {
124 case OPTIMALITY_NORM_L2:
125 return absl::StrFormat(kFormatStr,
126 relative_information.relative_l2_primal_residual,
127 relative_information.relative_l2_dual_residual,
128 relative_information.relative_optimality_gap,
129 convergence_information.primal_objective(),
130 convergence_information.dual_objective());
131 case OPTIMALITY_NORM_L_INF:
132 return absl::StrFormat(
133 kFormatStr, relative_information.relative_l_inf_primal_residual,
134 relative_information.relative_l_inf_dual_residual,
135 relative_information.relative_optimality_gap,
136 convergence_information.primal_objective(),
137 convergence_information.dual_objective());
138 case OPTIMALITY_NORM_UNSPECIFIED:
139 LOG(FATAL) << "Invalid residual norm.";
140 }
141}
142
143// Returns a string describing iter_stats, based on the ConvergenceInformation
144// with candidate_type==preferred_candidate if one exists, otherwise based on
145// the first value, if any. termination_criteria.optimality_norm determines the
146// norm in which the residuals are displayed.
147std::string ToString(const IterationStats& iter_stats,
148 const TerminationCriteria& termination_criteria,
149 const QuadraticProgramBoundNorms& bound_norms,
150 PointType preferred_candidate) {
151 std::string iteration_string =
152 absl::StrFormat("%6d %8.1f %6.1f", iter_stats.iteration_number(),
153 iter_stats.cumulative_kkt_matrix_passes(),
154 iter_stats.cumulative_time_sec());
155 auto convergence_information =
156 GetConvergenceInformation(iter_stats, preferred_candidate);
157 if (!convergence_information.has_value() &&
158 iter_stats.convergence_information_size() > 0) {
159 convergence_information = iter_stats.convergence_information(0);
160 }
161 if (convergence_information.has_value()) {
162 const RelativeConvergenceInformation relative_information =
163 ComputeRelativeResiduals(termination_criteria.eps_optimal_absolute(),
164 termination_criteria.eps_optimal_relative(),
165 bound_norms, *convergence_information);
166 return absl::StrCat(iteration_string, " | ",
167 ToString(*convergence_information, relative_information,
168 termination_criteria.optimality_norm()));
169 }
170 return iteration_string;
171}
172
173std::string ToShortString(const IterationStats& iter_stats,
174 const TerminationCriteria& termination_criteria,
175 const QuadraticProgramBoundNorms& bound_norms,
176 PointType preferred_candidate) {
177 std::string iteration_string =
178 absl::StrFormat("%6d %6.1f", iter_stats.iteration_number(),
179 iter_stats.cumulative_time_sec());
180 auto convergence_information =
181 GetConvergenceInformation(iter_stats, preferred_candidate);
182 if (!convergence_information.has_value() &&
183 iter_stats.convergence_information_size() > 0) {
184 convergence_information = iter_stats.convergence_information(0);
185 }
186 if (convergence_information.has_value()) {
187 const RelativeConvergenceInformation relative_information =
188 ComputeRelativeResiduals(termination_criteria.eps_optimal_absolute(),
189 termination_criteria.eps_optimal_relative(),
190 bound_norms, *convergence_information);
191 return absl::StrCat(
192 iteration_string, " | ",
193 ToShortString(*convergence_information, relative_information,
194 termination_criteria.optimality_norm()));
195 }
196 return iteration_string;
197}
198
199// Returns a label string corresponding to the format of ToString().
200std::string ConvergenceInformationLabelString() {
201 return absl::StrFormat(
202 "%12s %12s %12s | %12s %12s %12s | %12s %12s | %12s %12s", "rel_prim_res",
203 "rel_dual_res", "rel_gap", "prim_resid", "dual_resid", "obj_gap",
204 "prim_obj", "dual_obj", "prim_var_l2", "dual_var_l2");
205}
206
207std::string ConvergenceInformationLabelShortString() {
208 return absl::StrFormat("%10s %10s %10s | %10s %10s", "rel_p_res", "rel_d_res",
209 "rel_gap", "prim_obj", "dual_obj");
210}
211
212std::string IterationStatsLabelString() {
213 return absl::StrCat(
214 absl::StrFormat("%6s %8s %6s", "iter#", "kkt_pass", "time"), " | ",
215 ConvergenceInformationLabelString());
216}
217
218std::string IterationStatsLabelShortString() {
219 return absl::StrCat(absl::StrFormat("%6s %6s", "iter#", "time"), " | ",
220 ConvergenceInformationLabelShortString());
221}
222
// Result of one inner step attempt (see TakeMalitskyPockStep,
// TakeAdaptiveStep, and TakeConstantSizeStep).
enum class InnerStepOutcome {
  // The step was accepted.
  kSuccessful,
  // The step produced values indicating numerical trouble; the solver
  // should terminate with a numerical-error status.
  kForceNumericalTermination,
};
227
// Implements the primal-dual hybrid gradient (PDHG) method for a single
// QuadraticProgram, including rescaling, optional presolve, restarts, and
// several step-size rules. Construct once per problem and call Solve().
class Solver {
 public:
  // Assumes that the qp and params are valid.
  // Note that the qp is intentionally passed by value.
  Solver(QuadraticProgram qp, const PrimalDualHybridGradientParams& params);

  // Not copyable or movable because of const and reference members.
  Solver(const Solver&) = delete;
  Solver& operator=(const Solver&) = delete;

  // Zero is used if initial_solution is nullopt.
  SolverResult Solve(absl::optional<PrimalAndDualSolution> initial_solution,
                     const std::atomic<bool>* interrupt_solve,
                     IterationStatsCallback iteration_stats_callback);

 private:
  // A candidate next iterate together with its displacement from the current
  // iterate (the displacement is reused for movement/nonlinearity estimates).
  struct NextSolutionAndDelta {
    VectorXd value;
    // delta is value - current_solution.
    VectorXd delta;
  };

  // State for the distance-based restart rule.
  // NOTE(review): the field declarations appear truncated in this extraction;
  // the initializer of distance_based_restart_info_ below implies fields
  // distance_moved_last_restart_period and length_of_last_restart_period --
  // confirm against the original file.
  struct DistanceBasedRestartInfo {
  };

  // Holds the original (pre-presolve) problem and the preprocessor state
  // needed to map a working solution back to the original space.
  // NOTE(review): parts of the constructor's initializer list appear
  // truncated in this extraction (the member names receiving the
  // VectorXd::Ones(...) initializers are missing) -- confirm against the
  // original file.
  struct PresolveInfo {
    explicit PresolveInfo(ShardedQuadraticProgram original_qp,
                          const PrimalDualHybridGradientParams& params)
        : preprocessor_parameters(PreprocessorParameters(params)),
          sharded_original_qp(std::move(original_qp)),
          VectorXd::Ones(sharded_original_qp.PrimalSize())),
          VectorXd::Ones(sharded_original_qp.DualSize())) {}
    glop::GlopParameters preprocessor_parameters;
    glop::MainLpPreprocessor preprocessor;
    ShardedQuadraticProgram sharded_original_qp;
  };

  // Movement terms (weighted squared norms of primal and dual deltas) larger
  // than this cause termination because iterates are diverging, and likely to
  // cause infinite and NaN values.
  constexpr static double kDivergentMovement = 1.0e100;

  NextSolutionAndDelta ComputeNextPrimalSolution(double primal_step_size) const;

  NextSolutionAndDelta ComputeNextDualSolution(
      double dual_step_size, double extrapolation_factor,
      const NextSolutionAndDelta& next_primal) const;

  // Primal-weight-scaled squared norm of the combined primal/dual delta;
  // compared against kDivergentMovement by the step routines.
  double ComputeMovement(const VectorXd& delta_primal,
                         const VectorXd& delta_dual) const;

  double ComputeNonlinearity(const VectorXd& delta_primal,
                             const VectorXd& next_dual_product) const;

  // Creates all the simple-to-compute statistics in stats.
  IterationStats CreateSimpleIterationStats(RestartChoice restart_used) const;

  RestartChoice ChooseRestartToApply(bool is_major_iteration);

  VectorXd PrimalAverage() const;

  VectorXd DualAverage() const;

  double ComputeNewPrimalWeight() const;

  // Picks the primal and dual solutions according to output_type, unscales them
  // and makes the closing changes to the SolveLog. This function should only be
  // called once the solver is finishing its execution.
  // NOTE: The primal_solution and dual_solution are used as the output except
  // when output_type is POINT_TYPE_CURRENT_ITERATE or
  // POINT_TYPE_ITERATE_DIFFERENCE, in which case the values are computed from
  // Solver data. NOTE: Both primal_solution and dual_solution are passed by
  // copy. To avoid unnecessary copying, move these objects (i.e. use
  // std::move()).
  SolverResult ConstructSolverResult(VectorXd primal_solution,
                                     VectorXd dual_solution,
                                     const IterationStats& stats,
                                     TerminationReason termination_reason,
                                     PointType output_type,
                                     SolveLog solve_log) const;

  // Adds one entry of convergence information and infeasibility information to
  // stats using the input solutions. The primal_solution and dual_solution are
  // solutions for sharded_qp. The col_scaling_vec and row_scaling_vec are used
  // to implicitly unscale sharded_qp when computing the relevant information.
  void AddConvergenceAndInfeasibilityInformation(
      const VectorXd& primal_solution, const VectorXd& dual_solution,
      const ShardedQuadraticProgram& sharded_qp,
      const VectorXd& col_scaling_vec, const VectorXd& row_scaling_vec,
      PointType candidate_type, IterationStats& stats) const;

  // Adds one entry of PointMetadata to stats using the input solutions.
  void AddPointMetadata(const VectorXd& primal_solution,
                        const VectorXd& dual_solution, PointType point_type,
                        IterationStats& stats) const;

  // Returns a TerminationReasonAndPointType when the termination criteria are
  // satisfied, otherwise returns nothing. Uses the primal and dual vectors to
  // compute solution statistics and adds them to the stats proto.
  // NOTE: The primal and dual input pair should be a scaled solution.
  absl::optional<TerminationReasonAndPointType>
  UpdateIterationStatsAndCheckTermination(bool force_numerical_termination,
                                          const VectorXd& primal_average,
                                          const VectorXd& dual_average,
                                          IterationStats& stats) const;

  double DistanceTraveledFromLastStart(const VectorXd& primal_solution,
                                       const VectorXd& dual_solution) const;

  LocalizedLagrangianBounds ComputeLocalizedBoundsAtCurrent() const;

  LocalizedLagrangianBounds ComputeLocalizedBoundsAtAverage() const;

  double InitialPrimalWeight(double l2_norm_primal_linear_objective,
                             double l2_norm_constraint_bounds) const;

  void ComputeAndApplyRescaling();

  // Applies the given RestartChoice. If a restart is chosen, updates the
  // state of the algorithm accordingly and computes a new primal weight.
  void ApplyRestartChoice(RestartChoice restart_to_apply);

  absl::optional<SolverResult> MajorIterationAndTerminationCheck(
      bool force_numerical_termination, SolveLog& solve_log);

  bool ShouldDoAdaptiveRestartHeuristic(double candidate_normalized_gap) const;

  RestartChoice DetermineDistanceBasedRestartChoice() const;

  void ResetAverageToCurrent();

  void LogNumericalTermination() const;

  void LogInnerIterationLimitHit() const;

  void LogQuadraticProgramStats(const QuadraticProgramStats& stats);

  // Takes a step based on the Malitsky and Pock linesearch algorithm.
  // (https://arxiv.org/pdf/1608.08883.pdf)
  // The current implementation is provably convergent (at an optimal rate)
  // for LP programs (provided we do not change the primal weight at every major
  // iteration). Further, we have observed that this rule is very sensitive to
  // the parameter choice whenever we apply the primal weight recomputation
  // heuristic.
  InnerStepOutcome TakeMalitskyPockStep();

  // Takes a step based on the adaptive heuristic presented in Section 3.1 of
  // https://arxiv.org/pdf/2106.04756.pdf (further generalized to QP).
  InnerStepOutcome TakeAdaptiveStep();

  // Takes a constant-size step.
  InnerStepOutcome TakeConstantSizeStep();

  const QuadraticProgram& WorkingQp() const { return sharded_working_qp_.Qp(); }

  // TODO(user): experiment with different preprocessor types.
  static glop::GlopParameters PreprocessorParameters(
      const PrimalDualHybridGradientParams& params);

  // If presolve is enabled, moves sharded_working_qp_ to
  // presolve_info_.sharded_original_qp and computes the presolved linear
  // program and installs it in sharded_working_qp_. Clears initial_solution if
  // presolve is enabled. If presolve solves the problem completely returns the
  // appropriate TerminationReason. Otherwise returns nullopt. If presolve
  // is disabled or an error occurs modifies nothing and returns nullopt.
  absl::optional<TerminationReason> ApplyPresolveIfEnabled(
      absl::optional<PrimalAndDualSolution>* initial_solution);

  PrimalAndDualSolution RecoverOriginalSolution(
      PrimalAndDualSolution working_solution) const;

  // Wall-clock timer; its reading feeds cumulative_time_sec in the stats.
  WallTimer timer_;
  const PrimalDualHybridGradientParams params_;
  // Shard count used for all sharders; see NumShards().
  const int num_shards_;
  // This is the QP that PDHG is run on. It has been reduced by presolve and/or
  // rescaled, if those are enabled. The original problem is available in
  // presolve_info_->sharded_original_qp if presolve_info_.has_value(), and
  // otherwise can be obtained by undoing the scaling of sharded_working_qp_ by
  // col_scaling_vec_ and row_scaling_vec_.
  ShardedQuadraticProgram sharded_working_qp_;
  // Running weighted averages of the primal and dual iterates.
  ShardedWeightedAverage primal_average_;
  ShardedWeightedAverage dual_average_;
  IterationStatsCallback iteration_stats_callback_;
  QuadraticProgramBoundNorms original_bound_norms_;

  // Set iff presolve is enabled.
  absl::optional<PresolveInfo> presolve_info_;

  // Current PDHG step size (reported in iteration stats).
  double step_size_;
  // For Malitsky-Pock linesearch only: step_size_ / previous_step_size
  double ratio_last_two_step_sizes_;
  // Relative weighting of the primal vs. dual norms; see ComputeMovement().
  double primal_weight_;
  // For adaptive restarts only.
  double normalized_gap_at_last_trial_ =
      std::numeric_limits<double>::infinity();
  // For adaptive restarts only.
  double normalized_gap_at_last_restart_ =
      std::numeric_limits<double>::infinity();
  // Counters reported in iteration stats.
  int iterations_completed_;
  int num_rejected_steps_;
  // Current iterate and the deltas from the previous iterate.
  VectorXd current_primal_solution_;
  VectorXd current_dual_solution_;
  VectorXd current_primal_delta_;
  VectorXd current_dual_delta_;
  // A cache of constraint_matrix.transpose() * current_dual_solution.
  VectorXd current_dual_product_;
  // The primal point at which the algorithm was last restarted from, or
  // the initial primal starting point if no restart has occurred.
  VectorXd last_primal_start_point_;
  // The dual point at which the algorithm was last restarted from, or
  // the initial dual starting point if no restart has occurred.
  VectorXd last_dual_start_point_;
  // Information for deciding whether to trigger a distance-based restart.
  // The distances are initialized to +inf to force a restart during the first
  // major iteration check.
  DistanceBasedRestartInfo distance_based_restart_info_ = {
      .distance_moved_last_restart_period =
          std::numeric_limits<double>::infinity(),
      .length_of_last_restart_period = 1,
  };
  // The scaling vectors that map the original (or presolved) quadratic program
  // to the working version. See
  // ShardedQuadraticProgram::RescaleQuadraticProgram() for details.
  VectorXd col_scaling_vec_;
  VectorXd row_scaling_vec_;
};
461
// Takes ownership of `qp` (moved into the sharded working program) and wires
// the weighted averages to the working program's sharders. The remaining
// members are initialized later, during Solve().
Solver::Solver(QuadraticProgram qp,
               const PrimalDualHybridGradientParams& params)
    : params_(params),
      num_shards_(NumShards(params.num_shards(), params.num_threads())),
      sharded_working_qp_(std::move(qp), params.num_threads(), num_shards_),
      primal_average_(&sharded_working_qp_.PrimalSharder()),
      dual_average_(&sharded_working_qp_.DualSharder()) {}
469
470Solver::NextSolutionAndDelta Solver::ComputeNextPrimalSolution(
471 double primal_step_size) const {
472 const int64_t primal_size = sharded_working_qp_.PrimalSize();
473 NextSolutionAndDelta result = {
474 .value = VectorXd(primal_size),
475 .delta = VectorXd(primal_size),
476 };
477 const QuadraticProgram& qp = WorkingQp();
478 // This computes the primal portion of the PDHG algorithm:
479 // argmin_x[gradient(f)(current_primal_solution)'x + g(x)
480 // + current_dual_solution' K x
481 // + (0.5 / primal_step_size) * norm(x - current_primal_solution) ^ 2]
482 // See Sections 2 - 3 of Chambolle and Pock and the comment in the header.
483 // We omitted the constant terms from Chambolle and Pock's (7).
484 // This minimization is easy to do in closed form since it can be separated
485 // into independent problems for each of the primal variables.
486 sharded_working_qp_.PrimalSharder().ParallelForEachShard(
487 [&](const Sharder::Shard& shard) {
488 if (!IsLinearProgram(qp)) {
489 const VectorXd diagonal_scaling =
490 primal_step_size *
491 shard(qp.objective_matrix->diagonal()).array() +
492 1.0;
493 shard(result.value) =
494 (shard(current_primal_solution_) -
495 primal_step_size *
496 (shard(qp.objective_vector) - shard(current_dual_product_)))
497 // Scale i-th element by 1 / (1 + primal_step_size * Q_{ii})
498 .cwiseQuotient(diagonal_scaling)
499 .cwiseMin(shard(qp.variable_upper_bounds))
500 .cwiseMax(shard(qp.variable_lower_bounds));
501 } else {
502 // The formula in the LP case is simplified for better performance.
503 shard(result.value) =
504 (shard(current_primal_solution_) -
505 primal_step_size *
506 (shard(qp.objective_vector) - shard(current_dual_product_)))
507 .cwiseMin(shard(qp.variable_upper_bounds))
508 .cwiseMax(shard(qp.variable_lower_bounds));
509 }
510 shard(result.delta) =
511 shard(result.value) - shard(current_primal_solution_);
512 });
513 return result;
514}
515
516Solver::NextSolutionAndDelta Solver::ComputeNextDualSolution(
517 double dual_step_size, double extrapolation_factor,
518 const NextSolutionAndDelta& next_primal_solution) const {
519 const int64_t dual_size = sharded_working_qp_.DualSize();
520 NextSolutionAndDelta result = {
521 .value = VectorXd(dual_size),
522 .delta = VectorXd(dual_size),
523 };
524 const QuadraticProgram& qp = WorkingQp();
525 VectorXd extrapolated_primal(sharded_working_qp_.PrimalSize());
526 sharded_working_qp_.PrimalSharder().ParallelForEachShard(
527 [&](const Sharder::Shard& shard) {
528 shard(extrapolated_primal) =
529 (shard(next_primal_solution.value) +
530 extrapolation_factor * shard(next_primal_solution.delta));
531 });
532 // TODO(user): Refactor this multiplication so that we only do one matrix
533 // vector mutiply for the primal variable. This only applies to Malitsky and
534 // Pock and not to the adaptive step size rule.
535 sharded_working_qp_.TransposedConstraintMatrixSharder().ParallelForEachShard(
536 [&](const Sharder::Shard& shard) {
537 VectorXd temp =
538 shard(current_dual_solution_) -
539 dual_step_size *
540 shard(sharded_working_qp_.TransposedConstraintMatrix())
541 .transpose() *
542 extrapolated_primal;
543 // Each element of the argument of the cwiseMin is the critical point
544 // of the respective 1D minimization problem if it's negative.
545 // Likewise the argument to the cwiseMax is the critical point if
546 // positive.
547 shard(result.value) =
548 VectorXd::Zero(temp.size())
549 .cwiseMin(temp +
550 dual_step_size * shard(qp.constraint_upper_bounds))
551 .cwiseMax(temp +
552 dual_step_size * shard(qp.constraint_lower_bounds));
553 shard(result.delta) =
554 (shard(result.value) - shard(current_dual_solution_));
555 });
556 return result;
557}
558
559double Solver::ComputeMovement(const VectorXd& delta_primal,
560 const VectorXd& delta_dual) const {
561 const double primal_movement =
562 (0.5 * primal_weight_) *
563 SquaredNorm(delta_primal, sharded_working_qp_.PrimalSharder());
564 const double dual_movement =
565 (0.5 / primal_weight_) *
566 SquaredNorm(delta_dual, sharded_working_qp_.DualSharder());
567 return primal_movement + dual_movement;
568}
569
570double Solver::ComputeNonlinearity(const VectorXd& delta_primal,
571 const VectorXd& next_dual_product) const {
572 // Lemma 1 in Chambolle and Pock includes a term with L_f, the Lipshitz
573 // constant of f. This is zero in our formulation.
574 return sharded_working_qp_.PrimalSharder().ParallelSumOverShards(
575 [&](const Sharder::Shard& shard) {
576 return -shard(delta_primal)
577 .dot(shard(next_dual_product) -
578 shard(current_dual_product_));
579 });
580}
581
582IterationStats Solver::CreateSimpleIterationStats(
583 RestartChoice restart_used) const {
584 IterationStats stats;
585 double num_kkt_passes_per_rejected_step = 1.0;
586 if (params_.linesearch_rule() ==
587 PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE) {
588 num_kkt_passes_per_rejected_step = 0.5;
589 }
590 stats.set_iteration_number(iterations_completed_);
591 stats.set_cumulative_rejected_steps(num_rejected_steps_);
592 // TODO(user): This formula doesn't account for kkt passes in major
593 // iterations.
594 stats.set_cumulative_kkt_matrix_passes(iterations_completed_ +
595 num_kkt_passes_per_rejected_step *
596 num_rejected_steps_);
597 stats.set_cumulative_time_sec(timer_.Get());
598 stats.set_restart_used(restart_used);
599 stats.set_step_size(step_size_);
600 stats.set_primal_weight(primal_weight_);
601 return stats;
602}
603
604double Solver::DistanceTraveledFromLastStart(
605 const VectorXd& primal_solution, const VectorXd& dual_solution) const {
606 return std::sqrt((0.5 * primal_weight_) *
607 SquaredDistance(primal_solution,
608 last_primal_start_point_,
609 sharded_working_qp_.PrimalSharder()) +
610 (0.5 / primal_weight_) *
611 SquaredDistance(dual_solution, last_dual_start_point_,
612 sharded_working_qp_.DualSharder()));
613}
614
615LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtCurrent() const {
616 const double distance_traveled_by_current = DistanceTraveledFromLastStart(
617 current_primal_solution_, current_dual_solution_);
619 sharded_working_qp_, current_primal_solution_, current_dual_solution_,
620 PrimalDualNorm::kEuclideanNorm, primal_weight_,
621 distance_traveled_by_current,
622 /*primal_product=*/nullptr, &current_dual_product_,
623 params_.use_diagonal_qp_trust_region_solver(),
624 params_.diagonal_qp_trust_region_solver_tolerance());
625}
626
627LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtAverage() const {
628 // TODO(user): These vectors are recomputed again for termination checks
629 // and again if we eventually restart to the average.
630 VectorXd average_primal = PrimalAverage();
631 VectorXd average_dual = DualAverage();
632
633 const double distance_traveled_by_average =
634 DistanceTraveledFromLastStart(average_primal, average_dual);
635
637 sharded_working_qp_, average_primal, average_dual,
638 PrimalDualNorm::kEuclideanNorm, primal_weight_,
639 distance_traveled_by_average,
640 /*primal_product=*/nullptr, /*dual_product=*/nullptr,
641 params_.use_diagonal_qp_trust_region_solver(),
642 params_.diagonal_qp_trust_region_solver_tolerance());
643}
644
645bool AverageHasBetterPotential(
646 const LocalizedLagrangianBounds& local_bounds_at_average,
647 const LocalizedLagrangianBounds& local_bounds_at_current) {
648 return BoundGap(local_bounds_at_average) /
649 MathUtil::Square(local_bounds_at_average.radius) <
650 BoundGap(local_bounds_at_current) /
651 MathUtil::Square(local_bounds_at_current.radius);
652}
653
654double NormalizedGap(
655 const LocalizedLagrangianBounds& local_bounds_at_candidate) {
656 const double distance_traveled_by_candidate =
657 local_bounds_at_candidate.radius;
658 return BoundGap(local_bounds_at_candidate) / distance_traveled_by_candidate;
659}
660
661// TODO(user): Review / cleanup adaptive heuristic.
662bool Solver::ShouldDoAdaptiveRestartHeuristic(
663 double candidate_normalized_gap) const {
664 const double gap_reduction_ratio =
665 candidate_normalized_gap / normalized_gap_at_last_restart_;
666 if (gap_reduction_ratio < params_.sufficient_reduction_for_restart()) {
667 return true;
668 }
669 if (gap_reduction_ratio < params_.necessary_reduction_for_restart() &&
670 candidate_normalized_gap > normalized_gap_at_last_trial_) {
671 // We've made the "necessary" amount of progress, and iterates appear to
672 // be getting worse, so restart.
673 return true;
674 }
675 return false;
676}
677
678RestartChoice Solver::DetermineDistanceBasedRestartChoice() const {
679 // The following checks are safeguards that normally should not be triggered.
680 if (primal_average_.NumTerms() == 0) {
681 return RESTART_CHOICE_NO_RESTART;
682 } else if (distance_based_restart_info_.length_of_last_restart_period == 0) {
683 return RESTART_CHOICE_RESTART_TO_AVERAGE;
684 }
685 const int restart_period_length = primal_average_.NumTerms();
686 const double distance_moved_this_restart_period_by_average =
687 DistanceTraveledFromLastStart(primal_average_.ComputeAverage(),
688 dual_average_.ComputeAverage());
690 distance_based_restart_info_.distance_moved_last_restart_period;
691
692 // A restart should be triggered when the normalized distance traveled by
693 // the average is at least a constant factor smaller than the last.
694 // TODO(user): Experiment with using .necessary_reduction_for_restart()
695 // as a heuristic when deciding if a restart should be triggered.
696 if ((distance_moved_this_restart_period_by_average / restart_period_length) <
697 params_.sufficient_reduction_for_restart() *
699 distance_based_restart_info_.length_of_last_restart_period)) {
700 // Restart at current solution when it yields a smaller normalized potential
701 // function value than the average (heuristic suggested by ohinder@).
702 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
703 ComputeLocalizedBoundsAtCurrent())) {
704 return RESTART_CHOICE_RESTART_TO_AVERAGE;
705 } else {
706 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
707 }
708 } else {
709 return RESTART_CHOICE_NO_RESTART;
710 }
711}
712
713RestartChoice Solver::ChooseRestartToApply(const bool is_major_iteration) {
714 if (!primal_average_.HasNonzeroWeight() &&
715 !dual_average_.HasNonzeroWeight()) {
716 return RESTART_CHOICE_NO_RESTART;
717 }
718 // TODO(user): This forced restart is very important for the performance of
719 // ADAPTIVE_HEURISTIC. Test if the impact comes primarily from the first
720 // forced restart (which would unseat a good initial starting point that could
721 // prevent restarts early in the solve) or if it's really needed for the full
722 // duration of the solve. If it is really needed, should we then trigger major
723 // iterations on powers of two?
724 const int restart_length = primal_average_.NumTerms();
725 if (restart_length >= iterations_completed_ / 2 &&
726 params_.restart_strategy() ==
727 PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
728 if (AverageHasBetterPotential(ComputeLocalizedBoundsAtAverage(),
729 ComputeLocalizedBoundsAtCurrent())) {
730 return RESTART_CHOICE_RESTART_TO_AVERAGE;
731 } else {
732 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
733 }
734 }
735 if (is_major_iteration) {
736 switch (params_.restart_strategy()) {
737 case PrimalDualHybridGradientParams::NO_RESTARTS:
738 return RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
739 case PrimalDualHybridGradientParams::EVERY_MAJOR_ITERATION:
740 return RESTART_CHOICE_RESTART_TO_AVERAGE;
741 case PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC: {
742 const LocalizedLagrangianBounds local_bounds_at_average =
743 ComputeLocalizedBoundsAtAverage();
744 const LocalizedLagrangianBounds local_bounds_at_current =
745 ComputeLocalizedBoundsAtCurrent();
746 double normalized_gap;
747 RestartChoice choice;
748 if (AverageHasBetterPotential(local_bounds_at_average,
749 local_bounds_at_current)) {
750 normalized_gap = NormalizedGap(local_bounds_at_average);
751 choice = RESTART_CHOICE_RESTART_TO_AVERAGE;
752 } else {
753 normalized_gap = NormalizedGap(local_bounds_at_current);
754 choice = RESTART_CHOICE_WEIGHTED_AVERAGE_RESET;
755 }
756 if (ShouldDoAdaptiveRestartHeuristic(normalized_gap)) {
757 return choice;
758 } else {
759 normalized_gap_at_last_trial_ = normalized_gap;
760 return RESTART_CHOICE_NO_RESTART;
761 }
762 }
763 case PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED: {
764 return DetermineDistanceBasedRestartChoice();
765 }
766 default:
767 LOG(FATAL) << "Unrecognized restart_strategy "
768 << params_.restart_strategy();
769 return RESTART_CHOICE_UNSPECIFIED;
770 }
771 } else {
772 return RESTART_CHOICE_NO_RESTART;
773 }
774}
775
776VectorXd Solver::PrimalAverage() const {
777 if (primal_average_.HasNonzeroWeight()) {
778 return primal_average_.ComputeAverage();
779 } else {
780 return current_primal_solution_;
781 }
782}
783
784VectorXd Solver::DualAverage() const {
785 if (dual_average_.HasNonzeroWeight()) {
786 return dual_average_.ComputeAverage();
787 } else {
788 return current_dual_solution_;
789 }
790}
791
792double Solver::ComputeNewPrimalWeight() const {
793 const double primal_distance =
794 Distance(current_primal_solution_, last_primal_start_point_,
795 sharded_working_qp_.PrimalSharder());
796 const double dual_distance =
797 Distance(current_dual_solution_, last_dual_start_point_,
798 sharded_working_qp_.DualSharder());
799 // This choice of a nonzero tolerance balances performance and numerical
800 // issues caused by very huge or very tiny weights. It was picked as the best
801 // among {0.0, 1.0e-20, 2.0e-16, 1.0e-10, 1.0e-5} on the preprocessed MIPLIB
802 // dataset. The effect of changing this value is relatively minor overall.
803 constexpr double kNonzeroTol = 1.0e-10;
804 if (primal_distance <= kNonzeroTol || primal_distance >= 1.0 / kNonzeroTol ||
805 dual_distance <= kNonzeroTol || dual_distance >= 1.0 / kNonzeroTol) {
806 return primal_weight_;
807 }
808 const double smoothing_param = params_.primal_weight_update_smoothing();
809 const double unsmoothed_new_primal_weight = dual_distance / primal_distance;
810 const double new_primal_weight =
811 std::exp(smoothing_param * std::log(unsmoothed_new_primal_weight) +
812 (1.0 - smoothing_param) * std::log(primal_weight_));
813 LOG_IF(INFO, params_.verbosity_level() >= 4)
814 << "New computed primal weight is " << new_primal_weight
815 << " at iteration " << iterations_completed_;
816 return new_primal_weight;
817}
818
819SolverResult Solver::ConstructSolverResult(VectorXd primal_solution,
820 VectorXd dual_solution,
821 const IterationStats& stats,
822 TerminationReason termination_reason,
823 PointType output_type,
824 SolveLog solve_log) const {
825 switch (output_type) {
826 case POINT_TYPE_AVERAGE_ITERATE:
827 solve_log.set_solution_type(POINT_TYPE_AVERAGE_ITERATE);
828 break;
829 case POINT_TYPE_CURRENT_ITERATE:
830 AssignVector(current_primal_solution_,
831 sharded_working_qp_.PrimalSharder(), primal_solution);
832 AssignVector(current_dual_solution_, sharded_working_qp_.DualSharder(),
833 dual_solution);
834 solve_log.set_solution_type(POINT_TYPE_CURRENT_ITERATE);
835 break;
836 case POINT_TYPE_ITERATE_DIFFERENCE:
837 AssignVector(current_primal_delta_, sharded_working_qp_.PrimalSharder(),
838 primal_solution);
839 AssignVector(current_dual_delta_, sharded_working_qp_.DualSharder(),
840 dual_solution);
841 solve_log.set_solution_type(POINT_TYPE_ITERATE_DIFFERENCE);
842 break;
843 case POINT_TYPE_PRESOLVER_SOLUTION:
844 solve_log.set_solution_type(POINT_TYPE_PRESOLVER_SOLUTION);
845 break;
846 default:
847 // Default to average whenever the type is POINT_TYPE_NONE.
848 solve_log.set_solution_type(POINT_TYPE_AVERAGE_ITERATE);
849 break;
850 }
851 VectorXd reduced_costs;
852 const bool use_zero_primal_objective =
853 termination_reason == TERMINATION_REASON_PRIMAL_INFEASIBLE;
854 if (presolve_info_.has_value()) {
855 // Transform the solutions so they match the original unscaled problem.
856 PrimalAndDualSolution original_solution =
857 RecoverOriginalSolution({.primal_solution = std::move(primal_solution),
858 .dual_solution = std::move(dual_solution)});
859 primal_solution = std::move(original_solution.primal_solution);
860 dual_solution = std::move(original_solution.dual_solution);
861 // RecoverOriginalSolution doesn't recover reduced costs so we need to
862 // compute them with respect to the original problem.
863 reduced_costs =
864 ReducedCosts(presolve_info_->sharded_original_qp, primal_solution,
865 dual_solution, use_zero_primal_objective);
866 } else {
867 reduced_costs = ReducedCosts(sharded_working_qp_, primal_solution,
868 dual_solution, use_zero_primal_objective);
869 // Transform the solutions so they match the original unscaled problem.
871 col_scaling_vec_, sharded_working_qp_.PrimalSharder(), primal_solution);
873 row_scaling_vec_, sharded_working_qp_.DualSharder(), dual_solution);
875 col_scaling_vec_, sharded_working_qp_.PrimalSharder(), reduced_costs);
876 }
877 if (iteration_stats_callback_ != nullptr) {
878 iteration_stats_callback_(
879 {.termination_criteria = params_.termination_criteria(),
880 .iteration_stats = stats,
881 .bound_norms = original_bound_norms_});
882 }
883
884 if (params_.verbosity_level() >= 1) {
885 LOG(INFO) << "Termination reason: "
886 << TerminationReason_Name(termination_reason);
887 LOG(INFO) << "Solution point type: " << PointType_Name(output_type);
888 LOG(INFO) << "Final solution stats:";
889 LOG(INFO) << IterationStatsLabelString();
890 LOG(INFO) << ToString(stats, params_.termination_criteria(),
891 original_bound_norms_, solve_log.solution_type());
892 }
893 solve_log.set_iteration_count(stats.iteration_number());
894 solve_log.set_termination_reason(termination_reason);
895 solve_log.set_solve_time_sec(stats.cumulative_time_sec());
896 *solve_log.mutable_solution_stats() = stats;
897 return SolverResult{.primal_solution = std::move(primal_solution),
898 .dual_solution = std::move(dual_solution),
899 .reduced_costs = std::move(reduced_costs),
900 .solve_log = std::move(solve_log)};
901}
902
// Fills the active-set fields of `metadata` for the given solution point:
// - active primal variable count: variables strictly between their bounds;
// - active dual variable count: duals that are nonzero, or whose constraint
//   is free (both bounds infinite);
// - the corresponding change counts: how many variables' active status
//   differs between the solution and the given start points.
// All counts are parallel sums over shards.
void SetActiveSetInformation(const ShardedQuadraticProgram& sharded_qp,
                             const VectorXd& primal_solution,
                             const VectorXd& dual_solution,
                             const VectorXd& primal_start_point,
                             const VectorXd& dual_start_point,
                             PointMetadata& metadata) {
  CHECK_EQ(primal_solution.size(), sharded_qp.PrimalSize());
  CHECK_EQ(dual_solution.size(), sharded_qp.DualSize());
  CHECK_EQ(primal_start_point.size(), sharded_qp.PrimalSize());
  CHECK_EQ(dual_start_point.size(), sharded_qp.DualSize());

  const QuadraticProgram& qp = sharded_qp.Qp();
  // Primal variables strictly inside their bounds.
  metadata.set_active_primal_variable_count(
      static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
          [&](const Sharder::Shard& shard) {
            const auto primal_shard = shard(primal_solution);
            const auto lower_bound_shard = shard(qp.variable_lower_bounds);
            const auto upper_bound_shard = shard(qp.variable_upper_bounds);
            return (primal_shard.array() > lower_bound_shard.array() &&
                    primal_shard.array() < upper_bound_shard.array())
                .count();
          })));

  // Most of the computation from the previous ParallelSumOverShards is
  // duplicated here. However the overhead shouldn't be too large, and using
  // ParallelSumOverShards is simpler than just using ParallelForEachShard.
  // Primal variables whose active status (strictly inside bounds) differs
  // between the solution and the start point.
  metadata.set_active_primal_variable_change(
      static_cast<int64_t>(sharded_qp.PrimalSharder().ParallelSumOverShards(
          [&](const Sharder::Shard& shard) {
            const auto primal_shard = shard(primal_solution);
            const auto primal_start_shard = shard(primal_start_point);
            const auto lower_bound_shard = shard(qp.variable_lower_bounds);
            const auto upper_bound_shard = shard(qp.variable_upper_bounds);
            return ((primal_shard.array() > lower_bound_shard.array() &&
                     primal_shard.array() < upper_bound_shard.array()) !=
                    (primal_start_shard.array() > lower_bound_shard.array() &&
                     primal_start_shard.array() < upper_bound_shard.array()))
                .count();
          })));

  // Dual variables that are nonzero, or associated with a free constraint.
  metadata.set_active_dual_variable_count(
      static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
          [&](const Sharder::Shard& shard) {
            const auto dual_shard = shard(dual_solution);
            const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
            const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
            const double kInfinity = std::numeric_limits<double>::infinity();
            return (dual_shard.array() != 0.0 ||
                    (lower_bound_shard.array() == -kInfinity &&
                     upper_bound_shard.array() == kInfinity))
                .count();
          })));

  // Dual variables whose active status differs between the solution and the
  // start point, using the same activity definition as above.
  metadata.set_active_dual_variable_change(
      static_cast<int64_t>(sharded_qp.DualSharder().ParallelSumOverShards(
          [&](const Sharder::Shard& shard) {
            const auto dual_shard = shard(dual_solution);
            const auto dual_start_shard = shard(dual_start_point);
            const auto lower_bound_shard = shard(qp.constraint_lower_bounds);
            const auto upper_bound_shard = shard(qp.constraint_upper_bounds);
            const double kInfinity = std::numeric_limits<double>::infinity();
            return ((dual_shard.array() != 0.0 ||
                     (lower_bound_shard.array() == -kInfinity &&
                      upper_bound_shard.array() == kInfinity)) !=
                    (dual_start_shard.array() != 0.0 ||
                     (lower_bound_shard.array() == -kInfinity &&
                      upper_bound_shard.array() == kInfinity)))
                .count();
          })));
}
973
974void Solver::AddConvergenceAndInfeasibilityInformation(
975 const VectorXd& primal_solution, const VectorXd& dual_solution,
976 const ShardedQuadraticProgram& sharded_qp, const VectorXd& col_scaling_vec,
977 const VectorXd& row_scaling_vec, PointType candidate_type,
978 IterationStats& stats) const {
979 *stats.add_convergence_information() = ComputeConvergenceInformation(
980 sharded_qp, col_scaling_vec, row_scaling_vec, primal_solution,
981 dual_solution, candidate_type);
982 *stats.add_infeasibility_information() = ComputeInfeasibilityInformation(
983 sharded_qp, col_scaling_vec, row_scaling_vec, primal_solution,
984 dual_solution, candidate_type);
985}
986
987void Solver::AddPointMetadata(const VectorXd& primal_solution,
988 const VectorXd& dual_solution,
989 PointType point_type,
990 IterationStats& stats) const {
991 PointMetadata metadata;
992 metadata.set_point_type(point_type);
993 std::vector<int> random_projection_seeds(
994 params_.random_projection_seeds().begin(),
995 params_.random_projection_seeds().end());
996 SetRandomProjections(sharded_working_qp_, primal_solution, dual_solution,
997 random_projection_seeds, metadata);
998 if (point_type != POINT_TYPE_ITERATE_DIFFERENCE) {
999 SetActiveSetInformation(sharded_working_qp_, primal_solution, dual_solution,
1000 last_primal_start_point_, last_dual_start_point_,
1001 metadata);
1002 }
1003 *stats.add_point_metadata() = metadata;
1004}
1005
1006void LogInfoWithoutPrefix(absl::string_view message) {
1008 .stream()
1009 << message;
1010}
1011
// Adds convergence/infeasibility information and point metadata to `stats`
// for the current iterate, for the average iterate (when it has nonzero
// weight), and for the iterate difference (when deltas exist); emits periodic
// progress logs according to verbosity_level; invokes the user's iteration
// stats callback; and finally evaluates the termination criteria. When
// presolve ran, convergence/infeasibility is measured on solutions recovered
// for the original problem. Returns the termination reason and the point type
// that achieved it, or nullopt if the solve should continue.
absl::optional<TerminationReasonAndPointType>
Solver::UpdateIterationStatsAndCheckTermination(
    bool force_numerical_termination, const VectorXd& working_primal_average,
    const VectorXd& working_dual_average, IterationStats& stats) const {
  if (presolve_info_.has_value()) {
    { // This block exists to destroy `original_current` to save RAM.
      PrimalAndDualSolution original_current =
          RecoverOriginalSolution({.primal_solution = current_primal_solution_,
                                   .dual_solution = current_dual_solution_});
      AddConvergenceAndInfeasibilityInformation(
          original_current.primal_solution, original_current.dual_solution,
          presolve_info_->sharded_original_qp,
          presolve_info_->trivial_col_scaling_vec,
          presolve_info_->trivial_row_scaling_vec, POINT_TYPE_CURRENT_ITERATE,
          stats);
    }
    if (primal_average_.HasNonzeroWeight()) {
      PrimalAndDualSolution original_average =
          RecoverOriginalSolution({.primal_solution = working_primal_average,
                                   .dual_solution = working_dual_average});
      AddConvergenceAndInfeasibilityInformation(
          original_average.primal_solution, original_average.dual_solution,
          presolve_info_->sharded_original_qp,
          presolve_info_->trivial_col_scaling_vec,
          presolve_info_->trivial_row_scaling_vec, POINT_TYPE_AVERAGE_ITERATE,
          stats);
    }
  } else {
    // No presolve: evaluate directly on the working problem, undoing the
    // rescaling via the scaling vectors.
    AddConvergenceAndInfeasibilityInformation(
        current_primal_solution_, current_dual_solution_, sharded_working_qp_,
        col_scaling_vec_, row_scaling_vec_, POINT_TYPE_CURRENT_ITERATE, stats);
    if (primal_average_.HasNonzeroWeight()) {
      AddConvergenceAndInfeasibilityInformation(
          working_primal_average, working_dual_average, sharded_working_qp_,
          col_scaling_vec_, row_scaling_vec_, POINT_TYPE_AVERAGE_ITERATE,
          stats);
    }
  }
  AddPointMetadata(current_primal_solution_, current_dual_solution_,
                   POINT_TYPE_CURRENT_ITERATE, stats);
  if (primal_average_.HasNonzeroWeight()) {
    AddPointMetadata(working_primal_average, working_dual_average,
                     POINT_TYPE_AVERAGE_ITERATE, stats);
  }
  // The iterate difference is only used as an infeasibility certificate
  // candidate, so only infeasibility information is added for it.
  if (current_primal_delta_.size() > 0 && current_dual_delta_.size() > 0) {
    if (presolve_info_.has_value()) {
      PrimalAndDualSolution original_delta =
          RecoverOriginalSolution({.primal_solution = current_primal_delta_,
                                   .dual_solution = current_dual_delta_});
      *stats.add_infeasibility_information() = ComputeInfeasibilityInformation(
          presolve_info_->sharded_original_qp,
          presolve_info_->trivial_col_scaling_vec,
          presolve_info_->trivial_row_scaling_vec,
          original_delta.primal_solution, original_delta.dual_solution,
          POINT_TYPE_ITERATE_DIFFERENCE);
    } else {
      *stats.add_infeasibility_information() = ComputeInfeasibilityInformation(
          sharded_working_qp_, col_scaling_vec_, row_scaling_vec_,
          current_primal_delta_, current_dual_delta_,
          POINT_TYPE_ITERATE_DIFFERENCE);
    }
    AddPointMetadata(current_primal_delta_, current_dual_delta_,
                     POINT_TYPE_ITERATE_DIFFERENCE, stats);
  }
  // Re-print the column-label header every kLogEvery logged rows. The counter
  // is shared across concurrent solves (static atomic), so headers may appear
  // slightly more or less often than every kLogEvery rows per solve.
  constexpr int kLogEvery = 15;
  static std::atomic_int log_counter{0};
  if (params_.verbosity_level() >= 4) {
    // Verbosity >= 4: log both the average ("A") and current ("C") iterates.
    if (log_counter == 0) {
      LogInfoWithoutPrefix(absl::StrCat("I ", IterationStatsLabelString()));
    }
    LogInfoWithoutPrefix(absl::StrCat(
        "A ", ToString(stats, params_.termination_criteria(),
                       original_bound_norms_, POINT_TYPE_AVERAGE_ITERATE)));
    LogInfoWithoutPrefix(absl::StrCat(
        "C ", ToString(stats, params_.termination_criteria(),
                       original_bound_norms_, POINT_TYPE_CURRENT_ITERATE)));
  } else if (params_.verbosity_level() >= 3) {
    // Verbosity 3: full stats for the average iterate only.
    if (log_counter == 0) {
      LogInfoWithoutPrefix(IterationStatsLabelString());
    }
    LogInfoWithoutPrefix(ToString(stats, params_.termination_criteria(),
                                  original_bound_norms_,
                                  POINT_TYPE_AVERAGE_ITERATE));
  } else if (params_.verbosity_level() >= 2) {
    // Verbosity 2: abbreviated stats for the average iterate.
    if (log_counter == 0) {
      LogInfoWithoutPrefix(IterationStatsLabelShortString());
    }
    LogInfoWithoutPrefix(ToShortString(stats, params_.termination_criteria(),
                                       original_bound_norms_,
                                       POINT_TYPE_AVERAGE_ITERATE));
  }
  if (++log_counter >= kLogEvery) {
    log_counter = 0;
  }
  if (iteration_stats_callback_ != nullptr) {
    iteration_stats_callback_(
        {.termination_criteria = params_.termination_criteria(),
         .iteration_stats = stats,
         .bound_norms = original_bound_norms_});
  }

  return CheckTerminationCriteria(params_.termination_criteria(), stats,
                                  original_bound_norms_,
                                  force_numerical_termination);
}
1117
1118double Solver::InitialPrimalWeight(
1119 const double l2_norm_primal_linear_objective,
1120 const double l2_norm_constraint_bounds) const {
1121 if (params_.has_initial_primal_weight()) {
1122 return params_.initial_primal_weight();
1123 }
1124 if (l2_norm_primal_linear_objective > 0.0 &&
1125 l2_norm_constraint_bounds > 0.0) {
1126 // The hand-wavy motivation for this choice is that the objective vector
1127 // has units of (objective units)/(primal units) and the constraint
1128 // bounds vector has units of (objective units)/(dual units),
1129 // therefore this ratio has units (dual units)/(primal units). By
1130 // dimensional analysis, these are the same units as the primal weight.
1131 return l2_norm_primal_linear_objective / l2_norm_constraint_bounds;
1132 } else {
1133 return 1.0;
1134 }
1135}
1136
1137void Solver::ComputeAndApplyRescaling() {
1138 ScalingVectors scaling = ApplyRescaling(
1139 RescalingOptions{.l_inf_ruiz_iterations = params_.l_inf_ruiz_iterations(),
1140 .l2_norm_rescaling = params_.l2_norm_rescaling()},
1141 sharded_working_qp_);
1142 row_scaling_vec_ = std::move(scaling.row_scaling_vec);
1143 col_scaling_vec_ = std::move(scaling.col_scaling_vec);
1144
1145 CoefficientWiseQuotientInPlace(col_scaling_vec_,
1146 sharded_working_qp_.PrimalSharder(),
1147 current_primal_solution_);
1148 CoefficientWiseQuotientInPlace(row_scaling_vec_,
1149 sharded_working_qp_.DualSharder(),
1150 current_dual_solution_);
1151}
1152
// Applies a restart decided by ChooseRestartToApply(): optionally replaces
// the current iterate with the average, recomputes the primal weight, resets
// the per-strategy restart bookkeeping, clears the running averages, and
// records the restart point as the new start point. No-op for
// RESTART_CHOICE_NO_RESTART / UNSPECIFIED.
void Solver::ApplyRestartChoice(const RestartChoice restart_to_apply) {
  switch (restart_to_apply) {
    case RESTART_CHOICE_UNSPECIFIED:
    case RESTART_CHOICE_NO_RESTART:
      return;
    case RESTART_CHOICE_WEIGHTED_AVERAGE_RESET:
      // Keep the current iterate; only the averages get reset below.
      LOG_IF(INFO, params_.verbosity_level() >= 4)
          << "Restarted to current on iteration " << iterations_completed_
          << " after " << primal_average_.NumTerms() << " iterations";
      break;
    case RESTART_CHOICE_RESTART_TO_AVERAGE:
      LOG_IF(INFO, params_.verbosity_level() >= 4)
          << "Restarted to average on iteration " << iterations_completed_
          << " after " << primal_average_.NumTerms() << " iterations";
      current_primal_solution_ = primal_average_.ComputeAverage();
      current_dual_solution_ = dual_average_.ComputeAverage();
      // Keep the cached A^T y product in sync with the new dual iterate.
      current_dual_product_ = TransposedMatrixVectorProduct(
          WorkingQp().constraint_matrix, current_dual_solution_,
          sharded_working_qp_.ConstraintMatrixSharder());
      break;
  }
  primal_weight_ = ComputeNewPrimalWeight();
  ratio_last_two_step_sizes_ = 1;
  if (params_.restart_strategy() ==
      PrimalDualHybridGradientParams::ADAPTIVE_HEURISTIC) {
    // It's important for the theory that the distances here are calculated
    // given the new primal weight.
    const LocalizedLagrangianBounds local_bounds_at_last_restart =
        ComputeLocalizedBoundsAtCurrent();
    const double distance_traveled_since_last_restart =
        local_bounds_at_last_restart.radius;
    normalized_gap_at_last_restart_ = BoundGap(local_bounds_at_last_restart) /
                                      distance_traveled_since_last_restart;
    normalized_gap_at_last_trial_ = std::numeric_limits<double>::infinity();
  } else if (params_.restart_strategy() ==
             PrimalDualHybridGradientParams::ADAPTIVE_DISTANCE_BASED) {
    // Update parameters for distance-based restarts.
    distance_based_restart_info_ = {
        .distance_moved_last_restart_period = DistanceTraveledFromLastStart(
            current_primal_solution_, current_dual_solution_),
        .length_of_last_restart_period = primal_average_.NumTerms()};
  }
  primal_average_.Clear();
  dual_average_.Clear();
  // The restart point becomes the reference for future primal weight and
  // active-set-change computations.
  AssignVector(current_primal_solution_, sharded_working_qp_.PrimalSharder(),
               /*dest=*/last_primal_start_point_);
  AssignVector(current_dual_solution_, sharded_working_qp_.DualSharder(),
               /*dest=*/last_dual_start_point_);
}
1202
// Performs the periodic bookkeeping between PDHG steps: decides whether this
// is a major iteration, chooses a restart, optionally runs the (expensive)
// termination check with full solution statistics, records iteration stats,
// and applies the chosen restart. Returns a SolverResult if a termination
// criterion fired, or nullopt to continue iterating.
absl::optional<SolverResult> Solver::MajorIterationAndTerminationCheck(
    bool force_numerical_termination, SolveLog& solve_log) {
  const int iteration_limit = params_.termination_criteria().iteration_limit();
  const int major_iteration_cycle =
      iterations_completed_ % params_.major_iteration_frequency();
  const bool is_major_iteration =
      major_iteration_cycle == 0 && iterations_completed_ > 0;
  const bool check_termination =
      major_iteration_cycle % params_.termination_check_frequency() == 0 ||
      iterations_completed_ == iteration_limit || force_numerical_termination;
  // We check termination on every major iteration.
  DCHECK(!is_major_iteration || check_termination);
  // Just decide what to do for now. The actual restart, if any, is
  // performed after the termination check.
  const RestartChoice restart = force_numerical_termination
                                    ? RESTART_CHOICE_NO_RESTART
                                    : ChooseRestartToApply(is_major_iteration);
  IterationStats stats = CreateSimpleIterationStats(restart);
  if (check_termination) {
    // Check for termination and update iteration stats with both simple and
    // solution statistics. The later are computationally harder to compute and
    // hence only computed here.
    VectorXd primal_average = PrimalAverage();
    VectorXd dual_average = DualAverage();

    const absl::optional<TerminationReasonAndPointType>
        maybe_termination_reason = UpdateIterationStatsAndCheckTermination(
            force_numerical_termination, primal_average, dual_average, stats);
    if (params_.record_iteration_stats()) {
      *solve_log.add_iteration_stats() = stats;
    }
    // We've terminated.
    if (maybe_termination_reason.has_value()) {
      return ConstructSolverResult(std::move(primal_average),
                                   std::move(dual_average), stats,
                                   maybe_termination_reason->reason,
                                   maybe_termination_reason->type, solve_log);
    }
  } else if (params_.record_iteration_stats()) {
    // Record simple iteration stats only.
    *solve_log.add_iteration_stats() = stats;
  }
  ApplyRestartChoice(restart);
  return absl::nullopt;
}
1248
1249void Solver::ResetAverageToCurrent() {
1250 primal_average_.Clear();
1251 dual_average_.Clear();
1252 primal_average_.Add(current_primal_solution_, /*weight=*/1.0);
1253 dual_average_.Add(current_dual_solution_, /*weight=*/1.0);
1254}
1255
1256void Solver::LogNumericalTermination() const {
1257 LOG(WARNING) << "Forced numerical termination at iteration "
1258 << iterations_completed_;
1259}
1260
1261void Solver::LogInnerIterationLimitHit() const {
1262 LOG(WARNING) << "Inner iteration limit reached at iteration "
1263 << iterations_completed_;
1264}
1265
// Logs a human-readable summary of the working problem at INFO level:
// dimensions, constraint-matrix magnitudes and norms, constraint-bound
// statistics, objective statistics (matrix stats only for QPs), and variable
// bound gaps.
void Solver::LogQuadraticProgramStats(const QuadraticProgramStats& stats) {
  LOG(INFO) << absl::StrFormat(
                   "There are %i variables, %i constraints, and %i ",
                   stats.num_variables(), stats.num_constraints(),
                   stats.constraint_matrix_num_nonzeros())
            << "constraint matrix nonzeros.";
  // Matrix/bound magnitude stats are only meaningful with a nonempty matrix.
  if (WorkingQp().constraint_matrix.nonZeros() > 0) {
    LOG(INFO) << "Absolute values of nonzero constraint matrix elements: "
              << absl::StrFormat("largest=%f, smallest=%f, avg=%f",
                                 stats.constraint_matrix_abs_max(),
                                 stats.constraint_matrix_abs_min(),
                                 stats.constraint_matrix_abs_avg());
    LOG(INFO) << "Constraint matrix, infinity norm: "
              << absl::StrFormat("max(row & col)=%f, min_col=%f, min_row=%f",
                                 stats.constraint_matrix_abs_max(),
                                 stats.constraint_matrix_col_min_l_inf_norm(),
                                 stats.constraint_matrix_row_min_l_inf_norm());
    LOG(INFO) << "Constraint bounds statistics (max absolute value per row): "
              << absl::StrFormat("largest=%f, smallest=%f, avg=%f, l2_norm=%f",
                                 stats.combined_bounds_max(),
                                 stats.combined_bounds_min(),
                                 stats.combined_bounds_avg(),
                                 stats.combined_bounds_l2_norm());
  }
  // Objective matrix stats only apply to genuine QPs.
  if (!IsLinearProgram(WorkingQp())) {
    LOG(INFO) << absl::StrFormat(
        "There are %i nonzero diagonal coefficients in the objective matrix.",
        stats.objective_matrix_num_nonzeros());
    LOG(INFO) << "Absolute values of nonzero objective matrix elements: "
              << absl::StrFormat("largest=%f, smallest=%f, avg=%f",
                                 stats.objective_matrix_abs_max(),
                                 stats.objective_matrix_abs_min(),
                                 stats.objective_matrix_abs_avg());
  }
  LOG(INFO) << "Absolute values of objective vector elements: "
            << absl::StrFormat("largest=%f, smallest=%f, avg=%f, l2_norm=%f",
                               stats.objective_vector_abs_max(),
                               stats.objective_vector_abs_min(),
                               stats.objective_vector_abs_avg(),
                               stats.objective_vector_l2_norm());

  LOG(INFO) << "Gaps between variable upper and lower bounds: "
            << absl::StrFormat(
                   "#finite=%i of %i, largest=%f, smallest=%f, avg=%f",
                   stats.variable_bound_gaps_num_finite(),
                   stats.num_variables(), stats.variable_bound_gaps_max(),
                   stats.variable_bound_gaps_min(),
                   stats.variable_bound_gaps_avg());
}
1315
// Takes one PDHG step using the Malitsky-Pock line search: the primal step is
// taken with the current step size, then candidate dual step sizes are tried
// (starting from an optimistic dilation, shrinking on rejection) until the
// contraction condition holds. Returns kForceNumericalTermination when the
// line search hits its inner iteration limit, when the accepted step moved
// nothing (stall), or when it moved too far (divergence); kSuccessful
// otherwise.
InnerStepOutcome Solver::TakeMalitskyPockStep() {
  InnerStepOutcome outcome = InnerStepOutcome::kSuccessful;
  const double primal_step_size = step_size_ / primal_weight_;
  NextSolutionAndDelta next_primal_solution =
      ComputeNextPrimalSolution(primal_step_size);
  // The theory by Malitsky and Pock holds for any new_step_size in the interval
  // [step_size, step_size * sqrt(1 + theta)]. The dilating coefficient
  // determines where in this interval the new step size lands.
  double dilating_coeff =
      1 + (params_.malitsky_pock_parameters().step_size_interpolation() *
           (sqrt(1 + ratio_last_two_step_sizes_) - 1));
  double new_primal_step_size = primal_step_size * dilating_coeff;
  double step_size_downscaling =
      params_.malitsky_pock_parameters().step_size_downscaling_factor();
  double contraction_factor =
      params_.malitsky_pock_parameters().linesearch_contraction_factor();
  const double dual_weight = primal_weight_ * primal_weight_;
  int inner_iterations = 0;
  for (bool accepted_step = false; !accepted_step; ++inner_iterations) {
    // Give up after 60 rejected candidates and force termination.
    if (inner_iterations >= 60) {
      LogInnerIterationLimitHit();
      ResetAverageToCurrent();
      outcome = InnerStepOutcome::kForceNumericalTermination;
      break;
    }
    const double new_last_two_step_sizes_ratio =
        new_primal_step_size / primal_step_size;
    NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
        dual_weight * new_primal_step_size, new_last_two_step_sizes_ratio,
        next_primal_solution);

    VectorXd next_dual_product = TransposedMatrixVectorProduct(
        WorkingQp().constraint_matrix, next_dual_solution.value,
        sharded_working_qp_.ConstraintMatrixSharder());
    double delta_dual_norm =
        Norm(next_dual_solution.delta, sharded_working_qp_.DualSharder());
    double delta_dual_prod_norm =
        Distance(current_dual_product_, next_dual_product,
                 sharded_working_qp_.PrimalSharder());
    // Line-search acceptance test: ||A^T dy|| must be sufficiently small
    // relative to ||dy|| for the candidate step size.
    if (primal_weight_ * new_primal_step_size * delta_dual_prod_norm <=
        contraction_factor * delta_dual_norm) {
      // Accept new_step_size as a good step.
      step_size_ = new_primal_step_size * primal_weight_;
      ratio_last_two_step_sizes_ = new_last_two_step_sizes_ratio;
      // Malitsky and Pock guarantee uses a nonsymmetric weighted average,
      // the primal variable average involves the initial point, while the dual
      // doesn't. See Theorem 2 in https://arxiv.org/pdf/1608.08883.pdf for
      // details.
      if (!primal_average_.HasNonzeroWeight()) {
        primal_average_.Add(
            current_primal_solution_,
            /*weight=*/new_primal_step_size * new_last_two_step_sizes_ratio);
      }

      current_primal_solution_ = std::move(next_primal_solution.value);
      current_dual_solution_ = std::move(next_dual_solution.value);
      current_dual_product_ = std::move(next_dual_product);
      primal_average_.Add(current_primal_solution_,
                          /*weight=*/new_primal_step_size);
      dual_average_.Add(current_dual_solution_,
                        /*weight=*/new_primal_step_size);
      // Even an accepted step can reveal numerical trouble: zero movement
      // (stall) or excessive movement (divergence) forces termination.
      const double movement =
          ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
      if (movement == 0.0) {
        LogNumericalTermination();
        ResetAverageToCurrent();
        outcome = InnerStepOutcome::kForceNumericalTermination;
      } else if (movement > kDivergentMovement) {
        LogNumericalTermination();
        outcome = InnerStepOutcome::kForceNumericalTermination;
      }
      current_primal_delta_ = std::move(next_primal_solution.delta);
      current_dual_delta_ = std::move(next_dual_solution.delta);
      break;
    } else {
      // Rejected: shrink the candidate step size and retry.
      new_primal_step_size = step_size_downscaling * new_primal_step_size;
    }
  }
  // inner_iterations isn't incremented for the accepted step.
  num_rejected_steps_ += inner_iterations;
  return outcome;
}
1398
// Takes one PDHG step using the adaptive linesearch of
// https://arxiv.org/pdf/2106.04756.pdf: a trial step is computed with the
// current step size and accepted if the step size does not exceed the limit
// implied by the step's nonlinearity; the step size is then updated
// (accepted or not) by the reduction/growth-exponent rule. Returns
// kForceNumericalTermination on zero movement (stall) or movement beyond
// kDivergentMovement; kSuccessful otherwise.
InnerStepOutcome Solver::TakeAdaptiveStep() {
  bool force_numerical_termination = false;
  for (bool accepted_step = false; !accepted_step;) {
    const double primal_step_size = step_size_ / primal_weight_;
    const double dual_step_size = step_size_ * primal_weight_;
    NextSolutionAndDelta next_primal_solution =
        ComputeNextPrimalSolution(primal_step_size);
    NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
        dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
    const double movement =
        ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
    if (movement == 0.0) {
      // Stalled: reset the average to the current point and force
      // termination so the reported solution is meaningful.
      LogNumericalTermination();
      ResetAverageToCurrent();
      force_numerical_termination = true;
      break;
    } else if (movement > kDivergentMovement) {
      LogNumericalTermination();
      force_numerical_termination = true;
      break;
    }
    VectorXd next_dual_product = TransposedMatrixVectorProduct(
        WorkingQp().constraint_matrix, next_dual_solution.value,
        sharded_working_qp_.ConstraintMatrixSharder());
    const double nonlinearity =
        ComputeNonlinearity(next_primal_solution.delta, next_dual_product);

    // See equation (5) in https://arxiv.org/pdf/2106.04756.pdf.
    const double step_size_limit =
        nonlinearity > 0 ? movement / nonlinearity
                         : std::numeric_limits<double>::infinity();

    if (step_size_ <= step_size_limit) {
      // Accept the trial step: commit the new iterate and update the
      // step-size-weighted averages.
      current_primal_solution_ = std::move(next_primal_solution.value);
      current_dual_solution_ = std::move(next_dual_solution.value);
      current_dual_product_ = std::move(next_dual_product);
      current_primal_delta_ = std::move(next_primal_solution.delta);
      current_dual_delta_ = std::move(next_dual_solution.delta);
      primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
      dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
      accepted_step = true;
    }
    const double total_steps_attempted =
        num_rejected_steps_ + iterations_completed_ + 1;
    // Our step sizes are a factor 1 - (total_steps_attempted + 1)^(-
    // step_size_reduction_exponent) smaller than they could be as a margin to
    // reduce rejected steps.
    const double first_term =
        (1 - std::pow(total_steps_attempted + 1.0,
                      -params_.adaptive_linesearch_parameters()
                           .step_size_reduction_exponent())) *
        step_size_limit;
    const double second_term =
        (1 + std::pow(total_steps_attempted + 1.0,
                      -params_.adaptive_linesearch_parameters()
                           .step_size_growth_exponent())) *
        step_size_;
    // From the first term when we have to reject a step, the step_size
    // decreases by a factor of at least 1 - (total_steps_attempted + 1)^(-
    // step_size_reduction_exponent). From the second term we increase the
    // step_size by a factor of at most 1 + (total_steps_attempted +
    // 1)^(-step_size_growth_exponent) Therefore if more than order
    // (total_steps_attempted + 1)^(step_size_reduction_exponent
    // - step_size_growth_exponent) fraction of the time we have a rejected
    // step, we overall decrease the step_size. When the step_size is
    // sufficiently small we stop having rejected steps.
    step_size_ = std::min(first_term, second_term);
    if (!accepted_step) {
      ++num_rejected_steps_;
    }
  }
  if (force_numerical_termination) {
    return InnerStepOutcome::kForceNumericalTermination;
  }
  return InnerStepOutcome::kSuccessful;
}
1475
1476InnerStepOutcome Solver::TakeConstantSizeStep() {
1477 const double primal_step_size = step_size_ / primal_weight_;
1478 const double dual_step_size = step_size_ * primal_weight_;
1479 NextSolutionAndDelta next_primal_solution =
1480 ComputeNextPrimalSolution(primal_step_size);
1481 NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution(
1482 dual_step_size, /*extrapolation_factor=*/1.0, next_primal_solution);
1483 const double movement =
1484 ComputeMovement(next_primal_solution.delta, next_dual_solution.delta);
1485 if (movement == 0.0) {
1486 LogNumericalTermination();
1487 ResetAverageToCurrent();
1488 return InnerStepOutcome::kForceNumericalTermination;
1489 } else if (movement > kDivergentMovement) {
1490 LogNumericalTermination();
1491 return InnerStepOutcome::kForceNumericalTermination;
1492 }
1493 VectorXd next_dual_product = TransposedMatrixVectorProduct(
1494 WorkingQp().constraint_matrix, next_dual_solution.value,
1495 sharded_working_qp_.ConstraintMatrixSharder());
1496 current_primal_solution_ = std::move(next_primal_solution.value);
1497 current_dual_solution_ = std::move(next_dual_solution.value);
1498 current_dual_product_ = std::move(next_dual_product);
1499 current_primal_delta_ = std::move(next_primal_solution.delta);
1500 current_dual_delta_ = std::move(next_dual_solution.delta);
1501 primal_average_.Add(current_primal_solution_, /*weight=*/step_size_);
1502 dual_average_.Add(current_dual_solution_, /*weight=*/step_size_);
1503 return InnerStepOutcome::kSuccessful;
1504}
1505
1506glop::GlopParameters Solver::PreprocessorParameters(
1507 const PrimalDualHybridGradientParams& params) {
1508 glop::GlopParameters glop_params;
1509 // TODO(user): Test if dualization helps or hurts performance.
1510 glop_params.set_solve_dual_problem(glop::GlopParameters::NEVER_DO);
1511 // Experiments show that this preprocessing step can hurt because it relaxes
1512 // variable bounds.
1513 glop_params.set_use_implied_free_preprocessor(false);
1514 // We do our own scaling.
1515 glop_params.set_use_scaling(false);
1516 if (params.presolve_options().has_glop_parameters()) {
1517 glop_params.MergeFrom(params.presolve_options().glop_parameters());
1518 }
1519 return glop_params;
1520}
1521
1522namespace {
1523
1524SolverResult ErrorSolverResult(const TerminationReason reason,
1525 const std::string& message) {
1526 SolveLog error_log;
1527 error_log.set_termination_reason(reason);
1528 error_log.set_termination_string(message);
1529 LOG(WARNING) << "The solver did not run because of invalid input: "
1530 << message;
1531 return SolverResult{.solve_log = error_log};
1532}
1533
1534TerminationReason GlopStatusToTerminationReason(
1535 const glop::ProblemStatus glop_status) {
1536 switch (glop_status) {
1537 case glop::ProblemStatus::OPTIMAL:
1538 return TERMINATION_REASON_OPTIMAL;
1539 case glop::ProblemStatus::INVALID_PROBLEM:
1540 return TERMINATION_REASON_INVALID_PROBLEM;
1541 case glop::ProblemStatus::ABNORMAL:
1542 case glop::ProblemStatus::IMPRECISE:
1543 return TERMINATION_REASON_NUMERICAL_ERROR;
1544 case glop::ProblemStatus::PRIMAL_INFEASIBLE:
1545 case glop::ProblemStatus::DUAL_INFEASIBLE:
1546 case glop::ProblemStatus::INFEASIBLE_OR_UNBOUNDED:
1547 case glop::ProblemStatus::DUAL_UNBOUNDED:
1548 case glop::ProblemStatus::PRIMAL_UNBOUNDED:
1549 return TERMINATION_REASON_PRIMAL_OR_DUAL_INFEASIBLE;
1550 default:
1551 LOG(WARNING) << "Unexpected preprocessor status " << glop_status;
1552 return TERMINATION_REASON_OTHER;
1553 }
1554}
1555
1556} // namespace
1557
// Runs glop presolve on the working problem when enabled, replacing
// sharded_working_qp_ (and the primal/dual averages) with the presolved
// problem. Returns a termination reason when the preprocessor fully
// resolves the problem, and absl::nullopt when the main loop should run.
// Any user-provided initial solution is discarded, since it is not
// meaningful for the presolved problem.
1558absl::optional<TerminationReason> Solver::ApplyPresolveIfEnabled(
1559 absl::optional<PrimalAndDualSolution>* const initial_solution) {
1560 const bool presolve_enabled = params_.presolve_options().use_glop();
1561 if (!presolve_enabled) {
1562 return absl::nullopt;
1563 }
// Presolve only applies to LPs; a nonzero objective matrix disables it.
1564 if (!IsLinearProgram(WorkingQp())) {
1565 LOG(WARNING)
1566 << "Skipping presolve, which is only supported for linear programs";
1567 return absl::nullopt;
1568 }
1569 absl::StatusOr<MPModelProto> model = QpToMpModelProto(WorkingQp());
1570 if (!model.ok()) {
1571 LOG(WARNING)
1572 << "Skipping presolve because of error converting to MPModelProto: "
1573 << model.status();
1574 return absl::nullopt;
1575 }
1576 if (initial_solution->has_value()) {
1577 LOG(WARNING) << "Ignoring initial solution. Initial solutions "
1578 "are ignored when presolve is on.";
1579 initial_solution->reset();
1580 }
1581 glop::LinearProgram glop_lp;
// NOTE(review): the statement that fills glop_lp from *model (original
// line 1582, presumably a MPModelProtoToLinearProgram(*model, &glop_lp)
// call) appears to be missing from this extraction — confirm against the
// upstream source.
1583 // Save RAM
1584 model->Clear();
// Move the working QP into presolve_info_ so the original problem is kept
// for solution recovery after presolve rewrites sharded_working_qp_.
1585 presolve_info_.emplace(std::move(sharded_working_qp_), params_);
1586 // To simplify our code we ignore the return value indicating whether
1587 // postprocessing is required. We simply call RecoverSolution()
1588 // unconditionally, which may do nothing.
1589 presolve_info_->preprocessor.Run(&glop_lp);
1590 presolve_info_->presolved_problem_was_maximization =
1591 glop_lp.IsMaximizationProblem();
1592 // MpModelProto doesn't support scaling factors so any scaling factor was
1593 // eliminated when we converted to MpModelProto. Nothing afterwards should set
1594 // scaling factor.
1595 CHECK_EQ(glop_lp.objective_scaling_factor(), 1.0);
1596 MPModelProto output;
1597 glop::LinearProgramToMPModelProto(glop_lp, &output);
1598 // This will only fail if given an invalid LP, which shouldn't happen.
1599 absl::StatusOr<QuadraticProgram> presolved_qp =
1600 QpFromMpModelProto(output, /*relax_integer_variables=*/false);
1601 CHECK_OK(presolved_qp.status());
// From here on the working problem is the (usually smaller) presolved one;
// the averages must be rebuilt against its sharders.
1602 sharded_working_qp_ = ShardedQuadraticProgram(
1603 std::move(*presolved_qp), params_.num_threads(), num_shards_);
1604 primal_average_ =
1605 ShardedWeightedAverage(&sharded_working_qp_.PrimalSharder());
1606 dual_average_ = ShardedWeightedAverage(&sharded_working_qp_.DualSharder());
1607 // A status of INIT means the preprocessor created a (usually) smaller
1608 // problem that needs solving. Other statuses mean the preprocessor solved
1609 // the problem completely.
1610 if (presolve_info_->preprocessor.status() != glop::ProblemStatus::INIT) {
// Trivial (all-ones) scaling so later unscaling is a no-op for the
// preprocessor-provided solution.
1611 col_scaling_vec_.setOnes(sharded_working_qp_.PrimalSize());
1612 row_scaling_vec_.setOnes(sharded_working_qp_.DualSize());
1613 return GlopStatusToTerminationReason(presolve_info_->preprocessor.status());
1614 }
1615 return absl::nullopt;
1616}
1617
// Maps a solution of the scaled working problem back to the original
// problem: first undoes PDLP's row/column rescaling, then (when presolve
// ran) asks glop's preprocessor to undo its transformations, and finally
// projects the recovered point onto the original bounds.
1618PrimalAndDualSolution Solver::RecoverOriginalSolution(
1619 PrimalAndDualSolution working_solution) const {
1620 glop::ProblemSolution glop_solution(glop::RowIndex{0}, glop::ColIndex{0});
1621 if (presolve_info_.has_value()) {
1622 // We compute statuses relative to the working problem so we can detect when
1623 // variables are at their bounds without floating-point roundoff induced by
1624 // scaling.
1625 glop_solution = internal::ComputeStatuses(WorkingQp(), working_solution);
1626 }
// Undo PDLP's diagonal rescaling in place: x := D_col * x, y := D_row * y.
1627 CoefficientWiseProductInPlace(col_scaling_vec_,
1628 sharded_working_qp_.PrimalSharder(),
1629 working_solution.primal_solution);
1630 CoefficientWiseProductInPlace(row_scaling_vec_,
1631 sharded_working_qp_.DualSharder(),
1632 working_solution.dual_solution);
1633 if (presolve_info_.has_value()) {
// Copy the unscaled values into glop's dense containers for RecoverSolution.
1634 glop_solution.primal_values =
1635 glop::DenseRow(working_solution.primal_solution.begin(),
1636 working_solution.primal_solution.end());
1637 glop_solution.dual_values =
1638 glop::DenseColumn(working_solution.dual_solution.begin(),
1639 working_solution.dual_solution.end());
1640 // We got the working QP by calling LinearProgramToMPModelProto() and
1641 // QpFromMpModelProto(). We need to negate the duals if the LP resulting
1642 // from presolve was a max problem.
1643 if (presolve_info_->presolved_problem_was_maximization) {
1644 for (glop::RowIndex i{0}; i < glop_solution.dual_values.size(); ++i) {
1645 glop_solution.dual_values[i] *= -1;
1646 }
1647 }
1648 presolve_info_->preprocessor.RecoverSolution(&glop_solution);
1649 PrimalAndDualSolution solution;
// Copy glop's dense vectors back into Eigen vectors.
1650 solution.primal_solution =
1651 Eigen::Map<Eigen::VectorXd>(glop_solution.primal_values.data(),
1652 glop_solution.primal_values.size().value());
1653 solution.dual_solution =
1654 Eigen::Map<Eigen::VectorXd>(glop_solution.dual_values.data(),
1655 glop_solution.dual_values.size().value());
1656 // We called QpToMpModelProto() and MPModelProtoToLinearProgram() to convert
1657 // our original QP into input for glop's preprocessor. The former multiplies
1658 // the objective vector by the objective_scaling_factor, which multiplies
1659 // the duals by that factor as well. To undo this we divide by the
1660 // objective_scaling_factor.
1661 solution.dual_solution /=
1662 presolve_info_->sharded_original_qp.Qp().objective_scaling_factor;
1663 // Glop's preprocessor sometimes violates the primal bounds constraints. To
1664 // be safe we project both primal and dual.
1665 ProjectToPrimalVariableBounds(presolve_info_->sharded_original_qp,
1666 solution.primal_solution);
1667 ProjectToDualVariableBounds(presolve_info_->sharded_original_qp,
1668 solution.dual_solution);
1669 return solution;
1670 } else {
// No presolve: the unscaled working solution is already in original space.
1671 return working_solution;
1672 }
1673}
1674
// Runs the full solve: records original-problem stats, optionally applies
// glop presolve (returning immediately if presolve fully resolves the
// problem), rescales, chooses the initial step size and primal weight, then
// iterates PDHG steps until a termination check, a user interrupt, or a
// numerical failure ends the solve.
1675SolverResult Solver::Solve(
1676 absl::optional<PrimalAndDualSolution> initial_solution,
1677 const std::atomic<bool>* interrupt_solve,
1678 IterationStatsCallback iteration_stats_callback) {
1679 SolveLog solve_log;
1680 if (sharded_working_qp_.Qp().problem_name.has_value()) {
1681 solve_log.set_instance_name(*sharded_working_qp_.Qp().problem_name);
1682 }
1683 *solve_log.mutable_params() = params_;
1684 *solve_log.mutable_original_problem_stats() = ComputeStats(
1685 sharded_working_qp_, params_.infinite_constraint_bound_threshold());
// Bound norms of the *original* problem are kept for termination checks.
1686 original_bound_norms_ =
1687 BoundNormsFromProblemStats(solve_log.original_problem_stats());
1688 const std::string preprocessing_string = absl::StrCat(
1689 params_.presolve_options().use_glop() ? "presolving and " : "",
1690 "rescaling:");
1691 if (params_.verbosity_level() >= 1) {
1692 LOG(INFO) << "Problem stats before " << preprocessing_string;
1693 LogQuadraticProgramStats(solve_log.original_problem_stats());
1694 }
1695 timer_.Start();
1696 iteration_stats_callback_ = std::move(iteration_stats_callback);
1697 absl::optional<TerminationReason> maybe_terminate =
1698 ApplyPresolveIfEnabled(&initial_solution);
// Presolve fully resolved (or rejected) the problem: recover a solution in
// the original space, double-check it against our own termination criteria,
// and return without running any PDHG iterations.
1699 if (maybe_terminate.has_value()) {
1700 // Glop also feeds zero primal and dual solutions when the preprocessor
1701 // has a non-INIT status. When the preprocessor status is optimal the
1702 // vectors have length 0. When the status is something else the lengths
1703 // may be non-zero, but that's OK since we don't promise to produce a
1704 // meaningful solution in that case.
1705 const int working_dual_size = sharded_working_qp_.DualSize();
1706 const int working_primal_size = sharded_working_qp_.PrimalSize();
1707 IterationStats iteration_stats;
1708 iteration_stats.set_cumulative_time_sec(timer_.Get());
1709 solve_log.set_preprocessing_time_sec(iteration_stats.cumulative_time_sec());
1710 VectorXd working_primal = VectorXd::Zero(working_primal_size);
1711 VectorXd working_dual = VectorXd::Zero(working_dual_size);
1712 PrimalAndDualSolution original = RecoverOriginalSolution(
1713 {.primal_solution = working_primal, .dual_solution = working_dual});
1714 AddConvergenceAndInfeasibilityInformation(
1715 original.primal_solution, original.dual_solution,
1716 presolve_info_->sharded_original_qp,
1717 presolve_info_->trivial_col_scaling_vec,
1718 presolve_info_->trivial_row_scaling_vec, POINT_TYPE_PRESOLVER_SOLUTION,
1719 iteration_stats);
1720 absl::optional<TerminationReasonAndPointType> earned_termination =
1721 CheckTerminationCriteria(params_.termination_criteria(),
1722 iteration_stats, original_bound_norms_,
1723 /*force_numerical_termination=*/false);
1724 TerminationReason final_termination_reason;
// Trust presolve's verdict only if the recovered solution actually earns a
// conclusive reason under our own criteria; otherwise a claimed OPTIMAL is
// downgraded to NUMERICAL_ERROR.
1725 if (earned_termination.has_value() &&
1726 (earned_termination->reason == TERMINATION_REASON_OPTIMAL ||
1727 earned_termination->reason == TERMINATION_REASON_PRIMAL_INFEASIBLE ||
1728 earned_termination->reason == TERMINATION_REASON_DUAL_INFEASIBLE)) {
1729 final_termination_reason = earned_termination->reason;
1730 } else {
1731 if (*maybe_terminate == TERMINATION_REASON_OPTIMAL) {
1732 final_termination_reason = TERMINATION_REASON_NUMERICAL_ERROR;
1733 LOG(WARNING) << "Presolve claimed to solve the LP optimally but the "
1734 "solution doesn't satisfy the optimality criteria.";
1735 } else {
1736 final_termination_reason = *maybe_terminate;
1737 }
1738 }
1739 return ConstructSolverResult(
1740 std::move(working_primal), std::move(working_dual),
1741 std::move(iteration_stats), final_termination_reason,
1742 POINT_TYPE_PRESOLVER_SOLUTION, std::move(solve_log));
1743 }
1744
1745 // The current solution is updated by ComputeAndApplyRescaling.
1746 if (initial_solution.has_value()) {
1747 current_primal_solution_ = std::move(initial_solution->primal_solution);
1748 current_dual_solution_ = std::move(initial_solution->dual_solution);
1749 } else {
1750 current_primal_solution_.setZero(sharded_working_qp_.PrimalSize());
1751 current_dual_solution_.setZero(sharded_working_qp_.DualSize());
1752 }
1753 // The following projections are necessary since all our checks assume that
1754 // the primal and dual variable bounds are satisfied.
1755 ProjectToPrimalVariableBounds(sharded_working_qp_, current_primal_solution_);
1756 ProjectToDualVariableBounds(sharded_working_qp_, current_dual_solution_);
1757
1758 ComputeAndApplyRescaling();
1759 *solve_log.mutable_preprocessed_problem_stats() = ComputeStats(
1760 sharded_working_qp_, params_.infinite_constraint_bound_threshold());
1761 if (params_.verbosity_level() >= 1) {
1762 LOG(INFO) << "Problem stats after " << preprocessing_string;
1763 LogQuadraticProgramStats(solve_log.preprocessed_problem_stats());
1764 }
1765
// Initial step size: a constant-step rule needs a valid upper bound on the
// Lipschitz constant up front; the adaptive rules start big and shrink.
1766 if (params_.linesearch_rule() ==
1767 PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE) {
1768 std::mt19937 random(1);
1769 double inverse_step_size;
1770 const auto lipschitz_result =
// NOTE(review): the callee name for this assignment (original line 1771,
// presumably EstimateMaximumSingularValueOfConstraintMatrix) appears to be
// missing from this extraction — confirm against the upstream source.
1772 sharded_working_qp_, absl::nullopt, absl::nullopt,
1773 /*desired_relative_error=*/0.2, /*failure_probability=*/0.0005,
1774 random);
1775 // With high probability, the estimate of the lipschitz term is within
1776 // +/- estimated_relative_error * lipschitz_term.
1777 const double lipschitz_term_upper_bound =
1778 lipschitz_result.singular_value /
1779 (1.0 - lipschitz_result.estimated_relative_error);
1780 inverse_step_size = lipschitz_term_upper_bound;
1781 step_size_ = inverse_step_size > 0.0 ? 1.0 / inverse_step_size : 1.0;
1782 } else {
1783 // This initial step size is designed to err on the side of being too big.
1784 // This is because
1785 // (i) too-big steps are rejected and hence don't hurt us other than
1786 // wasting
1787 // an iteration and
1788 // (ii) the step size adjustment algorithm shrinks the step size as far as
1789 // needed in a single iteration but raises it slowly.
1790 // The tiny constant is there to keep the step size finite in the case of a
1791 // trivial LP with no constraints.
1792 step_size_ =
1793 1.0 /
1794 std::max(
1795 1.0e-20,
1796 solve_log.preprocessed_problem_stats().constraint_matrix_abs_max());
1797 }
1798 step_size_ *= params_.initial_step_size_scaling();
1799
1800 primal_weight_ = InitialPrimalWeight(
1801 solve_log.preprocessed_problem_stats().objective_vector_l2_norm(),
1802 solve_log.preprocessed_problem_stats().combined_bounds_l2_norm());
1803 last_primal_start_point_ = CloneVector(current_primal_solution_,
1804 sharded_working_qp_.PrimalSharder());
1805 last_dual_start_point_ =
1806 CloneVector(current_dual_solution_, sharded_working_qp_.DualSharder());
1807 // Note: Any cached values computed here also need to be recomputed after a
1808 // restart.
1809 ratio_last_two_step_sizes_ = 1;
1810 current_dual_product_ = TransposedMatrixVectorProduct(
1811 WorkingQp().constraint_matrix, current_dual_solution_,
1812 sharded_working_qp_.ConstraintMatrixSharder());
1813
1814 // This is set to true if we can't proceed any more because of numerical
1815 // issues. We may or may not have found the optimal solution.
1816 bool force_numerical_termination = false;
1817
1818 num_rejected_steps_ = 0;
1819
1820 solve_log.set_preprocessing_time_sec(timer_.Get());
1821
// Main PDHG loop: each pass runs the major-iteration bookkeeping (restarts,
// termination checks) and then takes one inner step under the configured
// linesearch rule.
1822 for (iterations_completed_ = 0;; ++iterations_completed_) {
1823 // This code performs the logic of the major iterations and termination
1824 // checks. It may modify the current solution and primal weight (e.g., when
1825 // performing a restart).
1826 const absl::optional<SolverResult> maybe_result =
1827 MajorIterationAndTerminationCheck(force_numerical_termination,
1828 solve_log);
1829 if (maybe_result.has_value()) {
1830 return maybe_result.value();
1831 }
1832 // Check for interrupt on every iteration.
1833 if (interrupt_solve != nullptr && interrupt_solve->load() == true) {
1834 return ConstructSolverResult(
1835 PrimalAverage(), DualAverage(),
1836 CreateSimpleIterationStats(RESTART_CHOICE_NO_RESTART),
1837 TERMINATION_REASON_INTERRUPTED_BY_USER, POINT_TYPE_NONE, solve_log);
1838 }
1839
1840 // TODO(user): If we use a step rule that could reject many steps in a
1841 // row, we should add a termination check within this loop also. For the
1842 // Malitsky and Pock rule, we perform a termination check and declare
1843 // NUMERICAL_ERROR whenever we hit 60 inner iterations.
1844 InnerStepOutcome outcome;
1845 switch (params_.linesearch_rule()) {
1846 case PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE:
1847 outcome = TakeMalitskyPockStep();
1848 break;
1849 case PrimalDualHybridGradientParams::ADAPTIVE_LINESEARCH_RULE:
1850 outcome = TakeAdaptiveStep();
1851 break;
1852 case PrimalDualHybridGradientParams::CONSTANT_STEP_SIZE_RULE:
1853 outcome = TakeConstantSizeStep();
1854 break;
1855 default:
1856 LOG(FATAL) << "Unrecognized linesearch rule "
1857 << params_.linesearch_rule();
1858 }
1859 if (outcome == InnerStepOutcome::kForceNumericalTermination) {
1860 force_numerical_termination = true;
1861 }
1862 } // loop over iterations
1863}
1864
1865} // namespace
1866
// Convenience overload: forwards to the main PrimalDualHybridGradient()
// entry point with no initial solution (absl::nullopt).
// NOTE(review): the opening signature line of this definition (original
// line 1867) appears to be missing from this extraction — confirm against
// the upstream source.
1868 QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
1869 const std::atomic<bool>* interrupt_solve,
1870 IterationStatsCallback iteration_stats_callback) {
1871 return PrimalDualHybridGradient(std::move(qp), params, absl::nullopt,
1872 interrupt_solve,
1873 std::move(iteration_stats_callback));
1874}
1875
// Main entry point: validates the parameters and the quadratic program
// (compressed constraint matrix, consistent dimensions and bounds, nonzero
// objective scaling factor), returning an error SolverResult on any invalid
// input; otherwise constructs a Solver and runs it.
// NOTE(review): the opening signature line (original line 1876) and the
// expression initializing params_status (original line 1882, presumably a
// ValidatePrimalDualHybridGradientParams(params) call) appear to be missing
// from this extraction — confirm against the upstream source.
1877 QuadraticProgram qp, const PrimalDualHybridGradientParams& params,
1878 absl::optional<PrimalAndDualSolution> initial_solution,
1879 const std::atomic<bool>* interrupt_solve,
1880 IterationStatsCallback iteration_stats_callback) {
1881 const absl::Status params_status =
1883 if (!params_status.ok()) {
1884 return ErrorSolverResult(TERMINATION_REASON_INVALID_PARAMETER,
1885 params_status.ToString());
1886 }
1887 if (!qp.constraint_matrix.isCompressed()) {
1888 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1889 "constraint_matrix must be in compressed format. "
1890 "Call constraint_matrix.makeCompressed()");
1891 }
1892 const absl::Status dimensions_status = ValidateQuadraticProgramDimensions(qp);
1893 if (!dimensions_status.ok()) {
1894 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1895 dimensions_status.ToString());
1896 }
1897 if (!HasValidBounds(qp)) {
1898 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1899 "The input problem has inconsistent bounds.");
1900 }
// A zero scaling factor would make objective values (and recovered duals)
// undefined, so it is rejected up front.
1901 if (qp.objective_scaling_factor == 0) {
1902 return ErrorSolverResult(TERMINATION_REASON_INVALID_PROBLEM,
1903 "The objective scaling factor cannot be zero.");
1904 }
1905 Solver solver(std::move(qp), params);
1906 return solver.Solve(std::move(initial_solution), interrupt_solve,
1907 std::move(iteration_stats_callback));
1908}
1909
1910namespace internal {
1911
// Builds a glop::ProblemSolution whose constraint and variable statuses are
// inferred from the given primal/dual point: equality rows are FIXED_VALUE,
// other rows pick the bound matching the dual's sign (BASIC when zero), and
// variables exactly at a bound are AT_*_BOUND / FIXED_VALUE (BASIC
// otherwise).
// NOTE(review): the opening signature line of this definition (original
// line 1912) appears to be missing from this extraction — confirm against
// the upstream source.
1913 const PrimalAndDualSolution& solution) {
1914 glop::ProblemSolution glop_solution(
1915 glop::RowIndex(solution.dual_solution.size()),
1916 glop::ColIndex(solution.primal_solution.size()));
1917 // This doesn't matter much as glop's preprocessor doesn't use this much.
1918 // We pick IMPRECISE since we are often calling this code early in the solve.
1919 glop_solution.status = glop::ProblemStatus::IMPRECISE;
// Constraint statuses: equality rows are FIXED_VALUE; otherwise a positive
// dual marks the lower bound active, a negative dual the upper bound, and a
// zero dual leaves the row BASIC.
1920 for (glop::RowIndex i{0}; i.value() < solution.dual_solution.size(); ++i) {
1921 if (qp.constraint_lower_bounds[i.value()] ==
1922 qp.constraint_upper_bounds[i.value()]) {
1923 glop_solution.constraint_statuses[i] =
1924 glop::ConstraintStatus::FIXED_VALUE;
1925 } else if (solution.dual_solution[i.value()] > 0) {
1926 glop_solution.constraint_statuses[i] =
1927 glop::ConstraintStatus::AT_LOWER_BOUND;
1928 } else if (solution.dual_solution[i.value()] < 0) {
1929 glop_solution.constraint_statuses[i] =
1930 glop::ConstraintStatus::AT_UPPER_BOUND;
1931 } else {
1932 glop_solution.constraint_statuses[i] = glop::ConstraintStatus::BASIC;
1933 }
1934 }
1935
// Variable statuses are decided purely by whether the value sits at its
// lower and/or upper bound.
1936 for (glop::ColIndex i{0}; i.value() < solution.primal_solution.size(); ++i) {
1937 const bool at_lb = solution.primal_solution[i.value()] <=
1938 qp.variable_lower_bounds[i.value()];
1939 const bool at_ub = solution.primal_solution[i.value()] >=
1940 qp.variable_upper_bounds[i.value()];
1941 // Note that ShardedWeightedAverage is designed so that variables at their
1942 // bounds will be exactly at their bounds even with floating-point roundoff.
1943 if (at_lb) {
1944 if (at_ub) {
1945 glop_solution.variable_statuses[i] = glop::VariableStatus::FIXED_VALUE;
1946 } else {
1947 glop_solution.variable_statuses[i] =
1948 glop::VariableStatus::AT_LOWER_BOUND;
1949 }
1950 } else {
1951 if (at_ub) {
1952 glop_solution.variable_statuses[i] =
1953 glop::VariableStatus::AT_UPPER_BOUND;
1954 } else {
1955 glop_solution.variable_statuses[i] = glop::VariableStatus::BASIC;
1956 }
1957 }
1958 }
1959 return glop_solution;
1960}
1961
1962} // namespace internal
1963
1964} // namespace operations_research::pdlp
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define LOG_IF(severity, condition)
Definition: base/logging.h:479
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:703
#define CHECK_OK(x)
Definition: base/logging.h:44
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:890
void Start()
Definition: timer.h:31
double Get() const
Definition: timer.h:45
std::ostream & stream()
Solver(const std::string &name)
Solver API.
GRBmodel * model
const int WARNING
Definition: log_severity.h:31
const int INFO
Definition: log_severity.h:31
const int FATAL
Definition: log_severity.h:32
const int GLOG_INFO
Definition: log_severity.h:25
Fractional Square(Fractional f)
Fractional SquaredNorm(const SparseColumn &v)
void MPModelProtoToLinearProgram(const MPModelProto &input, LinearProgram *output)
Definition: proto_utils.cc:51
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
void LinearProgramToMPModelProto(const LinearProgram &input, MPModelProto *output)
Definition: proto_utils.cc:20
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:332
absl::StatusOr< SolveResult > Solve(const Model &model, const SolverType solver_type, const SolveArguments &solve_args, const SolverInitArguments &init_args)
Definition: solve.cc:94
glop::ProblemSolution ComputeStatuses(const QuadraticProgram &qp, const PrimalAndDualSolution &solution)
absl::StatusOr< QuadraticProgram > QpFromMpModelProto(const MPModelProto &proto, bool relax_integer_variables, bool include_names)
absl::Status ValidateQuadraticProgramDimensions(const QuadraticProgram &qp)
double SquaredDistance(const VectorXd &vector1, const VectorXd &vector2, const Sharder &sharder)
Definition: sharder.cc:224
double Distance(const VectorXd &vector1, const VectorXd &vector2, const Sharder &sharder)
Definition: sharder.cc:231
VectorXd TransposedMatrixVectorProduct(const Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > &matrix, const VectorXd &vector, const Sharder &sharder)
Definition: sharder.cc:151
VectorXd ReducedCosts(const ShardedQuadraticProgram &sharded_qp, const VectorXd &primal_solution, const VectorXd &dual_solution, bool use_zero_primal_objective)
InfeasibilityInformation ComputeInfeasibilityInformation(const ShardedQuadraticProgram &scaled_sharded_qp, const Eigen::VectorXd &col_scaling_vec, const Eigen::VectorXd &row_scaling_vec, const Eigen::VectorXd &scaled_primal_ray, const Eigen::VectorXd &scaled_dual_ray, PointType candidate_type)
bool HasValidBounds(const QuadraticProgram &qp)
absl::optional< ConvergenceInformation > GetConvergenceInformation(const IterationStats &stats, PointType candidate_type)
absl::optional< TerminationReasonAndPointType > CheckTerminationCriteria(const TerminationCriteria &criteria, const IterationStats &stats, const QuadraticProgramBoundNorms &bound_norms, const bool force_numerical_termination)
Definition: termination.cc:90
absl::StatusOr< MPModelProto > QpToMpModelProto(const QuadraticProgram &qp)
bool IsLinearProgram(const QuadraticProgram &qp)
void SetRandomProjections(const ShardedQuadraticProgram &sharded_qp, const Eigen::VectorXd &primal_solution, const Eigen::VectorXd &dual_solution, const std::vector< int > &random_projection_seeds, PointMetadata &metadata)
SingularValueAndIterations EstimateMaximumSingularValueOfConstraintMatrix(const ShardedQuadraticProgram &sharded_qp, const absl::optional< VectorXd > &primal_solution, const absl::optional< VectorXd > &dual_solution, const double desired_relative_error, const double failure_probability, std::mt19937 &mt_generator)
void ProjectToDualVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &dual)
RelativeConvergenceInformation ComputeRelativeResiduals(const double eps_optimal_absolute, const double eps_optimal_relative, const QuadraticProgramBoundNorms &norms, const ConvergenceInformation &stats)
Definition: termination.cc:147
void CoefficientWiseProductInPlace(const VectorXd &scale, const Sharder &sharder, VectorXd &dest)
Definition: sharder.cc:182
void CoefficientWiseQuotientInPlace(const VectorXd &scale, const Sharder &sharder, VectorXd &dest)
Definition: sharder.cc:190
VectorXd CloneVector(const VectorXd &vec, const Sharder &sharder)
Definition: sharder.cc:175
ScalingVectors ApplyRescaling(const RescalingOptions &rescaling_options, ShardedQuadraticProgram &sharded_qp)
QuadraticProgramStats ComputeStats(const ShardedQuadraticProgram &qp, const double infinite_constraint_bound_threshold)
QuadraticProgramBoundNorms BoundNormsFromProblemStats(const QuadraticProgramStats &stats)
Definition: termination.cc:138
ConvergenceInformation ComputeConvergenceInformation(const ShardedQuadraticProgram &scaled_sharded_qp, const Eigen::VectorXd &col_scaling_vec, const Eigen::VectorXd &row_scaling_vec, const Eigen::VectorXd &scaled_primal_solution, const Eigen::VectorXd &scaled_dual_solution, PointType candidate_type)
void ProjectToPrimalVariableBounds(const ShardedQuadraticProgram &sharded_qp, VectorXd &primal)
LocalizedLagrangianBounds ComputeLocalizedLagrangianBounds(const ShardedQuadraticProgram &sharded_qp, const VectorXd &primal_solution, const VectorXd &dual_solution, const PrimalDualNorm primal_dual_norm, const double primal_weight, const double radius, const VectorXd *primal_product, const VectorXd *dual_product, const bool use_diagonal_qp_trust_region_solver, const double diagonal_qp_trust_region_solver_tolerance)
double Norm(const VectorXd &vector, const Sharder &sharder)
Definition: sharder.cc:220
SolverResult PrimalDualHybridGradient(QuadraticProgram qp, const PrimalDualHybridGradientParams &params, absl::optional< PrimalAndDualSolution > initial_solution, const std::atomic< bool > *interrupt_solve, IterationStatsCallback iteration_stats_callback)
void AssignVector(const VectorXd &vec, const Sharder &sharder, VectorXd &dest)
Definition: sharder.cc:169
double BoundGap(const LocalizedLagrangianBounds &bounds)
Definition: trust_region.h:113
absl::Status ValidatePrimalDualHybridGradientParams(const PrimalDualHybridGradientParams &params)
const absl::string_view ToString(MPSolver::OptimizationProblemType optimization_problem_type)
int64_t Zero()
NOLINT.
STL namespace.
bool presolved_problem_was_maximization
glop::MainLpPreprocessor preprocessor
VectorXd value
const VectorXd trivial_row_scaling_vec
ShardedQuadraticProgram sharded_original_qp
const VectorXd trivial_col_scaling_vec
double distance_moved_last_restart_period
VectorXd delta
int length_of_last_restart_period
glop::GlopParameters preprocessor_parameters
ConstraintStatusColumn constraint_statuses
Definition: lp_data.h:689
Eigen::SparseMatrix< double, Eigen::ColMajor, int64_t > constraint_matrix
std::string message
Definition: trace.cc:398