diff --git a/ortools/pdlp/primal_dual_hybrid_gradient.cc b/ortools/pdlp/primal_dual_hybrid_gradient.cc
index e825ba039a..b68d71934b 100644
--- a/ortools/pdlp/primal_dual_hybrid_gradient.cc
+++ b/ortools/pdlp/primal_dual_hybrid_gradient.cc
@@ -906,13 +906,6 @@ SolverResult Solver::ConstructSolverResult(VectorXd primal_solution,
     LOG(INFO) << IterationStatsLabelString();
     LOG(INFO) << ToString(stats, params_.termination_criteria(),
                           original_bound_norms_, solve_log.solution_type());
-    const auto& convergence_info = GetConvergenceInformation(stats, solve_log.solution_type());
-    if (convergence_info.has_value()) {
-      if (std::isfinite(convergence_info->corrected_dual_objective())) {
-        LOG(INFO) << "Dual objective after infeasibility correction: " <<
-            convergence_info->corrected_dual_objective();
-      }
-    }
   }
   solve_log.set_iteration_count(stats.iteration_number());
   solve_log.set_termination_reason(termination_reason);
@@ -1969,6 +1962,8 @@ SolverResult Solver::Solve(
       CloneVector(current_dual_solution_, sharded_working_qp_.DualSharder());
   // Note: Any cached values computed here also need to be recomputed after a
   // restart.
+  solve_log.set_preprocessing_time_sec(timer_.Get());
+  ratio_last_two_step_sizes_ = 1;
   current_dual_product_ = TransposedMatrixVectorProduct(
       WorkingQp().constraint_matrix, current_dual_solution_,
       sharded_working_qp_.ConstraintMatrixSharder());
@@ -1980,8 +1975,6 @@ SolverResult Solver::Solve(
   num_rejected_steps_ = 0;
-  solve_log.set_preprocessing_time_sec(timer_.Get());
-
   for (iterations_completed_ = 0;; ++iterations_completed_) {
     // This code performs the logic of the major iterations and termination
     // checks. It may modify the current solution and primal weight (e.g., when
diff --git a/ortools/pdlp/sharded_optimization_utils.cc b/ortools/pdlp/sharded_optimization_utils.cc
index ea38dbb6ab..29b657f082 100644
--- a/ortools/pdlp/sharded_optimization_utils.cc
+++ b/ortools/pdlp/sharded_optimization_utils.cc
@@ -560,20 +560,6 @@ SingularValueAndIterations EstimateMaximumSingularValue(
     const Sharder& primal_vector_sharder, const Sharder& dual_vector_sharder,
     const double desired_relative_error, const double failure_probability,
     std::mt19937& mt_generator) {
-  // Easy case: matrix is diagonal.
-  if (IsDiagonal(matrix, matrix_sharder)) {
-    VectorXd local_max(matrix_sharder.NumShards());
-    matrix_sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
-      const auto matrix_shard = shard(matrix);
-      local_max[shard.Index()] =
-          (matrix_shard *
-           VectorXd::Ones(matrix_sharder.ShardSize(shard.Index())))
-              .lpNorm<Eigen::Infinity>();
-    });
-    return {.singular_value = local_max.lpNorm<Eigen::Infinity>(),
-            .num_iterations = 0,
-            .estimated_relative_error = 0.0};
-  }
   const int64_t dimension = matrix.cols();
   VectorXd eigenvector(dimension);
   // Even though it will be slower, we initialize eigenvector sequentially so
diff --git a/ortools/pdlp/sharded_optimization_utils_test.cc b/ortools/pdlp/sharded_optimization_utils_test.cc
index f4b1bc085b..047d78e00d 100644
--- a/ortools/pdlp/sharded_optimization_utils_test.cc
+++ b/ortools/pdlp/sharded_optimization_utils_test.cc
@@ -626,6 +626,24 @@ TEST(EstimateSingularValuesTest, CorrectForTestLpWithBothActiveSubspaces) {
   EXPECT_LT(result.num_iterations, 300);
 }
 
+TEST(EstimateSingularValuesTest, CorrectForDiagonalLp) {
+  QuadraticProgram diagonal_lp = TestLp();
+  std::vector<Eigen::Triplet<double, int64_t>> triplets = {
+      {0, 0, 2}, {1, 1, 1}, {2, 2, -3}, {3, 3, -1}};
+  diagonal_lp.constraint_matrix.setFromTriplets(triplets.begin(),
+                                                triplets.end());
+  ShardedQuadraticProgram lp(diagonal_lp, /*num_threads=*/2, /*num_shards=*/2);
+
+  // The diagonal_lp matrix is [ 2 0 0 0; 0 1 0 0; 0 0 -3 0; 0 0 0 -1].
+  std::mt19937 random(1);
+  auto result = EstimateMaximumSingularValueOfConstraintMatrix(
+      lp, absl::nullopt, absl::nullopt,
+      /*desired_relative_error=*/0.01,
+      /*failure_probability=*/0.001, random);
+  EXPECT_NEAR(result.singular_value, 3, 0.0001);
+  EXPECT_LT(result.num_iterations, 300);
+}
+
 TEST(ProjectToPrimalVariableBoundsTest, TestLp) {
   ShardedQuadraticProgram qp(TestLp(), /*num_threads=*/2, /*num_shards=*/2);
diff --git a/ortools/pdlp/sharder.cc b/ortools/pdlp/sharder.cc
index a49e1e2120..df4b98f5ae 100644
--- a/ortools/pdlp/sharder.cc
+++ b/ortools/pdlp/sharder.cc
@@ -324,20 +324,4 @@ VectorXd ScaledColL2Norm(
   return answer;
 }
 
-bool IsDiagonal(
-    const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
-    const Sharder& sharder) {
-  return sharder.ParallelTrueForAllShards([&](const Sharder::Shard& shard) {
-    auto matrix_shard = shard(matrix);
-    const int64_t col_offset = sharder.ShardStart(shard.Index());
-    for (int64_t col_idx = 0; col_idx < matrix_shard.outerSize(); ++col_idx) {
-      for (decltype(matrix_shard)::InnerIterator it(matrix_shard, col_idx); it;
-           ++it) {
-        if (it.row() != (col_offset + it.col())) return false;
-      }
-    }
-    return true;
-  });
-}
-
 }  // namespace operations_research::pdlp
diff --git a/ortools/pdlp/sharder.h b/ortools/pdlp/sharder.h
index 2a6e703dbb..32ac79a83b 100644
--- a/ortools/pdlp/sharder.h
+++ b/ortools/pdlp/sharder.h
@@ -237,7 +237,7 @@ class Sharder {
 // constructor.
 Eigen::VectorXd TransposedMatrixVectorProduct(
     const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
-    const Eigen::VectorXd& vector, const Sharder& Sharder);
+    const Eigen::VectorXd& vector, const Sharder& sharder);
 
 ////////////////////////////////////////////////////////////////////////////////
 // The following functions use a Sharder to compute a vector operation in
@@ -330,11 +330,6 @@ Eigen::VectorXd ScaledColL2Norm(
     const Eigen::VectorXd& row_scaling_vec,
     const Eigen::VectorXd& col_scaling_vec, const Sharder& sharder);
 
-// Checks if a matrix is diagonal.
-bool IsDiagonal(
-    const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
-    const Sharder& sharder);
-
 }  // namespace operations_research::pdlp
 
 #endif  // PDLP_SHARDER_H_
diff --git a/ortools/pdlp/sharder_test.cc b/ortools/pdlp/sharder_test.cc
index 03488a8c26..1ee7ae7aaa 100644
--- a/ortools/pdlp/sharder_test.cc
+++ b/ortools/pdlp/sharder_test.cc
@@ -458,33 +458,6 @@ TEST(ScaledColL2Norm, SmallExample) {
   EXPECT_THAT(answer, ElementsAre(std::sqrt(54), 1.0, 6.0, std::sqrt(41)));
 }
 
-TEST(IsDiagonal, DiagonalSquareMatrix) {
-  Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(4, 4);
-  std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
-      {0, 0, 1.0}, {1, 1, 2.5}, {3, 3, -3}};
-  mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
-  Sharder sharder(mat, /*num_shards=*/3, nullptr);
-  EXPECT_TRUE(IsDiagonal(mat, sharder));
-}
-
-TEST(IsDiagonal, DiagonalRectangularMatrix) {
-  Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(3, 5);
-  std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
-      {0, 0, 2}, {1, 1, -1}, {2, 2, 3}};
-  mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
-  Sharder sharder(mat, /*num_shards=*/3, nullptr);
-  EXPECT_TRUE(IsDiagonal(mat, sharder));
-}
-
-TEST(IsDiagonal, NonDiagonalSquareMatrix) {
-  Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(3, 3);
-  std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
-      {0, 0, 2}, {0, 1, -1}, {1, 0, -1}, {2, 2, 1}};
-  mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
-  Sharder sharder(mat, /*num_shards=*/3, nullptr);
-  EXPECT_FALSE(IsDiagonal(mat, sharder));
-}
-
 class VariousSizesTest : public testing::TestWithParam<int64_t> {};
 
 TEST_P(VariousSizesTest, LargeMatVec) {