This commit is contained in:
Laurent Perron
2022-07-04 12:27:57 +02:00
parent 6771946e67
commit a8afea5c9c
6 changed files with 21 additions and 72 deletions

View File

@@ -906,13 +906,6 @@ SolverResult Solver::ConstructSolverResult(VectorXd primal_solution,
LOG(INFO) << IterationStatsLabelString();
LOG(INFO) << ToString(stats, params_.termination_criteria(),
original_bound_norms_, solve_log.solution_type());
const auto& convergence_info = GetConvergenceInformation(stats, solve_log.solution_type());
if (convergence_info.has_value()) {
if (std::isfinite(convergence_info->corrected_dual_objective())) {
LOG(INFO) << "Dual objective after infeasibility correction: " <<
convergence_info->corrected_dual_objective();
}
}
}
solve_log.set_iteration_count(stats.iteration_number());
solve_log.set_termination_reason(termination_reason);
@@ -1969,6 +1962,8 @@ SolverResult Solver::Solve(
CloneVector(current_dual_solution_, sharded_working_qp_.DualSharder());
// Note: Any cached values computed here also need to be recomputed after a
// restart.
solve_log.set_preprocessing_time_sec(timer_.Get());
ratio_last_two_step_sizes_ = 1;
current_dual_product_ = TransposedMatrixVectorProduct(
WorkingQp().constraint_matrix, current_dual_solution_,
@@ -1980,8 +1975,6 @@ SolverResult Solver::Solve(
num_rejected_steps_ = 0;
solve_log.set_preprocessing_time_sec(timer_.Get());
for (iterations_completed_ = 0;; ++iterations_completed_) {
// This code performs the logic of the major iterations and termination
// checks. It may modify the current solution and primal weight (e.g., when

View File

@@ -560,20 +560,6 @@ SingularValueAndIterations EstimateMaximumSingularValue(
const Sharder& primal_vector_sharder, const Sharder& dual_vector_sharder,
const double desired_relative_error, const double failure_probability,
std::mt19937& mt_generator) {
// Easy case: matrix is diagonal.
if (IsDiagonal(matrix, matrix_sharder)) {
VectorXd local_max(matrix_sharder.NumShards());
matrix_sharder.ParallelForEachShard([&](const Sharder::Shard& shard) {
const auto matrix_shard = shard(matrix);
local_max[shard.Index()] =
(matrix_shard *
VectorXd::Ones(matrix_sharder.ShardSize(shard.Index())))
.lpNorm<Eigen::Infinity>();
});
return {.singular_value = local_max.lpNorm<Eigen::Infinity>(),
.num_iterations = 0,
.estimated_relative_error = 0.0};
}
const int64_t dimension = matrix.cols();
VectorXd eigenvector(dimension);
// Even though it will be slower, we initialize eigenvector sequentially so

View File

@@ -626,6 +626,24 @@ TEST(EstimateSingularValuesTest, CorrectForTestLpWithBothActiveSubspaces) {
EXPECT_LT(result.num_iterations, 300);
}
// Verifies the singular-value estimator on a purely diagonal constraint
// matrix: the maximum singular value of a diagonal matrix is the largest
// absolute diagonal entry, here |-3| = 3.
TEST(EstimateSingularValuesTest, CorrectForDiagonalLp) {
QuadraticProgram diagonal_lp = TestLp();
// Replace the test LP's constraint matrix with a 4x4 diagonal one.
std::vector<Eigen::Triplet<double, int64_t>> triplets = {
{0, 0, 2}, {1, 1, 1}, {2, 2, -3}, {3, 3, -1}};
diagonal_lp.constraint_matrix.setFromTriplets(triplets.begin(),
triplets.end());
ShardedQuadraticProgram lp(diagonal_lp, /*num_threads=*/2, /*num_shards=*/2);
// The diagonal_lp matrix is [ 2 0 0 0; 0 1 0 0; 0 0 -3 0; 0 0 0 -1].
// Fixed seed keeps the randomized starting vector deterministic.
std::mt19937 random(1);
auto result = EstimateMaximumSingularValueOfConstraintMatrix(
lp, absl::nullopt, absl::nullopt,
/*desired_relative_error=*/0.01,
/*failure_probability=*/0.001, random);
// Expect sigma_max = 3 within the requested relative error.
EXPECT_NEAR(result.singular_value, 3, 0.0001);
EXPECT_LT(result.num_iterations, 300);
}
TEST(ProjectToPrimalVariableBoundsTest, TestLp) {
ShardedQuadraticProgram qp(TestLp(), /*num_threads=*/2,
/*num_shards=*/2);

View File

@@ -324,20 +324,4 @@ VectorXd ScaledColL2Norm(
return answer;
}
// Returns true iff every stored (structurally nonzero) entry of `matrix`
// lies on the main diagonal. Explicit zeros stored off-diagonal would still
// make this return false, since only entry positions are inspected.
// Each shard scans its own column range in parallel via
// ParallelTrueForAllShards, which ANDs the per-shard results.
bool IsDiagonal(
const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
const Sharder& sharder) {
return sharder.ParallelTrueForAllShards([&](const Sharder::Shard& shard) {
auto matrix_shard = shard(matrix);
// ShardStart converts the shard-local column index back to a global
// column so it can be compared against the global row index below.
const int64_t col_offset = sharder.ShardStart(shard.Index());
for (int64_t col_idx = 0; col_idx < matrix_shard.outerSize(); ++col_idx) {
for (decltype(matrix_shard)::InnerIterator it(matrix_shard, col_idx); it;
++it) {
// Any stored entry off the main diagonal disqualifies the matrix.
if (it.row() != (col_offset + it.col())) return false;
}
}
return true;
});
}
} // namespace operations_research::pdlp

View File

@@ -237,7 +237,7 @@ class Sharder {
// constructor.
Eigen::VectorXd TransposedMatrixVectorProduct(
const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
const Eigen::VectorXd& vector, const Sharder& Sharder);
const Eigen::VectorXd& vector, const Sharder& sharder);
////////////////////////////////////////////////////////////////////////////////
// The following functions use a Sharder to compute a vector operation in
@@ -330,11 +330,6 @@ Eigen::VectorXd ScaledColL2Norm(
const Eigen::VectorXd& row_scaling_vec,
const Eigen::VectorXd& col_scaling_vec, const Sharder& sharder);
// Checks if a matrix is diagonal.
bool IsDiagonal(
const Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t>& matrix,
const Sharder& sharder);
} // namespace operations_research::pdlp
#endif // PDLP_SHARDER_H_

View File

@@ -458,33 +458,6 @@ TEST(ScaledColL2Norm, SmallExample) {
EXPECT_THAT(answer, ElementsAre(std::sqrt(54), 1.0, 6.0, std::sqrt(41)));
}
// A square matrix with stored entries only at (i, i) is diagonal. Note that
// a missing diagonal entry — (2, 2) has no triplet here — is still accepted.
TEST(IsDiagonal, DiagonalSquareMatrix) {
Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(4, 4);
std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
{0, 0, 1.0}, {1, 1, 2.5}, {3, 3, -3}};
mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
Sharder sharder(mat, /*num_shards=*/3, nullptr);
EXPECT_TRUE(IsDiagonal(mat, sharder));
}
// A rectangular (3x5) matrix also counts as diagonal when all stored
// entries sit at (i, i) — the check does not require a square shape.
TEST(IsDiagonal, DiagonalRectangularMatrix) {
Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(3, 5);
std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
{0, 0, 2}, {1, 1, -1}, {2, 2, 3}};
mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
Sharder sharder(mat, /*num_shards=*/3, nullptr);
EXPECT_TRUE(IsDiagonal(mat, sharder));
}
// Off-diagonal stored entries at (0, 1) and (1, 0) must make IsDiagonal
// return false, even though the matrix is symmetric and square.
TEST(IsDiagonal, NonDiagonalSquareMatrix) {
Eigen::SparseMatrix<double, Eigen::ColMajor, int64_t> mat(3, 3);
std::vector<Eigen::Triplet<double, int64_t>> matrix_triplets = {
{0, 0, 2}, {0, 1, -1}, {1, 0, -1}, {2, 2, 1}};
mat.setFromTriplets(matrix_triplets.begin(), matrix_triplets.end());
Sharder sharder(mat, /*num_shards=*/3, nullptr);
EXPECT_FALSE(IsDiagonal(mat, sharder));
}
class VariousSizesTest : public testing::TestWithParam<int64_t> {};
TEST_P(VariousSizesTest, LargeMatVec) {