restructure glop code

This commit is contained in:
Laurent Perron
2019-06-17 11:34:10 +02:00
parent 277792fa64
commit 1837b57d0b
12 changed files with 198 additions and 145 deletions

View File

@@ -175,12 +175,10 @@ void EtaFactorization::RightSolve(DenseColumn* d) const {
// BasisFactorization
// --------------------------------------------------------
BasisFactorization::BasisFactorization(
const MatrixView& matrix, const CompactSparseMatrix& compact_matrix,
const RowToColMapping& basis)
const CompactSparseMatrix* compact_matrix, const RowToColMapping* basis)
: stats_(),
matrix_(matrix),
compact_matrix_(compact_matrix),
basis_(basis),
compact_matrix_(*compact_matrix),
basis_(*basis),
tau_is_computed_(false),
max_num_updates_(0),
num_updates_(0),
@@ -209,8 +207,7 @@ Status BasisFactorization::Initialize() {
SCOPED_TIME_STAT(&stats_);
Clear();
if (IsIdentityBasis()) return Status::OK();
MatrixView basis_matrix;
basis_matrix.PopulateFromBasis(matrix_, basis_);
CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
return lu_factorization_.ComputeFactorization(basis_matrix);
}
@@ -225,8 +222,7 @@ Status BasisFactorization::ForceRefactorization() {
SCOPED_TIME_STAT(&stats_);
stats_.refactorization_interval.Add(num_updates_);
Clear();
MatrixView basis_matrix;
basis_matrix.PopulateFromBasis(matrix_, basis_);
CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
const Status status = lu_factorization_.ComputeFactorization(basis_matrix);
const double kLuComplexityFactor = 10;
@@ -498,15 +494,13 @@ bool BasisFactorization::IsIdentityBasis() const {
Fractional BasisFactorization::ComputeOneNorm() const {
if (IsIdentityBasis()) return 1.0;
MatrixView basis_matrix;
basis_matrix.PopulateFromBasis(matrix_, basis_);
CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
return basis_matrix.ComputeOneNorm();
}
Fractional BasisFactorization::ComputeInfinityNorm() const {
if (IsIdentityBasis()) return 1.0;
MatrixView basis_matrix;
basis_matrix.PopulateFromBasis(matrix_, basis_);
CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
return basis_matrix.ComputeInfinityNorm();
}

View File

@@ -143,11 +143,13 @@ class EtaFactorization {
//
// To speed up and improve stability the factorization is refactorized at least
// every 'refactorization_period' updates.
//
// This class does not take ownership of the underlying matrix and basis, and
// thus they must outlive this class (and keep the same address in memory).
class BasisFactorization {
public:
BasisFactorization(const MatrixView& matrix,
const CompactSparseMatrix& compact_matrix,
const RowToColMapping& basis);
BasisFactorization(const CompactSparseMatrix* compact_matrix,
const RowToColMapping* basis);
virtual ~BasisFactorization();
// Sets the parameters for this component.
@@ -306,7 +308,6 @@ class BasisFactorization {
GlopParameters parameters_;
// References to the basis subpart of the linear program matrix.
const MatrixView& matrix_;
const CompactSparseMatrix& compact_matrix_;
const RowToColMapping& basis_;

View File

@@ -39,15 +39,16 @@ void LuFactorization::Clear() {
inverse_col_perm_.clear();
}
Status LuFactorization::ComputeFactorization(const MatrixView& matrix) {
Status LuFactorization::ComputeFactorization(
const CompactSparseMatrixView& compact_matrix) {
SCOPED_TIME_STAT(&stats_);
Clear();
if (matrix.num_rows().value() != matrix.num_cols().value()) {
if (compact_matrix.num_rows().value() != compact_matrix.num_cols().value()) {
GLOP_RETURN_AND_LOG_ERROR(Status::ERROR_LU, "Not a square matrix!!");
}
GLOP_RETURN_IF_ERROR(
markowitz_.ComputeLU(matrix, &row_perm_, &col_perm_, &lower_, &upper_));
GLOP_RETURN_IF_ERROR(markowitz_.ComputeLU(compact_matrix, &row_perm_,
&col_perm_, &lower_, &upper_));
inverse_col_perm_.PopulateFromInverse(col_perm_);
inverse_row_perm_.PopulateFromInverse(row_perm_);
ComputeTransposeUpper();
@@ -55,10 +56,10 @@ Status LuFactorization::ComputeFactorization(const MatrixView& matrix) {
is_identity_factorization_ = false;
IF_STATS_ENABLED({
stats_.lu_fill_in.Add(GetFillInPercentage(matrix));
stats_.lu_fill_in.Add(GetFillInPercentage(compact_matrix));
stats_.basis_num_entries.Add(matrix.num_entries().value());
});
DCHECK(CheckFactorization(matrix, Fractional(1e-6)));
DCHECK(CheckFactorization(compact_matrix, Fractional(1e-6)));
return Status::OK();
}
@@ -80,7 +81,7 @@ void LuFactorization::LeftSolve(DenseRow* y) const {
DenseColumn* const x = reinterpret_cast<DenseColumn*>(y);
ApplyInversePermutation(inverse_col_perm_, *x, &dense_column_scratchpad_);
upper_.TransposeUpperSolve(&dense_column_scratchpad_);
lower_.TransposeLowerSolve(&dense_column_scratchpad_, nullptr);
lower_.TransposeLowerSolve(&dense_column_scratchpad_);
ApplyInversePermutation(row_perm_, dense_column_scratchpad_, x);
}
@@ -187,13 +188,13 @@ void LuFactorization::RightSolveLWithPermutedInput(const DenseColumn& a,
}
}
void LuFactorization::RightSolveLForColumnView(
const CompactSparseMatrix::ColumnView& b, ScatteredColumn* x) const {
void LuFactorization::RightSolveLForColumnView(const ColumnView& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(x->values));
x->non_zeros.clear();
if (is_identity_factorization_) {
for (const CompactSparseMatrix::ColumnView::Entry e : b) {
for (const ColumnView::Entry e : b) {
(*x)[e.row()] = e.coefficient();
x->non_zeros.push_back(e.row());
}
@@ -207,7 +208,7 @@ void LuFactorization::RightSolveLForColumnView(
// of b.
ColIndex first_column_to_consider(RowToColIndex(x->values.size()));
const ColIndex limit = lower_.GetFirstNonIdentityColumn();
for (const CompactSparseMatrix::ColumnView::Entry e : b) {
for (const ColumnView::Entry e : b) {
const RowIndex permuted_row = row_perm_[e.row()];
(*x)[permuted_row] = e.coefficient();
x->non_zeros.push_back(permuted_row);
@@ -343,11 +344,10 @@ bool LuFactorization::LeftSolveLWithNonZeros(
std::vector<RowIndex>* nz = reinterpret_cast<RowIndexVector*>(&y->non_zeros);
// Hypersparse?
RowIndex last_non_zero_row = ColToRowIndex(y->values.size() - 1);
transpose_lower_.ComputeRowsToConsiderInSortedOrder(nz);
y->non_zeros_are_sorted = true;
if (nz->empty()) {
lower_.TransposeLowerSolve(x, &last_non_zero_row);
lower_.TransposeLowerSolve(x);
} else {
lower_.TransposeHyperSparseSolveWithReversedNonZeros(x, nz);
}
@@ -377,7 +377,7 @@ bool LuFactorization::LeftSolveLWithNonZeros(
x->swap(result_before_permutation->values);
x->AssignToZero(inverse_row_perm_.size());
y->non_zeros.clear();
for (RowIndex row(0); row <= last_non_zero_row; ++row) {
for (RowIndex row(0); row < inverse_row_perm_.size(); ++row) {
const Fractional value = (*result_before_permutation)[row];
if (value != 0.0) {
const RowIndex permuted_row = inverse_row_perm_[row];
@@ -432,7 +432,8 @@ const SparseColumn& LuFactorization::GetColumnOfU(ColIndex col) const {
return column_of_upper_;
}
double LuFactorization::GetFillInPercentage(const MatrixView& matrix) const {
double LuFactorization::GetFillInPercentage(
const CompactSparseMatrixView& matrix) const {
const int initial_num_entries = matrix.num_entries().value();
const int lu_num_entries =
(lower_.num_entries() + upper_.num_entries()).value();
@@ -503,13 +504,13 @@ Fractional LuFactorization::ComputeInverseInfinityNorm() const {
}
Fractional LuFactorization::ComputeOneNormConditionNumber(
const MatrixView& matrix) const {
const CompactSparseMatrixView& matrix) const {
if (is_identity_factorization_) return 1.0;
return matrix.ComputeOneNorm() * ComputeInverseOneNorm();
}
Fractional LuFactorization::ComputeInfinityNormConditionNumber(
const MatrixView& matrix) const {
const CompactSparseMatrixView& matrix) const {
if (is_identity_factorization_) return 1.0;
return matrix.ComputeInfinityNorm() * ComputeInverseInfinityNorm();
}
@@ -543,7 +544,7 @@ void LuFactorization::ComputeTransposeLower() const {
transpose_lower_.PopulateFromTranspose(lower_);
}
bool LuFactorization::CheckFactorization(const MatrixView& matrix,
bool LuFactorization::CheckFactorization(const CompactSparseMatrixView& matrix,
Fractional tolerance) const {
if (is_identity_factorization_) return true;
SparseMatrix lu;

View File

@@ -48,7 +48,8 @@ class LuFactorization {
// it being confused by this revert to identity factorization behavior. The
// reason behind it is that this way, calling any public function of this
// class will never cause a crash of the program.
ABSL_MUST_USE_RESULT Status ComputeFactorization(const MatrixView& matrix);
ABSL_MUST_USE_RESULT Status
ComputeFactorization(const CompactSparseMatrixView& compact_matrix);
// Returns the column permutation used by the LU factorization.
const ColumnPermutation& GetColumnPermutation() const { return col_perm_; }
@@ -103,8 +104,7 @@ class LuFactorization {
// or a ScatteredColumn as input. non_zeros will either be cleared or set to
// the non zeros of the result. Important: the output x must be of the correct
// size and all zero.
void RightSolveLForColumnView(const CompactSparseMatrix::ColumnView& b,
ScatteredColumn* x) const;
void RightSolveLForColumnView(const ColumnView& b, ScatteredColumn* x) const;
void RightSolveLForScatteredColumn(const ScatteredColumn& b,
ScatteredColumn* x) const;
@@ -135,7 +135,7 @@ class LuFactorization {
//
// This returns the number of entries in lower + upper as the percentage of
// the number of entries in B.
double GetFillInPercentage(const MatrixView& matrix) const;
double GetFillInPercentage(const CompactSparseMatrixView& matrix) const;
// Returns the number of entries in L + U.
// If the factorization is the identity, this returns 0.
@@ -173,8 +173,10 @@ class LuFactorization {
// for ComputeFactorization().
//
// TODO(user): separate this from LuFactorization.
Fractional ComputeOneNormConditionNumber(const MatrixView& matrix) const;
Fractional ComputeInfinityNormConditionNumber(const MatrixView& matrix) const;
Fractional ComputeOneNormConditionNumber(
const CompactSparseMatrixView& matrix) const;
Fractional ComputeInfinityNormConditionNumber(
const CompactSparseMatrixView& matrix) const;
Fractional ComputeInverseInfinityNormUpperBound() const;
// Sets the current parameters.
@@ -226,7 +228,8 @@ class LuFactorization {
// Computes R = P.B.Q^{-1} - L.U and returns false if the largest magnitude of
// the coefficients of P.B.Q^{-1} - L.U is greater than tolerance.
bool CheckFactorization(const MatrixView& matrix, Fractional tolerance) const;
bool CheckFactorization(const CompactSparseMatrixView& matrix,
Fractional tolerance) const;
// Special case where we have nothing to do. This happens at the beginning
// when we start the problem with an all-slack basis and gives a good speedup

View File

@@ -17,13 +17,14 @@
#include "absl/strings/str_format.h"
#include "ortools/lp_data/lp_utils.h"
#include "ortools/lp_data/sparse.h"
namespace operations_research {
namespace glop {
Status Markowitz::ComputeRowAndColumnPermutation(const MatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm) {
Status Markowitz::ComputeRowAndColumnPermutation(
const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
ColumnPermutation* col_perm) {
SCOPED_TIME_STAT(&stats_);
Clear();
const RowIndex num_rows = basis_matrix.num_rows();
@@ -138,7 +139,7 @@ Status Markowitz::ComputeRowAndColumnPermutation(const MatrixView& basis_matrix,
return Status::OK();
}
Status Markowitz::ComputeLU(const MatrixView& basis_matrix,
Status Markowitz::ComputeLU(const CompactSparseMatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm,
TriangularMatrix* lower, TriangularMatrix* upper) {
@@ -182,15 +183,14 @@ struct MatrixEntry {
} // namespace
void Markowitz::ExtractSingletonColumns(const MatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm,
int* index) {
void Markowitz::ExtractSingletonColumns(
const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
ColumnPermutation* col_perm, int* index) {
SCOPED_TIME_STAT(&stats_);
std::vector<MatrixEntry> singleton_entries;
const ColIndex num_cols = basis_matrix.num_cols();
for (ColIndex col(0); col < num_cols; ++col) {
const SparseColumn& column = basis_matrix.column(col);
const ColumnView& column = basis_matrix.column(col);
if (column.num_entries().value() == 1) {
singleton_entries.push_back(
MatrixEntry(column.GetFirstRow(), col, column.GetFirstCoefficient()));
@@ -213,15 +213,14 @@ void Markowitz::ExtractSingletonColumns(const MatrixView& basis_matrix,
num_cols.value());
}
void Markowitz::ExtractResidualSingletonColumns(const MatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm,
int* index) {
void Markowitz::ExtractResidualSingletonColumns(
const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
ColumnPermutation* col_perm, int* index) {
SCOPED_TIME_STAT(&stats_);
const ColIndex num_cols = basis_matrix.num_cols();
for (ColIndex col(0); col < num_cols; ++col) {
if ((*col_perm)[col] != kInvalidCol) continue;
const SparseColumn& column = basis_matrix.column(col);
const ColumnView& column = basis_matrix.column(col);
int residual_degree = 0;
RowIndex row;
for (const SparseColumn::Entry e : column) {
@@ -262,8 +261,8 @@ const SparseColumn& Markowitz::ComputeColumn(const RowPermutation& row_perm,
// Solve a sparse triangular system. If the column 'col' of permuted_lower_
// was never computed before by ComputeColumn(), we use the column 'col' of
// the matrix to factorize.
const SparseColumn& input =
first_time ? basis_matrix_->column(col) : *lower_column;
const ColumnView& input =
first_time ? basis_matrix_->column(col) : ColumnView(*lower_column);
lower_.PermutedLowerSparseSolve(input, row_perm, lower_column,
permuted_upper_.mutable_column(col));
permuted_lower_column_needs_solve_[col] = false;
@@ -277,9 +276,14 @@ const SparseColumn& Markowitz::ComputeColumn(const RowPermutation& row_perm,
return *lower_column;
}
// In this case, we just need to "split" the lower column.
// In this case, we just need to "split" the lower column. We copy from the
// appropriate ColumnView in basis_matrix_.
// TODO(user): add PopulateFromColumnView if it is useful elsewhere.
if (first_time) {
lower_column->PopulateFromSparseVector(basis_matrix_->column(col));
lower_column->Reserve(basis_matrix_->column(col).num_entries());
for (const auto e : basis_matrix_->column(col)) {
lower_column->SetCoefficient(e.row(), e.coefficient());
}
}
lower_column->MoveTaggedEntriesTo(row_perm,
permuted_upper_.mutable_column(col));
@@ -549,7 +553,7 @@ void MatrixNonZeroPattern::Reset(RowIndex num_rows, ColIndex num_cols) {
}
void MatrixNonZeroPattern::InitializeFromMatrixSubset(
const MatrixView& basis_matrix, const RowPermutation& row_perm,
const CompactSparseMatrixView& basis_matrix, const RowPermutation& row_perm,
const ColumnPermutation& col_perm, std::vector<ColIndex>* singleton_columns,
std::vector<RowIndex>* singleton_rows) {
const ColIndex num_cols = basis_matrix.num_cols();

View File

@@ -106,7 +106,7 @@ class MatrixNonZeroPattern {
// Resets the pattern to the one of the given matrix but only for the
// rows/columns whose given permutation is kInvalidRow or kInvalidCol.
// This also fills the singleton columns/rows with the corresponding entries.
void InitializeFromMatrixSubset(const MatrixView& basis_matrix,
void InitializeFromMatrixSubset(const CompactSparseMatrixView& basis_matrix,
const RowPermutation& row_perm,
const ColumnPermutation& col_perm,
std::vector<ColIndex>* singleton_columns,
@@ -277,11 +277,10 @@ class Markowitz {
// of the matrix. Moreover, by adding singleton columns with a one at the rows
// such that 'row_perm[row] == kInvalidRow', then the matrix will be
// non-singular.
ABSL_MUST_USE_RESULT Status ComputeLU(const MatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm,
TriangularMatrix* lower,
TriangularMatrix* upper);
ABSL_MUST_USE_RESULT Status
ComputeLU(const CompactSparseMatrixView& basis_matrix,
RowPermutation* row_perm, ColumnPermutation* col_perm,
TriangularMatrix* lower, TriangularMatrix* upper);
// Only computes P and Q^{-1}, L and U can be computed later from these
// permutations using another algorithm (for instance left-looking L.U). This
@@ -294,7 +293,7 @@ class Markowitz {
// independent columns of maximum size. If all the given columns are
// independent, the returned Status will be OK.
ABSL_MUST_USE_RESULT Status ComputeRowAndColumnPermutation(
const MatrixView& basis_matrix, RowPermutation* row_perm,
const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
ColumnPermutation* col_perm);
// Releases the memory used by this class.
@@ -331,7 +330,7 @@ class Markowitz {
//
// Note(user): Linear programming bases usually have a reasonable percentage of
// slack columns in them, so this gives a big speedup.
void ExtractSingletonColumns(const MatrixView& basis_matrix,
void ExtractSingletonColumns(const CompactSparseMatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm, int* index);
@@ -342,9 +341,9 @@ class Markowitz {
//
// The main gain here is that it avoids taking these columns into account in
// InitializeResidualMatrix() and later in RemoveRowFromResidualMatrix().
void ExtractResidualSingletonColumns(const MatrixView& basis_matrix,
RowPermutation* row_perm,
ColumnPermutation* col_perm, int* index);
void ExtractResidualSingletonColumns(
const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
ColumnPermutation* col_perm, int* index);
// Returns the column of the current residual matrix with an index 'col' in
// the initial matrix. We compute it by solving a linear system with the
@@ -386,7 +385,7 @@ class Markowitz {
void UpdateResidualMatrix(RowIndex pivot_row, ColIndex pivot_col);
// Pointer to the matrix to factorize.
MatrixView const* basis_matrix_;
CompactSparseMatrixView const* basis_matrix_;
// These matrices are transformed during the algorithm into the final L and U
// matrices modulo some row and column permutations. Note that the columns of

View File

@@ -85,7 +85,7 @@ RevisedSimplex::RevisedSimplex()
variable_name_(),
direction_(),
error_(),
basis_factorization_(matrix_with_slack_, compact_matrix_, basis_),
basis_factorization_(&compact_matrix_, &basis_),
variables_info_(compact_matrix_, lower_bound_, upper_bound_),
variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
basis_factorization_),
@@ -2190,8 +2190,7 @@ bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
// TODO(user): If 'is_ok' is true, we could use the computed lu in
// basis_factorization_ rather than recompute it during UpdateAndPivot().
MatrixView basis_matrix;
basis_matrix.PopulateFromBasis(matrix_with_slack_, basis_);
CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
basis_[leaving_row] = leaving_col;
return is_ok;

View File

@@ -198,7 +198,7 @@ bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols,
}
for (ColIndex col(0); col < num_cols; ++col) {
const SparseColumn& col_a = matrix_a.column(col);
const CompactSparseMatrix::ColumnView& col_b = matrix_b.column(col);
const ColumnView& col_b = matrix_b.column(col);
const EntryIndex end = std::min(col_a.num_entries(), col_b.num_entries());
if (end < col_a.num_entries() && col_a.EntryRow(end) < num_rows) {
return false;

View File

@@ -16,6 +16,8 @@
#include <algorithm>
#include "absl/strings/str_format.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/lp_data/permutation.h"
namespace operations_research {
namespace glop {
@@ -212,7 +214,7 @@ void SparseMatrix::PopulateFromPermutedMatrix(
const ColIndex num_cols = a.num_cols();
Reset(num_cols, a.num_rows());
for (ColIndex col(0); col < num_cols; ++col) {
for (const SparseColumn::Entry e : a.column(inverse_col_perm[col])) {
for (const auto e : a.column(inverse_col_perm[col])) {
columns_[col].SetCoefficient(row_perm[e.row()], e.coefficient());
}
}
@@ -427,8 +429,8 @@ template void SparseMatrix::PopulateFromTranspose<SparseMatrix>(
template void SparseMatrix::PopulateFromPermutedMatrix<SparseMatrix>(
const SparseMatrix& a, const RowPermutation& row_perm,
const ColumnPermutation& inverse_col_perm);
template void SparseMatrix::PopulateFromPermutedMatrix<MatrixView>(
const MatrixView& a, const RowPermutation& row_perm,
template void SparseMatrix::PopulateFromPermutedMatrix<CompactSparseMatrixView>(
const CompactSparseMatrixView& a, const RowPermutation& row_perm,
const ColumnPermutation& inverse_col_perm);
void CompactSparseMatrix::PopulateFromMatrixView(const MatrixView& input) {
@@ -591,6 +593,16 @@ void TriangularMatrix::Swap(TriangularMatrix* other) {
other->all_diagonal_coefficients_are_one_);
}
EntryIndex CompactSparseMatrixView::num_entries() const {
return ComputeNumEntries(*this);
}
Fractional CompactSparseMatrixView::ComputeOneNorm() const {
return ComputeOneNormTemplate(*this);
}
Fractional CompactSparseMatrixView::ComputeInfinityNorm() const {
return ComputeInfinityNormTemplate(*this);
}
// Internal function used to finish adding one column to a triangular matrix.
// This sets the diagonal coefficient to the given value, and prepares the
// matrix for the next column addition.
@@ -618,7 +630,7 @@ void TriangularMatrix::AddDiagonalOnlyColumn(Fractional diagonal_value) {
CloseCurrentColumn(diagonal_value);
}
void TriangularMatrix::AddTriangularColumn(const SparseColumn& column,
void TriangularMatrix::AddTriangularColumn(const ColumnView& column,
RowIndex diagonal_row) {
Fractional diagonal_value = 0.0;
for (const SparseColumn::Entry e : column) {
@@ -665,7 +677,7 @@ void TriangularMatrix::PopulateFromTriangularSparseMatrix(
const SparseMatrix& input) {
Reset(input.num_rows());
for (ColIndex col(0); col < input.num_cols(); ++col) {
AddTriangularColumn(input.column(col), ColToRowIndex(col));
AddTriangularColumn(ColumnView(input.column(col)), ColToRowIndex(col));
}
DCHECK(IsLowerTriangular() || IsUpperTriangular());
}
@@ -772,8 +784,8 @@ void TriangularMatrix::UpperSolveInternal(DenseColumn* rhs) const {
// It is faster to iterate this way (instead of i : Column(col)) because of
// cache locality. Note that the floating-point computations are exactly the
// same in both cases.
const EntryIndex last = starts_[col];
for (EntryIndex i(starts_[col + 1] - 1); i >= last; --i) {
const EntryIndex i_end = starts_[col];
for (EntryIndex i(starts_[col + 1] - 1); i >= i_end; --i) {
(*rhs)[EntryRow(i)] -= coeff * EntryCoefficient(i);
}
}
@@ -797,6 +809,9 @@ void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn* rhs) const {
// Note that this is a bit faster than the simpler
// for (const EntryIndex i : Column(col)) {
// EntryIndex i is explicitly not modified in outer iterations, since
// the last entry in column col is stored contiguously just before the
// first entry in column col+1.
const EntryIndex i_end = starts_[col + 1];
for (; i < i_end; ++i) {
sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
@@ -806,18 +821,16 @@ void TriangularMatrix::TransposeUpperSolveInternal(DenseColumn* rhs) const {
}
}
void TriangularMatrix::TransposeLowerSolve(DenseColumn* rhs,
RowIndex* last_non_zero_row) const {
void TriangularMatrix::TransposeLowerSolve(DenseColumn* rhs) const {
if (all_diagonal_coefficients_are_one_) {
TransposeLowerSolveInternal<true>(rhs, last_non_zero_row);
TransposeLowerSolveInternal<true>(rhs);
} else {
TransposeLowerSolveInternal<false>(rhs, last_non_zero_row);
TransposeLowerSolveInternal<false>(rhs);
}
}
template <bool diagonal_of_ones>
void TriangularMatrix::TransposeLowerSolveInternal(
DenseColumn* rhs, RowIndex* last_non_zero_row) const {
void TriangularMatrix::TransposeLowerSolveInternal(DenseColumn* rhs) const {
RETURN_IF_NULL(rhs);
const ColIndex end = first_non_identity_column_;
@@ -826,9 +839,6 @@ void TriangularMatrix::TransposeLowerSolveInternal(
while (col >= end && (*rhs)[ColToRowIndex(col)] == 0.0) {
--col;
}
if (last_non_zero_row != nullptr) {
*last_non_zero_row = ColToRowIndex(col);
}
EntryIndex i = starts_[col + 1] - 1;
for (; col >= end; --col) {
@@ -837,6 +847,9 @@ void TriangularMatrix::TransposeLowerSolveInternal(
// Note that this is a bit faster than the simpler
// for (const EntryIndex i : Column(col)) {
// mainly because we iterate in a good direction for the cache.
// EntryIndex i is explicitly not modified in outer iterations, since
// the last entry in column col is stored contiguously just before the
// first entry in column col+1.
const EntryIndex i_end = starts_[col];
for (; i >= i_end; --i) {
sum -= EntryCoefficient(i) * (*rhs)[EntryRow(i)];
@@ -969,7 +982,7 @@ void TriangularMatrix::PermutedLowerSolve(
DCHECK(lower->CheckNoDuplicates());
}
void TriangularMatrix::PermutedLowerSparseSolve(const SparseColumn& rhs,
void TriangularMatrix::PermutedLowerSparseSolve(const ColumnView& rhs,
const RowPermutation& row_perm,
SparseColumn* lower_column,
SparseColumn* upper_column) {
@@ -1049,7 +1062,7 @@ void TriangularMatrix::PermutedLowerSparseSolve(const SparseColumn& rhs,
// will be given by upper_column_rows_ and it will be populated in reverse
// order.
void TriangularMatrix::PermutedComputeRowsToConsider(
const SparseColumn& rhs, const RowPermutation& row_perm,
const ColumnView& rhs, const RowPermutation& row_perm,
RowIndexVector* lower_column_rows, RowIndexVector* upper_column_rows) {
stored_.resize(num_rows_, false);
marked_.resize(num_rows_, false);

View File

@@ -40,6 +40,8 @@
namespace operations_research {
namespace glop {
class CompactSparseMatrixView;
// --------------------------------------------------------
// SparseMatrix
// --------------------------------------------------------
@@ -273,8 +275,9 @@ extern template void SparseMatrix::PopulateFromTranspose<SparseMatrix>(
extern template void SparseMatrix::PopulateFromPermutedMatrix<SparseMatrix>(
const SparseMatrix& a, const RowPermutation& row_perm,
const ColumnPermutation& inverse_col_perm);
extern template void SparseMatrix::PopulateFromPermutedMatrix<MatrixView>(
const MatrixView& a, const RowPermutation& row_perm,
extern template void
SparseMatrix::PopulateFromPermutedMatrix<CompactSparseMatrixView>(
const CompactSparseMatrixView& a, const RowPermutation& row_perm,
const ColumnPermutation& inverse_col_perm);
// Another matrix representation which is more efficient than a SparseMatrix but
@@ -357,45 +360,6 @@ class CompactSparseMatrix {
Fractional EntryCoefficient(EntryIndex i) const { return coefficients_[i]; }
RowIndex EntryRow(EntryIndex i) const { return rows_[i]; }
// Class to iterate on the entries of a given column with the same interface
// as for SparseColumn.
class ColumnView {
public:
// Clients should pass Entry by value rather than by reference.
// This is because SparseColumnEntry is small (2 pointers and an index) and
// previous profiling of this type of use showed no performance penalty
// (see cl/51057736).
// Example: for(const Entry e : column_view)
typedef SparseColumnEntry Entry;
typedef SparseVectorIterator<Entry> Iterator;
ColumnView(EntryIndex num_entries, const RowIndex* const rows,
const Fractional* const coefficients)
: num_entries_(num_entries), rows_(rows), coefficients_(coefficients) {}
EntryIndex num_entries() const { return num_entries_; }
Fractional EntryCoefficient(EntryIndex i) const {
return coefficients_[i.value()];
}
Fractional GetFirstCoefficient() const {
return EntryCoefficient(EntryIndex(0));
}
RowIndex EntryRow(EntryIndex i) const { return rows_[i.value()]; }
RowIndex GetFirstRow() const { return EntryRow(EntryIndex(0)); }
Iterator begin() const {
return Iterator(this->rows_, this->coefficients_, EntryIndex(0));
}
Iterator end() const {
return Iterator(this->rows_, this->coefficients_, num_entries_);
}
private:
const EntryIndex num_entries_;
const RowIndex* const rows_;
const Fractional* const coefficients_;
};
ColumnView column(ColIndex col) const {
DCHECK_LT(col, num_cols_);
@@ -504,6 +468,34 @@ class CompactSparseMatrix {
DISALLOW_COPY_AND_ASSIGN(CompactSparseMatrix);
};
// A matrix view of the basis columns of a CompactSparseMatrix, with basis
// specified as a RowToColMapping. This class does not take ownership of the
// underlying matrix or basis, and thus they must outlive this class (and keep
// the same address in memory).
class CompactSparseMatrixView {
public:
CompactSparseMatrixView(const CompactSparseMatrix* compact_matrix,
const RowToColMapping* basis)
: compact_matrix_(*compact_matrix), basis_(*basis) {}
// Same behavior as the SparseMatrix functions above.
bool IsEmpty() const { return compact_matrix_.IsEmpty(); }
RowIndex num_rows() const { return compact_matrix_.num_rows(); }
ColIndex num_cols() const { return RowToColIndex(basis_.size()); }
const ColumnView column(ColIndex col) const {
return compact_matrix_.column(basis_[ColToRowIndex(col)]);
}
EntryIndex num_entries() const;
Fractional ComputeOneNorm() const;
Fractional ComputeInfinityNorm() const;
private:
// We require that the underlying CompactSparseMatrix and RowToColMapping
// continue to own the (potentially large) data accessed via this view.
const CompactSparseMatrix& compact_matrix_;
const RowToColMapping& basis_;
};
// Specialization of a CompactSparseMatrix used for triangular matrices.
// To be able to solve triangular systems as efficiently as possible, the
// diagonal entries are stored in a separate vector and not in the underlying
@@ -543,7 +535,7 @@ class TriangularMatrix : private CompactSparseMatrix {
// permutation can be fixed at the end by a call to
// ApplyRowPermutationToNonDiagonalEntries() or accounted directly in the case
// of PermutedLowerSparseSolve().
void AddTriangularColumn(const SparseColumn& column, RowIndex diagonal_row);
void AddTriangularColumn(const ColumnView& column, RowIndex diagonal_row);
void AddTriangularColumnWithGivenDiagonalEntry(const SparseColumn& column,
RowIndex diagonal_row,
Fractional diagonal_value);
@@ -604,8 +596,9 @@ class TriangularMatrix : private CompactSparseMatrix {
// This assumes that the rhs is all zero before the given position.
void LowerSolveStartingAt(ColIndex start, DenseColumn* rhs) const;
// This also computes the last non-zero row position (if not nullptr).
void TransposeLowerSolve(DenseColumn* rhs, RowIndex* last_non_zero_row) const;
// Solves the system Transpose(L).x = rhs, where L is lower triangular.
// This can be used to do a left-solve for a row vector (i.e., y.Y = rhs).
void TransposeLowerSolve(DenseColumn* rhs) const;
// Hyper-sparse version of the triangular solve functions. The passed
// non_zero_rows should contain the positions of the symbolic non-zeros of the
@@ -694,7 +687,7 @@ class TriangularMatrix : private CompactSparseMatrix {
// Note: This function is non-const because ComputeRowsToConsider() also
// prunes the underlying dependency graph of the lower matrix while doing a
// solve. See marked_ and pruned_ends_ below.
void PermutedLowerSparseSolve(const SparseColumn& rhs,
void PermutedLowerSparseSolve(const ColumnView& rhs,
const RowPermutation& row_perm,
SparseColumn* lower, SparseColumn* upper);
@@ -718,9 +711,9 @@ class TriangularMatrix : private CompactSparseMatrix {
// Pruning the graph at the same time is slower but not by too much (< 2x) and
// seems worth doing. Note that when the lower matrix is dense, most of the
// graph will likely be pruned. As a result, the symbolic phase will be
// negligeable compared to the numerical phase so we don't really need a dense
// negligible compared to the numerical phase so we don't really need a dense
// version of PermutedLowerSparseSolve().
void PermutedComputeRowsToConsider(const SparseColumn& rhs,
void PermutedComputeRowsToConsider(const ColumnView& rhs,
const RowPermutation& row_perm,
RowIndexVector* lower_column_rows,
RowIndexVector* upper_column_rows);
@@ -738,8 +731,7 @@ class TriangularMatrix : private CompactSparseMatrix {
template <bool diagonal_of_ones>
void UpperSolveInternal(DenseColumn* rhs) const;
template <bool diagonal_of_ones>
void TransposeLowerSolveInternal(DenseColumn* rhs,
RowIndex* last_non_zero_row) const;
void TransposeLowerSolveInternal(DenseColumn* rhs) const;
template <bool diagonal_of_ones>
void TransposeUpperSolveInternal(DenseColumn* rhs) const;

View File

@@ -37,9 +37,13 @@ class SparseColumnEntry : public SparseVectorEntry<RowIndex> {
};
using SparseColumnIterator = SparseVectorIterator<SparseColumnEntry>;
class ColumnView;
// A SparseColumn is a SparseVector<RowIndex>, with a few methods renamed
// to help readability on the client side.
class SparseColumn : public SparseVector<RowIndex, SparseColumnIterator> {
friend class ColumnView;
public:
SparseColumn() : SparseVector<RowIndex, SparseColumnIterator>() {}
@@ -56,6 +60,49 @@ class SparseColumn : public SparseVector<RowIndex, SparseColumnIterator> {
}
};
// Class to iterate on the entries of a given column with the same interface
// as for SparseColumn.
class ColumnView {
public:
// Clients should pass Entry by value rather than by reference.
// This is because SparseColumnEntry is small (2 pointers and an index) and
// previous profiling of this type of use showed no performance penalty
// (see cl/51057736).
// Example: for(const Entry e : column_view)
typedef SparseColumnEntry Entry;
typedef SparseVectorIterator<Entry> Iterator;
ColumnView(EntryIndex num_entries, const RowIndex* rows,
const Fractional* const coefficients)
: num_entries_(num_entries), rows_(rows), coefficients_(coefficients) {}
explicit ColumnView(const SparseColumn& column)
: num_entries_(column.num_entries()),
rows_(column.index_),
coefficients_(column.coefficient_) {}
EntryIndex num_entries() const { return num_entries_; }
Fractional EntryCoefficient(EntryIndex i) const {
return coefficients_[i.value()];
}
Fractional GetFirstCoefficient() const {
return EntryCoefficient(EntryIndex(0));
}
RowIndex EntryRow(EntryIndex i) const { return rows_[i.value()]; }
RowIndex GetFirstRow() const { return EntryRow(EntryIndex(0)); }
Iterator begin() const {
return Iterator(this->rows_, this->coefficients_, EntryIndex(0));
}
Iterator end() const {
return Iterator(this->rows_, this->coefficients_, num_entries_);
}
private:
const EntryIndex num_entries_;
const RowIndex* const rows_;
const Fractional* const coefficients_;
};
// --------------------------------------------------------
// RandomAccessSparseColumn
// --------------------------------------------------------

View File

@@ -20,7 +20,7 @@ namespace operations_research {
namespace glop {
// Specialization of SparseVectorEntry and SparseVectorIterator for the
// SparseRow class. In addtion to index(), it also provides col() for better
// SparseRow class. In addition to index(), it also provides col() for better
// readability on the client side.
class SparseRowEntry : public SparseVectorEntry<ColIndex> {
public: