OR-Tools  9.1
sparse.h
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14//
15// The following are very good references for terminology, data structures,
16// and algorithms:
17//
18// I.S. Duff, A.M. Erisman and J.K. Reid, "Direct Methods for Sparse Matrices",
19// Clarendon, Oxford, UK, 1987, ISBN 0-19-853421-3,
20// http://www.amazon.com/dp/0198534213.
21//
22//
23// T.A. Davis, "Direct methods for Sparse Linear Systems", SIAM, Philadelphia,
24// 2006, ISBN-13: 978-0-898716-13, http://www.amazon.com/dp/0898716136.
25//
26//
27// Both books also contain a wealth of references.
28
29#ifndef OR_TOOLS_LP_DATA_SPARSE_H_
30#define OR_TOOLS_LP_DATA_SPARSE_H_
31
32#include <cstdint>
33#include <string>
34
41
42namespace operations_research {
43namespace glop {
44
45class CompactSparseMatrixView;
46
47// --------------------------------------------------------
48// SparseMatrix
49// --------------------------------------------------------
50// SparseMatrix is a class for sparse matrices suitable for computation.
51// Data is represented using the so-called compressed-column storage scheme.
52// Entries (row, col, value) are stored by column using a SparseColumn.
53//
54// Citing [Duff et al, 1987], a matrix is sparse if many of its coefficients are
55// zero and if there is an advantage in exploiting its zeros.
56// For practical reasons, not all zeros are exploited (for example those that
57// result from calculations.) The term entry refers to those coefficients that
58// are handled explicitly. All non-zeros are entries while some zero
59// coefficients may also be entries.
60//
61// Note that no special ordering of entries is assumed.
63 public:
65
66 // Useful for testing. This makes it possible to write:
67 // SparseMatrix matrix {
68 // {1, 2, 3},
69 // {4, 5, 6},
70 // {7, 8, 9}};
71#if (!defined(_MSC_VER) || _MSC_VER >= 1800)
73 std::initializer_list<std::initializer_list<Fractional>> init_list);
74#endif
75 // Clears internal data structure, i.e. erases all the columns and set
76 // the number of rows to zero.
77 void Clear();
78
79 // Returns true if the matrix is empty.
80 // That is if num_rows() OR num_cols() are zero.
81 bool IsEmpty() const;
82
83 // Cleans the columns, i.e. removes zero-values entries, removes duplicates
84 // entries and sorts remaining entries in increasing row order.
85 // Call with care: Runs in O(num_cols * column_cleanup), with each column
86 // cleanup running in O(num_entries * log(num_entries)).
87 void CleanUp();
88
89 // Call CheckNoDuplicates() on all columns, useful for doing a DCHECK.
90 bool CheckNoDuplicates() const;
91
92 // Call IsCleanedUp() on all columns, useful for doing a DCHECK.
93 bool IsCleanedUp() const;
94
95 // Change the number of row of this matrix.
96 void SetNumRows(RowIndex num_rows);
97
98 // Appends an empty column and returns its index.
99 ColIndex AppendEmptyColumn();
100
101 // Appends a unit vector defined by the single entry (row, value).
102 // Note that the row should be smaller than the number of rows of the matrix.
103 void AppendUnitVector(RowIndex row, Fractional value);
104
105 // Swaps the content of this SparseMatrix with the one passed as argument.
106 // Works in O(1).
107 void Swap(SparseMatrix* matrix);
108
109 // Populates the matrix with num_cols columns of zeros. As the number of rows
110 // is specified by num_rows, the matrix is not necessarily square.
111 // Previous columns/values are deleted.
112 void PopulateFromZero(RowIndex num_rows, ColIndex num_cols);
113
114 // Populates the matrix from the Identity matrix of size num_cols.
115 // Previous columns/values are deleted.
116 void PopulateFromIdentity(ColIndex num_cols);
117
118 // Populates the matrix from the transposed of the given matrix.
119 // Note that this preserve the property of lower/upper triangular matrix
120 // to have the diagonal coefficients first/last in each columns. It actually
121 // sorts the entries in each columns by their indices.
122 template <typename Matrix>
123 void PopulateFromTranspose(const Matrix& input);
124
125 // Populates a SparseMatrix from another one (copy), note that this run in
126 // O(number of entries in the matrix).
127 void PopulateFromSparseMatrix(const SparseMatrix& matrix);
128
129 // Populates a SparseMatrix from the image of a matrix A through the given
130 // row_perm and inverse_col_perm. See permutation.h for more details.
131 template <typename Matrix>
132 void PopulateFromPermutedMatrix(const Matrix& a,
133 const RowPermutation& row_perm,
134 const ColumnPermutation& inverse_col_perm);
135
136 // Populates a SparseMatrix from the result of alpha * A + beta * B,
137 // where alpha and beta are Fractionals, A and B are sparse matrices.
139 Fractional beta, const SparseMatrix& b);
140
141 // Multiplies SparseMatrix a by SparseMatrix b.
142 void PopulateFromProduct(const SparseMatrix& a, const SparseMatrix& b);
143
144 // Removes the marked columns from the matrix and adjust its size.
145 // This runs in O(num_cols).
146 void DeleteColumns(const DenseBooleanRow& columns_to_delete);
147
148 // Applies the given row permutation and deletes the rows for which
149 // permutation[row] is kInvalidRow. Sets the new number of rows to num_rows.
150 // This runs in O(num_entries).
151 void DeleteRows(RowIndex num_rows, const RowPermutation& permutation);
152
153 // Appends all rows from the given matrix to the calling object after the last
154 // row of the calling object. Both matrices must have the same number of
155 // columns. The method returns true if the rows were added successfully and
156 // false if it can't add the rows because the number of columns of the
157 // matrices are different.
158 bool AppendRowsFromSparseMatrix(const SparseMatrix& matrix);
159
160 // Applies the row permutation.
161 void ApplyRowPermutation(const RowPermutation& row_perm);
162
163 // Returns the coefficient at position row in column col.
164 // Call with care: runs in O(num_entries_in_col) as entries may not be sorted.
165 Fractional LookUpValue(RowIndex row, ColIndex col) const;
166
167 // Returns true if the matrix equals a (with a maximum error smaller than
168 // given the tolerance).
169 bool Equals(const SparseMatrix& a, Fractional tolerance) const;
170
171 // Returns, in min_magnitude and max_magnitude, the minimum and maximum
172 // magnitudes of the non-zero coefficients of the calling object.
173 void ComputeMinAndMaxMagnitudes(Fractional* min_magnitude,
174 Fractional* max_magnitude) const;
175
176 // Return the matrix dimension.
177 RowIndex num_rows() const { return num_rows_; }
178 ColIndex num_cols() const { return ColIndex(columns_.size()); }
179
180 // Access the underlying sparse columns.
181 const SparseColumn& column(ColIndex col) const { return columns_[col]; }
182 SparseColumn* mutable_column(ColIndex col) { return &(columns_[col]); }
183
184 // Returns the total numbers of entries in the matrix.
185 // Runs in O(num_cols).
186 EntryIndex num_entries() const;
187
188 // Computes the 1-norm of the matrix.
189 // The 1-norm |A| is defined as max_j sum_i |a_ij| or
190 // max_col sum_row |a(row,col)|.
192
193 // Computes the oo-norm (infinity-norm) of the matrix.
194 // The oo-norm |A| is defined as max_i sum_j |a_ij| or
195 // max_row sum_col |a(row,col)|.
197
198 // Returns a dense representation of the matrix.
199 std::string Dump() const;
200
201 private:
202 // Resets the internal data structure and create an empty rectangular
203 // matrix of size num_rows x num_cols.
204 void Reset(ColIndex num_cols, RowIndex num_rows);
205
206 // Vector of sparse columns.
208
209 // Number of rows. This is needed as sparse columns don't have a maximum
210 // number of rows.
211 RowIndex num_rows_;
212
213 DISALLOW_COPY_AND_ASSIGN(SparseMatrix);
214};
215
216// A matrix constructed from a list of already existing SparseColumn. This class
217// does not take ownership of the underlying columns, and thus they must outlive
218// this class (and keep the same address in memory).
220 public:
222 explicit MatrixView(const SparseMatrix& matrix) {
223 PopulateFromMatrix(matrix);
224 }
225
226 // Takes all the columns of the given matrix.
227 void PopulateFromMatrix(const SparseMatrix& matrix) {
228 const ColIndex num_cols = matrix.num_cols();
229 columns_.resize(num_cols, nullptr);
230 for (ColIndex col(0); col < num_cols; ++col) {
231 columns_[col] = &matrix.column(col);
232 }
233 num_rows_ = matrix.num_rows();
234 }
235
236 // Takes all the columns of the first matrix followed by the columns of the
237 // second matrix.
239 const SparseMatrix& matrix_b) {
240 const ColIndex num_cols = matrix_a.num_cols() + matrix_b.num_cols();
241 columns_.resize(num_cols, nullptr);
242 for (ColIndex col(0); col < matrix_a.num_cols(); ++col) {
243 columns_[col] = &matrix_a.column(col);
244 }
245 for (ColIndex col(0); col < matrix_b.num_cols(); ++col) {
246 columns_[matrix_a.num_cols() + col] = &matrix_b.column(col);
247 }
248 num_rows_ = std::max(matrix_a.num_rows(), matrix_b.num_rows());
249 }
250
251 // Takes only the columns of the given matrix that belongs to the given basis.
252 void PopulateFromBasis(const MatrixView& matrix,
253 const RowToColMapping& basis) {
254 columns_.resize(RowToColIndex(basis.size()), nullptr);
255 for (RowIndex row(0); row < basis.size(); ++row) {
256 columns_[RowToColIndex(row)] = &matrix.column(basis[row]);
257 }
258 num_rows_ = matrix.num_rows();
259 }
260
261 // Same behavior as the SparseMatrix functions above.
262 bool IsEmpty() const { return columns_.empty(); }
263 RowIndex num_rows() const { return num_rows_; }
264 ColIndex num_cols() const { return columns_.size(); }
265 const SparseColumn& column(ColIndex col) const { return *columns_[col]; }
266 EntryIndex num_entries() const;
269
270 private:
271 RowIndex num_rows_;
273};
274
275extern template void SparseMatrix::PopulateFromTranspose<SparseMatrix>(
276 const SparseMatrix& input);
277extern template void SparseMatrix::PopulateFromPermutedMatrix<SparseMatrix>(
278 const SparseMatrix& a, const RowPermutation& row_perm,
279 const ColumnPermutation& inverse_col_perm);
280extern template void
281SparseMatrix::PopulateFromPermutedMatrix<CompactSparseMatrixView>(
282 const CompactSparseMatrixView& a, const RowPermutation& row_perm,
283 const ColumnPermutation& inverse_col_perm);
284
285// Another matrix representation which is more efficient than a SparseMatrix but
286// doesn't allow matrix modification. It is faster to construct, uses less
287// memory and provides a better cache locality when iterating over the non-zeros
288// of the matrix columns.
290 public:
292
293 // Convenient constructors for tests.
294 // TODO(user): If this is needed in production code, it can be done faster.
295 explicit CompactSparseMatrix(const SparseMatrix& matrix) {
296 PopulateFromMatrixView(MatrixView(matrix));
297 }
298
299 // Creates a CompactSparseMatrix from the given MatrixView. The matrices are
300 // the same, only the representation differ. Note that the entry order in
301 // each column is preserved.
302 void PopulateFromMatrixView(const MatrixView& input);
303
304 // Creates a CompactSparseMatrix by copying the input and adding an identity
305 // matrix to the left of it.
306 void PopulateFromSparseMatrixAndAddSlacks(const SparseMatrix& input);
307
308 // Creates a CompactSparseMatrix from the transpose of the given
309 // CompactSparseMatrix. Note that the entries in each columns will be ordered
310 // by row indices.
311 void PopulateFromTranspose(const CompactSparseMatrix& input);
312
313 // Clears the matrix and sets its number of rows. If none of the Populate()
314 // function has been called, Reset() must be called before calling any of the
315 // Add*() functions below.
316 void Reset(RowIndex num_rows);
317
318 // Adds a dense column to the CompactSparseMatrix (only the non-zero will be
319 // actually stored). This work in O(input.size()) and returns the index of the
320 // added column.
321 ColIndex AddDenseColumn(const DenseColumn& dense_column);
322
323 // Same as AddDenseColumn(), but only adds the non-zero from the given start.
324 ColIndex AddDenseColumnPrefix(const DenseColumn& dense_column,
325 RowIndex start);
326
327 // Same as AddDenseColumn(), but uses the given non_zeros pattern of input.
328 // If non_zeros is empty, this actually calls AddDenseColumn().
329 ColIndex AddDenseColumnWithNonZeros(const DenseColumn& dense_column,
330 const std::vector<RowIndex>& non_zeros);
331
332 // Adds a dense column for which we know the non-zero positions and clears it.
333 // Note that this function supports duplicate indices in non_zeros. The
334 // complexity is in O(non_zeros.size()). Only the indices present in non_zeros
335 // will be cleared. Returns the index of the added column.
336 ColIndex AddAndClearColumnWithNonZeros(DenseColumn* column,
337 std::vector<RowIndex>* non_zeros);
338
339 // Returns the number of entries (i.e. degree) of the given column.
340 EntryIndex ColumnNumEntries(ColIndex col) const {
341 return starts_[col + 1] - starts_[col];
342 }
343
344 // Returns the matrix dimensions. See same functions in SparseMatrix.
345 EntryIndex num_entries() const {
346 DCHECK_EQ(coefficients_.size(), rows_.size());
347 return coefficients_.size();
348 }
349 RowIndex num_rows() const { return num_rows_; }
350 ColIndex num_cols() const { return num_cols_; }
351
352 // Returns whether or not this matrix contains any non-zero entries.
353 bool IsEmpty() const {
354 DCHECK_EQ(coefficients_.size(), rows_.size());
355 return coefficients_.empty();
356 }
357
358 // Functions to iterate on the entries of a given column:
359 // for (const EntryIndex i : compact_matrix_.Column(col)) {
360 // const RowIndex row = compact_matrix_.EntryRow(i);
361 // const Fractional coefficient = compact_matrix_.EntryCoefficient(i);
362 // }
364 return ::util::IntegerRange<EntryIndex>(starts_[col], starts_[col + 1]);
365 }
366 Fractional EntryCoefficient(EntryIndex i) const { return coefficients_[i]; }
367 RowIndex EntryRow(EntryIndex i) const { return rows_[i]; }
368
369 ColumnView column(ColIndex col) const {
370 DCHECK_LT(col, num_cols_);
371
372 // Note that the start may be equal to row.size() if the last columns
373 // are empty, it is why we don't use &row[start].
374 const EntryIndex start = starts_[col];
375 return ColumnView(starts_[col + 1] - start, rows_.data() + start.value(),
376 coefficients_.data() + start.value());
377 }
378
379 // Returns true if the given column is empty. Note that for triangular matrix
380 // this does not include the diagonal coefficient (see below).
381 bool ColumnIsEmpty(ColIndex col) const {
382 return starts_[col + 1] == starts_[col];
383 }
384
385 // Returns the scalar product of the given row vector with the column of index
386 // col of this matrix. This function is declared in the .h for efficiency.
387 Fractional ColumnScalarProduct(ColIndex col, const DenseRow& vector) const {
388 Fractional result = 0.0;
389 for (const EntryIndex i : Column(col)) {
390 result += EntryCoefficient(i) * vector[RowToColIndex(EntryRow(i))];
391 }
392 return result;
393 }
394
395 // Adds a multiple of the given column of this matrix to the given
396 // dense_column. If multiplier is 0.0, this function does nothing. This
397 // function is declared in the .h for efficiency.
399 DenseColumn* dense_column) const {
400 if (multiplier == 0.0) return;
401 RETURN_IF_NULL(dense_column);
402 for (const EntryIndex i : Column(col)) {
403 (*dense_column)[EntryRow(i)] += multiplier * EntryCoefficient(i);
404 }
405 }
406
407 // Same as ColumnAddMultipleToDenseColumn() but also adds the new non-zeros to
408 // the non_zeros vector. A non-zero is "new" if is_non_zero[row] was false,
409 // and we update dense_column[row]. This function also updates is_non_zero.
411 Fractional multiplier,
412 ScatteredColumn* column) const {
413 if (multiplier == 0.0) return;
414 RETURN_IF_NULL(column);
415 for (const EntryIndex i : Column(col)) {
416 const RowIndex row = EntryRow(i);
417 column->Add(row, multiplier * EntryCoefficient(i));
418 }
419 }
420
421 // Copies the given column of this matrix into the given dense_column.
422 // This function is declared in the .h for efficiency.
423 void ColumnCopyToDenseColumn(ColIndex col, DenseColumn* dense_column) const {
424 RETURN_IF_NULL(dense_column);
425 dense_column->AssignToZero(num_rows_);
426 ColumnCopyToClearedDenseColumn(col, dense_column);
427 }
428
429 // Same as ColumnCopyToDenseColumn() but assumes the column to be initially
430 // all zero.
432 DenseColumn* dense_column) const {
433 RETURN_IF_NULL(dense_column);
434 dense_column->resize(num_rows_, 0.0);
435 for (const EntryIndex i : Column(col)) {
436 (*dense_column)[EntryRow(i)] = EntryCoefficient(i);
437 }
438 }
439
440 // Same as ColumnCopyToClearedDenseColumn() but also fills non_zeros.
442 ColIndex col, DenseColumn* dense_column,
443 RowIndexVector* non_zeros) const {
444 RETURN_IF_NULL(dense_column);
445 dense_column->resize(num_rows_, 0.0);
446 non_zeros->clear();
447 for (const EntryIndex i : Column(col)) {
448 const RowIndex row = EntryRow(i);
449 (*dense_column)[row] = EntryCoefficient(i);
450 non_zeros->push_back(row);
451 }
452 }
453
454 void Swap(CompactSparseMatrix* other);
455
456 protected:
457 // The matrix dimensions, properly updated by full and incremental builders.
458 RowIndex num_rows_;
459 ColIndex num_cols_;
460
461 // Holds the columns non-zero coefficients and row positions.
462 // The entries for the column of index col are stored in the entries
463 // [starts_[col], starts_[col + 1]).
467
468 private:
470};
471
472// A matrix view of the basis columns of a CompactSparseMatrix, with basis
473// specified as a RowToColMapping. This class does not take ownership of the
474// underlying matrix or basis, and thus they must outlive this class (and keep
475// the same address in memory).
477 public:
479 const RowToColMapping* basis)
480 : compact_matrix_(*compact_matrix),
481 columns_(basis->data(), basis->size().value()) {}
483 const std::vector<ColIndex>* columns)
484 : compact_matrix_(*compact_matrix), columns_(*columns) {}
485
486 // Same behavior as the SparseMatrix functions above.
487 bool IsEmpty() const { return compact_matrix_.IsEmpty(); }
488 RowIndex num_rows() const { return compact_matrix_.num_rows(); }
489 ColIndex num_cols() const { return ColIndex(columns_.size()); }
490 const ColumnView column(ColIndex col) const {
491 return compact_matrix_.column(columns_[col.value()]);
492 }
493 EntryIndex num_entries() const;
494 Fractional ComputeOneNorm() const;
496
497 private:
498 // We require that the underlying CompactSparseMatrix and RowToColMapping
499 // continue to own the (potentially large) data accessed via this view.
500 const CompactSparseMatrix& compact_matrix_;
501 const absl::Span<const ColIndex> columns_;
502};
503
504// Specialization of a CompactSparseMatrix used for triangular matrices.
505// To be able to solve triangular systems as efficiently as possible, the
506// diagonal entries are stored in a separate vector and not in the underlying
507// CompactSparseMatrix.
508//
509// Advanced usage: this class also support matrices that can be permuted into a
510// triangular matrix and some functions work directly on such matrices.
512 public:
513 TriangularMatrix() : all_diagonal_coefficients_are_one_(true) {}
514
515 // Only a subset of the functions from CompactSparseMatrix are exposed (note
516 // the private inheritance). They are extended to deal with diagonal
517 // coefficients properly.
518 void PopulateFromTranspose(const TriangularMatrix& input);
519 void Swap(TriangularMatrix* other);
520 bool IsEmpty() const { return diagonal_coefficients_.empty(); }
521 RowIndex num_rows() const { return num_rows_; }
522 ColIndex num_cols() const { return num_cols_; }
523 EntryIndex num_entries() const {
524 return EntryIndex(num_cols_.value()) + coefficients_.size();
525 }
526
527 // On top of the CompactSparseMatrix functionality, TriangularMatrix::Reset()
528 // also pre-allocates space of size col_size for a number of internal vectors.
529 // This helps reduce costly push_back operations for large problems.
530 //
531 // WARNING: Reset() must be called with a sufficiently large col_capacity
532 // prior to any Add* calls (e.g., AddTriangularColumn).
533 void Reset(RowIndex num_rows, ColIndex col_capacity);
534
535 // Constructs a triangular matrix from the given SparseMatrix. The input is
536 // assumed to be lower or upper triangular without any permutations. This is
537 // checked in debug mode.
538 void PopulateFromTriangularSparseMatrix(const SparseMatrix& input);
539
540 // Functions to create a triangular matrix incrementally, column by column.
541 // A client needs to call Reset(num_rows) first, and then each column must be
542 // added by calling one of the 3 functions below.
543 //
544 // Note that the row indices of the columns are allowed to be permuted: the
545 // diagonal entry of the column #col not being necessarily on the row #col.
546 // This is why these functions require the 'diagonal_row' parameter. The
547 // permutation can be fixed at the end by a call to
548 // ApplyRowPermutationToNonDiagonalEntries() or accounted directly in the case
549 // of PermutedLowerSparseSolve().
550 void AddTriangularColumn(const ColumnView& column, RowIndex diagonal_row);
551 void AddTriangularColumnWithGivenDiagonalEntry(const SparseColumn& column,
552 RowIndex diagonal_row,
553 Fractional diagonal_value);
554 void AddDiagonalOnlyColumn(Fractional diagonal_value);
555
556 // Adds the given sparse column divided by diagonal_coefficient.
557 // The diagonal_row is assumed to be present and its value should be the
558 // same as the one given in diagonal_coefficient. Note that this function
559 // tests for zero coefficients in the input column and removes them.
560 void AddAndNormalizeTriangularColumn(const SparseColumn& column,
561 RowIndex diagonal_row,
562 Fractional diagonal_coefficient);
563
564 // Applies the given row permutation to all entries except the diagonal ones.
565 void ApplyRowPermutationToNonDiagonalEntries(const RowPermutation& row_perm);
566
567 // Copy a triangular column with its diagonal entry to the given SparseColumn.
568 void CopyColumnToSparseColumn(ColIndex col, SparseColumn* output) const;
569
570 // Copy a triangular matrix to the given SparseMatrix.
571 void CopyToSparseMatrix(SparseMatrix* output) const;
572
573 // Returns the index of the first column which is not an identity column (i.e.
574 // a column j with only one entry of value 1 at the j-th row). This is always
575 // zero if the matrix is not triangular.
576 ColIndex GetFirstNonIdentityColumn() const {
577 return first_non_identity_column_;
578 }
579
580 // Returns the diagonal coefficient of the given column.
582 return diagonal_coefficients_[col];
583 }
584
585 // Returns true iff the column contains no non-diagonal entries.
586 bool ColumnIsDiagonalOnly(ColIndex col) const {
588 }
589
590 // --------------------------------------------------------------------------
591 // Triangular solve functions.
592 //
593 // All the functions containing the word Lower (resp. Upper) require the
594 // matrix to be lower (resp. upper) triangular without any permutation.
595 // --------------------------------------------------------------------------
596
597 // Solve the system L.x = rhs for a lower triangular matrix.
598 // The result overwrite rhs.
599 void LowerSolve(DenseColumn* rhs) const;
600
601 // Solves the system U.x = rhs for an upper triangular matrix.
602 void UpperSolve(DenseColumn* rhs) const;
603
604 // Solves the system Transpose(U).x = rhs where U is upper triangular.
605 // This can be used to do a left-solve for a row vector (i.e. y.Y = rhs).
606 void TransposeUpperSolve(DenseColumn* rhs) const;
607
608 // This assumes that the rhs is all zero before the given position.
609 void LowerSolveStartingAt(ColIndex start, DenseColumn* rhs) const;
610
611 // Solves the system Transpose(L).x = rhs, where L is lower triangular.
612 // This can be used to do a left-solve for a row vector (i.e., y.Y = rhs).
613 void TransposeLowerSolve(DenseColumn* rhs) const;
614
615 // Hyper-sparse version of the triangular solve functions. The passed
616 // non_zero_rows should contain the positions of the symbolic non-zeros of the
617 // result in the order in which they need to be accessed (or in the reverse
618 // order for the Reverse*() versions).
619 //
620 // The non-zero vector is mutable so that the symbolic non-zeros that are
621 // actually zero because of numerical cancellations can be removed.
622 //
623 // The non-zeros can be computed by one of these two methods:
624 // - ComputeRowsToConsiderWithDfs() which will give them in the reverse order
625 // of the one they need to be accessed in. This is only a topological order,
626 // and it will not necessarily be "sorted".
627 // - ComputeRowsToConsiderInSortedOrder() which will always give them in
628 // increasing order.
629 //
630 // Note that if the non-zeros are given in a sorted order, then the
631 // hyper-sparse functions will return EXACTLY the same results as the non
632 // hyper-sparse version above.
633 //
634 // For a given solve, here is the required order:
635 // - For a lower solve, increasing non-zeros order.
636 // - For an upper solve, decreasing non-zeros order.
637 // - for a transpose lower solve, decreasing non-zeros order.
638 // - for a transpose upper solve, increasing non_zeros order.
639 //
640 // For a general discussion of hyper-sparsity in LP, see:
641 // J.A.J. Hall, K.I.M. McKinnon, "Exploiting hyper-sparsity in the revised
642 // simplex method", December 1999, MS 99-014.
643 // http://www.maths.ed.ac.uk/hall/MS-99/MS9914.pdf
644 void HyperSparseSolve(DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
645 void HyperSparseSolveWithReversedNonZeros(
646 DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
647 void TransposeHyperSparseSolve(DenseColumn* rhs,
648 RowIndexVector* non_zero_rows) const;
649 void TransposeHyperSparseSolveWithReversedNonZeros(
650 DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
651
652 // Given the positions of the non-zeros of a vector, computes the non-zero
653 // positions of the vector after a solve by this triangular matrix. The order
654 // of the returned non-zero positions will be in the REVERSE elimination
655 // order. If the function detects that there are too many non-zeros, then it
656 // aborts early and non_zero_rows is cleared.
657 void ComputeRowsToConsiderWithDfs(RowIndexVector* non_zero_rows) const;
658
659 // Same as TriangularComputeRowsToConsider() but always returns the non-zeros
660 // sorted by rows. It is up to the client to call the direct or reverse
661 // hyper-sparse solve function depending if the matrix is upper or lower
662 // triangular.
663 void ComputeRowsToConsiderInSortedOrder(RowIndexVector* non_zero_rows,
664 Fractional sparsity_ratio,
665 Fractional num_ops_ratio) const;
666 void ComputeRowsToConsiderInSortedOrder(RowIndexVector* non_zero_rows) const;
667 // This is currently only used for testing. It achieves the same result as
668 // PermutedLowerSparseSolve() below, but the latter exploits the sparsity of
669 // rhs and is thus faster for our use case.
670 //
671 // Note that partial_inverse_row_perm only permutes the first k rows, where k
672 // is the same as partial_inverse_row_perm.size(). It is the inverse
673 // permutation of row_perm which only permutes k rows into [0, k), the
674 // other row images being kInvalidRow. The other arguments are the same as
675 // for PermutedLowerSparseSolve() and described there.
676 //
677 // IMPORTANT: lower will contain all the "symbolic" non-zero entries.
678 // A "symbolic" zero entry is one that will be zero whatever the coefficients
679 // of the rhs entries. That is it only depends on the position of its
680 // entries, not on their values. Thus, some of its coefficients may be zero.
681 // This fact is exploited by the LU factorization code. The zero coefficients
682 // of upper will be cleaned, however.
683 void PermutedLowerSolve(const SparseColumn& rhs,
684 const RowPermutation& row_perm,
685 const RowMapping& partial_inverse_row_perm,
686 SparseColumn* lower, SparseColumn* upper) const;
687
688 // This solves a lower triangular system with only ones on the diagonal where
689 // the matrix and the input rhs are permuted by the inverse of row_perm. Note
690 // that the output will also be permuted by the inverse of row_perm. The
691 // function also supports partial permutation. That is if row_perm[i] < 0 then
692 // column row_perm[i] is assumed to be an identity column.
693 //
694 // The output is given as follow:
695 // - lower is cleared, and receives the rows for which row_perm[row] < 0
696 // meaning not yet examined as a pivot (see markowitz.cc).
697 // - upper is NOT cleared, and the other rows (row_perm[row] >= 0) are
698 // appended to it.
699 // - Note that lower and upper can point to the same SparseColumn.
700 //
701 // Note: This function is non-const because ComputeRowsToConsider() also
702 // prunes the underlying dependency graph of the lower matrix while doing a
703 // solve. See marked_ and pruned_ends_ below.
704 void PermutedLowerSparseSolve(const ColumnView& rhs,
705 const RowPermutation& row_perm,
706 SparseColumn* lower, SparseColumn* upper);
707
708 // This is used to compute the deterministic time of a matrix factorization.
710 return num_fp_operations_;
711 }
712
713 // To be used in DEBUG mode by the client code. This check that the matrix is
714 // lower- (resp. upper-) triangular without any permutation and that there is
715 // no zero on the diagonal. We can't do that on each Solve() that require so,
716 // otherwise it will be too slow in debug.
717 bool IsLowerTriangular() const;
718 bool IsUpperTriangular() const;
719
720 // Visible for testing. This is used by PermutedLowerSparseSolve() to compute
721 // the non-zero indices of the result. The output is as follow:
722 // - lower_column_rows will contains the rows for which row_perm[row] < 0.
723 // - upper_column_rows will contains the other rows in the reverse topological
724 // order in which they should be considered in PermutedLowerSparseSolve().
725 //
726 // This function is non-const because it prunes the underlying dependency
727 // graph of the lower matrix while doing a solve. See marked_ and pruned_ends_
728 // below.
729 //
730 // Pruning the graph at the same time is slower but not by too much (< 2x) and
731 // seems worth doing. Note that when the lower matrix is dense, most of the
732 // graph will likely be pruned. As a result, the symbolic phase will be
733 // negligible compared to the numerical phase so we don't really need a dense
734 // version of PermutedLowerSparseSolve().
735 void PermutedComputeRowsToConsider(const ColumnView& rhs,
736 const RowPermutation& row_perm,
737 RowIndexVector* lower_column_rows,
738 RowIndexVector* upper_column_rows);
739
740 // The upper bound is computed using one of the algorithm presented in
741 // "A Survey of Condition Number Estimation for Triangular Matrices"
742 // https://epubs.siam.org/doi/pdf/10.1137/1029112
743 Fractional ComputeInverseInfinityNormUpperBound() const;
744 Fractional ComputeInverseInfinityNorm() const;
745
746 private:
747 // Internal versions of some Solve() functions to avoid code duplication.
748 template <bool diagonal_of_ones>
749 void LowerSolveStartingAtInternal(ColIndex start, DenseColumn* rhs) const;
750 template <bool diagonal_of_ones>
751 void UpperSolveInternal(DenseColumn* rhs) const;
752 template <bool diagonal_of_ones>
753 void TransposeLowerSolveInternal(DenseColumn* rhs) const;
754 template <bool diagonal_of_ones>
755 void TransposeUpperSolveInternal(DenseColumn* rhs) const;
756 template <bool diagonal_of_ones>
757 void HyperSparseSolveInternal(DenseColumn* rhs,
758 RowIndexVector* non_zero_rows) const;
759 template <bool diagonal_of_ones>
760 void HyperSparseSolveWithReversedNonZerosInternal(
761 DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
762 template <bool diagonal_of_ones>
763 void TransposeHyperSparseSolveInternal(DenseColumn* rhs,
764 RowIndexVector* non_zero_rows) const;
765 template <bool diagonal_of_ones>
766 void TransposeHyperSparseSolveWithReversedNonZerosInternal(
767 DenseColumn* rhs, RowIndexVector* non_zero_rows) const;
768
769 // Internal function used by the Add*() functions to finish adding
770 // a new column to a triangular matrix.
771 void CloseCurrentColumn(Fractional diagonal_value);
772
773 // Extra data for "triangular" matrices. The diagonal coefficients are
774 // stored in a separate vector instead of being stored in each column.
775 StrictITIVector<ColIndex, Fractional> diagonal_coefficients_;
776
777 // Index of the first column which is not a diagonal only column with a
778 // coefficient of 1. This is used to optimize the solves.
779 ColIndex first_non_identity_column_;
780
781 // This common case allows for more efficient Solve() functions.
782 // TODO(user): Do not even construct diagonal_coefficients_ in this case?
783 bool all_diagonal_coefficients_are_one_;
784
785 // For the hyper-sparse version. These are used to implement a DFS, see
786 // TriangularComputeRowsToConsider() for more details.
787 mutable DenseBooleanColumn stored_;
788 mutable std::vector<RowIndex> nodes_to_explore_;
789
790 // For PermutedLowerSparseSolve().
791 int64_t num_fp_operations_;
792 mutable std::vector<RowIndex> lower_column_rows_;
793 mutable std::vector<RowIndex> upper_column_rows_;
794 mutable DenseColumn initially_all_zero_scratchpad_;
795
796 // This boolean vector is used to detect entries that can be pruned during
797 // the DFS used for the symbolic phase of ComputeRowsToConsider().
798 //
799 // Problem: We have a DAG where each node has outgoing arcs towards other
800 // nodes (this adjacency list is NOT sorted by any order). We want to compute
801 // the reachability of a set of nodes S and its topological order. While doing
802 // this, we also want to prune the adjacency lists to exploit the simple fact
803 // that if a -> (b, c) and b -> (c) then c can be removed from the adjacency
804 // list of a since it will be implied through b. Note that this doesn't change
805 // the reachability of any set nor a valid topological ordering of such a set.
806 //
807 // The concept is known as the transitive reduction of a DAG, see
808 // http://en.wikipedia.org/wiki/Transitive_reduction.
809 //
810 // Heuristic algorithm: While doing the DFS to compute Reach(S) and its
811 // topological order, each time we process a node, we mark all its adjacent
812 // nodes while going down in the DFS, and then we unmark all of them when we go
813 // back up. During the un-marking, if a node is already un-marked, it means
814 // that it was implied by some other path starting at the current node and we
815 // can prune it and remove it from the adjacency list of the current node.
816 //
817 // Note(user): I couldn't find any reference for this algorithm, even though
818 // I suspect I am not the first one to need something similar.
819 mutable DenseBooleanColumn marked_;
820
821 // This is used to represent a pruned sub-matrix of the current matrix that
822 // corresponds to the pruned DAG as described in the comment above for
823 // marked_. This vector is used to encode the sub-matrix as follows:
824 // - Both the rows and the coefficients of the pruned matrix are still stored
825 // in rows_ and coefficients_.
826 // - The data of column 'col' is still stored starting at starts_[col].
827 // - But, its end is given by pruned_ends_[col] instead of starts_[col + 1].
828 //
829 // The idea of using a smaller graph for the symbolic phase is well known in
830 // sparse linear algebra. See:
831 // - John R. Gilbert and Joseph W. H. Liu, "Elimination structures for
832 // unsymmetric sparse LU factors", Tech. Report CS-90-11. Department of
833 // Computer Science, York University, North York, Ontario, Canada, 1990.
834 // - Stanley C. Eisenstat and Joseph W. H. Liu, "Exploiting structural
835 // symmetry in a sparse partial pivoting code". SIAM J. Sci. Comput. Vol
836 // 14, No 1, pp. 253-257, January 1993.
837 //
838 // Note that we use an original algorithm and prune the graph while performing
839 // the symbolic phase. Hence the pruning will only benefit the next symbolic
840 // phase. This is different from Eisenstat-Liu's symmetric pruning. It is
841 // still a heuristic and will not necessarily find the minimal graph that
842 // has the same result for the symbolic phase though.
843 //
844 // TODO(user): Use this during the "normal" hyper-sparse solves so that
845 // we can benefit from the pruned lower matrix there?
847
849};
850
851} // namespace glop
852} // namespace operations_research
853
854#endif // OR_TOOLS_LP_DATA_SPARSE_H_
int64_t max
Definition: alldiff_cst.cc:140
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:889
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:886
bool ColumnIsEmpty(ColIndex col) const
Definition: sparse.h:381
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:423
StrictITIVector< ColIndex, EntryIndex > starts_
Definition: sparse.h:466
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:410
void ColumnCopyToClearedDenseColumnWithNonZeros(ColIndex col, DenseColumn *dense_column, RowIndexVector *non_zeros) const
Definition: sparse.h:441
StrictITIVector< EntryIndex, RowIndex > rows_
Definition: sparse.h:465
CompactSparseMatrix(const SparseMatrix &matrix)
Definition: sparse.h:295
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse.h:366
StrictITIVector< EntryIndex, Fractional > coefficients_
Definition: sparse.h:464
void ColumnCopyToClearedDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:431
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition: sparse.h:387
::util::IntegerRange< EntryIndex > Column(ColIndex col) const
Definition: sparse.h:363
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:398
RowIndex EntryRow(EntryIndex i) const
Definition: sparse.h:367
ColumnView column(ColIndex col) const
Definition: sparse.h:369
EntryIndex ColumnNumEntries(ColIndex col) const
Definition: sparse.h:340
const ColumnView column(ColIndex col) const
Definition: sparse.h:490
CompactSparseMatrixView(const CompactSparseMatrix *compact_matrix, const RowToColMapping *basis)
Definition: sparse.h:478
CompactSparseMatrixView(const CompactSparseMatrix *compact_matrix, const std::vector< ColIndex > *columns)
Definition: sparse.h:482
Fractional ComputeInfinityNorm() const
Definition: sparse.cc:423
void PopulateFromMatrix(const SparseMatrix &matrix)
Definition: sparse.h:227
Fractional ComputeOneNorm() const
Definition: sparse.cc:420
const SparseColumn & column(ColIndex col) const
Definition: sparse.h:265
void PopulateFromMatrixPair(const SparseMatrix &matrix_a, const SparseMatrix &matrix_b)
Definition: sparse.h:238
MatrixView(const SparseMatrix &matrix)
Definition: sparse.h:222
void PopulateFromBasis(const MatrixView &matrix, const RowToColMapping &basis)
Definition: sparse.h:252
EntryIndex num_entries() const
Definition: sparse.cc:419
void AppendUnitVector(RowIndex row, Fractional value)
Definition: sparse.cc:151
void PopulateFromLinearCombination(Fractional alpha, const SparseMatrix &a, Fractional beta, const SparseMatrix &b)
Definition: sparse.cc:225
void PopulateFromPermutedMatrix(const Matrix &a, const RowPermutation &row_perm, const ColumnPermutation &inverse_col_perm)
Definition: sparse.cc:212
void PopulateFromTranspose(const Matrix &input)
Definition: sparse.cc:181
void PopulateFromIdentity(ColIndex num_cols)
Definition: sparse.cc:172
Fractional ComputeInfinityNorm() const
Definition: sparse.cc:395
void SetNumRows(RowIndex num_rows)
Definition: sparse.cc:143
SparseColumn * mutable_column(ColIndex col)
Definition: sparse.h:182
Fractional LookUpValue(RowIndex row, ColIndex col) const
Definition: sparse.cc:323
void Swap(SparseMatrix *matrix)
Definition: sparse.cc:158
void ComputeMinAndMaxMagnitudes(Fractional *min_magnitude, Fractional *max_magnitude) const
Definition: sparse.cc:369
const SparseColumn & column(ColIndex col) const
Definition: sparse.h:181
void DeleteRows(RowIndex num_rows, const RowPermutation &permutation)
Definition: sparse.cc:289
void PopulateFromProduct(const SparseMatrix &a, const SparseMatrix &b)
Definition: sparse.cc:250
bool AppendRowsFromSparseMatrix(const SparseMatrix &matrix)
Definition: sparse.cc:302
void DeleteColumns(const DenseBooleanRow &columns_to_delete)
Definition: sparse.cc:276
void PopulateFromSparseMatrix(const SparseMatrix &matrix)
Definition: sparse.cc:206
void ApplyRowPermutation(const RowPermutation &row_perm)
Definition: sparse.cc:316
void PopulateFromZero(RowIndex num_rows, ColIndex num_cols)
Definition: sparse.cc:164
bool Equals(const SparseMatrix &a, Fractional tolerance) const
Definition: sparse.cc:327
Fractional GetDiagonalCoefficient(ColIndex col) const
Definition: sparse.h:581
int64_t NumFpOperationsInLastPermutedLowerSparseSolve() const
Definition: sparse.h:709
bool ColumnIsDiagonalOnly(ColIndex col) const
Definition: sparse.h:586
int64_t b
int64_t a
int64_t value
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: macros.h:29
ColIndex col
Definition: markowitz.cc:183
RowIndex row
Definition: markowitz.cc:182
Permutation< ColIndex > ColumnPermutation
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
std::vector< RowIndex > RowIndexVector
Definition: lp_types.h:313
Permutation< RowIndex > RowPermutation
IntegerValue ComputeInfinityNorm(const LinearConstraint &constraint)
Collection of objects used to extend the Constraint Solver library.
static int input(yyscan_t yyscanner)
EntryIndex num_entries
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
void Add(Index index, Fractional value)