// OR-Tools 9.1 -- ortools/glop/markowitz.cc
// NOTE(review): this file was recovered from a Doxygen HTML page; the page
// chrome ("Go to the documentation of this file.") has been removed.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
14 #include "ortools/glop/markowitz.h"
15 
16 #include <cstdint>
17 #include <limits>
18 
19 #include "absl/strings/str_format.h"
22 #include "ortools/lp_data/sparse.h"
23 
24 namespace operations_research {
25 namespace glop {
26 
28  const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
29  ColumnPermutation* col_perm) {
30  SCOPED_TIME_STAT(&stats_);
31  Clear();
32  const RowIndex num_rows = basis_matrix.num_rows();
33  const ColIndex num_cols = basis_matrix.num_cols();
34  col_perm->assign(num_cols, kInvalidCol);
35  row_perm->assign(num_rows, kInvalidRow);
36 
37  // Get the empty matrix corner case out of the way.
38  if (basis_matrix.IsEmpty()) return Status::OK();
39  basis_matrix_ = &basis_matrix;
40 
41  // Initialize all the matrices.
42  lower_.Reset(num_rows, num_cols);
43  upper_.Reset(num_rows, num_cols);
44  permuted_lower_.Reset(num_cols);
45  permuted_upper_.Reset(num_cols);
46  permuted_lower_column_needs_solve_.assign(num_cols, false);
47  contains_only_singleton_columns_ = true;
48 
49  // Start by moving the singleton columns to the front and by putting their
50  // non-zero coefficient on the diagonal. The general algorithm below would
51  // have the same effect, but this function is a lot faster.
52  int index = 0;
53  ExtractSingletonColumns(basis_matrix, row_perm, col_perm, &index);
54  ExtractResidualSingletonColumns(basis_matrix, row_perm, col_perm, &index);
55  int stats_num_pivots_without_fill_in = index;
56  int stats_degree_two_pivot_columns = 0;
57 
58  // Initialize residual_matrix_non_zero_ with the submatrix left after we
59  // removed the singleton and residual singleton columns.
60  residual_matrix_non_zero_.InitializeFromMatrixSubset(
61  basis_matrix, *row_perm, *col_perm, &singleton_column_, &singleton_row_);
62 
63  // Perform Gaussian elimination.
64  const int end_index = std::min(num_rows.value(), num_cols.value());
65  const Fractional singularity_threshold =
66  parameters_.markowitz_singularity_threshold();
67  while (index < end_index) {
68  Fractional pivot_coefficient = 0.0;
69  RowIndex pivot_row = kInvalidRow;
70  ColIndex pivot_col = kInvalidCol;
71 
72  // TODO(user): If we don't need L and U, we can abort when the residual
73  // matrix becomes dense (i.e. when its density factor is above a certain
74  // threshold). The residual size is 'end_index - index' and the
75  // density can either be computed exactly or estimated from min_markowitz.
76  const int64_t min_markowitz = FindPivot(*row_perm, *col_perm, &pivot_row,
77  &pivot_col, &pivot_coefficient);
78 
79  // Singular matrix? No pivot will be selected if a column has no entries. If
80  // a column has some entries, then we are sure that a pivot will be selected
81  // but its magnitude can be really close to zero. In both cases, we
82  // report the singularity of the matrix.
83  if (pivot_row == kInvalidRow || pivot_col == kInvalidCol ||
84  std::abs(pivot_coefficient) <= singularity_threshold) {
85  const std::string error_message = absl::StrFormat(
86  "The matrix is singular! pivot = %E", pivot_coefficient);
87  VLOG(1) << "ERROR_LU: " << error_message;
88  return Status(Status::ERROR_LU, error_message);
89  }
90  DCHECK_EQ((*row_perm)[pivot_row], kInvalidRow);
91  DCHECK_EQ((*col_perm)[pivot_col], kInvalidCol);
92 
93  // Update residual_matrix_non_zero_.
94  // TODO(user): This step can be skipped, once a fully dense matrix is
95  // obtained. But note that permuted_lower_column_needs_solve_ needs to be
96  // updated.
97  const int pivot_col_degree = residual_matrix_non_zero_.ColDegree(pivot_col);
98  const int pivot_row_degree = residual_matrix_non_zero_.RowDegree(pivot_row);
99  residual_matrix_non_zero_.DeleteRowAndColumn(pivot_row, pivot_col);
100  if (min_markowitz == 0) {
101  ++stats_num_pivots_without_fill_in;
102  if (pivot_col_degree == 1) {
103  RemoveRowFromResidualMatrix(pivot_row, pivot_col);
104  } else {
105  DCHECK_EQ(pivot_row_degree, 1);
106  RemoveColumnFromResidualMatrix(pivot_row, pivot_col);
107  }
108  } else {
109  // TODO(user): Note that in some rare cases, because of numerical
110  // cancellation, the column degree may actually be smaller than
111  // pivot_col_degree. Exploit that better?
113  if (pivot_col_degree == 2) { ++stats_degree_two_pivot_columns; });
114  UpdateResidualMatrix(pivot_row, pivot_col);
115  }
116 
117  if (contains_only_singleton_columns_) {
118  DCHECK(permuted_upper_.column(pivot_col).IsEmpty());
119  lower_.AddDiagonalOnlyColumn(1.0);
120  upper_.AddTriangularColumn(basis_matrix.column(pivot_col), pivot_row);
121  } else {
122  lower_.AddAndNormalizeTriangularColumn(permuted_lower_.column(pivot_col),
123  pivot_row, pivot_coefficient);
124  permuted_lower_.ClearAndReleaseColumn(pivot_col);
125 
127  permuted_upper_.column(pivot_col), pivot_row, pivot_coefficient);
128  permuted_upper_.ClearAndReleaseColumn(pivot_col);
129  }
130 
131  // Update the permutations.
132  (*col_perm)[pivot_col] = ColIndex(index);
133  (*row_perm)[pivot_row] = RowIndex(index);
134  ++index;
135  }
136 
137  // To get a better deterministic time, we add a factor that depend on the
138  // final number of entries in the result.
139  num_fp_operations_ += 10 * lower_.num_entries().value();
140  num_fp_operations_ += 10 * upper_.num_entries().value();
141 
142  stats_.pivots_without_fill_in_ratio.Add(
143  1.0 * stats_num_pivots_without_fill_in / num_rows.value());
144  stats_.degree_two_pivot_columns.Add(1.0 * stats_degree_two_pivot_columns /
145  num_rows.value());
146  return Status::OK();
147 }
148 
150  RowPermutation* row_perm,
151  ColumnPermutation* col_perm,
152  TriangularMatrix* lower, TriangularMatrix* upper) {
153  // The two first swaps allow to use less memory since this way upper_
154  // and lower_ will always stay empty at the end of this function.
155  lower_.Swap(lower);
156  upper_.Swap(upper);
158  ComputeRowAndColumnPermutation(basis_matrix, row_perm, col_perm));
159  SCOPED_TIME_STAT(&stats_);
160  lower_.ApplyRowPermutationToNonDiagonalEntries(*row_perm);
161  upper_.ApplyRowPermutationToNonDiagonalEntries(*row_perm);
162  lower_.Swap(lower);
163  upper_.Swap(upper);
164  DCHECK(lower->IsLowerTriangular());
165  DCHECK(upper->IsUpperTriangular());
166  return Status::OK();
167 }
168 
170  SCOPED_TIME_STAT(&stats_);
171  permuted_lower_.Clear();
172  permuted_upper_.Clear();
173  residual_matrix_non_zero_.Clear();
174  col_by_degree_.Clear();
175  examined_col_.clear();
176  num_fp_operations_ = 0;
177  is_col_by_degree_initialized_ = false;
178 }
179 
180 namespace {
181 struct MatrixEntry {
182  RowIndex row;
183  ColIndex col;
185  MatrixEntry(RowIndex r, ColIndex c, Fractional coeff)
186  : row(r), col(c), coefficient(coeff) {}
187  bool operator<(const MatrixEntry& o) const {
188  return (row == o.row) ? col < o.col : row < o.row;
189  }
190 };
191 
192 } // namespace
193 
194 void Markowitz::ExtractSingletonColumns(
195  const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
196  ColumnPermutation* col_perm, int* index) {
197  SCOPED_TIME_STAT(&stats_);
198  std::vector<MatrixEntry> singleton_entries;
199  const ColIndex num_cols = basis_matrix.num_cols();
200  for (ColIndex col(0); col < num_cols; ++col) {
201  const ColumnView& column = basis_matrix.column(col);
202  if (column.num_entries().value() == 1) {
203  singleton_entries.push_back(
204  MatrixEntry(column.GetFirstRow(), col, column.GetFirstCoefficient()));
205  }
206  }
207 
208  // Sorting the entries by row indices allows the row_permutation to be closer
209  // to identity which seems like a good idea.
210  std::sort(singleton_entries.begin(), singleton_entries.end());
211  for (const MatrixEntry e : singleton_entries) {
212  if ((*row_perm)[e.row] == kInvalidRow) {
213  (*col_perm)[e.col] = ColIndex(*index);
214  (*row_perm)[e.row] = RowIndex(*index);
215  lower_.AddDiagonalOnlyColumn(1.0);
216  upper_.AddDiagonalOnlyColumn(e.coefficient);
217  ++(*index);
218  }
219  }
220  stats_.basis_singleton_column_ratio.Add(static_cast<double>(*index) /
221  basis_matrix.num_rows().value());
222 }
223 
224 bool Markowitz::IsResidualSingletonColumn(const ColumnView& column,
225  const RowPermutation& row_perm,
226  RowIndex* row) {
227  int residual_degree = 0;
228  for (const auto e : column) {
229  if (row_perm[e.row()] != kInvalidRow) continue;
230  ++residual_degree;
231  if (residual_degree > 1) return false;
232  *row = e.row();
233  }
234  return residual_degree == 1;
235 }
236 
237 void Markowitz::ExtractResidualSingletonColumns(
238  const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm,
239  ColumnPermutation* col_perm, int* index) {
240  SCOPED_TIME_STAT(&stats_);
241  const ColIndex num_cols = basis_matrix.num_cols();
242  RowIndex row = kInvalidRow;
243  for (ColIndex col(0); col < num_cols; ++col) {
244  if ((*col_perm)[col] != kInvalidCol) continue;
245  const ColumnView& column = basis_matrix.column(col);
246  if (!IsResidualSingletonColumn(column, *row_perm, &row)) continue;
247  (*col_perm)[col] = ColIndex(*index);
248  (*row_perm)[row] = RowIndex(*index);
249  lower_.AddDiagonalOnlyColumn(1.0);
250  upper_.AddTriangularColumn(column, row);
251  ++(*index);
252  }
253  stats_.basis_residual_singleton_column_ratio.Add(
254  static_cast<double>(*index) / basis_matrix.num_rows().value());
255 }
256 
257 const SparseColumn& Markowitz::ComputeColumn(const RowPermutation& row_perm,
258  ColIndex col) {
259  SCOPED_TIME_STAT(&stats_);
260  // Is this the first time ComputeColumn() sees this column? This is a bit
261  // tricky because just one of the tests is not sufficient in case the matrix
262  // is degenerate.
263  const bool first_time = permuted_lower_.column(col).IsEmpty() &&
264  permuted_upper_.column(col).IsEmpty();
265 
266  // If !permuted_lower_column_needs_solve_[col] then the result of the
267  // PermutedLowerSparseSolve() below is already stored in
268  // permuted_lower_.column(col) and we just need to split this column. Note
269  // that this is just an optimization and the code would work if we just
270  // assumed permuted_lower_column_needs_solve_[col] to be always true.
271  SparseColumn* lower_column = permuted_lower_.mutable_column(col);
272  if (permuted_lower_column_needs_solve_[col]) {
273  // Solve a sparse triangular system. If the column 'col' of permuted_lower_
274  // was never computed before by ComputeColumn(), we use the column 'col' of
275  // the matrix to factorize.
276  const ColumnView& input =
277  first_time ? basis_matrix_->column(col) : ColumnView(*lower_column);
278  lower_.PermutedLowerSparseSolve(input, row_perm, lower_column,
279  permuted_upper_.mutable_column(col));
280  permuted_lower_column_needs_solve_[col] = false;
281  num_fp_operations_ +=
283  return *lower_column;
284  }
285 
286  // All the symbolic non-zeros are always present in lower. So if this test is
287  // true, we can conclude that there is no entries from upper that need to be
288  // moved by a cardinality argument.
289  if (lower_column->num_entries() == residual_matrix_non_zero_.ColDegree(col)) {
290  return *lower_column;
291  }
292 
293  // In this case, we just need to "split" the lower column. We copy from the
294  // appropriate ColumnView in basis_matrix_.
295  // TODO(user): add PopulateFromColumnView if it is useful elsewhere.
296  if (first_time) {
297  const EntryIndex num_entries = basis_matrix_->column(col).num_entries();
298  num_fp_operations_ += num_entries.value();
299  lower_column->Reserve(num_entries);
300  for (const auto e : basis_matrix_->column(col)) {
301  lower_column->SetCoefficient(e.row(), e.coefficient());
302  }
303  }
304  num_fp_operations_ += lower_column->num_entries().value();
305  lower_column->MoveTaggedEntriesTo(row_perm,
306  permuted_upper_.mutable_column(col));
307  return *lower_column;
308 }
309 
// Selects the next pivot. Returns its Markowitz number, i.e.
// (row_degree - 1) * (col_degree - 1), an upper bound on the fill-in the
// pivot can create. Singleton columns/rows are handled first (Markowitz
// number 0); otherwise the best candidate among the lowest-degree columns is
// chosen (Zlatev-style restricted search) subject to the threshold-pivoting
// stability test.
int64_t Markowitz::FindPivot(const RowPermutation& row_perm,
                             const ColumnPermutation& col_perm,
                             RowIndex* pivot_row, ColIndex* pivot_col,
                             Fractional* pivot_coefficient) {
  SCOPED_TIME_STAT(&stats_);

  // Fast track for singleton columns.
  while (!singleton_column_.empty()) {
    const ColIndex col = singleton_column_.back();
    singleton_column_.pop_back();
    DCHECK_EQ(kInvalidCol, col_perm[col]);

    // This can only happen if the matrix is singular. Continuing will cause
    // the algorithm to detect the singularity at the end when we stop before
    // the end.
    //
    // TODO(user): We could detect the singularity at this point, but that
    // may make the code more complex.
    if (residual_matrix_non_zero_.ColDegree(col) != 1) continue;

    // ComputeColumn() is not used as long as only singleton columns of the
    // residual matrix are used. See the other condition in
    // ComputeRowAndColumnPermutation().
    if (contains_only_singleton_columns_) {
      *pivot_col = col;
      // The pivot row is the unique entry of this column in a
      // not-yet-pivoted row.
      for (const SparseColumn::Entry e : basis_matrix_->column(col)) {
        if (row_perm[e.row()] == kInvalidRow) {
          *pivot_row = e.row();
          *pivot_coefficient = e.coefficient();
          break;
        }
      }
      return 0;
    }
    const SparseColumn& column = ComputeColumn(row_perm, col);
    if (column.IsEmpty()) continue;
    *pivot_col = col;
    *pivot_row = column.GetFirstRow();
    *pivot_coefficient = column.GetFirstCoefficient();
    return 0;
  }
  contains_only_singleton_columns_ = false;

  // Fast track for singleton rows. Note that this is actually more than a fast
  // track because of the Zlatev heuristic. Such rows may not be processed as
  // soon as possible otherwise, resulting in more fill-in.
  while (!singleton_row_.empty()) {
    const RowIndex row = singleton_row_.back();
    singleton_row_.pop_back();

    // A singleton row could have been processed when processing a singleton
    // column. Skip if this is the case.
    if (row_perm[row] != kInvalidRow) continue;

    // This shows that the matrix is singular, see comment above for the same
    // case when processing singleton columns.
    if (residual_matrix_non_zero_.RowDegree(row) != 1) continue;
    const ColIndex col =
        residual_matrix_non_zero_.GetFirstNonDeletedColumnFromRow(row);
    if (col == kInvalidCol) continue;
    const SparseColumn& column = ComputeColumn(row_perm, col);
    if (column.IsEmpty()) continue;

    *pivot_col = col;
    *pivot_row = row;
    *pivot_coefficient = column.LookUpCoefficient(row);
    return 0;
  }

  // col_by_degree_ is not needed before we reach this point. Exploit this with
  // a lazy initialization.
  if (!is_col_by_degree_initialized_) {
    is_col_by_degree_initialized_ = true;
    const ColIndex num_cols = col_perm.size();
    col_by_degree_.Reset(row_perm.size().value(), num_cols);
    for (ColIndex col(0); col < num_cols; ++col) {
      if (col_perm[col] != kInvalidCol) continue;
      const int degree = residual_matrix_non_zero_.ColDegree(col);
      DCHECK_NE(degree, 1);
      UpdateDegree(col, degree);
    }
  }

  // Note(user): we use int64_t since this is a product of two ints, moreover
  // the ints should be relatively small, so that should be fine for a while.
  int64_t min_markowitz_number = std::numeric_limits<int64_t>::max();
  examined_col_.clear();
  const int num_columns_to_examine = parameters_.markowitz_zlatev_parameter();
  const Fractional threshold = parameters_.lu_factorization_pivot_threshold();
  while (examined_col_.size() < num_columns_to_examine) {
    const ColIndex col = col_by_degree_.Pop();
    if (col == kInvalidCol) break;
    // Stale queue entry: the column was pivoted since it was pushed.
    if (col_perm[col] != kInvalidCol) continue;
    const int col_degree = residual_matrix_non_zero_.ColDegree(col);
    examined_col_.push_back(col);

    // Because of the two singleton special cases at the beginning of this
    // function and because we process columns by increasing degree, we can
    // derive a lower bound on the best markowitz number we can get by
    // exploring this column. If we cannot beat this number, we can stop here.
    //
    // Note(user): we still process extra column if we can meet the lower bound
    // to eventually have a better pivot.
    //
    // Todo(user): keep the minimum row degree to have a better bound?
    const int64_t markowitz_lower_bound = col_degree - 1;
    if (min_markowitz_number < markowitz_lower_bound) break;

    // TODO(user): col_degree (which is the same as column.num_entries()) is
    // actually an upper bound on the number of non-zeros since there may be
    // numerical cancellations. Exploit this here? Note that it is already used
    // when we update the non_zero pattern of the residual matrix.
    const SparseColumn& column = ComputeColumn(row_perm, col);
    DCHECK_EQ(column.num_entries(), col_degree);

    Fractional max_magnitude = 0.0;
    for (const SparseColumn::Entry e : column) {
      max_magnitude = std::max(max_magnitude, std::abs(e.coefficient()));
    }
    if (max_magnitude == 0.0) {
      // All symbolic non-zero entries have been cancelled!
      // The matrix is singular, but we continue with the other columns.
      examined_col_.pop_back();
      continue;
    }

    // Threshold pivoting: only entries within a factor 'threshold' of the
    // column's max magnitude are acceptable pivots.
    const Fractional skip_threshold = threshold * max_magnitude;
    for (const SparseColumn::Entry e : column) {
      const Fractional magnitude = std::abs(e.coefficient());
      if (magnitude < skip_threshold) continue;

      const int row_degree = residual_matrix_non_zero_.RowDegree(e.row());
      const int64_t markowitz_number = (col_degree - 1) * (row_degree - 1);
      DCHECK_NE(markowitz_number, 0);
      if (markowitz_number < min_markowitz_number ||
          ((markowitz_number == min_markowitz_number) &&
           magnitude > std::abs(*pivot_coefficient))) {
        min_markowitz_number = markowitz_number;
        *pivot_col = col;
        *pivot_row = e.row();
        *pivot_coefficient = e.coefficient();

        // Note(user): We could abort early here if the markowitz_lower_bound
        // is reached, but finishing to loop over this column is fast and may
        // lead to a pivot with a greater magnitude (i.e. a more robust
        // factorization).
      }
    }
    DCHECK_NE(min_markowitz_number, 0);
    DCHECK_GE(min_markowitz_number, markowitz_lower_bound);
  }

  // Push back the columns that we just looked at in the queue since they
  // are candidates for the next pivot.
  //
  // TODO(user): Do that after having updated the matrix? Rationale:
  // - col_by_degree_ is LIFO, so that may save work in ComputeColumn() by
  //   calling it again on the same columns.
  // - Maybe the earliest low-degree columns have a better precision? This
  //   actually depends on the number of operations so is not really true.
  // - Maybe picking the column randomly from the ones with lowest degree would
  //   help having more diversity from one factorization to the next. This is
  //   for the case we do implement this TODO.
  for (const ColIndex col : examined_col_) {
    if (col != *pivot_col) {
      const int degree = residual_matrix_non_zero_.ColDegree(col);
      col_by_degree_.PushOrAdjust(col, degree);
    }
  }
  return min_markowitz_number;
}
481 
482 void Markowitz::UpdateDegree(ColIndex col, int degree) {
483  DCHECK(is_col_by_degree_initialized_);
484 
485  // Separating the degree one columns work because we always select such
486  // a column first and pivoting by such columns does not affect the degree of
487  // any other singleton columns (except if the matrix is not inversible).
488  //
489  // Note that using this optimization does change the order in which the
490  // degree one columns are taken compared to pushing them in the queue.
491  if (degree == 1) {
492  // Note that there is no need to remove this column from col_by_degree_
493  // because it will be processed before col_by_degree_.Pop() is called and
494  // then just be ignored.
495  singleton_column_.push_back(col);
496  } else {
497  col_by_degree_.PushOrAdjust(col, degree);
498  }
499 }
500 
501 void Markowitz::RemoveRowFromResidualMatrix(RowIndex pivot_row,
502  ColIndex pivot_col) {
503  SCOPED_TIME_STAT(&stats_);
504  // Note that instead of calling:
505  // residual_matrix_non_zero_.RemoveDeletedColumnsFromRow(pivot_row);
506  // it is a bit faster to test each position with IsColumnDeleted() since we
507  // will not need the pivot row anymore.
508  if (is_col_by_degree_initialized_) {
509  for (const ColIndex col : residual_matrix_non_zero_.RowNonZero(pivot_row)) {
510  if (residual_matrix_non_zero_.IsColumnDeleted(col)) continue;
511  UpdateDegree(col, residual_matrix_non_zero_.DecreaseColDegree(col));
512  }
513  } else {
514  for (const ColIndex col : residual_matrix_non_zero_.RowNonZero(pivot_row)) {
515  if (residual_matrix_non_zero_.IsColumnDeleted(col)) continue;
516  if (residual_matrix_non_zero_.DecreaseColDegree(col) == 1) {
517  singleton_column_.push_back(col);
518  }
519  }
520  }
521 }
522 
523 void Markowitz::RemoveColumnFromResidualMatrix(RowIndex pivot_row,
524  ColIndex pivot_col) {
525  SCOPED_TIME_STAT(&stats_);
526  // The entries of the pivot column are exactly the symbolic non-zeros of the
527  // residual matrix, since we didn't remove the entries with a coefficient of
528  // zero during PermutedLowerSparseSolve().
529  //
530  // Note that it is okay to decrease the degree of a previous pivot row since
531  // it was set to 0 and will never trigger this test. Even if it triggers it,
532  // we just ignore such singleton rows in FindPivot().
533  for (const SparseColumn::Entry e : permuted_lower_.column(pivot_col)) {
534  const RowIndex row = e.row();
535  if (residual_matrix_non_zero_.DecreaseRowDegree(row) == 1) {
536  singleton_row_.push_back(row);
537  }
538  }
539 }
540 
541 void Markowitz::UpdateResidualMatrix(RowIndex pivot_row, ColIndex pivot_col) {
542  SCOPED_TIME_STAT(&stats_);
543  const SparseColumn& pivot_column = permuted_lower_.column(pivot_col);
544  residual_matrix_non_zero_.Update(pivot_row, pivot_col, pivot_column);
545  for (const ColIndex col : residual_matrix_non_zero_.RowNonZero(pivot_row)) {
546  DCHECK_NE(col, pivot_col);
547  UpdateDegree(col, residual_matrix_non_zero_.ColDegree(col));
548  permuted_lower_column_needs_solve_[col] = true;
549  }
550  RemoveColumnFromResidualMatrix(pivot_row, pivot_col);
551 }
552 
554  return DeterministicTimeForFpOperations(num_fp_operations_);
555 }
556 
558  row_degree_.clear();
559  col_degree_.clear();
560  row_non_zero_.clear();
561  deleted_columns_.clear();
562  bool_scratchpad_.clear();
563  num_non_deleted_columns_ = 0;
564 }
565 
566 void MatrixNonZeroPattern::Reset(RowIndex num_rows, ColIndex num_cols) {
567  row_degree_.AssignToZero(num_rows);
568  col_degree_.AssignToZero(num_cols);
569  row_non_zero_.clear();
570  row_non_zero_.resize(num_rows.value());
571  deleted_columns_.assign(num_cols, false);
572  bool_scratchpad_.assign(num_cols, false);
573  num_non_deleted_columns_ = num_cols;
574 }
575 
577  const CompactSparseMatrixView& basis_matrix, const RowPermutation& row_perm,
578  const ColumnPermutation& col_perm, std::vector<ColIndex>* singleton_columns,
579  std::vector<RowIndex>* singleton_rows) {
580  const ColIndex num_cols = basis_matrix.num_cols();
581  const RowIndex num_rows = basis_matrix.num_rows();
582 
583  // Reset the matrix and initialize the vectors to the correct sizes.
584  Reset(num_rows, num_cols);
585  singleton_columns->clear();
586  singleton_rows->clear();
587 
588  // Compute the number of entries in each row.
589  for (ColIndex col(0); col < num_cols; ++col) {
590  if (col_perm[col] != kInvalidCol) {
591  deleted_columns_[col] = true;
592  --num_non_deleted_columns_;
593  continue;
594  }
595  for (const SparseColumn::Entry e : basis_matrix.column(col)) {
596  ++row_degree_[e.row()];
597  }
598  }
599 
600  // Reserve the row_non_zero_ vector sizes.
601  for (RowIndex row(0); row < num_rows; ++row) {
602  if (row_perm[row] == kInvalidRow) {
603  row_non_zero_[row].reserve(row_degree_[row]);
604  if (row_degree_[row] == 1) singleton_rows->push_back(row);
605  } else {
606  // This is needed because in the row degree computation above, we do not
607  // test for row_perm[row] == kInvalidRow because it is a bit faster.
608  row_degree_[row] = 0;
609  }
610  }
611 
612  // Initialize row_non_zero_.
613  for (ColIndex col(0); col < num_cols; ++col) {
614  if (col_perm[col] != kInvalidCol) continue;
615  int32_t col_degree = 0;
616  for (const SparseColumn::Entry e : basis_matrix.column(col)) {
617  const RowIndex row = e.row();
618  if (row_perm[row] == kInvalidRow) {
619  ++col_degree;
620  row_non_zero_[row].push_back(col);
621  }
622  }
623  col_degree_[col] = col_degree;
624  if (col_degree == 1) singleton_columns->push_back(col);
625  }
626 }
627 
628 void MatrixNonZeroPattern::AddEntry(RowIndex row, ColIndex col) {
629  ++row_degree_[row];
630  ++col_degree_[col];
631  row_non_zero_[row].push_back(col);
632 }
633 
635  return --col_degree_[col];
636 }
637 
639  return --row_degree_[row];
640 }
641 
643  ColIndex pivot_col) {
644  DCHECK(!deleted_columns_[pivot_col]);
645  deleted_columns_[pivot_col] = true;
646  --num_non_deleted_columns_;
647 
648  // We do that to optimize RemoveColumnFromResidualMatrix().
649  row_degree_[pivot_row] = 0;
650 }
651 
653  return deleted_columns_[col];
654 }
655 
657  auto& ref = row_non_zero_[row];
658  int new_index = 0;
659  const int end = ref.size();
660  for (int i = 0; i < end; ++i) {
661  const ColIndex col = ref[i];
662  if (!deleted_columns_[col]) {
663  ref[new_index] = col;
664  ++new_index;
665  }
666  }
667  ref.resize(new_index);
668 }
669 
671  RowIndex row) const {
672  for (const ColIndex col : RowNonZero(row)) {
673  if (!IsColumnDeleted(col)) return col;
674  }
675  return kInvalidCol;
676 }
677 
678 void MatrixNonZeroPattern::Update(RowIndex pivot_row, ColIndex pivot_col,
679  const SparseColumn& column) {
680  // Since DeleteRowAndColumn() must be called just before this function,
681  // the pivot column has been marked as deleted but degrees have not been
682  // updated yet. Hence the +1.
683  DCHECK(deleted_columns_[pivot_col]);
684  const int max_row_degree = num_non_deleted_columns_.value() + 1;
685 
686  RemoveDeletedColumnsFromRow(pivot_row);
687  for (const ColIndex col : row_non_zero_[pivot_row]) {
689  bool_scratchpad_[col] = false;
690  }
691 
692  // We only need to merge the row for the position with a coefficient different
693  // from 0.0. Note that the column must contain all the symbolic non-zeros for
694  // the row degree to be updated correctly. Note also that decreasing the row
695  // degrees due to the deletion of pivot_col will happen outside this function.
696  for (const SparseColumn::Entry e : column) {
697  const RowIndex row = e.row();
698  if (row == pivot_row) continue;
699 
700  // If the row is fully dense, there is nothing to do (the merge below will
701  // not change anything). This is a small price to pay for a huge gain when
702  // the matrix becomes dense.
703  if (e.coefficient() == 0.0 || row_degree_[row] == max_row_degree) continue;
704  DCHECK_LT(row_degree_[row], max_row_degree);
705 
706  // We only clean row_non_zero_[row] if there are more than 4 entries to
707  // delete. Note(user): the 4 is somewhat arbitrary, but gives good results
708  // on the Netlib (23/04/2013). Note that calling
709  // RemoveDeletedColumnsFromRow() is not mandatory and does not change the LU
710  // decomposition, so we could call it all the time or never and the
711  // algorithm would still work.
712  const int kDeletionThreshold = 4;
713  if (row_non_zero_[row].size() > row_degree_[row] + kDeletionThreshold) {
715  }
716  // TODO(user): Special case if row_non_zero_[pivot_row].size() == 1?
717  if (/* DISABLES CODE */ (true)) {
718  MergeInto(pivot_row, row);
719  } else {
720  // This is currently not used, but kept as an alternative algorithm to
721  // investigate. The performance is really similar, but the final L.U is
722  // different. Note that when this is used, there is no need to modify
723  // bool_scratchpad_ at the beginning of this function.
724  //
725  // TODO(user): Add unit tests before using this.
726  MergeIntoSorted(pivot_row, row);
727  }
728  }
729 }
730 
731 void MatrixNonZeroPattern::MergeInto(RowIndex pivot_row, RowIndex row) {
732  // Note that bool_scratchpad_ must be already false on the positions in
733  // row_non_zero_[pivot_row].
734  for (const ColIndex col : row_non_zero_[row]) {
735  bool_scratchpad_[col] = true;
736  }
737 
738  auto& non_zero = row_non_zero_[row];
739  const int old_size = non_zero.size();
740  for (const ColIndex col : row_non_zero_[pivot_row]) {
741  if (bool_scratchpad_[col]) {
742  bool_scratchpad_[col] = false;
743  } else {
744  non_zero.push_back(col);
745  ++col_degree_[col];
746  }
747  }
748  row_degree_[row] += non_zero.size() - old_size;
749 }
750 
751 namespace {
752 
753 // Given two sorted vectors (the second one is the initial value of out), merges
754 // them and outputs the sorted result in out. The merge is stable and an element
755 // of input_a will appear before the identical elements of the second input.
template <typename V, typename W>
void MergeSortedVectors(const V& input_a, W* out) {
  if (input_a.empty()) return;
  const auto& input_b = *out;
  int i = input_a.size() - 1;
  int j = input_b.size() - 1;
  int k = input_a.size() + input_b.size();
  out->resize(k);
  // Merge from the back so *out can serve as both an input and the output
  // without extra storage. Ties go to input_b here, which places equal
  // elements of input_a before them: the merge is stable as documented.
  while (i >= 0 && j >= 0) {
    --k;
    if (input_a[i] > input_b[j]) {
      (*out)[k] = input_a[i];
      --i;
    } else {
      (*out)[k] = input_b[j];
      --j;
    }
  }
  // Copy the remaining head of input_a; input_b's head is already in place.
  while (i >= 0) {
    --k;
    (*out)[k] = input_a[i];
    --i;
  }
}
783 
784 } // namespace
785 
786 // The algorithm first computes into col_scratchpad_ the entries in pivot_row
787 // that are not in the row (i.e. the fill-in). It then updates the non-zero
788 // pattern using this temporary vector.
789 void MatrixNonZeroPattern::MergeIntoSorted(RowIndex pivot_row, RowIndex row) {
790  // We want to add the entries of the input not already in the output.
791  const auto& input = row_non_zero_[pivot_row];
792  const auto& output = row_non_zero_[row];
793 
794  // These two resizes are because of the set_difference() output iterator api.
795  col_scratchpad_.resize(input.size());
796  col_scratchpad_.resize(std::set_difference(input.begin(), input.end(),
797  output.begin(), output.end(),
798  col_scratchpad_.begin()) -
799  col_scratchpad_.begin());
800 
801  // Add the fill-in to the pattern.
802  for (const ColIndex col : col_scratchpad_) {
803  ++col_degree_[col];
804  }
805  row_degree_[row] += col_scratchpad_.size();
806  MergeSortedVectors(col_scratchpad_, &row_non_zero_[row]);
807 }
808 
810  col_degree_.clear();
811  col_index_.clear();
812  col_by_degree_.clear();
813 }
814 
815 void ColumnPriorityQueue::Reset(int max_degree, ColIndex num_cols) {
816  Clear();
817  col_degree_.assign(num_cols, 0);
818  col_index_.assign(num_cols, -1);
819  col_by_degree_.resize(max_degree + 1);
820  min_degree_ = max_degree + 1;
821 }
822 
// Inserts 'col' with the given degree, or moves it to the right bucket if it
// is already present with another degree. A degree of 0 removes the column.
void ColumnPriorityQueue::PushOrAdjust(ColIndex col, int32_t degree) {
  DCHECK_GE(degree, 0);
  DCHECK_LT(degree, col_by_degree_.size());
  DCHECK_GE(col, 0);
  DCHECK_LT(col, col_degree_.size());

  // Nothing to do when the recorded degree is already up to date.
  const int32_t old_degree = col_degree_[col];
  if (degree != old_degree) {
    // If the column is currently stored, remove it from its old bucket with
    // a swap-with-last + pop (order inside a bucket does not matter).
    const int32_t old_index = col_index_[col];
    if (old_index != -1) {
      col_by_degree_[old_degree][old_index] = col_by_degree_[old_degree].back();
      col_index_[col_by_degree_[old_degree].back()] = old_index;
      col_by_degree_[old_degree].pop_back();
    }
    if (degree > 0) {
      // Append to the bucket of the new degree and keep min_degree_ valid.
      col_index_[col] = col_by_degree_[degree].size();
      col_degree_[col] = degree;
      col_by_degree_[degree].push_back(col);
      min_degree_ = std::min(min_degree_, degree);
    } else {
      // Degree 0 means "not in the queue".
      col_index_[col] = -1;
      col_degree_[col] = 0;
    }
  }
}
848 
850  DCHECK_GE(min_degree_, 0);
851  DCHECK_LE(min_degree_, col_by_degree_.size());
852  while (true) {
853  if (min_degree_ == col_by_degree_.size()) return kInvalidCol;
854  if (!col_by_degree_[min_degree_].empty()) break;
855  min_degree_++;
856  }
857  const ColIndex col = col_by_degree_[min_degree_].back();
858  col_by_degree_[min_degree_].pop_back();
859  col_index_[col] = -1;
860  col_degree_[col] = 0;
861  return col;
862 }
863 
865  mapping_.assign(num_cols.value(), -1);
866  free_columns_.clear();
867  columns_.clear();
868 }
869 
871  ColIndex col) const {
872  if (mapping_[col] == -1) return empty_column_;
873  return columns_[mapping_[col]];
874 }
875 
877  ColIndex col) {
878  if (mapping_[col] != -1) return &columns_[mapping_[col]];
879  int new_col_index;
880  if (free_columns_.empty()) {
881  new_col_index = columns_.size();
882  columns_.push_back(SparseColumn());
883  } else {
884  new_col_index = free_columns_.back();
885  free_columns_.pop_back();
886  }
887  mapping_[col] = new_col_index;
888  return &columns_[new_col_index];
889 }
890 
892  DCHECK_NE(mapping_[col], -1);
893  free_columns_.push_back(mapping_[col]);
894  columns_[mapping_[col]].Clear();
895  mapping_[col] = -1;
896 }
897 
899  mapping_.clear();
900  free_columns_.clear();
901  columns_.clear();
902 }
903 
904 } // namespace glop
905 } // namespace operations_research
void AddEntry(RowIndex row, ColIndex col)
Definition: markowitz.cc:628
void PushOrAdjust(ColIndex col, int32_t degree)
Definition: markowitz.cc:823
void Update(RowIndex pivot_row, ColIndex pivot_col, const SparseColumn &column)
Definition: markowitz.cc:678
void Reset(RowIndex num_rows, ColIndex num_cols)
Definition: markowitz.cc:566
int64_t min
Definition: alldiff_cst.cc:139
void AddDiagonalOnlyColumn(Fractional diagonal_value)
Definition: sparse.cc:663
void Reset(RowIndex num_rows, ColIndex col_capacity)
Definition: sparse.cc:551
EntryIndex num_entries
ABSL_MUST_USE_RESULT Status ComputeRowAndColumnPermutation(const CompactSparseMatrixView &basis_matrix, RowPermutation *row_perm, ColumnPermutation *col_perm)
Definition: markowitz.cc:27
#define VLOG(verboselevel)
Definition: base/logging.h:979
const ColIndex kInvalidCol(-1)
ColIndex col
Definition: markowitz.cc:183
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
static const Status OK()
Definition: status.h:54
const ColumnView column(ColIndex col) const
Definition: sparse.h:490
RowIndex row
Definition: markowitz.cc:182
void assign(IntType size, const T &v)
Definition: lp_types.h:278
Permutation< ColIndex > ColumnPermutation
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:70
Fractional coefficient
Definition: markowitz.cc:184
const SparseColumn & column(ColIndex col) const
Definition: markowitz.cc:870
void assign(IndexType size, IndexType value)
void AddTriangularColumnWithGivenDiagonalEntry(const SparseColumn &column, RowIndex diagonal_row, Fractional diagonal_value)
Definition: sparse.cc:699
int64_t max
Definition: alldiff_cst.cc:140
void resize(size_type new_size)
void DeleteRowAndColumn(RowIndex pivot_row, ColIndex pivot_col)
Definition: markowitz.cc:642
ABSL_MUST_USE_RESULT Status ComputeLU(const CompactSparseMatrixView &basis_matrix, RowPermutation *row_perm, ColumnPermutation *col_perm, TriangularMatrix *lower, TriangularMatrix *upper)
Definition: markowitz.cc:149
const absl::InlinedVector< ColIndex, 6 > & RowNonZero(RowIndex row) const
Definition: markowitz.h:168
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:887
ColIndex GetFirstNonDeletedColumnFromRow(RowIndex row) const
Definition: markowitz.cc:670
void push_back(const value_type &x)
static int input(yyscan_t yyscanner)
int index
Definition: pack.cc:509
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:890
void PermutedLowerSparseSolve(const ColumnView &rhs, const RowPermutation &row_perm, SparseColumn *lower, SparseColumn *upper)
Definition: sparse.cc:1065
void AddTriangularColumn(const ColumnView &column, RowIndex diagonal_row)
Definition: sparse.cc:667
const RowIndex kInvalidRow(-1)
size_type size() const
double DeterministicTimeOfLastFactorization() const
Definition: markowitz.cc:553
#define DCHECK(condition)
Definition: base/logging.h:885
void ApplyRowPermutationToNonDiagonalEntries(const RowPermutation &row_perm)
Definition: sparse.cc:739
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:886
::PROTOBUF_NAMESPACE_ID::int32 markowitz_zlatev_parameter() const
int32_t RowDegree(RowIndex row) const
Definition: markowitz.h:163
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:888
int64_t NumFpOperationsInLastPermutedLowerSparseSolve() const
Definition: sparse.h:709
Collection of objects used to extend the Constraint Solver library.
void AddAndNormalizeTriangularColumn(const SparseColumn &column, RowIndex diagonal_row, Fractional diagonal_coefficient)
Definition: sparse.cc:682
void assign(size_type n, const value_type &val)
Permutation< RowIndex > RowPermutation
void reserve(size_type n)
void Reset(int32_t max_degree, ColIndex num_cols)
Definition: markowitz.cc:815
int32_t ColDegree(ColIndex col) const
Definition: markowitz.h:156
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:383
void Swap(TriangularMatrix *other)
Definition: sparse.cc:620
void InitializeFromMatrixSubset(const CompactSparseMatrixView &basis_matrix, const RowPermutation &row_perm, const ColumnPermutation &col_perm, std::vector< ColIndex > *singleton_columns, std::vector< RowIndex > *singleton_rows)
Definition: markowitz.cc:576
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:889
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437