OR-Tools  8.2
revised_simplex.cc
Go to the documentation of this file.
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <functional>
19 #include <map>
20 #include <string>
21 #include <utility>
22 #include <vector>
23 
24 #include "absl/strings/str_cat.h"
25 #include "absl/strings/str_format.h"
28 #include "ortools/base/logging.h"
37 #include "ortools/util/fp_utils.h"
38 
39 ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
40  "Display numbers as fractions.");
41 ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
42  "Stop after first basis has been computed.");
43 ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
44  "Stop after first phase has been completed.");
45 ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
46 
47 namespace operations_research {
48 namespace glop {
49 namespace {
50 
// Calls the given closure upon destruction. It can be used to ensure that a
// closure is executed whenever a function returns.
//
// The class is intentionally non-copyable: copying it would run the closure
// once per copy instead of exactly once.
class Cleanup {
 public:
  // The closure must be callable: an empty std::function would throw
  // std::bad_function_call from the destructor.
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  Cleanup(const Cleanup&) = delete;
  Cleanup& operator=(const Cleanup&) = delete;

  ~Cleanup() { closure_(); }

 private:
  std::function<void()> closure_;
};
62 } // namespace
63 
// Debug-only bound checks for column/row indices. Wrapped in
// do { } while (false) so each macro expands to a single statement and stays
// safe inside an unbraced if/else (plain braces would not consume the
// trailing semicolon).
#define DCHECK_COL_BOUNDS(col) \
  do {                         \
    DCHECK_LE(0, col);         \
    DCHECK_GT(num_cols_, col); \
  } while (false)

#define DCHECK_ROW_BOUNDS(row) \
  do {                         \
    DCHECK_LE(0, row);         \
    DCHECK_GT(num_rows_, row); \
  } while (false)
75 
76 constexpr const uint64 kDeterministicSeed = 42;
77 
79  : problem_status_(ProblemStatus::INIT),
80  num_rows_(0),
81  num_cols_(0),
82  first_slack_col_(0),
83  objective_(),
84  lower_bound_(),
85  upper_bound_(),
86  basis_(),
87  variable_name_(),
88  direction_(),
89  error_(),
90  basis_factorization_(&compact_matrix_, &basis_),
91  variables_info_(compact_matrix_, lower_bound_, upper_bound_),
92  variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
93  basis_factorization_),
94  dual_edge_norms_(basis_factorization_),
95  primal_edge_norms_(compact_matrix_, variables_info_,
96  basis_factorization_),
97  update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
98  basis_factorization_),
99  reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
100  basis_factorization_, &random_),
101  entering_variable_(variables_info_, &random_, &reduced_costs_,
102  &primal_edge_norms_),
103  num_iterations_(0),
104  num_feasibility_iterations_(0),
105  num_optimization_iterations_(0),
106  total_time_(0.0),
107  feasibility_time_(0.0),
108  optimization_time_(0.0),
109  last_deterministic_time_update_(0.0),
110  iteration_stats_(),
111  ratio_test_stats_(),
112  function_stats_("SimplexFunctionStats"),
113  parameters_(),
114  test_lu_(),
115  feasibility_phase_(true),
116  random_(kDeterministicSeed) {
117  SetParameters(parameters_);
118 }
119 
121  SCOPED_TIME_STAT(&function_stats_);
122  solution_state_.statuses.clear();
123 }
124 
126  SCOPED_TIME_STAT(&function_stats_);
127  solution_state_ = state;
128  solution_state_has_been_set_externally_ = true;
129 }
130 
132  notify_that_matrix_is_unchanged_ = true;
133 }
134 
136  SCOPED_TIME_STAT(&function_stats_);
137  DCHECK(lp.IsCleanedUp());
139  if (!lp.IsInEquationForm()) {
141  "The problem is not in the equations form.");
142  }
143  Cleanup update_deterministic_time_on_return(
144  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
145 
 146  // Initialization. Note that Initialize() must be called first since it
147  // analyzes the current solver state.
148  const double start_time = time_limit->GetElapsedTime();
149  GLOP_RETURN_IF_ERROR(Initialize(lp));
150 
151  dual_infeasibility_improvement_direction_.clear();
152  update_row_.Invalidate();
153  test_lu_.Clear();
154  problem_status_ = ProblemStatus::INIT;
155  feasibility_phase_ = true;
156  num_iterations_ = 0;
157  num_feasibility_iterations_ = 0;
158  num_optimization_iterations_ = 0;
159  feasibility_time_ = 0.0;
160  optimization_time_ = 0.0;
161  total_time_ = 0.0;
162 
163  // In case we abort because of an error, we cannot assume that the current
164  // solution state will be in sync with all our internal data structure. In
165  // case we abort without resetting it, setting this allow us to still use the
166  // previous state info, but we will double-check everything.
167  solution_state_has_been_set_externally_ = true;
168 
169  if (VLOG_IS_ON(1)) {
170  ComputeNumberOfEmptyRows();
171  ComputeNumberOfEmptyColumns();
172  DisplayBasicVariableStatistics();
173  DisplayProblem();
174  }
175  if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
176  DisplayAllStats();
177  return Status::OK();
178  }
179 
180  const bool use_dual = parameters_.use_dual_simplex();
181  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
182  if (log_info) {
183  LOG(INFO) << "------ " << (use_dual ? "Dual simplex." : "Primal simplex.");
184  LOG(INFO) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
185  << compact_matrix_.num_cols() << " columns, "
186  << compact_matrix_.num_entries() << " entries.";
187  }
188 
189  // TODO(user): Avoid doing the first phase checks when we know from the
190  // incremental solve that the solution is already dual or primal feasible.
191  if (log_info) LOG(INFO) << "------ First phase: feasibility.";
192  entering_variable_.SetPricingRule(parameters_.feasibility_rule());
193  if (use_dual) {
194  if (parameters_.perturb_costs_in_dual_simplex()) {
195  reduced_costs_.PerturbCosts();
196  }
197 
198  variables_info_.MakeBoxedVariableRelevant(false);
199  GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
200  DisplayIterationInfo();
201 
202  if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
203  // Note(user): In most cases, the matrix will already be refactorized and
204  // both Refactorize() and PermuteBasis() will do nothing. However, if the
205  // time limit is reached during the first phase, this might not be the
206  // case and RecomputeBasicVariableValues() below DCHECKs that the matrix
207  // is refactorized. This is not required, but we currently only want to
208  // recompute values from scratch when the matrix was just refactorized to
209  // maximize precision.
210  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
211  PermuteBasis();
212 
213  variables_info_.MakeBoxedVariableRelevant(true);
214  reduced_costs_.MakeReducedCostsPrecise();
215 
216  // This is needed to display errors properly.
217  MakeBoxedVariableDualFeasible(variables_info_.GetNonBasicBoxedVariables(),
218  /*update_basic_values=*/false);
219  variable_values_.RecomputeBasicVariableValues();
220  variable_values_.ResetPrimalInfeasibilityInformation();
221  }
222  } else {
223  reduced_costs_.MaintainDualInfeasiblePositions(true);
224  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
225  DisplayIterationInfo();
226 
227  // After the primal phase I, we need to restore the objective.
228  if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
229  InitializeObjectiveAndTestIfUnchanged(lp);
230  reduced_costs_.ResetForNewObjective();
231  }
232  }
233 
234  // Reduced costs must be explicitly recomputed because DisplayErrors() is
235  // const.
236  // TODO(user): This API is not really nice.
237  reduced_costs_.GetReducedCosts();
238  DisplayErrors();
239 
240  feasibility_phase_ = false;
241  feasibility_time_ = time_limit->GetElapsedTime() - start_time;
242  entering_variable_.SetPricingRule(parameters_.optimization_rule());
243  num_feasibility_iterations_ = num_iterations_;
244 
245  if (log_info) LOG(INFO) << "------ Second phase: optimization.";
246 
247  // Because of shifts or perturbations, we may need to re-run a dual simplex
248  // after the primal simplex finished, or the opposite.
249  //
 250  // We alternate between solving with the primal and dual Phase II algorithm as
251  // time limit permits *and* we did not yet achieve the desired precision.
252  // I.e., we run iteration i if the solution from iteration i-1 was not precise
253  // after we removed the bound and cost shifts and perturbations.
254  //
255  // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
256  // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
 257  // (i.e., these statuses are not necessarily a consequence of hitting a time
258  // limit).
259  for (int num_optims = 0;
260  // We want to enter the loop when both num_optims and num_iterations_ are
261  // *equal* to the corresponding limits (to return a meaningful status
262  // when the limits are set to 0).
263  num_optims <= parameters_.max_number_of_reoptimizations() &&
264  !objective_limit_reached_ &&
265  (num_iterations_ == 0 ||
266  num_iterations_ < parameters_.max_number_of_iterations()) &&
267  !time_limit->LimitReached() &&
268  !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
269  (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
270  problem_status_ == ProblemStatus::DUAL_FEASIBLE);
271  ++num_optims) {
272  if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
273  // Run the primal simplex.
274  reduced_costs_.MaintainDualInfeasiblePositions(true);
275  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
276  } else {
277  // Run the dual simplex.
278  reduced_costs_.MaintainDualInfeasiblePositions(false);
279  GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
280  }
281 
282  // Minimize() or DualMinimize() always double check the result with maximum
283  // precision by refactoring the basis before exiting (except if an
284  // iteration or time limit was reached).
285  DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
286  problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
287  basis_factorization_.IsRefactorized());
288 
 289  // If SetIntegralityScale() was called, we perform a polish operation.
290  if (!integrality_scale_.empty() &&
291  problem_status_ == ProblemStatus::OPTIMAL) {
292  reduced_costs_.MaintainDualInfeasiblePositions(true);
294  }
295 
296  // Remove the bound and cost shifts (or perturbations).
297  //
298  // Note(user): Currently, we never do both at the same time, so we could
299  // be a bit faster here, but then this is quick anyway.
300  variable_values_.ResetAllNonBasicVariableValues();
301  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
302  PermuteBasis();
303  variable_values_.RecomputeBasicVariableValues();
304  reduced_costs_.ClearAndRemoveCostShifts();
305 
306  // Reduced costs must be explicitly recomputed because DisplayErrors() is
307  // const.
308  // TODO(user): This API is not really nice.
309  reduced_costs_.GetReducedCosts();
310  DisplayIterationInfo();
311  DisplayErrors();
312 
313  // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
314  // status by checking with the other phase I that the problem is really
315  // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
316  // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
317  // OPTIMAL and the dual does not have issues on this problem.
318  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
319  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
320  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
321  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
322  reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
323  if (log_info) {
324  LOG(INFO) << "DUAL_UNBOUNDED was reported, but the residual and/or "
325  << "dual infeasibility is above the tolerance";
326  }
327  }
328  break;
329  }
330 
331  // Change the status, if after the shift and perturbation removal the
332  // problem is not OPTIMAL anymore.
333  if (problem_status_ == ProblemStatus::OPTIMAL) {
334  const Fractional solution_tolerance =
335  parameters_.solution_feasibility_tolerance();
336  if (variable_values_.ComputeMaximumPrimalResidual() >
337  solution_tolerance ||
338  reduced_costs_.ComputeMaximumDualResidual() > solution_tolerance) {
339  if (log_info) {
340  LOG(INFO) << "OPTIMAL was reported, yet one of the residuals is "
341  "above the solution feasibility tolerance after the "
342  "shift/perturbation are removed.";
343  }
344  if (parameters_.change_status_to_imprecise()) {
345  problem_status_ = ProblemStatus::IMPRECISE;
346  }
347  } else {
348  // We use the "precise" tolerances here to try to report the best
349  // possible solution.
350  const Fractional primal_tolerance =
351  parameters_.primal_feasibility_tolerance();
352  const Fractional dual_tolerance =
353  parameters_.dual_feasibility_tolerance();
354  const Fractional primal_infeasibility =
355  variable_values_.ComputeMaximumPrimalInfeasibility();
356  const Fractional dual_infeasibility =
357  reduced_costs_.ComputeMaximumDualInfeasibility();
358  if (primal_infeasibility > primal_tolerance &&
359  dual_infeasibility > dual_tolerance) {
360  if (log_info) {
361  LOG(INFO) << "OPTIMAL was reported, yet both of the infeasibility "
362  "are above the tolerance after the "
363  "shift/perturbation are removed.";
364  }
365  if (parameters_.change_status_to_imprecise()) {
366  problem_status_ = ProblemStatus::IMPRECISE;
367  }
368  } else if (primal_infeasibility > primal_tolerance) {
369  if (log_info) LOG(INFO) << "Re-optimizing with dual simplex ... ";
370  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
371  } else if (dual_infeasibility > dual_tolerance) {
372  if (log_info) LOG(INFO) << "Re-optimizing with primal simplex ... ";
373  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
374  }
375  }
376  }
377  }
378 
379  // Check that the return status is "precise".
380  //
 381  // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
382  // quantities are not up to date in this case.
383  if (parameters_.change_status_to_imprecise() &&
384  problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
385  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
386  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
387  reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
388  problem_status_ = ProblemStatus::IMPRECISE;
389  } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
390  problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
391  problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
392  if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
393  problem_status_ = ProblemStatus::IMPRECISE;
394  }
395  } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
396  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
397  problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
398  if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
399  problem_status_ = ProblemStatus::IMPRECISE;
400  }
401  }
402  }
403 
404  // Store the result for the solution getters.
405  SaveState();
406  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
407  solution_dual_values_ = reduced_costs_.GetDualValues();
408  solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
409  if (lp.IsMaximizationProblem()) {
410  ChangeSign(&solution_dual_values_);
411  ChangeSign(&solution_reduced_costs_);
412  }
413 
414  // If the problem is unbounded, set the objective value to +/- infinity.
415  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
416  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
417  solution_objective_value_ =
418  (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
419  : -kInfinity;
420  if (lp.IsMaximizationProblem()) {
421  solution_objective_value_ = -solution_objective_value_;
422  }
423  }
424 
425  total_time_ = time_limit->GetElapsedTime() - start_time;
426  optimization_time_ = total_time_ - feasibility_time_;
427  num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
428 
429  DisplayAllStats();
430  return Status::OK();
431 }
432 
434  return problem_status_;
435 }
436 
438  return solution_objective_value_;
439 }
440 
441 int64 RevisedSimplex::GetNumberOfIterations() const { return num_iterations_; }
442 
443 RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
444 
445 ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
446 
448  return variable_values_.Get(col);
449 }
450 
452  return solution_reduced_costs_[col];
453 }
454 
456  return solution_reduced_costs_;
457 }
458 
460  return solution_dual_values_[row];
461 }
462 
464  return variables_info_.GetStatusRow()[col];
465 }
466 
467 const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
468 
470  // Note the negative sign since the slack variable is such that
471  // constraint_activity + slack_value = 0.
472  return -variable_values_.Get(SlackColIndex(row));
473 }
474 
476  // The status of the given constraint is the same as the status of the
477  // associated slack variable with a change of sign.
478  const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
481  }
484  }
485  return VariableToConstraintStatus(s);
486 }
487 
489  DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
490  return solution_primal_ray_;
491 }
493  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
494  return solution_dual_ray_;
495 }
496 
498  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
499  return solution_dual_ray_row_combination_;
500 }
501 
502 ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
503 
505  DCHECK(basis_factorization_.GetColumnPermutation().empty());
506  return basis_factorization_;
507 }
508 
509 std::string RevisedSimplex::GetPrettySolverStats() const {
510  return absl::StrFormat(
511  "Problem status : %s\n"
512  "Solving time : %-6.4g\n"
513  "Number of iterations : %u\n"
514  "Time for solvability (first phase) : %-6.4g\n"
515  "Number of iterations for solvability : %u\n"
516  "Time for optimization : %-6.4g\n"
517  "Number of iterations for optimization : %u\n"
518  "Stop after first basis : %d\n",
519  GetProblemStatusString(problem_status_), total_time_, num_iterations_,
520  feasibility_time_, num_feasibility_iterations_, optimization_time_,
521  num_optimization_iterations_,
522  absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
523 }
524 
526  // TODO(user): Also take into account the dual edge norms and the reduced cost
527  // updates.
528  return basis_factorization_.DeterministicTime() +
529  update_row_.DeterministicTime() +
530  primal_edge_norms_.DeterministicTime();
531 }
532 
533 void RevisedSimplex::SetVariableNames() {
534  variable_name_.resize(num_cols_, "");
535  for (ColIndex col(0); col < first_slack_col_; ++col) {
536  const ColIndex var_index = col + 1;
537  variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
538  }
539  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
540  const ColIndex var_index = col - first_slack_col_ + 1;
541  variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
542  }
543 }
544 
545 VariableStatus RevisedSimplex::ComputeDefaultVariableStatus(
546  ColIndex col) const {
548  if (lower_bound_[col] == upper_bound_[col]) {
550  }
551  if (lower_bound_[col] == -kInfinity && upper_bound_[col] == kInfinity) {
552  return VariableStatus::FREE;
553  }
554 
555  // Returns the bound with the lowest magnitude. Note that it must be finite
556  // because the VariableStatus::FREE case was tested earlier.
557  DCHECK(IsFinite(lower_bound_[col]) || IsFinite(upper_bound_[col]));
558  return std::abs(lower_bound_[col]) <= std::abs(upper_bound_[col])
561 }
562 
563 void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
564  ColIndex col, VariableStatus status) {
565  variables_info_.UpdateToNonBasicStatus(col, status);
566  variable_values_.SetNonBasicVariableValueFromStatus(col);
567 }
568 
569 bool RevisedSimplex::BasisIsConsistent() const {
570  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
571  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
572  for (RowIndex row(0); row < num_rows_; ++row) {
573  const ColIndex col = basis_[row];
574  if (!is_basic.IsSet(col)) return false;
575  if (variable_statuses[col] != VariableStatus::BASIC) return false;
576  }
577  ColIndex cols_in_basis(0);
578  ColIndex cols_not_in_basis(0);
579  for (ColIndex col(0); col < num_cols_; ++col) {
580  cols_in_basis += is_basic.IsSet(col);
581  cols_not_in_basis += !is_basic.IsSet(col);
582  if (is_basic.IsSet(col) !=
583  (variable_statuses[col] == VariableStatus::BASIC)) {
584  return false;
585  }
586  }
587  if (cols_in_basis != RowToColIndex(num_rows_)) return false;
588  if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
589  return true;
590 }
591 
592 // Note(user): The basis factorization is not updated by this function but by
593 // UpdateAndPivot().
594 void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
595  VariableStatus leaving_variable_status) {
596  SCOPED_TIME_STAT(&function_stats_);
597  DCHECK_COL_BOUNDS(entering_col);
598  DCHECK_ROW_BOUNDS(basis_row);
599 
600  // Check that this is not called with an entering_col already in the basis
601  // and that the leaving col is indeed in the basis.
602  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
603  DCHECK_NE(basis_[basis_row], entering_col);
604  DCHECK_NE(basis_[basis_row], kInvalidCol);
605 
606  const ColIndex leaving_col = basis_[basis_row];
607  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
608 
609  // Make leaving_col leave the basis and update relevant data.
610  // Note thate the leaving variable value is not necessarily at its exact
611  // bound, which is like a bound shift.
612  variables_info_.Update(leaving_col, leaving_variable_status);
613  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
614  leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
615  leaving_variable_status == VariableStatus::FIXED_VALUE);
616 
617  basis_[basis_row] = entering_col;
618  variables_info_.Update(entering_col, VariableStatus::BASIC);
619  update_row_.Invalidate();
620 }
621 
622 namespace {
623 
624 // Comparator used to sort column indices according to a given value vector.
625 class ColumnComparator {
626  public:
627  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
628  bool operator()(ColIndex col_a, ColIndex col_b) const {
629  return value_[col_a] < value_[col_b];
630  }
631 
632  private:
633  const DenseRow& value_;
634 };
635 
636 } // namespace
637 
638 // To understand better what is going on in this function, let us say that this
639 // algorithm will produce the optimal solution to a problem containing only
640 // singleton columns (provided that the variables start at the minimum possible
641 // cost, see ComputeDefaultVariableStatus()). This is unit tested.
642 //
643 // The error_ must be equal to the constraint activity for the current variable
644 // values before this function is called. If error_[row] is 0.0, that mean this
645 // constraint is currently feasible.
646 void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
647  SCOPED_TIME_STAT(&function_stats_);
648  // Computes the singleton columns and the cost variation of the corresponding
649  // variables (in the only possible direction, i.e away from its current bound)
650  // for a unit change in the infeasibility of the corresponding row.
651  //
652  // Note that the slack columns will be treated as normal singleton columns.
653  std::vector<ColIndex> singleton_column;
654  DenseRow cost_variation(num_cols_, 0.0);
655  for (ColIndex col(0); col < num_cols_; ++col) {
656  if (compact_matrix_.column(col).num_entries() != 1) continue;
657  if (lower_bound_[col] == upper_bound_[col]) continue;
658  const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
659  if (variable_values_.Get(col) == lower_bound_[col]) {
660  cost_variation[col] = objective_[col] / std::abs(slope);
661  } else {
662  cost_variation[col] = -objective_[col] / std::abs(slope);
663  }
664  singleton_column.push_back(col);
665  }
666  if (singleton_column.empty()) return;
667 
668  // Sort the singleton columns for the case where many of them correspond to
669  // the same row (equivalent to a piecewise-linear objective on this variable).
670  // Negative cost_variation first since moving the singleton variable away from
671  // its current bound means the least decrease in the objective function for
672  // the same "error" variation.
673  ColumnComparator comparator(cost_variation);
674  std::sort(singleton_column.begin(), singleton_column.end(), comparator);
675  DCHECK_LE(cost_variation[singleton_column.front()],
676  cost_variation[singleton_column.back()]);
677 
678  // Use a singleton column to "absorb" the error when possible to avoid
679  // introducing unneeded artificial variables. Note that with scaling on, the
680  // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
681  // them) and that the SingletonColumnSignPreprocessor makes them all positive.
682  // However, this code works for any coefficient value.
683  const DenseRow& variable_values = variable_values_.GetDenseRow();
684  for (const ColIndex col : singleton_column) {
685  const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
686 
687  // If no singleton columns have entered the basis for this row, choose the
688  // first one. It will be the one with the least decrease in the objective
689  // function when it leaves the basis.
690  if ((*basis)[row] == kInvalidCol) {
691  (*basis)[row] = col;
692  }
693 
694  // If there is already no error in this row (i.e. it is primal-feasible),
695  // there is nothing to do.
696  if (error_[row] == 0.0) continue;
697 
698  // In this case, all the infeasibility can be "absorbed" and this variable
699  // may not be at one of its bound anymore, so we have to use it in the
700  // basis.
701  const Fractional coeff =
702  compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
703  const Fractional new_value = variable_values[col] + error_[row] / coeff;
704  if (new_value >= lower_bound_[col] && new_value <= upper_bound_[col]) {
705  error_[row] = 0.0;
706 
707  // Use this variable in the initial basis.
708  (*basis)[row] = col;
709  continue;
710  }
711 
712  // The idea here is that if the singleton column cannot be used to "absorb"
713  // all error_[row], if it is boxed, it can still be used to make the
714  // infeasibility smaller (with a bound flip).
715  const Fractional box_width = variables_info_.GetBoundDifference(col);
716  DCHECK_NE(box_width, 0.0);
717  DCHECK_NE(error_[row], 0.0);
718  const Fractional error_sign = error_[row] / coeff;
719  if (variable_values[col] == lower_bound_[col] && error_sign > 0.0) {
720  DCHECK(IsFinite(box_width));
721  error_[row] -= coeff * box_width;
722  SetNonBasicVariableStatusAndDeriveValue(col,
724  continue;
725  }
726  if (variable_values[col] == upper_bound_[col] && error_sign < 0.0) {
727  DCHECK(IsFinite(box_width));
728  error_[row] += coeff * box_width;
729  SetNonBasicVariableStatusAndDeriveValue(col,
731  continue;
732  }
733  }
734 }
735 
736 bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
737  const LinearProgram& lp, bool* only_change_is_new_rows,
738  bool* only_change_is_new_cols, ColIndex* num_new_cols) {
739  SCOPED_TIME_STAT(&function_stats_);
740  DCHECK(only_change_is_new_rows != nullptr);
741  DCHECK(only_change_is_new_cols != nullptr);
742  DCHECK(num_new_cols != nullptr);
743  DCHECK_NE(kInvalidCol, lp.GetFirstSlackVariable());
744  DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
745  DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
746 
747  DCHECK_EQ(lp.num_variables(),
748  lp.GetFirstSlackVariable() + RowToColIndex(lp.num_constraints()));
749  DCHECK(IsRightMostSquareMatrixIdentity(lp.GetSparseMatrix()));
750  const bool old_part_of_matrix_is_unchanged =
752  num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
753 
754  // Test if the matrix is unchanged, and if yes, just returns true. Note that
755  // this doesn't check the columns corresponding to the slack variables,
756  // because they were checked by lp.IsInEquationForm() when Solve() was called.
757  if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
758  lp.num_variables() == num_cols_) {
759  return true;
760  }
761 
762  // Check if the new matrix can be derived from the old one just by adding
763  // new rows (i.e new constraints).
764  *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
765  lp.num_constraints() > num_rows_ &&
766  lp.GetFirstSlackVariable() == first_slack_col_;
767 
768  // Check if the new matrix can be derived from the old one just by adding
769  // new columns (i.e new variables).
770  *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
771  lp.num_constraints() == num_rows_ &&
772  lp.GetFirstSlackVariable() > first_slack_col_;
773  *num_new_cols =
774  *only_change_is_new_cols ? lp.num_variables() - num_cols_ : ColIndex(0);
775 
776  // Initialize first_slack_.
777  first_slack_col_ = lp.GetFirstSlackVariable();
778 
779  // Initialize the new dimensions.
780  num_rows_ = lp.num_constraints();
781  num_cols_ = lp.num_variables();
782 
783  // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
784  // already added all the slack variables at this point, so matrix_ will not
785  // change anymore.
786  // TODO(user): This can be sped up by removing the MatrixView.
787  compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
788  if (parameters_.use_transposed_matrix()) {
789  transposed_matrix_.PopulateFromTranspose(compact_matrix_);
790  }
791  return false;
792 }
793 
794 bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
795  const LinearProgram& lp, ColIndex num_new_cols) {
796  SCOPED_TIME_STAT(&function_stats_);
797  DCHECK_EQ(lp.num_variables(), num_cols_);
798  DCHECK_LE(num_new_cols, first_slack_col_);
799  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
800 
801  // Check the original variable bounds.
802  for (ColIndex col(0); col < first_new_col; ++col) {
803  if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
804  upper_bound_[col] != lp.variable_upper_bounds()[col]) {
805  return false;
806  }
807  }
808  // Check that each new variable has a bound of zero.
809  for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
810  if (lp.variable_lower_bounds()[col] != 0.0 &&
811  lp.variable_upper_bounds()[col] != 0.0) {
812  return false;
813  }
814  }
815  // Check that the slack bounds are unchanged.
816  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
817  if (lower_bound_[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
818  upper_bound_[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
819  return false;
820  }
821  }
822  return true;
823 }
824 
825 bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged(
826  const LinearProgram& lp) {
827  SCOPED_TIME_STAT(&function_stats_);
828  lower_bound_.resize(num_cols_, 0.0);
829  upper_bound_.resize(num_cols_, 0.0);
830  bound_perturbation_.AssignToZero(num_cols_);
831 
832  // Variable bounds, for both non-slack and slack variables.
833  bool bounds_are_unchanged = true;
834  DCHECK_EQ(lp.num_variables(), num_cols_);
835  for (ColIndex col(0); col < lp.num_variables(); ++col) {
836  if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
837  upper_bound_[col] != lp.variable_upper_bounds()[col]) {
838  bounds_are_unchanged = false;
839  break;
840  }
841  }
842  if (!bounds_are_unchanged) {
843  lower_bound_ = lp.variable_lower_bounds();
844  upper_bound_ = lp.variable_upper_bounds();
845  }
846  return bounds_are_unchanged;
847 }
848 
849 bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
850  const LinearProgram& lp) {
851  SCOPED_TIME_STAT(&function_stats_);
852 
853  bool objective_is_unchanged = true;
854  objective_.resize(num_cols_, 0.0);
855  DCHECK_EQ(num_cols_, lp.num_variables());
856  if (lp.IsMaximizationProblem()) {
857  // Note that we use the minimization version of the objective internally.
858  for (ColIndex col(0); col < lp.num_variables(); ++col) {
859  const Fractional coeff = -lp.objective_coefficients()[col];
860  if (objective_[col] != coeff) {
861  objective_is_unchanged = false;
862  }
863  objective_[col] = coeff;
864  }
865  objective_offset_ = -lp.objective_offset();
866  objective_scaling_factor_ = -lp.objective_scaling_factor();
867  } else {
868  for (ColIndex col(0); col < lp.num_variables(); ++col) {
869  if (objective_[col] != lp.objective_coefficients()[col]) {
870  objective_is_unchanged = false;
871  break;
872  }
873  }
874  if (!objective_is_unchanged) {
875  objective_ = lp.objective_coefficients();
876  }
877  objective_offset_ = lp.objective_offset();
878  objective_scaling_factor_ = lp.objective_scaling_factor();
879  }
880  return objective_is_unchanged;
881 }
882 
883 void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
884  objective_limit_reached_ = false;
885  DCHECK(std::isfinite(objective_offset_));
886  DCHECK(std::isfinite(objective_scaling_factor_));
887  DCHECK_NE(0.0, objective_scaling_factor_);
888 
889  // This sets dual_objective_limit_ and then primal_objective_limit_.
890  for (const bool set_dual : {true, false}) {
891  // NOTE(user): If objective_scaling_factor_ is negative, the optimization
892  // direction was reversed (during preprocessing or inside revised simplex),
893  // i.e., the original problem is maximization. In such case the _meaning_ of
894  // the lower and upper limits is swapped. To this end we must change the
895  // signs of limits, which happens automatically when calculating shifted
896  // limits. We must also use upper (resp. lower) limit in place of lower
897  // (resp. upper) limit when calculating the final objective_limit_.
898  //
899  // Choose lower limit if using the dual simplex and scaling factor is
900  // negative or if using the primal simplex and scaling is nonnegative, upper
901  // limit otherwise.
902  const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
903  ? parameters_.objective_lower_limit()
904  : parameters_.objective_upper_limit();
905  const Fractional shifted_limit =
906  limit / objective_scaling_factor_ - objective_offset_;
907  if (set_dual) {
908  dual_objective_limit_ = shifted_limit;
909  } else {
910  primal_objective_limit_ = shifted_limit;
911  }
912  }
913 }
914 
// Seeds the variable statuses from a previously saved BasisState, repairing
// any status that is inconsistent with the current variable bounds and
// capping the number of BASIC variables at num_rows_. num_new_cols is the
// number of structural columns appended since the state was saved; the saved
// slack statuses are therefore looked up shifted by that amount.
void RevisedSimplex::InitializeVariableStatusesForWarmStart(
    const BasisState& state, ColIndex num_new_cols) {
  variables_info_.InitializeAndComputeType();
  RowIndex num_basic_variables(0);
  DCHECK_LE(num_new_cols, first_slack_col_);
  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
  // Compute the status for all the columns (note that the slack variables are
  // already added at the end of the matrix at this stage).
  for (ColIndex col(0); col < num_cols_; ++col) {
    const VariableStatus default_status = ComputeDefaultVariableStatus(col);

    // Start with the given "warm" status from the BasisState if it exists.
    VariableStatus status = default_status;
    if (col < first_new_col && col < state.statuses.size()) {
      status = state.statuses[col];
    } else if (col >= first_slack_col_ &&
               col - num_new_cols < state.statuses.size()) {
      // Slack columns moved right by num_new_cols relative to the saved
      // state, so read their status from the old position.
      status = state.statuses[col - num_new_cols];
    }

    if (status == VariableStatus::BASIC) {
      // Do not allow more than num_rows_ VariableStatus::BASIC variables.
      if (num_basic_variables == num_rows_) {
        VLOG(1) << "Too many basic variables in the warm-start basis."
                << "Only keeping the first ones as VariableStatus::BASIC.";
        variables_info_.UpdateToNonBasicStatus(col, default_status);
      } else {
        ++num_basic_variables;
        variables_info_.UpdateToBasicStatus(col);
      }
    } else {
      // Remove incompatibilities between the warm status and the variable
      // bounds. We use the default status as an indication of the bounds
      // type.
      if ((status != default_status) &&
          ((default_status == VariableStatus::FIXED_VALUE) ||
           (status == VariableStatus::FREE) ||
           (status == VariableStatus::FIXED_VALUE) ||
           (status == VariableStatus::AT_LOWER_BOUND &&
            lower_bound_[col] == -kInfinity) ||
           (status == VariableStatus::AT_UPPER_BOUND &&
            upper_bound_[col] == kInfinity))) {
        status = default_status;
      }
      variables_info_.UpdateToNonBasicStatus(col, status);
    }
  }

  // Initialize the values.
  variable_values_.ResetAllNonBasicVariableValues();
}
966 
967 // This implementation starts with an initial matrix B equal to the identity
968 // matrix (modulo a column permutation). For that it uses either the slack
969 // variables or the singleton columns present in the problem. Afterwards, the
970 // fixed slacks in the basis are exchanged with normal columns of A if possible
971 // by the InitialBasis class.
972 Status RevisedSimplex::CreateInitialBasis() {
973  SCOPED_TIME_STAT(&function_stats_);
974 
975  // Initialize the variable values and statuses.
976  // Note that for the dual algorithm, boxed variables will be made
977  // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
978  // really matter at which of their two finite bounds they start.
979  int num_free_variables = 0;
980  variables_info_.InitializeAndComputeType();
981  for (ColIndex col(0); col < num_cols_; ++col) {
982  const VariableStatus status = ComputeDefaultVariableStatus(col);
983  SetNonBasicVariableStatusAndDeriveValue(col, status);
984  if (status == VariableStatus::FREE) ++num_free_variables;
985  }
986  VLOG(1) << "Number of free variables in the problem: " << num_free_variables;
987 
988  // Start by using an all-slack basis.
989  RowToColMapping basis(num_rows_, kInvalidCol);
990  for (RowIndex row(0); row < num_rows_; ++row) {
991  basis[row] = SlackColIndex(row);
992  }
993 
994  // If possible, for the primal simplex we replace some slack variables with
995  // some singleton columns present in the problem.
996  if (!parameters_.use_dual_simplex() &&
997  parameters_.initial_basis() != GlopParameters::MAROS &&
998  parameters_.exploit_singleton_column_in_initial_basis()) {
999  // For UseSingletonColumnInInitialBasis() to work better, we change
1000  // the value of the boxed singleton column with a non-zero cost to the best
1001  // of their two bounds.
1002  for (ColIndex col(0); col < num_cols_; ++col) {
1003  if (compact_matrix_.column(col).num_entries() != 1) continue;
1004  const VariableStatus status = variables_info_.GetStatusRow()[col];
1005  const Fractional objective = objective_[col];
1006  if (objective > 0 && IsFinite(lower_bound_[col]) &&
1007  status == VariableStatus::AT_UPPER_BOUND) {
1008  SetNonBasicVariableStatusAndDeriveValue(col,
1010  } else if (objective < 0 && IsFinite(upper_bound_[col]) &&
1011  status == VariableStatus::AT_LOWER_BOUND) {
1012  SetNonBasicVariableStatusAndDeriveValue(col,
1014  }
1015  }
1016 
1017  // Compute the primal infeasibility of the initial variable values in
1018  // error_.
1019  ComputeVariableValuesError();
1020 
1021  // TODO(user): A better but slightly more complex algorithm would be to:
1022  // - Ignore all singleton columns except the slacks during phase I.
1023  // - For this, change the slack variable bounds accordingly.
1024  // - At the end of phase I, restore the slack variable bounds and perform
1025  // the same algorithm to start with feasible and "optimal" values of the
1026  // singleton columns.
1027  basis.assign(num_rows_, kInvalidCol);
1028  UseSingletonColumnInInitialBasis(&basis);
1029 
1030  // Eventually complete the basis with fixed slack columns.
1031  for (RowIndex row(0); row < num_rows_; ++row) {
1032  if (basis[row] == kInvalidCol) {
1033  basis[row] = SlackColIndex(row);
1034  }
1035  }
1036  }
1037 
1038  // Use an advanced initial basis to remove the fixed variables from the basis.
1039  if (parameters_.initial_basis() == GlopParameters::NONE) {
1040  return InitializeFirstBasis(basis);
1041  }
1042  if (parameters_.initial_basis() == GlopParameters::MAROS) {
1043  InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1044  upper_bound_, variables_info_.GetTypeRow());
1045  if (parameters_.use_dual_simplex()) {
1046  // This dual version only uses zero-cost columns to complete the
1047  // basis.
1048  initial_basis.GetDualMarosBasis(num_cols_, &basis);
1049  } else {
1050  initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1051  }
1052  int number_changed = 0;
1053  for (RowIndex row(0); row < num_rows_; ++row) {
1054  if (basis[row] != SlackColIndex(row)) {
1055  number_changed++;
1056  }
1057  }
1058  VLOG(1) << "Number of Maros basis changes: " << number_changed;
1059  } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1060  parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1061  // First unassign the fixed variables from basis.
1062  int num_fixed_variables = 0;
1063  for (RowIndex row(0); row < basis.size(); ++row) {
1064  const ColIndex col = basis[row];
1065  if (lower_bound_[col] == upper_bound_[col]) {
1066  basis[row] = kInvalidCol;
1067  ++num_fixed_variables;
1068  }
1069  }
1070 
1071  if (num_fixed_variables == 0) {
1072  VLOG(1) << "Crash is set to " << parameters_.initial_basis()
1073  << " but there is no equality rows to remove from initial all "
1074  "slack basis.";
1075  } else {
1076  // Then complete the basis with an advanced initial basis algorithm.
1077  VLOG(1) << "Trying to remove " << num_fixed_variables
1078  << " fixed variables from the initial basis.";
1079  InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1080  upper_bound_, variables_info_.GetTypeRow());
1081 
1082  if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1083  if (parameters_.use_scaling()) {
1084  initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1085  } else {
1086  VLOG(1) << "Bixby initial basis algorithm requires the problem "
1087  << "to be scaled. Skipping Bixby's algorithm.";
1088  }
1089  } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1090  // Note the use of num_cols_ here because this algorithm
1091  // benefits from treating fixed slack columns like any other column.
1092  if (parameters_.use_dual_simplex()) {
1093  // This dual version only uses zero-cost columns to complete the
1094  // basis.
1095  initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1096  } else {
1097  initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1098  }
1099 
1100  const Status status = InitializeFirstBasis(basis);
1101  if (status.ok()) {
1102  return status;
1103  } else {
1104  VLOG(1) << "Reverting to all slack basis.";
1105 
1106  for (RowIndex row(0); row < num_rows_; ++row) {
1107  basis[row] = SlackColIndex(row);
1108  }
1109  }
1110  }
1111  }
1112  } else {
1113  LOG(WARNING) << "Unsupported initial_basis parameters: "
1114  << parameters_.initial_basis();
1115  }
1116 
1117  return InitializeFirstBasis(basis);
1118 }
1119 
// Installs the given basis: completes it with slack columns, factorizes it,
// rejects it if the factorization's condition-number upper bound is too high,
// and otherwise marks its columns BASIC and recomputes the basic variable
// values. Returns an error status when the basis is rejected.
Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
  basis_ = basis;

  // For each row which does not have a basic column, assign it to the
  // corresponding slack column.
  basis_.resize(num_rows_, kInvalidCol);
  for (RowIndex row(0); row < num_rows_; ++row) {
    if (basis_[row] == kInvalidCol) {
      basis_[row] = SlackColIndex(row);
    }
  }

  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
  PermuteBasis();

  // Test that the upper bound on the condition number of basis is not too high.
  // The number was not computed by any rigorous analysis, we just prefer to
  // revert to the all slack basis if the condition number of our heuristic
  // first basis seems bad. See for instance on cond11.mps, where we get an
  // infinity upper bound.
  const Fractional condition_number_ub =
      basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
    const std::string error_message =
        absl::StrCat("The matrix condition number upper bound is too high: ",
                     condition_number_ub);
    VLOG(1) << error_message;
    return Status(Status::ERROR_LU, error_message);
  }

  // Everything is okay, finish the initialization.
  for (RowIndex row(0); row < num_rows_; ++row) {
    variables_info_.Update(basis_[row], VariableStatus::BASIC);
  }
  DCHECK(BasisIsConsistent());

  // TODO(user): Maybe return an error status if this is too high. Note however
  // that if we want to do that, we need to reset variables_info_ to a
  // consistent state.
  variable_values_.RecomputeBasicVariableValues();
  if (VLOG_IS_ON(1)) {
    // Informational only: warn when the freshly computed values violate A.x=b
    // beyond the primal feasibility tolerance.
    const Fractional tolerance = parameters_.primal_feasibility_tolerance();
    if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
      VLOG(1) << absl::StrCat(
          "The primal residual of the initial basis is above the tolerance, ",
          variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
    }
  }
  return Status::OK();
}
1170 
// Prepares the solver for the given lp: loads matrix/objective/bounds, picks
// primal vs dual simplex, and decides between an incremental warm start (in
// several flavors, depending on what changed since the last solve) and a
// solve from scratch with a fresh initial basis.
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  bool matrix_is_unchanged = true;
  bool only_new_bounds = false;
  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
    matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
        lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols);
    only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
                      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
                          lp, num_new_cols);
  } else if (DEBUG_MODE) {
    // The caller promised the matrix did not change; verify in debug builds.
    CHECK(InitializeMatrixAndTestIfUnchanged(
        lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols));
  }
  notify_that_matrix_is_unchanged_ = false;
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);
  const bool bounds_are_unchanged = InitializeBoundsAndTestIfUnchanged(lp);

  // If parameters_.allow_simplex_algorithm_change() is true and we already have
  // a primal (resp. dual) feasible solution, then we use the primal (resp.
  // dual) algorithm since there is a good chance that it will be faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit(lp);

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(1)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        // New columns were appended just before the slacks: shift the basic
        // slack indices accordingly before reusing the basis.
        InitializeVariableStatusesForWarmStart(solution_state_, num_new_cols);
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            InitializeVariableStatusesForWarmStart(solution_state_,
                                                   ColIndex(0));
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate it.
    InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
    // Rebuild basis_ from the set of columns marked BASIC (at most num_rows_
    // of them, enforced by InitializeVariableStatusesForWarmStart()).
    basis_.assign(num_rows_, kInvalidCol);
    RowIndex row(0);
    for (ColIndex col : variables_info_.GetIsBasicBitRow()) {
      basis_[row] = col;
      ++row;
    }

    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // TODO(user): If the basis is incomplete, we could complete it with
    // better slack variables than is done by InitializeFirstBasis() by
    // using a partial LU decomposition (see markowitz.h).
    if (InitializeFirstBasis(basis_).ok()) {
      solve_from_scratch = false;
    } else {
      if (log_info) {
        LOG(INFO) << "RevisedSimplex is not using the warm start "
                     "basis because it is not factorizable.";
      }
    }
  }

  if (solve_from_scratch) {
    if (log_info) LOG(INFO) << "Solve from scratch.";
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    if (log_info) LOG(INFO) << "Incremental solve.";
  }
  DCHECK(BasisIsConsistent());
  return Status::OK();
}
1342 
1343 void RevisedSimplex::DisplayBasicVariableStatistics() {
1344  SCOPED_TIME_STAT(&function_stats_);
1345 
1346  int num_fixed_variables = 0;
1347  int num_free_variables = 0;
1348  int num_variables_at_bound = 0;
1349  int num_slack_variables = 0;
1350  int num_infeasible_variables = 0;
1351 
1352  const DenseRow& variable_values = variable_values_.GetDenseRow();
1353  const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1354  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1355  for (RowIndex row(0); row < num_rows_; ++row) {
1356  const ColIndex col = basis_[row];
1357  const Fractional value = variable_values[col];
1358  if (variable_types[col] == VariableType::UNCONSTRAINED) {
1359  ++num_free_variables;
1360  }
1361  if (value > upper_bound_[col] + tolerance ||
1362  value < lower_bound_[col] - tolerance) {
1363  ++num_infeasible_variables;
1364  }
1365  if (col >= first_slack_col_) {
1366  ++num_slack_variables;
1367  }
1368  if (lower_bound_[col] == upper_bound_[col]) {
1369  ++num_fixed_variables;
1370  } else if (variable_values[col] == lower_bound_[col] ||
1371  variable_values[col] == upper_bound_[col]) {
1372  ++num_variables_at_bound;
1373  }
1374  }
1375 
1376  VLOG(1) << "Basis size: " << num_rows_;
1377  VLOG(1) << "Number of basic infeasible variables: "
1378  << num_infeasible_variables;
1379  VLOG(1) << "Number of basic slack variables: " << num_slack_variables;
1380  VLOG(1) << "Number of basic variables at bound: " << num_variables_at_bound;
1381  VLOG(1) << "Number of basic fixed variables: " << num_fixed_variables;
1382  VLOG(1) << "Number of basic free variables: " << num_free_variables;
1383 }
1384 
1385 void RevisedSimplex::SaveState() {
1386  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1387  solution_state_.statuses = variables_info_.GetStatusRow();
1388  solution_state_has_been_set_externally_ = false;
1389 }
1390 
1391 RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1392  DenseBooleanColumn contains_data(num_rows_, false);
1393  for (ColIndex col(0); col < num_cols_; ++col) {
1394  for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1395  contains_data[e.row()] = true;
1396  }
1397  }
1398  RowIndex num_empty_rows(0);
1399  for (RowIndex row(0); row < num_rows_; ++row) {
1400  if (!contains_data[row]) {
1401  ++num_empty_rows;
1402  VLOG(1) << "Row " << row << " is empty.";
1403  }
1404  }
1405  return num_empty_rows;
1406 }
1407 
1408 ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1409  ColIndex num_empty_cols(0);
1410  for (ColIndex col(0); col < num_cols_; ++col) {
1411  if (compact_matrix_.column(col).IsEmpty()) {
1412  ++num_empty_cols;
1413  VLOG(1) << "Column " << col << " is empty.";
1414  }
1415  }
1416  return num_empty_cols;
1417 }
1418 
// Recomputes the basic variable values with better precision when the primal
// residual |A.x - b| has drifted above a fraction of the feasibility
// tolerance. Also contains a (currently disabled) anti-degeneracy bound
// perturbation.
void RevisedSimplex::CorrectErrorsOnVariableValues() {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK(basis_factorization_.IsRefactorized());

  // TODO(user): The primal residual error does not change if we take degenerate
  // steps or if we do not change the variable values. No need to recompute it
  // in this case.
  const Fractional primal_residual =
      variable_values_.ComputeMaximumPrimalResidual();

  // If the primal_residual is within the tolerance, no need to recompute
  // the basic variable values with a better precision.
  if (primal_residual >= parameters_.harris_tolerance_ratio() *
                             parameters_.primal_feasibility_tolerance()) {
    variable_values_.RecomputeBasicVariableValues();
    VLOG(1) << "Primal infeasibility (bounds error) = "
            << variable_values_.ComputeMaximumPrimalInfeasibility()
            << ", Primal residual |A.x - b| = "
            << variable_values_.ComputeMaximumPrimalResidual();
  }

  // If we are doing too many degenerate iterations, we try to perturb the
  // problem by extending each basic variable bound with a random value. See how
  // bound_perturbation_ is used in ComputeHarrisRatioAndLeavingCandidates().
  //
  // Note that the perturbation is currently only reset to zero at the end of
  // the algorithm.
  //
  // TODO(user): This is currently disabled because the improvement is unclear.
  if (/* DISABLES CODE */ false &&
      (!feasibility_phase_ && num_consecutive_degenerate_iterations_ >= 100)) {
    VLOG(1) << "Perturbing the problem.";
    const Fractional tolerance = parameters_.harris_tolerance_ratio() *
                                 parameters_.primal_feasibility_tolerance();
    std::uniform_real_distribution<double> dist(0, tolerance);
    for (ColIndex col(0); col < num_cols_; ++col) {
      bound_perturbation_[col] += dist(random_);
    }
  }
}
1459 
1460 void RevisedSimplex::ComputeVariableValuesError() {
1461  SCOPED_TIME_STAT(&function_stats_);
1462  error_.AssignToZero(num_rows_);
1463  const DenseRow& variable_values = variable_values_.GetDenseRow();
1464  for (ColIndex col(0); col < num_cols_; ++col) {
1465  const Fractional value = variable_values[col];
1466  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1467  }
1468 }
1469 
1470 void RevisedSimplex::ComputeDirection(ColIndex col) {
1471  SCOPED_TIME_STAT(&function_stats_);
1473  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1474  direction_infinity_norm_ = 0.0;
1475  if (direction_.non_zeros.empty()) {
1476  // We still compute the direction non-zeros because our code relies on it.
1477  for (RowIndex row(0); row < num_rows_; ++row) {
1478  const Fractional value = direction_[row];
1479  if (value != 0.0) {
1480  direction_.non_zeros.push_back(row);
1481  direction_infinity_norm_ =
1482  std::max(direction_infinity_norm_, std::abs(value));
1483  }
1484  }
1485  } else {
1486  for (const auto e : direction_) {
1487  direction_infinity_norm_ =
1488  std::max(direction_infinity_norm_, std::abs(e.coefficient()));
1489  }
1490  }
1491  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1492  num_rows_ == 0 ? 0.0
1493  : static_cast<double>(direction_.non_zeros.size()) /
1494  static_cast<double>(num_rows_.value())));
1495 }
1496 
// Returns the infinity norm of the residual left in error_ after starting
// from column `col` and subtracting e.coefficient() multiples of a column for
// every non-zero of direction_.
Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
  SCOPED_TIME_STAT(&function_stats_);
  compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
  for (const auto e : direction_) {
    // NOTE(review): this subtracts multiples of the *entering* column `col`
    // itself. For a residual of the form A_col - B * direction_ one would
    // expect the basic column basis_[e.row()] here instead — confirm against
    // upstream before relying on this value for anything but diagnostics.
    compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
                                                   &error_);
  }
  return InfinityNorm(error_);
}
1506 
1507 template <bool is_entering_reduced_cost_positive>
1508 Fractional RevisedSimplex::GetRatio(RowIndex row) const {
1509  const ColIndex col = basis_[row];
1510  const Fractional direction = direction_[row];
1511  const Fractional value = variable_values_.Get(col);
1512  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1513  DCHECK_NE(direction, 0.0);
1514  if (is_entering_reduced_cost_positive) {
1515  if (direction > 0.0) {
1516  return (upper_bound_[col] - value) / direction;
1517  } else {
1518  return (lower_bound_[col] - value) / direction;
1519  }
1520  } else {
1521  if (direction > 0.0) {
1522  return (value - lower_bound_[col]) / direction;
1523  } else {
1524  return (value - upper_bound_[col]) / direction;
1525  }
1526  }
1527 }
1528 
// First pass of the Harris ratio test: fills leaving_candidates with every
// row whose ratio is at most the current Harris bound and returns that bound.
// The bound starts at bound_flip_ratio (a larger leaving ratio can never beat
// a bound flip) and shrinks as candidates are found.
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  for (const auto e : direction_) {
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(e.row());
    // TODO(user): The perturbation is currently disabled, so no need to test
    // anything here.
    if (false && ratio < 0.0) {
      // If the variable is already pass its bound, we use the perturbed version
      // of the bound (if bound_perturbation_[basis_[row]] is not zero).
      ratio += std::abs(bound_perturbation_[basis_[e.row()]] / e.coefficient());
    }
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increase the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because its helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1584 
1585 namespace {
1586 
1587 // Returns true if the candidate ratio is supposed to be more stable than the
1588 // current ratio (or if the two are equal).
1589 // The idea here is to take, by order of preference:
1590 // - the minimum positive ratio in order to intoduce a primal infeasibility
1591 // which is as small as possible.
1592 // - or the least negative one in order to have the smallest bound shift
1593 // possible on the leaving variable.
1594 bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1595  if (current >= 0.0) {
1596  return candidate >= 0.0 && candidate <= current;
1597  } else {
1598  return candidate >= current;
1599  }
1600 }
1601 
1602 } // namespace
1603 
1604 // Ratio-test or Quotient-test. Choose the row of the leaving variable.
1605 // Known as CHUZR or CHUZRO in FORTRAN codes.
1606 Status RevisedSimplex::ChooseLeavingVariableRow(
1607  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1608  RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1609  SCOPED_TIME_STAT(&function_stats_);
1610  GLOP_RETURN_ERROR_IF_NULL(refactorize);
1611  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1612  GLOP_RETURN_ERROR_IF_NULL(step_length);
1613  DCHECK_COL_BOUNDS(entering_col);
1614  DCHECK_NE(0.0, reduced_cost);
1615 
1616  // A few cases will cause the test to be recomputed from the beginning.
1617  int stats_num_leaving_choices = 0;
1618  equivalent_leaving_choices_.clear();
1619  while (true) {
1620  stats_num_leaving_choices = 0;
1621 
1622  // We initialize current_ratio with the maximum step the entering variable
1623  // can take (bound-flip). Note that we do not use tolerance here.
1624  const Fractional entering_value = variable_values_.Get(entering_col);
1625  Fractional current_ratio =
1626  (reduced_cost > 0.0) ? entering_value - lower_bound_[entering_col]
1627  : upper_bound_[entering_col] - entering_value;
1628  DCHECK_GT(current_ratio, 0.0);
1629 
1630  // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1631  // actually computes the minimum leaving ratio of all the variables. This is
1632  // the same as the 'classic' ratio test.
1633  const Fractional harris_ratio =
1634  (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1635  current_ratio, &leaving_candidates_)
1636  : ComputeHarrisRatioAndLeavingCandidates<false>(
1637  current_ratio, &leaving_candidates_);
1638 
1639  // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1640  // variable too much out of bounds), we take it as it is always stable and
1641  // fast.
1642  if (current_ratio <= harris_ratio) {
1643  *leaving_row = kInvalidRow;
1644  *step_length = current_ratio;
1645  break;
1646  }
1647 
1648  // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1649  // <= harris_ratio', we choose the leaving row with the largest coefficient.
1650  //
1651  // This has a big impact, because picking a leaving variable with a small
1652  // direction_[row] is the main source of Abnormal LU errors.
1653  Fractional pivot_magnitude = 0.0;
1654  stats_num_leaving_choices = 0;
1655  *leaving_row = kInvalidRow;
1656  equivalent_leaving_choices_.clear();
1657  for (const SparseColumn::Entry e : leaving_candidates_) {
1658  const Fractional ratio = e.coefficient();
1659  if (ratio > harris_ratio) continue;
1660  ++stats_num_leaving_choices;
1661  const RowIndex row = e.row();
1662 
1663  // If the magnitudes are the same, we choose the leaving variable with
1664  // what is probably the more stable ratio, see
1665  // IsRatioMoreOrEquallyStable().
1666  const Fractional candidate_magnitude = std::abs(direction_[row]);
1667  if (candidate_magnitude < pivot_magnitude) continue;
1668  if (candidate_magnitude == pivot_magnitude) {
1669  if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1670  if (ratio == current_ratio) {
1671  DCHECK_NE(kInvalidRow, *leaving_row);
1672  equivalent_leaving_choices_.push_back(row);
1673  continue;
1674  }
1675  }
1676  equivalent_leaving_choices_.clear();
1677  current_ratio = ratio;
1678  pivot_magnitude = candidate_magnitude;
1679  *leaving_row = row;
1680  }
1681 
1682  // Break the ties randomly.
1683  if (!equivalent_leaving_choices_.empty()) {
1684  equivalent_leaving_choices_.push_back(*leaving_row);
1685  *leaving_row =
1686  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1687  0, equivalent_leaving_choices_.size() - 1)(random_)];
1688  }
1689 
1690  // Since we took care of the bound-flip at the beginning, at this point
1691  // we have a valid leaving row.
1692  DCHECK_NE(kInvalidRow, *leaving_row);
1693 
1694  // A variable already outside one of its bounds +/- tolerance is considered
1695  // at its bound and its ratio is zero. Not doing this may lead to a step
1696  // that moves the objective in the wrong direction. We may want to allow
1697  // such steps, but then we will need to check that it doesn't break the
1698  // bounds of the other variables.
1699  if (current_ratio <= 0.0) {
1700  // Instead of doing a zero step, we do a small positive step. This
1701  // helps on degenerate problems.
1702  const Fractional minimum_delta =
1703  parameters_.degenerate_ministep_factor() *
1704  parameters_.primal_feasibility_tolerance();
1705  *step_length = minimum_delta / pivot_magnitude;
1706  } else {
1707  *step_length = current_ratio;
1708  }
1709 
1710  // Note(user): Testing the pivot at each iteration is useful for debugging
1711  // an LU factorization problem. Remove the false if you need to investigate
1712  // this, it makes sure that this will be compiled away.
1713  if (/* DISABLES CODE */ (false)) {
1714  TestPivot(entering_col, *leaving_row);
1715  }
1716 
1717  // We try various "heuristics" to avoid a small pivot.
1718  //
1719  // The smaller 'direction_[*leaving_row]', the less precise
1720  // it is. So we want to avoid pivoting by such a row. Small pivots lead to
1721  // ill-conditioned bases or even to matrices that are not a basis at all if
1722  // the actual (infinite-precision) coefficient is zero.
1723  //
1724  // TODO(user): We may have to choose another entering column if
1725  // we cannot prevent pivoting by a small pivot.
1726  // (Chvatal, p.115, about epsilon2.)
1727  if (pivot_magnitude <
1728  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
1729  // The first countermeasure is to recompute everything to the best
1730  // precision we can in the hope of avoiding such a choice. Note that this
1731  // helps a lot on the Netlib problems.
1732  if (!basis_factorization_.IsRefactorized()) {
1733  VLOG(1) << "Refactorizing to avoid pivoting by "
1734  << direction_[*leaving_row]
1735  << " direction_infinity_norm_ = " << direction_infinity_norm_
1736  << " reduced cost = " << reduced_cost;
1737  *refactorize = true;
1738  return Status::OK();
1739  }
1740 
1741  // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
1742  // we kwnow that this pivot will still have an acceptable magnitude.
1743  //
1744  // TODO(user): An issue left to fix is that if there is no such pivot at
1745  // all, then we will report unbounded even if this is not really the case.
1746  // As of 2018/07/18, this happens on l30.mps.
1747  VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
1748  << " direction_infinity_norm_ = " << direction_infinity_norm_
1749  << " reduced cost = " << reduced_cost;
1750  DCHECK_GE(std::abs(direction_[*leaving_row]),
1751  parameters_.minimum_acceptable_pivot());
1752  IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
1753  }
1754  break;
1755  }
1756 
1757  // Update the target bound.
1758  if (*leaving_row != kInvalidRow) {
1759  const bool is_reduced_cost_positive = (reduced_cost > 0.0);
1760  const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
1761  *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
1762  ? upper_bound_[basis_[*leaving_row]]
1763  : lower_bound_[basis_[*leaving_row]];
1764  }
1765 
1766  // Stats.
1768  ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
1769  if (!equivalent_leaving_choices_.empty()) {
1770  ratio_test_stats_.num_perfect_ties.Add(
1771  equivalent_leaving_choices_.size());
1772  }
1773  if (*leaving_row != kInvalidRow) {
1774  ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
1775  }
1776  });
1777  return Status::OK();
1778 }
1779 
1780 namespace {
1781 
1782 // Store a row with its ratio, coefficient magnitude and target bound. This is
1783 // used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1784 // details.
1785 struct BreakPoint {
1786  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1787  Fractional _target_bound)
1788  : row(_row),
1789  ratio(_ratio),
1790  coeff_magnitude(_coeff_magnitude),
1791  target_bound(_target_bound) {}
1792 
1793  // We want to process the breakpoints by increasing ratio and decreasing
1794  // coefficient magnitude (if the ratios are the same). Returns false if "this"
1795  // is before "other" in a priority queue.
1796  bool operator<(const BreakPoint& other) const {
1797  if (ratio == other.ratio) {
1798  if (coeff_magnitude == other.coeff_magnitude) {
1799  return row > other.row;
1800  }
1801  return coeff_magnitude < other.coeff_magnitude;
1802  }
1803  return ratio > other.ratio;
1804  }
1805 
1806  RowIndex row;
1810 };
1811 
1812 } // namespace
1813 
// Chooses the leaving row for the primal phase I. All the breakpoints where a
// basic variable changes of primal feasibility status along the entering
// direction are enumerated, and amongst the ones that keep decreasing the sum
// of primal infeasibilities we keep the one with the largest coefficient
// magnitude (for numerical stability). May request a refactorization instead
// of returning a row when only a small pivot is available.
void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length,
    Fractional* target_bound) const {
  SCOPED_TIME_STAT(&function_stats_);
  RETURN_IF_NULL(refactorize);
  RETURN_IF_NULL(leaving_row);
  RETURN_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);

  // We initialize current_ratio with the maximum step the entering variable
  // can take (bound-flip). Note that we do not use tolerance here.
  const Fractional entering_value = variable_values_.Get(entering_col);
  Fractional current_ratio = (reduced_cost > 0.0)
                                 ? entering_value - lower_bound_[entering_col]
                                 : upper_bound_[entering_col] - entering_value;
  DCHECK_GT(current_ratio, 0.0);

  std::vector<BreakPoint> breakpoints;
  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
  for (const auto e : direction_) {
    // 'direction' is the motion of the basic variable in this row for a unit
    // step of the entering variable in its improving direction.
    const Fractional direction =
        reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
    const Fractional magnitude = std::abs(direction);
    if (magnitude < tolerance) continue;

    // Computes by how much we can add 'direction' to the basic variable value
    // with index 'row' until it changes of primal feasibility status. That is
    // from infeasible to feasible or from feasible to infeasible. Note that the
    // transition infeasible->feasible->infeasible is possible. We use
    // tolerances here, but when the step will be performed, it will move the
    // variable to the target bound (possibly taking a small negative step).
    //
    // Note(user): The negative step will only happen when the leaving variable
    // was slightly infeasible (less than tolerance). Moreover, the overall
    // infeasibility will not necessarily increase since it doesn't take into
    // account all the variables with an infeasibility smaller than the
    // tolerance, and here we will at least improve the one of the leaving
    // variable.
    const ColIndex col = basis_[e.row()];
    DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));

    const Fractional value = variable_values_.Get(col);
    const Fractional lower_bound = lower_bound_[col];
    const Fractional upper_bound = upper_bound_[col];
    const Fractional to_lower = (lower_bound - tolerance - value) / direction;
    const Fractional to_upper = (upper_bound + tolerance - value) / direction;

    // Enqueue the possible transitions. Note that the second tests exclude the
    // case where to_lower or to_upper are infinite.
    if (to_lower >= 0.0 && to_lower < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_lower, magnitude, lower_bound));
    }
    if (to_upper >= 0.0 && to_upper < current_ratio) {
      breakpoints.push_back(
          BreakPoint(e.row(), to_upper, magnitude, upper_bound));
    }
  }

  // Order the breakpoints by increasing ratio and decreasing coefficient
  // magnitude (if the ratios are the same).
  std::make_heap(breakpoints.begin(), breakpoints.end());

  // Select the last breakpoint that still improves the infeasibility and has
  // the largest coefficient magnitude.
  Fractional improvement = std::abs(reduced_cost);
  Fractional best_magnitude = 0.0;
  *leaving_row = kInvalidRow;
  while (!breakpoints.empty()) {
    const BreakPoint top = breakpoints.front();
    // TODO(user): consider using >= here. That will lead to bigger ratio and
    // hence a better impact on the infeasibility. The drawback is that more
    // effort may be needed to update the reduced costs.
    //
    // TODO(user): Use a random tie breaking strategy for BreakPoint with
    // same ratio and same coefficient magnitude? Koberstein explains in his PhD
    // that it helped on the dual-simplex.
    if (top.coeff_magnitude > best_magnitude) {
      *leaving_row = top.row;
      current_ratio = top.ratio;
      best_magnitude = top.coeff_magnitude;
      *target_bound = top.target_bound;
    }

    // As long as the sum of primal infeasibilities is decreasing, we look for
    // pivots that are numerically more stable.
    improvement -= top.coeff_magnitude;
    if (improvement <= 0.0) break;
    std::pop_heap(breakpoints.begin(), breakpoints.end());
    breakpoints.pop_back();
  }

  // Try to avoid a small pivot by refactorizing.
  if (*leaving_row != kInvalidRow) {
    const Fractional threshold =
        parameters_.small_pivot_threshold() * direction_infinity_norm_;
    if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
      *refactorize = true;
      return;
    }
  }
  *step_length = current_ratio;
}
1919 
1920 // This implements the pricing step for the dual simplex.
1921 Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
1922  Fractional* cost_variation,
1924  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1925  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
1926 
1927  // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
1928  // steepest edge or the normal Dantzig pricing.
1929  const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
1930  SCOPED_TIME_STAT(&function_stats_);
1931 
1932  *leaving_row = kInvalidRow;
1933  Fractional best_price(0.0);
1934  const DenseColumn& squared_infeasibilities =
1935  variable_values_.GetPrimalSquaredInfeasibilities();
1936  equivalent_leaving_choices_.clear();
1937  for (const RowIndex row : variable_values_.GetPrimalInfeasiblePositions()) {
1938  const Fractional scaled_best_price = best_price * squared_norm[row];
1939  if (squared_infeasibilities[row] >= scaled_best_price) {
1940  if (squared_infeasibilities[row] == scaled_best_price) {
1941  DCHECK_NE(*leaving_row, kInvalidRow);
1942  equivalent_leaving_choices_.push_back(row);
1943  continue;
1944  }
1945  equivalent_leaving_choices_.clear();
1946  best_price = squared_infeasibilities[row] / squared_norm[row];
1947  *leaving_row = row;
1948  }
1949  }
1950 
1951  // Break the ties randomly.
1952  if (!equivalent_leaving_choices_.empty()) {
1953  equivalent_leaving_choices_.push_back(*leaving_row);
1954  *leaving_row =
1955  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1956  0, equivalent_leaving_choices_.size() - 1)(random_)];
1957  }
1958 
1959  // Return right away if there is no leaving variable.
1960  // Fill cost_variation and target_bound otherwise.
1961  if (*leaving_row == kInvalidRow) return Status::OK();
1962  const ColIndex leaving_col = basis_[*leaving_row];
1963  const Fractional value = variable_values_.Get(leaving_col);
1964  if (value < lower_bound_[leaving_col]) {
1965  *cost_variation = lower_bound_[leaving_col] - value;
1966  *target_bound = lower_bound_[leaving_col];
1967  DCHECK_GT(*cost_variation, 0.0);
1968  } else {
1969  *cost_variation = upper_bound_[leaving_col] - value;
1970  *target_bound = upper_bound_[leaving_col];
1971  DCHECK_LT(*cost_variation, 0.0);
1972  }
1973  return Status::OK();
1974 }
1975 
1976 namespace {
1977 
1978 // Returns true if a basic variable with given cost and type is to be considered
1979 // as a leaving candidate for the dual phase I. This utility function is used
1980 // to keep is_dual_entering_candidate_ up to date.
1981 bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
1982  Fractional threshold) {
1983  if (cost == 0.0) return false;
1984  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
1985  type == VariableType::FIXED_VARIABLE ||
1986  (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
1987  (type == VariableType::LOWER_BOUNDED && cost > threshold);
1988 }
1989 
1990 } // namespace
1991 
// Updates the dual phase-I pricing quantities after a pivot on
// (leaving_row, entering_col): dual_pricing_vector_ is transformed into the
// new basis via the eta update given by direction_, and
// is_dual_entering_candidate_ is refreshed for every touched row.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);
  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Convert the dual_pricing_vector_ from the old basis into the new one (which
  // is the same as multiplying it by an Eta matrix corresponding to the
  // direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    is_dual_entering_candidate_.Set(
        e.row(), IsDualPhaseILeavingCandidate(dual_pricing_vector_[e.row()],
                                              variable_type[basis_[e.row()]],
                                              threshold));
  }
  // The pivot row takes the value of the step itself (standard eta update).
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status.
  // NOTE(review): variable_type is indexed by entering_col here, presumably
  // because basis_[leaving_row] already holds entering_col at this point —
  // confirm against the caller's pivot ordering.
  is_dual_entering_candidate_.Set(
      leaving_row,
      IsDualPhaseILeavingCandidate(dual_pricing_vector_[leaving_row],
                                   variable_type[entering_col], threshold));
}
2030 
2031 template <typename Cols>
2032 void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
2033  const Cols& cols) {
2034  SCOPED_TIME_STAT(&function_stats_);
2035  bool something_to_do = false;
2036  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
2037  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
2038  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2039  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
2040  for (ColIndex col : cols) {
2041  const Fractional reduced_cost = reduced_costs[col];
2042  const Fractional sign =
2043  (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
2044  : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
2045  : 0.0;
2046  if (sign != dual_infeasibility_improvement_direction_[col]) {
2047  if (sign == 0.0) {
2048  --num_dual_infeasible_positions_;
2049  } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
2050  ++num_dual_infeasible_positions_;
2051  }
2052  if (!something_to_do) {
2053  initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
2054  initially_all_zero_scratchpad_.ClearSparseMask();
2055  initially_all_zero_scratchpad_.non_zeros.clear();
2056  something_to_do = true;
2057  }
2059  col, sign - dual_infeasibility_improvement_direction_[col],
2060  &initially_all_zero_scratchpad_);
2061  dual_infeasibility_improvement_direction_[col] = sign;
2062  }
2063  }
2064  if (something_to_do) {
2065  initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
2066  initially_all_zero_scratchpad_.ClearSparseMask();
2067 
2068  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
2069  const Fractional threshold = parameters_.ratio_test_zero_threshold();
2070  basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
2071  if (initially_all_zero_scratchpad_.non_zeros.empty()) {
2072  for (RowIndex row(0); row < num_rows_; ++row) {
2073  if (initially_all_zero_scratchpad_[row] == 0.0) continue;
2074  dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
2075  is_dual_entering_candidate_.Set(
2076  row, IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
2077  variable_type[basis_[row]],
2078  threshold));
2079  }
2080  initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
2081  } else {
2082  for (const auto e : initially_all_zero_scratchpad_) {
2083  dual_pricing_vector_[e.row()] += e.coefficient();
2084  initially_all_zero_scratchpad_[e.row()] = 0.0;
2085  is_dual_entering_candidate_.Set(
2086  e.row(), IsDualPhaseILeavingCandidate(
2087  dual_pricing_vector_[e.row()],
2088  variable_type[basis_[e.row()]], threshold));
2089  }
2090  }
2091  initially_all_zero_scratchpad_.non_zeros.clear();
2092  }
2093 }
2094 
2095 Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2096  RowIndex* leaving_row, Fractional* cost_variation,
2098  SCOPED_TIME_STAT(&function_stats_);
2099  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2100  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2101 
2102  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2103  // positions and contains the sign in which the reduced cost of this column
2104  // needs to move to improve the feasibility otherwise (+1 or -1).
2105  //
2106  // Its current value was the one used to compute dual_pricing_vector_ and
2107  // was updated accordingly by DualPhaseIUpdatePrice().
2108  //
2109  // If more variables changed of dual-feasibility status during the last
2110  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2111  // take them into account.
2112  if (reduced_costs_.AreReducedCostsRecomputed() ||
2113  dual_pricing_vector_.empty()) {
2114  // Recompute everything from scratch.
2115  num_dual_infeasible_positions_ = 0;
2116  dual_pricing_vector_.AssignToZero(num_rows_);
2117  is_dual_entering_candidate_.ClearAndResize(num_rows_);
2118  dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2119  DualPhaseIUpdatePriceOnReducedCostChange(
2120  variables_info_.GetIsRelevantBitRow());
2121  } else {
2122  // Update row is still equal to the row used during the last iteration
2123  // to update the reduced costs.
2124  DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2125  }
2126 
2127  // If there is no dual-infeasible position, we are done.
2128  *leaving_row = kInvalidRow;
2129  if (num_dual_infeasible_positions_ == 0) return Status::OK();
2130 
2131  // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
2132  // steepest edge or the normal Dantzig pricing.
2133  const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
2134 
2135  // Now take a leaving variable that maximizes the infeasibility variation and
2136  // can leave the basis while being dual-feasible.
2137  Fractional best_price(0.0);
2138  equivalent_leaving_choices_.clear();
2139  for (const RowIndex row : is_dual_entering_candidate_) {
2140  const Fractional squared_cost = Square(dual_pricing_vector_[row]);
2141  const Fractional scaled_best_price = best_price * squared_norm[row];
2142  if (squared_cost >= scaled_best_price) {
2143  if (squared_cost == scaled_best_price) {
2144  DCHECK_NE(*leaving_row, kInvalidRow);
2145  equivalent_leaving_choices_.push_back(row);
2146  continue;
2147  }
2148  equivalent_leaving_choices_.clear();
2149  best_price = squared_cost / squared_norm[row];
2150  *leaving_row = row;
2151  }
2152  }
2153 
2154  // Break the ties randomly.
2155  if (!equivalent_leaving_choices_.empty()) {
2156  equivalent_leaving_choices_.push_back(*leaving_row);
2157  *leaving_row =
2158  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
2159  0, equivalent_leaving_choices_.size() - 1)(random_)];
2160  }
2161 
2162  // Returns right away if there is no leaving variable or fill the other
2163  // return values otherwise.
2164  if (*leaving_row == kInvalidRow) return Status::OK();
2165  *cost_variation = dual_pricing_vector_[*leaving_row];
2166  const ColIndex leaving_col = basis_[*leaving_row];
2167  if (*cost_variation < 0.0) {
2168  *target_bound = upper_bound_[leaving_col];
2169  } else {
2170  *target_bound = lower_bound_[leaving_col];
2171  }
2173  return Status::OK();
2174 }
2175 
2176 template <typename BoxedVariableCols>
2177 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2178  const BoxedVariableCols& cols, bool update_basic_values) {
2179  SCOPED_TIME_STAT(&function_stats_);
2180  std::vector<ColIndex> changed_cols;
2181 
2182  // It is important to flip bounds within a tolerance because of precision
2183  // errors. Otherwise, this leads to cycling on many of the Netlib problems
2184  // since this is called at each iteration (because of the bound-flipping ratio
2185  // test).
2186  const DenseRow& variable_values = variable_values_.GetDenseRow();
2187  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2188  const Fractional dual_feasibility_tolerance =
2189  reduced_costs_.GetDualFeasibilityTolerance();
2190  const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2191  for (const ColIndex col : cols) {
2192  const Fractional reduced_cost = reduced_costs[col];
2193  const VariableStatus status = variable_status[col];
2194  DCHECK(variables_info_.GetTypeRow()[col] ==
2196  // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2197  DCHECK(variable_values[col] == lower_bound_[col] ||
2198  variable_values[col] == upper_bound_[col] ||
2199  status == VariableStatus::BASIC);
2200  if (reduced_cost > dual_feasibility_tolerance &&
2201  status == VariableStatus::AT_UPPER_BOUND) {
2202  variables_info_.Update(col, VariableStatus::AT_LOWER_BOUND);
2203  changed_cols.push_back(col);
2204  } else if (reduced_cost < -dual_feasibility_tolerance &&
2205  status == VariableStatus::AT_LOWER_BOUND) {
2206  variables_info_.Update(col, VariableStatus::AT_UPPER_BOUND);
2207  changed_cols.push_back(col);
2208  }
2209  }
2210 
2211  if (!changed_cols.empty()) {
2212  variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2213  update_basic_values);
2214  }
2215 }
2216 
2217 Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2218  RowIndex leaving_row, Fractional target_bound) {
2219  SCOPED_TIME_STAT(&function_stats_);
2220 
2221  // We just want the leaving variable to go to its target_bound.
2222  const ColIndex leaving_col = basis_[leaving_row];
2223  const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2224  Fractional unscaled_step = leaving_variable_value - target_bound;
2225 
2226  // In Chvatal p 157 update_[entering_col] is used instead of
2227  // direction_[leaving_row], but the two quantities are actually the
2228  // same. This is because update_[col] is the value at leaving_row of
2229  // the right inverse of col and direction_ is the right inverse of the
2230  // entering_col. Note that direction_[leaving_row] is probably more
2231  // precise.
2232  // TODO(user): use this to check precision and trigger recomputation.
2233  return unscaled_step / direction_[leaving_row];
2234 }
2235 
2236 bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2237  VLOG(1) << "Test pivot.";
2238  SCOPED_TIME_STAT(&function_stats_);
2239  const ColIndex leaving_col = basis_[leaving_row];
2240  basis_[leaving_row] = entering_col;
2241 
2242  // TODO(user): If 'is_ok' is true, we could use the computed lu in
2243  // basis_factorization_ rather than recompute it during UpdateAndPivot().
2244  CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2245  const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2246  basis_[leaving_row] = leaving_col;
2247  return is_ok;
2248 }
2249 
// Note that this function is an optimization and that if it was doing nothing
// the algorithm will still be correct and work. Using it does change the pivot
// taken during the simplex method though.
//
// Applies the column permutation produced by the last factorization to basis_
// (and to the data that is indexed consistently with it), then clears the
// permutation from the factorization so subsequent solves do not re-apply it.
void RevisedSimplex::PermuteBasis() {
  SCOPED_TIME_STAT(&function_stats_);

  // Fetch the current basis column permutation and return if it is empty which
  // means the permutation is the identity.
  const ColumnPermutation& col_perm =
      basis_factorization_.GetColumnPermutation();
  if (col_perm.empty()) return;

  // Permute basis_.
  ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);

  // Permute dual_pricing_vector_ if needed.
  if (!dual_pricing_vector_.empty()) {
    // TODO(user): We need to permute is_dual_entering_candidate_ too. Right
    // now, we recompute both the dual_pricing_vector_ and
    // is_dual_entering_candidate_ on each refactorization, so this don't
    // matter.
    ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
  }

  // Notify the other classes whose data depends on the basis ordering.
  reduced_costs_.UpdateDataOnBasisPermutation();
  dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);

  // Finally, remove the column permutation from all subsequent solves since
  // it has been taken into account in basis_.
  basis_factorization_.SetColumnPermutationToIdentity();
}
2282 
2283 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2284  RowIndex leaving_row,
2286  SCOPED_TIME_STAT(&function_stats_);
2287  const ColIndex leaving_col = basis_[leaving_row];
2288  const VariableStatus leaving_variable_status =
2289  lower_bound_[leaving_col] == upper_bound_[leaving_col]
2291  : target_bound == lower_bound_[leaving_col]
2294  if (variable_values_.Get(leaving_col) != target_bound) {
2295  ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2296  target_bound);
2297  }
2298  UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2299 
2300  const Fractional pivot_from_direction = direction_[leaving_row];
2301  const Fractional pivot_from_update_row =
2302  update_row_.GetCoefficient(entering_col);
2303  const Fractional diff =
2304  std::abs(pivot_from_update_row - pivot_from_direction);
2305  if (diff > parameters_.refactorization_threshold() *
2306  (1 + std::abs(pivot_from_direction))) {
2307  VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2308  << " diff = " << diff;
2309  GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2310  } else {
2312  basis_factorization_.Update(entering_col, leaving_row, direction_));
2313  }
2314  if (basis_factorization_.IsRefactorized()) {
2315  PermuteBasis();
2316  }
2317  return Status::OK();
2318 }
2319 
2320 bool RevisedSimplex::NeedsBasisRefactorization(bool refactorize) {
2321  if (basis_factorization_.IsRefactorized()) return false;
2322  if (reduced_costs_.NeedsBasisRefactorization()) return true;
2323  const GlopParameters::PricingRule pricing_rule =
2324  feasibility_phase_ ? parameters_.feasibility_rule()
2325  : parameters_.optimization_rule();
2326  if (parameters_.use_dual_simplex()) {
2327  // TODO(user): Currently the dual is always using STEEPEST_EDGE.
2328  DCHECK_EQ(pricing_rule, GlopParameters::STEEPEST_EDGE);
2329  if (dual_edge_norms_.NeedsBasisRefactorization()) return true;
2330  } else {
2331  if (pricing_rule == GlopParameters::STEEPEST_EDGE &&
2332  primal_edge_norms_.NeedsBasisRefactorization()) {
2333  return true;
2334  }
2335  }
2336  return refactorize;
2337 }
2338 
2339 Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2340  SCOPED_TIME_STAT(&function_stats_);
2341  if (NeedsBasisRefactorization(*refactorize)) {
2342  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2343  update_row_.Invalidate();
2344  PermuteBasis();
2345  }
2346  *refactorize = false;
2347  return Status::OK();
2348 }
2349 
2351  if (col >= integrality_scale_.size()) {
2352  integrality_scale_.resize(col + 1, 0.0);
2353  }
2354  integrality_scale_[col] = scale;
2355 }
2356 
2357 Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2359  Cleanup update_deterministic_time_on_return(
2360  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2361 
2362  // Get all non-basic variables with a reduced costs close to zero.
2363  // Note that because we only choose entering candidate with a cost of zero,
2364  // this set will not change (modulo epsilons).
2365  const DenseRow& rc = reduced_costs_.GetReducedCosts();
2366  std::vector<ColIndex> candidates;
2367  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2368  if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2369  if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2370  }
2371 
2372  bool refactorize = false;
2373  int num_pivots = 0;
2374  Fractional total_gain = 0.0;
2375  for (int i = 0; i < 10; ++i) {
2376  AdvanceDeterministicTime(time_limit);
2377  if (time_limit->LimitReached()) break;
2378  if (num_pivots >= 5) break;
2379  if (candidates.empty()) break;
2380 
2381  // Pick a random one and remove it from the list.
2382  const int index =
2383  std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2384  const ColIndex entering_col = candidates[index];
2385  std::swap(candidates[index], candidates.back());
2386  candidates.pop_back();
2387 
2388  // We need the entering variable to move in the correct direction.
2389  Fractional fake_rc = 1.0;
2390  if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2391  CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2392  fake_rc = -1.0;
2393  }
2394 
2395  // Compute the direction and by how much we can move along it.
2396  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2397  ComputeDirection(entering_col);
2398  Fractional step_length;
2399  RowIndex leaving_row;
2401  bool local_refactorize = false;
2403  ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2404  &leaving_row, &step_length, &target_bound));
2405 
2406  if (local_refactorize) continue;
2407  if (step_length == kInfinity || step_length == -kInfinity) continue;
2408  if (std::abs(step_length) <= 1e-6) continue;
2409  if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2410  continue;
2411  }
2412  const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2413 
2414  // Evaluate if pivot reduce the fractionality of the basis.
2415  //
2416  // TODO(user): Count with more weight variable with a small domain, i.e.
2417  // binary variable, compared to a variable in [0, 1k] ?
2418  const auto get_diff = [this](ColIndex col, Fractional old_value,
2419  Fractional new_value) {
2420  if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2421  return 0.0;
2422  }
2423  const Fractional s = integrality_scale_[col];
2424  return (std::abs(new_value * s - std::round(new_value * s)) -
2425  std::abs(old_value * s - std::round(old_value * s)));
2426  };
2427  Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2428  variable_values_.Get(entering_col) + step);
2429  for (const auto e : direction_) {
2430  const ColIndex col = basis_[e.row()];
2431  const Fractional old_value = variable_values_.Get(col);
2432  const Fractional new_value = old_value - e.coefficient() * step;
2433  diff += get_diff(col, old_value, new_value);
2434  }
2435 
2436  // Ignore low decrease in integrality.
2437  if (diff > -1e-2) continue;
2438  total_gain -= diff;
2439 
2440  // We perform the change.
2441  num_pivots++;
2442  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2443 
2444  // This is a bound flip of the entering column.
2445  if (leaving_row == kInvalidRow) {
2446  if (step > 0.0) {
2447  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2449  } else if (step < 0.0) {
2450  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2452  }
2453  reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2454  continue;
2455  }
2456 
2457  // Perform the pivot.
2458  const ColIndex leaving_col = basis_[leaving_row];
2459  update_row_.ComputeUpdateRow(leaving_row);
2460  primal_edge_norms_.UpdateBeforeBasisPivot(
2461  entering_col, leaving_col, leaving_row, direction_, &update_row_);
2462  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2463  &update_row_);
2464 
2465  const Fractional dir = -direction_[leaving_row] * step;
2466  const bool is_degenerate =
2467  (dir == 0.0) ||
2468  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2469  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2470  if (!is_degenerate) {
2471  variable_values_.Set(leaving_col, target_bound);
2472  }
2474  UpdateAndPivot(entering_col, leaving_row, target_bound));
2475  }
2476 
2477  VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2478  return Status::OK();
2479 }
2480 
2481 // Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2482 // x an n-vector.
2483 //
2484 // x is split in two parts x_B and x_N (B standing for basis).
2485 // In the same way, A is split in A_B (also known as B) and A_N, and
2486 // c is split into c_B and c_N.
2487 //
2488 // The goal is to minimize c_B.x_B + c_N.x_N
2489 // subject to B.x_B + A_N.x_N = 0
2490 // and x_lower <= x <= x_upper.
2491 //
2492 // To minimize c.x, at each iteration a variable from x_N is selected to
2493 // enter the basis, and a variable from x_B is selected to leave the basis.
2494 // To avoid explicit inversion of B, the algorithm solves two sub-systems:
2495 // y.B = c_B and B.d = a (a being the entering column).
2496 Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
2498  Cleanup update_deterministic_time_on_return(
2499  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2500  num_consecutive_degenerate_iterations_ = 0;
2501  DisplayIterationInfo();
2502  bool refactorize = false;
2503 
2504  if (feasibility_phase_) {
2505  // Initialize the primal phase-I objective.
2506  // Note that this temporarily erases the problem objective.
2507  objective_.AssignToZero(num_cols_);
2508  variable_values_.UpdatePrimalPhaseICosts(
2509  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2510  reduced_costs_.ResetForNewObjective();
2511  }
2512 
2513  while (true) {
2514  // TODO(user): we may loop a bit more than the actual number of iteration.
2515  // fix.
2517  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2518  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2519  if (basis_factorization_.IsRefactorized()) {
2520  CorrectErrorsOnVariableValues();
2521  DisplayIterationInfo();
2522 
2523  if (feasibility_phase_) {
2524  // Since the variable values may have been recomputed, we need to
2525  // recompute the primal infeasible variables and update their costs.
2526  if (variable_values_.UpdatePrimalPhaseICosts(
2527  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2528  &objective_)) {
2529  reduced_costs_.ResetForNewObjective();
2530  }
2531  }
2532 
2533  // Computing the objective at each iteration takes time, so we just
2534  // check the limit when the basis is refactorized.
2535  if (!feasibility_phase_ &&
2536  ComputeObjectiveValue() < primal_objective_limit_) {
2537  VLOG(1) << "Stopping the primal simplex because"
2538  << " the objective limit " << primal_objective_limit_
2539  << " has been reached.";
2540  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2541  objective_limit_reached_ = true;
2542  return Status::OK();
2543  }
2544  } else if (feasibility_phase_) {
2545  // Note that direction_.non_zeros contains the positions of the basic
2546  // variables whose values were updated during the last iteration.
2547  if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2548  &objective_)) {
2549  reduced_costs_.ResetForNewObjective();
2550  }
2551  }
2552 
2553  Fractional reduced_cost = 0.0;
2554  ColIndex entering_col = kInvalidCol;
2556  entering_variable_.PrimalChooseEnteringColumn(&entering_col));
2557  if (entering_col == kInvalidCol) {
2558  if (reduced_costs_.AreReducedCostsPrecise() &&
2559  basis_factorization_.IsRefactorized()) {
2560  if (feasibility_phase_) {
2561  const Fractional primal_infeasibility =
2562  variable_values_.ComputeMaximumPrimalInfeasibility();
2563  if (primal_infeasibility <
2564  parameters_.primal_feasibility_tolerance()) {
2565  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2566  } else {
2567  VLOG(1) << "Infeasible problem! infeasibility = "
2568  << primal_infeasibility;
2569  problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2570  }
2571  } else {
2572  problem_status_ = ProblemStatus::OPTIMAL;
2573  }
2574  break;
2575  } else {
2576  VLOG(1) << "Optimal reached, double checking...";
2577  reduced_costs_.MakeReducedCostsPrecise();
2578  refactorize = true;
2579  continue;
2580  }
2581  } else {
2582  reduced_cost = reduced_costs_.GetReducedCosts()[entering_col];
2583  DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2584 
2585  // Solve the system B.d = a with a the entering column.
2586  ComputeDirection(entering_col);
2587  primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2588  direction_);
2589  if (!reduced_costs_.TestEnteringReducedCostPrecision(
2590  entering_col, direction_, &reduced_cost)) {
2591  VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
2592  << reduced_cost;
2593  continue;
2594  }
2595  }
2596 
2597  // This test takes place after the check for optimality/feasibility because
2598  // when running with 0 iterations, we still want to report
2599  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2600  // case at the beginning of the algorithm.
2601  AdvanceDeterministicTime(time_limit);
2602  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2603  time_limit->LimitReached()) {
2604  break;
2605  }
2606 
2607  Fractional step_length;
2608  RowIndex leaving_row;
2610  if (feasibility_phase_) {
2611  PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2612  &refactorize, &leaving_row,
2613  &step_length, &target_bound);
2614  } else {
2616  ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2617  &leaving_row, &step_length, &target_bound));
2618  }
2619  if (refactorize) continue;
2620 
2621  if (step_length == kInfinity || step_length == -kInfinity) {
2622  if (!basis_factorization_.IsRefactorized() ||
2623  !reduced_costs_.AreReducedCostsPrecise()) {
2624  VLOG(1) << "Infinite step length, double checking...";
2625  reduced_costs_.MakeReducedCostsPrecise();
2626  continue;
2627  }
2628  if (feasibility_phase_) {
2629  // This shouldn't happen by construction.
2630  VLOG(1) << "Unbounded feasibility problem !?";
2631  problem_status_ = ProblemStatus::ABNORMAL;
2632  } else {
2633  VLOG(1) << "Unbounded problem.";
2634  problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2635  solution_primal_ray_.AssignToZero(num_cols_);
2636  for (RowIndex row(0); row < num_rows_; ++row) {
2637  const ColIndex col = basis_[row];
2638  solution_primal_ray_[col] = -direction_[row];
2639  }
2640  solution_primal_ray_[entering_col] = 1.0;
2641  if (step_length == -kInfinity) {
2642  ChangeSign(&solution_primal_ray_);
2643  }
2644  }
2645  break;
2646  }
2647 
2648  Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2649  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2650  // For phase-I we currently always set the leaving variable to its exact
2651  // bound even if by doing so we may take a small step in the wrong
2652  // direction and may increase the overall infeasibility.
2653  //
2654  // TODO(user): Investigate alternatives even if this seems to work well in
2655  // practice. Note that the final returned solution will have the property
2656  // that all non-basic variables are at their exact bound, so it is nice
2657  // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2658  // this property cannot be found.
2659  step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2660  }
2661 
2662  // Store the leaving_col before basis_ change.
2663  const ColIndex leaving_col =
2664  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2665 
2666  // An iteration is called 'degenerate' if the leaving variable is already
2667  // primal-infeasible and we make it even more infeasible or if we do a zero
2668  // step.
2669  bool is_degenerate = false;
2670  if (leaving_row != kInvalidRow) {
2671  Fractional dir = -direction_[leaving_row] * step;
2672  is_degenerate =
2673  (dir == 0.0) ||
2674  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2675  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2676 
2677  // If the iteration is not degenerate, the leaving variable should go to
2678  // its exact target bound (it is how the step is computed).
2679  if (!is_degenerate) {
2680  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2681  target_bound));
2682  }
2683  }
2684 
2685  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2686  if (leaving_row != kInvalidRow) {
2687  primal_edge_norms_.UpdateBeforeBasisPivot(
2688  entering_col, basis_[leaving_row], leaving_row, direction_,
2689  &update_row_);
2690  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2691  direction_, &update_row_);
2692  if (!is_degenerate) {
2693  // On a non-degenerate iteration, the leaving variable should be at its
2694  // exact bound. This corrects an eventual small numerical error since
2695  // 'value + direction * step' where step is
2696  // '(target_bound - value) / direction'
2697  // may be slighlty different from target_bound.
2698  variable_values_.Set(leaving_col, target_bound);
2699  }
2701  UpdateAndPivot(entering_col, leaving_row, target_bound));
2703  if (is_degenerate) {
2704  timer.AlsoUpdate(&iteration_stats_.degenerate);
2705  } else {
2706  timer.AlsoUpdate(&iteration_stats_.normal);
2707  }
2708  });
2709  } else {
2710  // Bound flip. This makes sure that the flipping variable is at its bound
2711  // and has the correct status.
2713  variables_info_.GetTypeRow()[entering_col]);
2714  if (step > 0.0) {
2715  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2717  } else if (step < 0.0) {
2718  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2720  }
2721  reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2722  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2723  }
2724 
2725  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2726  // Set the leaving variable to its exact bound.
2727  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2728  reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2729  &objective_[leaving_col]);
2730  }
2731 
2732  // Stats about consecutive degenerate iterations.
2733  if (step_length == 0.0) {
2734  num_consecutive_degenerate_iterations_++;
2735  } else {
2736  if (num_consecutive_degenerate_iterations_ > 0) {
2737  iteration_stats_.degenerate_run_size.Add(
2738  num_consecutive_degenerate_iterations_);
2739  num_consecutive_degenerate_iterations_ = 0;
2740  }
2741  }
2742  ++num_iterations_;
2743  }
2744  if (num_consecutive_degenerate_iterations_ > 0) {
2745  iteration_stats_.degenerate_run_size.Add(
2746  num_consecutive_degenerate_iterations_);
2747  }
2748  return Status::OK();
2749 }
2750 
2751 // TODO(user): Two other approaches for the phase I described in Koberstein's
2752 // PhD thesis seem worth trying at some point:
2753 // - The subproblem approach, which enables one to use a normal phase II dual,
2754 // but requires an efficient bound-flipping ratio test since the new problem
2755 // has all its variables boxed.
2756 // - Pan's method, which is really fast but have no theoretical guarantee of
2757 // terminating and thus needs to use one of the other methods as a fallback if
2758 // it fails to make progress.
2759 //
2760 // Note that the returned status applies to the primal problem!
2761 Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
2762  Cleanup update_deterministic_time_on_return(
2763  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2764  num_consecutive_degenerate_iterations_ = 0;
2765  bool refactorize = false;
2766 
2767  bound_flip_candidates_.clear();
2768  pair_to_ignore_.clear();
2769 
2770  // Leaving variable.
2771  RowIndex leaving_row;
2772  Fractional cost_variation;
2774 
2775  // Entering variable.
2776  ColIndex entering_col;
2777  Fractional ratio;
2778 
2779  while (true) {
2780  // TODO(user): we may loop a bit more than the actual number of iteration.
2781  // fix.
2783  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2784 
2785  const bool old_refactorize_value = refactorize;
2786  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2787 
2788  // If the basis is refactorized, we recompute all the values in order to
2789  // have a good precision.
2790  if (basis_factorization_.IsRefactorized()) {
2791  // We do not want to recompute the reduced costs too often, this is
2792  // because that may break the overall direction taken by the last steps
2793  // and may lead to less improvement on degenerate problems.
2794  //
2795  // During phase-I, we do want the reduced costs to be as precise as
2796  // possible. TODO(user): Investigate why and fix the TODO in
2797  // PermuteBasis().
2798  //
2799  // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
2800  // do recompute them, it is better to do that first.
2801  if (!feasibility_phase_ && !reduced_costs_.AreReducedCostsRecomputed() &&
2802  !old_refactorize_value) {
2803  const Fractional dual_residual_error =
2804  reduced_costs_.ComputeMaximumDualResidual();
2805  if (dual_residual_error >
2806  reduced_costs_.GetDualFeasibilityTolerance()) {
2807  VLOG(1) << "Recomputing reduced costs. Dual residual = "
2808  << dual_residual_error;
2809  reduced_costs_.MakeReducedCostsPrecise();
2810  }
2811  } else {
2812  reduced_costs_.MakeReducedCostsPrecise();
2813  }
2814 
2815  // TODO(user): Make RecomputeBasicVariableValues() do nothing
2816  // if it was already recomputed on a refactorized basis. This is the
2817  // same behavior as MakeReducedCostsPrecise().
2818  //
2819  // TODO(user): Do not recompute the variable values each time we
2820  // refactorize the matrix, like for the reduced costs? That may lead to
2821  // a worse behavior than keeping the "imprecise" version and only
2822  // recomputing it when its precision is above a threshold.
2823  if (!feasibility_phase_) {
2824  MakeBoxedVariableDualFeasible(
2825  variables_info_.GetNonBasicBoxedVariables(),
2826  /*update_basic_values=*/false);
2827  variable_values_.RecomputeBasicVariableValues();
2828  variable_values_.ResetPrimalInfeasibilityInformation();
2829 
2830  // Computing the objective at each iteration takes time, so we just
2831  // check the limit when the basis is refactorized.
2832  if (ComputeObjectiveValue() > dual_objective_limit_) {
2833  VLOG(1) << "Stopping the dual simplex because"
2834  << " the objective limit " << dual_objective_limit_
2835  << " has been reached.";
2836  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2837  objective_limit_reached_ = true;
2838  return Status::OK();
2839  }
2840  }
2841 
2842  reduced_costs_.GetReducedCosts();
2843  DisplayIterationInfo();
2844  } else {
2845  // Updates from the previous iteration that can be skipped if we
2846  // recompute everything (see other case above).
2847  if (!feasibility_phase_) {
2848  // Make sure the boxed variables are dual-feasible before choosing the
2849  // leaving variable row.
2850  MakeBoxedVariableDualFeasible(bound_flip_candidates_,
2851  /*update_basic_values=*/true);
2852  bound_flip_candidates_.clear();
2853 
2854  // The direction_.non_zeros contains the positions for which the basic
2855  // variable value was changed during the previous iterations.
2856  variable_values_.UpdatePrimalInfeasibilityInformation(
2857  direction_.non_zeros);
2858  }
2859  }
2860 
2861  if (feasibility_phase_) {
2862  GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
2863  &leaving_row, &cost_variation, &target_bound));
2864  } else {
2865  GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
2866  &leaving_row, &cost_variation, &target_bound));
2867  }
2868  if (leaving_row == kInvalidRow) {
2869  if (!basis_factorization_.IsRefactorized()) {
2870  VLOG(1) << "Optimal reached, double checking.";
2871  refactorize = true;
2872  continue;
2873  }
2874  if (feasibility_phase_) {
2875  // Note that since the basis is refactorized, the variable values
2876  // will be recomputed at the beginning of the second phase. The boxed
2877  // variable values will also be corrected by
2878  // MakeBoxedVariableDualFeasible().
2879  if (num_dual_infeasible_positions_ == 0) {
2880  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2881  } else {
2882  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
2883  }
2884  } else {
2885  problem_status_ = ProblemStatus::OPTIMAL;
2886  }
2887  return Status::OK();
2888  }
2889 
2890  update_row_.ComputeUpdateRow(leaving_row);
2891  for (std::pair<RowIndex, ColIndex> pair : pair_to_ignore_) {
2892  if (pair.first == leaving_row) {
2893  update_row_.IgnoreUpdatePosition(pair.second);
2894  }
2895  }
2896  if (feasibility_phase_) {
2898  update_row_, cost_variation, &entering_col, &ratio));
2899  } else {
2901  update_row_, cost_variation, &bound_flip_candidates_, &entering_col,
2902  &ratio));
2903  }
2904 
2905  // No entering_col: Unbounded problem / Infeasible problem.
2906  if (entering_col == kInvalidCol) {
2907  if (!reduced_costs_.AreReducedCostsPrecise()) {
2908  VLOG(1) << "No entering column. Double checking...";
2909  refactorize = true;
2910  continue;
2911  }
2912  DCHECK(basis_factorization_.IsRefactorized());
2913  if (feasibility_phase_) {
2914  // This shouldn't happen by construction.
2915  VLOG(1) << "Unbounded dual feasibility problem !?";
2916  problem_status_ = ProblemStatus::ABNORMAL;
2917  } else {
2918  problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
2919  solution_dual_ray_ =
2920  Transpose(update_row_.GetUnitRowLeftInverse().values);
2921  update_row_.RecomputeFullUpdateRow(leaving_row);
2922  solution_dual_ray_row_combination_.AssignToZero(num_cols_);
2923  for (const ColIndex col : update_row_.GetNonZeroPositions()) {
2924  solution_dual_ray_row_combination_[col] =
2925  update_row_.GetCoefficient(col);
2926  }
2927  if (cost_variation < 0) {
2928  ChangeSign(&solution_dual_ray_);
2929  ChangeSign(&solution_dual_ray_row_combination_);
2930  }
2931  }
2932  return Status::OK();
2933  }
2934 
2935  // If the coefficient is too small, we recompute the reduced costs.
2936  const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
2937  if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
2938  !reduced_costs_.AreReducedCostsPrecise()) {
2939  VLOG(1) << "Trying not to pivot by " << entering_coeff;
2940  refactorize = true;
2941  continue;
2942  }
2943 
2944  // If the reduced cost is already precise, we check with the direction_.
2945  // This is at least needed to avoid corner cases where
2946  // direction_[leaving_row] is actually 0 which causes a floating
2947  // point exception below.
2948  ComputeDirection(entering_col);
2949  if (std::abs(direction_[leaving_row]) <
2950  parameters_.minimum_acceptable_pivot()) {
2951  VLOG(1) << "Do not pivot by " << entering_coeff
2952  << " because the direction is " << direction_[leaving_row];
2953  refactorize = true;
2954  pair_to_ignore_.push_back({leaving_row, entering_col});
2955  continue;
2956  }
2957  pair_to_ignore_.clear();
2958 
2959  // This test takes place after the check for optimality/feasibility because
2960  // when running with 0 iterations, we still want to report
2961  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2962  // case at the beginning of the algorithm.
2963  AdvanceDeterministicTime(time_limit);
2964  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2965  time_limit->LimitReached()) {
2966  return Status::OK();
2967  }
2968 
2970  if (ratio == 0.0) {
2971  timer.AlsoUpdate(&iteration_stats_.degenerate);
2972  } else {
2973  timer.AlsoUpdate(&iteration_stats_.normal);
2974  }
2975  });
2976 
2977  // Update basis. Note that direction_ is already computed.
2978  //
2979  // TODO(user): this is pretty much the same in the primal or dual code.
2980  // We just need to know to what bound the leaving variable will be set to.
2981  // Factorize more common code?
2982  //
2983  // During phase I, we do not need the basic variable values at all.
2984  Fractional primal_step = 0.0;
2985  if (feasibility_phase_) {
2986  DualPhaseIUpdatePrice(leaving_row, entering_col);
2987  } else {
2988  primal_step =
2989  ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2990  variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
2991  }
2992 
2993  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2994  &update_row_);
2995  dual_edge_norms_.UpdateBeforeBasisPivot(
2996  entering_col, leaving_row, direction_,
2997  update_row_.GetUnitRowLeftInverse());
2998 
2999  // It is important to do the actual pivot after the update above!
3000  const ColIndex leaving_col = basis_[leaving_row];
3002  UpdateAndPivot(entering_col, leaving_row, target_bound));
3003 
3004  // This makes sure the leaving variable is at its exact bound. Tests
3005  // indicate that this makes everything more stable. Note also that during
3006  // the feasibility phase, the variable values are not used, but that the
3007  // correct non-basic variable value are needed at the end.
3008  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3009 
3010  // This is slow, but otherwise we have a really bad precision on the
3011  // variable values ...
3012  if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
3013  1.0) {
3014  refactorize = true;
3015  }
3016  ++num_iterations_;
3017  }
3018  return Status::OK();
3019 }
3020 
3021 ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
3022  // TODO(user): Remove this function.
3024  return first_slack_col_ + RowToColIndex(row);
3025 }
3026 
3028  std::string result;
3029  result.append(iteration_stats_.StatString());
3030  result.append(ratio_test_stats_.StatString());
3031  result.append(entering_variable_.StatString());
3032  result.append(reduced_costs_.StatString());
3033  result.append(variable_values_.StatString());
3034  result.append(primal_edge_norms_.StatString());
3035  result.append(dual_edge_norms_.StatString());
3036  result.append(update_row_.StatString());
3037  result.append(basis_factorization_.StatString());
3038  result.append(function_stats_.StatString());
3039  return result;
3040 }
3041 
3042 void RevisedSimplex::DisplayAllStats() {
3043  if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3044  absl::FPrintF(stderr, "%s", StatString());
3045  absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3046  }
3047 }
3048 
3049 Fractional RevisedSimplex::ComputeObjectiveValue() const {
3050  SCOPED_TIME_STAT(&function_stats_);
3051  return PreciseScalarProduct(objective_,
3052  Transpose(variable_values_.GetDenseRow()));
3053 }
3054 
3055 Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3056  SCOPED_TIME_STAT(&function_stats_);
3057  const Fractional sum = PreciseScalarProduct(
3058  objective_, Transpose(variable_values_.GetDenseRow()));
3059  return objective_scaling_factor_ * (sum + objective_offset_);
3060 }
3061 
3062 void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
3063  SCOPED_TIME_STAT(&function_stats_);
3064  random_.seed(parameters.random_seed());
3065  initial_parameters_ = parameters;
3066  parameters_ = parameters;
3067  PropagateParameters();
3068 }
3069 
3070 void RevisedSimplex::PropagateParameters() {
3071  SCOPED_TIME_STAT(&function_stats_);
3072  basis_factorization_.SetParameters(parameters_);
3073  entering_variable_.SetParameters(parameters_);
3074  reduced_costs_.SetParameters(parameters_);
3075  dual_edge_norms_.SetParameters(parameters_);
3076  primal_edge_norms_.SetParameters(parameters_);
3077  update_row_.SetParameters(parameters_);
3078 }
3079 
3080 void RevisedSimplex::DisplayIterationInfo() const {
3081  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3082  const int iter = feasibility_phase_
3083  ? num_iterations_
3084  : num_iterations_ - num_feasibility_iterations_;
3085  // Note that in the dual phase II, ComputeObjectiveValue() is also computing
3086  // the dual objective even if it uses the variable values. This is because
3087  // if we modify the bounds to make the problem primal-feasible, we are at
3088  // the optimal and hence the two objectives are the same.
3089  const Fractional objective =
3090  !feasibility_phase_
3091  ? ComputeInitialProblemObjectiveValue()
3092  : (parameters_.use_dual_simplex()
3093  ? reduced_costs_.ComputeSumOfDualInfeasibilities()
3094  : variable_values_.ComputeSumOfPrimalInfeasibilities());
3095  LOG(INFO) << (feasibility_phase_ ? "Feasibility" : "Optimization")
3096  << " phase, iteration # " << iter
3097  << ", objective = " << absl::StrFormat("%.15E", objective);
3098  }
3099 }
3100 
3101 void RevisedSimplex::DisplayErrors() const {
3102  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3103  LOG(INFO) << "Primal infeasibility (bounds) = "
3104  << variable_values_.ComputeMaximumPrimalInfeasibility();
3105  LOG(INFO) << "Primal residual |A.x - b| = "
3106  << variable_values_.ComputeMaximumPrimalResidual();
3107  LOG(INFO) << "Dual infeasibility (reduced costs) = "
3108  << reduced_costs_.ComputeMaximumDualInfeasibility();
3109  LOG(INFO) << "Dual residual |c_B - y.B| = "
3110  << reduced_costs_.ComputeMaximumDualResidual();
3111  }
3112 }
3113 
3114 namespace {
3115 
3116 std::string StringifyMonomialWithFlags(const Fractional a,
3117  const std::string& x) {
3118  return StringifyMonomial(
3119  a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3120 }
3121 
3122 // Returns a string representing the rational approximation of x or a decimal
3123 // approximation of x according to
3124 // absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3125 std::string StringifyWithFlags(const Fractional x) {
3126  return Stringify(x,
3127  absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3128 }
3129 
3130 } // namespace
3131 
3132 std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3133  std::string output;
3134  VariableType variable_type = variables_info_.GetTypeRow()[col];
3135  VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3136  absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3137  variable_name_[col],
3138  StringifyWithFlags(variable_values_.Get(col)),
3139  GetVariableStatusString(variable_status),
3140  GetVariableTypeString(variable_type),
3141  StringifyWithFlags(lower_bound_[col]),
3142  StringifyWithFlags(upper_bound_[col]));
3143  return output;
3144 }
3145 
3146 void RevisedSimplex::DisplayInfoOnVariables() const {
3147  if (VLOG_IS_ON(3)) {
3148  for (ColIndex col(0); col < num_cols_; ++col) {
3149  const Fractional variable_value = variable_values_.Get(col);
3150  const Fractional objective_coefficient = objective_[col];
3151  const Fractional objective_contribution =
3152  objective_coefficient * variable_value;
3153  VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3154  << StringifyWithFlags(variable_value) << " * "
3155  << StringifyWithFlags(objective_coefficient)
3156  << "(obj) = " << StringifyWithFlags(objective_contribution);
3157  }
3158  VLOG(3) << "------";
3159  }
3160 }
3161 
3162 void RevisedSimplex::DisplayVariableBounds() {
3163  if (VLOG_IS_ON(3)) {
3164  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3165  for (ColIndex col(0); col < num_cols_; ++col) {
3166  switch (variable_type[col]) {
3168  break;
3170  VLOG(3) << variable_name_[col]
3171  << " >= " << StringifyWithFlags(lower_bound_[col]) << ";";
3172  break;
3174  VLOG(3) << variable_name_[col]
3175  << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3176  break;
3178  VLOG(3) << StringifyWithFlags(lower_bound_[col])
3179  << " <= " << variable_name_[col]
3180  << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3181  break;
3183  VLOG(3) << variable_name_[col] << " = "
3184  << StringifyWithFlags(lower_bound_[col]) << ";";
3185  break;
3186  default: // This should never happen.
3187  LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3188  break;
3189  }
3190  }
3191  }
3192 }
3193 
3195  const DenseRow* column_scales) {
3196  absl::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3197  for (ColIndex col(0); col < num_cols_; ++col) {
3198  ComputeDirection(col);
3199  for (const auto e : direction_) {
3200  if (column_scales == nullptr) {
3201  dictionary[e.row()].SetCoefficient(col, e.coefficient());
3202  continue;
3203  }
3204  const Fractional numerator =
3205  col < column_scales->size() ? (*column_scales)[col] : 1.0;
3206  const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3207  ? (*column_scales)[GetBasis(e.row())]
3208  : 1.0;
3209  dictionary[e.row()].SetCoefficient(
3210  col, direction_[e.row()] * (numerator / denominator));
3211  }
3212  }
3213  return dictionary;
3214 }
3215 
3217  const LinearProgram& linear_program, const BasisState& state) {
3218  LoadStateForNextSolve(state);
3219  Status status = Initialize(linear_program);
3220  if (status.ok()) {
3221  variable_values_.RecomputeBasicVariableValues();
3222  variable_values_.ResetPrimalInfeasibilityInformation();
3223  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3224  }
3225 }
3226 
3227 void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3228  if (VLOG_IS_ON(3)) {
3229  // This function has a complexity in O(num_non_zeros_in_matrix).
3230  DisplayInfoOnVariables();
3231 
3232  std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3233  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3234  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3235  absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3236  variable_name_[col]));
3237  }
3238  VLOG(3) << output << ";";
3239 
3240  const RevisedSimplexDictionary dictionary(nullptr, this);
3241  RowIndex r(0);
3242  for (const SparseRow& row : dictionary) {
3243  output.clear();
3244  ColIndex basic_col = basis_[r];
3245  absl::StrAppend(&output, variable_name_[basic_col], " = ",
3246  StringifyWithFlags(variable_values_.Get(basic_col)));
3247  for (const SparseRowEntry e : row) {
3248  if (e.col() != basic_col) {
3249  absl::StrAppend(&output,
3250  StringifyMonomialWithFlags(e.coefficient(),
3251  variable_name_[e.col()]));
3252  }
3253  }
3254  VLOG(3) << output << ";";
3255  }
3256  VLOG(3) << "------";
3257  DisplayVariableBounds();
3258  ++r;
3259  }
3260 }
3261 
3262 void RevisedSimplex::DisplayProblem() const {
3263  // This function has a complexity in O(num_rows * num_cols *
3264  // num_non_zeros_in_row).
3265  if (VLOG_IS_ON(3)) {
3266  DisplayInfoOnVariables();
3267  std::string output = "min: ";
3268  bool has_objective = false;
3269  for (ColIndex col(0); col < num_cols_; ++col) {
3270  const Fractional coeff = objective_[col];
3271  has_objective |= (coeff != 0.0);
3272  absl::StrAppend(&output,
3273  StringifyMonomialWithFlags(coeff, variable_name_[col]));
3274  }
3275  if (!has_objective) {
3276  absl::StrAppend(&output, " 0");
3277  }
3278  VLOG(3) << output << ";";
3279  for (RowIndex row(0); row < num_rows_; ++row) {
3280  output = "";
3281  for (ColIndex col(0); col < num_cols_; ++col) {
3282  absl::StrAppend(&output,
3283  StringifyMonomialWithFlags(
3284  compact_matrix_.column(col).LookUpCoefficient(row),
3285  variable_name_[col]));
3286  }
3287  VLOG(3) << output << " = 0;";
3288  }
3289  VLOG(3) << "------";
3290  }
3291 }
3292 
3293 void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3294  DCHECK(time_limit != nullptr);
3295  const double current_deterministic_time = DeterministicTime();
3296  const double deterministic_time_delta =
3297  current_deterministic_time - last_deterministic_time_update_;
3298  time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3299  last_deterministic_time_update_ = current_deterministic_time;
3300 }
3301 
3302 #undef DCHECK_COL_BOUNDS
3303 #undef DCHECK_ROW_BOUNDS
3304 
3305 } // namespace glop
3306 } // namespace operations_research
int64 min
Definition: alldiff_cst.cc:138
int64 max
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:887
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:886
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:889
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:890
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:888
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:884
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:885
#define VLOG(verboselevel)
Definition: base/logging.h:978
bool empty() const
void push_back(const value_type &x)
void ClearAndResize(IndexType size)
Definition: bitset.h:436
void Set(IndexType i)
Definition: bitset.h:491
bool IsSet(IndexType i) const
Definition: bitset.h:481
std::string StatString() const
Definition: stats.cc:71
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
const ColumnPermutation & GetColumnPermutation() const
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
void SetParameters(const GlopParameters &parameters)
Fractional LookUpCoefficient(RowIndex index) const
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse_column.h:83
RowIndex EntryRow(EntryIndex i) const
Definition: sparse_column.h:89
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:418
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:405
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition: sparse.cc:456
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:393
void PopulateFromMatrixView(const MatrixView &input)
Definition: sparse.cc:437
ColumnView column(ColIndex col) const
Definition: sparse.h:364
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
void SetParameters(const GlopParameters &parameters)
ABSL_MUST_USE_RESULT Status PrimalChooseEnteringColumn(ColIndex *entering_col)
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col, Fractional *step)
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col, Fractional *step)
void SetPricingRule(GlopParameters::PricingRule rule)
void SetParameters(const GlopParameters &parameters)
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
void TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void SetParameters(const GlopParameters &parameters)
bool TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction, Fractional *reduced_cost)
bool IsValidPrimalEnteringCandidate(ColIndex col) const
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
Fractional GetDualFeasibilityTolerance() const
Fractional ComputeMaximumDualInfeasibility() const
void MaintainDualInfeasiblePositions(bool maintain)
void SetParameters(const GlopParameters &parameters)
const DenseRow & GetDualRayRowCombination() const
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
Fractional GetConstraintActivity(RowIndex row) const
VariableStatus GetVariableStatus(ColIndex col) const
Fractional GetReducedCost(ColIndex col) const
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
Fractional GetDualValue(RowIndex row) const
ConstraintStatus GetConstraintStatus(RowIndex row) const
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
void LoadStateForNextSolve(const BasisState &state)
const BasisFactorization & GetBasisFactorization() const
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
static const Status OK()
Definition: status.h:54
const ScatteredRow & GetUnitRowLeftInverse() const
Definition: update_row.cc:51
void RecomputeFullUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:244
void IgnoreUpdatePosition(ColIndex col)
Definition: update_row.cc:45
const Fractional GetCoefficient(ColIndex col) const
Definition: update_row.h:66
void ComputeUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:78
void SetParameters(const GlopParameters &parameters)
Definition: update_row.cc:174
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:170
std::string StatString() const
Definition: update_row.h:81
void Set(ColIndex col, Fractional value)
const DenseColumn & GetPrimalSquaredInfeasibilities() const
void UpdateGivenNonBasicVariables(const std::vector< ColIndex > &cols_to_update, bool update_basic_variables)
const DenseBitColumn & GetPrimalInfeasiblePositions() const
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
const Fractional Get(ColIndex col) const
void UpdatePrimalInfeasibilityInformation(const std::vector< RowIndex > &rows)
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
const DenseBitRow & GetIsBasicBitRow() const
const DenseBitRow & GetNonBasicBoxedVariables() const
Fractional GetBoundDifference(ColIndex col) const
const DenseBitRow & GetCanIncreaseBitRow() const
const DenseBitRow & GetCanDecreaseBitRow() const
const VariableTypeRow & GetTypeRow() const
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
const DenseBitRow & GetNotBasicBitRow() const
const VariableStatusRow & GetStatusRow() const
const DenseBitRow & GetIsRelevantBitRow() const
void Update(ColIndex col, VariableStatus status)
SatParameters parameters
SharedTimeLimit * time_limit
int64 value
int64_t int64
uint64_t uint64
const int WARNING
Definition: log_severity.h:31
const int INFO
Definition: log_severity.h:31
const bool DEBUG_MODE
Definition: macros.h:24
ColIndex col
Definition: markowitz.cc:176
std::string StringifyMonomial(const Fractional a, const std::string &x, bool fraction)
bool IsRightMostSquareMatrixIdentity(const SparseMatrix &matrix)
Fractional Square(Fractional f)
Fractional InfinityNorm(const DenseColumn &v)
const RowIndex kInvalidRow(-1)
std::string Stringify(const Fractional x, bool fraction)
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Definition: lp_types.h:317
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:299
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
Index ColToIntIndex(ColIndex col)
Definition: lp_types.h:54
Permutation< ColIndex > ColumnPermutation
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Definition: lp_types.h:320
constexpr const uint64 kDeterministicSeed
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:48
bool IsFinite(Fractional value)
Definition: lp_types.h:90
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
const DenseRow & Transpose(const DenseColumn &col)
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:323
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition: lp_types.h:342
std::string GetVariableTypeString(VariableType variable_type)
Definition: lp_types.cc:52
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:328
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Definition: lp_types.h:331
std::string GetVariableStatusString(VariableStatus status)
Definition: lp_types.cc:71
const double kInfinity
Definition: lp_types.h:83
const ColIndex kInvalidCol(-1)
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition: stats.h:432
int index
Definition: pack.cc:508
if(!yyg->yy_init)
Definition: parser.yy.cc:965
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
Fractional coeff_magnitude
#define DCHECK_ROW_BOUNDS(row)
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
Fractional target_bound
RowIndex row
#define DCHECK_COL_BOUNDS(col)
Fractional ratio
int64 cost
IntVar *const objective_
Definition: search.cc:2951
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:435
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:436
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:70
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Definition: status.h:85
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
StrictITIVector< Index, Fractional > values
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:41