OR-Tools  8.0
revised_simplex.cc
Go to the documentation of this file.
1 // Copyright 2010-2018 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <functional>
19 #include <map>
20 #include <string>
21 #include <utility>
22 #include <vector>
23 
24 #include "absl/strings/str_cat.h"
25 #include "absl/strings/str_format.h"
28 #include "ortools/base/logging.h"
36 #include "ortools/util/fp_utils.h"
37 
38 DEFINE_bool(simplex_display_numbers_as_fractions, false,
39  "Display numbers as fractions.");
40 DEFINE_bool(simplex_stop_after_first_basis, false,
41  "Stop after first basis has been computed.");
42 DEFINE_bool(simplex_stop_after_feasibility, false,
43  "Stop after first phase has been completed.");
44 DEFINE_bool(simplex_display_stats, false, "Display algorithm statistics.");
45 
46 namespace operations_research {
47 namespace glop {
48 namespace {
49 
// Executes a stored callback from its destructor. Declaring an instance at
// the top of a scope guarantees that the callback runs on every exit path
// of the enclosing function (normal return or error return).
class Cleanup {
 public:
  explicit Cleanup(std::function<void()> closure)
      : on_scope_exit_(std::move(closure)) {}
  ~Cleanup() { on_scope_exit_(); }

 private:
  std::function<void()> on_scope_exit_;
};
61 } // namespace
62 
// Debug-only helpers asserting that a column (resp. row) index lies within
// the current problem dimensions. They read the num_cols_ / num_rows_
// members of the enclosing class, so they are only usable inside
// RevisedSimplex methods.
#define DCHECK_COL_BOUNDS(col) \
  {                            \
    DCHECK_LE(0, col);         \
    DCHECK_GT(num_cols_, col); \
  }

#define DCHECK_ROW_BOUNDS(row) \
  {                            \
    DCHECK_LE(0, row);         \
    DCHECK_GT(num_rows_, row); \
  }

// Fixed seed for random_ so that successive solves are deterministic and
// reproducible across runs.
constexpr const uint64 kDeterministicSeed = 42;
76 
78  : problem_status_(ProblemStatus::INIT),
79  num_rows_(0),
80  num_cols_(0),
81  first_slack_col_(0),
82  objective_(),
83  lower_bound_(),
84  upper_bound_(),
85  basis_(),
86  variable_name_(),
87  direction_(),
88  error_(),
89  basis_factorization_(&compact_matrix_, &basis_),
90  variables_info_(compact_matrix_, lower_bound_, upper_bound_),
91  variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
92  basis_factorization_),
93  dual_edge_norms_(basis_factorization_),
94  primal_edge_norms_(compact_matrix_, variables_info_,
95  basis_factorization_),
96  update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
97  basis_factorization_),
98  reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
99  basis_factorization_, &random_),
100  entering_variable_(variables_info_, &random_, &reduced_costs_,
101  &primal_edge_norms_),
102  num_iterations_(0),
103  num_feasibility_iterations_(0),
104  num_optimization_iterations_(0),
105  total_time_(0.0),
106  feasibility_time_(0.0),
107  optimization_time_(0.0),
108  last_deterministic_time_update_(0.0),
109  iteration_stats_(),
110  ratio_test_stats_(),
111  function_stats_("SimplexFunctionStats"),
112  parameters_(),
113  test_lu_(),
114  feasibility_phase_(true),
115  random_(kDeterministicSeed) {
116  SetParameters(parameters_);
117 }
118 
120  SCOPED_TIME_STAT(&function_stats_);
121  solution_state_.statuses.clear();
122 }
123 
125  SCOPED_TIME_STAT(&function_stats_);
126  solution_state_ = state;
127  solution_state_has_been_set_externally_ = true;
128 }
129 
131  notify_that_matrix_is_unchanged_ = true;
132 }
133 
135  SCOPED_TIME_STAT(&function_stats_);
136  DCHECK(lp.IsCleanedUp());
138  if (!lp.IsInEquationForm()) {
140  "The problem is not in the equations form.");
141  }
142  Cleanup update_deterministic_time_on_return(
143  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
144 
145  // Initialization. Note That Initialize() must be called first since it
146  // analyzes the current solver state.
147  const double start_time = time_limit->GetElapsedTime();
148  GLOP_RETURN_IF_ERROR(Initialize(lp));
149 
150  dual_infeasibility_improvement_direction_.clear();
151  update_row_.Invalidate();
152  test_lu_.Clear();
153  problem_status_ = ProblemStatus::INIT;
154  feasibility_phase_ = true;
155  num_iterations_ = 0;
156  num_feasibility_iterations_ = 0;
157  num_optimization_iterations_ = 0;
158  feasibility_time_ = 0.0;
159  optimization_time_ = 0.0;
160  total_time_ = 0.0;
161 
162  // In case we abort because of an error, we cannot assume that the current
163  // solution state will be in sync with all our internal data structure. In
164  // case we abort without resetting it, setting this allow us to still use the
165  // previous state info, but we will double-check everything.
166  solution_state_has_been_set_externally_ = true;
167 
168  if (VLOG_IS_ON(1)) {
169  ComputeNumberOfEmptyRows();
170  ComputeNumberOfEmptyColumns();
171  DisplayBasicVariableStatistics();
172  DisplayProblem();
173  }
174  if (FLAGS_simplex_stop_after_first_basis) {
175  DisplayAllStats();
176  return Status::OK();
177  }
178 
179  const bool use_dual = parameters_.use_dual_simplex();
180  VLOG(1) << "------ " << (use_dual ? "Dual simplex." : "Primal simplex.");
181  VLOG(1) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
182  << compact_matrix_.num_cols() << " columns, "
183  << compact_matrix_.num_entries() << " entries.";
184 
185  // TODO(user): Avoid doing the first phase checks when we know from the
186  // incremental solve that the solution is already dual or primal feasible.
187  VLOG(1) << "------ First phase: feasibility.";
188  entering_variable_.SetPricingRule(parameters_.feasibility_rule());
189  if (use_dual) {
190  if (parameters_.perturb_costs_in_dual_simplex()) {
191  reduced_costs_.PerturbCosts();
192  }
193 
194  variables_info_.MakeBoxedVariableRelevant(false);
195  GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
196  DisplayIterationInfo();
197 
198  if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
199  // Note(user): In most cases, the matrix will already be refactorized and
200  // both Refactorize() and PermuteBasis() will do nothing. However, if the
201  // time limit is reached during the first phase, this might not be the
202  // case and RecomputeBasicVariableValues() below DCHECKs that the matrix
203  // is refactorized. This is not required, but we currently only want to
204  // recompute values from scratch when the matrix was just refactorized to
205  // maximize precision.
206  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
207  PermuteBasis();
208 
209  variables_info_.MakeBoxedVariableRelevant(true);
210  reduced_costs_.MakeReducedCostsPrecise();
211 
212  // This is needed to display errors properly.
213  MakeBoxedVariableDualFeasible(variables_info_.GetNonBasicBoxedVariables(),
214  /*update_basic_values=*/false);
215  variable_values_.RecomputeBasicVariableValues();
216  variable_values_.ResetPrimalInfeasibilityInformation();
217  }
218  } else {
219  reduced_costs_.MaintainDualInfeasiblePositions(true);
220  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
221  DisplayIterationInfo();
222 
223  // After the primal phase I, we need to restore the objective.
224  if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
225  InitializeObjectiveAndTestIfUnchanged(lp);
226  reduced_costs_.ResetForNewObjective();
227  }
228  }
229 
230  // Reduced costs must be explicitly recomputed because DisplayErrors() is
231  // const.
232  // TODO(user): This API is not really nice.
233  reduced_costs_.GetReducedCosts();
234  DisplayErrors();
235 
236  feasibility_phase_ = false;
237  feasibility_time_ = time_limit->GetElapsedTime() - start_time;
238  entering_variable_.SetPricingRule(parameters_.optimization_rule());
239  num_feasibility_iterations_ = num_iterations_;
240 
241  VLOG(1) << "------ Second phase: optimization.";
242 
243  // Because of shifts or perturbations, we may need to re-run a dual simplex
244  // after the primal simplex finished, or the opposite.
245  //
246  // We alter between solving with primal and dual Phase II algorithm as long as
247  // time limit permits *and* we did not yet achieve the desired precision.
248  // I.e., we run iteration i if the solution from iteration i-1 was not precise
249  // after we removed the bound and cost shifts and perturbations.
250  //
251  // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
252  // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
253  // (i.e., these statuses are not necesserily a consequence of hitting a time
254  // limit).
255  for (int num_optims = 0;
256  // We want to enter the loop when both num_optims and num_iterations_ are
257  // *equal* to the corresponding limits (to return a meaningful status
258  // when the limits are set to 0).
259  num_optims <= parameters_.max_number_of_reoptimizations() &&
260  !objective_limit_reached_ &&
261  (num_iterations_ == 0 ||
262  num_iterations_ < parameters_.max_number_of_iterations()) &&
263  !time_limit->LimitReached() && !FLAGS_simplex_stop_after_feasibility &&
264  (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
265  problem_status_ == ProblemStatus::DUAL_FEASIBLE);
266  ++num_optims) {
267  if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
268  // Run the primal simplex.
269  reduced_costs_.MaintainDualInfeasiblePositions(true);
270  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
271  } else {
272  // Run the dual simplex.
273  reduced_costs_.MaintainDualInfeasiblePositions(false);
274  GLOP_RETURN_IF_ERROR(DualMinimize(time_limit));
275  }
276 
277  // Minimize() or DualMinimize() always double check the result with maximum
278  // precision by refactoring the basis before exiting (except if an
279  // iteration or time limit was reached).
280  DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
281  problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
282  basis_factorization_.IsRefactorized());
283 
284  // Remove the bound and cost shifts (or perturbations).
285  //
286  // Note(user): Currently, we never do both at the same time, so we could
287  // be a bit faster here, but then this is quick anyway.
288  variable_values_.ResetAllNonBasicVariableValues();
289  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
290  PermuteBasis();
291  variable_values_.RecomputeBasicVariableValues();
292  reduced_costs_.ClearAndRemoveCostShifts();
293 
294  // Reduced costs must be explicitly recomputed because DisplayErrors() is
295  // const.
296  // TODO(user): This API is not really nice.
297  reduced_costs_.GetReducedCosts();
298  DisplayIterationInfo();
299  DisplayErrors();
300 
301  // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
302  // status by checking with the other phase I that the problem is really
303  // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
304  // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
305  // OPTIMAL and the dual does not have issues on this problem.
306  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
307  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
308  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
309  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
310  reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
311  VLOG(1) << "DUAL_UNBOUNDED was reported, but the residual and/or "
312  << "dual infeasibility is above the tolerance";
313  }
314  break;
315  }
316 
317  // Change the status, if after the shift and perturbation removal the
318  // problem is not OPTIMAL anymore.
319  if (problem_status_ == ProblemStatus::OPTIMAL) {
320  const Fractional solution_tolerance =
321  parameters_.solution_feasibility_tolerance();
322  if (variable_values_.ComputeMaximumPrimalResidual() >
323  solution_tolerance ||
324  reduced_costs_.ComputeMaximumDualResidual() > solution_tolerance) {
325  VLOG(1) << "OPTIMAL was reported, yet one of the residuals is "
326  "above the solution feasibility tolerance after the "
327  "shift/perturbation are removed.";
328  if (parameters_.change_status_to_imprecise()) {
329  problem_status_ = ProblemStatus::IMPRECISE;
330  }
331  } else {
332  // We use the "precise" tolerances here to try to report the best
333  // possible solution.
334  const Fractional primal_tolerance =
335  parameters_.primal_feasibility_tolerance();
336  const Fractional dual_tolerance =
337  parameters_.dual_feasibility_tolerance();
338  const Fractional primal_infeasibility =
339  variable_values_.ComputeMaximumPrimalInfeasibility();
340  const Fractional dual_infeasibility =
341  reduced_costs_.ComputeMaximumDualInfeasibility();
342  if (primal_infeasibility > primal_tolerance &&
343  dual_infeasibility > dual_tolerance) {
344  VLOG(1) << "OPTIMAL was reported, yet both of the infeasibility "
345  "are above the tolerance after the "
346  "shift/perturbation are removed.";
347  if (parameters_.change_status_to_imprecise()) {
348  problem_status_ = ProblemStatus::IMPRECISE;
349  }
350  } else if (primal_infeasibility > primal_tolerance) {
351  VLOG(1) << "Re-optimizing with dual simplex ... ";
352  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
353  } else if (dual_infeasibility > dual_tolerance) {
354  VLOG(1) << "Re-optimizing with primal simplex ... ";
355  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
356  }
357  }
358  }
359  }
360 
361  // Check that the return status is "precise".
362  //
363  // TODO(user): we curretnly skip the DUAL_INFEASIBLE status because the
364  // quantities are not up to date in this case.
365  if (parameters_.change_status_to_imprecise() &&
366  problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
367  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
368  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
369  reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
370  problem_status_ = ProblemStatus::IMPRECISE;
371  } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
372  problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
373  problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
374  if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
375  problem_status_ = ProblemStatus::IMPRECISE;
376  }
377  } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
378  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
379  problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
380  if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
381  problem_status_ = ProblemStatus::IMPRECISE;
382  }
383  }
384  }
385 
386  // Store the result for the solution getters.
387  SaveState();
388  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
389  solution_dual_values_ = reduced_costs_.GetDualValues();
390  solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
391  if (lp.IsMaximizationProblem()) {
392  ChangeSign(&solution_dual_values_);
393  ChangeSign(&solution_reduced_costs_);
394  }
395 
396  // If the problem is unbounded, set the objective value to +/- infinity.
397  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
398  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
399  solution_objective_value_ =
400  (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
401  : -kInfinity;
402  if (lp.IsMaximizationProblem()) {
403  solution_objective_value_ = -solution_objective_value_;
404  }
405  }
406 
407  total_time_ = time_limit->GetElapsedTime() - start_time;
408  optimization_time_ = total_time_ - feasibility_time_;
409  num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
410 
411  DisplayAllStats();
412  return Status::OK();
413 }
414 
416  return problem_status_;
417 }
418 
420  return solution_objective_value_;
421 }
422 
// Total number of simplex iterations performed so far (both phases).
int64 RevisedSimplex::GetNumberOfIterations() const { return num_iterations_; }
424 
// Number of rows (constraints) of the problem given to Solve().
RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
426 
// Number of columns (variables, slacks included) of the problem.
ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
428 
430  return variable_values_.Get(col);
431 }
432 
434  return solution_reduced_costs_[col];
435 }
436 
438  return solution_reduced_costs_;
439 }
440 
442  return solution_dual_values_[row];
443 }
444 
446  return variables_info_.GetStatusRow()[col];
447 }
448 
// Returns the currently stored basis state (used for incremental solves).
const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
450 
452  // Note the negative sign since the slack variable is such that
453  // constraint_activity + slack_value = 0.
454  return -variable_values_.Get(SlackColIndex(row));
455 }
456 
458  // The status of the given constraint is the same as the status of the
459  // associated slack variable with a change of sign.
460  const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
463  }
466  }
467  return VariableToConstraintStatus(s);
468 }
469 
471  DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
472  return solution_primal_ray_;
473 }
475  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
476  return solution_dual_ray_;
477 }
478 
480  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
481  return solution_dual_ray_row_combination_;
482 }
483 
// Returns the column (variable) that is basic in the given row.
ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
485 
487  DCHECK(basis_factorization_.GetColumnPermutation().empty());
488  return basis_factorization_;
489 }
490 
// Formats a human-readable multi-line summary of the last solve: final
// status, total wall time and iteration count, and the per-phase
// (feasibility / optimization) breakdown of both.
std::string RevisedSimplex::GetPrettySolverStats() const {
  return absl::StrFormat(
      "Problem status : %s\n"
      "Solving time : %-6.4g\n"
      "Number of iterations : %u\n"
      "Time for solvability (first phase) : %-6.4g\n"
      "Number of iterations for solvability : %u\n"
      "Time for optimization : %-6.4g\n"
      "Number of iterations for optimization : %u\n"
      "Stop after first basis : %d\n",
      GetProblemStatusString(problem_status_), total_time_, num_iterations_,
      feasibility_time_, num_feasibility_iterations_, optimization_time_,
      num_optimization_iterations_, FLAGS_simplex_stop_after_first_basis);
}
505 
507  // TODO(user): Also take into account the dual edge norms and the reduced cost
508  // updates.
509  return basis_factorization_.DeterministicTime() +
510  update_row_.DeterministicTime() +
511  primal_edge_norms_.DeterministicTime();
512 }
513 
514 void RevisedSimplex::SetVariableNames() {
515  variable_name_.resize(num_cols_, "");
516  for (ColIndex col(0); col < first_slack_col_; ++col) {
517  const ColIndex var_index = col + 1;
518  variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
519  }
520  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
521  const ColIndex var_index = col - first_slack_col_ + 1;
522  variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
523  }
524 }
525 
526 VariableStatus RevisedSimplex::ComputeDefaultVariableStatus(
527  ColIndex col) const {
529  if (lower_bound_[col] == upper_bound_[col]) {
531  }
532  if (lower_bound_[col] == -kInfinity && upper_bound_[col] == kInfinity) {
533  return VariableStatus::FREE;
534  }
535 
536  // Returns the bound with the lowest magnitude. Note that it must be finite
537  // because the VariableStatus::FREE case was tested earlier.
538  DCHECK(IsFinite(lower_bound_[col]) || IsFinite(upper_bound_[col]));
539  return std::abs(lower_bound_[col]) <= std::abs(upper_bound_[col])
542 }
543 
// Marks the given variable as non-basic with the given status, then derives
// its value from that status (presumably snapping it to the matching bound;
// see VariableValues::SetNonBasicVariableValueFromStatus() — confirm there).
void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
    ColIndex col, VariableStatus status) {
  variables_info_.UpdateToNonBasicStatus(col, status);
  variable_values_.SetNonBasicVariableValueFromStatus(col);
}
549 
550 bool RevisedSimplex::BasisIsConsistent() const {
551  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
552  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
553  for (RowIndex row(0); row < num_rows_; ++row) {
554  const ColIndex col = basis_[row];
555  if (!is_basic.IsSet(col)) return false;
556  if (variable_statuses[col] != VariableStatus::BASIC) return false;
557  }
558  ColIndex cols_in_basis(0);
559  ColIndex cols_not_in_basis(0);
560  for (ColIndex col(0); col < num_cols_; ++col) {
561  cols_in_basis += is_basic.IsSet(col);
562  cols_not_in_basis += !is_basic.IsSet(col);
563  if (is_basic.IsSet(col) !=
564  (variable_statuses[col] == VariableStatus::BASIC)) {
565  return false;
566  }
567  }
568  if (cols_in_basis != RowToColIndex(num_rows_)) return false;
569  if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
570  return true;
571 }
572 
// Note(user): The basis factorization is not updated by this function but by
// UpdateAndPivot().
//
// Swaps one column of the basis: entering_col becomes basic in basis_row and
// the variable previously basic there is given leaving_variable_status.
void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
                                 VariableStatus leaving_variable_status) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_ROW_BOUNDS(basis_row);

  // Check that this is not called with an entering_col already in the basis
  // and that the leaving col is indeed in the basis.
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK_NE(basis_[basis_row], entering_col);
  DCHECK_NE(basis_[basis_row], kInvalidCol);

  const ColIndex leaving_col = basis_[basis_row];
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // Make leaving_col leave the basis and update relevant data.
  // Note that the leaving variable value is not necessarily at its exact
  // bound, which is like a bound shift.
  variables_info_.Update(leaving_col, leaving_variable_status);
  // The leaving variable can only go to a bound (or stay fixed), never FREE.
  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
         leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
         leaving_variable_status == VariableStatus::FIXED_VALUE);

  basis_[basis_row] = entering_col;
  variables_info_.Update(entering_col, VariableStatus::BASIC);
  // The cached update row was computed for the old basis; discard it.
  update_row_.Invalidate();
}
602 
603 namespace {
604 
605 // Comparator used to sort column indices according to a given value vector.
606 class ColumnComparator {
607  public:
608  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
609  bool operator()(ColIndex col_a, ColIndex col_b) const {
610  return value_[col_a] < value_[col_b];
611  }
612 
613  private:
614  const DenseRow& value_;
615 };
616 
617 } // namespace
618 
619 // To understand better what is going on in this function, let us say that this
620 // algorithm will produce the optimal solution to a problem containing only
621 // singleton columns (provided that the variables start at the minimum possible
622 // cost, see ComputeDefaultVariableStatus()). This is unit tested.
623 //
624 // The error_ must be equal to the constraint activity for the current variable
625 // values before this function is called. If error_[row] is 0.0, that mean this
626 // constraint is currently feasible.
627 void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
628  SCOPED_TIME_STAT(&function_stats_);
629  // Computes the singleton columns and the cost variation of the corresponding
630  // variables (in the only possible direction, i.e away from its current bound)
631  // for a unit change in the infeasibility of the corresponding row.
632  //
633  // Note that the slack columns will be treated as normal singleton columns.
634  std::vector<ColIndex> singleton_column;
635  DenseRow cost_variation(num_cols_, 0.0);
636  for (ColIndex col(0); col < num_cols_; ++col) {
637  if (compact_matrix_.column(col).num_entries() != 1) continue;
638  if (lower_bound_[col] == upper_bound_[col]) continue;
639  const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
640  if (variable_values_.Get(col) == lower_bound_[col]) {
641  cost_variation[col] = objective_[col] / std::abs(slope);
642  } else {
643  cost_variation[col] = -objective_[col] / std::abs(slope);
644  }
645  singleton_column.push_back(col);
646  }
647  if (singleton_column.empty()) return;
648 
649  // Sort the singleton columns for the case where many of them correspond to
650  // the same row (equivalent to a piecewise-linear objective on this variable).
651  // Negative cost_variation first since moving the singleton variable away from
652  // its current bound means the least decrease in the objective function for
653  // the same "error" variation.
654  ColumnComparator comparator(cost_variation);
655  std::sort(singleton_column.begin(), singleton_column.end(), comparator);
656  DCHECK_LE(cost_variation[singleton_column.front()],
657  cost_variation[singleton_column.back()]);
658 
659  // Use a singleton column to "absorb" the error when possible to avoid
660  // introducing unneeded artificial variables. Note that with scaling on, the
661  // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
662  // them) and that the SingletonColumnSignPreprocessor makes them all positive.
663  // However, this code works for any coefficient value.
664  const DenseRow& variable_values = variable_values_.GetDenseRow();
665  for (const ColIndex col : singleton_column) {
666  const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
667 
668  // If no singleton columns have entered the basis for this row, choose the
669  // first one. It will be the one with the least decrease in the objective
670  // function when it leaves the basis.
671  if ((*basis)[row] == kInvalidCol) {
672  (*basis)[row] = col;
673  }
674 
675  // If there is already no error in this row (i.e. it is primal-feasible),
676  // there is nothing to do.
677  if (error_[row] == 0.0) continue;
678 
679  // In this case, all the infeasibility can be "absorbed" and this variable
680  // may not be at one of its bound anymore, so we have to use it in the
681  // basis.
682  const Fractional coeff =
683  compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
684  const Fractional new_value = variable_values[col] + error_[row] / coeff;
685  if (new_value >= lower_bound_[col] && new_value <= upper_bound_[col]) {
686  error_[row] = 0.0;
687 
688  // Use this variable in the initial basis.
689  (*basis)[row] = col;
690  continue;
691  }
692 
693  // The idea here is that if the singleton column cannot be used to "absorb"
694  // all error_[row], if it is boxed, it can still be used to make the
695  // infeasibility smaller (with a bound flip).
696  const Fractional box_width = variables_info_.GetBoundDifference(col);
697  DCHECK_NE(box_width, 0.0);
698  DCHECK_NE(error_[row], 0.0);
699  const Fractional error_sign = error_[row] / coeff;
700  if (variable_values[col] == lower_bound_[col] && error_sign > 0.0) {
701  DCHECK(IsFinite(box_width));
702  error_[row] -= coeff * box_width;
703  SetNonBasicVariableStatusAndDeriveValue(col,
705  continue;
706  }
707  if (variable_values[col] == upper_bound_[col] && error_sign < 0.0) {
708  DCHECK(IsFinite(box_width));
709  error_[row] += coeff * box_width;
710  SetNonBasicVariableStatusAndDeriveValue(col,
712  continue;
713  }
714  }
715 }
716 
717 bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
718  const LinearProgram& lp, bool* only_change_is_new_rows,
719  bool* only_change_is_new_cols, ColIndex* num_new_cols) {
720  SCOPED_TIME_STAT(&function_stats_);
721  DCHECK(only_change_is_new_rows != nullptr);
722  DCHECK(only_change_is_new_cols != nullptr);
723  DCHECK(num_new_cols != nullptr);
724  DCHECK_NE(kInvalidCol, lp.GetFirstSlackVariable());
725  DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
726  DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
727 
728  DCHECK_EQ(lp.num_variables(),
729  lp.GetFirstSlackVariable() + RowToColIndex(lp.num_constraints()));
730  DCHECK(IsRightMostSquareMatrixIdentity(lp.GetSparseMatrix()));
731  const bool old_part_of_matrix_is_unchanged =
733  num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
734 
735  // Test if the matrix is unchanged, and if yes, just returns true. Note that
736  // this doesn't check the columns corresponding to the slack variables,
737  // because they were checked by lp.IsInEquationForm() when Solve() was called.
738  if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
739  lp.num_variables() == num_cols_) {
740  return true;
741  }
742 
743  // Check if the new matrix can be derived from the old one just by adding
744  // new rows (i.e new constraints).
745  *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
746  lp.num_constraints() > num_rows_ &&
747  lp.GetFirstSlackVariable() == first_slack_col_;
748 
749  // Check if the new matrix can be derived from the old one just by adding
750  // new columns (i.e new variables).
751  *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
752  lp.num_constraints() == num_rows_ &&
753  lp.GetFirstSlackVariable() > first_slack_col_;
754  *num_new_cols =
755  *only_change_is_new_cols ? lp.num_variables() - num_cols_ : ColIndex(0);
756 
757  // Initialize first_slack_.
758  first_slack_col_ = lp.GetFirstSlackVariable();
759 
760  // Initialize the new dimensions.
761  num_rows_ = lp.num_constraints();
762  num_cols_ = lp.num_variables();
763 
764  // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
765  // already added all the slack variables at this point, so matrix_ will not
766  // change anymore.
767  // TODO(user): This can be sped up by removing the MatrixView.
768  compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
769  if (parameters_.use_transposed_matrix()) {
770  transposed_matrix_.PopulateFromTranspose(compact_matrix_);
771  }
772  return false;
773 }
774 
775 bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
776  const LinearProgram& lp, ColIndex num_new_cols) {
777  SCOPED_TIME_STAT(&function_stats_);
778  DCHECK_EQ(lp.num_variables(), num_cols_);
779  DCHECK_LE(num_new_cols, first_slack_col_);
780  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
781 
782  // Check the original variable bounds.
783  for (ColIndex col(0); col < first_new_col; ++col) {
784  if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
785  upper_bound_[col] != lp.variable_upper_bounds()[col]) {
786  return false;
787  }
788  }
789  // Check that each new variable has a bound of zero.
790  for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
791  if (lp.variable_lower_bounds()[col] != 0.0 &&
792  lp.variable_upper_bounds()[col] != 0.0) {
793  return false;
794  }
795  }
796  // Check that the slack bounds are unchanged.
797  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
798  if (lower_bound_[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
799  upper_bound_[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
800  return false;
801  }
802  }
803  return true;
804 }
805 
806 bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged(
807  const LinearProgram& lp) {
808  SCOPED_TIME_STAT(&function_stats_);
809  lower_bound_.resize(num_cols_, 0.0);
810  upper_bound_.resize(num_cols_, 0.0);
811  bound_perturbation_.AssignToZero(num_cols_);
812 
813  // Variable bounds, for both non-slack and slack variables.
814  bool bounds_are_unchanged = true;
815  DCHECK_EQ(lp.num_variables(), num_cols_);
816  for (ColIndex col(0); col < lp.num_variables(); ++col) {
817  if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
818  upper_bound_[col] != lp.variable_upper_bounds()[col]) {
819  bounds_are_unchanged = false;
820  break;
821  }
822  }
823  if (!bounds_are_unchanged) {
824  lower_bound_ = lp.variable_lower_bounds();
825  upper_bound_ = lp.variable_upper_bounds();
826  }
827  return bounds_are_unchanged;
828 }
829 
830 bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
831  const LinearProgram& lp) {
832  SCOPED_TIME_STAT(&function_stats_);
833 
834  bool objective_is_unchanged = true;
835  objective_.resize(num_cols_, 0.0);
836  DCHECK_EQ(num_cols_, lp.num_variables());
837  if (lp.IsMaximizationProblem()) {
838  // Note that we use the minimization version of the objective internally.
839  for (ColIndex col(0); col < lp.num_variables(); ++col) {
840  const Fractional coeff = -lp.objective_coefficients()[col];
841  if (objective_[col] != coeff) {
842  objective_is_unchanged = false;
843  }
844  objective_[col] = coeff;
845  }
846  objective_offset_ = -lp.objective_offset();
847  objective_scaling_factor_ = -lp.objective_scaling_factor();
848  } else {
849  for (ColIndex col(0); col < lp.num_variables(); ++col) {
850  if (objective_[col] != lp.objective_coefficients()[col]) {
851  objective_is_unchanged = false;
852  break;
853  }
854  }
855  if (!objective_is_unchanged) {
856  objective_ = lp.objective_coefficients();
857  }
858  objective_offset_ = lp.objective_offset();
859  objective_scaling_factor_ = lp.objective_scaling_factor();
860  }
861  return objective_is_unchanged;
862 }
863 
// Precomputes primal_objective_limit_ and dual_objective_limit_ from the
// user-provided limits in parameters_, undoing the internal offset and
// scaling so that comparisons can be made directly against the internal
// objective value. Also resets objective_limit_reached_.
// NOTE(review): the lp argument is currently unused in this body.
void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
  objective_limit_reached_ = false;
  DCHECK(std::isfinite(objective_offset_));
  DCHECK(std::isfinite(objective_scaling_factor_));
  DCHECK_NE(0.0, objective_scaling_factor_);

  // This sets dual_objective_limit_ and then primal_objective_limit_.
  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
  for (const bool set_dual : {true, false}) {
    // NOTE(user): If objective_scaling_factor_ is negative, the optimization
    // direction was reversed (during preprocessing or inside revised simplex),
    // i.e., the original problem is maximization. In such case the _meaning_ of
    // the lower and upper limits is swapped. To this end we must change the
    // signs of limits, which happens automatically when calculating shifted
    // limits. We must also use upper (resp. lower) limit in place of lower
    // (resp. upper) limit when calculating the final objective_limit_.
    //
    // Choose lower limit if using the dual simplex and scaling factor is
    // negative or if using the primal simplex and scaling is nonnegative, upper
    // limit otherwise.
    const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
                                 ? parameters_.objective_lower_limit()
                                 : parameters_.objective_upper_limit();
    // Map the user-facing limit into the internal objective space.
    const Fractional shifted_limit =
        limit / objective_scaling_factor_ - objective_offset_;

    // The isfinite() test is there to avoid generating NaNs with clang in
    // fast-math mode on iOS 9.3.i.
    // A small relative tolerance is applied to the finite limits so that
    // numerical imprecision does not trigger the limit spuriously.
    if (set_dual) {
      dual_objective_limit_ = std::isfinite(shifted_limit)
                                  ? shifted_limit * (1.0 + tolerance)
                                  : shifted_limit;
    } else {
      primal_objective_limit_ = std::isfinite(shifted_limit)
                                    ? shifted_limit * (1.0 - tolerance)
                                    : shifted_limit;
    }
  }
}
903 
// Sets the status of every variable from the given warm-start BasisState,
// falling back to ComputeDefaultVariableStatus() when the state has no entry
// for a column or when the stored status is incompatible with the current
// variable bounds. The num_new_cols columns just before first_slack_col_ are
// considered new and always receive their default status. At most num_rows_
// variables are kept BASIC.
void RevisedSimplex::InitializeVariableStatusesForWarmStart(
    const BasisState& state, ColIndex num_new_cols) {
  variables_info_.InitializeAndComputeType();
  RowIndex num_basic_variables(0);
  DCHECK_LE(num_new_cols, first_slack_col_);
  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
  // Compute the status for all the columns (note that the slack variables are
  // already added at the end of the matrix at this stage).
  for (ColIndex col(0); col < num_cols_; ++col) {
    const VariableStatus default_status = ComputeDefaultVariableStatus(col);

    // Start with the given "warm" status from the BasisState if it exists.
    // Note that the slack columns of `state` are shifted by num_new_cols
    // relative to our internal column indices.
    VariableStatus status = default_status;
    if (col < first_new_col && col < state.statuses.size()) {
      status = state.statuses[col];
    } else if (col >= first_slack_col_ &&
               col - num_new_cols < state.statuses.size()) {
      status = state.statuses[col - num_new_cols];
    }

    if (status == VariableStatus::BASIC) {
      // Do not allow more than num_rows_ VariableStatus::BASIC variables.
      if (num_basic_variables == num_rows_) {
        VLOG(1) << "Too many basic variables in the warm-start basis."
                << "Only keeping the first ones as VariableStatus::BASIC.";
        variables_info_.UpdateToNonBasicStatus(col, default_status);
      } else {
        ++num_basic_variables;
        variables_info_.UpdateToBasicStatus(col);
      }
    } else {
      // Remove incompatibilities between the warm status and the variable
      // bounds. We use the default status as an indication of the bounds
      // type.
      if ((status != default_status) &&
          ((default_status == VariableStatus::FIXED_VALUE) ||
           (status == VariableStatus::FREE) ||
           (status == VariableStatus::FIXED_VALUE) ||
           (status == VariableStatus::AT_LOWER_BOUND &&
            lower_bound_[col] == -kInfinity) ||
           (status == VariableStatus::AT_UPPER_BOUND &&
            upper_bound_[col] == kInfinity))) {
        status = default_status;
      }
      variables_info_.UpdateToNonBasicStatus(col, status);
    }
  }

  // Initialize the values. Only the non-basic values are reset here; callers
  // recompute the basic variable values when they need them.
  variable_values_.ResetAllNonBasicVariableValues();
}
955 
956 // This implementation starts with an initial matrix B equal to the identity
957 // matrix (modulo a column permutation). For that it uses either the slack
958 // variables or the singleton columns present in the problem. Afterwards, the
959 // fixed slacks in the basis are exchanged with normal columns of A if possible
960 // by the InitialBasis class.
961 Status RevisedSimplex::CreateInitialBasis() {
962  SCOPED_TIME_STAT(&function_stats_);
963 
964  // Initialize the variable values and statuses.
965  // Note that for the dual algorithm, boxed variables will be made
966  // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
967  // really matter at which of their two finite bounds they start.
968  int num_free_variables = 0;
969  variables_info_.InitializeAndComputeType();
970  for (ColIndex col(0); col < num_cols_; ++col) {
971  const VariableStatus status = ComputeDefaultVariableStatus(col);
972  SetNonBasicVariableStatusAndDeriveValue(col, status);
973  if (status == VariableStatus::FREE) ++num_free_variables;
974  }
975  VLOG(1) << "Number of free variables in the problem: " << num_free_variables;
976 
977  // Start by using an all-slack basis.
978  RowToColMapping basis(num_rows_, kInvalidCol);
979  for (RowIndex row(0); row < num_rows_; ++row) {
980  basis[row] = SlackColIndex(row);
981  }
982 
983  // If possible, for the primal simplex we replace some slack variables with
984  // some singleton columns present in the problem.
985  if (!parameters_.use_dual_simplex() &&
986  parameters_.initial_basis() != GlopParameters::MAROS &&
987  parameters_.exploit_singleton_column_in_initial_basis()) {
988  // For UseSingletonColumnInInitialBasis() to work better, we change
989  // the value of the boxed singleton column with a non-zero cost to the best
990  // of their two bounds.
991  for (ColIndex col(0); col < num_cols_; ++col) {
992  if (compact_matrix_.column(col).num_entries() != 1) continue;
993  const VariableStatus status = variables_info_.GetStatusRow()[col];
994  const Fractional objective = objective_[col];
995  if (objective > 0 && IsFinite(lower_bound_[col]) &&
996  status == VariableStatus::AT_UPPER_BOUND) {
997  SetNonBasicVariableStatusAndDeriveValue(col,
999  } else if (objective < 0 && IsFinite(upper_bound_[col]) &&
1000  status == VariableStatus::AT_LOWER_BOUND) {
1001  SetNonBasicVariableStatusAndDeriveValue(col,
1003  }
1004  }
1005 
1006  // Compute the primal infeasibility of the initial variable values in
1007  // error_.
1008  ComputeVariableValuesError();
1009 
1010  // TODO(user): A better but slightly more complex algorithm would be to:
1011  // - Ignore all singleton columns except the slacks during phase I.
1012  // - For this, change the slack variable bounds accordingly.
1013  // - At the end of phase I, restore the slack variable bounds and perform
1014  // the same algorithm to start with feasible and "optimal" values of the
1015  // singleton columns.
1016  basis.assign(num_rows_, kInvalidCol);
1017  UseSingletonColumnInInitialBasis(&basis);
1018 
1019  // Eventually complete the basis with fixed slack columns.
1020  for (RowIndex row(0); row < num_rows_; ++row) {
1021  if (basis[row] == kInvalidCol) {
1022  basis[row] = SlackColIndex(row);
1023  }
1024  }
1025  }
1026 
1027  // Use an advanced initial basis to remove the fixed variables from the basis.
1028  if (parameters_.initial_basis() == GlopParameters::NONE) {
1029  return InitializeFirstBasis(basis);
1030  }
1031  if (parameters_.initial_basis() == GlopParameters::MAROS) {
1032  InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1033  upper_bound_, variables_info_.GetTypeRow());
1034  if (parameters_.use_dual_simplex()) {
1035  // This dual version only uses zero-cost columns to complete the
1036  // basis.
1037  initial_basis.GetDualMarosBasis(num_cols_, &basis);
1038  } else {
1039  initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1040  }
1041  int number_changed = 0;
1042  for (RowIndex row(0); row < num_rows_; ++row) {
1043  if (basis[row] != SlackColIndex(row)) {
1044  number_changed++;
1045  }
1046  }
1047  VLOG(1) << "Number of Maros basis changes: " << number_changed;
1048  } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1049  parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1050  // First unassign the fixed variables from basis.
1051  int num_fixed_variables = 0;
1052  for (RowIndex row(0); row < basis.size(); ++row) {
1053  const ColIndex col = basis[row];
1054  if (lower_bound_[col] == upper_bound_[col]) {
1055  basis[row] = kInvalidCol;
1056  ++num_fixed_variables;
1057  }
1058  }
1059 
1060  if (num_fixed_variables == 0) {
1061  VLOG(1) << "Crash is set to " << parameters_.initial_basis()
1062  << " but there is no equality rows to remove from initial all "
1063  "slack basis.";
1064  } else {
1065  // Then complete the basis with an advanced initial basis algorithm.
1066  VLOG(1) << "Trying to remove " << num_fixed_variables
1067  << " fixed variables from the initial basis.";
1068  InitialBasis initial_basis(compact_matrix_, objective_, lower_bound_,
1069  upper_bound_, variables_info_.GetTypeRow());
1070 
1071  if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1072  if (parameters_.use_scaling()) {
1073  initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1074  } else {
1075  VLOG(1) << "Bixby initial basis algorithm requires the problem "
1076  << "to be scaled. Skipping Bixby's algorithm.";
1077  }
1078  } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1079  // Note the use of num_cols_ here because this algorithm
1080  // benefits from treating fixed slack columns like any other column.
1081  if (parameters_.use_dual_simplex()) {
1082  // This dual version only uses zero-cost columns to complete the
1083  // basis.
1084  initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1085  } else {
1086  initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1087  }
1088 
1089  const Status status = InitializeFirstBasis(basis);
1090  if (status.ok()) {
1091  return status;
1092  } else {
1093  VLOG(1) << "Reverting to all slack basis.";
1094 
1095  for (RowIndex row(0); row < num_rows_; ++row) {
1096  basis[row] = SlackColIndex(row);
1097  }
1098  }
1099  }
1100  }
1101  } else {
1102  LOG(WARNING) << "Unsupported initial_basis parameters: "
1103  << parameters_.initial_basis();
1104  }
1105 
1106  return InitializeFirstBasis(basis);
1107 }
1108 
// Installs the given basis: completes it with slack columns for any
// unassigned row, factorizes it, and returns an ERROR_LU status when the
// upper bound on its condition number exceeds
// parameters_.initial_condition_number_threshold(). On success, marks the
// basic variables in variables_info_ and recomputes their values.
Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
  basis_ = basis;

  // For each row which does not have a basic column, assign it to the
  // corresponding slack column.
  basis_.resize(num_rows_, kInvalidCol);
  for (RowIndex row(0); row < num_rows_; ++row) {
    if (basis_[row] == kInvalidCol) {
      basis_[row] = SlackColIndex(row);
    }
  }

  // Factorize first, then permute: PermuteBasis() must run after a successful
  // Initialize().
  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
  PermuteBasis();

  // Test that the upper bound on the condition number of basis is not too high.
  // The number was not computed by any rigorous analysis, we just prefer to
  // revert to the all slack basis if the condition number of our heuristic
  // first basis seems bad. See for instance on cond11.mps, where we get an
  // infinity upper bound.
  const Fractional condition_number_ub =
      basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
    const std::string error_message =
        absl::StrCat("The matrix condition number upper bound is too high: ",
                     condition_number_ub);
    VLOG(1) << error_message;
    return Status(Status::ERROR_LU, error_message);
  }

  // Everything is okay, finish the initialization.
  for (RowIndex row(0); row < num_rows_; ++row) {
    variables_info_.Update(basis_[row], VariableStatus::BASIC);
  }
  DCHECK(BasisIsConsistent());

  // TODO(user): Maybe return an error status if this is too high. Note however
  // that if we want to do that, we need to reset variables_info_ to a
  // consistent state.
  variable_values_.RecomputeBasicVariableValues();
  if (VLOG_IS_ON(1)) {
    const Fractional tolerance = parameters_.primal_feasibility_tolerance();
    if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
      VLOG(1) << absl::StrCat(
          "The primal residual of the initial basis is above the tolerance, ",
          variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
    }
  }
  return Status::OK();
}
1159 
// (Re)initializes the solver for the given linear program. Detects what
// changed relative to the previous solve (matrix, objective, bounds), may
// switch between primal and dual simplex accordingly, and chooses between an
// incremental warm-start and a solve from scratch (CreateInitialBasis()).
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  bool matrix_is_unchanged = true;
  bool only_new_bounds = false;
  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
    matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
        lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols);
    only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
                      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
                          lp, num_new_cols);
  } else if (DEBUG_MODE) {
    // In debug mode, verify the caller's claim that the matrix is unchanged.
    CHECK(InitializeMatrixAndTestIfUnchanged(
        lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols));
  }
  // The notification is a one-shot hint: reset it for the next solve.
  notify_that_matrix_is_unchanged_ = false;
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);
  const bool bounds_are_unchanged = InitializeBoundsAndTestIfUnchanged(lp);

  // If parameters_.allow_simplex_algorithm_change() is true and we already have
  // a primal (resp. dual) feasible solution, then we use the primal (resp.
  // dual) algorithm since there is a good chance that it will be faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit(lp);

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(1)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        InitializeVariableStatusesForWarmStart(solution_state_, num_new_cols);
        // Shift the basic column indices to account for the columns inserted
        // just before the slacks.
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            InitializeVariableStatusesForWarmStart(solution_state_,
                                                   ColIndex(0));
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate it.
    InitializeVariableStatusesForWarmStart(solution_state_, ColIndex(0));
    // Rebuild basis_ from the BASIC bits of the statuses just installed.
    basis_.assign(num_rows_, kInvalidCol);
    RowIndex row(0);
    for (ColIndex col : variables_info_.GetIsBasicBitRow()) {
      basis_[row] = col;
      ++row;
    }

    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // TODO(user): If the basis is incomplete, we could complete it with
    // better slack variables than is done by InitializeFirstBasis() by
    // using a partial LU decomposition (see markowitz.h).
    if (InitializeFirstBasis(basis_).ok()) {
      solve_from_scratch = false;
    } else {
      VLOG(1) << "RevisedSimplex is not using the warm start "
                 "basis because it is not factorizable.";
    }
  }

  if (solve_from_scratch) {
    VLOG(1) << "Solve from scratch.";
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    VLOG(1) << "Incremental solve.";
  }
  DCHECK(BasisIsConsistent());
  return Status::OK();
}
1328 
1329 void RevisedSimplex::DisplayBasicVariableStatistics() {
1330  SCOPED_TIME_STAT(&function_stats_);
1331 
1332  int num_fixed_variables = 0;
1333  int num_free_variables = 0;
1334  int num_variables_at_bound = 0;
1335  int num_slack_variables = 0;
1336  int num_infeasible_variables = 0;
1337 
1338  const DenseRow& variable_values = variable_values_.GetDenseRow();
1339  const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1340  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1341  for (RowIndex row(0); row < num_rows_; ++row) {
1342  const ColIndex col = basis_[row];
1343  const Fractional value = variable_values[col];
1344  if (variable_types[col] == VariableType::UNCONSTRAINED) {
1345  ++num_free_variables;
1346  }
1347  if (value > upper_bound_[col] + tolerance ||
1348  value < lower_bound_[col] - tolerance) {
1349  ++num_infeasible_variables;
1350  }
1351  if (col >= first_slack_col_) {
1352  ++num_slack_variables;
1353  }
1354  if (lower_bound_[col] == upper_bound_[col]) {
1355  ++num_fixed_variables;
1356  } else if (variable_values[col] == lower_bound_[col] ||
1357  variable_values[col] == upper_bound_[col]) {
1358  ++num_variables_at_bound;
1359  }
1360  }
1361 
1362  VLOG(1) << "Basis size: " << num_rows_;
1363  VLOG(1) << "Number of basic infeasible variables: "
1364  << num_infeasible_variables;
1365  VLOG(1) << "Number of basic slack variables: " << num_slack_variables;
1366  VLOG(1) << "Number of basic variables at bound: " << num_variables_at_bound;
1367  VLOG(1) << "Number of basic fixed variables: " << num_fixed_variables;
1368  VLOG(1) << "Number of basic free variables: " << num_free_variables;
1369 }
1370 
1371 void RevisedSimplex::SaveState() {
1372  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1373  solution_state_.statuses = variables_info_.GetStatusRow();
1374  solution_state_has_been_set_externally_ = false;
1375 }
1376 
1377 RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1378  DenseBooleanColumn contains_data(num_rows_, false);
1379  for (ColIndex col(0); col < num_cols_; ++col) {
1380  for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1381  contains_data[e.row()] = true;
1382  }
1383  }
1384  RowIndex num_empty_rows(0);
1385  for (RowIndex row(0); row < num_rows_; ++row) {
1386  if (!contains_data[row]) {
1387  ++num_empty_rows;
1388  VLOG(1) << "Row " << row << " is empty.";
1389  }
1390  }
1391  return num_empty_rows;
1392 }
1393 
1394 ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1395  ColIndex num_empty_cols(0);
1396  for (ColIndex col(0); col < num_cols_; ++col) {
1397  if (compact_matrix_.column(col).IsEmpty()) {
1398  ++num_empty_cols;
1399  VLOG(1) << "Column " << col << " is empty.";
1400  }
1401  }
1402  return num_empty_cols;
1403 }
1404 
// Recomputes the basic variable values from scratch when the primal residual
// |A.x - b| is not small compared to the Harris tolerance. Also contains the
// (currently disabled) logic perturbing the bounds after many degenerate
// iterations. Requires a refactorized basis.
void RevisedSimplex::CorrectErrorsOnVariableValues() {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK(basis_factorization_.IsRefactorized());

  // TODO(user): The primal residual error does not change if we take degenerate
  // steps or if we do not change the variable values. No need to recompute it
  // in this case.
  const Fractional primal_residual =
      variable_values_.ComputeMaximumPrimalResidual();

  // If the primal_residual is within the tolerance, no need to recompute
  // the basic variable values with a better precision.
  if (primal_residual >= parameters_.harris_tolerance_ratio() *
                             parameters_.primal_feasibility_tolerance()) {
    variable_values_.RecomputeBasicVariableValues();
    VLOG(1) << "Primal infeasibility (bounds error) = "
            << variable_values_.ComputeMaximumPrimalInfeasibility()
            << ", Primal residual |A.x - b| = "
            << variable_values_.ComputeMaximumPrimalResidual();
  }

  // If we are doing too many degenerate iterations, we try to perturb the
  // problem by extending each basic variable bound with a random value. See how
  // bound_perturbation_ is used in ComputeHarrisRatioAndLeavingCandidates().
  //
  // Note that the perturbation is currently only reset to zero at the end of
  // the algorithm.
  //
  // TODO(user): This is currently disabled because the improvement is unclear.
  if (/* DISABLES CODE */ false &&
      (!feasibility_phase_ && num_consecutive_degenerate_iterations_ >= 100)) {
    VLOG(1) << "Perturbing the problem.";
    const Fractional tolerance = parameters_.harris_tolerance_ratio() *
                                 parameters_.primal_feasibility_tolerance();
    std::uniform_real_distribution<double> dist(0, tolerance);
    for (ColIndex col(0); col < num_cols_; ++col) {
      bound_perturbation_[col] += dist(random_);
    }
  }
}
1445 
1446 void RevisedSimplex::ComputeVariableValuesError() {
1447  SCOPED_TIME_STAT(&function_stats_);
1448  error_.AssignToZero(num_rows_);
1449  const DenseRow& variable_values = variable_values_.GetDenseRow();
1450  for (ColIndex col(0); col < num_cols_; ++col) {
1451  const Fractional value = variable_values[col];
1452  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1453  }
1454 }
1455 
1456 void RevisedSimplex::ComputeDirection(ColIndex col) {
1457  SCOPED_TIME_STAT(&function_stats_);
1459  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1460  direction_infinity_norm_ = 0.0;
1461  if (direction_.non_zeros.empty()) {
1462  // We still compute the direction non-zeros because our code relies on it.
1463  for (RowIndex row(0); row < num_rows_; ++row) {
1464  const Fractional value = direction_[row];
1465  if (value != 0.0) {
1466  direction_.non_zeros.push_back(row);
1467  direction_infinity_norm_ =
1468  std::max(direction_infinity_norm_, std::abs(value));
1469  }
1470  }
1471  } else {
1472  for (const auto e : direction_) {
1473  direction_infinity_norm_ =
1474  std::max(direction_infinity_norm_, std::abs(e.coefficient()));
1475  }
1476  }
1477  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1478  num_rows_ == 0 ? 0.0
1479  : static_cast<double>(direction_.non_zeros.size()) /
1480  static_cast<double>(num_rows_.value())));
1481 }
1482 
1483 Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1484  SCOPED_TIME_STAT(&function_stats_);
1485  compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1486  for (const auto e : direction_) {
1487  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1488  &error_);
1489  }
1490  return InfinityNorm(error_);
1491 }
1492 
1493 template <bool is_entering_reduced_cost_positive>
1494 Fractional RevisedSimplex::GetRatio(RowIndex row) const {
1495  const ColIndex col = basis_[row];
1496  const Fractional direction = direction_[row];
1497  const Fractional value = variable_values_.Get(col);
1498  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1499  DCHECK_NE(direction, 0.0);
1500  if (is_entering_reduced_cost_positive) {
1501  if (direction > 0.0) {
1502  return (upper_bound_[col] - value) / direction;
1503  } else {
1504  return (lower_bound_[col] - value) / direction;
1505  }
1506  } else {
1507  if (direction > 0.0) {
1508  return (value - lower_bound_[col]) / direction;
1509  } else {
1510  return (value - upper_bound_[col]) / direction;
1511  }
1512  }
1513 }
1514 
// First pass of the Harris ratio test. Returns an upper bound on the step
// length (initialized with bound_flip_ratio and only ever decreased), and
// fills leaving_candidates with (row, ratio) pairs for every row whose ratio
// does not exceed the current bound.
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  for (const auto e : direction_) {
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(e.row());
    // TODO(user): The perturbation is currently disabled, so no need to test
    // anything here. (The `false &&` below deliberately disables the branch.)
    if (false && ratio < 0.0) {
      // If the variable is already pass its bound, we use the perturbed version
      // of the bound (if bound_perturbation_[basis_[row]] is not zero).
      ratio += std::abs(bound_perturbation_[basis_[e.row()]] / e.coefficient());
    }
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increase the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because its helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1570 
1571 namespace {
1572 
1573 // Returns true if the candidate ratio is supposed to be more stable than the
1574 // current ratio (or if the two are equal).
1575 // The idea here is to take, by order of preference:
1576 // - the minimum positive ratio in order to intoduce a primal infeasibility
1577 // which is as small as possible.
1578 // - or the least negative one in order to have the smallest bound shift
1579 // possible on the leaving variable.
1580 bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1581  if (current >= 0.0) {
1582  return candidate >= 0.0 && candidate <= current;
1583  } else {
1584  return candidate >= current;
1585  }
1586 }
1587 
1588 } // namespace
1589 
1590 // Ratio-test or Quotient-test. Choose the row of the leaving variable.
1591 // Known as CHUZR or CHUZRO in FORTRAN codes.
// Arguments:
//   entering_col: the column selected to enter the basis.
//   reduced_cost: its reduced cost; must be non-zero, and its sign gives the
//       direction in which the entering variable will move.
//   refactorize: output; set to true when the basis should be refactorized
//       first to avoid pivoting on an imprecise small pivot.
//   leaving_row: output; the chosen leaving row, or kInvalidRow when the
//       entering variable simply flips bound.
//   step_length: output; the length of the step along the entering direction.
//   target_bound: output; the bound the leaving variable moves to (only set
//       when there is a leaving row).
1592 Status RevisedSimplex::ChooseLeavingVariableRow(
1593  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1594  RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1595  SCOPED_TIME_STAT(&function_stats_);
1596  GLOP_RETURN_ERROR_IF_NULL(refactorize);
1597  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1598  GLOP_RETURN_ERROR_IF_NULL(step_length);
1599  DCHECK_COL_BOUNDS(entering_col);
1600  DCHECK_NE(0.0, reduced_cost);
1601 
1602  // A few cases will cause the test to be recomputed from the beginning.
1603  int stats_num_leaving_choices = 0;
1604  equivalent_leaving_choices_.clear();
1605  while (true) {
1606  stats_num_leaving_choices = 0;
1607 
1608  // We initialize current_ratio with the maximum step the entering variable
1609  // can take (bound-flip). Note that we do not use tolerance here.
1610  const Fractional entering_value = variable_values_.Get(entering_col);
1611  Fractional current_ratio =
1612  (reduced_cost > 0.0) ? entering_value - lower_bound_[entering_col]
1613  : upper_bound_[entering_col] - entering_value;
1614  DCHECK_GT(current_ratio, 0.0);
1615 
1616  // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1617  // actually computes the minimum leaving ratio of all the variables. This is
1618  // the same as the 'classic' ratio test.
1619  const Fractional harris_ratio =
1620  (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1621  current_ratio, &leaving_candidates_)
1622  : ComputeHarrisRatioAndLeavingCandidates<false>(
1623  current_ratio, &leaving_candidates_);
1624 
1625  // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1626  // variable too much out of bounds), we take it as it is always stable and
1627  // fast.
1628  if (current_ratio <= harris_ratio) {
1629  *leaving_row = kInvalidRow;
1630  *step_length = current_ratio;
1631  break;
1632  }
1633 
1634  // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1635  // <= harris_ratio', we choose the leaving row with the largest coefficient.
1636  //
1637  // This has a big impact, because picking a leaving variable with a small
1638  // direction_[row] is the main source of Abnormal LU errors.
1639  Fractional pivot_magnitude = 0.0;
1640  stats_num_leaving_choices = 0;
1641  *leaving_row = kInvalidRow;
1642  equivalent_leaving_choices_.clear();
1643  for (const SparseColumn::Entry e : leaving_candidates_) {
1644  const Fractional ratio = e.coefficient();
1645  if (ratio > harris_ratio) continue;
1646  ++stats_num_leaving_choices;
1647  const RowIndex row = e.row();
1648 
1649  // If the magnitudes are the same, we choose the leaving variable with
1650  // what is probably the more stable ratio, see
1651  // IsRatioMoreOrEquallyStable().
1652  const Fractional candidate_magnitude = std::abs(direction_[row]);
1653  if (candidate_magnitude < pivot_magnitude) continue;
1654  if (candidate_magnitude == pivot_magnitude) {
1655  if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1656  if (ratio == current_ratio) {
1657  DCHECK_NE(kInvalidRow, *leaving_row);
1658  equivalent_leaving_choices_.push_back(row);
1659  continue;
1660  }
1661  }
1662  equivalent_leaving_choices_.clear();
1663  current_ratio = ratio;
1664  pivot_magnitude = candidate_magnitude;
1665  *leaving_row = row;
1666  }
1667 
1668  // Break the ties randomly.
1669  if (!equivalent_leaving_choices_.empty()) {
1670  equivalent_leaving_choices_.push_back(*leaving_row);
1671  *leaving_row =
1672  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1673  0, equivalent_leaving_choices_.size() - 1)(random_)];
1674  }
1675 
1676  // Since we took care of the bound-flip at the beginning, at this point
1677  // we have a valid leaving row.
1678  DCHECK_NE(kInvalidRow, *leaving_row);
1679 
1680  // A variable already outside one of its bounds +/- tolerance is considered
1681  // at its bound and its ratio is zero. Not doing this may lead to a step
1682  // that moves the objective in the wrong direction. We may want to allow
1683  // such steps, but then we will need to check that it doesn't break the
1684  // bounds of the other variables.
1685  if (current_ratio <= 0.0) {
1686  // Instead of doing a zero step, we do a small positive step. This
1687  // helps on degenerate problems.
1688  const Fractional minimum_delta =
1689  parameters_.degenerate_ministep_factor() *
1690  parameters_.primal_feasibility_tolerance();
1691  *step_length = minimum_delta / pivot_magnitude;
1692  } else {
1693  *step_length = current_ratio;
1694  }
1695 
1696  // Note(user): Testing the pivot at each iteration is useful for debugging
1697  // an LU factorization problem. Remove the false if you need to investigate
1698  // this, it makes sure that this will be compiled away.
1699  if (/* DISABLES CODE */ (false)) {
1700  TestPivot(entering_col, *leaving_row);
1701  }
1702 
1703  // We try various "heuristics" to avoid a small pivot.
1704  //
1705  // The smaller 'direction_[*leaving_row]', the less precise
1706  // it is. So we want to avoid pivoting by such a row. Small pivots lead to
1707  // ill-conditioned bases or even to matrices that are not a basis at all if
1708  // the actual (infinite-precision) coefficient is zero.
1709  //
1710  // TODO(user): We may have to choose another entering column if
1711  // we cannot prevent pivoting by a small pivot.
1712  // (Chvatal, p.115, about epsilon2.)
1713  if (pivot_magnitude <
1714  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
1715  // The first countermeasure is to recompute everything to the best
1716  // precision we can in the hope of avoiding such a choice. Note that this
1717  // helps a lot on the Netlib problems.
1718  if (!basis_factorization_.IsRefactorized()) {
1719  VLOG(1) << "Refactorizing to avoid pivoting by "
1720  << direction_[*leaving_row]
1721  << " direction_infinity_norm_ = " << direction_infinity_norm_
1722  << " reduced cost = " << reduced_cost;
1723  *refactorize = true;
1724  return Status::OK();
1725  }
1726 
1727  // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
1728  // we kwnow that this pivot will still have an acceptable magnitude.
1729  //
1730  // TODO(user): An issue left to fix is that if there is no such pivot at
1731  // all, then we will report unbounded even if this is not really the case.
1732  // As of 2018/07/18, this happens on l30.mps.
1733  VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
1734  << " direction_infinity_norm_ = " << direction_infinity_norm_
1735  << " reduced cost = " << reduced_cost;
1736  DCHECK_GE(std::abs(direction_[*leaving_row]),
1737  parameters_.minimum_acceptable_pivot());
1738  IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
1739  }
1740  break;
1741  }
1742 
1743  // Update the target bound.
1744  if (*leaving_row != kInvalidRow) {
1745  const bool is_reduced_cost_positive = (reduced_cost > 0.0);
1746  const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
1747  *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
1748  ? upper_bound_[basis_[*leaving_row]]
1749  : lower_bound_[basis_[*leaving_row]];
1750  }
1751 
1752  // Stats.
// NOTE(review): the "IF_STATS_ENABLED({" opener (original line 1753) was
// lost in this extraction; the "});" below is its closing token — confirm
// against the upstream file.
1754  ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
1755  if (!equivalent_leaving_choices_.empty()) {
1756  ratio_test_stats_.num_perfect_ties.Add(
1757  equivalent_leaving_choices_.size());
1758  }
1759  if (*leaving_row != kInvalidRow) {
1760  ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
1761  }
1762  });
1763  return Status::OK();
1764 }
1765 
1766 namespace {
1767 
1768 // Store a row with its ratio, coefficient magnitude and target bound. This is
1769 // used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1770 // details.
1771 struct BreakPoint {
1772  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1773  Fractional _target_bound)
1774  : row(_row),
1775  ratio(_ratio),
1776  coeff_magnitude(_coeff_magnitude),
1777  target_bound(_target_bound) {}
1778 
1779  // We want to process the breakpoints by increasing ratio and decreasing
1780  // coefficient magnitude (if the ratios are the same). Returns false if "this"
1781  // is before "other" in a priority queue.
1782  bool operator<(const BreakPoint& other) const {
1783  if (ratio == other.ratio) {
1784  if (coeff_magnitude == other.coeff_magnitude) {
1785  return row > other.row;
1786  }
1787  return coeff_magnitude < other.coeff_magnitude;
1788  }
1789  return ratio > other.ratio;
1790  }
1791 
1792  RowIndex row;
1796 };
1797 
1798 } // namespace
1799 
// Primal phase-I ratio test: chooses the leaving row so as to decrease the
// sum of primal infeasibilities as much as possible while preferring
// numerically stable (large-magnitude) pivots. All the ratios at which a
// basic variable changes primal-feasibility status are collected as
// BreakPoints, processed by increasing ratio via a heap, and the scan stops
// once the infeasibility improvement is exhausted. Outputs mirror
// ChooseLeavingVariableRow(); on a too-small pivot with a non-refactorized
// basis, *refactorize is set instead.
1800 void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
1801  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1802  RowIndex* leaving_row, Fractional* step_length,
1803  Fractional* target_bound) const {
1804  SCOPED_TIME_STAT(&function_stats_);
1805  RETURN_IF_NULL(refactorize);
1806  RETURN_IF_NULL(leaving_row);
1807  RETURN_IF_NULL(step_length);
1808  DCHECK_COL_BOUNDS(entering_col);
1809  DCHECK_NE(0.0, reduced_cost);
1810 
1811  // We initialize current_ratio with the maximum step the entering variable
1812  // can take (bound-flip). Note that we do not use tolerance here.
1813  const Fractional entering_value = variable_values_.Get(entering_col);
1814  Fractional current_ratio = (reduced_cost > 0.0)
1815  ? entering_value - lower_bound_[entering_col]
1816  : upper_bound_[entering_col] - entering_value;
1817  DCHECK_GT(current_ratio, 0.0);
1818 
1819  std::vector<BreakPoint> breakpoints;
1820  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1821  for (const auto e : direction_) {
1822  const Fractional direction =
1823  reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
1824  const Fractional magnitude = std::abs(direction);
1825  if (magnitude < tolerance) continue;
1826 
1827  // Computes by how much we can add 'direction' to the basic variable value
1828  // with index 'row' until it changes of primal feasibility status. That is
1829  // from infeasible to feasible or from feasible to infeasible. Note that the
1830  // transition infeasible->feasible->infeasible is possible. We use
1831  // tolerances here, but when the step will be performed, it will move the
1832  // variable to the target bound (possibly taking a small negative step).
1833  //
1834  // Note(user): The negative step will only happen when the leaving variable
1835  // was slightly infeasible (less than tolerance). Moreover, the overall
1836  // infeasibility will not necessarily increase since it doesn't take into
1837  // account all the variables with an infeasibility smaller than the
1838  // tolerance, and here we will at least improve the one of the leaving
1839  // variable.
1840  const ColIndex col = basis_[e.row()];
1841  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1842 
1843  const Fractional value = variable_values_.Get(col);
1844  const Fractional lower_bound = lower_bound_[col];
1845  const Fractional upper_bound = upper_bound_[col];
1846  const Fractional to_lower = (lower_bound - tolerance - value) / direction;
1847  const Fractional to_upper = (upper_bound + tolerance - value) / direction;
1848 
1849  // Enqueue the possible transitions. Note that the second tests exclude the
1850  // case where to_lower or to_upper are infinite.
1851  if (to_lower >= 0.0 && to_lower < current_ratio) {
1852  breakpoints.push_back(
1853  BreakPoint(e.row(), to_lower, magnitude, lower_bound));
1854  }
1855  if (to_upper >= 0.0 && to_upper < current_ratio) {
1856  breakpoints.push_back(
1857  BreakPoint(e.row(), to_upper, magnitude, upper_bound));
1858  }
1859  }
1860 
1861  // Order the breakpoints by increasing ratio and decreasing coefficient
1862  // magnitude (if the ratios are the same).
1863  std::make_heap(breakpoints.begin(), breakpoints.end());
1864 
1865  // Select the last breakpoint that still improves the infeasibility and has
1866  // the largest coefficient magnitude.
1867  Fractional improvement = std::abs(reduced_cost);
1868  Fractional best_magnitude = 0.0;
1869  *leaving_row = kInvalidRow;
1870  while (!breakpoints.empty()) {
1871  const BreakPoint top = breakpoints.front();
1872  // TODO(user): consider using >= here. That will lead to bigger ratio and
1873  // hence a better impact on the infeasibility. The drawback is that more
1874  // effort may be needed to update the reduced costs.
1875  //
1876  // TODO(user): Use a random tie breaking strategy for BreakPoint with
1877  // same ratio and same coefficient magnitude? Koberstein explains in his PhD
1878  // that it helped on the dual-simplex.
1879  if (top.coeff_magnitude > best_magnitude) {
1880  *leaving_row = top.row;
1881  current_ratio = top.ratio;
1882  best_magnitude = top.coeff_magnitude;
1883  *target_bound = top.target_bound;
1884  }
1885 
1886  // As long as the sum of primal infeasibilities is decreasing, we look for
1887  // pivots that are numerically more stable.
1888  improvement -= top.coeff_magnitude;
1889  if (improvement <= 0.0) break;
1890  std::pop_heap(breakpoints.begin(), breakpoints.end());
1891  breakpoints.pop_back();
1892  }
1893 
1894  // Try to avoid a small pivot by refactorizing.
1895  if (*leaving_row != kInvalidRow) {
1896  const Fractional threshold =
1897  parameters_.small_pivot_threshold() * direction_infinity_norm_;
1898  if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
1899  *refactorize = true;
1900  return;
1901  }
1902  }
1903  *step_length = current_ratio;
1904 }
1905 
1906 // This implements the pricing step for the dual simplex.
// Picks the primal-infeasible row with the best price (squared infeasibility
// divided by the squared dual edge norm), breaking exact ties randomly, then
// fills cost_variation/target_bound from whichever bound the leaving column
// currently violates.
// NOTE(review): the line carrying the third parameter of the signature
// ("Fractional* target_bound) {", original line 1909) was lost in this
// extraction — confirm against the upstream file.
1907 Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
1908  Fractional* cost_variation,
1910  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1911  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
1912 
1913  // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
1914  // steepest edge or the normal Dantzig pricing.
1915  const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
1916  SCOPED_TIME_STAT(&function_stats_);
1917 
1918  *leaving_row = kInvalidRow;
1919  Fractional best_price(0.0);
1920  const DenseColumn& squared_infeasibilities =
1921  variable_values_.GetPrimalSquaredInfeasibilities();
1922  equivalent_leaving_choices_.clear();
1923  for (const RowIndex row : variable_values_.GetPrimalInfeasiblePositions()) {
// Compare squared_infeasibilities[row] / squared_norm[row] against
// best_price without dividing (one multiplication per candidate instead).
1924  const Fractional scaled_best_price = best_price * squared_norm[row];
1925  if (squared_infeasibilities[row] >= scaled_best_price) {
1926  if (squared_infeasibilities[row] == scaled_best_price) {
1927  DCHECK_NE(*leaving_row, kInvalidRow);
1928  equivalent_leaving_choices_.push_back(row);
1929  continue;
1930  }
1931  equivalent_leaving_choices_.clear();
1932  best_price = squared_infeasibilities[row] / squared_norm[row];
1933  *leaving_row = row;
1934  }
1935  }
1936 
1937  // Break the ties randomly.
1938  if (!equivalent_leaving_choices_.empty()) {
1939  equivalent_leaving_choices_.push_back(*leaving_row);
1940  *leaving_row =
1941  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1942  0, equivalent_leaving_choices_.size() - 1)(random_)];
1943  }
1944 
1945  // Return right away if there is no leaving variable.
1946  // Fill cost_variation and target_bound otherwise.
1947  if (*leaving_row == kInvalidRow) return Status::OK();
1948  const ColIndex leaving_col = basis_[*leaving_row];
1949  const Fractional value = variable_values_.Get(leaving_col);
1950  if (value < lower_bound_[leaving_col]) {
1951  *cost_variation = lower_bound_[leaving_col] - value;
1952  *target_bound = lower_bound_[leaving_col];
1953  DCHECK_GT(*cost_variation, 0.0);
1954  } else {
1955  *cost_variation = upper_bound_[leaving_col] - value;
1956  *target_bound = upper_bound_[leaving_col];
1957  DCHECK_LT(*cost_variation, 0.0);
1958  }
1959  return Status::OK();
1960 }
1961 
1962 namespace {
1963 
1964 // Returns true if a basic variable with given cost and type is to be considered
1965 // as a leaving candidate for the dual phase I. This utility function is used
1966 // to keep is_dual_entering_candidate_ up to date.
1967 bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
1968  Fractional threshold) {
1969  if (cost == 0.0) return false;
1970  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
1971  type == VariableType::FIXED_VARIABLE ||
1972  (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
1973  (type == VariableType::LOWER_BOUNDED && cost > threshold);
1974 }
1975 
1976 } // namespace
1977 
// Incrementally updates dual_pricing_vector_ (the dual phase-I prices) and
// is_dual_entering_candidate_ after the pivot (leaving_row, entering_col),
// and removes the now dual-feasible entering and leaving columns from the
// infeasibility-direction bookkeeping.
1978 void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
1979  ColIndex entering_col) {
1980  SCOPED_TIME_STAT(&function_stats_);
1981  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
1982  const Fractional threshold = parameters_.ratio_test_zero_threshold();
1983 
1984  // Convert the dual_pricing_vector_ from the old basis into the new one (which
1985  // is the same as multiplying it by an Eta matrix corresponding to the
1986  // direction).
1987  const Fractional step =
1988  dual_pricing_vector_[leaving_row] / direction_[leaving_row];
1989  for (const auto e : direction_) {
1990  dual_pricing_vector_[e.row()] -= e.coefficient() * step;
1991  is_dual_entering_candidate_.Set(
1992  e.row(), IsDualPhaseILeavingCandidate(dual_pricing_vector_[e.row()],
1993  variable_type[basis_[e.row()]],
1994  threshold));
1995  }
// The leaving row's entry was zeroed by the loop above (it subtracted
// exactly dual_pricing_vector_[leaving_row]); its new value is 'step'.
1996  dual_pricing_vector_[leaving_row] = step;
1997 
1998  // The entering_col which was dual-infeasible is now dual-feasible, so we
1999  // have to remove it from the infeasibility sum.
2000  dual_pricing_vector_[leaving_row] -=
2001  dual_infeasibility_improvement_direction_[entering_col];
2002  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
2003  --num_dual_infeasible_positions_;
2004  }
2005  dual_infeasibility_improvement_direction_[entering_col] = 0.0;
2006 
2007  // The leaving variable will also be dual-feasible.
2008  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;
2009 
2010  // Update the leaving row entering candidate status.
// Note that entering_col is the variable that now sits in basis_[leaving_row].
2011  is_dual_entering_candidate_.Set(
2012  leaving_row,
2013  IsDualPhaseILeavingCandidate(dual_pricing_vector_[leaving_row],
2014  variable_type[entering_col], threshold));
2015 }
2016 
// For each column in cols, recomputes the sign (+1, -1 or 0) in which its
// reduced cost must move to improve dual feasibility. When a sign changed,
// the column's contribution is accumulated into a scratchpad column, which
// is then solved against the basis (RightSolve) and added into
// dual_pricing_vector_, updating is_dual_entering_candidate_ and
// num_dual_infeasible_positions_ along the way.
2017 template <typename Cols>
2018 void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
2019  const Cols& cols) {
2020  SCOPED_TIME_STAT(&function_stats_);
2021  bool something_to_do = false;
2022  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
2023  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
2024  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2025  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
2026  for (ColIndex col : cols) {
2027  const Fractional reduced_cost = reduced_costs[col];
2028  const Fractional sign =
2029  (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
2030  : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
2031  : 0.0;
2032  if (sign != dual_infeasibility_improvement_direction_[col]) {
2033  if (sign == 0.0) {
2034  --num_dual_infeasible_positions_;
2035  } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
2036  ++num_dual_infeasible_positions_;
2037  }
// Lazily initialize the scratchpad only when the first change is seen.
2038  if (!something_to_do) {
2039  initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
2040  initially_all_zero_scratchpad_.ClearSparseMask();
2041  initially_all_zero_scratchpad_.non_zeros.clear();
2042  something_to_do = true;
2043  }
// NOTE(review): the opening line of this call (original line 2044, which
// names the function applying column 'col', scaled by the sign delta, to
// the scratchpad) was lost in this extraction — confirm against the
// upstream file.
2045  col, sign - dual_infeasibility_improvement_direction_[col],
2046  &initially_all_zero_scratchpad_);
2047  dual_infeasibility_improvement_direction_[col] = sign;
2048  }
2049  }
2050  if (something_to_do) {
2051  initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
2052  initially_all_zero_scratchpad_.ClearSparseMask();
2053 
2054  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
2055  const Fractional threshold = parameters_.ratio_test_zero_threshold();
2056  basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
// Dense path: the non-zero list was dropped, so scan every row.
2057  if (initially_all_zero_scratchpad_.non_zeros.empty()) {
2058  for (RowIndex row(0); row < num_rows_; ++row) {
2059  if (initially_all_zero_scratchpad_[row] == 0.0) continue;
2060  dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
2061  is_dual_entering_candidate_.Set(
2062  row, IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
2063  variable_type[basis_[row]],
2064  threshold));
2065  }
2066  initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
2067  } else {
// Sparse path: touch (and re-zero) only the non-zero entries.
2068  for (const auto e : initially_all_zero_scratchpad_) {
2069  dual_pricing_vector_[e.row()] += e.coefficient();
2070  initially_all_zero_scratchpad_[e.row()] = 0.0;
2071  is_dual_entering_candidate_.Set(
2072  e.row(), IsDualPhaseILeavingCandidate(
2073  dual_pricing_vector_[e.row()],
2074  variable_type[basis_[e.row()]], threshold));
2075  }
2076  }
2077  initially_all_zero_scratchpad_.non_zeros.clear();
2078  }
2079 }
2080 
// Dual phase-I pricing: refreshes the dual prices (from scratch when the
// reduced costs were just recomputed or the price vector is empty,
// incrementally via the last update row otherwise), then picks the candidate
// row maximizing Square(price) / squared_norm with random tie-breaking, and
// derives cost_variation/target_bound from the price's sign.
// NOTE(review): the line carrying the third parameter of the signature
// ("Fractional* target_bound) {", original line 2083) was lost in this
// extraction — confirm against the upstream file.
2081 Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2082  RowIndex* leaving_row, Fractional* cost_variation,
2084  SCOPED_TIME_STAT(&function_stats_);
2085  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2086  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2087 
2088  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2089  // positions and contains the sign in which the reduced cost of this column
2090  // needs to move to improve the feasibility otherwise (+1 or -1).
2091  //
2092  // Its current value was the one used to compute dual_pricing_vector_ and
2093  // was updated accordingly by DualPhaseIUpdatePrice().
2094  //
2095  // If more variables changed of dual-feasibility status during the last
2096  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2097  // take them into account.
2098  if (reduced_costs_.AreReducedCostsRecomputed() ||
2099  dual_pricing_vector_.empty()) {
2100  // Recompute everything from scratch.
2101  num_dual_infeasible_positions_ = 0;
2102  dual_pricing_vector_.AssignToZero(num_rows_);
2103  is_dual_entering_candidate_.ClearAndResize(num_rows_);
2104  dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2105  DualPhaseIUpdatePriceOnReducedCostChange(
2106  variables_info_.GetIsRelevantBitRow());
2107  } else {
2108  // Update row is still equal to the row used during the last iteration
2109  // to update the reduced costs.
2110  DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2111  }
2112 
2113  // If there is no dual-infeasible position, we are done.
2114  *leaving_row = kInvalidRow;
2115  if (num_dual_infeasible_positions_ == 0) return Status::OK();
2116 
2117  // TODO(user): Reuse parameters_.optimization_rule() to decide if we use
2118  // steepest edge or the normal Dantzig pricing.
2119  const DenseColumn& squared_norm = dual_edge_norms_.GetEdgeSquaredNorms();
2120 
2121  // Now take a leaving variable that maximizes the infeasibility variation and
2122  // can leave the basis while being dual-feasible.
2123  Fractional best_price(0.0);
2124  equivalent_leaving_choices_.clear();
2125  for (const RowIndex row : is_dual_entering_candidate_) {
2126  const Fractional squared_cost = Square(dual_pricing_vector_[row]);
// Compare squared_cost / squared_norm[row] against best_price without
// dividing (one multiplication per candidate instead).
2127  const Fractional scaled_best_price = best_price * squared_norm[row];
2128  if (squared_cost >= scaled_best_price) {
2129  if (squared_cost == scaled_best_price) {
2130  DCHECK_NE(*leaving_row, kInvalidRow);
2131  equivalent_leaving_choices_.push_back(row);
2132  continue;
2133  }
2134  equivalent_leaving_choices_.clear();
2135  best_price = squared_cost / squared_norm[row];
2136  *leaving_row = row;
2137  }
2138  }
2139 
2140  // Break the ties randomly.
2141  if (!equivalent_leaving_choices_.empty()) {
2142  equivalent_leaving_choices_.push_back(*leaving_row);
2143  *leaving_row =
2144  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
2145  0, equivalent_leaving_choices_.size() - 1)(random_)];
2146  }
2147 
2148  // Returns right away if there is no leaving variable or fill the other
2149  // return values otherwise.
2150  if (*leaving_row == kInvalidRow) return Status::OK();
2151  *cost_variation = dual_pricing_vector_[*leaving_row];
2152  const ColIndex leaving_col = basis_[*leaving_row];
2153  if (*cost_variation < 0.0) {
2154  *target_bound = upper_bound_[leaving_col];
2155  } else {
2156  *target_bound = lower_bound_[leaving_col];
2157  }
2158  DCHECK(IsFinite(*target_bound));
2159  return Status::OK();
2160 }
2161 
2162 template <typename BoxedVariableCols>
2163 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2164  const BoxedVariableCols& cols, bool update_basic_values) {
2165  SCOPED_TIME_STAT(&function_stats_);
2166  std::vector<ColIndex> changed_cols;
2167 
2168  // It is important to flip bounds within a tolerance because of precision
2169  // errors. Otherwise, this leads to cycling on many of the Netlib problems
2170  // since this is called at each iteration (because of the bound-flipping ratio
2171  // test).
2172  const DenseRow& variable_values = variable_values_.GetDenseRow();
2173  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2174  const Fractional dual_feasibility_tolerance =
2175  reduced_costs_.GetDualFeasibilityTolerance();
2176  const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2177  for (const ColIndex col : cols) {
2178  const Fractional reduced_cost = reduced_costs[col];
2179  const VariableStatus status = variable_status[col];
2180  DCHECK(variables_info_.GetTypeRow()[col] ==
2181  VariableType::UPPER_AND_LOWER_BOUNDED);
2182  // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2183  DCHECK(variable_values[col] == lower_bound_[col] ||
2184  variable_values[col] == upper_bound_[col] ||
2185  status == VariableStatus::BASIC);
2186  if (reduced_cost > dual_feasibility_tolerance &&
2187  status == VariableStatus::AT_UPPER_BOUND) {
2188  variables_info_.Update(col, VariableStatus::AT_LOWER_BOUND);
2189  changed_cols.push_back(col);
2190  } else if (reduced_cost < -dual_feasibility_tolerance &&
2191  status == VariableStatus::AT_LOWER_BOUND) {
2192  variables_info_.Update(col, VariableStatus::AT_UPPER_BOUND);
2193  changed_cols.push_back(col);
2194  }
2195  }
2196 
2197  if (!changed_cols.empty()) {
2198  variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2199  update_basic_values);
2200  }
2201 }
2202 
2203 Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2204  RowIndex leaving_row, Fractional target_bound) {
2205  SCOPED_TIME_STAT(&function_stats_);
2206 
2207  // We just want the leaving variable to go to its target_bound.
2208  const ColIndex leaving_col = basis_[leaving_row];
2209  const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2210  Fractional unscaled_step = leaving_variable_value - target_bound;
2211 
2212  // In Chvatal p 157 update_[entering_col] is used instead of
2213  // direction_[leaving_row], but the two quantities are actually the
2214  // same. This is because update_[col] is the value at leaving_row of
2215  // the right inverse of col and direction_ is the right inverse of the
2216  // entering_col. Note that direction_[leaving_row] is probably more
2217  // precise.
2218  // TODO(user): use this to check precision and trigger recomputation.
2219  return unscaled_step / direction_[leaving_row];
2220 }
2221 
2222 bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2223  VLOG(1) << "Test pivot.";
2224  SCOPED_TIME_STAT(&function_stats_);
2225  const ColIndex leaving_col = basis_[leaving_row];
2226  basis_[leaving_row] = entering_col;
2227 
2228  // TODO(user): If 'is_ok' is true, we could use the computed lu in
2229  // basis_factorization_ rather than recompute it during UpdateAndPivot().
2230  CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2231  const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2232  basis_[leaving_row] = leaving_col;
2233  return is_ok;
2234 }
2235 
2236 // Note that this function is an optimization and that if it was doing nothing
2237 // the algorithm will still be correct and work. Using it does change the pivot
2238 // taken during the simplex method though.
// Applies basis_factorization_'s column permutation to basis_ (and to
// dual_pricing_vector_ when present), then clears the permutation from the
// factorization so subsequent solves no longer need it.
2239 void RevisedSimplex::PermuteBasis() {
2240  SCOPED_TIME_STAT(&function_stats_);
2241 
2242  // Fetch the current basis column permutation and return if it is empty which
2243  // means the permutation is the identity.
2244  const ColumnPermutation& col_perm =
2245  basis_factorization_.GetColumnPermutation();
2246  if (col_perm.empty()) return;
2247 
2248  // Permute basis_.
2249  ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);
2250 
2251  // Permute dual_pricing_vector_ if needed.
2252  if (!dual_pricing_vector_.empty()) {
2253  // TODO(user): We need to permute is_dual_entering_candidate_ too. Right
2254  // now, we recompute both the dual_pricing_vector_ and
2255  // is_dual_entering_candidate_ on each refactorization, so this don't
2256  // matter.
2257  ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
2258  }
2259 
2260  // Notify the other classes.
2261  reduced_costs_.UpdateDataOnBasisPermutation();
2262  dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2263 
2264  // Finally, remove the column permutation from all subsequent solves since
2265  // it has been taken into account in basis_.
2266  basis_factorization_.SetColumnPermutationToIdentity();
2267 }
2268 
// Performs the basis change: the variable of entering_col enters the basis
// at leaving_row, the leaving variable's status is derived from target_bound,
// and the LU factorization is updated — or a full refactorization is forced
// when the pivot computed from the update row disagrees too much with the
// one from direction_.
// NOTE(review): this extraction lost several original lines: 2271 (the
// "Fractional target_bound) {" parameter line), 2276 and 2278-2279 (the
// VariableStatus values of the nested ternary choosing the leaving
// variable's status), and 2297 (the "GLOP_RETURN_IF_ERROR(" opener of the
// basis_factorization_.Update() call) — confirm against the upstream file.
2269 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2270  RowIndex leaving_row,
2272  SCOPED_TIME_STAT(&function_stats_);
2273  const ColIndex leaving_col = basis_[leaving_row];
2274  const VariableStatus leaving_variable_status =
2275  lower_bound_[leaving_col] == upper_bound_[leaving_col]
2276  : target_bound == lower_bound_[leaving_col]
// Record by how much the leaving variable is shifted onto its bound.
2280  if (variable_values_.Get(leaving_col) != target_bound) {
2281  ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2282  target_bound);
2283  }
2284  UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2285 
// Cross-check the pivot computed two independent ways; a large discrepancy
// signals an imprecise factorization.
2286  const Fractional pivot_from_direction = direction_[leaving_row];
2287  const Fractional pivot_from_update_row =
2288  update_row_.GetCoefficient(entering_col);
2289  const Fractional diff =
2290  std::abs(pivot_from_update_row - pivot_from_direction);
2291  if (diff > parameters_.refactorization_threshold() *
2292  (1 + std::abs(pivot_from_direction))) {
2293  VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2294  << " diff = " << diff;
2295  GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2296  } else {
2298  basis_factorization_.Update(entering_col, leaving_row, direction_));
2299  }
// A (re)factorization may have produced a column permutation; fold it into
// basis_ right away.
2300  if (basis_factorization_.IsRefactorized()) {
2301  PermuteBasis();
2302  }
2303  return Status::OK();
2304 }
2305 
2306 bool RevisedSimplex::NeedsBasisRefactorization(bool refactorize) {
2307  if (basis_factorization_.IsRefactorized()) return false;
2308  if (reduced_costs_.NeedsBasisRefactorization()) return true;
2309  const GlopParameters::PricingRule pricing_rule =
2310  feasibility_phase_ ? parameters_.feasibility_rule()
2311  : parameters_.optimization_rule();
2312  if (parameters_.use_dual_simplex()) {
2313  // TODO(user): Currently the dual is always using STEEPEST_EDGE.
2314  DCHECK_EQ(pricing_rule, GlopParameters::STEEPEST_EDGE);
2315  if (dual_edge_norms_.NeedsBasisRefactorization()) return true;
2316  } else {
2317  if (pricing_rule == GlopParameters::STEEPEST_EDGE &&
2318  primal_edge_norms_.NeedsBasisRefactorization()) {
2319  return true;
2320  }
2321  }
2322  return refactorize;
2323 }
2324 
2325 Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2326  SCOPED_TIME_STAT(&function_stats_);
2327  if (NeedsBasisRefactorization(*refactorize)) {
2328  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2329  update_row_.Invalidate();
2330  PermuteBasis();
2331  }
2332  *refactorize = false;
2333  return Status::OK();
2334 }
2335 
2336 // Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2337 // x an n-vector.
2338 //
2339 // x is split in two parts x_B and x_N (B standing for basis).
2340 // In the same way, A is split in A_B (also known as B) and A_N, and
2341 // c is split into c_B and c_N.
2342 //
2343 // The goal is to minimize c_B.x_B + c_N.x_N
2344 // subject to B.x_B + A_N.x_N = 0
2345 // and x_lower <= x <= x_upper.
2346 //
2347 // To minimize c.x, at each iteration a variable from x_N is selected to
2348 // enter the basis, and a variable from x_B is selected to leave the basis.
2349 // To avoid explicit inversion of B, the algorithm solves two sub-systems:
2350 // y.B = c_B and B.d = a (a being the entering column).
2351 Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
 // NOTE(review): this listing came through an extraction that dropped some
 // source lines (the embedded line numbers are not contiguous). In particular
 // several SCOPED_TIME_STAT / IF_STATS_ENABLED / GLOP_RETURN_IF_ERROR opening
 // lines and the `Fractional target_bound;` declaration (original line 2464)
 // are missing, leaving dangling `));` below. Restore from the original
 // revised_simplex.cc before compiling.
2353  Cleanup update_deterministic_time_on_return(
2354  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2355  num_consecutive_degenerate_iterations_ = 0;
2356  DisplayIterationInfo();
2357  bool refactorize = false;
2358 
2359  if (feasibility_phase_) {
2360  // Initialize the primal phase-I objective.
2361  // Note that this temporarily erases the problem objective.
2362  objective_.AssignToZero(num_cols_);
2363  variable_values_.UpdatePrimalPhaseICosts(
2364  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2365  reduced_costs_.ResetForNewObjective();
2366  }
2367 
2368  while (true) {
2369  // TODO(user): we may loop a bit more than the actual number of iteration.
2370  // fix.
2372  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2373  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2374  if (basis_factorization_.IsRefactorized()) {
2375  CorrectErrorsOnVariableValues();
2376  DisplayIterationInfo();
2377 
2378  if (feasibility_phase_) {
2379  // Since the variable values may have been recomputed, we need to
2380  // recompute the primal infeasible variables and update their costs.
2381  if (variable_values_.UpdatePrimalPhaseICosts(
2382  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2383  &objective_)) {
2384  reduced_costs_.ResetForNewObjective();
2385  }
2386  }
2387 
2388  // Computing the objective at each iteration takes time, so we just
2389  // check the limit when the basis is refactorized.
2390  if (!feasibility_phase_ &&
2391  ComputeObjectiveValue() < primal_objective_limit_) {
2392  VLOG(1) << "Stopping the primal simplex because"
2393  << " the objective limit " << primal_objective_limit_
2394  << " has been reached.";
2395  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2396  objective_limit_reached_ = true;
2397  return Status::OK();
2398  }
2399  } else if (feasibility_phase_) {
2400  // Note that direction_.non_zeros contains the positions of the basic
2401  // variables whose values were updated during the last iteration.
2402  if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2403  &objective_)) {
2404  reduced_costs_.ResetForNewObjective();
2405  }
2406  }
2407 
 // Pricing step: pick the entering column. kInvalidCol means no candidate,
 // i.e. the current basis looks optimal (or phase-I feasible/infeasible).
2408  Fractional reduced_cost = 0.0;
2409  ColIndex entering_col = kInvalidCol;
2411  entering_variable_.PrimalChooseEnteringColumn(&entering_col));
2412  if (entering_col == kInvalidCol) {
2413  if (reduced_costs_.AreReducedCostsPrecise() &&
2414  basis_factorization_.IsRefactorized()) {
2415  if (feasibility_phase_) {
2416  const Fractional primal_infeasibility =
2417  variable_values_.ComputeMaximumPrimalInfeasibility();
2418  if (primal_infeasibility <
2419  parameters_.primal_feasibility_tolerance()) {
2420  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2421  } else {
2422  VLOG(1) << "Infeasible problem! infeasibility = "
2423  << primal_infeasibility;
2424  problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2425  }
2426  } else {
2427  problem_status_ = ProblemStatus::OPTIMAL;
2428  }
2429  break;
2430  } else {
 // Optimality is only trusted on precise reduced costs over a fresh
 // factorization; otherwise recompute and loop again.
2431  VLOG(1) << "Optimal reached, double checking...";
2432  reduced_costs_.MakeReducedCostsPrecise();
2433  refactorize = true;
2434  continue;
2435  }
2436  } else {
2437  reduced_cost = reduced_costs_.GetReducedCosts()[entering_col];
2438  DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2439 
2440  // Solve the system B.d = a with a the entering column.
2441  ComputeDirection(entering_col);
2442  primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2443  direction_);
2444  if (!reduced_costs_.TestEnteringReducedCostPrecision(
2445  entering_col, direction_, &reduced_cost)) {
2446  VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
2447  << reduced_cost;
2448  continue;
2449  }
2450  }
2451 
2452  // This test takes place after the check for optimality/feasibility because
2453  // when running with 0 iterations, we still want to report
2454  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2455  // case at the beginning of the algorithm.
2456  AdvanceDeterministicTime(time_limit);
2457  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2458  time_limit->LimitReached()) {
2459  break;
2460  }
2461 
 // Ratio test: choose the leaving row and the largest feasible step length.
 // (The `Fractional target_bound;` declaration was lost in extraction.)
2462  Fractional step_length;
2463  RowIndex leaving_row;
2465  if (feasibility_phase_) {
2466  PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2467  &refactorize, &leaving_row,
2468  &step_length, &target_bound);
2469  } else {
2471  ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2472  &leaving_row, &step_length, &target_bound));
2473  }
2474  if (refactorize) continue;
2475 
2476  if (step_length == kInfinity || step_length == -kInfinity) {
 // An infinite step means unboundedness — but only trust it on precise
 // data; otherwise recompute and retry.
2477  if (!basis_factorization_.IsRefactorized() ||
2478  !reduced_costs_.AreReducedCostsPrecise()) {
2479  VLOG(1) << "Infinite step length, double checking...";
2480  reduced_costs_.MakeReducedCostsPrecise();
2481  continue;
2482  }
2483  if (feasibility_phase_) {
2484  // This shouldn't happen by construction.
2485  VLOG(1) << "Unbounded feasibility problem !?";
2486  problem_status_ = ProblemStatus::ABNORMAL;
2487  } else {
2488  VLOG(1) << "Unbounded problem.";
2489  problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
 // Export an unbounded primal ray built from the current direction.
2490  solution_primal_ray_.AssignToZero(num_cols_);
2491  for (RowIndex row(0); row < num_rows_; ++row) {
2492  const ColIndex col = basis_[row];
2493  solution_primal_ray_[col] = -direction_[row];
2494  }
2495  solution_primal_ray_[entering_col] = 1.0;
2496  if (step_length == -kInfinity) {
2497  ChangeSign(&solution_primal_ray_);
2498  }
2499  }
2500  break;
2501  }
2502 
2503  Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2504  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2505  // For phase-I we currently always set the leaving variable to its exact
2506  // bound even if by doing so we may take a small step in the wrong
2507  // direction and may increase the overall infeasibility.
2508  //
2509  // TODO(user): Investigate alternatives even if this seems to work well in
2510  // practice. Note that the final returned solution will have the property
2511  // that all non-basic variables are at their exact bound, so it is nice
2512  // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2513  // this property cannot be found.
2514  step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2515  }
2516 
2517  // Store the leaving_col before basis_ change.
2518  const ColIndex leaving_col =
2519  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2520 
2521  // An iteration is called 'degenerate' if the leaving variable is already
2522  // primal-infeasible and we make it even more infeasible or if we do a zero
2523  // step.
2524  bool is_degenerate = false;
2525  if (leaving_row != kInvalidRow) {
2526  Fractional dir = -direction_[leaving_row] * step;
2527  is_degenerate =
2528  (dir == 0.0) ||
2529  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2530  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2531 
2532  // If the iteration is not degenerate, the leaving variable should go to
2533  // its exact target bound (it is how the step is computed).
2534  if (!is_degenerate) {
2535  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2536  target_bound));
2537  }
2538  }
2539 
 // Apply the pivot: update variable values, edge norms, reduced costs, and
 // finally the basis factorization itself.
2540  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2541  if (leaving_row != kInvalidRow) {
2542  primal_edge_norms_.UpdateBeforeBasisPivot(
2543  entering_col, basis_[leaving_row], leaving_row, direction_,
2544  &update_row_);
2545  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2546  direction_, &update_row_);
2547  if (!is_degenerate) {
2548  // On a non-degenerate iteration, the leaving variable should be at its
2549  // exact bound. This corrects an eventual small numerical error since
2550  // 'value + direction * step' where step is
2551  // '(target_bound - value) / direction'
2552  // may be slighlty different from target_bound.
2553  variable_values_.Set(leaving_col, target_bound);
2554  }
2556  UpdateAndPivot(entering_col, leaving_row, target_bound));
2558  if (is_degenerate) {
2559  timer.AlsoUpdate(&iteration_stats_.degenerate);
2560  } else {
2561  timer.AlsoUpdate(&iteration_stats_.normal);
2562  }
2563  });
2564  } else {
2565  // Bound flip. This makes sure that the flipping variable is at its bound
2566  // and has the correct status.
2567  DCHECK_EQ(VariableType::UPPER_AND_LOWER_BOUNDED,
2568  variables_info_.GetTypeRow()[entering_col]);
 // (The VariableStatus arguments of the two calls below, original lines
 // 2571 and 2574, were lost in extraction.)
2569  if (step > 0.0) {
2570  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2572  } else if (step < 0.0) {
2573  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2575  }
2576  reduced_costs_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2577  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2578  }
2579 
2580  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2581  // Set the leaving variable to its exact bound.
2582  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2583  reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2584  &objective_[leaving_col]);
2585  }
2586 
2587  // Stats about consecutive degenerate iterations.
2588  if (step_length == 0.0) {
2589  num_consecutive_degenerate_iterations_++;
2590  } else {
2591  if (num_consecutive_degenerate_iterations_ > 0) {
2592  iteration_stats_.degenerate_run_size.Add(
2593  num_consecutive_degenerate_iterations_);
2594  num_consecutive_degenerate_iterations_ = 0;
2595  }
2596  }
2597  ++num_iterations_;
2598  }
 // Flush a degenerate run that was still open when the loop exited.
2599  if (num_consecutive_degenerate_iterations_ > 0) {
2600  iteration_stats_.degenerate_run_size.Add(
2601  num_consecutive_degenerate_iterations_);
2602  }
2603  return Status::OK();
2604 }
2605 
2606 // TODO(user): Two other approaches for the phase I described in Koberstein's
2607 // PhD thesis seem worth trying at some point:
2608 // - The subproblem approach, which enables one to use a normal phase II dual,
2609 // but requires an efficient bound-flipping ratio test since the new problem
2610 // has all its variables boxed.
2611 // - Pan's method, which is really fast but have no theoretical guarantee of
2612 // terminating and thus needs to use one of the other methods as a fallback if
2613 // it fails to make progress.
2614 //
2615 // Note that the returned status applies to the primal problem!
2616 Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
 // NOTE(review): this listing came through an extraction that dropped some
 // source lines (the embedded line numbers are not contiguous). In particular
 // the `Fractional target_bound;` declaration (original line 2628) and
 // several GLOP_RETURN_IF_ERROR / IF_STATS_ENABLED opening lines are missing,
 // leaving dangling `));` below. Restore from the original revised_simplex.cc
 // before compiling.
2617  Cleanup update_deterministic_time_on_return(
2618  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2619  num_consecutive_degenerate_iterations_ = 0;
2620  bool refactorize = false;
2621 
2622  bound_flip_candidates_.clear();
2623  pair_to_ignore_.clear();
2624 
2625  // Leaving variable.
2626  RowIndex leaving_row;
2627  Fractional cost_variation;
2629 
2630  // Entering variable.
2631  ColIndex entering_col;
2632  Fractional ratio;
2633 
2634  while (true) {
2635  // TODO(user): we may loop a bit more than the actual number of iteration.
2636  // fix.
2638  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2639 
2640  const bool old_refactorize_value = refactorize;
2641  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2642 
2643  // If the basis is refactorized, we recompute all the values in order to
2644  // have a good precision.
2645  if (basis_factorization_.IsRefactorized()) {
2646  // We do not want to recompute the reduced costs too often, this is
2647  // because that may break the overall direction taken by the last steps
2648  // and may lead to less improvement on degenerate problems.
2649  //
2650  // During phase-I, we do want the reduced costs to be as precise as
2651  // possible. TODO(user): Investigate why and fix the TODO in
2652  // PermuteBasis().
2653  //
2654  // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
2655  // do recompute them, it is better to do that first.
2656  if (!feasibility_phase_ && !reduced_costs_.AreReducedCostsRecomputed() &&
2657  !old_refactorize_value) {
2658  const Fractional dual_residual_error =
2659  reduced_costs_.ComputeMaximumDualResidual();
2660  if (dual_residual_error >
2661  reduced_costs_.GetDualFeasibilityTolerance()) {
2662  VLOG(1) << "Recomputing reduced costs. Dual residual = "
2663  << dual_residual_error;
2664  reduced_costs_.MakeReducedCostsPrecise();
2665  }
2666  } else {
2667  reduced_costs_.MakeReducedCostsPrecise();
2668  }
2669 
2670  // TODO(user): Make RecomputeBasicVariableValues() do nothing
2671  // if it was already recomputed on a refactorized basis. This is the
2672  // same behavior as MakeReducedCostsPrecise().
2673  //
2674  // TODO(user): Do not recompute the variable values each time we
2675  // refactorize the matrix, like for the reduced costs? That may lead to
2676  // a worse behavior than keeping the "imprecise" version and only
2677  // recomputing it when its precision is above a threshold.
2678  if (!feasibility_phase_) {
2679  MakeBoxedVariableDualFeasible(
2680  variables_info_.GetNonBasicBoxedVariables(),
2681  /*update_basic_values=*/false);
2682  variable_values_.RecomputeBasicVariableValues();
2683  variable_values_.ResetPrimalInfeasibilityInformation();
2684 
2685  // Computing the objective at each iteration takes time, so we just
2686  // check the limit when the basis is refactorized.
2687  if (ComputeObjectiveValue() > dual_objective_limit_) {
2688  VLOG(1) << "Stopping the dual simplex because"
2689  << " the objective limit " << dual_objective_limit_
2690  << " has been reached.";
2691  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2692  objective_limit_reached_ = true;
2693  return Status::OK();
2694  }
2695  }
2696 
2697  reduced_costs_.GetReducedCosts();
2698  DisplayIterationInfo();
2699  } else {
2700  // Updates from the previous iteration that can be skipped if we
2701  // recompute everything (see other case above).
2702  if (!feasibility_phase_) {
2703  // Make sure the boxed variables are dual-feasible before choosing the
2704  // leaving variable row.
2705  MakeBoxedVariableDualFeasible(bound_flip_candidates_,
2706  /*update_basic_values=*/true);
2707  bound_flip_candidates_.clear();
2708 
2709  // The direction_.non_zeros contains the positions for which the basic
2710  // variable value was changed during the previous iterations.
2711  variable_values_.UpdatePrimalInfeasibilityInformation(
2712  direction_.non_zeros);
2713  }
2714  }
2715 
 // Dual pricing: choose the leaving row; kInvalidRow means the basis looks
 // primal feasible (dual optimal).
2716  if (feasibility_phase_) {
2717  GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
2718  &leaving_row, &cost_variation, &target_bound));
2719  } else {
2720  GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
2721  &leaving_row, &cost_variation, &target_bound));
2722  }
2723  if (leaving_row == kInvalidRow) {
2724  if (!basis_factorization_.IsRefactorized()) {
2725  VLOG(1) << "Optimal reached, double checking.";
2726  refactorize = true;
2727  continue;
2728  }
2729  if (feasibility_phase_) {
2730  // Note that since the basis is refactorized, the variable values
2731  // will be recomputed at the beginning of the second phase. The boxed
2732  // variable values will also be corrected by
2733  // MakeBoxedVariableDualFeasible().
2734  if (num_dual_infeasible_positions_ == 0) {
2735  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2736  } else {
2737  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
2738  }
2739  } else {
2740  problem_status_ = ProblemStatus::OPTIMAL;
2741  }
2742  return Status::OK();
2743  }
2744 
 // Dual ratio test: pick the entering column from the update row. (The
 // GLOP_RETURN_IF_ERROR(...ChooseEnteringColumn( openers, original lines
 // 2752 and 2755, were lost in extraction.)
2745  update_row_.ComputeUpdateRow(leaving_row);
2746  for (std::pair<RowIndex, ColIndex> pair : pair_to_ignore_) {
2747  if (pair.first == leaving_row) {
2748  update_row_.IgnoreUpdatePosition(pair.second);
2749  }
2750  }
2751  if (feasibility_phase_) {
2753  update_row_, cost_variation, &entering_col, &ratio));
2754  } else {
2756  update_row_, cost_variation, &bound_flip_candidates_, &entering_col,
2757  &ratio));
2758  }
2759 
2760  // No entering_col: Unbounded problem / Infeasible problem.
2761  if (entering_col == kInvalidCol) {
2762  if (!reduced_costs_.AreReducedCostsPrecise()) {
2763  VLOG(1) << "No entering column. Double checking...";
2764  refactorize = true;
2765  continue;
2766  }
2767  DCHECK(basis_factorization_.IsRefactorized());
2768  if (feasibility_phase_) {
2769  // This shouldn't happen by construction.
2770  VLOG(1) << "Unbounded dual feasibility problem !?";
2771  problem_status_ = ProblemStatus::ABNORMAL;
2772  } else {
2773  problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
 // Export the dual ray and the row combination certifying primal
 // infeasibility.
2774  solution_dual_ray_ =
2775  Transpose(update_row_.GetUnitRowLeftInverse().values);
2776  update_row_.RecomputeFullUpdateRow(leaving_row);
2777  solution_dual_ray_row_combination_.AssignToZero(num_cols_);
2778  for (const ColIndex col : update_row_.GetNonZeroPositions()) {
2779  solution_dual_ray_row_combination_[col] =
2780  update_row_.GetCoefficient(col);
2781  }
2782  if (cost_variation < 0) {
2783  ChangeSign(&solution_dual_ray_);
2784  ChangeSign(&solution_dual_ray_row_combination_);
2785  }
2786  }
2787  return Status::OK();
2788  }
2789 
2790  // If the coefficient is too small, we recompute the reduced costs.
2791  const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
2792  if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
2793  !reduced_costs_.AreReducedCostsPrecise()) {
2794  VLOG(1) << "Trying not to pivot by " << entering_coeff;
2795  refactorize = true;
2796  continue;
2797  }
2798 
2799  // If the reduced cost is already precise, we check with the direction_.
2800  // This is at least needed to avoid corner cases where
2801  // direction_[leaving_row] is actually 0 which causes a floating
2802  // point exception below.
2803  ComputeDirection(entering_col);
2804  if (std::abs(direction_[leaving_row]) <
2805  parameters_.minimum_acceptable_pivot()) {
2806  VLOG(1) << "Do not pivot by " << entering_coeff
2807  << " because the direction is " << direction_[leaving_row];
2808  refactorize = true;
2809  pair_to_ignore_.push_back({leaving_row, entering_col});
2810  continue;
2811  }
2812  pair_to_ignore_.clear();
2813 
2814  // This test takes place after the check for optimality/feasibility because
2815  // when running with 0 iterations, we still want to report
2816  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2817  // case at the beginning of the algorithm.
2818  AdvanceDeterministicTime(time_limit);
2819  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2820  time_limit->LimitReached()) {
2821  return Status::OK();
2822  }
2823 
 // (The IF_STATS_ENABLED( opener, original line 2824, was lost in
 // extraction; the `});` below closes it.)
2825  if (ratio == 0.0) {
2826  timer.AlsoUpdate(&iteration_stats_.degenerate);
2827  } else {
2828  timer.AlsoUpdate(&iteration_stats_.normal);
2829  }
2830  });
2831 
2832  // Update basis. Note that direction_ is already computed.
2833  //
2834  // TODO(user): this is pretty much the same in the primal or dual code.
2835  // We just need to know to what bound the leaving variable will be set to.
2836  // Factorize more common code?
2837  //
2838  // During phase I, we do not need the basic variable values at all.
2839  Fractional primal_step = 0.0;
2840  if (feasibility_phase_) {
2841  DualPhaseIUpdatePrice(leaving_row, entering_col);
2842  } else {
2843  primal_step =
2844  ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2845  variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
2846  }
2847 
2848  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2849  &update_row_);
2850  dual_edge_norms_.UpdateBeforeBasisPivot(
2851  entering_col, leaving_row, direction_,
2852  update_row_.GetUnitRowLeftInverse());
2853 
2854  // It is important to do the actual pivot after the update above!
2855  const ColIndex leaving_col = basis_[leaving_row];
2857  UpdateAndPivot(entering_col, leaving_row, target_bound));
2858 
2859  // This makes sure the leaving variable is at its exact bound. Tests
2860  // indicate that this makes everything more stable. Note also that during
2861  // the feasibility phase, the variable values are not used, but that the
2862  // correct non-basic variable value are needed at the end.
2863  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2864 
2865  // This is slow, but otherwise we have a really bad precision on the
2866  // variable values ...
2867  if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
2868  1.0) {
2869  refactorize = true;
2870  }
2871  ++num_iterations_;
2872  }
2873  return Status::OK();
2874 }
2875 
2876 ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
2877  // TODO(user): Remove this function.
 // Maps a constraint row to the column of its slack variable: slack columns
 // are laid out contiguously starting at first_slack_col_.
 // NOTE(review): the DCHECK_ROW_BOUNDS(row); line (original line 2878)
 // appears to have been dropped by the extraction.
2879  return first_slack_col_ + RowToColIndex(row);
2880 }
2881 
 // Concatenates the stat reports of every sub-component into one string.
 // NOTE(review): the enclosing signature line, `std::string
 // RevisedSimplex::StatString() {` (original line 2882), was lost in
 // extraction — see the cross-reference index at the end of the listing.
2883  std::string result;
2884  result.append(iteration_stats_.StatString());
2885  result.append(ratio_test_stats_.StatString());
2886  result.append(entering_variable_.StatString());
2887  result.append(reduced_costs_.StatString());
2888  result.append(variable_values_.StatString());
2889  result.append(primal_edge_norms_.StatString());
2890  result.append(dual_edge_norms_.StatString());
2891  result.append(update_row_.StatString());
2892  result.append(basis_factorization_.StatString());
2893  result.append(function_stats_.StatString());
2894  return result;
2895 }
2896 
2897 void RevisedSimplex::DisplayAllStats() {
2898  if (FLAGS_simplex_display_stats) {
2899  absl::FPrintF(stderr, "%s", StatString());
2900  absl::FPrintF(stderr, "%s", GetPrettySolverStats());
2901  }
2902 }
2903 
2904 Fractional RevisedSimplex::ComputeObjectiveValue() const {
2905  SCOPED_TIME_STAT(&function_stats_);
2906  return PreciseScalarProduct(objective_,
2907  Transpose(variable_values_.GetDenseRow()));
2908 }
2909 
2910 Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
2911  SCOPED_TIME_STAT(&function_stats_);
2912  const Fractional sum = PreciseScalarProduct(
2913  objective_, Transpose(variable_values_.GetDenseRow()));
2914  return objective_scaling_factor_ * (sum + objective_offset_);
2915 }
2916 
2917 void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
2918  SCOPED_TIME_STAT(&function_stats_);
2919  random_.seed(parameters.random_seed());
2920  initial_parameters_ = parameters;
2921  parameters_ = parameters;
2922  PropagateParameters();
2923 }
2924 
2925 void RevisedSimplex::PropagateParameters() {
2926  SCOPED_TIME_STAT(&function_stats_);
2927  basis_factorization_.SetParameters(parameters_);
2928  entering_variable_.SetParameters(parameters_);
2929  reduced_costs_.SetParameters(parameters_);
2930  dual_edge_norms_.SetParameters(parameters_);
2931  primal_edge_norms_.SetParameters(parameters_);
2932  update_row_.SetParameters(parameters_);
2933 }
2934 
2935 void RevisedSimplex::DisplayIterationInfo() const {
2936  if (VLOG_IS_ON(1)) {
2937  const int iter = feasibility_phase_
2938  ? num_iterations_
2939  : num_iterations_ - num_feasibility_iterations_;
2940  // Note that in the dual phase II, ComputeObjectiveValue() is also computing
2941  // the dual objective even if it uses the variable values. This is because
2942  // if we modify the bounds to make the problem primal-feasible, we are at
2943  // the optimal and hence the two objectives are the same.
2944  const Fractional objective =
2945  !feasibility_phase_
2946  ? ComputeInitialProblemObjectiveValue()
2947  : (parameters_.use_dual_simplex()
2948  ? reduced_costs_.ComputeSumOfDualInfeasibilities()
2949  : variable_values_.ComputeSumOfPrimalInfeasibilities());
2950  VLOG(1) << (feasibility_phase_ ? "Feasibility" : "Optimization")
2951  << " phase, iteration # " << iter
2952  << ", objective = " << absl::StrFormat("%.15E", objective);
2953  }
2954 }
2955 
2956 void RevisedSimplex::DisplayErrors() const {
2957  if (VLOG_IS_ON(1)) {
2958  VLOG(1) << "Primal infeasibility (bounds) = "
2959  << variable_values_.ComputeMaximumPrimalInfeasibility();
2960  VLOG(1) << "Primal residual |A.x - b| = "
2961  << variable_values_.ComputeMaximumPrimalResidual();
2962  VLOG(1) << "Dual infeasibility (reduced costs) = "
2963  << reduced_costs_.ComputeMaximumDualInfeasibility();
2964  VLOG(1) << "Dual residual |c_B - y.B| = "
2965  << reduced_costs_.ComputeMaximumDualResidual();
2966  }
2967 }
2968 
2969 namespace {
2970 
2971 std::string StringifyMonomialWithFlags(const Fractional a,
2972  const std::string& x) {
2973  return StringifyMonomial(a, x, FLAGS_simplex_display_numbers_as_fractions);
2974 }
2975 
2976 // Returns a string representing the rational approximation of x or a decimal
2977 // approximation of x according to FLAGS_simplex_display_numbers_as_fractions.
2978 std::string StringifyWithFlags(const Fractional x) {
2979  return Stringify(x, FLAGS_simplex_display_numbers_as_fractions);
2980 }
2981 
2982 } // namespace
2983 
2984 std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
2985  std::string output;
2986  VariableType variable_type = variables_info_.GetTypeRow()[col];
2987  VariableStatus variable_status = variables_info_.GetStatusRow()[col];
2988  absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
2989  variable_name_[col],
2990  StringifyWithFlags(variable_values_.Get(col)),
2991  GetVariableStatusString(variable_status),
2992  GetVariableTypeString(variable_type),
2993  StringifyWithFlags(lower_bound_[col]),
2994  StringifyWithFlags(upper_bound_[col]));
2995  return output;
2996 }
2997 
2998 void RevisedSimplex::DisplayInfoOnVariables() const {
2999  if (VLOG_IS_ON(3)) {
3000  for (ColIndex col(0); col < num_cols_; ++col) {
3001  const Fractional variable_value = variable_values_.Get(col);
3002  const Fractional objective_coefficient = objective_[col];
3003  const Fractional objective_contribution =
3004  objective_coefficient * variable_value;
3005  VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3006  << StringifyWithFlags(variable_value) << " * "
3007  << StringifyWithFlags(objective_coefficient)
3008  << "(obj) = " << StringifyWithFlags(objective_contribution);
3009  }
3010  VLOG(3) << "------";
3011  }
3012 }
3013 
3014 void RevisedSimplex::DisplayVariableBounds() {
 // Logs (at VLOG level 3) the bound constraints of every variable, one line
 // per variable, formatted according to its type.
3015  if (VLOG_IS_ON(3)) {
3016  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3017  for (ColIndex col(0); col < num_cols_; ++col) {
3018  switch (variable_type[col]) {
3019  case VariableType::UNCONSTRAINED:
3020  break;
3021  case VariableType::LOWER_BOUNDED:
3022  VLOG(3) << variable_name_[col]
3023  << " >= " << StringifyWithFlags(lower_bound_[col]) << ";";
3024  break;
3025  case VariableType::UPPER_BOUNDED:
3026  VLOG(3) << variable_name_[col]
3027  << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3028  break;
3029  case VariableType::UPPER_AND_LOWER_BOUNDED:
3030  VLOG(3) << StringifyWithFlags(lower_bound_[col])
3031  << " <= " << variable_name_[col]
3032  << " <= " << StringifyWithFlags(upper_bound_[col]) << ";";
3033  break;
 // NOTE(review): the case label for fixed variables (original line 3034,
 // presumably `case VariableType::FIXED_VARIABLE:`) was lost in
 // extraction; the VLOG below prints name = lower bound, which matches a
 // fixed variable — restore the label from the original source.
3035  VLOG(3) << variable_name_[col] << " = "
3036  << StringifyWithFlags(lower_bound_[col]) << ";";
3037  break;
3038  default: // This should never happen.
3039  LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3040  break;
3041  }
3042  }
3043  }
3044 }
3045 
 // Builds the simplex dictionary row by row: for each column, solves
 // B.d = a (ComputeDirection) and scatters the direction coefficients into
 // per-row sparse rows. When column_scales is non-null, each coefficient is
 // rescaled by scale(col) / scale(basic variable of the row); columns beyond
 // the scale vector default to a scale of 1.0.
 // NOTE(review): the signature line (original line 3046, the
 // RevisedSimplexDictionary-returning ComputeDictionary overload taking a
 // `const DenseRow* column_scales`) was lost in extraction.
3047  const DenseRow* column_scales) {
3048  gtl::ITIVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3049  for (ColIndex col(0); col < num_cols_; ++col) {
3050  ComputeDirection(col);
3051  for (const auto e : direction_) {
3052  if (column_scales == nullptr) {
 // Unscaled case: copy the direction coefficient as-is.
3053  dictionary[e.row()].SetCoefficient(col, e.coefficient());
3054  continue;
3055  }
3056  const Fractional numerator =
3057  col < column_scales->size() ? (*column_scales)[col] : 1.0;
3058  const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3059  ? (*column_scales)[GetBasis(e.row())]
3060  : 1.0;
3061  dictionary[e.row()].SetCoefficient(
3062  col, direction_[e.row()] * (numerator / denominator));
3063  }
3064  }
3065  return dictionary;
3066 }
3067 
 // Loads the given basis state, (re)initializes the solver on the program,
 // and, on success, recomputes the basic variable values and the objective
 // value of the initial problem.
 // NOTE(review): the signature line (original line 3068, presumably
 // `void RevisedSimplex::ComputeBasicVariablesForState(`) was lost in
 // extraction.
3069  const LinearProgram& linear_program, const BasisState& state) {
3070  LoadStateForNextSolve(state);
3071  Status status = Initialize(linear_program);
3072  if (status.ok()) {
3073  variable_values_.RecomputeBasicVariableValues();
3074  variable_values_.ResetPrimalInfeasibilityInformation();
3075  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3076  }
 // Note: a failed Initialize() leaves solution_objective_value_ untouched.
3077 }
3078 
3079 void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3080  if (VLOG_IS_ON(3)) {
3081  // This function has a complexity in O(num_non_zeros_in_matrix).
3082  DisplayInfoOnVariables();
3083 
3084  std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3085  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3086  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3087  absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3088  variable_name_[col]));
3089  }
3090  VLOG(3) << output << ";";
3091 
3092  const RevisedSimplexDictionary dictionary(nullptr, this);
3093  RowIndex r(0);
3094  for (const SparseRow& row : dictionary) {
3095  output.clear();
3096  ColIndex basic_col = basis_[r];
3097  absl::StrAppend(&output, variable_name_[basic_col], " = ",
3098  StringifyWithFlags(variable_values_.Get(basic_col)));
3099  for (const SparseRowEntry e : row) {
3100  if (e.col() != basic_col) {
3101  absl::StrAppend(&output,
3102  StringifyMonomialWithFlags(e.coefficient(),
3103  variable_name_[e.col()]));
3104  }
3105  }
3106  VLOG(3) << output << ";";
3107  }
3108  VLOG(3) << "------";
3109  DisplayVariableBounds();
3110  ++r;
3111  }
3112 }
3113 
3114 void RevisedSimplex::DisplayProblem() const {
3115  // This function has a complexity in O(num_rows * num_cols *
3116  // num_non_zeros_in_row).
3117  if (VLOG_IS_ON(3)) {
3118  DisplayInfoOnVariables();
3119  std::string output = "min: ";
3120  bool has_objective = false;
3121  for (ColIndex col(0); col < num_cols_; ++col) {
3122  const Fractional coeff = objective_[col];
3123  has_objective |= (coeff != 0.0);
3124  absl::StrAppend(&output,
3125  StringifyMonomialWithFlags(coeff, variable_name_[col]));
3126  }
3127  if (!has_objective) {
3128  absl::StrAppend(&output, " 0");
3129  }
3130  VLOG(3) << output << ";";
3131  for (RowIndex row(0); row < num_rows_; ++row) {
3132  output = "";
3133  for (ColIndex col(0); col < num_cols_; ++col) {
3134  absl::StrAppend(&output,
3135  StringifyMonomialWithFlags(
3136  compact_matrix_.column(col).LookUpCoefficient(row),
3137  variable_name_[col]));
3138  }
3139  VLOG(3) << output << " = 0;";
3140  }
3141  VLOG(3) << "------";
3142  }
3143 }
3144 
3145 void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3146  DCHECK(time_limit != nullptr);
3147  const double current_deterministic_time = DeterministicTime();
3148  const double deterministic_time_delta =
3149  current_deterministic_time - last_deterministic_time_update_;
3150  time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3151  last_deterministic_time_update_ = current_deterministic_time;
3152 }
3153 
3154 #undef DCHECK_COL_BOUNDS
3155 #undef DCHECK_ROW_BOUNDS
3156 
3157 } // namespace glop
3158 } // namespace operations_research
operations_research::glop::ColumnView::LookUpCoefficient
Fractional LookUpCoefficient(RowIndex index) const
Definition: sparse_column.h:100
operations_research::glop::ReducedCosts::ClearAndRemoveCostShifts
void ClearAndRemoveCostShifts()
Definition: reduced_costs.cc:302
operations_research::glop::RevisedSimplex::GetProblemStatus
ProblemStatus GetProblemStatus() const
Definition: revised_simplex.cc:415
operations_research::glop::ScatteredVector::ClearSparseMask
void ClearSparseMask()
Definition: scattered_vector.h:133
operations_research::glop::ReducedCosts::PerturbCosts
void PerturbCosts()
Definition: reduced_costs.cc:240
util::IntegerRange
Definition: iterators.h:146
operations_research::glop::VariableStatus::AT_UPPER_BOUND
@ AT_UPPER_BOUND
operations_research::glop::RevisedSimplex::GetBasisFactorization
const BasisFactorization & GetBasisFactorization() const
Definition: revised_simplex.cc:486
operations_research::glop::StrictITIVector::resize
void resize(IntType size)
Definition: lp_types.h:269
if
if(!yyg->yy_init)
Definition: parser.yy.cc:965
min
int64 min
Definition: alldiff_cst.cc:138
integral_types.h
operations_research::glop::DenseRow
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:299
operations_research::glop::VariableStatus::BASIC
@ BASIC
operations_research::Bitset64::IsSet
bool IsSet(IndexType i) const
Definition: bitset.h:483
operations_research::glop::VariablesInfo::GetNotBasicBitRow
const DenseBitRow & GetNotBasicBitRow() const
Definition: variables_info.cc:119
operations_research::glop::ReducedCosts::UpdateDataOnBasisPermutation
void UpdateDataOnBasisPermutation()
Definition: reduced_costs.cc:226
operations_research::glop::ApplyColumnPermutationToRowIndexedVector
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
Definition: lp_data/permutation.h:115
operations_research::glop::VariableValues::RecomputeBasicVariableValues
void RecomputeBasicVariableValues()
Definition: variable_values.cc:92
operations_research::glop::RevisedSimplex::StatString
std::string StatString()
Definition: revised_simplex.cc:2882
max
int64 max
Definition: alldiff_cst.cc:139
operations_research::glop::RevisedSimplex::DeterministicTime
double DeterministicTime() const
Definition: revised_simplex.cc:506
operations_research::glop::ReducedCosts::MakeReducedCostsPrecise
void MakeReducedCostsPrecise()
Definition: reduced_costs.cc:232
operations_research::glop::CompactSparseMatrix::ColumnCopyToDenseColumn
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:418
IF_STATS_ENABLED
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:435
operations_research::glop::ProblemStatus::ABNORMAL
@ ABNORMAL
operations_research::glop::DualEdgeNorms::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: dual_edge_norms.h:87
operations_research::glop::VariableValues::ResetPrimalInfeasibilityInformation
void ResetPrimalInfeasibilityInformation()
Definition: variable_values.cc:226
operations_research::glop::UpdateRow::StatString
std::string StatString() const
Definition: update_row.h:81
operations_research::glop::kInvalidCol
const ColIndex kInvalidCol(-1)
operations_research::glop::BasisFactorization::StatString
std::string StatString() const
Definition: basis_representation.h:275
lp_data.h
operations_research::glop::VariableValues::UpdateGivenNonBasicVariables
void UpdateGivenNonBasicVariables(const std::vector< ColIndex > &cols_to_update, bool update_basic_variables)
Definition: variable_values.cc:167
operations_research::glop::CompactSparseMatrix::column
ColumnView column(ColIndex col) const
Definition: sparse.h:364
operations_research::glop::Status::ERROR_INVALID_PROBLEM
@ ERROR_INVALID_PROBLEM
Definition: status.h:41
operations_research::glop::DualEdgeNorms::NeedsBasisRefactorization
bool NeedsBasisRefactorization()
Definition: dual_edge_norms.cc:25
operations_research::glop::ReducedCosts::AreReducedCostsRecomputed
bool AreReducedCostsRecomputed()
Definition: reduced_costs.h:112
operations_research::glop::UpdateRow::IgnoreUpdatePosition
void IgnoreUpdatePosition(ColIndex col)
Definition: update_row.cc:45
operations_research::glop::CompactSparseMatrix::ColumnAddMultipleToDenseColumn
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:393
operations_research::glop::BasisFactorization::Clear
void Clear()
Definition: basis_representation.cc:193
operations_research::glop::EnteringVariable::DualPhaseIChooseEnteringColumn
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col, Fractional *step)
Definition: entering_variable.cc:268
operations_research::glop::RevisedSimplex::LoadStateForNextSolve
void LoadStateForNextSolve(const BasisState &state)
Definition: revised_simplex.cc:124
operations_research::glop::VariablesInfo::UpdateToBasicStatus
void UpdateToBasicStatus(ColIndex col)
Definition: variables_info.cc:68
gtl::ITIVector
Definition: int_type_indexed_vector.h:76
operations_research::glop::BasisFactorization::Update
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
Definition: basis_representation.cc:284
operations_research::glop::VariablesInfo::GetBoundDifference
Fractional GetBoundDifference(ColIndex col) const
Definition: variables_info.h:76
operations_research::glop::ColToIntIndex
Index ColToIntIndex(ColIndex col)
Definition: lp_types.h:54
logging.h
operations_research::glop::PrimalEdgeNorms::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: primal_edge_norms.h:110
operations_research::glop::CompactSparseMatrix::PopulateFromMatrixView
void PopulateFromMatrixView(const MatrixView &input)
Definition: sparse.cc:437
operations_research::glop::StringifyMonomial
std::string StringifyMonomial(const Fractional a, const std::string &x, bool fraction)
Definition: lp_print_utils.cc:53
operations_research::glop::ColumnView::IsEmpty
bool IsEmpty() const
Definition: sparse_column.h:114
operations_research::glop::ScatteredVector::non_zeros
std::vector< Index > non_zeros
Definition: scattered_vector.h:63
operations_research::glop::UpdateRow::GetCoefficient
const Fractional GetCoefficient(ColIndex col) const
Definition: update_row.h:66
operations_research::glop::VariablesInfo::MakeBoxedVariableRelevant
void MakeBoxedVariableRelevant(bool value)
Definition: variables_info.cc:46
operations_research::glop::VariableValues::UpdatePrimalInfeasibilityInformation
void UpdatePrimalInfeasibilityInformation(const std::vector< RowIndex > &rows)
Definition: variable_values.cc:244
operations_research::glop::ReducedCosts::ComputeMaximumDualInfeasibility
Fractional ComputeMaximumDualInfeasibility() const
Definition: reduced_costs.cc:141
value
int64 value
Definition: demon_profiler.cc:43
operations_research::glop::CompactSparseMatrix::num_rows
RowIndex num_rows() const
Definition: sparse.h:344
operations_research::glop::DenseBooleanColumn
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Definition: lp_types.h:331
operations_research::glop::ReducedCosts::NeedsBasisRefactorization
bool NeedsBasisRefactorization() const
Definition: reduced_costs.cc:54
coeff_magnitude
Fractional coeff_magnitude
Definition: revised_simplex.cc:1794
operations_research::glop::Status
Definition: status.h:24
lp_utils.h
operations_research::glop::ReducedCosts::GetDualValues
const DenseColumn & GetDualValues()
Definition: reduced_costs.cc:324
operations_research::glop::RowToColMapping
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition: lp_types.h:342
operations_research::glop::InfinityNorm
Fractional InfinityNorm(const DenseColumn &v)
Definition: lp_data/lp_utils.cc:81
operations_research::glop::VariableValues::ResetAllNonBasicVariableValues
void ResetAllNonBasicVariableValues()
Definition: variable_values.cc:67
operations_research::glop::StrictITIVector::size
IntType size() const
Definition: lp_types.h:276
operations_research
The vehicle routing library lets one model and solve generic vehicle routing problems ranging from th...
Definition: dense_doubly_linked_list.h:21
operations_research::glop::DualEdgeNorms::GetEdgeSquaredNorms
const DenseColumn & GetEdgeSquaredNorms()
Definition: dual_edge_norms.cc:35
operations_research::glop::ConstraintStatus
ConstraintStatus
Definition: lp_types.h:227
operations_research::glop::CompactSparseMatrix::num_cols
ColIndex num_cols() const
Definition: sparse.h:345
operations_research::glop::UpdateRow::DeterministicTime
double DeterministicTime() const
Definition: update_row.h:92
operations_research::glop::CompactSparseMatrix::num_entries
EntryIndex num_entries() const
Definition: sparse.h:340
operations_research::glop::Status::OK
static const Status OK()
Definition: status.h:54
operations_research::glop::BasisFactorization::IsRefactorized
bool IsRefactorized() const
Definition: basis_representation.cc:214
int64
int64_t int64
Definition: integral_types.h:34
operations_research::glop::Stringify
std::string Stringify(const Fractional x, bool fraction)
Definition: lp_print_utils.cc:45
operations_research::glop::PrimalEdgeNorms::NeedsBasisRefactorization
bool NeedsBasisRefactorization() const
Definition: primal_edge_norms.cc:43
operations_research::glop::UpdateRow::Invalidate
void Invalidate()
Definition: update_row.cc:40
operations_research::TimeLimit
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
RETURN_IF_NULL
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
operations_research::glop::VariablesInfo::GetStatusRow
const VariableStatusRow & GetStatusRow() const
Definition: variables_info.cc:101
operations_research::glop::ReducedCosts::AreReducedCostsPrecise
bool AreReducedCostsPrecise()
Definition: reduced_costs.h:108
operations_research::glop::kDeterministicSeed
constexpr const uint64 kDeterministicSeed
Definition: revised_simplex.cc:75
operations_research::glop::VariableTypeRow
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Definition: lp_types.h:317
matrix_utils.h
DEFINE_bool
DEFINE_bool(simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
operations_research::glop::RevisedSimplex::GetObjectiveValue
Fractional GetObjectiveValue() const
Definition: revised_simplex.cc:419
revised_simplex.h
operations_research::glop::PrimalEdgeNorms::UpdateBeforeBasisPivot
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
Definition: primal_edge_norms.cc:86
gtl::ITIVector::empty
bool empty() const
Definition: int_type_indexed_vector.h:155
operations_research::glop::VariableToConstraintStatus
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
operations_research::glop::Fractional
double Fractional
Definition: lp_types.h:77
operations_research::glop::ConstraintStatus::AT_UPPER_BOUND
@ AT_UPPER_BOUND
operations_research::glop::UpdateRow::RecomputeFullUpdateRow
void RecomputeFullUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:244
operations_research::glop::ReducedCosts::SetAndDebugCheckThatColumnIsDualFeasible
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
Definition: reduced_costs.cc:200
operations_research::glop::RevisedSimplex::Solve
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
Definition: revised_simplex.cc:134
operations_research::glop::VariableStatus::FIXED_VALUE
@ FIXED_VALUE
operations_research::glop::RevisedSimplex::GetPrimalRay
const DenseRow & GetPrimalRay() const
Definition: revised_simplex.cc:470
operations_research::glop::ColumnView::GetFirstCoefficient
Fractional GetFirstCoefficient() const
Definition: sparse_column.h:86
ratio
Fractional ratio
Definition: revised_simplex.cc:1793
SCOPED_TIME_STAT
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:436
operations_research::glop::BasisFactorization::Refactorize
ABSL_MUST_USE_RESULT Status Refactorize()
Definition: basis_representation.cc:216
operations_research::glop::BasisFactorization::Initialize
ABSL_MUST_USE_RESULT Status Initialize()
Definition: basis_representation.cc:206
operations_research::glop::kInfinity
const double kInfinity
Definition: lp_types.h:83
operations_research::glop::RowToColIndex
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:48
cost
int64 cost
Definition: routing_flow.cc:130
operations_research::glop::Status::ERROR_LU
@ ERROR_LU
Definition: status.h:32
operations_research::glop::VariablesInfo::GetCanIncreaseBitRow
const DenseBitRow & GetCanIncreaseBitRow() const
Definition: variables_info.cc:105
DEBUG_MODE
const bool DEBUG_MODE
Definition: macros.h:24
a
int64 a
Definition: constraint_solver/table.cc:42
operations_research::glop::RevisedSimplex::ComputeDictionary
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
Definition: revised_simplex.cc:3046
operations_research::glop::DualEdgeNorms::UpdateBeforeBasisPivot
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
Definition: dual_edge_norms.cc:46
operations_research::glop::DenseBitRow
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:323
operations_research::glop::GetVariableTypeString
std::string GetVariableTypeString(VariableType variable_type)
Definition: lp_types.cc:52
operations_research::glop::RevisedSimplex::GetDualRayRowCombination
const DenseRow & GetDualRayRowCombination() const
Definition: revised_simplex.cc:479
operations_research::glop::PrimalEdgeNorms::TestEnteringEdgeNormPrecision
void TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
Definition: primal_edge_norms.cc:62
target_bound
Fractional target_bound
Definition: revised_simplex.cc:1795
operations_research::glop::BasisFactorization::DeterministicTime
double DeterministicTime() const
Definition: basis_representation.cc:572
operations_research::glop::BasisState
Definition: revised_simplex.h:132
operations_research::glop::UpdateRow::ComputeUpdateRow
void ComputeUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:78
time_limit
SharedTimeLimit * time_limit
Definition: cp_model_solver.cc:2063
operations_research::glop::IsFinite
bool IsFinite(Fractional value)
Definition: lp_types.h:90
row
RowIndex row
Definition: revised_simplex.cc:1792
operations_research::glop::BasisFactorization
Definition: basis_representation.h:151
DCHECK_COL_BOUNDS
#define DCHECK_COL_BOUNDS(col)
Definition: revised_simplex.cc:63
operations_research::glop::GetVariableStatusString
std::string GetVariableStatusString(VariableStatus status)
Definition: lp_types.cc:71
operations_research::glop::RevisedSimplex::GetConstraintStatus
ConstraintStatus GetConstraintStatus(RowIndex row) const
Definition: revised_simplex.cc:457
operations_research::glop::ReducedCosts::MaintainDualInfeasiblePositions
void MaintainDualInfeasiblePositions(bool maintain)
Definition: reduced_costs.cc:311
operations_research::glop::RevisedSimplex::GetState
const BasisState & GetState() const
Definition: revised_simplex.cc:449
operations_research::glop::VariableValues::ComputeMaximumPrimalResidual
Fractional ComputeMaximumPrimalResidual() const
Definition: variable_values.cc:108
fp_utils.h
operations_research::glop::LuFactorization::Clear
void Clear()
Definition: lu_factorization.cc:31
operations_research::glop::BasisFactorization::ForceRefactorization
ABSL_MUST_USE_RESULT Status ForceRefactorization()
Definition: basis_representation.cc:221
operations_research::glop::VariablesInfo::InitializeAndComputeType
void InitializeAndComputeType()
Definition: variables_info.cc:27
operations_research::glop::ReducedCosts::SetNonBasicVariableCostToZero
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
Definition: reduced_costs.cc:206
operations_research::glop::DualEdgeNorms::StatString
std::string StatString() const
Definition: dual_edge_norms.h:92
DCHECK_ROW_BOUNDS
#define DCHECK_ROW_BOUNDS(row)
Definition: revised_simplex.cc:69
operations_research::glop::PrimalEdgeNorms::StatString
std::string StatString() const
Definition: primal_edge_norms.h:115
operations_research::glop::BasisFactorization::SetColumnPermutationToIdentity
void SetColumnPermutationToIdentity()
Definition: basis_representation.h:176
operations_research::glop::StrictITIVector< ColIndex, Fractional >
operations_research::glop::RevisedSimplex::ComputeBasicVariablesForState
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
Definition: revised_simplex.cc:3068
operations_research::glop::VariableValues::Get
const Fractional Get(ColIndex col) const
Definition: variable_values.h:49
operations_research::glop::LinearProgram::IsInEquationForm
bool IsInEquationForm() const
Definition: lp_data.cc:1403
operations_research::glop::EnteringVariable::PrimalChooseEnteringColumn
ABSL_MUST_USE_RESULT Status PrimalChooseEnteringColumn(ColIndex *entering_col)
Definition: entering_variable.cc:37
operations_research::glop::VariableType::FIXED_VARIABLE
@ FIXED_VARIABLE
operations_research::glop::VariableStatusRow
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Definition: lp_types.h:320
operations_research::glop::UpdateRow::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: update_row.cc:174
operations_research::glop::EnteringVariable::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: entering_variable.cc:372
objective_
IntVar *const objective_
Definition: search.cc:2945
operations_research::glop::StrictITIVector::AssignToZero
void AssignToZero(IntType size)
Definition: lp_types.h:290
operations_research::glop::RevisedSimplex::GetNumberOfIterations
int64 GetNumberOfIterations() const
Definition: revised_simplex.cc:423
operations_research::glop::VariableValues::SetNonBasicVariableValueFromStatus
void SetNonBasicVariableValueFromStatus(ColIndex col)
Definition: variable_values.cc:34
operations_research::glop::VariableValues::Set
void Set(ColIndex col, Fractional value)
Definition: variable_values.h:115
operations_research::glop::ReducedCosts::GetReducedCosts
const DenseRow & GetReducedCosts()
Definition: reduced_costs.cc:318
operations_research::glop::VariablesInfo::GetCanDecreaseBitRow
const DenseBitRow & GetCanDecreaseBitRow() const
Definition: variables_info.cc:109
uint64
uint64_t uint64
Definition: integral_types.h:39
operations_research::glop::RevisedSimplex::GetConstraintActivity
Fractional GetConstraintActivity(RowIndex row) const
Definition: revised_simplex.cc:451
operations_research::glop::GetProblemStatusString
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
operations_research::glop::BasisFactorization::RightSolve
void RightSolve(ScatteredColumn *d) const
Definition: basis_representation.cc:322
operations_research::glop::ScatteredVector::ClearNonZerosIfTooDense
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
Definition: scattered_vector.h:153
operations_research::glop::RevisedSimplex::GetDualValue
Fractional GetDualValue(RowIndex row) const
Definition: revised_simplex.cc:441
operations_research::glop::ReducedCosts::StatString
std::string StatString() const
Definition: reduced_costs.h:173
operations_research::glop::RevisedSimplex::GetVariableStatus
VariableStatus GetVariableStatus(ColIndex col) const
Definition: revised_simplex.cc:445
GLOP_RETURN_ERROR_IF_NULL
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Definition: status.h:85
operations_research::glop::VariablesInfo::GetIsBasicBitRow
const DenseBitRow & GetIsBasicBitRow() const
Definition: variables_info.cc:117
operations_research::glop::VariableValues::StatString
std::string StatString() const
Definition: variable_values.h:118
operations_research::glop::Transpose
const DenseRow & Transpose(const DenseColumn &col)
Definition: lp_data/lp_utils.h:192
operations_research::glop::RevisedSimplex::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: revised_simplex.cc:2917
operations_research::glop::DualEdgeNorms::ResizeOnNewRows
void ResizeOnNewRows(RowIndex new_size)
Definition: dual_edge_norms.cc:31
operations_research::glop::VariableValues::UpdateOnPivoting
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
Definition: variable_values.cc:144
operations_research::glop::VariablesInfo::GetIsRelevantBitRow
const DenseBitRow & GetIsRelevantBitRow() const
Definition: variables_info.cc:113
operations_research::Bitset64::Set
void Set(IndexType i)
Definition: bitset.h:493
operations_research::glop::ColumnView::EntryRow
RowIndex EntryRow(EntryIndex i) const
Definition: sparse_column.h:89
operations_research::glop::BasisState::IsEmpty
bool IsEmpty() const
Definition: revised_simplex.h:143
operations_research::glop::RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve
void NotifyThatMatrixIsUnchangedForNextSolve()
Definition: revised_simplex.cc:130
operations_research::glop::CompactSparseMatrix::PopulateFromTranspose
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition: sparse.cc:456
operations_research::glop::EnteringVariable::StatString
std::string StatString() const
Definition: entering_variable.h:92
operations_research::glop::AreFirstColumnsAndRowsExactlyEquals
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
Definition: matrix_utils.cc:190
operations_research::glop::ColumnPermutation
Permutation< ColIndex > ColumnPermutation
Definition: lp_data/permutation.h:94
operations_research::ScopedTimeDistributionUpdater
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition: stats.h:432
operations_research::glop::SparseVector< RowIndex, SparseColumnIterator >::Entry
typename Iterator::Entry Entry
Definition: sparse_vector.h:91
operations_research::glop::ReducedCosts::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: reduced_costs.cc:214
operations_research::glop::ReducedCosts::TestEnteringReducedCostPrecision
bool TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction, Fractional *reduced_cost)
Definition: reduced_costs.cc:58
operations_research::glop::ColumnView::num_entries
EntryIndex num_entries() const
Definition: sparse_column.h:82
operations_research::glop::UpdateRow::GetUnitRowLeftInverse
const ScatteredRow & GetUnitRowLeftInverse() const
Definition: update_row.cc:51
operations_research::glop::VariableValues::GetPrimalInfeasiblePositions
const DenseBitColumn & GetPrimalInfeasiblePositions() const
Definition: variable_values.cc:222
operations_research::glop::UpdateRow::GetNonZeroPositions
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:170
lp_print_utils.h
gtl::ITIVector::clear
void clear()
Definition: int_type_indexed_vector.h:169
operations_research::glop::ReducedCosts::ResetForNewObjective
void ResetForNewObjective()
Definition: reduced_costs.cc:218
operations_research::glop::ProblemStatus::OPTIMAL
@ OPTIMAL
operations_research::glop::ChangeSign
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
Definition: lp_data/lp_utils.h:300
col
ColIndex col
Definition: markowitz.cc:176
operations_research::glop::LuFactorization::ComputeFactorization
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
Definition: lu_factorization.cc:44
operations_research::glop::EnteringVariable::SetPricingRule
void SetPricingRule(GlopParameters::PricingRule rule)
Definition: entering_variable.cc:376
initial_basis.h
operations_research::glop::ProblemStatus
ProblemStatus
Definition: lp_types.h:101
operations_research::glop::LinearProgram::IsMaximizationProblem
bool IsMaximizationProblem() const
Definition: lp_data.h:171
operations_research::glop::PrimalEdgeNorms::Clear
void Clear()
Definition: primal_edge_norms.cc:37
operations_research::glop::LinearProgram
Definition: lp_data.h:55
operations_research::glop::Square
Fractional Square(Fractional f)
Definition: lp_data/lp_utils.h:36
operations_research::glop::VariablesInfo::UpdateToNonBasicStatus
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
Definition: variables_info.cc:78
operations_research::glop::DenseColumn
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:328
operations_research::glop::ConstraintStatus::AT_LOWER_BOUND
@ AT_LOWER_BOUND
operations_research::glop::RevisedSimplex::ClearStateForNextSolve
void ClearStateForNextSolve()
Definition: revised_simplex.cc:119
operations_research::glop::LinearProgram::IsCleanedUp
bool IsCleanedUp() const
Definition: lp_data.cc:352
operations_research::glop::CompactSparseMatrix::ColumnAddMultipleToSparseScatteredColumn
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:405
operations_research::glop::VariablesInfo::GetNonBasicBoxedVariables
const DenseBitRow & GetNonBasicBoxedVariables() const
Definition: variables_info.cc:123
operations_research::glop::ProblemStatus::INIT
@ INIT
operations_research::glop::VariablesInfo::GetTypeRow
const VariableTypeRow & GetTypeRow() const
Definition: variables_info.cc:97
operations_research::glop::VariableValues::GetPrimalSquaredInfeasibilities
const DenseColumn & GetPrimalSquaredInfeasibilities() const
Definition: variable_values.cc:218
operations_research::glop::ReducedCosts::GetDualFeasibilityTolerance
Fractional GetDualFeasibilityTolerance() const
Definition: reduced_costs.h:176
operations_research::glop::VariableType
VariableType
Definition: lp_types.h:174
operations_research::glop::RevisedSimplex::GetProblemNumRows
RowIndex GetProblemNumRows() const
Definition: revised_simplex.cc:425
operations_research::glop::VariableValues::GetDenseRow
const DenseRow & GetDenseRow() const
Definition: variable_values.h:50
operations_research::glop::RevisedSimplex::RevisedSimplex
RevisedSimplex()
Definition: revised_simplex.cc:77
operations_research::glop::ReducedCosts::ComputeMaximumDualResidual
Fractional ComputeMaximumDualResidual() const
Definition: reduced_costs.cc:113
operations_research::glop::VariableValues::ComputeMaximumPrimalInfeasibility
Fractional ComputeMaximumPrimalInfeasibility() const
Definition: variable_values.cc:120
operations_research::glop::DualEdgeNorms::UpdateDataOnBasisPermutation
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
Definition: dual_edge_norms.cc:40
operations_research::glop::ReducedCosts::IsValidPrimalEnteringCandidate
bool IsValidPrimalEnteringCandidate(ColIndex col) const
Definition: reduced_costs.cc:516
operations_research::glop::Permutation::empty
bool empty() const
Definition: lp_data/permutation.h:50
operations_research::glop::RevisedSimplex::GetVariableValue
Fractional GetVariableValue(ColIndex col) const
Definition: revised_simplex.cc:429
operations_research::glop::VariableValues::UpdatePrimalPhaseICosts
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
Definition: variable_values.h:157
operations_research::glop::RevisedSimplex::GetDualRay
const DenseColumn & GetDualRay() const
Definition: revised_simplex.cc:474
permutation.h
operations_research::glop::BasisFactorization::ComputeInfinityNormConditionNumberUpperBound
Fractional ComputeInfinityNormConditionNumberUpperBound() const
Definition: basis_representation.cc:564
operations_research::glop::BasisFactorization::SetParameters
void SetParameters(const GlopParameters &parameters)
Definition: basis_representation.h:158
operations_research::glop::VariableStatus
VariableStatus
Definition: lp_types.h:196
operations_research::glop::VariableStatus::FREE
@ FREE
operations_research::glop::RevisedSimplex::GetBasis
ColIndex GetBasis(RowIndex row) const
Definition: revised_simplex.cc:484
operations_research::glop::PreciseScalarProduct
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Definition: lp_data/lp_utils.h:92
operations_research::StatsGroup::StatString
std::string StatString() const
Definition: stats.cc:71
operations_research::glop::kInvalidRow
const RowIndex kInvalidRow(-1)
operations_research::glop::BasisFactorization::GetColumnPermutation
const ColumnPermutation & GetColumnPermutation() const
Definition: basis_representation.h:168
operations_research::glop::RevisedSimplex::GetReducedCosts
const DenseRow & GetReducedCosts() const
Definition: revised_simplex.cc:437
operations_research::Bitset64::ClearAndResize
void ClearAndResize(IndexType size)
Definition: bitset.h:438
operations_research::glop::BasisState::statuses
VariableStatusRow statuses
Definition: revised_simplex.h:140
operations_research::glop::RevisedSimplex::GetReducedCost
Fractional GetReducedCost(ColIndex col) const
Definition: revised_simplex.cc:433
operations_research::glop::ReducedCosts::UpdateBeforeBasisPivot
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
Definition: reduced_costs.cc:176
operations_research::glop::BasisFactorization::RightSolveForProblemColumn
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
Definition: basis_representation.cc:428
operations_research::glop::PrimalEdgeNorms::DeterministicTime
double DeterministicTime() const
Definition: primal_edge_norms.h:118
commandlineflags.h
parameters
SatParameters parameters
Definition: cp_model_fz_solver.cc:107
GLOP_RETURN_IF_ERROR
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:70
operations_research::glop::Status::ok
bool ok() const
Definition: status.h:59
operations_research::glop::ScatteredVector::values
StrictITIVector< Index, Fractional > values
Definition: scattered_vector.h:58
operations_research::glop::DualEdgeNorms::Clear
void Clear()
Definition: dual_edge_norms.cc:29
parameters.pb.h
operations_research::glop::RevisedSimplex::GetProblemNumCols
ColIndex GetProblemNumCols() const
Definition: revised_simplex.cc:427
operations_research::glop::EnteringVariable::DualChooseEnteringColumn
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col, Fractional *step)
Definition: entering_variable.cc:89
operations_research::glop::VariablesInfo::Update
void Update(ColIndex col, VariableStatus status)
Definition: variables_info.cc:60
operations_research::glop::VariableStatus::AT_LOWER_BOUND
@ AT_LOWER_BOUND
operations_research::glop::IsRightMostSquareMatrixIdentity
bool IsRightMostSquareMatrixIdentity(const SparseMatrix &matrix)
Definition: matrix_utils.cc:231
operations_research::glop::ColumnView::EntryCoefficient
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse_column.h:83