OR-Tools 9.0 — revised_simplex.cc: implementation of the revised simplex method used by the Glop linear programming solver.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <cstdint>
19 #include <functional>
20 #include <map>
21 #include <string>
22 #include <utility>
23 #include <vector>
24 
25 #include "absl/strings/str_cat.h"
26 #include "absl/strings/str_format.h"
29 #include "ortools/base/logging.h"
38 #include "ortools/util/fp_utils.h"
39 
40 ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
41  "Display numbers as fractions.");
42 ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
43  "Stop after first basis has been computed.");
44 ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
45  "Stop after first phase has been completed.");
46 ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
47 
48 namespace operations_research {
49 namespace glop {
50 namespace {
51 
// Calls the given closure upon destruction (RAII scope guard). It can be used
// to ensure that a closure is executed whenever a function returns, no matter
// which return path is taken.
class Cleanup {
 public:
  // The closure is stored by value; it runs exactly once, at destruction.
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  // Guard against an empty std::function: invoking one throws
  // std::bad_function_call, which must never escape a destructor.
  ~Cleanup() {
    if (closure_) closure_();
  }

 private:
  std::function<void()> closure_;
};
63 } // namespace
64 
// Debug-build bound checks: each macro expands to a pair of DCHECKs verifying
// that the given index lies in [0, num_cols_) / [0, num_rows_). They rely on
// the num_cols_ / num_rows_ members being in scope at the expansion site.
#define DCHECK_COL_BOUNDS(col) \
  {                            \
  DCHECK_LE(0, col);           \
  DCHECK_GT(num_cols_, col);   \
  }

#define DCHECK_ROW_BOUNDS(row) \
  {                            \
  DCHECK_LE(0, row);           \
  DCHECK_GT(num_rows_, row);   \
  }
76 
77 constexpr const uint64_t kDeterministicSeed = 42;
78 
80  : problem_status_(ProblemStatus::INIT),
81  num_rows_(0),
82  num_cols_(0),
83  first_slack_col_(0),
84  objective_(),
85  basis_(),
86  variable_name_(),
87  direction_(),
88  error_(),
89  deterministic_random_(kDeterministicSeed),
90  random_(deterministic_random_),
91  basis_factorization_(&compact_matrix_, &basis_),
92  variables_info_(compact_matrix_),
93  primal_edge_norms_(compact_matrix_, variables_info_,
94  basis_factorization_),
95  dual_edge_norms_(basis_factorization_),
96  dual_prices_(random_),
97  variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
98  basis_factorization_, &dual_edge_norms_, &dual_prices_),
99  update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
100  basis_factorization_),
101  reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
102  basis_factorization_, random_),
103  entering_variable_(variables_info_, random_, &reduced_costs_),
104  primal_prices_(random_, variables_info_, &primal_edge_norms_,
105  &reduced_costs_),
106  num_iterations_(0),
107  num_feasibility_iterations_(0),
108  num_optimization_iterations_(0),
109  total_time_(0.0),
110  feasibility_time_(0.0),
111  optimization_time_(0.0),
112  last_deterministic_time_update_(0.0),
113  iteration_stats_(),
114  ratio_test_stats_(),
115  function_stats_("SimplexFunctionStats"),
116  parameters_(),
117  test_lu_(),
118  feasibility_phase_(true) {
119  SetParameters(parameters_);
120 }
121 
123  SCOPED_TIME_STAT(&function_stats_);
124  solution_state_.statuses.clear();
125 }
126 
128  SCOPED_TIME_STAT(&function_stats_);
129  solution_state_ = state;
130  solution_state_has_been_set_externally_ = true;
131 }
132 
134  notify_that_matrix_is_unchanged_ = true;
135 }
136 
138  SCOPED_TIME_STAT(&function_stats_);
139  DCHECK(lp.IsCleanedUp());
141  if (!lp.IsInEquationForm()) {
143  "The problem is not in the equations form.");
144  }
145  Cleanup update_deterministic_time_on_return(
146  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
147 
148  // Initialization. Note That Initialize() must be called first since it
149  // analyzes the current solver state.
150  const double start_time = time_limit->GetElapsedTime();
151  GLOP_RETURN_IF_ERROR(Initialize(lp));
152 
153  dual_infeasibility_improvement_direction_.clear();
154  update_row_.Invalidate();
155  test_lu_.Clear();
156  problem_status_ = ProblemStatus::INIT;
157  feasibility_phase_ = true;
158  num_iterations_ = 0;
159  num_feasibility_iterations_ = 0;
160  num_optimization_iterations_ = 0;
161  feasibility_time_ = 0.0;
162  optimization_time_ = 0.0;
163  total_time_ = 0.0;
164 
165  // In case we abort because of an error, we cannot assume that the current
166  // solution state will be in sync with all our internal data structure. In
167  // case we abort without resetting it, setting this allow us to still use the
168  // previous state info, but we will double-check everything.
169  solution_state_has_been_set_externally_ = true;
170 
171  if (VLOG_IS_ON(1)) {
172  ComputeNumberOfEmptyRows();
173  ComputeNumberOfEmptyColumns();
174  DisplayBasicVariableStatistics();
175  DisplayProblem();
176  }
177  if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
178  DisplayAllStats();
179  return Status::OK();
180  }
181 
182  const bool use_dual = parameters_.use_dual_simplex();
183  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
184  if (log_info) {
185  LOG(INFO) << "------ " << (use_dual ? "Dual simplex." : "Primal simplex.");
186  LOG(INFO) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
187  << compact_matrix_.num_cols() << " columns, "
188  << compact_matrix_.num_entries() << " entries.";
189  }
190 
191  // TODO(user): Avoid doing the first phase checks when we know from the
192  // incremental solve that the solution is already dual or primal feasible.
193  if (log_info) LOG(INFO) << "------ First phase: feasibility.";
194  primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
195  if (use_dual) {
196  if (parameters_.perturb_costs_in_dual_simplex()) {
197  reduced_costs_.PerturbCosts();
198  }
199 
200  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
201  variables_info_.MakeBoxedVariableRelevant(false);
202  GLOP_RETURN_IF_ERROR(DualMinimize(feasibility_phase_, time_limit));
203  DisplayIterationInfo();
204 
205  if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
206  // Note(user): In most cases, the matrix will already be refactorized
207  // and both Refactorize() and PermuteBasis() will do nothing. However,
208  // if the time limit is reached during the first phase, this might not
209  // be the case and RecomputeBasicVariableValues() below DCHECKs that the
210  // matrix is refactorized. This is not required, but we currently only
211  // want to recompute values from scratch when the matrix was just
212  // refactorized to maximize precision.
213  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
214  PermuteBasis();
215 
216  variables_info_.MakeBoxedVariableRelevant(true);
217  reduced_costs_.MakeReducedCostsPrecise();
218 
219  // This is needed to display errors properly.
220  MakeBoxedVariableDualFeasible(
221  variables_info_.GetNonBasicBoxedVariables(),
222  /*update_basic_values=*/false);
223  variable_values_.RecomputeBasicVariableValues();
224  }
225  } else {
226  // Test initial dual infeasibility, ignoring boxed variables. We currently
227  // refactorize/recompute the reduced costs if not already done.
228  // TODO(user): Not ideal in an incremental setting.
229  reduced_costs_.MakeReducedCostsPrecise();
230  bool unused;
231  RefactorizeBasisIfNeeded(&unused);
232  reduced_costs_.GetReducedCosts();
233 
234  const Fractional initial_infeasibility =
236  if (initial_infeasibility <
237  reduced_costs_.GetDualFeasibilityTolerance()) {
238  if (log_info) LOG(INFO) << "Initial basis is dual feasible.";
239  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
240  MakeBoxedVariableDualFeasible(
241  variables_info_.GetNonBasicBoxedVariables(),
242  /*update_basic_values=*/false);
243  variable_values_.RecomputeBasicVariableValues();
244  } else {
245  // Transform problem and recompute variable values.
246  variables_info_.TransformToDualPhaseIProblem(
247  reduced_costs_.GetDualFeasibilityTolerance(),
248  reduced_costs_.GetReducedCosts());
249  variable_values_.ResetAllNonBasicVariableValues();
250  variable_values_.RecomputeBasicVariableValues();
251 
252  // Optimize.
253  DisplayErrors();
254  GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
255  DisplayIterationInfo();
256 
257  // Restore original problem and recompute variable values. Note that we
258  // need the reduced cost on the fixed positions here.
259  variables_info_.EndDualPhaseI(
260  reduced_costs_.GetDualFeasibilityTolerance(),
261  reduced_costs_.GetFullReducedCosts());
262  variable_values_.ResetAllNonBasicVariableValues();
263  variable_values_.RecomputeBasicVariableValues();
264 
265  // TODO(user): Note that if there was cost shifts, we just keep them
266  // until the end of the optim.
267  //
268  // TODO(user): What if slightly infeasible? we shouldn't really stop.
269  // Call primal ? use higher tolerance ? It seems we can always kind of
270  // continue and deal with the issue later. Find a way other than this +
271  // 1e-6 hack.
272  if (problem_status_ == ProblemStatus::OPTIMAL) {
273  if (reduced_costs_.ComputeMaximumDualInfeasibility() <
274  reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
275  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
276  } else {
277  if (log_info) LOG(INFO) << "Infeasible after first phase.";
278  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
279  }
280  }
281  }
282  }
283  } else {
284  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
285  DisplayIterationInfo();
286 
287  // After the primal phase I, we need to restore the objective.
288  if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
289  InitializeObjectiveAndTestIfUnchanged(lp);
290  reduced_costs_.ResetForNewObjective();
291  }
292  }
293 
294  // Reduced costs must be explicitly recomputed because DisplayErrors() is
295  // const.
296  // TODO(user): This API is not really nice.
297  reduced_costs_.GetReducedCosts();
298  DisplayErrors();
299 
300  feasibility_phase_ = false;
301  feasibility_time_ = time_limit->GetElapsedTime() - start_time;
302  primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
303  num_feasibility_iterations_ = num_iterations_;
304 
305  if (log_info) LOG(INFO) << "------ Second phase: optimization.";
306 
307  // Because of shifts or perturbations, we may need to re-run a dual simplex
308  // after the primal simplex finished, or the opposite.
309  //
310  // We alter between solving with primal and dual Phase II algorithm as long as
311  // time limit permits *and* we did not yet achieve the desired precision.
312  // I.e., we run iteration i if the solution from iteration i-1 was not precise
313  // after we removed the bound and cost shifts and perturbations.
314  //
315  // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
316  // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
317  // (i.e., these statuses are not necesserily a consequence of hitting a time
318  // limit).
319  for (int num_optims = 0;
320  // We want to enter the loop when both num_optims and num_iterations_ are
321  // *equal* to the corresponding limits (to return a meaningful status
322  // when the limits are set to 0).
323  num_optims <= parameters_.max_number_of_reoptimizations() &&
324  !objective_limit_reached_ &&
325  (num_iterations_ == 0 ||
326  num_iterations_ < parameters_.max_number_of_iterations()) &&
327  !time_limit->LimitReached() &&
328  !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
329  (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
330  problem_status_ == ProblemStatus::DUAL_FEASIBLE);
331  ++num_optims) {
332  if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
333  // Run the primal simplex.
334  GLOP_RETURN_IF_ERROR(Minimize(time_limit));
335  } else {
336  // Run the dual simplex.
337  GLOP_RETURN_IF_ERROR(DualMinimize(feasibility_phase_, time_limit));
338  }
339 
340  // Minimize() or DualMinimize() always double check the result with maximum
341  // precision by refactoring the basis before exiting (except if an
342  // iteration or time limit was reached).
343  DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
344  problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
345  basis_factorization_.IsRefactorized());
346 
347  // If SetIntegralityScale() was called, we preform a polish operation.
348  if (!integrality_scale_.empty() &&
349  problem_status_ == ProblemStatus::OPTIMAL) {
351  }
352 
353  // Remove the bound and cost shifts (or perturbations).
354  //
355  // Note(user): Currently, we never do both at the same time, so we could
356  // be a bit faster here, but then this is quick anyway.
357  variable_values_.ResetAllNonBasicVariableValues();
358  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
359  PermuteBasis();
360  variable_values_.RecomputeBasicVariableValues();
361  reduced_costs_.ClearAndRemoveCostShifts();
362 
363  // Reduced costs must be explicitly recomputed because DisplayErrors() is
364  // const.
365  // TODO(user): This API is not really nice.
366  reduced_costs_.GetReducedCosts();
367  DisplayIterationInfo();
368  DisplayErrors();
369 
370  // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
371  // status by checking with the other phase I that the problem is really
372  // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
373  // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
374  // OPTIMAL and the dual does not have issues on this problem.
375  //
376  // TODO(user): There is another issue on infeas/qual.mps. I think we should
377  // just check the dual ray, not really the current solution dual
378  // feasibility.
379  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
380  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
381  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
382  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
383  reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
384  if (log_info) {
385  LOG(INFO) << "DUAL_UNBOUNDED was reported, but the residual and/or "
386  << "dual infeasibility is above the tolerance";
387  }
388  }
389  break;
390  }
391 
392  // Change the status, if after the shift and perturbation removal the
393  // problem is not OPTIMAL anymore.
394  if (problem_status_ == ProblemStatus::OPTIMAL) {
395  const Fractional solution_tolerance =
396  parameters_.solution_feasibility_tolerance();
397  const Fractional primal_residual =
398  variable_values_.ComputeMaximumPrimalResidual();
399  const Fractional dual_residual =
400  reduced_costs_.ComputeMaximumDualResidual();
401  if (primal_residual > solution_tolerance ||
402  dual_residual > solution_tolerance) {
403  if (log_info) {
404  LOG(INFO) << "OPTIMAL was reported, yet one of the residuals is "
405  "above the solution feasibility tolerance after the "
406  "shift/perturbation are removed.";
407  }
408  if (parameters_.change_status_to_imprecise()) {
409  problem_status_ = ProblemStatus::IMPRECISE;
410  }
411  } else {
412  // We use the "precise" tolerances here to try to report the best
413  // possible solution. Note however that we cannot really hope for an
414  // infeasibility lower than its corresponding residual error. Note that
415  // we already adapt the tolerance like this during the simplex
416  // execution.
417  const Fractional primal_tolerance = std::max(
418  primal_residual, parameters_.primal_feasibility_tolerance());
419  const Fractional dual_tolerance =
420  std::max(dual_residual, parameters_.dual_feasibility_tolerance());
421  const Fractional primal_infeasibility =
422  variable_values_.ComputeMaximumPrimalInfeasibility();
423  const Fractional dual_infeasibility =
424  reduced_costs_.ComputeMaximumDualInfeasibility();
425  if (primal_infeasibility > primal_tolerance &&
426  dual_infeasibility > dual_tolerance) {
427  if (log_info) {
428  LOG(INFO) << "OPTIMAL was reported, yet both of the infeasibility "
429  "are above the tolerance after the "
430  "shift/perturbation are removed.";
431  }
432  if (parameters_.change_status_to_imprecise()) {
433  problem_status_ = ProblemStatus::IMPRECISE;
434  }
435  } else if (primal_infeasibility > primal_tolerance) {
436  if (num_optims == parameters_.max_number_of_reoptimizations()) {
437  if (log_info) {
438  LOG(INFO) << "The primal infeasibility is still higher than the "
439  "requested internal tolerance, but the maximum "
440  "number of optimization is reached.";
441  }
442  break;
443  }
444  if (log_info) LOG(INFO) << "Re-optimizing with dual simplex ... ";
445  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
446  } else if (dual_infeasibility > dual_tolerance) {
447  if (num_optims == parameters_.max_number_of_reoptimizations()) {
448  if (log_info) {
449  LOG(INFO) << "The dual infeasibility is still higher than the "
450  "requested internal tolerance, but the maximum "
451  "number of optimization is reached.";
452  }
453  break;
454  }
455  if (log_info) LOG(INFO) << "Re-optimizing with primal simplex ... ";
456  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
457  }
458  }
459  }
460  }
461 
462  // Check that the return status is "precise".
463  //
464  // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
465  // quantities are not up to date in this case.
466  if (parameters_.change_status_to_imprecise() &&
467  problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
468  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
469  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
470  reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
471  problem_status_ = ProblemStatus::IMPRECISE;
472  } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
473  problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
474  problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
475  if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
476  problem_status_ = ProblemStatus::IMPRECISE;
477  }
478  } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
479  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
480  problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
481  if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
482  problem_status_ = ProblemStatus::IMPRECISE;
483  }
484  }
485  }
486 
487  // Store the result for the solution getters.
488  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
489  solution_dual_values_ = reduced_costs_.GetDualValues();
490  solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
491  SaveState();
492 
493  if (lp.IsMaximizationProblem()) {
494  ChangeSign(&solution_dual_values_);
495  ChangeSign(&solution_reduced_costs_);
496  }
497 
498  // If the problem is unbounded, set the objective value to +/- infinity.
499  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
500  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
501  solution_objective_value_ =
502  (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
503  : -kInfinity;
504  if (lp.IsMaximizationProblem()) {
505  solution_objective_value_ = -solution_objective_value_;
506  }
507  }
508 
509  total_time_ = time_limit->GetElapsedTime() - start_time;
510  optimization_time_ = total_time_ - feasibility_time_;
511  num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
512 
513  DisplayAllStats();
514  return Status::OK();
515 }
516 
518  return problem_status_;
519 }
520 
522  return solution_objective_value_;
523 }
524 
526  return num_iterations_;
527 }
528 
529 RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
530 
531 ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
532 
534  return variable_values_.Get(col);
535 }
536 
538  return solution_reduced_costs_[col];
539 }
540 
542  return solution_reduced_costs_;
543 }
544 
546  return solution_dual_values_[row];
547 }
548 
550  return variables_info_.GetStatusRow()[col];
551 }
552 
553 const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
554 
556  // Note the negative sign since the slack variable is such that
557  // constraint_activity + slack_value = 0.
558  return -variable_values_.Get(SlackColIndex(row));
559 }
560 
562  // The status of the given constraint is the same as the status of the
563  // associated slack variable with a change of sign.
564  const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
567  }
570  }
571  return VariableToConstraintStatus(s);
572 }
573 
575  DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
576  return solution_primal_ray_;
577 }
579  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
580  return solution_dual_ray_;
581 }
582 
584  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
585  return solution_dual_ray_row_combination_;
586 }
587 
588 ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
589 
591  DCHECK(basis_factorization_.GetColumnPermutation().empty());
592  return basis_factorization_;
593 }
594 
595 std::string RevisedSimplex::GetPrettySolverStats() const {
596  return absl::StrFormat(
597  "Problem status : %s\n"
598  "Solving time : %-6.4g\n"
599  "Number of iterations : %u\n"
600  "Time for solvability (first phase) : %-6.4g\n"
601  "Number of iterations for solvability : %u\n"
602  "Time for optimization : %-6.4g\n"
603  "Number of iterations for optimization : %u\n"
604  "Stop after first basis : %d\n",
605  GetProblemStatusString(problem_status_), total_time_, num_iterations_,
606  feasibility_time_, num_feasibility_iterations_, optimization_time_,
607  num_optimization_iterations_,
608  absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
609 }
610 
612  // TODO(user): Count what is missing.
613  return DeterministicTimeForFpOperations(num_update_price_operations_) +
614  basis_factorization_.DeterministicTime() +
615  update_row_.DeterministicTime() +
616  entering_variable_.DeterministicTime() +
617  reduced_costs_.DeterministicTime() +
618  primal_edge_norms_.DeterministicTime();
619 }
620 
621 void RevisedSimplex::SetVariableNames() {
622  variable_name_.resize(num_cols_, "");
623  for (ColIndex col(0); col < first_slack_col_; ++col) {
624  const ColIndex var_index = col + 1;
625  variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
626  }
627  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
628  const ColIndex var_index = col - first_slack_col_ + 1;
629  variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
630  }
631 }
632 
// Marks the given variable as non-basic with the given status, then derives
// its value from that new status so that status and value stay consistent.
void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
    ColIndex col, VariableStatus status) {
  variables_info_.UpdateToNonBasicStatus(col, status);
  variable_values_.SetNonBasicVariableValueFromStatus(col);
}
638 
639 bool RevisedSimplex::BasisIsConsistent() const {
640  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
641  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
642  for (RowIndex row(0); row < num_rows_; ++row) {
643  const ColIndex col = basis_[row];
644  if (!is_basic.IsSet(col)) return false;
645  if (variable_statuses[col] != VariableStatus::BASIC) return false;
646  }
647  ColIndex cols_in_basis(0);
648  ColIndex cols_not_in_basis(0);
649  for (ColIndex col(0); col < num_cols_; ++col) {
650  cols_in_basis += is_basic.IsSet(col);
651  cols_not_in_basis += !is_basic.IsSet(col);
652  if (is_basic.IsSet(col) !=
653  (variable_statuses[col] == VariableStatus::BASIC)) {
654  return false;
655  }
656  }
657  if (cols_in_basis != RowToColIndex(num_rows_)) return false;
658  if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
659  return true;
660 }
661 
// Note(user): The basis factorization is not updated by this function but by
// UpdateAndPivot().
//
// Swaps entering_col into the basis at position basis_row; the variable that
// was basic there leaves with the given non-basic status.
void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
                                 VariableStatus leaving_variable_status) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_ROW_BOUNDS(basis_row);

  // Check that this is not called with an entering_col already in the basis
  // and that the leaving col is indeed in the basis.
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK_NE(basis_[basis_row], entering_col);
  DCHECK_NE(basis_[basis_row], kInvalidCol);

  const ColIndex leaving_col = basis_[basis_row];
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // Make leaving_col leave the basis and update relevant data.
  // Note that the leaving variable value is not necessarily at its exact
  // bound, which is like a bound shift.
  variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
         leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
         leaving_variable_status == VariableStatus::FIXED_VALUE);

  basis_[basis_row] = entering_col;
  variables_info_.UpdateToBasicStatus(entering_col);
  // The update row is a function of the basis, so it is now stale.
  update_row_.Invalidate();
}
691 
692 namespace {
693 
// Comparator used to sort column indices in increasing order of the entries
// of a given value vector. The vector is held by reference and must outlive
// the comparator (and any sort using it).
class ColumnComparator {
 public:
  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
  bool operator()(ColIndex col_a, ColIndex col_b) const {
    return value_[col_a] < value_[col_b];
  }

 private:
  const DenseRow& value_;
};
705 
706 } // namespace
707 
708 // To understand better what is going on in this function, let us say that this
709 // algorithm will produce the optimal solution to a problem containing only
710 // singleton columns (provided that the variables start at the minimum possible
711 // cost, see DefaultVariableStatus()). This is unit tested.
712 //
713 // The error_ must be equal to the constraint activity for the current variable
714 // values before this function is called. If error_[row] is 0.0, that mean this
715 // constraint is currently feasible.
716 void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
717  SCOPED_TIME_STAT(&function_stats_);
718  // Computes the singleton columns and the cost variation of the corresponding
719  // variables (in the only possible direction, i.e away from its current bound)
720  // for a unit change in the infeasibility of the corresponding row.
721  //
722  // Note that the slack columns will be treated as normal singleton columns.
723  std::vector<ColIndex> singleton_column;
724  DenseRow cost_variation(num_cols_, 0.0);
725  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
726  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
727  for (ColIndex col(0); col < num_cols_; ++col) {
728  if (compact_matrix_.column(col).num_entries() != 1) continue;
729  if (lower_bounds[col] == upper_bounds[col]) continue;
730  const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
731  if (variable_values_.Get(col) == lower_bounds[col]) {
732  cost_variation[col] = objective_[col] / std::abs(slope);
733  } else {
734  cost_variation[col] = -objective_[col] / std::abs(slope);
735  }
736  singleton_column.push_back(col);
737  }
738  if (singleton_column.empty()) return;
739 
740  // Sort the singleton columns for the case where many of them correspond to
741  // the same row (equivalent to a piecewise-linear objective on this variable).
742  // Negative cost_variation first since moving the singleton variable away from
743  // its current bound means the least decrease in the objective function for
744  // the same "error" variation.
745  ColumnComparator comparator(cost_variation);
746  std::sort(singleton_column.begin(), singleton_column.end(), comparator);
747  DCHECK_LE(cost_variation[singleton_column.front()],
748  cost_variation[singleton_column.back()]);
749 
750  // Use a singleton column to "absorb" the error when possible to avoid
751  // introducing unneeded artificial variables. Note that with scaling on, the
752  // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
753  // them) and that the SingletonColumnSignPreprocessor makes them all positive.
754  // However, this code works for any coefficient value.
755  const DenseRow& variable_values = variable_values_.GetDenseRow();
756  for (const ColIndex col : singleton_column) {
757  const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
758 
759  // If no singleton columns have entered the basis for this row, choose the
760  // first one. It will be the one with the least decrease in the objective
761  // function when it leaves the basis.
762  if ((*basis)[row] == kInvalidCol) {
763  (*basis)[row] = col;
764  }
765 
766  // If there is already no error in this row (i.e. it is primal-feasible),
767  // there is nothing to do.
768  if (error_[row] == 0.0) continue;
769 
770  // In this case, all the infeasibility can be "absorbed" and this variable
771  // may not be at one of its bound anymore, so we have to use it in the
772  // basis.
773  const Fractional coeff =
774  compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
775  const Fractional new_value = variable_values[col] + error_[row] / coeff;
776  if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
777  error_[row] = 0.0;
778 
779  // Use this variable in the initial basis.
780  (*basis)[row] = col;
781  continue;
782  }
783 
784  // The idea here is that if the singleton column cannot be used to "absorb"
785  // all error_[row], if it is boxed, it can still be used to make the
786  // infeasibility smaller (with a bound flip).
787  const Fractional box_width = variables_info_.GetBoundDifference(col);
788  DCHECK_NE(box_width, 0.0);
789  DCHECK_NE(error_[row], 0.0);
790  const Fractional error_sign = error_[row] / coeff;
791  if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
792  DCHECK(IsFinite(box_width));
793  error_[row] -= coeff * box_width;
794  SetNonBasicVariableStatusAndDeriveValue(col,
796  continue;
797  }
798  if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
799  DCHECK(IsFinite(box_width));
800  error_[row] += coeff * box_width;
801  SetNonBasicVariableStatusAndDeriveValue(col,
803  continue;
804  }
805  }
806 }
807 
808 bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
809  const LinearProgram& lp, bool* only_change_is_new_rows,
810  bool* only_change_is_new_cols, ColIndex* num_new_cols) {
811  SCOPED_TIME_STAT(&function_stats_);
812  DCHECK(only_change_is_new_rows != nullptr);
813  DCHECK(only_change_is_new_cols != nullptr);
814  DCHECK(num_new_cols != nullptr);
815  DCHECK_NE(kInvalidCol, lp.GetFirstSlackVariable());
816  DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
817  DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
818 
819  DCHECK_EQ(lp.num_variables(),
820  lp.GetFirstSlackVariable() + RowToColIndex(lp.num_constraints()));
821  DCHECK(IsRightMostSquareMatrixIdentity(lp.GetSparseMatrix()));
822  const bool old_part_of_matrix_is_unchanged =
824  num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
825 
826  // Test if the matrix is unchanged, and if yes, just returns true. Note that
827  // this doesn't check the columns corresponding to the slack variables,
828  // because they were checked by lp.IsInEquationForm() when Solve() was called.
829  if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
830  lp.num_variables() == num_cols_) {
831  return true;
832  }
833 
834  // Check if the new matrix can be derived from the old one just by adding
835  // new rows (i.e new constraints).
836  *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
837  lp.num_constraints() > num_rows_ &&
838  lp.GetFirstSlackVariable() == first_slack_col_;
839 
840  // Check if the new matrix can be derived from the old one just by adding
841  // new columns (i.e new variables).
842  *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
843  lp.num_constraints() == num_rows_ &&
844  lp.GetFirstSlackVariable() > first_slack_col_;
845  *num_new_cols =
846  *only_change_is_new_cols ? lp.num_variables() - num_cols_ : ColIndex(0);
847 
848  // Initialize first_slack_.
849  first_slack_col_ = lp.GetFirstSlackVariable();
850 
851  // Initialize the new dimensions.
852  num_rows_ = lp.num_constraints();
853  num_cols_ = lp.num_variables();
854 
855  // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
856  // already added all the slack variables at this point, so matrix_ will not
857  // change anymore.
858  // TODO(user): This can be sped up by removing the MatrixView.
859  compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
860  if (parameters_.use_transposed_matrix()) {
861  transposed_matrix_.PopulateFromTranspose(compact_matrix_);
862  }
863  return false;
864 }
865 
866 bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
867  const LinearProgram& lp, ColIndex num_new_cols) {
868  SCOPED_TIME_STAT(&function_stats_);
869  DCHECK_EQ(lp.num_variables(), num_cols_);
870  DCHECK_LE(num_new_cols, first_slack_col_);
871  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
872 
873  // Check the original variable bounds.
874  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
875  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
876  for (ColIndex col(0); col < first_new_col; ++col) {
877  if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
878  upper_bounds[col] != lp.variable_upper_bounds()[col]) {
879  return false;
880  }
881  }
882  // Check that each new variable has a bound of zero.
883  for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
884  if (lp.variable_lower_bounds()[col] != 0.0 &&
885  lp.variable_upper_bounds()[col] != 0.0) {
886  return false;
887  }
888  }
889  // Check that the slack bounds are unchanged.
890  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
891  if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
892  upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
893  return false;
894  }
895  }
896  return true;
897 }
898 
899 bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
900  const LinearProgram& lp) {
901  SCOPED_TIME_STAT(&function_stats_);
902 
903  bool objective_is_unchanged = true;
904  objective_.resize(num_cols_, 0.0);
905  DCHECK_EQ(num_cols_, lp.num_variables());
906  if (lp.IsMaximizationProblem()) {
907  // Note that we use the minimization version of the objective internally.
908  for (ColIndex col(0); col < lp.num_variables(); ++col) {
909  const Fractional coeff = -lp.objective_coefficients()[col];
910  if (objective_[col] != coeff) {
911  objective_is_unchanged = false;
912  }
913  objective_[col] = coeff;
914  }
915  objective_offset_ = -lp.objective_offset();
916  objective_scaling_factor_ = -lp.objective_scaling_factor();
917  } else {
918  for (ColIndex col(0); col < lp.num_variables(); ++col) {
919  if (objective_[col] != lp.objective_coefficients()[col]) {
920  objective_is_unchanged = false;
921  break;
922  }
923  }
924  if (!objective_is_unchanged) {
925  objective_ = lp.objective_coefficients();
926  }
927  objective_offset_ = lp.objective_offset();
928  objective_scaling_factor_ = lp.objective_scaling_factor();
929  }
930  return objective_is_unchanged;
931 }
932 
933 void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
934  objective_limit_reached_ = false;
935  DCHECK(std::isfinite(objective_offset_));
936  DCHECK(std::isfinite(objective_scaling_factor_));
937  DCHECK_NE(0.0, objective_scaling_factor_);
938 
939  // This sets dual_objective_limit_ and then primal_objective_limit_.
940  for (const bool set_dual : {true, false}) {
941  // NOTE(user): If objective_scaling_factor_ is negative, the optimization
942  // direction was reversed (during preprocessing or inside revised simplex),
943  // i.e., the original problem is maximization. In such case the _meaning_ of
944  // the lower and upper limits is swapped. To this end we must change the
945  // signs of limits, which happens automatically when calculating shifted
946  // limits. We must also use upper (resp. lower) limit in place of lower
947  // (resp. upper) limit when calculating the final objective_limit_.
948  //
949  // Choose lower limit if using the dual simplex and scaling factor is
950  // negative or if using the primal simplex and scaling is nonnegative, upper
951  // limit otherwise.
952  const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
953  ? parameters_.objective_lower_limit()
954  : parameters_.objective_upper_limit();
955  const Fractional shifted_limit =
956  limit / objective_scaling_factor_ - objective_offset_;
957  if (set_dual) {
958  dual_objective_limit_ = shifted_limit;
959  } else {
960  primal_objective_limit_ = shifted_limit;
961  }
962  }
963 }
964 
965 // This implementation starts with an initial matrix B equal to the identity
966 // matrix (modulo a column permutation). For that it uses either the slack
967 // variables or the singleton columns present in the problem. Afterwards, the
968 // fixed slacks in the basis are exchanged with normal columns of A if possible
969 // by the InitialBasis class.
970 Status RevisedSimplex::CreateInitialBasis() {
971  SCOPED_TIME_STAT(&function_stats_);
972 
973  // Initialize the variable values and statuses.
974  // Note that for the dual algorithm, boxed variables will be made
975  // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
976  // really matter at which of their two finite bounds they start.
977  variables_info_.InitializeToDefaultStatus();
978  variable_values_.ResetAllNonBasicVariableValues();
979 
980  if (VLOG_IS_ON(1)) {
981  int num_free_variables = 0;
982  for (const VariableStatus status : variables_info_.GetStatusRow()) {
983  if (status == VariableStatus::FREE) ++num_free_variables;
984  }
985  VLOG(1) << "Number of free variables in the problem: "
986  << num_free_variables;
987  }
988 
989  // Start by using an all-slack basis.
990  RowToColMapping basis(num_rows_, kInvalidCol);
991  for (RowIndex row(0); row < num_rows_; ++row) {
992  basis[row] = SlackColIndex(row);
993  }
994 
995  // If possible, for the primal simplex we replace some slack variables with
996  // some singleton columns present in the problem.
997  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
998  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
999  if (!parameters_.use_dual_simplex() &&
1000  parameters_.initial_basis() != GlopParameters::MAROS &&
1001  parameters_.exploit_singleton_column_in_initial_basis()) {
1002  // For UseSingletonColumnInInitialBasis() to work better, we change
1003  // the value of the boxed singleton column with a non-zero cost to the best
1004  // of their two bounds.
1005  for (ColIndex col(0); col < num_cols_; ++col) {
1006  if (compact_matrix_.column(col).num_entries() != 1) continue;
1007  const VariableStatus status = variables_info_.GetStatusRow()[col];
1008  const Fractional objective = objective_[col];
1009  if (objective > 0 && IsFinite(lower_bounds[col]) &&
1010  status == VariableStatus::AT_UPPER_BOUND) {
1011  SetNonBasicVariableStatusAndDeriveValue(col,
1013  } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1014  status == VariableStatus::AT_LOWER_BOUND) {
1015  SetNonBasicVariableStatusAndDeriveValue(col,
1017  }
1018  }
1019 
1020  // Compute the primal infeasibility of the initial variable values in
1021  // error_.
1022  ComputeVariableValuesError();
1023 
1024  // TODO(user): A better but slightly more complex algorithm would be to:
1025  // - Ignore all singleton columns except the slacks during phase I.
1026  // - For this, change the slack variable bounds accordingly.
1027  // - At the end of phase I, restore the slack variable bounds and perform
1028  // the same algorithm to start with feasible and "optimal" values of the
1029  // singleton columns.
1030  basis.assign(num_rows_, kInvalidCol);
1031  UseSingletonColumnInInitialBasis(&basis);
1032 
1033  // Eventually complete the basis with fixed slack columns.
1034  for (RowIndex row(0); row < num_rows_; ++row) {
1035  if (basis[row] == kInvalidCol) {
1036  basis[row] = SlackColIndex(row);
1037  }
1038  }
1039  }
1040 
1041  // Use an advanced initial basis to remove the fixed variables from the basis.
1042  if (parameters_.initial_basis() == GlopParameters::NONE) {
1043  return InitializeFirstBasis(basis);
1044  }
1045  if (parameters_.initial_basis() == GlopParameters::MAROS) {
1046  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1047  upper_bounds, variables_info_.GetTypeRow());
1048  if (parameters_.use_dual_simplex()) {
1049  // This dual version only uses zero-cost columns to complete the
1050  // basis.
1051  initial_basis.GetDualMarosBasis(num_cols_, &basis);
1052  } else {
1053  initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1054  }
1055  int number_changed = 0;
1056  for (RowIndex row(0); row < num_rows_; ++row) {
1057  if (basis[row] != SlackColIndex(row)) {
1058  number_changed++;
1059  }
1060  }
1061  VLOG(1) << "Number of Maros basis changes: " << number_changed;
1062  } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1063  parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1064  // First unassign the fixed variables from basis.
1065  int num_fixed_variables = 0;
1066  for (RowIndex row(0); row < basis.size(); ++row) {
1067  const ColIndex col = basis[row];
1068  if (lower_bounds[col] == upper_bounds[col]) {
1069  basis[row] = kInvalidCol;
1070  ++num_fixed_variables;
1071  }
1072  }
1073 
1074  if (num_fixed_variables == 0) {
1075  VLOG(1) << "Crash is set to " << parameters_.initial_basis()
1076  << " but there is no equality rows to remove from initial all "
1077  "slack basis.";
1078  } else {
1079  // Then complete the basis with an advanced initial basis algorithm.
1080  VLOG(1) << "Trying to remove " << num_fixed_variables
1081  << " fixed variables from the initial basis.";
1082  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1083  upper_bounds, variables_info_.GetTypeRow());
1084 
1085  if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1086  if (parameters_.use_scaling()) {
1087  initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1088  } else {
1089  VLOG(1) << "Bixby initial basis algorithm requires the problem "
1090  << "to be scaled. Skipping Bixby's algorithm.";
1091  }
1092  } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1093  // Note the use of num_cols_ here because this algorithm
1094  // benefits from treating fixed slack columns like any other column.
1095  if (parameters_.use_dual_simplex()) {
1096  // This dual version only uses zero-cost columns to complete the
1097  // basis.
1098  initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1099  } else {
1100  initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1101  }
1102 
1103  const Status status = InitializeFirstBasis(basis);
1104  if (status.ok()) {
1105  return status;
1106  } else {
1107  VLOG(1) << "Reverting to all slack basis.";
1108 
1109  for (RowIndex row(0); row < num_rows_; ++row) {
1110  basis[row] = SlackColIndex(row);
1111  }
1112  }
1113  }
1114  }
1115  } else {
1116  LOG(WARNING) << "Unsupported initial_basis parameters: "
1117  << parameters_.initial_basis();
1118  }
1119 
1120  return InitializeFirstBasis(basis);
1121 }
1122 
1123 Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
1124  basis_ = basis;
1125 
1126  // For each row which does not have a basic column, assign it to the
1127  // corresponding slack column.
1128  basis_.resize(num_rows_, kInvalidCol);
1129  for (RowIndex row(0); row < num_rows_; ++row) {
1130  if (basis_[row] == kInvalidCol) {
1131  basis_[row] = SlackColIndex(row);
1132  }
1133  }
1134 
1135  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
1136  PermuteBasis();
1137 
1138  // Test that the upper bound on the condition number of basis is not too high.
1139  // The number was not computed by any rigorous analysis, we just prefer to
1140  // revert to the all slack basis if the condition number of our heuristic
1141  // first basis seems bad. See for instance on cond11.mps, where we get an
1142  // infinity upper bound.
1143  const Fractional condition_number_ub =
1144  basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
1145  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
1146  const std::string error_message =
1147  absl::StrCat("The matrix condition number upper bound is too high: ",
1148  condition_number_ub);
1149  VLOG(1) << error_message;
1150  return Status(Status::ERROR_LU, error_message);
1151  }
1152 
1153  // Everything is okay, finish the initialization.
1154  for (RowIndex row(0); row < num_rows_; ++row) {
1155  variables_info_.UpdateToBasicStatus(basis_[row]);
1156  }
1157  DCHECK(BasisIsConsistent());
1158 
1159  // TODO(user): Maybe return an error status if this is too high. Note however
1160  // that if we want to do that, we need to reset variables_info_ to a
1161  // consistent state.
1162  variable_values_.ResetAllNonBasicVariableValues();
1163  variable_values_.RecomputeBasicVariableValues();
1164  if (VLOG_IS_ON(1)) {
1165  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1166  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
1167  VLOG(1) << absl::StrCat(
1168  "The primal residual of the initial basis is above the tolerance, ",
1169  variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
1170  }
1171  }
1172  return Status::OK();
1173 }
1174 
1175 Status RevisedSimplex::Initialize(const LinearProgram& lp) {
1176  parameters_ = initial_parameters_;
1177  PropagateParameters();
1178 
1179  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
1180  // this is where num_rows_ and num_cols_ are computed.
1181  //
1182  // Note that these functions can't depend on use_dual_simplex() since we may
1183  // change it below.
1184  ColIndex num_new_cols(0);
1185  bool only_change_is_new_rows = false;
1186  bool only_change_is_new_cols = false;
1187  bool matrix_is_unchanged = true;
1188  bool only_new_bounds = false;
1189  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
1190  matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
1191  lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols);
1192  only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
1193  OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1194  lp, num_new_cols);
1195  } else if (DEBUG_MODE) {
1196  CHECK(InitializeMatrixAndTestIfUnchanged(
1197  lp, &only_change_is_new_rows, &only_change_is_new_cols, &num_new_cols));
1198  }
1199  notify_that_matrix_is_unchanged_ = false;
1200 
1201  // TODO(user): move objective with ReducedCosts class.
1202  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);
1203 
1204  const bool bounds_are_unchanged =
1205  variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
1206  lp.variable_lower_bounds(), lp.variable_upper_bounds());
1207 
1208  // If parameters_.allow_simplex_algorithm_change() is true and we already have
1209  // a primal (resp. dual) feasible solution, then we use the primal (resp.
1210  // dual) algorithm since there is a good chance that it will be faster.
1211  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
1212  if (objective_is_unchanged && !bounds_are_unchanged) {
1213  parameters_.set_use_dual_simplex(true);
1214  PropagateParameters();
1215  }
1216  if (bounds_are_unchanged && !objective_is_unchanged) {
1217  parameters_.set_use_dual_simplex(false);
1218  PropagateParameters();
1219  }
1220  }
1221 
1222  InitializeObjectiveLimit(lp);
1223 
1224  // Computes the variable name as soon as possible for logging.
1225  // TODO(user): do we really need to store them? we could just compute them
1226  // on the fly since we do not need the speed.
1227  if (VLOG_IS_ON(1)) {
1228  SetVariableNames();
1229  }
1230 
1231  // Warm-start? This is supported only if the solution_state_ is non empty,
1232  // i.e., this revised simplex i) was already used to solve a problem, or
1233  // ii) the solution state was provided externally. Note that the
1234  // solution_state_ may have nothing to do with the current problem, e.g.,
1235  // objective, matrix, and/or bounds had changed. So we support several
1236  // scenarios of warm-start depending on how did the problem change and which
1237  // simplex algorithm is used (primal or dual).
1238  bool solve_from_scratch = true;
1239 
1240  // Try to perform a "quick" warm-start with no matrix factorization involved.
1241  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
1242  if (!parameters_.use_dual_simplex()) {
1243  // With primal simplex, always clear dual norms and dual pricing.
1244  // Incrementality is supported only if only change to the matrix and
1245  // bounds is adding new columns (objective may change), and that all
1246  // new columns have a bound equal to zero.
1247  dual_edge_norms_.Clear();
1248  dual_pricing_vector_.clear();
1249  if (matrix_is_unchanged && bounds_are_unchanged) {
1250  // TODO(user): Do not do that if objective_is_unchanged. Currently
1251  // this seems to break something. Investigate.
1252  reduced_costs_.ClearAndRemoveCostShifts();
1253  solve_from_scratch = false;
1254  } else if (only_change_is_new_cols && only_new_bounds) {
1255  variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
1256  solution_state_);
1257  variable_values_.ResetAllNonBasicVariableValues();
1258 
1259  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1260  for (ColIndex& col_ref : basis_) {
1261  if (col_ref >= first_new_col) {
1262  col_ref += num_new_cols;
1263  }
1264  }
1265 
1266  // Make sure the primal edge norm are recomputed from scratch.
1267  // TODO(user): only the norms of the new columns actually need to be
1268  // computed.
1269  primal_edge_norms_.Clear();
1270  reduced_costs_.ClearAndRemoveCostShifts();
1271  solve_from_scratch = false;
1272  }
1273  } else {
1274  // With dual simplex, always clear primal norms. Incrementality is
1275  // supported only if the objective remains the same (the matrix may
1276  // contain new rows and the bounds may change).
1277  primal_edge_norms_.Clear();
1278  if (objective_is_unchanged) {
1279  if (matrix_is_unchanged) {
1280  if (!bounds_are_unchanged) {
1281  variables_info_.InitializeFromBasisState(
1282  first_slack_col_, ColIndex(0), solution_state_);
1283  variable_values_.ResetAllNonBasicVariableValues();
1284  variable_values_.RecomputeBasicVariableValues();
1285  }
1286  solve_from_scratch = false;
1287  } else if (only_change_is_new_rows) {
1288  // For the dual-simplex, we also perform a warm start if a couple of
1289  // new rows where added.
1290  variables_info_.InitializeFromBasisState(
1291  first_slack_col_, ColIndex(0), solution_state_);
1292  dual_edge_norms_.ResizeOnNewRows(num_rows_);
1293 
1294  // TODO(user): The reduced costs do not really need to be recomputed.
1295  // We just need to initialize the ones of the new slack variables to
1296  // 0.
1297  reduced_costs_.ClearAndRemoveCostShifts();
1298  dual_pricing_vector_.clear();
1299 
1300  // Note that this needs to be done after the Clear() calls above.
1301  if (InitializeFirstBasis(basis_).ok()) {
1302  solve_from_scratch = false;
1303  }
1304  }
1305  }
1306  }
1307  }
1308 
1309  // If we couldn't perform a "quick" warm start above, we can at least try to
1310  // reuse the variable statuses.
1311  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
1312  if (solve_from_scratch && !solution_state_.IsEmpty()) {
1313  // If an external basis has been provided or if the matrix changed, we need
1314  // to perform more work, e.g., factorize the proposed basis and validate it.
1315  variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
1316  solution_state_);
1317  basis_.assign(num_rows_, kInvalidCol);
1318  RowIndex row(0);
1319  for (ColIndex col : variables_info_.GetIsBasicBitRow()) {
1320  basis_[row] = col;
1321  ++row;
1322  }
1323 
1324  basis_factorization_.Clear();
1325  reduced_costs_.ClearAndRemoveCostShifts();
1326  primal_edge_norms_.Clear();
1327  dual_edge_norms_.Clear();
1328  dual_pricing_vector_.clear();
1329 
1330  // TODO(user): If the basis is incomplete, we could complete it with
1331  // better slack variables than is done by InitializeFirstBasis() by
1332  // using a partial LU decomposition (see markowitz.h).
1333  if (InitializeFirstBasis(basis_).ok()) {
1334  solve_from_scratch = false;
1335  } else {
1336  if (log_info) {
1337  LOG(INFO) << "RevisedSimplex is not using the warm start "
1338  "basis because it is not factorizable.";
1339  }
1340  }
1341  }
1342 
1343  if (solve_from_scratch) {
1344  if (log_info) LOG(INFO) << "Solve from scratch.";
1345  basis_factorization_.Clear();
1346  reduced_costs_.ClearAndRemoveCostShifts();
1347  primal_edge_norms_.Clear();
1348  dual_edge_norms_.Clear();
1349  dual_pricing_vector_.clear();
1350  GLOP_RETURN_IF_ERROR(CreateInitialBasis());
1351  } else {
1352  if (log_info) LOG(INFO) << "Incremental solve.";
1353  }
1354  DCHECK(BasisIsConsistent());
1355  return Status::OK();
1356 }
1357 
1358 void RevisedSimplex::DisplayBasicVariableStatistics() {
1359  SCOPED_TIME_STAT(&function_stats_);
1360 
1361  int num_fixed_variables = 0;
1362  int num_free_variables = 0;
1363  int num_variables_at_bound = 0;
1364  int num_slack_variables = 0;
1365  int num_infeasible_variables = 0;
1366 
1367  const DenseRow& variable_values = variable_values_.GetDenseRow();
1368  const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1369  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1370  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1371  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1372  for (RowIndex row(0); row < num_rows_; ++row) {
1373  const ColIndex col = basis_[row];
1374  const Fractional value = variable_values[col];
1375  if (variable_types[col] == VariableType::UNCONSTRAINED) {
1376  ++num_free_variables;
1377  }
1378  if (value > upper_bounds[col] + tolerance ||
1379  value < lower_bounds[col] - tolerance) {
1380  ++num_infeasible_variables;
1381  }
1382  if (col >= first_slack_col_) {
1383  ++num_slack_variables;
1384  }
1385  if (lower_bounds[col] == upper_bounds[col]) {
1386  ++num_fixed_variables;
1387  } else if (variable_values[col] == lower_bounds[col] ||
1388  variable_values[col] == upper_bounds[col]) {
1389  ++num_variables_at_bound;
1390  }
1391  }
1392 
1393  VLOG(1) << "Basis size: " << num_rows_;
1394  VLOG(1) << "Number of basic infeasible variables: "
1395  << num_infeasible_variables;
1396  VLOG(1) << "Number of basic slack variables: " << num_slack_variables;
1397  VLOG(1) << "Number of basic variables at bound: " << num_variables_at_bound;
1398  VLOG(1) << "Number of basic fixed variables: " << num_fixed_variables;
1399  VLOG(1) << "Number of basic free variables: " << num_free_variables;
1400 }
1401 
1402 void RevisedSimplex::SaveState() {
1403  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1404  solution_state_.statuses = variables_info_.GetStatusRow();
1405  solution_state_has_been_set_externally_ = false;
1406 }
1407 
1408 RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1409  DenseBooleanColumn contains_data(num_rows_, false);
1410  for (ColIndex col(0); col < num_cols_; ++col) {
1411  for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1412  contains_data[e.row()] = true;
1413  }
1414  }
1415  RowIndex num_empty_rows(0);
1416  for (RowIndex row(0); row < num_rows_; ++row) {
1417  if (!contains_data[row]) {
1418  ++num_empty_rows;
1419  VLOG(1) << "Row " << row << " is empty.";
1420  }
1421  }
1422  return num_empty_rows;
1423 }
1424 
1425 ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1426  ColIndex num_empty_cols(0);
1427  for (ColIndex col(0); col < num_cols_; ++col) {
1428  if (compact_matrix_.column(col).IsEmpty()) {
1429  ++num_empty_cols;
1430  VLOG(1) << "Column " << col << " is empty.";
1431  }
1432  }
1433  return num_empty_cols;
1434 }
1435 
1436 void RevisedSimplex::CorrectErrorsOnVariableValues() {
1437  SCOPED_TIME_STAT(&function_stats_);
1438  DCHECK(basis_factorization_.IsRefactorized());
1439 
1440  // TODO(user): The primal residual error does not change if we take degenerate
1441  // steps or if we do not change the variable values. No need to recompute it
1442  // in this case.
1443  const Fractional primal_residual =
1444  variable_values_.ComputeMaximumPrimalResidual();
1445 
1446  // If the primal_residual is within the tolerance, no need to recompute
1447  // the basic variable values with a better precision.
1448  if (primal_residual >= parameters_.harris_tolerance_ratio() *
1449  parameters_.primal_feasibility_tolerance()) {
1450  variable_values_.RecomputeBasicVariableValues();
1451  VLOG(1) << "Primal infeasibility (bounds error) = "
1452  << variable_values_.ComputeMaximumPrimalInfeasibility()
1453  << ", Primal residual |A.x - b| = "
1454  << variable_values_.ComputeMaximumPrimalResidual();
1455  }
1456 }
1457 
1458 void RevisedSimplex::ComputeVariableValuesError() {
1459  SCOPED_TIME_STAT(&function_stats_);
1460  error_.AssignToZero(num_rows_);
1461  const DenseRow& variable_values = variable_values_.GetDenseRow();
1462  for (ColIndex col(0); col < num_cols_; ++col) {
1463  const Fractional value = variable_values[col];
1464  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1465  }
1466 }
1467 
1468 void RevisedSimplex::ComputeDirection(ColIndex col) {
1469  SCOPED_TIME_STAT(&function_stats_);
1471  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1472  direction_infinity_norm_ = 0.0;
1473  if (direction_.non_zeros.empty()) {
1474  // We still compute the direction non-zeros because our code relies on it.
1475  for (RowIndex row(0); row < num_rows_; ++row) {
1476  const Fractional value = direction_[row];
1477  if (value != 0.0) {
1478  direction_.non_zeros.push_back(row);
1479  direction_infinity_norm_ =
1480  std::max(direction_infinity_norm_, std::abs(value));
1481  }
1482  }
1483  } else {
1484  for (const auto e : direction_) {
1485  direction_infinity_norm_ =
1486  std::max(direction_infinity_norm_, std::abs(e.coefficient()));
1487  }
1488  }
1489  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1490  num_rows_ == 0 ? 0.0
1491  : static_cast<double>(direction_.non_zeros.size()) /
1492  static_cast<double>(num_rows_.value())));
1493 }
1494 
1495 Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1496  SCOPED_TIME_STAT(&function_stats_);
1497  compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1498  for (const auto e : direction_) {
1499  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1500  &error_);
1501  }
1502  return InfinityNorm(error_);
1503 }
1504 
1505 template <bool is_entering_reduced_cost_positive>
1506 Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1507  const DenseRow& upper_bounds,
1508  RowIndex row) const {
1509  const ColIndex col = basis_[row];
1510  const Fractional direction = direction_[row];
1511  const Fractional value = variable_values_.Get(col);
1512  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1513  DCHECK_NE(direction, 0.0);
1514  if (is_entering_reduced_cost_positive) {
1515  if (direction > 0.0) {
1516  return (upper_bounds[col] - value) / direction;
1517  } else {
1518  return (lower_bounds[col] - value) / direction;
1519  }
1520  } else {
1521  if (direction > 0.0) {
1522  return (value - lower_bounds[col]) / direction;
1523  } else {
1524  return (value - upper_bounds[col]) / direction;
1525  }
1526  }
1527 }
1528 
1529 template <bool is_entering_reduced_cost_positive>
1530 Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
1531  Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
1532  SCOPED_TIME_STAT(&function_stats_);
1533  const Fractional harris_tolerance =
1534  parameters_.harris_tolerance_ratio() *
1535  parameters_.primal_feasibility_tolerance();
1536  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
1537  parameters_.primal_feasibility_tolerance();
1538 
1539  // Initially, we can skip any variable with a ratio greater than
1540  // bound_flip_ratio since it seems to be always better to choose the
1541  // bound-flip over such leaving variable.
1542  Fractional harris_ratio = bound_flip_ratio;
1543  leaving_candidates->Clear();
1544 
1545  // If the basis is refactorized, then we should have everything with a good
1546  // precision, so we only consider "acceptable" pivots. Otherwise we consider
1547  // all the entries, and if the algorithm return a pivot that is too small, we
1548  // will refactorize and recompute the relevant quantities.
1549  const Fractional threshold = basis_factorization_.IsRefactorized()
1550  ? parameters_.minimum_acceptable_pivot()
1551  : parameters_.ratio_test_zero_threshold();
1552 
1553  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1554  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1555  for (const auto e : direction_) {
1556  const Fractional magnitude = std::abs(e.coefficient());
1557  if (magnitude <= threshold) continue;
1558  const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
1559  lower_bounds, upper_bounds, e.row());
1560  if (ratio <= harris_ratio) {
1561  leaving_candidates->SetCoefficient(e.row(), ratio);
1562 
1563  // The second max() makes sure harris_ratio is lower bounded by a small
1564  // positive value. The more classical approach is to bound it by 0.0 but
1565  // since we will always perform a small positive step, we allow any
1566  // variable to go a bit more out of bound (even if it is past the harris
1567  // tolerance). This increase the number of candidates and allows us to
1568  // choose a more numerically stable pivot.
1569  //
1570  // Note that at least lower bounding it by 0.0 is really important on
1571  // numerically difficult problems because its helps in the choice of a
1572  // stable pivot.
1573  harris_ratio = std::min(harris_ratio,
1574  std::max(minimum_delta / magnitude,
1575  ratio + harris_tolerance / magnitude));
1576  }
1577  }
1578  return harris_ratio;
1579 }
1580 
1581 namespace {
1582 
1583 // Returns true if the candidate ratio is supposed to be more stable than the
1584 // current ratio (or if the two are equal).
1585 // The idea here is to take, by order of preference:
1586 // - the minimum positive ratio in order to intoduce a primal infeasibility
1587 // which is as small as possible.
1588 // - or the least negative one in order to have the smallest bound shift
1589 // possible on the leaving variable.
1590 bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1591  if (current >= 0.0) {
1592  return candidate >= 0.0 && candidate <= current;
1593  } else {
1594  return candidate >= current;
1595  }
1596 }
1597 
1598 } // namespace
1599 
1600 // Ratio-test or Quotient-test. Choose the row of the leaving variable.
1601 // Known as CHUZR or CHUZRO in FORTRAN codes.
1602 Status RevisedSimplex::ChooseLeavingVariableRow(
1603  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1604  RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1605  SCOPED_TIME_STAT(&function_stats_);
1606  GLOP_RETURN_ERROR_IF_NULL(refactorize);
1607  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1608  GLOP_RETURN_ERROR_IF_NULL(step_length);
1609  DCHECK_COL_BOUNDS(entering_col);
1610  DCHECK_NE(0.0, reduced_cost);
1611 
1612  // A few cases will cause the test to be recomputed from the beginning.
1613  int stats_num_leaving_choices = 0;
1614  equivalent_leaving_choices_.clear();
1615  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1616  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1617  while (true) {
1618  stats_num_leaving_choices = 0;
1619 
1620  // We initialize current_ratio with the maximum step the entering variable
1621  // can take (bound-flip). Note that we do not use tolerance here.
1622  const Fractional entering_value = variable_values_.Get(entering_col);
1623  Fractional current_ratio =
1624  (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
1625  : upper_bounds[entering_col] - entering_value;
1626  DCHECK_GT(current_ratio, 0.0);
1627 
1628  // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1629  // actually computes the minimum leaving ratio of all the variables. This is
1630  // the same as the 'classic' ratio test.
1631  const Fractional harris_ratio =
1632  (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1633  current_ratio, &leaving_candidates_)
1634  : ComputeHarrisRatioAndLeavingCandidates<false>(
1635  current_ratio, &leaving_candidates_);
1636 
1637  // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1638  // variable too much out of bounds), we take it as it is always stable and
1639  // fast.
1640  if (current_ratio <= harris_ratio) {
1641  *leaving_row = kInvalidRow;
1642  *step_length = current_ratio;
1643  break;
1644  }
1645 
1646  // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1647  // <= harris_ratio', we choose the leaving row with the largest coefficient.
1648  //
1649  // This has a big impact, because picking a leaving variable with a small
1650  // direction_[row] is the main source of Abnormal LU errors.
1651  Fractional pivot_magnitude = 0.0;
1652  stats_num_leaving_choices = 0;
1653  *leaving_row = kInvalidRow;
1654  equivalent_leaving_choices_.clear();
1655  for (const SparseColumn::Entry e : leaving_candidates_) {
1656  const Fractional ratio = e.coefficient();
1657  if (ratio > harris_ratio) continue;
1658  ++stats_num_leaving_choices;
1659  const RowIndex row = e.row();
1660 
1661  // If the magnitudes are the same, we choose the leaving variable with
1662  // what is probably the more stable ratio, see
1663  // IsRatioMoreOrEquallyStable().
1664  const Fractional candidate_magnitude = std::abs(direction_[row]);
1665  if (candidate_magnitude < pivot_magnitude) continue;
1666  if (candidate_magnitude == pivot_magnitude) {
1667  if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1668  if (ratio == current_ratio) {
1669  DCHECK_NE(kInvalidRow, *leaving_row);
1670  equivalent_leaving_choices_.push_back(row);
1671  continue;
1672  }
1673  }
1674  equivalent_leaving_choices_.clear();
1675  current_ratio = ratio;
1676  pivot_magnitude = candidate_magnitude;
1677  *leaving_row = row;
1678  }
1679 
1680  // Break the ties randomly.
1681  if (!equivalent_leaving_choices_.empty()) {
1682  equivalent_leaving_choices_.push_back(*leaving_row);
1683  *leaving_row =
1684  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1685  0, equivalent_leaving_choices_.size() - 1)(random_)];
1686  }
1687 
1688  // Since we took care of the bound-flip at the beginning, at this point
1689  // we have a valid leaving row.
1690  DCHECK_NE(kInvalidRow, *leaving_row);
1691 
1692  // A variable already outside one of its bounds +/- tolerance is considered
1693  // at its bound and its ratio is zero. Not doing this may lead to a step
1694  // that moves the objective in the wrong direction. We may want to allow
1695  // such steps, but then we will need to check that it doesn't break the
1696  // bounds of the other variables.
1697  if (current_ratio <= 0.0) {
1698  // Instead of doing a zero step, we do a small positive step. This
1699  // helps on degenerate problems.
1700  const Fractional minimum_delta =
1701  parameters_.degenerate_ministep_factor() *
1702  parameters_.primal_feasibility_tolerance();
1703  *step_length = minimum_delta / pivot_magnitude;
1704  } else {
1705  *step_length = current_ratio;
1706  }
1707 
1708  // Note(user): Testing the pivot at each iteration is useful for debugging
1709  // an LU factorization problem. Remove the false if you need to investigate
1710  // this, it makes sure that this will be compiled away.
1711  if (/* DISABLES CODE */ (false)) {
1712  TestPivot(entering_col, *leaving_row);
1713  }
1714 
1715  // We try various "heuristics" to avoid a small pivot.
1716  //
1717  // The smaller 'direction_[*leaving_row]', the less precise
1718  // it is. So we want to avoid pivoting by such a row. Small pivots lead to
1719  // ill-conditioned bases or even to matrices that are not a basis at all if
1720  // the actual (infinite-precision) coefficient is zero.
1721  //
1722  // TODO(user): We may have to choose another entering column if
1723  // we cannot prevent pivoting by a small pivot.
1724  // (Chvatal, p.115, about epsilon2.)
1725  if (pivot_magnitude <
1726  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
1727  // The first countermeasure is to recompute everything to the best
1728  // precision we can in the hope of avoiding such a choice. Note that this
1729  // helps a lot on the Netlib problems.
1730  if (!basis_factorization_.IsRefactorized()) {
1731  VLOG(1) << "Refactorizing to avoid pivoting by "
1732  << direction_[*leaving_row]
1733  << " direction_infinity_norm_ = " << direction_infinity_norm_
1734  << " reduced cost = " << reduced_cost;
1735  *refactorize = true;
1736  return Status::OK();
1737  }
1738 
1739  // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
1740  // we kwnow that this pivot will still have an acceptable magnitude.
1741  //
1742  // TODO(user): An issue left to fix is that if there is no such pivot at
1743  // all, then we will report unbounded even if this is not really the case.
1744  // As of 2018/07/18, this happens on l30.mps.
1745  VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
1746  << " direction_infinity_norm_ = " << direction_infinity_norm_
1747  << " reduced cost = " << reduced_cost;
1748  DCHECK_GE(std::abs(direction_[*leaving_row]),
1749  parameters_.minimum_acceptable_pivot());
1750  IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
1751  }
1752  break;
1753  }
1754 
1755  // Update the target bound.
1756  if (*leaving_row != kInvalidRow) {
1757  const bool is_reduced_cost_positive = (reduced_cost > 0.0);
1758  const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
1759  *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
1760  ? upper_bounds[basis_[*leaving_row]]
1761  : lower_bounds[basis_[*leaving_row]];
1762  }
1763 
1764  // Stats.
1766  ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
1767  if (!equivalent_leaving_choices_.empty()) {
1768  ratio_test_stats_.num_perfect_ties.Add(
1769  equivalent_leaving_choices_.size());
1770  }
1771  if (*leaving_row != kInvalidRow) {
1772  ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
1773  }
1774  });
1775  return Status::OK();
1776 }
1777 
1778 namespace {
1779 
1780 // Store a row with its ratio, coefficient magnitude and target bound. This is
1781 // used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1782 // details.
1783 struct BreakPoint {
1784  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1785  Fractional _target_bound)
1786  : row(_row),
1787  ratio(_ratio),
1788  coeff_magnitude(_coeff_magnitude),
1789  target_bound(_target_bound) {}
1790 
1791  // We want to process the breakpoints by increasing ratio and decreasing
1792  // coefficient magnitude (if the ratios are the same). Returns false if "this"
1793  // is before "other" in a priority queue.
1794  bool operator<(const BreakPoint& other) const {
1795  if (ratio == other.ratio) {
1796  if (coeff_magnitude == other.coeff_magnitude) {
1797  return row > other.row;
1798  }
1799  return coeff_magnitude < other.coeff_magnitude;
1800  }
1801  return ratio > other.ratio;
1802  }
1803 
1804  RowIndex row;
1808 };
1809 
1810 } // namespace
1811 
1812 void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
1813  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1814  RowIndex* leaving_row, Fractional* step_length,
1815  Fractional* target_bound) const {
1816  SCOPED_TIME_STAT(&function_stats_);
1817  RETURN_IF_NULL(refactorize);
1818  RETURN_IF_NULL(leaving_row);
1819  RETURN_IF_NULL(step_length);
1820  DCHECK_COL_BOUNDS(entering_col);
1821  DCHECK_NE(0.0, reduced_cost);
1822  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1823  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1824 
1825  // We initialize current_ratio with the maximum step the entering variable
1826  // can take (bound-flip). Note that we do not use tolerance here.
1827  const Fractional entering_value = variable_values_.Get(entering_col);
1828  Fractional current_ratio = (reduced_cost > 0.0)
1829  ? entering_value - lower_bounds[entering_col]
1830  : upper_bounds[entering_col] - entering_value;
1831  DCHECK_GT(current_ratio, 0.0);
1832 
1833  std::vector<BreakPoint> breakpoints;
1834  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1835  for (const auto e : direction_) {
1836  const Fractional direction =
1837  reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
1838  const Fractional magnitude = std::abs(direction);
1839  if (magnitude < tolerance) continue;
1840 
1841  // Computes by how much we can add 'direction' to the basic variable value
1842  // with index 'row' until it changes of primal feasibility status. That is
1843  // from infeasible to feasible or from feasible to infeasible. Note that the
1844  // transition infeasible->feasible->infeasible is possible. We use
1845  // tolerances here, but when the step will be performed, it will move the
1846  // variable to the target bound (possibly taking a small negative step).
1847  //
1848  // Note(user): The negative step will only happen when the leaving variable
1849  // was slightly infeasible (less than tolerance). Moreover, the overall
1850  // infeasibility will not necessarily increase since it doesn't take into
1851  // account all the variables with an infeasibility smaller than the
1852  // tolerance, and here we will at least improve the one of the leaving
1853  // variable.
1854  const ColIndex col = basis_[e.row()];
1855  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1856 
1857  const Fractional value = variable_values_.Get(col);
1860  const Fractional to_lower = (lower_bound - tolerance - value) / direction;
1861  const Fractional to_upper = (upper_bound + tolerance - value) / direction;
1862 
1863  // Enqueue the possible transitions. Note that the second tests exclude the
1864  // case where to_lower or to_upper are infinite.
1865  if (to_lower >= 0.0 && to_lower < current_ratio) {
1866  breakpoints.push_back(
1867  BreakPoint(e.row(), to_lower, magnitude, lower_bound));
1868  }
1869  if (to_upper >= 0.0 && to_upper < current_ratio) {
1870  breakpoints.push_back(
1871  BreakPoint(e.row(), to_upper, magnitude, upper_bound));
1872  }
1873  }
1874 
1875  // Order the breakpoints by increasing ratio and decreasing coefficient
1876  // magnitude (if the ratios are the same).
1877  std::make_heap(breakpoints.begin(), breakpoints.end());
1878 
1879  // Select the last breakpoint that still improves the infeasibility and has
1880  // the largest coefficient magnitude.
1881  Fractional improvement = std::abs(reduced_cost);
1882  Fractional best_magnitude = 0.0;
1883  *leaving_row = kInvalidRow;
1884  while (!breakpoints.empty()) {
1885  const BreakPoint top = breakpoints.front();
1886  // TODO(user): consider using >= here. That will lead to bigger ratio and
1887  // hence a better impact on the infeasibility. The drawback is that more
1888  // effort may be needed to update the reduced costs.
1889  //
1890  // TODO(user): Use a random tie breaking strategy for BreakPoint with
1891  // same ratio and same coefficient magnitude? Koberstein explains in his PhD
1892  // that it helped on the dual-simplex.
1893  if (top.coeff_magnitude > best_magnitude) {
1894  *leaving_row = top.row;
1895  current_ratio = top.ratio;
1896  best_magnitude = top.coeff_magnitude;
1897  *target_bound = top.target_bound;
1898  }
1899 
1900  // As long as the sum of primal infeasibilities is decreasing, we look for
1901  // pivots that are numerically more stable.
1902  improvement -= top.coeff_magnitude;
1903  if (improvement <= 0.0) break;
1904  std::pop_heap(breakpoints.begin(), breakpoints.end());
1905  breakpoints.pop_back();
1906  }
1907 
1908  // Try to avoid a small pivot by refactorizing.
1909  if (*leaving_row != kInvalidRow) {
1910  const Fractional threshold =
1911  parameters_.small_pivot_threshold() * direction_infinity_norm_;
1912  if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
1913  *refactorize = true;
1914  return;
1915  }
1916  }
1917  *step_length = current_ratio;
1918 }
1919 
1920 // This implements the pricing step for the dual simplex.
1921 Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
1922  Fractional* cost_variation,
1924  SCOPED_TIME_STAT(&function_stats_);
1925  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1926  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
1928 
1929  // This is not supposed to happen, but better be safe.
1930  if (dual_prices_.Size() == 0) {
1931  variable_values_.RecomputeDualPrices();
1932  }
1933 
1934  // Return right away if there is no leaving variable.
1935  // Fill cost_variation and target_bound otherwise.
1936  *leaving_row = dual_prices_.GetMaximum();
1937  if (*leaving_row == kInvalidRow) return Status::OK();
1938 
1939  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1940  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1941  const ColIndex leaving_col = basis_[*leaving_row];
1942  const Fractional value = variable_values_.Get(leaving_col);
1943  if (value < lower_bounds[leaving_col]) {
1944  *cost_variation = lower_bounds[leaving_col] - value;
1945  *target_bound = lower_bounds[leaving_col];
1946  DCHECK_GT(*cost_variation, 0.0);
1947  } else {
1948  *cost_variation = upper_bounds[leaving_col] - value;
1949  *target_bound = upper_bounds[leaving_col];
1950  DCHECK_LT(*cost_variation, 0.0);
1951  }
1952  return Status::OK();
1953 }
1954 
1955 namespace {
1956 
1957 // Returns true if a basic variable with given cost and type is to be considered
1958 // as a leaving candidate for the dual phase I.
1959 bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
1960  Fractional threshold) {
1961  if (cost == 0.0) return false;
1962  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
1963  type == VariableType::FIXED_VARIABLE ||
1964  (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
1965  (type == VariableType::LOWER_BOUNDED && cost > threshold);
1966 }
1967 
1968 } // namespace
1969 
1970 // Important: The norm should be updated before this is called.
1971 template <bool use_dense_update>
1972 void RevisedSimplex::OnDualPriceChange(const DenseColumn& squared_norm,
1973  RowIndex row, VariableType type,
1974  Fractional threshold) {
1975  const Fractional price = dual_pricing_vector_[row];
1976  const bool is_candidate =
1977  IsDualPhaseILeavingCandidate(price, type, threshold);
1978  if (is_candidate) {
1979  if (use_dense_update) {
1980  dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
1981  } else {
1982  dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
1983  }
1984  } else {
1985  dual_prices_.Remove(row);
1986  }
1987 }
1988 
1989 void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
1990  ColIndex entering_col) {
1991  SCOPED_TIME_STAT(&function_stats_);
1992  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
1993  const Fractional threshold = parameters_.ratio_test_zero_threshold();
1994 
1995  // Note that because the norm are also updated only on the position of the
1996  // direction, scaled_dual_pricing_vector_ will be up to date.
1997  const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();
1998 
1999  // Convert the dual_pricing_vector_ from the old basis into the new one (which
2000  // is the same as multiplying it by an Eta matrix corresponding to the
2001  // direction).
2002  const Fractional step =
2003  dual_pricing_vector_[leaving_row] / direction_[leaving_row];
2004  for (const auto e : direction_) {
2005  dual_pricing_vector_[e.row()] -= e.coefficient() * step;
2006  OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
2007  threshold);
2008  }
2009  dual_pricing_vector_[leaving_row] = step;
2010 
2011  // The entering_col which was dual-infeasible is now dual-feasible, so we
2012  // have to remove it from the infeasibility sum.
2013  dual_pricing_vector_[leaving_row] -=
2014  dual_infeasibility_improvement_direction_[entering_col];
2015  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
2016  --num_dual_infeasible_positions_;
2017  }
2018  dual_infeasibility_improvement_direction_[entering_col] = 0.0;
2019 
2020  // The leaving variable will also be dual-feasible.
2021  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;
2022 
2023  // Update the leaving row entering candidate status.
2024  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
2025  threshold);
2026 }
2027 
2028 template <typename Cols>
2029 void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
2030  const Cols& cols) {
2031  SCOPED_TIME_STAT(&function_stats_);
2032  bool something_to_do = false;
2033  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
2034  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
2035  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2036  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
2037  for (ColIndex col : cols) {
2038  const Fractional reduced_cost = reduced_costs[col];
2039  const Fractional sign =
2040  (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
2041  : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
2042  : 0.0;
2043  if (sign != dual_infeasibility_improvement_direction_[col]) {
2044  if (sign == 0.0) {
2045  --num_dual_infeasible_positions_;
2046  } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
2047  ++num_dual_infeasible_positions_;
2048  }
2049  if (!something_to_do) {
2050  initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
2051  initially_all_zero_scratchpad_.ClearSparseMask();
2052  initially_all_zero_scratchpad_.non_zeros.clear();
2053  something_to_do = true;
2054  }
2055 
2056  // We add a factor 10 because of the scattered access.
2057  num_update_price_operations_ +=
2058  10 * compact_matrix_.column(col).num_entries().value();
2060  col, sign - dual_infeasibility_improvement_direction_[col],
2061  &initially_all_zero_scratchpad_);
2062  dual_infeasibility_improvement_direction_[col] = sign;
2063  }
2064  }
2065  if (something_to_do) {
2066  initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
2067  initially_all_zero_scratchpad_.ClearSparseMask();
2068  const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();
2069 
2070  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
2071  const Fractional threshold = parameters_.ratio_test_zero_threshold();
2072  basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
2073  if (initially_all_zero_scratchpad_.non_zeros.empty()) {
2074  dual_prices_.StartDenseUpdates();
2075  for (RowIndex row(0); row < num_rows_; ++row) {
2076  if (initially_all_zero_scratchpad_[row] == 0.0) continue;
2077  dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
2078  OnDualPriceChange</*use_dense_update=*/true>(
2079  squared_norms, row, variable_type[basis_[row]], threshold);
2080  }
2081  initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
2082  } else {
2083  for (const auto e : initially_all_zero_scratchpad_) {
2084  dual_pricing_vector_[e.row()] += e.coefficient();
2085  OnDualPriceChange(squared_norms, e.row(),
2086  variable_type[basis_[e.row()]], threshold);
2087  initially_all_zero_scratchpad_[e.row()] = 0.0;
2088  }
2089  }
2090  initially_all_zero_scratchpad_.non_zeros.clear();
2091  }
2092 }
2093 
2094 Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2095  RowIndex* leaving_row, Fractional* cost_variation,
2097  SCOPED_TIME_STAT(&function_stats_);
2098  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2099  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2100  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2101  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2102 
2103  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2104  // positions and contains the sign in which the reduced cost of this column
2105  // needs to move to improve the feasibility otherwise (+1 or -1).
2106  //
2107  // Its current value was the one used to compute dual_pricing_vector_ and
2108  // was updated accordingly by DualPhaseIUpdatePrice().
2109  //
2110  // If more variables changed of dual-feasibility status during the last
2111  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2112  // take them into account.
2113  if (reduced_costs_.AreReducedCostsRecomputed() ||
2114  dual_edge_norms_.NeedsBasisRefactorization() ||
2115  dual_pricing_vector_.empty()) {
2116  // Recompute everything from scratch.
2117  num_dual_infeasible_positions_ = 0;
2118  dual_pricing_vector_.AssignToZero(num_rows_);
2119  dual_prices_.ClearAndResize(num_rows_);
2120  dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2121  DualPhaseIUpdatePriceOnReducedCostChange(
2122  variables_info_.GetIsRelevantBitRow());
2123  } else {
2124  // Update row is still equal to the row used during the last iteration
2125  // to update the reduced costs.
2126  DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2127  }
2128 
2129  // If there is no dual-infeasible position, we are done.
2130  *leaving_row = kInvalidRow;
2131  if (num_dual_infeasible_positions_ == 0) return Status::OK();
2132 
2133  *leaving_row = dual_prices_.GetMaximum();
2134 
2135  // Returns right away if there is no leaving variable or fill the other
2136  // return values otherwise.
2137  if (*leaving_row == kInvalidRow) return Status::OK();
2138  *cost_variation = dual_pricing_vector_[*leaving_row];
2139  const ColIndex leaving_col = basis_[*leaving_row];
2140  if (*cost_variation < 0.0) {
2141  *target_bound = upper_bounds[leaving_col];
2142  } else {
2143  *target_bound = lower_bounds[leaving_col];
2144  }
2146  return Status::OK();
2147 }
2148 
2149 template <typename BoxedVariableCols>
2150 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2151  const BoxedVariableCols& cols, bool update_basic_values) {
2152  SCOPED_TIME_STAT(&function_stats_);
2153  std::vector<ColIndex> changed_cols;
2154 
2155  // It is important to flip bounds within a tolerance because of precision
2156  // errors. Otherwise, this leads to cycling on many of the Netlib problems
2157  // since this is called at each iteration (because of the bound-flipping ratio
2158  // test).
2159  const DenseRow& variable_values = variable_values_.GetDenseRow();
2160  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2161  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2162  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2163  const Fractional dual_feasibility_tolerance =
2164  reduced_costs_.GetDualFeasibilityTolerance();
2165  const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2166  for (const ColIndex col : cols) {
2167  const Fractional reduced_cost = reduced_costs[col];
2168  const VariableStatus status = variable_status[col];
2169  DCHECK(variables_info_.GetTypeRow()[col] ==
2171  // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2172  DCHECK(variable_values[col] == lower_bounds[col] ||
2173  variable_values[col] == upper_bounds[col] ||
2174  status == VariableStatus::BASIC);
2175  if (reduced_cost > dual_feasibility_tolerance &&
2176  status == VariableStatus::AT_UPPER_BOUND) {
2177  variables_info_.UpdateToNonBasicStatus(col,
2179  changed_cols.push_back(col);
2180  } else if (reduced_cost < -dual_feasibility_tolerance &&
2181  status == VariableStatus::AT_LOWER_BOUND) {
2182  variables_info_.UpdateToNonBasicStatus(col,
2184  changed_cols.push_back(col);
2185  }
2186  }
2187 
2188  if (!changed_cols.empty()) {
2189  iteration_stats_.num_dual_flips.Add(changed_cols.size());
2190  variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2191  update_basic_values);
2192  }
2193 }
2194 
2195 Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2196  RowIndex leaving_row, Fractional target_bound) {
2197  SCOPED_TIME_STAT(&function_stats_);
2198 
2199  // We just want the leaving variable to go to its target_bound.
2200  const ColIndex leaving_col = basis_[leaving_row];
2201  const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2202  Fractional unscaled_step = leaving_variable_value - target_bound;
2203 
2204  // In Chvatal p 157 update_[entering_col] is used instead of
2205  // direction_[leaving_row], but the two quantities are actually the
2206  // same. This is because update_[col] is the value at leaving_row of
2207  // the right inverse of col and direction_ is the right inverse of the
2208  // entering_col. Note that direction_[leaving_row] is probably more
2209  // precise.
2210  // TODO(user): use this to check precision and trigger recomputation.
2211  return unscaled_step / direction_[leaving_row];
2212 }
2213 
2214 bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2215  VLOG(1) << "Test pivot.";
2216  SCOPED_TIME_STAT(&function_stats_);
2217  const ColIndex leaving_col = basis_[leaving_row];
2218  basis_[leaving_row] = entering_col;
2219 
2220  // TODO(user): If 'is_ok' is true, we could use the computed lu in
2221  // basis_factorization_ rather than recompute it during UpdateAndPivot().
2222  CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2223  const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2224  basis_[leaving_row] = leaving_col;
2225  return is_ok;
2226 }
2227 
2228 // Note that this function is an optimization and that if it was doing nothing
2229 // the algorithm will still be correct and work. Using it does change the pivot
2230 // taken during the simplex method though.
2231 void RevisedSimplex::PermuteBasis() {
2232  SCOPED_TIME_STAT(&function_stats_);
2233 
2234  // Fetch the current basis column permutation and return if it is empty which
2235  // means the permutation is the identity.
2236  const ColumnPermutation& col_perm =
2237  basis_factorization_.GetColumnPermutation();
2238  if (col_perm.empty()) return;
2239 
2240  // Permute basis_.
2241  ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);
2242 
2243  // Permute dual_pricing_vector_ if needed.
2244  if (!dual_pricing_vector_.empty()) {
2245  // TODO(user): We need to permute dual_prices_ too now, we recompute
2246  // everything one each basis factorization, so this don't matter.
2247  ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
2248  }
2249 
2250  // Notify the other classes.
2251  reduced_costs_.UpdateDataOnBasisPermutation();
2252  dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2253 
2254  // Finally, remove the column permutation from all subsequent solves since
2255  // it has been taken into account in basis_.
2256  basis_factorization_.SetColumnPermutationToIdentity();
2257 }
2258 
2259 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2260  RowIndex leaving_row,
2262  SCOPED_TIME_STAT(&function_stats_);
2263  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2264  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2265  const ColIndex leaving_col = basis_[leaving_row];
2266  const VariableStatus leaving_variable_status =
2267  lower_bounds[leaving_col] == upper_bounds[leaving_col]
2269  : target_bound == lower_bounds[leaving_col]
2272  if (variable_values_.Get(leaving_col) != target_bound) {
2273  ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2274  target_bound);
2275  }
2276  UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2277 
2278  const Fractional pivot_from_direction = direction_[leaving_row];
2279  const Fractional pivot_from_update_row =
2280  update_row_.GetCoefficient(entering_col);
2281  const Fractional diff =
2282  std::abs(pivot_from_update_row - pivot_from_direction);
2283  if (diff > parameters_.refactorization_threshold() *
2284  (1 + std::abs(pivot_from_direction))) {
2285  VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2286  << " diff = " << diff;
2287  GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2288  } else {
2290  basis_factorization_.Update(entering_col, leaving_row, direction_));
2291  }
2292  if (basis_factorization_.IsRefactorized()) {
2293  PermuteBasis();
2294  }
2295  return Status::OK();
2296 }
2297 
2298 bool RevisedSimplex::NeedsBasisRefactorization(bool refactorize) {
2299  if (basis_factorization_.IsRefactorized()) return false;
2300  if (reduced_costs_.NeedsBasisRefactorization()) return true;
2301  const GlopParameters::PricingRule pricing_rule =
2302  feasibility_phase_ ? parameters_.feasibility_rule()
2303  : parameters_.optimization_rule();
2304  if (parameters_.use_dual_simplex()) {
2305  // TODO(user): Currently the dual is always using STEEPEST_EDGE.
2306  DCHECK_EQ(pricing_rule, GlopParameters::STEEPEST_EDGE);
2307  if (dual_edge_norms_.NeedsBasisRefactorization()) return true;
2308  } else {
2309  if (pricing_rule == GlopParameters::STEEPEST_EDGE &&
2310  primal_edge_norms_.NeedsBasisRefactorization()) {
2311  return true;
2312  }
2313  }
2314  return refactorize;
2315 }
2316 
2317 Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2318  SCOPED_TIME_STAT(&function_stats_);
2319  if (NeedsBasisRefactorization(*refactorize)) {
2320  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2321  update_row_.Invalidate();
2322  PermuteBasis();
2323  }
2324  *refactorize = false;
2325  return Status::OK();
2326 }
2327 
2329  if (col >= integrality_scale_.size()) {
2330  integrality_scale_.resize(col + 1, 0.0);
2331  }
2332  integrality_scale_[col] = scale;
2333 }
2334 
2335 Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2337  Cleanup update_deterministic_time_on_return(
2338  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2339 
2340  // Get all non-basic variables with a reduced costs close to zero.
2341  // Note that because we only choose entering candidate with a cost of zero,
2342  // this set will not change (modulo epsilons).
2343  const DenseRow& rc = reduced_costs_.GetReducedCosts();
2344  std::vector<ColIndex> candidates;
2345  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2346  if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2347  if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2348  }
2349 
2350  bool refactorize = false;
2351  int num_pivots = 0;
2352  Fractional total_gain = 0.0;
2353  for (int i = 0; i < 10; ++i) {
2354  AdvanceDeterministicTime(time_limit);
2355  if (time_limit->LimitReached()) break;
2356  if (num_pivots >= 5) break;
2357  if (candidates.empty()) break;
2358 
2359  // Pick a random one and remove it from the list.
2360  const int index =
2361  std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2362  const ColIndex entering_col = candidates[index];
2363  std::swap(candidates[index], candidates.back());
2364  candidates.pop_back();
2365 
2366  // We need the entering variable to move in the correct direction.
2367  Fractional fake_rc = 1.0;
2368  if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2369  CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2370  fake_rc = -1.0;
2371  }
2372 
2373  // Compute the direction and by how much we can move along it.
2374  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2375  ComputeDirection(entering_col);
2376  Fractional step_length;
2377  RowIndex leaving_row;
2379  bool local_refactorize = false;
2381  ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2382  &leaving_row, &step_length, &target_bound));
2383 
2384  if (local_refactorize) continue;
2385  if (step_length == kInfinity || step_length == -kInfinity) continue;
2386  if (std::abs(step_length) <= 1e-6) continue;
2387  if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2388  continue;
2389  }
2390  const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2391 
2392  // Evaluate if pivot reduce the fractionality of the basis.
2393  //
2394  // TODO(user): Count with more weight variable with a small domain, i.e.
2395  // binary variable, compared to a variable in [0, 1k] ?
2396  const auto get_diff = [this](ColIndex col, Fractional old_value,
2397  Fractional new_value) {
2398  if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2399  return 0.0;
2400  }
2401  const Fractional s = integrality_scale_[col];
2402  return (std::abs(new_value * s - std::round(new_value * s)) -
2403  std::abs(old_value * s - std::round(old_value * s)));
2404  };
2405  Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2406  variable_values_.Get(entering_col) + step);
2407  for (const auto e : direction_) {
2408  const ColIndex col = basis_[e.row()];
2409  const Fractional old_value = variable_values_.Get(col);
2410  const Fractional new_value = old_value - e.coefficient() * step;
2411  diff += get_diff(col, old_value, new_value);
2412  }
2413 
2414  // Ignore low decrease in integrality.
2415  if (diff > -1e-2) continue;
2416  total_gain -= diff;
2417 
2418  // We perform the change.
2419  num_pivots++;
2420  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2421 
2422  // This is a bound flip of the entering column.
2423  if (leaving_row == kInvalidRow) {
2424  if (step > 0.0) {
2425  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2427  } else if (step < 0.0) {
2428  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2430  }
2431  continue;
2432  }
2433 
2434  // Perform the pivot.
2435  const ColIndex leaving_col = basis_[leaving_row];
2436  update_row_.ComputeUpdateRow(leaving_row);
2437  primal_edge_norms_.UpdateBeforeBasisPivot(
2438  entering_col, leaving_col, leaving_row, direction_, &update_row_);
2439  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2440  &update_row_);
2441 
2442  const Fractional dir = -direction_[leaving_row] * step;
2443  const bool is_degenerate =
2444  (dir == 0.0) ||
2445  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2446  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2447  if (!is_degenerate) {
2448  variable_values_.Set(leaving_col, target_bound);
2449  }
2451  UpdateAndPivot(entering_col, leaving_row, target_bound));
2452  }
2453 
2454  VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2455  return Status::OK();
2456 }
2457 
2458 // Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2459 // x an n-vector.
2460 //
2461 // x is split in two parts x_B and x_N (B standing for basis).
2462 // In the same way, A is split in A_B (also known as B) and A_N, and
2463 // c is split into c_B and c_N.
2464 //
2465 // The goal is to minimize c_B.x_B + c_N.x_N
2466 // subject to B.x_B + A_N.x_N = 0
2467 // and x_lower <= x <= x_upper.
2468 //
2469 // To minimize c.x, at each iteration a variable from x_N is selected to
2470 // enter the basis, and a variable from x_B is selected to leave the basis.
2471 // To avoid explicit inversion of B, the algorithm solves two sub-systems:
2472 // y.B = c_B and B.d = a (a being the entering column).
2473 Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
2475  Cleanup update_deterministic_time_on_return(
2476  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2477  num_consecutive_degenerate_iterations_ = 0;
2478  DisplayIterationInfo();
2479  bool refactorize = false;
2480 
2481  // At this point, we are not sure the prices are always up to date, so
2482  // lets always reset them for the first iteration below.
2483  primal_prices_.ForceRecomputation();
2484 
2485  if (feasibility_phase_) {
2486  // Initialize the primal phase-I objective.
2487  // Note that this temporarily erases the problem objective.
2488  objective_.AssignToZero(num_cols_);
2489  variable_values_.UpdatePrimalPhaseICosts(
2490  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2491  reduced_costs_.ResetForNewObjective();
2492  }
2493 
2494  while (true) {
2495  // TODO(user): we may loop a bit more than the actual number of iteration.
2496  // fix.
2498  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2499  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2500  if (basis_factorization_.IsRefactorized()) {
2501  CorrectErrorsOnVariableValues();
2502  DisplayIterationInfo();
2503 
2504  if (feasibility_phase_) {
2505  // Since the variable values may have been recomputed, we need to
2506  // recompute the primal infeasible variables and update their costs.
2507  if (variable_values_.UpdatePrimalPhaseICosts(
2508  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2509  &objective_)) {
2510  reduced_costs_.ResetForNewObjective();
2511  }
2512  }
2513 
2514  // Computing the objective at each iteration takes time, so we just
2515  // check the limit when the basis is refactorized.
2516  if (!feasibility_phase_ &&
2517  ComputeObjectiveValue() < primal_objective_limit_) {
2518  VLOG(1) << "Stopping the primal simplex because"
2519  << " the objective limit " << primal_objective_limit_
2520  << " has been reached.";
2521  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2522  objective_limit_reached_ = true;
2523  return Status::OK();
2524  }
2525  } else if (feasibility_phase_) {
2526  // Note that direction_.non_zeros contains the positions of the basic
2527  // variables whose values were updated during the last iteration.
2528  if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2529  &objective_)) {
2530  reduced_costs_.ResetForNewObjective();
2531  }
2532  }
2533 
2534  Fractional reduced_cost = 0.0;
2535  const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2536  if (entering_col == kInvalidCol) {
2537  if (reduced_costs_.AreReducedCostsPrecise() &&
2538  basis_factorization_.IsRefactorized()) {
2539  if (feasibility_phase_) {
2540  const Fractional primal_infeasibility =
2541  variable_values_.ComputeMaximumPrimalInfeasibility();
2542  if (primal_infeasibility <
2543  parameters_.primal_feasibility_tolerance()) {
2544  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2545  } else {
2546  VLOG(1) << "Infeasible problem! infeasibility = "
2547  << primal_infeasibility;
2548  problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2549  }
2550  } else {
2551  problem_status_ = ProblemStatus::OPTIMAL;
2552  }
2553  break;
2554  } else {
2555  VLOG(1) << "Optimal reached, double checking...";
2556  reduced_costs_.MakeReducedCostsPrecise();
2557  refactorize = true;
2558  continue;
2559  }
2560  } else {
2561  reduced_cost = reduced_costs_.GetReducedCosts()[entering_col];
2562  DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2563 
2564  // Solve the system B.d = a with a the entering column.
2565  ComputeDirection(entering_col);
2566  primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
2567  direction_);
2568  if (!reduced_costs_.TestEnteringReducedCostPrecision(
2569  entering_col, direction_, &reduced_cost)) {
2570  primal_prices_.RecomputePriceAt(entering_col);
2571  VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
2572  << reduced_cost;
2573  continue;
2574  }
2575  primal_prices_.RecomputePriceAt(entering_col);
2576  }
2577 
2578  // This test takes place after the check for optimality/feasibility because
2579  // when running with 0 iterations, we still want to report
2580  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2581  // case at the beginning of the algorithm.
2582  AdvanceDeterministicTime(time_limit);
2583  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2584  time_limit->LimitReached()) {
2585  break;
2586  }
2587 
2588  Fractional step_length;
2589  RowIndex leaving_row;
2591  if (feasibility_phase_) {
2592  PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2593  &refactorize, &leaving_row,
2594  &step_length, &target_bound);
2595  } else {
2597  ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2598  &leaving_row, &step_length, &target_bound));
2599  }
2600  if (refactorize) continue;
2601 
2602  if (step_length == kInfinity || step_length == -kInfinity) {
2603  if (!basis_factorization_.IsRefactorized() ||
2604  !reduced_costs_.AreReducedCostsPrecise()) {
2605  VLOG(1) << "Infinite step length, double checking...";
2606  reduced_costs_.MakeReducedCostsPrecise();
2607  continue;
2608  }
2609  if (feasibility_phase_) {
2610  // This shouldn't happen by construction.
2611  VLOG(1) << "Unbounded feasibility problem !?";
2612  problem_status_ = ProblemStatus::ABNORMAL;
2613  } else {
2614  VLOG(1) << "Unbounded problem.";
2615  problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2616  solution_primal_ray_.AssignToZero(num_cols_);
2617  for (RowIndex row(0); row < num_rows_; ++row) {
2618  const ColIndex col = basis_[row];
2619  solution_primal_ray_[col] = -direction_[row];
2620  }
2621  solution_primal_ray_[entering_col] = 1.0;
2622  if (step_length == -kInfinity) {
2623  ChangeSign(&solution_primal_ray_);
2624  }
2625  }
2626  break;
2627  }
2628 
2629  Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2630  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2631  // For phase-I we currently always set the leaving variable to its exact
2632  // bound even if by doing so we may take a small step in the wrong
2633  // direction and may increase the overall infeasibility.
2634  //
2635  // TODO(user): Investigate alternatives even if this seems to work well in
2636  // practice. Note that the final returned solution will have the property
2637  // that all non-basic variables are at their exact bound, so it is nice
2638  // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2639  // this property cannot be found.
2640  step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2641  }
2642 
2643  // Store the leaving_col before basis_ change.
2644  const ColIndex leaving_col =
2645  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2646 
2647  // An iteration is called 'degenerate' if the leaving variable is already
2648  // primal-infeasible and we make it even more infeasible or if we do a zero
2649  // step.
2650  bool is_degenerate = false;
2651  if (leaving_row != kInvalidRow) {
2652  Fractional dir = -direction_[leaving_row] * step;
2653  is_degenerate =
2654  (dir == 0.0) ||
2655  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2656  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2657 
2658  // If the iteration is not degenerate, the leaving variable should go to
2659  // its exact target bound (it is how the step is computed).
2660  if (!is_degenerate) {
2661  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2662  target_bound));
2663  }
2664  }
2665 
2666  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2667  if (leaving_row != kInvalidRow) {
2668  // Important: the norm must be updated before the reduced_cost.
2669  primal_edge_norms_.UpdateBeforeBasisPivot(
2670  entering_col, basis_[leaving_row], leaving_row, direction_,
2671  &update_row_);
2672  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2673  direction_, &update_row_);
2674  primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
2675  if (!is_degenerate) {
2676  // On a non-degenerate iteration, the leaving variable should be at its
2677  // exact bound. This corrects an eventual small numerical error since
2678  // 'value + direction * step' where step is
2679  // '(target_bound - value) / direction'
2680  // may be slighlty different from target_bound.
2681  variable_values_.Set(leaving_col, target_bound);
2682  }
2684  UpdateAndPivot(entering_col, leaving_row, target_bound));
2686  if (is_degenerate) {
2687  timer.AlsoUpdate(&iteration_stats_.degenerate);
2688  } else {
2689  timer.AlsoUpdate(&iteration_stats_.normal);
2690  }
2691  });
2692  } else {
2693  // Bound flip. This makes sure that the flipping variable is at its bound
2694  // and has the correct status.
2696  variables_info_.GetTypeRow()[entering_col]);
2697  if (step > 0.0) {
2698  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2700  } else if (step < 0.0) {
2701  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2703  }
2704  primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2705  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2706  }
2707 
2708  if (feasibility_phase_ && leaving_row != kInvalidRow) {
2709  // Set the leaving variable to its exact bound.
2710  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2711 
2712  // Change the objective value of the leaving variable to zero.
2713  reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2714  &objective_[leaving_col]);
2715  primal_prices_.RecomputePriceAt(leaving_col);
2716  }
2717 
2718  // Stats about consecutive degenerate iterations.
2719  if (step_length == 0.0) {
2720  num_consecutive_degenerate_iterations_++;
2721  } else {
2722  if (num_consecutive_degenerate_iterations_ > 0) {
2723  iteration_stats_.degenerate_run_size.Add(
2724  num_consecutive_degenerate_iterations_);
2725  num_consecutive_degenerate_iterations_ = 0;
2726  }
2727  }
2728  ++num_iterations_;
2729  }
2730  if (num_consecutive_degenerate_iterations_ > 0) {
2731  iteration_stats_.degenerate_run_size.Add(
2732  num_consecutive_degenerate_iterations_);
2733  }
2734  return Status::OK();
2735 }
2736 
2737 // TODO(user): Two other approaches for the phase I described in Koberstein's
2738 // PhD thesis seem worth trying at some point:
2739 // - The subproblem approach, which enables one to use a normal phase II dual,
2740 // but requires an efficient bound-flipping ratio test since the new problem
2741 // has all its variables boxed. This one is implemented now, but require
2742 // a bit more tunning.
2743 // - Pan's method, which is really fast but have no theoretical guarantee of
2744 // terminating and thus needs to use one of the other methods as a fallback if
2745 // it fails to make progress.
2746 //
2747 // Note that the returned status applies to the primal problem!
2748 Status RevisedSimplex::DualMinimize(bool feasibility_phase,
2749  TimeLimit* time_limit) {
2750  Cleanup update_deterministic_time_on_return(
2751  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2752  num_consecutive_degenerate_iterations_ = 0;
2753  bool refactorize = false;
2754 
2755  bound_flip_candidates_.clear();
2756 
2757  // Leaving variable.
2758  RowIndex leaving_row;
2759  Fractional cost_variation;
2761 
2762  // Entering variable.
2763  ColIndex entering_col;
2764 
2765  while (true) {
2766  // TODO(user): we may loop a bit more than the actual number of iteration.
2767  // fix.
2769  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2770 
2771  const bool old_refactorize_value = refactorize;
2772  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2773 
2774  // If the basis is refactorized, we recompute all the values in order to
2775  // have a good precision.
2776  if (basis_factorization_.IsRefactorized()) {
2777  // We do not want to recompute the reduced costs too often, this is
2778  // because that may break the overall direction taken by the last steps
2779  // and may lead to less improvement on degenerate problems.
2780  //
2781  // During phase-I, we do want the reduced costs to be as precise as
2782  // possible. TODO(user): Investigate why and fix the TODO in
2783  // PermuteBasis().
2784  //
2785  // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
2786  // do recompute them, it is better to do that first.
2787  if (!feasibility_phase && !reduced_costs_.AreReducedCostsRecomputed() &&
2788  !old_refactorize_value) {
2789  const Fractional dual_residual_error =
2790  reduced_costs_.ComputeMaximumDualResidual();
2791  if (dual_residual_error >
2792  reduced_costs_.GetDualFeasibilityTolerance()) {
2793  VLOG(1) << "Recomputing reduced costs. Dual residual = "
2794  << dual_residual_error;
2795  reduced_costs_.MakeReducedCostsPrecise();
2796  }
2797  } else {
2798  reduced_costs_.MakeReducedCostsPrecise();
2799  }
2800 
2801  // TODO(user): Make RecomputeBasicVariableValues() do nothing
2802  // if it was already recomputed on a refactorized basis. This is the
2803  // same behavior as MakeReducedCostsPrecise().
2804  //
2805  // TODO(user): Do not recompute the variable values each time we
2806  // refactorize the matrix, like for the reduced costs? That may lead to
2807  // a worse behavior than keeping the "imprecise" version and only
2808  // recomputing it when its precision is above a threshold.
2809  if (!feasibility_phase) {
2810  MakeBoxedVariableDualFeasible(
2811  variables_info_.GetNonBasicBoxedVariables(),
2812  /*update_basic_values=*/false);
2813  variable_values_.RecomputeBasicVariableValues();
2814  variable_values_.RecomputeDualPrices();
2815 
2816  // Computing the objective at each iteration takes time, so we just
2817  // check the limit when the basis is refactorized.
2818  //
2819  // Hack: We need the feasibility_phase_ here and not the local variable
2820  // because this must not be checked for the dual phase I algo that use
2821  // the same code as the dual phase II (i.e. the local feasibility_phase
2822  // will be false).
2823  if (!feasibility_phase_ && dual_objective_limit_ != kInfinity &&
2824  ComputeObjectiveValue() > dual_objective_limit_) {
2825  VLOG(1) << "Stopping the dual simplex because"
2826  << " the objective limit " << dual_objective_limit_
2827  << " has been reached.";
2828  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2829  objective_limit_reached_ = true;
2830  return Status::OK();
2831  }
2832  }
2833 
2834  reduced_costs_.GetReducedCosts();
2835  DisplayIterationInfo();
2836  } else {
2837  // Updates from the previous iteration that can be skipped if we
2838  // recompute everything (see other case above).
2839  if (!feasibility_phase) {
2840  // Make sure the boxed variables are dual-feasible before choosing the
2841  // leaving variable row.
2842  MakeBoxedVariableDualFeasible(bound_flip_candidates_,
2843  /*update_basic_values=*/true);
2844  bound_flip_candidates_.clear();
2845 
2846  // The direction_.non_zeros contains the positions for which the basic
2847  // variable value was changed during the previous iterations.
2848  variable_values_.UpdateDualPrices(direction_.non_zeros);
2849  }
2850  }
2851 
2852  if (feasibility_phase) {
2853  GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
2854  &leaving_row, &cost_variation, &target_bound));
2855  } else {
2856  GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
2857  &leaving_row, &cost_variation, &target_bound));
2858  }
2859  if (leaving_row == kInvalidRow) {
2860  // TODO(user): integrate this with the main "re-optimization" loop.
2861  // Also distinguish cost perturbation and shifts?
2862  if (!basis_factorization_.IsRefactorized() ||
2863  reduced_costs_.HasCostShift()) {
2864  VLOG(1) << "Optimal reached, double checking.";
2865  reduced_costs_.ClearAndRemoveCostShifts();
2866  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
2867  refactorize = true;
2868  continue;
2869  }
2870  if (feasibility_phase) {
2871  // Note that since the basis is refactorized, the variable values
2872  // will be recomputed at the beginning of the second phase. The boxed
2873  // variable values will also be corrected by
2874  // MakeBoxedVariableDualFeasible().
2875  if (num_dual_infeasible_positions_ == 0) {
2876  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2877  } else {
2878  VLOG(1) << "DUAL infeasible in dual phase I.";
2879  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
2880  }
2881  } else {
2882  problem_status_ = ProblemStatus::OPTIMAL;
2883  }
2884  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
2885  return Status::OK();
2886  }
2887 
2888  update_row_.ComputeUpdateRow(leaving_row);
2889  if (feasibility_phase) {
2891  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
2892  &entering_col));
2893  } else {
2895  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
2896  &bound_flip_candidates_, &entering_col));
2897  }
2898 
2899  // No entering_col: dual unbounded (i.e. primal infeasible).
2900  if (entering_col == kInvalidCol) {
2901  if (!reduced_costs_.AreReducedCostsPrecise()) {
2902  VLOG(1) << "No entering column. Double checking...";
2903  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
2904  refactorize = true;
2905  continue;
2906  }
2907  DCHECK(basis_factorization_.IsRefactorized());
2908  if (feasibility_phase) {
2909  // This shouldn't happen by construction.
2910  VLOG(1) << "Unbounded dual feasibility problem !?";
2911  problem_status_ = ProblemStatus::ABNORMAL;
2912  } else {
2913  problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
2914  solution_dual_ray_ =
2915  Transpose(update_row_.GetUnitRowLeftInverse().values);
2916  update_row_.RecomputeFullUpdateRow(leaving_row);
2917  solution_dual_ray_row_combination_.AssignToZero(num_cols_);
2918  for (const ColIndex col : update_row_.GetNonZeroPositions()) {
2919  solution_dual_ray_row_combination_[col] =
2920  update_row_.GetCoefficient(col);
2921  }
2922  if (cost_variation < 0) {
2923  ChangeSign(&solution_dual_ray_);
2924  ChangeSign(&solution_dual_ray_row_combination_);
2925  }
2926  }
2927  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
2928  return Status::OK();
2929  }
2930 
2931  // If the coefficient is too small, we recompute the reduced costs if not
2932  // already done. This is an extra heuristic to avoid computing the direction
2933  // If the pivot is small. But the real recomputation step is just below.
2934  const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
2935  if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
2936  !reduced_costs_.AreReducedCostsPrecise()) {
2937  VLOG(1) << "Trying not to pivot by " << entering_coeff;
2938  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
2939  refactorize = true;
2940  continue;
2941  }
2942 
2943  ComputeDirection(entering_col);
2944 
2945  // If the pivot is small compared to others in the direction_ vector we try
2946  // to recompute everything. If we cannot, then note that
2947  // DualChooseEnteringColumn() should guaranteed that the pivot is not too
2948  // small when everything has already been recomputed.
2949  if (std::abs(direction_[leaving_row]) <
2950  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
2951  if (!reduced_costs_.AreReducedCostsPrecise()) {
2952  VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
2953  << direction_[leaving_row]
2954  << ") because the direction has a norm of "
2955  << direction_infinity_norm_;
2956  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
2957  refactorize = true;
2958  continue;
2959  }
2960  }
2961 
2962  // This test takes place after the check for optimality/feasibility because
2963  // when running with 0 iterations, we still want to report
2964  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2965  // case at the beginning of the algorithm.
2966  AdvanceDeterministicTime(time_limit);
2967  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2968  time_limit->LimitReached()) {
2969  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
2970  return Status::OK();
2971  }
2972 
2973  // Before we update the reduced costs, if its sign is already dual
2974  // infeasible and the update direction will make it worse we make sure the
2975  // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
2976  // goes in the wrong direction (a few experiments seems to indicate that
2977  // this is not a good idea). See comment at the top of UpdateReducedCosts().
2978  //
2979  // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
2980  // order to do a non-zero step. This helps on degenerate problems. Like the
2981  // pertubation, we will remove all these shifts at the end.
2982  const bool increasing_rc_is_needed =
2983  (cost_variation > 0.0) == (entering_coeff > 0.0);
2984  reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
2985 
2987  if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
2988  entering_col)) {
2989  timer.AlsoUpdate(&iteration_stats_.degenerate);
2990  } else {
2991  timer.AlsoUpdate(&iteration_stats_.normal);
2992  }
2993  });
2994 
2995  // Update basis. Note that direction_ is already computed.
2996  //
2997  // TODO(user): this is pretty much the same in the primal or dual code.
2998  // We just need to know to what bound the leaving variable will be set to.
2999  // Factorize more common code?
3000  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3001  &update_row_);
3002  dual_edge_norms_.UpdateBeforeBasisPivot(
3003  entering_col, leaving_row, direction_,
3004  update_row_.GetUnitRowLeftInverse());
3005 
3006  // During phase I, we do not need the basic variable values at all.
3007  // Important: The norm should be updated before that.
3008  Fractional primal_step = 0.0;
3009  if (feasibility_phase) {
3010  DualPhaseIUpdatePrice(leaving_row, entering_col);
3011  } else {
3012  primal_step =
3013  ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3014  variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3015  }
3016 
3017  // It is important to do the actual pivot after the update above!
3018  const ColIndex leaving_col = basis_[leaving_row];
3020  UpdateAndPivot(entering_col, leaving_row, target_bound));
3021 
3022  // This makes sure the leaving variable is at its exact bound. Tests
3023  // indicate that this makes everything more stable. Note also that during
3024  // the feasibility phase, the variable values are not used, but that the
3025  // correct non-basic variable value are needed at the end.
3026  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3027 
3028  // This is slow, but otherwise we have a really bad precision on the
3029  // variable values ...
3030  if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
3031  1.0) {
3032  refactorize = true;
3033  }
3034  ++num_iterations_;
3035  }
3036  return Status::OK();
3037 }
3038 
3039 ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
3040  // TODO(user): Remove this function.
3042  return first_slack_col_ + RowToColIndex(row);
3043 }
3044 
3046  std::string result;
3047  result.append(iteration_stats_.StatString());
3048  result.append(ratio_test_stats_.StatString());
3049  result.append(entering_variable_.StatString());
3050  result.append(dual_prices_.StatString());
3051  result.append(reduced_costs_.StatString());
3052  result.append(variable_values_.StatString());
3053  result.append(primal_edge_norms_.StatString());
3054  result.append(dual_edge_norms_.StatString());
3055  result.append(update_row_.StatString());
3056  result.append(basis_factorization_.StatString());
3057  result.append(function_stats_.StatString());
3058  return result;
3059 }
3060 
3061 void RevisedSimplex::DisplayAllStats() {
3062  if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3063  absl::FPrintF(stderr, "%s", StatString());
3064  absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3065  }
3066 }
3067 
3068 Fractional RevisedSimplex::ComputeObjectiveValue() const {
3069  SCOPED_TIME_STAT(&function_stats_);
3070  return PreciseScalarProduct(objective_,
3071  Transpose(variable_values_.GetDenseRow()));
3072 }
3073 
3074 Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3075  SCOPED_TIME_STAT(&function_stats_);
3076  const Fractional sum = PreciseScalarProduct(
3077  objective_, Transpose(variable_values_.GetDenseRow()));
3078  return objective_scaling_factor_ * (sum + objective_offset_);
3079 }
3080 
3081 void RevisedSimplex::SetParameters(const GlopParameters& parameters) {
3082  SCOPED_TIME_STAT(&function_stats_);
3083  deterministic_random_.seed(parameters.random_seed());
3084 
3085  initial_parameters_ = parameters;
3086  parameters_ = parameters;
3087  PropagateParameters();
3088 }
3089 
3090 void RevisedSimplex::PropagateParameters() {
3091  SCOPED_TIME_STAT(&function_stats_);
3092  basis_factorization_.SetParameters(parameters_);
3093  entering_variable_.SetParameters(parameters_);
3094  reduced_costs_.SetParameters(parameters_);
3095  dual_edge_norms_.SetParameters(parameters_);
3096  primal_edge_norms_.SetParameters(parameters_);
3097  update_row_.SetParameters(parameters_);
3098 }
3099 
3100 void RevisedSimplex::DisplayIterationInfo() const {
3101  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3102  Fractional objective;
3103  std::string name;
3104  if (!feasibility_phase_) {
3105  // Note that in the dual phase II, ComputeObjectiveValue() is also
3106  // computing the dual objective even if it uses the variable values. This
3107  // is because if we modify the bounds to make the problem primal-feasible,
3108  // we are at the optimal and hence the two objectives are the same.
3109  objective = ComputeInitialProblemObjectiveValue();
3110  name = "objective";
3111  } else if (parameters_.use_dual_simplex()) {
3112  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
3113  objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
3114  } else {
3115  // The internal objective of the transformed problem is the negation
3116  // of the sum of the dual infeasibility of the original problem.
3117  objective = -PreciseScalarProduct(
3118  objective_, Transpose(variable_values_.GetDenseRow()));
3119  }
3120  name = "sum_dual_infeasibilities";
3121  } else {
3122  objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
3123  name = "sum_primal_infeasibilities";
3124  }
3125 
3126  const int iter = feasibility_phase_
3127  ? num_iterations_
3128  : num_iterations_ - num_feasibility_iterations_;
3129  LOG(INFO) << (feasibility_phase_ ? "Feasibility" : "Optimization")
3130  << " phase, iteration # " << iter << ", " << name << " = "
3131  << absl::StrFormat("%.15E", objective);
3132  }
3133 }
3134 
3135 void RevisedSimplex::DisplayErrors() const {
3136  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3137  LOG(INFO) << "Primal infeasibility (bounds) = "
3138  << variable_values_.ComputeMaximumPrimalInfeasibility();
3139  LOG(INFO) << "Primal residual |A.x - b| = "
3140  << variable_values_.ComputeMaximumPrimalResidual();
3141  LOG(INFO) << "Dual infeasibility (reduced costs) = "
3142  << reduced_costs_.ComputeMaximumDualInfeasibility();
3143  LOG(INFO) << "Dual residual |c_B - y.B| = "
3144  << reduced_costs_.ComputeMaximumDualResidual();
3145  }
3146 }
3147 
3148 namespace {
3149 
3150 std::string StringifyMonomialWithFlags(const Fractional a,
3151  const std::string& x) {
3152  return StringifyMonomial(
3153  a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3154 }
3155 
3156 // Returns a string representing the rational approximation of x or a decimal
3157 // approximation of x according to
3158 // absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3159 std::string StringifyWithFlags(const Fractional x) {
3160  return Stringify(x,
3161  absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3162 }
3163 
3164 } // namespace
3165 
3166 std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3167  std::string output;
3168  VariableType variable_type = variables_info_.GetTypeRow()[col];
3169  VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3170  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3171  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3172  absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3173  variable_name_[col],
3174  StringifyWithFlags(variable_values_.Get(col)),
3175  GetVariableStatusString(variable_status),
3176  GetVariableTypeString(variable_type),
3177  StringifyWithFlags(lower_bounds[col]),
3178  StringifyWithFlags(upper_bounds[col]));
3179  return output;
3180 }
3181 
3182 void RevisedSimplex::DisplayInfoOnVariables() const {
3183  if (VLOG_IS_ON(3)) {
3184  for (ColIndex col(0); col < num_cols_; ++col) {
3185  const Fractional variable_value = variable_values_.Get(col);
3186  const Fractional objective_coefficient = objective_[col];
3187  const Fractional objective_contribution =
3188  objective_coefficient * variable_value;
3189  VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3190  << StringifyWithFlags(variable_value) << " * "
3191  << StringifyWithFlags(objective_coefficient)
3192  << "(obj) = " << StringifyWithFlags(objective_contribution);
3193  }
3194  VLOG(3) << "------";
3195  }
3196 }
3197 
3198 void RevisedSimplex::DisplayVariableBounds() {
3199  if (VLOG_IS_ON(3)) {
3200  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3201  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3202  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3203  for (ColIndex col(0); col < num_cols_; ++col) {
3204  switch (variable_type[col]) {
3206  break;
3208  VLOG(3) << variable_name_[col]
3209  << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
3210  break;
3212  VLOG(3) << variable_name_[col]
3213  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3214  break;
3216  VLOG(3) << StringifyWithFlags(lower_bounds[col])
3217  << " <= " << variable_name_[col]
3218  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3219  break;
3221  VLOG(3) << variable_name_[col] << " = "
3222  << StringifyWithFlags(lower_bounds[col]) << ";";
3223  break;
3224  default: // This should never happen.
3225  LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3226  break;
3227  }
3228  }
3229  }
3230 }
3231 
3233  const DenseRow* column_scales) {
3234  absl::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3235  for (ColIndex col(0); col < num_cols_; ++col) {
3236  ComputeDirection(col);
3237  for (const auto e : direction_) {
3238  if (column_scales == nullptr) {
3239  dictionary[e.row()].SetCoefficient(col, e.coefficient());
3240  continue;
3241  }
3242  const Fractional numerator =
3243  col < column_scales->size() ? (*column_scales)[col] : 1.0;
3244  const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3245  ? (*column_scales)[GetBasis(e.row())]
3246  : 1.0;
3247  dictionary[e.row()].SetCoefficient(
3248  col, direction_[e.row()] * (numerator / denominator));
3249  }
3250  }
3251  return dictionary;
3252 }
3253 
3255  const LinearProgram& linear_program, const BasisState& state) {
3256  LoadStateForNextSolve(state);
3257  Status status = Initialize(linear_program);
3258  if (status.ok()) {
3259  variable_values_.RecomputeBasicVariableValues();
3260  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3261  }
3262 }
3263 
3264 void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3265  if (VLOG_IS_ON(3)) {
3266  // This function has a complexity in O(num_non_zeros_in_matrix).
3267  DisplayInfoOnVariables();
3268 
3269  std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3270  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3271  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3272  absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3273  variable_name_[col]));
3274  }
3275  VLOG(3) << output << ";";
3276 
3277  const RevisedSimplexDictionary dictionary(nullptr, this);
3278  RowIndex r(0);
3279  for (const SparseRow& row : dictionary) {
3280  output.clear();
3281  ColIndex basic_col = basis_[r];
3282  absl::StrAppend(&output, variable_name_[basic_col], " = ",
3283  StringifyWithFlags(variable_values_.Get(basic_col)));
3284  for (const SparseRowEntry e : row) {
3285  if (e.col() != basic_col) {
3286  absl::StrAppend(&output,
3287  StringifyMonomialWithFlags(e.coefficient(),
3288  variable_name_[e.col()]));
3289  }
3290  }
3291  VLOG(3) << output << ";";
3292  }
3293  VLOG(3) << "------";
3294  DisplayVariableBounds();
3295  ++r;
3296  }
3297 }
3298 
3299 void RevisedSimplex::DisplayProblem() const {
3300  // This function has a complexity in O(num_rows * num_cols *
3301  // num_non_zeros_in_row).
3302  if (VLOG_IS_ON(3)) {
3303  DisplayInfoOnVariables();
3304  std::string output = "min: ";
3305  bool has_objective = false;
3306  for (ColIndex col(0); col < num_cols_; ++col) {
3307  const Fractional coeff = objective_[col];
3308  has_objective |= (coeff != 0.0);
3309  absl::StrAppend(&output,
3310  StringifyMonomialWithFlags(coeff, variable_name_[col]));
3311  }
3312  if (!has_objective) {
3313  absl::StrAppend(&output, " 0");
3314  }
3315  VLOG(3) << output << ";";
3316  for (RowIndex row(0); row < num_rows_; ++row) {
3317  output = "";
3318  for (ColIndex col(0); col < num_cols_; ++col) {
3319  absl::StrAppend(&output,
3320  StringifyMonomialWithFlags(
3321  compact_matrix_.column(col).LookUpCoefficient(row),
3322  variable_name_[col]));
3323  }
3324  VLOG(3) << output << " = 0;";
3325  }
3326  VLOG(3) << "------";
3327  }
3328 }
3329 
3330 void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3331  DCHECK(time_limit != nullptr);
3332  const double current_deterministic_time = DeterministicTime();
3333  const double deterministic_time_delta =
3334  current_deterministic_time - last_deterministic_time_update_;
3335  time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3336  last_deterministic_time_update_ = current_deterministic_time;
3337 }
3338 
3339 #undef DCHECK_COL_BOUNDS
3340 #undef DCHECK_ROW_BOUNDS
3341 
3342 } // namespace glop
3343 } // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:498
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:895
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:894
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:897
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:898
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:896
#define LOG(severity)
Definition: base/logging.h:423
#define DCHECK(condition)
Definition: base/logging.h:892
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:893
#define VLOG(verboselevel)
Definition: base/logging.h:986
bool empty() const
void push_back(const value_type &x)
bool IsSet(IndexType i) const
Definition: bitset.h:485
std::string StatString() const
Definition: stats.cc:71
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
const ColumnPermutation & GetColumnPermutation() const
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
void SetParameters(const GlopParameters &parameters)
Fractional LookUpCoefficient(RowIndex index) const
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse_column.h:83
RowIndex EntryRow(EntryIndex i) const
Definition: sparse_column.h:89
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:418
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:405
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition: sparse.cc:456
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:393
void PopulateFromMatrixView(const MatrixView &input)
Definition: sparse.cc:437
ColumnView column(ColIndex col) const
Definition: sparse.h:364
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
void SetParameters(const GlopParameters &parameters)
void AddOrUpdate(Index position, Fractional value)
Definition: pricing.h:185
void DenseAddOrUpdate(Index position, Fractional value)
Definition: pricing.h:176
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col)
void SetParameters(const GlopParameters &parameters)
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col)
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
void SetPricingRule(GlopParameters::PricingRule rule)
void TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void SetParameters(const GlopParameters &parameters)
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
bool TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction, Fractional *reduced_cost)
bool IsValidPrimalEnteringCandidate(ColIndex col) const
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables() const
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
Fractional GetDualFeasibilityTolerance() const
Fractional ComputeMaximumDualInfeasibility() const
Fractional ComputeSumOfDualInfeasibilities() const
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
void SetParameters(const GlopParameters &parameters)
const DenseRow & GetDualRayRowCombination() const
Fractional GetVariableValue(ColIndex col) const
void SetIntegralityScale(ColIndex col, Fractional scale)
Fractional GetConstraintActivity(RowIndex row) const
VariableStatus GetVariableStatus(ColIndex col) const
Fractional GetReducedCost(ColIndex col) const
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
Fractional GetDualValue(RowIndex row) const
ConstraintStatus GetConstraintStatus(RowIndex row) const
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
void LoadStateForNextSolve(const BasisState &state)
const BasisFactorization & GetBasisFactorization() const
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
static const Status OK()
Definition: status.h:54
const ScatteredRow & GetUnitRowLeftInverse() const
Definition: update_row.cc:45
void RecomputeFullUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:242
const Fractional GetCoefficient(ColIndex col) const
Definition: update_row.h:68
void ComputeUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:72
void SetParameters(const GlopParameters &parameters)
Definition: update_row.cc:172
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:168
std::string StatString() const
Definition: update_row.h:80
void Set(ColIndex col, Fractional value)
void UpdateGivenNonBasicVariables(const std::vector< ColIndex > &cols_to_update, bool update_basic_variables)
void UpdateDualPrices(const std::vector< RowIndex > &row)
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
const Fractional Get(ColIndex col) const
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
const DenseBitRow & GetIsBasicBitRow() const
const DenseRow & GetVariableUpperBounds() const
const DenseBitRow & GetNonBasicBoxedVariables() const
Fractional GetBoundDifference(ColIndex col) const
const DenseBitRow & GetCanIncreaseBitRow() const
const DenseBitRow & GetCanDecreaseBitRow() const
const VariableTypeRow & GetTypeRow() const
void EndDualPhaseI(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
const DenseRow & GetVariableLowerBounds() const
const DenseBitRow & GetNotBasicBitRow() const
const VariableStatusRow & GetStatusRow() const
const DenseBitRow & GetIsRelevantBitRow() const
void InitializeFromBasisState(ColIndex first_slack, ColIndex num_new_cols, const BasisState &state)
bool LoadBoundsAndReturnTrueIfUnchanged(const DenseRow &new_lower_bounds, const DenseRow &new_upper_bounds)
void TransformToDualPhaseIProblem(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
int64_t a
SatParameters parameters
SharedTimeLimit * time_limit
const std::string name
int64_t value
double upper_bound
double lower_bound
const int WARNING
Definition: log_severity.h:31
const int INFO
Definition: log_severity.h:31
const bool DEBUG_MODE
Definition: macros.h:24
ColIndex col
Definition: markowitz.cc:183
std::string StringifyMonomial(const Fractional a, const std::string &x, bool fraction)
bool IsRightMostSquareMatrixIdentity(const SparseMatrix &matrix)
Fractional Square(Fractional f)
Fractional InfinityNorm(const DenseColumn &v)
const RowIndex kInvalidRow(-1)
std::string Stringify(const Fractional x, bool fraction)
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Definition: lp_types.h:318
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:300
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
Index ColToIntIndex(ColIndex col)
Definition: lp_types.h:55
Permutation< ColIndex > ColumnPermutation
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Definition: lp_types.h:321
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
bool IsFinite(Fractional value)
Definition: lp_types.h:91
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
const DenseRow & Transpose(const DenseColumn &col)
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:324
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
constexpr const uint64_t kDeterministicSeed
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition: lp_types.h:343
std::string GetVariableTypeString(VariableType variable_type)
Definition: lp_types.cc:52
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:329
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Definition: lp_types.h:332
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:380
std::string GetVariableStatusString(VariableStatus status)
Definition: lp_types.cc:71
const double kInfinity
Definition: lp_types.h:84
const ColIndex kInvalidCol(-1)
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:263
Collection of objects used to extend the Constraint Solver library.
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition: stats.h:434
int index
Definition: pack.cc:509
if(!yyg->yy_init)
Definition: parser.yy.cc:965
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
Fractional coeff_magnitude
#define DCHECK_ROW_BOUNDS(row)
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
Fractional target_bound
RowIndex row
#define DCHECK_COL_BOUNDS(col)
Fractional ratio
int64_t cost
std::vector< double > lower_bounds
std::vector< double > upper_bounds
IntVar *const objective_
Definition: search.cc:2966
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:70
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Definition: status.h:85
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
StrictITIVector< Index, Fractional > values
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:41