// OR-Tools 9.1 — revised_simplex.cc
// (Doxygen navigation header: "Go to the documentation of this file.")
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <cstdint>
19 #include <functional>
20 #include <map>
21 #include <string>
22 #include <utility>
23 #include <vector>
24 
25 #include "absl/strings/str_cat.h"
26 #include "absl/strings/str_format.h"
29 #include "ortools/base/logging.h"
39 #include "ortools/util/fp_utils.h"
40 
// Command-line flags controlling debug display and early stopping of the
// simplex solver. They are read via absl::GetFlag() inside Solve().
41 ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
42  "Display numbers as fractions.");
43 ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
44  "Stop after first basis has been computed.");
45 ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
46  "Stop after first phase has been completed.");
47 ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
48 
49 namespace operations_research {
50 namespace glop {
namespace {

// Calls the given closure upon destruction (RAII scope guard). It can be used
// to ensure that a closure is executed whenever a function returns, e.g. to
// update accounting on every exit path.
class Cleanup {
 public:
  // Takes the closure to run at scope exit. An empty std::function is
  // tolerated and simply means "nothing to do".
  explicit Cleanup(std::function<void()> closure)
      : closure_(std::move(closure)) {}

  // Non-copyable: a copy would run the closure once per instance.
  Cleanup(const Cleanup&) = delete;
  Cleanup& operator=(const Cleanup&) = delete;

  // Guard against an empty closure: invoking an empty std::function throws
  // std::bad_function_call, which escaping a destructor would turn into
  // std::terminate().
  ~Cleanup() {
    if (closure_) closure_();
  }

 private:
  std::function<void()> closure_;
};

}  // namespace
65 
// Debug-only check that `col` is a valid column index: 0 <= col < num_cols_.
// Expands to nothing meaningful in opt builds (DCHECK semantics).
66 #define DCHECK_COL_BOUNDS(col) \
67  { \
68  DCHECK_LE(0, col); \
69  DCHECK_GT(num_cols_, col); \
70  }
71 
// Debug-only check that `row` is a valid row index: 0 <= row < num_rows_.
72 // TODO(user): Remove this function.
73 #define DCHECK_ROW_BOUNDS(row) \
74  { \
75  DCHECK_LE(0, row); \
76  DCHECK_GT(num_rows_, row); \
77  }
78 
// Fixed seed for deterministic_random_ below, so repeated solves are
// reproducible run-to-run.
79 constexpr const uint64_t kDeterministicSeed = 42;
80 
// NOTE(review): the constructor signature line (original line 81, likely
// `RevisedSimplex::RevisedSimplex()`) was dropped by the extraction; what
// follows is its member-initializer list. Several members receive references
// to sibling members (e.g. variable_values_ is given parameters_ and
// compact_matrix_), so actual initialization order follows the member
// declaration order in the class, not this list.
82  : problem_status_(ProblemStatus::INIT),
83  objective_(),
84  basis_(),
85  variable_name_(),
86  direction_(),
87  error_(),
88  deterministic_random_(kDeterministicSeed),
89  random_(deterministic_random_),
90  basis_factorization_(&compact_matrix_, &basis_),
91  variables_info_(compact_matrix_),
92  primal_edge_norms_(compact_matrix_, variables_info_,
93  basis_factorization_),
94  dual_edge_norms_(basis_factorization_),
95  dual_prices_(random_),
96  variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
97  basis_factorization_, &dual_edge_norms_, &dual_prices_),
98  update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
99  basis_factorization_),
100  reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
101  basis_factorization_, random_),
102  entering_variable_(variables_info_, random_, &reduced_costs_),
103  primal_prices_(random_, variables_info_, &primal_edge_norms_,
104  &reduced_costs_),
105  iteration_stats_(),
106  ratio_test_stats_(),
107  function_stats_("SimplexFunctionStats"),
108  parameters_(),
109  test_lu_() {
110  SetParameters(parameters_);
111 }
112 
// NOTE(review): the signatures of the four small state-management functions
// below (original lines 113, 119, 125, 130) were dropped by the extraction;
// only their bodies remain.
//
// Likely `void RevisedSimplex::ClearStateForNextSolve()`: drops any external
// basis state and starting values so the next Solve() starts from scratch.
114  SCOPED_TIME_STAT(&function_stats_);
115  solution_state_.statuses.clear();
116  variable_starting_values_.clear();
117 }
118 
// Likely `void RevisedSimplex::LoadStateForNextSolve(const BasisState&)`:
// records an externally-provided basis state for warm-starting.
120  SCOPED_TIME_STAT(&function_stats_);
121  solution_state_ = state;
122  solution_state_has_been_set_externally_ = true;
123 }
124 
// Likely `SetStartingVariableValuesForNextSolve(...)`, continuation of a
// two-line signature taking the starting values to use in the next solve.
126  const DenseRow& values) {
127  variable_starting_values_ = values;
128 }
129 
// Likely `void RevisedSimplex::NotifyThatMatrixIsUnchangedForNextSolve()`.
131  notify_that_matrix_is_unchanged_ = true;
132 }
133 
// NOTE(review): this is the body of the main solve entry point (likely
// `Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit*
// time_limit)`); the signature (original line 134) and a few statement lines
// (original lines 137, 199, 232, 332, 346 — mostly `GLOP_RETURN_IF_ERROR(`
// call openers) were dropped by the extraction. TODO: restore them from the
// upstream file before compiling.
135  SCOPED_TIME_STAT(&function_stats_);
136  DCHECK(lp.IsCleanedUp());
138  Cleanup update_deterministic_time_on_return(
139  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
140 
141  // Initialization. Note that Initialize() must be called first since it
142  // analyzes the current solver state.
143  const double start_time = time_limit->GetElapsedTime();
144  GLOP_RETURN_IF_ERROR(Initialize(lp));
145 
// Reset all per-solve counters and timers.
146  dual_infeasibility_improvement_direction_.clear();
147  update_row_.Invalidate();
148  test_lu_.Clear();
149  problem_status_ = ProblemStatus::INIT;
150  phase_ = Phase::FEASIBILITY;
151  num_iterations_ = 0;
152  num_feasibility_iterations_ = 0;
153  num_optimization_iterations_ = 0;
154  num_push_iterations_ = 0;
155  feasibility_time_ = 0.0;
156  optimization_time_ = 0.0;
157  push_time_ = 0.0;
158  total_time_ = 0.0;
159 
160  // In case we abort because of an error, we cannot assume that the current
161  // solution state will be in sync with all our internal data structure. In
162  // case we abort without resetting it, setting this allow us to still use the
163  // previous state info, but we will double-check everything.
164  solution_state_has_been_set_externally_ = true;
165 
// Optional verbose display of problem statistics.
166  if (VLOG_IS_ON(1)) {
167  ComputeNumberOfEmptyRows();
168  ComputeNumberOfEmptyColumns();
169  DisplayBasicVariableStatistics();
170  DisplayProblem();
171  }
172  if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
173  DisplayAllStats();
174  return Status::OK();
175  }
176 
177  const bool use_dual = parameters_.use_dual_simplex();
178  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
179  if (log_info) {
180  LOG(INFO) << "------ " << (use_dual ? "Dual simplex." : "Primal simplex.");
181  LOG(INFO) << "The matrix has " << compact_matrix_.num_rows() << " rows, "
182  << compact_matrix_.num_cols() << " columns, "
183  << compact_matrix_.num_entries() << " entries.";
184  LOG(INFO) << "Initial number of super-basic variables: "
185  << ComputeNumberOfSuperBasicVariables();
186  }
187 
188  // TODO(user): Avoid doing the first phase checks when we know from the
189  // incremental solve that the solution is already dual or primal feasible.
190  if (log_info) LOG(INFO) << "------ First phase: feasibility.";
191  primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
192  if (use_dual) {
193  if (parameters_.perturb_costs_in_dual_simplex()) {
194  reduced_costs_.PerturbCosts();
195  }
196 
197  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
198  variables_info_.MakeBoxedVariableRelevant(false);
200  DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
201  DisplayIterationInfo();
202 
203  if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
204  // Note(user): In most cases, the matrix will already be refactorized
205  // and both Refactorize() and PermuteBasis() will do nothing. However,
206  // if the time limit is reached during the first phase, this might not
207  // be the case and RecomputeBasicVariableValues() below DCHECKs that the
208  // matrix is refactorized. This is not required, but we currently only
209  // want to recompute values from scratch when the matrix was just
210  // refactorized to maximize precision.
211  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
212  PermuteBasis();
213 
214  variables_info_.MakeBoxedVariableRelevant(true);
215  reduced_costs_.MakeReducedCostsPrecise();
216 
217  // This is needed to display errors properly.
218  MakeBoxedVariableDualFeasible(
219  variables_info_.GetNonBasicBoxedVariables(),
220  /*update_basic_values=*/false);
221  variable_values_.RecomputeBasicVariableValues();
222  }
223  } else {
224  // Test initial dual infeasibility, ignoring boxed variables. We currently
225  // refactorize/recompute the reduced costs if not already done.
226  // TODO(user): Not ideal in an incremental setting.
227  reduced_costs_.MakeReducedCostsPrecise();
228  bool refactorize = reduced_costs_.NeedsBasisRefactorization();
229  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
230 
231  const Fractional initial_infeasibility =
233  if (initial_infeasibility <
234  reduced_costs_.GetDualFeasibilityTolerance()) {
235  if (log_info) LOG(INFO) << "Initial basis is dual feasible.";
236  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
237  MakeBoxedVariableDualFeasible(
238  variables_info_.GetNonBasicBoxedVariables(),
239  /*update_basic_values=*/false);
240  variable_values_.RecomputeBasicVariableValues();
241  } else {
242  // Transform problem and recompute variable values.
243  variables_info_.TransformToDualPhaseIProblem(
244  reduced_costs_.GetDualFeasibilityTolerance(),
245  reduced_costs_.GetReducedCosts());
246  DenseRow zero; // We want the FREE variable at zero here.
247  variable_values_.ResetAllNonBasicVariableValues(zero);
248  variable_values_.RecomputeBasicVariableValues();
249 
250  // Optimize.
251  DisplayErrors();
252  GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
253  DisplayIterationInfo();
254 
255  // Restore original problem and recompute variable values. Note that we
256  // need the reduced cost on the fixed positions here.
257  variables_info_.EndDualPhaseI(
258  reduced_costs_.GetDualFeasibilityTolerance(),
259  reduced_costs_.GetFullReducedCosts());
260  variable_values_.ResetAllNonBasicVariableValues(
261  variable_starting_values_);
262  variable_values_.RecomputeBasicVariableValues();
263 
264  // TODO(user): Note that if there was cost shifts, we just keep them
265  // until the end of the optim.
266  //
267  // TODO(user): What if slightly infeasible? we shouldn't really stop.
268  // Call primal ? use higher tolerance ? It seems we can always kind of
269  // continue and deal with the issue later. Find a way other than this +
270  // 1e-6 hack.
271  if (problem_status_ == ProblemStatus::OPTIMAL) {
272  if (reduced_costs_.ComputeMaximumDualInfeasibility() <
273  reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
274  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
275  } else {
276  if (log_info) LOG(INFO) << "Infeasible after first phase.";
277  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
278  }
279  }
280  }
281  }
282  } else {
283  GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
284  DisplayIterationInfo();
285 
286  // After the primal phase I, we need to restore the objective.
287  if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
288  InitializeObjectiveAndTestIfUnchanged(lp);
289  reduced_costs_.ResetForNewObjective();
290  }
291  }
292 
293  DisplayErrors();
294 
295  phase_ = Phase::OPTIMIZATION;
296  feasibility_time_ = time_limit->GetElapsedTime() - start_time;
297  primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
298  num_feasibility_iterations_ = num_iterations_;
299 
300  if (log_info) LOG(INFO) << "------ Second phase: optimization.";
301 
302  // Because of shifts or perturbations, we may need to re-run a dual simplex
303  // after the primal simplex finished, or the opposite.
304  //
305  // We alternate between solving with primal and dual Phase II algorithm as long as
306  // time limit permits *and* we did not yet achieve the desired precision.
307  // I.e., we run iteration i if the solution from iteration i-1 was not precise
308  // after we removed the bound and cost shifts and perturbations.
309  //
310  // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
311  // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
312  // (i.e., these statuses are not necessarily a consequence of hitting a time
313  // limit).
314  for (int num_optims = 0;
315  // We want to enter the loop when both num_optims and num_iterations_ are
316  // *equal* to the corresponding limits (to return a meaningful status
317  // when the limits are set to 0).
318  num_optims <= parameters_.max_number_of_reoptimizations() &&
319  !objective_limit_reached_ &&
320  (num_iterations_ == 0 ||
321  num_iterations_ < parameters_.max_number_of_iterations()) &&
322  !time_limit->LimitReached() &&
323  !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
324  (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
325  problem_status_ == ProblemStatus::DUAL_FEASIBLE);
326  ++num_optims) {
327  if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
328  // Run the primal simplex.
329  GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
330  } else {
331  // Run the dual simplex.
333  DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
334  }
335 
336  // PrimalMinimize() or DualMinimize() always double check the result with
337  // maximum precision by refactoring the basis before exiting (except if an
338  // iteration or time limit was reached).
339  DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
340  problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
341  basis_factorization_.IsRefactorized());
342 
343  // If SetIntegralityScale() was called, we perform a polish operation.
344  if (!integrality_scale_.empty() &&
345  problem_status_ == ProblemStatus::OPTIMAL) {
347  }
348 
349  // Remove the bound and cost shifts (or perturbations).
350  //
351  // Note(user): Currently, we never do both at the same time, so we could
352  // be a bit faster here, but then this is quick anyway.
353  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
354  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
355  PermuteBasis();
356  variable_values_.RecomputeBasicVariableValues();
357  reduced_costs_.ClearAndRemoveCostShifts();
358 
359  DisplayIterationInfo();
360  DisplayErrors();
361 
362  // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
363  // status by checking with the other phase I that the problem is really
364  // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
365  // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
366  // OPTIMAL and the dual does not have issues on this problem.
367  //
368  // TODO(user): There is another issue on infeas/qual.mps. I think we should
369  // just check the dual ray, not really the current solution dual
370  // feasibility.
371  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
372  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
373  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
374  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
375  reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
376  if (log_info) {
377  LOG(INFO) << "DUAL_UNBOUNDED was reported, but the residual and/or "
378  << "dual infeasibility is above the tolerance";
379  }
380  }
381  break;
382  }
383 
384  // Change the status, if after the shift and perturbation removal the
385  // problem is not OPTIMAL anymore.
386  if (problem_status_ == ProblemStatus::OPTIMAL) {
387  const Fractional solution_tolerance =
388  parameters_.solution_feasibility_tolerance();
389  const Fractional primal_residual =
390  variable_values_.ComputeMaximumPrimalResidual();
391  const Fractional dual_residual =
392  reduced_costs_.ComputeMaximumDualResidual();
393  if (primal_residual > solution_tolerance ||
394  dual_residual > solution_tolerance) {
395  if (log_info) {
396  LOG(INFO) << "OPTIMAL was reported, yet one of the residuals is "
397  "above the solution feasibility tolerance after the "
398  "shift/perturbation are removed.";
399  }
400  if (parameters_.change_status_to_imprecise()) {
401  problem_status_ = ProblemStatus::IMPRECISE;
402  }
403  } else {
404  // We use the "precise" tolerances here to try to report the best
405  // possible solution. Note however that we cannot really hope for an
406  // infeasibility lower than its corresponding residual error. Note that
407  // we already adapt the tolerance like this during the simplex
408  // execution.
409  const Fractional primal_tolerance = std::max(
410  primal_residual, parameters_.primal_feasibility_tolerance());
411  const Fractional dual_tolerance =
412  std::max(dual_residual, parameters_.dual_feasibility_tolerance());
413  const Fractional primal_infeasibility =
414  variable_values_.ComputeMaximumPrimalInfeasibility();
415  const Fractional dual_infeasibility =
416  reduced_costs_.ComputeMaximumDualInfeasibility();
417  if (primal_infeasibility > primal_tolerance &&
418  dual_infeasibility > dual_tolerance) {
419  if (log_info) {
420  LOG(INFO) << "OPTIMAL was reported, yet both of the infeasibility "
421  "are above the tolerance after the "
422  "shift/perturbation are removed.";
423  }
424  if (parameters_.change_status_to_imprecise()) {
425  problem_status_ = ProblemStatus::IMPRECISE;
426  }
427  } else if (primal_infeasibility > primal_tolerance) {
428  if (num_optims == parameters_.max_number_of_reoptimizations()) {
429  if (log_info) {
430  LOG(INFO) << "The primal infeasibility is still higher than the "
431  "requested internal tolerance, but the maximum "
432  "number of optimization is reached.";
433  }
434  break;
435  }
436  if (log_info) LOG(INFO) << "Re-optimizing with dual simplex ... ";
437  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
438  } else if (dual_infeasibility > dual_tolerance) {
439  if (num_optims == parameters_.max_number_of_reoptimizations()) {
440  if (log_info) {
441  LOG(INFO) << "The dual infeasibility is still higher than the "
442  "requested internal tolerance, but the maximum "
443  "number of optimization is reached.";
444  }
445  break;
446  }
447  if (log_info) LOG(INFO) << "Re-optimizing with primal simplex ... ";
448  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
449  }
450  }
451  }
452  }
453 
454  // Check that the return status is "precise".
455  //
456  // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
457  // quantities are not up to date in this case.
458  if (parameters_.change_status_to_imprecise() &&
459  problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
460  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
461  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
462  reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
463  problem_status_ = ProblemStatus::IMPRECISE;
464  } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
465  problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
466  problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
467  if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
468  problem_status_ = ProblemStatus::IMPRECISE;
469  }
470  } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
471  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
472  problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
473  if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
474  problem_status_ = ProblemStatus::IMPRECISE;
475  }
476  }
477  }
478 
479  total_time_ = time_limit->GetElapsedTime() - start_time;
480  optimization_time_ = total_time_ - feasibility_time_;
481  num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
482 
483  // If the user didn't provide starting variable values, then there is no need
484  // to check for super-basic variables.
485  if (!variable_starting_values_.empty()) {
486  const int num_super_basic = ComputeNumberOfSuperBasicVariables();
487  if (num_super_basic > 0) {
488  if (log_info) {
489  LOG(INFO) << "Num super-basic variables left after optimize phase: "
490  << num_super_basic;
491  }
492  if (parameters_.push_to_vertex()) {
493  if (problem_status_ == ProblemStatus::OPTIMAL) {
494  if (log_info) LOG(INFO) << "------ Third phase: push.";
495  phase_ = Phase::PUSH;
496  GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
497  // TODO(user): We should re-check for feasibility at this point and
498  // apply clean-up as needed.
499  } else if (log_info) {
500  LOG(INFO) << "Skipping push phase because optimize didn't succeed.";
501  }
502  }
503  }
504  }
505 
506  total_time_ = time_limit->GetElapsedTime() - start_time;
507  push_time_ = total_time_ - feasibility_time_ - optimization_time_;
508  num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
509  num_optimization_iterations_;
510 
511  // Store the result for the solution getters.
512  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
513  solution_dual_values_ = reduced_costs_.GetDualValues();
514  solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
515  SaveState();
516 
517  if (lp.IsMaximizationProblem()) {
518  ChangeSign(&solution_dual_values_);
519  ChangeSign(&solution_reduced_costs_);
520  }
521 
522  // If the problem is unbounded, set the objective value to +/- infinity.
523  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
524  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
525  solution_objective_value_ =
526  (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
527  : -kInfinity;
528  if (lp.IsMaximizationProblem()) {
529  solution_objective_value_ = -solution_objective_value_;
530  }
531  }
532 
533  variable_starting_values_.clear();
534  DisplayAllStats();
535  return Status::OK();
536 }
537 
// NOTE(review): several getter signatures below (original lines 538, 542,
// 546, 554, 558, 562, 566, 570, 576, 582, 595, 599, 604, 611) were dropped
// by the extraction; only the bodies remain. Lines 586-587 and 589-590
// inside GetConstraintStatus (which presumably special-case the slack
// status before the sign flip) are missing as well — TODO: restore from the
// upstream file.
//
// Likely `ProblemStatus RevisedSimplex::GetProblemStatus() const`.
539  return problem_status_;
540 }
541 
// Likely `Fractional RevisedSimplex::GetObjectiveValue() const`.
543  return solution_objective_value_;
544 }
545 
// Likely `int64_t RevisedSimplex::GetNumberOfIterations() const`.
547  return num_iterations_;
548 }
549 
550 RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
551 
552 ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
553 
// Likely `Fractional RevisedSimplex::GetVariableValue(ColIndex col) const`.
555  return variable_values_.Get(col);
556 }
557 
// Likely `Fractional RevisedSimplex::GetReducedCost(ColIndex col) const`.
559  return solution_reduced_costs_[col];
560 }
561 
// Likely `const DenseRow& RevisedSimplex::GetReducedCosts() const`.
563  return solution_reduced_costs_;
564 }
565 
// Likely `Fractional RevisedSimplex::GetDualValue(RowIndex row) const`.
567  return solution_dual_values_[row];
568 }
569 
// Likely `VariableStatus RevisedSimplex::GetVariableStatus(ColIndex) const`.
571  return variables_info_.GetStatusRow()[col];
572 }
573 
574 const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
575 
// Likely `Fractional RevisedSimplex::GetConstraintActivity(RowIndex) const`.
577  // Note the negative sign since the slack variable is such that
578  // constraint_activity + slack_value = 0.
579  return -variable_values_.Get(SlackColIndex(row));
580 }
581 
// Likely `ConstraintStatus RevisedSimplex::GetConstraintStatus(RowIndex) const`.
583  // The status of the given constraint is the same as the status of the
584  // associated slack variable with a change of sign.
585  const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
588  }
591  }
592  return VariableToConstraintStatus(s);
593 }
594 
// Only valid when the corresponding unbounded status was reported (DCHECKed).
596  DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
597  return solution_primal_ray_;
598 }
600  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
601  return solution_dual_ray_;
602 }
603 
605  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
606  return solution_dual_ray_row_combination_;
607 }
608 
609 ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
610 
// Likely `const BasisFactorization& RevisedSimplex::GetBasisFactorization()`.
612  DCHECK(basis_factorization_.GetColumnPermutation().empty());
613  return basis_factorization_;
614 }
615 
616 std::string RevisedSimplex::GetPrettySolverStats() const {
617  return absl::StrFormat(
618  "Problem status : %s\n"
619  "Solving time : %-6.4g\n"
620  "Number of iterations : %u\n"
621  "Time for solvability (first phase) : %-6.4g\n"
622  "Number of iterations for solvability : %u\n"
623  "Time for optimization : %-6.4g\n"
624  "Number of iterations for optimization : %u\n"
625  "Stop after first basis : %d\n",
626  GetProblemStatusString(problem_status_), total_time_, num_iterations_,
627  feasibility_time_, num_feasibility_iterations_, optimization_time_,
628  num_optimization_iterations_,
629  absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
630 }
631 
// NOTE(review): the signature (original line 632, likely
// `double RevisedSimplex::DeterministicTime() const`) was dropped by the
// extraction. Aggregates the deterministic-time estimates of the solver's
// sub-components plus the pricing-update FP work.
633  // TODO(user): Count what is missing.
634  return DeterministicTimeForFpOperations(num_update_price_operations_) +
635  basis_factorization_.DeterministicTime() +
636  update_row_.DeterministicTime() +
637  entering_variable_.DeterministicTime() +
638  reduced_costs_.DeterministicTime() +
639  primal_edge_norms_.DeterministicTime();
640 }
641 
642 void RevisedSimplex::SetVariableNames() {
643  variable_name_.resize(num_cols_, "");
644  for (ColIndex col(0); col < first_slack_col_; ++col) {
645  const ColIndex var_index = col + 1;
646  variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
647  }
648  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
649  const ColIndex var_index = col - first_slack_col_ + 1;
650  variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
651  }
652 }
653 
// Moves `col` to the given non-basic status, then derives the variable's
// value from that status. Order matters: the value is computed from the
// just-updated status.
654 void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
655  ColIndex col, VariableStatus status) {
656  variables_info_.UpdateToNonBasicStatus(col, status);
657  variable_values_.SetNonBasicVariableValueFromStatus(col);
658 }
659 
660 bool RevisedSimplex::BasisIsConsistent() const {
661  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
662  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
663  for (RowIndex row(0); row < num_rows_; ++row) {
664  const ColIndex col = basis_[row];
665  if (!is_basic.IsSet(col)) return false;
666  if (variable_statuses[col] != VariableStatus::BASIC) return false;
667  }
668  ColIndex cols_in_basis(0);
669  ColIndex cols_not_in_basis(0);
670  for (ColIndex col(0); col < num_cols_; ++col) {
671  cols_in_basis += is_basic.IsSet(col);
672  cols_not_in_basis += !is_basic.IsSet(col);
673  if (is_basic.IsSet(col) !=
674  (variable_statuses[col] == VariableStatus::BASIC)) {
675  return false;
676  }
677  }
678  if (cols_in_basis != RowToColIndex(num_rows_)) return false;
679  if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
680  return true;
681 }
682 
683 // Note(user): The basis factorization is not updated by this function but by
684 // UpdateAndPivot().
// Swaps `entering_col` into the basis at position `basis_row`; the column
// previously there leaves with the given non-basic status.
685 void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
686  VariableStatus leaving_variable_status) {
687  SCOPED_TIME_STAT(&function_stats_);
688  DCHECK_COL_BOUNDS(entering_col);
689  DCHECK_ROW_BOUNDS(basis_row);
690 
691  // Check that this is not called with an entering_col already in the basis
692  // and that the leaving col is indeed in the basis.
693  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
694  DCHECK_NE(basis_[basis_row], entering_col);
695  DCHECK_NE(basis_[basis_row], kInvalidCol);
696 
697  const ColIndex leaving_col = basis_[basis_row];
698  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
699 
700  // Make leaving_col leave the basis and update relevant data.
701  // Note that the leaving variable value is not necessarily at its exact
702  // bound, which is like a bound shift.
703  variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
704  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
705  leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
706  leaving_variable_status == VariableStatus::FIXED_VALUE);
707 
// Install the entering column and invalidate the cached update row.
708  basis_[basis_row] = entering_col;
709  variables_info_.UpdateToBasicStatus(entering_col);
710  update_row_.Invalidate();
711 }
712 
713 namespace {
714 
715 // Comparator used to sort column indices according to a given value vector.
716 class ColumnComparator {
717  public:
718  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
719  bool operator()(ColIndex col_a, ColIndex col_b) const {
720  return value_[col_a] < value_[col_b];
721  }
722 
723  private:
724  const DenseRow& value_;
725 };
726 
727 } // namespace
728 
729 // To understand better what is going on in this function, let us say that this
730 // algorithm will produce the optimal solution to a problem containing only
731 // singleton columns (provided that the variables start at the minimum possible
732 // cost, see DefaultVariableStatus()). This is unit tested.
733 //
734 // The error_ must be equal to the constraint activity for the current variable
735 // values before this function is called. If error_[row] is 0.0, that means this
736 // constraint is currently feasible.
737 void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
738  SCOPED_TIME_STAT(&function_stats_);
739  // Computes the singleton columns and the cost variation of the corresponding
740  // variables (in the only possible direction, i.e away from its current bound)
741  // for a unit change in the infeasibility of the corresponding row.
742  //
743  // Note that the slack columns will be treated as normal singleton columns.
744  std::vector<ColIndex> singleton_column;
745  DenseRow cost_variation(num_cols_, 0.0);
746  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
747  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
748  for (ColIndex col(0); col < num_cols_; ++col) {
749  if (compact_matrix_.column(col).num_entries() != 1) continue;
750  if (lower_bounds[col] == upper_bounds[col]) continue;
751  const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
752  if (variable_values_.Get(col) == lower_bounds[col]) {
753  cost_variation[col] = objective_[col] / std::abs(slope);
754  } else {
755  cost_variation[col] = -objective_[col] / std::abs(slope);
756  }
757  singleton_column.push_back(col);
758  }
759  if (singleton_column.empty()) return;
760 
761  // Sort the singleton columns for the case where many of them correspond to
762  // the same row (equivalent to a piecewise-linear objective on this variable).
763  // Negative cost_variation first since moving the singleton variable away from
764  // its current bound means the least decrease in the objective function for
765  // the same "error" variation.
766  ColumnComparator comparator(cost_variation);
767  std::sort(singleton_column.begin(), singleton_column.end(), comparator);
768  DCHECK_LE(cost_variation[singleton_column.front()],
769  cost_variation[singleton_column.back()]);
770 
771  // Use a singleton column to "absorb" the error when possible to avoid
772  // introducing unneeded artificial variables. Note that with scaling on, the
773  // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
774  // them) and that the SingletonColumnSignPreprocessor makes them all positive.
775  // However, this code works for any coefficient value.
776  const DenseRow& variable_values = variable_values_.GetDenseRow();
777  for (const ColIndex col : singleton_column) {
778  const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
779 
780  // If no singleton columns have entered the basis for this row, choose the
781  // first one. It will be the one with the least decrease in the objective
782  // function when it leaves the basis.
783  if ((*basis)[row] == kInvalidCol) {
784  (*basis)[row] = col;
785  }
786 
787  // If there is already no error in this row (i.e. it is primal-feasible),
788  // there is nothing to do.
789  if (error_[row] == 0.0) continue;
790 
791  // In this case, all the infeasibility can be "absorbed" and this variable
792  // may not be at one of its bound anymore, so we have to use it in the
793  // basis.
794  const Fractional coeff =
795  compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
796  const Fractional new_value = variable_values[col] + error_[row] / coeff;
797  if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
798  error_[row] = 0.0;
799 
800  // Use this variable in the initial basis.
801  (*basis)[row] = col;
802  continue;
803  }
804 
805  // The idea here is that if the singleton column cannot be used to "absorb"
806  // all error_[row], if it is boxed, it can still be used to make the
807  // infeasibility smaller (with a bound flip).
808  const Fractional box_width = variables_info_.GetBoundDifference(col);
809  DCHECK_NE(box_width, 0.0);
810  DCHECK_NE(error_[row], 0.0);
811  const Fractional error_sign = error_[row] / coeff;
812  if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
813  DCHECK(IsFinite(box_width));
814  error_[row] -= coeff * box_width;
815  SetNonBasicVariableStatusAndDeriveValue(col,
// NOTE(review): the second argument line (original line 816, presumably
// `VariableStatus::AT_UPPER_BOUND);` for this lower-to-upper bound flip) was
// dropped by the extraction. TODO: restore from the upstream file.
817  continue;
818  }
819  if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
820  DCHECK(IsFinite(box_width));
821  error_[row] += coeff * box_width;
822  SetNonBasicVariableStatusAndDeriveValue(col,
// NOTE(review): the second argument line (original line 823, presumably
// `VariableStatus::AT_LOWER_BOUND);` for this upper-to-lower bound flip) was
// dropped by the extraction. TODO: restore from the upstream file.
824  continue;
825  }
826  }
827 }
828 
829 bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
830  const LinearProgram& lp, bool lp_is_in_equation_form,
831  bool* only_change_is_new_rows, bool* only_change_is_new_cols,
832  ColIndex* num_new_cols) {
833  SCOPED_TIME_STAT(&function_stats_);
834  DCHECK(only_change_is_new_rows != nullptr);
835  DCHECK(only_change_is_new_cols != nullptr);
836  DCHECK(num_new_cols != nullptr);
837  DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
838  DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
839 
840  // This works whether the lp is in equation form (with slack) or not.
841  const bool old_part_of_matrix_is_unchanged =
843  num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
844 
845  // This is the only adaptation we need for the test below.
846  const ColIndex lp_first_slack =
847  lp_is_in_equation_form ? lp.GetFirstSlackVariable() : lp.num_variables();
848 
849  // Test if the matrix is unchanged, and if yes, just returns true. Note that
850  // this doesn't check the columns corresponding to the slack variables,
851  // because they were checked by lp.IsInEquationForm() when Solve() was called.
852  if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
853  lp_first_slack == first_slack_col_) {
854  return true;
855  }
856 
857  // Check if the new matrix can be derived from the old one just by adding
858  // new rows (i.e new constraints).
859  *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
860  lp.num_constraints() > num_rows_ &&
861  lp_first_slack == first_slack_col_;
862 
863  // Check if the new matrix can be derived from the old one just by adding
864  // new columns (i.e new variables).
865  *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
866  lp.num_constraints() == num_rows_ &&
867  lp_first_slack > first_slack_col_;
868  *num_new_cols = *only_change_is_new_cols ? lp_first_slack - first_slack_col_
869  : ColIndex(0);
870 
871  // Initialize first_slack_.
872  first_slack_col_ = lp_first_slack;
873 
874  // Initialize the new dimensions.
875  num_rows_ = lp.num_constraints();
876  num_cols_ = lp_first_slack + RowToColIndex(lp.num_constraints());
877 
878  // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
879  // already added all the slack variables at this point, so matrix_ will not
880  // change anymore.
881  if (lp_is_in_equation_form) {
882  // TODO(user): This can be sped up by removing the MatrixView, but then
883  // this path will likely go away.
884  compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
885  } else {
886  compact_matrix_.PopulateFromSparseMatrixAndAddSlacks(lp.GetSparseMatrix());
887  }
888  if (parameters_.use_transposed_matrix()) {
889  transposed_matrix_.PopulateFromTranspose(compact_matrix_);
890  }
891  return false;
892 }
893 
894 // Preconditions: This should only be called if there are only new variable
895 // in the lp.
896 bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
897  const LinearProgram& lp, bool lp_is_in_equation_form,
898  ColIndex num_new_cols) {
899  SCOPED_TIME_STAT(&function_stats_);
900  DCHECK_LE(num_new_cols, first_slack_col_);
901  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
902 
903  // Check the original variable bounds.
904  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
905  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
906  for (ColIndex col(0); col < first_new_col; ++col) {
907  if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
908  upper_bounds[col] != lp.variable_upper_bounds()[col]) {
909  return false;
910  }
911  }
912 
913  // Check that each new variable has a bound of zero.
914  for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
915  if (lp.variable_lower_bounds()[col] != 0.0 &&
916  lp.variable_upper_bounds()[col] != 0.0) {
917  return false;
918  }
919  }
920 
921  // Check that the slack bounds are unchanged.
922  if (lp_is_in_equation_form) {
923  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
924  if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
925  upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
926  return false;
927  }
928  }
929  } else {
930  DCHECK_EQ(num_rows_, lp.num_constraints());
931  for (RowIndex row(0); row < num_rows_; ++row) {
932  const ColIndex col = first_slack_col_ + RowToColIndex(row);
933  if (lower_bounds[col - num_new_cols] !=
934  -lp.constraint_upper_bounds()[row] ||
935  upper_bounds[col - num_new_cols] !=
936  -lp.constraint_lower_bounds()[row]) {
937  return false;
938  }
939  }
940  }
941  return true;
942 }
943 
944 bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
945  const LinearProgram& lp) {
946  SCOPED_TIME_STAT(&function_stats_);
947 
948  bool objective_is_unchanged = true;
949  objective_.resize(num_cols_, 0.0);
950 
951  // This function work whether the lp is in equation form (with slack) or
952  // without, since the objective of the slacks are always zero.
953  DCHECK_GE(num_cols_, lp.num_variables());
954  for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) {
955  if (objective_[col] != 0.0) {
956  objective_is_unchanged = false;
957  objective_[col] = 0.0;
958  }
959  }
960 
961  if (lp.IsMaximizationProblem()) {
962  // Note that we use the minimization version of the objective internally.
963  for (ColIndex col(0); col < lp.num_variables(); ++col) {
964  const Fractional coeff = -lp.objective_coefficients()[col];
965  if (objective_[col] != coeff) {
966  objective_is_unchanged = false;
967  objective_[col] = coeff;
968  }
969  }
970  objective_offset_ = -lp.objective_offset();
971  objective_scaling_factor_ = -lp.objective_scaling_factor();
972  } else {
973  for (ColIndex col(0); col < lp.num_variables(); ++col) {
974  const Fractional coeff = lp.objective_coefficients()[col];
975  if (objective_[col] != coeff) {
976  objective_is_unchanged = false;
977  objective_[col] = coeff;
978  }
979  }
980  objective_offset_ = lp.objective_offset();
981  objective_scaling_factor_ = lp.objective_scaling_factor();
982  }
983 
984  return objective_is_unchanged;
985 }
986 
987 void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
988  objective_limit_reached_ = false;
989  DCHECK(std::isfinite(objective_offset_));
990  DCHECK(std::isfinite(objective_scaling_factor_));
991  DCHECK_NE(0.0, objective_scaling_factor_);
992 
993  // This sets dual_objective_limit_ and then primal_objective_limit_.
994  for (const bool set_dual : {true, false}) {
995  // NOTE(user): If objective_scaling_factor_ is negative, the optimization
996  // direction was reversed (during preprocessing or inside revised simplex),
997  // i.e., the original problem is maximization. In such case the _meaning_ of
998  // the lower and upper limits is swapped. To this end we must change the
999  // signs of limits, which happens automatically when calculating shifted
1000  // limits. We must also use upper (resp. lower) limit in place of lower
1001  // (resp. upper) limit when calculating the final objective_limit_.
1002  //
1003  // Choose lower limit if using the dual simplex and scaling factor is
1004  // negative or if using the primal simplex and scaling is nonnegative, upper
1005  // limit otherwise.
1006  const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
1007  ? parameters_.objective_lower_limit()
1008  : parameters_.objective_upper_limit();
1009  const Fractional shifted_limit =
1010  limit / objective_scaling_factor_ - objective_offset_;
1011  if (set_dual) {
1012  dual_objective_limit_ = shifted_limit;
1013  } else {
1014  primal_objective_limit_ = shifted_limit;
1015  }
1016  }
1017 }
1018 
1019 // This implementation starts with an initial matrix B equal to the identity
1020 // matrix (modulo a column permutation). For that it uses either the slack
1021 // variables or the singleton columns present in the problem. Afterwards, the
1022 // fixed slacks in the basis are exchanged with normal columns of A if possible
1023 // by the InitialBasis class.
1024 Status RevisedSimplex::CreateInitialBasis() {
1025  SCOPED_TIME_STAT(&function_stats_);
1026 
1027  // Initialize the variable values and statuses.
1028  // Note that for the dual algorithm, boxed variables will be made
1029  // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
1030  // really matter at which of their two finite bounds they start.
1031  variables_info_.InitializeToDefaultStatus();
1032  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1033 
1034  // Start by using an all-slack basis.
1035  RowToColMapping basis(num_rows_, kInvalidCol);
1036  for (RowIndex row(0); row < num_rows_; ++row) {
1037  basis[row] = SlackColIndex(row);
1038  }
1039 
1040  // If possible, for the primal simplex we replace some slack variables with
1041  // some singleton columns present in the problem.
1042  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1043  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1044  if (!parameters_.use_dual_simplex() &&
1045  parameters_.initial_basis() != GlopParameters::MAROS &&
1047  // For UseSingletonColumnInInitialBasis() to work better, we change
1048  // the value of the boxed singleton column with a non-zero cost to the best
1049  // of their two bounds.
1050  for (ColIndex col(0); col < num_cols_; ++col) {
1051  if (compact_matrix_.column(col).num_entries() != 1) continue;
1052  const VariableStatus status = variables_info_.GetStatusRow()[col];
1053  const Fractional objective = objective_[col];
1054  if (objective > 0 && IsFinite(lower_bounds[col]) &&
1055  status == VariableStatus::AT_UPPER_BOUND) {
1056  SetNonBasicVariableStatusAndDeriveValue(col,
1058  } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1059  status == VariableStatus::AT_LOWER_BOUND) {
1060  SetNonBasicVariableStatusAndDeriveValue(col,
1062  }
1063  }
1064 
1065  // Compute the primal infeasibility of the initial variable values in
1066  // error_.
1067  ComputeVariableValuesError();
1068 
1069  // TODO(user): A better but slightly more complex algorithm would be to:
1070  // - Ignore all singleton columns except the slacks during phase I.
1071  // - For this, change the slack variable bounds accordingly.
1072  // - At the end of phase I, restore the slack variable bounds and perform
1073  // the same algorithm to start with feasible and "optimal" values of the
1074  // singleton columns.
1075  basis.assign(num_rows_, kInvalidCol);
1076  UseSingletonColumnInInitialBasis(&basis);
1077 
1078  // Eventually complete the basis with fixed slack columns.
1079  for (RowIndex row(0); row < num_rows_; ++row) {
1080  if (basis[row] == kInvalidCol) {
1081  basis[row] = SlackColIndex(row);
1082  }
1083  }
1084  }
1085 
1086  // Use an advanced initial basis to remove the fixed variables from the basis.
1087  if (parameters_.initial_basis() == GlopParameters::NONE) {
1088  return InitializeFirstBasis(basis);
1089  }
1090  if (parameters_.initial_basis() == GlopParameters::MAROS) {
1091  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1092  upper_bounds, variables_info_.GetTypeRow());
1093  if (parameters_.use_dual_simplex()) {
1094  // This dual version only uses zero-cost columns to complete the
1095  // basis.
1096  initial_basis.GetDualMarosBasis(num_cols_, &basis);
1097  } else {
1098  initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1099  }
1100  int number_changed = 0;
1101  for (RowIndex row(0); row < num_rows_; ++row) {
1102  if (basis[row] != SlackColIndex(row)) {
1103  number_changed++;
1104  }
1105  }
1106  VLOG(1) << "Number of Maros basis changes: " << number_changed;
1107  } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1108  parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1109  // First unassign the fixed variables from basis.
1110  int num_fixed_variables = 0;
1111  for (RowIndex row(0); row < basis.size(); ++row) {
1112  const ColIndex col = basis[row];
1113  if (lower_bounds[col] == upper_bounds[col]) {
1114  basis[row] = kInvalidCol;
1115  ++num_fixed_variables;
1116  }
1117  }
1118 
1119  if (num_fixed_variables == 0) {
1120  VLOG(1) << "Crash is set to " << parameters_.initial_basis()
1121  << " but there is no equality rows to remove from initial all "
1122  "slack basis.";
1123  } else {
1124  // Then complete the basis with an advanced initial basis algorithm.
1125  VLOG(1) << "Trying to remove " << num_fixed_variables
1126  << " fixed variables from the initial basis.";
1127  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1128  upper_bounds, variables_info_.GetTypeRow());
1129 
1130  if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1131  if (parameters_.use_scaling()) {
1132  initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1133  } else {
1134  VLOG(1) << "Bixby initial basis algorithm requires the problem "
1135  << "to be scaled. Skipping Bixby's algorithm.";
1136  }
1137  } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1138  // Note the use of num_cols_ here because this algorithm
1139  // benefits from treating fixed slack columns like any other column.
1140  if (parameters_.use_dual_simplex()) {
1141  // This dual version only uses zero-cost columns to complete the
1142  // basis.
1143  initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1144  } else {
1145  initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1146  }
1147 
1148  const Status status = InitializeFirstBasis(basis);
1149  if (status.ok()) {
1150  return status;
1151  } else {
1152  VLOG(1) << "Reverting to all slack basis.";
1153 
1154  for (RowIndex row(0); row < num_rows_; ++row) {
1155  basis[row] = SlackColIndex(row);
1156  }
1157  }
1158  }
1159  }
1160  } else {
1161  LOG(WARNING) << "Unsupported initial_basis parameters: "
1162  << parameters_.initial_basis();
1163  }
1164 
1165  return InitializeFirstBasis(basis);
1166 }
1167 
1168 Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
1169  basis_ = basis;
1170 
1171  // For each row which does not have a basic column, assign it to the
1172  // corresponding slack column.
1173  basis_.resize(num_rows_, kInvalidCol);
1174  for (RowIndex row(0); row < num_rows_; ++row) {
1175  if (basis_[row] == kInvalidCol) {
1176  basis_[row] = SlackColIndex(row);
1177  }
1178  }
1179 
1180  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
1181  PermuteBasis();
1182 
1183  // Test that the upper bound on the condition number of basis is not too high.
1184  // The number was not computed by any rigorous analysis, we just prefer to
1185  // revert to the all slack basis if the condition number of our heuristic
1186  // first basis seems bad. See for instance on cond11.mps, where we get an
1187  // infinity upper bound.
1188  const Fractional condition_number_ub =
1189  basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
1190  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
1191  const std::string error_message =
1192  absl::StrCat("The matrix condition number upper bound is too high: ",
1193  condition_number_ub);
1194  VLOG(1) << error_message;
1195  return Status(Status::ERROR_LU, error_message);
1196  }
1197 
1198  // Everything is okay, finish the initialization.
1199  for (RowIndex row(0); row < num_rows_; ++row) {
1200  variables_info_.UpdateToBasicStatus(basis_[row]);
1201  }
1202  DCHECK(BasisIsConsistent());
1203 
1204  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1205  variable_values_.RecomputeBasicVariableValues();
1206 
1207  if (VLOG_IS_ON(1)) {
1208  // TODO(user): Maybe return an error status if this is too high. Note
1209  // however that if we want to do that, we need to reset variables_info_ to a
1210  // consistent state.
1211  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1212  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
1213  VLOG(1) << absl::StrCat(
1214  "The primal residual of the initial basis is above the tolerance, ",
1215  variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
1216  }
1217  }
1218  return Status::OK();
1219 }
1220 
// (Re-)initializes the solver from `lp`. Detects what changed relative to
// the previously loaded problem (matrix, objective, bounds) and, when the
// solution_state_ allows it, performs a warm start instead of rebuilding the
// basis from scratch. Returns an error only if the initial basis cannot be
// built.
Status RevisedSimplex::Initialize(const LinearProgram& lp) {
  parameters_ = initial_parameters_;
  PropagateParameters();

  // We accept both kind of input.
  //
  // TODO(user): Ideally there should be no need to ever put the slack in the
  // LinearProgram. That take extra memory (one big SparseColumn per slack) and
  // just add visible overhead in incremental solve when one wants to
  // add/remove constraints. But for historical reason, we handle both for now.
  const bool lp_is_in_equation_form = lp.IsInEquationForm();

  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
  // this is where num_rows_ and num_cols_ are computed.
  //
  // Note that these functions can't depend on use_dual_simplex() since we may
  // change it below.
  ColIndex num_new_cols(0);
  bool only_change_is_new_rows = false;
  bool only_change_is_new_cols = false;
  bool matrix_is_unchanged = true;
  bool only_new_bounds = false;
  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
    matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
        lp, lp_is_in_equation_form, &only_change_is_new_rows,
        &only_change_is_new_cols, &num_new_cols);
    only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
                      OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
                          lp, lp_is_in_equation_form, num_new_cols);
  } else if (DEBUG_MODE) {
    // The caller promised the matrix is unchanged; verify that in debug mode.
    CHECK(InitializeMatrixAndTestIfUnchanged(
        lp, lp_is_in_equation_form, &only_change_is_new_rows,
        &only_change_is_new_cols, &num_new_cols));
  }
  // One-shot flag: consume it so the next Initialize() re-checks the matrix.
  notify_that_matrix_is_unchanged_ = false;

  // TODO(user): move objective with ReducedCosts class.
  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);

  // In equation form the slack bounds are part of the variable bounds;
  // otherwise they are derived from the constraint bounds.
  const bool bounds_are_unchanged =
      lp_is_in_equation_form
          ? variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds())
          : variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
                lp.variable_lower_bounds(), lp.variable_upper_bounds(),
                lp.constraint_lower_bounds(), lp.constraint_upper_bounds());

  // If parameters_.allow_simplex_algorithm_change() is true and we already
  // have a primal (resp. dual) feasible solution, then we use the primal
  // (resp. dual) algorithm since there is a good chance that it will be
  // faster.
  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
    if (objective_is_unchanged && !bounds_are_unchanged) {
      parameters_.set_use_dual_simplex(true);
      PropagateParameters();
    }
    if (bounds_are_unchanged && !objective_is_unchanged) {
      parameters_.set_use_dual_simplex(false);
      PropagateParameters();
    }
  }

  InitializeObjectiveLimit(lp);

  // Computes the variable name as soon as possible for logging.
  // TODO(user): do we really need to store them? we could just compute them
  // on the fly since we do not need the speed.
  if (VLOG_IS_ON(1)) {
    SetVariableNames();
  }

  // Warm-start? This is supported only if the solution_state_ is non empty,
  // i.e., this revised simplex i) was already used to solve a problem, or
  // ii) the solution state was provided externally. Note that the
  // solution_state_ may have nothing to do with the current problem, e.g.,
  // objective, matrix, and/or bounds had changed. So we support several
  // scenarios of warm-start depending on how did the problem change and which
  // simplex algorithm is used (primal or dual).
  bool solve_from_scratch = true;

  // Try to perform a "quick" warm-start with no matrix factorization involved.
  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
    if (!parameters_.use_dual_simplex()) {
      // With primal simplex, always clear dual norms and dual pricing.
      // Incrementality is supported only if only change to the matrix and
      // bounds is adding new columns (objective may change), and that all
      // new columns have a bound equal to zero.
      dual_edge_norms_.Clear();
      dual_pricing_vector_.clear();
      if (matrix_is_unchanged && bounds_are_unchanged) {
        // TODO(user): Do not do that if objective_is_unchanged. Currently
        // this seems to break something. Investigate.
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      } else if (only_change_is_new_cols && only_new_bounds) {
        variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
                                                 solution_state_);
        variable_values_.ResetAllNonBasicVariableValues(
            variable_starting_values_);

        // The new columns were inserted before the slacks, so shift any basic
        // column index at or past the insertion point.
        const ColIndex first_new_col(first_slack_col_ - num_new_cols);
        for (ColIndex& col_ref : basis_) {
          if (col_ref >= first_new_col) {
            col_ref += num_new_cols;
          }
        }

        // Make sure the primal edge norm are recomputed from scratch.
        // TODO(user): only the norms of the new columns actually need to be
        // computed.
        primal_edge_norms_.Clear();
        reduced_costs_.ClearAndRemoveCostShifts();
        solve_from_scratch = false;
      }
    } else {
      // With dual simplex, always clear primal norms. Incrementality is
      // supported only if the objective remains the same (the matrix may
      // contain new rows and the bounds may change).
      primal_edge_norms_.Clear();
      if (objective_is_unchanged) {
        if (matrix_is_unchanged) {
          if (!bounds_are_unchanged) {
            variables_info_.InitializeFromBasisState(
                first_slack_col_, ColIndex(0), solution_state_);
            variable_values_.ResetAllNonBasicVariableValues(
                variable_starting_values_);
            variable_values_.RecomputeBasicVariableValues();
          }
          solve_from_scratch = false;
        } else if (only_change_is_new_rows) {
          // For the dual-simplex, we also perform a warm start if a couple of
          // new rows where added.
          variables_info_.InitializeFromBasisState(
              first_slack_col_, ColIndex(0), solution_state_);
          dual_edge_norms_.ResizeOnNewRows(num_rows_);

          // TODO(user): The reduced costs do not really need to be recomputed.
          // We just need to initialize the ones of the new slack variables to
          // 0.
          reduced_costs_.ClearAndRemoveCostShifts();
          dual_pricing_vector_.clear();

          // Note that this needs to be done after the Clear() calls above.
          if (InitializeFirstBasis(basis_).ok()) {
            solve_from_scratch = false;
          }
        }
      }
    }
  }

  // If we couldn't perform a "quick" warm start above, we can at least try to
  // reuse the variable statuses.
  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
  if (solve_from_scratch && !solution_state_.IsEmpty()) {
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();

    // If an external basis has been provided or if the matrix changed, we need
    // to perform more work, e.g., factorize the proposed basis and validate
    // it.
    variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
                                             solution_state_);

    // Use the set of basic columns as a "hint" to construct the first basis.
    std::vector<ColIndex> candidates;
    for (const ColIndex col : variables_info_.GetIsBasicBitRow()) {
      candidates.push_back(col);
    }
    if (log_info) {
      LOG(INFO) << "The warm-start state contains " << candidates.size()
                << " candidates for the basis (num_rows = " << num_rows_
                << ").";
    }

    // Optimization: Try to factorize it right away if we have the correct
    // number of element. Ideally the other path below would no require a
    // "double" factorization effort, so this would not be needed.
    if (candidates.size() == num_rows_) {
      basis_.clear();
      for (const ColIndex col : candidates) {
        basis_.push_back(col);
      }

      // TODO(user): Depending on the error here, there is no point doing extra
      // work below. This is the case when we fail because of a bad initial
      // condition number for instance.
      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      }
    }

    if (solve_from_scratch) {
      // The direct factorization attempt failed (or was not possible); let
      // the factorization pick a basis from the candidate columns instead.
      basis_ = basis_factorization_.ComputeInitialBasis(candidates);
      const int num_super_basic =
          variables_info_.ChangeUnusedBasicVariablesToFree(basis_);
      const int num_snapped = variables_info_.SnapFreeVariablesToBound(
          parameters_.crossover_bound_snapping_distance(),
          variable_starting_values_);
      if (log_info) {
        LOG(INFO) << "The initial basis did not use " << num_super_basic
                  << " BASIC columns from the initial state and used "
                  << (num_rows_ - (candidates.size() - num_super_basic))
                  << " slack variables that were not marked BASIC.";
        if (num_snapped > 0) {
          LOG(INFO) << num_snapped
                    << " of the FREE variables where moved to their bound.";
        }
      }

      if (InitializeFirstBasis(basis_).ok()) {
        solve_from_scratch = false;
      } else {
        if (log_info) {
          LOG(INFO) << "RevisedSimplex is not using the warm start "
                       "basis because it is not factorizable.";
        }
      }
    }
  }

  if (solve_from_scratch) {
    if (log_info) LOG(INFO) << "Solve from scratch.";
    basis_factorization_.Clear();
    reduced_costs_.ClearAndRemoveCostShifts();
    primal_edge_norms_.Clear();
    dual_edge_norms_.Clear();
    dual_pricing_vector_.clear();
    GLOP_RETURN_IF_ERROR(CreateInitialBasis());
  } else {
    if (log_info) LOG(INFO) << "Incremental solve.";
  }
  DCHECK(BasisIsConsistent());
  return Status::OK();
}
1457 
1458 void RevisedSimplex::DisplayBasicVariableStatistics() {
1459  SCOPED_TIME_STAT(&function_stats_);
1460 
1461  int num_fixed_variables = 0;
1462  int num_free_variables = 0;
1463  int num_variables_at_bound = 0;
1464  int num_slack_variables = 0;
1465  int num_infeasible_variables = 0;
1466 
1467  const DenseRow& variable_values = variable_values_.GetDenseRow();
1468  const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1469  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1470  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1471  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1472  for (RowIndex row(0); row < num_rows_; ++row) {
1473  const ColIndex col = basis_[row];
1474  const Fractional value = variable_values[col];
1475  if (variable_types[col] == VariableType::UNCONSTRAINED) {
1476  ++num_free_variables;
1477  }
1478  if (value > upper_bounds[col] + tolerance ||
1479  value < lower_bounds[col] - tolerance) {
1480  ++num_infeasible_variables;
1481  }
1482  if (col >= first_slack_col_) {
1483  ++num_slack_variables;
1484  }
1485  if (lower_bounds[col] == upper_bounds[col]) {
1486  ++num_fixed_variables;
1487  } else if (variable_values[col] == lower_bounds[col] ||
1488  variable_values[col] == upper_bounds[col]) {
1489  ++num_variables_at_bound;
1490  }
1491  }
1492 
1493  VLOG(1) << "Basis size: " << num_rows_;
1494  VLOG(1) << "Number of basic infeasible variables: "
1495  << num_infeasible_variables;
1496  VLOG(1) << "Number of basic slack variables: " << num_slack_variables;
1497  VLOG(1) << "Number of basic variables at bound: " << num_variables_at_bound;
1498  VLOG(1) << "Number of basic fixed variables: " << num_fixed_variables;
1499  VLOG(1) << "Number of basic free variables: " << num_free_variables;
1500 }
1501 
1502 void RevisedSimplex::SaveState() {
1503  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1504  solution_state_.statuses = variables_info_.GetStatusRow();
1505  solution_state_has_been_set_externally_ = false;
1506 }
1507 
1508 RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1509  DenseBooleanColumn contains_data(num_rows_, false);
1510  for (ColIndex col(0); col < num_cols_; ++col) {
1511  for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1512  contains_data[e.row()] = true;
1513  }
1514  }
1515  RowIndex num_empty_rows(0);
1516  for (RowIndex row(0); row < num_rows_; ++row) {
1517  if (!contains_data[row]) {
1518  ++num_empty_rows;
1519  VLOG(1) << "Row " << row << " is empty.";
1520  }
1521  }
1522  return num_empty_rows;
1523 }
1524 
1525 ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1526  ColIndex num_empty_cols(0);
1527  for (ColIndex col(0); col < num_cols_; ++col) {
1528  if (compact_matrix_.column(col).IsEmpty()) {
1529  ++num_empty_cols;
1530  VLOG(2) << "Column " << col << " is empty.";
1531  }
1532  }
1533  return num_empty_cols;
1534 }
1535 
1536 int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
1537  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
1538  int num_super_basic = 0;
1539  for (ColIndex col(0); col < num_cols_; ++col) {
1540  if (variable_statuses[col] == VariableStatus::FREE &&
1541  variable_values_.Get(col) != 0.0) {
1542  ++num_super_basic;
1543  }
1544  }
1545  return num_super_basic;
1546 }
1547 
1548 void RevisedSimplex::CorrectErrorsOnVariableValues() {
1549  SCOPED_TIME_STAT(&function_stats_);
1550  DCHECK(basis_factorization_.IsRefactorized());
1551 
1552  // TODO(user): The primal residual error does not change if we take degenerate
1553  // steps or if we do not change the variable values. No need to recompute it
1554  // in this case.
1555  const Fractional primal_residual =
1556  variable_values_.ComputeMaximumPrimalResidual();
1557 
1558  // If the primal_residual is within the tolerance, no need to recompute
1559  // the basic variable values with a better precision.
1560  if (primal_residual >= parameters_.harris_tolerance_ratio() *
1561  parameters_.primal_feasibility_tolerance()) {
1562  variable_values_.RecomputeBasicVariableValues();
1563  VLOG(1) << "Primal infeasibility (bounds error) = "
1564  << variable_values_.ComputeMaximumPrimalInfeasibility()
1565  << ", Primal residual |A.x - b| = "
1566  << variable_values_.ComputeMaximumPrimalResidual();
1567  }
1568 }
1569 
1570 void RevisedSimplex::ComputeVariableValuesError() {
1571  SCOPED_TIME_STAT(&function_stats_);
1572  error_.AssignToZero(num_rows_);
1573  const DenseRow& variable_values = variable_values_.GetDenseRow();
1574  for (ColIndex col(0); col < num_cols_; ++col) {
1575  const Fractional value = variable_values[col];
1576  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1577  }
1578 }
1579 
// Computes direction_ = B^-1 * a_col (the entering column expressed in the
// current basis) and caches its infinity norm in direction_infinity_norm_.
void RevisedSimplex::ComputeDirection(ColIndex col) {
  SCOPED_TIME_STAT(&function_stats_);
  // NOTE(review): the extraction this file came from dropped original line
  // 1582 here — presumably the code that resets direction_ before the solve.
  // Confirm against the upstream file.
  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
  direction_infinity_norm_ = 0.0;
  if (direction_.non_zeros.empty()) {
    // The solve produced a dense result (empty non-zero index list). We still
    // compute the direction non-zeros because our code relies on it, and we
    // accumulate the infinity norm in the same pass.
    for (RowIndex row(0); row < num_rows_; ++row) {
      const Fractional value = direction_[row];
      if (value != 0.0) {
        direction_.non_zeros.push_back(row);
        direction_infinity_norm_ =
            std::max(direction_infinity_norm_, std::abs(value));
      }
    }
  } else {
    // Sparse result: only the stored non-zero entries need to be scanned.
    for (const auto e : direction_) {
      direction_infinity_norm_ =
          std::max(direction_infinity_norm_, std::abs(e.coefficient()));
    }
  }
  // Record how dense the direction is (fraction of non-zero rows).
  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
      num_rows_ == 0 ? 0.0
                     : static_cast<double>(direction_.non_zeros.size()) /
                           static_cast<double>(num_rows_.value())));
}
1606 
1607 Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1608  SCOPED_TIME_STAT(&function_stats_);
1609  compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1610  for (const auto e : direction_) {
1611  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1612  &error_);
1613  }
1614  return InfinityNorm(error_);
1615 }
1616 
1617 template <bool is_entering_reduced_cost_positive>
1618 Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1619  const DenseRow& upper_bounds,
1620  RowIndex row) const {
1621  const ColIndex col = basis_[row];
1622  const Fractional direction = direction_[row];
1623  const Fractional value = variable_values_.Get(col);
1624  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1625  DCHECK_NE(direction, 0.0);
1626  if (is_entering_reduced_cost_positive) {
1627  if (direction > 0.0) {
1628  return (upper_bounds[col] - value) / direction;
1629  } else {
1630  return (lower_bounds[col] - value) / direction;
1631  }
1632  } else {
1633  if (direction > 0.0) {
1634  return (value - lower_bounds[col]) / direction;
1635  } else {
1636  return (value - upper_bounds[col]) / direction;
1637  }
1638  }
1639 }
1640 
1641 template <bool is_entering_reduced_cost_positive>
1642 Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
1643  Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
1644  SCOPED_TIME_STAT(&function_stats_);
1645  const Fractional harris_tolerance =
1646  parameters_.harris_tolerance_ratio() *
1647  parameters_.primal_feasibility_tolerance();
1648  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
1649  parameters_.primal_feasibility_tolerance();
1650 
1651  // Initially, we can skip any variable with a ratio greater than
1652  // bound_flip_ratio since it seems to be always better to choose the
1653  // bound-flip over such leaving variable.
1654  Fractional harris_ratio = bound_flip_ratio;
1655  leaving_candidates->Clear();
1656 
1657  // If the basis is refactorized, then we should have everything with a good
1658  // precision, so we only consider "acceptable" pivots. Otherwise we consider
1659  // all the entries, and if the algorithm return a pivot that is too small, we
1660  // will refactorize and recompute the relevant quantities.
1661  const Fractional threshold = basis_factorization_.IsRefactorized()
1662  ? parameters_.minimum_acceptable_pivot()
1663  : parameters_.ratio_test_zero_threshold();
1664 
1665  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1666  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1667  for (const auto e : direction_) {
1668  const Fractional magnitude = std::abs(e.coefficient());
1669  if (magnitude <= threshold) continue;
1670  const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
1671  lower_bounds, upper_bounds, e.row());
1672  if (ratio <= harris_ratio) {
1673  leaving_candidates->SetCoefficient(e.row(), ratio);
1674 
1675  // The second max() makes sure harris_ratio is lower bounded by a small
1676  // positive value. The more classical approach is to bound it by 0.0 but
1677  // since we will always perform a small positive step, we allow any
1678  // variable to go a bit more out of bound (even if it is past the harris
1679  // tolerance). This increase the number of candidates and allows us to
1680  // choose a more numerically stable pivot.
1681  //
1682  // Note that at least lower bounding it by 0.0 is really important on
1683  // numerically difficult problems because its helps in the choice of a
1684  // stable pivot.
1685  harris_ratio = std::min(harris_ratio,
1686  std::max(minimum_delta / magnitude,
1687  ratio + harris_tolerance / magnitude));
1688  }
1689  }
1690  return harris_ratio;
1691 }
1692 
1693 namespace {
1694 
1695 // Returns true if the candidate ratio is supposed to be more stable than the
1696 // current ratio (or if the two are equal).
1697 // The idea here is to take, by order of preference:
1698 // - the minimum positive ratio in order to intoduce a primal infeasibility
1699 // which is as small as possible.
1700 // - or the least negative one in order to have the smallest bound shift
1701 // possible on the leaving variable.
1702 bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1703  if (current >= 0.0) {
1704  return candidate >= 0.0 && candidate <= current;
1705  } else {
1706  return candidate >= current;
1707  }
1708 }
1709 
1710 } // namespace
1711 
// Ratio-test or Quotient-test. Choose the row of the leaving variable.
// Known as CHUZR or CHUZRO in FORTRAN codes.
//
// On success, either *leaving_row == kInvalidRow (a bound-flip of the
// entering variable was preferred) or it holds the chosen row; *step_length
// is the step of the entering variable and *target_bound the bound the
// leaving variable will be set to. May instead set *refactorize to request a
// refactorization before retrying.
Status RevisedSimplex::ChooseLeavingVariableRow(
    ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
    RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
  SCOPED_TIME_STAT(&function_stats_);
  GLOP_RETURN_ERROR_IF_NULL(refactorize);
  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
  GLOP_RETURN_ERROR_IF_NULL(step_length);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_NE(0.0, reduced_cost);

  // A few cases will cause the test to be recomputed from the beginning.
  int stats_num_leaving_choices = 0;
  equivalent_leaving_choices_.clear();
  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  while (true) {
    stats_num_leaving_choices = 0;

    // We initialize current_ratio with the maximum step the entering variable
    // can take (bound-flip). Note that we do not use tolerance here.
    const Fractional entering_value = variable_values_.Get(entering_col);
    Fractional current_ratio =
        (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
                             : upper_bounds[entering_col] - entering_value;
    DCHECK_GT(current_ratio, 0.0);

    // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
    // actually computes the minimum leaving ratio of all the variables. This
    // is the same as the 'classic' ratio test.
    const Fractional harris_ratio =
        (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
                                   current_ratio, &leaving_candidates_)
                             : ComputeHarrisRatioAndLeavingCandidates<false>(
                                   current_ratio, &leaving_candidates_);

    // If the bound-flip is a viable solution (i.e. it doesn't move the basic
    // variable too much out of bounds), we take it as it is always stable and
    // fast.
    if (current_ratio <= harris_ratio) {
      *leaving_row = kInvalidRow;
      *step_length = current_ratio;
      break;
    }

    // Second pass of the Harris ratio test. Amongst the variables with 'ratio
    // <= harris_ratio', we choose the leaving row with the largest
    // coefficient.
    //
    // This has a big impact, because picking a leaving variable with a small
    // direction_[row] is the main source of Abnormal LU errors.
    Fractional pivot_magnitude = 0.0;
    stats_num_leaving_choices = 0;
    *leaving_row = kInvalidRow;
    equivalent_leaving_choices_.clear();
    for (const SparseColumn::Entry e : leaving_candidates_) {
      const Fractional ratio = e.coefficient();
      if (ratio > harris_ratio) continue;
      ++stats_num_leaving_choices;
      const RowIndex row = e.row();

      // If the magnitudes are the same, we choose the leaving variable with
      // what is probably the more stable ratio, see
      // IsRatioMoreOrEquallyStable().
      const Fractional candidate_magnitude = std::abs(direction_[row]);
      if (candidate_magnitude < pivot_magnitude) continue;
      if (candidate_magnitude == pivot_magnitude) {
        if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
        if (ratio == current_ratio) {
          // Perfect tie: remember it so it can be broken randomly below.
          DCHECK_NE(kInvalidRow, *leaving_row);
          equivalent_leaving_choices_.push_back(row);
          continue;
        }
      }
      equivalent_leaving_choices_.clear();
      current_ratio = ratio;
      pivot_magnitude = candidate_magnitude;
      *leaving_row = row;
    }

    // Break the ties randomly.
    if (!equivalent_leaving_choices_.empty()) {
      equivalent_leaving_choices_.push_back(*leaving_row);
      *leaving_row =
          equivalent_leaving_choices_[std::uniform_int_distribution<int>(
              0, equivalent_leaving_choices_.size() - 1)(random_)];
    }

    // Since we took care of the bound-flip at the beginning, at this point
    // we have a valid leaving row.
    DCHECK_NE(kInvalidRow, *leaving_row);

    // A variable already outside one of its bounds +/- tolerance is considered
    // at its bound and its ratio is zero. Not doing this may lead to a step
    // that moves the objective in the wrong direction. We may want to allow
    // such steps, but then we will need to check that it doesn't break the
    // bounds of the other variables.
    if (current_ratio <= 0.0) {
      // Instead of doing a zero step, we do a small positive step. This
      // helps on degenerate problems.
      const Fractional minimum_delta =
          parameters_.degenerate_ministep_factor() *
          parameters_.primal_feasibility_tolerance();
      *step_length = minimum_delta / pivot_magnitude;
    } else {
      *step_length = current_ratio;
    }

    // Note(user): Testing the pivot at each iteration is useful for debugging
    // an LU factorization problem. Remove the false if you need to investigate
    // this, it makes sure that this will be compiled away.
    if (/* DISABLES CODE */ (false)) {
      TestPivot(entering_col, *leaving_row);
    }

    // We try various "heuristics" to avoid a small pivot.
    //
    // The smaller 'direction_[*leaving_row]', the less precise
    // it is. So we want to avoid pivoting by such a row. Small pivots lead to
    // ill-conditioned bases or even to matrices that are not a basis at all if
    // the actual (infinite-precision) coefficient is zero.
    //
    // TODO(user): We may have to choose another entering column if
    // we cannot prevent pivoting by a small pivot.
    // (Chvatal, p.115, about epsilon2.)
    if (pivot_magnitude <
        parameters_.small_pivot_threshold() * direction_infinity_norm_) {
      // The first countermeasure is to recompute everything to the best
      // precision we can in the hope of avoiding such a choice. Note that this
      // helps a lot on the Netlib problems.
      if (!basis_factorization_.IsRefactorized()) {
        VLOG(1) << "Refactorizing to avoid pivoting by "
                << direction_[*leaving_row]
                << " direction_infinity_norm_ = " << direction_infinity_norm_
                << " reduced cost = " << reduced_cost;
        *refactorize = true;
        return Status::OK();
      }

      // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
      // we know that this pivot will still have an acceptable magnitude.
      //
      // TODO(user): An issue left to fix is that if there is no such pivot at
      // all, then we will report unbounded even if this is not really the
      // case. As of 2018/07/18, this happens on l30.mps.
      VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
              << " direction_infinity_norm_ = " << direction_infinity_norm_
              << " reduced cost = " << reduced_cost;
      DCHECK_GE(std::abs(direction_[*leaving_row]),
                parameters_.minimum_acceptable_pivot());
      IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
    }
    break;
  }

  // Update the target bound.
  if (*leaving_row != kInvalidRow) {
    const bool is_reduced_cost_positive = (reduced_cost > 0.0);
    const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
    *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
                        ? upper_bounds[basis_[*leaving_row]]
                        : lower_bounds[basis_[*leaving_row]];
  }

  // Stats.
  // NOTE(review): the extraction this file came from dropped original line
  // 1877 here, which most likely opened an IF_STATS_ENABLED({ block matching
  // the "});" below — confirm against the upstream file.
  ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
  if (!equivalent_leaving_choices_.empty()) {
    ratio_test_stats_.num_perfect_ties.Add(
        equivalent_leaving_choices_.size());
  }
  if (*leaving_row != kInvalidRow) {
    ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
  }
  });
  return Status::OK();
}
1889 
1890 namespace {
1891 
1892 // Store a row with its ratio, coefficient magnitude and target bound. This is
1893 // used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1894 // details.
1895 struct BreakPoint {
1896  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1897  Fractional _target_bound)
1898  : row(_row),
1899  ratio(_ratio),
1900  coeff_magnitude(_coeff_magnitude),
1901  target_bound(_target_bound) {}
1902 
1903  // We want to process the breakpoints by increasing ratio and decreasing
1904  // coefficient magnitude (if the ratios are the same). Returns false if "this"
1905  // is before "other" in a priority queue.
1906  bool operator<(const BreakPoint& other) const {
1907  if (ratio == other.ratio) {
1908  if (coeff_magnitude == other.coeff_magnitude) {
1909  return row > other.row;
1910  }
1911  return coeff_magnitude < other.coeff_magnitude;
1912  }
1913  return ratio > other.ratio;
1914  }
1915 
1916  RowIndex row;
1920 };
1921 
1922 } // namespace
1923 
1924 void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
1925  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1926  RowIndex* leaving_row, Fractional* step_length,
1927  Fractional* target_bound) const {
1928  SCOPED_TIME_STAT(&function_stats_);
1929  RETURN_IF_NULL(refactorize);
1930  RETURN_IF_NULL(leaving_row);
1931  RETURN_IF_NULL(step_length);
1932  DCHECK_COL_BOUNDS(entering_col);
1933  DCHECK_NE(0.0, reduced_cost);
1934  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1935  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1936 
1937  // We initialize current_ratio with the maximum step the entering variable
1938  // can take (bound-flip). Note that we do not use tolerance here.
1939  const Fractional entering_value = variable_values_.Get(entering_col);
1940  Fractional current_ratio = (reduced_cost > 0.0)
1941  ? entering_value - lower_bounds[entering_col]
1942  : upper_bounds[entering_col] - entering_value;
1943  DCHECK_GT(current_ratio, 0.0);
1944 
1945  std::vector<BreakPoint> breakpoints;
1946  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1947  for (const auto e : direction_) {
1948  const Fractional direction =
1949  reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
1950  const Fractional magnitude = std::abs(direction);
1951  if (magnitude < tolerance) continue;
1952 
1953  // Computes by how much we can add 'direction' to the basic variable value
1954  // with index 'row' until it changes of primal feasibility status. That is
1955  // from infeasible to feasible or from feasible to infeasible. Note that the
1956  // transition infeasible->feasible->infeasible is possible. We use
1957  // tolerances here, but when the step will be performed, it will move the
1958  // variable to the target bound (possibly taking a small negative step).
1959  //
1960  // Note(user): The negative step will only happen when the leaving variable
1961  // was slightly infeasible (less than tolerance). Moreover, the overall
1962  // infeasibility will not necessarily increase since it doesn't take into
1963  // account all the variables with an infeasibility smaller than the
1964  // tolerance, and here we will at least improve the one of the leaving
1965  // variable.
1966  const ColIndex col = basis_[e.row()];
1967  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1968 
1969  const Fractional value = variable_values_.Get(col);
1972  const Fractional to_lower = (lower_bound - tolerance - value) / direction;
1973  const Fractional to_upper = (upper_bound + tolerance - value) / direction;
1974 
1975  // Enqueue the possible transitions. Note that the second tests exclude the
1976  // case where to_lower or to_upper are infinite.
1977  if (to_lower >= 0.0 && to_lower < current_ratio) {
1978  breakpoints.push_back(
1979  BreakPoint(e.row(), to_lower, magnitude, lower_bound));
1980  }
1981  if (to_upper >= 0.0 && to_upper < current_ratio) {
1982  breakpoints.push_back(
1983  BreakPoint(e.row(), to_upper, magnitude, upper_bound));
1984  }
1985  }
1986 
1987  // Order the breakpoints by increasing ratio and decreasing coefficient
1988  // magnitude (if the ratios are the same).
1989  std::make_heap(breakpoints.begin(), breakpoints.end());
1990 
1991  // Select the last breakpoint that still improves the infeasibility and has
1992  // the largest coefficient magnitude.
1993  Fractional improvement = std::abs(reduced_cost);
1994  Fractional best_magnitude = 0.0;
1995  *leaving_row = kInvalidRow;
1996  while (!breakpoints.empty()) {
1997  const BreakPoint top = breakpoints.front();
1998  // TODO(user): consider using >= here. That will lead to bigger ratio and
1999  // hence a better impact on the infeasibility. The drawback is that more
2000  // effort may be needed to update the reduced costs.
2001  //
2002  // TODO(user): Use a random tie breaking strategy for BreakPoint with
2003  // same ratio and same coefficient magnitude? Koberstein explains in his PhD
2004  // that it helped on the dual-simplex.
2005  if (top.coeff_magnitude > best_magnitude) {
2006  *leaving_row = top.row;
2007  current_ratio = top.ratio;
2008  best_magnitude = top.coeff_magnitude;
2009  *target_bound = top.target_bound;
2010  }
2011 
2012  // As long as the sum of primal infeasibilities is decreasing, we look for
2013  // pivots that are numerically more stable.
2014  improvement -= top.coeff_magnitude;
2015  if (improvement <= 0.0) break;
2016  std::pop_heap(breakpoints.begin(), breakpoints.end());
2017  breakpoints.pop_back();
2018  }
2019 
2020  // Try to avoid a small pivot by refactorizing.
2021  if (*leaving_row != kInvalidRow) {
2022  const Fractional threshold =
2023  parameters_.small_pivot_threshold() * direction_infinity_norm_;
2024  if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
2025  *refactorize = true;
2026  return;
2027  }
2028  }
2029  *step_length = current_ratio;
2030 }
2031 
2032 // This implements the pricing step for the dual simplex.
2033 Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
2034  Fractional* cost_variation,
2036  SCOPED_TIME_STAT(&function_stats_);
2037  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2038  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2040 
2041  // This is not supposed to happen, but better be safe.
2042  if (dual_prices_.Size() == 0) {
2043  variable_values_.RecomputeDualPrices();
2044  }
2045 
2046  // Return right away if there is no leaving variable.
2047  // Fill cost_variation and target_bound otherwise.
2048  *leaving_row = dual_prices_.GetMaximum();
2049  if (*leaving_row == kInvalidRow) return Status::OK();
2050 
2051  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2052  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2053  const ColIndex leaving_col = basis_[*leaving_row];
2054  const Fractional value = variable_values_.Get(leaving_col);
2055  if (value < lower_bounds[leaving_col]) {
2056  *cost_variation = lower_bounds[leaving_col] - value;
2057  *target_bound = lower_bounds[leaving_col];
2058  DCHECK_GT(*cost_variation, 0.0);
2059  } else {
2060  *cost_variation = upper_bounds[leaving_col] - value;
2061  *target_bound = upper_bounds[leaving_col];
2062  DCHECK_LT(*cost_variation, 0.0);
2063  }
2064  return Status::OK();
2065 }
2066 
2067 namespace {
2068 
2069 // Returns true if a basic variable with given cost and type is to be considered
2070 // as a leaving candidate for the dual phase I.
2071 bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
2072  Fractional threshold) {
2073  if (cost == 0.0) return false;
2074  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
2075  type == VariableType::FIXED_VARIABLE ||
2076  (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
2077  (type == VariableType::LOWER_BOUNDED && cost > threshold);
2078 }
2079 
2080 } // namespace
2081 
2082 // Important: The norm should be updated before this is called.
2083 template <bool use_dense_update>
2084 void RevisedSimplex::OnDualPriceChange(const DenseColumn& squared_norm,
2085  RowIndex row, VariableType type,
2086  Fractional threshold) {
2087  const Fractional price = dual_pricing_vector_[row];
2088  const bool is_candidate =
2089  IsDualPhaseILeavingCandidate(price, type, threshold);
2090  if (is_candidate) {
2091  if (use_dense_update) {
2092  dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
2093  } else {
2094  dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
2095  }
2096  } else {
2097  dual_prices_.Remove(row);
2098  }
2099 }
2100 
// Updates the dual phase-I pricing vector and the candidate prices after the
// pivot (leaving_row, entering_col). The statement order below matters: the
// pricing vector is first mapped into the new basis, then corrected for the
// two positions whose dual-feasibility status changed.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);
  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Note that because the norm are also updated only on the position of the
  // direction, scaled_dual_pricing_vector_ will be up to date.
  const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();

  // Convert the dual_pricing_vector_ from the old basis into the new one
  // (which is the same as multiplying it by an Eta matrix corresponding to
  // the direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
                      threshold);
  }
  // The loop above also touched leaving_row; its new value is simply 'step'.
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status. Note that after the
  // pivot, the variable of this row is entering_col.
  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
                    threshold);
}
2139 
// For each column in 'cols', re-derives the direction (+1/-1/0) in which its
// reduced cost must move to improve dual feasibility, and propagates any
// change into dual_pricing_vector_ / dual_prices_ via a single right solve on
// an accumulated scattered column.
template <typename Cols>
void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
    const Cols& cols) {
  SCOPED_TIME_STAT(&function_stats_);
  bool something_to_do = false;
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
  for (ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];
    // Sign of the move that would improve this column's dual feasibility
    // (0.0 when the column is already dual-feasible).
    const Fractional sign =
        (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
        : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
        : 0.0;
    if (sign != dual_infeasibility_improvement_direction_[col]) {
      // Keep the count of dual-infeasible positions in sync.
      if (sign == 0.0) {
        --num_dual_infeasible_positions_;
      } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
        ++num_dual_infeasible_positions_;
      }
      // Lazily initialize the scratchpad on the first change.
      if (!something_to_do) {
        initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
        initially_all_zero_scratchpad_.ClearSparseMask();
        initially_all_zero_scratchpad_.non_zeros.clear();
        something_to_do = true;
      }

      // We add a factor 10 because of the scattered access.
      num_update_price_operations_ +=
          10 * compact_matrix_.column(col).num_entries().value();
      // NOTE(review): the extraction this file came from dropped original
      // line 2171 here — almost certainly the call opening
      // compact_matrix_.ColumnAddMultipleToSparseScatteredColumn( that the
      // two argument lines below belong to. Confirm against upstream.
          col, sign - dual_infeasibility_improvement_direction_[col],
          &initially_all_zero_scratchpad_);
      dual_infeasibility_improvement_direction_[col] = sign;
    }
  }
  if (something_to_do) {
    initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
    initially_all_zero_scratchpad_.ClearSparseMask();
    const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();

    const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
    const Fractional threshold = parameters_.ratio_test_zero_threshold();
    basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
    if (initially_all_zero_scratchpad_.non_zeros.empty()) {
      // Dense result from the solve: scan all rows and use dense updates.
      dual_prices_.StartDenseUpdates();
      for (RowIndex row(0); row < num_rows_; ++row) {
        if (initially_all_zero_scratchpad_[row] == 0.0) continue;
        dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
        OnDualPriceChange</*use_dense_update=*/true>(
            squared_norms, row, variable_type[basis_[row]], threshold);
      }
      initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
    } else {
      // Sparse result: only touch the non-zero rows, resetting them to zero
      // so the scratchpad stays all-zero for the next call.
      for (const auto e : initially_all_zero_scratchpad_) {
        dual_pricing_vector_[e.row()] += e.coefficient();
        OnDualPriceChange(squared_norms, e.row(),
                          variable_type[basis_[e.row()]], threshold);
        initially_all_zero_scratchpad_[e.row()] = 0.0;
      }
    }
    initially_all_zero_scratchpad_.non_zeros.clear();
  }
}
2205 
2206 Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2207  RowIndex* leaving_row, Fractional* cost_variation,
2209  SCOPED_TIME_STAT(&function_stats_);
2210  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2211  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2212  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2213  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2214 
2215  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2216  // positions and contains the sign in which the reduced cost of this column
2217  // needs to move to improve the feasibility otherwise (+1 or -1).
2218  //
2219  // Its current value was the one used to compute dual_pricing_vector_ and
2220  // was updated accordingly by DualPhaseIUpdatePrice().
2221  //
2222  // If more variables changed of dual-feasibility status during the last
2223  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2224  // take them into account.
2225  if (reduced_costs_.AreReducedCostsRecomputed() ||
2226  dual_edge_norms_.NeedsBasisRefactorization() ||
2227  dual_pricing_vector_.empty()) {
2228  // Recompute everything from scratch.
2229  num_dual_infeasible_positions_ = 0;
2230  dual_pricing_vector_.AssignToZero(num_rows_);
2231  dual_prices_.ClearAndResize(num_rows_);
2232  dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2233  DualPhaseIUpdatePriceOnReducedCostChange(
2234  variables_info_.GetIsRelevantBitRow());
2235  } else {
2236  // Update row is still equal to the row used during the last iteration
2237  // to update the reduced costs.
2238  DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2239  }
2240 
2241  // If there is no dual-infeasible position, we are done.
2242  *leaving_row = kInvalidRow;
2243  if (num_dual_infeasible_positions_ == 0) return Status::OK();
2244 
2245  *leaving_row = dual_prices_.GetMaximum();
2246 
2247  // Returns right away if there is no leaving variable or fill the other
2248  // return values otherwise.
2249  if (*leaving_row == kInvalidRow) return Status::OK();
2250  *cost_variation = dual_pricing_vector_[*leaving_row];
2251  const ColIndex leaving_col = basis_[*leaving_row];
2252  if (*cost_variation < 0.0) {
2253  *target_bound = upper_bounds[leaving_col];
2254  } else {
2255  *target_bound = lower_bounds[leaving_col];
2256  }
2258  return Status::OK();
2259 }
2260 
2261 template <typename BoxedVariableCols>
2262 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2263  const BoxedVariableCols& cols, bool update_basic_values) {
2264  SCOPED_TIME_STAT(&function_stats_);
2265  std::vector<ColIndex> changed_cols;
2266 
2267  // It is important to flip bounds within a tolerance because of precision
2268  // errors. Otherwise, this leads to cycling on many of the Netlib problems
2269  // since this is called at each iteration (because of the bound-flipping ratio
2270  // test).
2271  const DenseRow& variable_values = variable_values_.GetDenseRow();
2272  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2273  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2274  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2275  const Fractional dual_feasibility_tolerance =
2276  reduced_costs_.GetDualFeasibilityTolerance();
2277  const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2278  for (const ColIndex col : cols) {
2279  const Fractional reduced_cost = reduced_costs[col];
2280  const VariableStatus status = variable_status[col];
2281  DCHECK(variables_info_.GetTypeRow()[col] ==
2283  // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2284  DCHECK(variable_values[col] == lower_bounds[col] ||
2285  variable_values[col] == upper_bounds[col] ||
2286  status == VariableStatus::BASIC);
2287  if (reduced_cost > dual_feasibility_tolerance &&
2288  status == VariableStatus::AT_UPPER_BOUND) {
2289  variables_info_.UpdateToNonBasicStatus(col,
2291  changed_cols.push_back(col);
2292  } else if (reduced_cost < -dual_feasibility_tolerance &&
2293  status == VariableStatus::AT_LOWER_BOUND) {
2294  variables_info_.UpdateToNonBasicStatus(col,
2296  changed_cols.push_back(col);
2297  }
2298  }
2299 
2300  if (!changed_cols.empty()) {
2301  iteration_stats_.num_dual_flips.Add(changed_cols.size());
2302  variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2303  update_basic_values);
2304  }
2305 }
2306 
2307 Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2308  RowIndex leaving_row, Fractional target_bound) {
2309  SCOPED_TIME_STAT(&function_stats_);
2310 
2311  // We just want the leaving variable to go to its target_bound.
2312  const ColIndex leaving_col = basis_[leaving_row];
2313  const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2314  Fractional unscaled_step = leaving_variable_value - target_bound;
2315 
2316  // In Chvatal p 157 update_[entering_col] is used instead of
2317  // direction_[leaving_row], but the two quantities are actually the
2318  // same. This is because update_[col] is the value at leaving_row of
2319  // the right inverse of col and direction_ is the right inverse of the
2320  // entering_col. Note that direction_[leaving_row] is probably more
2321  // precise.
2322  // TODO(user): use this to check precision and trigger recomputation.
2323  return unscaled_step / direction_[leaving_row];
2324 }
2325 
2326 bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2327  VLOG(1) << "Test pivot.";
2328  SCOPED_TIME_STAT(&function_stats_);
2329  const ColIndex leaving_col = basis_[leaving_row];
2330  basis_[leaving_row] = entering_col;
2331 
2332  // TODO(user): If 'is_ok' is true, we could use the computed lu in
2333  // basis_factorization_ rather than recompute it during UpdateAndPivot().
2334  CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2335  const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2336  basis_[leaving_row] = leaving_col;
2337  return is_ok;
2338 }
2339 
2340 // Note that this function is an optimization and that if it was doing nothing
2341 // the algorithm will still be correct and work. Using it does change the pivot
2342 // taken during the simplex method though.
2343 void RevisedSimplex::PermuteBasis() {
2344  SCOPED_TIME_STAT(&function_stats_);
2345 
2346  // Fetch the current basis column permutation and return if it is empty which
2347  // means the permutation is the identity.
2348  const ColumnPermutation& col_perm =
2349  basis_factorization_.GetColumnPermutation();
2350  if (col_perm.empty()) return;
2351 
2352  // Permute basis_.
2353  ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);
2354 
2355  // Permute dual_pricing_vector_ if needed.
2356  if (!dual_pricing_vector_.empty()) {
2357  // TODO(user): We need to permute dual_prices_ too now, we recompute
2358  // everything one each basis factorization, so this don't matter.
2359  ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
2360  }
2361 
2362  // Notify the other classes.
2363  reduced_costs_.UpdateDataOnBasisPermutation();
2364  dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2365 
2366  // Finally, remove the column permutation from all subsequent solves since
2367  // it has been taken into account in basis_.
2368  basis_factorization_.SetColumnPermutationToIdentity();
2369 }
2370 
2371 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2372  RowIndex leaving_row,
2374  SCOPED_TIME_STAT(&function_stats_);
2375 
2376  // Tricky and a bit hacky.
2377  //
2378  // The basis update code assumes that we already computed the left inverse of
2379  // the leaving row, otherwise it will just refactorize the basis. This left
2380  // inverse is needed by update_row_.ComputeUpdateRow(), so in most case it
2381  // will already be computed. However, in some situation we don't need the
2382  // full update row, so just the left inverse can be computed.
2383  //
2384  // TODO(user): Ideally this shouldn't be needed if we are going to refactorize
2385  // the basis anyway. So we should know that before hand which is currently
2386  // hard to do.
2387  Fractional pivot_from_update_row;
2388  if (update_row_.IsComputed()) {
2389  pivot_from_update_row = update_row_.GetCoefficient(entering_col);
2390  } else {
2391  // We only need the left inverse and the update row position at the
2392  // entering_col to check precision.
2393  update_row_.ComputeUnitRowLeftInverse(leaving_row);
2394  pivot_from_update_row = compact_matrix_.ColumnScalarProduct(
2395  entering_col, update_row_.GetUnitRowLeftInverse().values);
2396  }
2397 
2398  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2399  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2400  const ColIndex leaving_col = basis_[leaving_row];
2401  const VariableStatus leaving_variable_status =
2402  lower_bounds[leaving_col] == upper_bounds[leaving_col]
2404  : target_bound == lower_bounds[leaving_col]
2407  if (variable_values_.Get(leaving_col) != target_bound) {
2408  ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2409  target_bound);
2410  }
2411  UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2412 
2413  // Test precision by comparing two ways to get the "pivot".
2414  const Fractional pivot_from_direction = direction_[leaving_row];
2415  const Fractional diff =
2416  std::abs(pivot_from_update_row - pivot_from_direction);
2417  if (diff > parameters_.refactorization_threshold() *
2418  (1 + std::abs(pivot_from_direction))) {
2419  VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2420  << " diff = " << diff;
2421  GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2422  } else {
2424  basis_factorization_.Update(entering_col, leaving_row, direction_));
2425  }
2426  if (basis_factorization_.IsRefactorized()) {
2427  PermuteBasis();
2428  }
2429  return Status::OK();
2430 }
2431 
2432 Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2433  SCOPED_TIME_STAT(&function_stats_);
2434  if (*refactorize && !basis_factorization_.IsRefactorized()) {
2435  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2436  update_row_.Invalidate();
2437  PermuteBasis();
2438  }
2439  *refactorize = false;
2440  return Status::OK();
2441 }
2442 
2444  if (col >= integrality_scale_.size()) {
2445  integrality_scale_.resize(col + 1, 0.0);
2446  }
2447  integrality_scale_[col] = scale;
2448 }
2449 
// Polish(): post-solve heuristic. Among non-basic columns whose reduced cost
// is (near) zero, it performs a handful of pivots chosen to decrease the
// "fractionality" of the basic solution as measured through
// integrality_scale_. Because only (near) zero reduced cost columns may
// enter, the objective is essentially unchanged.
//
// NOTE(review): this generated listing dropped some hyperlinked lines below
// (a GLOP_RETURN_ERROR_IF_NULL/SCOPED_TIME_STAT line, a target_bound
// declaration, GLOP_RETURN_IF_ERROR( wrappers and VariableStatus arguments —
// visible as gaps in the embedded line numbers); confirm against upstream
// revised_simplex.cc before compiling.
2450 Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2452  Cleanup update_deterministic_time_on_return(
2453  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2454 
2455  // Get all non-basic variables with a reduced costs close to zero.
2456  // Note that because we only choose entering candidate with a cost of zero,
2457  // this set will not change (modulo epsilons).
2458  const DenseRow& rc = reduced_costs_.GetReducedCosts();
2459  std::vector<ColIndex> candidates;
2460  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2461  if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2462  if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2463  }
2464 
2465  bool refactorize = false;
2466  int num_pivots = 0;
2467  Fractional total_gain = 0.0;
// At most 10 attempts and at most 5 accepted pivots per call, all bounded by
// the deterministic time limit.
2468  for (int i = 0; i < 10; ++i) {
2469  AdvanceDeterministicTime(time_limit);
2470  if (time_limit->LimitReached()) break;
2471  if (num_pivots >= 5) break;
2472  if (candidates.empty()) break;
2473 
2474  // Pick a random one and remove it from the list.
2475  const int index =
2476  std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2477  const ColIndex entering_col = candidates[index];
2478  std::swap(candidates[index], candidates.back());
2479  candidates.pop_back();
2480 
2481  // We need the entering variable to move in the correct direction.
// fake_rc only encodes the allowed movement direction; it is not a real
// reduced cost (the candidates all have |rc| < 1e-9).
2482  Fractional fake_rc = 1.0;
2483  if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2484  CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2485  fake_rc = -1.0;
2486  }
2487 
2488  // Refactorize if needed.
2489  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2490  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2491 
2492  // Compute the direction and by how much we can move along it.
2493  ComputeDirection(entering_col);
2494  Fractional step_length;
2495  RowIndex leaving_row;
// NOTE(review): a 'Fractional target_bound;' declaration and a
// 'GLOP_RETURN_IF_ERROR(' wrapper were dropped here by the listing.
2497  bool local_refactorize = false;
2499  ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2500  &leaving_row, &step_length, &target_bound));
2501 
// Skip the candidate on any numerically dubious situation: refactorization
// request, unbounded or tiny step, or a small pivot element.
2502  if (local_refactorize) continue;
2503  if (step_length == kInfinity || step_length == -kInfinity) continue;
2504  if (std::abs(step_length) <= 1e-6) continue;
2505  if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2506  continue;
2507  }
2508  const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2509 
2510  // Evaluate if pivot reduce the fractionality of the basis.
2511  //
2512  // TODO(user): Count with more weight variable with a small domain, i.e.
2513  // binary variable, compared to a variable in [0, 1k] ?
// get_diff() returns the change in distance-to-nearest-integer (in the
// integrality_scale_ units) when col moves from old_value to new_value;
// negative means "more integral".
2514  const auto get_diff = [this](ColIndex col, Fractional old_value,
2515  Fractional new_value) {
2516  if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2517  return 0.0;
2518  }
2519  const Fractional s = integrality_scale_[col];
2520  return (std::abs(new_value * s - std::round(new_value * s)) -
2521  std::abs(old_value * s - std::round(old_value * s)));
2522  };
2523  Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2524  variable_values_.Get(entering_col) + step);
2525  for (const auto e : direction_) {
2526  const ColIndex col = basis_[e.row()];
2527  const Fractional old_value = variable_values_.Get(col);
2528  const Fractional new_value = old_value - e.coefficient() * step;
2529  diff += get_diff(col, old_value, new_value);
2530  }
2531 
2532  // Ignore low decrease in integrality.
2533  if (diff > -1e-2) continue;
2534  total_gain -= diff;
2535 
2536  // We perform the change.
2537  num_pivots++;
2538  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2539 
2540  // This is a bound flip of the entering column.
// NOTE(review): the VariableStatus::AT_UPPER_BOUND / AT_LOWER_BOUND
// arguments were dropped here by the listing.
2541  if (leaving_row == kInvalidRow) {
2542  if (step > 0.0) {
2543  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2545  } else if (step < 0.0) {
2546  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2548  }
2549  continue;
2550  }
2551 
2552  // Perform the pivot.
2553  const ColIndex leaving_col = basis_[leaving_row];
2554  update_row_.ComputeUpdateRow(leaving_row);
2555 
2556  // Note that this will only do work if the norms are computed.
2557  //
2558  // TODO(user): We should probably move all the "update" in a function so
2559  // that all "iterations" function can just reuse the same code. Everything
2560  // that is currently not "cleared" should be updated. If one does not want
2561  // that, then it is easy to call Clear() on the quantities that do not needs
2562  // to be kept in sync with the current basis.
2563  primal_edge_norms_.UpdateBeforeBasisPivot(
2564  entering_col, leaving_col, leaving_row, direction_, &update_row_);
2565  dual_edge_norms_.UpdateBeforeBasisPivot(
2566  entering_col, leaving_row, direction_,
2567  update_row_.GetUnitRowLeftInverse());
2568 
2569  // TODO(user): Rather than maintaining this, it is probably better to
2570  // recompute it in one go after Polish() is done. We don't use the reduced
2571  // costs here as we just assume that the set of candidates does not change.
2572  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2573  &update_row_);
2574 
// Same degeneracy definition as in PrimalMinimize(): zero movement, or a
// move that pushes an already-infeasible leaving variable further out.
2575  const Fractional dir = -direction_[leaving_row] * step;
2576  const bool is_degenerate =
2577  (dir == 0.0) ||
2578  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2579  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2580  if (!is_degenerate) {
2581  variable_values_.Set(leaving_col, target_bound);
2582  }
// NOTE(review): a 'GLOP_RETURN_IF_ERROR(' wrapper was dropped here.
2584  UpdateAndPivot(entering_col, leaving_row, target_bound));
2585  }
2586 
2587  VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2588  return Status::OK();
2589 }
2590 
2591 // Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2592 // x an n-vector.
2593 //
2594 // x is split in two parts x_B and x_N (B standing for basis).
2595 // In the same way, A is split in A_B (also known as B) and A_N, and
2596 // c is split into c_B and c_N.
2597 //
2598 // The goal is to minimize c_B.x_B + c_N.x_N
2599 // subject to B.x_B + A_N.x_N = 0
2600 // and x_lower <= x <= x_upper.
2601 //
2602 // To minimize c.x, at each iteration a variable from x_N is selected to
2603 // enter the basis, and a variable from x_B is selected to leave the basis.
2604 // To avoid explicit inversion of B, the algorithm solves two sub-systems:
2605 // y.B = c_B and B.d = a (a being the entering column).
// PrimalMinimize(): main primal simplex loop, used both for phase I
// (phase_ == Phase::FEASIBILITY, minimizing an infeasibility objective) and
// phase II (Phase::OPTIMIZATION, minimizing the real objective). Iterates:
// pick an entering column by price, solve B.d = a for the direction, run the
// ratio test for the leaving row, then pivot or bound-flip. Returns through
// problem_status_ (OPTIMAL / PRIMAL_FEASIBLE / PRIMAL_INFEASIBLE /
// PRIMAL_UNBOUNDED / ABNORMAL).
//
// NOTE(review): this generated listing dropped several hyperlinked lines
// below (GLOP_RETURN_ERROR_IF_NULL/SCOPED_TIME_STAT, IF_STATS_ENABLED( and
// GLOP_RETURN_IF_ERROR( wrappers, a 'Fractional target_bound;' declaration
// and VariableStatus arguments — visible as gaps in the embedded line
// numbers); confirm against upstream revised_simplex.cc before compiling.
2606 Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
2608  Cleanup update_deterministic_time_on_return(
2609  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2610  num_consecutive_degenerate_iterations_ = 0;
2611  DisplayIterationInfo();
2612  bool refactorize = false;
2613 
2614  // At this point, we are not sure the prices are always up to date, so
2615  // lets always reset them for the first iteration below.
2616  primal_prices_.ForceRecomputation();
2617 
2618  if (phase_ == Phase::FEASIBILITY) {
2619  // Initialize the primal phase-I objective.
2620  // Note that this temporarily erases the problem objective.
2621  objective_.AssignToZero(num_cols_);
2622  variable_values_.UpdatePrimalPhaseICosts(
2623  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2624  reduced_costs_.ResetForNewObjective();
2625  }
2626 
2627  while (true) {
2628  // TODO(user): we may loop a bit more than the actual number of iteration.
2629  // fix.
2631  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2632 
2633  // Trigger a refactorization if one of the class we use request it.
2634  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2635  if (primal_edge_norms_.NeedsBasisRefactorization()) refactorize = true;
2636  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2637 
2638  if (basis_factorization_.IsRefactorized()) {
2639  CorrectErrorsOnVariableValues();
2640  DisplayIterationInfo();
2641 
2642  if (phase_ == Phase::FEASIBILITY) {
2643  // Since the variable values may have been recomputed, we need to
2644  // recompute the primal infeasible variables and update their costs.
2645  if (variable_values_.UpdatePrimalPhaseICosts(
2646  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2647  &objective_)) {
2648  reduced_costs_.ResetForNewObjective();
2649  }
2650  }
2651 
2652  // Computing the objective at each iteration takes time, so we just
2653  // check the limit when the basis is refactorized.
2654  if (phase_ == Phase::OPTIMIZATION &&
2655  ComputeObjectiveValue() < primal_objective_limit_) {
2656  VLOG(1) << "Stopping the primal simplex because"
2657  << " the objective limit " << primal_objective_limit_
2658  << " has been reached.";
2659  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2660  objective_limit_reached_ = true;
2661  return Status::OK();
2662  }
2663  } else if (phase_ == Phase::FEASIBILITY) {
2664  // Note that direction_.non_zeros contains the positions of the basic
2665  // variables whose values were updated during the last iteration.
2666  if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2667  &objective_)) {
2668  reduced_costs_.ResetForNewObjective();
2669  }
2670  }
2671 
// No entering column: either we are done (if everything is precise and the
// basis is fresh), or we tighten precision and loop once more.
2672  const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2673  if (entering_col == kInvalidCol) {
2674  if (reduced_costs_.AreReducedCostsPrecise() &&
2675  basis_factorization_.IsRefactorized()) {
2676  if (phase_ == Phase::FEASIBILITY) {
2677  const Fractional primal_infeasibility =
2678  variable_values_.ComputeMaximumPrimalInfeasibility();
2679  if (primal_infeasibility <
2680  parameters_.primal_feasibility_tolerance()) {
2681  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2682  } else {
2683  VLOG(1) << "Infeasible problem! infeasibility = "
2684  << primal_infeasibility;
2685  problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2686  }
2687  } else {
2688  problem_status_ = ProblemStatus::OPTIMAL;
2689  }
2690  break;
2691  }
2692 
2693  VLOG(1) << "Optimal reached, double checking...";
2694  reduced_costs_.MakeReducedCostsPrecise();
2695  refactorize = true;
2696  continue;
2697  }
2698 
2699  DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2700 
2701  // Solve the system B.d = a with a the entering column.
2702  ComputeDirection(entering_col);
2703 
2704  // This might trigger a recomputation on the next iteration, but we
2705  // finish this one even if the price is imprecise.
2706  primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col, direction_);
2707  const Fractional reduced_cost =
2708  reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
2709  direction_);
2710 
2711  // The test might have changed the reduced cost of the entering_col.
2712  // If it is no longer a valid entering candidate, we loop.
2713  primal_prices_.RecomputePriceAt(entering_col);
2714  if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
2715  reduced_costs_.MakeReducedCostsPrecise();
2716  VLOG(1) << "Skipping col #" << entering_col
2717  << " whose reduced cost is no longer valid under precise reduced "
2718  "cost: "
2719  << reduced_cost;
2720  continue;
2721  }
2722 
2723  // This test takes place after the check for optimality/feasibility because
2724  // when running with 0 iterations, we still want to report
2725  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2726  // case at the beginning of the algorithm.
2727  AdvanceDeterministicTime(time_limit);
2728  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2729  time_limit->LimitReached()) {
2730  break;
2731  }
2732 
// Ratio test: find the leaving row and how far the entering variable can
// move. NOTE(review): a 'Fractional target_bound;' declaration and a
// 'GLOP_RETURN_IF_ERROR(' wrapper were dropped here by the listing.
2733  Fractional step_length;
2734  RowIndex leaving_row;
2736  if (phase_ == Phase::FEASIBILITY) {
2737  PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2738  &refactorize, &leaving_row,
2739  &step_length, &target_bound);
2740  } else {
2742  ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2743  &leaving_row, &step_length, &target_bound));
2744  }
2745  if (refactorize) continue;
2746 
2747  if (step_length == kInfinity || step_length == -kInfinity) {
2748  if (!basis_factorization_.IsRefactorized() ||
2749  !reduced_costs_.AreReducedCostsPrecise()) {
2750  VLOG(1) << "Infinite step length, double checking...";
2751  reduced_costs_.MakeReducedCostsPrecise();
2752  continue;
2753  }
2754  if (phase_ == Phase::FEASIBILITY) {
2755  // This shouldn't happen by construction.
2756  VLOG(1) << "Unbounded feasibility problem !?";
2757  problem_status_ = ProblemStatus::ABNORMAL;
2758  } else {
2759  VLOG(1) << "Unbounded problem.";
2760  problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2761  solution_primal_ray_.AssignToZero(num_cols_);
2762  for (RowIndex row(0); row < num_rows_; ++row) {
2763  const ColIndex col = basis_[row];
2764  solution_primal_ray_[col] = -direction_[row];
2765  }
2766  solution_primal_ray_[entering_col] = 1.0;
2767  if (step_length == -kInfinity) {
2768  ChangeSign(&solution_primal_ray_);
2769  }
2770  }
2771  break;
2772  }
2773 
2774  Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2775  if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
2776  // For phase-I we currently always set the leaving variable to its exact
2777  // bound even if by doing so we may take a small step in the wrong
2778  // direction and may increase the overall infeasibility.
2779  //
2780  // TODO(user): Investigate alternatives even if this seems to work well in
2781  // practice. Note that the final returned solution will have the property
2782  // that all non-basic variables are at their exact bound, so it is nice
2783  // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2784  // this property cannot be found.
2785  step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2786  }
2787 
2788  // Store the leaving_col before basis_ change.
2789  const ColIndex leaving_col =
2790  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2791 
2792  // An iteration is called 'degenerate' if the leaving variable is already
2793  // primal-infeasible and we make it even more infeasible or if we do a zero
2794  // step.
2795  bool is_degenerate = false;
2796  if (leaving_row != kInvalidRow) {
2797  Fractional dir = -direction_[leaving_row] * step;
2798  is_degenerate =
2799  (dir == 0.0) ||
2800  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2801  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2802 
2803  // If the iteration is not degenerate, the leaving variable should go to
2804  // its exact target bound (it is how the step is computed).
2805  if (!is_degenerate) {
2806  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2807  target_bound));
2808  }
2809  }
2810 
2811  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2812  if (leaving_row != kInvalidRow) {
2813  // Important: the norm must be updated before the reduced_cost.
2814  primal_edge_norms_.UpdateBeforeBasisPivot(
2815  entering_col, basis_[leaving_row], leaving_row, direction_,
2816  &update_row_);
2817  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2818  direction_, &update_row_);
2819  primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
2820  if (!is_degenerate) {
2821  // On a non-degenerate iteration, the leaving variable should be at its
2822  // exact bound. This corrects an eventual small numerical error since
2823  // 'value + direction * step' where step is
2824  // '(target_bound - value) / direction'
2825  // may be slighlty different from target_bound.
2826  variable_values_.Set(leaving_col, target_bound);
2827  }
// NOTE(review): a 'GLOP_RETURN_IF_ERROR(' wrapper and an
// 'IF_STATS_ENABLED(' opener were dropped around here by the listing.
2829  UpdateAndPivot(entering_col, leaving_row, target_bound));
2831  if (is_degenerate) {
2832  timer.AlsoUpdate(&iteration_stats_.degenerate);
2833  } else {
2834  timer.AlsoUpdate(&iteration_stats_.normal);
2835  }
2836  });
2837  } else {
2838  // Bound flip. This makes sure that the flipping variable is at its bound
2839  // and has the correct status.
// NOTE(review): a DCHECK opener and the VariableStatus::AT_UPPER_BOUND /
// AT_LOWER_BOUND arguments were dropped here by the listing.
2841  variables_info_.GetTypeRow()[entering_col]);
2842  if (step > 0.0) {
2843  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2845  } else if (step < 0.0) {
2846  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2848  }
2849  primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2850  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2851  }
2852 
2853  if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
2854  // Set the leaving variable to its exact bound.
2855  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2856 
2857  // Change the objective value of the leaving variable to zero.
2858  reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2859  &objective_[leaving_col]);
2860  primal_prices_.RecomputePriceAt(leaving_col);
2861  }
2862 
2863  // Stats about consecutive degenerate iterations.
2864  if (step_length == 0.0) {
2865  num_consecutive_degenerate_iterations_++;
2866  } else {
2867  if (num_consecutive_degenerate_iterations_ > 0) {
2868  iteration_stats_.degenerate_run_size.Add(
2869  num_consecutive_degenerate_iterations_);
2870  num_consecutive_degenerate_iterations_ = 0;
2871  }
2872  }
2873  ++num_iterations_;
2874  }
2875  if (num_consecutive_degenerate_iterations_ > 0) {
2876  iteration_stats_.degenerate_run_size.Add(
2877  num_consecutive_degenerate_iterations_);
2878  }
2879  return Status::OK();
2880 }
2881 
2882 // TODO(user): Two other approaches for the phase I described in Koberstein's
2883 // PhD thesis seem worth trying at some point:
2884 // - The subproblem approach, which enables one to use a normal phase II dual,
2885 // but requires an efficient bound-flipping ratio test since the new problem
2886 // has all its variables boxed. This one is implemented now, but requires
2887 // a bit more tuning.
2888 // - Pan's method, which is really fast but has no theoretical guarantee of
2889 // terminating and thus needs to use one of the other methods as a fallback if
2890 // it fails to make progress.
2891 //
2892 // Note that the returned status applies to the primal problem!
2893 Status RevisedSimplex::DualMinimize(bool feasibility_phase,
2894  TimeLimit* time_limit) {
2895  Cleanup update_deterministic_time_on_return(
2896  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2897  num_consecutive_degenerate_iterations_ = 0;
2898  bool refactorize = false;
2899 
2900  bound_flip_candidates_.clear();
2901 
2902  // Leaving variable.
2903  RowIndex leaving_row;
2904  Fractional cost_variation;
2906 
2907  // Entering variable.
2908  ColIndex entering_col;
2909 
2910  while (true) {
2911  // TODO(user): we may loop a bit more than the actual number of iteration.
2912  // fix.
2914  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2915 
2916  // Trigger a refactorization if one of the class we use request it.
2917  const bool old_refactorize_value = refactorize;
2918  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2919  if (dual_edge_norms_.NeedsBasisRefactorization()) refactorize = true;
2920  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2921 
2922  // If the basis is refactorized, we recompute all the values in order to
2923  // have a good precision.
2924  if (basis_factorization_.IsRefactorized()) {
2925  // We do not want to recompute the reduced costs too often, this is
2926  // because that may break the overall direction taken by the last steps
2927  // and may lead to less improvement on degenerate problems.
2928  //
2929  // For now, we just recompute them if refactorize was set during the
2930  // loop and not because of normal refactorization.
2931  //
2932  // During phase-I, we do want the reduced costs to be as precise as
2933  // possible. TODO(user): Investigate why and fix the TODO in
2934  // PermuteBasis().
2935  //
2936  // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
2937  // do recompute them, it is better to do that first.
2938  if (feasibility_phase || old_refactorize_value) {
2939  reduced_costs_.MakeReducedCostsPrecise();
2940  }
2941 
2942  // TODO(user): Make RecomputeBasicVariableValues() do nothing
2943  // if it was already recomputed on a refactorized basis. This is the
2944  // same behavior as MakeReducedCostsPrecise().
2945  //
2946  // TODO(user): Do not recompute the variable values each time we
2947  // refactorize the matrix, like for the reduced costs? That may lead to
2948  // a worse behavior than keeping the "imprecise" version and only
2949  // recomputing it when its precision is above a threshold.
2950  if (!feasibility_phase) {
2951  MakeBoxedVariableDualFeasible(
2952  variables_info_.GetNonBasicBoxedVariables(),
2953  /*update_basic_values=*/false);
2954  variable_values_.RecomputeBasicVariableValues();
2955  variable_values_.RecomputeDualPrices();
2956 
2957  // Computing the objective at each iteration takes time, so we just
2958  // check the limit when the basis is refactorized.
2959  //
2960  // Hack: We need phase_ here and not the local feasibility_phase
2961  // variable because this must not be checked for the dual phase I algo
2962  // that use the same code as the dual phase II (i.e. the local
2963  // feasibility_phase will be false).
2964  if (phase_ == Phase::OPTIMIZATION &&
2965  dual_objective_limit_ != kInfinity &&
2966  ComputeObjectiveValue() > dual_objective_limit_) {
2967  VLOG(1) << "Stopping the dual simplex because"
2968  << " the objective limit " << dual_objective_limit_
2969  << " has been reached.";
2970  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
2971  objective_limit_reached_ = true;
2972  return Status::OK();
2973  }
2974  }
2975 
2976  DisplayIterationInfo();
2977  } else {
2978  // Updates from the previous iteration that can be skipped if we
2979  // recompute everything (see other case above).
2980  if (!feasibility_phase) {
2981  // Make sure the boxed variables are dual-feasible before choosing the
2982  // leaving variable row.
2983  MakeBoxedVariableDualFeasible(bound_flip_candidates_,
2984  /*update_basic_values=*/true);
2985  bound_flip_candidates_.clear();
2986 
2987  // The direction_.non_zeros contains the positions for which the basic
2988  // variable value was changed during the previous iterations.
2989  variable_values_.UpdateDualPrices(direction_.non_zeros);
2990  }
2991  }
2992 
2993  if (feasibility_phase) {
2994  GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
2995  &leaving_row, &cost_variation, &target_bound));
2996  } else {
2997  GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
2998  &leaving_row, &cost_variation, &target_bound));
2999  }
3000  if (leaving_row == kInvalidRow) {
3001  // TODO(user): integrate this with the main "re-optimization" loop.
3002  // Also distinguish cost perturbation and shifts?
3003  if (!basis_factorization_.IsRefactorized() ||
3004  reduced_costs_.HasCostShift()) {
3005  VLOG(1) << "Optimal reached, double checking.";
3006  reduced_costs_.ClearAndRemoveCostShifts();
3007  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3008  refactorize = true;
3009  continue;
3010  }
3011  if (feasibility_phase) {
3012  // Note that since the basis is refactorized, the variable values
3013  // will be recomputed at the beginning of the second phase. The boxed
3014  // variable values will also be corrected by
3015  // MakeBoxedVariableDualFeasible().
3016  if (num_dual_infeasible_positions_ == 0) {
3017  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3018  } else {
3019  VLOG(1) << "DUAL infeasible in dual phase I.";
3020  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
3021  }
3022  } else {
3023  problem_status_ = ProblemStatus::OPTIMAL;
3024  }
3025  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3026  return Status::OK();
3027  }
3028 
3029  update_row_.ComputeUpdateRow(leaving_row);
3030  if (feasibility_phase) {
3032  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3033  &entering_col));
3034  } else {
3036  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3037  &bound_flip_candidates_, &entering_col));
3038  }
3039 
3040  // No entering_col: dual unbounded (i.e. primal infeasible).
3041  if (entering_col == kInvalidCol) {
3042  if (!reduced_costs_.AreReducedCostsPrecise()) {
3043  VLOG(1) << "No entering column. Double checking...";
3044  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3045  refactorize = true;
3046  continue;
3047  }
3048  DCHECK(basis_factorization_.IsRefactorized());
3049  if (feasibility_phase) {
3050  // This shouldn't happen by construction.
3051  VLOG(1) << "Unbounded dual feasibility problem !?";
3052  problem_status_ = ProblemStatus::ABNORMAL;
3053  } else {
3054  problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
3055  solution_dual_ray_ =
3056  Transpose(update_row_.GetUnitRowLeftInverse().values);
3057  update_row_.RecomputeFullUpdateRow(leaving_row);
3058  solution_dual_ray_row_combination_.AssignToZero(num_cols_);
3059  for (const ColIndex col : update_row_.GetNonZeroPositions()) {
3060  solution_dual_ray_row_combination_[col] =
3061  update_row_.GetCoefficient(col);
3062  }
3063  if (cost_variation < 0) {
3064  ChangeSign(&solution_dual_ray_);
3065  ChangeSign(&solution_dual_ray_row_combination_);
3066  }
3067  }
3068  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3069  return Status::OK();
3070  }
3071 
3072  // If the coefficient is too small, we recompute the reduced costs if not
3073  // already done. This is an extra heuristic to avoid computing the direction
3074  // If the pivot is small. But the real recomputation step is just below.
3075  const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
3076  if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
3077  !reduced_costs_.AreReducedCostsPrecise()) {
3078  VLOG(1) << "Trying not to pivot by " << entering_coeff;
3079  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3080  refactorize = true;
3081  continue;
3082  }
3083 
3084  ComputeDirection(entering_col);
3085 
3086  // If the pivot is small compared to others in the direction_ vector we try
3087  // to recompute everything. If we cannot, then note that
3088  // DualChooseEnteringColumn() should guaranteed that the pivot is not too
3089  // small when everything has already been recomputed.
3090  if (std::abs(direction_[leaving_row]) <
3091  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
3092  if (!reduced_costs_.AreReducedCostsPrecise()) {
3093  VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
3094  << direction_[leaving_row]
3095  << ") because the direction has a norm of "
3096  << direction_infinity_norm_;
3097  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3098  refactorize = true;
3099  continue;
3100  }
3101  }
3102 
3103  // This test takes place after the check for optimality/feasibility because
3104  // when running with 0 iterations, we still want to report
3105  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
3106  // case at the beginning of the algorithm.
3107  AdvanceDeterministicTime(time_limit);
3108  if (num_iterations_ == parameters_.max_number_of_iterations() ||
3109  time_limit->LimitReached()) {
3110  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3111  return Status::OK();
3112  }
3113 
3114  // Before we update the reduced costs, if its sign is already dual
3115  // infeasible and the update direction will make it worse we make sure the
3116  // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
3117  // goes in the wrong direction (a few experiments seems to indicate that
3118  // this is not a good idea). See comment at the top of UpdateReducedCosts().
3119  //
3120  // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
3121  // order to do a non-zero step. This helps on degenerate problems. Like the
3122  // pertubation, we will remove all these shifts at the end.
3123  const bool increasing_rc_is_needed =
3124  (cost_variation > 0.0) == (entering_coeff > 0.0);
3125  reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
3126 
3128  if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
3129  entering_col)) {
3130  timer.AlsoUpdate(&iteration_stats_.degenerate);
3131  } else {
3132  timer.AlsoUpdate(&iteration_stats_.normal);
3133  }
3134  });
3135 
3136  // Update basis. Note that direction_ is already computed.
3137  //
3138  // TODO(user): this is pretty much the same in the primal or dual code.
3139  // We just need to know to what bound the leaving variable will be set to.
3140  // Factorize more common code?
3141  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3142  &update_row_);
3143  dual_edge_norms_.UpdateBeforeBasisPivot(
3144  entering_col, leaving_row, direction_,
3145  update_row_.GetUnitRowLeftInverse());
3146 
3147  // During phase I, we do not need the basic variable values at all.
3148  // Important: The norm should be updated before that.
3149  Fractional primal_step = 0.0;
3150  if (feasibility_phase) {
3151  DualPhaseIUpdatePrice(leaving_row, entering_col);
3152  } else {
3153  primal_step =
3154  ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3155  variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3156  }
3157 
3158  // It is important to do the actual pivot after the update above!
3159  const ColIndex leaving_col = basis_[leaving_row];
3161  UpdateAndPivot(entering_col, leaving_row, target_bound));
3162 
3163  // This makes sure the leaving variable is at its exact bound. Tests
3164  // indicate that this makes everything more stable. Note also that during
3165  // the feasibility phase, the variable values are not used, but that the
3166  // correct non-basic variable value are needed at the end.
3167  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3168 
3169  // This is slow, but otherwise we have a really bad precision on the
3170  // variable values ...
3171  if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
3172  1.0) {
3173  refactorize = true;
3174  }
3175  ++num_iterations_;
3176  }
3177  return Status::OK();
3178 }
3179 
// Push phase: after an optimal basis is found, drives every remaining
// super-basic variable (status FREE with a nonzero value) either to zero
// (UNCONSTRAINED variables) or to its closest bound, pivoting it into the
// basis when a basic variable blocks the move.
//
// NOTE(review): this text is a documentation-extraction dump; several lines
// that were hyperlinks (macro calls, enum tokens) were dropped by the
// extractor — each gap is flagged below. Restore them from the original
// source before compiling.
3180 Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
// NOTE(review): line 3181 is missing here (presumably a
// GLOP_RETURN_ERROR_IF_NULL(time_limit) or SCOPED_TIME_STAT call) — confirm.
3182  Cleanup update_deterministic_time_on_return(
3183  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
3184 
3185  DisplayIterationInfo();
3186  bool refactorize = false;
3187 
3188  // We clear all the quantities that we don't update so they will be recomputed
3189  // later if needed.
3190  primal_edge_norms_.Clear();
3191  dual_edge_norms_.Clear();
3192  update_row_.Invalidate();
3193  reduced_costs_.ClearAndRemoveCostShifts();
3194 
  // Collect the columns to push: non-basic, FREE status, nonzero value.
3195  std::vector<ColIndex> super_basic_cols;
3196  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3197  if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
3198  variable_values_.Get(col) != 0) {
3199  super_basic_cols.push_back(col);
3200  }
3201  }
3202 
  // One iteration per super-basic column (each is popped exactly once below).
3203  while (!super_basic_cols.empty()) {
3204  AdvanceDeterministicTime(time_limit);
3205  if (time_limit->LimitReached()) break;
3206 
// NOTE(review): line 3207 is missing — the stray ')' on the next line shows
// this statement was wrapped in IF_STATS_ENABLED( — confirm against source.
3208  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
3209  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
3210  if (basis_factorization_.IsRefactorized()) {
3211  CorrectErrorsOnVariableValues();
3212  DisplayIterationInfo();
3213  }
3214 
3215  // TODO(user): Select at random like in Polish().
3216  ColIndex entering_col = super_basic_cols.back();
3217 
3218  DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
3219  DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
3220 
3221  // Decide which direction to send the entering column.
3222  // UNCONSTRAINED variables go towards zero. Other variables go towards their
3223  // closest bound. We assume that we're at an optimal solution, so all FREE
3224  // variables have approximately zero reduced cost, which means that the
3225  // objective value won't change from moving this column into the basis.
3226  // TODO(user): As an improvement for variables with two bounds, try both
3227  // and pick one that doesn't require a basis change (if possible), otherwise
3228  // pick the closer bound.
  // fake_rc only encodes the push direction for the ratio test:
  // +1.0 pushes the value down, -1.0 pushes it up (see 'step' below).
3229  Fractional fake_rc;
3230  const Fractional entering_value = variable_values_.Get(entering_col);
3231  if (variables_info_.GetTypeRow()[entering_col] ==
// NOTE(review): line 3232 is missing — likely 'VariableType::UNCONSTRAINED) {'.
3233  if (entering_value > 0) {
3234  fake_rc = 1.0;
3235  } else {
3236  fake_rc = -1.0;
3237  }
3238  } else {
  // Bounded case: compare the distances to each bound and aim at the closer.
3239  const Fractional diff_ub =
3240  variables_info_.GetVariableUpperBounds()[entering_col] -
3241  entering_value;
3242  const Fractional diff_lb =
3243  entering_value -
3244  variables_info_.GetVariableLowerBounds()[entering_col];
3245  if (diff_lb <= diff_ub) {
3246  fake_rc = 1.0;
3247  } else {
3248  fake_rc = -1.0;
3249  }
3250  }
3251 
3252  // Solve the system B.d = a with a the entering column.
3253  ComputeDirection(entering_col);
3254 
3255  Fractional step_length;
3256  RowIndex leaving_row;
// NOTE(review): line 3257 is missing — 'target_bound' used below is not
// declared in the visible text; likely 'Fractional target_bound;'.
3258 
3259  GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
3260  &refactorize, &leaving_row,
3261  &step_length, &target_bound));
3262 
3263  if (refactorize) continue;
3264 
3265  // At this point, we know the iteration will finish or stop with an error.
3266  super_basic_cols.pop_back();
3267 
  // An unbounded ratio test is only acceptable for an UNCONSTRAINED variable,
  // which then just moves the whole way to zero.
3268  if (step_length == kInfinity || step_length == -kInfinity) {
3269  if (variables_info_.GetTypeRow()[entering_col] ==
// NOTE(review): line 3270 is missing — likely 'VariableType::UNCONSTRAINED) {'.
3271  step_length = std::fabs(entering_value);
3272  } else {
3273  VLOG(1) << "Infinite step for bounded variable ?!";
3274  problem_status_ = ProblemStatus::ABNORMAL;
3275  break;
3276  }
3277  }
3278 
  // Sign convention: fake_rc > 0 means we push the value down.
3279  const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
3280 
3281  // Store the leaving_col before basis_ change.
3282  const ColIndex leaving_col =
3283  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
3284 
3285  // An iteration is called 'degenerate' if the leaving variable is already
3286  // primal-infeasible and we make it even more infeasible or if we do a zero
3287  // step.
3288  // TODO(user): Test setting the step size to zero for degenerate steps.
3289  // We don't need to force a positive step because each super-basic variable
3290  // is pivoted in exactly once.
3291  bool is_degenerate = false;
3292  if (leaving_row != kInvalidRow) {
3293  Fractional dir = -direction_[leaving_row] * step;
3294  is_degenerate =
3295  (dir == 0.0) ||
3296  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
3297  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
3298 
3299  // If the iteration is not degenerate, the leaving variable should go to
3300  // its exact target bound (it is how the step is computed).
3301  if (!is_degenerate) {
3302  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
3303  target_bound));
3304  }
3305  }
3306 
3307  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
3308  if (leaving_row != kInvalidRow) {
3309  if (!is_degenerate) {
3310  // On a non-degenerate iteration, the leaving variable should be at its
3311  // exact bound. This corrects an eventual small numerical error since
3312  // 'value + direction * step' where step is
3313  // '(target_bound - value) / direction'
3314  // may be slighlty different from target_bound.
3315  variable_values_.Set(leaving_col, target_bound);
3316  }
// NOTE(review): line 3317 is missing — the dangling ')' on the next line
// shows this call was wrapped in GLOP_RETURN_IF_ERROR( — confirm.
3318  UpdateAndPivot(entering_col, leaving_row, target_bound));
// NOTE(review): line 3319 is missing — the '});' below shows this block was
// wrapped in IF_STATS_ENABLED({ — confirm.
3320  if (is_degenerate) {
3321  timer.AlsoUpdate(&iteration_stats_.degenerate);
3322  } else {
3323  timer.AlsoUpdate(&iteration_stats_.normal);
3324  }
3325  });
3326  } else {
3327  // Snap the super-basic variable to its bound. Note that
3328  // variable_values_.UpdateOnPivoting() should already be close to that but
3329  // here we make sure it is exact and remove any small numerical errors.
3330  if (variables_info_.GetTypeRow()[entering_col] ==
// NOTE(review): line 3331 is missing — likely 'VariableType::UNCONSTRAINED) {'.
3332  variable_values_.Set(entering_col, 0.0);
3333  } else if (step > 0.0) {
3334  SetNonBasicVariableStatusAndDeriveValue(entering_col,
// NOTE(review): line 3335 is missing — the VariableStatus argument
// (presumably an AT_*_BOUND value) was stripped; confirm against source.
3336  } else if (step < 0.0) {
3337  SetNonBasicVariableStatusAndDeriveValue(entering_col,
// NOTE(review): line 3338 is missing — the VariableStatus argument
// (presumably the opposite AT_*_BOUND value) was stripped; confirm.
3339  }
3340  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
3341  }
3342 
3343  ++num_iterations_;
3344  }
3345 
  // NOTE(review): '!super_basic_cols.empty() > 0' evaluates as
  // '(!empty()) > 0', i.e. the same as '!empty()' — the '> 0' is redundant
  // and should be removed for clarity.
3346  if (!super_basic_cols.empty() > 0 &&
3347  (parameters_.log_search_progress() || VLOG_IS_ON(1))) {
3348  LOG(INFO) << "Push terminated early with " << super_basic_cols.size()
3349  << " super-basic variables remaining.";
3350  }
3351 
3352  // TODO(user): What status should be returned if the time limit is hit?
3353  // If the optimization phase finished, then OPTIMAL is technically correct
3354  // but also misleading.
3355 
3356  return Status::OK();
3357 }
3358 
// Returns the column index of the slack variable associated with the given
// row. Slack columns are laid out contiguously starting at first_slack_col_,
// one per row, so the mapping is a simple offset.
3359 ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
// NOTE(review): line 3360 was dropped by the documentation extractor —
// presumably a DCHECK_ROW_BOUNDS(row) macro call; confirm against source.
3361  return first_slack_col_ + RowToColIndex(row);
3362 }
3363 
// Body of RevisedSimplex::StatString(): concatenates the StatString() output
// of every sub-component (iteration, ratio test, pricing, reduced costs,
// values, norms, update row, factorization, timers) into one report.
// NOTE(review): the signature line (doc line 3364) was dropped by the
// documentation extractor — restore 'std::string RevisedSimplex::StatString()
// const {' (or equivalent) from the original source.
3365  std::string result;
3366  result.append(iteration_stats_.StatString());
3367  result.append(ratio_test_stats_.StatString());
3368  result.append(entering_variable_.StatString());
3369  result.append(dual_prices_.StatString());
3370  result.append(reduced_costs_.StatString());
3371  result.append(variable_values_.StatString());
3372  result.append(primal_edge_norms_.StatString());
3373  result.append(dual_edge_norms_.StatString());
3374  result.append(update_row_.StatString());
3375  result.append(basis_factorization_.StatString());
3376  result.append(function_stats_.StatString());
3377  return result;
3378 }
3379 
3380 void RevisedSimplex::DisplayAllStats() {
3381  if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3382  absl::FPrintF(stderr, "%s", StatString());
3383  absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3384  }
3385 }
3386 
3387 Fractional RevisedSimplex::ComputeObjectiveValue() const {
3388  SCOPED_TIME_STAT(&function_stats_);
3389  return PreciseScalarProduct(objective_,
3390  Transpose(variable_values_.GetDenseRow()));
3391 }
3392 
3393 Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3394  SCOPED_TIME_STAT(&function_stats_);
3395  const Fractional sum = PreciseScalarProduct(
3396  objective_, Transpose(variable_values_.GetDenseRow()));
3397  return objective_scaling_factor_ * (sum + objective_offset_);
3398 }
3399 
// Body of RevisedSimplex::SetParameters(): reseeds the deterministic RNG from
// the parameters, stores both the initial and the working parameter sets, and
// pushes the working set down to every sub-component.
// NOTE(review): the signature line (doc line 3400) was dropped by the
// documentation extractor — restore 'void RevisedSimplex::SetParameters(const
// GlopParameters& parameters) {' (or equivalent) from the original source.
3401  SCOPED_TIME_STAT(&function_stats_);
3402  deterministic_random_.seed(parameters.random_seed());
3403 
  // initial_parameters_ keeps the caller-provided set; parameters_ is the
  // working copy that PropagateParameters() distributes.
3404  initial_parameters_ = parameters;
3405  parameters_ = parameters;
3406  PropagateParameters();
3407 }
3408 
// Forwards the current working parameters (parameters_) to every
// sub-component that caches its own copy. Must be called whenever
// parameters_ changes so the components stay consistent.
3409 void RevisedSimplex::PropagateParameters() {
3410  SCOPED_TIME_STAT(&function_stats_);
3411  basis_factorization_.SetParameters(parameters_);
3412  entering_variable_.SetParameters(parameters_);
3413  reduced_costs_.SetParameters(parameters_);
3414  dual_edge_norms_.SetParameters(parameters_);
3415  primal_edge_norms_.SetParameters(parameters_);
3416  update_row_.SetParameters(parameters_);
3417 }
3418 
// Logs a one-line progress message appropriate for the current phase_:
// the phase-relative iteration count plus the phase-specific quantity
// (sum of infeasibilities, objective value, or variables left to push).
// Does nothing unless log_search_progress is set or VLOG(1) is active.
3419 void RevisedSimplex::DisplayIterationInfo() {
3420  const bool log = parameters_.log_search_progress() || VLOG_IS_ON(1);
3421  if (!log) return;
3422 
3423  switch (phase_) {
3424  case Phase::FEASIBILITY: {
3425  const int64_t iter = num_iterations_;
  // The reported "objective" in phase I is the current sum of
  // infeasibilities (dual or primal depending on the algorithm in use).
3426  std::string name;
3427  Fractional objective;
3428  if (parameters_.use_dual_simplex()) {
3429  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
3430  objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
3431  } else {
3432  // The internal objective of the transformed problem is the negation
3433  // of the sum of the dual infeasibility of the original problem.
3434  objective = -PreciseScalarProduct(
3435  objective_, Transpose(variable_values_.GetDenseRow()));
3436  }
3437  name = "sum_dual_infeasibilities";
3438  } else {
3439  objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
3440  name = "sum_primal_infeasibilities";
3441  }
3442 
3443  LOG(INFO) << "Feasibility phase, iteration # " << iter << ", " << name
3444  << " = " << absl::StrFormat("%.15E", objective);
3445  break;
3446  }
3447  case Phase::OPTIMIZATION: {
  // Iteration count shown is relative to the start of this phase.
3448  const int64_t iter = num_iterations_ - num_feasibility_iterations_;
3449  // Note that in the dual phase II, ComputeObjectiveValue() is also
3450  // computing the dual objective even if it uses the variable values.
3451  // This is because if we modify the bounds to make the problem
3452  // primal-feasible, we are at the optimal and hence the two objectives
3453  // are the same.
3454  const Fractional objective = ComputeInitialProblemObjectiveValue();
3455  LOG(INFO) << "Optimization phase, iteration # " << iter
3456  << ", objective = " << absl::StrFormat("%.15E", objective);
3457  break;
3458  }
  // Last case: no 'break' needed, the switch ends here.
3459  case Phase::PUSH: {
3460  const int64_t iter = num_iterations_ - num_feasibility_iterations_ -
3461  num_optimization_iterations_;
3462  LOG(INFO) << "Push phase, iteration # " << iter
3463  << ", remaining_variables_to_push = "
3464  << ComputeNumberOfSuperBasicVariables();
3465  }
3466  }
3467 }
3468 
3469 void RevisedSimplex::DisplayErrors() {
3470  if (parameters_.log_search_progress() || VLOG_IS_ON(1)) {
3471  LOG(INFO) << "Primal infeasibility (bounds) = "
3472  << variable_values_.ComputeMaximumPrimalInfeasibility();
3473  LOG(INFO) << "Primal residual |A.x - b| = "
3474  << variable_values_.ComputeMaximumPrimalResidual();
3475  LOG(INFO) << "Dual infeasibility (reduced costs) = "
3476  << reduced_costs_.ComputeMaximumDualInfeasibility();
3477  LOG(INFO) << "Dual residual |c_B - y.B| = "
3478  << reduced_costs_.ComputeMaximumDualResidual();
3479  }
3480 }
3481 
3482 namespace {
3483 
3484 std::string StringifyMonomialWithFlags(const Fractional a,
3485  const std::string& x) {
3486  return StringifyMonomial(
3487  a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3488 }
3489 
3490 // Returns a string representing the rational approximation of x or a decimal
3491 // approximation of x according to
3492 // absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3493 std::string StringifyWithFlags(const Fractional x) {
3494  return Stringify(x,
3495  absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3496 }
3497 
3498 } // namespace
3499 
// Returns a one-line human-readable summary of a variable:
// "<index> (<name>) = <value>, <status>, <type>, [<lb>,<ub>]", with numbers
// rendered per the display-as-fractions flag.
3500 std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3501  std::string output;
3502  VariableType variable_type = variables_info_.GetTypeRow()[col];
3503  VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3504  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3505  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3506  absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3507  variable_name_[col],
3508  StringifyWithFlags(variable_values_.Get(col)),
3509  GetVariableStatusString(variable_status),
3510  GetVariableTypeString(variable_type),
3511  StringifyWithFlags(lower_bounds[col]),
3512  StringifyWithFlags(upper_bounds[col]));
3513  return output;
3514 }
3515 
3516 void RevisedSimplex::DisplayInfoOnVariables() const {
3517  if (VLOG_IS_ON(3)) {
3518  for (ColIndex col(0); col < num_cols_; ++col) {
3519  const Fractional variable_value = variable_values_.Get(col);
3520  const Fractional objective_coefficient = objective_[col];
3521  const Fractional objective_contribution =
3522  objective_coefficient * variable_value;
3523  VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3524  << StringifyWithFlags(variable_value) << " * "
3525  << StringifyWithFlags(objective_coefficient)
3526  << "(obj) = " << StringifyWithFlags(objective_contribution);
3527  }
3528  VLOG(3) << "------";
3529  }
3530 }
3531 
// At VLOG level 3, prints every column's bound constraint in a readable form
// ("x >= lb;", "x <= ub;", "lb <= x <= ub;" or "x = lb;"), dispatching on the
// variable's type.
// NOTE(review): the documentation extractor dropped all of the
// 'case VariableType::...:' label lines (doc lines 3539, 3541, 3545, 3549,
// 3554) because they were hyperlinks — the bodies below are ordered
// unconstrained / lower-bounded / upper-bounded / boxed / fixed; restore the
// labels from the original source before compiling.
3532 void RevisedSimplex::DisplayVariableBounds() {
3533  if (VLOG_IS_ON(3)) {
3534  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
3535  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3536  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3537  for (ColIndex col(0); col < num_cols_; ++col) {
3538  switch (variable_type[col]) {
// (missing case label — unconstrained: nothing to print)
3540  break;
// (missing case label — lower-bounded only)
3542  VLOG(3) << variable_name_[col]
3543  << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
3544  break;
// (missing case label — upper-bounded only)
3546  VLOG(3) << variable_name_[col]
3547  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3548  break;
// (missing case label — bounded on both sides)
3550  VLOG(3) << StringifyWithFlags(lower_bounds[col])
3551  << " <= " << variable_name_[col]
3552  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
3553  break;
// (missing case label — fixed variable)
3555  VLOG(3) << variable_name_[col] << " = "
3556  << StringifyWithFlags(lower_bounds[col]) << ";";
3557  break;
3558  default: // This should never happen.
3559  LOG(DFATAL) << "Column " << col << " has no meaningful status.";
3560  break;
3561  }
3562  }
3563  }
3564 }
3565 
// Body of RevisedSimplex::ComputeDictionary(): builds the row-major simplex
// dictionary B^{-1}.A by solving B.d = a for each column a. When
// column_scales is non-null, each coefficient is rescaled by
// scale(col) / scale(basic variable of the row); columns beyond the scale
// vector's size use an implicit scale of 1.0.
// NOTE(review): the first signature line (doc line 3566, returning the
// row-major matrix type) was dropped by the documentation extractor —
// restore it from the original source.
3567  const DenseRow* column_scales) {
3568  absl::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3569  for (ColIndex col(0); col < num_cols_; ++col) {
3570  ComputeDirection(col);
3571  for (const auto e : direction_) {
3572  if (column_scales == nullptr) {
3573  dictionary[e.row()].SetCoefficient(col, e.coefficient());
3574  continue;
3575  }
3576  const Fractional numerator =
3577  col < column_scales->size() ? (*column_scales)[col] : 1.0;
3578  const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3579  ? (*column_scales)[GetBasis(e.row())]
3580  : 1.0;
3581  dictionary[e.row()].SetCoefficient(
3582  col, direction_[e.row()] * (numerator / denominator));
3583  }
3584  }
3585  return dictionary;
3586 }
3587 
// Body of RevisedSimplex::ComputeBasicVariablesForState(): loads the given
// basis state, initializes the solver for the program and, on success,
// recomputes the basic variable values and the objective value of the
// initial problem.
// NOTE(review): the first signature line (doc line 3588,
// 'void RevisedSimplex::ComputeBasicVariablesForState(' per the index) was
// dropped by the documentation extractor — restore from the original source.
3589  const LinearProgram& linear_program, const BasisState& state) {
3590  LoadStateForNextSolve(state);
3591  Status status = Initialize(linear_program);
3592  if (status.ok()) {
3593  variable_values_.RecomputeBasicVariableValues();
3594  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3595  }
3596 }
3597 
3598 void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3599  if (VLOG_IS_ON(3)) {
3600  // This function has a complexity in O(num_non_zeros_in_matrix).
3601  DisplayInfoOnVariables();
3602 
3603  std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3604  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3605  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3606  absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3607  variable_name_[col]));
3608  }
3609  VLOG(3) << output << ";";
3610 
3611  const RevisedSimplexDictionary dictionary(nullptr, this);
3612  RowIndex r(0);
3613  for (const SparseRow& row : dictionary) {
3614  output.clear();
3615  ColIndex basic_col = basis_[r];
3616  absl::StrAppend(&output, variable_name_[basic_col], " = ",
3617  StringifyWithFlags(variable_values_.Get(basic_col)));
3618  for (const SparseRowEntry e : row) {
3619  if (e.col() != basic_col) {
3620  absl::StrAppend(&output,
3621  StringifyMonomialWithFlags(e.coefficient(),
3622  variable_name_[e.col()]));
3623  }
3624  }
3625  VLOG(3) << output << ";";
3626  }
3627  VLOG(3) << "------";
3628  DisplayVariableBounds();
3629  ++r;
3630  }
3631 }
3632 
3633 void RevisedSimplex::DisplayProblem() const {
3634  // This function has a complexity in O(num_rows * num_cols *
3635  // num_non_zeros_in_row).
3636  if (VLOG_IS_ON(3)) {
3637  DisplayInfoOnVariables();
3638  std::string output = "min: ";
3639  bool has_objective = false;
3640  for (ColIndex col(0); col < num_cols_; ++col) {
3641  const Fractional coeff = objective_[col];
3642  has_objective |= (coeff != 0.0);
3643  absl::StrAppend(&output,
3644  StringifyMonomialWithFlags(coeff, variable_name_[col]));
3645  }
3646  if (!has_objective) {
3647  absl::StrAppend(&output, " 0");
3648  }
3649  VLOG(3) << output << ";";
3650  for (RowIndex row(0); row < num_rows_; ++row) {
3651  output = "";
3652  for (ColIndex col(0); col < num_cols_; ++col) {
3653  absl::StrAppend(&output,
3654  StringifyMonomialWithFlags(
3655  compact_matrix_.column(col).LookUpCoefficient(row),
3656  variable_name_[col]));
3657  }
3658  VLOG(3) << output << " = 0;";
3659  }
3660  VLOG(3) << "------";
3661  }
3662 }
3663 
3664 void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3665  DCHECK(time_limit != nullptr);
3666  const double current_deterministic_time = DeterministicTime();
3667  const double deterministic_time_delta =
3668  current_deterministic_time - last_deterministic_time_update_;
3669  time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3670  last_deterministic_time_update_ = current_deterministic_time;
3671 }
3672 
3673 #undef DCHECK_COL_BOUNDS
3674 #undef DCHECK_ROW_BOUNDS
3675 
3676 } // namespace glop
3677 } // namespace operations_research
Index ColToIntIndex(ColIndex col)
Definition: lp_types.h:55
static constexpr InitialBasisHeuristic NONE
Fractional InfinityNorm(const DenseColumn &v)
const BasisFactorization & GetBasisFactorization() const
void Set(ColIndex col, Fractional value)
const DenseRow & Transpose(const DenseColumn &col)
#define CHECK(condition)
Definition: base/logging.h:491
void UpdateDualPrices(const std::vector< RowIndex > &row)
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
void InitializeFromBasisState(ColIndex first_slack, ColIndex num_new_cols, const BasisState &state)
Fractional ratio
std::string StatString() const
Definition: stats.cc:71
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition: lp_types.h:346
const DenseBitRow & GetIsBasicBitRow() const
const bool DEBUG_MODE
Definition: macros.h:24
int64_t min
Definition: alldiff_cst.cc:139
int ChangeUnusedBasicVariablesToFree(const RowToColMapping &basis)
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
Fractional coeff_magnitude
void LoadStateForNextSolve(const BasisState &state)
ModelSharedTimeLimit * time_limit
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
const DenseBitRow & GetCanDecreaseBitRow() const
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
void PopulateFromSparseMatrixAndAddSlacks(const SparseMatrix &input)
Definition: sparse.cc:456
const DenseBitRow & GetNotBasicBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
#define VLOG(verboselevel)
Definition: base/logging.h:979
std::vector< double > lower_bounds
const std::string name
Fractional GetVariableValue(ColIndex col) const
Fractional GetDualFeasibilityTolerance() const
const ColIndex kInvalidCol(-1)
void PopulateFromMatrixView(const MatrixView &input)
Definition: sparse.cc:437
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:263
void TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
#define LOG(severity)
Definition: base/logging.h:416
ColIndex col
Definition: markowitz.cc:183
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Definition: status.h:85
static const Status OK()
Definition: status.h:54
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col)
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:891
int SnapFreeVariablesToBound(Fractional distance, const DenseRow &starting_values)
Permutation< ColIndex > ColumnPermutation
bool IsFinite(Fractional value)
Definition: lp_types.h:91
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:70
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Fractional TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void UpdateGivenNonBasicVariables(const std::vector< ColIndex > &cols_to_update, bool update_basic_variables)
Fractional GetReducedCost(ColIndex col) const
::operations_research::glop::GlopParameters_InitialBasisHeuristic initial_basis() const
ColumnView column(ColIndex col) const
Definition: sparse.h:369
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:423
void SetParameters(const GlopParameters &parameters)
void ComputeUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:71
#define DCHECK_COL_BOUNDS(col)
static constexpr InitialBasisHeuristic BIXBY
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition: sparse.cc:483
int64_t max
Definition: alldiff_cst.cc:140
static constexpr InitialBasisHeuristic MAROS
double upper_bound
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
void SetParameters(const GlopParameters &parameters)
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:332
std::string Stringify(const Fractional x, bool fraction)
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse_column.h:83
::operations_research::glop::GlopParameters_PricingRule optimization_rule() const
const int WARNING
Definition: log_severity.h:31
void SetParameters(const GlopParameters &parameters)
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
RowIndex EntryRow(EntryIndex i) const
Definition: sparse_column.h:89
bool empty() const
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:887
void SetParameters(const GlopParameters &parameters)
void SetParameters(const GlopParameters &parameters)
VariableStatus GetVariableStatus(ColIndex col) const
StrictITIVector< Index, Fractional > values
void SetIntegralityScale(ColIndex col, Fractional scale)
double lower_bound
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
void SetPricingRule(GlopParameters::PricingRule rule)
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
IntVar *const objective_
Definition: search.cc:2966
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition: sparse.h:387
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:410
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Definition: lp_types.h:324
void AddOrUpdate(Index position, Fractional value)
Definition: pricing.h:185
const double kInfinity
Definition: lp_types.h:84
const VariableTypeRow & GetTypeRow() const
const DenseRow & GetVariableUpperBounds() const
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
int index
Definition: pack.cc:509
const DenseBitRow & GetNonBasicBoxedVariables() const
RowIndex row
Fractional target_bound
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition: stats.h:434
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:890
std::string StatString() const
Definition: update_row.h:82
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:167
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col)
const RowIndex kInvalidRow(-1)
void SetStartingVariableValuesForNextSolve(const DenseRow &values)
static constexpr InitialBasisHeuristic TRIANGULAR
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:327
void ComputeUnitRowLeftInverse(RowIndex leaving_row)
Definition: update_row.cc:57
int64_t cost
const DenseRow & GetVariableLowerBounds() const
void DenseAddOrUpdate(Index position, Fractional value)
Definition: pricing.h:176
#define DCHECK_ROW_BOUNDS(row)
const ColumnPermutation & GetColumnPermutation() const
Fractional LookUpCoefficient(RowIndex index) const
#define DCHECK(condition)
Definition: base/logging.h:885
void SetParameters(const GlopParameters &parameters)
void EndDualPhaseI(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
ConstraintStatus GetConstraintStatus(RowIndex row) const
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
const Fractional Get(ColIndex col) const
RowToColMapping ComputeInitialBasis(const std::vector< ColIndex > &candidates)
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:886
::operations_research::glop::GlopParameters_PricingRule feasibility_rule() const
const DenseRow & GetDualRayRowCombination() const
bool IsValidPrimalEnteringCandidate(ColIndex col) const
bool IsSet(IndexType i) const
Definition: bitset.h:485
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
Fractional Square(Fractional f)
std::string GetVariableStatusString(VariableStatus status)
Definition: lp_types.cc:71
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:888
void TransformToDualPhaseIProblem(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
constexpr const uint64_t kDeterministicSeed
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Definition: lp_types.h:335
Collection of objects used to extend the Constraint Solver library.
::PROTOBUF_NAMESPACE_ID::int64 max_number_of_iterations() const
std::string GetVariableTypeString(VariableType variable_type)
Definition: lp_types.cc:52
const VariableStatusRow & GetStatusRow() const
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
Fractional GetBoundDifference(ColIndex col) const
SatParameters parameters
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Definition: lp_types.h:321
Fractional GetDualValue(RowIndex row) const
std::vector< double > upper_bounds
const ScatteredRow & GetUnitRowLeftInverse() const
Definition: update_row.cc:45
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:41
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
const Fractional GetCoefficient(ColIndex col) const
Definition: update_row.h:70
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:383
void ResetAllNonBasicVariableValues(const DenseRow &free_initial_values)
std::string StringifyMonomial(const Fractional a, const std::string &x, bool fraction)
void RecomputeFullUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:241
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:398
int64_t value
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
Definition: update_row.cc:171
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:889
Fractional GetConstraintActivity(RowIndex row) const
const DenseBitRow & GetIsRelevantBitRow() const
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
const int INFO
Definition: log_severity.h:31
bool LoadBoundsAndReturnTrueIfUnchanged(const DenseRow &new_lower_bounds, const DenseRow &new_upper_bounds)
int64_t a
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)