OR-Tools  9.2
revised_simplex.cc
Go to the documentation of this file.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <algorithm>
17 #include <cmath>
18 #include <cstdint>
19 #include <functional>
20 #include <map>
21 #include <string>
22 #include <utility>
23 #include <vector>
24 
25 #include "absl/strings/str_cat.h"
26 #include "absl/strings/str_format.h"
29 #include "ortools/base/logging.h"
39 #include "ortools/util/fp_utils.h"
40 
41 ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false,
42  "Display numbers as fractions.");
43 ABSL_FLAG(bool, simplex_stop_after_first_basis, false,
44  "Stop after first basis has been computed.");
45 ABSL_FLAG(bool, simplex_stop_after_feasibility, false,
46  "Stop after first phase has been completed.");
47 ABSL_FLAG(bool, simplex_display_stats, false, "Display algorithm statistics.");
48 
49 namespace operations_research {
50 namespace glop {
51 namespace {
52 
// Invokes the stored callable when this object is destroyed. Declaring one
// at the top of a function guarantees that the callable runs on every
// return path.
class Cleanup {
 public:
  explicit Cleanup(std::function<void()> on_destroy)
      : on_destroy_(std::move(on_destroy)) {}
  ~Cleanup() { on_destroy_(); }

 private:
  std::function<void()> on_destroy_;
};
64 } // namespace
65 
66 #define DCHECK_COL_BOUNDS(col) \
67  { \
68  DCHECK_LE(0, col); \
69  DCHECK_GT(num_cols_, col); \
70  }
71 
72 // TODO(user): Remove this function.
73 #define DCHECK_ROW_BOUNDS(row) \
74  { \
75  DCHECK_LE(0, row); \
76  DCHECK_GT(num_rows_, row); \
77  }
78 
79 constexpr const uint64_t kDeterministicSeed = 42;
80 
82  : problem_status_(ProblemStatus::INIT),
83  objective_(),
84  basis_(),
85  variable_name_(),
86  direction_(),
87  error_(),
88  deterministic_random_(kDeterministicSeed),
89  random_(deterministic_random_),
90  basis_factorization_(&compact_matrix_, &basis_),
91  variables_info_(compact_matrix_),
92  primal_edge_norms_(compact_matrix_, variables_info_,
93  basis_factorization_),
94  dual_edge_norms_(basis_factorization_),
95  dual_prices_(random_),
96  variable_values_(parameters_, compact_matrix_, basis_, variables_info_,
97  basis_factorization_, &dual_edge_norms_, &dual_prices_),
98  update_row_(compact_matrix_, transposed_matrix_, variables_info_, basis_,
99  basis_factorization_),
100  reduced_costs_(compact_matrix_, objective_, basis_, variables_info_,
101  basis_factorization_, random_),
102  entering_variable_(variables_info_, random_, &reduced_costs_),
103  primal_prices_(random_, variables_info_, &primal_edge_norms_,
104  &reduced_costs_),
105  iteration_stats_(),
106  ratio_test_stats_(),
107  function_stats_("SimplexFunctionStats"),
108  parameters_(),
109  test_lu_() {
110  SetParameters(parameters_);
111 }
112 
114  SCOPED_TIME_STAT(&function_stats_);
115  solution_state_.statuses.clear();
116  variable_starting_values_.clear();
117 }
118 
120  SCOPED_TIME_STAT(&function_stats_);
121  solution_state_ = state;
122  solution_state_has_been_set_externally_ = true;
123 }
124 
126  const DenseRow& values) {
127  variable_starting_values_ = values;
128 }
129 
131  notify_that_matrix_is_unchanged_ = true;
132 }
133 
135  SCOPED_TIME_STAT(&function_stats_);
136  DCHECK(lp.IsCleanedUp());
138  Cleanup update_deterministic_time_on_return(
139  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
140 
141  default_logger_.EnableLogging(parameters_.log_search_progress());
142  default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
143  SOLVER_LOG(logger_, "");
144 
145  // Initialization. Note that Initialize() must be called first since it
146  // analyzes the current solver state.
147  const double start_time = time_limit->GetElapsedTime();
148  GLOP_RETURN_IF_ERROR(Initialize(lp));
149  if (logger_->LoggingIsEnabled()) {
150  DisplayBasicVariableStatistics();
151  }
152 
153  dual_infeasibility_improvement_direction_.clear();
154  update_row_.Invalidate();
155  test_lu_.Clear();
156  problem_status_ = ProblemStatus::INIT;
157  phase_ = Phase::FEASIBILITY;
158  num_iterations_ = 0;
159  num_feasibility_iterations_ = 0;
160  num_optimization_iterations_ = 0;
161  num_push_iterations_ = 0;
162  feasibility_time_ = 0.0;
163  optimization_time_ = 0.0;
164  push_time_ = 0.0;
165  total_time_ = 0.0;
166 
167  // In case we abort because of an error, we cannot assume that the current
168  // solution state will be in sync with all our internal data structure. In
169  // case we abort without resetting it, setting this allow us to still use the
170  // previous state info, but we will double-check everything.
171  solution_state_has_been_set_externally_ = true;
172 
173  if (VLOG_IS_ON(2)) {
174  ComputeNumberOfEmptyRows();
175  ComputeNumberOfEmptyColumns();
176  DisplayProblem();
177  }
178  if (absl::GetFlag(FLAGS_simplex_stop_after_first_basis)) {
179  DisplayAllStats();
180  return Status::OK();
181  }
182 
183  const bool use_dual = parameters_.use_dual_simplex();
184 
185  // TODO(user): Avoid doing the first phase checks when we know from the
186  // incremental solve that the solution is already dual or primal feasible.
187  SOLVER_LOG(logger_, "");
188  primal_edge_norms_.SetPricingRule(parameters_.feasibility_rule());
189  if (use_dual) {
190  if (parameters_.perturb_costs_in_dual_simplex()) {
191  reduced_costs_.PerturbCosts();
192  }
193 
194  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
195  variables_info_.MakeBoxedVariableRelevant(false);
197  DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
198 
199  if (problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
200  // Note(user): In most cases, the matrix will already be refactorized
201  // and both Refactorize() and PermuteBasis() will do nothing. However,
202  // if the time limit is reached during the first phase, this might not
203  // be the case and RecomputeBasicVariableValues() below DCHECKs that the
204  // matrix is refactorized. This is not required, but we currently only
205  // want to recompute values from scratch when the matrix was just
206  // refactorized to maximize precision.
207  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
208  PermuteBasis();
209 
210  variables_info_.MakeBoxedVariableRelevant(true);
211  reduced_costs_.MakeReducedCostsPrecise();
212 
213  // This is needed to display errors properly.
214  MakeBoxedVariableDualFeasible(
215  variables_info_.GetNonBasicBoxedVariables(),
216  /*update_basic_values=*/false);
217  variable_values_.RecomputeBasicVariableValues();
218  }
219  } else {
220  // Test initial dual infeasibility, ignoring boxed variables. We currently
221  // refactorize/recompute the reduced costs if not already done.
222  // TODO(user): Not ideal in an incremental setting.
223  reduced_costs_.MakeReducedCostsPrecise();
224  bool refactorize = reduced_costs_.NeedsBasisRefactorization();
225  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
226 
227  const Fractional initial_infeasibility =
229  if (initial_infeasibility <
230  reduced_costs_.GetDualFeasibilityTolerance()) {
231  SOLVER_LOG(logger_, "Initial basis is dual feasible.");
232  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
233  MakeBoxedVariableDualFeasible(
234  variables_info_.GetNonBasicBoxedVariables(),
235  /*update_basic_values=*/false);
236  variable_values_.RecomputeBasicVariableValues();
237  } else {
238  // Transform problem and recompute variable values.
239  variables_info_.TransformToDualPhaseIProblem(
240  reduced_costs_.GetDualFeasibilityTolerance(),
241  reduced_costs_.GetReducedCosts());
242  DenseRow zero; // We want the FREE variable at zero here.
243  variable_values_.ResetAllNonBasicVariableValues(zero);
244  variable_values_.RecomputeBasicVariableValues();
245 
246  // Optimize.
247  DisplayErrors();
248  GLOP_RETURN_IF_ERROR(DualMinimize(false, time_limit));
249 
250  // Restore original problem and recompute variable values. Note that we
251  // need the reduced cost on the fixed positions here.
252  variables_info_.EndDualPhaseI(
253  reduced_costs_.GetDualFeasibilityTolerance(),
254  reduced_costs_.GetFullReducedCosts());
255  variable_values_.ResetAllNonBasicVariableValues(
256  variable_starting_values_);
257  variable_values_.RecomputeBasicVariableValues();
258 
259  // TODO(user): Note that if there was cost shifts, we just keep them
260  // until the end of the optim.
261  //
262  // TODO(user): What if slightly infeasible? we shouldn't really stop.
263  // Call primal ? use higher tolerance ? It seems we can always kind of
264  // continue and deal with the issue later. Find a way other than this +
265  // 1e-6 hack.
266  if (problem_status_ == ProblemStatus::OPTIMAL) {
267  if (reduced_costs_.ComputeMaximumDualInfeasibility() <
268  reduced_costs_.GetDualFeasibilityTolerance() + 1e-6) {
269  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
270  } else {
271  SOLVER_LOG(logger_, "Infeasible after first phase.");
272  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
273  }
274  }
275  }
276  }
277  } else {
278  GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
279 
280  // After the primal phase I, we need to restore the objective.
281  if (problem_status_ != ProblemStatus::PRIMAL_INFEASIBLE) {
282  InitializeObjectiveAndTestIfUnchanged(lp);
283  reduced_costs_.ResetForNewObjective();
284  }
285  }
286 
287  DisplayErrors();
288 
289  phase_ = Phase::OPTIMIZATION;
290  feasibility_time_ = time_limit->GetElapsedTime() - start_time;
291  primal_edge_norms_.SetPricingRule(parameters_.optimization_rule());
292  num_feasibility_iterations_ = num_iterations_;
293 
294  // Because of shifts or perturbations, we may need to re-run a dual simplex
295  // after the primal simplex finished, or the opposite.
296  //
297  // We alternate between solving with primal and dual Phase II algorithm as long as
298  // time limit permits *and* we did not yet achieve the desired precision.
299  // I.e., we run iteration i if the solution from iteration i-1 was not precise
300  // after we removed the bound and cost shifts and perturbations.
301  //
302  // NOTE(user): We may still hit the limit of max_number_of_reoptimizations()
303  // which means the status returned can be PRIMAL_FEASIBLE or DUAL_FEASIBLE
304  // (i.e., these statuses are not necessarily a consequence of hitting a time
305  // limit).
306  SOLVER_LOG(logger_, "");
307  for (int num_optims = 0;
308  // We want to enter the loop when both num_optims and num_iterations_ are
309  // *equal* to the corresponding limits (to return a meaningful status
310  // when the limits are set to 0).
311  num_optims <= parameters_.max_number_of_reoptimizations() &&
312  !objective_limit_reached_ &&
313  (num_iterations_ == 0 ||
314  num_iterations_ < parameters_.max_number_of_iterations()) &&
315  !time_limit->LimitReached() &&
316  !absl::GetFlag(FLAGS_simplex_stop_after_feasibility) &&
317  (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
318  problem_status_ == ProblemStatus::DUAL_FEASIBLE);
319  ++num_optims) {
320  if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE) {
321  // Run the primal simplex.
322  GLOP_RETURN_IF_ERROR(PrimalMinimize(time_limit));
323  } else {
324  // Run the dual simplex.
326  DualMinimize(phase_ == Phase::FEASIBILITY, time_limit));
327  }
328 
329  // PrimalMinimize() or DualMinimize() always double check the result with
330  // maximum precision by refactoring the basis before exiting (except if an
331  // iteration or time limit was reached).
332  DCHECK(problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
333  problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
334  basis_factorization_.IsRefactorized());
335 
336  // If SetIntegralityScale() was called, we perform a polish operation.
337  if (!integrality_scale_.empty() &&
338  problem_status_ == ProblemStatus::OPTIMAL) {
340  }
341 
342  // Remove the bound and cost shifts (or perturbations).
343  //
344  // Note(user): Currently, we never do both at the same time, so we could
345  // be a bit faster here, but then this is quick anyway.
346  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
347  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
348  PermuteBasis();
349  variable_values_.RecomputeBasicVariableValues();
350  reduced_costs_.ClearAndRemoveCostShifts();
351 
352  DisplayErrors();
353 
354  // TODO(user): We should also confirm the PRIMAL_UNBOUNDED or DUAL_UNBOUNDED
355  // status by checking with the other phase I that the problem is really
356  // DUAL_INFEASIBLE or PRIMAL_INFEASIBLE. For instance we currently report
357  // PRIMAL_UNBOUNDED with the primal on the problem l30.mps instead of
358  // OPTIMAL and the dual does not have issues on this problem.
359  //
360  // TODO(user): There is another issue on infeas/qual.mps. I think we should
361  // just check the dual ray, not really the current solution dual
362  // feasibility.
363  if (problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
364  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
365  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
366  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
367  variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
368  SOLVER_LOG(logger_,
369  "PRIMAL_UNBOUNDED was reported, but the residual and/or "
370  "dual infeasibility is above the tolerance");
371  if (parameters_.change_status_to_imprecise()) {
372  problem_status_ = ProblemStatus::IMPRECISE;
373  }
374  break;
375  }
376 
377  // All of our tolerance are okay, but the dual ray might be fishy. This
378  // happens on l30.mps and on L1_sixm250obs.mps.gz. If the ray does not
379  // seem good enough, we might actually just be at the optimal and have
380  // trouble going down to our relatively low default tolerances.
381  //
382  // The difference between optimal and unbounded can be thin. Say you
383  // have a free variable with no constraint and a cost of epsilon,
384  // depending on epsilon and your tolerance, this will either cause the
385  // problem to be unbounded, or can be ignored.
386  //
387  // Here, we compute what is the cost gain if we move from the current
388  // value with the ray up to the bounds + tolerance. If this gain is < 1,
389  // it is hard to claim we are really unbounded. This is a quick
390  // heuristic to error on the side of optimality rather than
391  // unboundedness.
392  double max_magnitude = 0.0;
393  double min_distance = kInfinity;
394  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
395  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
396  double cost_delta = 0.0;
397  for (ColIndex col(0); col < num_cols_; ++col) {
398  cost_delta += solution_primal_ray_[col] * objective_[col];
399  if (solution_primal_ray_[col] > 0 && upper_bounds[col] != kInfinity) {
400  const Fractional value = variable_values_.Get(col);
401  const Fractional distance = (upper_bounds[col] - value + tolerance) /
402  solution_primal_ray_[col];
403  min_distance = std::min(distance, min_distance);
404  max_magnitude = std::max(solution_primal_ray_[col], max_magnitude);
405  }
406  if (solution_primal_ray_[col] < 0 && lower_bounds[col] != -kInfinity) {
407  const Fractional value = variable_values_.Get(col);
408  const Fractional distance = (value - lower_bounds[col] + tolerance) /
409  -solution_primal_ray_[col];
410  min_distance = std::min(distance, min_distance);
411  max_magnitude = std::max(-solution_primal_ray_[col], max_magnitude);
412  }
413  }
414  SOLVER_LOG(logger_, "Primal unbounded ray: max blocking magnitude = ",
415  max_magnitude, ", min distance to bound + ", tolerance, " = ",
416  min_distance, ", ray cost delta = ", cost_delta);
417  if (min_distance * std::abs(cost_delta) < 1 &&
418  reduced_costs_.ComputeMaximumDualInfeasibility() <= tolerance) {
419  SOLVER_LOG(logger_,
420  "PRIMAL_UNBOUNDED was reported, but the tolerance are good "
421  "and the unbounded ray is not great.");
422  SOLVER_LOG(logger_,
423  "The difference between unbounded and optimal can depends "
424  "on a slight change of tolerance, trying to see if we are "
425  "at OPTIMAL after postsolve.");
426  problem_status_ = ProblemStatus::OPTIMAL;
427  }
428  break;
429  }
430  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) {
431  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
432  if (reduced_costs_.ComputeMaximumDualResidual() > tolerance ||
433  variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
434  reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
435  SOLVER_LOG(logger_,
436  "DUAL_UNBOUNDED was reported, but the residual and/or "
437  "dual infeasibility is above the tolerance");
438  if (parameters_.change_status_to_imprecise()) {
439  problem_status_ = ProblemStatus::IMPRECISE;
440  }
441  }
442  break;
443  }
444 
445  // Change the status, if after the shift and perturbation removal the
446  // problem is not OPTIMAL anymore.
447  if (problem_status_ == ProblemStatus::OPTIMAL) {
448  const Fractional solution_tolerance =
449  parameters_.solution_feasibility_tolerance();
450  const Fractional primal_residual =
451  variable_values_.ComputeMaximumPrimalResidual();
452  const Fractional dual_residual =
453  reduced_costs_.ComputeMaximumDualResidual();
454  if (primal_residual > solution_tolerance ||
455  dual_residual > solution_tolerance) {
456  SOLVER_LOG(logger_,
457  "OPTIMAL was reported, yet one of the residuals is "
458  "above the solution feasibility tolerance after the "
459  "shift/perturbation are removed.");
460  if (parameters_.change_status_to_imprecise()) {
461  problem_status_ = ProblemStatus::IMPRECISE;
462  }
463  } else {
464  // We use the "precise" tolerances here to try to report the best
465  // possible solution. Note however that we cannot really hope for an
466  // infeasibility lower than its corresponding residual error. Note that
467  // we already adapt the tolerance like this during the simplex
468  // execution.
469  const Fractional primal_tolerance = std::max(
470  primal_residual, parameters_.primal_feasibility_tolerance());
471  const Fractional dual_tolerance =
472  std::max(dual_residual, parameters_.dual_feasibility_tolerance());
473  const Fractional primal_infeasibility =
474  variable_values_.ComputeMaximumPrimalInfeasibility();
475  const Fractional dual_infeasibility =
476  reduced_costs_.ComputeMaximumDualInfeasibility();
477  if (primal_infeasibility > primal_tolerance &&
478  dual_infeasibility > dual_tolerance) {
479  SOLVER_LOG(logger_,
480  "OPTIMAL was reported, yet both of the infeasibility "
481  "are above the tolerance after the "
482  "shift/perturbation are removed.");
483  if (parameters_.change_status_to_imprecise()) {
484  problem_status_ = ProblemStatus::IMPRECISE;
485  }
486  } else if (primal_infeasibility > primal_tolerance) {
487  if (num_optims == parameters_.max_number_of_reoptimizations()) {
488  SOLVER_LOG(logger_,
489  "The primal infeasibility is still higher than the "
490  "requested internal tolerance, but the maximum "
491  "number of optimization is reached.");
492  break;
493  }
494  SOLVER_LOG(logger_, "");
495  SOLVER_LOG(logger_, "Re-optimizing with dual simplex ... ");
496  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
497  } else if (dual_infeasibility > dual_tolerance) {
498  if (num_optims == parameters_.max_number_of_reoptimizations()) {
499  SOLVER_LOG(logger_,
500  "The dual infeasibility is still higher than the "
501  "requested internal tolerance, but the maximum "
502  "number of optimization is reached.");
503  break;
504  }
505  SOLVER_LOG(logger_, "");
506  SOLVER_LOG(logger_, "Re-optimizing with primal simplex ... ");
507  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
508  }
509  }
510  }
511  }
512 
513  // Check that the return status is "precise".
514  //
515  // TODO(user): we currently skip the DUAL_INFEASIBLE status because the
516  // quantities are not up to date in this case.
517  if (parameters_.change_status_to_imprecise() &&
518  problem_status_ != ProblemStatus::DUAL_INFEASIBLE) {
519  const Fractional tolerance = parameters_.solution_feasibility_tolerance();
520  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance ||
521  reduced_costs_.ComputeMaximumDualResidual() > tolerance) {
522  problem_status_ = ProblemStatus::IMPRECISE;
523  } else if (problem_status_ == ProblemStatus::DUAL_FEASIBLE ||
524  problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
525  problem_status_ == ProblemStatus::PRIMAL_INFEASIBLE) {
526  if (reduced_costs_.ComputeMaximumDualInfeasibility() > tolerance) {
527  problem_status_ = ProblemStatus::IMPRECISE;
528  }
529  } else if (problem_status_ == ProblemStatus::PRIMAL_FEASIBLE ||
530  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED ||
531  problem_status_ == ProblemStatus::DUAL_INFEASIBLE) {
532  if (variable_values_.ComputeMaximumPrimalInfeasibility() > tolerance) {
533  problem_status_ = ProblemStatus::IMPRECISE;
534  }
535  }
536  }
537 
538  total_time_ = time_limit->GetElapsedTime() - start_time;
539  optimization_time_ = total_time_ - feasibility_time_;
540  num_optimization_iterations_ = num_iterations_ - num_feasibility_iterations_;
541 
542  // If the user didn't provide starting variable values, then there is no need
543  // to check for super-basic variables.
544  if (!variable_starting_values_.empty()) {
545  const int num_super_basic = ComputeNumberOfSuperBasicVariables();
546  if (num_super_basic > 0) {
547  SOLVER_LOG(logger_,
548  "Num super-basic variables left after optimize phase: ",
549  num_super_basic);
550  if (parameters_.push_to_vertex()) {
551  if (problem_status_ == ProblemStatus::OPTIMAL) {
552  SOLVER_LOG(logger_, "");
553  phase_ = Phase::PUSH;
554  GLOP_RETURN_IF_ERROR(PrimalPush(time_limit));
555  // TODO(user): We should re-check for feasibility at this point and
556  // apply clean-up as needed.
557  } else {
558  SOLVER_LOG(logger_,
559  "Skipping push phase because optimize didn't succeed.");
560  }
561  }
562  }
563  }
564 
565  total_time_ = time_limit->GetElapsedTime() - start_time;
566  push_time_ = total_time_ - feasibility_time_ - optimization_time_;
567  num_push_iterations_ = num_iterations_ - num_feasibility_iterations_ -
568  num_optimization_iterations_;
569 
570  // Store the result for the solution getters.
571  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
572  solution_dual_values_ = reduced_costs_.GetDualValues();
573  solution_reduced_costs_ = reduced_costs_.GetReducedCosts();
574  SaveState();
575 
576  if (lp.IsMaximizationProblem()) {
577  ChangeSign(&solution_dual_values_);
578  ChangeSign(&solution_reduced_costs_);
579  }
580 
581  // If the problem is unbounded, set the objective value to +/- infinity.
582  if (problem_status_ == ProblemStatus::DUAL_UNBOUNDED ||
583  problem_status_ == ProblemStatus::PRIMAL_UNBOUNDED) {
584  solution_objective_value_ =
585  (problem_status_ == ProblemStatus::DUAL_UNBOUNDED) ? kInfinity
586  : -kInfinity;
587  if (lp.IsMaximizationProblem()) {
588  solution_objective_value_ = -solution_objective_value_;
589  }
590  }
591 
592  variable_starting_values_.clear();
593  DisplayAllStats();
594  return Status::OK();
595 }
596 
598  return problem_status_;
599 }
600 
602  return solution_objective_value_;
603 }
604 
606  return num_iterations_;
607 }
608 
609 RowIndex RevisedSimplex::GetProblemNumRows() const { return num_rows_; }
610 
611 ColIndex RevisedSimplex::GetProblemNumCols() const { return num_cols_; }
612 
614  return variable_values_.Get(col);
615 }
616 
618  return solution_reduced_costs_[col];
619 }
620 
622  return solution_reduced_costs_;
623 }
624 
626  return solution_dual_values_[row];
627 }
628 
630  return variables_info_.GetStatusRow()[col];
631 }
632 
633 const BasisState& RevisedSimplex::GetState() const { return solution_state_; }
634 
636  // Note the negative sign since the slack variable is such that
637  // constraint_activity + slack_value = 0.
638  return -variable_values_.Get(SlackColIndex(row));
639 }
640 
642  // The status of the given constraint is the same as the status of the
643  // associated slack variable with a change of sign.
644  const VariableStatus s = variables_info_.GetStatusRow()[SlackColIndex(row)];
647  }
650  }
651  return VariableToConstraintStatus(s);
652 }
653 
655  DCHECK_EQ(problem_status_, ProblemStatus::PRIMAL_UNBOUNDED);
656  return solution_primal_ray_;
657 }
659  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
660  return solution_dual_ray_;
661 }
662 
664  DCHECK_EQ(problem_status_, ProblemStatus::DUAL_UNBOUNDED);
665  return solution_dual_ray_row_combination_;
666 }
667 
668 ColIndex RevisedSimplex::GetBasis(RowIndex row) const { return basis_[row]; }
669 
671  DCHECK(basis_factorization_.GetColumnPermutation().empty());
672  return basis_factorization_;
673 }
674 
// Returns a human-readable multi-line summary of the last solve: final
// status, total/per-phase timings and iteration counts, plus whether the
// stop-after-first-basis flag was set.
std::string RevisedSimplex::GetPrettySolverStats() const {
  return absl::StrFormat(
      "Problem status : %s\n"
      "Solving time : %-6.4g\n"
      "Number of iterations : %u\n"
      "Time for solvability (first phase) : %-6.4g\n"
      "Number of iterations for solvability : %u\n"
      "Time for optimization : %-6.4g\n"
      "Number of iterations for optimization : %u\n"
      "Stop after first basis : %d\n",
      GetProblemStatusString(problem_status_), total_time_, num_iterations_,
      feasibility_time_, num_feasibility_iterations_, optimization_time_,
      num_optimization_iterations_,
      absl::GetFlag(FLAGS_simplex_stop_after_first_basis));
}
690 
692  // TODO(user): Count what is missing.
693  return DeterministicTimeForFpOperations(num_update_price_operations_) +
694  basis_factorization_.DeterministicTime() +
695  update_row_.DeterministicTime() +
696  entering_variable_.DeterministicTime() +
697  reduced_costs_.DeterministicTime() +
698  primal_edge_norms_.DeterministicTime();
699 }
700 
701 void RevisedSimplex::SetVariableNames() {
702  variable_name_.resize(num_cols_, "");
703  for (ColIndex col(0); col < first_slack_col_; ++col) {
704  const ColIndex var_index = col + 1;
705  variable_name_[col] = absl::StrFormat("x%d", ColToIntIndex(var_index));
706  }
707  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
708  const ColIndex var_index = col - first_slack_col_ + 1;
709  variable_name_[col] = absl::StrFormat("s%d", ColToIntIndex(var_index));
710  }
711 }
712 
// Sets the given non-basic status on the variable and then recomputes the
// variable's value from that status (the two calls must stay in this order:
// the value is derived from the freshly updated status).
void RevisedSimplex::SetNonBasicVariableStatusAndDeriveValue(
    ColIndex col, VariableStatus status) {
  variables_info_.UpdateToNonBasicStatus(col, status);
  variable_values_.SetNonBasicVariableValueFromStatus(col);
}
718 
719 bool RevisedSimplex::BasisIsConsistent() const {
720  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
721  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
722  for (RowIndex row(0); row < num_rows_; ++row) {
723  const ColIndex col = basis_[row];
724  if (!is_basic.IsSet(col)) return false;
725  if (variable_statuses[col] != VariableStatus::BASIC) return false;
726  }
727  ColIndex cols_in_basis(0);
728  ColIndex cols_not_in_basis(0);
729  for (ColIndex col(0); col < num_cols_; ++col) {
730  cols_in_basis += is_basic.IsSet(col);
731  cols_not_in_basis += !is_basic.IsSet(col);
732  if (is_basic.IsSet(col) !=
733  (variable_statuses[col] == VariableStatus::BASIC)) {
734  return false;
735  }
736  }
737  if (cols_in_basis != RowToColIndex(num_rows_)) return false;
738  if (cols_not_in_basis != num_cols_ - RowToColIndex(num_rows_)) return false;
739  return true;
740 }
741 
// Replaces the basic variable of row basis_row by entering_col: updates
// basis_, both variables' statuses, and invalidates the cached update row.
//
// Note(user): The basis factorization is not updated by this function but by
// UpdateAndPivot().
void RevisedSimplex::UpdateBasis(ColIndex entering_col, RowIndex basis_row,
                                 VariableStatus leaving_variable_status) {
  SCOPED_TIME_STAT(&function_stats_);
  DCHECK_COL_BOUNDS(entering_col);
  DCHECK_ROW_BOUNDS(basis_row);

  // Check that this is not called with an entering_col already in the basis
  // and that the leaving col is indeed in the basis.
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK_NE(basis_[basis_row], entering_col);
  DCHECK_NE(basis_[basis_row], kInvalidCol);

  const ColIndex leaving_col = basis_[basis_row];
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // Make leaving_col leave the basis and update relevant data.
  // Note that the leaving variable value is not necessarily at its exact
  // bound, which is like a bound shift.
  variables_info_.UpdateToNonBasicStatus(leaving_col, leaving_variable_status);
  // The leaving variable must end up non-basic at a bound or at a fixed
  // value; any other status would be inconsistent.
  DCHECK(leaving_variable_status == VariableStatus::AT_UPPER_BOUND ||
         leaving_variable_status == VariableStatus::AT_LOWER_BOUND ||
         leaving_variable_status == VariableStatus::FIXED_VALUE);

  // Install the entering column in the basis slot; the cached update row is
  // no longer valid once the basis changes.
  basis_[basis_row] = entering_col;
  variables_info_.UpdateToBasicStatus(entering_col);
  update_row_.Invalidate();
}
771 
772 namespace {
773 
774 // Comparator used to sort column indices according to a given value vector.
775 class ColumnComparator {
776  public:
777  explicit ColumnComparator(const DenseRow& value) : value_(value) {}
778  bool operator()(ColIndex col_a, ColIndex col_b) const {
779  return value_[col_a] < value_[col_b];
780  }
781 
782  private:
783  const DenseRow& value_;
784 };
785 
786 } // namespace
787 
788 // To understand better what is going on in this function, let us say that this
789 // algorithm will produce the optimal solution to a problem containing only
790 // singleton columns (provided that the variables start at the minimum possible
791 // cost, see DefaultVariableStatus()). This is unit tested.
792 //
793 // The error_ must be equal to the constraint activity for the current variable
794 // values before this function is called. If error_[row] is 0.0, that mean this
795 // constraint is currently feasible.
796 void RevisedSimplex::UseSingletonColumnInInitialBasis(RowToColMapping* basis) {
797  SCOPED_TIME_STAT(&function_stats_);
798  // Computes the singleton columns and the cost variation of the corresponding
799  // variables (in the only possible direction, i.e away from its current bound)
800  // for a unit change in the infeasibility of the corresponding row.
801  //
802  // Note that the slack columns will be treated as normal singleton columns.
803  std::vector<ColIndex> singleton_column;
804  DenseRow cost_variation(num_cols_, 0.0);
805  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
806  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
807  for (ColIndex col(0); col < num_cols_; ++col) {
 // A candidate column must have exactly one nonzero entry, and its variable
 // must not be fixed (equal bounds), since a fixed variable cannot move.
808  if (compact_matrix_.column(col).num_entries() != 1) continue;
809  if (lower_bounds[col] == upper_bounds[col]) continue;
 // Cost change per unit of absorbed row infeasibility. The sign depends on
 // which bound the variable currently sits at: it can only move away from
 // that bound.
810  const Fractional slope = compact_matrix_.column(col).GetFirstCoefficient();
811  if (variable_values_.Get(col) == lower_bounds[col]) {
812  cost_variation[col] = objective_[col] / std::abs(slope);
813  } else {
814  cost_variation[col] = -objective_[col] / std::abs(slope);
815  }
816  singleton_column.push_back(col);
817  }
818  if (singleton_column.empty()) return;
819 
820  // Sort the singleton columns for the case where many of them correspond to
821  // the same row (equivalent to a piecewise-linear objective on this variable).
822  // Negative cost_variation first since moving the singleton variable away from
823  // its current bound means the least decrease in the objective function for
824  // the same "error" variation.
825  ColumnComparator comparator(cost_variation);
826  std::sort(singleton_column.begin(), singleton_column.end(), comparator);
827  DCHECK_LE(cost_variation[singleton_column.front()],
828  cost_variation[singleton_column.back()]);
829 
830  // Use a singleton column to "absorb" the error when possible to avoid
831  // introducing unneeded artificial variables. Note that with scaling on, the
832  // only possible coefficient values are 1.0 or -1.0 (or maybe epsilon close to
833  // them) and that the SingletonColumnSignPreprocessor makes them all positive.
834  // However, this code works for any coefficient value.
835  const DenseRow& variable_values = variable_values_.GetDenseRow();
836  for (const ColIndex col : singleton_column) {
837  const RowIndex row = compact_matrix_.column(col).EntryRow(EntryIndex(0));
838 
839  // If no singleton columns have entered the basis for this row, choose the
840  // first one. It will be the one with the least decrease in the objective
841  // function when it leaves the basis.
842  if ((*basis)[row] == kInvalidCol) {
843  (*basis)[row] = col;
844  }
845 
846  // If there is already no error in this row (i.e. it is primal-feasible),
847  // there is nothing to do.
848  if (error_[row] == 0.0) continue;
849 
850  // In this case, all the infeasibility can be "absorbed" and this variable
851  // may not be at one of its bound anymore, so we have to use it in the
852  // basis.
853  const Fractional coeff =
854  compact_matrix_.column(col).EntryCoefficient(EntryIndex(0));
855  const Fractional new_value = variable_values[col] + error_[row] / coeff;
856  if (new_value >= lower_bounds[col] && new_value <= upper_bounds[col]) {
857  error_[row] = 0.0;
858 
859  // Use this variable in the initial basis.
860  (*basis)[row] = col;
861  continue;
862  }
863 
864  // The idea here is that if the singleton column cannot be used to "absorb"
865  // all error_[row], if it is boxed, it can still be used to make the
866  // infeasibility smaller (with a bound flip).
867  const Fractional box_width = variables_info_.GetBoundDifference(col);
868  DCHECK_NE(box_width, 0.0);
869  DCHECK_NE(error_[row], 0.0);
870  const Fractional error_sign = error_[row] / coeff;
871  if (variable_values[col] == lower_bounds[col] && error_sign > 0.0) {
872  DCHECK(IsFinite(box_width));
873  error_[row] -= coeff * box_width;
 // NOTE(review): the VariableStatus argument of this call (original line
 // 875) was lost in extraction; presumably AT_UPPER_BOUND since the
 // variable flips from its lower to its upper bound -- confirm upstream.
874  SetNonBasicVariableStatusAndDeriveValue(col,
876  continue;
877  }
878  if (variable_values[col] == upper_bounds[col] && error_sign < 0.0) {
879  DCHECK(IsFinite(box_width));
880  error_[row] += coeff * box_width;
 // NOTE(review): the VariableStatus argument of this call (original line
 // 882) was lost in extraction; presumably AT_LOWER_BOUND for the
 // symmetric bound flip -- confirm upstream.
881  SetNonBasicVariableStatusAndDeriveValue(col,
883  continue;
884  }
885  }
886 }
887 
888 bool RevisedSimplex::InitializeMatrixAndTestIfUnchanged(
889  const LinearProgram& lp, bool lp_is_in_equation_form,
890  bool* only_change_is_new_rows, bool* only_change_is_new_cols,
891  ColIndex* num_new_cols) {
 // Rebuilds compact_matrix_ (and transposed_matrix_ if requested) from lp,
 // unless the matrix is exactly unchanged, in which case it returns true and
 // leaves everything as-is. The out-parameters report two incremental
 // special cases: only new rows were appended, or only new columns were
 // appended (with *num_new_cols set accordingly).
892  SCOPED_TIME_STAT(&function_stats_);
893  DCHECK(only_change_is_new_rows != nullptr);
894  DCHECK(only_change_is_new_cols != nullptr);
895  DCHECK(num_new_cols != nullptr);
896  DCHECK_EQ(num_cols_, compact_matrix_.num_cols());
897  DCHECK_EQ(num_rows_, compact_matrix_.num_rows());
898 
899  // This works whether the lp is in equation form (with slack) or not.
 // NOTE(review): the callee name on the next statement (original line 901)
 // was lost in extraction; from its arguments it compares the leading
 // num_rows_ x first_slack_col_ part of lp.GetSparseMatrix() against
 // compact_matrix_ -- confirm against upstream.
900  const bool old_part_of_matrix_is_unchanged =
902  num_rows_, first_slack_col_, lp.GetSparseMatrix(), compact_matrix_);
903 
904  // This is the only adaptation we need for the test below.
905  const ColIndex lp_first_slack =
906  lp_is_in_equation_form ? lp.GetFirstSlackVariable() : lp.num_variables();
907 
908  // Test if the matrix is unchanged, and if yes, just returns true. Note that
909  // this doesn't check the columns corresponding to the slack variables,
910  // because they were checked by lp.IsInEquationForm() when Solve() was called.
911  if (old_part_of_matrix_is_unchanged && lp.num_constraints() == num_rows_ &&
912  lp_first_slack == first_slack_col_) {
913  return true;
914  }
915 
916  // Check if the new matrix can be derived from the old one just by adding
917  // new rows (i.e new constraints).
918  *only_change_is_new_rows = old_part_of_matrix_is_unchanged &&
919  lp.num_constraints() > num_rows_ &&
920  lp_first_slack == first_slack_col_;
921 
922  // Check if the new matrix can be derived from the old one just by adding
923  // new columns (i.e new variables).
924  *only_change_is_new_cols = old_part_of_matrix_is_unchanged &&
925  lp.num_constraints() == num_rows_ &&
926  lp_first_slack > first_slack_col_;
927  *num_new_cols = *only_change_is_new_cols ? lp_first_slack - first_slack_col_
928  : ColIndex(0);
929 
930  // Initialize first_slack_.
931  first_slack_col_ = lp_first_slack;
932 
933  // Initialize the new dimensions.
934  num_rows_ = lp.num_constraints();
935  num_cols_ = lp_first_slack + RowToColIndex(lp.num_constraints());
936 
937  // Populate compact_matrix_ and transposed_matrix_ if needed. Note that we
938  // already added all the slack variables at this point, so matrix_ will not
939  // change anymore.
940  if (lp_is_in_equation_form) {
941  // TODO(user): This can be sped up by removing the MatrixView, but then
942  // this path will likely go away.
943  compact_matrix_.PopulateFromMatrixView(MatrixView(lp.GetSparseMatrix()));
944  } else {
945  compact_matrix_.PopulateFromSparseMatrixAndAddSlacks(lp.GetSparseMatrix());
946  }
947  if (parameters_.use_transposed_matrix()) {
948  transposed_matrix_.PopulateFromTranspose(compact_matrix_);
949  }
950  return false;
951 }
952 
953 // Preconditions: This should only be called if there are only new variable
954 // in the lp.
955 bool RevisedSimplex::OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
956  const LinearProgram& lp, bool lp_is_in_equation_form,
957  ColIndex num_new_cols) {
958  SCOPED_TIME_STAT(&function_stats_);
959  DCHECK_LE(num_new_cols, first_slack_col_);
960  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
961 
962  // Check the original variable bounds.
963  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
964  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
965  for (ColIndex col(0); col < first_new_col; ++col) {
966  if (lower_bounds[col] != lp.variable_lower_bounds()[col] ||
967  upper_bounds[col] != lp.variable_upper_bounds()[col]) {
968  return false;
969  }
970  }
971 
972  // Check that each new variable has a bound of zero.
973  for (ColIndex col(first_new_col); col < first_slack_col_; ++col) {
974  if (lp.variable_lower_bounds()[col] != 0.0 &&
975  lp.variable_upper_bounds()[col] != 0.0) {
976  return false;
977  }
978  }
979 
980  // Check that the slack bounds are unchanged.
981  if (lp_is_in_equation_form) {
982  for (ColIndex col(first_slack_col_); col < num_cols_; ++col) {
983  if (lower_bounds[col - num_new_cols] != lp.variable_lower_bounds()[col] ||
984  upper_bounds[col - num_new_cols] != lp.variable_upper_bounds()[col]) {
985  return false;
986  }
987  }
988  } else {
989  DCHECK_EQ(num_rows_, lp.num_constraints());
990  for (RowIndex row(0); row < num_rows_; ++row) {
991  const ColIndex col = first_slack_col_ + RowToColIndex(row);
992  if (lower_bounds[col - num_new_cols] !=
993  -lp.constraint_upper_bounds()[row] ||
994  upper_bounds[col - num_new_cols] !=
995  -lp.constraint_lower_bounds()[row]) {
996  return false;
997  }
998  }
999  }
1000  return true;
1001 }
1002 
1003 bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
1004  const LinearProgram& lp) {
1005  SCOPED_TIME_STAT(&function_stats_);
1006 
1007  bool objective_is_unchanged = true;
1008  objective_.resize(num_cols_, 0.0);
1009 
1010  // This function work whether the lp is in equation form (with slack) or
1011  // without, since the objective of the slacks are always zero.
1012  DCHECK_GE(num_cols_, lp.num_variables());
1013  for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) {
1014  if (objective_[col] != 0.0) {
1015  objective_is_unchanged = false;
1016  objective_[col] = 0.0;
1017  }
1018  }
1019 
1020  if (lp.IsMaximizationProblem()) {
1021  // Note that we use the minimization version of the objective internally.
1022  for (ColIndex col(0); col < lp.num_variables(); ++col) {
1023  const Fractional coeff = -lp.objective_coefficients()[col];
1024  if (objective_[col] != coeff) {
1025  objective_is_unchanged = false;
1026  objective_[col] = coeff;
1027  }
1028  }
1029  objective_offset_ = -lp.objective_offset();
1030  objective_scaling_factor_ = -lp.objective_scaling_factor();
1031  } else {
1032  for (ColIndex col(0); col < lp.num_variables(); ++col) {
1033  const Fractional coeff = lp.objective_coefficients()[col];
1034  if (objective_[col] != coeff) {
1035  objective_is_unchanged = false;
1036  objective_[col] = coeff;
1037  }
1038  }
1039  objective_offset_ = lp.objective_offset();
1040  objective_scaling_factor_ = lp.objective_scaling_factor();
1041  }
1042 
1043  return objective_is_unchanged;
1044 }
1045 
1046 void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
1047  objective_limit_reached_ = false;
1048  DCHECK(std::isfinite(objective_offset_));
1049  DCHECK(std::isfinite(objective_scaling_factor_));
1050  DCHECK_NE(0.0, objective_scaling_factor_);
1051 
1052  // This sets dual_objective_limit_ and then primal_objective_limit_.
1053  for (const bool set_dual : {true, false}) {
1054  // NOTE(user): If objective_scaling_factor_ is negative, the optimization
1055  // direction was reversed (during preprocessing or inside revised simplex),
1056  // i.e., the original problem is maximization. In such case the _meaning_ of
1057  // the lower and upper limits is swapped. To this end we must change the
1058  // signs of limits, which happens automatically when calculating shifted
1059  // limits. We must also use upper (resp. lower) limit in place of lower
1060  // (resp. upper) limit when calculating the final objective_limit_.
1061  //
1062  // Choose lower limit if using the dual simplex and scaling factor is
1063  // negative or if using the primal simplex and scaling is nonnegative, upper
1064  // limit otherwise.
1065  const Fractional limit = (objective_scaling_factor_ >= 0.0) != set_dual
1066  ? parameters_.objective_lower_limit()
1067  : parameters_.objective_upper_limit();
1068  const Fractional shifted_limit =
1069  limit / objective_scaling_factor_ - objective_offset_;
1070  if (set_dual) {
1071  dual_objective_limit_ = shifted_limit;
1072  } else {
1073  primal_objective_limit_ = shifted_limit;
1074  }
1075  }
1076 }
1077 
1078 // This implementation starts with an initial matrix B equal to the identity
1079 // matrix (modulo a column permutation). For that it uses either the slack
1080 // variables or the singleton columns present in the problem. Afterwards, the
1081 // fixed slacks in the basis are exchanged with normal columns of A if possible
1082 // by the InitialBasis class.
1083 Status RevisedSimplex::CreateInitialBasis() {
1084  SCOPED_TIME_STAT(&function_stats_);
1085 
1086  // Initialize the variable values and statuses.
1087  // Note that for the dual algorithm, boxed variables will be made
1088  // dual-feasible later by MakeBoxedVariableDualFeasible(), so it doesn't
1089  // really matter at which of their two finite bounds they start.
1090  variables_info_.InitializeToDefaultStatus();
1091  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1092 
1093  // Start by using an all-slack basis.
1094  RowToColMapping basis(num_rows_, kInvalidCol);
1095  for (RowIndex row(0); row < num_rows_; ++row) {
1096  basis[row] = SlackColIndex(row);
1097  }
1098 
1099  // If possible, for the primal simplex we replace some slack variables with
1100  // some singleton columns present in the problem.
1101  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1102  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1103  if (!parameters_.use_dual_simplex() &&
1104  parameters_.initial_basis() != GlopParameters::MAROS &&
 // NOTE(review): the last condition of this if (original line 1105) was
 // lost in extraction; it gates the singleton-column replacement on a
 // parameter -- confirm against upstream.
1106  // For UseSingletonColumnInInitialBasis() to work better, we change
1107  // the value of the boxed singleton column with a non-zero cost to the best
1108  // of their two bounds.
1109  for (ColIndex col(0); col < num_cols_; ++col) {
1110  if (compact_matrix_.column(col).num_entries() != 1) continue;
1111  const VariableStatus status = variables_info_.GetStatusRow()[col];
1112  const Fractional objective = objective_[col];
1113  if (objective > 0 && IsFinite(lower_bounds[col]) &&
1114  status == VariableStatus::AT_UPPER_BOUND) {
 // NOTE(review): the VariableStatus argument (original line 1116) was
 // lost in extraction; presumably AT_LOWER_BOUND since a positive cost
 // prefers the finite lower bound -- confirm upstream.
1115  SetNonBasicVariableStatusAndDeriveValue(col,
1117  } else if (objective < 0 && IsFinite(upper_bounds[col]) &&
1118  status == VariableStatus::AT_LOWER_BOUND) {
 // NOTE(review): the VariableStatus argument (original line 1120) was
 // lost in extraction; presumably AT_UPPER_BOUND for the symmetric case
 // -- confirm upstream.
1119  SetNonBasicVariableStatusAndDeriveValue(col,
1121  }
1122  }
1123 
1124  // Compute the primal infeasibility of the initial variable values in
1125  // error_.
1126  ComputeVariableValuesError();
1127 
1128  // TODO(user): A better but slightly more complex algorithm would be to:
1129  // - Ignore all singleton columns except the slacks during phase I.
1130  // - For this, change the slack variable bounds accordingly.
1131  // - At the end of phase I, restore the slack variable bounds and perform
1132  // the same algorithm to start with feasible and "optimal" values of the
1133  // singleton columns.
1134  basis.assign(num_rows_, kInvalidCol);
1135  UseSingletonColumnInInitialBasis(&basis);
1136 
1137  // Eventually complete the basis with fixed slack columns.
1138  for (RowIndex row(0); row < num_rows_; ++row) {
1139  if (basis[row] == kInvalidCol) {
1140  basis[row] = SlackColIndex(row);
1141  }
1142  }
1143  }
1144 
1145  // Use an advanced initial basis to remove the fixed variables from the basis.
1146  if (parameters_.initial_basis() == GlopParameters::NONE) {
1147  return InitializeFirstBasis(basis);
1148  }
1149  if (parameters_.initial_basis() == GlopParameters::MAROS) {
1150  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1151  upper_bounds, variables_info_.GetTypeRow());
1152  if (parameters_.use_dual_simplex()) {
1153  // This dual version only uses zero-cost columns to complete the
1154  // basis.
1155  initial_basis.GetDualMarosBasis(num_cols_, &basis);
1156  } else {
1157  initial_basis.GetPrimalMarosBasis(num_cols_, &basis);
1158  }
 // Count how many rows ended up with a non-slack basic column, for logging.
1159  int number_changed = 0;
1160  for (RowIndex row(0); row < num_rows_; ++row) {
1161  if (basis[row] != SlackColIndex(row)) {
1162  number_changed++;
1163  }
1164  }
1165  VLOG(1) << "Number of Maros basis changes: " << number_changed;
1166  } else if (parameters_.initial_basis() == GlopParameters::BIXBY ||
1167  parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1168  // First unassign the fixed variables from basis.
1169  int num_fixed_variables = 0;
1170  for (RowIndex row(0); row < basis.size(); ++row) {
1171  const ColIndex col = basis[row];
1172  if (lower_bounds[col] == upper_bounds[col]) {
1173  basis[row] = kInvalidCol;
1174  ++num_fixed_variables;
1175  }
1176  }
1177 
1178  if (num_fixed_variables == 0) {
1179  SOLVER_LOG(logger_, "Crash is set to ", parameters_.initial_basis(),
1180  " but there is no equality rows to remove from initial all "
1181  "slack basis. Starting from there.");
1182  } else {
1183  // Then complete the basis with an advanced initial basis algorithm.
1184  SOLVER_LOG(logger_, "Trying to remove ", num_fixed_variables,
1185  " fixed variables from the initial basis.");
1186  InitialBasis initial_basis(compact_matrix_, objective_, lower_bounds,
1187  upper_bounds, variables_info_.GetTypeRow());
1188 
1189  if (parameters_.initial_basis() == GlopParameters::BIXBY) {
1190  if (parameters_.use_scaling()) {
1191  initial_basis.CompleteBixbyBasis(first_slack_col_, &basis);
1192  } else {
1193  VLOG(1) << "Bixby initial basis algorithm requires the problem "
1194  << "to be scaled. Skipping Bixby's algorithm.";
1195  }
1196  } else if (parameters_.initial_basis() == GlopParameters::TRIANGULAR) {
1197  // Note the use of num_cols_ here because this algorithm
1198  // benefits from treating fixed slack columns like any other column.
1199  if (parameters_.use_dual_simplex()) {
1200  // This dual version only uses zero-cost columns to complete the
1201  // basis.
1202  initial_basis.CompleteTriangularDualBasis(num_cols_, &basis);
1203  } else {
1204  initial_basis.CompleteTriangularPrimalBasis(num_cols_, &basis);
1205  }
1206 
 // The triangular path tries to factorize right away and falls back to
 // the all-slack basis on failure.
1207  const Status status = InitializeFirstBasis(basis);
1208  if (status.ok()) {
1209  return status;
1210  } else {
1211  SOLVER_LOG(
1212  logger_,
1213  "Advanced basis algo failed, Reverting to all slack basis.");
1214 
1215  for (RowIndex row(0); row < num_rows_; ++row) {
1216  basis[row] = SlackColIndex(row);
1217  }
1218  }
1219  }
1220  }
1221  } else {
1222  LOG(WARNING) << "Unsupported initial_basis parameters: "
1223  << parameters_.initial_basis();
1224  }
1225 
1226  return InitializeFirstBasis(basis);
1227 }
1228 
1229 Status RevisedSimplex::InitializeFirstBasis(const RowToColMapping& basis) {
1230  basis_ = basis;
1231 
1232  // For each row which does not have a basic column, assign it to the
1233  // corresponding slack column.
1234  basis_.resize(num_rows_, kInvalidCol);
1235  for (RowIndex row(0); row < num_rows_; ++row) {
1236  if (basis_[row] == kInvalidCol) {
1237  basis_[row] = SlackColIndex(row);
1238  }
1239  }
1240 
1241  GLOP_RETURN_IF_ERROR(basis_factorization_.Initialize());
1242  PermuteBasis();
1243 
1244  // Test that the upper bound on the condition number of basis is not too high.
1245  // The number was not computed by any rigorous analysis, we just prefer to
1246  // revert to the all slack basis if the condition number of our heuristic
1247  // first basis seems bad. See for instance on cond11.mps, where we get an
1248  // infinity upper bound.
1249  const Fractional condition_number_ub =
1250  basis_factorization_.ComputeInfinityNormConditionNumberUpperBound();
1251  if (condition_number_ub > parameters_.initial_condition_number_threshold()) {
1252  const std::string error_message =
1253  absl::StrCat("The matrix condition number upper bound is too high: ",
1254  condition_number_ub);
1255  SOLVER_LOG(logger_, error_message);
1256  return Status(Status::ERROR_LU, error_message);
1257  }
1258 
1259  // Everything is okay, finish the initialization.
1260  for (RowIndex row(0); row < num_rows_; ++row) {
1261  variables_info_.UpdateToBasicStatus(basis_[row]);
1262  }
1263  DCHECK(BasisIsConsistent());
1264 
1265  variable_values_.ResetAllNonBasicVariableValues(variable_starting_values_);
1266  variable_values_.RecomputeBasicVariableValues();
1267 
1268  if (logger_->LoggingIsEnabled()) {
1269  // TODO(user): Maybe return an error status if this is too high. Note
1270  // however that if we want to do that, we need to reset variables_info_ to a
1271  // consistent state.
1272  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1273  if (variable_values_.ComputeMaximumPrimalResidual() > tolerance) {
1274  SOLVER_LOG(
1275  logger_,
1276  "The primal residual of the initial basis is above the tolerance, ",
1277  variable_values_.ComputeMaximumPrimalResidual(), " vs. ", tolerance);
1278  }
1279  }
1280  return Status::OK();
1281 }
1282 
// Prepares the solver for lp: (re)loads the matrix, objective and bounds,
// then decides between an incremental warm start (reusing solution_state_)
// and solving from scratch with a freshly created initial basis.
1283 Status RevisedSimplex::Initialize(const LinearProgram& lp) {
1284  parameters_ = initial_parameters_;
1285  PropagateParameters();
1286 
1287  // We accept both kind of input.
1288  //
1289  // TODO(user): Ideally there should be no need to ever put the slack in the
1290  // LinearProgram. That take extra memory (one big SparseColumn per slack) and
1291  // just add visible overhead in incremental solve when one wants to add/remove
1292  // constraints. But for historical reason, we handle both for now.
1293  const bool lp_is_in_equation_form = lp.IsInEquationForm();
1294 
1295  // Calling InitializeMatrixAndTestIfUnchanged() first is important because
1296  // this is where num_rows_ and num_cols_ are computed.
1297  //
1298  // Note that these functions can't depend on use_dual_simplex() since we may
1299  // change it below.
1300  ColIndex num_new_cols(0);
1301  bool only_change_is_new_rows = false;
1302  bool only_change_is_new_cols = false;
1303  bool matrix_is_unchanged = true;
1304  bool only_new_bounds = false;
1305  if (solution_state_.IsEmpty() || !notify_that_matrix_is_unchanged_) {
1306  matrix_is_unchanged = InitializeMatrixAndTestIfUnchanged(
1307  lp, lp_is_in_equation_form, &only_change_is_new_rows,
1308  &only_change_is_new_cols, &num_new_cols);
1309  only_new_bounds = only_change_is_new_cols && num_new_cols > 0 &&
1310  OldBoundsAreUnchangedAndNewVariablesHaveOneBoundAtZero(
1311  lp, lp_is_in_equation_form, num_new_cols);
1312  } else if (DEBUG_MODE) {
 // The caller promised the matrix is unchanged; in debug builds we verify
 // that promise instead of trusting it.
1313  CHECK(InitializeMatrixAndTestIfUnchanged(
1314  lp, lp_is_in_equation_form, &only_change_is_new_rows,
1315  &only_change_is_new_cols, &num_new_cols));
1316  }
1317  notify_that_matrix_is_unchanged_ = false;
1318 
1319  // TODO(user): move objective with ReducedCosts class.
1320  const bool objective_is_unchanged = InitializeObjectiveAndTestIfUnchanged(lp);
1321 
1322  const bool bounds_are_unchanged =
1323  lp_is_in_equation_form
1324  ? variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
1325  lp.variable_lower_bounds(), lp.variable_upper_bounds())
1326  : variables_info_.LoadBoundsAndReturnTrueIfUnchanged(
1327  lp.variable_lower_bounds(), lp.variable_upper_bounds(),
1328  lp.constraint_lower_bounds(), lp.constraint_upper_bounds());
1329 
1330  // If parameters_.allow_simplex_algorithm_change() is true and we already have
1331  // a primal (resp. dual) feasible solution, then we use the primal (resp.
1332  // dual) algorithm since there is a good chance that it will be faster.
1333  if (matrix_is_unchanged && parameters_.allow_simplex_algorithm_change()) {
1334  if (objective_is_unchanged && !bounds_are_unchanged) {
1335  parameters_.set_use_dual_simplex(true);
1336  PropagateParameters();
1337  }
1338  if (bounds_are_unchanged && !objective_is_unchanged) {
1339  parameters_.set_use_dual_simplex(false);
1340  PropagateParameters();
1341  }
1342  }
1343 
1344  InitializeObjectiveLimit(lp);
1345 
1346  // Computes the variable name as soon as possible for logging.
1347  // TODO(user): do we really need to store them? we could just compute them
1348  // on the fly since we do not need the speed.
1349  if (VLOG_IS_ON(2)) {
1350  SetVariableNames();
1351  }
1352 
1353  // Warm-start? This is supported only if the solution_state_ is non empty,
1354  // i.e., this revised simplex i) was already used to solve a problem, or
1355  // ii) the solution state was provided externally. Note that the
1356  // solution_state_ may have nothing to do with the current problem, e.g.,
1357  // objective, matrix, and/or bounds had changed. So we support several
1358  // scenarios of warm-start depending on how did the problem change and which
1359  // simplex algorithm is used (primal or dual).
1360  bool solve_from_scratch = true;
1361 
1362  // Try to perform a "quick" warm-start with no matrix factorization involved.
1363  if (!solution_state_.IsEmpty() && !solution_state_has_been_set_externally_) {
1364  if (!parameters_.use_dual_simplex()) {
1365  // With primal simplex, always clear dual norms and dual pricing.
1366  // Incrementality is supported only if only change to the matrix and
1367  // bounds is adding new columns (objective may change), and that all
1368  // new columns have a bound equal to zero.
1369  dual_edge_norms_.Clear();
1370  dual_pricing_vector_.clear();
1371  if (matrix_is_unchanged && bounds_are_unchanged) {
1372  // TODO(user): Do not do that if objective_is_unchanged. Currently
1373  // this seems to break something. Investigate.
1374  reduced_costs_.ClearAndRemoveCostShifts();
1375  solve_from_scratch = false;
1376  } else if (only_change_is_new_cols && only_new_bounds) {
 // New columns were appended before the slacks: load the old statuses
 // and shift the basic column indices past the insertion point.
1377  variables_info_.InitializeFromBasisState(first_slack_col_, num_new_cols,
1378  solution_state_);
1379  variable_values_.ResetAllNonBasicVariableValues(
1380  variable_starting_values_);
1381 
1382  const ColIndex first_new_col(first_slack_col_ - num_new_cols);
1383  for (ColIndex& col_ref : basis_) {
1384  if (col_ref >= first_new_col) {
1385  col_ref += num_new_cols;
1386  }
1387  }
1388 
1389  // Make sure the primal edge norm are recomputed from scratch.
1390  // TODO(user): only the norms of the new columns actually need to be
1391  // computed.
1392  primal_edge_norms_.Clear();
1393  reduced_costs_.ClearAndRemoveCostShifts();
1394  solve_from_scratch = false;
1395  }
1396  } else {
1397  // With dual simplex, always clear primal norms. Incrementality is
1398  // supported only if the objective remains the same (the matrix may
1399  // contain new rows and the bounds may change).
1400  primal_edge_norms_.Clear();
1401  if (objective_is_unchanged) {
1402  if (matrix_is_unchanged) {
1403  if (!bounds_are_unchanged) {
1404  variables_info_.InitializeFromBasisState(
1405  first_slack_col_, ColIndex(0), solution_state_);
1406  variable_values_.ResetAllNonBasicVariableValues(
1407  variable_starting_values_);
1408  variable_values_.RecomputeBasicVariableValues();
1409  }
1410  solve_from_scratch = false;
1411  } else if (only_change_is_new_rows) {
1412  // For the dual-simplex, we also perform a warm start if a couple of
1413  // new rows where added.
1414  variables_info_.InitializeFromBasisState(
1415  first_slack_col_, ColIndex(0), solution_state_);
1416  dual_edge_norms_.ResizeOnNewRows(num_rows_);
1417 
1418  // TODO(user): The reduced costs do not really need to be recomputed.
1419  // We just need to initialize the ones of the new slack variables to
1420  // 0.
1421  reduced_costs_.ClearAndRemoveCostShifts();
1422  dual_pricing_vector_.clear();
1423 
1424  // Note that this needs to be done after the Clear() calls above.
1425  if (InitializeFirstBasis(basis_).ok()) {
1426  solve_from_scratch = false;
1427  }
1428  }
1429  }
1430  }
1431  }
1432 
1433  // If we couldn't perform a "quick" warm start above, we can at least try to
1434  // reuse the variable statuses.
1435  if (solve_from_scratch && !solution_state_.IsEmpty()) {
1436  basis_factorization_.Clear();
1437  reduced_costs_.ClearAndRemoveCostShifts();
1438  primal_edge_norms_.Clear();
1439  dual_edge_norms_.Clear();
1440  dual_pricing_vector_.clear();
1441 
1442  // If an external basis has been provided or if the matrix changed, we need
1443  // to perform more work, e.g., factorize the proposed basis and validate it.
1444  variables_info_.InitializeFromBasisState(first_slack_col_, ColIndex(0),
1445  solution_state_);
1446 
1447  // Use the set of basic columns as a "hint" to construct the first basis.
1448  std::vector<ColIndex> candidates;
1449  for (const ColIndex col : variables_info_.GetIsBasicBitRow()) {
1450  candidates.push_back(col);
1451  }
1452  SOLVER_LOG(logger_, "The warm-start state contains ", candidates.size(),
1453  " candidates for the basis (num_rows = ", num_rows_.value(),
1454  ").");
1455 
1456  // Optimization: Try to factorize it right away if we have the correct
1457  // number of element. Ideally the other path below would no require a
1458  // "double" factorization effort, so this would not be needed.
1459  if (candidates.size() == num_rows_) {
1460  basis_.clear();
1461  for (const ColIndex col : candidates) {
1462  basis_.push_back(col);
1463  }
1464 
1465  // TODO(user): Depending on the error here, there is no point doing extra
1466  // work below. This is the case when we fail because of a bad initial
1467  // condition number for instance.
1468  if (InitializeFirstBasis(basis_).ok()) {
1469  solve_from_scratch = false;
1470  }
1471  }
1472 
1473  if (solve_from_scratch) {
 // The hint alone was not factorizable: let the factorization pick a
 // maximal subset of the candidates and complete it with slacks.
1474  basis_ = basis_factorization_.ComputeInitialBasis(candidates);
1475  const int num_super_basic =
1476  variables_info_.ChangeUnusedBasicVariablesToFree(basis_);
1477  const int num_snapped = variables_info_.SnapFreeVariablesToBound(
1478  parameters_.crossover_bound_snapping_distance(),
1479  variable_starting_values_);
1480  if (logger_->LoggingIsEnabled()) {
1481  SOLVER_LOG(logger_, "The initial basis did not use ",
1482  " BASIC columns from the initial state and used ",
1483  (num_rows_ - (candidates.size() - num_super_basic)).value(),
1484  " slack variables that were not marked BASIC.");
1485  if (num_snapped > 0) {
1486  SOLVER_LOG(logger_, num_snapped,
1487  " of the FREE variables where moved to their bound.");
1488  }
1489  }
1490 
1491  if (InitializeFirstBasis(basis_).ok()) {
1492  solve_from_scratch = false;
1493  } else {
1494  SOLVER_LOG(logger_,
1495  "RevisedSimplex is not using the warm start "
1496  "basis because it is not factorizable.");
1497  }
1498  }
1499  }
1500 
1501  if (solve_from_scratch) {
1502  SOLVER_LOG(logger_, "Starting basis: create from scratch.");
1503  basis_factorization_.Clear();
1504  reduced_costs_.ClearAndRemoveCostShifts();
1505  primal_edge_norms_.Clear();
1506  dual_edge_norms_.Clear();
1507  dual_pricing_vector_.clear();
1508  GLOP_RETURN_IF_ERROR(CreateInitialBasis());
1509  } else {
1510  SOLVER_LOG(logger_, "Starting basis: incremental solve.");
1511  }
1512  DCHECK(BasisIsConsistent());
1513  return Status::OK();
1514 }
1515 
1516 void RevisedSimplex::DisplayBasicVariableStatistics() {
1517  SCOPED_TIME_STAT(&function_stats_);
1518 
1519  int num_fixed_variables = 0;
1520  int num_free_variables = 0;
1521  int num_variables_at_bound = 0;
1522  int num_slack_variables = 0;
1523  int num_infeasible_variables = 0;
1524 
1525  const DenseRow& variable_values = variable_values_.GetDenseRow();
1526  const VariableTypeRow& variable_types = variables_info_.GetTypeRow();
1527  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1528  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1529  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
1530  for (RowIndex row(0); row < num_rows_; ++row) {
1531  const ColIndex col = basis_[row];
1532  const Fractional value = variable_values[col];
1533  if (variable_types[col] == VariableType::UNCONSTRAINED) {
1534  ++num_free_variables;
1535  }
1536  if (value > upper_bounds[col] + tolerance ||
1537  value < lower_bounds[col] - tolerance) {
1538  ++num_infeasible_variables;
1539  }
1540  if (col >= first_slack_col_) {
1541  ++num_slack_variables;
1542  }
1543  if (lower_bounds[col] == upper_bounds[col]) {
1544  ++num_fixed_variables;
1545  } else if (variable_values[col] == lower_bounds[col] ||
1546  variable_values[col] == upper_bounds[col]) {
1547  ++num_variables_at_bound;
1548  }
1549  }
1550 
1551  SOLVER_LOG(logger_, "The matrix with slacks has ",
1552  compact_matrix_.num_rows().value(), " rows, ",
1553  compact_matrix_.num_cols().value(), " columns, ",
1554  compact_matrix_.num_entries().value(), " entries.");
1555  SOLVER_LOG(logger_, "Number of basic infeasible variables: ",
1556  num_infeasible_variables);
1557  SOLVER_LOG(logger_, "Number of basic slack variables: ", num_slack_variables);
1558  SOLVER_LOG(logger_,
1559  "Number of basic variables at bound: ", num_variables_at_bound);
1560  SOLVER_LOG(logger_, "Number of basic fixed variables: ", num_fixed_variables);
1561  SOLVER_LOG(logger_, "Number of basic free variables: ", num_free_variables);
1562  SOLVER_LOG(logger_, "Number of super-basic variables: ",
1563  ComputeNumberOfSuperBasicVariables());
1564 }
1565 
1566 void RevisedSimplex::SaveState() {
1567  DCHECK_EQ(num_cols_, variables_info_.GetStatusRow().size());
1568  solution_state_.statuses = variables_info_.GetStatusRow();
1569  solution_state_has_been_set_externally_ = false;
1570 }
1571 
1572 RowIndex RevisedSimplex::ComputeNumberOfEmptyRows() {
1573  DenseBooleanColumn contains_data(num_rows_, false);
1574  for (ColIndex col(0); col < num_cols_; ++col) {
1575  for (const SparseColumn::Entry e : compact_matrix_.column(col)) {
1576  contains_data[e.row()] = true;
1577  }
1578  }
1579  RowIndex num_empty_rows(0);
1580  for (RowIndex row(0); row < num_rows_; ++row) {
1581  if (!contains_data[row]) {
1582  ++num_empty_rows;
1583  VLOG(2) << "Row " << row << " is empty.";
1584  }
1585  }
1586  return num_empty_rows;
1587 }
1588 
1589 ColIndex RevisedSimplex::ComputeNumberOfEmptyColumns() {
1590  ColIndex num_empty_cols(0);
1591  for (ColIndex col(0); col < num_cols_; ++col) {
1592  if (compact_matrix_.column(col).IsEmpty()) {
1593  ++num_empty_cols;
1594  VLOG(2) << "Column " << col << " is empty.";
1595  }
1596  }
1597  return num_empty_cols;
1598 }
1599 
1600 int RevisedSimplex::ComputeNumberOfSuperBasicVariables() const {
1601  const VariableStatusRow& variable_statuses = variables_info_.GetStatusRow();
1602  int num_super_basic = 0;
1603  for (ColIndex col(0); col < num_cols_; ++col) {
1604  if (variable_statuses[col] == VariableStatus::FREE &&
1605  variable_values_.Get(col) != 0.0) {
1606  ++num_super_basic;
1607  }
1608  }
1609  return num_super_basic;
1610 }
1611 
1612 void RevisedSimplex::CorrectErrorsOnVariableValues() {
1613  SCOPED_TIME_STAT(&function_stats_);
1614  DCHECK(basis_factorization_.IsRefactorized());
1615 
1616  // TODO(user): The primal residual error does not change if we take degenerate
1617  // steps or if we do not change the variable values. No need to recompute it
1618  // in this case.
1619  const Fractional primal_residual =
1620  variable_values_.ComputeMaximumPrimalResidual();
1621 
1622  // If the primal_residual is within the tolerance, no need to recompute
1623  // the basic variable values with a better precision.
1624  if (primal_residual >= parameters_.harris_tolerance_ratio() *
1625  parameters_.primal_feasibility_tolerance()) {
1626  variable_values_.RecomputeBasicVariableValues();
1627  VLOG(1) << "Primal infeasibility (bounds error) = "
1628  << variable_values_.ComputeMaximumPrimalInfeasibility()
1629  << ", Primal residual |A.x - b| = "
1630  << variable_values_.ComputeMaximumPrimalResidual();
1631  }
1632 }
1633 
1634 void RevisedSimplex::ComputeVariableValuesError() {
1635  SCOPED_TIME_STAT(&function_stats_);
1636  error_.AssignToZero(num_rows_);
1637  const DenseRow& variable_values = variable_values_.GetDenseRow();
1638  for (ColIndex col(0); col < num_cols_; ++col) {
1639  const Fractional value = variable_values[col];
1640  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -value, &error_);
1641  }
1642 }
1643 
1644 void RevisedSimplex::ComputeDirection(ColIndex col) {
1645  SCOPED_TIME_STAT(&function_stats_);
1647  basis_factorization_.RightSolveForProblemColumn(col, &direction_);
1648  direction_infinity_norm_ = 0.0;
1649  if (direction_.non_zeros.empty()) {
1650  // We still compute the direction non-zeros because our code relies on it.
1651  for (RowIndex row(0); row < num_rows_; ++row) {
1652  const Fractional value = direction_[row];
1653  if (value != 0.0) {
1654  direction_.non_zeros.push_back(row);
1655  direction_infinity_norm_ =
1656  std::max(direction_infinity_norm_, std::abs(value));
1657  }
1658  }
1659  } else {
1660  for (const auto e : direction_) {
1661  direction_infinity_norm_ =
1662  std::max(direction_infinity_norm_, std::abs(e.coefficient()));
1663  }
1664  }
1665  IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
1666  num_rows_ == 0 ? 0.0
1667  : static_cast<double>(direction_.non_zeros.size()) /
1668  static_cast<double>(num_rows_.value())));
1669 }
1670 
1671 Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
1672  SCOPED_TIME_STAT(&function_stats_);
1673  compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
1674  for (const auto e : direction_) {
1675  compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
1676  &error_);
1677  }
1678  return InfinityNorm(error_);
1679 }
1680 
1681 template <bool is_entering_reduced_cost_positive>
1682 Fractional RevisedSimplex::GetRatio(const DenseRow& lower_bounds,
1683  const DenseRow& upper_bounds,
1684  RowIndex row) const {
1685  const ColIndex col = basis_[row];
1686  const Fractional direction = direction_[row];
1687  const Fractional value = variable_values_.Get(col);
1688  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
1689  DCHECK_NE(direction, 0.0);
1690  if (is_entering_reduced_cost_positive) {
1691  if (direction > 0.0) {
1692  return (upper_bounds[col] - value) / direction;
1693  } else {
1694  return (lower_bounds[col] - value) / direction;
1695  }
1696  } else {
1697  if (direction > 0.0) {
1698  return (value - lower_bounds[col]) / direction;
1699  } else {
1700  return (value - upper_bounds[col]) / direction;
1701  }
1702  }
1703 }
1704 
// First pass of the Harris ratio test. Scans the direction and returns the
// Harris ratio: an upper bound on the entering step such that each basic
// variable stays within its bounds relaxed by the Harris tolerance. The rows
// whose (un-relaxed) ratio does not exceed this bound are collected into
// leaving_candidates for the second pass, which picks the most stable pivot
// among them.
//
// bound_flip_ratio caps the search: a variable whose ratio exceeds it can be
// skipped because the bound-flip of the entering variable would be preferred
// anyway. The template parameter is the sign of the entering reduced cost
// (forwarded to GetRatio()).
template <bool is_entering_reduced_cost_positive>
Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
    Fractional bound_flip_ratio, SparseColumn* leaving_candidates) const {
  SCOPED_TIME_STAT(&function_stats_);
  const Fractional harris_tolerance =
      parameters_.harris_tolerance_ratio() *
      parameters_.primal_feasibility_tolerance();
  const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
                                   parameters_.primal_feasibility_tolerance();

  // Initially, we can skip any variable with a ratio greater than
  // bound_flip_ratio since it seems to be always better to choose the
  // bound-flip over such leaving variable.
  Fractional harris_ratio = bound_flip_ratio;
  leaving_candidates->Clear();

  // If the basis is refactorized, then we should have everything with a good
  // precision, so we only consider "acceptable" pivots. Otherwise we consider
  // all the entries, and if the algorithm return a pivot that is too small, we
  // will refactorize and recompute the relevant quantities.
  const Fractional threshold = basis_factorization_.IsRefactorized()
                                   ? parameters_.minimum_acceptable_pivot()
                                   : parameters_.ratio_test_zero_threshold();

  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
  for (const auto e : direction_) {
    const Fractional magnitude = std::abs(e.coefficient());
    if (magnitude <= threshold) continue;
    const Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(
        lower_bounds, upper_bounds, e.row());
    if (ratio <= harris_ratio) {
      leaving_candidates->SetCoefficient(e.row(), ratio);

      // The second max() makes sure harris_ratio is lower bounded by a small
      // positive value. The more classical approach is to bound it by 0.0 but
      // since we will always perform a small positive step, we allow any
      // variable to go a bit more out of bound (even if it is past the harris
      // tolerance). This increase the number of candidates and allows us to
      // choose a more numerically stable pivot.
      //
      // Note that at least lower bounding it by 0.0 is really important on
      // numerically difficult problems because its helps in the choice of a
      // stable pivot.
      harris_ratio = std::min(harris_ratio,
                              std::max(minimum_delta / magnitude,
                                       ratio + harris_tolerance / magnitude));
    }
  }
  return harris_ratio;
}
1756 
1757 namespace {
1758 
1759 // Returns true if the candidate ratio is supposed to be more stable than the
1760 // current ratio (or if the two are equal).
1761 // The idea here is to take, by order of preference:
1762 // - the minimum positive ratio in order to intoduce a primal infeasibility
1763 // which is as small as possible.
1764 // - or the least negative one in order to have the smallest bound shift
1765 // possible on the leaving variable.
1766 bool IsRatioMoreOrEquallyStable(Fractional candidate, Fractional current) {
1767  if (current >= 0.0) {
1768  return candidate >= 0.0 && candidate <= current;
1769  } else {
1770  return candidate >= current;
1771  }
1772 }
1773 
1774 } // namespace
1775 
1776 // Ratio-test or Quotient-test. Choose the row of the leaving variable.
1777 // Known as CHUZR or CHUZRO in FORTRAN codes.
1778 Status RevisedSimplex::ChooseLeavingVariableRow(
1779  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1780  RowIndex* leaving_row, Fractional* step_length, Fractional* target_bound) {
1781  SCOPED_TIME_STAT(&function_stats_);
1782  GLOP_RETURN_ERROR_IF_NULL(refactorize);
1783  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
1784  GLOP_RETURN_ERROR_IF_NULL(step_length);
1785  DCHECK_COL_BOUNDS(entering_col);
1786  DCHECK_NE(0.0, reduced_cost);
1787 
1788  // A few cases will cause the test to be recomputed from the beginning.
1789  int stats_num_leaving_choices = 0;
1790  equivalent_leaving_choices_.clear();
1791  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1792  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
1793  while (true) {
1794  stats_num_leaving_choices = 0;
1795 
1796  // We initialize current_ratio with the maximum step the entering variable
1797  // can take (bound-flip). Note that we do not use tolerance here.
1798  const Fractional entering_value = variable_values_.Get(entering_col);
1799  Fractional current_ratio =
1800  (reduced_cost > 0.0) ? entering_value - lower_bounds[entering_col]
1801  : upper_bounds[entering_col] - entering_value;
1802  DCHECK_GT(current_ratio, 0.0);
1803 
1804  // First pass of the Harris ratio test. If 'harris_tolerance' is zero, this
1805  // actually computes the minimum leaving ratio of all the variables. This is
1806  // the same as the 'classic' ratio test.
1807  const Fractional harris_ratio =
1808  (reduced_cost > 0.0) ? ComputeHarrisRatioAndLeavingCandidates<true>(
1809  current_ratio, &leaving_candidates_)
1810  : ComputeHarrisRatioAndLeavingCandidates<false>(
1811  current_ratio, &leaving_candidates_);
1812 
1813  // If the bound-flip is a viable solution (i.e. it doesn't move the basic
1814  // variable too much out of bounds), we take it as it is always stable and
1815  // fast.
1816  if (current_ratio <= harris_ratio) {
1817  *leaving_row = kInvalidRow;
1818  *step_length = current_ratio;
1819  break;
1820  }
1821 
1822  // Second pass of the Harris ratio test. Amongst the variables with 'ratio
1823  // <= harris_ratio', we choose the leaving row with the largest coefficient.
1824  //
1825  // This has a big impact, because picking a leaving variable with a small
1826  // direction_[row] is the main source of Abnormal LU errors.
1827  Fractional pivot_magnitude = 0.0;
1828  stats_num_leaving_choices = 0;
1829  *leaving_row = kInvalidRow;
1830  equivalent_leaving_choices_.clear();
1831  for (const SparseColumn::Entry e : leaving_candidates_) {
1832  const Fractional ratio = e.coefficient();
1833  if (ratio > harris_ratio) continue;
1834  ++stats_num_leaving_choices;
1835  const RowIndex row = e.row();
1836 
1837  // If the magnitudes are the same, we choose the leaving variable with
1838  // what is probably the more stable ratio, see
1839  // IsRatioMoreOrEquallyStable().
1840  const Fractional candidate_magnitude = std::abs(direction_[row]);
1841  if (candidate_magnitude < pivot_magnitude) continue;
1842  if (candidate_magnitude == pivot_magnitude) {
1843  if (!IsRatioMoreOrEquallyStable(ratio, current_ratio)) continue;
1844  if (ratio == current_ratio) {
1845  DCHECK_NE(kInvalidRow, *leaving_row);
1846  equivalent_leaving_choices_.push_back(row);
1847  continue;
1848  }
1849  }
1850  equivalent_leaving_choices_.clear();
1851  current_ratio = ratio;
1852  pivot_magnitude = candidate_magnitude;
1853  *leaving_row = row;
1854  }
1855 
1856  // Break the ties randomly.
1857  if (!equivalent_leaving_choices_.empty()) {
1858  equivalent_leaving_choices_.push_back(*leaving_row);
1859  *leaving_row =
1860  equivalent_leaving_choices_[std::uniform_int_distribution<int>(
1861  0, equivalent_leaving_choices_.size() - 1)(random_)];
1862  }
1863 
1864  // Since we took care of the bound-flip at the beginning, at this point
1865  // we have a valid leaving row.
1866  DCHECK_NE(kInvalidRow, *leaving_row);
1867 
1868  // A variable already outside one of its bounds +/- tolerance is considered
1869  // at its bound and its ratio is zero. Not doing this may lead to a step
1870  // that moves the objective in the wrong direction. We may want to allow
1871  // such steps, but then we will need to check that it doesn't break the
1872  // bounds of the other variables.
1873  if (current_ratio <= 0.0) {
1874  // Instead of doing a zero step, we do a small positive step. This
1875  // helps on degenerate problems.
1876  const Fractional minimum_delta =
1877  parameters_.degenerate_ministep_factor() *
1878  parameters_.primal_feasibility_tolerance();
1879  *step_length = minimum_delta / pivot_magnitude;
1880  } else {
1881  *step_length = current_ratio;
1882  }
1883 
1884  // Note(user): Testing the pivot at each iteration is useful for debugging
1885  // an LU factorization problem. Remove the false if you need to investigate
1886  // this, it makes sure that this will be compiled away.
1887  if (/* DISABLES CODE */ (false)) {
1888  TestPivot(entering_col, *leaving_row);
1889  }
1890 
1891  // We try various "heuristics" to avoid a small pivot.
1892  //
1893  // The smaller 'direction_[*leaving_row]', the less precise
1894  // it is. So we want to avoid pivoting by such a row. Small pivots lead to
1895  // ill-conditioned bases or even to matrices that are not a basis at all if
1896  // the actual (infinite-precision) coefficient is zero.
1897  //
1898  // TODO(user): We may have to choose another entering column if
1899  // we cannot prevent pivoting by a small pivot.
1900  // (Chvatal, p.115, about epsilon2.)
1901  if (pivot_magnitude <
1902  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
1903  // The first countermeasure is to recompute everything to the best
1904  // precision we can in the hope of avoiding such a choice. Note that this
1905  // helps a lot on the Netlib problems.
1906  if (!basis_factorization_.IsRefactorized()) {
1907  VLOG(1) << "Refactorizing to avoid pivoting by "
1908  << direction_[*leaving_row]
1909  << " direction_infinity_norm_ = " << direction_infinity_norm_
1910  << " reduced cost = " << reduced_cost;
1911  *refactorize = true;
1912  return Status::OK();
1913  }
1914 
1915  // Because of the "threshold" in ComputeHarrisRatioAndLeavingCandidates()
1916  // we kwnow that this pivot will still have an acceptable magnitude.
1917  //
1918  // TODO(user): An issue left to fix is that if there is no such pivot at
1919  // all, then we will report unbounded even if this is not really the case.
1920  // As of 2018/07/18, this happens on l30.mps.
1921  VLOG(1) << "Couldn't avoid pivoting by " << direction_[*leaving_row]
1922  << " direction_infinity_norm_ = " << direction_infinity_norm_
1923  << " reduced cost = " << reduced_cost;
1924  DCHECK_GE(std::abs(direction_[*leaving_row]),
1925  parameters_.minimum_acceptable_pivot());
1926  IF_STATS_ENABLED(ratio_test_stats_.abs_tested_pivot.Add(pivot_magnitude));
1927  }
1928  break;
1929  }
1930 
1931  // Update the target bound.
1932  if (*leaving_row != kInvalidRow) {
1933  const bool is_reduced_cost_positive = (reduced_cost > 0.0);
1934  const bool is_leaving_coeff_positive = (direction_[*leaving_row] > 0.0);
1935  *target_bound = (is_reduced_cost_positive == is_leaving_coeff_positive)
1936  ? upper_bounds[basis_[*leaving_row]]
1937  : lower_bounds[basis_[*leaving_row]];
1938  }
1939 
1940  // Stats.
1942  ratio_test_stats_.leaving_choices.Add(stats_num_leaving_choices);
1943  if (!equivalent_leaving_choices_.empty()) {
1944  ratio_test_stats_.num_perfect_ties.Add(
1945  equivalent_leaving_choices_.size());
1946  }
1947  if (*leaving_row != kInvalidRow) {
1948  ratio_test_stats_.abs_used_pivot.Add(std::abs(direction_[*leaving_row]));
1949  }
1950  });
1951  return Status::OK();
1952 }
1953 
1954 namespace {
1955 
1956 // Store a row with its ratio, coefficient magnitude and target bound. This is
1957 // used by PrimalPhaseIChooseLeavingVariableRow(), see this function for more
1958 // details.
1959 struct BreakPoint {
1960  BreakPoint(RowIndex _row, Fractional _ratio, Fractional _coeff_magnitude,
1961  Fractional _target_bound)
1962  : row(_row),
1963  ratio(_ratio),
1964  coeff_magnitude(_coeff_magnitude),
1965  target_bound(_target_bound) {}
1966 
1967  // We want to process the breakpoints by increasing ratio and decreasing
1968  // coefficient magnitude (if the ratios are the same). Returns false if "this"
1969  // is before "other" in a priority queue.
1970  bool operator<(const BreakPoint& other) const {
1971  if (ratio == other.ratio) {
1972  if (coeff_magnitude == other.coeff_magnitude) {
1973  return row > other.row;
1974  }
1975  return coeff_magnitude < other.coeff_magnitude;
1976  }
1977  return ratio > other.ratio;
1978  }
1979 
1980  RowIndex row;
1984 };
1985 
1986 } // namespace
1987 
1988 void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
1989  ColIndex entering_col, Fractional reduced_cost, bool* refactorize,
1990  RowIndex* leaving_row, Fractional* step_length,
1991  Fractional* target_bound) const {
1992  SCOPED_TIME_STAT(&function_stats_);
1993  RETURN_IF_NULL(refactorize);
1994  RETURN_IF_NULL(leaving_row);
1995  RETURN_IF_NULL(step_length);
1996  DCHECK_COL_BOUNDS(entering_col);
1997  DCHECK_NE(0.0, reduced_cost);
1998  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
1999  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2000 
2001  // We initialize current_ratio with the maximum step the entering variable
2002  // can take (bound-flip). Note that we do not use tolerance here.
2003  const Fractional entering_value = variable_values_.Get(entering_col);
2004  Fractional current_ratio = (reduced_cost > 0.0)
2005  ? entering_value - lower_bounds[entering_col]
2006  : upper_bounds[entering_col] - entering_value;
2007  DCHECK_GT(current_ratio, 0.0);
2008 
2009  std::vector<BreakPoint> breakpoints;
2010  const Fractional tolerance = parameters_.primal_feasibility_tolerance();
2011  for (const auto e : direction_) {
2012  const Fractional direction =
2013  reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
2014  const Fractional magnitude = std::abs(direction);
2015  if (magnitude < tolerance) continue;
2016 
2017  // Computes by how much we can add 'direction' to the basic variable value
2018  // with index 'row' until it changes of primal feasibility status. That is
2019  // from infeasible to feasible or from feasible to infeasible. Note that the
2020  // transition infeasible->feasible->infeasible is possible. We use
2021  // tolerances here, but when the step will be performed, it will move the
2022  // variable to the target bound (possibly taking a small negative step).
2023  //
2024  // Note(user): The negative step will only happen when the leaving variable
2025  // was slightly infeasible (less than tolerance). Moreover, the overall
2026  // infeasibility will not necessarily increase since it doesn't take into
2027  // account all the variables with an infeasibility smaller than the
2028  // tolerance, and here we will at least improve the one of the leaving
2029  // variable.
2030  const ColIndex col = basis_[e.row()];
2031  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
2032 
2033  const Fractional value = variable_values_.Get(col);
2036  const Fractional to_lower = (lower_bound - tolerance - value) / direction;
2037  const Fractional to_upper = (upper_bound + tolerance - value) / direction;
2038 
2039  // Enqueue the possible transitions. Note that the second tests exclude the
2040  // case where to_lower or to_upper are infinite.
2041  if (to_lower >= 0.0 && to_lower < current_ratio) {
2042  breakpoints.push_back(
2043  BreakPoint(e.row(), to_lower, magnitude, lower_bound));
2044  }
2045  if (to_upper >= 0.0 && to_upper < current_ratio) {
2046  breakpoints.push_back(
2047  BreakPoint(e.row(), to_upper, magnitude, upper_bound));
2048  }
2049  }
2050 
2051  // Order the breakpoints by increasing ratio and decreasing coefficient
2052  // magnitude (if the ratios are the same).
2053  std::make_heap(breakpoints.begin(), breakpoints.end());
2054 
2055  // Select the last breakpoint that still improves the infeasibility and has
2056  // the largest coefficient magnitude.
2057  Fractional improvement = std::abs(reduced_cost);
2058  Fractional best_magnitude = 0.0;
2059  *leaving_row = kInvalidRow;
2060  while (!breakpoints.empty()) {
2061  const BreakPoint top = breakpoints.front();
2062  // TODO(user): consider using >= here. That will lead to bigger ratio and
2063  // hence a better impact on the infeasibility. The drawback is that more
2064  // effort may be needed to update the reduced costs.
2065  //
2066  // TODO(user): Use a random tie breaking strategy for BreakPoint with
2067  // same ratio and same coefficient magnitude? Koberstein explains in his PhD
2068  // that it helped on the dual-simplex.
2069  if (top.coeff_magnitude > best_magnitude) {
2070  *leaving_row = top.row;
2071  current_ratio = top.ratio;
2072  best_magnitude = top.coeff_magnitude;
2073  *target_bound = top.target_bound;
2074  }
2075 
2076  // As long as the sum of primal infeasibilities is decreasing, we look for
2077  // pivots that are numerically more stable.
2078  improvement -= top.coeff_magnitude;
2079  if (improvement <= 0.0) break;
2080  std::pop_heap(breakpoints.begin(), breakpoints.end());
2081  breakpoints.pop_back();
2082  }
2083 
2084  // Try to avoid a small pivot by refactorizing.
2085  if (*leaving_row != kInvalidRow) {
2086  const Fractional threshold =
2087  parameters_.small_pivot_threshold() * direction_infinity_norm_;
2088  if (best_magnitude < threshold && !basis_factorization_.IsRefactorized()) {
2089  *refactorize = true;
2090  return;
2091  }
2092  }
2093  *step_length = current_ratio;
2094 }
2095 
2096 // This implements the pricing step for the dual simplex.
2097 Status RevisedSimplex::DualChooseLeavingVariableRow(RowIndex* leaving_row,
2098  Fractional* cost_variation,
2100  SCOPED_TIME_STAT(&function_stats_);
2101  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2102  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2104 
2105  // This is not supposed to happen, but better be safe.
2106  if (dual_prices_.Size() == 0) {
2107  variable_values_.RecomputeDualPrices();
2108  }
2109 
2110  // Return right away if there is no leaving variable.
2111  // Fill cost_variation and target_bound otherwise.
2112  *leaving_row = dual_prices_.GetMaximum();
2113  if (*leaving_row == kInvalidRow) return Status::OK();
2114 
2115  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2116  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2117  const ColIndex leaving_col = basis_[*leaving_row];
2118  const Fractional value = variable_values_.Get(leaving_col);
2119  if (value < lower_bounds[leaving_col]) {
2120  *cost_variation = lower_bounds[leaving_col] - value;
2121  *target_bound = lower_bounds[leaving_col];
2122  DCHECK_GT(*cost_variation, 0.0);
2123  } else {
2124  *cost_variation = upper_bounds[leaving_col] - value;
2125  *target_bound = upper_bounds[leaving_col];
2126  DCHECK_LT(*cost_variation, 0.0);
2127  }
2128  return Status::OK();
2129 }
2130 
2131 namespace {
2132 
2133 // Returns true if a basic variable with given cost and type is to be considered
2134 // as a leaving candidate for the dual phase I.
2135 bool IsDualPhaseILeavingCandidate(Fractional cost, VariableType type,
2136  Fractional threshold) {
2137  if (cost == 0.0) return false;
2138  return type == VariableType::UPPER_AND_LOWER_BOUNDED ||
2139  type == VariableType::FIXED_VARIABLE ||
2140  (type == VariableType::UPPER_BOUNDED && cost < -threshold) ||
2141  (type == VariableType::LOWER_BOUNDED && cost > threshold);
2142 }
2143 
2144 } // namespace
2145 
2146 // Important: The norm should be updated before this is called.
2147 template <bool use_dense_update>
2148 void RevisedSimplex::OnDualPriceChange(const DenseColumn& squared_norm,
2149  RowIndex row, VariableType type,
2150  Fractional threshold) {
2151  const Fractional price = dual_pricing_vector_[row];
2152  const bool is_candidate =
2153  IsDualPhaseILeavingCandidate(price, type, threshold);
2154  if (is_candidate) {
2155  if (use_dense_update) {
2156  dual_prices_.DenseAddOrUpdate(row, Square(price) / squared_norm[row]);
2157  } else {
2158  dual_prices_.AddOrUpdate(row, Square(price) / squared_norm[row]);
2159  }
2160  } else {
2161  dual_prices_.Remove(row);
2162  }
2163 }
2164 
// Incrementally updates the dual phase-I pricing information after the pivot
// (leaving_row, entering_col): transforms dual_pricing_vector_ from the old
// basis to the new one and refreshes the affected entries of dual_prices_.
// Statement order matters here; do not reorder.
void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
                                           ColIndex entering_col) {
  SCOPED_TIME_STAT(&function_stats_);

  // If the prices are going to be recomputed, there is nothing to do. See the
  // logic at the beginning of DualPhaseIChooseLeavingVariableRow() which must
  // be in sync with this one.
  //
  // TODO(user): Move the logic in a single class, so it is easier to enforce
  // invariant.
  if (reduced_costs_.AreReducedCostsRecomputed() ||
      dual_edge_norms_.NeedsBasisRefactorization() ||
      dual_pricing_vector_.empty()) {
    return;
  }

  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
  const Fractional threshold = parameters_.ratio_test_zero_threshold();

  // Note that because the norm are also updated only on the position of the
  // direction, scaled_dual_pricing_vector_ will be up to date.
  const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();

  // Convert the dual_pricing_vector_ from the old basis into the new one (which
  // is the same as multiplying it by an Eta matrix corresponding to the
  // direction).
  const Fractional step =
      dual_pricing_vector_[leaving_row] / direction_[leaving_row];
  for (const auto e : direction_) {
    dual_pricing_vector_[e.row()] -= e.coefficient() * step;
    OnDualPriceChange(squared_norms, e.row(), variable_type[basis_[e.row()]],
                      threshold);
  }
  // The leaving row gets the eta-transformed value (overwrites the loop's
  // update of that position).
  dual_pricing_vector_[leaving_row] = step;

  // The entering_col which was dual-infeasible is now dual-feasible, so we
  // have to remove it from the infeasibility sum.
  dual_pricing_vector_[leaving_row] -=
      dual_infeasibility_improvement_direction_[entering_col];
  if (dual_infeasibility_improvement_direction_[entering_col] != 0.0) {
    --num_dual_infeasible_positions_;
  }
  dual_infeasibility_improvement_direction_[entering_col] = 0.0;

  // The leaving variable will also be dual-feasible.
  dual_infeasibility_improvement_direction_[basis_[leaving_row]] = 0.0;

  // Update the leaving row entering candidate status.
  OnDualPriceChange(squared_norms, leaving_row, variable_type[entering_col],
                    threshold);
}
2216 
2217 template <typename Cols>
2218 void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
2219  const Cols& cols) {
2220  SCOPED_TIME_STAT(&function_stats_);
2221  bool something_to_do = false;
2222  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
2223  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
2224  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2225  const Fractional tolerance = reduced_costs_.GetDualFeasibilityTolerance();
2226  for (ColIndex col : cols) {
2227  const Fractional reduced_cost = reduced_costs[col];
2228  const Fractional sign =
2229  (can_increase.IsSet(col) && reduced_cost < -tolerance) ? 1.0
2230  : (can_decrease.IsSet(col) && reduced_cost > tolerance) ? -1.0
2231  : 0.0;
2232  if (sign != dual_infeasibility_improvement_direction_[col]) {
2233  if (sign == 0.0) {
2234  --num_dual_infeasible_positions_;
2235  } else if (dual_infeasibility_improvement_direction_[col] == 0.0) {
2236  ++num_dual_infeasible_positions_;
2237  }
2238  if (!something_to_do) {
2239  initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
2240  initially_all_zero_scratchpad_.ClearSparseMask();
2241  initially_all_zero_scratchpad_.non_zeros.clear();
2242  something_to_do = true;
2243  }
2244 
2245  // We add a factor 10 because of the scattered access.
2246  num_update_price_operations_ +=
2247  10 * compact_matrix_.column(col).num_entries().value();
2249  col, sign - dual_infeasibility_improvement_direction_[col],
2250  &initially_all_zero_scratchpad_);
2251  dual_infeasibility_improvement_direction_[col] = sign;
2252  }
2253  }
2254  if (something_to_do) {
2255  initially_all_zero_scratchpad_.ClearNonZerosIfTooDense();
2256  initially_all_zero_scratchpad_.ClearSparseMask();
2257  const DenseColumn& squared_norms = dual_edge_norms_.GetEdgeSquaredNorms();
2258 
2259  const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
2260  const Fractional threshold = parameters_.ratio_test_zero_threshold();
2261  basis_factorization_.RightSolve(&initially_all_zero_scratchpad_);
2262  if (initially_all_zero_scratchpad_.non_zeros.empty()) {
2263  dual_prices_.StartDenseUpdates();
2264  for (RowIndex row(0); row < num_rows_; ++row) {
2265  if (initially_all_zero_scratchpad_[row] == 0.0) continue;
2266  dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
2267  OnDualPriceChange</*use_dense_update=*/true>(
2268  squared_norms, row, variable_type[basis_[row]], threshold);
2269  }
2270  initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
2271  } else {
2272  for (const auto e : initially_all_zero_scratchpad_) {
2273  dual_pricing_vector_[e.row()] += e.coefficient();
2274  OnDualPriceChange(squared_norms, e.row(),
2275  variable_type[basis_[e.row()]], threshold);
2276  initially_all_zero_scratchpad_[e.row()] = 0.0;
2277  }
2278  }
2279  initially_all_zero_scratchpad_.non_zeros.clear();
2280  }
2281 }
2282 
2283 Status RevisedSimplex::DualPhaseIChooseLeavingVariableRow(
2284  RowIndex* leaving_row, Fractional* cost_variation,
2286  SCOPED_TIME_STAT(&function_stats_);
2287  GLOP_RETURN_ERROR_IF_NULL(leaving_row);
2288  GLOP_RETURN_ERROR_IF_NULL(cost_variation);
2289  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2290  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2291 
2292  // dual_infeasibility_improvement_direction_ is zero for dual-feasible
2293  // positions and contains the sign in which the reduced cost of this column
2294  // needs to move to improve the feasibility otherwise (+1 or -1).
2295  //
2296  // Its current value was the one used to compute dual_pricing_vector_ and
2297  // was updated accordingly by DualPhaseIUpdatePrice().
2298  //
2299  // If more variables changed of dual-feasibility status during the last
2300  // iteration, we need to call DualPhaseIUpdatePriceOnReducedCostChange() to
2301  // take them into account.
2302  if (reduced_costs_.AreReducedCostsRecomputed() ||
2303  dual_edge_norms_.NeedsBasisRefactorization() ||
2304  dual_pricing_vector_.empty()) {
2305  // Recompute everything from scratch.
2306  num_dual_infeasible_positions_ = 0;
2307  dual_pricing_vector_.AssignToZero(num_rows_);
2308  dual_prices_.ClearAndResize(num_rows_);
2309  dual_infeasibility_improvement_direction_.AssignToZero(num_cols_);
2310  DualPhaseIUpdatePriceOnReducedCostChange(
2311  variables_info_.GetIsRelevantBitRow());
2312  } else {
2313  // Update row is still equal to the row used during the last iteration
2314  // to update the reduced costs.
2315  DualPhaseIUpdatePriceOnReducedCostChange(update_row_.GetNonZeroPositions());
2316  }
2317 
2318  // If there is no dual-infeasible position, we are done.
2319  *leaving_row = kInvalidRow;
2320  if (num_dual_infeasible_positions_ == 0) return Status::OK();
2321 
2322  *leaving_row = dual_prices_.GetMaximum();
2323 
2324  // Returns right away if there is no leaving variable or fill the other
2325  // return values otherwise.
2326  if (*leaving_row == kInvalidRow) return Status::OK();
2327  *cost_variation = dual_pricing_vector_[*leaving_row];
2328  const ColIndex leaving_col = basis_[*leaving_row];
2329  if (*cost_variation < 0.0) {
2330  *target_bound = upper_bounds[leaving_col];
2331  } else {
2332  *target_bound = lower_bounds[leaving_col];
2333  }
2335  return Status::OK();
2336 }
2337 
2338 template <typename BoxedVariableCols>
2339 void RevisedSimplex::MakeBoxedVariableDualFeasible(
2340  const BoxedVariableCols& cols, bool update_basic_values) {
2341  SCOPED_TIME_STAT(&function_stats_);
2342  std::vector<ColIndex> changed_cols;
2343 
2344  // It is important to flip bounds within a tolerance because of precision
2345  // errors. Otherwise, this leads to cycling on many of the Netlib problems
2346  // since this is called at each iteration (because of the bound-flipping ratio
2347  // test).
2348  const DenseRow& variable_values = variable_values_.GetDenseRow();
2349  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
2350  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2351  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2352  const Fractional dual_feasibility_tolerance =
2353  reduced_costs_.GetDualFeasibilityTolerance();
2354  const VariableStatusRow& variable_status = variables_info_.GetStatusRow();
2355  for (const ColIndex col : cols) {
2356  const Fractional reduced_cost = reduced_costs[col];
2357  const VariableStatus status = variable_status[col];
2358  DCHECK(variables_info_.GetTypeRow()[col] ==
2360  // TODO(user): refactor this as DCHECK(IsVariableBasicOrExactlyAtBound())?
2361  DCHECK(variable_values[col] == lower_bounds[col] ||
2362  variable_values[col] == upper_bounds[col] ||
2363  status == VariableStatus::BASIC);
2364  if (reduced_cost > dual_feasibility_tolerance &&
2365  status == VariableStatus::AT_UPPER_BOUND) {
2366  variables_info_.UpdateToNonBasicStatus(col,
2368  changed_cols.push_back(col);
2369  } else if (reduced_cost < -dual_feasibility_tolerance &&
2370  status == VariableStatus::AT_LOWER_BOUND) {
2371  variables_info_.UpdateToNonBasicStatus(col,
2373  changed_cols.push_back(col);
2374  }
2375  }
2376 
2377  if (!changed_cols.empty()) {
2378  iteration_stats_.num_dual_flips.Add(changed_cols.size());
2379  variable_values_.UpdateGivenNonBasicVariables(changed_cols,
2380  update_basic_values);
2381  }
2382 }
2383 
2384 Fractional RevisedSimplex::ComputeStepToMoveBasicVariableToBound(
2385  RowIndex leaving_row, Fractional target_bound) {
2386  SCOPED_TIME_STAT(&function_stats_);
2387 
2388  // We just want the leaving variable to go to its target_bound.
2389  const ColIndex leaving_col = basis_[leaving_row];
2390  const Fractional leaving_variable_value = variable_values_.Get(leaving_col);
2391  Fractional unscaled_step = leaving_variable_value - target_bound;
2392 
2393  // In Chvatal p 157 update_[entering_col] is used instead of
2394  // direction_[leaving_row], but the two quantities are actually the
2395  // same. This is because update_[col] is the value at leaving_row of
2396  // the right inverse of col and direction_ is the right inverse of the
2397  // entering_col. Note that direction_[leaving_row] is probably more
2398  // precise.
2399  // TODO(user): use this to check precision and trigger recomputation.
2400  return unscaled_step / direction_[leaving_row];
2401 }
2402 
2403 bool RevisedSimplex::TestPivot(ColIndex entering_col, RowIndex leaving_row) {
2404  VLOG(1) << "Test pivot.";
2405  SCOPED_TIME_STAT(&function_stats_);
2406  const ColIndex leaving_col = basis_[leaving_row];
2407  basis_[leaving_row] = entering_col;
2408 
2409  // TODO(user): If 'is_ok' is true, we could use the computed lu in
2410  // basis_factorization_ rather than recompute it during UpdateAndPivot().
2411  CompactSparseMatrixView basis_matrix(&compact_matrix_, &basis_);
2412  const bool is_ok = test_lu_.ComputeFactorization(basis_matrix).ok();
2413  basis_[leaving_row] = leaving_col;
2414  return is_ok;
2415 }
2416 
2417 // Note that this function is an optimization and that if it was doing nothing
2418 // the algorithm will still be correct and work. Using it does change the pivot
2419 // taken during the simplex method though.
2420 void RevisedSimplex::PermuteBasis() {
2421  SCOPED_TIME_STAT(&function_stats_);
2422 
2423  // Fetch the current basis column permutation and return if it is empty which
2424  // means the permutation is the identity.
2425  const ColumnPermutation& col_perm =
2426  basis_factorization_.GetColumnPermutation();
2427  if (col_perm.empty()) return;
2428 
2429  // Permute basis_.
2430  ApplyColumnPermutationToRowIndexedVector(col_perm, &basis_);
2431 
2432  // Permute dual_pricing_vector_ if needed.
2433  if (!dual_pricing_vector_.empty()) {
2434  // TODO(user): We need to permute dual_prices_ too now, we recompute
2435  // everything one each basis factorization, so this don't matter.
2436  ApplyColumnPermutationToRowIndexedVector(col_perm, &dual_pricing_vector_);
2437  }
2438 
2439  // Notify the other classes.
2440  reduced_costs_.UpdateDataOnBasisPermutation();
2441  dual_edge_norms_.UpdateDataOnBasisPermutation(col_perm);
2442 
2443  // Finally, remove the column permutation from all subsequent solves since
2444  // it has been taken into account in basis_.
2445  basis_factorization_.SetColumnPermutationToIdentity();
2446 }
2447 
2448 Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
2449  RowIndex leaving_row,
2451  SCOPED_TIME_STAT(&function_stats_);
2452 
2453  // Tricky and a bit hacky.
2454  //
2455  // The basis update code assumes that we already computed the left inverse of
2456  // the leaving row, otherwise it will just refactorize the basis. This left
2457  // inverse is needed by update_row_.ComputeUpdateRow(), so in most case it
2458  // will already be computed. However, in some situation we don't need the
2459  // full update row, so just the left inverse can be computed.
2460  //
2461  // TODO(user): Ideally this shouldn't be needed if we are going to refactorize
2462  // the basis anyway. So we should know that before hand which is currently
2463  // hard to do.
2464  Fractional pivot_from_update_row;
2465  if (update_row_.IsComputed()) {
2466  pivot_from_update_row = update_row_.GetCoefficient(entering_col);
2467  } else {
2468  // We only need the left inverse and the update row position at the
2469  // entering_col to check precision.
2470  update_row_.ComputeUnitRowLeftInverse(leaving_row);
2471  pivot_from_update_row = compact_matrix_.ColumnScalarProduct(
2472  entering_col, update_row_.GetUnitRowLeftInverse().values);
2473  }
2474 
2475  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
2476  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
2477  const ColIndex leaving_col = basis_[leaving_row];
2478  const VariableStatus leaving_variable_status =
2479  lower_bounds[leaving_col] == upper_bounds[leaving_col]
2481  : target_bound == lower_bounds[leaving_col]
2484  if (variable_values_.Get(leaving_col) != target_bound) {
2485  ratio_test_stats_.bound_shift.Add(variable_values_.Get(leaving_col) -
2486  target_bound);
2487  }
2488  UpdateBasis(entering_col, leaving_row, leaving_variable_status);
2489 
2490  // Test precision by comparing two ways to get the "pivot".
2491  const Fractional pivot_from_direction = direction_[leaving_row];
2492  const Fractional diff =
2493  std::abs(pivot_from_update_row - pivot_from_direction);
2494  if (diff > parameters_.refactorization_threshold() *
2495  (1 + std::abs(pivot_from_direction))) {
2496  VLOG(1) << "Refactorizing: imprecise pivot " << pivot_from_direction
2497  << " diff = " << diff;
2498  GLOP_RETURN_IF_ERROR(basis_factorization_.ForceRefactorization());
2499  } else {
2501  basis_factorization_.Update(entering_col, leaving_row, direction_));
2502  }
2503  if (basis_factorization_.IsRefactorized()) {
2504  PermuteBasis();
2505  }
2506  return Status::OK();
2507 }
2508 
2509 Status RevisedSimplex::RefactorizeBasisIfNeeded(bool* refactorize) {
2510  SCOPED_TIME_STAT(&function_stats_);
2511  if (*refactorize && !basis_factorization_.IsRefactorized()) {
2512  GLOP_RETURN_IF_ERROR(basis_factorization_.Refactorize());
2513  update_row_.Invalidate();
2514  PermuteBasis();
2515  }
2516  *refactorize = false;
2517  return Status::OK();
2518 }
2519 
2521  if (col >= integrality_scale_.size()) {
2522  integrality_scale_.resize(col + 1, 0.0);
2523  }
2524  integrality_scale_[col] = scale;
2525 }
2526 
2527 Status RevisedSimplex::Polish(TimeLimit* time_limit) {
2529  Cleanup update_deterministic_time_on_return(
2530  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2531 
2532  // Get all non-basic variables with a reduced costs close to zero.
2533  // Note that because we only choose entering candidate with a cost of zero,
2534  // this set will not change (modulo epsilons).
2535  const DenseRow& rc = reduced_costs_.GetReducedCosts();
2536  std::vector<ColIndex> candidates;
2537  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
2538  if (!variables_info_.GetIsRelevantBitRow()[col]) continue;
2539  if (std::abs(rc[col]) < 1e-9) candidates.push_back(col);
2540  }
2541 
2542  bool refactorize = false;
2543  int num_pivots = 0;
2544  Fractional total_gain = 0.0;
2545  for (int i = 0; i < 10; ++i) {
2546  AdvanceDeterministicTime(time_limit);
2547  if (time_limit->LimitReached()) break;
2548  if (num_pivots >= 5) break;
2549  if (candidates.empty()) break;
2550 
2551  // Pick a random one and remove it from the list.
2552  const int index =
2553  std::uniform_int_distribution<int>(0, candidates.size() - 1)(random_);
2554  const ColIndex entering_col = candidates[index];
2555  std::swap(candidates[index], candidates.back());
2556  candidates.pop_back();
2557 
2558  // We need the entering variable to move in the correct direction.
2559  Fractional fake_rc = 1.0;
2560  if (!variables_info_.GetCanDecreaseBitRow()[entering_col]) {
2561  CHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);
2562  fake_rc = -1.0;
2563  }
2564 
2565  // Refactorize if needed.
2566  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2567  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2568 
2569  // Compute the direction and by how much we can move along it.
2570  ComputeDirection(entering_col);
2571  Fractional step_length;
2572  RowIndex leaving_row;
2574  bool local_refactorize = false;
2576  ChooseLeavingVariableRow(entering_col, fake_rc, &local_refactorize,
2577  &leaving_row, &step_length, &target_bound));
2578 
2579  if (local_refactorize) continue;
2580  if (step_length == kInfinity || step_length == -kInfinity) continue;
2581  if (std::abs(step_length) <= 1e-6) continue;
2582  if (leaving_row != kInvalidRow && std::abs(direction_[leaving_row]) < 0.1) {
2583  continue;
2584  }
2585  const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;
2586 
2587  // Evaluate if pivot reduce the fractionality of the basis.
2588  //
2589  // TODO(user): Count with more weight variable with a small domain, i.e.
2590  // binary variable, compared to a variable in [0, 1k] ?
2591  const auto get_diff = [this](ColIndex col, Fractional old_value,
2592  Fractional new_value) {
2593  if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) {
2594  return 0.0;
2595  }
2596  const Fractional s = integrality_scale_[col];
2597  return (std::abs(new_value * s - std::round(new_value * s)) -
2598  std::abs(old_value * s - std::round(old_value * s)));
2599  };
2600  Fractional diff = get_diff(entering_col, variable_values_.Get(entering_col),
2601  variable_values_.Get(entering_col) + step);
2602  for (const auto e : direction_) {
2603  const ColIndex col = basis_[e.row()];
2604  const Fractional old_value = variable_values_.Get(col);
2605  const Fractional new_value = old_value - e.coefficient() * step;
2606  diff += get_diff(col, old_value, new_value);
2607  }
2608 
2609  // Ignore low decrease in integrality.
2610  if (diff > -1e-2) continue;
2611  total_gain -= diff;
2612 
2613  // We perform the change.
2614  num_pivots++;
2615  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2616 
2617  // This is a bound flip of the entering column.
2618  if (leaving_row == kInvalidRow) {
2619  if (step > 0.0) {
2620  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2622  } else if (step < 0.0) {
2623  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2625  }
2626  continue;
2627  }
2628 
2629  // Perform the pivot.
2630  const ColIndex leaving_col = basis_[leaving_row];
2631  update_row_.ComputeUpdateRow(leaving_row);
2632 
2633  // Note that this will only do work if the norms are computed.
2634  //
2635  // TODO(user): We should probably move all the "update" in a function so
2636  // that all "iterations" function can just reuse the same code. Everything
2637  // that is currently not "cleared" should be updated. If one does not want
2638  // that, then it is easy to call Clear() on the quantities that do not needs
2639  // to be kept in sync with the current basis.
2640  primal_edge_norms_.UpdateBeforeBasisPivot(
2641  entering_col, leaving_col, leaving_row, direction_, &update_row_);
2642  dual_edge_norms_.UpdateBeforeBasisPivot(
2643  entering_col, leaving_row, direction_,
2644  update_row_.GetUnitRowLeftInverse());
2645 
2646  // TODO(user): Rather than maintaining this, it is probably better to
2647  // recompute it in one go after Polish() is done. We don't use the reduced
2648  // costs here as we just assume that the set of candidates does not change.
2649  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
2650  &update_row_);
2651 
2652  const Fractional dir = -direction_[leaving_row] * step;
2653  const bool is_degenerate =
2654  (dir == 0.0) ||
2655  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2656  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2657  if (!is_degenerate) {
2658  variable_values_.Set(leaving_col, target_bound);
2659  }
2661  UpdateAndPivot(entering_col, leaving_row, target_bound));
2662  }
2663 
2664  VLOG(1) << "Polish num_pivots: " << num_pivots << " gain:" << total_gain;
2665  return Status::OK();
2666 }
2667 
2668 // Minimizes c.x subject to A.x = 0 where A is an mxn-matrix, c an n-vector, and
2669 // x an n-vector.
2670 //
2671 // x is split in two parts x_B and x_N (B standing for basis).
2672 // In the same way, A is split in A_B (also known as B) and A_N, and
2673 // c is split into c_B and c_N.
2674 //
2675 // The goal is to minimize c_B.x_B + c_N.x_N
2676 // subject to B.x_B + A_N.x_N = 0
2677 // and x_lower <= x <= x_upper.
2678 //
2679 // To minimize c.x, at each iteration a variable from x_N is selected to
2680 // enter the basis, and a variable from x_B is selected to leave the basis.
2681 // To avoid explicit inversion of B, the algorithm solves two sub-systems:
2682 // y.B = c_B and B.d = a (a being the entering column).
2683 Status RevisedSimplex::PrimalMinimize(TimeLimit* time_limit) {
2685  Cleanup update_deterministic_time_on_return(
2686  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2687  num_consecutive_degenerate_iterations_ = 0;
2688  bool refactorize = false;
2689 
2690  // At this point, we are not sure the prices are always up to date, so
2691  // lets always reset them for the first iteration below.
2692  primal_prices_.ForceRecomputation();
2693 
2694  if (phase_ == Phase::FEASIBILITY) {
2695  // Initialize the primal phase-I objective.
2696  // Note that this temporarily erases the problem objective.
2697  objective_.AssignToZero(num_cols_);
2698  variable_values_.UpdatePrimalPhaseICosts(
2699  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_), &objective_);
2700  reduced_costs_.ResetForNewObjective();
2701  }
2702 
2703  while (true) {
2704  // TODO(user): we may loop a bit more than the actual number of iteration.
2705  // fix.
2707  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2708 
2709  // Trigger a refactorization if one of the class we use request it.
2710  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2711  if (primal_edge_norms_.NeedsBasisRefactorization()) refactorize = true;
2712  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2713 
2714  if (basis_factorization_.IsRefactorized()) {
2715  CorrectErrorsOnVariableValues();
2716  DisplayIterationInfo(/*primal=*/true);
2717 
2718  if (phase_ == Phase::FEASIBILITY) {
2719  // Since the variable values may have been recomputed, we need to
2720  // recompute the primal infeasible variables and update their costs.
2721  if (variable_values_.UpdatePrimalPhaseICosts(
2722  util::IntegerRange<RowIndex>(RowIndex(0), num_rows_),
2723  &objective_)) {
2724  reduced_costs_.ResetForNewObjective();
2725  }
2726  }
2727 
2728  // Computing the objective at each iteration takes time, so we just
2729  // check the limit when the basis is refactorized.
2730  if (phase_ == Phase::OPTIMIZATION &&
2731  ComputeObjectiveValue() < primal_objective_limit_) {
2732  VLOG(1) << "Stopping the primal simplex because"
2733  << " the objective limit " << primal_objective_limit_
2734  << " has been reached.";
2735  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2736  objective_limit_reached_ = true;
2737  return Status::OK();
2738  }
2739  } else if (phase_ == Phase::FEASIBILITY) {
2740  // Note that direction_.non_zeros contains the positions of the basic
2741  // variables whose values were updated during the last iteration.
2742  if (variable_values_.UpdatePrimalPhaseICosts(direction_.non_zeros,
2743  &objective_)) {
2744  reduced_costs_.ResetForNewObjective();
2745  }
2746  }
2747 
2748  const ColIndex entering_col = primal_prices_.GetBestEnteringColumn();
2749  if (entering_col == kInvalidCol) {
2750  if (reduced_costs_.AreReducedCostsPrecise() &&
2751  basis_factorization_.IsRefactorized()) {
2752  if (phase_ == Phase::FEASIBILITY) {
2753  const Fractional primal_infeasibility =
2754  variable_values_.ComputeMaximumPrimalInfeasibility();
2755  if (primal_infeasibility <
2756  parameters_.primal_feasibility_tolerance()) {
2757  problem_status_ = ProblemStatus::PRIMAL_FEASIBLE;
2758  } else {
2759  VLOG(1) << "Infeasible problem! infeasibility = "
2760  << primal_infeasibility;
2761  problem_status_ = ProblemStatus::PRIMAL_INFEASIBLE;
2762  }
2763  } else {
2764  problem_status_ = ProblemStatus::OPTIMAL;
2765  }
2766  break;
2767  }
2768 
2769  VLOG(1) << "Optimal reached, double checking...";
2770  reduced_costs_.MakeReducedCostsPrecise();
2771  refactorize = true;
2772  continue;
2773  }
2774 
2775  DCHECK(reduced_costs_.IsValidPrimalEnteringCandidate(entering_col));
2776 
2777  // Solve the system B.d = a with a the entering column.
2778  ComputeDirection(entering_col);
2779 
2780  // This might trigger a recomputation on the next iteration, but we
2781  // finish this one even if the price is imprecise.
2782  primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col, direction_);
2783  const Fractional reduced_cost =
2784  reduced_costs_.TestEnteringReducedCostPrecision(entering_col,
2785  direction_);
2786 
2787  // The test might have changed the reduced cost of the entering_col.
2788  // If it is no longer a valid entering candidate, we loop.
2789  primal_prices_.RecomputePriceAt(entering_col);
2790  if (!reduced_costs_.IsValidPrimalEnteringCandidate(entering_col)) {
2791  reduced_costs_.MakeReducedCostsPrecise();
2792  VLOG(1) << "Skipping col #" << entering_col
2793  << " whose reduced cost is no longer valid under precise reduced "
2794  "cost: "
2795  << reduced_cost;
2796  continue;
2797  }
2798 
2799  // This test takes place after the check for optimality/feasibility because
2800  // when running with 0 iterations, we still want to report
2801  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
2802  // case at the beginning of the algorithm.
2803  AdvanceDeterministicTime(time_limit);
2804  if (num_iterations_ == parameters_.max_number_of_iterations() ||
2805  time_limit->LimitReached()) {
2806  break;
2807  }
2808 
2809  Fractional step_length;
2810  RowIndex leaving_row;
2812  if (phase_ == Phase::FEASIBILITY) {
2813  PrimalPhaseIChooseLeavingVariableRow(entering_col, reduced_cost,
2814  &refactorize, &leaving_row,
2815  &step_length, &target_bound);
2816  } else {
2818  ChooseLeavingVariableRow(entering_col, reduced_cost, &refactorize,
2819  &leaving_row, &step_length, &target_bound));
2820  }
2821  if (refactorize) continue;
2822 
2823  if (step_length == kInfinity || step_length == -kInfinity) {
2824  if (!basis_factorization_.IsRefactorized() ||
2825  !reduced_costs_.AreReducedCostsPrecise()) {
2826  VLOG(1) << "Infinite step length, double checking...";
2827  reduced_costs_.MakeReducedCostsPrecise();
2828  continue;
2829  }
2830  if (phase_ == Phase::FEASIBILITY) {
2831  // This shouldn't happen by construction.
2832  VLOG(1) << "Unbounded feasibility problem !?";
2833  problem_status_ = ProblemStatus::ABNORMAL;
2834  } else {
2835  VLOG(1) << "Unbounded problem.";
2836  problem_status_ = ProblemStatus::PRIMAL_UNBOUNDED;
2837  solution_primal_ray_.AssignToZero(num_cols_);
2838  for (RowIndex row(0); row < num_rows_; ++row) {
2839  const ColIndex col = basis_[row];
2840  solution_primal_ray_[col] = -direction_[row];
2841  }
2842  solution_primal_ray_[entering_col] = 1.0;
2843  if (step_length == -kInfinity) {
2844  ChangeSign(&solution_primal_ray_);
2845  }
2846  }
2847  break;
2848  }
2849 
2850  Fractional step = (reduced_cost > 0.0) ? -step_length : step_length;
2851  if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
2852  // For phase-I we currently always set the leaving variable to its exact
2853  // bound even if by doing so we may take a small step in the wrong
2854  // direction and may increase the overall infeasibility.
2855  //
2856  // TODO(user): Investigate alternatives even if this seems to work well in
2857  // practice. Note that the final returned solution will have the property
2858  // that all non-basic variables are at their exact bound, so it is nice
2859  // that we do not report ProblemStatus::PRIMAL_FEASIBLE if a solution with
2860  // this property cannot be found.
2861  step = ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
2862  }
2863 
2864  // Store the leaving_col before basis_ change.
2865  const ColIndex leaving_col =
2866  (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];
2867 
2868  // An iteration is called 'degenerate' if the leaving variable is already
2869  // primal-infeasible and we make it even more infeasible or if we do a zero
2870  // step.
2871  bool is_degenerate = false;
2872  if (leaving_row != kInvalidRow) {
2873  Fractional dir = -direction_[leaving_row] * step;
2874  is_degenerate =
2875  (dir == 0.0) ||
2876  (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
2877  (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);
2878 
2879  // If the iteration is not degenerate, the leaving variable should go to
2880  // its exact target bound (it is how the step is computed).
2881  if (!is_degenerate) {
2882  DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
2883  target_bound));
2884  }
2885  }
2886 
2887  variable_values_.UpdateOnPivoting(direction_, entering_col, step);
2888  if (leaving_row != kInvalidRow) {
2889  // Important: the norm must be updated before the reduced_cost.
2890  primal_edge_norms_.UpdateBeforeBasisPivot(
2891  entering_col, basis_[leaving_row], leaving_row, direction_,
2892  &update_row_);
2893  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
2894  direction_, &update_row_);
2895  primal_prices_.UpdateBeforeBasisPivot(entering_col, &update_row_);
2896  if (!is_degenerate) {
2897  // On a non-degenerate iteration, the leaving variable should be at its
2898  // exact bound. This corrects an eventual small numerical error since
2899  // 'value + direction * step' where step is
2900  // '(target_bound - value) / direction'
2901  // may be slighlty different from target_bound.
2902  variable_values_.Set(leaving_col, target_bound);
2903  }
2905  UpdateAndPivot(entering_col, leaving_row, target_bound));
2907  if (is_degenerate) {
2908  timer.AlsoUpdate(&iteration_stats_.degenerate);
2909  } else {
2910  timer.AlsoUpdate(&iteration_stats_.normal);
2911  }
2912  });
2913  } else {
2914  // Bound flip. This makes sure that the flipping variable is at its bound
2915  // and has the correct status.
2917  variables_info_.GetTypeRow()[entering_col]);
2918  if (step > 0.0) {
2919  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2921  } else if (step < 0.0) {
2922  SetNonBasicVariableStatusAndDeriveValue(entering_col,
2924  }
2925  primal_prices_.SetAndDebugCheckThatColumnIsDualFeasible(entering_col);
2926  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
2927  }
2928 
2929  if (phase_ == Phase::FEASIBILITY && leaving_row != kInvalidRow) {
2930  // Set the leaving variable to its exact bound.
2931  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
2932 
2933  // Change the objective value of the leaving variable to zero.
2934  reduced_costs_.SetNonBasicVariableCostToZero(leaving_col,
2935  &objective_[leaving_col]);
2936  primal_prices_.RecomputePriceAt(leaving_col);
2937  }
2938 
2939  // Stats about consecutive degenerate iterations.
2940  if (step_length == 0.0) {
2941  num_consecutive_degenerate_iterations_++;
2942  } else {
2943  if (num_consecutive_degenerate_iterations_ > 0) {
2944  iteration_stats_.degenerate_run_size.Add(
2945  num_consecutive_degenerate_iterations_);
2946  num_consecutive_degenerate_iterations_ = 0;
2947  }
2948  }
2949  ++num_iterations_;
2950  }
2951  if (num_consecutive_degenerate_iterations_ > 0) {
2952  iteration_stats_.degenerate_run_size.Add(
2953  num_consecutive_degenerate_iterations_);
2954  }
2955  return Status::OK();
2956 }
2957 
2958 // TODO(user): Two other approaches for the phase I described in Koberstein's
2959 // PhD thesis seem worth trying at some point:
2960 // - The subproblem approach, which enables one to use a normal phase II dual,
2961 // but requires an efficient bound-flipping ratio test since the new problem
// has all its variables boxed. This one is implemented now, but requires
// a bit more tuning.
2964 // - Pan's method, which is really fast but have no theoretical guarantee of
2965 // terminating and thus needs to use one of the other methods as a fallback if
2966 // it fails to make progress.
2967 //
2968 // Note that the returned status applies to the primal problem!
2969 Status RevisedSimplex::DualMinimize(bool feasibility_phase,
2970  TimeLimit* time_limit) {
2971  Cleanup update_deterministic_time_on_return(
2972  [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
2973  num_consecutive_degenerate_iterations_ = 0;
2974  bool refactorize = false;
2975 
2976  bound_flip_candidates_.clear();
2977 
2978  // Leaving variable.
2979  RowIndex leaving_row;
2980  Fractional cost_variation;
2982 
2983  // Entering variable.
2984  ColIndex entering_col;
2985 
2986  while (true) {
2987  // TODO(user): we may loop a bit more than the actual number of iteration.
2988  // fix.
2990  ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
2991 
2992  // Trigger a refactorization if one of the class we use request it.
2993  const bool old_refactorize_value = refactorize;
2994  if (reduced_costs_.NeedsBasisRefactorization()) refactorize = true;
2995  if (dual_edge_norms_.NeedsBasisRefactorization()) refactorize = true;
2996  GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
2997 
2998  // If the basis is refactorized, we recompute all the values in order to
2999  // have a good precision.
3000  if (basis_factorization_.IsRefactorized()) {
3001  // We do not want to recompute the reduced costs too often, this is
3002  // because that may break the overall direction taken by the last steps
3003  // and may lead to less improvement on degenerate problems.
3004  //
3005  // For now, we just recompute them if refactorize was set during the
3006  // loop and not because of normal refactorization.
3007  //
3008  // During phase-I, we do want the reduced costs to be as precise as
3009  // possible. TODO(user): Investigate why and fix the TODO in
3010  // PermuteBasis().
3011  //
3012  // Reduced costs are needed by MakeBoxedVariableDualFeasible(), so if we
3013  // do recompute them, it is better to do that first.
3014  if (feasibility_phase || old_refactorize_value) {
3015  reduced_costs_.MakeReducedCostsPrecise();
3016  }
3017 
3018  // TODO(user): Make RecomputeBasicVariableValues() do nothing
3019  // if it was already recomputed on a refactorized basis. This is the
3020  // same behavior as MakeReducedCostsPrecise().
3021  //
3022  // TODO(user): Do not recompute the variable values each time we
3023  // refactorize the matrix, like for the reduced costs? That may lead to
3024  // a worse behavior than keeping the "imprecise" version and only
3025  // recomputing it when its precision is above a threshold.
3026  if (!feasibility_phase) {
3027  MakeBoxedVariableDualFeasible(
3028  variables_info_.GetNonBasicBoxedVariables(),
3029  /*update_basic_values=*/false);
3030  variable_values_.RecomputeBasicVariableValues();
3031  variable_values_.RecomputeDualPrices();
3032 
3033  // Computing the objective at each iteration takes time, so we just
3034  // check the limit when the basis is refactorized.
3035  //
3036  // Hack: We need phase_ here and not the local feasibility_phase
3037  // variable because this must not be checked for the dual phase I algo
3038  // that use the same code as the dual phase II (i.e. the local
3039  // feasibility_phase will be false).
3040  if (phase_ == Phase::OPTIMIZATION &&
3041  dual_objective_limit_ != kInfinity &&
3042  ComputeObjectiveValue() > dual_objective_limit_) {
3043  SOLVER_LOG(logger_,
3044  "Stopping the dual simplex because"
3045  " the objective limit ",
3046  dual_objective_limit_, " has been reached.");
3047  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3048  objective_limit_reached_ = true;
3049  return Status::OK();
3050  }
3051  }
3052 
3053  DisplayIterationInfo(/*primal=*/false);
3054  } else {
3055  // Updates from the previous iteration that can be skipped if we
3056  // recompute everything (see other case above).
3057  if (!feasibility_phase) {
3058  // Make sure the boxed variables are dual-feasible before choosing the
3059  // leaving variable row.
3060  MakeBoxedVariableDualFeasible(bound_flip_candidates_,
3061  /*update_basic_values=*/true);
3062  bound_flip_candidates_.clear();
3063 
3064  // The direction_.non_zeros contains the positions for which the basic
3065  // variable value was changed during the previous iterations.
3066  variable_values_.UpdateDualPrices(direction_.non_zeros);
3067  }
3068  }
3069 
3070  if (feasibility_phase) {
3071  GLOP_RETURN_IF_ERROR(DualPhaseIChooseLeavingVariableRow(
3072  &leaving_row, &cost_variation, &target_bound));
3073  } else {
3074  GLOP_RETURN_IF_ERROR(DualChooseLeavingVariableRow(
3075  &leaving_row, &cost_variation, &target_bound));
3076  }
3077  if (leaving_row == kInvalidRow) {
3078  // TODO(user): integrate this with the main "re-optimization" loop.
3079  // Also distinguish cost perturbation and shifts?
3080  if (!basis_factorization_.IsRefactorized() ||
3081  reduced_costs_.HasCostShift()) {
3082  VLOG(1) << "Optimal reached, double checking.";
3083  reduced_costs_.ClearAndRemoveCostShifts();
3084  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3085  refactorize = true;
3086  continue;
3087  }
3088  if (feasibility_phase) {
3089  // Note that since the basis is refactorized, the variable values
3090  // will be recomputed at the beginning of the second phase. The boxed
3091  // variable values will also be corrected by
3092  // MakeBoxedVariableDualFeasible().
3093  if (num_dual_infeasible_positions_ == 0) {
3094  problem_status_ = ProblemStatus::DUAL_FEASIBLE;
3095  } else {
3096  VLOG(1) << "DUAL infeasible in dual phase I.";
3097  problem_status_ = ProblemStatus::DUAL_INFEASIBLE;
3098  }
3099  } else {
3100  problem_status_ = ProblemStatus::OPTIMAL;
3101  }
3102  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3103  return Status::OK();
3104  }
3105 
3106  update_row_.ComputeUpdateRow(leaving_row);
3107  if (feasibility_phase) {
3109  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3110  &entering_col));
3111  } else {
3113  reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation,
3114  &bound_flip_candidates_, &entering_col));
3115  }
3116 
3117  // No entering_col: dual unbounded (i.e. primal infeasible).
3118  if (entering_col == kInvalidCol) {
3119  if (!reduced_costs_.AreReducedCostsPrecise()) {
3120  VLOG(1) << "No entering column. Double checking...";
3121  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3122  refactorize = true;
3123  continue;
3124  }
3125  DCHECK(basis_factorization_.IsRefactorized());
3126  if (feasibility_phase) {
3127  // This shouldn't happen by construction.
3128  VLOG(1) << "Unbounded dual feasibility problem !?";
3129  problem_status_ = ProblemStatus::ABNORMAL;
3130  } else {
3131  problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
3132  solution_dual_ray_ =
3133  Transpose(update_row_.GetUnitRowLeftInverse().values);
3134  update_row_.RecomputeFullUpdateRow(leaving_row);
3135  solution_dual_ray_row_combination_.AssignToZero(num_cols_);
3136  for (const ColIndex col : update_row_.GetNonZeroPositions()) {
3137  solution_dual_ray_row_combination_[col] =
3138  update_row_.GetCoefficient(col);
3139  }
3140  if (cost_variation < 0) {
3141  ChangeSign(&solution_dual_ray_);
3142  ChangeSign(&solution_dual_ray_row_combination_);
3143  }
3144  }
3145  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3146  return Status::OK();
3147  }
3148 
3149  // If the coefficient is too small, we recompute the reduced costs if not
3150  // already done. This is an extra heuristic to avoid computing the direction
3151  // If the pivot is small. But the real recomputation step is just below.
3152  const Fractional entering_coeff = update_row_.GetCoefficient(entering_col);
3153  if (std::abs(entering_coeff) < parameters_.dual_small_pivot_threshold() &&
3154  !reduced_costs_.AreReducedCostsPrecise()) {
3155  VLOG(1) << "Trying not to pivot by " << entering_coeff;
3156  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3157  refactorize = true;
3158  continue;
3159  }
3160 
3161  ComputeDirection(entering_col);
3162 
3163  // If the pivot is small compared to others in the direction_ vector we try
3164  // to recompute everything. If we cannot, then note that
3165  // DualChooseEnteringColumn() should guaranteed that the pivot is not too
3166  // small when everything has already been recomputed.
3167  if (std::abs(direction_[leaving_row]) <
3168  parameters_.small_pivot_threshold() * direction_infinity_norm_) {
3169  if (!reduced_costs_.AreReducedCostsPrecise()) {
3170  VLOG(1) << "Trying not pivot by " << entering_coeff << " ("
3171  << direction_[leaving_row]
3172  << ") because the direction has a norm of "
3173  << direction_infinity_norm_;
3174  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.refactorize));
3175  refactorize = true;
3176  continue;
3177  }
3178  }
3179 
3180  // This test takes place after the check for optimality/feasibility because
3181  // when running with 0 iterations, we still want to report
3182  // ProblemStatus::OPTIMAL or ProblemStatus::PRIMAL_FEASIBLE if it is the
3183  // case at the beginning of the algorithm.
3184  AdvanceDeterministicTime(time_limit);
3185  if (num_iterations_ == parameters_.max_number_of_iterations() ||
3186  time_limit->LimitReached()) {
3187  IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.normal));
3188  return Status::OK();
3189  }
3190 
3191  // Before we update the reduced costs, if its sign is already dual
3192  // infeasible and the update direction will make it worse we make sure the
3193  // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that
3194  // goes in the wrong direction (a few experiments seems to indicate that
3195  // this is not a good idea). See comment at the top of UpdateReducedCosts().
3196  //
3197  // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in
3198  // order to do a non-zero step. This helps on degenerate problems. Like the
3199  // pertubation, we will remove all these shifts at the end.
3200  const bool increasing_rc_is_needed =
3201  (cost_variation > 0.0) == (entering_coeff > 0.0);
3202  reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col);
3203 
3205  if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed,
3206  entering_col)) {
3207  timer.AlsoUpdate(&iteration_stats_.degenerate);
3208  } else {
3209  timer.AlsoUpdate(&iteration_stats_.normal);
3210  }
3211  });
3212 
3213  // Update basis. Note that direction_ is already computed.
3214  //
3215  // TODO(user): this is pretty much the same in the primal or dual code.
3216  // We just need to know to what bound the leaving variable will be set to.
3217  // Factorize more common code?
3218  reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
3219  &update_row_);
3220  dual_edge_norms_.UpdateBeforeBasisPivot(
3221  entering_col, leaving_row, direction_,
3222  update_row_.GetUnitRowLeftInverse());
3223 
3224  // During phase I, we do not need the basic variable values at all.
3225  // Important: The norm should be updated before that.
3226  Fractional primal_step = 0.0;
3227  if (feasibility_phase) {
3228  DualPhaseIUpdatePrice(leaving_row, entering_col);
3229  } else {
3230  primal_step =
3231  ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
3232  variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
3233  }
3234 
3235  // It is important to do the actual pivot after the update above!
3236  const ColIndex leaving_col = basis_[leaving_row];
3238  UpdateAndPivot(entering_col, leaving_row, target_bound));
3239 
3240  // This makes sure the leaving variable is at its exact bound. Tests
3241  // indicate that this makes everything more stable. Note also that during
3242  // the feasibility phase, the variable values are not used, but that the
3243  // correct non-basic variable value are needed at the end.
3244  variable_values_.SetNonBasicVariableValueFromStatus(leaving_col);
3245 
3246  // This is slow, but otherwise we have a really bad precision on the
3247  // variable values ...
3248  if (std::abs(primal_step) * parameters_.primal_feasibility_tolerance() >
3249  1.0) {
3250  refactorize = true;
3251  }
3252  ++num_iterations_;
3253  }
3254  return Status::OK();
3255 }
3256 
// Pushes every super-basic variable (FREE & non-basic with a nonzero value)
// either into the basis or onto one of its bounds, assuming we start from an
// optimal solution so the objective does not change. Used to produce a
// "clean" basic solution after crossover.
//
// NOTE(review): several original lines appear to have been lost in the
// extraction of this listing (macro openers such as IF_STATS_ENABLED( and
// GLOP_RETURN_IF_ERROR(, 'VariableType::UNCONSTRAINED)' continuation lines,
// the 'Fractional target_bound;' declaration, and the VariableStatus
// arguments of SetNonBasicVariableStatusAndDeriveValue). The stray ')' and
// '});' tokens below match those missing openers — compare against upstream
// before compiling.
Status RevisedSimplex::PrimalPush(TimeLimit* time_limit) {
  // Make sure deterministic time is reported even on early return paths.
  Cleanup update_deterministic_time_on_return(
      [this, time_limit]() { AdvanceDeterministicTime(time_limit); });
  bool refactorize = false;

  // We clear all the quantities that we don't update so they will be recomputed
  // later if needed.
  primal_edge_norms_.Clear();
  dual_edge_norms_.Clear();
  update_row_.Invalidate();
  reduced_costs_.ClearAndRemoveCostShifts();

  // Collect the variables to push: FREE non-basic columns with nonzero value.
  std::vector<ColIndex> super_basic_cols;
  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
    if (variables_info_.GetStatusRow()[col] == VariableStatus::FREE &&
        variable_values_.Get(col) != 0) {
      super_basic_cols.push_back(col);
    }
  }

  while (!super_basic_cols.empty()) {
    AdvanceDeterministicTime(time_limit);
    if (time_limit->LimitReached()) break;

    ScopedTimeDistributionUpdater timer(&iteration_stats_.total));
    GLOP_RETURN_IF_ERROR(RefactorizeBasisIfNeeded(&refactorize));
    if (basis_factorization_.IsRefactorized()) {
      CorrectErrorsOnVariableValues();
      DisplayIterationInfo(/*primal=*/true);
    }

    // TODO(user): Select at random like in Polish().
    ColIndex entering_col = super_basic_cols.back();

    // FREE variables must be movable in both directions.
    DCHECK(variables_info_.GetCanDecreaseBitRow()[entering_col]);
    DCHECK(variables_info_.GetCanIncreaseBitRow()[entering_col]);

    // Decide which direction to send the entering column.
    // UNCONSTRAINED variables go towards zero. Other variables go towards their
    // closest bound. We assume that we're at an optimal solution, so all FREE
    // variables have approximately zero reduced cost, which means that the
    // objective value won't change from moving this column into the basis.
    // TODO(user): As an improvement for variables with two bounds, try both
    // and pick one that doesn't require a basis change (if possible), otherwise
    // pick the closer bound.
    // fake_rc only encodes the chosen direction (+1 => decrease, -1 =>
    // increase), mimicking the sign convention of a real reduced cost.
    Fractional fake_rc;
    const Fractional entering_value = variable_values_.Get(entering_col);
    if (variables_info_.GetTypeRow()[entering_col] ==
      if (entering_value > 0) {
        fake_rc = 1.0;
      } else {
        fake_rc = -1.0;
      }
    } else {
      const Fractional diff_ub =
          variables_info_.GetVariableUpperBounds()[entering_col] -
          entering_value;
      const Fractional diff_lb =
          entering_value -
          variables_info_.GetVariableLowerBounds()[entering_col];
      if (diff_lb <= diff_ub) {
        fake_rc = 1.0;
      } else {
        fake_rc = -1.0;
      }
    }

    // Solve the system B.d = a with a the entering column.
    ComputeDirection(entering_col);

    Fractional step_length;
    RowIndex leaving_row;

    GLOP_RETURN_IF_ERROR(ChooseLeavingVariableRow(entering_col, fake_rc,
                                                  &refactorize, &leaving_row,
                                                  &step_length, &target_bound));

    if (refactorize) continue;

    // At this point, we know the iteration will finish or stop with an error.
    super_basic_cols.pop_back();

    if (step_length == kInfinity || step_length == -kInfinity) {
      if (variables_info_.GetTypeRow()[entering_col] ==
        step_length = std::fabs(entering_value);
      } else {
        VLOG(1) << "Infinite step for bounded variable ?!";
        problem_status_ = ProblemStatus::ABNORMAL;
        break;
      }
    }

    // fake_rc > 0 means we chose to decrease the variable.
    const Fractional step = (fake_rc > 0.0) ? -step_length : step_length;

    // Store the leaving_col before basis_ change.
    const ColIndex leaving_col =
        (leaving_row == kInvalidRow) ? kInvalidCol : basis_[leaving_row];

    // An iteration is called 'degenerate' if the leaving variable is already
    // primal-infeasible and we make it even more infeasible or if we do a zero
    // step.
    // TODO(user): Test setting the step size to zero for degenerate steps.
    // We don't need to force a positive step because each super-basic variable
    // is pivoted in exactly once.
    bool is_degenerate = false;
    if (leaving_row != kInvalidRow) {
      Fractional dir = -direction_[leaving_row] * step;
      is_degenerate =
          (dir == 0.0) ||
          (dir > 0.0 && variable_values_.Get(leaving_col) >= target_bound) ||
          (dir < 0.0 && variable_values_.Get(leaving_col) <= target_bound);

      // If the iteration is not degenerate, the leaving variable should go to
      // its exact target bound (it is how the step is computed).
      if (!is_degenerate) {
        DCHECK_EQ(step, ComputeStepToMoveBasicVariableToBound(leaving_row,
                                                              target_bound));
      }
    }

    variable_values_.UpdateOnPivoting(direction_, entering_col, step);
    if (leaving_row != kInvalidRow) {
      if (!is_degenerate) {
        // On a non-degenerate iteration, the leaving variable should be at its
        // exact bound. This corrects an eventual small numerical error since
        // 'value + direction * step' where step is
        // '(target_bound - value) / direction'
        // may be slighlty different from target_bound.
        variable_values_.Set(leaving_col, target_bound);
      }
      UpdateAndPivot(entering_col, leaving_row, target_bound));
      if (is_degenerate) {
        timer.AlsoUpdate(&iteration_stats_.degenerate);
      } else {
        timer.AlsoUpdate(&iteration_stats_.normal);
      }
      });
    } else {
      // Snap the super-basic variable to its bound. Note that
      // variable_values_.UpdateOnPivoting() should already be close to that but
      // here we make sure it is exact and remove any small numerical errors.
      if (variables_info_.GetTypeRow()[entering_col] ==
        variable_values_.Set(entering_col, 0.0);
      } else if (step > 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
      } else if (step < 0.0) {
        SetNonBasicVariableStatusAndDeriveValue(entering_col,
      }
      IF_STATS_ENABLED(timer.AlsoUpdate(&iteration_stats_.bound_flip));
    }

    ++num_iterations_;
  }

  // NOTE(review): '!empty() > 0' only works via bool->int promotion; the
  // intended condition is presumably just '!super_basic_cols.empty()'.
  if (!super_basic_cols.empty() > 0) {
    SOLVER_LOG(logger_, "Push terminated early with ", super_basic_cols.size(),
               " super-basic variables remaining.");
  }

  // TODO(user): What status should be returned if the time limit is hit?
  // If the optimization phase finished, then OPTIMAL is technically correct
  // but also misleading.

  return Status::OK();
}
3432 
3433 ColIndex RevisedSimplex::SlackColIndex(RowIndex row) const {
3435  return first_slack_col_ + RowToColIndex(row);
3436 }
3437 
3439  std::string result;
3440  result.append(iteration_stats_.StatString());
3441  result.append(ratio_test_stats_.StatString());
3442  result.append(entering_variable_.StatString());
3443  result.append(dual_prices_.StatString());
3444  result.append(reduced_costs_.StatString());
3445  result.append(variable_values_.StatString());
3446  result.append(primal_edge_norms_.StatString());
3447  result.append(dual_edge_norms_.StatString());
3448  result.append(update_row_.StatString());
3449  result.append(basis_factorization_.StatString());
3450  result.append(function_stats_.StatString());
3451  return result;
3452 }
3453 
3454 void RevisedSimplex::DisplayAllStats() {
3455  if (absl::GetFlag(FLAGS_simplex_display_stats)) {
3456  absl::FPrintF(stderr, "%s", StatString());
3457  absl::FPrintF(stderr, "%s", GetPrettySolverStats());
3458  }
3459 }
3460 
3461 Fractional RevisedSimplex::ComputeObjectiveValue() const {
3462  SCOPED_TIME_STAT(&function_stats_);
3463  return PreciseScalarProduct(objective_,
3464  Transpose(variable_values_.GetDenseRow()));
3465 }
3466 
3467 Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
3468  SCOPED_TIME_STAT(&function_stats_);
3469  const Fractional sum = PreciseScalarProduct(
3470  objective_, Transpose(variable_values_.GetDenseRow()));
3471  return objective_scaling_factor_ * (sum + objective_offset_);
3472 }
3473 
3475  SCOPED_TIME_STAT(&function_stats_);
3476  deterministic_random_.seed(parameters.random_seed());
3477 
3478  initial_parameters_ = parameters;
3479  parameters_ = parameters;
3480  PropagateParameters();
3481 }
3482 
// Pushes the current parameters_ to every sub-component so that all of them
// operate under one consistent configuration.
void RevisedSimplex::PropagateParameters() {
  SCOPED_TIME_STAT(&function_stats_);
  basis_factorization_.SetParameters(parameters_);
  entering_variable_.SetParameters(parameters_);
  reduced_costs_.SetParameters(parameters_);
  dual_edge_norms_.SetParameters(parameters_);
  primal_edge_norms_.SetParameters(parameters_);
  update_row_.SetParameters(parameters_);
}
3492 
3493 void RevisedSimplex::DisplayIterationInfo(bool primal) {
3494  if (!logger_->LoggingIsEnabled()) return;
3495  const std::string first_word = primal ? "Primal " : "Dual ";
3496 
3497  switch (phase_) {
3498  case Phase::FEASIBILITY: {
3499  const int64_t iter = num_iterations_;
3500  std::string name;
3501  Fractional objective;
3502  if (parameters_.use_dual_simplex()) {
3503  if (parameters_.use_dedicated_dual_feasibility_algorithm()) {
3504  objective = reduced_costs_.ComputeSumOfDualInfeasibilities();
3505  } else {
3506  // The internal objective of the transformed problem is the negation
3507  // of the sum of the dual infeasibility of the original problem.
3508  objective = -PreciseScalarProduct(
3509  objective_, Transpose(variable_values_.GetDenseRow()));
3510  }
3511  name = "sum_dual_infeasibilities";
3512  } else {
3513  objective = variable_values_.ComputeSumOfPrimalInfeasibilities();
3514  name = "sum_primal_infeasibilities";
3515  }
3516 
3517  SOLVER_LOG(logger_, first_word, "feasibility phase, iteration # ", iter,
3518  ", ", name, " = ", absl::StrFormat("%.15E", objective));
3519  break;
3520  }
3521  case Phase::OPTIMIZATION: {
3522  const int64_t iter = num_iterations_ - num_feasibility_iterations_;
3523  // Note that in the dual phase II, ComputeObjectiveValue() is also
3524  // computing the dual objective even if it uses the variable values.
3525  // This is because if we modify the bounds to make the problem
3526  // primal-feasible, we are at the optimal and hence the two objectives
3527  // are the same.
3528  const Fractional objective = ComputeInitialProblemObjectiveValue();
3529  SOLVER_LOG(logger_, first_word, "optimization phase, iteration # ", iter,
3530  ", objective = ", absl::StrFormat("%.15E", objective));
3531  break;
3532  }
3533  case Phase::PUSH: {
3534  const int64_t iter = num_iterations_ - num_feasibility_iterations_ -
3535  num_optimization_iterations_;
3536  SOLVER_LOG(logger_, first_word, "push phase, iteration # ", iter,
3537  ", remaining_variables_to_push = ",
3538  ComputeNumberOfSuperBasicVariables());
3539  }
3540  }
3541 }
3542 
// Logs the standard precision measures of the current solution: the problem
// status, the maximum primal infeasibility/residual and the maximum dual
// infeasibility/residual. The log order is part of the output contract.
void RevisedSimplex::DisplayErrors() {
  if (!logger_->LoggingIsEnabled()) return;
  SOLVER_LOG(logger_,
             "Current status: ", GetProblemStatusString(problem_status_));
  SOLVER_LOG(logger_, "Primal infeasibility (bounds) = ",
             variable_values_.ComputeMaximumPrimalInfeasibility());
  SOLVER_LOG(logger_, "Primal residual |A.x - b| = ",
             variable_values_.ComputeMaximumPrimalResidual());
  SOLVER_LOG(logger_, "Dual infeasibility (reduced costs) = ",
             reduced_costs_.ComputeMaximumDualInfeasibility());
  SOLVER_LOG(logger_, "Dual residual |c_B - y.B| = ",
             reduced_costs_.ComputeMaximumDualResidual());
}
3556 
3557 namespace {
3558 
3559 std::string StringifyMonomialWithFlags(const Fractional a,
3560  const std::string& x) {
3561  return StringifyMonomial(
3562  a, x, absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3563 }
3564 
3565 // Returns a string representing the rational approximation of x or a decimal
3566 // approximation of x according to
3567 // absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions).
3568 std::string StringifyWithFlags(const Fractional x) {
3569  return Stringify(x,
3570  absl::GetFlag(FLAGS_simplex_display_numbers_as_fractions));
3571 }
3572 
3573 } // namespace
3574 
3575 std::string RevisedSimplex::SimpleVariableInfo(ColIndex col) const {
3576  std::string output;
3577  VariableType variable_type = variables_info_.GetTypeRow()[col];
3578  VariableStatus variable_status = variables_info_.GetStatusRow()[col];
3579  const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
3580  const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
3581  absl::StrAppendFormat(&output, "%d (%s) = %s, %s, %s, [%s,%s]", col.value(),
3582  variable_name_[col],
3583  StringifyWithFlags(variable_values_.Get(col)),
3584  GetVariableStatusString(variable_status),
3585  GetVariableTypeString(variable_type),
3586  StringifyWithFlags(lower_bounds[col]),
3587  StringifyWithFlags(upper_bounds[col]));
3588  return output;
3589 }
3590 
3591 void RevisedSimplex::DisplayInfoOnVariables() const {
3592  if (VLOG_IS_ON(3)) {
3593  for (ColIndex col(0); col < num_cols_; ++col) {
3594  const Fractional variable_value = variable_values_.Get(col);
3595  const Fractional objective_coefficient = objective_[col];
3596  const Fractional objective_contribution =
3597  objective_coefficient * variable_value;
3598  VLOG(3) << SimpleVariableInfo(col) << ". " << variable_name_[col] << " = "
3599  << StringifyWithFlags(variable_value) << " * "
3600  << StringifyWithFlags(objective_coefficient)
3601  << "(obj) = " << StringifyWithFlags(objective_contribution);
3602  }
3603  VLOG(3) << "------";
3604  }
3605 }
3606 
// Logs (at VLOG level 3) one human-readable bound constraint per column,
// formatted according to the variable's type.
//
// NOTE(review): the 'case VariableType::...' labels of this switch appear to
// have been lost in the extraction of this listing; only the case bodies and
// 'break;' statements remain. Restore the labels from upstream before use.
void RevisedSimplex::DisplayVariableBounds() {
  if (VLOG_IS_ON(3)) {
    const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
    const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds();
    const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds();
    for (ColIndex col(0); col < num_cols_; ++col) {
      switch (variable_type[col]) {
          break;
          VLOG(3) << variable_name_[col]
                  << " >= " << StringifyWithFlags(lower_bounds[col]) << ";";
          break;
          VLOG(3) << variable_name_[col]
                  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
          break;
          VLOG(3) << StringifyWithFlags(lower_bounds[col])
                  << " <= " << variable_name_[col]
                  << " <= " << StringifyWithFlags(upper_bounds[col]) << ";";
          break;
          VLOG(3) << variable_name_[col] << " = "
                  << StringifyWithFlags(lower_bounds[col]) << ";";
          break;
        default:  // This should never happen.
          LOG(DFATAL) << "Column " << col << " has no meaningful status.";
          break;
      }
    }
  }
}
3640 
3642  const DenseRow* column_scales) {
3643  absl::StrongVector<RowIndex, SparseRow> dictionary(num_rows_.value());
3644  for (ColIndex col(0); col < num_cols_; ++col) {
3645  ComputeDirection(col);
3646  for (const auto e : direction_) {
3647  if (column_scales == nullptr) {
3648  dictionary[e.row()].SetCoefficient(col, e.coefficient());
3649  continue;
3650  }
3651  const Fractional numerator =
3652  col < column_scales->size() ? (*column_scales)[col] : 1.0;
3653  const Fractional denominator = GetBasis(e.row()) < column_scales->size()
3654  ? (*column_scales)[GetBasis(e.row())]
3655  : 1.0;
3656  dictionary[e.row()].SetCoefficient(
3657  col, direction_[e.row()] * (numerator / denominator));
3658  }
3659  }
3660  return dictionary;
3661 }
3662 
3664  const LinearProgram& linear_program, const BasisState& state) {
3665  LoadStateForNextSolve(state);
3666  Status status = Initialize(linear_program);
3667  if (status.ok()) {
3668  variable_values_.RecomputeBasicVariableValues();
3669  solution_objective_value_ = ComputeInitialProblemObjectiveValue();
3670  }
3671 }
3672 
3673 void RevisedSimplex::DisplayRevisedSimplexDebugInfo() {
3674  if (VLOG_IS_ON(3)) {
3675  // This function has a complexity in O(num_non_zeros_in_matrix).
3676  DisplayInfoOnVariables();
3677 
3678  std::string output = "z = " + StringifyWithFlags(ComputeObjectiveValue());
3679  const DenseRow& reduced_costs = reduced_costs_.GetReducedCosts();
3680  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
3681  absl::StrAppend(&output, StringifyMonomialWithFlags(reduced_costs[col],
3682  variable_name_[col]));
3683  }
3684  VLOG(3) << output << ";";
3685 
3686  const RevisedSimplexDictionary dictionary(nullptr, this);
3687  RowIndex r(0);
3688  for (const SparseRow& row : dictionary) {
3689  output.clear();
3690  ColIndex basic_col = basis_[r];
3691  absl::StrAppend(&output, variable_name_[basic_col], " = ",
3692  StringifyWithFlags(variable_values_.Get(basic_col)));
3693  for (const SparseRowEntry e : row) {
3694  if (e.col() != basic_col) {
3695  absl::StrAppend(&output,
3696  StringifyMonomialWithFlags(e.coefficient(),
3697  variable_name_[e.col()]));
3698  }
3699  }
3700  VLOG(3) << output << ";";
3701  }
3702  VLOG(3) << "------";
3703  DisplayVariableBounds();
3704  ++r;
3705  }
3706 }
3707 
3708 void RevisedSimplex::DisplayProblem() const {
3709  // This function has a complexity in O(num_rows * num_cols *
3710  // num_non_zeros_in_row).
3711  if (VLOG_IS_ON(3)) {
3712  DisplayInfoOnVariables();
3713  std::string output = "min: ";
3714  bool has_objective = false;
3715  for (ColIndex col(0); col < num_cols_; ++col) {
3716  const Fractional coeff = objective_[col];
3717  has_objective |= (coeff != 0.0);
3718  absl::StrAppend(&output,
3719  StringifyMonomialWithFlags(coeff, variable_name_[col]));
3720  }
3721  if (!has_objective) {
3722  absl::StrAppend(&output, " 0");
3723  }
3724  VLOG(3) << output << ";";
3725  for (RowIndex row(0); row < num_rows_; ++row) {
3726  output = "";
3727  for (ColIndex col(0); col < num_cols_; ++col) {
3728  absl::StrAppend(&output,
3729  StringifyMonomialWithFlags(
3730  compact_matrix_.column(col).LookUpCoefficient(row),
3731  variable_name_[col]));
3732  }
3733  VLOG(3) << output << " = 0;";
3734  }
3735  VLOG(3) << "------";
3736  }
3737 }
3738 
3739 void RevisedSimplex::AdvanceDeterministicTime(TimeLimit* time_limit) {
3740  DCHECK(time_limit != nullptr);
3741  const double current_deterministic_time = DeterministicTime();
3742  const double deterministic_time_delta =
3743  current_deterministic_time - last_deterministic_time_update_;
3744  time_limit->AdvanceDeterministicTime(deterministic_time_delta);
3745  last_deterministic_time_update_ = current_deterministic_time;
3746 }
3747 
3748 #undef DCHECK_COL_BOUNDS
3749 #undef DCHECK_ROW_BOUNDS
3750 
3751 } // namespace glop
3752 } // namespace operations_research
Index ColToIntIndex(ColIndex col)
Definition: lp_types.h:55
static constexpr InitialBasisHeuristic NONE
Fractional InfinityNorm(const DenseColumn &v)
const BasisFactorization & GetBasisFactorization() const
void Set(ColIndex col, Fractional value)
const DenseRow & Transpose(const DenseColumn &col)
#define CHECK(condition)
Definition: base/logging.h:495
void UpdateDualPrices(const std::vector< RowIndex > &row)
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:106
void InitializeFromBasisState(ColIndex first_slack, ColIndex num_new_cols, const BasisState &state)
Fractional ratio
std::string StatString() const
Definition: stats.cc:71
StrictITIVector< RowIndex, ColIndex > RowToColMapping
Definition: lp_types.h:346
const DenseBitRow & GetIsBasicBitRow() const
const bool DEBUG_MODE
Definition: macros.h:24
int64_t min
Definition: alldiff_cst.cc:139
#define SOLVER_LOG(logger,...)
Definition: util/logging.h:69
int ChangeUnusedBasicVariablesToFree(const RowToColMapping &basis)
ABSL_MUST_USE_RESULT Status ComputeFactorization(const CompactSparseMatrixView &compact_matrix)
Fractional coeff_magnitude
void LoadStateForNextSolve(const BasisState &state)
ModelSharedTimeLimit * time_limit
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
ABSL_FLAG(bool, simplex_display_numbers_as_fractions, false, "Display numbers as fractions.")
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
const DenseBitRow & GetCanDecreaseBitRow() const
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
void PopulateFromSparseMatrixAndAddSlacks(const SparseMatrix &input)
Definition: sparse.cc:456
const DenseBitRow & GetNotBasicBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
bool UpdatePrimalPhaseICosts(const Rows &rows, DenseRow *objective)
#define VLOG(verboselevel)
Definition: base/logging.h:983
std::vector< double > lower_bounds
const std::string name
Fractional GetVariableValue(ColIndex col) const
Fractional GetDualFeasibilityTolerance() const
const ColIndex kInvalidCol(-1)
void PopulateFromMatrixView(const MatrixView &input)
Definition: sparse.cc:437
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:263
void TestEnteringEdgeNormPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void SetLogToStdOut(bool enable)
Definition: util/logging.h:45
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
#define LOG(severity)
Definition: base/logging.h:420
ColIndex col
Definition: markowitz.cc:183
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
ABSL_MUST_USE_RESULT Status Solve(const LinearProgram &lp, TimeLimit *time_limit)
#define GLOP_RETURN_ERROR_IF_NULL(arg)
Definition: status.h:87
static const Status OK()
Definition: status.h:56
ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, std::vector< ColIndex > *bound_flip_candidates, ColIndex *entering_col)
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:895
int SnapFreeVariablesToBound(Fractional distance, const DenseRow &starting_values)
Permutation< ColIndex > ColumnPermutation
bool IsFinite(Fractional value)
Definition: lp_types.h:91
#define GLOP_RETURN_IF_ERROR(function_call)
Definition: status.h:72
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Fractional TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void UpdateGivenNonBasicVariables(const std::vector< ColIndex > &cols_to_update, bool update_basic_variables)
Fractional GetReducedCost(ColIndex col) const
::operations_research::glop::GlopParameters_InitialBasisHeuristic initial_basis() const
ColumnView column(ColIndex col) const
Definition: sparse.h:369
void ComputeBasicVariablesForState(const LinearProgram &linear_program, const BasisState &state)
void ColumnCopyToDenseColumn(ColIndex col, DenseColumn *dense_column) const
Definition: sparse.h:423
void SetParameters(const GlopParameters &parameters)
void ComputeUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:71
#define DCHECK_COL_BOUNDS(col)
static constexpr InitialBasisHeuristic BIXBY
void PopulateFromTranspose(const CompactSparseMatrix &input)
Definition: sparse.cc:483
int64_t max
Definition: alldiff_cst.cc:140
static constexpr InitialBasisHeuristic MAROS
double upper_bound
void ChangeSign(StrictITIVector< IndexType, Fractional > *data)
void SetParameters(const GlopParameters &parameters)
StrictITIVector< RowIndex, Fractional > DenseColumn
Definition: lp_types.h:332
std::string Stringify(const Fractional x, bool fraction)
Fractional EntryCoefficient(EntryIndex i) const
Definition: sparse_column.h:83
::operations_research::glop::GlopParameters_PricingRule optimization_rule() const
const int WARNING
Definition: log_severity.h:31
void SetParameters(const GlopParameters &parameters)
bool AreFirstColumnsAndRowsExactlyEquals(RowIndex num_rows, ColIndex num_cols, const SparseMatrix &matrix_a, const CompactSparseMatrix &matrix_b)
RowIndex EntryRow(EntryIndex i) const
Definition: sparse_column.h:89
bool empty() const
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
void SetParameters(const GlopParameters &parameters)
void SetParameters(const GlopParameters &parameters)
VariableStatus GetVariableStatus(ColIndex col) const
StrictITIVector< Index, Fractional > values
void SetIntegralityScale(ColIndex col, Fractional scale)
double lower_bound
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
void SetPricingRule(GlopParameters::PricingRule rule)
void ApplyColumnPermutationToRowIndexedVector(const Permutation< ColIndex > &col_perm, RowIndexedVector *v)
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
IntVar *const objective_
Definition: search.cc:3017
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition: sparse.h:387
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col, Fractional multiplier, ScatteredColumn *column) const
Definition: sparse.h:410
StrictITIVector< ColIndex, VariableStatus > VariableStatusRow
Definition: lp_types.h:324
void AddOrUpdate(Index position, Fractional value)
Definition: pricing.h:185
const double kInfinity
Definition: lp_types.h:84
const VariableTypeRow & GetTypeRow() const
const DenseRow & GetVariableUpperBounds() const
void UpdateDataOnBasisPermutation(const ColumnPermutation &col_perm)
int index
Definition: pack.cc:509
const DenseBitRow & GetNonBasicBoxedVariables() const
RowIndex row
Fractional target_bound
DisabledScopedTimeDistributionUpdater ScopedTimeDistributionUpdater
Definition: stats.h:434
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
std::string StatString() const
Definition: update_row.h:82
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:167
const DenseColumn & GetDualRay() const
ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn(bool nothing_to_recompute, const UpdateRow &update_row, Fractional cost_variation, ColIndex *entering_col)
const RowIndex kInvalidRow(-1)
void SetStartingVariableValuesForNextSolve(const DenseRow &values)
static constexpr InitialBasisHeuristic TRIANGULAR
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:327
void ComputeUnitRowLeftInverse(RowIndex leaving_row)
Definition: update_row.cc:57
int64_t cost
const DenseRow & GetVariableLowerBounds() const
void DenseAddOrUpdate(Index position, Fractional value)
Definition: pricing.h:176
#define DCHECK_ROW_BOUNDS(row)
const ColumnPermutation & GetColumnPermutation() const
Fractional LookUpCoefficient(RowIndex index) const
#define DCHECK(condition)
Definition: base/logging.h:889
void SetParameters(const GlopParameters &parameters)
void EndDualPhaseI(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
ConstraintStatus GetConstraintStatus(RowIndex row) const
ABSL_MUST_USE_RESULT Status Update(ColIndex entering_col, RowIndex leaving_variable_row, const ScatteredColumn &direction)
const Fractional Get(ColIndex col) const
RowToColMapping ComputeInitialBasis(const std::vector< ColIndex > &candidates)
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
::operations_research::glop::GlopParameters_PricingRule feasibility_rule() const
const DenseRow & GetDualRayRowCombination() const
bool IsValidPrimalEnteringCandidate(ColIndex col) const
bool IsSet(IndexType i) const
Definition: bitset.h:485
void UpdateOnPivoting(const ScatteredColumn &direction, ColIndex entering_col, Fractional step)
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, const ScatteredRow &unit_row_left_inverse)
Fractional Square(Fractional f)
std::string GetVariableStatusString(VariableStatus status)
Definition: lp_types.cc:71
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:892
void TransformToDualPhaseIProblem(Fractional dual_feasibility_tolerance, const DenseRow &reduced_costs)
constexpr const uint64_t kDeterministicSeed
StrictITIVector< RowIndex, bool > DenseBooleanColumn
Definition: lp_types.h:335
Collection of objects used to extend the Constraint Solver library.
std::string GetVariableTypeString(VariableType variable_type)
Definition: lp_types.cc:52
const VariableStatusRow & GetStatusRow() const
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn *d) const
Fractional GetBoundDifference(ColIndex col) const
SatParameters parameters
StrictITIVector< ColIndex, VariableType > VariableTypeRow
Definition: lp_types.h:321
Fractional GetDualValue(RowIndex row) const
std::vector< double > upper_bounds
const ScatteredRow & GetUnitRowLeftInverse() const
Definition: update_row.cc:45
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:44
RowMajorSparseMatrix ComputeDictionary(const DenseRow *column_scales)
const Fractional GetCoefficient(ColIndex col) const
Definition: update_row.h:70
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:383
void ResetAllNonBasicVariableValues(const DenseRow &free_initial_values)
std::string StringifyMonomial(const Fractional a, const std::string &x, bool fraction)
void RecomputeFullUpdateRow(RowIndex leaving_row)
Definition: update_row.cc:241
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
void ClearNonZerosIfTooDense(double ratio_for_using_dense_representation)
void ColumnAddMultipleToDenseColumn(ColIndex col, Fractional multiplier, DenseColumn *dense_column) const
Definition: sparse.h:398
int64_t value
double distance
ColIndex GetBasis(RowIndex row) const
void SetParameters(const GlopParameters &parameters)
Definition: update_row.cc:171
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:893
Fractional GetConstraintActivity(RowIndex row) const
const DenseBitRow & GetIsRelevantBitRow() const
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
bool LoadBoundsAndReturnTrueIfUnchanged(const DenseRow &new_lower_bounds, const DenseRow &new_upper_bounds)
int64_t a
void UpdateToNonBasicStatus(ColIndex col, VariableStatus status)
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)