OR-Tools 9.1
preprocessor.cc (glop presolve preprocessors) — extracted source listing
Go to the documentation of this file.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <cstdint>
17 #include <limits>
18 
19 #include "absl/strings/str_format.h"
23 #include "ortools/glop/status.h"
28 
29 namespace operations_research {
30 namespace glop {
31 
33 
34 namespace {
35 // Returns an interval as an human readable string for debugging.
36 std::string IntervalString(Fractional lb, Fractional ub) {
37  return absl::StrFormat("[%g, %g]", lb, ub);
38 }
39 
#if defined(_MSC_VER)
// Round-toward-zero shim for MSVC toolchains that lack C99 trunc().
double trunc(double d) {
  if (d > 0) return floor(d);
  return ceil(d);
}
#endif
43 } // namespace
44 
45 // --------------------------------------------------------
46 // Preprocessor
47 // --------------------------------------------------------
// NOTE(review): the constructor signature line is missing from this extracted
// listing; the initializer list below sets the initial problem status, copies
// the parameters reference, and wires time_limit_ to an infinite default.
49  : status_(ProblemStatus::INIT),
50  parameters_(*parameters),
51  in_mip_context_(false),
52  infinite_time_limit_(TimeLimit::Infinite()),
53  time_limit_(infinite_time_limit_.get()) {}
55 
56 // --------------------------------------------------------
57 // MainLpPreprocessor
58 // --------------------------------------------------------
59 
// Helper macro: instantiates the given preprocessor class with the current
// parameters, runs it on `lp`, and pushes it on the postsolve stack if it
// did something (see RunAndPushIfRelevant).
60 #define RUN_PREPROCESSOR(name) \
61  RunAndPushIfRelevant(std::unique_ptr<Preprocessor>(new name(&parameters_)), \
62  #name, time_limit_, lp)
63 
// Main presolve driver: repeatedly runs the individual preprocessors until a
// fixed point (or kMaxNumPasses), then some extra post-loop preprocessing.
// Returns true if at least one preprocessor was pushed (i.e. postsolve will
// be needed).
// NOTE(review): several lines of this function are missing from the extracted
// listing (including the signature and, presumably, the RUN_PREPROCESSOR(...)
// invocations between the visible lines) — confirm against upstream.
65  RETURN_VALUE_IF_NULL(lp, false);
// Remember the original problem size so later log lines can report deltas.
66  initial_num_rows_ = lp->num_constraints();
67  initial_num_cols_ = lp->num_variables();
68  initial_num_entries_ = lp->num_entries();
71 
72  // We run it a few times because running one preprocessor may allow another
73  // one to remove more stuff.
74  const int kMaxNumPasses = 20;
75  for (int i = 0; i < kMaxNumPasses; ++i) {
76  const int old_stack_size = preprocessors_.size();
85 
86  // Abort early if none of the preprocessors did something. Technically
87  // this is true if none of the preprocessors above needs postsolving,
88  // which has exactly the same meaning for these particular preprocessors.
89  if (preprocessors_.size() == old_stack_size) {
90  // We use i here because the last pass did nothing.
92  LOG(INFO) << "Reached fixed point after presolve pass #" << i;
93  }
94  break;
95  }
96  }
99 
100  // TODO(user): Run them in the loop above if the effect on the running time
101  // is good. This needs more investigation.
104 
105  // If DualizerPreprocessor was run, we need to do some extra preprocessing.
106  // This is because it currently adds a lot of zero-cost singleton columns.
107  const int old_stack_size = preprocessors_.size();
108 
109  // TODO(user): We probably want to scale the costs before and after this
110  // preprocessor so that the rhs/objective of the dual are with a good
111  // magnitude.
113  if (old_stack_size != preprocessors_.size()) {
119  }
120 
122  }
123 
124  // The scaling is controlled by use_scaling, not use_preprocessing.
126 
// Postsolve is needed iff at least one preprocessor was pushed.
127  return !preprocessors_.empty();
128 }
129 
130 #undef RUN_PREPROCESSOR
131 
// Runs `preprocessor` on `lp`. If it reports that postsolve is needed, it is
// pushed on preprocessors_ (so RecoverSolution() can later undo its
// transformation in reverse order) and timing/size statistics are logged.
// Does nothing if a status was already decided or the time limit is reached.
// NOTE(review): the trailing part of the signature (presumably
// "TimeLimit* time_limit, LinearProgram* lp)") is missing from this listing.
132 void MainLpPreprocessor::RunAndPushIfRelevant(
133  std::unique_ptr<Preprocessor> preprocessor, const std::string& name,
135  RETURN_IF_NULL(preprocessor);
137  if (status_ != ProblemStatus::INIT || time_limit->LimitReached()) return;
138 
139  const double start_time = time_limit->GetElapsedTime();
140  preprocessor->SetTimeLimit(time_limit);
141 
142  // No need to run the preprocessor if the lp is empty.
143  // TODO(user): without this test, the code is failing as of 2013-03-18.
144  if (lp->num_variables() == 0 && lp->num_constraints() == 0) {
146  return;
147  }
148 
149  const bool log_info = parameters_.log_search_progress() || VLOG_IS_ON(1);
// Run() returning true means the preprocessor changed the problem and must
// take part in postsolve.
150  if (preprocessor->Run(lp)) {
151  const EntryIndex new_num_entries = lp->num_entries();
152  const double preprocess_time = time_limit->GetElapsedTime() - start_time;
153  if (log_info) {
154  LOG(INFO) << absl::StrFormat(
155  "%s(%fs): %d(%d) rows, %d(%d) columns, %d(%d) entries.", name,
156  preprocess_time, lp->num_constraints().value(),
157  (lp->num_constraints() - initial_num_rows_).value(),
158  lp->num_variables().value(),
159  (lp->num_variables() - initial_num_cols_).value(),
160  // static_cast<int64_t> is needed because the Android port uses
161  // int32_t.
162  static_cast<int64_t>(new_num_entries.value()),
163  static_cast<int64_t>(new_num_entries.value() -
164  initial_num_entries_.value()));
165  }
166  status_ = preprocessor->status();
167  preprocessors_.push_back(std::move(preprocessor));
168  return;
169  } else {
170  // Even if a preprocessor returns false (i.e. no need for postsolve), it
171  // can detect an issue with the problem.
172  status_ = preprocessor->status();
173  if (status_ != ProblemStatus::INIT && log_info) {
174  LOG(INFO) << name << " detected that the problem is "
176  }
177  }
178 }
179 
// NOTE(review): the signature (presumably RecoverSolution) is missing from
// this listing. Undoes each stacked preprocessor's transformation in reverse
// (LIFO) order, without consuming the stack.
182  for (const auto& p : gtl::reversed_view(preprocessors_)) {
183  p->RecoverSolution(solution);
184  }
185 }
186 
// NOTE(review): the signature is missing from this listing. Same reverse
// postsolve as above, but pops each preprocessor as it is applied, freeing
// its memory along the way.
189  while (!preprocessors_.empty()) {
190  preprocessors_.back()->RecoverSolution(solution);
191  preprocessors_.pop_back();
192  }
193 }
194 
195 // --------------------------------------------------------
196 // ColumnDeletionHelper
197 // --------------------------------------------------------
198 
199 void ColumnsSaver::SaveColumn(ColIndex col, const SparseColumn& column) {
200  const int index = saved_columns_.size();
201  CHECK(saved_columns_index_.insert({col, index}).second);
202  saved_columns_.push_back(column);
203 }
204 
// NOTE(review): the first signature line is missing from this listing —
// presumably ColumnsSaver::SaveColumnIfNotAlreadyDone(ColIndex col, given the
// visible tail below; confirm against upstream. Saves the column only if no
// column was previously saved under `col` (no-op otherwise).
206  const SparseColumn& column) {
207  const int index = saved_columns_.size();
208  const bool inserted = saved_columns_index_.insert({col, index}).second;
209  if (inserted) saved_columns_.push_back(column);
210 }
211 
// NOTE(review): the signature is missing from this listing. Returns the
// column previously saved under `col`; CHECK-fails if none was saved.
213  const auto it = saved_columns_index_.find(col);
214  CHECK(it != saved_columns_index_.end());
215  return saved_columns_[it->second];
217 
// NOTE(review): the signature is missing from this listing. Lenient lookup:
// returns the saved column for `col`, or empty_column_ if none was saved.
219  const auto it = saved_columns_index_.find(col);
220  return it == saved_columns_index_.end() ? empty_column_
221  : saved_columns_[it->second];
222 }
223 
// NOTE(review): the signature (presumably ColumnDeletionHelper::Clear) is
// missing from this listing. Resets deletion marks and stored values;
// stored_status_ is not cleared here — TODO confirm that is intended.
225  is_column_deleted_.clear();
226  stored_value_.clear();
227 }
228 
231 }
232 
// Marks `col` for deletion, remembering the value and VariableStatus to
// restore during postsolve. Grows the internal vectors on demand.
// NOTE(review): the first signature line (the method name) is missing from
// this extracted listing.
234  ColIndex col, Fractional fixed_value, VariableStatus status) {
235  DCHECK_GE(col, 0);
// Lazily grow the parallel vectors so col is addressable.
236  if (col >= is_column_deleted_.size()) {
237  is_column_deleted_.resize(col + 1, false);
238  stored_value_.resize(col + 1, 0.0);
239  stored_status_.resize(col + 1, VariableStatus::FREE);
240  }
241  is_column_deleted_[col] = true;
242  stored_value_[col] = fixed_value;
243  stored_status_[col] = status;
244 }
245 
// Re-inserts the deleted columns into `solution`: deleted positions get their
// stored value/status back, surviving columns keep their solved values (in
// order). NOTE(review): the first signature line is missing from this listing.
247  ProblemSolution* solution) const {
248  DenseRow new_primal_values;
249  VariableStatusRow new_variable_statuses;
// old_index walks the (smaller) presolved solution vectors.
250  ColIndex old_index(0);
251  for (ColIndex col(0); col < is_column_deleted_.size(); ++col) {
252  if (is_column_deleted_[col]) {
253  new_primal_values.push_back(stored_value_[col]);
254  new_variable_statuses.push_back(stored_status_[col]);
255  } else {
256  new_primal_values.push_back(solution->primal_values[old_index]);
257  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
258  ++old_index;
259  }
260  }
261 
262  // Copy the end of the vectors and swap them with the ones in solution.
263  const ColIndex num_cols = solution->primal_values.size();
264  DCHECK_EQ(num_cols, solution->variable_statuses.size());
265  for (; old_index < num_cols; ++old_index) {
266  new_primal_values.push_back(solution->primal_values[old_index]);
267  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
268  }
269  new_primal_values.swap(solution->primal_values);
270  new_variable_statuses.swap(solution->variable_statuses);
271 }
272 
273 // --------------------------------------------------------
274 // RowDeletionHelper
275 // --------------------------------------------------------
276 
277 void RowDeletionHelper::Clear() { is_row_deleted_.clear(); }
278 
// NOTE(review): the signature (presumably MarkRowForDeletion(RowIndex row))
// is missing from this listing. Marks `row` as deleted, growing the bitmap
// on demand.
280  DCHECK_GE(row, 0);
281  if (row >= is_row_deleted_.size()) {
282  is_row_deleted_.resize(row + 1, false);
283  }
284  is_row_deleted_[row] = true;
285 }
286 
// NOTE(review): the signature (presumably UnmarkRow(RowIndex row)) is missing
// from this listing. Clears the deletion mark; no-op for out-of-range rows.
288  if (row >= is_row_deleted_.size()) return;
289  is_row_deleted_[row] = false;
290 }
291 
// NOTE(review): the signature is missing from this listing. Accessor for the
// deletion bitmap.
293  return is_row_deleted_;
294 }
295 
// Re-inserts the deleted rows into `solution`: a deleted row gets a zero dual
// value and a BASIC constraint status; surviving rows keep their solved
// values in order. NOTE(review): the signature line is missing from this
// extracted listing.
297  DenseColumn new_dual_values;
298  ConstraintStatusColumn new_constraint_statuses;
// old_index walks the (smaller) presolved solution vectors.
299  RowIndex old_index(0);
300  const RowIndex end = is_row_deleted_.size();
301  for (RowIndex row(0); row < end; ++row) {
302  if (is_row_deleted_[row]) {
303  new_dual_values.push_back(0.0);
304  new_constraint_statuses.push_back(ConstraintStatus::BASIC);
305  } else {
306  new_dual_values.push_back(solution->dual_values[old_index]);
307  new_constraint_statuses.push_back(
308  solution->constraint_statuses[old_index]);
309  ++old_index;
310  }
311  }
312 
313  // Copy the end of the vectors and swap them with the ones in solution.
314  const RowIndex num_rows = solution->dual_values.size();
315  DCHECK_EQ(num_rows, solution->constraint_statuses.size());
316  for (; old_index < num_rows; ++old_index) {
317  new_dual_values.push_back(solution->dual_values[old_index]);
318  new_constraint_statuses.push_back(solution->constraint_statuses[old_index]);
319  }
320  new_dual_values.swap(solution->dual_values);
321  new_constraint_statuses.swap(solution->constraint_statuses);
322 }
323 
324 // --------------------------------------------------------
325 // EmptyColumnPreprocessor
326 // --------------------------------------------------------
327 
328 namespace {
329 
330 // Computes the status of a variable given its value and bounds. This only works
331 // with a value exactly at one of the bounds, or a value of 0.0 for free
332 // variables.
// Computes the simplex VariableStatus for `value` relative to its bounds (see
// the comment above: only meaningful at a bound, or 0.0 for free variables).
// NOTE(review): this listing is missing the second signature line (the
// upper_bound parameter) and the bodies of the three branches below —
// presumably returns for fixed / at-lower-bound / at-upper-bound; confirm
// against upstream before relying on this annotation.
333 VariableStatus ComputeVariableStatus(Fractional value, Fractional lower_bound,
335  if (lower_bound == upper_bound) {
339  }
340  if (value == lower_bound) {
343  }
344  if (value == upper_bound) {
347  }
348 
349  // TODO(user): restrict this to unbounded variables with a value of zero.
350  // We can't do that when postsolving infeasible problem. Don't call postsolve
351  // on an infeasible problem?
352  return VariableStatus::FREE;
353 }
354 
355 // Returns the input with the smallest magnitude or zero if both are infinite.
356 Fractional MinInMagnitudeOrZeroIfInfinite(Fractional a, Fractional b) {
357  const Fractional value = std::abs(a) < std::abs(b) ? a : b;
358  return IsFinite(value) ? value : 0.0;
359 }
360 
361 Fractional MagnitudeOrZeroIfInfinite(Fractional value) {
362  return IsFinite(value) ? std::abs(value) : 0.0;
363 }
364 
365 // Returns the maximum magnitude of the finite variable bounds of the given
366 // linear program.
367 Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram& lp) {
368  Fractional max_bounds_magnitude = 0.0;
369  const ColIndex num_cols = lp.num_variables();
370  for (ColIndex col(0); col < num_cols; ++col) {
371  max_bounds_magnitude = std::max(
372  max_bounds_magnitude,
373  std::max(MagnitudeOrZeroIfInfinite(lp.variable_lower_bounds()[col]),
374  MagnitudeOrZeroIfInfinite(lp.variable_upper_bounds()[col])));
375  }
376  return max_bounds_magnitude;
377 }
378 
379 } // namespace
380 
// Removes columns with no nonzero entry: each is fixed to a feasible value
// chosen from its bounds (and cost sign), folded into the objective, and
// deleted. Returns true if any column was removed.
// NOTE(review): this listing is missing the signature and the declarations of
// `value` / `lower_bound` / `upper_bound` (and an objective-offset call whose
// argument appears on line 415) — confirm against upstream.
383  RETURN_VALUE_IF_NULL(lp, false);
384  column_deletion_helper_.Clear();
385  const ColIndex num_cols = lp->num_variables();
386  for (ColIndex col(0); col < num_cols; ++col) {
387  if (lp->GetSparseColumn(col).IsEmpty()) {
390  const Fractional objective_coefficient =
393  if (objective_coefficient == 0) {
394  // Any feasible value will do.
395  if (upper_bound != kInfinity) {
396  value = upper_bound;
397  } else {
398  if (lower_bound != -kInfinity) {
399  value = lower_bound;
400  } else {
401  value = Fractional(0.0);
402  }
403  }
404  } else {
// With a nonzero cost, push the variable to the bound that minimizes; an
// infinite bound there means the problem is unbounded (or infeasible).
405  value = objective_coefficient > 0 ? lower_bound : upper_bound;
406  if (!IsFinite(value)) {
407  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, empty column " << col
408  << " has a minimization cost of " << objective_coefficient
409  << " and bounds"
410  << " [" << lower_bound << "," << upper_bound << "]";
412  return false;
413  }
415  value * lp->objective_coefficients()[col]);
416  }
417  column_deletion_helper_.MarkColumnForDeletionWithState(
418  col, value, ComputeVariableStatus(value, lower_bound, upper_bound));
419  }
420  }
421  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
422  return !column_deletion_helper_.IsEmpty();
423 }
424 
// NOTE(review): the RecoverSolution signature is missing from this listing.
// Postsolve: re-inserts the deleted empty columns into the solution.
427  RETURN_IF_NULL(solution);
428  column_deletion_helper_.RestoreDeletedColumns(solution);
429 }
430 
431 // --------------------------------------------------------
432 // ProportionalColumnPreprocessor
433 // --------------------------------------------------------
434 
435 namespace {
436 
437 // Subtracts 'multiple' times the column col of the given linear program from
438 // the constraint bounds. That is, for a non-zero entry of coefficient c,
439 // c * multiple is subtracted from both the constraint upper and lower bound.
// NOTE(review): lines 443-444 (the declarations of `lbs` and `ubs`,
// presumably pointers to the mutable constraint lower/upper bound vectors)
// and line 453 (an objective-offset update whose argument appears on line
// 454) are missing from this extracted listing.
440 void SubtractColumnMultipleFromConstraintBound(ColIndex col,
441  Fractional multiple,
442  LinearProgram* lp) {
// For each nonzero c in the column, subtract c * multiple from both bounds
// of the corresponding constraint.
445  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
446  const RowIndex row = e.row();
447  const Fractional delta = multiple * e.coefficient();
448  (*lbs)[row] -= delta;
449  (*ubs)[row] -= delta;
450  }
451  // While not needed for correctness, this allows the presolved problem to
452  // have the same objective value as the original one.
454  lp->objective_coefficients()[col] * multiple);
455 }
456 
457 // Struct used to detect proportional columns with the same cost. For that, a
458 // vector of such struct will be sorted, and only the columns that end up
459 // together need to be compared.
// NOTE(review): the member declaration on line 466 (presumably
// `Fractional scaled_cost;`, given the constructor and comparator below) is
// missing from this extracted listing.
460 struct ColumnWithRepresentativeAndScaledCost {
461  ColumnWithRepresentativeAndScaledCost(ColIndex _col, ColIndex _representative,
462  Fractional _scaled_cost)
463  : col(_col), representative(_representative), scaled_cost(_scaled_cost) {}
464  ColIndex col;
465  ColIndex representative;
467 
// Orders by representative, then scaled cost, then column index, so that
// proportional columns with equal scaled cost end up adjacent after sorting.
468  bool operator<(const ColumnWithRepresentativeAndScaledCost& other) const {
469  if (representative == other.representative) {
470  if (scaled_cost == other.scaled_cost) {
471  return col < other.col;
472  }
473  return scaled_cost < other.scaled_cost;
474  }
475  return representative < other.representative;
476  }
477 };
478 
479 } // namespace
480 
483  RETURN_VALUE_IF_NULL(lp, false);
486 
487  // Compute some statistics and make each class representative point to itself
488  // in the mapping. Also store the columns that are proportional to at least
489  // another column in proportional_columns to iterate on them more efficiently.
490  //
491  // TODO(user): Change FindProportionalColumns for this?
492  int num_proportionality_classes = 0;
493  std::vector<ColIndex> proportional_columns;
494  for (ColIndex col(0); col < mapping.size(); ++col) {
495  const ColIndex representative = mapping[col];
496  if (representative != kInvalidCol) {
497  if (mapping[representative] == kInvalidCol) {
498  proportional_columns.push_back(representative);
499  ++num_proportionality_classes;
500  mapping[representative] = representative;
501  }
502  proportional_columns.push_back(col);
503  }
504  }
505  if (proportional_columns.empty()) return false;
506  VLOG(1) << "The problem contains " << proportional_columns.size()
507  << " columns which belong to " << num_proportionality_classes
508  << " proportionality classes.";
509 
510  // Note(user): using the first coefficient may not give the best precision.
511  const ColIndex num_cols = lp->num_variables();
512  column_factors_.assign(num_cols, 0.0);
513  for (const ColIndex col : proportional_columns) {
514  const SparseColumn& column = lp->GetSparseColumn(col);
515  column_factors_[col] = column.GetFirstCoefficient();
516  }
517 
518  // This is only meaningful for column representative.
519  //
520  // The reduced cost of a column is 'cost - dual_values.column' and we know
521  // that for all proportional columns, 'dual_values.column /
522  // column_factors_[col]' is the same. Here, we bound this quantity which is
523  // related to the cost 'slope' of a proportional column:
524  // cost / column_factors_[col].
525  DenseRow slope_lower_bound(num_cols, -kInfinity);
526  DenseRow slope_upper_bound(num_cols, +kInfinity);
527  for (const ColIndex col : proportional_columns) {
528  const ColIndex representative = mapping[col];
529 
530  // We reason in terms of a minimization problem here.
531  const bool is_rc_positive_or_zero =
532  (lp->variable_upper_bounds()[col] == kInfinity);
533  const bool is_rc_negative_or_zero =
534  (lp->variable_lower_bounds()[col] == -kInfinity);
535  bool is_slope_upper_bounded = is_rc_positive_or_zero;
536  bool is_slope_lower_bounded = is_rc_negative_or_zero;
537  if (column_factors_[col] < 0.0) {
538  std::swap(is_slope_lower_bounded, is_slope_upper_bounded);
539  }
540  const Fractional slope =
542  column_factors_[col];
543  if (is_slope_lower_bounded) {
544  slope_lower_bound[representative] =
545  std::max(slope_lower_bound[representative], slope);
546  }
547  if (is_slope_upper_bounded) {
548  slope_upper_bound[representative] =
549  std::min(slope_upper_bound[representative], slope);
550  }
551  }
552 
553  // Deal with empty slope intervals.
554  for (const ColIndex col : proportional_columns) {
555  const ColIndex representative = mapping[col];
556 
557  // This is only needed for class representative columns.
558  if (representative == col) {
560  slope_lower_bound[representative],
561  slope_upper_bound[representative])) {
562  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, no feasible dual values"
563  << " can satisfy the constraints of the proportional columns"
564  << " with representative " << representative << "."
565  << " the associated quantity must be in ["
566  << slope_lower_bound[representative] << ","
567  << slope_upper_bound[representative] << "].";
569  return false;
570  }
571  }
572  }
573 
574  // Now, fix the columns that can be fixed to one of their bounds.
575  for (const ColIndex col : proportional_columns) {
576  const ColIndex representative = mapping[col];
577  const Fractional slope =
579  column_factors_[col];
580 
581  // The scaled reduced cost is slope - quantity.
582  bool variable_can_be_fixed = false;
583  Fractional target_bound = 0.0;
584 
587  if (!IsSmallerWithinFeasibilityTolerance(slope_lower_bound[representative],
588  slope)) {
589  // The scaled reduced cost is < 0.
590  variable_can_be_fixed = true;
591  target_bound = (column_factors_[col] >= 0.0) ? upper_bound : lower_bound;
593  slope, slope_upper_bound[representative])) {
594  // The scaled reduced cost is > 0.
595  variable_can_be_fixed = true;
596  target_bound = (column_factors_[col] >= 0.0) ? lower_bound : upper_bound;
597  }
598 
599  if (variable_can_be_fixed) {
600  // Clear mapping[col] so this column will not be considered for the next
601  // stage.
602  mapping[col] = kInvalidCol;
603  if (!IsFinite(target_bound)) {
604  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED.";
606  return false;
607  } else {
608  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
609  column_deletion_helper_.MarkColumnForDeletionWithState(
610  col, target_bound,
611  ComputeVariableStatus(target_bound, lower_bound, upper_bound));
612  }
613  }
614  }
615 
616  // Merge the variables with the same scaled cost.
617  std::vector<ColumnWithRepresentativeAndScaledCost> sorted_columns;
618  for (const ColIndex col : proportional_columns) {
619  const ColIndex representative = mapping[col];
620 
621  // This test is needed because we already removed some columns.
622  if (mapping[col] != kInvalidCol) {
623  sorted_columns.push_back(ColumnWithRepresentativeAndScaledCost(
625  lp->objective_coefficients()[col] / column_factors_[col]));
626  }
627  }
628  std::sort(sorted_columns.begin(), sorted_columns.end());
629 
630  // All this will be needed during postsolve.
631  merged_columns_.assign(num_cols, kInvalidCol);
632  lower_bounds_.assign(num_cols, -kInfinity);
633  upper_bounds_.assign(num_cols, kInfinity);
634  new_lower_bounds_.assign(num_cols, -kInfinity);
635  new_upper_bounds_.assign(num_cols, kInfinity);
636 
637  for (int i = 0; i < sorted_columns.size();) {
638  const ColIndex target_col = sorted_columns[i].col;
639  const ColIndex target_representative = sorted_columns[i].representative;
640  const Fractional target_scaled_cost = sorted_columns[i].scaled_cost;
641 
642  // Save the initial bounds before modifying them.
643  lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
644  upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
645 
646  int num_merged = 0;
647  for (++i; i < sorted_columns.size(); ++i) {
648  if (sorted_columns[i].representative != target_representative) break;
649  if (std::abs(sorted_columns[i].scaled_cost - target_scaled_cost) >=
651  break;
652  }
653  ++num_merged;
654  const ColIndex col = sorted_columns[i].col;
657  lower_bounds_[col] = lower_bound;
658  upper_bounds_[col] = upper_bound;
659  merged_columns_[col] = target_col;
660 
661  // This is a bit counter intuitive, but when a column is divided by x,
662  // the corresponding bounds have to be multiplied by x.
663  const Fractional bound_factor =
664  column_factors_[col] / column_factors_[target_col];
665 
666  // We need to shift the variable so that a basic solution of the new
667  // problem can easily be converted to a basic solution of the original
668  // problem.
669 
670  // A feasible value for the variable must be chosen, and the variable must
671  // be shifted by this value. This is done to make sure that it will be
672  // possible to recreate a basic solution of the original problem from a
673  // basic solution of the pre-solved problem during post-solve.
674  const Fractional target_value =
675  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
676  Fractional lower_diff = (lower_bound - target_value) * bound_factor;
677  Fractional upper_diff = (upper_bound - target_value) * bound_factor;
678  if (bound_factor < 0.0) {
679  std::swap(lower_diff, upper_diff);
680  }
681  lp->SetVariableBounds(
682  target_col, lp->variable_lower_bounds()[target_col] + lower_diff,
683  lp->variable_upper_bounds()[target_col] + upper_diff);
684  SubtractColumnMultipleFromConstraintBound(col, target_value, lp);
685  column_deletion_helper_.MarkColumnForDeletionWithState(
686  col, target_value,
687  ComputeVariableStatus(target_value, lower_bound, upper_bound));
688  }
689 
690  // If at least one column was merged, the target column must be shifted like
691  // the other columns in the same equivalence class for the same reason (see
692  // above).
693  if (num_merged > 0) {
694  merged_columns_[target_col] = target_col;
695  const Fractional target_value = MinInMagnitudeOrZeroIfInfinite(
696  lower_bounds_[target_col], upper_bounds_[target_col]);
697  lp->SetVariableBounds(
698  target_col, lp->variable_lower_bounds()[target_col] - target_value,
699  lp->variable_upper_bounds()[target_col] - target_value);
700  SubtractColumnMultipleFromConstraintBound(target_col, target_value, lp);
701  new_lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
702  new_upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
703  }
704  }
705 
706  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
707  return !column_deletion_helper_.IsEmpty();
708 }
709 
711  ProblemSolution* solution) const {
713  RETURN_IF_NULL(solution);
714  column_deletion_helper_.RestoreDeletedColumns(solution);
715 
716  // The rest of this function is to unmerge the columns so that the solution be
717  // primal-feasible.
718  const ColIndex num_cols = merged_columns_.size();
719  DenseBooleanRow is_representative_basic(num_cols, false);
720  DenseBooleanRow is_distance_to_upper_bound(num_cols, false);
721  DenseRow distance_to_bound(num_cols, 0.0);
722  DenseRow wanted_value(num_cols, 0.0);
723 
724  // The first pass is a loop over the representatives to compute the current
725  // distance to the new bounds.
726  for (ColIndex col(0); col < num_cols; ++col) {
727  if (merged_columns_[col] == col) {
728  const Fractional value = solution->primal_values[col];
729  const Fractional distance_to_upper_bound = new_upper_bounds_[col] - value;
730  const Fractional distance_to_lower_bound = value - new_lower_bounds_[col];
731  if (distance_to_upper_bound < distance_to_lower_bound) {
732  distance_to_bound[col] = distance_to_upper_bound;
733  is_distance_to_upper_bound[col] = true;
734  } else {
735  distance_to_bound[col] = distance_to_lower_bound;
736  is_distance_to_upper_bound[col] = false;
737  }
738  is_representative_basic[col] =
740 
741  // Restore the representative value to a feasible value of the initial
742  // variable. Now all the merged variable are at a feasible value.
743  wanted_value[col] = value;
744  solution->primal_values[col] = MinInMagnitudeOrZeroIfInfinite(
745  lower_bounds_[col], upper_bounds_[col]);
746  solution->variable_statuses[col] = ComputeVariableStatus(
747  solution->primal_values[col], lower_bounds_[col], upper_bounds_[col]);
748  }
749  }
750 
751  // Second pass to correct the values.
752  for (ColIndex col(0); col < num_cols; ++col) {
753  const ColIndex representative = merged_columns_[col];
754  if (representative != kInvalidCol) {
755  if (IsFinite(distance_to_bound[representative])) {
756  // If the distance is finite, then each variable is set to its
757  // corresponding bound (the one from which the distance is computed) and
758  // is then changed by as much as possible until the distance is zero.
759  const Fractional bound_factor =
760  column_factors_[col] / column_factors_[representative];
761  const Fractional scaled_distance =
762  distance_to_bound[representative] / std::abs(bound_factor);
763  const Fractional width = upper_bounds_[col] - lower_bounds_[col];
764  const bool to_upper_bound =
765  (bound_factor > 0.0) == is_distance_to_upper_bound[representative];
766  if (width <= scaled_distance) {
767  solution->primal_values[col] =
768  to_upper_bound ? lower_bounds_[col] : upper_bounds_[col];
769  solution->variable_statuses[col] =
770  ComputeVariableStatus(solution->primal_values[col],
771  lower_bounds_[col], upper_bounds_[col]);
772  distance_to_bound[representative] -= width * std::abs(bound_factor);
773  } else {
774  solution->primal_values[col] =
775  to_upper_bound ? upper_bounds_[col] - scaled_distance
776  : lower_bounds_[col] + scaled_distance;
777  solution->variable_statuses[col] =
778  is_representative_basic[representative]
780  : ComputeVariableStatus(solution->primal_values[col],
781  lower_bounds_[col],
782  upper_bounds_[col]);
783  distance_to_bound[representative] = 0.0;
784  is_representative_basic[representative] = false;
785  }
786  } else {
787  // If the distance is not finite, then only one variable needs to be
788  // changed from its current feasible value in order to have a
789  // primal-feasible solution.
790  const Fractional error = wanted_value[representative];
791  if (error == 0.0) {
792  if (is_representative_basic[representative]) {
794  is_representative_basic[representative] = false;
795  }
796  } else {
797  const Fractional bound_factor =
798  column_factors_[col] / column_factors_[representative];
799  const bool use_this_variable =
800  (error * bound_factor > 0.0) ? (upper_bounds_[col] == kInfinity)
801  : (lower_bounds_[col] == -kInfinity);
802  if (use_this_variable) {
803  wanted_value[representative] = 0.0;
804  solution->primal_values[col] += error / bound_factor;
805  if (is_representative_basic[representative]) {
807  is_representative_basic[representative] = false;
808  } else {
809  // This should not happen on an OPTIMAL or FEASIBLE solution.
810  DCHECK(solution->status != ProblemStatus::OPTIMAL &&
813  }
814  }
815  }
816  }
817  }
818  }
819 }
820 
821 // --------------------------------------------------------
822 // ProportionalRowPreprocessor
823 // --------------------------------------------------------
824 
827  RETURN_VALUE_IF_NULL(lp, false);
828  const RowIndex num_rows = lp->num_constraints();
829  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
830 
831  // Use the first coefficient of each row to compute the proportionality
832  // factor. Note that the sign is important.
833  //
834  // Note(user): using the first coefficient may not give the best precision.
835  row_factors_.assign(num_rows, 0.0);
836  for (RowIndex row(0); row < num_rows; ++row) {
837  const SparseColumn& row_transpose = transpose.column(RowToColIndex(row));
838  if (!row_transpose.IsEmpty()) {
839  row_factors_[row] = row_transpose.GetFirstCoefficient();
840  }
841  }
842 
843  // The new row bounds (only meaningful for the proportional rows).
844  DenseColumn lower_bounds(num_rows, -kInfinity);
845  DenseColumn upper_bounds(num_rows, +kInfinity);
846 
847  // Where the new bounds are coming from. Only for the constraints that stay
848  // in the lp and are modified, kInvalidRow otherwise.
849  upper_bound_sources_.assign(num_rows, kInvalidRow);
850  lower_bound_sources_.assign(num_rows, kInvalidRow);
851 
852  // Initialization.
853  // We need the first representative of each proportional row class to point to
854  // itself for the loop below. TODO(user): Already return such a mapping from
855  // FindProportionalColumns()?
858  DenseBooleanColumn is_a_representative(num_rows, false);
859  int num_proportional_rows = 0;
860  for (RowIndex row(0); row < num_rows; ++row) {
861  const ColIndex representative_row_as_col = mapping[RowToColIndex(row)];
862  if (representative_row_as_col != kInvalidCol) {
863  mapping[representative_row_as_col] = representative_row_as_col;
864  is_a_representative[ColToRowIndex(representative_row_as_col)] = true;
865  ++num_proportional_rows;
866  }
867  }
868 
869  // Compute the bound of each representative as implied by the rows
870  // which are proportional to it. Also keep the source row of each bound.
871  for (RowIndex row(0); row < num_rows; ++row) {
872  const ColIndex row_as_col = RowToColIndex(row);
873  if (mapping[row_as_col] != kInvalidCol) {
874  // For now, delete all the rows that are proportional to another one.
875  // Note that we will unmark the final representative of this class later.
876  row_deletion_helper_.MarkRowForDeletion(row);
877  const RowIndex representative_row = ColToRowIndex(mapping[row_as_col]);
878 
879  const Fractional factor =
880  row_factors_[representative_row] / row_factors_[row];
881  Fractional implied_lb = factor * lp->constraint_lower_bounds()[row];
882  Fractional implied_ub = factor * lp->constraint_upper_bounds()[row];
883  if (factor < 0.0) {
884  std::swap(implied_lb, implied_ub);
885  }
886 
887  // TODO(user): if the bounds are equal, use the largest row in magnitude?
888  if (implied_lb >= lower_bounds[representative_row]) {
889  lower_bounds[representative_row] = implied_lb;
890  lower_bound_sources_[representative_row] = row;
891  }
892  if (implied_ub <= upper_bounds[representative_row]) {
893  upper_bounds[representative_row] = implied_ub;
894  upper_bound_sources_[representative_row] = row;
895  }
896  }
897  }
898 
899  // For maximum precision, and also to simplify the postsolve, we choose
900  // a representative for each class of proportional columns that has at least
901  // one of the two tightest bounds.
902  for (RowIndex row(0); row < num_rows; ++row) {
903  if (!is_a_representative[row]) continue;
904  const RowIndex lower_source = lower_bound_sources_[row];
905  const RowIndex upper_source = upper_bound_sources_[row];
906  lower_bound_sources_[row] = kInvalidRow;
907  upper_bound_sources_[row] = kInvalidRow;
908  DCHECK_NE(lower_source, kInvalidRow);
909  DCHECK_NE(upper_source, kInvalidRow);
910  if (lower_source == upper_source) {
911  // In this case, a simple change of representative is enough.
912  // The constraint bounds of the representative will not change.
913  DCHECK_NE(lower_source, kInvalidRow);
914  row_deletion_helper_.UnmarkRow(lower_source);
915  } else {
916  // Report ProblemStatus::PRIMAL_INFEASIBLE if the new lower bound is not
917  // lower than the new upper bound modulo the default tolerance.
919  upper_bounds[row])) {
921  return false;
922  }
923 
924  // Special case for fixed rows.
925  if (lp->constraint_lower_bounds()[lower_source] ==
926  lp->constraint_upper_bounds()[lower_source]) {
927  row_deletion_helper_.UnmarkRow(lower_source);
928  continue;
929  }
930  if (lp->constraint_lower_bounds()[upper_source] ==
931  lp->constraint_upper_bounds()[upper_source]) {
932  row_deletion_helper_.UnmarkRow(upper_source);
933  continue;
934  }
935 
936  // This is the only case where a more complex postsolve is needed.
937  // To maximize precision, the class representative is changed to either
938  // upper_source or lower_source depending of which row has the largest
939  // proportionality factor.
940  RowIndex new_representative = lower_source;
941  RowIndex other = upper_source;
942  if (std::abs(row_factors_[new_representative]) <
943  std::abs(row_factors_[other])) {
944  std::swap(new_representative, other);
945  }
946 
947  // Initialize the new bounds with the implied ones.
948  const Fractional factor =
949  row_factors_[new_representative] / row_factors_[other];
950  Fractional new_lb = factor * lp->constraint_lower_bounds()[other];
951  Fractional new_ub = factor * lp->constraint_upper_bounds()[other];
952  if (factor < 0.0) {
953  std::swap(new_lb, new_ub);
954  }
955 
956  lower_bound_sources_[new_representative] = new_representative;
957  upper_bound_sources_[new_representative] = new_representative;
958 
959  if (new_lb > lp->constraint_lower_bounds()[new_representative]) {
960  lower_bound_sources_[new_representative] = other;
961  } else {
962  new_lb = lp->constraint_lower_bounds()[new_representative];
963  }
964  if (new_ub < lp->constraint_upper_bounds()[new_representative]) {
965  upper_bound_sources_[new_representative] = other;
966  } else {
967  new_ub = lp->constraint_upper_bounds()[new_representative];
968  }
969  const RowIndex new_lower_source =
970  lower_bound_sources_[new_representative];
971  if (new_lower_source == upper_bound_sources_[new_representative]) {
972  row_deletion_helper_.UnmarkRow(new_lower_source);
973  lower_bound_sources_[new_representative] = kInvalidRow;
974  upper_bound_sources_[new_representative] = kInvalidRow;
975  continue;
976  }
977 
978  // Take care of small numerical imprecision by making sure that lb <= ub.
979  // Note that if the imprecision was greater than the tolerance, the code
980  // at the beginning of this block would have reported
981  // ProblemStatus::PRIMAL_INFEASIBLE.
983  if (new_lb > new_ub) {
984  if (lower_bound_sources_[new_representative] == new_representative) {
985  new_ub = lp->constraint_lower_bounds()[new_representative];
986  } else {
987  new_lb = lp->constraint_upper_bounds()[new_representative];
988  }
989  }
990  row_deletion_helper_.UnmarkRow(new_representative);
991  lp->SetConstraintBounds(new_representative, new_lb, new_ub);
992  }
993  }
994 
995  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
996  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
997  return !row_deletion_helper_.IsEmpty();
998 }
999 
// Postsolve step of ProportionalRowPreprocessor: restores the rows deleted
// during presolve, then moves each non-zero dual value (and the matching
// constraint status) from a class representative back to the source row that
// supplied the active bound, scaled by the ratio of proportionality factors.
//
// NOTE(review): this listing is a documentation-generator extract; several
// original source lines are missing (e.g. the function-name line 1000, the
// declaration of the local `status` at line 1019, and the second branch of
// the ternaries at lines 1032/1047/1057/1065). The code below is shown
// incomplete — verify against the upstream preprocessor.cc before editing.
1001  ProblemSolution* solution) const {
 1003  RETURN_IF_NULL(solution);
 1004  row_deletion_helper_.RestoreDeletedRows(solution);
 1005 
 1006  // Make sure that all non-zero dual values on the proportional rows are
 1007  // assigned to the correct row with the correct sign and that the statuses
 1008  // are correct.
 1009  const RowIndex num_rows = solution->dual_values.size();
 1010  for (RowIndex row(0); row < num_rows; ++row) {
 1011  const RowIndex lower_source = lower_bound_sources_[row];
 1012  const RowIndex upper_source = upper_bound_sources_[row];
 1013  if (lower_source == kInvalidRow && upper_source == kInvalidRow) continue;
 1014  DCHECK_NE(lower_source, upper_source);
 1015  DCHECK(lower_source == row || upper_source == row);
 1016 
 1017  // If the representative is ConstraintStatus::BASIC, then all rows in this
 1018  // class will be ConstraintStatus::BASIC and there is nothing to do.
 1020  if (status == ConstraintStatus::BASIC) continue;
 1021 
 1022  // If the row is FIXED it will behave as a row
 1023  // ConstraintStatus::AT_UPPER_BOUND or
 1024  // ConstraintStatus::AT_LOWER_BOUND depending on the corresponding dual
 1025  // variable sign.
// Dual sign convention flips for maximization, hence the correction below.
 1027  const Fractional corrected_dual_value = lp_is_maximization_problem_
 1028  ? -solution->dual_values[row]
 1029  : solution->dual_values[row];
 1030  if (corrected_dual_value != 0.0) {
 1031  status = corrected_dual_value > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
 1033  }
 1034  }
 1035 
 1036  // If one of the two conditions below are true, set the row status to
 1037  // ConstraintStatus::BASIC.
 1038  // Note that the source which is not row can't be FIXED (see presolve).
 1039  if (lower_source != row && status == ConstraintStatus::AT_LOWER_BOUND) {
 1040  DCHECK_EQ(0.0, solution->dual_values[lower_source]);
// Transfer the dual value to the row that actually provided the lower bound.
 1041  const Fractional factor = row_factors_[row] / row_factors_[lower_source];
 1042  solution->dual_values[lower_source] = factor * solution->dual_values[row];
 1043  solution->dual_values[row] = 0.0;
 1045  solution->constraint_statuses[lower_source] =
 1046  factor > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
 1048  }
 1049  if (upper_source != row && status == ConstraintStatus::AT_UPPER_BOUND) {
 1050  DCHECK_EQ(0.0, solution->dual_values[upper_source]);
 1051  const Fractional factor = row_factors_[row] / row_factors_[upper_source];
 1052  solution->dual_values[upper_source] = factor * solution->dual_values[row];
 1053  solution->dual_values[row] = 0.0;
 1055  solution->constraint_statuses[upper_source] =
 1056  factor > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
 1058  }
 1059 
 1060  // If the row status is still ConstraintStatus::FIXED_VALUE, we need to
 1061  // relax its status.
 1063  solution->constraint_statuses[row] =
 1064  lower_source != row ? ConstraintStatus::AT_UPPER_BOUND
 1066  }
 1067  }
 1068 }
1069 
1070 // --------------------------------------------------------
1071 // FixedVariablePreprocessor
1072 // --------------------------------------------------------
1073 
// Presolve: removes every variable whose lower bound equals its upper bound.
// The fixed value is substituted into the constraint bounds via
// SubtractColumnMultipleFromConstraintBound(), and the column is marked for
// deletion with status VariableStatus::FIXED_VALUE so postsolve can restore
// it. Returns true iff at least one column was deleted.
//
// NOTE(review): the extraction dropped the signature line and the local
// declarations of `lower_bound`/`upper_bound` (original lines ~1074-1080);
// verify against the upstream preprocessor.cc before editing.
1076  RETURN_VALUE_IF_NULL(lp, false);
 1077  const ColIndex num_cols = lp->num_variables();
 1078  for (ColIndex col(0); col < num_cols; ++col) {
 1081  if (lower_bound == upper_bound) {
 1082  const Fractional fixed_value = lower_bound;
 1083  DCHECK(IsFinite(fixed_value));
 1084 
 1085  // We need to change the constraint bounds.
 1086  SubtractColumnMultipleFromConstraintBound(col, fixed_value, lp);
 1087  column_deletion_helper_.MarkColumnForDeletionWithState(
 1088  col, fixed_value, VariableStatus::FIXED_VALUE);
 1089  }
 1090  }
 1091 
 1092  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
 1093  return !column_deletion_helper_.IsEmpty();
 1094 }
1095 
// Postsolve: restores the deleted fixed columns; their values and statuses
// were recorded at deletion time, so no further correction is needed here.
// NOTE(review): the function-name line (original 1096) was dropped by the
// extraction.
1097  ProblemSolution* solution) const {
 1099  RETURN_IF_NULL(solution);
 1100  column_deletion_helper_.RestoreDeletedColumns(solution);
 1101 }
1102 
1103 // --------------------------------------------------------
1104 // ForcingAndImpliedFreeConstraintPreprocessor
1105 // --------------------------------------------------------
1106 
// Presolve: computes, for each constraint, the activity bounds implied by
// the variable bounds, then:
//  - reports PRIMAL_INFEASIBLE when the implied activity cannot meet the
//    constraint bounds (modulo tolerance);
//  - detects "forcing" constraints (implied bound touches a constraint
//    bound), fixes every variable they touch to the forced bound, and deletes
//    those columns and rows;
//  - relaxes constraints that are implied free so the
//    FreeConstraintPreprocessor can delete them later.
// Returns true iff at least one column was deleted.
//
// NOTE(review): this documentation-generator listing dropped several lines
// (e.g. the signature, the opening of the infeasibility/forcing/implied-free
// `if` conditions at original lines 1146/1165/1178, the bound relaxation at
// 1182, and the status_ assignments at 1153/1218/1236). The code below is
// shown incomplete — verify against upstream preprocessor.cc before editing.
1109  RETURN_VALUE_IF_NULL(lp, false);
 1110  const RowIndex num_rows = lp->num_constraints();
 1111 
 1112  // Compute the implied constraint bounds from the variable bounds.
 1113  DenseColumn implied_lower_bounds(num_rows, 0);
 1114  DenseColumn implied_upper_bounds(num_rows, 0);
 1115  const ColIndex num_cols = lp->num_variables();
 1116  StrictITIVector<RowIndex, int> row_degree(num_rows, 0);
 1117  for (ColIndex col(0); col < num_cols; ++col) {
 1118  const Fractional lower = lp->variable_lower_bounds()[col];
 1119  const Fractional upper = lp->variable_upper_bounds()[col];
 1120  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
 1121  const RowIndex row = e.row();
 1122  const Fractional coeff = e.coefficient();
// A negative coefficient swaps which variable bound contributes to which
// implied activity bound.
 1123  if (coeff > 0.0) {
 1124  implied_lower_bounds[row] += lower * coeff;
 1125  implied_upper_bounds[row] += upper * coeff;
 1126  } else {
 1127  implied_lower_bounds[row] += upper * coeff;
 1128  implied_upper_bounds[row] += lower * coeff;
 1129  }
 1130  ++row_degree[row];
 1131  }
 1132  }
 1133 
 1134  // Note that the ScalingPreprocessor is currently executed last, so here the
 1135  // problem has not been scaled yet.
 1136  int num_implied_free_constraints = 0;
 1137  int num_forcing_constraints = 0;
 1138  is_forcing_up_.assign(num_rows, false);
 1139  DenseBooleanColumn is_forcing_down(num_rows, false);
 1140  for (RowIndex row(0); row < num_rows; ++row) {
 1141  if (row_degree[row] == 0) continue;
 1142  Fractional lower = lp->constraint_lower_bounds()[row];
 1143  Fractional upper = lp->constraint_upper_bounds()[row];
 1144 
 1145  // Check for infeasibility.
 1147  implied_upper_bounds[row]) ||
 1148  !IsSmallerWithinFeasibilityTolerance(implied_lower_bounds[row],
 1149  upper)) {
 1150  VLOG(1) << "implied bound " << implied_lower_bounds[row] << " "
 1151  << implied_upper_bounds[row];
 1152  VLOG(1) << "constraint bound " << lower << " " << upper;
 1154  return false;
 1155  }
 1156 
 1157  // Check if the constraint is forcing. That is, all the variables that
 1158  // appear in it must be at one of their bounds.
 1159  if (IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
 1160  lower)) {
 1161  is_forcing_down[row] = true;
 1162  ++num_forcing_constraints;
 1163  continue;
 1164  }
 1166  implied_lower_bounds[row])) {
 1167  is_forcing_up_[row] = true;
 1168  ++num_forcing_constraints;
 1169  continue;
 1170  }
 1171 
 1172  // We relax the constraint bounds only if the constraint is implied to be
 1173  // free. Such constraints will later be deleted by the
 1174  // FreeConstraintPreprocessor.
 1175  //
 1176  // Note that we could relax only one of the two bounds, but the impact this
 1177  // would have on the revised simplex algorithm is unclear at this point.
 1179  implied_lower_bounds[row]) &&
 1180  IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
 1181  upper)) {
 1183  ++num_implied_free_constraints;
 1184  }
 1185  }
 1186 
 1187  if (num_implied_free_constraints > 0) {
 1188  VLOG(1) << num_implied_free_constraints << " implied free constraints.";
 1189  }
 1190 
 1191  if (num_forcing_constraints > 0) {
 1192  VLOG(1) << num_forcing_constraints << " forcing constraints.";
 1193  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
 1194  costs_.resize(num_cols, 0.0);
 1195  for (ColIndex col(0); col < num_cols; ++col) {
 1196  const SparseColumn& column = lp->GetSparseColumn(col);
 1197  const Fractional lower = lp->variable_lower_bounds()[col];
 1198  const Fractional upper = lp->variable_upper_bounds()[col];
 1199  bool is_forced = false;
 1200  Fractional target_bound = 0.0;
 1201  for (const SparseColumn::Entry e : column) {
 1202  if (is_forcing_down[e.row()]) {
 1203  const Fractional candidate = e.coefficient() < 0.0 ? lower : upper;
 1204  if (is_forced && candidate != target_bound) {
 1205  // The bounds are really close, so we fix to the bound with
 1206  // the lowest magnitude. As of 2019/11/19, this is "better" than
 1207  // fixing to the mid-point, because at postsolve, we always put
 1208  // non-basic variables to their exact bounds (so, with mid-point
 1209  // there would be a difference of epsilon/2 between the inner
 1210  // solution and the postsolved one, which might cause issues).
 1211  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
 1212  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
 1213  continue;
 1214  }
 1215  VLOG(1) << "A variable is forced in both directions! bounds: ["
 1216  << std::fixed << std::setprecision(10) << lower << ", "
 1217  << upper << "]. coeff:" << e.coefficient();
 1219  return false;
 1220  }
 1221  target_bound = candidate;
 1222  is_forced = true;
 1223  }
 1224  if (is_forcing_up_[e.row()]) {
 1225  const Fractional candidate = e.coefficient() < 0.0 ? upper : lower;
 1226  if (is_forced && candidate != target_bound) {
 1227  // The bounds are really close, so we fix to the bound with
 1228  // the lowest magnitude.
 1229  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
 1230  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
 1231  continue;
 1232  }
 1233  VLOG(1) << "A variable is forced in both directions! bounds: ["
 1234  << std::fixed << std::setprecision(10) << lower << ", "
 1235  << upper << "]. coeff:" << e.coefficient();
 1237  return false;
 1238  }
 1239  target_bound = candidate;
 1240  is_forced = true;
 1241  }
 1242  }
 1243  if (is_forced) {
 1244  // Fix the variable, update the constraint bounds and save this column
 1245  // and its cost for the postsolve.
 1246  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
 1247  column_deletion_helper_.MarkColumnForDeletionWithState(
 1248  col, target_bound,
 1249  ComputeVariableStatus(target_bound, lower, upper));
 1250  columns_saver_.SaveColumn(col, column);
 1251  costs_[col] = lp->objective_coefficients()[col];
 1252  }
 1253  }
 1254  for (RowIndex row(0); row < num_rows; ++row) {
 1255  // In theory, an M exists such that for any magnitude >= M, we will be at
 1256  // an optimal solution. However, because of numerical errors, if the value
 1257  // is too large, it causes problem when verifying the solution. So we
 1258  // select the smallest such M (at least a reasonably small one) during
 1259  // postsolve. It is the reason why we need to store the columns that were
 1260  // fixed.
 1261  if (is_forcing_down[row] || is_forcing_up_[row]) {
 1262  row_deletion_helper_.MarkRowForDeletion(row);
 1263  }
 1264  }
 1265  }
 1266 
 1267  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
 1268  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
 1269  return !column_deletion_helper_.IsEmpty();
 1270 }
1271 
// Postsolve: restores the columns fixed and the rows deleted by Run(), then
// computes a dual value for each deleted forcing row so that all fixed
// columns become dual-feasible. For each deleted row, the column with the
// extreme reduced-cost ratio becomes BASIC and the row leaves BASIC status.
//
// NOTE(review): the extraction dropped some lines — notably the signature
// (original 1272), the `Fractional coefficient;` member of DeletionEntry
// (original 1282, required by the push_back at 1301 and the read at 1336),
// and the second ternary branch at 1354. Verify against upstream before
// editing.
1273  ProblemSolution* solution) const {
 1275  RETURN_IF_NULL(solution);
 1276  column_deletion_helper_.RestoreDeletedColumns(solution);
 1277  row_deletion_helper_.RestoreDeletedRows(solution);
 1278 
 1279  struct DeletionEntry {
 1280  RowIndex row;
 1281  ColIndex col;
 1283  };
 1284  std::vector<DeletionEntry> entries;
 1285 
 1286  // Compute for each deleted columns the last deleted row in which it appears.
 1287  const ColIndex size = column_deletion_helper_.GetMarkedColumns().size();
 1288  for (ColIndex col(0); col < size; ++col) {
 1289  if (!column_deletion_helper_.IsColumnMarked(col)) continue;
 1290 
// last_coefficient is deliberately left uninitialized: it is only read when
// last_row != kInvalidRow, which guarantees it was assigned in the loop.
 1291  RowIndex last_row = kInvalidRow;
 1292  Fractional last_coefficient;
 1293  for (const SparseColumn::Entry e : columns_saver_.SavedColumn(col)) {
 1294  const RowIndex row = e.row();
 1295  if (row_deletion_helper_.IsRowMarked(row)) {
 1296  last_row = row;
 1297  last_coefficient = e.coefficient();
 1298  }
 1299  }
 1300  if (last_row != kInvalidRow) {
 1301  entries.push_back({last_row, col, last_coefficient});
 1302  }
 1303  }
 1304 
 1305  // Sort by row first and then col.
 1306  std::sort(entries.begin(), entries.end(),
 1307  [](const DeletionEntry& a, const DeletionEntry& b) {
 1308  if (a.row == b.row) return a.col < b.col;
 1309  return a.row < b.row;
 1310  });
 1311 
 1312  // For each deleted row (in order), compute a bound on the dual values so
 1313  // that all the deleted columns for which this row is the last deleted row are
 1314  // dual-feasible. Note that for the other columns, it will always be possible
 1315  // to make them dual-feasible with a later row.
 1316  // There are two possible outcomes:
 1317  // - The dual value stays 0.0, and nothing changes.
 1318  // - The bounds enforce a non-zero dual value, and one column will have a
 1319  // reduced cost of 0.0. This column becomes VariableStatus::BASIC, and the
 1320  // constraint status is changed to ConstraintStatus::AT_LOWER_BOUND,
 1321  // ConstraintStatus::AT_UPPER_BOUND or ConstraintStatus::FIXED_VALUE.
 1322  for (int i = 0; i < entries.size();) {
 1323  const RowIndex row = entries[i].row;
 1324  DCHECK(row_deletion_helper_.IsRowMarked(row));
 1325 
 1326  // Process column with this last deleted row.
 1327  Fractional new_dual_value = 0.0;
 1328  ColIndex new_basic_column = kInvalidCol;
 1329  for (; i < entries.size(); ++i) {
 1330  if (entries[i].row != row) break;
 1331  const ColIndex col = entries[i].col;
 1332 
 1333  const Fractional scalar_product =
 1334  ScalarProduct(solution->dual_values, columns_saver_.SavedColumn(col));
 1335  const Fractional reduced_cost = costs_[col] - scalar_product;
 1336  const Fractional bound = reduced_cost / entries[i].coefficient;
// The dual-feasible direction depends on whether the row forces up or down,
// with the sign flipped for maximization problems.
 1337  if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
 1338  if (bound < new_dual_value) {
 1339  new_dual_value = bound;
 1340  new_basic_column = col;
 1341  }
 1342  } else {
 1343  if (bound > new_dual_value) {
 1344  new_dual_value = bound;
 1345  new_basic_column = col;
 1346  }
 1347  }
 1348  }
 1349  if (new_basic_column != kInvalidCol) {
 1350  solution->dual_values[row] = new_dual_value;
 1351  solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
 1352  solution->constraint_statuses[row] =
 1353  is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
 1355  }
 1356  }
 1357 }
1358 
1359 // --------------------------------------------------------
1360 // ImpliedFreePreprocessor
1361 // --------------------------------------------------------
1362 
1363 namespace {
1364 struct ColWithDegree {
1365  ColIndex col;
1366  EntryIndex num_entries;
1367  ColWithDegree(ColIndex c, EntryIndex n) : col(c), num_entries(n) {}
1368  bool operator<(const ColWithDegree& other) const {
1369  if (num_entries == other.num_entries) {
1370  return col < other.col;
1371  }
1372  return num_entries < other.num_entries;
1373  }
1374 };
1375 } // namespace
1376 
// Presolve: detects variables whose bounds are implied by the constraints
// they appear in (using per-row activity sums with one missing term) and
// turns them free. Freed variables are offset by one of their bounds (the
// smallest in magnitude) and the status to restore at postsolve is recorded
// in postsolve_status_of_free_variables_. Rows used to free a variable are
// marked so they are not reused for another variable. Returns true iff at
// least one variable was made free.
//
// NOTE(review): the extraction dropped several lines — the signature
// (original ~1377-1378), the declarations of lb_sums/ub_sums (1390/1392),
// the per-column lower_bound/upper_bound declarations (1397-1398, 1455-1457),
// the opening of `if` conditions at 1507/1529, the status_ assignment at
// 1501, and line 1534. The code below is shown incomplete — verify against
// upstream preprocessor.cc before editing.
1379  RETURN_VALUE_IF_NULL(lp, false);
 1380  const RowIndex num_rows = lp->num_constraints();
 1381  const ColIndex num_cols = lp->num_variables();
 1382 
 1383  // For each constraint with n entries and each of its variable, we want the
 1384  // bounds implied by the (n - 1) other variables and the constraint. We
 1385  // use two handy utility classes that allow us to do that efficiently while
 1386  // dealing properly with infinite bounds.
 1387  const int size = num_rows.value();
 1388  // TODO(user) : Replace SumWithNegativeInfiniteAndOneMissing and
 1389  // SumWithPositiveInfiniteAndOneMissing with IntervalSumWithOneMissing.
 1391  size);
 1393  size);
 1394 
 1395  // Initialize the sums by adding all the bounds of the variables.
 1396  for (ColIndex col(0); col < num_cols; ++col) {
 1399  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
 1400  Fractional entry_lb = e.coefficient() * lower_bound;
 1401  Fractional entry_ub = e.coefficient() * upper_bound;
 1402  if (e.coefficient() < 0.0) std::swap(entry_lb, entry_ub);
 1403  lb_sums[e.row()].Add(entry_lb);
 1404  ub_sums[e.row()].Add(entry_ub);
 1405  }
 1406  }
 1407 
 1408  // The inequality
 1409  // constraint_lb <= sum(entries) <= constraint_ub
 1410  // can be rewritten as:
 1411  // sum(entries) + (-activity) = 0,
 1412  // where (-activity) has bounds [-constraint_ub, -constraint_lb].
 1413  // We use this latter convention to simplify our code.
 1414  for (RowIndex row(0); row < num_rows; ++row) {
 1415  lb_sums[row].Add(-lp->constraint_upper_bounds()[row]);
 1416  ub_sums[row].Add(-lp->constraint_lower_bounds()[row]);
 1417  }
 1418 
 1419  // Once a variable is freed, none of the rows in which it appears can be
 1420  // used to make another variable free.
 1421  DenseBooleanColumn used_rows(num_rows, false);
 1422  postsolve_status_of_free_variables_.assign(num_cols, VariableStatus::FREE);
 1423  variable_offsets_.assign(num_cols, 0.0);
 1424 
 1425  // It is better to process columns with a small degree first:
 1426  // - Degree-two columns make it possible to remove a row from the problem.
 1427  // - This way there is more chance to make more free columns.
 1428  // - It is better to have low degree free columns since a free column will
 1429  // always end up in the simplex basis (except if there is more than the
 1430  // number of rows in the problem).
 1431  //
 1432  // TODO(user): Only process degree-two so in subsequent passes more degree-two
 1433  // columns could be made free. And only when no other reduction can be
 1434  // applied, process the higher degree column?
 1435  //
 1436  // TODO(user): Be smarter about the order that maximizes the number of free
 1437  // column. For instance if we have 3 doubleton columns that use the rows (1,2)
 1438  // (2,3) and (3,4) then it is better not to make (2,3) free so the two other
 1439  // two can be made free.
 1440  std::vector<ColWithDegree> col_by_degree;
 1441  for (ColIndex col(0); col < num_cols; ++col) {
 1442  col_by_degree.push_back(
 1443  ColWithDegree(col, lp->GetSparseColumn(col).num_entries()));
 1444  }
 1445  std::sort(col_by_degree.begin(), col_by_degree.end());
 1446 
 1447  // Now loop over the columns in order and make all implied-free columns free.
 1448  int num_already_free_variables = 0;
 1449  int num_implied_free_variables = 0;
 1450  int num_fixed_variables = 0;
 1451  for (ColWithDegree col_with_degree : col_by_degree) {
 1452  const ColIndex col = col_with_degree.col;
 1453 
 1454  // If the variable is already free or fixed, we do nothing.
 1458  ++num_already_free_variables;
 1459  continue;
 1460  }
 1461  if (lower_bound == upper_bound) continue;
 1462 
 1463  // Detect if the variable is implied free.
 1464  Fractional overall_implied_lb = -kInfinity;
 1465  Fractional overall_implied_ub = kInfinity;
 1466  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
 1467  // If the row contains another implied free variable, then the bounds
 1468  // implied by it will just be [-kInfinity, kInfinity] so we can skip it.
 1469  if (used_rows[e.row()]) continue;
 1470 
 1471  // This is the contribution of this column to the sum above.
 1472  const Fractional coeff = e.coefficient();
 1473  Fractional entry_lb = coeff * lower_bound;
 1474  Fractional entry_ub = coeff * upper_bound;
 1475  if (coeff < 0.0) std::swap(entry_lb, entry_ub);
 1476 
 1477  // If X is the variable with index col and Y the sum of all the other
 1478  // variables and of (-activity), then coeff * X + Y = 0. Since Y's bounds
 1479  // are [lb_sum without X, ub_sum without X], it is easy to derive the
 1480  // implied bounds on X.
 1481  //
 1482  // Important: If entry_lb (resp. entry_ub) are large, we cannot have a
 1483  // good precision on the sum without. So we do add a defensive tolerance
 1484  // that depends on these magnitude.
 1485  const Fractional implied_lb =
 1486  coeff > 0.0 ? -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff
 1487  : -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff;
 1488  const Fractional implied_ub =
 1489  coeff > 0.0 ? -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff
 1490  : -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff;
 1491 
 1492  overall_implied_lb = std::max(overall_implied_lb, implied_lb);
 1493  overall_implied_ub = std::min(overall_implied_ub, implied_ub);
 1494  }
 1495 
 1496  // Detect infeasible cases.
 1497  if (!IsSmallerWithinFeasibilityTolerance(overall_implied_lb, upper_bound) ||
 1498  !IsSmallerWithinFeasibilityTolerance(lower_bound, overall_implied_ub) ||
 1499  !IsSmallerWithinFeasibilityTolerance(overall_implied_lb,
 1500  overall_implied_ub)) {
 1502  return false;
 1503  }
 1504 
 1505  // Detect fixed variable cases (there are two kinds).
 1506  // Note that currently we don't do anything here except counting them.
 1508  overall_implied_lb) ||
 1509  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
 1510  lower_bound)) {
 1511  // This case is already dealt with by the
 1512  // ForcingAndImpliedFreeConstraintPreprocessor since it means that (at
 1513  // least) one of the row is forcing.
 1514  ++num_fixed_variables;
 1515  continue;
 1516  } else if (IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
 1517  overall_implied_lb)) {
 1518  // TODO(user): As of July 2013, with our preprocessors this case is never
 1519  // triggered on the Netlib. Note however that if it appears it can have a
 1520  // big impact since by fixing the variable, the two involved constraints
 1521  // are forcing and can be removed too (with all the variables they touch).
 1522  // The postsolve step is quite involved though.
 1523  ++num_fixed_variables;
 1524  continue;
 1525  }
 1526 
 1527  // Is the variable implied free? Note that for an infinite lower_bound or
 1528  // upper_bound the respective inequality is always true.
 1530  overall_implied_lb) &&
 1531  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
 1532  upper_bound)) {
 1533  ++num_implied_free_variables;
 1535  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
 1536  used_rows[e.row()] = true;
 1537  }
 1538 
 1539  // This is a tricky part. We're freeing this variable, which means that
 1540  // after solve, the modified variable will have status either
 1541  // VariableStatus::FREE or VariableStatus::BASIC. In the former case
 1542  // (VariableStatus::FREE, value = 0.0), we need to "fix" the
 1543  // status (technically, our variable isn't free!) to either
 1544  // VariableStatus::AT_LOWER_BOUND or VariableStatus::AT_UPPER_BOUND
 1545  // (note that we skipped fixed variables), and "fix" the value to that
 1546  // bound's value as well. We make the decision and the precomputation
 1547  // here: we simply offset the variable by one of its bounds, and store
 1548  // which bound that was. Note that if the modified variable turns out to
 1549  // be VariableStatus::BASIC, we'll simply un-offset its value too;
 1550  // and let the status be VariableStatus::BASIC.
 1551  //
 1552  // TODO(user): This trick is already used in the DualizerPreprocessor,
 1553  // maybe we should just have a preprocessor that shifts all the variables
 1554  // bounds to have at least one of them at 0.0, will that improve precision
 1555  // and speed of the simplex? One advantage is that we can compute the
 1556  // new constraint bounds with better precision using AccurateSum.
 1558  const Fractional offset =
 1559  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
 1560  if (offset != 0.0) {
 1561  variable_offsets_[col] = offset;
 1562  SubtractColumnMultipleFromConstraintBound(col, offset, lp);
 1563  }
 1564  postsolve_status_of_free_variables_[col] =
 1565  ComputeVariableStatus(offset, lower_bound, upper_bound);
 1566  }
 1567  }
 1568  VLOG(1) << num_already_free_variables << " free variables in the problem.";
 1569  VLOG(1) << num_implied_free_variables << " implied free columns.";
 1570  VLOG(1) << num_fixed_variables << " variables can be fixed.";
 1571 
 1572  return num_implied_free_variables > 0;
 1573 }
1574 
// Postsolve: for each variable freed by Run(), restores the recorded
// non-free status when the solver left it FREE, and un-applies the bound
// offset added at presolve so the primal value is in the original space.
//
// NOTE(review): the extraction dropped the signature (original ~1575-1576)
// and the body of the `else` branch (original 1589 — presumably a DCHECK
// that the status is VariableStatus::BASIC; confirm against upstream).
1577  RETURN_IF_NULL(solution);
 1578  const ColIndex num_cols = solution->variable_statuses.size();
 1579  for (ColIndex col(0); col < num_cols; ++col) {
 1580  // Skip variables that the preprocessor didn't change.
 1581  if (postsolve_status_of_free_variables_[col] == VariableStatus::FREE) {
 1582  DCHECK_EQ(0.0, variable_offsets_[col]);
 1583  continue;
 1584  }
 1585  if (solution->variable_statuses[col] == VariableStatus::FREE) {
 1586  solution->variable_statuses[col] =
 1587  postsolve_status_of_free_variables_[col];
 1588  } else {
 1590  }
 1591  solution->primal_values[col] += variable_offsets_[col];
 1592  }
 1593 }
1594 
1595 // --------------------------------------------------------
1596 // DoubletonFreeColumnPreprocessor
1597 // --------------------------------------------------------
1598 
// Presolve: for each free variable (bounds -inf/+inf) appearing in exactly
// two rows, uses one of the two rows (the one with the larger coefficient
// magnitude, slot DELETED) to eliminate the variable: the deleted row's
// bounds are transferred onto the variable, a multiple of the deleted row is
// added to the other row (slot MODIFIED), the objective of the other
// involved variables is corrected, and the deleted row is saved on
// restore_stack_ for postsolve. Returns true iff a row was deleted.
//
// NOTE(review): RestoreInfo, NUM_ROWS, DELETED and MODIFIED are declared in
// the class (outside this listing). The extraction dropped the signature
// (~1599-1600), the drop-tolerance argument line of
// AddMultipleToSparseVectorAndIgnoreCommonIndex (original 1668), and the
// lp->UseTransposeMatrixAsReference() call (original 1698, "the order is
// important" refers to it). Verify against upstream before editing.
1601  RETURN_VALUE_IF_NULL(lp, false);
 1602  // We will modify the matrix transpose and then push the change to the linear
 1603  // program by calling lp->UseTransposeMatrixAsReference(). Note
 1604  // that original_matrix will not change during this preprocessor run.
 1605  const SparseMatrix& original_matrix = lp->GetSparseMatrix();
 1606  SparseMatrix* transpose = lp->GetMutableTransposeSparseMatrix();
 1607 
 1608  const ColIndex num_cols(lp->num_variables());
 1609  for (ColIndex doubleton_col(0); doubleton_col < num_cols; ++doubleton_col) {
 1610  // Only consider doubleton free columns.
 1611  if (original_matrix.column(doubleton_col).num_entries() != 2) continue;
 1612  if (lp->variable_lower_bounds()[doubleton_col] != -kInfinity) continue;
 1613  if (lp->variable_upper_bounds()[doubleton_col] != kInfinity) continue;
 1614 
 1615  // Collect the two column items. Note that we skip a column involving a
 1616  // deleted row since it is no longer a doubleton then.
 1617  RestoreInfo r;
 1618  r.col = doubleton_col;
 1619  r.objective_coefficient = lp->objective_coefficients()[r.col];
 1620  int index = 0;
 1621  for (const SparseColumn::Entry e : original_matrix.column(r.col)) {
 1622  if (row_deletion_helper_.IsRowMarked(e.row())) break;
 1623  r.row[index] = e.row();
 1624  r.coeff[index] = e.coefficient();
 1625  DCHECK_NE(0.0, e.coefficient());
 1626  ++index;
 1627  }
 1628  if (index != NUM_ROWS) continue;
 1629 
 1630  // Since the column didn't touch any previously deleted row, we are sure
 1631  // that the coefficients were left untouched.
 1632  DCHECK_EQ(r.coeff[DELETED], transpose->column(RowToColIndex(r.row[DELETED]))
 1633  .LookUpCoefficient(ColToRowIndex(r.col)));
 1634  DCHECK_EQ(r.coeff[MODIFIED],
 1635  transpose->column(RowToColIndex(r.row[MODIFIED]))
 1636  .LookUpCoefficient(ColToRowIndex(r.col)));
 1637 
 1638  // We prefer deleting the row with the larger coefficient magnitude because
 1639  // we will divide by this magnitude. TODO(user): Impact?
 1640  if (std::abs(r.coeff[DELETED]) < std::abs(r.coeff[MODIFIED])) {
 1641  std::swap(r.coeff[DELETED], r.coeff[MODIFIED]);
 1642  std::swap(r.row[DELETED], r.row[MODIFIED]);
 1643  }
 1644 
 1645  // Save the deleted row for postsolve. Note that we remove it from the
 1646  // transpose at the same time. This last operation is not strictly needed,
 1647  // but it is faster to do it this way (both here and later when we will take
 1648  // the transpose of the final transpose matrix).
 1649  r.deleted_row_as_column.Swap(
 1650  transpose->mutable_column(RowToColIndex(r.row[DELETED])));
 1651 
 1652  // Move the bound of the deleted constraint to the initially free variable.
 1653  {
 1654  Fractional new_variable_lb =
 1655  lp->constraint_lower_bounds()[r.row[DELETED]];
 1656  Fractional new_variable_ub =
 1657  lp->constraint_upper_bounds()[r.row[DELETED]];
 1658  new_variable_lb /= r.coeff[DELETED];
 1659  new_variable_ub /= r.coeff[DELETED];
 1660  if (r.coeff[DELETED] < 0.0) std::swap(new_variable_lb, new_variable_ub);
 1661  lp->SetVariableBounds(r.col, new_variable_lb, new_variable_ub);
 1662  }
 1663 
 1664  // Add a multiple of the deleted row to the modified row except on
 1665  // column r.col where the coefficient will be left unchanged.
 1666  r.deleted_row_as_column.AddMultipleToSparseVectorAndIgnoreCommonIndex(
 1667  -r.coeff[MODIFIED] / r.coeff[DELETED], ColToRowIndex(r.col),
 1669  transpose->mutable_column(RowToColIndex(r.row[MODIFIED])));
 1670 
 1671  // We also need to correct the objective value of the variables involved in
 1672  // the deleted row.
 1673  if (r.objective_coefficient != 0.0) {
 1674  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
 1675  const ColIndex col = RowToColIndex(e.row());
 1676  if (col == r.col) continue;
 1677  const Fractional new_objective =
 1678  lp->objective_coefficients()[col] -
 1679  e.coefficient() * r.objective_coefficient / r.coeff[DELETED];
 1680 
 1681  // This detects if the objective should actually be zero, but because of
 1682  // the numerical error in the formula above, we have a really low
 1683  // objective instead. The logic is the same as in
 1684  // AddMultipleToSparseVectorAndIgnoreCommonIndex().
 1685  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
 1686  lp->SetObjectiveCoefficient(col, new_objective);
 1687  } else {
 1688  lp->SetObjectiveCoefficient(col, 0.0);
 1689  }
 1690  }
 1691  }
 1692  row_deletion_helper_.MarkRowForDeletion(r.row[DELETED]);
 1693  restore_stack_.push_back(r);
 1694  }
 1695 
 1696  if (!row_deletion_helper_.IsEmpty()) {
 1697  // The order is important.
 1699  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
 1700  return true;
 1701  }
 1702  return false;
 1703 }
1704 
// Post-solve for the doubleton-equality-row reduction: re-adds the deleted
// rows, then walks the restore records in reverse and (1) derives the
// restored constraint status from the substituted variable's status,
// (2) recomputes that variable's primal value from the saved deleted row, and
// (3) chooses the deleted row's dual value so the variable's reduced cost is
// zero once the variable is made basic.
//
// NOTE(review): this is a doxygen listing; gaps in the embedded source line
// numbers (1705, 1707, 1712, 1714, 1716, 1719, 1721, 1724, 1732) are lines
// dropped by the extraction — the function signature, the 'case
// VariableStatus::...:' labels and some enum operands. Consult the original
// preprocessor.cc before editing.
1706  ProblemSolution* solution) const {
1708  row_deletion_helper_.RestoreDeletedRows(solution);
1709  for (const RestoreInfo& r : Reverse(restore_stack_)) {
1710  // Correct the constraint status.
1711  switch (solution->variable_statuses[r.col]) {
1713  solution->constraint_statuses[r.row[DELETED]] =
1715  break;
1717  solution->constraint_statuses[r.row[DELETED]] =
1718  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1720  break;
1722  solution->constraint_statuses[r.row[DELETED]] =
1723  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1725  break;
1726  case VariableStatus::FREE:
1727  solution->constraint_statuses[r.row[DELETED]] = ConstraintStatus::FREE;
1728  break;
1729  case VariableStatus::BASIC:
1730  // The default is good here:
1731  DCHECK_EQ(solution->constraint_statuses[r.row[DELETED]],
1733  break;
1734  }
1735 
1736  // Correct the primal variable value.
// The deleted row held: coeff[DELETED] * x_col + sum(other terms) = rhs, so
// x_col is recovered by subtracting the other terms scaled by 1/coeff.
1737  {
1738  Fractional new_variable_value = solution->primal_values[r.col];
1739  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1740  const ColIndex col = RowToColIndex(e.row());
1741  if (col == r.col) continue;
1742  new_variable_value -= (e.coefficient() / r.coeff[DELETED]) *
1743  solution->primal_values[RowToColIndex(e.row())];
1744  }
1745  solution->primal_values[r.col] = new_variable_value;
1746  }
1747 
1748  // In all cases, we will make the variable r.col VariableStatus::BASIC, so
1749  // we need to adjust the dual value of the deleted row so that the variable
1750  // reduced cost is zero. Note that there is nothing to do if the variable
1751  // was already basic.
1752  if (solution->variable_statuses[r.col] != VariableStatus::BASIC) {
1753  solution->variable_statuses[r.col] = VariableStatus::BASIC;
1754  Fractional current_reduced_cost =
1755  r.objective_coefficient -
1756  r.coeff[MODIFIED] * solution->dual_values[r.row[MODIFIED]];
1757  // We want current_reduced_cost - dual * coeff = 0, so:
1758  solution->dual_values[r.row[DELETED]] =
1759  current_reduced_cost / r.coeff[DELETED];
1760  } else {
1761  DCHECK_EQ(solution->dual_values[r.row[DELETED]], 0.0);
1762  }
1763  }
1764 }
1765 
1766 // --------------------------------------------------------
1767 // UnconstrainedVariablePreprocessor
1768 // --------------------------------------------------------
1769 
1770 namespace {
1771 
1772 // Does the constraint block the variable to go to infinity in the given
1773 // direction? direction is either positive or negative and row is the index of
1774 // the constraint.
1775 bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction,
1776  RowIndex row) {
// A positive direction is blocked iff the constraint has a finite upper
// bound. NOTE(review): source line 1778 (the negative-direction branch of
// this ternary) was dropped by the doc extraction — presumably it tests
// lp.constraint_lower_bounds()[row] != -kInfinity; confirm in the original
// preprocessor.cc.
1777  return direction > 0.0 ? lp.constraint_upper_bounds()[row] != kInfinity
1779 }
1780 
1781 } // namespace
1782 
// Removes a zero-cost variable (DCHECK'd below) that is unbounded towards
// target_bound, along with every constraint it appears in. For each touched
// row we save the row for the post-solve, record which constraint bound is
// relevant (rhs_) and an activity sign correction, then mark the column for
// deletion at a finite feasible value.
//
// NOTE(review): doxygen extraction dropped source line 1783 (the line naming
// this method), line 1799 (the saved-column argument of SaveColumn) and line
// 1815 (the bound arguments of MinInMagnitudeOrZeroIfInfinite).
1784  ColIndex col, Fractional target_bound, LinearProgram* lp) {
1785  DCHECK_EQ(0.0, lp->objective_coefficients()[col]);
// Lazily size the per-run bookkeeping on first use.
1786  if (rhs_.empty()) {
1787  rhs_.resize(lp->num_constraints(), 0.0);
1788  activity_sign_correction_.resize(lp->num_constraints(), 1.0);
1789  is_unbounded_.resize(lp->num_variables(), false);
1790  }
1791  const bool is_unbounded_up = (target_bound == kInfinity);
1792  const SparseColumn& column = lp->GetSparseColumn(col);
1793  for (const SparseColumn::Entry e : column) {
1794  const RowIndex row = e.row();
1795  if (!row_deletion_helper_.IsRowMarked(row)) {
1796  row_deletion_helper_.MarkRowForDeletion(row);
1797  rows_saver_.SaveColumn(
1798  RowToColIndex(row),
1800  }
// Which constraint bound matters depends on the entry's sign and the
// direction in which the variable is unbounded.
1801  const bool is_constraint_upper_bound_relevant =
1802  e.coefficient() > 0.0 ? !is_unbounded_up : is_unbounded_up;
1803  activity_sign_correction_[row] =
1804  is_constraint_upper_bound_relevant ? 1.0 : -1.0;
1805  rhs_[row] = is_constraint_upper_bound_relevant
1806  ? lp->constraint_upper_bounds()[row]
1807  : lp->constraint_lower_bounds()[row];
1808 
1809  // TODO(user): Here, we may render the row free, so subsequent columns
1810  // processed by the columns loop in Run() have more chance to be removed.
1811  // However, we need to be more careful during the postsolve() if we do that.
1812  }
1813  is_unbounded_[col] = true;
1814  Fractional initial_feasible_value = MinInMagnitudeOrZeroIfInfinite(
1816  column_deletion_helper_.MarkColumnForDeletionWithState(
1817  col, initial_feasible_value,
1818  ComputeVariableStatus(initial_feasible_value,
1819  lp->variable_lower_bounds()[col],
1820  lp->variable_upper_bounds()[col]));
1821 }
1822 
// UnconstrainedVariablePreprocessor::Run: propagates bounds on the dual
// values row by row, derives bounds on each column's reduced cost, and
// removes variables whose reduced-cost interval excludes zero (fixing them to
// the corresponding bound) or which are provably unblocked (removing the
// variable and all its rows). Detects INFEASIBLE_OR_UNBOUNDED when a variable
// with an infinite target bound has a reduced cost bounded away from zero.
//
// NOTE(review): doxygen extraction dropped source lines 1823-1824 (function
// signature and scoped-time macro), 1874 (the objective-coefficient
// expression), 1879-1880 (the declarations of the rc_lb/rc_ub accumulators),
// 1900 (the target_bound declaration), 1936 and 1962 (status_/removal calls).
// Consult the original preprocessor.cc before editing.
1825  RETURN_VALUE_IF_NULL(lp, false);
1826 
1827  // To simplify the problem if something is almost zero, we use the low
1828  // tolerance (1e-9 by default) to be defensive. But to detect an infeasibility
1829  // we want to be sure (especially since the problem is not scaled in the
1830  // presolver) so we use an higher tolerance.
1831  //
1832  // TODO(user): Expose it as a parameter. We could rename both to
1833  // preprocessor_low_tolerance and preprocessor_high_tolerance.
1834  const Fractional low_tolerance = parameters_.preprocessor_zero_tolerance();
1835  const Fractional high_tolerance = 1e-4;
1836 
1837  // We start by the dual variable bounds from the constraints.
1838  const RowIndex num_rows = lp->num_constraints();
1839  dual_lb_.assign(num_rows, -kInfinity);
1840  dual_ub_.assign(num_rows, kInfinity);
1841  for (RowIndex row(0); row < num_rows; ++row) {
1842  if (lp->constraint_lower_bounds()[row] == -kInfinity) {
1843  dual_ub_[row] = 0.0;
1844  }
1845  if (lp->constraint_upper_bounds()[row] == kInfinity) {
1846  dual_lb_[row] = 0.0;
1847  }
1848  }
1849 
1850  const ColIndex num_cols = lp->num_variables();
1851  may_have_participated_lb_.assign(num_cols, false);
1852  may_have_participated_ub_.assign(num_cols, false);
1853 
1854  // We maintain a queue of columns to process.
1855  std::deque<ColIndex> columns_to_process;
1856  DenseBooleanRow in_columns_to_process(num_cols, true);
1857  std::vector<RowIndex> changed_rows;
1858  for (ColIndex col(0); col < num_cols; ++col) {
1859  columns_to_process.push_back(col);
1860  }
1861 
1862  // Arbitrary limit to avoid corner cases with long loops.
1863  // TODO(user): expose this as a parameter? IMO it isn't really needed as we
1864  // shouldn't reach this limit except in corner cases.
1865  const int limit = 5 * num_cols.value();
1866  for (int count = 0; !columns_to_process.empty() && count < limit; ++count) {
1867  const ColIndex col = columns_to_process.front();
1868  columns_to_process.pop_front();
1869  in_columns_to_process[col] = false;
1870  if (column_deletion_helper_.IsColumnMarked(col)) continue;
1871 
1872  const SparseColumn& column = lp->GetSparseColumn(col);
1873  const Fractional col_cost =
1875  const Fractional col_lb = lp->variable_lower_bounds()[col];
1876  const Fractional col_ub = lp->variable_upper_bounds()[col];
1877 
1878  // Compute the bounds on the reduced costs of this column.
// rc = cost - sum(coeff * dual); a positive coeff pairs the rc lower bound
// with the dual upper bound and vice versa.
1881  rc_lb.Add(col_cost);
1882  rc_ub.Add(col_cost);
1883  for (const SparseColumn::Entry e : column) {
1884  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1885  const Fractional coeff = e.coefficient();
1886  if (coeff > 0.0) {
1887  rc_lb.Add(-coeff * dual_ub_[e.row()]);
1888  rc_ub.Add(-coeff * dual_lb_[e.row()]);
1889  } else {
1890  rc_lb.Add(-coeff * dual_lb_[e.row()]);
1891  rc_ub.Add(-coeff * dual_ub_[e.row()]);
1892  }
1893  }
1894 
1895  // If the reduced cost domain do not contain zero (modulo the tolerance), we
1896  // can move the variable to its corresponding bound. Note that we need to be
1897  // careful that this variable didn't participate in creating the used
1898  // reduced cost bound in the first place.
1899  bool can_be_removed = false;
1901  bool rc_is_away_from_zero;
1902  if (rc_ub.Sum() <= low_tolerance) {
1903  can_be_removed = true;
1904  target_bound = col_ub;
1905  rc_is_away_from_zero = rc_ub.Sum() <= -high_tolerance;
1906  can_be_removed = !may_have_participated_ub_[col];
1907  }
1908  if (rc_lb.Sum() >= -low_tolerance) {
1909  // The second condition is here for the case we can choose one of the two
1910  // directions.
1911  if (!can_be_removed || !IsFinite(target_bound)) {
1912  can_be_removed = true;
1913  target_bound = col_lb;
1914  rc_is_away_from_zero = rc_lb.Sum() >= high_tolerance;
1915  can_be_removed = !may_have_participated_lb_[col];
1916  }
1917  }
1918 
1919  if (can_be_removed) {
1920  if (IsFinite(target_bound)) {
1921  // Note that in MIP context, this assumes that the bounds of an integer
1922  // variable are integer.
1923  column_deletion_helper_.MarkColumnForDeletionWithState(
1924  col, target_bound,
1925  ComputeVariableStatus(target_bound, col_lb, col_ub));
1926  continue;
1927  }
1928 
1929  // If the target bound is infinite and the reduced cost bound is non-zero,
1930  // then the problem is ProblemStatus::INFEASIBLE_OR_UNBOUNDED.
1931  if (rc_is_away_from_zero) {
1932  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, variable " << col
1933  << " can move to " << target_bound
1934  << " and its reduced cost is in [" << rc_lb.Sum() << ", "
1935  << rc_ub.Sum() << "]";
1937  return false;
1938  } else {
1939  // We can remove this column and all its constraints! We just need to
1940  // choose proper variable values during the call to RecoverSolution()
1941  // that make all the constraints satisfiable. Unfortunately, this is not
1942  // so easy to do in the general case, so we only deal with a simpler
1943  // case when the cost of the variable is zero, and the constraints do
1944  // not block it in one direction.
1945  //
1946  // TODO(user): deal with the more generic case.
1947  if (col_cost != 0.0) continue;
1948  bool skip = false;
1949  for (const SparseColumn::Entry e : column) {
1950  // Note that it is important to check the rows that are already
1951  // deleted here, otherwise the post-solve will not work.
1952  if (IsConstraintBlockingVariable(*lp, e.coefficient(), e.row())) {
1953  skip = true;
1954  break;
1955  }
1956  }
1957  if (skip) continue;
1958 
1959  // TODO(user): this also works if the variable is integer, but we must
1960  // choose an integer value during the post-solve. Implement this.
1961  if (in_mip_context_) continue;
1963  continue;
1964  }
1965  }
1966 
1967  // The rest of the code will update the dual bounds. There is no need to do
1968  // it if the column was removed or if it is not unconstrained in some
1969  // direction.
1970  DCHECK(!can_be_removed);
1971  if (col_lb != -kInfinity && col_ub != kInfinity) continue;
1972 
1973  // For MIP, we only exploit the constraints. TODO(user): It should probably
1974  // work with only small modification, investigate.
1975  if (in_mip_context_) continue;
1976 
// Tighten the dual bounds of each row this column touches, using the fact
// that the reduced cost must be <= 0 (resp. >= 0) when the variable is
// unbounded above (resp. below). Rows whose bounds changed re-queue every
// column they intersect.
1977  changed_rows.clear();
1978  for (const SparseColumn::Entry e : column) {
1979  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1980  const Fractional c = e.coefficient();
1981  const RowIndex row = e.row();
1982  if (col_ub == kInfinity) {
1983  if (c > 0.0) {
1984  const Fractional candidate =
1985  rc_ub.SumWithoutUb(-c * dual_lb_[row]) / c;
1986  if (candidate < dual_ub_[row]) {
1987  dual_ub_[row] = candidate;
1988  may_have_participated_lb_[col] = true;
1989  changed_rows.push_back(row);
1990  }
1991  } else {
1992  const Fractional candidate =
1993  rc_ub.SumWithoutUb(-c * dual_ub_[row]) / c;
1994  if (candidate > dual_lb_[row]) {
1995  dual_lb_[row] = candidate;
1996  may_have_participated_lb_[col] = true;
1997  changed_rows.push_back(row);
1998  }
1999  }
2000  }
2001  if (col_lb == -kInfinity) {
2002  if (c > 0.0) {
2003  const Fractional candidate =
2004  rc_lb.SumWithoutLb(-c * dual_ub_[row]) / c;
2005  if (candidate > dual_lb_[row]) {
2006  dual_lb_[row] = candidate;
2007  may_have_participated_ub_[col] = true;
2008  changed_rows.push_back(row);
2009  }
2010  } else {
2011  const Fractional candidate =
2012  rc_lb.SumWithoutLb(-c * dual_lb_[row]) / c;
2013  if (candidate < dual_ub_[row]) {
2014  dual_ub_[row] = candidate;
2015  may_have_participated_ub_[col] = true;
2016  changed_rows.push_back(row);
2017  }
2018  }
2019  }
2020  }
2021 
2022  if (!changed_rows.empty()) {
2023  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2024  for (const RowIndex row : changed_rows) {
2025  for (const SparseColumn::Entry entry :
2026  transpose.column(RowToColIndex(row))) {
2027  const ColIndex col = RowToColIndex(entry.row());
2028  if (!in_columns_to_process[col]) {
2029  columns_to_process.push_back(col);
2030  in_columns_to_process[col] = true;
2031  }
2032  }
2033  }
2034  }
2035  }
2036 
2037  // Change the rhs to reflect the fixed variables. Note that is important to do
2038  // that after all the calls to RemoveZeroCostUnconstrainedVariable() because
2039  // RemoveZeroCostUnconstrainedVariable() needs to store the rhs before this
2040  // modification!
2041  const ColIndex end = column_deletion_helper_.GetMarkedColumns().size();
2042  for (ColIndex col(0); col < end; ++col) {
2043  if (column_deletion_helper_.IsColumnMarked(col)) {
2044  const Fractional target_bound =
2045  column_deletion_helper_.GetStoredValue()[col];
2046  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
2047  }
2048  }
2049 
2050  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2051  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2052  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2053 }
2054 
// UnconstrainedVariablePreprocessor::RecoverSolution: restores the deleted
// columns and rows, then for each deleted row finds the last unbounded
// variable it contains and, grouping by variable, shifts that variable's
// primal value just enough that every restored constraint's activity agrees
// in sign with the recorded activity_sign_correction_.
//
// NOTE(review): doxygen extraction dropped source lines 2055/2057 (function
// signature and scoped-time macro), 2065 (the DeletionEntry coefficient
// field), 2135 and 2138-2139 (the variable-status assignment and the ternary
// operands of the constraint status). Consult the original preprocessor.cc.
2056  ProblemSolution* solution) const {
2058  RETURN_IF_NULL(solution);
2059  column_deletion_helper_.RestoreDeletedColumns(solution);
2060  row_deletion_helper_.RestoreDeletedRows(solution);
2061 
2062  struct DeletionEntry {
2063  RowIndex row;
2064  ColIndex col;
2066  };
2067  std::vector<DeletionEntry> entries;
2068 
2069  // Compute the last deleted column index for each deleted rows.
2070  const RowIndex num_rows = solution->dual_values.size();
2071  RowToColMapping last_deleted_column(num_rows, kInvalidCol);
2072  for (RowIndex row(0); row < num_rows; ++row) {
2073  if (!row_deletion_helper_.IsRowMarked(row)) continue;
2074 
2075  ColIndex last_col = kInvalidCol;
2076  Fractional last_coefficient;
2077  for (const SparseColumn::Entry e :
2078  rows_saver_.SavedColumn(RowToColIndex(row))) {
2079  const ColIndex col = RowToColIndex(e.row());
2080  if (is_unbounded_[col]) {
2081  last_col = col;
2082  last_coefficient = e.coefficient();
2083  }
2084  }
2085  if (last_col != kInvalidCol) {
2086  entries.push_back({row, last_col, last_coefficient});
2087  }
2088  }
2089 
2090  // Sort by col first and then row.
2091  std::sort(entries.begin(), entries.end(),
2092  [](const DeletionEntry& a, const DeletionEntry& b) {
2093  if (a.col == b.col) return a.row < b.row;
2094  return a.col < b.col;
2095  });
2096 
2097  // Note that this will be empty if there were no deleted rows.
2098  for (int i = 0; i < entries.size();) {
2099  const ColIndex col = entries[i].col;
2100  CHECK(is_unbounded_[col]);
2101 
// Scan all rows sharing this column; keep the largest-magnitude shift needed
// to repair a sign violation, and remember which row imposes it.
2102  Fractional primal_value_shift = 0.0;
2103  RowIndex row_at_bound = kInvalidRow;
2104  for (; i < entries.size(); ++i) {
2105  if (entries[i].col != col) break;
2106  const RowIndex row = entries[i].row;
2107 
2108  // This is for VariableStatus::FREE rows.
2109  //
2110  // TODO(user): In presence of free row, we must move them to 0.
2111  // Note that currently VariableStatus::FREE rows should be removed before
2112  // this is called.
2113  DCHECK(IsFinite(rhs_[row]));
2114  if (!IsFinite(rhs_[row])) continue;
2115 
2116  const SparseColumn& row_as_column =
2117  rows_saver_.SavedColumn(RowToColIndex(row));
2118  const Fractional activity =
2119  rhs_[row] - ScalarProduct(solution->primal_values, row_as_column);
2120 
2121  // activity and sign correction must have the same sign or be zero. If
2122  // not, we find the first unbounded variable and change it accordingly.
2123  // Note that by construction, the variable value will move towards its
2124  // unbounded direction.
2125  if (activity * activity_sign_correction_[row] < 0.0) {
2126  const Fractional bound = activity / entries[i].coefficient;
2127  if (std::abs(bound) > std::abs(primal_value_shift)) {
2128  primal_value_shift = bound;
2129  row_at_bound = row;
2130  }
2131  }
2132  }
2133  solution->primal_values[col] += primal_value_shift;
2134  if (row_at_bound != kInvalidRow) {
2136  solution->constraint_statuses[row_at_bound] =
2137  activity_sign_correction_[row_at_bound] == 1.0
2140  }
2141  }
2142 }
2143 
2144 // --------------------------------------------------------
2145 // FreeConstraintPreprocessor
2146 // --------------------------------------------------------
2147 
// FreeConstraintPreprocessor::Run: deletes every constraint whose two bounds
// are infinite (a "free" row that can never be binding). Returns true iff at
// least one row was removed.
//
// NOTE(review): doxygen extraction dropped source lines 2148-2149 (function
// signature / macro) and 2153-2154 (the lower_bound/upper_bound
// declarations read by the condition below).
2150  RETURN_VALUE_IF_NULL(lp, false);
2151  const RowIndex num_rows = lp->num_constraints();
2152  for (RowIndex row(0); row < num_rows; ++row) {
2155  if (lower_bound == -kInfinity && upper_bound == kInfinity) {
2156  row_deletion_helper_.MarkRowForDeletion(row);
2157  }
2158  }
2159  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2160  return !row_deletion_helper_.IsEmpty();
2161 }
2162 
// FreeConstraintPreprocessor::RecoverSolution: post-solve simply re-inserts
// the deleted free rows into the solution.
// NOTE(review): the signature line (source line 2163) and line 2165 were
// dropped by the doc extraction.
2164  ProblemSolution* solution) const {
2166  RETURN_IF_NULL(solution);
2167  row_deletion_helper_.RestoreDeletedRows(solution);
2168 }
2169 
2170 // --------------------------------------------------------
2171 // EmptyConstraintPreprocessor
2172 // --------------------------------------------------------
2173 
// EmptyConstraintPreprocessor::Run: counts the number of entries per row and
// deletes rows of degree zero. An empty row whose bound range does not
// contain 0 makes the problem PRIMAL_INFEASIBLE.
//
// NOTE(review): doxygen extraction dropped source lines 2174-2175 (function
// signature / macro), 2193 and 2195 (the tolerance-comparison calls of the
// 'if' below) and 2201 (the status_ assignment before 'return false').
2176  RETURN_VALUE_IF_NULL(lp, false);
2177  const RowIndex num_rows(lp->num_constraints());
2178  const ColIndex num_cols(lp->num_variables());
2179 
2180  // Compute degree.
2181  StrictITIVector<RowIndex, int> degree(num_rows, 0);
2182  for (ColIndex col(0); col < num_cols; ++col) {
2183  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2184  ++degree[e.row()];
2185  }
2186  }
2187 
2188  // Delete degree 0 rows.
2189  for (RowIndex row(0); row < num_rows; ++row) {
2190  if (degree[row] == 0) {
2191  // We need to check that 0.0 is allowed by the constraint bounds,
2192  // otherwise, the problem is ProblemStatus::PRIMAL_INFEASIBLE.
2194  lp->constraint_lower_bounds()[row], 0) ||
2196  0, lp->constraint_upper_bounds()[row])) {
2197  VLOG(1) << "Problem PRIMAL_INFEASIBLE, constraint " << row
2198  << " is empty and its range ["
2199  << lp->constraint_lower_bounds()[row] << ","
2200  << lp->constraint_upper_bounds()[row] << "] doesn't contain 0.";
2202  return false;
2203  }
2204  row_deletion_helper_.MarkRowForDeletion(row);
2205  }
2206  }
2207  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2208  return !row_deletion_helper_.IsEmpty();
2209 }
2210 
// EmptyConstraintPreprocessor::RecoverSolution: post-solve simply re-inserts
// the deleted empty rows into the solution.
// NOTE(review): the signature line (source line 2211) and line 2213 were
// dropped by the doc extraction.
2212  ProblemSolution* solution) const {
2214  RETURN_IF_NULL(solution);
2215  row_deletion_helper_.RestoreDeletedRows(solution);
2216 }
2217 
2218 // --------------------------------------------------------
2219 // SingletonPreprocessor
2220 // --------------------------------------------------------
2221 
// SingletonUndo constructor: snapshots everything RecoverSolution will need
// to undo one singleton reduction — the undo type, the objective sense, the
// matrix entry, the variable's cost and bounds, the constraint's bounds and
// the constraint status to restore.
// NOTE(review): the first signature line (source line 2222, naming the
// constructor and its first parameters) was dropped by the doc extraction.
2223  MatrixEntry e, ConstraintStatus status)
2224  : type_(type),
2225  is_maximization_(lp.IsMaximizationProblem()),
2226  e_(e),
2227  cost_(lp.objective_coefficients()[e.col]),
2228  variable_lower_bound_(lp.variable_lower_bounds()[e.col]),
2229  variable_upper_bound_(lp.variable_upper_bounds()[e.col]),
2230  constraint_lower_bound_(lp.constraint_lower_bounds()[e.row]),
2231  constraint_upper_bound_(lp.constraint_upper_bounds()[e.row]),
2232  constraint_status_(status) {}
2233 
// SingletonUndo::Undo: dispatches to the undo routine matching the type of
// singleton reduction recorded at construction time.
// NOTE(review): doxygen extraction dropped the first signature line (source
// line 2234) and the 'case ...:' labels at lines 2242, 2245 and 2248; the
// handlers below identify the intended cases.
2235  const SparseColumn& saved_column,
2236  const SparseColumn& saved_row,
2237  ProblemSolution* solution) const {
2238  switch (type_) {
2239  case SINGLETON_ROW:
2240  SingletonRowUndo(saved_column, solution);
2241  break;
2243  ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2244  break;
2246  SingletonColumnInEqualityUndo(parameters, saved_row, solution);
2247  break;
2249  MakeConstraintAnEqualityUndo(solution);
2250  break;
2251  }
2252 }
2253 
// Deletes a singleton row (a constraint with a single entry e) by folding the
// constraint bounds, divided by the coefficient, into the variable's bounds.
// Bounds are only tightened when they improve on the old ones by more than
// the scaled preprocessor zero tolerance; a crossing pair of bounds within
// the feasibility tolerance is repaired by fixing the variable.
2254 void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e,
2255  LinearProgram* lp) {
2256  Fractional implied_lower_bound =
2257  lp->constraint_lower_bounds()[e.row] / e.coeff;
2258  Fractional implied_upper_bound =
2259  lp->constraint_upper_bounds()[e.row] / e.coeff;
// Dividing by a negative coefficient flips the interval.
2260  if (e.coeff < 0.0) {
2261  std::swap(implied_lower_bound, implied_upper_bound);
2262  }
2263 
2264  const Fractional old_lower_bound = lp->variable_lower_bounds()[e.col];
2265  const Fractional old_upper_bound = lp->variable_upper_bounds()[e.col];
2266 
2267  const Fractional potential_error =
2268  std::abs(parameters_.preprocessor_zero_tolerance() / e.coeff);
2269  Fractional new_lower_bound =
2270  implied_lower_bound - potential_error > old_lower_bound
2271  ? implied_lower_bound
2272  : old_lower_bound;
2273  Fractional new_upper_bound =
2274  implied_upper_bound + potential_error < old_upper_bound
2275  ? implied_upper_bound
2276  : old_upper_bound;
2277 
2278  if (new_upper_bound < new_lower_bound) {
2279  if (!IsSmallerWithinFeasibilityTolerance(new_lower_bound,
2280  new_upper_bound)) {
// NOTE(review): source line 2285 (presumably the status_ assignment to
// INFEASIBLE_OR_UNBOUNDED before the return) was dropped by the doc
// extraction — confirm in the original preprocessor.cc.
2281  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2282  "row causes the bound of the variable "
2283  << e.col << " to be infeasible by "
2284  << new_lower_bound - new_upper_bound;
2286  return;
2287  }
2288  // Otherwise, fix the variable to one of its bounds.
2289  if (new_lower_bound == lp->variable_lower_bounds()[e.col]) {
2290  new_upper_bound = new_lower_bound;
2291  }
2292  if (new_upper_bound == lp->variable_upper_bounds()[e.col]) {
2293  new_lower_bound = new_upper_bound;
2294  }
2295  DCHECK_EQ(new_lower_bound, new_upper_bound);
2296  }
// NOTE(review): source line 2299 (the last constructor argument of
// SingletonUndo, a ConstraintStatus) was dropped by the doc extraction.
2297  row_deletion_helper_.MarkRowForDeletion(e.row);
2298  undo_stack_.push_back(SingletonUndo(SingletonUndo::SINGLETON_ROW, *lp, e,
2300  columns_saver_.SaveColumnIfNotAlreadyDone(e.col, lp->GetSparseColumn(e.col));
2301 
2302  lp->SetVariableBounds(e.col, new_lower_bound, new_upper_bound);
2303 }
2304 
2305 // The dual value of the row needs to be corrected to stay at the optimal.
// Undoes DeleteSingletonRow(): if the re-added singleton constraint changed
// the variable's effective bounds and the variable sits on a changed bound,
// the variable becomes basic, its reduced cost is transferred to the row's
// dual value, and the constraint takes over the corresponding bound status
// (flipped when the coefficient is negative).
// NOTE(review): doxygen extraction dropped source lines 2358-2359 (the two
// ConstraintStatus operands of the ternary assigning new_constraint_status).
2306 void SingletonUndo::SingletonRowUndo(const SparseColumn& saved_column,
2307  ProblemSolution* solution) const {
2308  DCHECK_EQ(0, solution->dual_values[e_.row]);
2309 
2310  // If the variable is basic or free, we can just keep the constraint
2311  // VariableStatus::BASIC and
2312  // 0.0 as the dual value.
2313  const VariableStatus status = solution->variable_statuses[e_.col];
2314  if (status == VariableStatus::BASIC || status == VariableStatus::FREE) return;
2315 
2316  // Compute whether or not the variable bounds changed.
2317  Fractional implied_lower_bound = constraint_lower_bound_ / e_.coeff;
2318  Fractional implied_upper_bound = constraint_upper_bound_ / e_.coeff;
2319  if (e_.coeff < 0.0) {
2320  std::swap(implied_lower_bound, implied_upper_bound);
2321  }
2322  const bool lower_bound_changed = implied_lower_bound > variable_lower_bound_;
2323  const bool upper_bound_changed = implied_upper_bound < variable_upper_bound_;
2324 
2325  if (!lower_bound_changed && !upper_bound_changed) return;
2326  if (status == VariableStatus::AT_LOWER_BOUND && !lower_bound_changed) return;
2327  if (status == VariableStatus::AT_UPPER_BOUND && !upper_bound_changed) return;
2328 
2329  // This is the reduced cost of the variable before the singleton constraint is
2330  // added back.
2331  const Fractional reduced_cost =
2332  cost_ - ScalarProduct(solution->dual_values, saved_column);
2333  const Fractional reduced_cost_for_minimization =
2334  is_maximization_ ? -reduced_cost : reduced_cost;
2335 
// A FIXED_VALUE variable whose other (unchanged) bound is dual-feasible can
// simply be re-labelled as sitting at that bound.
2336  if (status == VariableStatus::FIXED_VALUE) {
2337  DCHECK(lower_bound_changed || upper_bound_changed);
2338  if (reduced_cost_for_minimization >= 0.0 && !lower_bound_changed) {
2339  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2340  return;
2341  }
2342  if (reduced_cost_for_minimization <= 0.0 && !upper_bound_changed) {
2343  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2344  return;
2345  }
2346  }
2347 
2348  // If one of the variable bounds changes, and the variable is no longer at one
2349  // of its bounds, then its reduced cost needs to be set to 0.0 and the
2350  // variable becomes a basic variable. This is what the line below do, since
2351  // the new reduced cost of the variable will be equal to:
2352  // old_reduced_cost - coeff * solution->dual_values[row]
2353  solution->dual_values[e_.row] = reduced_cost / e_.coeff;
2354  ConstraintStatus new_constraint_status = VariableToConstraintStatus(status);
2355  if (status == VariableStatus::FIXED_VALUE &&
2356  (!lower_bound_changed || !upper_bound_changed)) {
2357  new_constraint_status = lower_bound_changed
2360  }
2361  if (e_.coeff < 0.0) {
2362  if (new_constraint_status == ConstraintStatus::AT_LOWER_BOUND) {
2363  new_constraint_status = ConstraintStatus::AT_UPPER_BOUND;
2364  } else if (new_constraint_status == ConstraintStatus::AT_UPPER_BOUND) {
2365  new_constraint_status = ConstraintStatus::AT_LOWER_BOUND;
2366  }
2367  }
2368  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2369  solution->constraint_statuses[e_.row] = new_constraint_status;
2370 }
2371 
2372 void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds(
2373  MatrixEntry e, LinearProgram* lp) {
2374  Fractional lower_delta = -e.coeff * lp->variable_upper_bounds()[e.col];
2375  Fractional upper_delta = -e.coeff * lp->variable_lower_bounds()[e.col];
2376  if (e.coeff < 0.0) {
2377  std::swap(lower_delta, upper_delta);
2378  }
2379  lp->SetConstraintBounds(e.row,
2380  lp->constraint_lower_bounds()[e.row] + lower_delta,
2381  lp->constraint_upper_bounds()[e.row] + upper_delta);
2382 }
2383 
// Returns true when an integer singleton column can be removed: every other
// variable in its constraint must be integer, every coefficient ratio (and
// each finite constraint bound divided by the singleton coefficient) must be
// integral within the solution feasibility tolerance, so that substituting
// the variable preserves integrality.
// NOTE(review): doxygen extraction dropped source lines 2386, 2399, 2408 and
// 2417 — the second DCHECK and the three integrality-test call heads whose
// argument lines remain visible below.
2384 bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable(
2385  const MatrixEntry& matrix_entry, const LinearProgram& lp) const {
2387  DCHECK(lp.IsVariableInteger(matrix_entry.col));
2388  const SparseMatrix& transpose = lp.GetTransposeSparseMatrix();
2389  for (const SparseColumn::Entry entry :
2390  transpose.column(RowToColIndex(matrix_entry.row))) {
2391  // Check if the variable is integer.
2392  if (!lp.IsVariableInteger(RowToColIndex(entry.row()))) {
2393  return false;
2394  }
2395 
2396  const Fractional coefficient = entry.coefficient();
2397  const Fractional coefficient_ratio = coefficient / matrix_entry.coeff;
2398  // Check if coefficient_ratio is integer.
2400  coefficient_ratio, parameters_.solution_feasibility_tolerance())) {
2401  return false;
2402  }
2403  }
2404  const Fractional constraint_lb =
2405  lp.constraint_lower_bounds()[matrix_entry.row];
2406  if (IsFinite(constraint_lb)) {
2407  const Fractional lower_bound_ratio = constraint_lb / matrix_entry.coeff;
2409  lower_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2410  return false;
2411  }
2412  }
2413  const Fractional constraint_ub =
2414  lp.constraint_upper_bounds()[matrix_entry.row];
2415  if (IsFinite(constraint_ub)) {
2416  const Fractional upper_bound_ratio = constraint_ub / matrix_entry.coeff;
2418  upper_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2419  return false;
2420  }
2421  }
2422  return true;
2423 }
2424 
2425 void SingletonPreprocessor::DeleteZeroCostSingletonColumn(
2426  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2427  const ColIndex transpose_col = RowToColIndex(e.row);
2428  undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN,
2429  *lp, e, ConstraintStatus::FREE));
2430  const SparseColumn& row_as_col = transpose.column(transpose_col);
2431  rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_col);
2432  UpdateConstraintBoundsWithVariableBounds(e, lp);
2433  column_deletion_helper_.MarkColumnForDeletion(e.col);
2434 }
2435 
2436 // We need to restore the variable value in order to satisfy the constraint.
// Undoes DeleteZeroCostSingletonColumn(): picks a primal value and status for
// the re-added variable so its constraint becomes satisfied — tried in order:
// fixed variable, constraint already at a bound, variable at one of its own
// bounds, free variable on a free constraint, and finally making the variable
// basic with the constraint pushed to one of its bounds.
// NOTE(review): doxygen extraction dropped source lines 2449 (likely a
// comment) and 2479 (the return expression of the tolerance-comparison
// lambda below) — consult the original preprocessor.cc before editing.
2437 void SingletonUndo::ZeroCostSingletonColumnUndo(
2438  const GlopParameters& parameters, const SparseColumn& saved_row,
2439  ProblemSolution* solution) const {
2440  // If the variable was fixed, this is easy. Note that this is the only
2441  // possible case if the current constraint status is FIXED.
2442  if (variable_upper_bound_ == variable_lower_bound_) {
2443  solution->primal_values[e_.col] = variable_lower_bound_;
2444  solution->variable_statuses[e_.col] = VariableStatus::FIXED_VALUE;
2445  return;
2446  }
2447 
2448  const ConstraintStatus ct_status = solution->constraint_statuses[e_.row];
2450  if (ct_status == ConstraintStatus::AT_LOWER_BOUND ||
2451  ct_status == ConstraintStatus::AT_UPPER_BOUND) {
2452  if ((ct_status == ConstraintStatus::AT_UPPER_BOUND && e_.coeff > 0.0) ||
2453  (ct_status == ConstraintStatus::AT_LOWER_BOUND && e_.coeff < 0.0)) {
2454  DCHECK(IsFinite(variable_lower_bound_));
2455  solution->primal_values[e_.col] = variable_lower_bound_;
2456  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2457  } else {
2458  DCHECK(IsFinite(variable_upper_bound_));
2459  solution->primal_values[e_.col] = variable_upper_bound_;
2460  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2461  }
2462  if (constraint_upper_bound_ == constraint_lower_bound_) {
2463  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2464  }
2465  return;
2466  }
2467 
2468  // This is the activity of the constraint before the singleton variable is
2469  // added back to it.
2470  const Fractional activity = ScalarProduct(solution->primal_values, saved_row);
2471 
2472  // First we try to fix the variable at its lower or upper bound and leave the
2473  // constraint VariableStatus::BASIC. Note that we use the same logic as in
2474  // Preprocessor::IsSmallerWithinPreprocessorZeroTolerance() which we can't use
2475  // here because we are not deriving from the Preprocessor class.
2476  const Fractional tolerance = parameters.preprocessor_zero_tolerance();
2477  const auto is_smaller_with_tolerance = [tolerance](Fractional a,
2478  Fractional b) {
2480  };
2481  if (variable_lower_bound_ != -kInfinity) {
2482  const Fractional activity_at_lb =
2483  activity + e_.coeff * variable_lower_bound_;
2484  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_lb) &&
2485  is_smaller_with_tolerance(activity_at_lb, constraint_upper_bound_)) {
2486  solution->primal_values[e_.col] = variable_lower_bound_;
2487  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2488  return;
2489  }
2490  }
2491  if (variable_upper_bound_ != kInfinity) {
2492  const Fractional activity_at_ub =
2493  activity + e_.coeff * variable_upper_bound_;
2494  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_ub) &&
2495  is_smaller_with_tolerance(activity_at_ub, constraint_upper_bound_)) {
2496  solution->primal_values[e_.col] = variable_upper_bound_;
2497  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2498  return;
2499  }
2500  }
2501 
2502  // If the current constraint is UNBOUNDED, then the variable is too
2503  // because of the two cases above. We just set its status to
2504  // VariableStatus::FREE.
2505  if (constraint_lower_bound_ == -kInfinity &&
2506  constraint_upper_bound_ == kInfinity) {
2507  solution->primal_values[e_.col] = 0.0;
2508  solution->variable_statuses[e_.col] = VariableStatus::FREE;
2509  return;
2510  }
2511 
2512  // If the previous cases didn't apply, the constraint will be fixed to its
2513  // bounds and the variable will be made VariableStatus::BASIC.
2514  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2515  if (constraint_lower_bound_ == constraint_upper_bound_) {
2516  solution->primal_values[e_.col] =
2517  (constraint_lower_bound_ - activity) / e_.coeff;
2518  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2519  return;
2520  }
2521 
2522  bool set_constraint_to_lower_bound;
2523  if (constraint_lower_bound_ == -kInfinity) {
2524  set_constraint_to_lower_bound = false;
2525  } else if (constraint_upper_bound_ == kInfinity) {
2526  set_constraint_to_lower_bound = true;
2527  } else {
2528  // In this case we select the value that is the most inside the variable
2529  // bound.
2530  const Fractional to_lb = (constraint_lower_bound_ - activity) / e_.coeff;
2531  const Fractional to_ub = (constraint_upper_bound_ - activity) / e_.coeff;
2532  set_constraint_to_lower_bound =
2533  std::max(variable_lower_bound_ - to_lb, to_lb - variable_upper_bound_) <
2534  std::max(variable_lower_bound_ - to_ub, to_ub - variable_upper_bound_);
2535  }
2536 
2537  if (set_constraint_to_lower_bound) {
2538  solution->primal_values[e_.col] =
2539  (constraint_lower_bound_ - activity) / e_.coeff;
2540  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_LOWER_BOUND;
2541  } else {
2542  solution->primal_values[e_.col] =
2543  (constraint_upper_bound_ - activity) / e_.coeff;
2544  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_UPPER_BOUND;
2545  }
2546 }
2547 
// Eliminates a singleton column whose (unique) row is an equality constraint
// v_col * coeff + expression = rhs: the variable's cost is folded into the
// objective offset and into the other columns of the row, then the column is
// deleted like a zero-cost singleton column.
2548 void SingletonPreprocessor::DeleteSingletonColumnInEquality(
2549  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2550  // Save information for the undo.
2551  const ColIndex transpose_col = RowToColIndex(e.row);
2552  const SparseColumn& row_as_column = transpose.column(transpose_col);
2553  undo_stack_.push_back(
2554  SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e,
// NOTE(review): listing line 2555 is missing here; it should hold the last
// argument of the SingletonUndo constructor and close the push_back call —
// confirm against the real preprocessor.cc.
2556  rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_column);
2557 
2558  // Update the objective function using the equality constraint. We have
2559  // v_col*coeff + expression = rhs,
2560  // so the contribution of this variable to the cost function (v_col * cost)
2561  // can be rewritten as:
2562  // (rhs * cost - expression * cost) / coeff.
2563  const Fractional rhs = lp->constraint_upper_bounds()[e.row];
2564  const Fractional cost = lp->objective_coefficients()[e.col];
2565  const Fractional multiplier = cost / e.coeff;
2566  lp->SetObjectiveOffset(lp->objective_offset() + rhs * multiplier);
// Fold "- expression * multiplier" into the cost of every other (still
// alive) column of this row.
2567  for (const SparseColumn::Entry e : row_as_column) {
2568  const ColIndex col = RowToColIndex(e.row());
2569  if (!column_deletion_helper_.IsColumnMarked(col)) {
2570  Fractional new_cost =
2571  lp->objective_coefficients()[col] - e.coefficient() * multiplier;
2572 
2573  // TODO(user): It is important to avoid having non-zero costs which are
2574  // the result of numerical error. This is because we still miss some
2575  // tolerances in a few preprocessors. Like an empty column with a cost of
2576  // 1e-17 and unbounded towards infinity is currently implying that the
2577  // problem is unbounded. This will need fixing.
2578  if (std::abs(new_cost) < parameters_.preprocessor_zero_tolerance()) {
2579  new_cost = 0.0;
2580  }
2581  lp->SetObjectiveCoefficient(col, new_cost);
2582  }
2583  }
2584 
2585  // Now delete the column like a singleton column without cost.
2586  UpdateConstraintBoundsWithVariableBounds(e, lp);
2587  column_deletion_helper_.MarkColumnForDeletion(e.col);
2588 }
2589 
2590 void SingletonUndo::SingletonColumnInEqualityUndo(
2591  const GlopParameters& parameters, const SparseColumn& saved_row,
2592  ProblemSolution* solution) const {
2593  // First do the same as a zero-cost singleton column.
2594  ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2595 
2596  // Then, restore the dual optimal value taking into account the cost
2597  // modification.
2598  solution->dual_values[e_.row] += cost_ / e_.coeff;
2599  if (solution->constraint_statuses[e_.row] == ConstraintStatus::BASIC) {
2600  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2601  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2602  }
2603 }
2604 
2605 void SingletonUndo::MakeConstraintAnEqualityUndo(
2606  ProblemSolution* solution) const {
2607  if (solution->constraint_statuses[e_.row] == ConstraintStatus::FIXED_VALUE) {
2608  solution->constraint_statuses[e_.row] = constraint_status_;
2609  }
2610 }
2611 
// Tries to turn the (inequality) constraint of singleton column e into an
// equality by relaxing one of the variable's bounds when the bounds implied
// by the rest of the row allow it. Returns true on success (and pushes an
// undo entry); false otherwise.
//
// NOTE(review): this listing dropped several source lines (internal numbers
// 2679-2680, 2684, 2691, 2698, 2722, 2726, 2733, 2740), which should contain
// the declaration of `relaxed_status`, the enclosing bound tests, and the
// status_ = ProblemStatus::INFEASIBLE_OR_UNBOUNDED assignments — confirm
// against the real preprocessor.cc before relying on this text.
2612 bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible(
2613  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2614  // TODO(user): We could skip early if the relevant constraint bound is
2615  // infinity.
2616  const Fractional cst_lower_bound = lp->constraint_lower_bounds()[e.row];
2617  const Fractional cst_upper_bound = lp->constraint_upper_bounds()[e.row];
// Already an equality: nothing to do.
2618  if (cst_lower_bound == cst_upper_bound) return true;
2619 
2620  // To be efficient, we only process a row once and cache the domain that an
2621  // "artificial" extra variable x with coefficient 1.0 could take while still
2622  // making the constraint feasible. The domain bounds for the constraint e.row
2623  // will be stored in row_lb_sum_[e.row] and row_ub_sum_[e.row].
2624  const DenseRow& variable_ubs = lp->variable_upper_bounds();
2625  const DenseRow& variable_lbs = lp->variable_lower_bounds();
2626  if (e.row >= row_sum_is_cached_.size() || !row_sum_is_cached_[e.row]) {
2627  if (e.row >= row_sum_is_cached_.size()) {
2628  const int new_size = e.row.value() + 1;
2629  row_sum_is_cached_.resize(new_size);
2630  row_lb_sum_.resize(new_size);
2631  row_ub_sum_.resize(new_size);
2632  }
2633  row_sum_is_cached_[e.row] = true;
2634  row_lb_sum_[e.row].Add(cst_lower_bound);
2635  row_ub_sum_[e.row].Add(cst_upper_bound);
2636  for (const SparseColumn::Entry entry :
2637  transpose.column(RowToColIndex(e.row))) {
2638  const ColIndex row_as_col = RowToColIndex(entry.row());
2639 
2640  // Tricky: Even if later more columns are deleted, these "cached" sums
2641  // will actually still be valid because we only delete columns in a
2642  // compatible way.
2643  //
2644  // TODO(user): Find a more robust way? it seems easy to add new deletion
2645  // rules that may break this assumption.
2646  if (column_deletion_helper_.IsColumnMarked(row_as_col)) continue;
2647  if (entry.coefficient() > 0.0) {
2648  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2649  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2650  } else {
2651  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2652  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2653  }
2654 
2655  // TODO(user): Abort early if both sums contain more than 1 infinity?
2656  }
2657  }
2658 
2659  // Now that the lb/ub sum for the row is cached, we can use it to compute the
2660  // implied bounds on the variable from this constraint and the other
2661  // variables.
2662  const Fractional c = e.coeff;
2663  const Fractional lb =
2664  c > 0.0 ? row_lb_sum_[e.row].SumWithoutLb(-c * variable_ubs[e.col]) / c
2665  : row_ub_sum_[e.row].SumWithoutUb(-c * variable_ubs[e.col]) / c;
2666  const Fractional ub =
2667  c > 0.0 ? row_ub_sum_[e.row].SumWithoutUb(-c * variable_lbs[e.col]) / c
2668  : row_lb_sum_[e.row].SumWithoutLb(-c * variable_lbs[e.col]) / c;
2669 
2670  // Note that we could do the same for singleton variables with a cost of
2671  // 0.0, but such variable are already dealt with by
2672  // DeleteZeroCostSingletonColumn() so there is no point.
2673  const Fractional cost =
2674  lp->GetObjectiveCoefficientForMinimizationVersion(e.col);
2675  DCHECK_NE(cost, 0.0);
2676 
2677  // Note that some of the tests below will be always true if the bounds of
2678  // the column of index col are infinite. This is the desired behavior.
// NOTE(review): listing lines 2679-2680 missing (declaration of
// relaxed_status and the "implied ub tighter than variable ub" test).
2681  ub, lp->variable_upper_bounds()[e.col])) {
2682  if (e.coeff > 0) {
2683  if (cst_upper_bound == kInfinity) {
// NOTE(review): listing line 2684 missing.
2685  } else {
2686  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2687  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2688  }
2689  } else {
2690  if (cst_lower_bound == -kInfinity) {
// NOTE(review): listing line 2691 missing.
2692  } else {
2693  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2694  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2695  }
2696  }
2697 
// NOTE(review): listing line 2698 missing (guard around the unbounded-cost
// failure case below).
2699  DCHECK_EQ(ub, kInfinity);
2700  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2701  "variable "
2702  << e.col << " has a cost (for minimization) of " << cost
2703  << " and is unbounded towards kInfinity.";
2704  return false;
2705  }
2706 
2707  // This is important but tricky: The upper bound of the variable needs to
2708  // be relaxed. This is valid because the implied bound is lower than the
2709  // original upper bound here. This is needed, so that the optimal
2710  // primal/dual values of the new problem will also be optimal of the
2711  // original one.
2712  //
2713  // Let's prove the case coeff > 0.0 for a minimization problem. In the new
2714  // problem, because the variable is unbounded towards +infinity, its
2715  // reduced cost must satisfy at optimality rc = cost - coeff * dual_v >=
2716  // 0. But this implies dual_v <= cost / coeff <= 0. This is exactly what
2717  // is needed for the optimality of the initial problem since the
2718  // constraint will be at its upper bound, and the corresponding slack
2719  // condition is that the dual value needs to be <= 0.
2720  lp->SetVariableBounds(e.col, lp->variable_lower_bounds()[e.col], kInfinity);
2721  }
// NOTE(review): listing line 2722 missing (symmetric "implied lb tighter
// than variable lb" test).
2723  lp->variable_lower_bounds()[e.col], lb)) {
2724  if (e.coeff > 0) {
2725  if (cst_lower_bound == -kInfinity) {
// NOTE(review): listing line 2726 missing.
2727  } else {
2728  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2729  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2730  }
2731  } else {
2732  if (cst_upper_bound == kInfinity) {
// NOTE(review): listing line 2733 missing.
2734  } else {
2735  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2736  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2737  }
2738  }
2739 
// NOTE(review): listing line 2740 missing (guard for the -kInfinity failure
// case below).
2741  DCHECK_EQ(lb, -kInfinity);
2742  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2743  "variable "
2744  << e.col << " has a cost (for minimization) of " << cost
2745  << " and is unbounded towards -kInfinity.";
2746  return false;
2747  }
2748 
2749  // Same remark as above for a lower bounded variable this time.
2750  lp->SetVariableBounds(e.col, -kInfinity,
2751  lp->variable_upper_bounds()[e.col]);
2752  }
2753 
// Success iff the constraint is now an equality; record the undo so the
// original status can be restored at postsolve.
2754  if (lp->constraint_lower_bounds()[e.row] ==
2755  lp->constraint_upper_bounds()[e.row]) {
2756  undo_stack_.push_back(SingletonUndo(
2757  SingletonUndo::MAKE_CONSTRAINT_AN_EQUALITY, *lp, e, relaxed_status));
2758  return true;
2759  }
2760  return false;
2761 }
2762 
// NOTE(review): the function signature (listing lines 2763-2764) is missing
// from this extraction; from the body this is SingletonPreprocessor's Run()
// entry point returning bool — confirm against the real preprocessor.cc.
//
// Repeatedly removes singleton columns and singleton rows (each removal can
// create new singletons) until a fixed point or a non-INIT status, then
// applies the recorded deletions to the lp. Returns true if anything changed.
2765  RETURN_VALUE_IF_NULL(lp, false);
2766  const SparseMatrix& matrix = lp->GetSparseMatrix();
2767  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2768 
2769  // Initialize column_to_process with the current singleton columns.
2770  ColIndex num_cols(matrix.num_cols());
2771  RowIndex num_rows(matrix.num_rows());
2772  StrictITIVector<ColIndex, EntryIndex> column_degree(num_cols, EntryIndex(0));
2773  std::vector<ColIndex> column_to_process;
2774  for (ColIndex col(0); col < num_cols; ++col) {
2775  column_degree[col] = matrix.column(col).num_entries();
2776  if (column_degree[col] == 1) {
2777  column_to_process.push_back(col);
2778  }
2779  }
2780 
2781  // Initialize row_to_process with the current singleton rows.
2782  StrictITIVector<RowIndex, EntryIndex> row_degree(num_rows, EntryIndex(0));
2783  std::vector<RowIndex> row_to_process;
2784  for (RowIndex row(0); row < num_rows; ++row) {
2785  row_degree[row] = transpose.column(RowToColIndex(row)).num_entries();
2786  if (row_degree[row] == 1) {
2787  row_to_process.push_back(row);
2788  }
2789  }
2790 
2791  // Process current singleton rows/columns and enqueue new ones.
2792  while (status_ == ProblemStatus::INIT &&
2793  (!column_to_process.empty() || !row_to_process.empty())) {
2794  while (status_ == ProblemStatus::INIT && !column_to_process.empty()) {
2795  const ColIndex col = column_to_process.back();
2796  column_to_process.pop_back();
// Degree can drop to 0 if the column's row was deleted meanwhile.
2797  if (column_degree[col] <= 0) continue;
2798  const MatrixEntry e = GetSingletonColumnMatrixEntry(col, matrix);
// In MIP context, integer singleton columns are only removable under extra
// conditions checked by IntegerSingletonColumnIsRemovable().
2799  if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2800  !IntegerSingletonColumnIsRemovable(e, *lp)) {
2801  continue;
2802  }
2803 
2804  // TODO(user): It seems better to process all the singleton columns with
2805  // a cost of zero first.
2806  if (lp->objective_coefficients()[col] == 0.0) {
2807  DeleteZeroCostSingletonColumn(transpose, e, lp);
2808  } else if (MakeConstraintAnEqualityIfPossible(transpose, e, lp)) {
2809  DeleteSingletonColumnInEquality(transpose, e, lp);
2810  } else {
2811  continue;
2812  }
// Deleting the column may turn its row into a singleton row.
2813  --row_degree[e.row];
2814  if (row_degree[e.row] == 1) {
2815  row_to_process.push_back(e.row);
2816  }
2817  }
2818  while (status_ == ProblemStatus::INIT && !row_to_process.empty()) {
2819  const RowIndex row = row_to_process.back();
2820  row_to_process.pop_back();
2821  if (row_degree[row] <= 0) continue;
2822  const MatrixEntry e = GetSingletonRowMatrixEntry(row, transpose);
2823 
2824  DeleteSingletonRow(e, lp);
// Deleting the row may turn its column into a singleton column.
2825  --column_degree[e.col];
2826  if (column_degree[e.col] == 1) {
2827  column_to_process.push_back(e.col);
2828  }
2829  }
2830  }
2831 
2832  if (status_ != ProblemStatus::INIT) return false;
2833  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2834  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2835  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2836 }
2837 
// NOTE(review): the function signature (listing lines 2838-2839) is missing
// from this extraction; from the body this is SingletonPreprocessor's
// RecoverSolution(ProblemSolution*) — confirm against the real source.
//
// Restores the deleted rows/columns with 0.0 placeholders, then replays the
// undo stack in reverse order to reconstruct the original solution.
2840  RETURN_IF_NULL(solution);
2841 
2842  // Note that the two deletion helpers must restore 0.0 values in the positions
2843  // that will be used during Undo(). That is, all the calls done by this class
2844  // to MarkColumnForDeletion() should be done with 0.0 as the value to restore
2845  // (which is already the case when using MarkRowForDeletion()).
2846  // This is important because the various Undo() functions assume that a
2847  // primal/dual variable value which isn't restored yet has the value of 0.0.
2848  column_deletion_helper_.RestoreDeletedColumns(solution);
2849  row_deletion_helper_.RestoreDeletedRows(solution);
2850 
2851  // It is important to undo the operations in the correct order, i.e. in the
2852  // reverse order in which they were done.
2853  for (int i = undo_stack_.size() - 1; i >= 0; --i) {
// SavedOrEmptyColumn() returns the column/row saved at presolve time, or an
// empty one if that undo entry never needed it.
2854  const SparseColumn& saved_col =
2855  columns_saver_.SavedOrEmptyColumn(undo_stack_[i].Entry().col);
2856  const SparseColumn& saved_row = rows_saver_.SavedOrEmptyColumn(
2857  RowToColIndex(undo_stack_[i].Entry().row));
2858  undo_stack_[i].Undo(parameters_, saved_col, saved_row, solution);
2859  }
2860 }
2861 
// Returns the unique unmarked (row, col, coeff) entry of a singleton column.
// Logs a DFATAL and returns a dummy entry if no unmarked entry exists.
2862 MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry(
2863  ColIndex col, const SparseMatrix& matrix) {
2864  for (const SparseColumn::Entry e : matrix.column(col)) {
2865  if (!row_deletion_helper_.IsRowMarked(e.row())) {
2866  DCHECK_NE(0.0, e.coefficient());
2867  return MatrixEntry(e.row(), col, e.coefficient());
2868  }
2869  }
2870  // This shouldn't happen.
2871  LOG(DFATAL) << "No unmarked entry in a column that is supposed to have one.";
// NOTE(review): listing line 2872 is missing here (presumably the status_
// update for the abnormal case) — confirm against the real source.
2873  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2874 }
2875 
// Returns the unique unmarked (row, col, coeff) entry of a singleton row,
// reading the row through the transposed matrix. Logs a DFATAL and returns a
// dummy entry if no unmarked entry exists.
2876 MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry(
2877  RowIndex row, const SparseMatrix& transpose) {
2878  for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
2879  const ColIndex col = RowToColIndex(e.row());
2880  if (!column_deletion_helper_.IsColumnMarked(col)) {
2881  DCHECK_NE(0.0, e.coefficient());
2882  return MatrixEntry(row, col, e.coefficient());
2883  }
2884  }
2885  // This shouldn't happen.
2886  LOG(DFATAL) << "No unmarked entry in a row that is supposed to have one.";
// NOTE(review): listing line 2887 is missing here (presumably the status_
// update for the abnormal case) — confirm against the real source.
2888  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2889 }
2890 
2891 // --------------------------------------------------------
2892 // RemoveNearZeroEntriesPreprocessor
2893 // --------------------------------------------------------
2894 
// NOTE(review): the function signature (listing lines 2895-2896) is missing
// from this extraction; from the section header this is
// RemoveNearZeroEntriesPreprocessor's Run() — confirm against the source.
//
// Drops matrix entries and objective coefficients whose total possible impact
// (given the variable's bound magnitudes and the row degrees) is below the
// preprocessor zero tolerance. Always returns false: no post-solve needed.
2897  RETURN_VALUE_IF_NULL(lp, false);
2898  const ColIndex num_cols = lp->num_variables();
2899  if (num_cols == 0) return false;
2900 
2901  // We will use a different threshold for each row depending on its degree.
2902  // We use Fractionals for convenience since they will be used as such below.
2903  const RowIndex num_rows = lp->num_constraints();
2904  DenseColumn row_degree(num_rows, 0.0);
2905  Fractional num_non_zero_objective_coefficients = 0.0;
2906  for (ColIndex col(0); col < num_cols; ++col) {
2907  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2908  row_degree[e.row()] += 1.0;
2909  }
2910  if (lp->objective_coefficients()[col] != 0.0) {
2911  num_non_zero_objective_coefficients += 1.0;
2912  }
2913  }
2914 
2915  // To not have too many parameters, we use the preprocessor_zero_tolerance.
2916  const Fractional allowed_impact = parameters_.preprocessor_zero_tolerance();
2917 
2918  // TODO(user): Our criteria ensure that during presolve a primal feasible
2919  // solution will stay primal feasible. However, we have no guarantee on the
2920  // dual-feasibility (because the dual variable values range is not taken into
2921  // account). Fix that? or find a better criteria since it seems that on all
2922  // our current problems, this preprocessor helps and doesn't introduce errors.
2923  const EntryIndex initial_num_entries = lp->num_entries();
2924  int num_zeroed_objective_coefficients = 0;
2925  for (ColIndex col(0); col < num_cols; ++col) {
// NOTE(review): listing lines 2926-2927 are missing here (presumably the
// lower_bound/upper_bound locals read from the variable bounds) — confirm.
2928 
2929  // TODO(user): Write a small class that takes a matrix, its transpose, row
2930  // and column bounds, and "propagate" the bounds as much as possible so we
2931  // can use this better estimate here and remove more near-zero entries.
2932  const Fractional max_magnitude =
2933  std::max(std::abs(lower_bound), std::abs(upper_bound));
2934  if (max_magnitude == kInfinity || max_magnitude == 0) continue;
2935  const Fractional threshold = allowed_impact / max_magnitude;
// NOTE(review): listing line 2936 is missing here (presumably the call that
// deletes the column's near-zero entries using `threshold`) — confirm.
2937  threshold, row_degree);
2938 
// Zero an objective coefficient when even summed over all non-zero
// objective coefficients its magnitude stays below the threshold.
2939  if (lp->objective_coefficients()[col] != 0.0 &&
2940  num_non_zero_objective_coefficients *
2941  std::abs(lp->objective_coefficients()[col]) <
2942  threshold) {
2943  lp->SetObjectiveCoefficient(col, 0.0);
2944  ++num_zeroed_objective_coefficients;
2945  }
2946  }
2947 
2948  const EntryIndex num_entries = lp->num_entries();
2949  if (num_entries != initial_num_entries) {
2950  VLOG(1) << "Removed " << initial_num_entries - num_entries
2951  << " near-zero entries.";
2952  }
2953  if (num_zeroed_objective_coefficients > 0) {
2954  VLOG(1) << "Removed " << num_zeroed_objective_coefficients
2955  << " near-zero objective coefficients.";
2956  }
2957 
2958  // No post-solve is required.
2959  return false;
2960 }
2961 
// NOTE(review): the signature line (listing line 2962) is missing; this is
// the RecoverSolution() override of RemoveNearZeroEntriesPreprocessor.
// Intentionally empty: Run() returns false ("No post-solve is required"),
// so there is nothing to undo.
2963  ProblemSolution* solution) const {}
2964 
2965 // --------------------------------------------------------
2966 // SingletonColumnSignPreprocessor
2967 // --------------------------------------------------------
2968 
// NOTE(review): the function signature (listing lines 2969-2970) is missing
// from this extraction; from the section header this is
// SingletonColumnSignPreprocessor's Run() — confirm against the source.
//
// Flips the sign of every singleton column whose single coefficient is
// negative (negating the column and swapping/negating its variable bounds),
// recording the changed columns for postsolve.
2971  RETURN_VALUE_IF_NULL(lp, false);
2972  const ColIndex num_cols = lp->num_variables();
2973  if (num_cols == 0) return false;
2974 
2975  changed_columns_.clear();
2976  int num_singletons = 0;
2977  for (ColIndex col(0); col < num_cols; ++col) {
2978  SparseColumn* sparse_column = lp->GetMutableSparseColumn(col);
2979  const Fractional cost = lp->objective_coefficients()[col];
2980  if (sparse_column->num_entries() == 1) {
2981  ++num_singletons;
2982  }
2983  if (sparse_column->num_entries() == 1 &&
2984  sparse_column->GetFirstCoefficient() < 0) {
2985  sparse_column->MultiplyByConstant(-1.0);
// NOTE(review): listing lines 2986 and 2988 are missing here (presumably the
// SetVariableBounds call negating/swapping the bounds and the objective
// coefficient negation that uses `cost`) — confirm against the source.
2987  -lp->variable_lower_bounds()[col]);
2989  changed_columns_.push_back(col);
2990  }
2991  }
2992  VLOG(1) << "Changed the sign of " << changed_columns_.size() << " columns.";
2993  VLOG(1) << num_singletons << " singleton columns left.";
2994  return !changed_columns_.empty();
2995 }
2996 
// NOTE(review): the first signature line (listing line 2997) is missing;
// this is SingletonColumnSignPreprocessor's RecoverSolution() — confirm.
//
// Undoes the sign flip for every changed column: negates the primal value
// and (in the missing lines) swaps the AT_LOWER/AT_UPPER statuses.
2998  ProblemSolution* solution) const {
// NOTE(review): listing line 2999 is missing here.
3000  RETURN_IF_NULL(solution);
3001  for (int i = 0; i < changed_columns_.size(); ++i) {
3002  const ColIndex col = changed_columns_[i];
3003  solution->primal_values[col] = -solution->primal_values[col];
3004  const VariableStatus status = solution->variable_statuses[col];
// NOTE(review): listing lines 3005-3006 and 3008 are missing here
// (presumably the status-swapping branches) — confirm against the source.
3007  } else if (status == VariableStatus::AT_LOWER_BOUND) {
3009  }
3010  }
3011 }
3012 
3013 // --------------------------------------------------------
3014 // DoubletonEqualityRowPreprocessor
3015 // --------------------------------------------------------
3016 
// NOTE(review): the function signature (listing lines 3017-3018) is missing
// from this extraction; from the section header this is
// DoubletonEqualityRowPreprocessor's Run() — confirm against the source.
//
// For each equality row with exactly two entries (aX + bY = c), substitutes
// X out of the problem (adjusting Y's bounds, the objective and every other
// row containing X), then deletes the row and the column of X.
3019  RETURN_VALUE_IF_NULL(lp, false);
3020 
3021  // This is needed at postsolve.
3022  //
3023  // TODO(user): Get rid of the FIXED status instead to avoid spending
3024  // time/memory for no good reason here.
3025  saved_row_lower_bounds_ = lp->constraint_lower_bounds();
3026  saved_row_upper_bounds_ = lp->constraint_upper_bounds();
3027 
3028  // This is needed for postsolving dual.
3029  saved_objective_ = lp->objective_coefficients();
3030 
3031  // Note that we don't update the transpose during this preprocessor run.
3032  const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix();
3033 
3034  // Heuristic: We try to subtitute sparse columns first to avoid a complexity
3035  // explosion. Note that if we do long chain of substitution, we can still end
3036  // up with a complexity of O(num_rows x num_cols) instead of O(num_entries).
3037  //
3038  // TODO(user): There is probably some more robust ways.
3039  std::vector<std::pair<int64_t, RowIndex>> sorted_rows;
3040  const RowIndex num_rows(lp->num_constraints());
3041  for (RowIndex row(0); row < num_rows; ++row) {
3042  const SparseColumn& original_row =
3043  original_transpose.column(RowToColIndex(row));
// Only equality rows with exactly two entries are candidates.
3044  if (original_row.num_entries() != 2 ||
3045  lp->constraint_lower_bounds()[row] !=
3046  lp->constraint_upper_bounds()[row]) {
3047  continue;
3048  }
// Score = total number of entries in the two columns; sparser first.
3049  int64_t score = 0;
3050  for (const SparseColumn::Entry e : original_row) {
3051  const ColIndex col = RowToColIndex(e.row());
3052  score += lp->GetSparseColumn(col).num_entries().value();
3053  }
3054  sorted_rows.push_back({score, row});
3055  }
3056  std::sort(sorted_rows.begin(), sorted_rows.end());
3057 
3058  // Iterate over the rows that were already doubletons before this preprocessor
3059  // run, and whose items don't belong to a column that we deleted during this
3060  // run. This implies that the rows are only ever touched once per run, because
3061  // we only modify rows that have an item on a deleted column.
3062  for (const auto p : sorted_rows) {
3063  const RowIndex row = p.second;
3064  const SparseColumn& original_row =
3065  original_transpose.column(RowToColIndex(row));
3066 
3067  // Collect the two row items. Skip the ones involving a deleted column.
3068  // Note: we filled r.col[] and r.coeff[] by item order, and currently we
3069  // always pick the first column as the to-be-deleted one.
3070  // TODO(user): make a smarter choice of which column to delete, and
3071  // swap col[] and coeff[] accordingly.
3072  RestoreInfo r; // Use a short name since we're using it everywhere.
3073  int entry_index = 0;
3074  for (const SparseColumn::Entry e : original_row) {
3075  const ColIndex col = RowToColIndex(e.row());
3076  if (column_deletion_helper_.IsColumnMarked(col)) continue;
3077  r.col[entry_index] = col;
3078  r.coeff[entry_index] = e.coefficient();
3079  DCHECK_NE(0.0, r.coeff[entry_index]);
3080  ++entry_index;
3081  }
3082 
3083  // Discard some cases that will be treated by other preprocessors, or by
3084  // another run of this one.
3085  // 1) One or two of the items were in a deleted column.
3086  if (entry_index < 2) continue;
3087 
3088  // Fill the RestoreInfo, even if we end up not using it (because we
3089  // give up on preprocessing this row): it has a bunch of handy shortcuts.
3090  r.row = row;
3091  r.rhs = lp->constraint_lower_bounds()[row];
3092  for (int col_choice = 0; col_choice < NUM_DOUBLETON_COLS; ++col_choice) {
3093  const ColIndex col = r.col[col_choice];
3094  r.lb[col_choice] = lp->variable_lower_bounds()[col];
3095  r.ub[col_choice] = lp->variable_upper_bounds()[col];
3096  r.objective_coefficient[col_choice] = lp->objective_coefficients()[col];
3097  }
3098 
3099  // 2) One of the columns is fixed: don't bother, it will be treated
3100  // by the FixedVariablePreprocessor.
3101  if (r.lb[DELETED] == r.ub[DELETED] || r.lb[MODIFIED] == r.ub[MODIFIED]) {
3102  continue;
3103  }
3104 
3105  // Look at the bounds of both variables and exit early if we can delegate
3106  // to another pre-processor; otherwise adjust the bounds of the remaining
3107  // variable as necessary.
3108  // If the current row is: aX + bY = c, then the bounds of Y must be
3109  // adjusted to satisfy Y = c/b + (-a/b)X
3110  //
3111  // Note: when we compute the coefficients of these equations, we can cause
3112  // underflows/overflows that could be avoided if we did the computations
3113  // more carefully; but for now we just treat those cases as
3114  // ProblemStatus::ABNORMAL.
3115  // TODO(user): consider skipping the problematic rows in this preprocessor,
3116  // or trying harder to avoid the under/overflow.
3117  {
3118  const Fractional carry_over_offset = r.rhs / r.coeff[MODIFIED];
3119  const Fractional carry_over_factor =
3120  -r.coeff[DELETED] / r.coeff[MODIFIED];
3121  if (!IsFinite(carry_over_offset) || !IsFinite(carry_over_factor) ||
3122  carry_over_factor == 0.0) {
// NOTE(review): listing line 3123 is missing here (presumably the status_
// update to ProblemStatus::ABNORMAL) — confirm against the source.
3124  break;
3125  }
3126 
3127  Fractional lb = r.lb[MODIFIED];
3128  Fractional ub = r.ub[MODIFIED];
3129  Fractional carried_over_lb =
3130  r.lb[DELETED] * carry_over_factor + carry_over_offset;
3131  Fractional carried_over_ub =
3132  r.ub[DELETED] * carry_over_factor + carry_over_offset;
3133  if (carry_over_factor < 0) {
3134  std::swap(carried_over_lb, carried_over_ub);
3135  }
3136  if (carried_over_lb <= lb) {
3137  // Default (and simplest) case: the lower bound didn't change.
3138  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3139  MODIFIED, VariableStatus::AT_LOWER_BOUND, lb);
3140  } else {
3141  lb = carried_over_lb;
3142  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3143  DELETED,
3144  carry_over_factor > 0 ? VariableStatus::AT_LOWER_BOUND
// NOTE(review): listing line 3145 is missing here (the ':' alternative of
// the ternary, presumably VariableStatus::AT_UPPER_BOUND,) — confirm.
3146  carry_over_factor > 0 ? r.lb[DELETED] : r.ub[DELETED]);
3147  }
3148  if (carried_over_ub >= ub) {
3149  // Default (and simplest) case: the upper bound didn't change.
3150  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3151  MODIFIED, VariableStatus::AT_UPPER_BOUND, ub);
3152  } else {
3153  ub = carried_over_ub;
3154  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3155  DELETED,
3156  carry_over_factor > 0 ? VariableStatus::AT_UPPER_BOUND
// NOTE(review): listing line 3157 is missing here (the ':' alternative of
// the ternary, presumably VariableStatus::AT_LOWER_BOUND,) — confirm.
3158  carry_over_factor > 0 ? r.ub[DELETED] : r.lb[DELETED]);
3159  }
3160  // 3) If the new bounds are fixed (the domain is a singleton) or
3161  // infeasible, then we let the
3162  // ForcingAndImpliedFreeConstraintPreprocessor do the work.
3163  if (IsSmallerWithinPreprocessorZeroTolerance(ub, lb)) continue;
3164  lp->SetVariableBounds(r.col[MODIFIED], lb, ub);
3165  }
3166 
3167  restore_stack_.push_back(r);
3168 
3169  // Now, perform the substitution. If the current row is: aX + bY = c
3170  // then any other row containing 'X' with coefficient x can remove the
3171  // entry in X, and instead add an entry on 'Y' with coefficient x(-b/a)
3172  // and a constant offset x(c/a).
3173  // Looking at the matrix, this translates into colY += (-b/a) colX.
3174  DCHECK_NE(r.coeff[DELETED], 0.0);
3175  const Fractional substitution_factor =
3176  -r.coeff[MODIFIED] / r.coeff[DELETED]; // -b/a
3177  const Fractional constant_offset_factor = r.rhs / r.coeff[DELETED]; // c/a
3178  // Again we don't bother too much with over/underflows.
3179  if (!IsFinite(substitution_factor) || substitution_factor == 0.0 ||
3180  !IsFinite(constant_offset_factor)) {
// NOTE(review): listing line 3181 is missing here (presumably the status_
// update to ProblemStatus::ABNORMAL) — confirm against the source.
3182  break;
3183  }
3184 
3185  // Note that we do not save again a saved column, so that we only save
3186  // columns from the initial LP. This is important to limit the memory usage.
3187  // It complexify a bit the postsolve though.
3188  for (const int col_choice : {DELETED, MODIFIED}) {
3189  const ColIndex col = r.col[col_choice];
3190  columns_saver_.SaveColumnIfNotAlreadyDone(col, lp->GetSparseColumn(col));
3191  }
3192 
3193  lp->GetSparseColumn(r.col[DELETED])
// NOTE(review): listing line 3194 is missing here (the member-function name
// of the column-combination call taking these arguments) — confirm.
3195  substitution_factor, r.row, parameters_.drop_tolerance(),
3196  lp->GetMutableSparseColumn(r.col[MODIFIED]));
3197 
3198  // Apply similar operations on the objective coefficients.
3199  // Note that the offset is being updated by
3200  // SubtractColumnMultipleFromConstraintBound() below.
3201  {
3202  const Fractional new_objective =
3203  r.objective_coefficient[MODIFIED] +
3204  substitution_factor * r.objective_coefficient[DELETED];
3205  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
3206  lp->SetObjectiveCoefficient(r.col[MODIFIED], new_objective);
3207  } else {
3208  lp->SetObjectiveCoefficient(r.col[MODIFIED], 0.0);
3209  }
3210  }
3211 
3212  // Carry over the constant factor of the substitution as well.
3213  // TODO(user): rename that method to reflect the fact that it also updates
3214  // the objective offset, in the other direction.
3215  SubtractColumnMultipleFromConstraintBound(r.col[DELETED],
3216  constant_offset_factor, lp);
3217 
3218  // If we keep substituing the same "dense" columns over and over, we can
3219  // have a memory in O(num_rows * num_cols) which can be order of magnitude
3220  // larger than the original problem. It is important to reclaim the memory
3221  // of the deleted column right away.
3222  lp->GetMutableSparseColumn(r.col[DELETED])->ClearAndRelease();
3223 
3224  // Mark the column and the row for deletion.
3225  column_deletion_helper_.MarkColumnForDeletion(r.col[DELETED]);
3226  row_deletion_helper_.MarkRowForDeletion(r.row);
3227  }
3228  if (status_ != ProblemStatus::INIT) return false;
3229  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
3230  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
3231 
3232  return !column_deletion_helper_.IsEmpty();
3233 }
3234 
3236  ProblemSolution* solution) const {
3238  RETURN_IF_NULL(solution);
3239  column_deletion_helper_.RestoreDeletedColumns(solution);
3240  row_deletion_helper_.RestoreDeletedRows(solution);
3241 
3242  const ColIndex num_cols = solution->variable_statuses.size();
3243  StrictITIVector<ColIndex, bool> new_basic_columns(num_cols, false);
3244 
3245  for (const RestoreInfo& r : Reverse(restore_stack_)) {
3246  switch (solution->variable_statuses[r.col[MODIFIED]]) {
3248  LOG(DFATAL) << "FIXED variable produced by DoubletonPreprocessor!";
3249  // In non-fastbuild mode, we rely on the rest of the code producing an
3250  // ProblemStatus::ABNORMAL status here.
3251  break;
3252  // When the modified variable is either basic or free, we keep it as is,
3253  // and simply make the deleted one basic.
3254  case VariableStatus::FREE:
3255  ABSL_FALLTHROUGH_INTENDED;
3256  case VariableStatus::BASIC:
3257  // Several code paths set the deleted column as basic. The code that
3258  // sets its value in that case is below, after the switch() block.
3259  solution->variable_statuses[r.col[DELETED]] = VariableStatus::BASIC;
3260  new_basic_columns[r.col[DELETED]] = true;
3261  break;
3263  ABSL_FALLTHROUGH_INTENDED;
3265  // The bound was induced by a bound of one of the two original
3266  // variables. Put that original variable at its bound, and make
3267  // the other one basic.
3268  const RestoreInfo::ColChoiceAndStatus& bound_backtracking =
3269  solution->variable_statuses[r.col[MODIFIED]] ==
3271  ? r.bound_backtracking_at_lower_bound
3272  : r.bound_backtracking_at_upper_bound;
3273  const ColIndex bounded_var = r.col[bound_backtracking.col_choice];
3274  const ColIndex basic_var =
3275  r.col[OtherColChoice(bound_backtracking.col_choice)];
3276  solution->variable_statuses[bounded_var] = bound_backtracking.status;
3277  solution->primal_values[bounded_var] = bound_backtracking.value;
3278  solution->variable_statuses[basic_var] = VariableStatus::BASIC;
3279  new_basic_columns[basic_var] = true;
3280  // If the modified column is VariableStatus::BASIC, then its value is
3281  // already set correctly. If it's the deleted column that is basic, its
3282  // value is set below the switch() block.
3283  }
3284  }
3285 
3286  // Restore the value of the deleted column if it is VariableStatus::BASIC.
3287  if (solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC) {
3288  solution->primal_values[r.col[DELETED]] =
3289  (r.rhs -
3290  solution->primal_values[r.col[MODIFIED]] * r.coeff[MODIFIED]) /
3291  r.coeff[DELETED];
3292  }
3293 
3294  // Make the deleted constraint status FIXED.
3296  }
3297 
3298  // Now we need to reconstruct the dual. This is a bit tricky and is basically
 3299  // the same as inverting a really structured and easy-to-invert matrix. For n
3300  // doubleton rows, looking only at the new_basic_columns, there is exactly n
3301  // by construction (one per row). We consider only this n x n matrix, and we
3302  // must choose dual row values so that we make the reduced costs zero on all
3303  // these columns.
3304  //
 3305  // There is always an order that makes this matrix triangular. We start with a
 3306  // singleton column, which fixes its corresponding row, and then work on the
3307  // square submatrix left. We can always start and continue, because if we take
 3308  // the first substituted row of the current submatrix, if its deleted column
3309  // was in the submatrix we have a singleton column. If it is outside, we have
3310  // 2 n - 1 entries for a matrix with n columns, so one must be singleton.
3311  //
3312  // Note(user): Another advantage of working on the "original" matrix before
3313  // this presolve is an increased precision.
3314  //
3315  // TODO(user): We can probably use something better than a vector of set,
 3316  // but the number of entries is really small though. And the size of a set<int>
3317  // is 24 bytes, same as a std::vector<int>.
3318  StrictITIVector<ColIndex, std::set<int>> col_to_index(num_cols);
3319  for (int i = 0; i < restore_stack_.size(); ++i) {
3320  const RestoreInfo& r = restore_stack_[i];
3321  col_to_index[r.col[MODIFIED]].insert(i);
3322  col_to_index[r.col[DELETED]].insert(i);
3323  }
3324  std::vector<ColIndex> singleton_col;
3325  for (ColIndex col(0); col < num_cols; ++col) {
3326  if (!new_basic_columns[col]) continue;
3327  if (col_to_index[col].size() == 1) singleton_col.push_back(col);
3328  }
3329  while (!singleton_col.empty()) {
3330  const ColIndex col = singleton_col.back();
3331  singleton_col.pop_back();
3332  if (!new_basic_columns[col]) continue;
3333  if (col_to_index[col].empty()) continue;
3334  CHECK_EQ(col_to_index[col].size(), 1);
3335  const int index = *col_to_index[col].begin();
3336  const RestoreInfo& r = restore_stack_[index];
3337 
3338  const ColChoice col_choice = r.col[MODIFIED] == col ? MODIFIED : DELETED;
3339 
 3340  // Adjust the dual value of the deleted constraint so that col has a
 3341  // reduced cost of zero.
3342  CHECK_EQ(solution->dual_values[r.row], 0.0);
3343  const SparseColumn& saved_col =
3344  columns_saver_.SavedColumn(r.col[col_choice]);
3345  const Fractional current_reduced_cost =
3346  saved_objective_[r.col[col_choice]] -
3347  PreciseScalarProduct(solution->dual_values, saved_col);
3348  solution->dual_values[r.row] = current_reduced_cost / r.coeff[col_choice];
3349 
3350  // Update singleton
3351  col_to_index[r.col[DELETED]].erase(index);
3352  col_to_index[r.col[MODIFIED]].erase(index);
3353  if (col_to_index[r.col[DELETED]].size() == 1) {
3354  singleton_col.push_back(r.col[DELETED]);
3355  }
3356  if (col_to_index[r.col[MODIFIED]].size() == 1) {
3357  singleton_col.push_back(r.col[MODIFIED]);
3358  }
3359  }
3360 
3361  // Fix potential bad ConstraintStatus::FIXED_VALUE statuses.
3362  FixConstraintWithFixedStatuses(saved_row_lower_bounds_,
3363  saved_row_upper_bounds_, solution);
3364 }
3365 
3366 void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds,
3367  const DenseColumn& row_upper_bounds,
3368  ProblemSolution* solution) {
3369  const RowIndex num_rows = solution->constraint_statuses.size();
3370  DCHECK_EQ(row_lower_bounds.size(), num_rows);
3371  DCHECK_EQ(row_upper_bounds.size(), num_rows);
3372  for (RowIndex row(0); row < num_rows; ++row) {
3374  continue;
3375  }
3376  if (row_lower_bounds[row] == row_upper_bounds[row]) continue;
3377 
3378  // We need to fix the status and we just need to make sure that the bound we
3379  // choose satisfies the LP optimality conditions.
3380  if (solution->dual_values[row] > 0) {
3382  } else {
3384  }
3385  }
3386 }
3387 
3388 void DoubletonEqualityRowPreprocessor::
3389  SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo* r) {
3390  using std::swap;
3391  swap(r->col[DELETED], r->col[MODIFIED]);
3392  swap(r->coeff[DELETED], r->coeff[MODIFIED]);
3393  swap(r->lb[DELETED], r->lb[MODIFIED]);
3394  swap(r->ub[DELETED], r->ub[MODIFIED]);
3395  swap(r->objective_coefficient[DELETED], r->objective_coefficient[MODIFIED]);
3396 }
3397 
3398 // --------------------------------------------------------
3399 // DualizerPreprocessor
3400 // --------------------------------------------------------
3401 
3404  RETURN_VALUE_IF_NULL(lp, false);
3406  return false;
3407  }
3408 
3409  // Store the original problem size and direction.
3410  primal_num_cols_ = lp->num_variables();
3411  primal_num_rows_ = lp->num_constraints();
3412  primal_is_maximization_problem_ = lp->IsMaximizationProblem();
3413 
3414  // If we need to decide whether or not to take the dual, we only take it when
3415  // the matrix has more rows than columns. The number of rows of a linear
3416  // program gives the size of the square matrices we need to invert and the
3417  // order of iterations of the simplex method. So solving a program with less
 3418  // rows is likely a better alternative. Note that the number of rows of the
 3419  // dual is the number of columns of the primal.
3420  //
3421  // Note however that the default is a conservative factor because if the
 3422  // user gives us a primal program, we assume they know what they are doing and
3423  // sometimes a problem is a lot faster to solve in a given formulation
3424  // even if its dimension would say otherwise.
3425  //
3426  // Another reason to be conservative, is that the number of columns of the
3427  // dual is the number of rows of the primal plus up to two times the number of
3428  // columns of the primal.
3429  //
3430  // TODO(user): This effect can be lowered if we use some of the extra
3431  // variables as slack variable which we are not doing at this point.
3433  if (1.0 * primal_num_rows_.value() <
3434  parameters_.dualizer_threshold() * primal_num_cols_.value()) {
3435  return false;
3436  }
3437  }
3438 
3439  // Save the linear program bounds.
 3440  // Also make sure that all the bounded variables have at least one bound set to
3441  // zero. This will be needed to post-solve a dual-basic solution into a
3442  // primal-basic one.
3443  const ColIndex num_cols = lp->num_variables();
3444  variable_lower_bounds_.assign(num_cols, 0.0);
3445  variable_upper_bounds_.assign(num_cols, 0.0);
3446  for (ColIndex col(0); col < num_cols; ++col) {
3447  const Fractional lower = lp->variable_lower_bounds()[col];
3448  const Fractional upper = lp->variable_upper_bounds()[col];
3449 
3450  // We need to shift one of the bound to zero.
3451  variable_lower_bounds_[col] = lower;
3452  variable_upper_bounds_[col] = upper;
3453  const Fractional value = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3454  if (value != 0.0) {
3455  lp->SetVariableBounds(col, lower - value, upper - value);
3456  SubtractColumnMultipleFromConstraintBound(col, value, lp);
3457  }
3458  }
3459 
3460  // Fill the information that will be needed during postsolve.
3461  //
3462  // TODO(user): This will break if PopulateFromDual() is changed. so document
3463  // the convention or make the function fill these vectors?
3464  dual_status_correspondence_.clear();
3465  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3468  if (lower_bound == upper_bound) {
3469  dual_status_correspondence_.push_back(VariableStatus::FIXED_VALUE);
3470  } else if (upper_bound != kInfinity) {
3471  dual_status_correspondence_.push_back(VariableStatus::AT_UPPER_BOUND);
3472  } else if (lower_bound != -kInfinity) {
3473  dual_status_correspondence_.push_back(VariableStatus::AT_LOWER_BOUND);
3474  } else {
3475  LOG(DFATAL) << "There should be no free constraint in this lp.";
3476  }
3477  }
3478  slack_or_surplus_mapping_.clear();
3479  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3482  if (lower_bound != -kInfinity) {
3483  dual_status_correspondence_.push_back(
3486  slack_or_surplus_mapping_.push_back(col);
3487  }
3488  }
3489  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3492  if (upper_bound != kInfinity) {
3493  dual_status_correspondence_.push_back(
3496  slack_or_surplus_mapping_.push_back(col);
3497  }
3498  }
3499 
3500  // TODO(user): There are two different ways to deal with ranged rows when
3501  // taking the dual. The default way is to duplicate such rows, see
3502  // PopulateFromDual() for details. Another way is to call
3503  // lp->AddSlackVariablesForFreeAndBoxedRows() before calling
3504  // PopulateFromDual(). Adds an option to switch between the two as this may
3505  // change the running time?
3506  //
3507  // Note however that the default algorithm is likely to result in a faster
3508  // solving time because the dual program will have less rows.
3509  LinearProgram dual;
3510  dual.PopulateFromDual(*lp, &duplicated_rows_);
3511  dual.Swap(lp);
3512  return true;
3513 }
3514 
3515 // Note(user): This assumes that LinearProgram.PopulateFromDual() uses
3516 // the first ColIndex and RowIndex for the rows and columns of the given
3517 // problem.
3520  RETURN_IF_NULL(solution);
3521 
3522  DenseRow new_primal_values(primal_num_cols_, 0.0);
3523  VariableStatusRow new_variable_statuses(primal_num_cols_,
3525  DCHECK_LE(primal_num_cols_, RowToColIndex(solution->dual_values.size()));
3526  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3527  RowIndex row = ColToRowIndex(col);
3528  const Fractional lower = variable_lower_bounds_[col];
3529  const Fractional upper = variable_upper_bounds_[col];
3530 
3531  // The new variable value corresponds to the dual value of the dual.
3532  // The shift applied during presolve needs to be removed.
3533  const Fractional shift = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3534  new_primal_values[col] = solution->dual_values[row] + shift;
3535 
3536  // A variable will be VariableStatus::BASIC if the dual constraint is not.
3537  if (solution->constraint_statuses[row] != ConstraintStatus::BASIC) {
3538  new_variable_statuses[col] = VariableStatus::BASIC;
3539  } else {
3540  // Otherwise, the dual value must be zero (if the solution is feasible),
3541  // and the variable is at an exact bound or zero if it is
3542  // VariableStatus::FREE. Note that this works because the bounds are
3543  // shifted to 0.0 in the presolve!
3544  new_variable_statuses[col] = ComputeVariableStatus(shift, lower, upper);
3545  }
3546  }
3547 
3548  // A basic variable that corresponds to slack/surplus variable is the same as
3549  // a basic row. The new variable status (that was just set to
3550  // VariableStatus::BASIC above)
3551  // needs to be corrected and depends on the variable type (slack/surplus).
3552  const ColIndex begin = RowToColIndex(primal_num_rows_);
3553  const ColIndex end = dual_status_correspondence_.size();
3554  DCHECK_GE(solution->variable_statuses.size(), end);
3555  DCHECK_EQ(end - begin, slack_or_surplus_mapping_.size());
3556  for (ColIndex index(begin); index < end; ++index) {
3557  if (solution->variable_statuses[index] == VariableStatus::BASIC) {
3558  const ColIndex col = slack_or_surplus_mapping_[index - begin];
3559  const VariableStatus status = dual_status_correspondence_[index];
3560 
3561  // The new variable value is set to its exact bound because the dual
3562  // variable value can be imprecise.
3563  new_variable_statuses[col] = status;
3566  new_primal_values[col] = variable_upper_bounds_[col];
3567  } else {
3569  new_primal_values[col] = variable_lower_bounds_[col];
3570  }
3571  }
3572  }
3573 
3574  // Note the <= in the DCHECK, since we may need to add variables when taking
3575  // the dual.
3576  DCHECK_LE(primal_num_rows_, ColToRowIndex(solution->primal_values.size()));
3577  DenseColumn new_dual_values(primal_num_rows_, 0.0);
3578  ConstraintStatusColumn new_constraint_statuses(primal_num_rows_,
3580 
 3581  // Note that the sign needs to be corrected because of the special behavior of
3582  // PopulateFromDual() on a maximization problem, see the comment in the
3583  // declaration of PopulateFromDual().
3584  Fractional sign = primal_is_maximization_problem_ ? -1 : 1;
3585  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3586  const ColIndex col = RowToColIndex(row);
3587  new_dual_values[row] = sign * solution->primal_values[col];
3588 
3589  // A constraint will be ConstraintStatus::BASIC if the dual variable is not.
3590  if (solution->variable_statuses[col] != VariableStatus::BASIC) {
3591  new_constraint_statuses[row] = ConstraintStatus::BASIC;
3592  if (duplicated_rows_[row] != kInvalidCol) {
3593  if (solution->variable_statuses[duplicated_rows_[row]] ==
3595  // The duplicated row is always about the lower bound.
3596  new_constraint_statuses[row] = ConstraintStatus::AT_LOWER_BOUND;
3597  }
3598  }
3599  } else {
3600  // ConstraintStatus::AT_LOWER_BOUND/ConstraintStatus::AT_UPPER_BOUND/
3601  // ConstraintStatus::FIXED depend on the type of the constraint at this
3602  // position.
3603  new_constraint_statuses[row] =
3604  VariableToConstraintStatus(dual_status_correspondence_[col]);
3605  }
3606 
3607  // If the original row was duplicated, we need to take into account the
3608  // value of the corresponding dual column.
3609  if (duplicated_rows_[row] != kInvalidCol) {
3610  new_dual_values[row] +=
3611  sign * solution->primal_values[duplicated_rows_[row]];
3612  }
3613 
3614  // Because non-basic variable values are exactly at one of their bounds, a
3615  // new basic constraint will have a dual value exactly equal to zero.
3616  DCHECK(new_dual_values[row] == 0 ||
3617  new_constraint_statuses[row] != ConstraintStatus::BASIC);
3618  }
3619 
3620  solution->status = ChangeStatusToDualStatus(solution->status);
3621  new_primal_values.swap(solution->primal_values);
3622  new_dual_values.swap(solution->dual_values);
3623  new_variable_statuses.swap(solution->variable_statuses);
3624  new_constraint_statuses.swap(solution->constraint_statuses);
3625 }
3626 
3628  ProblemStatus status) const {
3629  switch (status) {
3642  default:
3643  return status;
3644  }
3645 }
3646 
3647 // --------------------------------------------------------
3648 // ShiftVariableBoundsPreprocessor
3649 // --------------------------------------------------------
3650 
3653  RETURN_VALUE_IF_NULL(lp, false);
3654 
3655  // Save the linear program bounds before shifting them.
3656  bool all_variable_domains_contain_zero = true;
3657  const ColIndex num_cols = lp->num_variables();
3658  variable_initial_lbs_.assign(num_cols, 0.0);
3659  variable_initial_ubs_.assign(num_cols, 0.0);
3660  for (ColIndex col(0); col < num_cols; ++col) {
3661  variable_initial_lbs_[col] = lp->variable_lower_bounds()[col];
3662  variable_initial_ubs_[col] = lp->variable_upper_bounds()[col];
3663  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3664  all_variable_domains_contain_zero = false;
3665  }
3666  }
3667  VLOG(1) << "Maximum variable bounds magnitude (before shift): "
3668  << ComputeMaxVariableBoundsMagnitude(*lp);
3669 
3670  // Abort early if there is nothing to do.
3671  if (all_variable_domains_contain_zero) return false;
3672 
3673  // Shift the variable bounds and compute the changes to the constraint bounds
3674  // and objective offset in a precise way.
3675  int num_bound_shifts = 0;
3676  const RowIndex num_rows = lp->num_constraints();
3677  KahanSum objective_offset;
3678  absl::StrongVector<RowIndex, KahanSum> row_offsets(num_rows.value());
3679  offsets_.assign(num_cols, 0.0);
3680  for (ColIndex col(0); col < num_cols; ++col) {
3681  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3682  Fractional offset = MinInMagnitudeOrZeroIfInfinite(
3683  variable_initial_lbs_[col], variable_initial_ubs_[col]);
3684  if (in_mip_context_ && lp->IsVariableInteger(col)) {
3685  // In the integer case, we truncate the number because if for instance
3686  // the lower bound is a positive integer + epsilon, we only want to
3687  // shift by the integer and leave the lower bound at epsilon.
3688  //
3689  // TODO(user): This would not be needed, if we always make the bound
3690  // of an integer variable integer before applying this preprocessor.
3691  offset = trunc(offset);
3692  } else {
3693  DCHECK_NE(offset, 0.0);
3694  }
3695  offsets_[col] = offset;
3696  lp->SetVariableBounds(col, variable_initial_lbs_[col] - offset,
3697  variable_initial_ubs_[col] - offset);
3698  const SparseColumn& sparse_column = lp->GetSparseColumn(col);
3699  for (const SparseColumn::Entry e : sparse_column) {
3700  row_offsets[e.row()].Add(e.coefficient() * offset);
3701  }
3702  objective_offset.Add(lp->objective_coefficients()[col] * offset);
3703  ++num_bound_shifts;
3704  }
3705  }
3706  VLOG(1) << "Maximum variable bounds magnitude (after " << num_bound_shifts
3707  << " shifts): " << ComputeMaxVariableBoundsMagnitude(*lp);
3708 
3709  // Apply the changes to the constraint bound and objective offset.
3710  for (RowIndex row(0); row < num_rows; ++row) {
3711  lp->SetConstraintBounds(
3712  row, lp->constraint_lower_bounds()[row] - row_offsets[row].Value(),
3713  lp->constraint_upper_bounds()[row] - row_offsets[row].Value());
3714  }
3715  lp->SetObjectiveOffset(lp->objective_offset() + objective_offset.Value());
3716  return true;
3717 }
3718 
3720  ProblemSolution* solution) const {
3722  RETURN_IF_NULL(solution);
3723  const ColIndex num_cols = solution->variable_statuses.size();
3724  for (ColIndex col(0); col < num_cols; ++col) {
3725  if (in_mip_context_) {
3726  solution->primal_values[col] += offsets_[col];
3727  } else {
3728  switch (solution->variable_statuses[col]) {
3730  ABSL_FALLTHROUGH_INTENDED;
3732  solution->primal_values[col] = variable_initial_lbs_[col];
3733  break;
3735  solution->primal_values[col] = variable_initial_ubs_[col];
3736  break;
3737  case VariableStatus::BASIC:
3738  solution->primal_values[col] += offsets_[col];
3739  break;
3740  case VariableStatus::FREE:
3741  break;
3742  }
3743  }
3744  }
3745 }
3746 
3747 // --------------------------------------------------------
3748 // ScalingPreprocessor
3749 // --------------------------------------------------------
3750 
3753  RETURN_VALUE_IF_NULL(lp, false);
3754  if (!parameters_.use_scaling()) return false;
3755 
3756  // Save the linear program bounds before scaling them.
3757  const ColIndex num_cols = lp->num_variables();
3758  variable_lower_bounds_.assign(num_cols, 0.0);
3759  variable_upper_bounds_.assign(num_cols, 0.0);
3760  for (ColIndex col(0); col < num_cols; ++col) {
3761  variable_lower_bounds_[col] = lp->variable_lower_bounds()[col];
3762  variable_upper_bounds_[col] = lp->variable_upper_bounds()[col];
3763  }
3764 
3765  // See the doc of these functions for more details.
3766  // It is important to call Scale() before the other two.
3767  Scale(lp, &scaler_, parameters_.scaling_method());
3768  cost_scaling_factor_ = lp->ScaleObjective(parameters_.cost_scaling());
3769  bound_scaling_factor_ = lp->ScaleBounds();
3770 
3771  return true;
3772 }
3773 
3776  RETURN_IF_NULL(solution);
3777 
3778  scaler_.ScaleRowVector(false, &(solution->primal_values));
3779  for (ColIndex col(0); col < solution->primal_values.size(); ++col) {
3780  solution->primal_values[col] *= bound_scaling_factor_;
3781  }
3782 
3783  scaler_.ScaleColumnVector(false, &(solution->dual_values));
3784  for (RowIndex row(0); row < solution->dual_values.size(); ++row) {
3785  solution->dual_values[row] *= cost_scaling_factor_;
3786  }
3787 
 3788  // Make sure the variables are at their exact bounds according to their status.
 3789  // This just removes a really small error (about 1e-15) but allows keeping the
 3790  // variables at their exact bounds.
3791  const ColIndex num_cols = solution->primal_values.size();
3792  for (ColIndex col(0); col < num_cols; ++col) {
3793  switch (solution->variable_statuses[col]) {
3795  ABSL_FALLTHROUGH_INTENDED;
3797  solution->primal_values[col] = variable_upper_bounds_[col];
3798  break;
3800  solution->primal_values[col] = variable_lower_bounds_[col];
3801  break;
3802  case VariableStatus::FREE:
3803  ABSL_FALLTHROUGH_INTENDED;
3804  case VariableStatus::BASIC:
3805  break;
3806  }
3807  }
3808 }
3809 
3810 // --------------------------------------------------------
3811 // ToMinimizationPreprocessor
3812 // --------------------------------------------------------
3813 
3816  RETURN_VALUE_IF_NULL(lp, false);
3817  if (lp->IsMaximizationProblem()) {
3818  for (ColIndex col(0); col < lp->num_variables(); ++col) {
3819  const Fractional coeff = lp->objective_coefficients()[col];
3820  if (coeff != 0.0) {
3821  lp->SetObjectiveCoefficient(col, -coeff);
3822  }
3823  }
3824  lp->SetMaximizationProblem(false);
3827  }
3828  return false;
3829 }
3830 
3832  ProblemSolution* solution) const {}
3833 
3834 // --------------------------------------------------------
3835 // AddSlackVariablesPreprocessor
3836 // --------------------------------------------------------
3837 
3840  RETURN_VALUE_IF_NULL(lp, false);
3842  /*detect_integer_constraints=*/true);
3843  first_slack_col_ = lp->GetFirstSlackVariable();
3844  return true;
3845 }
3846 
3848  ProblemSolution* solution) const {
3850  RETURN_IF_NULL(solution);
3851 
3852  // Compute constraint statuses from statuses of slack variables.
3853  const RowIndex num_rows = solution->dual_values.size();
3854  for (RowIndex row(0); row < num_rows; ++row) {
3855  const ColIndex slack_col = first_slack_col_ + RowToColIndex(row);
3856  const VariableStatus variable_status =
3857  solution->variable_statuses[slack_col];
3858  ConstraintStatus constraint_status = ConstraintStatus::FREE;
3859  // The slack variables have reversed bounds - if the value of the variable
3860  // is at one bound, the value of the constraint is at the opposite bound.
3861  switch (variable_status) {
3863  constraint_status = ConstraintStatus::AT_UPPER_BOUND;
3864  break;
3866  constraint_status = ConstraintStatus::AT_LOWER_BOUND;
3867  break;
3868  default:
3869  constraint_status = VariableToConstraintStatus(variable_status);
3870  break;
3871  }
3872  solution->constraint_statuses[row] = constraint_status;
3873  }
3874 
3875  // Drop the primal values and variable statuses for slack variables.
3876  solution->primal_values.resize(first_slack_col_, 0.0);
3877  solution->variable_statuses.resize(first_slack_col_, VariableStatus::FREE);
3878 }
3879 
3880 } // namespace glop
3881 } // namespace operations_research
#define CHECK(condition)
Definition: base/logging.h:491
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:105
int64_t bound
void RecoverSolution(ProblemSolution *solution) const final
void Add(const FpNumber &value)
Definition: accurate_sum.h:29
static constexpr SolverBehavior NEVER_DO
Fractional LookUpCoefficient(Index index) const
void RecoverSolution(ProblemSolution *solution) const final
int64_t min
Definition: alldiff_cst.cc:139
ColIndex col
void RecoverSolution(ProblemSolution *solution) const final
void RemoveZeroCostUnconstrainedVariable(ColIndex col, Fractional target_bound, LinearProgram *lp)
const SparseMatrix & GetTransposeSparseMatrix() const
Definition: lp_data.cc:376
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:326
bool IsVariableInteger(ColIndex col) const
Definition: lp_data.cc:295
void Swap(LinearProgram *linear_program)
Definition: lp_data.cc:1031
ModelSharedTimeLimit * time_limit
const DenseBooleanRow & GetMarkedColumns() const
Definition: preprocessor.h:193
bool IsSmallerWithinTolerance(FloatType x, FloatType y, FloatType tolerance)
Definition: fp_utils.h:157
EntryIndex num_entries
void AddMultipleToSparseVectorAndDeleteCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void DeleteColumns(const DenseBooleanRow &columns_to_delete)
Definition: lp_data.cc:1065
iterator erase(const_iterator pos)
#define VLOG(verboselevel)
Definition: base/logging.h:979
std::vector< double > lower_bounds
const std::string name
const ColIndex kInvalidCol(-1)
void Scale(LinearProgram *lp, SparseMatrixScaler *scaler)
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:263
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
#define LOG(severity)
Definition: base/logging.h:416
#define RETURN_VALUE_IF_NULL(x, v)
Definition: return_macros.h:26
void RemoveNearZeroEntriesWithWeights(Fractional threshold, const DenseVector &weights)
void swap(StrongVector &x)
void MultiplyByConstant(Fractional factor)
void RecoverSolution(ProblemSolution *solution) const final
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:309
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:331
SingletonUndo(OperationType type, const LinearProgram &lp, MatrixEntry e, ConstraintStatus status)
bool IsIntegerWithinTolerance(FloatType x, FloatType tolerance)
Definition: fp_utils.h:165
RowIndex row
Definition: markowitz.cc:182
int64_t coefficient
void RecoverSolution(ProblemSolution *solution) const final
void assign(IntType size, const T &v)
Definition: lp_types.h:278
SparseMatrix * GetMutableTransposeSparseMatrix()
Definition: lp_data.cc:386
bool IsFinite(Fractional value)
Definition: lp_types.h:91
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
int64_t b
const DenseRow & objective_coefficients() const
Definition: lp_data.h:223
const DenseColumn & constraint_upper_bounds() const
Definition: lp_data.h:218
void RecoverSolution(ProblemSolution *solution) const final
Fractional SumWithoutLb(Fractional c) const
ReverseView< Container > reversed_view(const Container &c)
int64_t max
Definition: alldiff_cst.cc:140
#define SCOPED_INSTRUCTION_COUNT(time_limit)
Definition: stats.h:439
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
double upper_bound
void resize(size_type new_size)
void RecoverSolution(ProblemSolution *solution) const final
const DenseColumn & constraint_lower_bounds() const
Definition: lp_data.h:215
bool empty() const
static constexpr SolverBehavior LET_SOLVER_DECIDE
iterator insert(const_iterator pos, const value_type &x)
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:887
const SparseMatrix & GetSparseMatrix() const
Definition: lp_data.h:175
Fractional GetObjectiveCoefficientForMinimizationVersion(ColIndex col) const
Definition: lp_data.cc:419
double lower_bound
DenseColumn * mutable_constraint_lower_bounds()
Definition: lp_data.h:550
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
void DestructiveRecoverSolution(ProblemSolution *solution)
BeginEndReverseIteratorWrapper< Container > Reverse(const Container &c)
Definition: iterators.h:98
void RecoverSolution(ProblemSolution *solution) const final
const double kInfinity
Definition: lp_types.h:84
void push_back(const value_type &x)
int index
Definition: pack.cc:509
const SparseColumn & column(ColIndex col) const
Definition: sparse.h:181
::operations_research::glop::GlopParameters_SolverBehavior solve_dual_problem() const
Fractional target_bound
void FixConstraintWithFixedStatuses(const DenseColumn &row_lower_bounds, const DenseColumn &row_upper_bounds, ProblemSolution *solution)
SparseColumn * GetMutableSparseColumn(ColIndex col)
Definition: lp_data.cc:413
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:890
void RecoverSolution(ProblemSolution *solution) const final
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:698
const RowIndex kInvalidRow(-1)
int64_t delta
Definition: resource.cc:1692
size_type size() const
ConstraintStatusColumn constraint_statuses
Definition: lp_data.h:686
int64_t cost
::operations_research::glop::GlopParameters_CostScalingAlgorithm cost_scaling() const
void ScaleRowVector(bool up, DenseRow *row_vector) const
#define DCHECK(condition)
Definition: base/logging.h:885
const DenseRow & variable_upper_bounds() const
Definition: lp_data.h:232
void MarkColumnForDeletionWithState(ColIndex col, Fractional value, VariableStatus status)
ColIndex representative
ColMapping FindProportionalColumns(const SparseMatrix &matrix, Fractional tolerance)
const DenseRow & variable_lower_bounds() const
Definition: lp_data.h:229
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:886
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:249
void RecoverSolution(ProblemSolution *solution) const final
SparseColumn * mutable_column(ColIndex col)
Definition: sparse.h:182
const SparseColumn & SavedColumn(ColIndex col) const
void RecoverSolution(ProblemSolution *solution) const final
void DeleteRows(const DenseBooleanColumn &rows_to_delete)
Definition: lp_data.cc:1258
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
const DenseBooleanColumn & GetMarkedRows() const
void RecoverSolution(ProblemSolution *solution) const final
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:888
void SetMaximizationProblem(bool maximize)
Definition: lp_data.cc:343
DenseColumn * mutable_constraint_upper_bounds()
Definition: lp_data.h:553
void RestoreDeletedRows(ProblemSolution *solution) const
Collection of objects used to extend the Constraint Solver library.
ProblemStatus ChangeStatusToDualStatus(ProblemStatus status) const
const SparseColumn & SavedOrEmptyColumn(ColIndex col) const
SatParameters parameters
Fractional ScalarProduct(const DenseRowOrColumn1 &u, const DenseRowOrColumn2 &v)
bool IsSmallerWithinPreprocessorZeroTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:84
std::vector< double > upper_bounds
void SetObjectiveScalingFactor(Fractional objective_scaling_factor)
Definition: lp_data.cc:336
::operations_research::glop::GlopParameters_ScalingAlgorithm scaling_method() const
Fractional ScaleObjective(GlopParameters::CostScalingAlgorithm method)
Definition: lp_data.cc:1188
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const override
#define VLOG_IS_ON(verboselevel)
Definition: vlog_is_on.h:41
Fractional scaled_cost
void Undo(const GlopParameters &parameters, const SparseColumn &saved_column, const SparseColumn &saved_row, ProblemSolution *solution) const
void RecoverSolution(ProblemSolution *solution) const final
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
Fractional SumWithoutUb(Fractional c) const
void RecoverSolution(ProblemSolution *solution) const final
bool IsSmallerWithinFeasibilityTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:80
void RestoreDeletedColumns(ProblemSolution *solution) const
int64_t value
void SaveColumn(ColIndex col, const SparseColumn &column)
Preprocessor(const GlopParameters *parameters)
Definition: preprocessor.cc:48
const GlopParameters & parameters_
Definition: preprocessor.h:92
void SaveColumnIfNotAlreadyDone(ColIndex col, const SparseColumn &column)
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:409
#define RUN_PREPROCESSOR(name)
Definition: preprocessor.cc:60
void PopulateFromDual(const LinearProgram &dual, RowToColMapping *duplicated_rows)
Definition: lp_data.cc:764
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
const int INFO
Definition: log_severity.h:31
void ScaleColumnVector(bool up, DenseColumn *column_vector) const
int64_t a
void AddSlackVariablesWhereNecessary(bool detect_integer_constraints)
Definition: lp_data.cc:697