OR-Tools 9.2
preprocessor.cc
Go to the documentation of this file.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <cstdint>
17 #include <limits>
18 
19 #include "absl/strings/str_format.h"
23 #include "ortools/glop/status.h"
28 
29 namespace operations_research {
30 namespace glop {
31 
33 
34 namespace {
35 // Returns an interval as an human readable string for debugging.
36 std::string IntervalString(Fractional lb, Fractional ub) {
37  return absl::StrFormat("[%g, %g]", lb, ub);
38 }
39 
#if defined(_MSC_VER)
// Portability shim: old MSVC runtimes lack trunc(). Rounds toward zero,
// i.e. floor() for positive inputs and ceil() otherwise.
double trunc(double d) {
  if (d > 0) {
    return floor(d);
  }
  return ceil(d);
}
#endif
43 } // namespace
44 
45 // --------------------------------------------------------
46 // Preprocessor
47 // --------------------------------------------------------
49  : status_(ProblemStatus::INIT),
50  parameters_(*parameters),
51  in_mip_context_(false),
52  infinite_time_limit_(TimeLimit::Infinite()),
53  time_limit_(infinite_time_limit_.get()) {}
55 
56 // --------------------------------------------------------
57 // MainLpPreprocessor
58 // --------------------------------------------------------
59 
// Instantiates the preprocessor class `name` with the solver parameters,
// runs it on `lp` and, when relevant, pushes it on the postsolve stack
// (see MainLpPreprocessor::RunAndPushIfRelevant()).
60 #define RUN_PREPROCESSOR(name) \
61  RunAndPushIfRelevant(std::unique_ptr<Preprocessor>(new name(&parameters_)), \
62  #name, time_limit_, lp)
63 
65  RETURN_VALUE_IF_NULL(lp, false);
66 
68  default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
69 
70  SOLVER_LOG(logger_, "");
71  SOLVER_LOG(logger_, "Starting presolve...");
72 
73  initial_num_rows_ = lp->num_constraints();
74  initial_num_cols_ = lp->num_variables();
75  initial_num_entries_ = lp->num_entries();
78 
79  // We run it a few times because running one preprocessor may allow another
80  // one to remove more stuff.
81  const int kMaxNumPasses = 20;
82  for (int i = 0; i < kMaxNumPasses; ++i) {
83  const int old_stack_size = preprocessors_.size();
92 
93  // Abort early if none of the preprocessors did something. Technically
94  // this is true if none of the preprocessors above needs postsolving,
95  // which has exactly the same meaning for these particular preprocessors.
96  if (preprocessors_.size() == old_stack_size) {
97  // We use i here because the last pass did nothing.
98  SOLVER_LOG(logger_, "Reached fixed point after presolve pass #", i);
99  break;
100  }
101  }
104 
105  // TODO(user): Run them in the loop above if the effect on the running time
106  // is good. This needs more investigation.
109 
110  // If DualizerPreprocessor was run, we need to do some extra preprocessing.
111  // This is because it currently adds a lot of zero-cost singleton columns.
112  const int old_stack_size = preprocessors_.size();
113 
114  // TODO(user): We probably want to scale the costs before and after this
115  // preprocessor so that the rhs/objective of the dual are with a good
116  // magnitude.
118  if (old_stack_size != preprocessors_.size()) {
124  }
125 
127  }
128 
129  // The scaling is controlled by use_scaling, not use_preprocessing.
131 
132  return !preprocessors_.empty();
133 }
134 
135 #undef RUN_PREPROCESSOR
136 
// Runs `preprocessor` on `lp`, logs its effect on the problem size, and
// keeps it on the postsolve stack (preprocessors_) when its Run()
// indicates that postsolving will be needed to recover a solution of the
// original problem.
137 void MainLpPreprocessor::RunAndPushIfRelevant(
138  std::unique_ptr<Preprocessor> preprocessor, const std::string& name,
140  RETURN_IF_NULL(preprocessor);
// Nothing to do once a definitive status was reached or time is out.
142  if (status_ != ProblemStatus::INIT || time_limit->LimitReached()) return;
143 
144  const double start_time = time_limit->GetElapsedTime();
145  preprocessor->SetTimeLimit(time_limit);
146 
147  // No need to run the preprocessor if the lp is empty.
148  // TODO(user): without this test, the code is failing as of 2013-03-18.
149  if (lp->num_variables() == 0 && lp->num_constraints() == 0) {
151  return;
152  }
153 
// Run() returns true when postsolving will be needed to undo this
// preprocessor's transformation of the lp.
154  if (preprocessor->Run(lp)) {
155  const EntryIndex new_num_entries = lp->num_entries();
156  const double preprocess_time = time_limit->GetElapsedTime() - start_time;
157  SOLVER_LOG(logger_,
158  absl::StrFormat(
159  "%-45s: %d(%d) rows, %d(%d) columns, %d(%d) entries. (%fs)",
160  name, lp->num_constraints().value(),
161  (lp->num_constraints() - initial_num_rows_).value(),
162  lp->num_variables().value(),
163  (lp->num_variables() - initial_num_cols_).value(),
164  // static_cast<int64_t> is needed because the Android port
165  // uses int32_t.
166  static_cast<int64_t>(new_num_entries.value()),
167  static_cast<int64_t>(new_num_entries.value() -
168  initial_num_entries_.value()),
169  preprocess_time));
170  status_ = preprocessor->status();
// Transfer ownership: the preprocessor holds the data that
// RecoverSolution() will need later.
171  preprocessors_.push_back(std::move(preprocessor));
172  return;
173  } else {
174  // Even if a preprocessor returns false (i.e. no need for postsolve), it
175  // can detect an issue with the problem.
176  status_ = preprocessor->status();
177  if (status_ != ProblemStatus::INIT) {
178  SOLVER_LOG(logger_, name, " detected that the problem is ",
180  }
181  }
182 }
183 
// Postsolve: applies each stored preprocessor's RecoverSolution() in the
// reverse of the order in which they were run.
186  for (const auto& p : gtl::reversed_view(preprocessors_)) {
187  p->RecoverSolution(solution);
188  }
189 }
190 
// Postsolve variant that frees memory eagerly: each preprocessor is
// destroyed right after its RecoverSolution() has been applied.
193  while (!preprocessors_.empty()) {
194  preprocessors_.back()->RecoverSolution(solution);
195  preprocessors_.pop_back();
196  }
197 }
198 
199 // --------------------------------------------------------
200 // ColumnDeletionHelper
201 // --------------------------------------------------------
202 
203 void ColumnsSaver::SaveColumn(ColIndex col, const SparseColumn& column) {
204  const int index = saved_columns_.size();
205  CHECK(saved_columns_index_.insert({col, index}).second);
206  saved_columns_.push_back(column);
207 }
208 
// Saves `column` under `col` like SaveColumn(), except that a column
// already saved for `col` is silently kept (no CHECK failure).
210  const SparseColumn& column) {
211  const int index = saved_columns_.size();
212  const bool inserted = saved_columns_index_.insert({col, index}).second;
213  if (inserted) saved_columns_.push_back(column);
214 }
215 
217  const auto it = saved_columns_index_.find(col);
218  CHECK(it != saved_columns_index_.end());
219  return saved_columns_[it->second];
220 }
221 
223  const auto it = saved_columns_index_.find(col);
224  return it == saved_columns_index_.end() ? empty_column_
225  : saved_columns_[it->second];
226 }
227 
229  is_column_deleted_.clear();
230  stored_value_.clear();
231 }
232 
235 }
236 
// Marks `col` for deletion and records the (value, status) pair that
// postsolve must restore for it. The bookkeeping vectors grow lazily to
// accommodate the largest marked column index.
238  ColIndex col, Fractional fixed_value, VariableStatus status) {
239  DCHECK_GE(col, 0);
240  if (col >= is_column_deleted_.size()) {
241  is_column_deleted_.resize(col + 1, false);
242  stored_value_.resize(col + 1, 0.0);
243  stored_status_.resize(col + 1, VariableStatus::FREE);
244  }
245  is_column_deleted_[col] = true;
246  stored_value_[col] = fixed_value;
247  stored_status_[col] = status;
248 }
249 
// Rebuilds solution->primal_values and solution->variable_statuses in the
// original column order: a deleted column gets back its stored value and
// status, while surviving entries are shifted to their original positions.
251  ProblemSolution* solution) const {
252  DenseRow new_primal_values;
253  VariableStatusRow new_variable_statuses;
254  ColIndex old_index(0);
255  for (ColIndex col(0); col < is_column_deleted_.size(); ++col) {
256  if (is_column_deleted_[col]) {
257  new_primal_values.push_back(stored_value_[col]);
258  new_variable_statuses.push_back(stored_status_[col]);
259  } else {
260  new_primal_values.push_back(solution->primal_values[old_index]);
261  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
262  ++old_index;
263  }
264  }
265 
266  // Copy the end of the vectors and swap them with the ones in solution.
267  const ColIndex num_cols = solution->primal_values.size();
268  DCHECK_EQ(num_cols, solution->variable_statuses.size());
269  for (; old_index < num_cols; ++old_index) {
270  new_primal_values.push_back(solution->primal_values[old_index]);
271  new_variable_statuses.push_back(solution->variable_statuses[old_index]);
272  }
273  new_primal_values.swap(solution->primal_values);
274  new_variable_statuses.swap(solution->variable_statuses);
275 }
276 
277 // --------------------------------------------------------
278 // RowDeletionHelper
279 // --------------------------------------------------------
280 
281 void RowDeletionHelper::Clear() { is_row_deleted_.clear(); }
282 
284  DCHECK_GE(row, 0);
285  if (row >= is_row_deleted_.size()) {
286  is_row_deleted_.resize(row + 1, false);
287  }
288  is_row_deleted_[row] = true;
289 }
290 
292  if (row >= is_row_deleted_.size()) return;
293  is_row_deleted_[row] = false;
294 }
295 
297  return is_row_deleted_;
298 }
299 
// Rebuilds solution->dual_values and solution->constraint_statuses in the
// original row order. A deleted row reappears with a 0.0 dual value and a
// BASIC constraint status; surviving rows keep their current values,
// shifted back to their original indices.
301  DenseColumn new_dual_values;
302  ConstraintStatusColumn new_constraint_statuses;
303  RowIndex old_index(0);
304  const RowIndex end = is_row_deleted_.size();
305  for (RowIndex row(0); row < end; ++row) {
306  if (is_row_deleted_[row]) {
307  new_dual_values.push_back(0.0);
308  new_constraint_statuses.push_back(ConstraintStatus::BASIC);
309  } else {
310  new_dual_values.push_back(solution->dual_values[old_index]);
311  new_constraint_statuses.push_back(
312  solution->constraint_statuses[old_index]);
313  ++old_index;
314  }
315  }
316 
317  // Copy the end of the vectors and swap them with the ones in solution.
318  const RowIndex num_rows = solution->dual_values.size();
319  DCHECK_EQ(num_rows, solution->constraint_statuses.size());
320  for (; old_index < num_rows; ++old_index) {
321  new_dual_values.push_back(solution->dual_values[old_index]);
322  new_constraint_statuses.push_back(solution->constraint_statuses[old_index]);
323  }
324  new_dual_values.swap(solution->dual_values);
325  new_constraint_statuses.swap(solution->constraint_statuses);
326 }
327 
328 // --------------------------------------------------------
329 // EmptyColumnPreprocessor
330 // --------------------------------------------------------
331 
332 namespace {
333 
334 // Computes the status of a variable given its value and bounds. This only works
335 // with a value exactly at one of the bounds, or a value of 0.0 for free
336 // variables.
337 VariableStatus ComputeVariableStatus(Fractional value, Fractional lower_bound,
339  if (lower_bound == upper_bound) {
343  }
344  if (value == lower_bound) {
347  }
348  if (value == upper_bound) {
351  }
352 
353  // TODO(user): restrict this to unbounded variables with a value of zero.
354  // We can't do that when postsolving infeasible problem. Don't call postsolve
355  // on an infeasible problem?
356  return VariableStatus::FREE;
357 }
358 
359 // Returns the input with the smallest magnitude or zero if both are infinite.
360 Fractional MinInMagnitudeOrZeroIfInfinite(Fractional a, Fractional b) {
361  const Fractional value = std::abs(a) < std::abs(b) ? a : b;
362  return IsFinite(value) ? value : 0.0;
363 }
364 
365 Fractional MagnitudeOrZeroIfInfinite(Fractional value) {
366  return IsFinite(value) ? std::abs(value) : 0.0;
367 }
368 
369 // Returns the maximum magnitude of the finite variable bounds of the given
370 // linear program.
371 Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram& lp) {
372  Fractional max_bounds_magnitude = 0.0;
373  const ColIndex num_cols = lp.num_variables();
374  for (ColIndex col(0); col < num_cols; ++col) {
375  max_bounds_magnitude = std::max(
376  max_bounds_magnitude,
377  std::max(MagnitudeOrZeroIfInfinite(lp.variable_lower_bounds()[col]),
378  MagnitudeOrZeroIfInfinite(lp.variable_upper_bounds()[col])));
379  }
380  return max_bounds_magnitude;
381 }
382 
383 } // namespace
384 
387  RETURN_VALUE_IF_NULL(lp, false);
388  column_deletion_helper_.Clear();
389  const ColIndex num_cols = lp->num_variables();
390  for (ColIndex col(0); col < num_cols; ++col) {
391  if (lp->GetSparseColumn(col).IsEmpty()) {
394  const Fractional objective_coefficient =
397  if (objective_coefficient == 0) {
398  // Any feasible value will do.
399  if (upper_bound != kInfinity) {
400  value = upper_bound;
401  } else {
402  if (lower_bound != -kInfinity) {
403  value = lower_bound;
404  } else {
405  value = Fractional(0.0);
406  }
407  }
408  } else {
409  value = objective_coefficient > 0 ? lower_bound : upper_bound;
410  if (!IsFinite(value)) {
411  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, empty column " << col
412  << " has a minimization cost of " << objective_coefficient
413  << " and bounds"
414  << " [" << lower_bound << "," << upper_bound << "]";
416  return false;
417  }
419  value * lp->objective_coefficients()[col]);
420  }
421  column_deletion_helper_.MarkColumnForDeletionWithState(
422  col, value, ComputeVariableStatus(value, lower_bound, upper_bound));
423  }
424  }
425  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
426  return !column_deletion_helper_.IsEmpty();
427 }
428 
431  RETURN_IF_NULL(solution);
432  column_deletion_helper_.RestoreDeletedColumns(solution);
433 }
434 
435 // --------------------------------------------------------
436 // ProportionalColumnPreprocessor
437 // --------------------------------------------------------
438 
439 namespace {
440 
441 // Subtracts 'multiple' times the column col of the given linear program from
442 // the constraint bounds. That is, for a non-zero entry of coefficient c,
443 // c * multiple is subtracted from both the constraint upper and lower bound.
444 void SubtractColumnMultipleFromConstraintBound(ColIndex col,
445  Fractional multiple,
446  LinearProgram* lp) {
449  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
450  const RowIndex row = e.row();
451  const Fractional delta = multiple * e.coefficient();
452  (*lbs)[row] -= delta;
453  (*ubs)[row] -= delta;
454  }
455  // While not needed for correctness, this allows the presolved problem to
456  // have the same objective value as the original one.
458  lp->objective_coefficients()[col] * multiple);
459 }
460 
461 // Struct used to detect proportional columns with the same cost. For that, a
462 // vector of such struct will be sorted, and only the columns that end up
463 // together need to be compared.
464 struct ColumnWithRepresentativeAndScaledCost {
465  ColumnWithRepresentativeAndScaledCost(ColIndex _col, ColIndex _representative,
466  Fractional _scaled_cost)
467  : col(_col), representative(_representative), scaled_cost(_scaled_cost) {}
468  ColIndex col;
469  ColIndex representative;
471 
472  bool operator<(const ColumnWithRepresentativeAndScaledCost& other) const {
473  if (representative == other.representative) {
474  if (scaled_cost == other.scaled_cost) {
475  return col < other.col;
476  }
477  return scaled_cost < other.scaled_cost;
478  }
479  return representative < other.representative;
480  }
481 };
482 
483 } // namespace
484 
487  RETURN_VALUE_IF_NULL(lp, false);
490 
491  // Compute some statistics and make each class representative point to itself
492  // in the mapping. Also store the columns that are proportional to at least
493  // another column in proportional_columns to iterate on them more efficiently.
494  //
495  // TODO(user): Change FindProportionalColumns for this?
496  int num_proportionality_classes = 0;
497  std::vector<ColIndex> proportional_columns;
498  for (ColIndex col(0); col < mapping.size(); ++col) {
499  const ColIndex representative = mapping[col];
500  if (representative != kInvalidCol) {
501  if (mapping[representative] == kInvalidCol) {
502  proportional_columns.push_back(representative);
503  ++num_proportionality_classes;
504  mapping[representative] = representative;
505  }
506  proportional_columns.push_back(col);
507  }
508  }
509  if (proportional_columns.empty()) return false;
510  VLOG(1) << "The problem contains " << proportional_columns.size()
511  << " columns which belong to " << num_proportionality_classes
512  << " proportionality classes.";
513 
514  // Note(user): using the first coefficient may not give the best precision.
515  const ColIndex num_cols = lp->num_variables();
516  column_factors_.assign(num_cols, 0.0);
517  for (const ColIndex col : proportional_columns) {
518  const SparseColumn& column = lp->GetSparseColumn(col);
519  column_factors_[col] = column.GetFirstCoefficient();
520  }
521 
522  // This is only meaningful for column representative.
523  //
524  // The reduced cost of a column is 'cost - dual_values.column' and we know
525  // that for all proportional columns, 'dual_values.column /
526  // column_factors_[col]' is the same. Here, we bound this quantity which is
527  // related to the cost 'slope' of a proportional column:
528  // cost / column_factors_[col].
529  DenseRow slope_lower_bound(num_cols, -kInfinity);
530  DenseRow slope_upper_bound(num_cols, +kInfinity);
531  for (const ColIndex col : proportional_columns) {
532  const ColIndex representative = mapping[col];
533 
534  // We reason in terms of a minimization problem here.
535  const bool is_rc_positive_or_zero =
536  (lp->variable_upper_bounds()[col] == kInfinity);
537  const bool is_rc_negative_or_zero =
538  (lp->variable_lower_bounds()[col] == -kInfinity);
539  bool is_slope_upper_bounded = is_rc_positive_or_zero;
540  bool is_slope_lower_bounded = is_rc_negative_or_zero;
541  if (column_factors_[col] < 0.0) {
542  std::swap(is_slope_lower_bounded, is_slope_upper_bounded);
543  }
544  const Fractional slope =
546  column_factors_[col];
547  if (is_slope_lower_bounded) {
548  slope_lower_bound[representative] =
549  std::max(slope_lower_bound[representative], slope);
550  }
551  if (is_slope_upper_bounded) {
552  slope_upper_bound[representative] =
553  std::min(slope_upper_bound[representative], slope);
554  }
555  }
556 
557  // Deal with empty slope intervals.
558  for (const ColIndex col : proportional_columns) {
559  const ColIndex representative = mapping[col];
560 
561  // This is only needed for class representative columns.
562  if (representative == col) {
564  slope_lower_bound[representative],
565  slope_upper_bound[representative])) {
566  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, no feasible dual values"
567  << " can satisfy the constraints of the proportional columns"
568  << " with representative " << representative << "."
569  << " the associated quantity must be in ["
570  << slope_lower_bound[representative] << ","
571  << slope_upper_bound[representative] << "].";
573  return false;
574  }
575  }
576  }
577 
578  // Now, fix the columns that can be fixed to one of their bounds.
579  for (const ColIndex col : proportional_columns) {
580  const ColIndex representative = mapping[col];
581  const Fractional slope =
583  column_factors_[col];
584 
585  // The scaled reduced cost is slope - quantity.
586  bool variable_can_be_fixed = false;
587  Fractional target_bound = 0.0;
588 
591  if (!IsSmallerWithinFeasibilityTolerance(slope_lower_bound[representative],
592  slope)) {
593  // The scaled reduced cost is < 0.
594  variable_can_be_fixed = true;
595  target_bound = (column_factors_[col] >= 0.0) ? upper_bound : lower_bound;
597  slope, slope_upper_bound[representative])) {
598  // The scaled reduced cost is > 0.
599  variable_can_be_fixed = true;
600  target_bound = (column_factors_[col] >= 0.0) ? lower_bound : upper_bound;
601  }
602 
603  if (variable_can_be_fixed) {
604  // Clear mapping[col] so this column will not be considered for the next
605  // stage.
606  mapping[col] = kInvalidCol;
607  if (!IsFinite(target_bound)) {
608  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED.";
610  return false;
611  } else {
612  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
613  column_deletion_helper_.MarkColumnForDeletionWithState(
614  col, target_bound,
615  ComputeVariableStatus(target_bound, lower_bound, upper_bound));
616  }
617  }
618  }
619 
620  // Merge the variables with the same scaled cost.
621  std::vector<ColumnWithRepresentativeAndScaledCost> sorted_columns;
622  for (const ColIndex col : proportional_columns) {
623  const ColIndex representative = mapping[col];
624 
625  // This test is needed because we already removed some columns.
626  if (mapping[col] != kInvalidCol) {
627  sorted_columns.push_back(ColumnWithRepresentativeAndScaledCost(
629  lp->objective_coefficients()[col] / column_factors_[col]));
630  }
631  }
632  std::sort(sorted_columns.begin(), sorted_columns.end());
633 
634  // All this will be needed during postsolve.
635  merged_columns_.assign(num_cols, kInvalidCol);
636  lower_bounds_.assign(num_cols, -kInfinity);
637  upper_bounds_.assign(num_cols, kInfinity);
638  new_lower_bounds_.assign(num_cols, -kInfinity);
639  new_upper_bounds_.assign(num_cols, kInfinity);
640 
641  for (int i = 0; i < sorted_columns.size();) {
642  const ColIndex target_col = sorted_columns[i].col;
643  const ColIndex target_representative = sorted_columns[i].representative;
644  const Fractional target_scaled_cost = sorted_columns[i].scaled_cost;
645 
646  // Save the initial bounds before modifying them.
647  lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
648  upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
649 
650  int num_merged = 0;
651  for (++i; i < sorted_columns.size(); ++i) {
652  if (sorted_columns[i].representative != target_representative) break;
653  if (std::abs(sorted_columns[i].scaled_cost - target_scaled_cost) >=
655  break;
656  }
657  ++num_merged;
658  const ColIndex col = sorted_columns[i].col;
661  lower_bounds_[col] = lower_bound;
662  upper_bounds_[col] = upper_bound;
663  merged_columns_[col] = target_col;
664 
665  // This is a bit counter intuitive, but when a column is divided by x,
666  // the corresponding bounds have to be multiplied by x.
667  const Fractional bound_factor =
668  column_factors_[col] / column_factors_[target_col];
669 
670  // We need to shift the variable so that a basic solution of the new
671  // problem can easily be converted to a basic solution of the original
672  // problem.
673 
674  // A feasible value for the variable must be chosen, and the variable must
675  // be shifted by this value. This is done to make sure that it will be
676  // possible to recreate a basic solution of the original problem from a
677  // basic solution of the pre-solved problem during post-solve.
678  const Fractional target_value =
679  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
680  Fractional lower_diff = (lower_bound - target_value) * bound_factor;
681  Fractional upper_diff = (upper_bound - target_value) * bound_factor;
682  if (bound_factor < 0.0) {
683  std::swap(lower_diff, upper_diff);
684  }
685  lp->SetVariableBounds(
686  target_col, lp->variable_lower_bounds()[target_col] + lower_diff,
687  lp->variable_upper_bounds()[target_col] + upper_diff);
688  SubtractColumnMultipleFromConstraintBound(col, target_value, lp);
689  column_deletion_helper_.MarkColumnForDeletionWithState(
690  col, target_value,
691  ComputeVariableStatus(target_value, lower_bound, upper_bound));
692  }
693 
694  // If at least one column was merged, the target column must be shifted like
695  // the other columns in the same equivalence class for the same reason (see
696  // above).
697  if (num_merged > 0) {
698  merged_columns_[target_col] = target_col;
699  const Fractional target_value = MinInMagnitudeOrZeroIfInfinite(
700  lower_bounds_[target_col], upper_bounds_[target_col]);
701  lp->SetVariableBounds(
702  target_col, lp->variable_lower_bounds()[target_col] - target_value,
703  lp->variable_upper_bounds()[target_col] - target_value);
704  SubtractColumnMultipleFromConstraintBound(target_col, target_value, lp);
705  new_lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
706  new_upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
707  }
708  }
709 
710  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
711  return !column_deletion_helper_.IsEmpty();
712 }
713 
715  ProblemSolution* solution) const {
717  RETURN_IF_NULL(solution);
718  column_deletion_helper_.RestoreDeletedColumns(solution);
719 
720  // The rest of this function is to unmerge the columns so that the solution be
721  // primal-feasible.
722  const ColIndex num_cols = merged_columns_.size();
723  DenseBooleanRow is_representative_basic(num_cols, false);
724  DenseBooleanRow is_distance_to_upper_bound(num_cols, false);
725  DenseRow distance_to_bound(num_cols, 0.0);
726  DenseRow wanted_value(num_cols, 0.0);
727 
728  // The first pass is a loop over the representatives to compute the current
729  // distance to the new bounds.
730  for (ColIndex col(0); col < num_cols; ++col) {
731  if (merged_columns_[col] == col) {
732  const Fractional value = solution->primal_values[col];
733  const Fractional distance_to_upper_bound = new_upper_bounds_[col] - value;
734  const Fractional distance_to_lower_bound = value - new_lower_bounds_[col];
735  if (distance_to_upper_bound < distance_to_lower_bound) {
736  distance_to_bound[col] = distance_to_upper_bound;
737  is_distance_to_upper_bound[col] = true;
738  } else {
739  distance_to_bound[col] = distance_to_lower_bound;
740  is_distance_to_upper_bound[col] = false;
741  }
742  is_representative_basic[col] =
744 
745  // Restore the representative value to a feasible value of the initial
746  // variable. Now all the merged variable are at a feasible value.
747  wanted_value[col] = value;
748  solution->primal_values[col] = MinInMagnitudeOrZeroIfInfinite(
749  lower_bounds_[col], upper_bounds_[col]);
750  solution->variable_statuses[col] = ComputeVariableStatus(
751  solution->primal_values[col], lower_bounds_[col], upper_bounds_[col]);
752  }
753  }
754 
755  // Second pass to correct the values.
756  for (ColIndex col(0); col < num_cols; ++col) {
757  const ColIndex representative = merged_columns_[col];
758  if (representative != kInvalidCol) {
759  if (IsFinite(distance_to_bound[representative])) {
760  // If the distance is finite, then each variable is set to its
761  // corresponding bound (the one from which the distance is computed) and
762  // is then changed by as much as possible until the distance is zero.
763  const Fractional bound_factor =
764  column_factors_[col] / column_factors_[representative];
765  const Fractional scaled_distance =
766  distance_to_bound[representative] / std::abs(bound_factor);
767  const Fractional width = upper_bounds_[col] - lower_bounds_[col];
768  const bool to_upper_bound =
769  (bound_factor > 0.0) == is_distance_to_upper_bound[representative];
770  if (width <= scaled_distance) {
771  solution->primal_values[col] =
772  to_upper_bound ? lower_bounds_[col] : upper_bounds_[col];
773  solution->variable_statuses[col] =
774  ComputeVariableStatus(solution->primal_values[col],
775  lower_bounds_[col], upper_bounds_[col]);
776  distance_to_bound[representative] -= width * std::abs(bound_factor);
777  } else {
778  solution->primal_values[col] =
779  to_upper_bound ? upper_bounds_[col] - scaled_distance
780  : lower_bounds_[col] + scaled_distance;
781  solution->variable_statuses[col] =
782  is_representative_basic[representative]
784  : ComputeVariableStatus(solution->primal_values[col],
785  lower_bounds_[col],
786  upper_bounds_[col]);
787  distance_to_bound[representative] = 0.0;
788  is_representative_basic[representative] = false;
789  }
790  } else {
791  // If the distance is not finite, then only one variable needs to be
792  // changed from its current feasible value in order to have a
793  // primal-feasible solution.
794  const Fractional error = wanted_value[representative];
795  if (error == 0.0) {
796  if (is_representative_basic[representative]) {
798  is_representative_basic[representative] = false;
799  }
800  } else {
801  const Fractional bound_factor =
802  column_factors_[col] / column_factors_[representative];
803  const bool use_this_variable =
804  (error * bound_factor > 0.0) ? (upper_bounds_[col] == kInfinity)
805  : (lower_bounds_[col] == -kInfinity);
806  if (use_this_variable) {
807  wanted_value[representative] = 0.0;
808  solution->primal_values[col] += error / bound_factor;
809  if (is_representative_basic[representative]) {
811  is_representative_basic[representative] = false;
812  } else {
813  // This should not happen on an OPTIMAL or FEASIBLE solution.
814  DCHECK(solution->status != ProblemStatus::OPTIMAL &&
817  }
818  }
819  }
820  }
821  }
822  }
823 }
824 
825 // --------------------------------------------------------
826 // ProportionalRowPreprocessor
827 // --------------------------------------------------------
828 
831  RETURN_VALUE_IF_NULL(lp, false);
832  const RowIndex num_rows = lp->num_constraints();
833  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
834 
835  // Use the first coefficient of each row to compute the proportionality
836  // factor. Note that the sign is important.
837  //
838  // Note(user): using the first coefficient may not give the best precision.
839  row_factors_.assign(num_rows, 0.0);
840  for (RowIndex row(0); row < num_rows; ++row) {
841  const SparseColumn& row_transpose = transpose.column(RowToColIndex(row));
842  if (!row_transpose.IsEmpty()) {
843  row_factors_[row] = row_transpose.GetFirstCoefficient();
844  }
845  }
846 
847  // The new row bounds (only meaningful for the proportional rows).
848  DenseColumn lower_bounds(num_rows, -kInfinity);
849  DenseColumn upper_bounds(num_rows, +kInfinity);
850 
851  // Where the new bounds are coming from. Only for the constraints that stay
852  // in the lp and are modified, kInvalidRow otherwise.
853  upper_bound_sources_.assign(num_rows, kInvalidRow);
854  lower_bound_sources_.assign(num_rows, kInvalidRow);
855 
856  // Initialization.
857  // We need the first representative of each proportional row class to point to
858  // itself for the loop below. TODO(user): Already return such a mapping from
859  // FindProportionalColumns()?
862  DenseBooleanColumn is_a_representative(num_rows, false);
863  int num_proportional_rows = 0;
864  for (RowIndex row(0); row < num_rows; ++row) {
865  const ColIndex representative_row_as_col = mapping[RowToColIndex(row)];
866  if (representative_row_as_col != kInvalidCol) {
867  mapping[representative_row_as_col] = representative_row_as_col;
868  is_a_representative[ColToRowIndex(representative_row_as_col)] = true;
869  ++num_proportional_rows;
870  }
871  }
872 
873  // Compute the bound of each representative as implied by the rows
874  // which are proportional to it. Also keep the source row of each bound.
875  for (RowIndex row(0); row < num_rows; ++row) {
876  const ColIndex row_as_col = RowToColIndex(row);
877  if (mapping[row_as_col] != kInvalidCol) {
878  // For now, delete all the rows that are proportional to another one.
879  // Note that we will unmark the final representative of this class later.
880  row_deletion_helper_.MarkRowForDeletion(row);
881  const RowIndex representative_row = ColToRowIndex(mapping[row_as_col]);
882 
883  const Fractional factor =
884  row_factors_[representative_row] / row_factors_[row];
885  Fractional implied_lb = factor * lp->constraint_lower_bounds()[row];
886  Fractional implied_ub = factor * lp->constraint_upper_bounds()[row];
887  if (factor < 0.0) {
888  std::swap(implied_lb, implied_ub);
889  }
890 
891  // TODO(user): if the bounds are equal, use the largest row in magnitude?
892  if (implied_lb >= lower_bounds[representative_row]) {
893  lower_bounds[representative_row] = implied_lb;
894  lower_bound_sources_[representative_row] = row;
895  }
896  if (implied_ub <= upper_bounds[representative_row]) {
897  upper_bounds[representative_row] = implied_ub;
898  upper_bound_sources_[representative_row] = row;
899  }
900  }
901  }
902 
903  // For maximum precision, and also to simplify the postsolve, we choose
904  // a representative for each class of proportional columns that has at least
905  // one of the two tightest bounds.
906  for (RowIndex row(0); row < num_rows; ++row) {
907  if (!is_a_representative[row]) continue;
908  const RowIndex lower_source = lower_bound_sources_[row];
909  const RowIndex upper_source = upper_bound_sources_[row];
910  lower_bound_sources_[row] = kInvalidRow;
911  upper_bound_sources_[row] = kInvalidRow;
912  DCHECK_NE(lower_source, kInvalidRow);
913  DCHECK_NE(upper_source, kInvalidRow);
914  if (lower_source == upper_source) {
915  // In this case, a simple change of representative is enough.
916  // The constraint bounds of the representative will not change.
917  DCHECK_NE(lower_source, kInvalidRow);
918  row_deletion_helper_.UnmarkRow(lower_source);
919  } else {
920  // Report ProblemStatus::PRIMAL_INFEASIBLE if the new lower bound is not
921  // lower than the new upper bound modulo the default tolerance.
923  upper_bounds[row])) {
925  return false;
926  }
927 
928  // Special case for fixed rows.
929  if (lp->constraint_lower_bounds()[lower_source] ==
930  lp->constraint_upper_bounds()[lower_source]) {
931  row_deletion_helper_.UnmarkRow(lower_source);
932  continue;
933  }
934  if (lp->constraint_lower_bounds()[upper_source] ==
935  lp->constraint_upper_bounds()[upper_source]) {
936  row_deletion_helper_.UnmarkRow(upper_source);
937  continue;
938  }
939 
940  // This is the only case where a more complex postsolve is needed.
941  // To maximize precision, the class representative is changed to either
942  // upper_source or lower_source depending of which row has the largest
943  // proportionality factor.
944  RowIndex new_representative = lower_source;
945  RowIndex other = upper_source;
946  if (std::abs(row_factors_[new_representative]) <
947  std::abs(row_factors_[other])) {
948  std::swap(new_representative, other);
949  }
950 
951  // Initialize the new bounds with the implied ones.
952  const Fractional factor =
953  row_factors_[new_representative] / row_factors_[other];
954  Fractional new_lb = factor * lp->constraint_lower_bounds()[other];
955  Fractional new_ub = factor * lp->constraint_upper_bounds()[other];
956  if (factor < 0.0) {
957  std::swap(new_lb, new_ub);
958  }
959 
960  lower_bound_sources_[new_representative] = new_representative;
961  upper_bound_sources_[new_representative] = new_representative;
962 
963  if (new_lb > lp->constraint_lower_bounds()[new_representative]) {
964  lower_bound_sources_[new_representative] = other;
965  } else {
966  new_lb = lp->constraint_lower_bounds()[new_representative];
967  }
968  if (new_ub < lp->constraint_upper_bounds()[new_representative]) {
969  upper_bound_sources_[new_representative] = other;
970  } else {
971  new_ub = lp->constraint_upper_bounds()[new_representative];
972  }
973  const RowIndex new_lower_source =
974  lower_bound_sources_[new_representative];
975  if (new_lower_source == upper_bound_sources_[new_representative]) {
976  row_deletion_helper_.UnmarkRow(new_lower_source);
977  lower_bound_sources_[new_representative] = kInvalidRow;
978  upper_bound_sources_[new_representative] = kInvalidRow;
979  continue;
980  }
981 
982  // Take care of small numerical imprecision by making sure that lb <= ub.
983  // Note that if the imprecision was greater than the tolerance, the code
984  // at the beginning of this block would have reported
985  // ProblemStatus::PRIMAL_INFEASIBLE.
987  if (new_lb > new_ub) {
988  if (lower_bound_sources_[new_representative] == new_representative) {
989  new_ub = lp->constraint_lower_bounds()[new_representative];
990  } else {
991  new_lb = lp->constraint_upper_bounds()[new_representative];
992  }
993  }
994  row_deletion_helper_.UnmarkRow(new_representative);
995  lp->SetConstraintBounds(new_representative, new_lb, new_ub);
996  }
997  }
998 
999  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
1000  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1001  return !row_deletion_helper_.IsEmpty();
1002 }
1003 
1005  ProblemSolution* solution) const {
1007  RETURN_IF_NULL(solution);
1008  row_deletion_helper_.RestoreDeletedRows(solution);
1009 
1010  // Make sure that all non-zero dual values on the proportional rows are
1011  // assigned to the correct row with the correct sign and that the statuses
1012  // are correct.
1013  const RowIndex num_rows = solution->dual_values.size();
1014  for (RowIndex row(0); row < num_rows; ++row) {
1015  const RowIndex lower_source = lower_bound_sources_[row];
1016  const RowIndex upper_source = upper_bound_sources_[row];
1017  if (lower_source == kInvalidRow && upper_source == kInvalidRow) continue;
1018  DCHECK_NE(lower_source, upper_source);
1019  DCHECK(lower_source == row || upper_source == row);
1020 
1021  // If the representative is ConstraintStatus::BASIC, then all rows in this
1022  // class will be ConstraintStatus::BASIC and there is nothing to do.
1024  if (status == ConstraintStatus::BASIC) continue;
1025 
1026  // If the row is FIXED it will behave as a row
1027  // ConstraintStatus::AT_UPPER_BOUND or
1028  // ConstraintStatus::AT_LOWER_BOUND depending on the corresponding dual
1029  // variable sign.
1031  const Fractional corrected_dual_value = lp_is_maximization_problem_
1032  ? -solution->dual_values[row]
1033  : solution->dual_values[row];
1034  if (corrected_dual_value != 0.0) {
1035  status = corrected_dual_value > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1037  }
1038  }
1039 
1040  // If one of the two conditions below are true, set the row status to
1041  // ConstraintStatus::BASIC.
1042  // Note that the source which is not row can't be FIXED (see presolve).
1043  if (lower_source != row && status == ConstraintStatus::AT_LOWER_BOUND) {
1044  DCHECK_EQ(0.0, solution->dual_values[lower_source]);
1045  const Fractional factor = row_factors_[row] / row_factors_[lower_source];
1046  solution->dual_values[lower_source] = factor * solution->dual_values[row];
1047  solution->dual_values[row] = 0.0;
1049  solution->constraint_statuses[lower_source] =
1050  factor > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1052  }
1053  if (upper_source != row && status == ConstraintStatus::AT_UPPER_BOUND) {
1054  DCHECK_EQ(0.0, solution->dual_values[upper_source]);
1055  const Fractional factor = row_factors_[row] / row_factors_[upper_source];
1056  solution->dual_values[upper_source] = factor * solution->dual_values[row];
1057  solution->dual_values[row] = 0.0;
1059  solution->constraint_statuses[upper_source] =
1060  factor > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1062  }
1063 
1064  // If the row status is still ConstraintStatus::FIXED_VALUE, we need to
1065  // relax its status.
1067  solution->constraint_statuses[row] =
1068  lower_source != row ? ConstraintStatus::AT_UPPER_BOUND
1070  }
1071  }
1072 }
1073 
1074 // --------------------------------------------------------
1075 // FixedVariablePreprocessor
1076 // --------------------------------------------------------
1077 
1080  RETURN_VALUE_IF_NULL(lp, false);
1081  const ColIndex num_cols = lp->num_variables();
1082  for (ColIndex col(0); col < num_cols; ++col) {
1085  if (lower_bound == upper_bound) {
1086  const Fractional fixed_value = lower_bound;
1087  DCHECK(IsFinite(fixed_value));
1088 
1089  // We need to change the constraint bounds.
1090  SubtractColumnMultipleFromConstraintBound(col, fixed_value, lp);
1091  column_deletion_helper_.MarkColumnForDeletionWithState(
1092  col, fixed_value, VariableStatus::FIXED_VALUE);
1093  }
1094  }
1095 
1096  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1097  return !column_deletion_helper_.IsEmpty();
1098 }
1099 
1101  ProblemSolution* solution) const {
1103  RETURN_IF_NULL(solution);
1104  column_deletion_helper_.RestoreDeletedColumns(solution);
1105 }
1106 
1107 // --------------------------------------------------------
1108 // ForcingAndImpliedFreeConstraintPreprocessor
1109 // --------------------------------------------------------
1110 
1113  RETURN_VALUE_IF_NULL(lp, false);
1114  const RowIndex num_rows = lp->num_constraints();
1115 
1116  // Compute the implied constraint bounds from the variable bounds.
1117  DenseColumn implied_lower_bounds(num_rows, 0);
1118  DenseColumn implied_upper_bounds(num_rows, 0);
1119  const ColIndex num_cols = lp->num_variables();
1120  StrictITIVector<RowIndex, int> row_degree(num_rows, 0);
1121  for (ColIndex col(0); col < num_cols; ++col) {
1122  const Fractional lower = lp->variable_lower_bounds()[col];
1123  const Fractional upper = lp->variable_upper_bounds()[col];
1124  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1125  const RowIndex row = e.row();
1126  const Fractional coeff = e.coefficient();
1127  if (coeff > 0.0) {
1128  implied_lower_bounds[row] += lower * coeff;
1129  implied_upper_bounds[row] += upper * coeff;
1130  } else {
1131  implied_lower_bounds[row] += upper * coeff;
1132  implied_upper_bounds[row] += lower * coeff;
1133  }
1134  ++row_degree[row];
1135  }
1136  }
1137 
1138  // Note that the ScalingPreprocessor is currently executed last, so here the
1139  // problem has not been scaled yet.
1140  int num_implied_free_constraints = 0;
1141  int num_forcing_constraints = 0;
1142  is_forcing_up_.assign(num_rows, false);
1143  DenseBooleanColumn is_forcing_down(num_rows, false);
1144  for (RowIndex row(0); row < num_rows; ++row) {
1145  if (row_degree[row] == 0) continue;
1146  Fractional lower = lp->constraint_lower_bounds()[row];
1147  Fractional upper = lp->constraint_upper_bounds()[row];
1148 
1149  // Check for infeasibility.
1151  implied_upper_bounds[row]) ||
1152  !IsSmallerWithinFeasibilityTolerance(implied_lower_bounds[row],
1153  upper)) {
1154  VLOG(1) << "implied bound " << implied_lower_bounds[row] << " "
1155  << implied_upper_bounds[row];
1156  VLOG(1) << "constraint bound " << lower << " " << upper;
1158  return false;
1159  }
1160 
1161  // Check if the constraint is forcing. That is, all the variables that
1162  // appear in it must be at one of their bounds.
1163  if (IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1164  lower)) {
1165  is_forcing_down[row] = true;
1166  ++num_forcing_constraints;
1167  continue;
1168  }
1170  implied_lower_bounds[row])) {
1171  is_forcing_up_[row] = true;
1172  ++num_forcing_constraints;
1173  continue;
1174  }
1175 
1176  // We relax the constraint bounds only if the constraint is implied to be
1177  // free. Such constraints will later be deleted by the
1178  // FreeConstraintPreprocessor.
1179  //
1180  // Note that we could relax only one of the two bounds, but the impact this
1181  // would have on the revised simplex algorithm is unclear at this point.
1183  implied_lower_bounds[row]) &&
1184  IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1185  upper)) {
1187  ++num_implied_free_constraints;
1188  }
1189  }
1190 
1191  if (num_implied_free_constraints > 0) {
1192  VLOG(1) << num_implied_free_constraints << " implied free constraints.";
1193  }
1194 
1195  if (num_forcing_constraints > 0) {
1196  VLOG(1) << num_forcing_constraints << " forcing constraints.";
1197  lp_is_maximization_problem_ = lp->IsMaximizationProblem();
1198  costs_.resize(num_cols, 0.0);
1199  for (ColIndex col(0); col < num_cols; ++col) {
1200  const SparseColumn& column = lp->GetSparseColumn(col);
1201  const Fractional lower = lp->variable_lower_bounds()[col];
1202  const Fractional upper = lp->variable_upper_bounds()[col];
1203  bool is_forced = false;
1204  Fractional target_bound = 0.0;
1205  for (const SparseColumn::Entry e : column) {
1206  if (is_forcing_down[e.row()]) {
1207  const Fractional candidate = e.coefficient() < 0.0 ? lower : upper;
1208  if (is_forced && candidate != target_bound) {
1209  // The bounds are really close, so we fix to the bound with
1210  // the lowest magnitude. As of 2019/11/19, this is "better" than
1211  // fixing to the mid-point, because at postsolve, we always put
1212  // non-basic variables to their exact bounds (so, with mid-point
1213  // there would be a difference of epsilon/2 between the inner
1214  // solution and the postsolved one, which might cause issues).
1215  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1216  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1217  continue;
1218  }
1219  VLOG(1) << "A variable is forced in both directions! bounds: ["
1220  << std::fixed << std::setprecision(10) << lower << ", "
1221  << upper << "]. coeff:" << e.coefficient();
1223  return false;
1224  }
1225  target_bound = candidate;
1226  is_forced = true;
1227  }
1228  if (is_forcing_up_[e.row()]) {
1229  const Fractional candidate = e.coefficient() < 0.0 ? upper : lower;
1230  if (is_forced && candidate != target_bound) {
1231  // The bounds are really close, so we fix to the bound with
1232  // the lowest magnitude.
1233  if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1234  target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1235  continue;
1236  }
1237  VLOG(1) << "A variable is forced in both directions! bounds: ["
1238  << std::fixed << std::setprecision(10) << lower << ", "
1239  << upper << "]. coeff:" << e.coefficient();
1241  return false;
1242  }
1243  target_bound = candidate;
1244  is_forced = true;
1245  }
1246  }
1247  if (is_forced) {
1248  // Fix the variable, update the constraint bounds and save this column
1249  // and its cost for the postsolve.
1250  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
1251  column_deletion_helper_.MarkColumnForDeletionWithState(
1252  col, target_bound,
1253  ComputeVariableStatus(target_bound, lower, upper));
1254  columns_saver_.SaveColumn(col, column);
1255  costs_[col] = lp->objective_coefficients()[col];
1256  }
1257  }
1258  for (RowIndex row(0); row < num_rows; ++row) {
1259  // In theory, an M exists such that for any magnitude >= M, we will be at
1260  // an optimal solution. However, because of numerical errors, if the value
1261  // is too large, it causes problem when verifying the solution. So we
1262  // select the smallest such M (at least a resonably small one) during
1263  // postsolve. It is the reason why we need to store the columns that were
1264  // fixed.
1265  if (is_forcing_down[row] || is_forcing_up_[row]) {
1266  row_deletion_helper_.MarkRowForDeletion(row);
1267  }
1268  }
1269  }
1270 
1271  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1272  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1273  return !column_deletion_helper_.IsEmpty();
1274 }
1275 
1277  ProblemSolution* solution) const {
1279  RETURN_IF_NULL(solution);
1280  column_deletion_helper_.RestoreDeletedColumns(solution);
1281  row_deletion_helper_.RestoreDeletedRows(solution);
1282 
1283  struct DeletionEntry {
1284  RowIndex row;
1285  ColIndex col;
1287  };
1288  std::vector<DeletionEntry> entries;
1289 
1290  // Compute for each deleted columns the last deleted row in which it appears.
1291  const ColIndex size = column_deletion_helper_.GetMarkedColumns().size();
1292  for (ColIndex col(0); col < size; ++col) {
1293  if (!column_deletion_helper_.IsColumnMarked(col)) continue;
1294 
1295  RowIndex last_row = kInvalidRow;
1296  Fractional last_coefficient;
1297  for (const SparseColumn::Entry e : columns_saver_.SavedColumn(col)) {
1298  const RowIndex row = e.row();
1299  if (row_deletion_helper_.IsRowMarked(row)) {
1300  last_row = row;
1301  last_coefficient = e.coefficient();
1302  }
1303  }
1304  if (last_row != kInvalidRow) {
1305  entries.push_back({last_row, col, last_coefficient});
1306  }
1307  }
1308 
1309  // Sort by row first and then col.
1310  std::sort(entries.begin(), entries.end(),
1311  [](const DeletionEntry& a, const DeletionEntry& b) {
1312  if (a.row == b.row) return a.col < b.col;
1313  return a.row < b.row;
1314  });
1315 
1316  // For each deleted row (in order), compute a bound on the dual values so
1317  // that all the deleted columns for which this row is the last deleted row are
1318  // dual-feasible. Note that for the other columns, it will always be possible
1319  // to make them dual-feasible with a later row.
1320  // There are two possible outcomes:
1321  // - The dual value stays 0.0, and nothing changes.
1322  // - The bounds enforce a non-zero dual value, and one column will have a
1323  // reduced cost of 0.0. This column becomes VariableStatus::BASIC, and the
1324  // constraint status is changed to ConstraintStatus::AT_LOWER_BOUND,
1325  // ConstraintStatus::AT_UPPER_BOUND or ConstraintStatus::FIXED_VALUE.
1326  for (int i = 0; i < entries.size();) {
1327  const RowIndex row = entries[i].row;
1328  DCHECK(row_deletion_helper_.IsRowMarked(row));
1329 
1330  // Process column with this last deleted row.
1331  Fractional new_dual_value = 0.0;
1332  ColIndex new_basic_column = kInvalidCol;
1333  for (; i < entries.size(); ++i) {
1334  if (entries[i].row != row) break;
1335  const ColIndex col = entries[i].col;
1336 
1337  const Fractional scalar_product =
1338  ScalarProduct(solution->dual_values, columns_saver_.SavedColumn(col));
1339  const Fractional reduced_cost = costs_[col] - scalar_product;
1340  const Fractional bound = reduced_cost / entries[i].coefficient;
1341  if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
1342  if (bound < new_dual_value) {
1343  new_dual_value = bound;
1344  new_basic_column = col;
1345  }
1346  } else {
1347  if (bound > new_dual_value) {
1348  new_dual_value = bound;
1349  new_basic_column = col;
1350  }
1351  }
1352  }
1353  if (new_basic_column != kInvalidCol) {
1354  solution->dual_values[row] = new_dual_value;
1355  solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
1356  solution->constraint_statuses[row] =
1357  is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
1359  }
1360  }
1361 }
1362 
1363 // --------------------------------------------------------
1364 // ImpliedFreePreprocessor
1365 // --------------------------------------------------------
1366 
1367 namespace {
1368 struct ColWithDegree {
1369  ColIndex col;
1370  EntryIndex num_entries;
1371  ColWithDegree(ColIndex c, EntryIndex n) : col(c), num_entries(n) {}
1372  bool operator<(const ColWithDegree& other) const {
1373  if (num_entries == other.num_entries) {
1374  return col < other.col;
1375  }
1376  return num_entries < other.num_entries;
1377  }
1378 };
1379 } // namespace
1380 
1383  RETURN_VALUE_IF_NULL(lp, false);
1384  if (!parameters_.use_implied_free_preprocessor()) return false;
1385  const RowIndex num_rows = lp->num_constraints();
1386  const ColIndex num_cols = lp->num_variables();
1387 
1388  // For each constraint with n entries and each of its variable, we want the
1389  // bounds implied by the (n - 1) other variables and the constraint. We
1390  // use two handy utility classes that allow us to do that efficiently while
1391  // dealing properly with infinite bounds.
1392  const int size = num_rows.value();
1393  // TODO(user) : Replace SumWithNegativeInfiniteAndOneMissing and
1394  // SumWithPositiveInfiniteAndOneMissing with IntervalSumWithOneMissing.
1396  size);
1398  size);
1399 
1400  // Initialize the sums by adding all the bounds of the variables.
1401  for (ColIndex col(0); col < num_cols; ++col) {
1404  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1405  Fractional entry_lb = e.coefficient() * lower_bound;
1406  Fractional entry_ub = e.coefficient() * upper_bound;
1407  if (e.coefficient() < 0.0) std::swap(entry_lb, entry_ub);
1408  lb_sums[e.row()].Add(entry_lb);
1409  ub_sums[e.row()].Add(entry_ub);
1410  }
1411  }
1412 
1413  // The inequality
1414  // constraint_lb <= sum(entries) <= constraint_ub
1415  // can be rewritten as:
1416  // sum(entries) + (-activity) = 0,
1417  // where (-activity) has bounds [-constraint_ub, -constraint_lb].
1418  // We use this latter convention to simplify our code.
1419  for (RowIndex row(0); row < num_rows; ++row) {
1420  lb_sums[row].Add(-lp->constraint_upper_bounds()[row]);
1421  ub_sums[row].Add(-lp->constraint_lower_bounds()[row]);
1422  }
1423 
1424  // Once a variable is freed, none of the rows in which it appears can be
1425  // used to make another variable free.
1426  DenseBooleanColumn used_rows(num_rows, false);
1427  postsolve_status_of_free_variables_.assign(num_cols, VariableStatus::FREE);
1428  variable_offsets_.assign(num_cols, 0.0);
1429 
1430  // It is better to process columns with a small degree first:
1431  // - Degree-two columns make it possible to remove a row from the problem.
1432  // - This way there is more chance to make more free columns.
1433  // - It is better to have low degree free columns since a free column will
1434  // always end up in the simplex basis (except if there is more than the
1435  // number of rows in the problem).
1436  //
1437  // TODO(user): Only process degree-two so in subsequent passes more degree-two
1438  // columns could be made free. And only when no other reduction can be
1439  // applied, process the higher degree column?
1440  //
1441  // TODO(user): Be smarter about the order that maximizes the number of free
1442  // column. For instance if we have 3 doubleton columns that use the rows (1,2)
1443  // (2,3) and (3,4) then it is better not to make (2,3) free so the two other
1444  // two can be made free.
1445  std::vector<ColWithDegree> col_by_degree;
1446  for (ColIndex col(0); col < num_cols; ++col) {
1447  col_by_degree.push_back(
1448  ColWithDegree(col, lp->GetSparseColumn(col).num_entries()));
1449  }
1450  std::sort(col_by_degree.begin(), col_by_degree.end());
1451 
1452  // Now loop over the columns in order and make all implied-free columns free.
1453  int num_already_free_variables = 0;
1454  int num_implied_free_variables = 0;
1455  int num_fixed_variables = 0;
1456  for (ColWithDegree col_with_degree : col_by_degree) {
1457  const ColIndex col = col_with_degree.col;
1458 
1459  // If the variable is already free or fixed, we do nothing.
1463  ++num_already_free_variables;
1464  continue;
1465  }
1466  if (lower_bound == upper_bound) continue;
1467 
1468  // Detect if the variable is implied free.
1469  Fractional overall_implied_lb = -kInfinity;
1470  Fractional overall_implied_ub = kInfinity;
1471  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1472  // If the row contains another implied free variable, then the bounds
1473  // implied by it will just be [-kInfinity, kInfinity] so we can skip it.
1474  if (used_rows[e.row()]) continue;
1475 
1476  // This is the contribution of this column to the sum above.
1477  const Fractional coeff = e.coefficient();
1478  Fractional entry_lb = coeff * lower_bound;
1479  Fractional entry_ub = coeff * upper_bound;
1480  if (coeff < 0.0) std::swap(entry_lb, entry_ub);
1481 
1482  // If X is the variable with index col and Y the sum of all the other
1483  // variables and of (-activity), then coeff * X + Y = 0. Since Y's bounds
1484  // are [lb_sum without X, ub_sum without X], it is easy to derive the
1485  // implied bounds on X.
1486  //
1487  // Important: If entry_lb (resp. entry_ub) are large, we cannot have a
1488  // good precision on the sum without. So we do add a defensive tolerance
1489  // that depends on these magnitude.
1490  const Fractional implied_lb =
1491  coeff > 0.0 ? -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff
1492  : -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff;
1493  const Fractional implied_ub =
1494  coeff > 0.0 ? -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff
1495  : -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff;
1496 
1497  overall_implied_lb = std::max(overall_implied_lb, implied_lb);
1498  overall_implied_ub = std::min(overall_implied_ub, implied_ub);
1499  }
1500 
1501  // Detect infeasible cases.
1502  if (!IsSmallerWithinFeasibilityTolerance(overall_implied_lb, upper_bound) ||
1503  !IsSmallerWithinFeasibilityTolerance(lower_bound, overall_implied_ub) ||
1504  !IsSmallerWithinFeasibilityTolerance(overall_implied_lb,
1505  overall_implied_ub)) {
1507  return false;
1508  }
1509 
1510  // Detect fixed variable cases (there are two kinds).
1511  // Note that currently we don't do anything here except counting them.
1513  overall_implied_lb) ||
1514  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1515  lower_bound)) {
1516  // This case is already dealt with by the
1517  // ForcingAndImpliedFreeConstraintPreprocessor since it means that (at
1518  // least) one of the row is forcing.
1519  ++num_fixed_variables;
1520  continue;
1521  } else if (IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1522  overall_implied_lb)) {
1523  // TODO(user): As of July 2013, with our preprocessors this case is never
1524  // triggered on the Netlib. Note however that if it appears it can have a
1525  // big impact since by fixing the variable, the two involved constraints
1526  // are forcing and can be removed too (with all the variables they touch).
1527  // The postsolve step is quite involved though.
1528  ++num_fixed_variables;
1529  continue;
1530  }
1531 
1532  // Is the variable implied free? Note that for an infinite lower_bound or
1533  // upper_bound the respective inequality is always true.
1535  overall_implied_lb) &&
1536  IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1537  upper_bound)) {
1538  ++num_implied_free_variables;
1540  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1541  used_rows[e.row()] = true;
1542  }
1543 
1544  // This is a tricky part. We're freeing this variable, which means that
1545  // after solve, the modified variable will have status either
1546  // VariableStatus::FREE or VariableStatus::BASIC. In the former case
1547  // (VariableStatus::FREE, value = 0.0), we need to "fix" the
1548  // status (technically, our variable isn't free!) to either
1549  // VariableStatus::AT_LOWER_BOUND or VariableStatus::AT_UPPER_BOUND
1550  // (note that we skipped fixed variables), and "fix" the value to that
1551  // bound's value as well. We make the decision and the precomputation
1552  // here: we simply offset the variable by one of its bounds, and store
1553  // which bound that was. Note that if the modified variable turns out to
1554  // be VariableStatus::BASIC, we'll simply un-offset its value too;
1555  // and let the status be VariableStatus::BASIC.
1556  //
1557  // TODO(user): This trick is already used in the DualizerPreprocessor,
1558  // maybe we should just have a preprocessor that shifts all the variables
1559  // bounds to have at least one of them at 0.0, will that improve precision
1560  // and speed of the simplex? One advantage is that we can compute the
1561  // new constraint bounds with better precision using AccurateSum.
1563  const Fractional offset =
1564  MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
1565  if (offset != 0.0) {
1566  variable_offsets_[col] = offset;
1567  SubtractColumnMultipleFromConstraintBound(col, offset, lp);
1568  }
1569  postsolve_status_of_free_variables_[col] =
1570  ComputeVariableStatus(offset, lower_bound, upper_bound);
1571  }
1572  }
1573  VLOG(1) << num_already_free_variables << " free variables in the problem.";
1574  VLOG(1) << num_implied_free_variables << " implied free columns.";
1575  VLOG(1) << num_fixed_variables << " variables can be fixed.";
1576 
1577  return num_implied_free_variables > 0;
1578 }
1579 
1582  RETURN_IF_NULL(solution);
1583  const ColIndex num_cols = solution->variable_statuses.size();
1584  for (ColIndex col(0); col < num_cols; ++col) {
1585  // Skip variables that the preprocessor didn't change.
1586  if (postsolve_status_of_free_variables_[col] == VariableStatus::FREE) {
1587  DCHECK_EQ(0.0, variable_offsets_[col]);
1588  continue;
1589  }
1590  if (solution->variable_statuses[col] == VariableStatus::FREE) {
1591  solution->variable_statuses[col] =
1592  postsolve_status_of_free_variables_[col];
1593  } else {
1595  }
1596  solution->primal_values[col] += variable_offsets_[col];
1597  }
1598 }
1599 
1600 // --------------------------------------------------------
1601 // DoubletonFreeColumnPreprocessor
1602 // --------------------------------------------------------
1603 
1606  RETURN_VALUE_IF_NULL(lp, false);
1607  // We will modify the matrix transpose and then push the change to the linear
1608  // program by calling lp->UseTransposeMatrixAsReference(). Note
1609  // that original_matrix will not change during this preprocessor run.
1610  const SparseMatrix& original_matrix = lp->GetSparseMatrix();
1611  SparseMatrix* transpose = lp->GetMutableTransposeSparseMatrix();
1612 
1613  const ColIndex num_cols(lp->num_variables());
1614  for (ColIndex doubleton_col(0); doubleton_col < num_cols; ++doubleton_col) {
1615  // Only consider doubleton free columns.
1616  if (original_matrix.column(doubleton_col).num_entries() != 2) continue;
1617  if (lp->variable_lower_bounds()[doubleton_col] != -kInfinity) continue;
1618  if (lp->variable_upper_bounds()[doubleton_col] != kInfinity) continue;
1619 
1620  // Collect the two column items. Note that we skip a column involving a
1621  // deleted row since it is no longer a doubleton then.
1622  RestoreInfo r;
1623  r.col = doubleton_col;
1624  r.objective_coefficient = lp->objective_coefficients()[r.col];
1625  int index = 0;
1626  for (const SparseColumn::Entry e : original_matrix.column(r.col)) {
1627  if (row_deletion_helper_.IsRowMarked(e.row())) break;
1628  r.row[index] = e.row();
1629  r.coeff[index] = e.coefficient();
1630  DCHECK_NE(0.0, e.coefficient());
1631  ++index;
1632  }
1633  if (index != NUM_ROWS) continue;
1634 
1635  // Since the column didn't touch any previously deleted row, we are sure
1636  // that the coefficients were left untouched.
1637  DCHECK_EQ(r.coeff[DELETED], transpose->column(RowToColIndex(r.row[DELETED]))
1638  .LookUpCoefficient(ColToRowIndex(r.col)));
1639  DCHECK_EQ(r.coeff[MODIFIED],
1640  transpose->column(RowToColIndex(r.row[MODIFIED]))
1641  .LookUpCoefficient(ColToRowIndex(r.col)));
1642 
1643  // We prefer deleting the row with the larger coefficient magnitude because
1644  // we will divide by this magnitude. TODO(user): Impact?
1645  if (std::abs(r.coeff[DELETED]) < std::abs(r.coeff[MODIFIED])) {
1646  std::swap(r.coeff[DELETED], r.coeff[MODIFIED]);
1647  std::swap(r.row[DELETED], r.row[MODIFIED]);
1648  }
1649 
1650  // Save the deleted row for postsolve. Note that we remove it from the
1651  // transpose at the same time. This last operation is not strictly needed,
1652  // but it is faster to do it this way (both here and later when we will take
1653  // the transpose of the final transpose matrix).
1654  r.deleted_row_as_column.Swap(
1655  transpose->mutable_column(RowToColIndex(r.row[DELETED])));
1656 
1657  // Move the bound of the deleted constraint to the initially free variable.
1658  {
1659  Fractional new_variable_lb =
1660  lp->constraint_lower_bounds()[r.row[DELETED]];
1661  Fractional new_variable_ub =
1662  lp->constraint_upper_bounds()[r.row[DELETED]];
1663  new_variable_lb /= r.coeff[DELETED];
1664  new_variable_ub /= r.coeff[DELETED];
1665  if (r.coeff[DELETED] < 0.0) std::swap(new_variable_lb, new_variable_ub);
1666  lp->SetVariableBounds(r.col, new_variable_lb, new_variable_ub);
1667  }
1668 
1669  // Add a multiple of the deleted row to the modified row except on
1670  // column r.col where the coefficient will be left unchanged.
1671  r.deleted_row_as_column.AddMultipleToSparseVectorAndIgnoreCommonIndex(
1672  -r.coeff[MODIFIED] / r.coeff[DELETED], ColToRowIndex(r.col),
1674  transpose->mutable_column(RowToColIndex(r.row[MODIFIED])));
1675 
1676  // We also need to correct the objective value of the variables involved in
1677  // the deleted row.
1678  if (r.objective_coefficient != 0.0) {
1679  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1680  const ColIndex col = RowToColIndex(e.row());
1681  if (col == r.col) continue;
1682  const Fractional new_objective =
1683  lp->objective_coefficients()[col] -
1684  e.coefficient() * r.objective_coefficient / r.coeff[DELETED];
1685 
1686  // This detects if the objective should actually be zero, but because of
1687  // the numerical error in the formula above, we have a really low
1688  // objective instead. The logic is the same as in
1689  // AddMultipleToSparseVectorAndIgnoreCommonIndex().
1690  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
1691  lp->SetObjectiveCoefficient(col, new_objective);
1692  } else {
1693  lp->SetObjectiveCoefficient(col, 0.0);
1694  }
1695  }
1696  }
1697  row_deletion_helper_.MarkRowForDeletion(r.row[DELETED]);
1698  restore_stack_.push_back(r);
1699  }
1700 
1701  if (!row_deletion_helper_.IsEmpty()) {
1702  // The order is important.
1704  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1705  return true;
1706  }
1707  return false;
1708 }
1709 
// Postsolve for the doubleton-free-column reduction: walks the restore stack
// in reverse, re-inserts each deleted row, fixes its constraint status from
// the status of the variable r.col, recomputes the primal value of r.col from
// the saved deleted row, and sets the dual value of the deleted row so that
// the reduced cost of r.col (which is made BASIC) becomes zero.
// NOTE(review): the doxygen listing dropped the hyperlink-only lines here —
// the function-name line above and, in the switch below, the
// 'case VariableStatus::...' labels and the assigned ConstraintStatus values.
// Consult the original preprocessor.cc for the exact dropped text.
1711  ProblemSolution* solution) const {
1713  row_deletion_helper_.RestoreDeletedRows(solution);
1714  for (const RestoreInfo& r : Reverse(restore_stack_)) {
1715  // Correct the constraint status.
1716  switch (solution->variable_statuses[r.col]) {
1718  solution->constraint_statuses[r.row[DELETED]] =
1720  break;
1722  solution->constraint_statuses[r.row[DELETED]] =
1723  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1725  break;
1727  solution->constraint_statuses[r.row[DELETED]] =
1728  r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1730  break;
1731  case VariableStatus::FREE:
1732  solution->constraint_statuses[r.row[DELETED]] = ConstraintStatus::FREE;
1733  break;
1734  case VariableStatus::BASIC:
1735  // The default is good here:
1736  DCHECK_EQ(solution->constraint_statuses[r.row[DELETED]],
1738  break;
1739  }
1740 
1741  // Correct the primal variable value.
// The new value of r.col is chosen so that the re-inserted deleted row is
// satisfied at equality: subtract the contribution of all other variables
// of the deleted row, scaled by 1 / r.coeff[DELETED].
1742  {
1743  Fractional new_variable_value = solution->primal_values[r.col];
1744  for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1745  const ColIndex col = RowToColIndex(e.row());
1746  if (col == r.col) continue;
1747  new_variable_value -= (e.coefficient() / r.coeff[DELETED]) *
1748  solution->primal_values[RowToColIndex(e.row())];
1749  }
1750  solution->primal_values[r.col] = new_variable_value;
1751  }
1752 
1753  // In all cases, we will make the variable r.col VariableStatus::BASIC, so
1754  // we need to adjust the dual value of the deleted row so that the variable
1755  // reduced cost is zero. Note that there is nothing to do if the variable
1756  // was already basic.
1757  if (solution->variable_statuses[r.col] != VariableStatus::BASIC) {
1758  solution->variable_statuses[r.col] = VariableStatus::BASIC;
1759  Fractional current_reduced_cost =
1760  r.objective_coefficient -
1761  r.coeff[MODIFIED] * solution->dual_values[r.row[MODIFIED]];
1762  // We want current_reduced_cost - dual * coeff = 0, so:
1763  solution->dual_values[r.row[DELETED]] =
1764  current_reduced_cost / r.coeff[DELETED];
1765  } else {
1766  DCHECK_EQ(solution->dual_values[r.row[DELETED]], 0.0);
1767  }
1768  }
1769 }
1770 
1771 // --------------------------------------------------------
1772 // UnconstrainedVariablePreprocessor
1773 // --------------------------------------------------------
1774 
1775 namespace {
1776 
1777 // Does the constraint block the variable to go to infinity in the given
1778 // direction? direction is either positive or negative and row is the index of
1779 // the constraint.
// Returns true when the constraint has a finite bound on the side the
// variable's contribution grows towards.
1780 bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction,
1781  RowIndex row) {
1782  return direction > 0.0 ? lp.constraint_upper_bounds()[row] != kInfinity
// NOTE(review): the listing dropped line 1783 — the ':' branch of this
// ternary, presumably 'lp.constraint_lower_bounds()[row] != -kInfinity;'.
// Confirm against the original preprocessor.cc.
1784 }
1785 
1786 } // namespace
1787 
// Removes a zero-cost variable `col` that can move freely towards
// target_bound (which is +/- kInfinity): every constraint touching the column
// is marked for deletion (its sparse row is saved for postsolve), and the
// relevant finite constraint bound plus an activity sign correction are
// recorded so RecoverSolution() can later pick a feasible value for `col`.
// NOTE(review): the function-name line (1788) and lines 1804/1821 — the saved
// row expression and the MinInMagnitudeOrZeroIfInfinite() arguments — were
// dropped by the listing; confirm against the original preprocessor.cc.
1789  ColIndex col, Fractional target_bound, LinearProgram* lp) {
1790  DCHECK_EQ(0.0, lp->objective_coefficients()[col]);
// Lazy initialization of the postsolve bookkeeping, sized on first use.
1791  if (rhs_.empty()) {
1792  rhs_.resize(lp->num_constraints(), 0.0);
1793  activity_sign_correction_.resize(lp->num_constraints(), 1.0);
1794  is_unbounded_.resize(lp->num_variables(), false);
1795  }
1796  const bool is_unbounded_up = (target_bound == kInfinity);
1797  const SparseColumn& column = lp->GetSparseColumn(col);
1798  for (const SparseColumn::Entry e : column) {
1799  const RowIndex row = e.row();
1800  if (!row_deletion_helper_.IsRowMarked(row)) {
1801  row_deletion_helper_.MarkRowForDeletion(row);
1802  rows_saver_.SaveColumn(
1803  RowToColIndex(row),
1805  }
// The constraint bound that can stop the variable depends on the sign of
// its coefficient and on the direction the variable is unbounded towards.
1806  const bool is_constraint_upper_bound_relevant =
1807  e.coefficient() > 0.0 ? !is_unbounded_up : is_unbounded_up;
1808  activity_sign_correction_[row] =
1809  is_constraint_upper_bound_relevant ? 1.0 : -1.0;
1810  rhs_[row] = is_constraint_upper_bound_relevant
1811  ? lp->constraint_upper_bounds()[row]
1812  : lp->constraint_lower_bounds()[row];
1813  DCHECK(IsFinite(rhs_[row]));
1814 
1815  // TODO(user): Here, we may render the row free, so subsequent columns
1816  // processed by the columns loop in Run() have more chance to be removed.
1817  // However, we need to be more careful during the postsolve() if we do that.
1818  }
1819  is_unbounded_[col] = true;
1820  Fractional initial_feasible_value = MinInMagnitudeOrZeroIfInfinite(
1822  column_deletion_helper_.MarkColumnForDeletionWithState(
1823  col, initial_feasible_value,
1824  ComputeVariableStatus(initial_feasible_value,
1825  lp->variable_lower_bounds()[col],
1826  lp->variable_upper_bounds()[col]));
1827 }
1828 
// Main pass of UnconstrainedVariablePreprocessor. Propagates bounds on the
// constraint dual values, derives bounds on each column's reduced cost, and
// removes variables whose reduced-cost interval excludes zero (moving them to
// the corresponding bound), or whole columns/constraints when a zero-cost
// variable is unconstrained and unblocked. Returns true iff something was
// deleted from the problem.
// NOTE(review): the signature line (presumably
// 'bool UnconstrainedVariablePreprocessor::Run(LinearProgram* lp)') and a few
// hyperlink-only lines were dropped by this listing — confirm each gap
// against the original preprocessor.cc.
1831  RETURN_VALUE_IF_NULL(lp, false);
1832 
1833  // To simplify the problem if something is almost zero, we use the low
1834  // tolerance (1e-9 by default) to be defensive. But to detect an infeasibility
1835  // we want to be sure (especially since the problem is not scaled in the
1836  // presolver) so we use an higher tolerance.
1837  //
1838  // TODO(user): Expose it as a parameter. We could rename both to
1839  // preprocessor_low_tolerance and preprocessor_high_tolerance.
1840  const Fractional low_tolerance = parameters_.preprocessor_zero_tolerance();
1841  const Fractional high_tolerance = 1e-4;
1842 
1843  // We start by the dual variable bounds from the constraints.
// A missing finite constraint bound fixes the sign of the associated dual.
1844  const RowIndex num_rows = lp->num_constraints();
1845  dual_lb_.assign(num_rows, -kInfinity);
1846  dual_ub_.assign(num_rows, kInfinity);
1847  for (RowIndex row(0); row < num_rows; ++row) {
1848  if (lp->constraint_lower_bounds()[row] == -kInfinity) {
1849  dual_ub_[row] = 0.0;
1850  }
1851  if (lp->constraint_upper_bounds()[row] == kInfinity) {
1852  dual_lb_[row] = 0.0;
1853  }
1854  }
1855 
1856  const ColIndex num_cols = lp->num_variables();
1857  may_have_participated_lb_.assign(num_cols, false);
1858  may_have_participated_ub_.assign(num_cols, false);
1859 
1860  // We maintain a queue of columns to process.
1861  std::deque<ColIndex> columns_to_process;
1862  DenseBooleanRow in_columns_to_process(num_cols, true);
1863  std::vector<RowIndex> changed_rows;
1864  for (ColIndex col(0); col < num_cols; ++col) {
1865  columns_to_process.push_back(col);
1866  }
1867 
1868  // Arbitrary limit to avoid corner cases with long loops.
1869  // TODO(user): expose this as a parameter? IMO it isn't really needed as we
1870  // shouldn't reach this limit except in corner cases.
1871  const int limit = 5 * num_cols.value();
1872  for (int count = 0; !columns_to_process.empty() && count < limit; ++count) {
1873  const ColIndex col = columns_to_process.front();
1874  columns_to_process.pop_front();
1875  in_columns_to_process[col] = false;
1876  if (column_deletion_helper_.IsColumnMarked(col)) continue;
1877 
1878  const SparseColumn& column = lp->GetSparseColumn(col);
1879  const Fractional col_cost =
1881  const Fractional col_lb = lp->variable_lower_bounds()[col];
1882  const Fractional col_ub = lp->variable_upper_bounds()[col];
1883 
1884  // Compute the bounds on the reduced costs of this column.
// NOTE(review): the declarations of rc_lb/rc_ub (lines 1885-1886, presumably
// sum accumulators such as SumWithNegativeInfiniteAndOneMissing /
// SumWithPositiveInfiniteAndOneMissing) were dropped by the listing.
1887  rc_lb.Add(col_cost);
1888  rc_ub.Add(col_cost);
1889  for (const SparseColumn::Entry e : column) {
1890  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1891  const Fractional coeff = e.coefficient();
// reduced cost = cost - sum(coeff * dual); the sign of coeff selects which
// dual bound contributes to which side of the reduced-cost interval.
1892  if (coeff > 0.0) {
1893  rc_lb.Add(-coeff * dual_ub_[e.row()]);
1894  rc_ub.Add(-coeff * dual_lb_[e.row()]);
1895  } else {
1896  rc_lb.Add(-coeff * dual_lb_[e.row()]);
1897  rc_ub.Add(-coeff * dual_ub_[e.row()]);
1898  }
1899  }
1900 
1901  // If the reduced cost domain do not contain zero (modulo the tolerance), we
1902  // can move the variable to its corresponding bound. Note that we need to be
1903  // careful that this variable didn't participate in creating the used
1904  // reduced cost bound in the first place.
1905  bool can_be_removed = false;
1907  bool rc_is_away_from_zero;
1908  if (rc_ub.Sum() <= low_tolerance) {
1909  can_be_removed = true;
1910  target_bound = col_ub;
1911  if (in_mip_context_ && lp->IsVariableInteger(col)) {
1912  target_bound = std::floor(target_bound + high_tolerance);
1913  }
1914 
1915  rc_is_away_from_zero = rc_ub.Sum() <= -high_tolerance;
1916  can_be_removed = !may_have_participated_ub_[col];
1917  }
1918  if (rc_lb.Sum() >= -low_tolerance) {
1919  // The second condition is here for the case we can choose one of the two
1920  // directions.
1921  if (!can_be_removed || !IsFinite(target_bound)) {
1922  can_be_removed = true;
1923  target_bound = col_lb;
1924  if (in_mip_context_ && lp->IsVariableInteger(col)) {
1925  target_bound = std::ceil(target_bound - high_tolerance);
1926  }
1927 
1928  rc_is_away_from_zero = rc_lb.Sum() >= high_tolerance;
1929  can_be_removed = !may_have_participated_lb_[col];
1930  }
1931  }
1932 
1933  if (can_be_removed) {
1934  if (IsFinite(target_bound)) {
1935  // Note that in MIP context, this assumes that the bounds of an integer
1936  // variable are integer.
1937  column_deletion_helper_.MarkColumnForDeletionWithState(
1938  col, target_bound,
1939  ComputeVariableStatus(target_bound, col_lb, col_ub));
1940  continue;
1941  }
1942 
1943  // If the target bound is infinite and the reduced cost bound is non-zero,
1944  // then the problem is ProblemStatus::INFEASIBLE_OR_UNBOUNDED.
1945  if (rc_is_away_from_zero) {
1946  VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, variable " << col
1947  << " can move to " << target_bound
1948  << " and its reduced cost is in [" << rc_lb.Sum() << ", "
1949  << rc_ub.Sum() << "]";
1951  return false;
1952  } else {
1953  // We can remove this column and all its constraints! We just need to
1954  // choose proper variable values during the call to RecoverSolution()
1955  // that make all the constraints satisfiable. Unfortunately, this is not
1956  // so easy to do in the general case, so we only deal with a simpler
1957  // case when the cost of the variable is zero, and none of the
1958  // constraints (even the deleted one) block the variable moving to its
1959  // infinite target_bound.
1960  //
1961  // TODO(user): deal with the more generic case.
1962  if (col_cost != 0.0) continue;
1963 
1964  const double sign_correction = (target_bound == kInfinity) ? 1.0 : -1.0;
1965  bool skip = false;
1966  for (const SparseColumn::Entry e : column) {
1967  // Note that it is important to check the rows that are already
1968  // deleted here, otherwise the post-solve will not work.
1969  if (IsConstraintBlockingVariable(
1970  *lp, sign_correction * e.coefficient(), e.row())) {
1971  skip = true;
1972  break;
1973  }
1974  }
1975  if (skip) continue;
1976 
1977  // TODO(user): this also works if the variable is integer, but we must
1978  // choose an integer value during the post-solve. Implement this.
1979  if (in_mip_context_) continue;
// NOTE(review): the listing dropped line 1980 — presumably the call
// RemoveZeroCostUnconstrainedVariable(col, target_bound, lp);
1981  continue;
1982  }
1983  }
1984 
1985  // The rest of the code will update the dual bounds. There is no need to do
1986  // it if the column was removed or if it is not unconstrained in some
1987  // direction.
1988  DCHECK(!can_be_removed);
1989  if (col_lb != -kInfinity && col_ub != kInfinity) continue;
1990 
1991  // For MIP, we only exploit the constraints. TODO(user): It should probably
1992  // work with only small modification, investigate.
1993  if (in_mip_context_) continue;
1994 
1995  changed_rows.clear();
1996  for (const SparseColumn::Entry e : column) {
1997  if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1998  const Fractional c = e.coefficient();
1999  const RowIndex row = e.row();
// A variable unbounded above needs a non-negative reduced cost upper bound,
// which tightens one of the dual bounds of each of its rows (and similarly,
// unbounded below tightens via the reduced cost lower bound).
2000  if (col_ub == kInfinity) {
2001  if (c > 0.0) {
2002  const Fractional candidate =
2003  rc_ub.SumWithoutUb(-c * dual_lb_[row]) / c;
2004  if (candidate < dual_ub_[row]) {
2005  dual_ub_[row] = candidate;
2006  may_have_participated_lb_[col] = true;
2007  changed_rows.push_back(row);
2008  }
2009  } else {
2010  const Fractional candidate =
2011  rc_ub.SumWithoutUb(-c * dual_ub_[row]) / c;
2012  if (candidate > dual_lb_[row]) {
2013  dual_lb_[row] = candidate;
2014  may_have_participated_lb_[col] = true;
2015  changed_rows.push_back(row);
2016  }
2017  }
2018  }
2019  if (col_lb == -kInfinity) {
2020  if (c > 0.0) {
2021  const Fractional candidate =
2022  rc_lb.SumWithoutLb(-c * dual_ub_[row]) / c;
2023  if (candidate > dual_lb_[row]) {
2024  dual_lb_[row] = candidate;
2025  may_have_participated_ub_[col] = true;
2026  changed_rows.push_back(row);
2027  }
2028  } else {
2029  const Fractional candidate =
2030  rc_lb.SumWithoutLb(-c * dual_lb_[row]) / c;
2031  if (candidate < dual_ub_[row]) {
2032  dual_ub_[row] = candidate;
2033  may_have_participated_ub_[col] = true;
2034  changed_rows.push_back(row);
2035  }
2036  }
2037  }
2038  }
2039 
// Re-enqueue every column of a row whose dual bounds just changed, so its
// reduced-cost interval gets recomputed.
2040  if (!changed_rows.empty()) {
2041  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2042  for (const RowIndex row : changed_rows) {
2043  for (const SparseColumn::Entry entry :
2044  transpose.column(RowToColIndex(row))) {
2045  const ColIndex col = RowToColIndex(entry.row());
2046  if (!in_columns_to_process[col]) {
2047  columns_to_process.push_back(col);
2048  in_columns_to_process[col] = true;
2049  }
2050  }
2051  }
2052  }
2053  }
2054 
2055  // Change the rhs to reflect the fixed variables. Note that is important to do
2056  // that after all the calls to RemoveZeroCostUnconstrainedVariable() because
2057  // RemoveZeroCostUnconstrainedVariable() needs to store the rhs before this
2058  // modification!
2059  const ColIndex end = column_deletion_helper_.GetMarkedColumns().size();
2060  for (ColIndex col(0); col < end; ++col) {
2061  if (column_deletion_helper_.IsColumnMarked(col)) {
2062  const Fractional target_bound =
2063  column_deletion_helper_.GetStoredValue()[col];
2064  SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
2065  }
2066  }
2067 
2068  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2069  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2070  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2071 }
2072 
// Postsolve for UnconstrainedVariablePreprocessor: restores the deleted
// columns and rows, then shifts each unbounded variable just enough (in its
// unbounded direction) to satisfy every restored constraint, using the
// rhs_/activity_sign_correction_ data recorded during Run(). The row whose
// bound determined the largest shift becomes the variable's blocking
// constraint and gets an AT_*_BOUND status.
// NOTE(review): the function-name line was dropped by this listing.
2074  ProblemSolution* solution) const {
2076  RETURN_IF_NULL(solution);
2077  column_deletion_helper_.RestoreDeletedColumns(solution);
2078  row_deletion_helper_.RestoreDeletedRows(solution);
2079 
// For each deleted row, the LAST unbounded variable of the row (in column
// order) is the one responsible for fixing its activity during postsolve.
2080  struct DeletionEntry {
2081  RowIndex row;
2082  ColIndex col;
2084  };
2085  std::vector<DeletionEntry> entries;
2086 
2087  // Compute the last deleted column index for each deleted rows.
2088  const RowIndex num_rows = solution->dual_values.size();
2089  RowToColMapping last_deleted_column(num_rows, kInvalidCol);
2090  for (RowIndex row(0); row < num_rows; ++row) {
2091  if (!row_deletion_helper_.IsRowMarked(row)) continue;
2092 
2093  ColIndex last_col = kInvalidCol;
2094  Fractional last_coefficient;
2095  for (const SparseColumn::Entry e :
2096  rows_saver_.SavedColumn(RowToColIndex(row))) {
2097  const ColIndex col = RowToColIndex(e.row());
2098  if (is_unbounded_[col]) {
2099  last_col = col;
2100  last_coefficient = e.coefficient();
2101  }
2102  }
2103  if (last_col != kInvalidCol) {
2104  entries.push_back({row, last_col, last_coefficient});
2105  }
2106  }
2107 
2108  // Sort by col first and then row.
2109  std::sort(entries.begin(), entries.end(),
2110  [](const DeletionEntry& a, const DeletionEntry& b) {
2111  if (a.col == b.col) return a.row < b.row;
2112  return a.col < b.col;
2113  });
2114 
2115  // Note that this will be empty if there were no deleted rows.
2116  for (int i = 0; i < entries.size();) {
2117  const ColIndex col = entries[i].col;
2118  CHECK(is_unbounded_[col]);
2119 
2120  Fractional primal_value_shift = 0.0;
2121  RowIndex row_at_bound = kInvalidRow;
2122  for (; i < entries.size(); ++i) {
2123  if (entries[i].col != col) break;
2124  const RowIndex row = entries[i].row;
2125 
2126  // This is for VariableStatus::FREE rows.
2127  //
2128  // TODO(user): In presence of free row, we must move them to 0.
2129  // Note that currently VariableStatus::FREE rows should be removed before
2130  // this is called.
2131  DCHECK(IsFinite(rhs_[row]));
2132  if (!IsFinite(rhs_[row])) continue;
2133 
2134  const SparseColumn& row_as_column =
2135  rows_saver_.SavedColumn(RowToColIndex(row));
2136  const Fractional activity =
2137  rhs_[row] - ScalarProduct(solution->primal_values, row_as_column);
2138 
2139  // activity and sign correction must have the same sign or be zero. If
2140  // not, we find the first unbounded variable and change it accordingly.
2141  // Note that by construction, the variable value will move towards its
2142  // unbounded direction.
2143  if (activity * activity_sign_correction_[row] < 0.0) {
2144  const Fractional bound = activity / entries[i].coefficient;
2145  if (std::abs(bound) > std::abs(primal_value_shift)) {
2146  primal_value_shift = bound;
2147  row_at_bound = row;
2148  }
2149  }
2150  }
2151  solution->primal_values[col] += primal_value_shift;
2152  if (row_at_bound != kInvalidRow) {
// NOTE(review): the ternary's two ConstraintStatus values (lines 2156-2157,
// presumably AT_UPPER_BOUND / AT_LOWER_BOUND) were dropped by the listing.
2154  solution->constraint_statuses[row_at_bound] =
2155  activity_sign_correction_[row_at_bound] == 1.0
2158  }
2159  }
2160 }
2161 
2162 // --------------------------------------------------------
2163 // FreeConstraintPreprocessor
2164 // --------------------------------------------------------
2165 
// Deletes every constraint whose two bounds are infinite (a "free" row): such
// a row can never be violated. Returns true iff at least one row was deleted.
// NOTE(review): the signature line (presumably
// 'bool FreeConstraintPreprocessor::Run(LinearProgram* lp)') was dropped.
2168  RETURN_VALUE_IF_NULL(lp, false);
2169  const RowIndex num_rows = lp->num_constraints();
2170  for (RowIndex row(0); row < num_rows; ++row) {
// NOTE(review): the declarations of lower_bound/upper_bound (lines
// 2171-2172, reading lp->constraint_{lower,upper}_bounds()[row]) were
// dropped by the listing.
2173  if (lower_bound == -kInfinity && upper_bound == kInfinity) {
2174  row_deletion_helper_.MarkRowForDeletion(row);
2175  }
2176  }
2177  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2178  return !row_deletion_helper_.IsEmpty();
2179 }
2180 
// Postsolve: re-inserts the deleted free rows into the solution (their
// status/dual defaults are handled by the row deletion helper).
// NOTE(review): the function-name line was dropped by this listing.
2182  ProblemSolution* solution) const {
2184  RETURN_IF_NULL(solution);
2185  row_deletion_helper_.RestoreDeletedRows(solution);
2186 }
2187 
2188 // --------------------------------------------------------
2189 // EmptyConstraintPreprocessor
2190 // --------------------------------------------------------
2191 
// Deletes every constraint with no non-zero coefficient. An empty row is
// either trivially satisfiable (0 is within its bounds, modulo the
// feasibility tolerance) or proves primal infeasibility. Returns true iff at
// least one row was deleted.
// NOTE(review): the signature line (presumably
// 'bool EmptyConstraintPreprocessor::Run(LinearProgram* lp)') was dropped.
2194  RETURN_VALUE_IF_NULL(lp, false);
2195  const RowIndex num_rows(lp->num_constraints());
2196  const ColIndex num_cols(lp->num_variables());
2197 
2198  // Compute degree.
// degree[row] = number of non-zero entries of the row across all columns.
2199  StrictITIVector<RowIndex, int> degree(num_rows, 0);
2200  for (ColIndex col(0); col < num_cols; ++col) {
2201  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2202  ++degree[e.row()];
2203  }
2204  }
2205 
2206  // Delete degree 0 rows.
2207  for (RowIndex row(0); row < num_rows; ++row) {
2208  if (degree[row] == 0) {
2209  // We need to check that 0.0 is allowed by the constraint bounds,
2210  // otherwise, the problem is ProblemStatus::PRIMAL_INFEASIBLE.
// NOTE(review): the two dropped lines (2211 and 2213) presumably read
// 'if (!IsSmallerWithinFeasibilityTolerance(' — confirm against the
// original preprocessor.cc.
2212  lp->constraint_lower_bounds()[row], 0) ||
2214  0, lp->constraint_upper_bounds()[row])) {
2215  VLOG(1) << "Problem PRIMAL_INFEASIBLE, constraint " << row
2216  << " is empty and its range ["
2217  << lp->constraint_lower_bounds()[row] << ","
2218  << lp->constraint_upper_bounds()[row] << "] doesn't contain 0.";
2220  return false;
2221  }
2222  row_deletion_helper_.MarkRowForDeletion(row);
2223  }
2224  }
2225  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2226  return !row_deletion_helper_.IsEmpty();
2227 }
2228 
// Postsolve: re-inserts the deleted empty rows into the solution.
// NOTE(review): the function-name line was dropped by this listing.
2230  ProblemSolution* solution) const {
2232  RETURN_IF_NULL(solution);
2233  row_deletion_helper_.RestoreDeletedRows(solution);
2234 }
2235 
2236 // --------------------------------------------------------
2237 // SingletonPreprocessor
2238 // --------------------------------------------------------
2239 
// SingletonUndo constructor: snapshots everything needed to undo one
// singleton reduction during postsolve — the matrix entry, the objective
// coefficient of its column, the variable and constraint bounds at the time
// of the reduction, and the constraint status to restore.
// NOTE(review): the first signature line (2240, presumably
// 'SingletonUndo::SingletonUndo(Type type, const LinearProgram& lp,') was
// dropped by this listing.
2241  MatrixEntry e, ConstraintStatus status)
2242  : type_(type),
2243  is_maximization_(lp.IsMaximizationProblem()),
2244  e_(e),
2245  cost_(lp.objective_coefficients()[e.col]),
2246  variable_lower_bound_(lp.variable_lower_bounds()[e.col]),
2247  variable_upper_bound_(lp.variable_upper_bounds()[e.col]),
2248  constraint_lower_bound_(lp.constraint_lower_bounds()[e.row]),
2249  constraint_upper_bound_(lp.constraint_upper_bounds()[e.row]),
2250  constraint_status_(status) {}
2251 
// Dispatches to the specific undo routine matching the type of singleton
// reduction that was applied (singleton row, zero-cost singleton column,
// singleton column in an equality, or make-constraint-an-equality).
// NOTE(review): the listing dropped the first signature line and the
// 'case SingletonUndo::...' labels of this switch; confirm against the
// original preprocessor.cc.
2253  const SparseColumn& saved_column,
2254  const SparseColumn& saved_row,
2255  ProblemSolution* solution) const {
2256  switch (type_) {
2257  case SINGLETON_ROW:
2258  SingletonRowUndo(saved_column, solution);
2259  break;
2261  ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2262  break;
2264  SingletonColumnInEqualityUndo(parameters, saved_row, solution);
2265  break;
2267  MakeConstraintAnEqualityUndo(solution);
2268  break;
2269  }
2270 }
2271 
// Removes a singleton row (a constraint with exactly one non-zero, e). The
// constraint a*x in [l, u] is equivalent to a bound on the single variable
// x = e.col; that implied bound is intersected with the variable's current
// bounds, an undo record and the column are saved for postsolve, and the row
// is marked for deletion.
2272 void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e,
2273  LinearProgram* lp) {
2274  Fractional implied_lower_bound =
2275  lp->constraint_lower_bounds()[e.row] / e.coeff;
2276  Fractional implied_upper_bound =
2277  lp->constraint_upper_bounds()[e.row] / e.coeff;
// Dividing by a negative coefficient flips the interval.
2278  if (e.coeff < 0.0) {
2279  std::swap(implied_lower_bound, implied_upper_bound);
2280  }
2281 
2282  const Fractional old_lower_bound = lp->variable_lower_bounds()[e.col];
2283  const Fractional old_upper_bound = lp->variable_upper_bounds()[e.col];
2284 
// Only tighten a bound when the implied bound is strictly better by more
// than the numerical error introduced by the division above.
2285  const Fractional potential_error =
2286  std::abs(parameters_.preprocessor_zero_tolerance() / e.coeff);
2287  Fractional new_lower_bound =
2288  implied_lower_bound - potential_error > old_lower_bound
2289  ? implied_lower_bound
2290  : old_lower_bound;
2291  Fractional new_upper_bound =
2292  implied_upper_bound + potential_error < old_upper_bound
2293  ? implied_upper_bound
2294  : old_upper_bound;
2295 
2296  // This can happen if we ask for 1e-300 * x to be >= 1e9.
2297  if (new_upper_bound == -kInfinity || new_lower_bound == kInfinity) {
2298  VLOG(1) << "Problem ProblemStatus::PRIMAL_INFEASIBLE, singleton "
2299  "row causes the bound of the variable "
2300  << e.col << " to go to infinity.";
// NOTE(review): the dropped line 2301 presumably sets
// status_ = ProblemStatus::PRIMAL_INFEASIBLE; confirm against the original.
2302  return;
2303  }
2304 
2305  if (new_upper_bound < new_lower_bound) {
2306  if (!IsSmallerWithinFeasibilityTolerance(new_lower_bound,
2307  new_upper_bound)) {
2308  VLOG(1) << "Problem ProblemStatus::PRIMAL_INFEASIBLE, singleton "
2309  "row causes the bound of the variable "
2310  << e.col << " to be infeasible by "
2311  << new_lower_bound - new_upper_bound;
2313  return;
2314  }
2315 
2316  // Otherwise, fix the variable to one of its bounds.
2317  if (new_lower_bound == lp->variable_lower_bounds()[e.col]) {
2318  new_upper_bound = new_lower_bound;
2319  }
2320  if (new_upper_bound == lp->variable_upper_bounds()[e.col]) {
2321  new_lower_bound = new_upper_bound;
2322  }
2323 
2324  // When both new bounds are coming from the constraint and are crossing, it
2325  // means the constraint bounds where originally crossing too. We arbitrarily
2326  // choose one of the bound in this case.
2327  //
2328  // TODO(user): The code in this file shouldn't create crossing bounds at
2329  // any point, so we could decide which bound to use directly on the user
2330  // given problem before running any presolve.
2331  new_upper_bound = new_lower_bound;
2332  }
2333  row_deletion_helper_.MarkRowForDeletion(e.row);
// The undo record and the saved column must be captured BEFORE the variable
// bounds are changed below.
2334  undo_stack_.push_back(SingletonUndo(SingletonUndo::SINGLETON_ROW, *lp, e,
2336  columns_saver_.SaveColumnIfNotAlreadyDone(e.col, lp->GetSparseColumn(e.col));
2337 
2338  lp->SetVariableBounds(e.col, new_lower_bound, new_upper_bound);
2339 }
2340 
2341 // The dual value of the row needs to be corrected to stay at the optimal.
// Postsolve for DeleteSingletonRow(): decides, from the variable's status and
// from whether the singleton row had actually tightened its bounds, what the
// restored constraint status and dual value must be. When a changed bound is
// active, the variable becomes BASIC and the row's dual absorbs the
// variable's reduced cost.
2342 void SingletonUndo::SingletonRowUndo(const SparseColumn& saved_column,
2343  ProblemSolution* solution) const {
2344  DCHECK_EQ(0, solution->dual_values[e_.row]);
2345 
2346  // If the variable is basic or free, we can just keep the constraint
2347  // VariableStatus::BASIC and 0.0 as the dual value.
2348  const VariableStatus status = solution->variable_statuses[e_.col];
2349  if (status == VariableStatus::BASIC || status == VariableStatus::FREE) return;
2350 
2351  // Compute whether or not the variable bounds changed.
2352  Fractional implied_lower_bound = constraint_lower_bound_ / e_.coeff;
2353  Fractional implied_upper_bound = constraint_upper_bound_ / e_.coeff;
2354  if (e_.coeff < 0.0) {
2355  std::swap(implied_lower_bound, implied_upper_bound);
2356  }
2357  const bool lower_bound_changed = implied_lower_bound > variable_lower_bound_;
2358  const bool upper_bound_changed = implied_upper_bound < variable_upper_bound_;
2359 
2360  if (!lower_bound_changed && !upper_bound_changed) return;
2361  if (status == VariableStatus::AT_LOWER_BOUND && !lower_bound_changed) return;
2362  if (status == VariableStatus::AT_UPPER_BOUND && !upper_bound_changed) return;
2363 
2364  // This is the reduced cost of the variable before the singleton constraint is
2365  // added back.
2366  const Fractional reduced_cost =
2367  cost_ - ScalarProduct(solution->dual_values, saved_column);
2368  const Fractional reduced_cost_for_minimization =
2369  is_maximization_ ? -reduced_cost : reduced_cost;
2370 
// A FIXED variable whose fixing came from only one side can be demoted to
// the matching AT_*_BOUND status when the reduced cost sign allows it.
2371  if (status == VariableStatus::FIXED_VALUE) {
2372  DCHECK(lower_bound_changed || upper_bound_changed);
2373  if (reduced_cost_for_minimization >= 0.0 && !lower_bound_changed) {
2374  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2375  return;
2376  }
2377  if (reduced_cost_for_minimization <= 0.0 && !upper_bound_changed) {
2378  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2379  return;
2380  }
2381  }
2382 
2383  // If one of the variable bounds changes, and the variable is no longer at one
2384  // of its bounds, then its reduced cost needs to be set to 0.0 and the
2385  // variable becomes a basic variable. This is what the line below do, since
2386  // the new reduced cost of the variable will be equal to:
2387  // old_reduced_cost - coeff * solution->dual_values[row]
2388  //
2389  // TODO(user): This code is broken for integer variable.
2390  // Say our singleton row is 2 * y <= 5, and y was at its implied bound y = 2
2391  // at postsolve. The problem is that we can end up with an AT_UPPER_BOUND
2392  // status for the constraint 2 * y <= 5 which is not correct since the
2393  // activity is 4, and that break later preconditions. Maybe there is a way to
2394  // fix everything, but it seems tough to be sure.
2395  solution->dual_values[e_.row] = reduced_cost / e_.coeff;
2396  ConstraintStatus new_constraint_status = VariableToConstraintStatus(status);
2397  if (status == VariableStatus::FIXED_VALUE &&
2398  (!lower_bound_changed || !upper_bound_changed)) {
// NOTE(review): the ternary's two ConstraintStatus values (lines 2400-2401,
// presumably AT_LOWER_BOUND / AT_UPPER_BOUND) were dropped by the listing.
2399  new_constraint_status = lower_bound_changed
2402  }
// A negative coefficient flips which constraint side corresponds to which
// variable bound.
2403  if (e_.coeff < 0.0) {
2404  if (new_constraint_status == ConstraintStatus::AT_LOWER_BOUND) {
2405  new_constraint_status = ConstraintStatus::AT_UPPER_BOUND;
2406  } else if (new_constraint_status == ConstraintStatus::AT_UPPER_BOUND) {
2407  new_constraint_status = ConstraintStatus::AT_LOWER_BOUND;
2408  }
2409  }
2410  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2411  solution->constraint_statuses[e_.row] = new_constraint_status;
2412 }
2413 
2414 void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds(
2415  MatrixEntry e, LinearProgram* lp) {
2416  Fractional lower_delta = -e.coeff * lp->variable_upper_bounds()[e.col];
2417  Fractional upper_delta = -e.coeff * lp->variable_lower_bounds()[e.col];
2418  if (e.coeff < 0.0) {
2419  std::swap(lower_delta, upper_delta);
2420  }
2421  lp->SetConstraintBounds(e.row,
2422  lp->constraint_lower_bounds()[e.row] + lower_delta,
2423  lp->constraint_upper_bounds()[e.row] + upper_delta);
2424 }
2425 
// In a MIP context, returns true iff removing the integer singleton column
// matrix_entry preserves integrality of its unique constraint: every other
// variable of the row must be integer, every coefficient ratio
// coeff / matrix_entry.coeff must be integer, and each finite constraint
// bound divided by matrix_entry.coeff must be integer (within the solution
// feasibility tolerance).
2426 bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable(
2427  const MatrixEntry& matrix_entry, const LinearProgram& lp) const {
// NOTE(review): dropped line 2428 is presumably 'DCHECK(in_mip_context_);'.
2429  DCHECK(lp.IsVariableInteger(matrix_entry.col));
2430  const SparseMatrix& transpose = lp.GetTransposeSparseMatrix();
2431  for (const SparseColumn::Entry entry :
2432  transpose.column(RowToColIndex(matrix_entry.row))) {
2433  // Check if the variable is integer.
2434  if (!lp.IsVariableInteger(RowToColIndex(entry.row()))) {
2435  return false;
2436  }
2437 
2438  const Fractional coefficient = entry.coefficient();
2439  const Fractional coefficient_ratio = coefficient / matrix_entry.coeff;
2440  // Check if coefficient_ratio is integer.
// NOTE(review): the dropped lines 2441/2450/2459 presumably read
// 'if (!IsIntegerWithinTolerance(' — confirm against the original.
2442  coefficient_ratio, parameters_.solution_feasibility_tolerance())) {
2443  return false;
2444  }
2445  }
2446  const Fractional constraint_lb =
2447  lp.constraint_lower_bounds()[matrix_entry.row];
2448  if (IsFinite(constraint_lb)) {
2449  const Fractional lower_bound_ratio = constraint_lb / matrix_entry.coeff;
2451  lower_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2452  return false;
2453  }
2454  }
2455  const Fractional constraint_ub =
2456  lp.constraint_upper_bounds()[matrix_entry.row];
2457  if (IsFinite(constraint_ub)) {
2458  const Fractional upper_bound_ratio = constraint_ub / matrix_entry.coeff;
2460  upper_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2461  return false;
2462  }
2463  }
2464  return true;
2465 }
2466 
// Removes a zero-cost singleton column e.col: saves an undo record and the
// row (as a column of the transpose) for postsolve, folds the variable's
// bound interval into the constraint bounds, and marks the column deleted.
// Ordering is load-bearing: the SingletonUndo must be constructed BEFORE
// UpdateConstraintBoundsWithVariableBounds() changes the constraint bounds,
// since the undo snapshots the pre-modification bounds.
2467 void SingletonPreprocessor::DeleteZeroCostSingletonColumn(
2468  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2469  const ColIndex transpose_col = RowToColIndex(e.row);
2470  undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN,
2471  *lp, e, ConstraintStatus::FREE));
2472  const SparseColumn& row_as_col = transpose.column(transpose_col);
2473  rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_col);
2474  UpdateConstraintBoundsWithVariableBounds(e, lp);
2475  column_deletion_helper_.MarkColumnForDeletion(e.col);
2476 }
2477 
2478 // We need to restore the variable value in order to satisfy the constraint.
2479 void SingletonUndo::ZeroCostSingletonColumnUndo(
2480  const GlopParameters& parameters, const SparseColumn& saved_row,
2481  ProblemSolution* solution) const {
2482  // If the variable was fixed, this is easy. Note that this is the only
2483  // possible case if the current constraint status is FIXED, except if the
2484  // variable bounds are small compared to the constraint bounds, like adding
2485  // 1e-100 to a fixed == 1 constraint.
2486  if (variable_upper_bound_ == variable_lower_bound_) {
2487  solution->primal_values[e_.col] = variable_lower_bound_;
2488  solution->variable_statuses[e_.col] = VariableStatus::FIXED_VALUE;
2489  return;
2490  }
2491 
2492  const ConstraintStatus ct_status = solution->constraint_statuses[e_.row];
2493  if (ct_status == ConstraintStatus::FIXED_VALUE) {
2494  const Fractional corrected_dual = is_maximization_
2495  ? -solution->dual_values[e_.row]
2496  : solution->dual_values[e_.row];
2497  if (corrected_dual > 0) {
2498  DCHECK(IsFinite(variable_lower_bound_));
2499  solution->primal_values[e_.col] = variable_lower_bound_;
2500  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2501  } else {
2502  DCHECK(IsFinite(variable_upper_bound_));
2503  solution->primal_values[e_.col] = variable_upper_bound_;
2504  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2505  }
2506  return;
2507  } else if (ct_status == ConstraintStatus::AT_LOWER_BOUND ||
2508  ct_status == ConstraintStatus::AT_UPPER_BOUND) {
2509  if ((ct_status == ConstraintStatus::AT_UPPER_BOUND && e_.coeff > 0.0) ||
2510  (ct_status == ConstraintStatus::AT_LOWER_BOUND && e_.coeff < 0.0)) {
2511  DCHECK(IsFinite(variable_lower_bound_));
2512  solution->primal_values[e_.col] = variable_lower_bound_;
2513  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2514  } else {
2515  DCHECK(IsFinite(variable_upper_bound_));
2516  solution->primal_values[e_.col] = variable_upper_bound_;
2517  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2518  }
2519  if (constraint_upper_bound_ == constraint_lower_bound_) {
2520  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2521  }
2522  return;
2523  }
2524 
2525  // This is the activity of the constraint before the singleton variable is
2526  // added back to it.
2527  const Fractional activity = ScalarProduct(solution->primal_values, saved_row);
2528 
2529  // First we try to fix the variable at its lower or upper bound and leave the
2530  // constraint VariableStatus::BASIC. Note that we use the same logic as in
2531  // Preprocessor::IsSmallerWithinPreprocessorZeroTolerance() which we can't use
2532  // here because we are not deriving from the Preprocessor class.
2533  const Fractional tolerance = parameters.preprocessor_zero_tolerance();
2534  const auto is_smaller_with_tolerance = [tolerance](Fractional a,
2535  Fractional b) {
2537  };
2538  if (variable_lower_bound_ != -kInfinity) {
2539  const Fractional activity_at_lb =
2540  activity + e_.coeff * variable_lower_bound_;
2541  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_lb) &&
2542  is_smaller_with_tolerance(activity_at_lb, constraint_upper_bound_)) {
2543  solution->primal_values[e_.col] = variable_lower_bound_;
2544  solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2545  return;
2546  }
2547  }
2548  if (variable_upper_bound_ != kInfinity) {
2549  const Fractional activity_at_ub =
2550  activity + e_.coeff * variable_upper_bound_;
2551  if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_ub) &&
2552  is_smaller_with_tolerance(activity_at_ub, constraint_upper_bound_)) {
2553  solution->primal_values[e_.col] = variable_upper_bound_;
2554  solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2555  return;
2556  }
2557  }
2558 
2559  // If the current constraint is UNBOUNDED, then the variable is too
2560  // because of the two cases above. We just set its status to
2561  // VariableStatus::FREE.
2562  if (constraint_lower_bound_ == -kInfinity &&
2563  constraint_upper_bound_ == kInfinity) {
2564  solution->primal_values[e_.col] = 0.0;
2565  solution->variable_statuses[e_.col] = VariableStatus::FREE;
2566  return;
2567  }
2568 
2569  // If the previous cases didn't apply, the constraint will be fixed to its
2570  // bounds and the variable will be made VariableStatus::BASIC.
2571  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2572  if (constraint_lower_bound_ == constraint_upper_bound_) {
2573  solution->primal_values[e_.col] =
2574  (constraint_lower_bound_ - activity) / e_.coeff;
2575  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2576  return;
2577  }
2578 
2579  bool set_constraint_to_lower_bound;
2580  if (constraint_lower_bound_ == -kInfinity) {
2581  set_constraint_to_lower_bound = false;
2582  } else if (constraint_upper_bound_ == kInfinity) {
2583  set_constraint_to_lower_bound = true;
2584  } else {
2585  // In this case we select the value that is the most inside the variable
2586  // bound.
2587  const Fractional to_lb = (constraint_lower_bound_ - activity) / e_.coeff;
2588  const Fractional to_ub = (constraint_upper_bound_ - activity) / e_.coeff;
2589  set_constraint_to_lower_bound =
2590  std::max(variable_lower_bound_ - to_lb, to_lb - variable_upper_bound_) <
2591  std::max(variable_lower_bound_ - to_ub, to_ub - variable_upper_bound_);
2592  }
2593 
2594  if (set_constraint_to_lower_bound) {
2595  solution->primal_values[e_.col] =
2596  (constraint_lower_bound_ - activity) / e_.coeff;
2597  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_LOWER_BOUND;
2598  } else {
2599  solution->primal_values[e_.col] =
2600  (constraint_upper_bound_ - activity) / e_.coeff;
2601  solution->constraint_statuses[e_.row] = ConstraintStatus::AT_UPPER_BOUND;
2602  }
2603 }
2604 
// Deletes a singleton column whose (single) row is an equality constraint.
// The equality v_col * coeff + expression = rhs is used to substitute the
// variable out of the objective: the variable's cost is spread over the other
// columns of the row and the objective offset is updated accordingly.
// NOTE(review): one line of this listing (the tail of the undo_stack_ push,
// orig line 2612) was lost during extraction; comments only here.
2605 void SingletonPreprocessor::DeleteSingletonColumnInEquality(
2606  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2607  // Save information for the undo.
2608  const ColIndex transpose_col = RowToColIndex(e.row);
2609  const SparseColumn& row_as_column = transpose.column(transpose_col);
2610  undo_stack_.push_back(
2611  SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e,
2613  rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_column);
2614 
2615  // Update the objective function using the equality constraint. We have
2616  // v_col*coeff + expression = rhs,
2617  // so the contribution of this variable to the cost function (v_col * cost)
2618  // can be rewritten as:
2619  // (rhs * cost - expression * cost) / coeff.
2620  const Fractional rhs = lp->constraint_upper_bounds()[e.row];
2621  const Fractional cost = lp->objective_coefficients()[e.col];
2622  const Fractional multiplier = cost / e.coeff;
2623  lp->SetObjectiveOffset(lp->objective_offset() + rhs * multiplier);
2624  for (const SparseColumn::Entry e : row_as_column) {
2625  const ColIndex col = RowToColIndex(e.row());
2626  if (!column_deletion_helper_.IsColumnMarked(col)) {
2627  Fractional new_cost =
2628  lp->objective_coefficients()[col] - e.coefficient() * multiplier;
2629 
2630  // TODO(user): It is important to avoid having non-zero costs which are
2631  // the result of numerical error. This is because we still miss some
2632  // tolerances in a few preprocessors. Like an empty column with a cost of
2633  // 1e-17 and unbounded towards infinity is currently implying that the
2634  // problem is unbounded. This will need fixing.
2635  if (std::abs(new_cost) < parameters_.preprocessor_zero_tolerance()) {
2636  new_cost = 0.0;
2637  }
2638  lp->SetObjectiveCoefficient(col, new_cost);
2639  }
2640  }
2641 
2642  // Now delete the column like a singleton column without cost.
2643  UpdateConstraintBoundsWithVariableBounds(e, lp);
2644  column_deletion_helper_.MarkColumnForDeletion(e.col);
2645 }
2646 
2647 void SingletonUndo::SingletonColumnInEqualityUndo(
2648  const GlopParameters& parameters, const SparseColumn& saved_row,
2649  ProblemSolution* solution) const {
2650  // First do the same as a zero-cost singleton column.
2651  ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2652 
2653  // Then, restore the dual optimal value taking into account the cost
2654  // modification.
2655  solution->dual_values[e_.row] += cost_ / e_.coeff;
2656  if (solution->constraint_statuses[e_.row] == ConstraintStatus::BASIC) {
2657  solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2658  solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2659  }
2660 }
2661 
2662 void SingletonUndo::MakeConstraintAnEqualityUndo(
2663  ProblemSolution* solution) const {
2664  if (solution->constraint_statuses[e_.row] == ConstraintStatus::FIXED_VALUE) {
2665  solution->constraint_statuses[e_.row] = constraint_status_;
2666  }
2667 }
2668 
// Tries to turn the (single) constraint of the singleton column e.col into an
// equality. Using cached lb/ub activity sums of the row, it computes the
// bounds implied on the variable; when the variable's own bound is redundant
// w.r.t. the implied one, the constraint can be fixed to one of its bounds
// (and the variable bound relaxed), which lets the column be removed by
// DeleteSingletonColumnInEquality(). Returns true iff the row is now an
// equality. Returns false and sets an INFEASIBLE_OR_UNBOUNDED-related state
// when the singleton variable is unbounded in the improving direction.
// NOTE(review): several lines of this listing were lost during extraction
// (e.g. the declaration/assignments of relaxed_status and the enclosing
// IsSmallerWithinPreprocessorZeroTolerance() conditions); comments only here.
2669 bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible(
2670  const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2671  // TODO(user): We could skip early if the relevant constraint bound is
2672  // infinity.
2673  const Fractional cst_lower_bound = lp->constraint_lower_bounds()[e.row];
2674  const Fractional cst_upper_bound = lp->constraint_upper_bounds()[e.row];
2675  if (cst_lower_bound == cst_upper_bound) return true;
2676 
2677  // To be efficient, we only process a row once and cache the domain that an
2678  // "artificial" extra variable x with coefficient 1.0 could take while still
2679  // making the constraint feasible. The domain bounds for the constraint e.row
2680  // will be stored in row_lb_sum_[e.row] and row_ub_sum_[e.row].
2681  const DenseRow& variable_ubs = lp->variable_upper_bounds();
2682  const DenseRow& variable_lbs = lp->variable_lower_bounds();
2683  if (e.row >= row_sum_is_cached_.size() || !row_sum_is_cached_[e.row]) {
2684  if (e.row >= row_sum_is_cached_.size()) {
2685  const int new_size = e.row.value() + 1;
2686  row_sum_is_cached_.resize(new_size);
2687  row_lb_sum_.resize(new_size);
2688  row_ub_sum_.resize(new_size);
2689  }
2690  row_sum_is_cached_[e.row] = true;
2691  row_lb_sum_[e.row].Add(cst_lower_bound);
2692  row_ub_sum_[e.row].Add(cst_upper_bound);
2693  for (const SparseColumn::Entry entry :
2694  transpose.column(RowToColIndex(e.row))) {
2695  const ColIndex row_as_col = RowToColIndex(entry.row());
2696 
2697  // Tricky: Even if later more columns are deleted, these "cached" sums
2698  // will actually still be valid because we only delete columns in a
2699  // compatible way.
2700  //
2701  // TODO(user): Find a more robust way? it seems easy to add new deletion
2702  // rules that may break this assumption.
2703  if (column_deletion_helper_.IsColumnMarked(row_as_col)) continue;
2704  if (entry.coefficient() > 0.0) {
2705  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2706  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2707  } else {
2708  row_lb_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2709  row_ub_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2710  }
2711 
2712  // TODO(user): Abort early if both sums contain more than 1 infinity?
2713  }
2714  }
2715 
2716  // Now that the lb/ub sum for the row is cached, we can use it to compute the
2717  // implied bounds on the variable from this constraint and the other
2718  // variables.
2719  const Fractional c = e.coeff;
2720  const Fractional lb =
2721  c > 0.0 ? row_lb_sum_[e.row].SumWithoutLb(-c * variable_ubs[e.col]) / c
2722  : row_ub_sum_[e.row].SumWithoutUb(-c * variable_ubs[e.col]) / c;
2723  const Fractional ub =
2724  c > 0.0 ? row_ub_sum_[e.row].SumWithoutUb(-c * variable_lbs[e.col]) / c
2725  : row_lb_sum_[e.row].SumWithoutLb(-c * variable_lbs[e.col]) / c;
2726 
2727  // Note that we could do the same for singleton variables with a cost of
2728  // 0.0, but such variable are already dealt with by
2729  // DeleteZeroCostSingletonColumn() so there is no point.
2730  const Fractional cost =
2731  lp->GetObjectiveCoefficientForMinimizationVersion(e.col);
2732  DCHECK_NE(cost, 0.0);
2733 
2734  // Note that some of the tests below will be always true if the bounds of
2735  // the column of index col are infinite. This is the desired behavior.
// First case: the implied upper bound dominates the variable's upper bound.
2738  ub, lp->variable_upper_bounds()[e.col])) {
2739  if (e.coeff > 0) {
2740  if (cst_upper_bound == kInfinity) {
2742  } else {
2743  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2744  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2745  }
2746  } else {
2747  if (cst_lower_bound == -kInfinity) {
2749  } else {
2750  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2751  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2752  }
2753  }
2754 
2756  DCHECK_EQ(ub, kInfinity);
2757  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2758  "variable "
2759  << e.col << " has a cost (for minimization) of " << cost
2760  << " and is unbounded towards kInfinity.";
2761  return false;
2762  }
2763 
2764  // This is important but tricky: The upper bound of the variable needs to
2765  // be relaxed. This is valid because the implied bound is lower than the
2766  // original upper bound here. This is needed, so that the optimal
2767  // primal/dual values of the new problem will also be optimal of the
2768  // original one.
2769  //
2770  // Let's prove the case coeff > 0.0 for a minimization problem. In the new
2771  // problem, because the variable is unbounded towards +infinity, its
2772  // reduced cost must satisfy at optimality rc = cost - coeff * dual_v >=
2773  // 0. But this implies dual_v <= cost / coeff <= 0. This is exactly what
2774  // is needed for the optimality of the initial problem since the
2775  // constraint will be at its upper bound, and the corresponding slack
2776  // condition is that the dual value needs to be <= 0.
2777  lp->SetVariableBounds(e.col, lp->variable_lower_bounds()[e.col], kInfinity);
2778  }
// Symmetric second case: the implied lower bound dominates the variable's
// lower bound.
2780  lp->variable_lower_bounds()[e.col], lb)) {
2781  if (e.coeff > 0) {
2782  if (cst_lower_bound == -kInfinity) {
2784  } else {
2785  relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2786  lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2787  }
2788  } else {
2789  if (cst_upper_bound == kInfinity) {
2791  } else {
2792  relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2793  lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2794  }
2795  }
2796 
2798  DCHECK_EQ(lb, -kInfinity);
2799  VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2800  "variable "
2801  << e.col << " has a cost (for minimization) of " << cost
2802  << " and is unbounded towards -kInfinity.";
2803  return false;
2804  }
2805 
2806  // Same remark as above for a lower bounded variable this time.
2807  lp->SetVariableBounds(e.col, -kInfinity,
2808  lp->variable_upper_bounds()[e.col]);
2809  }
2810 
// Record the undo only if the row really became an equality.
2811  if (lp->constraint_lower_bounds()[e.row] ==
2812  lp->constraint_upper_bounds()[e.row]) {
2813  undo_stack_.push_back(SingletonUndo(
2814  SingletonUndo::MAKE_CONSTRAINT_AN_EQUALITY, *lp, e, relaxed_status));
2815  return true;
2816  }
2817  return false;
2818 }
2819 
// Body of SingletonPreprocessor::Run(LinearProgram* lp). It repeatedly
// removes singleton columns and singleton rows, re-enqueuing rows/columns
// whose degree drops to one, until a fixed point is reached or status_
// leaves INIT. Returns true iff anything was deleted.
// NOTE(review): the function signature line was lost during extraction;
// comments only here.
2822  RETURN_VALUE_IF_NULL(lp, false);
2823  const SparseMatrix& matrix = lp->GetSparseMatrix();
2824  const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2825 
2826  // Initialize column_to_process with the current singleton columns.
2827  ColIndex num_cols(matrix.num_cols());
2828  RowIndex num_rows(matrix.num_rows());
2829  StrictITIVector<ColIndex, EntryIndex> column_degree(num_cols, EntryIndex(0));
2830  std::vector<ColIndex> column_to_process;
2831  for (ColIndex col(0); col < num_cols; ++col) {
2832  column_degree[col] = matrix.column(col).num_entries();
2833  if (column_degree[col] == 1) {
2834  column_to_process.push_back(col);
2835  }
2836  }
2837 
2838  // Initialize row_to_process with the current singleton rows.
2839  StrictITIVector<RowIndex, EntryIndex> row_degree(num_rows, EntryIndex(0));
2840  std::vector<RowIndex> row_to_process;
2841  for (RowIndex row(0); row < num_rows; ++row) {
2842  row_degree[row] = transpose.column(RowToColIndex(row)).num_entries();
2843  if (row_degree[row] == 1) {
2844  row_to_process.push_back(row);
2845  }
2846  }
2847 
2848  // Process current singleton rows/columns and enqueue new ones.
2849  while (status_ == ProblemStatus::INIT &&
2850  (!column_to_process.empty() || !row_to_process.empty())) {
2851  while (status_ == ProblemStatus::INIT && !column_to_process.empty()) {
2852  const ColIndex col = column_to_process.back();
2853  column_to_process.pop_back();
// A degree <= 0 means the column was already deleted by an earlier step.
2854  if (column_degree[col] <= 0) continue;
2855  const MatrixEntry e = GetSingletonColumnMatrixEntry(col, matrix);
2856  if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2857  !IntegerSingletonColumnIsRemovable(e, *lp)) {
2858  continue;
2859  }
2860 
2861  // TODO(user): It seems better to process all the singleton columns with
2862  // a cost of zero first.
2863  if (lp->objective_coefficients()[col] == 0.0) {
2864  DeleteZeroCostSingletonColumn(transpose, e, lp);
2865  } else if (MakeConstraintAnEqualityIfPossible(transpose, e, lp)) {
2866  DeleteSingletonColumnInEquality(transpose, e, lp);
2867  } else {
2868  continue;
2869  }
// The column removal may turn its row into a new singleton row.
2870  --row_degree[e.row];
2871  if (row_degree[e.row] == 1) {
2872  row_to_process.push_back(e.row);
2873  }
2874  }
2875  while (status_ == ProblemStatus::INIT && !row_to_process.empty()) {
2876  const RowIndex row = row_to_process.back();
2877  row_to_process.pop_back();
2878  if (row_degree[row] <= 0) continue;
2879  const MatrixEntry e = GetSingletonRowMatrixEntry(row, transpose);
2880 
2881  // TODO(user): We should be able to restrict the variable bounds with the
2882  // ones of the constraint all the time. However, some situation currently
2883  // break the presolve, and it seems hard to fix in a 100% safe way.
2884  if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2885  !IntegerSingletonColumnIsRemovable(e, *lp)) {
2886  continue;
2887  }
2888 
2889  DeleteSingletonRow(e, lp);
// The row removal may turn its column into a new singleton column.
2890  --column_degree[e.col];
2891  if (column_degree[e.col] == 1) {
2892  column_to_process.push_back(e.col);
2893  }
2894  }
2895  }
2896 
2897  if (status_ != ProblemStatus::INIT) return false;
2898  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2899  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2900  return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2901 }
2902 
// Body of SingletonPreprocessor::RecoverSolution(ProblemSolution* solution).
// Restores the deleted rows/columns (with 0.0 placeholders), then replays the
// undo stack in reverse order to reconstruct the full primal/dual solution.
// NOTE(review): the function signature line was lost during extraction;
// comments only here.
2905  RETURN_IF_NULL(solution);
2906 
2907  // Note that the two deletion helpers must restore 0.0 values in the positions
2908  // that will be used during Undo(). That is, all the calls done by this class
2909  // to MarkColumnForDeletion() should be done with 0.0 as the value to restore
2910  // (which is already the case when using MarkRowForDeletion()).
2911  // This is important because the various Undo() functions assume that a
2912  // primal/dual variable value which isn't restored yet has the value of 0.0.
2913  column_deletion_helper_.RestoreDeletedColumns(solution);
2914  row_deletion_helper_.RestoreDeletedRows(solution);
2915 
2916  // It is important to undo the operations in the correct order, i.e. in the
2917  // reverse order in which they were done.
2918  for (int i = undo_stack_.size() - 1; i >= 0; --i) {
// Each undo gets the saved copy of its column and of its row (as a column of
// the transpose); an empty column is supplied when nothing was saved.
2919  const SparseColumn& saved_col =
2920  columns_saver_.SavedOrEmptyColumn(undo_stack_[i].Entry().col);
2921  const SparseColumn& saved_row = rows_saver_.SavedOrEmptyColumn(
2922  RowToColIndex(undo_stack_[i].Entry().row));
2923  undo_stack_[i].Undo(parameters_, saved_col, saved_row, solution);
2924  }
2925 }
2926 
// Returns the unique entry (row, col, coefficient) of a singleton column,
// skipping entries whose row is already marked for deletion.
// NOTE(review): the line between the LOG and the return (orig 2937, likely
// setting an ABNORMAL status) was lost during extraction.
2927 MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry(
2928  ColIndex col, const SparseMatrix& matrix) {
2929  for (const SparseColumn::Entry e : matrix.column(col)) {
2930  if (!row_deletion_helper_.IsRowMarked(e.row())) {
2931  DCHECK_NE(0.0, e.coefficient());
2932  return MatrixEntry(e.row(), col, e.coefficient());
2933  }
2934  }
2935  // This shouldn't happen.
2936  LOG(DFATAL) << "No unmarked entry in a column that is supposed to have one.";
// Dummy entry returned on the (unexpected) failure path above.
2938  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2939 }
2940 
// Returns the unique entry (row, col, coefficient) of a singleton row by
// scanning the corresponding column of the transpose, skipping entries whose
// column is already marked for deletion.
// NOTE(review): the line between the LOG and the return (orig 2952, likely
// setting an ABNORMAL status) was lost during extraction.
2941 MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry(
2942  RowIndex row, const SparseMatrix& transpose) {
2943  for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
2944  const ColIndex col = RowToColIndex(e.row());
2945  if (!column_deletion_helper_.IsColumnMarked(col)) {
2946  DCHECK_NE(0.0, e.coefficient());
2947  return MatrixEntry(row, col, e.coefficient());
2948  }
2949  }
2950  // This shouldn't happen.
2951  LOG(DFATAL) << "No unmarked entry in a row that is supposed to have one.";
// Dummy entry returned on the (unexpected) failure path above.
2953  return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2954 }
2955 
2956 // --------------------------------------------------------
2957 // RemoveNearZeroEntriesPreprocessor
2958 // --------------------------------------------------------
2959 
// Body of RemoveNearZeroEntriesPreprocessor::Run(LinearProgram* lp).
// Drops matrix entries (and objective coefficients) whose magnitude is small
// enough — relative to the variable's bound magnitude and the row degree —
// that removing them cannot change a primal-feasible solution by more than
// the preprocessor zero tolerance. Always returns false: no post-solve step
// is needed. NOTE(review): the function signature and the per-column
// lower/upper-bound declarations (orig 2991-2992) were lost during
// extraction; comments only here.
2962  RETURN_VALUE_IF_NULL(lp, false);
2963  const ColIndex num_cols = lp->num_variables();
2964  if (num_cols == 0) return false;
2965 
2966  // We will use a different threshold for each row depending on its degree.
2967  // We use Fractionals for convenience since they will be used as such below.
2968  const RowIndex num_rows = lp->num_constraints();
2969  DenseColumn row_degree(num_rows, 0.0);
2970  Fractional num_non_zero_objective_coefficients = 0.0;
2971  for (ColIndex col(0); col < num_cols; ++col) {
2972  for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2973  row_degree[e.row()] += 1.0;
2974  }
2975  if (lp->objective_coefficients()[col] != 0.0) {
2976  num_non_zero_objective_coefficients += 1.0;
2977  }
2978  }
2979 
2980  // To not have too many parameters, we use the preprocessor_zero_tolerance.
2981  const Fractional allowed_impact = parameters_.preprocessor_zero_tolerance();
2982 
2983  // TODO(user): Our criteria ensure that during presolve a primal feasible
2984  // solution will stay primal feasible. However, we have no guarantee on the
2985  // dual-feasibility (because the dual variable values range is not taken into
2986  // account). Fix that? or find a better criteria since it seems that on all
2987  // our current problems, this preprocessor helps and doesn't introduce errors.
2988  const EntryIndex initial_num_entries = lp->num_entries();
2989  int num_zeroed_objective_coefficients = 0;
2990  for (ColIndex col(0); col < num_cols; ++col) {
2993 
2994  // TODO(user): Write a small class that takes a matrix, its transpose, row
2995  // and column bounds, and "propagate" the bounds as much as possible so we
2996  // can use this better estimate here and remove more near-zero entries.
2997  const Fractional max_magnitude =
2998  std::max(std::abs(lower_bound), std::abs(upper_bound));
// Skip free or fixed-at-zero variables: no meaningful scale to compare to.
2999  if (max_magnitude == kInfinity || max_magnitude == 0) continue;
3000  const Fractional threshold = allowed_impact / max_magnitude;
3002  threshold, row_degree);
3003 
3004  if (lp->objective_coefficients()[col] != 0.0 &&
3005  num_non_zero_objective_coefficients *
3006  std::abs(lp->objective_coefficients()[col]) <
3007  threshold) {
3008  lp->SetObjectiveCoefficient(col, 0.0);
3009  ++num_zeroed_objective_coefficients;
3010  }
3011  }
3012 
3013  const EntryIndex num_entries = lp->num_entries();
3014  if (num_entries != initial_num_entries) {
3015  VLOG(1) << "Removed " << initial_num_entries - num_entries
3016  << " near-zero entries.";
3017  }
3018  if (num_zeroed_objective_coefficients > 0) {
3019  VLOG(1) << "Removed " << num_zeroed_objective_coefficients
3020  << " near-zero objective coefficients.";
3021  }
3022 
3023  // No post-solve is required.
3024  return false;
3025 }
3026 
// RemoveNearZeroEntriesPreprocessor::RecoverSolution is intentionally a
// no-op: Run() performs no transformation that needs post-solving.
// NOTE(review): the first line of the signature was lost during extraction.
3028  ProblemSolution* solution) const {}
3029 
3030 // --------------------------------------------------------
3031 // SingletonColumnSignPreprocessor
3032 // --------------------------------------------------------
3033 
// Body of SingletonColumnSignPreprocessor::Run(LinearProgram* lp).
// Flips the sign of every singleton column whose single coefficient is
// negative, so that all singleton columns end up with a positive coefficient.
// The changed columns are remembered for RecoverSolution(). Returns true iff
// at least one column was changed. NOTE(review): the function signature and
// the lines mirroring the variable bounds/cost (orig 3051, 3053) were lost
// during extraction; comments only here.
3036  RETURN_VALUE_IF_NULL(lp, false);
3037  const ColIndex num_cols = lp->num_variables();
3038  if (num_cols == 0) return false;
3039 
3040  changed_columns_.clear();
3041  int num_singletons = 0;
3042  for (ColIndex col(0); col < num_cols; ++col) {
3043  SparseColumn* sparse_column = lp->GetMutableSparseColumn(col);
3044  const Fractional cost = lp->objective_coefficients()[col];
3045  if (sparse_column->num_entries() == 1) {
3046  ++num_singletons;
3047  }
3048  if (sparse_column->num_entries() == 1 &&
3049  sparse_column->GetFirstCoefficient() < 0) {
// Negate the column; the variable bounds (and cost) must be mirrored too.
3050  sparse_column->MultiplyByConstant(-1.0);
3052  -lp->variable_lower_bounds()[col]);
3054  changed_columns_.push_back(col);
3055  }
3056  }
3057  VLOG(1) << "Changed the sign of " << changed_columns_.size() << " columns.";
3058  VLOG(1) << num_singletons << " singleton columns left.";
3059  return !changed_columns_.empty();
3060 }
3061 
// Body of SingletonColumnSignPreprocessor::RecoverSolution: negates the
// primal value of each changed column and swaps its AT_LOWER_BOUND /
// AT_UPPER_BOUND status, undoing the sign flip done in Run().
// NOTE(review): the signature start and the AT_UPPER_BOUND branch of the
// status swap (orig 3070-3071) were lost during extraction.
3063  ProblemSolution* solution) const {
3065  RETURN_IF_NULL(solution);
3066  for (int i = 0; i < changed_columns_.size(); ++i) {
3067  const ColIndex col = changed_columns_[i];
3068  solution->primal_values[col] = -solution->primal_values[col];
3069  const VariableStatus status = solution->variable_statuses[col];
3072  } else if (status == VariableStatus::AT_LOWER_BOUND) {
3074  }
3075  }
3076 }
3077 
3078 // --------------------------------------------------------
3079 // DoubletonEqualityRowPreprocessor
3080 // --------------------------------------------------------
3081 
// Body of DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp).
// For each equality row aX + bY = c with exactly two entries, substitutes X
// out of the problem (Y absorbs X's contribution in every other row and in
// the objective), tightening Y's bounds to keep the substitution valid, and
// records a RestoreInfo for post-solve. Rows are processed sparsest-column
// first to limit fill-in. NOTE(review): the function signature and a few
// lines (orig 3188, 3210, 3222, 3246, 3259 — status_ assignments, fall-back
// VariableStatus arms, and the AddMultipleToSparseVectorAndDeleteCommonIndex
// call head) were lost during extraction; comments only here.
3084  RETURN_VALUE_IF_NULL(lp, false);
3085 
3086  // This is needed at postsolve.
3087  //
3088  // TODO(user): Get rid of the FIXED status instead to avoid spending
3089  // time/memory for no good reason here.
3090  saved_row_lower_bounds_ = lp->constraint_lower_bounds();
3091  saved_row_upper_bounds_ = lp->constraint_upper_bounds();
3092 
3093  // This is needed for postsolving dual.
3094  saved_objective_ = lp->objective_coefficients();
3095 
3096  // Note that we don't update the transpose during this preprocessor run.
3097  const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix();
3098 
3099  // Heuristic: We try to subtitute sparse columns first to avoid a complexity
3100  // explosion. Note that if we do long chain of substitution, we can still end
3101  // up with a complexity of O(num_rows x num_cols) instead of O(num_entries).
3102  //
3103  // TODO(user): There is probably some more robust ways.
3104  std::vector<std::pair<int64_t, RowIndex>> sorted_rows;
3105  const RowIndex num_rows(lp->num_constraints());
3106  for (RowIndex row(0); row < num_rows; ++row) {
3107  const SparseColumn& original_row =
3108  original_transpose.column(RowToColIndex(row));
// Only doubleton equality rows are candidates.
3109  if (original_row.num_entries() != 2 ||
3110  lp->constraint_lower_bounds()[row] !=
3111  lp->constraint_upper_bounds()[row]) {
3112  continue;
3113  }
3114  int64_t score = 0;
3115  for (const SparseColumn::Entry e : original_row) {
3116  const ColIndex col = RowToColIndex(e.row());
3117  score += lp->GetSparseColumn(col).num_entries().value();
3118  }
3119  sorted_rows.push_back({score, row});
3120  }
3121  std::sort(sorted_rows.begin(), sorted_rows.end());
3122 
3123  // Iterate over the rows that were already doubletons before this preprocessor
3124  // run, and whose items don't belong to a column that we deleted during this
3125  // run. This implies that the rows are only ever touched once per run, because
3126  // we only modify rows that have an item on a deleted column.
3127  for (const auto p : sorted_rows) {
3128  const RowIndex row = p.second;
3129  const SparseColumn& original_row =
3130  original_transpose.column(RowToColIndex(row));
3131 
3132  // Collect the two row items. Skip the ones involving a deleted column.
3133  // Note: we filled r.col[] and r.coeff[] by item order, and currently we
3134  // always pick the first column as the to-be-deleted one.
3135  // TODO(user): make a smarter choice of which column to delete, and
3136  // swap col[] and coeff[] accordingly.
3137  RestoreInfo r; // Use a short name since we're using it everywhere.
3138  int entry_index = 0;
3139  for (const SparseColumn::Entry e : original_row) {
3140  const ColIndex col = RowToColIndex(e.row());
3141  if (column_deletion_helper_.IsColumnMarked(col)) continue;
3142  r.col[entry_index] = col;
3143  r.coeff[entry_index] = e.coefficient();
3144  DCHECK_NE(0.0, r.coeff[entry_index]);
3145  ++entry_index;
3146  }
3147 
3148  // Discard some cases that will be treated by other preprocessors, or by
3149  // another run of this one.
3150  // 1) One or two of the items were in a deleted column.
3151  if (entry_index < 2) continue;
3152 
3153  // Fill the RestoreInfo, even if we end up not using it (because we
3154  // give up on preprocessing this row): it has a bunch of handy shortcuts.
3155  r.row = row;
3156  r.rhs = lp->constraint_lower_bounds()[row];
3157  for (int col_choice = 0; col_choice < NUM_DOUBLETON_COLS; ++col_choice) {
3158  const ColIndex col = r.col[col_choice];
3159  r.lb[col_choice] = lp->variable_lower_bounds()[col];
3160  r.ub[col_choice] = lp->variable_upper_bounds()[col];
3161  r.objective_coefficient[col_choice] = lp->objective_coefficients()[col];
3162  }
3163 
3164  // 2) One of the columns is fixed: don't bother, it will be treated
3165  // by the FixedVariablePreprocessor.
3166  if (r.lb[DELETED] == r.ub[DELETED] || r.lb[MODIFIED] == r.ub[MODIFIED]) {
3167  continue;
3168  }
3169 
3170  // Look at the bounds of both variables and exit early if we can delegate
3171  // to another pre-processor; otherwise adjust the bounds of the remaining
3172  // variable as necessary.
3173  // If the current row is: aX + bY = c, then the bounds of Y must be
3174  // adjusted to satisfy Y = c/b + (-a/b)X
3175  //
3176  // Note: when we compute the coefficients of these equations, we can cause
3177  // underflows/overflows that could be avoided if we did the computations
3178  // more carefully; but for now we just treat those cases as
3179  // ProblemStatus::ABNORMAL.
3180  // TODO(user): consider skipping the problematic rows in this preprocessor,
3181  // or trying harder to avoid the under/overflow.
3182  {
3183  const Fractional carry_over_offset = r.rhs / r.coeff[MODIFIED];
3184  const Fractional carry_over_factor =
3185  -r.coeff[DELETED] / r.coeff[MODIFIED];
3186  if (!IsFinite(carry_over_offset) || !IsFinite(carry_over_factor) ||
3187  carry_over_factor == 0.0) {
3189  break;
3190  }
3191 
3192  Fractional lb = r.lb[MODIFIED];
3193  Fractional ub = r.ub[MODIFIED];
3194  Fractional carried_over_lb =
3195  r.lb[DELETED] * carry_over_factor + carry_over_offset;
3196  Fractional carried_over_ub =
3197  r.ub[DELETED] * carry_over_factor + carry_over_offset;
3198  if (carry_over_factor < 0) {
3199  std::swap(carried_over_lb, carried_over_ub);
3200  }
3201  if (carried_over_lb <= lb) {
3202  // Default (and simplest) case: the lower bound didn't change.
3203  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3204  MODIFIED, VariableStatus::AT_LOWER_BOUND, lb);
3205  } else {
3206  lb = carried_over_lb;
3207  r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3208  DELETED,
3209  carry_over_factor > 0 ? VariableStatus::AT_LOWER_BOUND
3211  carry_over_factor > 0 ? r.lb[DELETED] : r.ub[DELETED]);
3212  }
3213  if (carried_over_ub >= ub) {
3214  // Default (and simplest) case: the upper bound didn't change.
3215  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3216  MODIFIED, VariableStatus::AT_UPPER_BOUND, ub);
3217  } else {
3218  ub = carried_over_ub;
3219  r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3220  DELETED,
3221  carry_over_factor > 0 ? VariableStatus::AT_UPPER_BOUND
3223  carry_over_factor > 0 ? r.ub[DELETED] : r.lb[DELETED]);
3224  }
3225  // 3) If the new bounds are fixed (the domain is a singleton) or
3226  // infeasible, then we let the
3227  // ForcingAndImpliedFreeConstraintPreprocessor do the work.
3228  if (IsSmallerWithinPreprocessorZeroTolerance(ub, lb)) continue;
3229  lp->SetVariableBounds(r.col[MODIFIED], lb, ub);
3230  }
3231 
3232  restore_stack_.push_back(r);
3233 
3234  // Now, perform the substitution. If the current row is: aX + bY = c
3235  // then any other row containing 'X' with coefficient x can remove the
3236  // entry in X, and instead add an entry on 'Y' with coefficient x(-b/a)
3237  // and a constant offset x(c/a).
3238  // Looking at the matrix, this translates into colY += (-b/a) colX.
3239  DCHECK_NE(r.coeff[DELETED], 0.0);
3240  const Fractional substitution_factor =
3241  -r.coeff[MODIFIED] / r.coeff[DELETED]; // -b/a
3242  const Fractional constant_offset_factor = r.rhs / r.coeff[DELETED]; // c/a
3243  // Again we don't bother too much with over/underflows.
3244  if (!IsFinite(substitution_factor) || substitution_factor == 0.0 ||
3245  !IsFinite(constant_offset_factor)) {
3247  break;
3248  }
3249 
3250  // Note that we do not save again a saved column, so that we only save
3251  // columns from the initial LP. This is important to limit the memory usage.
3252  // It complexify a bit the postsolve though.
3253  for (const int col_choice : {DELETED, MODIFIED}) {
3254  const ColIndex col = r.col[col_choice];
3255  columns_saver_.SaveColumnIfNotAlreadyDone(col, lp->GetSparseColumn(col));
3256  }
3257 
// Perform colY += substitution_factor * colX, dropping the shared row entry.
3258  lp->GetSparseColumn(r.col[DELETED])
3260  substitution_factor, r.row, parameters_.drop_tolerance(),
3261  lp->GetMutableSparseColumn(r.col[MODIFIED]));
3262 
3263  // Apply similar operations on the objective coefficients.
3264  // Note that the offset is being updated by
3265  // SubtractColumnMultipleFromConstraintBound() below.
3266  {
3267  const Fractional new_objective =
3268  r.objective_coefficient[MODIFIED] +
3269  substitution_factor * r.objective_coefficient[DELETED];
3270  if (std::abs(new_objective) > parameters_.drop_tolerance()) {
3271  lp->SetObjectiveCoefficient(r.col[MODIFIED], new_objective);
3272  } else {
3273  lp->SetObjectiveCoefficient(r.col[MODIFIED], 0.0);
3274  }
3275  }
3276 
3277  // Carry over the constant factor of the substitution as well.
3278  // TODO(user): rename that method to reflect the fact that it also updates
3279  // the objective offset, in the other direction.
3280  SubtractColumnMultipleFromConstraintBound(r.col[DELETED],
3281  constant_offset_factor, lp);
3282 
3283  // If we keep substituing the same "dense" columns over and over, we can
3284  // have a memory in O(num_rows * num_cols) which can be order of magnitude
3285  // larger than the original problem. It is important to reclaim the memory
3286  // of the deleted column right away.
3287  lp->GetMutableSparseColumn(r.col[DELETED])->ClearAndRelease();
3288 
3289  // Mark the column and the row for deletion.
3290  column_deletion_helper_.MarkColumnForDeletion(r.col[DELETED]);
3291  row_deletion_helper_.MarkRowForDeletion(r.row);
3292  }
3293  if (status_ != ProblemStatus::INIT) return false;
3294  lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
3295  lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
3296 
3297  return !column_deletion_helper_.IsEmpty();
3298 }
3299 
// Postsolve of the doubleton-equality-row reduction: restores the deleted
// columns/rows, rebuilds primal statuses and values, then reconstructs the
// dual values by exploiting an implicitly triangular system (see the long
// comment below).
// NOTE(review): this listing was extracted from generated documentation and
// gaps in the embedded line numbers (3300, 3302, 3312, 3327, 3329, 3335,
// 3360) show that the function signature and several 'case' labels /
// statements were dropped; verify against upstream glop/preprocessor.cc.
3301  ProblemSolution* solution) const {
3303  RETURN_IF_NULL(solution);
3304  column_deletion_helper_.RestoreDeletedColumns(solution);
3305  row_deletion_helper_.RestoreDeletedRows(solution);
3306 
// new_basic_columns[col] records columns this postsolve makes basic; they are
// exactly the columns whose reduced cost is zeroed in the dual pass below.
3307  const ColIndex num_cols = solution->variable_statuses.size();
3308  StrictITIVector<ColIndex, bool> new_basic_columns(num_cols, false);
3309 
3310  for (const RestoreInfo& r : Reverse(restore_stack_)) {
3311  switch (solution->variable_statuses[r.col[MODIFIED]]) {
// NOTE(review): a 'case' label (orig. line 3312) was dropped just above.
3313  LOG(DFATAL) << "FIXED variable produced by DoubletonPreprocessor!";
3314  // In non-fastbuild mode, we rely on the rest of the code producing an
3315  // ProblemStatus::ABNORMAL status here.
3316  break;
3317  // When the modified variable is either basic or free, we keep it as is,
3318  // and simply make the deleted one basic.
3319  case VariableStatus::FREE:
3320  ABSL_FALLTHROUGH_INTENDED;
3321  case VariableStatus::BASIC:
3322  // Several code paths set the deleted column as basic. The code that
3323  // sets its value in that case is below, after the switch() block.
3324  solution->variable_statuses[r.col[DELETED]] = VariableStatus::BASIC;
3325  new_basic_columns[r.col[DELETED]] = true;
3326  break;
// NOTE(review): the AT_LOWER_BOUND/AT_UPPER_BOUND 'case' labels (orig. lines
// 3327 and 3329) were dropped from this listing.
3328  ABSL_FALLTHROUGH_INTENDED;
3330  // The bound was induced by a bound of one of the two original
3331  // variables. Put that original variable at its bound, and make
3332  // the other one basic.
3333  const RestoreInfo::ColChoiceAndStatus& bound_backtracking =
3334  solution->variable_statuses[r.col[MODIFIED]] ==
3336  ? r.bound_backtracking_at_lower_bound
3337  : r.bound_backtracking_at_upper_bound;
3338  const ColIndex bounded_var = r.col[bound_backtracking.col_choice];
3339  const ColIndex basic_var =
3340  r.col[OtherColChoice(bound_backtracking.col_choice)];
3341  solution->variable_statuses[bounded_var] = bound_backtracking.status;
3342  solution->primal_values[bounded_var] = bound_backtracking.value;
3343  solution->variable_statuses[basic_var] = VariableStatus::BASIC;
3344  new_basic_columns[basic_var] = true;
3345  // If the modified column is VariableStatus::BASIC, then its value is
3346  // already set correctly. If it's the deleted column that is basic, its
3347  // value is set below the switch() block.
3348  }
3349  }
3350 
3351  // Restore the value of the deleted column if it is VariableStatus::BASIC.
// Solves aX + bY = c for the deleted variable given the modified one.
3352  if (solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC) {
3353  solution->primal_values[r.col[DELETED]] =
3354  (r.rhs -
3355  solution->primal_values[r.col[MODIFIED]] * r.coeff[MODIFIED]) /
3356  r.coeff[DELETED];
3357  }
3358 
3359  // Make the deleted constraint status FIXED.
// NOTE(review): the statement performing this (orig. line 3360) was dropped.
3361  }
3362 
3363  // Now we need to reconstruct the dual. This is a bit tricky and is basically
3364  // the same as inverting a really structed and easy to invert matrix. For n
3365  // doubleton rows, looking only at the new_basic_columns, there is exactly n
3366  // by construction (one per row). We consider only this n x n matrix, and we
3367  // must choose dual row values so that we make the reduced costs zero on all
3368  // these columns.
3369  //
3370  // There is always an order that make this matrix triangular. We start with a
3371  // singleton column which fix its corresponding row and then work on the
3372  // square submatrix left. We can always start and continue, because if we take
3373  // the first substitued row of the current submatrix, if its deleted column
3374  // was in the submatrix we have a singleton column. If it is outside, we have
3375  // 2 n - 1 entries for a matrix with n columns, so one must be singleton.
3376  //
3377  // Note(user): Another advantage of working on the "original" matrix before
3378  // this presolve is an increased precision.
3379  //
3380  // TODO(user): We can probably use something better than a vector of set,
3381  // but the number of entry is really sparse though. And the size of a set<int>
3382  // is 24 bytes, same as a std::vector<int>.
// Map each column to the indices of the restore-stack entries touching it.
3383  StrictITIVector<ColIndex, std::set<int>> col_to_index(num_cols);
3384  for (int i = 0; i < restore_stack_.size(); ++i) {
3385  const RestoreInfo& r = restore_stack_[i];
3386  col_to_index[r.col[MODIFIED]].insert(i);
3387  col_to_index[r.col[DELETED]].insert(i);
3388  }
// Worklist of columns appearing in exactly one remaining restore entry.
3389  std::vector<ColIndex> singleton_col;
3390  for (ColIndex col(0); col < num_cols; ++col) {
3391  if (!new_basic_columns[col]) continue;
3392  if (col_to_index[col].size() == 1) singleton_col.push_back(col);
3393  }
3394  while (!singleton_col.empty()) {
3395  const ColIndex col = singleton_col.back();
3396  singleton_col.pop_back();
3397  if (!new_basic_columns[col]) continue;
3398  if (col_to_index[col].empty()) continue;
3399  CHECK_EQ(col_to_index[col].size(), 1);
3400  const int index = *col_to_index[col].begin();
3401  const RestoreInfo& r = restore_stack_[index];
3402 
3403  const ColChoice col_choice = r.col[MODIFIED] == col ? MODIFIED : DELETED;
3404 
3405  // Adjust the dual value of the deleted constraint so that col have a
3406  // reduced costs of zero.
3407  CHECK_EQ(solution->dual_values[r.row], 0.0);
3408  const SparseColumn& saved_col =
3409  columns_saver_.SavedColumn(r.col[col_choice]);
3410  const Fractional current_reduced_cost =
3411  saved_objective_[r.col[col_choice]] -
3412  PreciseScalarProduct(solution->dual_values, saved_col);
3413  solution->dual_values[r.row] = current_reduced_cost / r.coeff[col_choice];
3414 
3415  // Update singleton
3416  col_to_index[r.col[DELETED]].erase(index);
3417  col_to_index[r.col[MODIFIED]].erase(index);
3418  if (col_to_index[r.col[DELETED]].size() == 1) {
3419  singleton_col.push_back(r.col[DELETED]);
3420  }
3421  if (col_to_index[r.col[MODIFIED]].size() == 1) {
3422  singleton_col.push_back(r.col[MODIFIED]);
3423  }
3424  }
3425 
3426  // Fix potential bad ConstraintStatus::FIXED_VALUE statuses.
3427  FixConstraintWithFixedStatuses(saved_row_lower_bounds_,
3428  saved_row_upper_bounds_, solution);
3429 }
3430 
// Rewrites constraint statuses that claim FIXED_VALUE on rows whose lower and
// upper bounds differ, choosing the bound consistent with the sign of the dual
// value so that LP optimality conditions are preserved.
// NOTE(review): gaps in the embedded line numbers (3438, 3446, 3448) show that
// the guarding 'if' condition and the two status assignments were dropped by
// the doc extraction; verify against upstream glop/preprocessor.cc.
3431 void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds,
3432  const DenseColumn& row_upper_bounds,
3433  ProblemSolution* solution) {
3434  const RowIndex num_rows = solution->constraint_statuses.size();
3435  DCHECK_EQ(row_lower_bounds.size(), num_rows);
3436  DCHECK_EQ(row_upper_bounds.size(), num_rows);
3437  for (RowIndex row(0); row < num_rows; ++row) {
3439  continue;
3440  }
// A genuine equality row may legitimately keep its FIXED status.
3441  if (row_lower_bounds[row] == row_upper_bounds[row]) continue;
3442 
3443  // We need to fix the status and we just need to make sure that the bound we
3444  // choose satisfies the LP optimality conditions.
3445  if (solution->dual_values[row] > 0) {
3447  } else {
3449  }
3450  }
3451 }
3452 
3453 void DoubletonEqualityRowPreprocessor::
3454  SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo* r) {
3455  using std::swap;
3456  swap(r->col[DELETED], r->col[MODIFIED]);
3457  swap(r->coeff[DELETED], r->coeff[MODIFIED]);
3458  swap(r->lb[DELETED], r->lb[MODIFIED]);
3459  swap(r->ub[DELETED], r->ub[MODIFIED]);
3460  swap(r->objective_coefficient[DELETED], r->objective_coefficient[MODIFIED]);
3461 }
3462 
3463 // --------------------------------------------------------
3464 // DualizerPreprocessor
3465 // --------------------------------------------------------
3466 
// Replaces the LP by its dual (via LinearProgram::PopulateFromDual) when that
// looks profitable. First shifts every bounded variable so that one of its
// bounds is exactly zero (required by the dual postsolve), then records the
// row/column status correspondences needed by RecoverSolution().
// NOTE(review): gaps in the embedded line numbers (3467-3468, 3470, 3497,
// 3531-3532, 3545-3546, 3549-3550, 3555-3556, 3559-3560) show the function
// signature and several linked statements were dropped by the doc extraction;
// verify against upstream glop/preprocessor.cc.
3469  RETURN_VALUE_IF_NULL(lp, false);
3471  return false;
3472  }
3473 
3474  // Store the original problem size and direction.
3475  primal_num_cols_ = lp->num_variables();
3476  primal_num_rows_ = lp->num_constraints();
3477  primal_is_maximization_problem_ = lp->IsMaximizationProblem();
3478 
3479  // If we need to decide whether or not to take the dual, we only take it when
3480  // the matrix has more rows than columns. The number of rows of a linear
3481  // program gives the size of the square matrices we need to invert and the
3482  // order of iterations of the simplex method. So solving a program with less
3483  // rows is likely a better alternative. Note that the number of row of the
3484  // dual is the number of column of the primal.
3485  //
3486  // Note however that the default is a conservative factor because if the
3487  // user gives us a primal program, we assume he knows what he is doing and
3488  // sometimes a problem is a lot faster to solve in a given formulation
3489  // even if its dimension would say otherwise.
3490  //
3491  // Another reason to be conservative, is that the number of columns of the
3492  // dual is the number of rows of the primal plus up to two times the number of
3493  // columns of the primal.
3494  //
3495  // TODO(user): This effect can be lowered if we use some of the extra
3496  // variables as slack variable which we are not doing at this point.
3498  if (1.0 * primal_num_rows_.value() <
3499  parameters_.dualizer_threshold() * primal_num_cols_.value()) {
3500  return false;
3501  }
3502  }
3503 
3504  // Save the linear program bounds.
3505  // Also make sure that all the bounded variable have at least one bound set to
3506  // zero. This will be needed to post-solve a dual-basic solution into a
3507  // primal-basic one.
3508  const ColIndex num_cols = lp->num_variables();
3509  variable_lower_bounds_.assign(num_cols, 0.0);
3510  variable_upper_bounds_.assign(num_cols, 0.0);
3511  for (ColIndex col(0); col < num_cols; ++col) {
3512  const Fractional lower = lp->variable_lower_bounds()[col];
3513  const Fractional upper = lp->variable_upper_bounds()[col];
3514 
3515  // We need to shift one of the bound to zero.
3516  variable_lower_bounds_[col] = lower;
3517  variable_upper_bounds_[col] = upper;
3518  const Fractional value = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3519  if (value != 0.0) {
3520  lp->SetVariableBounds(col, lower - value, upper - value);
3521  SubtractColumnMultipleFromConstraintBound(col, value, lp);
3522  }
3523  }
3524 
3525  // Fill the information that will be needed during postsolve.
3526  //
3527  // TODO(user): This will break if PopulateFromDual() is changed. so document
3528  // the convention or make the function fill these vectors?
// One status per primal row first (FIXED/AT_UPPER/AT_LOWER depending on which
// bounds are finite), then one per slack, then one per surplus variable.
3529  dual_status_correspondence_.clear();
3530  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3533  if (lower_bound == upper_bound) {
3534  dual_status_correspondence_.push_back(VariableStatus::FIXED_VALUE);
3535  } else if (upper_bound != kInfinity) {
3536  dual_status_correspondence_.push_back(VariableStatus::AT_UPPER_BOUND);
3537  } else if (lower_bound != -kInfinity) {
3538  dual_status_correspondence_.push_back(VariableStatus::AT_LOWER_BOUND);
3539  } else {
3540  LOG(DFATAL) << "There should be no free constraint in this lp.";
3541  }
3542  }
3543  slack_or_surplus_mapping_.clear();
3544  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3547  if (lower_bound != -kInfinity) {
3548  dual_status_correspondence_.push_back(
3551  slack_or_surplus_mapping_.push_back(col);
3552  }
3553  }
3554  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3557  if (upper_bound != kInfinity) {
3558  dual_status_correspondence_.push_back(
3561  slack_or_surplus_mapping_.push_back(col);
3562  }
3563  }
3564 
3565  // TODO(user): There are two different ways to deal with ranged rows when
3566  // taking the dual. The default way is to duplicate such rows, see
3567  // PopulateFromDual() for details. Another way is to call
3568  // lp->AddSlackVariablesForFreeAndBoxedRows() before calling
3569  // PopulateFromDual(). Adds an option to switch between the two as this may
3570  // change the running time?
3571  //
3572  // Note however that the default algorithm is likely to result in a faster
3573  // solving time because the dual program will have less rows.
3574  LinearProgram dual;
3575  dual.PopulateFromDual(*lp, &duplicated_rows_);
3576  dual.Swap(lp);
3577  return true;
3578 }
3579 
3580 // Note(user): This assumes that LinearProgram.PopulateFromDual() uses
3581 // the first ColIndex and RowIndex for the rows and columns of the given
3582 // problem.
// Maps a solution of the dual program back to a solution of the original
// primal program: dual values become primal values (with the presolve shift
// removed), statuses are transposed, and signs are corrected for
// maximization problems.
// NOTE(review): gaps in the embedded line numbers (3583-3584, 3589,
// 3629-3630, 3633, 3644, 3659) show the function signature and several linked
// statements were dropped by the doc extraction; verify against upstream
// glop/preprocessor.cc.
3585  RETURN_IF_NULL(solution);
3586 
3587  DenseRow new_primal_values(primal_num_cols_, 0.0);
3588  VariableStatusRow new_variable_statuses(primal_num_cols_,
3590  DCHECK_LE(primal_num_cols_, RowToColIndex(solution->dual_values.size()));
3591  for (ColIndex col(0); col < primal_num_cols_; ++col) {
3592  RowIndex row = ColToRowIndex(col);
3593  const Fractional lower = variable_lower_bounds_[col];
3594  const Fractional upper = variable_upper_bounds_[col];
3595 
3596  // The new variable value corresponds to the dual value of the dual.
3597  // The shift applied during presolve needs to be removed.
3598  const Fractional shift = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3599  new_primal_values[col] = solution->dual_values[row] + shift;
3600 
3601  // A variable will be VariableStatus::BASIC if the dual constraint is not.
3602  if (solution->constraint_statuses[row] != ConstraintStatus::BASIC) {
3603  new_variable_statuses[col] = VariableStatus::BASIC;
3604  } else {
3605  // Otherwise, the dual value must be zero (if the solution is feasible),
3606  // and the variable is at an exact bound or zero if it is
3607  // VariableStatus::FREE. Note that this works because the bounds are
3608  // shifted to 0.0 in the presolve!
3609  new_variable_statuses[col] = ComputeVariableStatus(shift, lower, upper);
3610  }
3611  }
3612 
3613  // A basic variable that corresponds to slack/surplus variable is the same as
3614  // a basic row. The new variable status (that was just set to
3615  // VariableStatus::BASIC above)
3616  // needs to be corrected and depends on the variable type (slack/surplus).
3617  const ColIndex begin = RowToColIndex(primal_num_rows_);
3618  const ColIndex end = dual_status_correspondence_.size();
3619  DCHECK_GE(solution->variable_statuses.size(), end);
3620  DCHECK_EQ(end - begin, slack_or_surplus_mapping_.size());
3621  for (ColIndex index(begin); index < end; ++index) {
3622  if (solution->variable_statuses[index] == VariableStatus::BASIC) {
3623  const ColIndex col = slack_or_surplus_mapping_[index - begin];
3624  const VariableStatus status = dual_status_correspondence_[index];
3625 
3626  // The new variable value is set to its exact bound because the dual
3627  // variable value can be imprecise.
3628  new_variable_statuses[col] = status;
3631  new_primal_values[col] = variable_upper_bounds_[col];
3632  } else {
3634  new_primal_values[col] = variable_lower_bounds_[col];
3635  }
3636  }
3637  }
3638 
3639  // Note the <= in the DCHECK, since we may need to add variables when taking
3640  // the dual.
3641  DCHECK_LE(primal_num_rows_, ColToRowIndex(solution->primal_values.size()));
3642  DenseColumn new_dual_values(primal_num_rows_, 0.0);
3643  ConstraintStatusColumn new_constraint_statuses(primal_num_rows_,
3645 
3646  // Note that the sign need to be corrected because of the special behavior of
3647  // PopulateFromDual() on a maximization problem, see the comment in the
3648  // declaration of PopulateFromDual().
3649  Fractional sign = primal_is_maximization_problem_ ? -1 : 1;
3650  for (RowIndex row(0); row < primal_num_rows_; ++row) {
3651  const ColIndex col = RowToColIndex(row);
3652  new_dual_values[row] = sign * solution->primal_values[col];
3653 
3654  // A constraint will be ConstraintStatus::BASIC if the dual variable is not.
3655  if (solution->variable_statuses[col] != VariableStatus::BASIC) {
3656  new_constraint_statuses[row] = ConstraintStatus::BASIC;
3657  if (duplicated_rows_[row] != kInvalidCol) {
3658  if (solution->variable_statuses[duplicated_rows_[row]] ==
3660  // The duplicated row is always about the lower bound.
3661  new_constraint_statuses[row] = ConstraintStatus::AT_LOWER_BOUND;
3662  }
3663  }
3664  } else {
3665  // ConstraintStatus::AT_LOWER_BOUND/ConstraintStatus::AT_UPPER_BOUND/
3666  // ConstraintStatus::FIXED depend on the type of the constraint at this
3667  // position.
3668  new_constraint_statuses[row] =
3669  VariableToConstraintStatus(dual_status_correspondence_[col]);
3670  }
3671 
3672  // If the original row was duplicated, we need to take into account the
3673  // value of the corresponding dual column.
3674  if (duplicated_rows_[row] != kInvalidCol) {
3675  new_dual_values[row] +=
3676  sign * solution->primal_values[duplicated_rows_[row]];
3677  }
3678 
3679  // Because non-basic variable values are exactly at one of their bounds, a
3680  // new basic constraint will have a dual value exactly equal to zero.
3681  DCHECK(new_dual_values[row] == 0 ||
3682  new_constraint_statuses[row] != ConstraintStatus::BASIC);
3683  }
3684 
// Commit everything at once by swapping into the solution.
3685  solution->status = ChangeStatusToDualStatus(solution->status);
3686  new_primal_values.swap(solution->primal_values);
3687  new_dual_values.swap(solution->dual_values);
3688  new_variable_statuses.swap(solution->variable_statuses);
3689  new_constraint_statuses.swap(solution->constraint_statuses);
3690 }
3691 
// Translates a ProblemStatus obtained on the dual program into the matching
// status for the primal program (statuses without a dual counterpart are
// returned unchanged by the 'default' branch).
// NOTE(review): all the specific 'case' labels (orig. lines 3695-3706) were
// dropped by the doc extraction; only the function tail and 'default' remain.
// Verify against upstream glop/preprocessor.cc.
3693  ProblemStatus status) const {
3694  switch (status) {
3707  default:
3708  return status;
3709  }
3710 }
3711 
3712 // --------------------------------------------------------
3713 // ShiftVariableBoundsPreprocessor
3714 // --------------------------------------------------------
3715 
// Shifts every variable whose domain does not contain zero so that it does,
// propagating the shifts to the constraint bounds and the objective offset
// with compensated (Kahan) summation. Returns false when nothing was shifted
// or when the shifts overflow.
// NOTE(review): gaps in the embedded line numbers (3716-3717, 3782, 3792)
// show the function signature and two statements — likely the failure-status
// assignments before the 'return false' lines — were dropped by the doc
// extraction; verify against upstream glop/preprocessor.cc.
3718  RETURN_VALUE_IF_NULL(lp, false);
3719 
3720  // Save the linear program bounds before shifting them.
3721  bool all_variable_domains_contain_zero = true;
3722  const ColIndex num_cols = lp->num_variables();
3723  variable_initial_lbs_.assign(num_cols, 0.0);
3724  variable_initial_ubs_.assign(num_cols, 0.0);
3725  for (ColIndex col(0); col < num_cols; ++col) {
3726  variable_initial_lbs_[col] = lp->variable_lower_bounds()[col];
3727  variable_initial_ubs_[col] = lp->variable_upper_bounds()[col];
3728  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3729  all_variable_domains_contain_zero = false;
3730  }
3731  }
3732  VLOG(1) << "Maximum variable bounds magnitude (before shift): "
3733  << ComputeMaxVariableBoundsMagnitude(*lp);
3734 
3735  // Abort early if there is nothing to do.
3736  if (all_variable_domains_contain_zero) return false;
3737 
3738  // Shift the variable bounds and compute the changes to the constraint bounds
3739  // and objective offset in a precise way.
3740  int num_bound_shifts = 0;
3741  const RowIndex num_rows = lp->num_constraints();
3742  KahanSum objective_offset;
3743  absl::StrongVector<RowIndex, KahanSum> row_offsets(num_rows.value());
3744  offsets_.assign(num_cols, 0.0);
3745  for (ColIndex col(0); col < num_cols; ++col) {
3746  if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3747  Fractional offset = MinInMagnitudeOrZeroIfInfinite(
3748  variable_initial_lbs_[col], variable_initial_ubs_[col]);
3749  if (in_mip_context_ && lp->IsVariableInteger(col)) {
3750  // In the integer case, we truncate the number because if for instance
3751  // the lower bound is a positive integer + epsilon, we only want to
3752  // shift by the integer and leave the lower bound at epsilon.
3753  //
3754  // TODO(user): This would not be needed, if we always make the bound
3755  // of an integer variable integer before applying this preprocessor.
3756  offset = trunc(offset);
3757  } else {
3758  DCHECK_NE(offset, 0.0);
3759  }
3760  offsets_[col] = offset;
3761  lp->SetVariableBounds(col, variable_initial_lbs_[col] - offset,
3762  variable_initial_ubs_[col] - offset);
3763  const SparseColumn& sparse_column = lp->GetSparseColumn(col);
3764  for (const SparseColumn::Entry e : sparse_column) {
3765  row_offsets[e.row()].Add(e.coefficient() * offset);
3766  }
3767  objective_offset.Add(lp->objective_coefficients()[col] * offset);
3768  ++num_bound_shifts;
3769  }
3770  }
3771  VLOG(1) << "Maximum variable bounds magnitude (after " << num_bound_shifts
3772  << " shifts): " << ComputeMaxVariableBoundsMagnitude(*lp);
3773 
3774  // Apply the changes to the constraint bound and objective offset.
3775  for (RowIndex row(0); row < num_rows; ++row) {
3776  if (!std::isfinite(row_offsets[row].Value())) {
3777  // This can happen for bad input where we get floating point overflow.
3778  // We can even get nan if we have two overflow in opposite direction.
3779  VLOG(1) << "Shifting variable bounds causes a floating point overflow "
3780  "for constraint "
3781  << row << ".";
3783  return false;
3784  }
3785  lp->SetConstraintBounds(
3786  row, lp->constraint_lower_bounds()[row] - row_offsets[row].Value(),
3787  lp->constraint_upper_bounds()[row] - row_offsets[row].Value());
3788  }
3789  if (!std::isfinite(objective_offset.Value())) {
3790  VLOG(1) << "Shifting variable bounds causes a floating point overflow "
3791  "for the objective.";
3793  return false;
3794  }
3795  lp->SetObjectiveOffset(lp->objective_offset() + objective_offset.Value());
3796  return true;
3797 }
3798 
// Undoes the bound shift: non-basic variables are snapped to their original
// exact bounds, basic (and all MIP-context) variables get the offset added
// back.
// NOTE(review): gaps in the embedded line numbers (3799, 3801, 3809, 3811,
// 3814) show the function name and the 'case' labels before the restored
// lower/upper-bound assignments were dropped by the doc extraction; verify
// against upstream glop/preprocessor.cc.
3800  ProblemSolution* solution) const {
3802  RETURN_IF_NULL(solution);
3803  const ColIndex num_cols = solution->variable_statuses.size();
3804  for (ColIndex col(0); col < num_cols; ++col) {
3805  if (in_mip_context_) {
3806  solution->primal_values[col] += offsets_[col];
3807  } else {
3808  switch (solution->variable_statuses[col]) {
3810  ABSL_FALLTHROUGH_INTENDED;
3812  solution->primal_values[col] = variable_initial_lbs_[col];
3813  break;
3815  solution->primal_values[col] = variable_initial_ubs_[col];
3816  break;
3817  case VariableStatus::BASIC:
3818  solution->primal_values[col] += offsets_[col];
3819  break;
3820  case VariableStatus::FREE:
3821  break;
3822  }
3823  }
3824  }
3825 }
3826 
3827 // --------------------------------------------------------
3828 // ScalingPreprocessor
3829 // --------------------------------------------------------
3830 
// Scales the problem (matrix, then objective, then bounds) when scaling is
// enabled, after snapshotting the original variable bounds so that
// RecoverSolution() can snap variables back to their exact unscaled bounds.
// NOTE(review): the function signature (orig. lines 3831-3832) was dropped by
// the doc extraction; verify against upstream glop/preprocessor.cc.
3833  RETURN_VALUE_IF_NULL(lp, false);
3834  if (!parameters_.use_scaling()) return false;
3835 
3836  // Save the linear program bounds before scaling them.
3837  const ColIndex num_cols = lp->num_variables();
3838  variable_lower_bounds_.assign(num_cols, 0.0);
3839  variable_upper_bounds_.assign(num_cols, 0.0);
3840  for (ColIndex col(0); col < num_cols; ++col) {
3841  variable_lower_bounds_[col] = lp->variable_lower_bounds()[col];
3842  variable_upper_bounds_[col] = lp->variable_upper_bounds()[col];
3843  }
3844 
3845  // See the doc of these functions for more details.
3846  // It is important to call Scale() before the other two.
3847  Scale(lp, &scaler_, parameters_.scaling_method());
3848  cost_scaling_factor_ = lp->ScaleObjective(parameters_.cost_scaling());
3849  bound_scaling_factor_ = lp->ScaleBounds();
3850 
3851  return true;
3852 }
3853 
// Unscales the primal and dual values (row/column scaling plus the bound and
// cost scaling factors), then snaps non-basic variables to their exact saved
// bounds to remove tiny (~1e-15) unscaling errors.
// NOTE(review): gaps in the embedded line numbers (3854-3855, 3874, 3876,
// 3879) show the function signature and some 'case' labels were dropped by
// the doc extraction; verify against upstream glop/preprocessor.cc.
3856  RETURN_IF_NULL(solution);
3857 
3858  scaler_.ScaleRowVector(false, &(solution->primal_values));
3859  for (ColIndex col(0); col < solution->primal_values.size(); ++col) {
3860  solution->primal_values[col] *= bound_scaling_factor_;
3861  }
3862 
3863  scaler_.ScaleColumnVector(false, &(solution->dual_values));
3864  for (RowIndex row(0); row < solution->dual_values.size(); ++row) {
3865  solution->dual_values[row] *= cost_scaling_factor_;
3866  }
3867 
3868  // Make sure the variable are at they exact bounds according to their status.
3869  // This just remove a really low error (about 1e-15) but allows to keep the
3870  // variables at their exact bounds.
3871  const ColIndex num_cols = solution->primal_values.size();
3872  for (ColIndex col(0); col < num_cols; ++col) {
3873  switch (solution->variable_statuses[col]) {
3875  ABSL_FALLTHROUGH_INTENDED;
3877  solution->primal_values[col] = variable_upper_bounds_[col];
3878  break;
3880  solution->primal_values[col] = variable_lower_bounds_[col];
3881  break;
3882  case VariableStatus::FREE:
3883  ABSL_FALLTHROUGH_INTENDED;
3884  case VariableStatus::BASIC:
3885  break;
3886  }
3887  }
3888 }
3889 
3890 // --------------------------------------------------------
3891 // ToMinimizationPreprocessor
3892 // --------------------------------------------------------
3893 
// Converts a maximization problem into the equivalent minimization problem by
// negating every nonzero objective coefficient and clearing the maximization
// flag. Always returns false (no postsolve transformation is registered).
// NOTE(review): gaps in the embedded line numbers (3894-3895, 3905-3906) show
// the function signature and two statements — presumably adjusting the
// objective scaling factor/offset — were dropped by the doc extraction;
// verify against upstream glop/preprocessor.cc.
3896  RETURN_VALUE_IF_NULL(lp, false);
3897  if (lp->IsMaximizationProblem()) {
3898  for (ColIndex col(0); col < lp->num_variables(); ++col) {
3899  const Fractional coeff = lp->objective_coefficients()[col];
3900  if (coeff != 0.0) {
3901  lp->SetObjectiveCoefficient(col, -coeff);
3902  }
3903  }
3904  lp->SetMaximizationProblem(false);
3907  }
3908  return false;
3909 }
3910 
// Intentionally empty: presumably no per-solution postsolve is needed after
// the max->min objective sign flip done in Run(). (The line carrying the
// function name, orig. 3911, was dropped by the doc extraction.)
3912  ProblemSolution* solution) const {}
3913 
3914 // --------------------------------------------------------
3915 // AddSlackVariablesPreprocessor
3916 // --------------------------------------------------------
3917 
// Appends slack variables to the problem and remembers the first slack column
// so RecoverSolution() can strip them again and rebuild constraint statuses.
// NOTE(review): gaps in the embedded line numbers (3918-3919, 3921) show the
// function signature and the call taking the 'detect_integer_constraints'
// argument were dropped by the doc extraction; verify against upstream
// glop/preprocessor.cc.
3920  RETURN_VALUE_IF_NULL(lp, false);
3922  /*detect_integer_constraints=*/true);
3923  first_slack_col_ = lp->GetFirstSlackVariable();
3924  return true;
3925 }
3926 
// Derives each constraint's status from its slack variable's status (bounds
// are reversed between the two), then truncates the slack variables out of
// the primal values and variable statuses.
// NOTE(review): gaps in the embedded line numbers (3927, 3929, 3942, 3945)
// show the function name and the two 'case' labels before the AT_UPPER/
// AT_LOWER assignments were dropped by the doc extraction; verify against
// upstream glop/preprocessor.cc.
3928  ProblemSolution* solution) const {
3930  RETURN_IF_NULL(solution);
3931 
3932  // Compute constraint statuses from statuses of slack variables.
3933  const RowIndex num_rows = solution->dual_values.size();
3934  for (RowIndex row(0); row < num_rows; ++row) {
3935  const ColIndex slack_col = first_slack_col_ + RowToColIndex(row);
3936  const VariableStatus variable_status =
3937  solution->variable_statuses[slack_col];
3938  ConstraintStatus constraint_status = ConstraintStatus::FREE;
3939  // The slack variables have reversed bounds - if the value of the variable
3940  // is at one bound, the value of the constraint is at the opposite bound.
3941  switch (variable_status) {
3943  constraint_status = ConstraintStatus::AT_UPPER_BOUND;
3944  break;
3946  constraint_status = ConstraintStatus::AT_LOWER_BOUND;
3947  break;
3948  default:
3949  constraint_status = VariableToConstraintStatus(variable_status);
3950  break;
3951  }
3952  solution->constraint_statuses[row] = constraint_status;
3953  }
3954 
3955  // Drop the primal values and variable statuses for slack variables.
3956  solution->primal_values.resize(first_slack_col_, 0.0);
3957  solution->variable_statuses.resize(first_slack_col_, VariableStatus::FREE);
3958 }
3959 
3960 } // namespace glop
3961 } // namespace operations_research
#define CHECK(condition)
Definition: base/logging.h:495
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:106
int64_t bound
void RecoverSolution(ProblemSolution *solution) const final
void Add(const FpNumber &value)
Definition: accurate_sum.h:29
static constexpr SolverBehavior NEVER_DO
Fractional LookUpCoefficient(Index index) const
void RecoverSolution(ProblemSolution *solution) const final
int64_t min
Definition: alldiff_cst.cc:139
ColIndex col
void RecoverSolution(ProblemSolution *solution) const final
void RemoveZeroCostUnconstrainedVariable(ColIndex col, Fractional target_bound, LinearProgram *lp)
#define SOLVER_LOG(logger,...)
Definition: util/logging.h:69
const SparseMatrix & GetTransposeSparseMatrix() const
Definition: lp_data.cc:376
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:326
bool IsVariableInteger(ColIndex col) const
Definition: lp_data.cc:295
void Swap(LinearProgram *linear_program)
Definition: lp_data.cc:1031
ModelSharedTimeLimit * time_limit
const DenseBooleanRow & GetMarkedColumns() const
Definition: preprocessor.h:200
bool IsSmallerWithinTolerance(FloatType x, FloatType y, FloatType tolerance)
Definition: fp_utils.h:157
EntryIndex num_entries
void AddMultipleToSparseVectorAndDeleteCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void DeleteColumns(const DenseBooleanRow &columns_to_delete)
Definition: lp_data.cc:1065
iterator erase(const_iterator pos)
#define VLOG(verboselevel)
Definition: base/logging.h:983
std::vector< double > lower_bounds
const std::string name
const ColIndex kInvalidCol(-1)
void Scale(LinearProgram *lp, SparseMatrixScaler *scaler)
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:263
void SetLogToStdOut(bool enable)
Definition: util/logging.h:45
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
#define LOG(severity)
Definition: base/logging.h:420
#define RETURN_VALUE_IF_NULL(x, v)
Definition: return_macros.h:26
void RemoveNearZeroEntriesWithWeights(Fractional threshold, const DenseVector &weights)
void swap(StrongVector &x)
void MultiplyByConstant(Fractional factor)
void RecoverSolution(ProblemSolution *solution) const final
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:309
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:331
SingletonUndo(OperationType type, const LinearProgram &lp, MatrixEntry e, ConstraintStatus status)
bool IsIntegerWithinTolerance(FloatType x, FloatType tolerance)
Definition: fp_utils.h:165
RowIndex row
Definition: markowitz.cc:182
int64_t coefficient
void RecoverSolution(ProblemSolution *solution) const final
void assign(IntType size, const T &v)
Definition: lp_types.h:278
SparseMatrix * GetMutableTransposeSparseMatrix()
Definition: lp_data.cc:386
bool IsFinite(Fractional value)
Definition: lp_types.h:91
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
int64_t b
std::function< int64_t(const Model &)> Value(IntegerVariable v)
Definition: integer.h:1673
const DenseRow & objective_coefficients() const
Definition: lp_data.h:223
const DenseColumn & constraint_upper_bounds() const
Definition: lp_data.h:218
void RecoverSolution(ProblemSolution *solution) const final
Fractional SumWithoutLb(Fractional c) const
ReverseView< Container > reversed_view(const Container &c)
int64_t max
Definition: alldiff_cst.cc:140
#define SCOPED_INSTRUCTION_COUNT(time_limit)
Definition: stats.h:439
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
double upper_bound
void resize(size_type new_size)
void RecoverSolution(ProblemSolution *solution) const final
const DenseColumn & constraint_lower_bounds() const
Definition: lp_data.h:215
bool empty() const
static constexpr SolverBehavior LET_SOLVER_DECIDE
iterator insert(const_iterator pos, const value_type &x)
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
const SparseMatrix & GetSparseMatrix() const
Definition: lp_data.h:175
Fractional GetObjectiveCoefficientForMinimizationVersion(ColIndex col) const
Definition: lp_data.cc:419
double lower_bound
DenseColumn * mutable_constraint_lower_bounds()
Definition: lp_data.h:550
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
void DestructiveRecoverSolution(ProblemSolution *solution)
BeginEndReverseIteratorWrapper< Container > Reverse(const Container &c)
Definition: iterators.h:98
void RecoverSolution(ProblemSolution *solution) const final
const double kInfinity
Definition: lp_types.h:84
void push_back(const value_type &x)
int index
Definition: pack.cc:509
const SparseColumn & column(ColIndex col) const
Definition: sparse.h:181
::operations_research::glop::GlopParameters_SolverBehavior solve_dual_problem() const
Fractional target_bound
void FixConstraintWithFixedStatuses(const DenseColumn &row_lower_bounds, const DenseColumn &row_upper_bounds, ProblemSolution *solution)
SparseColumn * GetMutableSparseColumn(ColIndex col)
Definition: lp_data.cc:413
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
void RecoverSolution(ProblemSolution *solution) const final
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:702
const RowIndex kInvalidRow(-1)
int64_t delta
Definition: resource.cc:1692
size_type size() const
ConstraintStatusColumn constraint_statuses
Definition: lp_data.h:686
int64_t cost
::operations_research::glop::GlopParameters_CostScalingAlgorithm cost_scaling() const
void ScaleRowVector(bool up, DenseRow *row_vector) const
#define DCHECK(condition)
Definition: base/logging.h:889
const DenseRow & variable_upper_bounds() const
Definition: lp_data.h:232
void MarkColumnForDeletionWithState(ColIndex col, Fractional value, VariableStatus status)
ColIndex representative
ColMapping FindProportionalColumns(const SparseMatrix &matrix, Fractional tolerance)
const DenseRow & variable_lower_bounds() const
Definition: lp_data.h:229
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:249
void RecoverSolution(ProblemSolution *solution) const final
SparseColumn * mutable_column(ColIndex col)
Definition: sparse.h:182
const SparseColumn & SavedColumn(ColIndex col) const
void RecoverSolution(ProblemSolution *solution) const final
void DeleteRows(const DenseBooleanColumn &rows_to_delete)
Definition: lp_data.cc:1258
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
const DenseBooleanColumn & GetMarkedRows() const
void RecoverSolution(ProblemSolution *solution) const final
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:892
void SetMaximizationProblem(bool maximize)
Definition: lp_data.cc:343
DenseColumn * mutable_constraint_upper_bounds()
Definition: lp_data.h:553
void RestoreDeletedRows(ProblemSolution *solution) const
Collection of objects used to extend the Constraint Solver library.
ProblemStatus ChangeStatusToDualStatus(ProblemStatus status) const
const SparseColumn & SavedOrEmptyColumn(ColIndex col) const
SatParameters parameters
Fractional ScalarProduct(const DenseRowOrColumn1 &u, const DenseRowOrColumn2 &v)
bool IsSmallerWithinPreprocessorZeroTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:84
std::vector< double > upper_bounds
void SetObjectiveScalingFactor(Fractional objective_scaling_factor)
Definition: lp_data.cc:336
::operations_research::glop::GlopParameters_ScalingAlgorithm scaling_method() const
Fractional ScaleObjective(GlopParameters::CostScalingAlgorithm method)
Definition: lp_data.cc:1188
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const override
Fractional scaled_cost
void Undo(const GlopParameters &parameters, const SparseColumn &saved_column, const SparseColumn &saved_row, ProblemSolution *solution) const
void RecoverSolution(ProblemSolution *solution) const final
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
Fractional SumWithoutUb(Fractional c) const
void RecoverSolution(ProblemSolution *solution) const final
bool IsSmallerWithinFeasibilityTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:80
void RestoreDeletedColumns(ProblemSolution *solution) const
int64_t value
void SaveColumn(ColIndex col, const SparseColumn &column)
Preprocessor(const GlopParameters *parameters)
Definition: preprocessor.cc:48
const GlopParameters & parameters_
Definition: preprocessor.h:92
void SaveColumnIfNotAlreadyDone(ColIndex col, const SparseColumn &column)
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:409
#define RUN_PREPROCESSOR(name)
Definition: preprocessor.cc:60
void PopulateFromDual(const LinearProgram &dual, RowToColMapping *duplicated_rows)
Definition: lp_data.cc:764
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void ScaleColumnVector(bool up, DenseColumn *column_vector) const
int64_t a
void AddSlackVariablesWhereNecessary(bool detect_integer_constraints)
Definition: lp_data.cc:697