OR-Tools  9.2
preprocessor.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <cstdint>
17#include <limits>
18
19#include "absl/strings/str_format.h"
23#include "ortools/glop/status.h"
28
29namespace operations_research {
30namespace glop {
31
33
34namespace {
35// Returns an interval as an human readable string for debugging.
36std::string IntervalString(Fractional lb, Fractional ub) {
37 return absl::StrFormat("[%g, %g]", lb, ub);
38}
39
#if defined(_MSC_VER)
// Compatibility shim: older MSVC runtimes do not provide trunc(), so we
// emulate rounding toward zero with floor()/ceil().
double trunc(double d) { return d < 0 ? ceil(d) : floor(d); }
#endif
43} // namespace
44
45// --------------------------------------------------------
46// Preprocessor
47// --------------------------------------------------------
// NOTE(review): the constructor's signature line is elided from this extract;
// presumably `Preprocessor::Preprocessor(const GlopParameters* parameters)`
// -- confirm against the full source. Starts the preprocessor in the INIT
// state, outside of any MIP context, with an infinite time limit that stays
// in effect until SetTimeLimit() is called.
49 : status_(ProblemStatus::INIT),
50 parameters_(*parameters),
51 in_mip_context_(false),
52 infinite_time_limit_(TimeLimit::Infinite()),
53 time_limit_(infinite_time_limit_.get()) {}
55
56// --------------------------------------------------------
57// MainLpPreprocessor
58// --------------------------------------------------------
59
// Instantiates the preprocessor class `name`, runs it on `lp` and keeps it on
// the postsolve stack when it changed something (see RunAndPushIfRelevant).
// The stringified class name (#name) is used for logging.
60#define RUN_PREPROCESSOR(name) \
61 RunAndPushIfRelevant(std::unique_ptr<Preprocessor>(new name(&parameters_)), \
62 #name, time_limit_, lp)
63
// MainLpPreprocessor::Run (signature line elided in this extract; presumably
// `bool MainLpPreprocessor::Run(LinearProgram* lp)`). Applies the full stack
// of presolve passes to `lp` until a fixed point is reached or kMaxNumPasses
// passes were done, then returns whether any preprocessor needs postsolving.
// NOTE(review): the actual RUN_PREPROCESSOR(...) invocations inside and after
// the loop are elided from this extract -- the visible statements are NOT
// contiguous in the real file.
65 RETURN_VALUE_IF_NULL(lp, false);
66
68 default_logger_.SetLogToStdOut(parameters_.log_to_stdout());
69
70 SOLVER_LOG(logger_, "");
71 SOLVER_LOG(logger_, "Starting presolve...");
72
// Remember the original problem dimensions so per-pass logging can report
// deltas (see RunAndPushIfRelevant).
73 initial_num_rows_ = lp->num_constraints();
74 initial_num_cols_ = lp->num_variables();
75 initial_num_entries_ = lp->num_entries();
78
79 // We run it a few times because running one preprocessor may allow another
80 // one to remove more stuff.
81 const int kMaxNumPasses = 20;
82 for (int i = 0; i < kMaxNumPasses; ++i) {
83 const int old_stack_size = preprocessors_.size();
92
93 // Abort early if none of the preprocessors did something. Technically
94 // this is true if none of the preprocessors above needs postsolving,
95 // which has exactly the same meaning for these particular preprocessors.
96 if (preprocessors_.size() == old_stack_size) {
97 // We use i here because the last pass did nothing.
98 SOLVER_LOG(logger_, "Reached fixed point after presolve pass #", i);
99 break;
100 }
101 }
104
105 // TODO(user): Run them in the loop above if the effect on the running time
106 // is good. This needs more investigation.
109
110 // If DualizerPreprocessor was run, we need to do some extra preprocessing.
111 // This is because it currently adds a lot of zero-cost singleton columns.
112 const int old_stack_size = preprocessors_.size();
113
114 // TODO(user): We probably want to scale the costs before and after this
115 // preprocessor so that the rhs/objective of the dual are with a good
116 // magnitude.
118 if (old_stack_size != preprocessors_.size()) {
124 }
125
127 }
128
129 // The scaling is controlled by use_scaling, not use_preprocessing.
131
// Non-empty stack means at least one preprocessor modified the problem and
// will need to participate in postsolve.
132 return !preprocessors_.empty();
133}
134
135#undef RUN_PREPROCESSOR
136
// Runs `preprocessor` on `lp` unless the overall status is no longer INIT or
// the time limit was already reached. When the run reports that postsolve is
// needed, the preprocessor is moved onto preprocessors_ (the postsolve
// stack); otherwise only its status is recorded, since a preprocessor can
// still detect infeasibility/unboundedness without changing the problem.
// NOTE(review): a few lines are elided from this extract (the third signature
// line with the TimeLimit*/LinearProgram* parameters, and a line inside the
// empty-lp branch and the else branch).
137void MainLpPreprocessor::RunAndPushIfRelevant(
138 std::unique_ptr<Preprocessor> preprocessor, const std::string& name,
140 RETURN_IF_NULL(preprocessor);
142 if (status_ != ProblemStatus::INIT || time_limit->LimitReached()) return;
143
144 const double start_time = time_limit->GetElapsedTime();
145 preprocessor->SetTimeLimit(time_limit);
146
147 // No need to run the preprocessor if the lp is empty.
148 // TODO(user): without this test, the code is failing as of 2013-03-18.
149 if (lp->num_variables() == 0 && lp->num_constraints() == 0) {
151 return;
152 }
153
154 if (preprocessor->Run(lp)) {
155 const EntryIndex new_num_entries = lp->num_entries();
156 const double preprocess_time = time_limit->GetElapsedTime() - start_time;
// Log the new problem size together with the delta vs. the original
// dimensions recorded in Run().
157 SOLVER_LOG(logger_,
158 absl::StrFormat(
159 "%-45s: %d(%d) rows, %d(%d) columns, %d(%d) entries. (%fs)",
160 name, lp->num_constraints().value(),
161 (lp->num_constraints() - initial_num_rows_).value(),
162 lp->num_variables().value(),
163 (lp->num_variables() - initial_num_cols_).value(),
164 // static_cast<int64_t> is needed because the Android port
165 // uses int32_t.
166 static_cast<int64_t>(new_num_entries.value()),
167 static_cast<int64_t>(new_num_entries.value() -
168 initial_num_entries_.value()),
169 preprocess_time));
170 status_ = preprocessor->status();
171 preprocessors_.push_back(std::move(preprocessor));
172 return;
173 } else {
174 // Even if a preprocessor returns false (i.e. no need for postsolve), it
175 // can detect an issue with the problem.
176 status_ = preprocessor->status();
178 SOLVER_LOG(logger_, name, " detected that the problem is ",
180 }
181 }
182}
183
// MainLpPreprocessor::RecoverSolution (signature elided in this extract).
// Postsolves by replaying the stored preprocessors in reverse order of
// application, so each one undoes its own transformation.
186 for (const auto& p : gtl::reversed_view(preprocessors_)) {
187 p->RecoverSolution(solution);
188 }
189}
190
// Destructive variant of RecoverSolution (signature elided in this extract):
// pops each preprocessor from the stack right after using it, releasing its
// memory as the solution is recovered.
193 while (!preprocessors_.empty()) {
194 preprocessors_.back()->RecoverSolution(solution);
195 preprocessors_.pop_back();
196 }
197}
198
199// --------------------------------------------------------
200// ColumnDeletionHelper
201// --------------------------------------------------------
202
203void ColumnsSaver::SaveColumn(ColIndex col, const SparseColumn& column) {
204 const int index = saved_columns_.size();
205 CHECK(saved_columns_index_.insert({col, index}).second);
206 saved_columns_.push_back(column);
207}
208
// ColumnsSaver::SaveColumnIfNotAlreadyDone (first signature line elided in
// this extract): same as SaveColumn(), but silently does nothing when `col`
// was already saved instead of CHECK-failing.
210 const SparseColumn& column) {
211 const int index = saved_columns_.size();
212 const bool inserted = saved_columns_index_.insert({col, index}).second;
213 if (inserted) saved_columns_.push_back(column);
214}
215
// ColumnsSaver::SavedColumn (signature elided in this extract): returns the
// previously saved copy of `col`. CHECK-fails if it was never saved.
217 const auto it = saved_columns_index_.find(col);
218 CHECK(it != saved_columns_index_.end());
219 return saved_columns_[it->second];
220}
221
// ColumnsSaver::SavedOrEmptyColumn (signature elided in this extract): like
// SavedColumn(), but returns a reference to an empty column instead of
// failing when `col` was never saved.
223 const auto it = saved_columns_index_.find(col);
224 return it == saved_columns_index_.end() ? empty_column_
225 : saved_columns_[it->second];
226}
227
// ColumnDeletionHelper::Clear (signature elided in this extract): forgets all
// marked columns and their stored restore values.
// NOTE(review): stored_status_ is apparently not cleared here -- confirm
// against the full source whether that is intentional.
229 is_column_deleted_.clear();
230 stored_value_.clear();
231}
232
235}
236
// ColumnDeletionHelper::MarkColumnForDeletionWithState (first signature line
// elided in this extract). Marks `col` as deleted and records the value and
// variable status that RestoreDeletedColumns() will re-insert at postsolve.
// The internal vectors are grown on demand so columns can be marked in any
// order.
238 ColIndex col, Fractional fixed_value, VariableStatus status) {
239 DCHECK_GE(col, 0);
240 if (col >= is_column_deleted_.size()) {
241 is_column_deleted_.resize(col + 1, false);
242 stored_value_.resize(col + 1, 0.0);
243 stored_status_.resize(col + 1, VariableStatus::FREE);
244 }
245 is_column_deleted_[col] = true;
246 stored_value_[col] = fixed_value;
247 stored_status_[col] = status;
248}
249
// ColumnDeletionHelper::RestoreDeletedColumns (first signature line elided in
// this extract). Rebuilds full-size primal value / variable status vectors by
// re-inserting the stored values of the deleted columns between the surviving
// entries of `solution`, then swaps the rebuilt vectors into `solution`.
251 ProblemSolution* solution) const {
252 DenseRow new_primal_values;
253 VariableStatusRow new_variable_statuses;
// old_index walks the (smaller) presolved vectors while col walks the
// original column indices.
254 ColIndex old_index(0);
255 for (ColIndex col(0); col < is_column_deleted_.size(); ++col) {
256 if (is_column_deleted_[col]) {
257 new_primal_values.push_back(stored_value_[col]);
258 new_variable_statuses.push_back(stored_status_[col]);
259 } else {
260 new_primal_values.push_back(solution->primal_values[old_index]);
261 new_variable_statuses.push_back(solution->variable_statuses[old_index]);
262 ++old_index;
263 }
264 }
265
266 // Copy the end of the vectors and swap them with the ones in solution.
267 const ColIndex num_cols = solution->primal_values.size();
268 DCHECK_EQ(num_cols, solution->variable_statuses.size());
269 for (; old_index < num_cols; ++old_index) {
270 new_primal_values.push_back(solution->primal_values[old_index]);
271 new_variable_statuses.push_back(solution->variable_statuses[old_index]);
272 }
273 new_primal_values.swap(solution->primal_values);
274 new_variable_statuses.swap(solution->variable_statuses);
275}
276
277// --------------------------------------------------------
278// RowDeletionHelper
279// --------------------------------------------------------
280
281void RowDeletionHelper::Clear() { is_row_deleted_.clear(); }
282
// RowDeletionHelper::MarkRowForDeletion (signature elided in this extract):
// marks `row` as deleted, growing the bitmap on demand.
284 DCHECK_GE(row, 0);
285 if (row >= is_row_deleted_.size()) {
286 is_row_deleted_.resize(row + 1, false);
287 }
288 is_row_deleted_[row] = true;
289}
290
// RowDeletionHelper::UnmarkRow (signature elided in this extract): removes a
// previous deletion mark; a no-op when `row` was never marked.
292 if (row >= is_row_deleted_.size()) return;
293 is_row_deleted_[row] = false;
294}
295
// RowDeletionHelper::GetMarkedRows (signature elided in this extract):
// read-only access to the deletion bitmap.
297 return is_row_deleted_;
298}
299
// RowDeletionHelper::RestoreDeletedRows (signature elided in this extract).
// Re-inserts a 0.0 dual value and a BASIC constraint status for each deleted
// row between the surviving entries of `solution`, then swaps the rebuilt
// vectors into `solution`. Mirrors
// ColumnDeletionHelper::RestoreDeletedColumns for the row/dual side.
301 DenseColumn new_dual_values;
302 ConstraintStatusColumn new_constraint_statuses;
// old_index walks the (smaller) presolved vectors while row walks the
// original row indices.
303 RowIndex old_index(0);
304 const RowIndex end = is_row_deleted_.size();
305 for (RowIndex row(0); row < end; ++row) {
306 if (is_row_deleted_[row]) {
307 new_dual_values.push_back(0.0);
308 new_constraint_statuses.push_back(ConstraintStatus::BASIC);
309 } else {
310 new_dual_values.push_back(solution->dual_values[old_index]);
311 new_constraint_statuses.push_back(
312 solution->constraint_statuses[old_index]);
313 ++old_index;
314 }
315 }
316
317 // Copy the end of the vectors and swap them with the ones in solution.
318 const RowIndex num_rows = solution->dual_values.size();
319 DCHECK_EQ(num_rows, solution->constraint_statuses.size());
320 for (; old_index < num_rows; ++old_index) {
321 new_dual_values.push_back(solution->dual_values[old_index]);
322 new_constraint_statuses.push_back(solution->constraint_statuses[old_index]);
323 }
324 new_dual_values.swap(solution->dual_values);
325 new_constraint_statuses.swap(solution->constraint_statuses);
326}
327
328// --------------------------------------------------------
329// EmptyColumnPreprocessor
330// --------------------------------------------------------
331
332namespace {
333
334// Computes the status of a variable given its value and bounds. This only works
335// with a value exactly at one of the bounds, or a value of 0.0 for free
336// variables.
// ComputeVariableStatus (signature and all return statements elided in this
// extract). Presumably maps (value, lower_bound, upper_bound) to the
// corresponding VariableStatus: FIXED_VALUE when the bounds coincide,
// AT_LOWER_BOUND / AT_UPPER_BOUND when the value sits on a bound, and FREE
// otherwise -- confirm against the full source; only the branch structure is
// visible here. Per the comment at the top of this anonymous namespace, it is
// only valid for a value exactly at a bound, or 0.0 for a free variable.
339 if (lower_bound == upper_bound) {
343 }
344 if (value == lower_bound) {
347 }
348 if (value == upper_bound) {
351 }
352
353 // TODO(user): restrict this to unbounded variables with a value of zero.
354 // We can't do that when postsolving infeasible problem. Don't call postsolve
355 // on an infeasible problem?
357}
358
359// Returns the input with the smallest magnitude or zero if both are infinite.
360Fractional MinInMagnitudeOrZeroIfInfinite(Fractional a, Fractional b) {
361 const Fractional value = std::abs(a) < std::abs(b) ? a : b;
362 return IsFinite(value) ? value : 0.0;
363}
364
365Fractional MagnitudeOrZeroIfInfinite(Fractional value) {
366 return IsFinite(value) ? std::abs(value) : 0.0;
367}
368
369// Returns the maximum magnitude of the finite variable bounds of the given
370// linear program.
371Fractional ComputeMaxVariableBoundsMagnitude(const LinearProgram& lp) {
372 Fractional max_bounds_magnitude = 0.0;
373 const ColIndex num_cols = lp.num_variables();
374 for (ColIndex col(0); col < num_cols; ++col) {
375 max_bounds_magnitude = std::max(
376 max_bounds_magnitude,
377 std::max(MagnitudeOrZeroIfInfinite(lp.variable_lower_bounds()[col]),
378 MagnitudeOrZeroIfInfinite(lp.variable_upper_bounds()[col])));
379 }
380 return max_bounds_magnitude;
381}
382
383} // namespace
384
// EmptyColumnPreprocessor::Run (signature and several lines elided in this
// extract -- among the missing lines are the declarations of `value`,
// `lower_bound` and `upper_bound`, and the statements assigning `value` and
// setting status_; do not assume the visible statements are contiguous).
// Deletes every column with no matrix entries: a feasible value is chosen for
// each one (a finite bound, or 0.0 for a free zero-cost column). When the
// objective pushes such a column toward an infinite bound, the problem is
// reported INFEASIBLE_OR_UNBOUNDED.
387 RETURN_VALUE_IF_NULL(lp, false);
388 column_deletion_helper_.Clear();
389 const ColIndex num_cols = lp->num_variables();
390 for (ColIndex col(0); col < num_cols; ++col) {
391 if (lp->GetSparseColumn(col).IsEmpty()) {
394 const Fractional objective_coefficient =
397 if (objective_coefficient == 0) {
398 // Any feasible value will do.
399 if (upper_bound != kInfinity) {
401 } else {
402 if (lower_bound != -kInfinity) {
404 } else {
405 value = Fractional(0.0);
406 }
407 }
408 } else {
// Minimization: a positive cost drives the variable to its lower bound,
// a negative cost to its upper bound.
409 value = objective_coefficient > 0 ? lower_bound : upper_bound;
410 if (!IsFinite(value)) {
411 VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, empty column " << col
412 << " has a minimization cost of " << objective_coefficient
413 << " and bounds"
414 << " [" << lower_bound << "," << upper_bound << "]";
416 return false;
417 }
420 }
421 column_deletion_helper_.MarkColumnForDeletionWithState(
422 col, value, ComputeVariableStatus(value, lower_bound, upper_bound));
423 }
424 }
425 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
426 return !column_deletion_helper_.IsEmpty();
427}
428
// EmptyColumnPreprocessor::RecoverSolution (signature elided in this
// extract): re-inserts the deleted empty columns into `solution` with their
// stored values and statuses.
431 RETURN_IF_NULL(solution);
432 column_deletion_helper_.RestoreDeletedColumns(solution);
433}
434
435// --------------------------------------------------------
436// ProportionalColumnPreprocessor
437// --------------------------------------------------------
438
439namespace {
440
441// Subtracts 'multiple' times the column col of the given linear program from
442// the constraint bounds. That is, for a non-zero entry of coefficient c,
443// c * multiple is subtracted from both the constraint upper and lower bound.
// Subtracts 'multiple' times column `col` from the constraint bounds: for
// each non-zero entry with coefficient c, c * multiple is removed from both
// the row's lower and upper bound.
// NOTE(review): the declarations of `lbs`/`ubs` are elided from this extract;
// presumably they point at lp's mutable constraint lower/upper bound vectors,
// and the final (elided) statement shifts the objective offset by
// cost[col] * multiple -- confirm against the full source.
444void SubtractColumnMultipleFromConstraintBound(ColIndex col,
445 Fractional multiple,
446 LinearProgram* lp) {
449 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
450 const RowIndex row = e.row();
451 const Fractional delta = multiple * e.coefficient();
452 (*lbs)[row] -= delta;
453 (*ubs)[row] -= delta;
454 }
455 // While not needed for correctness, this allows the presolved problem to
456 // have the same objective value as the original one.
458 lp->objective_coefficients()[col] * multiple);
459}
460
461// Struct used to detect proportional columns with the same cost. For that, a
462// vector of such struct will be sorted, and only the columns that end up
463// together need to be compared.
// Sort key used to detect proportional columns with the same cost: sorting by
// (representative, scaled_cost, col) brings mergeable columns next to each
// other, so only adjacent entries need to be compared.
// NOTE(review): the member declarations of `representative` and `scaled_cost`
// are elided from this extract (only `col` is visible); presumably a ColIndex
// and a Fractional respectively -- confirm against the full source.
464struct ColumnWithRepresentativeAndScaledCost {
465 ColumnWithRepresentativeAndScaledCost(ColIndex _col, ColIndex _representative,
466 Fractional _scaled_cost)
467 : col(_col), representative(_representative), scaled_cost(_scaled_cost) {}
468 ColIndex col;
471
472 bool operator<(const ColumnWithRepresentativeAndScaledCost& other) const {
473 if (representative == other.representative) {
474 if (scaled_cost == other.scaled_cost) {
475 return col < other.col;
476 }
477 return scaled_cost < other.scaled_cost;
478 }
479 return representative < other.representative;
480 }
481};
482
483} // namespace
484
// ProportionalColumnPreprocessor::Run (signature and many lines elided in
// this extract, including the `mapping` declaration, tolerance checks, and
// several assignments -- do not assume the visible statements are
// contiguous). Overall flow visible here:
//   1. Find classes of proportional columns and their representatives.
//   2. Bound the dual 'slope' (cost / column_factor) of each class.
//   3. Fix to a bound any column whose slope interval proves the sign of its
//      reduced cost; report INFEASIBLE_OR_UNBOUNDED when that bound is
//      infinite or a slope interval is empty.
//   4. Merge the remaining columns of a class that share the same scaled
//      cost, shifting bounds so postsolve can rebuild a basic solution.
487 RETURN_VALUE_IF_NULL(lp, false);
490
491 // Compute some statistics and make each class representative point to itself
492 // in the mapping. Also store the columns that are proportional to at least
493 // another column in proportional_columns to iterate on them more efficiently.
494 //
495 // TODO(user): Change FindProportionalColumns for this?
496 int num_proportionality_classes = 0;
497 std::vector<ColIndex> proportional_columns;
498 for (ColIndex col(0); col < mapping.size(); ++col) {
499 const ColIndex representative = mapping[col];
501 if (mapping[representative] == kInvalidCol) {
502 proportional_columns.push_back(representative);
503 ++num_proportionality_classes;
505 }
506 proportional_columns.push_back(col);
507 }
508 }
509 if (proportional_columns.empty()) return false;
510 VLOG(1) << "The problem contains " << proportional_columns.size()
511 << " columns which belong to " << num_proportionality_classes
512 << " proportionality classes.";
513
514 // Note(user): using the first coefficient may not give the best precision.
515 const ColIndex num_cols = lp->num_variables();
516 column_factors_.assign(num_cols, 0.0);
517 for (const ColIndex col : proportional_columns) {
518 const SparseColumn& column = lp->GetSparseColumn(col);
519 column_factors_[col] = column.GetFirstCoefficient();
520 }
521
522 // This is only meaningful for column representative.
523 //
524 // The reduced cost of a column is 'cost - dual_values.column' and we know
525 // that for all proportional columns, 'dual_values.column /
526 // column_factors_[col]' is the same. Here, we bound this quantity which is
527 // related to the cost 'slope' of a proportional column:
528 // cost / column_factors_[col].
529 DenseRow slope_lower_bound(num_cols, -kInfinity);
530 DenseRow slope_upper_bound(num_cols, +kInfinity);
531 for (const ColIndex col : proportional_columns) {
532 const ColIndex representative = mapping[col];
533
534 // We reason in terms of a minimization problem here.
535 const bool is_rc_positive_or_zero =
537 const bool is_rc_negative_or_zero =
539 bool is_slope_upper_bounded = is_rc_positive_or_zero;
540 bool is_slope_lower_bounded = is_rc_negative_or_zero;
// A negative factor flips the direction of the slope inequality.
541 if (column_factors_[col] < 0.0) {
542 std::swap(is_slope_lower_bounded, is_slope_upper_bounded);
543 }
544 const Fractional slope =
546 column_factors_[col];
547 if (is_slope_lower_bounded) {
548 slope_lower_bound[representative] =
549 std::max(slope_lower_bound[representative], slope);
550 }
551 if (is_slope_upper_bounded) {
552 slope_upper_bound[representative] =
553 std::min(slope_upper_bound[representative], slope);
554 }
555 }
556
557 // Deal with empty slope intervals.
558 for (const ColIndex col : proportional_columns) {
559 const ColIndex representative = mapping[col];
560
561 // This is only needed for class representative columns.
562 if (representative == col) {
564 slope_lower_bound[representative],
565 slope_upper_bound[representative])) {
566 VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, no feasible dual values"
567 << " can satisfy the constraints of the proportional columns"
568 << " with representative " << representative << "."
569 << " the associated quantity must be in ["
570 << slope_lower_bound[representative] << ","
571 << slope_upper_bound[representative] << "].";
573 return false;
574 }
575 }
576 }
577
578 // Now, fix the columns that can be fixed to one of their bounds.
579 for (const ColIndex col : proportional_columns) {
580 const ColIndex representative = mapping[col];
581 const Fractional slope =
583 column_factors_[col];
584
585 // The scaled reduced cost is slope - quantity.
586 bool variable_can_be_fixed = false;
588
592 slope)) {
593 // The scaled reduced cost is < 0.
594 variable_can_be_fixed = true;
595 target_bound = (column_factors_[col] >= 0.0) ? upper_bound : lower_bound;
597 slope, slope_upper_bound[representative])) {
598 // The scaled reduced cost is > 0.
599 variable_can_be_fixed = true;
600 target_bound = (column_factors_[col] >= 0.0) ? lower_bound : upper_bound;
601 }
602
603 if (variable_can_be_fixed) {
604 // Clear mapping[col] so this column will not be considered for the next
605 // stage.
606 mapping[col] = kInvalidCol;
607 if (!IsFinite(target_bound)) {
608 VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED.";
610 return false;
611 } else {
612 SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
613 column_deletion_helper_.MarkColumnForDeletionWithState(
615 ComputeVariableStatus(target_bound, lower_bound, upper_bound));
616 }
617 }
618 }
619
620 // Merge the variables with the same scaled cost.
621 std::vector<ColumnWithRepresentativeAndScaledCost> sorted_columns;
622 for (const ColIndex col : proportional_columns) {
623 const ColIndex representative = mapping[col];
624
625 // This test is needed because we already removed some columns.
626 if (mapping[col] != kInvalidCol) {
627 sorted_columns.push_back(ColumnWithRepresentativeAndScaledCost(
629 lp->objective_coefficients()[col] / column_factors_[col]));
630 }
631 }
632 std::sort(sorted_columns.begin(), sorted_columns.end());
633
634 // All this will be needed during postsolve.
635 merged_columns_.assign(num_cols, kInvalidCol);
636 lower_bounds_.assign(num_cols, -kInfinity);
637 upper_bounds_.assign(num_cols, kInfinity);
638 new_lower_bounds_.assign(num_cols, -kInfinity);
639 new_upper_bounds_.assign(num_cols, kInfinity);
640
// Outer loop: one iteration per run of sortable-equal columns; the inner
// loop advances i past every column merged into target_col.
641 for (int i = 0; i < sorted_columns.size();) {
642 const ColIndex target_col = sorted_columns[i].col;
643 const ColIndex target_representative = sorted_columns[i].representative;
644 const Fractional target_scaled_cost = sorted_columns[i].scaled_cost;
645
646 // Save the initial bounds before modifying them.
647 lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
648 upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
649
650 int num_merged = 0;
651 for (++i; i < sorted_columns.size(); ++i) {
652 if (sorted_columns[i].representative != target_representative) break;
653 if (std::abs(sorted_columns[i].scaled_cost - target_scaled_cost) >=
655 break;
656 }
657 ++num_merged;
658 const ColIndex col = sorted_columns[i].col;
661 lower_bounds_[col] = lower_bound;
662 upper_bounds_[col] = upper_bound;
663 merged_columns_[col] = target_col;
664
665 // This is a bit counter intuitive, but when a column is divided by x,
666 // the corresponding bounds have to be multiplied by x.
667 const Fractional bound_factor =
668 column_factors_[col] / column_factors_[target_col];
669
670 // We need to shift the variable so that a basic solution of the new
671 // problem can easily be converted to a basic solution of the original
672 // problem.
673
674 // A feasible value for the variable must be chosen, and the variable must
675 // be shifted by this value. This is done to make sure that it will be
676 // possible to recreate a basic solution of the original problem from a
677 // basic solution of the pre-solved problem during post-solve.
678 const Fractional target_value =
679 MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
680 Fractional lower_diff = (lower_bound - target_value) * bound_factor;
681 Fractional upper_diff = (upper_bound - target_value) * bound_factor;
682 if (bound_factor < 0.0) {
683 std::swap(lower_diff, upper_diff);
684 }
686 target_col, lp->variable_lower_bounds()[target_col] + lower_diff,
687 lp->variable_upper_bounds()[target_col] + upper_diff);
688 SubtractColumnMultipleFromConstraintBound(col, target_value, lp);
689 column_deletion_helper_.MarkColumnForDeletionWithState(
690 col, target_value,
691 ComputeVariableStatus(target_value, lower_bound, upper_bound));
692 }
693
694 // If at least one column was merged, the target column must be shifted like
695 // the other columns in the same equivalence class for the same reason (see
696 // above).
697 if (num_merged > 0) {
698 merged_columns_[target_col] = target_col;
699 const Fractional target_value = MinInMagnitudeOrZeroIfInfinite(
700 lower_bounds_[target_col], upper_bounds_[target_col]);
702 target_col, lp->variable_lower_bounds()[target_col] - target_value,
703 lp->variable_upper_bounds()[target_col] - target_value);
704 SubtractColumnMultipleFromConstraintBound(target_col, target_value, lp);
705 new_lower_bounds_[target_col] = lp->variable_lower_bounds()[target_col];
706 new_upper_bounds_[target_col] = lp->variable_upper_bounds()[target_col];
707 }
708 }
709
710 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
711 return !column_deletion_helper_.IsEmpty();
712}
713
// ProportionalColumnPreprocessor::RecoverSolution (first signature line and a
// few statements elided in this extract). Restores the deleted columns, then
// un-merges each equivalence class: the representative's post-solve value is
// redistributed among the original columns of the class so that the recovered
// solution is primal-feasible, keeping at most one basic variable per class.
715 ProblemSolution* solution) const {
717 RETURN_IF_NULL(solution);
718 column_deletion_helper_.RestoreDeletedColumns(solution);
719
720 // The rest of this function is to unmerge the columns so that the solution be
721 // primal-feasible.
722 const ColIndex num_cols = merged_columns_.size();
723 DenseBooleanRow is_representative_basic(num_cols, false);
724 DenseBooleanRow is_distance_to_upper_bound(num_cols, false);
725 DenseRow distance_to_bound(num_cols, 0.0);
726 DenseRow wanted_value(num_cols, 0.0);
727
728 // The first pass is a loop over the representatives to compute the current
729 // distance to the new bounds.
730 for (ColIndex col(0); col < num_cols; ++col) {
731 if (merged_columns_[col] == col) {
732 const Fractional value = solution->primal_values[col];
733 const Fractional distance_to_upper_bound = new_upper_bounds_[col] - value;
734 const Fractional distance_to_lower_bound = value - new_lower_bounds_[col];
// Track the nearer bound: the redistribution below walks toward it.
735 if (distance_to_upper_bound < distance_to_lower_bound) {
736 distance_to_bound[col] = distance_to_upper_bound;
737 is_distance_to_upper_bound[col] = true;
738 } else {
739 distance_to_bound[col] = distance_to_lower_bound;
740 is_distance_to_upper_bound[col] = false;
741 }
742 is_representative_basic[col] =
744
745 // Restore the representative value to a feasible value of the initial
746 // variable. Now all the merged variable are at a feasible value.
747 wanted_value[col] = value;
748 solution->primal_values[col] = MinInMagnitudeOrZeroIfInfinite(
749 lower_bounds_[col], upper_bounds_[col]);
750 solution->variable_statuses[col] = ComputeVariableStatus(
751 solution->primal_values[col], lower_bounds_[col], upper_bounds_[col]);
752 }
753 }
754
755 // Second pass to correct the values.
756 for (ColIndex col(0); col < num_cols; ++col) {
757 const ColIndex representative = merged_columns_[col];
759 if (IsFinite(distance_to_bound[representative])) {
760 // If the distance is finite, then each variable is set to its
761 // corresponding bound (the one from which the distance is computed) and
762 // is then changed by as much as possible until the distance is zero.
763 const Fractional bound_factor =
764 column_factors_[col] / column_factors_[representative];
765 const Fractional scaled_distance =
766 distance_to_bound[representative] / std::abs(bound_factor);
767 const Fractional width = upper_bounds_[col] - lower_bounds_[col];
768 const bool to_upper_bound =
769 (bound_factor > 0.0) == is_distance_to_upper_bound[representative];
770 if (width <= scaled_distance) {
771 solution->primal_values[col] =
772 to_upper_bound ? lower_bounds_[col] : upper_bounds_[col];
773 solution->variable_statuses[col] =
774 ComputeVariableStatus(solution->primal_values[col],
775 lower_bounds_[col], upper_bounds_[col]);
776 distance_to_bound[representative] -= width * std::abs(bound_factor);
777 } else {
778 solution->primal_values[col] =
779 to_upper_bound ? upper_bounds_[col] - scaled_distance
780 : lower_bounds_[col] + scaled_distance;
781 solution->variable_statuses[col] =
782 is_representative_basic[representative]
784 : ComputeVariableStatus(solution->primal_values[col],
785 lower_bounds_[col],
786 upper_bounds_[col]);
787 distance_to_bound[representative] = 0.0;
788 is_representative_basic[representative] = false;
789 }
790 } else {
791 // If the distance is not finite, then only one variable needs to be
792 // changed from its current feasible value in order to have a
793 // primal-feasible solution.
794 const Fractional error = wanted_value[representative];
795 if (error == 0.0) {
796 if (is_representative_basic[representative]) {
798 is_representative_basic[representative] = false;
799 }
800 } else {
801 const Fractional bound_factor =
802 column_factors_[col] / column_factors_[representative];
// Pick a column that can absorb the whole error, i.e. one unbounded in
// the direction the correction must move.
803 const bool use_this_variable =
804 (error * bound_factor > 0.0) ? (upper_bounds_[col] == kInfinity)
805 : (lower_bounds_[col] == -kInfinity);
806 if (use_this_variable) {
807 wanted_value[representative] = 0.0;
808 solution->primal_values[col] += error / bound_factor;
809 if (is_representative_basic[representative]) {
811 is_representative_basic[representative] = false;
812 } else {
813 // This should not happen on an OPTIMAL or FEASIBLE solution.
814 DCHECK(solution->status != ProblemStatus::OPTIMAL &&
817 }
818 }
819 }
820 }
821 }
822 }
823}
824
825// --------------------------------------------------------
826// ProportionalRowPreprocessor
827// --------------------------------------------------------
828
831 RETURN_VALUE_IF_NULL(lp, false);
832 const RowIndex num_rows = lp->num_constraints();
833 const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
834
835 // Use the first coefficient of each row to compute the proportionality
836 // factor. Note that the sign is important.
837 //
838 // Note(user): using the first coefficient may not give the best precision.
839 row_factors_.assign(num_rows, 0.0);
840 for (RowIndex row(0); row < num_rows; ++row) {
841 const SparseColumn& row_transpose = transpose.column(RowToColIndex(row));
842 if (!row_transpose.IsEmpty()) {
843 row_factors_[row] = row_transpose.GetFirstCoefficient();
844 }
845 }
846
847 // The new row bounds (only meaningful for the proportional rows).
850
851 // Where the new bounds are coming from. Only for the constraints that stay
852 // in the lp and are modified, kInvalidRow otherwise.
853 upper_bound_sources_.assign(num_rows, kInvalidRow);
854 lower_bound_sources_.assign(num_rows, kInvalidRow);
855
856 // Initialization.
857 // We need the first representative of each proportional row class to point to
858 // itself for the loop below. TODO(user): Already return such a mapping from
859 // FindProportionalColumns()?
862 DenseBooleanColumn is_a_representative(num_rows, false);
863 int num_proportional_rows = 0;
864 for (RowIndex row(0); row < num_rows; ++row) {
865 const ColIndex representative_row_as_col = mapping[RowToColIndex(row)];
866 if (representative_row_as_col != kInvalidCol) {
867 mapping[representative_row_as_col] = representative_row_as_col;
868 is_a_representative[ColToRowIndex(representative_row_as_col)] = true;
869 ++num_proportional_rows;
870 }
871 }
872
873 // Compute the bound of each representative as implied by the rows
874 // which are proportional to it. Also keep the source row of each bound.
875 for (RowIndex row(0); row < num_rows; ++row) {
876 const ColIndex row_as_col = RowToColIndex(row);
877 if (mapping[row_as_col] != kInvalidCol) {
878 // For now, delete all the rows that are proportional to another one.
879 // Note that we will unmark the final representative of this class later.
880 row_deletion_helper_.MarkRowForDeletion(row);
881 const RowIndex representative_row = ColToRowIndex(mapping[row_as_col]);
882
883 const Fractional factor =
884 row_factors_[representative_row] / row_factors_[row];
885 Fractional implied_lb = factor * lp->constraint_lower_bounds()[row];
886 Fractional implied_ub = factor * lp->constraint_upper_bounds()[row];
887 if (factor < 0.0) {
888 std::swap(implied_lb, implied_ub);
889 }
890
891 // TODO(user): if the bounds are equal, use the largest row in magnitude?
892 if (implied_lb >= lower_bounds[representative_row]) {
893 lower_bounds[representative_row] = implied_lb;
894 lower_bound_sources_[representative_row] = row;
895 }
896 if (implied_ub <= upper_bounds[representative_row]) {
897 upper_bounds[representative_row] = implied_ub;
898 upper_bound_sources_[representative_row] = row;
899 }
900 }
901 }
902
903 // For maximum precision, and also to simplify the postsolve, we choose
904 // a representative for each class of proportional columns that has at least
905 // one of the two tightest bounds.
906 for (RowIndex row(0); row < num_rows; ++row) {
907 if (!is_a_representative[row]) continue;
908 const RowIndex lower_source = lower_bound_sources_[row];
909 const RowIndex upper_source = upper_bound_sources_[row];
910 lower_bound_sources_[row] = kInvalidRow;
911 upper_bound_sources_[row] = kInvalidRow;
912 DCHECK_NE(lower_source, kInvalidRow);
913 DCHECK_NE(upper_source, kInvalidRow);
914 if (lower_source == upper_source) {
915 // In this case, a simple change of representative is enough.
916 // The constraint bounds of the representative will not change.
917 DCHECK_NE(lower_source, kInvalidRow);
918 row_deletion_helper_.UnmarkRow(lower_source);
919 } else {
920 // Report ProblemStatus::PRIMAL_INFEASIBLE if the new lower bound is not
921 // lower than the new upper bound modulo the default tolerance.
923 upper_bounds[row])) {
925 return false;
926 }
927
928 // Special case for fixed rows.
929 if (lp->constraint_lower_bounds()[lower_source] ==
930 lp->constraint_upper_bounds()[lower_source]) {
931 row_deletion_helper_.UnmarkRow(lower_source);
932 continue;
933 }
934 if (lp->constraint_lower_bounds()[upper_source] ==
935 lp->constraint_upper_bounds()[upper_source]) {
936 row_deletion_helper_.UnmarkRow(upper_source);
937 continue;
938 }
939
940 // This is the only case where a more complex postsolve is needed.
941 // To maximize precision, the class representative is changed to either
942 // upper_source or lower_source depending of which row has the largest
943 // proportionality factor.
944 RowIndex new_representative = lower_source;
945 RowIndex other = upper_source;
946 if (std::abs(row_factors_[new_representative]) <
947 std::abs(row_factors_[other])) {
948 std::swap(new_representative, other);
949 }
950
951 // Initialize the new bounds with the implied ones.
952 const Fractional factor =
953 row_factors_[new_representative] / row_factors_[other];
954 Fractional new_lb = factor * lp->constraint_lower_bounds()[other];
955 Fractional new_ub = factor * lp->constraint_upper_bounds()[other];
956 if (factor < 0.0) {
957 std::swap(new_lb, new_ub);
958 }
959
960 lower_bound_sources_[new_representative] = new_representative;
961 upper_bound_sources_[new_representative] = new_representative;
962
963 if (new_lb > lp->constraint_lower_bounds()[new_representative]) {
964 lower_bound_sources_[new_representative] = other;
965 } else {
966 new_lb = lp->constraint_lower_bounds()[new_representative];
967 }
968 if (new_ub < lp->constraint_upper_bounds()[new_representative]) {
969 upper_bound_sources_[new_representative] = other;
970 } else {
971 new_ub = lp->constraint_upper_bounds()[new_representative];
972 }
973 const RowIndex new_lower_source =
974 lower_bound_sources_[new_representative];
975 if (new_lower_source == upper_bound_sources_[new_representative]) {
976 row_deletion_helper_.UnmarkRow(new_lower_source);
977 lower_bound_sources_[new_representative] = kInvalidRow;
978 upper_bound_sources_[new_representative] = kInvalidRow;
979 continue;
980 }
981
982 // Take care of small numerical imprecision by making sure that lb <= ub.
983 // Note that if the imprecision was greater than the tolerance, the code
984 // at the beginning of this block would have reported
985 // ProblemStatus::PRIMAL_INFEASIBLE.
987 if (new_lb > new_ub) {
988 if (lower_bound_sources_[new_representative] == new_representative) {
989 new_ub = lp->constraint_lower_bounds()[new_representative];
990 } else {
991 new_lb = lp->constraint_upper_bounds()[new_representative];
992 }
993 }
994 row_deletion_helper_.UnmarkRow(new_representative);
995 lp->SetConstraintBounds(new_representative, new_lb, new_ub);
996 }
997 }
998
999 lp_is_maximization_problem_ = lp->IsMaximizationProblem();
1000 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1001 return !row_deletion_helper_.IsEmpty();
1002}
1003
// Postsolve of ProportionalRowPreprocessor. After restoring the rows deleted
// during presolve, each non-zero dual value held by a class representative is
// moved back (scaled by the ratio of row_factors_) to the proportional row
// whose bound was actually active (lower_bound_sources_/upper_bound_sources_),
// and the constraint statuses are patched accordingly.
// NOTE(review): this doxygen listing dropped several original source lines
// (1004, 1006, 1023, 1030, 1036, 1048, 1050-1051, 1058, 1060-1061, 1066,
// 1069) — e.g. the function header and the reads/writes of `status` — so the
// listing below is incomplete and not compilable as-is.
1005 ProblemSolution* solution) const {
1007 RETURN_IF_NULL(solution);
1008 row_deletion_helper_.RestoreDeletedRows(solution);
1009
1010 // Make sure that all non-zero dual values on the proportional rows are
1011 // assigned to the correct row with the correct sign and that the statuses
1012 // are correct.
1013 const RowIndex num_rows = solution->dual_values.size();
1014 for (RowIndex row(0); row < num_rows; ++row) {
1015 const RowIndex lower_source = lower_bound_sources_[row];
1016 const RowIndex upper_source = upper_bound_sources_[row];
1017 if (lower_source == kInvalidRow && upper_source == kInvalidRow) continue;
1018 DCHECK_NE(lower_source, upper_source);
1019 DCHECK(lower_source == row || upper_source == row);
1020
1021 // If the representative is ConstraintStatus::BASIC, then all rows in this
1022 // class will be ConstraintStatus::BASIC and there is nothing to do.
1024 if (status == ConstraintStatus::BASIC) continue;
1025
1026 // If the row is FIXED it will behave as a row
1027 // ConstraintStatus::AT_UPPER_BOUND or
1028 // ConstraintStatus::AT_LOWER_BOUND depending on the corresponding dual
1029 // variable sign.
// NOTE(review): in a maximization problem the stored dual values have the
// opposite sign, hence the correction below.
1031 const Fractional corrected_dual_value = lp_is_maximization_problem_
1032 ? -solution->dual_values[row]
1033 : solution->dual_values[row];
1034 if (corrected_dual_value != 0.0) {
1035 status = corrected_dual_value > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1037 }
1038 }
1039
1040 // If one of the two conditions below are true, set the row status to
1041 // ConstraintStatus::BASIC.
1042 // Note that the source which is not row can't be FIXED (see presolve).
1043 if (lower_source != row && status == ConstraintStatus::AT_LOWER_BOUND) {
1044 DCHECK_EQ(0.0, solution->dual_values[lower_source]);
1045 const Fractional factor = row_factors_[row] / row_factors_[lower_source];
1046 solution->dual_values[lower_source] = factor * solution->dual_values[row];
1047 solution->dual_values[row] = 0.0;
1049 solution->constraint_statuses[lower_source] =
1052 }
1053 if (upper_source != row && status == ConstraintStatus::AT_UPPER_BOUND) {
1054 DCHECK_EQ(0.0, solution->dual_values[upper_source]);
1055 const Fractional factor = row_factors_[row] / row_factors_[upper_source];
1056 solution->dual_values[upper_source] = factor * solution->dual_values[row];
1057 solution->dual_values[row] = 0.0;
1059 solution->constraint_statuses[upper_source] =
1062 }
1063
1064 // If the row status is still ConstraintStatus::FIXED_VALUE, we need to
1065 // relax its status.
1067 solution->constraint_statuses[row] =
1068 lower_source != row ? ConstraintStatus::AT_UPPER_BOUND
1070 }
1071 }
1072}
1073
1074// --------------------------------------------------------
1075// FixedVariablePreprocessor
1076// --------------------------------------------------------
1077
// FixedVariablePreprocessor::Run(): deletes every variable whose lower bound
// equals its upper bound. The fixed value is substituted into the constraint
// bounds via SubtractColumnMultipleFromConstraintBound(), and the column is
// marked for deletion with VariableStatus::FIXED_VALUE so that the postsolve
// can restore the value and status. Returns true iff something was deleted.
// NOTE(review): the extraction dropped lines 1078-1079 (function header) and
// 1083-1084 — presumably the `lower_bound`/`upper_bound` reads from
// lp->variable_lower_bounds()/upper_bounds(); confirm against the original.
1080 RETURN_VALUE_IF_NULL(lp, false);
1081 const ColIndex num_cols = lp->num_variables();
1082 for (ColIndex col(0); col < num_cols; ++col) {
1085 if (lower_bound == upper_bound) {
1086 const Fractional fixed_value = lower_bound;
1087 DCHECK(IsFinite(fixed_value));
1088
1089 // We need to change the constraint bounds.
1090 SubtractColumnMultipleFromConstraintBound(col, fixed_value, lp);
1091 column_deletion_helper_.MarkColumnForDeletionWithState(
1092 col, fixed_value, VariableStatus::FIXED_VALUE);
1093 }
1094 }
1095
1096 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1097 return !column_deletion_helper_.IsEmpty();
1098}
1099
// Postsolve: restoring the deleted columns re-injects the fixed values and
// the VariableStatus::FIXED_VALUE statuses saved by Run().
// NOTE(review): line 1100 (function header) and 1102 are missing from this
// doxygen extraction.
1101 ProblemSolution* solution) const {
1103 RETURN_IF_NULL(solution);
1104 column_deletion_helper_.RestoreDeletedColumns(solution);
1105}
1106
1107// --------------------------------------------------------
1108// ForcingAndImpliedFreeConstraintPreprocessor
1109// --------------------------------------------------------
1110
// ForcingAndImpliedFreeConstraintPreprocessor::Run(): computes, for each row,
// the activity bounds implied by its variables' bounds, then:
//  - reports infeasibility when implied and stated bounds are incompatible;
//  - detects "forcing" rows (an implied bound meets a constraint bound within
//    tolerance): every variable of such a row is fixed to the relevant bound,
//    the fixed columns (and their costs) are saved for postsolve, and both
//    the columns and the forcing rows are deleted;
//  - relaxes rows whose bounds are implied by the variable bounds ("implied
//    free"); they will later be removed by the FreeConstraintPreprocessor.
// Returns true iff at least one column was deleted.
// NOTE(review): the extraction dropped lines 1111-1112 (function header),
// 1150, 1157, 1169, 1182, 1186, 1204 (presumably `Fractional target_bound;`),
// 1222, 1240 and 1252, including the status_ = PRIMAL_INFEASIBLE
// assignments; the listing is incomplete as shown.
1113 RETURN_VALUE_IF_NULL(lp, false);
1114 const RowIndex num_rows = lp->num_constraints();
1115
1116 // Compute the implied constraint bounds from the variable bounds.
1117 DenseColumn implied_lower_bounds(num_rows, 0);
1118 DenseColumn implied_upper_bounds(num_rows, 0);
1119 const ColIndex num_cols = lp->num_variables();
1120 StrictITIVector<RowIndex, int> row_degree(num_rows, 0);
1121 for (ColIndex col(0); col < num_cols; ++col) {
1122 const Fractional lower = lp->variable_lower_bounds()[col];
1123 const Fractional upper = lp->variable_upper_bounds()[col];
1124 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1125 const RowIndex row = e.row();
1126 const Fractional coeff = e.coefficient();
1127 if (coeff > 0.0) {
1128 implied_lower_bounds[row] += lower * coeff;
1129 implied_upper_bounds[row] += upper * coeff;
1130 } else {
1131 implied_lower_bounds[row] += upper * coeff;
1132 implied_upper_bounds[row] += lower * coeff;
1133 }
1134 ++row_degree[row];
1135 }
1136 }
1137
1138 // Note that the ScalingPreprocessor is currently executed last, so here the
1139 // problem has not been scaled yet.
1140 int num_implied_free_constraints = 0;
1141 int num_forcing_constraints = 0;
1142 is_forcing_up_.assign(num_rows, false);
1143 DenseBooleanColumn is_forcing_down(num_rows, false);
1144 for (RowIndex row(0); row < num_rows; ++row) {
1145 if (row_degree[row] == 0) continue;
1146 Fractional lower = lp->constraint_lower_bounds()[row];
1147 Fractional upper = lp->constraint_upper_bounds()[row];
1148
1149 // Check for infeasibility.
1151 implied_upper_bounds[row]) ||
1152 !IsSmallerWithinFeasibilityTolerance(implied_lower_bounds[row],
1153 upper)) {
1154 VLOG(1) << "implied bound " << implied_lower_bounds[row] << " "
1155 << implied_upper_bounds[row];
1156 VLOG(1) << "constraint bound " << lower << " " << upper;
1158 return false;
1159 }
1160
1161 // Check if the constraint is forcing. That is, all the variables that
1162 // appear in it must be at one of their bounds.
1163 if (IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1164 lower)) {
1165 is_forcing_down[row] = true;
1166 ++num_forcing_constraints;
1167 continue;
1168 }
1170 implied_lower_bounds[row])) {
1171 is_forcing_up_[row] = true;
1172 ++num_forcing_constraints;
1173 continue;
1174 }
1175
1176 // We relax the constraint bounds only if the constraint is implied to be
1177 // free. Such constraints will later be deleted by the
1178 // FreeConstraintPreprocessor.
1179 //
1180 // Note that we could relax only one of the two bounds, but the impact this
1181 // would have on the revised simplex algorithm is unclear at this point.
1183 implied_lower_bounds[row]) &&
1184 IsSmallerWithinPreprocessorZeroTolerance(implied_upper_bounds[row],
1185 upper)) {
1187 ++num_implied_free_constraints;
1188 }
1189 }
1190
1191 if (num_implied_free_constraints > 0) {
1192 VLOG(1) << num_implied_free_constraints << " implied free constraints.";
1193 }
1194
1195 if (num_forcing_constraints > 0) {
1196 VLOG(1) << num_forcing_constraints << " forcing constraints.";
1197 lp_is_maximization_problem_ = lp->IsMaximizationProblem();
1198 costs_.resize(num_cols, 0.0);
1199 for (ColIndex col(0); col < num_cols; ++col) {
1200 const SparseColumn& column = lp->GetSparseColumn(col);
1201 const Fractional lower = lp->variable_lower_bounds()[col];
1202 const Fractional upper = lp->variable_upper_bounds()[col];
1203 bool is_forced = false;
1205 for (const SparseColumn::Entry e : column) {
1206 if (is_forcing_down[e.row()]) {
1207 const Fractional candidate = e.coefficient() < 0.0 ? lower : upper;
1208 if (is_forced && candidate != target_bound) {
1209 // The bounds are really close, so we fix to the bound with
1210 // the lowest magnitude. As of 2019/11/19, this is "better" than
1211 // fixing to the mid-point, because at postsolve, we always put
1212 // non-basic variables to their exact bounds (so, with mid-point
1213 // there would be a difference of epsilon/2 between the inner
1214 // solution and the postsolved one, which might cause issues).
1215 if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1216 target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1217 continue;
1218 }
1219 VLOG(1) << "A variable is forced in both directions! bounds: ["
1220 << std::fixed << std::setprecision(10) << lower << ", "
1221 << upper << "]. coeff:" << e.coefficient();
1223 return false;
1224 }
1225 target_bound = candidate;
1226 is_forced = true;
1227 }
1228 if (is_forcing_up_[e.row()]) {
1229 const Fractional candidate = e.coefficient() < 0.0 ? upper : lower;
1230 if (is_forced && candidate != target_bound) {
1231 // The bounds are really close, so we fix to the bound with
1232 // the lowest magnitude.
1233 if (IsSmallerWithinPreprocessorZeroTolerance(upper, lower)) {
1234 target_bound = std::abs(lower) < std::abs(upper) ? lower : upper;
1235 continue;
1236 }
1237 VLOG(1) << "A variable is forced in both directions! bounds: ["
1238 << std::fixed << std::setprecision(10) << lower << ", "
1239 << upper << "]. coeff:" << e.coefficient();
1241 return false;
1242 }
1243 target_bound = candidate;
1244 is_forced = true;
1245 }
1246 }
1247 if (is_forced) {
1248 // Fix the variable, update the constraint bounds and save this column
1249 // and its cost for the postsolve.
1250 SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
1251 column_deletion_helper_.MarkColumnForDeletionWithState(
1253 ComputeVariableStatus(target_bound, lower, upper));
1254 columns_saver_.SaveColumn(col, column);
1255 costs_[col] = lp->objective_coefficients()[col];
1256 }
1257 }
1258 for (RowIndex row(0); row < num_rows; ++row) {
1259 // In theory, an M exists such that for any magnitude >= M, we will be at
1260 // an optimal solution. However, because of numerical errors, if the value
1261 // is too large, it causes problems when verifying the solution. So we
1262 // select the smallest such M (at least a reasonably small one) during
1263 // postsolve. It is the reason why we need to store the columns that were
1264 // fixed.
1265 if (is_forcing_down[row] || is_forcing_up_[row]) {
1266 row_deletion_helper_.MarkRowForDeletion(row);
1267 }
1268 }
1269 }
1270
1271 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
1272 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1273 return !column_deletion_helper_.IsEmpty();
1274}
1275
// Postsolve: after restoring the deleted rows and columns, compute a dual
// value for each deleted (forcing) row so that every fixed column whose
// "last" deleted row it is becomes dual-feasible. For each such row, the
// column whose reduced cost reaches 0.0 first (the extreme `bound` below)
// becomes VariableStatus::BASIC and the row gets the corresponding at-bound
// status; if the dual value stays 0.0 nothing changes.
// NOTE(review): the extraction dropped lines 1276 (function header), 1278,
// 1286 (presumably the `Fractional coefficient;` member of DeletionEntry),
// 1316-1325 (a comment block) and 1358, so the listing is incomplete.
1277 ProblemSolution* solution) const {
1279 RETURN_IF_NULL(solution);
1280 column_deletion_helper_.RestoreDeletedColumns(solution);
1281 row_deletion_helper_.RestoreDeletedRows(solution);
1282
1283 struct DeletionEntry {
1284 RowIndex row;
1285 ColIndex col;
1287 };
1288 std::vector<DeletionEntry> entries;
1289
1290 // Compute for each deleted columns the last deleted row in which it appears.
1291 const ColIndex size = column_deletion_helper_.GetMarkedColumns().size();
1292 for (ColIndex col(0); col < size; ++col) {
1293 if (!column_deletion_helper_.IsColumnMarked(col)) continue;
1294
1295 RowIndex last_row = kInvalidRow;
1296 Fractional last_coefficient;
1297 for (const SparseColumn::Entry e : columns_saver_.SavedColumn(col)) {
1298 const RowIndex row = e.row();
1299 if (row_deletion_helper_.IsRowMarked(row)) {
1300 last_row = row;
1301 last_coefficient = e.coefficient();
1302 }
1303 }
1304 if (last_row != kInvalidRow) {
1305 entries.push_back({last_row, col, last_coefficient});
1306 }
1307 }
1308
1309 // Sort by row first and then col.
1310 std::sort(entries.begin(), entries.end(),
1311 [](const DeletionEntry& a, const DeletionEntry& b) {
1312 if (a.row == b.row) return a.col < b.col;
1313 return a.row < b.row;
1314 });
1315
// For each deleted row (in order), compute a bound on the dual values so
// that all the deleted columns for which this row is the last deleted row
// are dual-feasible (the original comment block, lines 1316-1325, was
// dropped by the extraction).
1326 for (int i = 0; i < entries.size();) {
1327 const RowIndex row = entries[i].row;
1328 DCHECK(row_deletion_helper_.IsRowMarked(row));
1329
1330 // Process column with this last deleted row.
1331 Fractional new_dual_value = 0.0;
1332 ColIndex new_basic_column = kInvalidCol;
1333 for (; i < entries.size(); ++i) {
1334 if (entries[i].row != row) break;
1335 const ColIndex col = entries[i].col;
1336
1337 const Fractional scalar_product =
1338 ScalarProduct(solution->dual_values, columns_saver_.SavedColumn(col));
1339 const Fractional reduced_cost = costs_[col] - scalar_product;
1340 const Fractional bound = reduced_cost / entries[i].coefficient;
1341 if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
1342 if (bound < new_dual_value) {
1343 new_dual_value = bound;
1344 new_basic_column = col;
1345 }
1346 } else {
1347 if (bound > new_dual_value) {
1348 new_dual_value = bound;
1349 new_basic_column = col;
1350 }
1351 }
1352 }
1353 if (new_basic_column != kInvalidCol) {
1354 solution->dual_values[row] = new_dual_value;
1355 solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
1356 solution->constraint_statuses[row] =
1357 is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
1359 }
1360 }
1361}
1362
1363// --------------------------------------------------------
1364// ImpliedFreePreprocessor
1365// --------------------------------------------------------
1366
1367namespace {
1368struct ColWithDegree {
1369 ColIndex col;
1370 EntryIndex num_entries;
1371 ColWithDegree(ColIndex c, EntryIndex n) : col(c), num_entries(n) {}
1372 bool operator<(const ColWithDegree& other) const {
1373 if (num_entries == other.num_entries) {
1374 return col < other.col;
1375 }
1376 return num_entries < other.num_entries;
1377 }
1378};
1379} // namespace
1380
// ImpliedFreePreprocessor::Run(): for each column, processed by increasing
// degree, computes the bounds implied on its variable by the constraints and
// the other variables' bounds; when those implied bounds fit within the
// variable's own bounds (modulo tolerance), the variable is made free. Before
// freeing it, the variable is shifted by the bound of smallest magnitude
// (variable_offsets_) and the corresponding at-bound status is saved
// (postsolve_status_of_free_variables_) so RecoverSolution() can undo it.
// Returns true iff at least one variable was made free.
// NOTE(review): the extraction dropped lines 1380-1382 (function header),
// 1395 and 1397 (the lb_sums/ub_sums declarations), 1402-1403 (the
// lower_bound/upper_bound reads), 1460-1462, 1506, 1512, 1514, 1534, 1536,
// 1539 and 1562, so the listing is incomplete and not compilable as shown.
1383 RETURN_VALUE_IF_NULL(lp, false);
1384 if (!parameters_.use_implied_free_preprocessor()) return false;
1385 const RowIndex num_rows = lp->num_constraints();
1386 const ColIndex num_cols = lp->num_variables();
1387
1388 // For each constraint with n entries and each of its variable, we want the
1389 // bounds implied by the (n - 1) other variables and the constraint. We
1390 // use two handy utility classes that allow us to do that efficiently while
1391 // dealing properly with infinite bounds.
1392 const int size = num_rows.value();
1393 // TODO(user) : Replace SumWithNegativeInfiniteAndOneMissing and
1394 // SumWithPositiveInfiniteAndOneMissing with IntervalSumWithOneMissing.
1396 size);
1398 size);
1399
1400 // Initialize the sums by adding all the bounds of the variables.
1401 for (ColIndex col(0); col < num_cols; ++col) {
1404 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1405 Fractional entry_lb = e.coefficient() * lower_bound;
1406 Fractional entry_ub = e.coefficient() * upper_bound;
1407 if (e.coefficient() < 0.0) std::swap(entry_lb, entry_ub);
1408 lb_sums[e.row()].Add(entry_lb);
1409 ub_sums[e.row()].Add(entry_ub);
1410 }
1411 }
1412
1413 // The inequality
1414 // constraint_lb <= sum(entries) <= constraint_ub
1415 // can be rewritten as:
1416 // sum(entries) + (-activity) = 0,
1417 // where (-activity) has bounds [-constraint_ub, -constraint_lb].
1418 // We use this latter convention to simplify our code.
1419 for (RowIndex row(0); row < num_rows; ++row) {
1420 lb_sums[row].Add(-lp->constraint_upper_bounds()[row]);
1421 ub_sums[row].Add(-lp->constraint_lower_bounds()[row]);
1422 }
1423
1424 // Once a variable is freed, none of the rows in which it appears can be
1425 // used to make another variable free.
1426 DenseBooleanColumn used_rows(num_rows, false);
1427 postsolve_status_of_free_variables_.assign(num_cols, VariableStatus::FREE);
1428 variable_offsets_.assign(num_cols, 0.0);
1429
1430 // It is better to process columns with a small degree first:
1431 // - Degree-two columns make it possible to remove a row from the problem.
1432 // - This way there is more chance to make more free columns.
1433 // - It is better to have low degree free columns since a free column will
1434 // always end up in the simplex basis (except if there is more than the
1435 // number of rows in the problem).
1436 //
1437 // TODO(user): Only process degree-two so in subsequent passes more degree-two
1438 // columns could be made free. And only when no other reduction can be
1439 // applied, process the higher degree column?
1440 //
1441 // TODO(user): Be smarter about the order that maximizes the number of free
1442 // column. For instance if we have 3 doubleton columns that use the rows (1,2)
1443 // (2,3) and (3,4) then it is better not to make (2,3) free so the two other
1444 // two can be made free.
1445 std::vector<ColWithDegree> col_by_degree;
1446 for (ColIndex col(0); col < num_cols; ++col) {
1447 col_by_degree.push_back(
1448 ColWithDegree(col, lp->GetSparseColumn(col).num_entries()));
1449 }
1450 std::sort(col_by_degree.begin(), col_by_degree.end());
1451
1452 // Now loop over the columns in order and make all implied-free columns free.
1453 int num_already_free_variables = 0;
1454 int num_implied_free_variables = 0;
1455 int num_fixed_variables = 0;
1456 for (ColWithDegree col_with_degree : col_by_degree) {
1457 const ColIndex col = col_with_degree.col;
1458
1459 // If the variable is already free or fixed, we do nothing.
1463 ++num_already_free_variables;
1464 continue;
1465 }
1466 if (lower_bound == upper_bound) continue;
1467
1468 // Detect if the variable is implied free.
1469 Fractional overall_implied_lb = -kInfinity;
1470 Fractional overall_implied_ub = kInfinity;
1471 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1472 // If the row contains another implied free variable, then the bounds
1473 // implied by it will just be [-kInfinity, kInfinity] so we can skip it.
1474 if (used_rows[e.row()]) continue;
1475
1476 // This is the contribution of this column to the sum above.
1477 const Fractional coeff = e.coefficient();
1478 Fractional entry_lb = coeff * lower_bound;
1479 Fractional entry_ub = coeff * upper_bound;
1480 if (coeff < 0.0) std::swap(entry_lb, entry_ub);
1481
1482 // If X is the variable with index col and Y the sum of all the other
1483 // variables and of (-activity), then coeff * X + Y = 0. Since Y's bounds
1484 // are [lb_sum without X, ub_sum without X], it is easy to derive the
1485 // implied bounds on X.
1486 //
1487 // Important: If entry_lb (resp. entry_ub) are large, we cannot have a
1488 // good precision on the sum without. So we do add a defensive tolerance
1489 // that depends on these magnitude.
1490 const Fractional implied_lb =
1491 coeff > 0.0 ? -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff
1492 : -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff;
1493 const Fractional implied_ub =
1494 coeff > 0.0 ? -lb_sums[e.row()].SumWithoutLb(entry_lb) / coeff
1495 : -ub_sums[e.row()].SumWithoutUb(entry_ub) / coeff;
1496
1497 overall_implied_lb = std::max(overall_implied_lb, implied_lb);
1498 overall_implied_ub = std::min(overall_implied_ub, implied_ub);
1499 }
1500
1501 // Detect infeasible cases.
1502 if (!IsSmallerWithinFeasibilityTolerance(overall_implied_lb, upper_bound) ||
1503 !IsSmallerWithinFeasibilityTolerance(lower_bound, overall_implied_ub) ||
1504 !IsSmallerWithinFeasibilityTolerance(overall_implied_lb,
1505 overall_implied_ub)) {
1507 return false;
1508 }
1509
1510 // Detect fixed variable cases (there are two kinds).
1511 // Note that currently we don't do anything here except counting them.
1513 overall_implied_lb) ||
1515 lower_bound)) {
1516 // This case is already dealt with by the
1517 // ForcingAndImpliedFreeConstraintPreprocessor since it means that (at
1518 // least) one of the row is forcing.
1519 ++num_fixed_variables;
1520 continue;
1521 } else if (IsSmallerWithinPreprocessorZeroTolerance(overall_implied_ub,
1522 overall_implied_lb)) {
1523 // TODO(user): As of July 2013, with our preprocessors this case is never
1524 // triggered on the Netlib. Note however that if it appears it can have a
1525 // big impact since by fixing the variable, the two involved constraints
1526 // are forcing and can be removed too (with all the variables they touch).
1527 // The postsolve step is quite involved though.
1528 ++num_fixed_variables;
1529 continue;
1530 }
1531
1532 // Is the variable implied free? Note that for an infinite lower_bound or
1533 // upper_bound the respective inequality is always true.
1535 overall_implied_lb) &&
1537 upper_bound)) {
1538 ++num_implied_free_variables;
1540 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
1541 used_rows[e.row()] = true;
1542 }
1543
1544 // This is a tricky part. We're freeing this variable, which means that
1545 // after solve, the modified variable will have status either
1546 // VariableStatus::FREE or VariableStatus::BASIC. In the former case
1547 // (VariableStatus::FREE, value = 0.0), we need to "fix" the
1548 // status (technically, our variable isn't free!) to either
1549 // VariableStatus::AT_LOWER_BOUND or VariableStatus::AT_UPPER_BOUND
1550 // (note that we skipped fixed variables), and "fix" the value to that
1551 // bound's value as well. We make the decision and the precomputation
1552 // here: we simply offset the variable by one of its bounds, and store
1553 // which bound that was. Note that if the modified variable turns out to
1554 // be VariableStatus::BASIC, we'll simply un-offset its value too;
1555 // and let the status be VariableStatus::BASIC.
1556 //
1557 // TODO(user): This trick is already used in the DualizerPreprocessor,
1558 // maybe we should just have a preprocessor that shifts all the variables
1559 // bounds to have at least one of them at 0.0, will that improve precision
1560 // and speed of the simplex? One advantage is that we can compute the
1561 // new constraint bounds with better precision using AccurateSum.
1563 const Fractional offset =
1564 MinInMagnitudeOrZeroIfInfinite(lower_bound, upper_bound);
1565 if (offset != 0.0) {
1566 variable_offsets_[col] = offset;
1567 SubtractColumnMultipleFromConstraintBound(col, offset, lp);
1568 }
1569 postsolve_status_of_free_variables_[col] =
1570 ComputeVariableStatus(offset, lower_bound, upper_bound);
1571 }
1572 }
1573 VLOG(1) << num_already_free_variables << " free variables in the problem.";
1574 VLOG(1) << num_implied_free_variables << " implied free columns.";
1575 VLOG(1) << num_fixed_variables << " variables can be fixed.";
1576
1577 return num_implied_free_variables > 0;
1578}
1579
// Postsolve of ImpliedFreePreprocessor: un-offsets every freed variable by
// the bound stored in variable_offsets_, and turns a VariableStatus::FREE
// status back into the saved at-bound status; a BASIC status is kept as-is.
// NOTE(review): lines 1580-1581 (function header) and 1594 (the statement in
// the else branch, presumably a DCHECK on VariableStatus::BASIC) are missing
// from this doxygen extraction.
1582 RETURN_IF_NULL(solution);
1583 const ColIndex num_cols = solution->variable_statuses.size();
1584 for (ColIndex col(0); col < num_cols; ++col) {
1585 // Skip variables that the preprocessor didn't change.
1586 if (postsolve_status_of_free_variables_[col] == VariableStatus::FREE) {
1587 DCHECK_EQ(0.0, variable_offsets_[col]);
1588 continue;
1589 }
1590 if (solution->variable_statuses[col] == VariableStatus::FREE) {
1591 solution->variable_statuses[col] =
1592 postsolve_status_of_free_variables_[col];
1593 } else {
1595 }
1596 solution->primal_values[col] += variable_offsets_[col];
1597 }
1598}
1599
1600// --------------------------------------------------------
1601// DoubletonFreeColumnPreprocessor
1602// --------------------------------------------------------
1603
// DoubletonFreeColumnPreprocessor::Run(): for each free variable (bounds
// [-inf, +inf]) appearing in exactly two rows, one row (DELETED — the one
// with the larger coefficient magnitude, to divide by the bigger value) is
// used to substitute the variable out: the deleted row's bounds become the
// variable's bounds, a multiple of the deleted row is added to the other
// (MODIFIED) row, and the objective coefficients of the other variables in
// the deleted row are corrected. The deleted row is saved in restore_stack_
// for the postsolve. Returns true iff at least one row was deleted.
// NOTE(review): lines 1604-1605 (function header), 1611 (presumably the
// `transpose` accessor), 1638 and 1641 (DCHECK continuations), 1673, 1683
// and 1703 (presumably lp->UseTransposeMatrixAsReference()) are missing from
// this doxygen extraction.
1606 RETURN_VALUE_IF_NULL(lp, false);
1607 // We will modify the matrix transpose and then push the change to the linear
1608 // program by calling lp->UseTransposeMatrixAsReference(). Note
1609 // that original_matrix will not change during this preprocessor run.
1610 const SparseMatrix& original_matrix = lp->GetSparseMatrix();
1612
1613 const ColIndex num_cols(lp->num_variables());
1614 for (ColIndex doubleton_col(0); doubleton_col < num_cols; ++doubleton_col) {
1615 // Only consider doubleton free columns.
1616 if (original_matrix.column(doubleton_col).num_entries() != 2) continue;
1617 if (lp->variable_lower_bounds()[doubleton_col] != -kInfinity) continue;
1618 if (lp->variable_upper_bounds()[doubleton_col] != kInfinity) continue;
1619
1620 // Collect the two column items. Note that we skip a column involving a
1621 // deleted row since it is no longer a doubleton then.
1622 RestoreInfo r;
1623 r.col = doubleton_col;
1624 r.objective_coefficient = lp->objective_coefficients()[r.col];
1625 int index = 0;
1626 for (const SparseColumn::Entry e : original_matrix.column(r.col)) {
1627 if (row_deletion_helper_.IsRowMarked(e.row())) break;
1628 r.row[index] = e.row();
1629 r.coeff[index] = e.coefficient();
1630 DCHECK_NE(0.0, e.coefficient());
1631 ++index;
1632 }
1633 if (index != NUM_ROWS) continue;
1634
1635 // Since the column didn't touch any previously deleted row, we are sure
1636 // that the coefficients were left untouched.
1637 DCHECK_EQ(r.coeff[DELETED], transpose->column(RowToColIndex(r.row[DELETED]))
1639 DCHECK_EQ(r.coeff[MODIFIED],
1640 transpose->column(RowToColIndex(r.row[MODIFIED]))
1642
1643 // We prefer deleting the row with the larger coefficient magnitude because
1644 // we will divide by this magnitude. TODO(user): Impact?
1645 if (std::abs(r.coeff[DELETED]) < std::abs(r.coeff[MODIFIED])) {
1646 std::swap(r.coeff[DELETED], r.coeff[MODIFIED]);
1647 std::swap(r.row[DELETED], r.row[MODIFIED]);
1648 }
1649
1650 // Save the deleted row for postsolve. Note that we remove it from the
1651 // transpose at the same time. This last operation is not strictly needed,
1652 // but it is faster to do it this way (both here and later when we will take
1653 // the transpose of the final transpose matrix).
1654 r.deleted_row_as_column.Swap(
1655 transpose->mutable_column(RowToColIndex(r.row[DELETED])));
1656
1657 // Move the bound of the deleted constraint to the initially free variable.
1658 {
1659 Fractional new_variable_lb =
1660 lp->constraint_lower_bounds()[r.row[DELETED]];
1661 Fractional new_variable_ub =
1662 lp->constraint_upper_bounds()[r.row[DELETED]];
1663 new_variable_lb /= r.coeff[DELETED];
1664 new_variable_ub /= r.coeff[DELETED];
1665 if (r.coeff[DELETED] < 0.0) std::swap(new_variable_lb, new_variable_ub);
1666 lp->SetVariableBounds(r.col, new_variable_lb, new_variable_ub);
1667 }
1668
1669 // Add a multiple of the deleted row to the modified row except on
1670 // column r.col where the coefficient will be left unchanged.
1671 r.deleted_row_as_column.AddMultipleToSparseVectorAndIgnoreCommonIndex(
1672 -r.coeff[MODIFIED] / r.coeff[DELETED], ColToRowIndex(r.col),
1674 transpose->mutable_column(RowToColIndex(r.row[MODIFIED])));
1675
1676 // We also need to correct the objective value of the variables involved in
1677 // the deleted row.
1678 if (r.objective_coefficient != 0.0) {
1679 for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1680 const ColIndex col = RowToColIndex(e.row());
1681 if (col == r.col) continue;
1682 const Fractional new_objective =
1684 e.coefficient() * r.objective_coefficient / r.coeff[DELETED];
1685
1686 // This detects if the objective should actually be zero, but because of
1687 // the numerical error in the formula above, we have a really low
1688 // objective instead. The logic is the same as in
1689 // AddMultipleToSparseVectorAndIgnoreCommonIndex().
1690 if (std::abs(new_objective) > parameters_.drop_tolerance()) {
1691 lp->SetObjectiveCoefficient(col, new_objective);
1692 } else {
1693 lp->SetObjectiveCoefficient(col, 0.0);
1694 }
1695 }
1696 }
1697 row_deletion_helper_.MarkRowForDeletion(r.row[DELETED]);
1698 restore_stack_.push_back(r);
1699 }
1700
1701 if (!row_deletion_helper_.IsEmpty()) {
1702 // The order is important.
1704 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
1705 return true;
1706 }
1707 return false;
1708}
1709
// Postsolve: restores the deleted rows and replays restore_stack_ in reverse:
// derives each deleted row's constraint status from the substituted
// variable's status, recomputes the variable's primal value from the deleted
// row equation, then makes the variable VariableStatus::BASIC and sets the
// deleted row's dual value so that the variable's reduced cost is zero.
// NOTE(review): lines 1710 (function header), 1712, and the `case` labels of
// the switch (1717, 1721, 1726, 1731, 1734) plus 1719, 1724, 1729, 1737 are
// missing from this doxygen extraction, so the listing is incomplete.
1711 ProblemSolution* solution) const {
1713 row_deletion_helper_.RestoreDeletedRows(solution);
1714 for (const RestoreInfo& r : Reverse(restore_stack_)) {
1715 // Correct the constraint status.
1716 switch (solution->variable_statuses[r.col]) {
1718 solution->constraint_statuses[r.row[DELETED]] =
1720 break;
1722 solution->constraint_statuses[r.row[DELETED]] =
1723 r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_UPPER_BOUND
1725 break;
1727 solution->constraint_statuses[r.row[DELETED]] =
1728 r.coeff[DELETED] > 0.0 ? ConstraintStatus::AT_LOWER_BOUND
1730 break;
1732 solution->constraint_statuses[r.row[DELETED]] = ConstraintStatus::FREE;
1733 break;
1735 // The default is good here:
1736 DCHECK_EQ(solution->constraint_statuses[r.row[DELETED]],
1738 break;
1739 }
1740
1741 // Correct the primal variable value.
1742 {
1743 Fractional new_variable_value = solution->primal_values[r.col];
1744 for (const SparseColumn::Entry e : r.deleted_row_as_column) {
1745 const ColIndex col = RowToColIndex(e.row());
1746 if (col == r.col) continue;
1747 new_variable_value -= (e.coefficient() / r.coeff[DELETED]) *
1748 solution->primal_values[RowToColIndex(e.row())];
1749 }
1750 solution->primal_values[r.col] = new_variable_value;
1751 }
1752
1753 // In all cases, we will make the variable r.col VariableStatus::BASIC, so
1754 // we need to adjust the dual value of the deleted row so that the variable
1755 // reduced cost is zero. Note that there is nothing to do if the variable
1756 // was already basic.
1757 if (solution->variable_statuses[r.col] != VariableStatus::BASIC) {
1758 solution->variable_statuses[r.col] = VariableStatus::BASIC;
1759 Fractional current_reduced_cost =
1760 r.objective_coefficient -
1761 r.coeff[MODIFIED] * solution->dual_values[r.row[MODIFIED]];
1762 // We want current_reduced_cost - dual * coeff = 0, so:
1763 solution->dual_values[r.row[DELETED]] =
1764 current_reduced_cost / r.coeff[DELETED];
1765 } else {
1766 DCHECK_EQ(solution->dual_values[r.row[DELETED]], 0.0);
1767 }
1768 }
1769}
1770
1771// --------------------------------------------------------
1772// UnconstrainedVariablePreprocessor
1773// --------------------------------------------------------
1774
1775 namespace {
1776
1777 // Does the constraint block the variable to go to infinity in the given
1778 // direction? direction is either positive or negative and row is the index of
1779 // the constraint.
1780 bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction,
1781 RowIndex row) {
// For a positive direction, a finite constraint upper bound blocks the move.
// NOTE(review): the negative-direction branch (original line 1783) is missing
// from this extraction; presumably it tests the constraint lower bound — confirm
// against the original file.
1782 return direction > 0.0 ? lp.constraint_upper_bounds()[row] != kInfinity
1784 }
1785
1786 } // namespace
1787
// Removes a zero-cost variable that is unconstrained towards target_bound
// (+/- infinity): marks all rows touching the variable for deletion, records
// the data needed to restore a feasible value in RecoverSolution(), and marks
// the column itself for deletion.
// NOTE(review): the function-name line (original 1788) and several
// continuation lines (1790, 1803-1804, 1811-1812, 1821, 1825) are missing from
// this extraction.
1789 ColIndex col, Fractional target_bound, LinearProgram* lp) {
// Lazily size the postsolve bookkeeping on first use.
1791 if (rhs_.empty()) {
1792 rhs_.resize(lp->num_constraints(), 0.0);
1793 activity_sign_correction_.resize(lp->num_constraints(), 1.0);
1794 is_unbounded_.resize(lp->num_variables(), false);
1795 }
1796 const bool is_unbounded_up = (target_bound == kInfinity);
1797 const SparseColumn& column = lp->GetSparseColumn(col);
1798 for (const SparseColumn::Entry e : column) {
1799 const RowIndex row = e.row();
// Each row is saved exactly once, the first time it is marked.
1800 if (!row_deletion_helper_.IsRowMarked(row)) {
1801 row_deletion_helper_.MarkRowForDeletion(row);
// NOTE(review): the SaveColumn() arguments (original 1803-1804) are missing here.
1802 rows_saver_.SaveColumn(
1805 }
// Which constraint bound can still be violated when the variable moves
// towards target_bound depends on the sign of its coefficient.
1806 const bool is_constraint_upper_bound_relevant =
1807 e.coefficient() > 0.0 ? !is_unbounded_up : is_unbounded_up;
1808 activity_sign_correction_[row] =
1809 is_constraint_upper_bound_relevant ? 1.0 : -1.0;
// NOTE(review): the rhs_ value expression (original 1811-1812) is missing;
// it presumably selects the relevant finite constraint bound.
1810 rhs_[row] = is_constraint_upper_bound_relevant
1813 DCHECK(IsFinite(rhs_[row]));
1814
1815 // TODO(user): Here, we may render the row free, so subsequent columns
1816 // processed by the columns loop in Run() have more chance to be removed.
1817 // However, we need to be more careful during the postsolve() if we do that.
1818 }
1819 is_unbounded_[col] = true;
// NOTE(review): the arguments of MinInMagnitudeOrZeroIfInfinite (original
// 1821) are missing; likely the variable bounds of col.
1820 Fractional initial_feasible_value = MinInMagnitudeOrZeroIfInfinite(
1822 column_deletion_helper_.MarkColumnForDeletionWithState(
1823 col, initial_feasible_value,
1824 ComputeVariableStatus(initial_feasible_value,
1826 lp->variable_upper_bounds()[col]));
1827 }
1828
// Propagates dual-value bounds derived from the constraints to detect
// variables whose reduced cost cannot be zero; such variables can be fixed to
// a bound (or removed together with their constraints when the bound is
// infinite and the cost is zero). Returns true iff the lp was modified.
// NOTE(review): the Run() signature line (original 1829-1830) is missing from
// this extraction, as are several other lines flagged below.
1831 RETURN_VALUE_IF_NULL(lp, false);
1832
1833 // To simplify the problem if something is almost zero, we use the low
1834 // tolerance (1e-9 by default) to be defensive. But to detect an infeasibility
1835 // we want to be sure (especially since the problem is not scaled in the
1836 // presolver) so we use an higher tolerance.
1837 //
1838 // TODO(user): Expose it as a parameter. We could rename both to
1839 // preprocessor_low_tolerance and preprocessor_high_tolerance.
1840 const Fractional low_tolerance = parameters_.preprocessor_zero_tolerance();
1841 const Fractional high_tolerance = 1e-4;
1842
1843 // We start by the dual variable bounds from the constraints.
1844 const RowIndex num_rows = lp->num_constraints();
1845 dual_lb_.assign(num_rows, -kInfinity);
1846 dual_ub_.assign(num_rows, kInfinity);
1847 for (RowIndex row(0); row < num_rows; ++row) {
// A one-sided constraint restricts the sign of its dual value.
1848 if (lp->constraint_lower_bounds()[row] == -kInfinity) {
1849 dual_ub_[row] = 0.0;
1850 }
1851 if (lp->constraint_upper_bounds()[row] == kInfinity) {
1852 dual_lb_[row] = 0.0;
1853 }
1854 }
1855
1856 const ColIndex num_cols = lp->num_variables();
1857 may_have_participated_lb_.assign(num_cols, false);
1858 may_have_participated_ub_.assign(num_cols, false);
1859
1860 // We maintain a queue of columns to process.
1861 std::deque<ColIndex> columns_to_process;
1862 DenseBooleanRow in_columns_to_process(num_cols, true);
1863 std::vector<RowIndex> changed_rows;
1864 for (ColIndex col(0); col < num_cols; ++col) {
1865 columns_to_process.push_back(col);
1866 }
1867
1868 // Arbitrary limit to avoid corner cases with long loops.
1869 // TODO(user): expose this as a parameter? IMO it isn't really needed as we
1870 // shouldn't reach this limit except in corner cases.
1871 const int limit = 5 * num_cols.value();
1872 for (int count = 0; !columns_to_process.empty() && count < limit; ++count) {
1873 const ColIndex col = columns_to_process.front();
1874 columns_to_process.pop_front();
1875 in_columns_to_process[col] = false;
1876 if (column_deletion_helper_.IsColumnMarked(col)) continue;
1877
1878 const SparseColumn& column = lp->GetSparseColumn(col);
// NOTE(review): the col_cost initializer (original 1880) is missing from this
// extraction; presumably the objective coefficient of col.
1879 const Fractional col_cost =
1881 const Fractional col_lb = lp->variable_lower_bounds()[col];
1882 const Fractional col_ub = lp->variable_upper_bounds()[col];
1883
1884 // Compute the bounds on the reduced costs of this column.
// NOTE(review): the declarations of rc_lb / rc_ub (original 1885-1886) are
// missing; from the usage they are interval-sum accumulators.
1887 rc_lb.Add(col_cost);
1888 rc_ub.Add(col_cost);
1889 for (const SparseColumn::Entry e : column) {
1890 if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1891 const Fractional coeff = e.coefficient();
// The reduced cost is cost - sum(coeff * dual); the sign of coeff decides
// which dual bound contributes to which reduced-cost bound.
1892 if (coeff > 0.0) {
1893 rc_lb.Add(-coeff * dual_ub_[e.row()]);
1894 rc_ub.Add(-coeff * dual_lb_[e.row()]);
1895 } else {
1896 rc_lb.Add(-coeff * dual_lb_[e.row()]);
1897 rc_ub.Add(-coeff * dual_ub_[e.row()]);
1898 }
1899 }
1900
1901 // If the reduced cost domain do not contain zero (modulo the tolerance), we
1902 // can move the variable to its corresponding bound. Note that we need to be
1903 // careful that this variable didn't participate in creating the used
1904 // reduced cost bound in the first place.
1905 bool can_be_removed = false;
// NOTE(review): the declaration of target_bound (original 1906) is missing.
1907 bool rc_is_away_from_zero;
1908 if (rc_ub.Sum() <= low_tolerance) {
1909 can_be_removed = true;
1910 target_bound = col_ub;
// NOTE(review): the guard before the floor (original 1911) is missing;
// likely an in_mip_context_ / finiteness check.
1912 target_bound = std::floor(target_bound + high_tolerance);
1913 }
1914
1915 rc_is_away_from_zero = rc_ub.Sum() <= -high_tolerance;
1916 can_be_removed = !may_have_participated_ub_[col];
1917 }
1918 if (rc_lb.Sum() >= -low_tolerance) {
1919 // The second condition is here for the case we can choose one of the two
1920 // directions.
1921 if (!can_be_removed || !IsFinite(target_bound)) {
1922 can_be_removed = true;
1923 target_bound = col_lb;
// NOTE(review): the guard before the ceil (original 1924) is missing.
1925 target_bound = std::ceil(target_bound - high_tolerance);
1926 }
1927
1928 rc_is_away_from_zero = rc_lb.Sum() >= high_tolerance;
1929 can_be_removed = !may_have_participated_lb_[col];
1930 }
1931 }
1932
1933 if (can_be_removed) {
1934 if (IsFinite(target_bound)) {
1935 // Note that in MIP context, this assumes that the bounds of an integer
1936 // variable are integer.
// NOTE(review): one argument line of this call (original 1938) is missing.
1937 column_deletion_helper_.MarkColumnForDeletionWithState(
1939 ComputeVariableStatus(target_bound, col_lb, col_ub));
1940 continue;
1941 }
1942
1943 // If the target bound is infinite and the reduced cost bound is non-zero,
1944 // then the problem is ProblemStatus::INFEASIBLE_OR_UNBOUNDED.
1945 if (rc_is_away_from_zero) {
1946 VLOG(1) << "Problem INFEASIBLE_OR_UNBOUNDED, variable " << col
1947 << " can move to " << target_bound
1948 << " and its reduced cost is in [" << rc_lb.Sum() << ", "
1949 << rc_ub.Sum() << "]";
// NOTE(review): the status_ assignment (original 1950) is missing here.
1951 return false;
1952 } else {
1953 // We can remove this column and all its constraints! We just need to
1954 // choose proper variable values during the call to RecoverSolution()
1955 // that make all the constraints satisfiable. Unfortunately, this is not
1956 // so easy to do in the general case, so we only deal with a simpler
1957 // case when the cost of the variable is zero, and none of the
1958 // constraints (even the deleted one) block the variable moving to its
1959 // infinite target_bound.
1960 //
1961 // TODO(user): deal with the more generic case.
1962 if (col_cost != 0.0) continue;
1963
1964 const double sign_correction = (target_bound == kInfinity) ? 1.0 : -1.0;
1965 bool skip = false;
1966 for (const SparseColumn::Entry e : column) {
1967 // Note that it is important to check the rows that are already
1968 // deleted here, otherwise the post-solve will not work.
1969 if (IsConstraintBlockingVariable(
1970 *lp, sign_correction * e.coefficient(), e.row())) {
1971 skip = true;
1972 break;
1973 }
1974 }
1975 if (skip) continue;
1976
1977 // TODO(user): this also works if the variable is integer, but we must
1978 // choose an integer value during the post-solve. Implement this.
1979 if (in_mip_context_) continue;
// NOTE(review): the call performing the removal (original 1980, presumably
// RemoveZeroCostUnconstrainedVariable(col, target_bound, lp)) is missing.
1981 continue;
1982 }
1983 }
1984
1985 // The rest of the code will update the dual bounds. There is no need to do
1986 // it if the column was removed or if it is not unconstrained in some
1987 // direction.
1988 DCHECK(!can_be_removed);
1989 if (col_lb != -kInfinity && col_ub != kInfinity) continue;
1990
1991 // For MIP, we only exploit the constraints. TODO(user): It should probably
1992 // work with only small modification, investigate.
1993 if (in_mip_context_) continue;
1994
1995 changed_rows.clear();
1996 for (const SparseColumn::Entry e : column) {
1997 if (row_deletion_helper_.IsRowMarked(e.row())) continue;
1998 const Fractional c = e.coefficient();
1999 const RowIndex row = e.row();
// An unbounded-above variable: its reduced cost must be able to reach zero,
// which tightens the dual bound of each row it appears in.
2000 if (col_ub == kInfinity) {
2001 if (c > 0.0) {
2002 const Fractional candidate =
2003 rc_ub.SumWithoutUb(-c * dual_lb_[row]) / c;
2004 if (candidate < dual_ub_[row]) {
2005 dual_ub_[row] = candidate;
2006 may_have_participated_lb_[col] = true;
2007 changed_rows.push_back(row);
2008 }
2009 } else {
2010 const Fractional candidate =
2011 rc_ub.SumWithoutUb(-c * dual_ub_[row]) / c;
2012 if (candidate > dual_lb_[row]) {
2013 dual_lb_[row] = candidate;
2014 may_have_participated_lb_[col] = true;
2015 changed_rows.push_back(row);
2016 }
2017 }
2018 }
2019 if (col_lb == -kInfinity) {
2020 if (c > 0.0) {
2021 const Fractional candidate =
2022 rc_lb.SumWithoutLb(-c * dual_ub_[row]) / c;
2023 if (candidate > dual_lb_[row]) {
2024 dual_lb_[row] = candidate;
2025 may_have_participated_ub_[col] = true;
2026 changed_rows.push_back(row);
2027 }
2028 } else {
2029 const Fractional candidate =
2030 rc_lb.SumWithoutLb(-c * dual_lb_[row]) / c;
2031 if (candidate < dual_ub_[row]) {
2032 dual_ub_[row] = candidate;
2033 may_have_participated_ub_[col] = true;
2034 changed_rows.push_back(row);
2035 }
2036 }
2037 }
2038 }
2039
// Re-enqueue every column touching a row whose dual bound changed, so the
// propagation reaches a fixed point (subject to the iteration limit above).
2040 if (!changed_rows.empty()) {
2041 const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2042 for (const RowIndex row : changed_rows) {
2043 for (const SparseColumn::Entry entry :
2044 transpose.column(RowToColIndex(row))) {
2045 const ColIndex col = RowToColIndex(entry.row());
2046 if (!in_columns_to_process[col]) {
2047 columns_to_process.push_back(col);
2048 in_columns_to_process[col] = true;
2049 }
2050 }
2051 }
2052 }
2053 }
2054
2055 // Change the rhs to reflect the fixed variables. Note that is important to do
2056 // that after all the calls to RemoveZeroCostUnconstrainedVariable() because
2057 // RemoveZeroCostUnconstrainedVariable() needs to store the rhs before this
2058 // modification!
2059 const ColIndex end = column_deletion_helper_.GetMarkedColumns().size();
2060 for (ColIndex col(0); col < end; ++col) {
2061 if (column_deletion_helper_.IsColumnMarked(col)) {
2062 const Fractional target_bound =
2063 column_deletion_helper_.GetStoredValue()[col];
2064 SubtractColumnMultipleFromConstraintBound(col, target_bound, lp);
2065 }
2066 }
2067
2068 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2069 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2070 return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2071 }
2072
// Restores the deleted columns/rows and then shifts the value of each removed
// unbounded variable just enough to satisfy the constraints it appeared in.
// NOTE(review): the function-name line (original 2073) is missing from this
// extraction.
2074 ProblemSolution* solution) const {
2076 RETURN_IF_NULL(solution);
2077 column_deletion_helper_.RestoreDeletedColumns(solution);
2078 row_deletion_helper_.RestoreDeletedRows(solution);
2079
2080 struct DeletionEntry {
2081 RowIndex row;
2082 ColIndex col;
// NOTE(review): one field declaration (original 2083, presumably
// `Fractional coefficient;` given the usage below) is missing.
2084 };
2085 std::vector<DeletionEntry> entries;
2086
2087 // Compute the last deleted column index for each deleted rows.
2088 const RowIndex num_rows = solution->dual_values.size();
2089 RowToColMapping last_deleted_column(num_rows, kInvalidCol);
2090 for (RowIndex row(0); row < num_rows; ++row) {
2091 if (!row_deletion_helper_.IsRowMarked(row)) continue;
2092
2093 ColIndex last_col = kInvalidCol;
2094 Fractional last_coefficient;
2095 for (const SparseColumn::Entry e :
2096 rows_saver_.SavedColumn(RowToColIndex(row))) {
2097 const ColIndex col = RowToColIndex(e.row());
2098 if (is_unbounded_[col]) {
2099 last_col = col;
2100 last_coefficient = e.coefficient();
2101 }
2102 }
2103 if (last_col != kInvalidCol) {
2104 entries.push_back({row, last_col, last_coefficient});
2105 }
2106 }
2107
2108 // Sort by col first and then row.
2109 std::sort(entries.begin(), entries.end(),
2110 [](const DeletionEntry& a, const DeletionEntry& b) {
2111 if (a.col == b.col) return a.row < b.row;
2112 return a.col < b.col;
2113 });
2114
2115 // Note that this will be empty if there were no deleted rows.
2116 for (int i = 0; i < entries.size();) {
2117 const ColIndex col = entries[i].col;
2118 CHECK(is_unbounded_[col]);
2119
// Over all rows sharing this unbounded column, find the largest shift of the
// column value needed to make every violated row feasible again.
2120 Fractional primal_value_shift = 0.0;
2121 RowIndex row_at_bound = kInvalidRow;
2122 for (; i < entries.size(); ++i) {
2123 if (entries[i].col != col) break;
2124 const RowIndex row = entries[i].row;
2125
2126 // This is for VariableStatus::FREE rows.
2127 //
2128 // TODO(user): In presence of free row, we must move them to 0.
2129 // Note that currently VariableStatus::FREE rows should be removed before
2130 // this is called.
2131 DCHECK(IsFinite(rhs_[row]));
2132 if (!IsFinite(rhs_[row])) continue;
2133
2134 const SparseColumn& row_as_column =
2135 rows_saver_.SavedColumn(RowToColIndex(row));
2136 const Fractional activity =
2137 rhs_[row] - ScalarProduct(solution->primal_values, row_as_column);
2138
2139 // activity and sign correction must have the same sign or be zero. If
2140 // not, we find the first unbounded variable and change it accordingly.
2141 // Note that by construction, the variable value will move towards its
2142 // unbounded direction.
2143 if (activity * activity_sign_correction_[row] < 0.0) {
2144 const Fractional bound = activity / entries[i].coefficient;
2145 if (std::abs(bound) > std::abs(primal_value_shift)) {
2146 primal_value_shift = bound;
2147 row_at_bound = row;
2148 }
2149 }
2150 }
2151 solution->primal_values[col] += primal_value_shift;
2152 if (row_at_bound != kInvalidRow) {
// NOTE(review): the two ternary branches (original 2156-2157) selecting
// AT_UPPER/AT_LOWER for the binding row are missing from this extraction.
2154 solution->constraint_statuses[row_at_bound] =
2155 activity_sign_correction_[row_at_bound] == 1.0
2158 }
2159 }
2160 }
2161
2162// --------------------------------------------------------
2163// FreeConstraintPreprocessor
2164// --------------------------------------------------------
2165
// Deletes constraints that do not restrict anything. Returns true iff at
// least one row was removed.
// NOTE(review): the Run() signature line (original 2166-2167) and the if
// condition (original 2171-2173, presumably testing that both constraint
// bounds are infinite) are missing from this extraction.
2168 RETURN_VALUE_IF_NULL(lp, false);
2169 const RowIndex num_rows = lp->num_constraints();
2170 for (RowIndex row(0); row < num_rows; ++row) {
2174 row_deletion_helper_.MarkRowForDeletion(row);
2175 }
2176 }
2177 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2178 return !row_deletion_helper_.IsEmpty();
2179 }
2180
// Postsolve: re-inserts the deleted free rows into the solution.
// NOTE(review): the function-name line (original 2181) is missing from this
// extraction.
2182 ProblemSolution* solution) const {
2184 RETURN_IF_NULL(solution);
2185 row_deletion_helper_.RestoreDeletedRows(solution);
2186 }
2187
2188// --------------------------------------------------------
2189// EmptyConstraintPreprocessor
2190// --------------------------------------------------------
2191
// Removes constraints with no non-zero coefficient. If such a constraint
// cannot accept an activity of 0.0, the problem is declared infeasible.
// Returns true iff at least one row was removed.
// NOTE(review): the Run() signature line (original 2192-2193) is missing from
// this extraction.
2194 RETURN_VALUE_IF_NULL(lp, false);
2195 const RowIndex num_rows(lp->num_constraints());
2196 const ColIndex num_cols(lp->num_variables());
2197
2198 // Compute degree.
2199 StrictITIVector<RowIndex, int> degree(num_rows, 0);
2200 for (ColIndex col(0); col < num_cols; ++col) {
2201 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2202 ++degree[e.row()];
2203 }
2204 }
2205
2206 // Delete degree 0 rows.
2207 for (RowIndex row(0); row < num_rows; ++row) {
2208 if (degree[row] == 0) {
2209 // We need to check that 0.0 is allowed by the constraint bounds,
2210 // otherwise, the problem is ProblemStatus::PRIMAL_INFEASIBLE.
// NOTE(review): the call heads of the two tolerance checks (original 2211
// and 2213) are missing from this extraction.
2212 lp->constraint_lower_bounds()[row], 0) ||
2214 0, lp->constraint_upper_bounds()[row])) {
2215 VLOG(1) << "Problem PRIMAL_INFEASIBLE, constraint " << row
2216 << " is empty and its range ["
2217 << lp->constraint_lower_bounds()[row] << ","
2218 << lp->constraint_upper_bounds()[row] << "] doesn't contain 0.";
// NOTE(review): the status_ assignment (original 2219) is missing here.
2220 return false;
2221 }
2222 row_deletion_helper_.MarkRowForDeletion(row);
2223 }
2224 }
2225 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2226 return !row_deletion_helper_.IsEmpty();
2227 }
2228
// Postsolve: re-inserts the deleted empty rows into the solution.
// NOTE(review): the function-name line (original 2229) is missing from this
// extraction.
2230 ProblemSolution* solution) const {
2232 RETURN_IF_NULL(solution);
2233 row_deletion_helper_.RestoreDeletedRows(solution);
2234 }
2235
2236// --------------------------------------------------------
2237// SingletonPreprocessor
2238// --------------------------------------------------------
2239
// Snapshot constructor: captures everything about the singleton entry (costs,
// variable and constraint bounds, constraint status) that the various Undo
// routines need during postsolve.
// NOTE(review): the first signature line (original 2240, with the type and
// lp parameters) is missing from this extraction.
2241 MatrixEntry e, ConstraintStatus status)
2242 : type_(type),
2243 is_maximization_(lp.IsMaximizationProblem()),
2244 e_(e),
2245 cost_(lp.objective_coefficients()[e.col]),
2246 variable_lower_bound_(lp.variable_lower_bounds()[e.col]),
2247 variable_upper_bound_(lp.variable_upper_bounds()[e.col]),
2248 constraint_lower_bound_(lp.constraint_lower_bounds()[e.row]),
2249 constraint_upper_bound_(lp.constraint_upper_bounds()[e.row]),
2250 constraint_status_(status) {}
2251
// Dispatches to the type-specific undo routine recorded at presolve time.
// NOTE(review): the first signature line (original 2252) and the case labels
// for the last three branches (original 2260, 2263, 2266) are missing from
// this extraction; from the calls they are ZERO_COST_SINGLETON_COLUMN,
// SINGLETON_COLUMN_IN_EQUALITY and MAKE_CONSTRAINT_AN_EQUALITY.
2253 const SparseColumn& saved_column,
2254 const SparseColumn& saved_row,
2255 ProblemSolution* solution) const {
2256 switch (type_) {
2257 case SINGLETON_ROW:
2258 SingletonRowUndo(saved_column, solution);
2259 break;
2261 ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2262 break;
2264 SingletonColumnInEqualityUndo(parameters, saved_row, solution);
2265 break;
2267 MakeConstraintAnEqualityUndo(solution);
2268 break;
2269 }
2270 }
2271
// A singleton row (a * x op rhs) is equivalent to a bound on x: tighten the
// variable bounds with the implied ones (within tolerance) and delete the row.
2272 void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e,
2273 LinearProgram* lp) {
2274 Fractional implied_lower_bound =
2275 lp->constraint_lower_bounds()[e.row] / e.coeff;
2276 Fractional implied_upper_bound =
2277 lp->constraint_upper_bounds()[e.row] / e.coeff;
// Dividing by a negative coefficient flips the direction of the bounds.
2278 if (e.coeff < 0.0) {
2279 std::swap(implied_lower_bound, implied_upper_bound);
2280 }
2281
2282 const Fractional old_lower_bound = lp->variable_lower_bounds()[e.col];
2283 const Fractional old_upper_bound = lp->variable_upper_bounds()[e.col];
2284
// Only adopt an implied bound if it tightens the old one by more than the
// numerical error introduced by the division above.
2285 const Fractional potential_error =
2286 std::abs(parameters_.preprocessor_zero_tolerance() / e.coeff);
2287 Fractional new_lower_bound =
2288 implied_lower_bound - potential_error > old_lower_bound
2289 ? implied_lower_bound
2290 : old_lower_bound;
2291 Fractional new_upper_bound =
2292 implied_upper_bound + potential_error < old_upper_bound
2293 ? implied_upper_bound
2294 : old_upper_bound;
2295
2296 // This can happen if we ask for 1e-300 * x to be >= 1e9.
2297 if (new_upper_bound == -kInfinity || new_lower_bound == kInfinity) {
2298 VLOG(1) << "Problem ProblemStatus::PRIMAL_INFEASIBLE, singleton "
2299 "row causes the bound of the variable "
2300 << e.col << " to go to infinity.";
// NOTE(review): the status_ assignment (original 2301) is missing from this
// extraction.
2302 return;
2303 }
2304
2305 if (new_upper_bound < new_lower_bound) {
2306 if (!IsSmallerWithinFeasibilityTolerance(new_lower_bound,
2307 new_upper_bound)) {
2308 VLOG(1) << "Problem ProblemStatus::PRIMAL_INFEASIBLE, singleton "
2309 "row causes the bound of the variable "
2310 << e.col << " to be infeasible by "
2311 << new_lower_bound - new_upper_bound;
// NOTE(review): the status_ assignment (original 2312) is missing from this
// extraction.
2313 return;
2314 }
2315
2316 // Otherwise, fix the variable to one of its bounds.
2317 if (new_lower_bound == lp->variable_lower_bounds()[e.col]) {
2318 new_upper_bound = new_lower_bound;
2319 }
2320 if (new_upper_bound == lp->variable_upper_bounds()[e.col]) {
2321 new_lower_bound = new_upper_bound;
2322 }
2323
2324 // When both new bounds are coming from the constraint and are crossing, it
2325 // means the constraint bounds where originally crossing too. We arbitrarily
2326 // choose one of the bound in this case.
2327 //
2328 // TODO(user): The code in this file shouldn't create crossing bounds at
2329 // any point, so we could decide which bound to use directly on the user
2330 // given problem before running any presolve.
2331 new_upper_bound = new_lower_bound;
2332 }
2333 row_deletion_helper_.MarkRowForDeletion(e.row);
// NOTE(review): one argument line of the SingletonUndo constructor call
// (original 2335) is missing from this extraction.
2334 undo_stack_.push_back(SingletonUndo(SingletonUndo::SINGLETON_ROW, *lp, e,
2336 columns_saver_.SaveColumnIfNotAlreadyDone(e.col, lp->GetSparseColumn(e.col));
2337
2338 lp->SetVariableBounds(e.col, new_lower_bound, new_upper_bound);
2339 }
2340
2341 // The dual value of the row needs to be corrected to stay at the optimal.
// Undoes DeleteSingletonRow(): restores the variable bounds that the deleted
// singleton row had tightened and recomputes a consistent dual value and
// basis statuses for the restored row.
2342 void SingletonUndo::SingletonRowUndo(const SparseColumn& saved_column,
2343 ProblemSolution* solution) const {
2344 DCHECK_EQ(0, solution->dual_values[e_.row]);
2345
2346 // If the variable is basic or free, we can just keep the constraint
2347 // VariableStatus::BASIC and 0.0 as the dual value.
2348 const VariableStatus status = solution->variable_statuses[e_.col];
// NOTE(review): the early-return testing the BASIC/FREE statuses (original
// 2349) is missing from this extraction.
2350
2351 // Compute whether or not the variable bounds changed.
2352 Fractional implied_lower_bound = constraint_lower_bound_ / e_.coeff;
2353 Fractional implied_upper_bound = constraint_upper_bound_ / e_.coeff;
2354 if (e_.coeff < 0.0) {
2355 std::swap(implied_lower_bound, implied_upper_bound);
2356 }
2357 const bool lower_bound_changed = implied_lower_bound > variable_lower_bound_;
2358 const bool upper_bound_changed = implied_upper_bound < variable_upper_bound_;
2359
2360 if (!lower_bound_changed && !upper_bound_changed) return;
2361 if (status == VariableStatus::AT_LOWER_BOUND && !lower_bound_changed) return;
2362 if (status == VariableStatus::AT_UPPER_BOUND && !upper_bound_changed) return;
2363
2364 // This is the reduced cost of the variable before the singleton constraint is
2365 // added back.
2366 const Fractional reduced_cost =
2367 cost_ - ScalarProduct(solution->dual_values, saved_column);
2368 const Fractional reduced_cost_for_minimization =
2369 is_maximization_ ? -reduced_cost : reduced_cost;
2370
// NOTE(review): the guard opening this scope (original 2371) is missing from
// this extraction.
2372 DCHECK(lower_bound_changed || upper_bound_changed);
// If the sign of the reduced cost already makes the other (unchanged) bound
// optimal, the variable can simply stay non-basic at that bound.
2373 if (reduced_cost_for_minimization >= 0.0 && !lower_bound_changed) {
2374 solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2375 return;
2376 }
2377 if (reduced_cost_for_minimization <= 0.0 && !upper_bound_changed) {
2378 solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2379 return;
2380 }
2381 }
2382
2383 // If one of the variable bounds changes, and the variable is no longer at one
2384 // of its bounds, then its reduced cost needs to be set to 0.0 and the
2385 // variable becomes a basic variable. This is what the line below do, since
2386 // the new reduced cost of the variable will be equal to:
2387 // old_reduced_cost - coeff * solution->dual_values[row]
2388 //
2389 // TODO(user): This code is broken for integer variable.
2390 // Say our singleton row is 2 * y <= 5, and y was at its implied bound y = 2
2391 // at postsolve. The problem is that we can end up with an AT_UPPER_BOUND
2392 // status for the constraint 2 * y <= 5 which is not correct since the
2393 // activity is 4, and that break later preconditions. Maybe there is a way to
2394 // fix everything, but it seems tough to be sure.
2395 solution->dual_values[e_.row] = reduced_cost / e_.coeff;
2396 ConstraintStatus new_constraint_status = VariableToConstraintStatus(status);
// NOTE(review): the condition head (original 2397) and the two ternary
// branches (original 2400-2401) are missing from this extraction.
2398 (!lower_bound_changed || !upper_bound_changed)) {
2399 new_constraint_status = lower_bound_changed
2402 }
// A negative coefficient flips which side of the restored row is binding.
2403 if (e_.coeff < 0.0) {
2404 if (new_constraint_status == ConstraintStatus::AT_LOWER_BOUND) {
2405 new_constraint_status = ConstraintStatus::AT_UPPER_BOUND;
2406 } else if (new_constraint_status == ConstraintStatus::AT_UPPER_BOUND) {
2407 new_constraint_status = ConstraintStatus::AT_LOWER_BOUND;
2408 }
2409 }
2410 solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2411 solution->constraint_statuses[e_.row] = new_constraint_status;
2412 }
2413
2414void SingletonPreprocessor::UpdateConstraintBoundsWithVariableBounds(
2415 MatrixEntry e, LinearProgram* lp) {
2416 Fractional lower_delta = -e.coeff * lp->variable_upper_bounds()[e.col];
2417 Fractional upper_delta = -e.coeff * lp->variable_lower_bounds()[e.col];
2418 if (e.coeff < 0.0) {
2419 std::swap(lower_delta, upper_delta);
2420 }
2421 lp->SetConstraintBounds(e.row,
2422 lp->constraint_lower_bounds()[e.row] + lower_delta,
2423 lp->constraint_upper_bounds()[e.row] + upper_delta);
2424}
2425
// Decides whether an integer singleton column can be removed in MIP context:
// every other variable of the row must be integer and every coefficient and
// finite constraint bound must be an integer multiple of the singleton's
// coefficient (within the solution feasibility tolerance).
2426 bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable(
2427 const MatrixEntry& matrix_entry, const LinearProgram& lp) const {
// NOTE(review): one line (original 2428, likely a DCHECK on in_mip_context_)
// is missing from this extraction.
2429 DCHECK(lp.IsVariableInteger(matrix_entry.col));
2430 const SparseMatrix& transpose = lp.GetTransposeSparseMatrix();
2431 for (const SparseColumn::Entry entry :
2432 transpose.column(RowToColIndex(matrix_entry.row))) {
2433 // Check if the variable is integer.
2434 if (!lp.IsVariableInteger(RowToColIndex(entry.row()))) {
2435 return false;
2436 }
2437
2438 const Fractional coefficient = entry.coefficient();
2439 const Fractional coefficient_ratio = coefficient / matrix_entry.coeff;
2440 // Check if coefficient_ratio is integer.
// NOTE(review): the condition heads of the three integrality checks below
// (original 2441, 2450 and 2459) are missing from this extraction.
2442 coefficient_ratio, parameters_.solution_feasibility_tolerance())) {
2443 return false;
2444 }
2445 }
2446 const Fractional constraint_lb =
2447 lp.constraint_lower_bounds()[matrix_entry.row];
2448 if (IsFinite(constraint_lb)) {
2449 const Fractional lower_bound_ratio = constraint_lb / matrix_entry.coeff;
2451 lower_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2452 return false;
2453 }
2454 }
2455 const Fractional constraint_ub =
2456 lp.constraint_upper_bounds()[matrix_entry.row];
2457 if (IsFinite(constraint_ub)) {
2458 const Fractional upper_bound_ratio = constraint_ub / matrix_entry.coeff;
2460 upper_bound_ratio, parameters_.solution_feasibility_tolerance())) {
2461 return false;
2462 }
2463 }
2464 return true;
2465 }
2466
2467void SingletonPreprocessor::DeleteZeroCostSingletonColumn(
2468 const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2469 const ColIndex transpose_col = RowToColIndex(e.row);
2470 undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN,
2471 *lp, e, ConstraintStatus::FREE));
2472 const SparseColumn& row_as_col = transpose.column(transpose_col);
2473 rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_col);
2474 UpdateConstraintBoundsWithVariableBounds(e, lp);
2475 column_deletion_helper_.MarkColumnForDeletion(e.col);
2476}
2477
2478 // We need to restore the variable value in order to satisfy the constraint.
// Undoes DeleteZeroCostSingletonColumn(): picks a primal value and status for
// the restored variable (and possibly a new status for the constraint) that
// keep the solution feasible and basis-consistent.
2479 void SingletonUndo::ZeroCostSingletonColumnUndo(
2480 const GlopParameters& parameters, const SparseColumn& saved_row,
2481 ProblemSolution* solution) const {
2482 // If the variable was fixed, this is easy. Note that this is the only
2483 // possible case if the current constraint status is FIXED, except if the
2484 // variable bounds are small compared to the constraint bounds, like adding
2485 // 1e-100 to a fixed == 1 constraint.
2486 if (variable_upper_bound_ == variable_lower_bound_) {
2487 solution->primal_values[e_.col] = variable_lower_bound_;
2488 solution->variable_statuses[e_.col] = VariableStatus::FIXED_VALUE;
2489 return;
2490 }
2491
2492 const ConstraintStatus ct_status = solution->constraint_statuses[e_.row];
// For a fixed constraint, the sign of its dual value (corrected for the
// optimization direction) tells which variable bound is optimal.
2493 if (ct_status == ConstraintStatus::FIXED_VALUE) {
2494 const Fractional corrected_dual = is_maximization_
2495 ? -solution->dual_values[e_.row]
2496 : solution->dual_values[e_.row];
2497 if (corrected_dual > 0) {
2498 DCHECK(IsFinite(variable_lower_bound_));
2499 solution->primal_values[e_.col] = variable_lower_bound_;
2500 solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2501 } else {
2502 DCHECK(IsFinite(variable_upper_bound_));
2503 solution->primal_values[e_.col] = variable_upper_bound_;
2504 solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2505 }
2506 return;
2507 } else if (ct_status == ConstraintStatus::AT_LOWER_BOUND ||
2508 ct_status == ConstraintStatus::AT_UPPER_BOUND) {
// The variable bound that was folded into the binding constraint bound
// depends on the sign of the coefficient.
2509 if ((ct_status == ConstraintStatus::AT_UPPER_BOUND && e_.coeff > 0.0) ||
2510 (ct_status == ConstraintStatus::AT_LOWER_BOUND && e_.coeff < 0.0)) {
2511 DCHECK(IsFinite(variable_lower_bound_));
2512 solution->primal_values[e_.col] = variable_lower_bound_;
2513 solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2514 } else {
2515 DCHECK(IsFinite(variable_upper_bound_));
2516 solution->primal_values[e_.col] = variable_upper_bound_;
2517 solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2518 }
2519 if (constraint_upper_bound_ == constraint_lower_bound_) {
2520 solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2521 }
2522 return;
2523 }
2524
2525 // This is the activity of the constraint before the singleton variable is
2526 // added back to it.
2527 const Fractional activity = ScalarProduct(solution->primal_values, saved_row);
2528
2529 // First we try to fix the variable at its lower or upper bound and leave the
2530 // constraint VariableStatus::BASIC. Note that we use the same logic as in
2531 // Preprocessor::IsSmallerWithinPreprocessorZeroTolerance() which we can't use
2532 // here because we are not deriving from the Preprocessor class.
2533 const Fractional tolerance = parameters.preprocessor_zero_tolerance();
2534 const auto is_smaller_with_tolerance = [tolerance](Fractional a,
2535 Fractional b) {
// NOTE(review): the lambda body (original line 2536) is missing from this
// extraction; per the comment above it compares a <= b within tolerance.
2537 };
2538 if (variable_lower_bound_ != -kInfinity) {
2539 const Fractional activity_at_lb =
2540 activity + e_.coeff * variable_lower_bound_;
2541 if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_lb) &&
2542 is_smaller_with_tolerance(activity_at_lb, constraint_upper_bound_)) {
2543 solution->primal_values[e_.col] = variable_lower_bound_;
2544 solution->variable_statuses[e_.col] = VariableStatus::AT_LOWER_BOUND;
2545 return;
2546 }
2547 }
2548 if (variable_upper_bound_ != kInfinity) {
2549 const Fractional activity_at_ub =
2550 activity + e_.coeff * variable_upper_bound_;
2551 if (is_smaller_with_tolerance(constraint_lower_bound_, activity_at_ub) &&
2552 is_smaller_with_tolerance(activity_at_ub, constraint_upper_bound_)) {
2553 solution->primal_values[e_.col] = variable_upper_bound_;
2554 solution->variable_statuses[e_.col] = VariableStatus::AT_UPPER_BOUND;
2555 return;
2556 }
2557 }
2558
2559 // If the current constraint is UNBOUNDED, then the variable is too
2560 // because of the two cases above. We just set its status to
2561 // VariableStatus::FREE.
2562 if (constraint_lower_bound_ == -kInfinity &&
2563 constraint_upper_bound_ == kInfinity) {
2564 solution->primal_values[e_.col] = 0.0;
2565 solution->variable_statuses[e_.col] = VariableStatus::FREE;
2566 return;
2567 }
2568
2569 // If the previous cases didn't apply, the constraint will be fixed to its
2570 // bounds and the variable will be made VariableStatus::BASIC.
2571 solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2572 if (constraint_lower_bound_ == constraint_upper_bound_) {
2573 solution->primal_values[e_.col] =
2574 (constraint_lower_bound_ - activity) / e_.coeff;
2575 solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2576 return;
2577 }
2578
2579 bool set_constraint_to_lower_bound;
2580 if (constraint_lower_bound_ == -kInfinity) {
2581 set_constraint_to_lower_bound = false;
2582 } else if (constraint_upper_bound_ == kInfinity) {
2583 set_constraint_to_lower_bound = true;
2584 } else {
2585 // In this case we select the value that is the most inside the variable
2586 // bound.
2587 const Fractional to_lb = (constraint_lower_bound_ - activity) / e_.coeff;
2588 const Fractional to_ub = (constraint_upper_bound_ - activity) / e_.coeff;
2589 set_constraint_to_lower_bound =
2590 std::max(variable_lower_bound_ - to_lb, to_lb - variable_upper_bound_) <
2591 std::max(variable_lower_bound_ - to_ub, to_ub - variable_upper_bound_);
2592 }
2593
2594 if (set_constraint_to_lower_bound) {
2595 solution->primal_values[e_.col] =
2596 (constraint_lower_bound_ - activity) / e_.coeff;
2597 solution->constraint_statuses[e_.row] = ConstraintStatus::AT_LOWER_BOUND;
2598 } else {
2599 solution->primal_values[e_.col] =
2600 (constraint_upper_bound_ - activity) / e_.coeff;
2601 solution->constraint_statuses[e_.row] = ConstraintStatus::AT_UPPER_BOUND;
2602 }
2603 }
2604
// Deletes a singleton column appearing in an equality constraint by
// substituting the variable out of the objective: its cost contribution is
// rewritten using the equality, which changes the objective offset and the
// costs of the other columns of the row.
2605 void SingletonPreprocessor::DeleteSingletonColumnInEquality(
2606 const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2607 // Save information for the undo.
2608 const ColIndex transpose_col = RowToColIndex(e.row);
2609 const SparseColumn& row_as_column = transpose.column(transpose_col);
// NOTE(review): one argument line of the SingletonUndo constructor call
// (original 2612, presumably the ConstraintStatus) is missing from this
// extraction.
2610 undo_stack_.push_back(
2611 SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e,
2613 rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_column);
2614
2615 // Update the objective function using the equality constraint. We have
2616 // v_col*coeff + expression = rhs,
2617 // so the contribution of this variable to the cost function (v_col * cost)
2618 // can be rewritten as:
2619 // (rhs * cost - expression * cost) / coeff.
2620 const Fractional rhs = lp->constraint_upper_bounds()[e.row];
2621 const Fractional cost = lp->objective_coefficients()[e.col];
2622 const Fractional multiplier = cost / e.coeff;
2623 lp->SetObjectiveOffset(lp->objective_offset() + rhs * multiplier);
2624 for (const SparseColumn::Entry e : row_as_column) {
2625 const ColIndex col = RowToColIndex(e.row());
2626 if (!column_deletion_helper_.IsColumnMarked(col)) {
2627 Fractional new_cost =
2628 lp->objective_coefficients()[col] - e.coefficient() * multiplier;
2629
2630 // TODO(user): It is important to avoid having non-zero costs which are
2631 // the result of numerical error. This is because we still miss some
2632 // tolerances in a few preprocessors. Like an empty column with a cost of
2633 // 1e-17 and unbounded towards infinity is currently implying that the
2634 // problem is unbounded. This will need fixing.
2635 if (std::abs(new_cost) < parameters_.preprocessor_zero_tolerance()) {
2636 new_cost = 0.0;
2637 }
2638 lp->SetObjectiveCoefficient(col, new_cost);
2639 }
2640 }
2641
2642 // Now delete the column like a singleton column without cost.
2643 UpdateConstraintBoundsWithVariableBounds(e, lp);
2644 column_deletion_helper_.MarkColumnForDeletion(e.col);
2645 }
2646
2647void SingletonUndo::SingletonColumnInEqualityUndo(
2648 const GlopParameters& parameters, const SparseColumn& saved_row,
2649 ProblemSolution* solution) const {
2650 // First do the same as a zero-cost singleton column.
2651 ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
2652
2653 // Then, restore the dual optimal value taking into account the cost
2654 // modification.
2655 solution->dual_values[e_.row] += cost_ / e_.coeff;
2656 if (solution->constraint_statuses[e_.row] == ConstraintStatus::BASIC) {
2657 solution->variable_statuses[e_.col] = VariableStatus::BASIC;
2658 solution->constraint_statuses[e_.row] = ConstraintStatus::FIXED_VALUE;
2659 }
2660}
2661
2662void SingletonUndo::MakeConstraintAnEqualityUndo(
2663 ProblemSolution* solution) const {
2664 if (solution->constraint_statuses[e_.row] == ConstraintStatus::FIXED_VALUE) {
2665 solution->constraint_statuses[e_.row] = constraint_status_;
2666 }
2667}
2668
// Tries to convert the (inequality) constraint containing the singleton
// column entry e into an equality, by proving that one of the two constraint
// bounds can be reached/relaxed given the implied bounds on the variable.
// Returns true iff the constraint was made an equality (an undo entry is then
// pushed). Returns false otherwise, including when the problem is detected
// infeasible/unbounded.
// NOTE(review): doxygen extraction — several original lines are missing below
// (2736-2737, 2741, 2748, 2755, 2779, 2783, 2790, 2797). The missing lines
// appear to include the declaration of 'relaxed_status', the opening of the
// two bound-comparison if(...) conditions, and the status_ assignments taken
// on the infinite-bound branches. Verify against the upstream file.
2669 bool SingletonPreprocessor::MakeConstraintAnEqualityIfPossible(
2670 const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
2671 // TODO(user): We could skip early if the relevant constraint bound is
2672 // infinity.
2673 const Fractional cst_lower_bound = lp->constraint_lower_bounds()[e.row];
2674 const Fractional cst_upper_bound = lp->constraint_upper_bounds()[e.row];
// Already an equality: nothing to do.
2675 if (cst_lower_bound == cst_upper_bound) return true;
2676
2677 // To be efficient, we only process a row once and cache the domain that an
2678 // "artificial" extra variable x with coefficient 1.0 could take while still
2679 // making the constraint feasible. The domain bounds for the constraint e.row
2680 // will be stored in row_lb_sum_[e.row] and row_ub_sum_[e.row].
2681 const DenseRow& variable_ubs = lp->variable_upper_bounds();
2682 const DenseRow& variable_lbs = lp->variable_lower_bounds();
2683 if (e.row >= row_sum_is_cached_.size() || !row_sum_is_cached_[e.row]) {
2684 if (e.row >= row_sum_is_cached_.size()) {
2685 const int new_size = e.row.value() + 1;
2686 row_sum_is_cached_.resize(new_size);
2687 row_lb_sum_.resize(new_size);
2688 row_ub_sum_.resize(new_size);
2689 }
2690 row_sum_is_cached_[e.row] = true;
2691 row_lb_sum_[e.row].Add(cst_lower_bound);
2692 row_ub_sum_[e.row].Add(cst_upper_bound);
2693 for (const SparseColumn::Entry entry :
2694 transpose.column(RowToColIndex(e.row))) {
2695 const ColIndex row_as_col = RowToColIndex(entry.row());
2696
2697 // Tricky: Even if later more columns are deleted, these "cached" sums
2698 // will actually still be valid because we only delete columns in a
2699 // compatible way.
2700 //
2701 // TODO(user): Find a more robust way? it seems easy to add new deletion
2702 // rules that may break this assumption.
2703 if (column_deletion_helper_.IsColumnMarked(row_as_col)) continue;
2704 if (entry.coefficient() > 0.0) {
2705 row_lb_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2706 row_ub_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2707 } else {
2708 row_lb_sum_[e.row].Add(-entry.coefficient() * variable_lbs[row_as_col]);
2709 row_ub_sum_[e.row].Add(-entry.coefficient() * variable_ubs[row_as_col]);
2710 }
2711
2712 // TODO(user): Abort early if both sums contain more than 1 infinity?
2713 }
2714 }
2715
2716 // Now that the lb/ub sum for the row is cached, we can use it to compute the
2717 // implied bounds on the variable from this constraint and the other
2718 // variables.
2719 const Fractional c = e.coeff;
2720 const Fractional lb =
2721 c > 0.0 ? row_lb_sum_[e.row].SumWithoutLb(-c * variable_ubs[e.col]) / c
2722 : row_ub_sum_[e.row].SumWithoutUb(-c * variable_ubs[e.col]) / c;
2723 const Fractional ub =
2724 c > 0.0 ? row_ub_sum_[e.row].SumWithoutUb(-c * variable_lbs[e.col]) / c
2725 : row_lb_sum_[e.row].SumWithoutLb(-c * variable_lbs[e.col]) / c;
2726
2727 // Note that we could do the same for singleton variables with a cost of
2728 // 0.0, but such variable are already dealt with by
2729 // DeleteZeroCostSingletonColumn() so there is no point.
2730 const Fractional cost =
2731 lp->GetObjectiveCoefficientForMinimizationVersion(e.col);
2732 DCHECK_NE(cost, 0.0);
2733
2734 // Note that some of the tests below will be always true if the bounds of
2735 // the column of index col are infinite. This is the desired behavior.
// NOTE(review): lines 2736-2737 (declaration of 'relaxed_status' and the
// opening of this if condition) are missing from this extraction.
2738 ub, lp->variable_upper_bounds()[e.col])) {
2739 if (e.coeff > 0) {
2740 if (cst_upper_bound == kInfinity) {
// NOTE(review): line 2741 (likely a status_ update for the unbounded
// case) is missing here.
2742 } else {
2743 relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2744 lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2745 }
2746 } else {
2747 if (cst_lower_bound == -kInfinity) {
2749 } else {
2750 relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2751 lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2752 }
2753 }
2754
2756 DCHECK_EQ(ub, kInfinity);
2757 VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2758 "variable "
2759 << e.col << " has a cost (for minimization) of " << cost
2760 << " and is unbounded towards kInfinity.";
2761 return false;
2762 }
2763
2764 // This is important but tricky: The upper bound of the variable needs to
2765 // be relaxed. This is valid because the implied bound is lower than the
2766 // original upper bound here. This is needed, so that the optimal
2767 // primal/dual values of the new problem will also be optimal of the
2768 // original one.
2769 //
2770 // Let's prove the case coeff > 0.0 for a minimization problem. In the new
2771 // problem, because the variable is unbounded towards +infinity, its
2772 // reduced cost must satisfy at optimality rc = cost - coeff * dual_v >=
2773 // 0. But this implies dual_v <= cost / coeff <= 0. This is exactly what
2774 // is needed for the optimality of the initial problem since the
2775 // constraint will be at its upper bound, and the corresponding slack
2776 // condition is that the dual value needs to be <= 0.
2777 lp->SetVariableBounds(e.col, lp->variable_lower_bounds()[e.col], kInfinity);
2778 }
// NOTE(review): line 2779 (the opening of the symmetric lower-bound if
// condition) is missing from this extraction.
2780 lp->variable_lower_bounds()[e.col], lb)) {
2781 if (e.coeff > 0) {
2782 if (cst_lower_bound == -kInfinity) {
2784 } else {
2785 relaxed_status = ConstraintStatus::AT_LOWER_BOUND;
2786 lp->SetConstraintBounds(e.row, cst_lower_bound, cst_lower_bound);
2787 }
2788 } else {
2789 if (cst_upper_bound == kInfinity) {
2791 } else {
2792 relaxed_status = ConstraintStatus::AT_UPPER_BOUND;
2793 lp->SetConstraintBounds(e.row, cst_upper_bound, cst_upper_bound);
2794 }
2795 }
2796
2798 DCHECK_EQ(lb, -kInfinity);
2799 VLOG(1) << "Problem ProblemStatus::INFEASIBLE_OR_UNBOUNDED, singleton "
2800 "variable "
2801 << e.col << " has a cost (for minimization) of " << cost
2802 << " and is unbounded towards -kInfinity.";
2803 return false;
2804 }
2805
2806 // Same remark as above for a lower bounded variable this time.
2807 lp->SetVariableBounds(e.col, -kInfinity,
2808 lp->variable_upper_bounds()[e.col]);
2809 }
2810
// Success iff one of the branches above actually fixed the two constraint
// bounds to the same value.
2811 if (lp->constraint_lower_bounds()[e.row] ==
2812 lp->constraint_upper_bounds()[e.row]) {
2813 undo_stack_.push_back(SingletonUndo(
2814 SingletonUndo::MAKE_CONSTRAINT_AN_EQUALITY, *lp, e, relaxed_status));
2815 return true;
2816 }
2817 return false;
2818 }
2819
// Main entry point of SingletonPreprocessor: repeatedly removes singleton
// columns and singleton rows (removing one may create new ones of the other
// kind) until a fixed point is reached, then actually deletes the marked
// rows/columns from the LP. Returns true iff the LP was modified.
// NOTE(review): doxygen extraction — the function signature lines (original
// 2820-2821, presumably "bool SingletonPreprocessor::Run(LinearProgram* lp)
// {") are missing here.
2822 RETURN_VALUE_IF_NULL(lp, false);
2823 const SparseMatrix& matrix = lp->GetSparseMatrix();
2824 const SparseMatrix& transpose = lp->GetTransposeSparseMatrix();
2825
2826 // Initialize column_to_process with the current singleton columns.
2827 ColIndex num_cols(matrix.num_cols());
2828 RowIndex num_rows(matrix.num_rows());
2829 StrictITIVector<ColIndex, EntryIndex> column_degree(num_cols, EntryIndex(0));
2830 std::vector<ColIndex> column_to_process;
2831 for (ColIndex col(0); col < num_cols; ++col) {
2832 column_degree[col] = matrix.column(col).num_entries();
2833 if (column_degree[col] == 1) {
2834 column_to_process.push_back(col);
2835 }
2836 }
2837
2838 // Initialize row_to_process with the current singleton rows.
2839 StrictITIVector<RowIndex, EntryIndex> row_degree(num_rows, EntryIndex(0));
2840 std::vector<RowIndex> row_to_process;
2841 for (RowIndex row(0); row < num_rows; ++row) {
2842 row_degree[row] = transpose.column(RowToColIndex(row)).num_entries();
2843 if (row_degree[row] == 1) {
2844 row_to_process.push_back(row);
2845 }
2846 }
2847
2848 // Process current singleton rows/columns and enqueue new ones.
2849 while (status_ == ProblemStatus::INIT &&
2850 (!column_to_process.empty() || !row_to_process.empty())) {
2851 while (status_ == ProblemStatus::INIT && !column_to_process.empty()) {
2852 const ColIndex col = column_to_process.back();
2853 column_to_process.pop_back();
// Degree can drop to zero if the column's row was deleted meanwhile.
2854 if (column_degree[col] <= 0) continue;
2855 const MatrixEntry e = GetSingletonColumnMatrixEntry(col, matrix);
2856 if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2857 !IntegerSingletonColumnIsRemovable(e, *lp)) {
2858 continue;
2859 }
2860
2861 // TODO(user): It seems better to process all the singleton columns with
2862 // a cost of zero first.
2863 if (lp->objective_coefficients()[col] == 0.0) {
2864 DeleteZeroCostSingletonColumn(transpose, e, lp);
2865 } else {
2866 // We don't want to do a substitution if the entry is too small and
2867 // should be probably set to zero.
2868 if (std::abs(e.coeff) < parameters_.preprocessor_zero_tolerance()) {
2869 continue;
2870 }
2871 if (MakeConstraintAnEqualityIfPossible(transpose, e, lp)) {
2872 DeleteSingletonColumnInEquality(transpose, e, lp);
2873 } else {
2874 continue;
2875 }
2876 }
// Removing the column may turn its row into a singleton row.
2877 --row_degree[e.row];
2878 if (row_degree[e.row] == 1) {
2879 row_to_process.push_back(e.row);
2880 }
2881 }
2882 while (status_ == ProblemStatus::INIT && !row_to_process.empty()) {
2883 const RowIndex row = row_to_process.back();
2884 row_to_process.pop_back();
2885 if (row_degree[row] <= 0) continue;
2886 const MatrixEntry e = GetSingletonRowMatrixEntry(row, transpose);
2887
2888 // TODO(user): We should be able to restrict the variable bounds with the
2889 // ones of the constraint all the time. However, some situation currently
2890 // break the presolve, and it seems hard to fix in a 100% safe way.
2891 if (in_mip_context_ && lp->IsVariableInteger(e.col) &&
2892 !IntegerSingletonColumnIsRemovable(e, *lp)) {
2893 continue;
2894 }
2895
2896 DeleteSingletonRow(e, lp);
// Symmetrically, removing the row may create a new singleton column.
2897 --column_degree[e.col];
2898 if (column_degree[e.col] == 1) {
2899 column_to_process.push_back(e.col);
2900 }
2901 }
2902 }
2903
2904 if (status_ != ProblemStatus::INIT) return false;
2905 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
2906 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
2907 return !column_deletion_helper_.IsEmpty() || !row_deletion_helper_.IsEmpty();
2908 }
2909
// Post-solve for SingletonPreprocessor: restores deleted rows/columns into
// the solution, then replays the undo stack in reverse order.
// NOTE(review): doxygen extraction — the function signature lines (original
// 2910-2911) are missing here.
2912 RETURN_IF_NULL(solution);
2913
2914 // Note that the two deletion helpers must restore 0.0 values in the positions
2915 // that will be used during Undo(). That is, all the calls done by this class
2916 // to MarkColumnForDeletion() should be done with 0.0 as the value to restore
2917 // (which is already the case when using MarkRowForDeletion()).
2918 // This is important because the various Undo() functions assume that a
2919 // primal/dual variable value which isn't restored yet has the value of 0.0.
2920 column_deletion_helper_.RestoreDeletedColumns(solution);
2921 row_deletion_helper_.RestoreDeletedRows(solution);
2922
2923 // It is important to undo the operations in the correct order, i.e. in the
2924 // reverse order in which they were done.
2925 for (int i = undo_stack_.size() - 1; i >= 0; --i) {
// Fetch the saved column/row snapshot relevant to this undo entry (an
// empty column is returned when nothing was saved for that index).
2926 const SparseColumn& saved_col =
2927 columns_saver_.SavedOrEmptyColumn(undo_stack_[i].Entry().col);
2928 const SparseColumn& saved_row = rows_saver_.SavedOrEmptyColumn(
2929 RowToColIndex(undo_stack_[i].Entry().row));
2930 undo_stack_[i].Undo(parameters_, saved_col, saved_row, solution);
2931 }
2932 }
2933
// Returns the single non-deleted entry of the given singleton column.
// NOTE(review): doxygen extraction — original line 2944 (likely an error
// status_ assignment taken before the fallback return) is missing here.
2934 MatrixEntry SingletonPreprocessor::GetSingletonColumnMatrixEntry(
2935 ColIndex col, const SparseMatrix& matrix) {
2936 for (const SparseColumn::Entry e : matrix.column(col)) {
// Skip entries whose row has already been marked for deletion.
2937 if (!row_deletion_helper_.IsRowMarked(e.row())) {
2938 DCHECK_NE(0.0, e.coefficient());
2939 return MatrixEntry(e.row(), col, e.coefficient());
2940 }
2941 }
2942 // This shouldn't happen.
2943 LOG(DFATAL) << "No unmarked entry in a column that is supposed to have one.";
2945 return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2946 }
2947
// Returns the single non-deleted entry of the given singleton row, scanning
// the row via the transposed matrix.
// NOTE(review): doxygen extraction — original line 2959 (likely an error
// status_ assignment taken before the fallback return) is missing here.
2948 MatrixEntry SingletonPreprocessor::GetSingletonRowMatrixEntry(
2949 RowIndex row, const SparseMatrix& transpose) {
2950 for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
2951 const ColIndex col = RowToColIndex(e.row());
// Skip entries whose column has already been marked for deletion.
2952 if (!column_deletion_helper_.IsColumnMarked(col)) {
2953 DCHECK_NE(0.0, e.coefficient());
2954 return MatrixEntry(row, col, e.coefficient());
2955 }
2956 }
2957 // This shouldn't happen.
2958 LOG(DFATAL) << "No unmarked entry in a row that is supposed to have one.";
2960 return MatrixEntry(RowIndex(0), ColIndex(0), 0.0);
2961 }
2962
2963// --------------------------------------------------------
2964// RemoveNearZeroEntriesPreprocessor
2965// --------------------------------------------------------
2966
// Removes matrix/objective entries whose magnitude is so small (relative to
// the variable's bound magnitude and the row degree) that dropping them has a
// bounded impact on primal feasibility. Always returns false: no post-solve
// step is needed.
// NOTE(review): doxygen extraction — the function signature lines (original
// 2967-2968), lines 2998-2999 (presumably the per-column lower_bound /
// upper_bound declarations), and line 3008 (presumably the call that removes
// the near-zero entries of the column, closed on line 3009) are missing here.
2969 RETURN_VALUE_IF_NULL(lp, false);
2970 const ColIndex num_cols = lp->num_variables();
2971 if (num_cols == 0) return false;
2972
2973 // We will use a different threshold for each row depending on its degree.
2974 // We use Fractionals for convenience since they will be used as such below.
2975 const RowIndex num_rows = lp->num_constraints();
2976 DenseColumn row_degree(num_rows, 0.0);
2977 Fractional num_non_zero_objective_coefficients = 0.0;
2978 for (ColIndex col(0); col < num_cols; ++col) {
2979 for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
2980 row_degree[e.row()] += 1.0;
2981 }
2982 if (lp->objective_coefficients()[col] != 0.0) {
2983 num_non_zero_objective_coefficients += 1.0;
2984 }
2985 }
2986
2987 // To not have too many parameters, we use the preprocessor_zero_tolerance.
2988 const Fractional allowed_impact = parameters_.preprocessor_zero_tolerance();
2989
2990 // TODO(user): Our criteria ensure that during presolve a primal feasible
2991 // solution will stay primal feasible. However, we have no guarantee on the
2992 // dual-feasibility (because the dual variable values range is not taken into
2993 // account). Fix that? or find a better criteria since it seems that on all
2994 // our current problems, this preprocessor helps and doesn't introduce errors.
2995 const EntryIndex initial_num_entries = lp->num_entries();
2996 int num_zeroed_objective_coefficients = 0;
2997 for (ColIndex col(0); col < num_cols; ++col) {
3000
3001 // TODO(user): Write a small class that takes a matrix, its transpose, row
3002 // and column bounds, and "propagate" the bounds as much as possible so we
3003 // can use this better estimate here and remove more near-zero entries.
3004 const Fractional max_magnitude =
3005 std::max(std::abs(lower_bound), std::abs(upper_bound));
// Skip free or fixed-at-zero variables: no meaningful threshold exists.
3006 if (max_magnitude == kInfinity || max_magnitude == 0) continue;
3007 const Fractional threshold = allowed_impact / max_magnitude;
3009 threshold, row_degree);
3010
3011 if (lp->objective_coefficients()[col] != 0.0 &&
3012 num_non_zero_objective_coefficients *
3013 std::abs(lp->objective_coefficients()[col]) <
3014 threshold) {
3015 lp->SetObjectiveCoefficient(col, 0.0);
3016 ++num_zeroed_objective_coefficients;
3017 }
3018 }
3019
3020 const EntryIndex num_entries = lp->num_entries();
3021 if (num_entries != initial_num_entries) {
3022 VLOG(1) << "Removed " << initial_num_entries - num_entries
3023 << " near-zero entries.";
3024 }
3025 if (num_zeroed_objective_coefficients > 0) {
3026 VLOG(1) << "Removed " << num_zeroed_objective_coefficients
3027 << " near-zero objective coefficients.";
3028 }
3029
3030 // No post-solve is required.
3031 return false;
3032 }
3033
// Intentionally a no-op: RemoveNearZeroEntriesPreprocessor needs no
// post-solve (its Run() always returns false).
// NOTE(review): doxygen extraction — the first line of the signature
// (original 3034) is missing here.
3035 ProblemSolution* solution) const {}
3036
3037// --------------------------------------------------------
3038// SingletonColumnSignPreprocessor
3039// --------------------------------------------------------
3040
// Flips the sign of every singleton column whose single coefficient is
// negative, so that all singleton columns have a positive coefficient.
// Changed columns are recorded for the post-solve. Returns true iff at least
// one column was flipped.
// NOTE(review): doxygen extraction — the function signature lines (original
// 3041-3042) are missing, as are lines 3051, 3058 and 3060 (presumably the
// bound-swapping SetVariableBounds(...) call and the objective sign flip that
// accompany MultiplyByConstant(-1.0)).
3043 RETURN_VALUE_IF_NULL(lp, false);
3044 const ColIndex num_cols = lp->num_variables();
3045 if (num_cols == 0) return false;
3046
3047 changed_columns_.clear();
3048 int num_singletons = 0;
3049 for (ColIndex col(0); col < num_cols; ++col) {
3050 SparseColumn* sparse_column = lp->GetMutableSparseColumn(col);
3052 if (sparse_column->num_entries() == 1) {
3053 ++num_singletons;
3054 }
3055 if (sparse_column->num_entries() == 1 &&
3056 sparse_column->GetFirstCoefficient() < 0) {
// Negating the column means the variable is replaced by its opposite;
// its bounds (and objective coefficient) must be negated and swapped too.
3057 sparse_column->MultiplyByConstant(-1.0);
3059 -lp->variable_lower_bounds()[col]);
3061 changed_columns_.push_back(col);
3062 }
3063 }
3064 VLOG(1) << "Changed the sign of " << changed_columns_.size() << " columns.";
3065 VLOG(1) << num_singletons << " singleton columns left.";
3066 return !changed_columns_.empty();
3067 }
3068
// Post-solve: for each column whose sign was flipped in Run(), negate the
// primal value back and mirror the AT_LOWER/AT_UPPER variable statuses.
// NOTE(review): doxygen extraction — original lines 3069 (signature start),
// 3071, 3077-3078 and 3080 (presumably the AT_UPPER_BOUND branch and the two
// status re-assignments) are missing here.
3070 ProblemSolution* solution) const {
3072 RETURN_IF_NULL(solution);
3073 for (int i = 0; i < changed_columns_.size(); ++i) {
3074 const ColIndex col = changed_columns_[i];
3075 solution->primal_values[col] = -solution->primal_values[col];
3076 const VariableStatus status = solution->variable_statuses[col];
3079 } else if (status == VariableStatus::AT_LOWER_BOUND) {
3081 }
3082 }
3083 }
3084
3085// --------------------------------------------------------
3086// DoubletonEqualityRowPreprocessor
3087// --------------------------------------------------------
3088
// For every equality row with exactly two entries (aX + bY = c), substitutes
// one variable out (DELETED) and keeps the other (MODIFIED), tightening the
// kept variable's bounds and updating the objective. Marks the row and the
// deleted column for removal. Returns true iff something was deleted.
// NOTE(review): doxygen extraction — the function signature lines (original
// 3089-3090) are missing, as well as lines 3117-3118 (rest of the row-filter
// condition), 3195 and 3253 (presumably status_ = ProblemStatus::ABNORMAL on
// the overflow branches), 3217 and 3229 (the AT_UPPER/AT_LOWER alternatives
// of the two ternaries), and 3266 (the AddMultipleToSparseVectorAndIgnore-
// CommonIndex-style call closed on line 3267). Verify against upstream.
3091 RETURN_VALUE_IF_NULL(lp, false);
3092
3093 // This is needed at postsolve.
3094 //
3095 // TODO(user): Get rid of the FIXED status instead to avoid spending
3096 // time/memory for no good reason here.
3097 saved_row_lower_bounds_ = lp->constraint_lower_bounds();
3098 saved_row_upper_bounds_ = lp->constraint_upper_bounds();
3099
3100 // This is needed for postsolving dual.
3101 saved_objective_ = lp->objective_coefficients();
3102
3103 // Note that we don't update the transpose during this preprocessor run.
3104 const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix();
3105
3106 // Heuristic: We try to subtitute sparse columns first to avoid a complexity
3107 // explosion. Note that if we do long chain of substitution, we can still end
3108 // up with a complexity of O(num_rows x num_cols) instead of O(num_entries).
3109 //
3110 // TODO(user): There is probably some more robust ways.
3111 std::vector<std::pair<int64_t, RowIndex>> sorted_rows;
3112 const RowIndex num_rows(lp->num_constraints());
3113 for (RowIndex row(0); row < num_rows; ++row) {
3114 const SparseColumn& original_row =
3115 original_transpose.column(RowToColIndex(row));
3116 if (original_row.num_entries() != 2 ||
3119 continue;
3120 }
// Score = total number of entries in the two columns of this row; rows
// touching sparser columns are substituted first.
3121 int64_t score = 0;
3122 for (const SparseColumn::Entry e : original_row) {
3123 const ColIndex col = RowToColIndex(e.row());
3124 score += lp->GetSparseColumn(col).num_entries().value();
3125 }
3126 sorted_rows.push_back({score, row});
3127 }
3128 std::sort(sorted_rows.begin(), sorted_rows.end());
3129
3130 // Iterate over the rows that were already doubletons before this preprocessor
3131 // run, and whose items don't belong to a column that we deleted during this
3132 // run. This implies that the rows are only ever touched once per run, because
3133 // we only modify rows that have an item on a deleted column.
3134 for (const auto p : sorted_rows) {
3135 const RowIndex row = p.second;
3136 const SparseColumn& original_row =
3137 original_transpose.column(RowToColIndex(row));
3138
3139 // Collect the two row items. Skip the ones involving a deleted column.
3140 // Note: we filled r.col[] and r.coeff[] by item order, and currently we
3141 // always pick the first column as the to-be-deleted one.
3142 // TODO(user): make a smarter choice of which column to delete, and
3143 // swap col[] and coeff[] accordingly.
3144 RestoreInfo r; // Use a short name since we're using it everywhere.
3145 int entry_index = 0;
3146 for (const SparseColumn::Entry e : original_row) {
3147 const ColIndex col = RowToColIndex(e.row());
3148 if (column_deletion_helper_.IsColumnMarked(col)) continue;
3149 r.col[entry_index] = col;
3150 r.coeff[entry_index] = e.coefficient();
3151 DCHECK_NE(0.0, r.coeff[entry_index]);
3152 ++entry_index;
3153 }
3154
3155 // Discard some cases that will be treated by other preprocessors, or by
3156 // another run of this one.
3157 // 1) One or two of the items were in a deleted column.
3158 if (entry_index < 2) continue;
3159
3160 // Fill the RestoreInfo, even if we end up not using it (because we
3161 // give up on preprocessing this row): it has a bunch of handy shortcuts.
3162 r.row = row;
3163 r.rhs = lp->constraint_lower_bounds()[row];
3164 for (int col_choice = 0; col_choice < NUM_DOUBLETON_COLS; ++col_choice) {
3165 const ColIndex col = r.col[col_choice];
3166 r.lb[col_choice] = lp->variable_lower_bounds()[col];
3167 r.ub[col_choice] = lp->variable_upper_bounds()[col];
3168 r.objective_coefficient[col_choice] = lp->objective_coefficients()[col];
3169 }
3170
3171 // 2) One of the columns is fixed: don't bother, it will be treated
3172 // by the FixedVariablePreprocessor.
3173 if (r.lb[DELETED] == r.ub[DELETED] || r.lb[MODIFIED] == r.ub[MODIFIED]) {
3174 continue;
3175 }
3176
3177 // Look at the bounds of both variables and exit early if we can delegate
3178 // to another pre-processor; otherwise adjust the bounds of the remaining
3179 // variable as necessary.
3180 // If the current row is: aX + bY = c, then the bounds of Y must be
3181 // adjusted to satisfy Y = c/b + (-a/b)X
3182 //
3183 // Note: when we compute the coefficients of these equations, we can cause
3184 // underflows/overflows that could be avoided if we did the computations
3185 // more carefully; but for now we just treat those cases as
3186 // ProblemStatus::ABNORMAL.
3187 // TODO(user): consider skipping the problematic rows in this preprocessor,
3188 // or trying harder to avoid the under/overflow.
3189 {
3190 const Fractional carry_over_offset = r.rhs / r.coeff[MODIFIED];
3191 const Fractional carry_over_factor =
3192 -r.coeff[DELETED] / r.coeff[MODIFIED];
3193 if (!IsFinite(carry_over_offset) || !IsFinite(carry_over_factor) ||
3194 carry_over_factor == 0.0) {
3196 break;
3197 }
3198
3199 Fractional lb = r.lb[MODIFIED];
3200 Fractional ub = r.ub[MODIFIED];
3201 Fractional carried_over_lb =
3202 r.lb[DELETED] * carry_over_factor + carry_over_offset;
3203 Fractional carried_over_ub =
3204 r.ub[DELETED] * carry_over_factor + carry_over_offset;
3205 if (carry_over_factor < 0) {
3206 std::swap(carried_over_lb, carried_over_ub);
3207 }
3208 if (carried_over_lb <= lb) {
3209 // Default (and simplest) case: the lower bound didn't change.
3210 r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3211 MODIFIED, VariableStatus::AT_LOWER_BOUND, lb);
3212 } else {
3213 lb = carried_over_lb;
3214 r.bound_backtracking_at_lower_bound = RestoreInfo::ColChoiceAndStatus(
3215 DELETED,
3216 carry_over_factor > 0 ? VariableStatus::AT_LOWER_BOUND
3218 carry_over_factor > 0 ? r.lb[DELETED] : r.ub[DELETED]);
3219 }
3220 if (carried_over_ub >= ub) {
3221 // Default (and simplest) case: the upper bound didn't change.
3222 r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3223 MODIFIED, VariableStatus::AT_UPPER_BOUND, ub);
3224 } else {
3225 ub = carried_over_ub;
3226 r.bound_backtracking_at_upper_bound = RestoreInfo::ColChoiceAndStatus(
3227 DELETED,
3228 carry_over_factor > 0 ? VariableStatus::AT_UPPER_BOUND
3230 carry_over_factor > 0 ? r.ub[DELETED] : r.lb[DELETED]);
3231 }
3232 // 3) If the new bounds are fixed (the domain is a singleton) or
3233 // infeasible, then we let the
3234 // ForcingAndImpliedFreeConstraintPreprocessor do the work.
3235 if (IsSmallerWithinPreprocessorZeroTolerance(ub, lb)) continue;
3236 lp->SetVariableBounds(r.col[MODIFIED], lb, ub);
3237 }
3238
3239 restore_stack_.push_back(r);
3240
3241 // Now, perform the substitution. If the current row is: aX + bY = c
3242 // then any other row containing 'X' with coefficient x can remove the
3243 // entry in X, and instead add an entry on 'Y' with coefficient x(-b/a)
3244 // and a constant offset x(c/a).
3245 // Looking at the matrix, this translates into colY += (-b/a) colX.
3246 DCHECK_NE(r.coeff[DELETED], 0.0);
3247 const Fractional substitution_factor =
3248 -r.coeff[MODIFIED] / r.coeff[DELETED]; // -b/a
3249 const Fractional constant_offset_factor = r.rhs / r.coeff[DELETED]; // c/a
3250 // Again we don't bother too much with over/underflows.
3251 if (!IsFinite(substitution_factor) || substitution_factor == 0.0 ||
3252 !IsFinite(constant_offset_factor)) {
3254 break;
3255 }
3256
3257 // Note that we do not save again a saved column, so that we only save
3258 // columns from the initial LP. This is important to limit the memory usage.
3259 // It complexify a bit the postsolve though.
3260 for (const int col_choice : {DELETED, MODIFIED}) {
3261 const ColIndex col = r.col[col_choice];
3262 columns_saver_.SaveColumnIfNotAlreadyDone(col, lp->GetSparseColumn(col));
3263 }
3264
3265 lp->GetSparseColumn(r.col[DELETED])
3267 substitution_factor, r.row, parameters_.drop_tolerance(),
3268 lp->GetMutableSparseColumn(r.col[MODIFIED]));
3269
3270 // Apply similar operations on the objective coefficients.
3271 // Note that the offset is being updated by
3272 // SubtractColumnMultipleFromConstraintBound() below.
3273 {
3274 const Fractional new_objective =
3275 r.objective_coefficient[MODIFIED] +
3276 substitution_factor * r.objective_coefficient[DELETED];
3277 if (std::abs(new_objective) > parameters_.drop_tolerance()) {
3278 lp->SetObjectiveCoefficient(r.col[MODIFIED], new_objective);
3279 } else {
3280 lp->SetObjectiveCoefficient(r.col[MODIFIED], 0.0);
3281 }
3282 }
3283
3284 // Carry over the constant factor of the substitution as well.
3285 // TODO(user): rename that method to reflect the fact that it also updates
3286 // the objective offset, in the other direction.
3287 SubtractColumnMultipleFromConstraintBound(r.col[DELETED],
3288 constant_offset_factor, lp);
3289
3290 // If we keep substituing the same "dense" columns over and over, we can
3291 // have a memory in O(num_rows * num_cols) which can be order of magnitude
3292 // larger than the original problem. It is important to reclaim the memory
3293 // of the deleted column right away.
3294 lp->GetMutableSparseColumn(r.col[DELETED])->ClearAndRelease();
3295
3296 // Mark the column and the row for deletion.
3297 column_deletion_helper_.MarkColumnForDeletion(r.col[DELETED]);
3298 row_deletion_helper_.MarkRowForDeletion(r.row);
3299 }
3300 if (status_ != ProblemStatus::INIT) return false;
3301 lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
3302 lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
3303
3304 return !column_deletion_helper_.IsEmpty();
3305 }
3306
// Post-solve for DoubletonEqualityRowPreprocessor: restores primal values and
// statuses for the substituted (DELETED) variables, then reconstructs the
// dual values of the deleted rows by solving what is, by construction, a
// triangular system over the newly-basic columns.
// NOTE(review): doxygen extraction — original lines 3307/3309 (signature
// start and, presumably, SCOPED_INSTRUCTION_COUNT), the case labels of the
// switch on the MODIFIED variable's status (3319, 3326, 3328, 3334, 3336)
// and line 3367 (the assignment pinning the deleted constraint to
// FIXED_VALUE) are missing here. Verify against the upstream file.
3308 ProblemSolution* solution) const {
3310 RETURN_IF_NULL(solution);
3311 column_deletion_helper_.RestoreDeletedColumns(solution);
3312 row_deletion_helper_.RestoreDeletedRows(solution);
3313
3314 const ColIndex num_cols = solution->variable_statuses.size();
3315 StrictITIVector<ColIndex, bool> new_basic_columns(num_cols, false);
3316
// Replay the substitutions in reverse order of application.
3317 for (const RestoreInfo& r : Reverse(restore_stack_)) {
3318 switch (solution->variable_statuses[r.col[MODIFIED]]) {
3320 LOG(DFATAL) << "FIXED variable produced by DoubletonPreprocessor!";
3321 // In non-fastbuild mode, we rely on the rest of the code producing an
3322 // ProblemStatus::ABNORMAL status here.
3323 break;
3324 // When the modified variable is either basic or free, we keep it as is,
3325 // and simply make the deleted one basic.
3327 ABSL_FALLTHROUGH_INTENDED;
3329 // Several code paths set the deleted column as basic. The code that
3330 // sets its value in that case is below, after the switch() block.
3331 solution->variable_statuses[r.col[DELETED]] = VariableStatus::BASIC;
3332 new_basic_columns[r.col[DELETED]] = true;
3333 break;
3335 ABSL_FALLTHROUGH_INTENDED;
3337 // The bound was induced by a bound of one of the two original
3338 // variables. Put that original variable at its bound, and make
3339 // the other one basic.
3340 const RestoreInfo::ColChoiceAndStatus& bound_backtracking =
3341 solution->variable_statuses[r.col[MODIFIED]] ==
3343 ? r.bound_backtracking_at_lower_bound
3344 : r.bound_backtracking_at_upper_bound;
3345 const ColIndex bounded_var = r.col[bound_backtracking.col_choice];
3346 const ColIndex basic_var =
3347 r.col[OtherColChoice(bound_backtracking.col_choice)];
3348 solution->variable_statuses[bounded_var] = bound_backtracking.status;
3349 solution->primal_values[bounded_var] = bound_backtracking.value;
3350 solution->variable_statuses[basic_var] = VariableStatus::BASIC;
3351 new_basic_columns[basic_var] = true;
3352 // If the modified column is VariableStatus::BASIC, then its value is
3353 // already set correctly. If it's the deleted column that is basic, its
3354 // value is set below the switch() block.
3355 }
3356 }
3357
3358 // Restore the value of the deleted column if it is VariableStatus::BASIC.
// Solve aX + bY = c for the deleted variable given the modified one.
3359 if (solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC) {
3360 solution->primal_values[r.col[DELETED]] =
3361 (r.rhs -
3362 solution->primal_values[r.col[MODIFIED]] * r.coeff[MODIFIED]) /
3363 r.coeff[DELETED];
3364 }
3365
3366 // Make the deleted constraint status FIXED.
3368 }
3369
3370 // Now we need to reconstruct the dual. This is a bit tricky and is basically
3371 // the same as inverting a really structed and easy to invert matrix. For n
3372 // doubleton rows, looking only at the new_basic_columns, there is exactly n
3373 // by construction (one per row). We consider only this n x n matrix, and we
3374 // must choose dual row values so that we make the reduced costs zero on all
3375 // these columns.
3376 //
3377 // There is always an order that make this matrix triangular. We start with a
3378 // singleton column which fix its corresponding row and then work on the
3379 // square submatrix left. We can always start and continue, because if we take
3380 // the first substitued row of the current submatrix, if its deleted column
3381 // was in the submatrix we have a singleton column. If it is outside, we have
3382 // 2 n - 1 entries for a matrix with n columns, so one must be singleton.
3383 //
3384 // Note(user): Another advantage of working on the "original" matrix before
3385 // this presolve is an increased precision.
3386 //
3387 // TODO(user): We can probably use something better than a vector of set,
3388 // but the number of entry is really sparse though. And the size of a set<int>
3389 // is 24 bytes, same as a std::vector<int>.
3390 StrictITIVector<ColIndex, std::set<int>> col_to_index(num_cols);
3391 for (int i = 0; i < restore_stack_.size(); ++i) {
3392 const RestoreInfo& r = restore_stack_[i];
3393 col_to_index[r.col[MODIFIED]].insert(i);
3394 col_to_index[r.col[DELETED]].insert(i);
3395 }
3396 std::vector<ColIndex> singleton_col;
3397 for (ColIndex col(0); col < num_cols; ++col) {
3398 if (!new_basic_columns[col]) continue;
3399 if (col_to_index[col].size() == 1) singleton_col.push_back(col);
3400 }
// Peeling loop: each processed singleton column fixes one row's dual value
// and may expose new singleton columns.
3401 while (!singleton_col.empty()) {
3402 const ColIndex col = singleton_col.back();
3403 singleton_col.pop_back();
3404 if (!new_basic_columns[col]) continue;
3405 if (col_to_index[col].empty()) continue;
3406 CHECK_EQ(col_to_index[col].size(), 1);
3407 const int index = *col_to_index[col].begin();
3408 const RestoreInfo& r = restore_stack_[index];
3409
3410 const ColChoice col_choice = r.col[MODIFIED] == col ? MODIFIED : DELETED;
3411
3412 // Adjust the dual value of the deleted constraint so that col have a
3413 // reduced costs of zero.
3414 CHECK_EQ(solution->dual_values[r.row], 0.0);
3415 const SparseColumn& saved_col =
3416 columns_saver_.SavedColumn(r.col[col_choice]);
3417 const Fractional current_reduced_cost =
3418 saved_objective_[r.col[col_choice]] -
3419 PreciseScalarProduct(solution->dual_values, saved_col);
3420 solution->dual_values[r.row] = current_reduced_cost / r.coeff[col_choice];
3421
3422 // Update singleton
3423 col_to_index[r.col[DELETED]].erase(index);
3424 col_to_index[r.col[MODIFIED]].erase(index);
3425 if (col_to_index[r.col[DELETED]].size() == 1) {
3426 singleton_col.push_back(r.col[DELETED]);
3427 }
3428 if (col_to_index[r.col[MODIFIED]].size() == 1) {
3429 singleton_col.push_back(r.col[MODIFIED]);
3430 }
3431 }
3432
3433 // Fix potential bad ConstraintStatus::FIXED_VALUE statuses.
3434 FixConstraintWithFixedStatuses(saved_row_lower_bounds_,
3435 saved_row_upper_bounds_, solution);
3436 }
3437
3438void FixConstraintWithFixedStatuses(const DenseColumn& row_lower_bounds,
3439 const DenseColumn& row_upper_bounds,
3440 ProblemSolution* solution) {
3441 const RowIndex num_rows = solution->constraint_statuses.size();
3442 DCHECK_EQ(row_lower_bounds.size(), num_rows);
3443 DCHECK_EQ(row_upper_bounds.size(), num_rows);
3444 for (RowIndex row(0); row < num_rows; ++row) {
3446 continue;
3447 }
3448 if (row_lower_bounds[row] == row_upper_bounds[row]) continue;
3449
3450 // We need to fix the status and we just need to make sure that the bound we
3451 // choose satisfies the LP optimality conditions.
3452 if (solution->dual_values[row] > 0) {
3454 } else {
3456 }
3457 }
3458}
3459
3460void DoubletonEqualityRowPreprocessor::
3461 SwapDeletedAndModifiedVariableRestoreInfo(RestoreInfo* r) {
3462 using std::swap;
3463 swap(r->col[DELETED], r->col[MODIFIED]);
3464 swap(r->coeff[DELETED], r->coeff[MODIFIED]);
3465 swap(r->lb[DELETED], r->lb[MODIFIED]);
3466 swap(r->ub[DELETED], r->ub[MODIFIED]);
3467 swap(r->objective_coefficient[DELETED], r->objective_coefficient[MODIFIED]);
3468}
3469
3470// --------------------------------------------------------
3471// DualizerPreprocessor
3472// --------------------------------------------------------
3473
3476 RETURN_VALUE_IF_NULL(lp, false);
3478 return false;
3479 }
3480
3481 // Store the original problem size and direction.
3482 primal_num_cols_ = lp->num_variables();
3483 primal_num_rows_ = lp->num_constraints();
3484 primal_is_maximization_problem_ = lp->IsMaximizationProblem();
3485
3486 // If we need to decide whether or not to take the dual, we only take it when
3487 // the matrix has more rows than columns. The number of rows of a linear
3488 // program gives the size of the square matrices we need to invert and the
3489 // order of iterations of the simplex method. So solving a program with less
3490 // rows is likely a better alternative. Note that the number of row of the
3491 // dual is the number of column of the primal.
3492 //
3493 // Note however that the default is a conservative factor because if the
3494 // user gives us a primal program, we assume he knows what he is doing and
3495 // sometimes a problem is a lot faster to solve in a given formulation
3496 // even if its dimension would say otherwise.
3497 //
3498 // Another reason to be conservative, is that the number of columns of the
3499 // dual is the number of rows of the primal plus up to two times the number of
3500 // columns of the primal.
3501 //
3502 // TODO(user): This effect can be lowered if we use some of the extra
3503 // variables as slack variable which we are not doing at this point.
3505 if (1.0 * primal_num_rows_.value() <
3506 parameters_.dualizer_threshold() * primal_num_cols_.value()) {
3507 return false;
3508 }
3509 }
3510
3511 // Save the linear program bounds.
3512 // Also make sure that all the bounded variable have at least one bound set to
3513 // zero. This will be needed to post-solve a dual-basic solution into a
3514 // primal-basic one.
3515 const ColIndex num_cols = lp->num_variables();
3516 variable_lower_bounds_.assign(num_cols, 0.0);
3517 variable_upper_bounds_.assign(num_cols, 0.0);
3518 for (ColIndex col(0); col < num_cols; ++col) {
3519 const Fractional lower = lp->variable_lower_bounds()[col];
3520 const Fractional upper = lp->variable_upper_bounds()[col];
3521
3522 // We need to shift one of the bound to zero.
3523 variable_lower_bounds_[col] = lower;
3524 variable_upper_bounds_[col] = upper;
3525 const Fractional value = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3526 if (value != 0.0) {
3527 lp->SetVariableBounds(col, lower - value, upper - value);
3528 SubtractColumnMultipleFromConstraintBound(col, value, lp);
3529 }
3530 }
3531
3532 // Fill the information that will be needed during postsolve.
3533 //
3534 // TODO(user): This will break if PopulateFromDual() is changed. so document
3535 // the convention or make the function fill these vectors?
3536 dual_status_correspondence_.clear();
3537 for (RowIndex row(0); row < primal_num_rows_; ++row) {
3540 if (lower_bound == upper_bound) {
3541 dual_status_correspondence_.push_back(VariableStatus::FIXED_VALUE);
3542 } else if (upper_bound != kInfinity) {
3543 dual_status_correspondence_.push_back(VariableStatus::AT_UPPER_BOUND);
3544 } else if (lower_bound != -kInfinity) {
3545 dual_status_correspondence_.push_back(VariableStatus::AT_LOWER_BOUND);
3546 } else {
3547 LOG(DFATAL) << "There should be no free constraint in this lp.";
3548 }
3549 }
3550 slack_or_surplus_mapping_.clear();
3551 for (ColIndex col(0); col < primal_num_cols_; ++col) {
3554 if (lower_bound != -kInfinity) {
3555 dual_status_correspondence_.push_back(
3558 slack_or_surplus_mapping_.push_back(col);
3559 }
3560 }
3561 for (ColIndex col(0); col < primal_num_cols_; ++col) {
3564 if (upper_bound != kInfinity) {
3565 dual_status_correspondence_.push_back(
3568 slack_or_surplus_mapping_.push_back(col);
3569 }
3570 }
3571
3572 // TODO(user): There are two different ways to deal with ranged rows when
3573 // taking the dual. The default way is to duplicate such rows, see
3574 // PopulateFromDual() for details. Another way is to call
3575 // lp->AddSlackVariablesForFreeAndBoxedRows() before calling
3576 // PopulateFromDual(). Adds an option to switch between the two as this may
3577 // change the running time?
3578 //
3579 // Note however that the default algorithm is likely to result in a faster
3580 // solving time because the dual program will have less rows.
3581 LinearProgram dual;
3582 dual.PopulateFromDual(*lp, &duplicated_rows_);
3583 dual.Swap(lp);
3584 return true;
3585}
3586
3587// Note(user): This assumes that LinearProgram.PopulateFromDual() uses
3588// the first ColIndex and RowIndex for the rows and columns of the given
3589// problem.
3592 RETURN_IF_NULL(solution);
3593
3594 DenseRow new_primal_values(primal_num_cols_, 0.0);
3595 VariableStatusRow new_variable_statuses(primal_num_cols_,
3597 DCHECK_LE(primal_num_cols_, RowToColIndex(solution->dual_values.size()));
3598 for (ColIndex col(0); col < primal_num_cols_; ++col) {
3599 RowIndex row = ColToRowIndex(col);
3600 const Fractional lower = variable_lower_bounds_[col];
3601 const Fractional upper = variable_upper_bounds_[col];
3602
3603 // The new variable value corresponds to the dual value of the dual.
3604 // The shift applied during presolve needs to be removed.
3605 const Fractional shift = MinInMagnitudeOrZeroIfInfinite(lower, upper);
3606 new_primal_values[col] = solution->dual_values[row] + shift;
3607
3608 // A variable will be VariableStatus::BASIC if the dual constraint is not.
3610 new_variable_statuses[col] = VariableStatus::BASIC;
3611 } else {
3612 // Otherwise, the dual value must be zero (if the solution is feasible),
3613 // and the variable is at an exact bound or zero if it is
3614 // VariableStatus::FREE. Note that this works because the bounds are
3615 // shifted to 0.0 in the presolve!
3616 new_variable_statuses[col] = ComputeVariableStatus(shift, lower, upper);
3617 }
3618 }
3619
3620 // A basic variable that corresponds to slack/surplus variable is the same as
3621 // a basic row. The new variable status (that was just set to
3622 // VariableStatus::BASIC above)
3623 // needs to be corrected and depends on the variable type (slack/surplus).
3624 const ColIndex begin = RowToColIndex(primal_num_rows_);
3625 const ColIndex end = dual_status_correspondence_.size();
3626 DCHECK_GE(solution->variable_statuses.size(), end);
3627 DCHECK_EQ(end - begin, slack_or_surplus_mapping_.size());
3628 for (ColIndex index(begin); index < end; ++index) {
3629 if (solution->variable_statuses[index] == VariableStatus::BASIC) {
3630 const ColIndex col = slack_or_surplus_mapping_[index - begin];
3631 const VariableStatus status = dual_status_correspondence_[index];
3632
3633 // The new variable value is set to its exact bound because the dual
3634 // variable value can be imprecise.
3635 new_variable_statuses[col] = status;
3638 new_primal_values[col] = variable_upper_bounds_[col];
3639 } else {
3641 new_primal_values[col] = variable_lower_bounds_[col];
3642 }
3643 }
3644 }
3645
3646 // Note the <= in the DCHECK, since we may need to add variables when taking
3647 // the dual.
3648 DCHECK_LE(primal_num_rows_, ColToRowIndex(solution->primal_values.size()));
3649 DenseColumn new_dual_values(primal_num_rows_, 0.0);
3650 ConstraintStatusColumn new_constraint_statuses(primal_num_rows_,
3652
3653 // Note that the sign need to be corrected because of the special behavior of
3654 // PopulateFromDual() on a maximization problem, see the comment in the
3655 // declaration of PopulateFromDual().
3656 Fractional sign = primal_is_maximization_problem_ ? -1 : 1;
3657 for (RowIndex row(0); row < primal_num_rows_; ++row) {
3658 const ColIndex col = RowToColIndex(row);
3659 new_dual_values[row] = sign * solution->primal_values[col];
3660
3661 // A constraint will be ConstraintStatus::BASIC if the dual variable is not.
3662 if (solution->variable_statuses[col] != VariableStatus::BASIC) {
3663 new_constraint_statuses[row] = ConstraintStatus::BASIC;
3664 if (duplicated_rows_[row] != kInvalidCol) {
3665 if (solution->variable_statuses[duplicated_rows_[row]] ==
3667 // The duplicated row is always about the lower bound.
3668 new_constraint_statuses[row] = ConstraintStatus::AT_LOWER_BOUND;
3669 }
3670 }
3671 } else {
3672 // ConstraintStatus::AT_LOWER_BOUND/ConstraintStatus::AT_UPPER_BOUND/
3673 // ConstraintStatus::FIXED depend on the type of the constraint at this
3674 // position.
3675 new_constraint_statuses[row] =
3676 VariableToConstraintStatus(dual_status_correspondence_[col]);
3677 }
3678
3679 // If the original row was duplicated, we need to take into account the
3680 // value of the corresponding dual column.
3681 if (duplicated_rows_[row] != kInvalidCol) {
3682 new_dual_values[row] +=
3683 sign * solution->primal_values[duplicated_rows_[row]];
3684 }
3685
3686 // Because non-basic variable values are exactly at one of their bounds, a
3687 // new basic constraint will have a dual value exactly equal to zero.
3688 DCHECK(new_dual_values[row] == 0 ||
3689 new_constraint_statuses[row] != ConstraintStatus::BASIC);
3690 }
3691
3692 solution->status = ChangeStatusToDualStatus(solution->status);
3693 new_primal_values.swap(solution->primal_values);
3694 new_dual_values.swap(solution->dual_values);
3695 new_variable_statuses.swap(solution->variable_statuses);
3696 new_constraint_statuses.swap(solution->constraint_statuses);
3697}
3698
3700 ProblemStatus status) const {
3701 switch (status) {
3714 default:
3715 return status;
3716 }
3717}
3718
3719// --------------------------------------------------------
3720// ShiftVariableBoundsPreprocessor
3721// --------------------------------------------------------
3722
3725 RETURN_VALUE_IF_NULL(lp, false);
3726
3727 // Save the linear program bounds before shifting them.
3728 bool all_variable_domains_contain_zero = true;
3729 const ColIndex num_cols = lp->num_variables();
3730 variable_initial_lbs_.assign(num_cols, 0.0);
3731 variable_initial_ubs_.assign(num_cols, 0.0);
3732 for (ColIndex col(0); col < num_cols; ++col) {
3733 variable_initial_lbs_[col] = lp->variable_lower_bounds()[col];
3734 variable_initial_ubs_[col] = lp->variable_upper_bounds()[col];
3735 if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3736 all_variable_domains_contain_zero = false;
3737 }
3738 }
3739 VLOG(1) << "Maximum variable bounds magnitude (before shift): "
3740 << ComputeMaxVariableBoundsMagnitude(*lp);
3741
3742 // Abort early if there is nothing to do.
3743 if (all_variable_domains_contain_zero) return false;
3744
3745 // Shift the variable bounds and compute the changes to the constraint bounds
3746 // and objective offset in a precise way.
3747 int num_bound_shifts = 0;
3748 const RowIndex num_rows = lp->num_constraints();
3749 KahanSum objective_offset;
3750 absl::StrongVector<RowIndex, KahanSum> row_offsets(num_rows.value());
3751 offsets_.assign(num_cols, 0.0);
3752 for (ColIndex col(0); col < num_cols; ++col) {
3753 if (0.0 < variable_initial_lbs_[col] || 0.0 > variable_initial_ubs_[col]) {
3754 Fractional offset = MinInMagnitudeOrZeroIfInfinite(
3755 variable_initial_lbs_[col], variable_initial_ubs_[col]);
3757 // In the integer case, we truncate the number because if for instance
3758 // the lower bound is a positive integer + epsilon, we only want to
3759 // shift by the integer and leave the lower bound at epsilon.
3760 //
3761 // TODO(user): This would not be needed, if we always make the bound
3762 // of an integer variable integer before applying this preprocessor.
3763 offset = trunc(offset);
3764 } else {
3765 DCHECK_NE(offset, 0.0);
3766 }
3767 offsets_[col] = offset;
3768 lp->SetVariableBounds(col, variable_initial_lbs_[col] - offset,
3769 variable_initial_ubs_[col] - offset);
3770 const SparseColumn& sparse_column = lp->GetSparseColumn(col);
3771 for (const SparseColumn::Entry e : sparse_column) {
3772 row_offsets[e.row()].Add(e.coefficient() * offset);
3773 }
3774 objective_offset.Add(lp->objective_coefficients()[col] * offset);
3775 ++num_bound_shifts;
3776 }
3777 }
3778 VLOG(1) << "Maximum variable bounds magnitude (after " << num_bound_shifts
3779 << " shifts): " << ComputeMaxVariableBoundsMagnitude(*lp);
3780
3781 // Apply the changes to the constraint bound and objective offset.
3782 for (RowIndex row(0); row < num_rows; ++row) {
3783 if (!std::isfinite(row_offsets[row].Value())) {
3784 // This can happen for bad input where we get floating point overflow.
3785 // We can even get nan if we have two overflow in opposite direction.
3786 VLOG(1) << "Shifting variable bounds causes a floating point overflow "
3787 "for constraint "
3788 << row << ".";
3790 return false;
3791 }
3793 row, lp->constraint_lower_bounds()[row] - row_offsets[row].Value(),
3794 lp->constraint_upper_bounds()[row] - row_offsets[row].Value());
3795 }
3796 if (!std::isfinite(objective_offset.Value())) {
3797 VLOG(1) << "Shifting variable bounds causes a floating point overflow "
3798 "for the objective.";
3800 return false;
3801 }
3802 lp->SetObjectiveOffset(lp->objective_offset() + objective_offset.Value());
3803 return true;
3804}
3805
3807 ProblemSolution* solution) const {
3809 RETURN_IF_NULL(solution);
3810 const ColIndex num_cols = solution->variable_statuses.size();
3811 for (ColIndex col(0); col < num_cols; ++col) {
3812 if (in_mip_context_) {
3813 solution->primal_values[col] += offsets_[col];
3814 } else {
3815 switch (solution->variable_statuses[col]) {
3817 ABSL_FALLTHROUGH_INTENDED;
3819 solution->primal_values[col] = variable_initial_lbs_[col];
3820 break;
3822 solution->primal_values[col] = variable_initial_ubs_[col];
3823 break;
3825 solution->primal_values[col] += offsets_[col];
3826 break;
3828 break;
3829 }
3830 }
3831 }
3832}
3833
3834// --------------------------------------------------------
3835// ScalingPreprocessor
3836// --------------------------------------------------------
3837
3840 RETURN_VALUE_IF_NULL(lp, false);
3841 if (!parameters_.use_scaling()) return false;
3842
3843 // Save the linear program bounds before scaling them.
3844 const ColIndex num_cols = lp->num_variables();
3845 variable_lower_bounds_.assign(num_cols, 0.0);
3846 variable_upper_bounds_.assign(num_cols, 0.0);
3847 for (ColIndex col(0); col < num_cols; ++col) {
3848 variable_lower_bounds_[col] = lp->variable_lower_bounds()[col];
3849 variable_upper_bounds_[col] = lp->variable_upper_bounds()[col];
3850 }
3851
3852 // See the doc of these functions for more details.
3853 // It is important to call Scale() before the other two.
3854 Scale(lp, &scaler_, parameters_.scaling_method());
3855 cost_scaling_factor_ = lp->ScaleObjective(parameters_.cost_scaling());
3856 bound_scaling_factor_ = lp->ScaleBounds();
3857
3858 return true;
3859}
3860
3863 RETURN_IF_NULL(solution);
3864
3865 scaler_.ScaleRowVector(false, &(solution->primal_values));
3866 for (ColIndex col(0); col < solution->primal_values.size(); ++col) {
3867 solution->primal_values[col] *= bound_scaling_factor_;
3868 }
3869
3870 scaler_.ScaleColumnVector(false, &(solution->dual_values));
3871 for (RowIndex row(0); row < solution->dual_values.size(); ++row) {
3872 solution->dual_values[row] *= cost_scaling_factor_;
3873 }
3874
3875 // Make sure the variable are at they exact bounds according to their status.
3876 // This just remove a really low error (about 1e-15) but allows to keep the
3877 // variables at their exact bounds.
3878 const ColIndex num_cols = solution->primal_values.size();
3879 for (ColIndex col(0); col < num_cols; ++col) {
3880 switch (solution->variable_statuses[col]) {
3882 ABSL_FALLTHROUGH_INTENDED;
3884 solution->primal_values[col] = variable_upper_bounds_[col];
3885 break;
3887 solution->primal_values[col] = variable_lower_bounds_[col];
3888 break;
3890 ABSL_FALLTHROUGH_INTENDED;
3892 break;
3893 }
3894 }
3895}
3896
3897// --------------------------------------------------------
3898// ToMinimizationPreprocessor
3899// --------------------------------------------------------
3900
3903 RETURN_VALUE_IF_NULL(lp, false);
3904 if (lp->IsMaximizationProblem()) {
3905 for (ColIndex col(0); col < lp->num_variables(); ++col) {
3907 if (coeff != 0.0) {
3909 }
3910 }
3911 lp->SetMaximizationProblem(false);
3914 }
3915 return false;
3916}
3917
3919 ProblemSolution* solution) const {}
3920
3921// --------------------------------------------------------
3922// AddSlackVariablesPreprocessor
3923// --------------------------------------------------------
3924
3927 RETURN_VALUE_IF_NULL(lp, false);
3929 /*detect_integer_constraints=*/true);
3930 first_slack_col_ = lp->GetFirstSlackVariable();
3931 return true;
3932}
3933
3935 ProblemSolution* solution) const {
3937 RETURN_IF_NULL(solution);
3938
3939 // Compute constraint statuses from statuses of slack variables.
3940 const RowIndex num_rows = solution->dual_values.size();
3941 for (RowIndex row(0); row < num_rows; ++row) {
3942 const ColIndex slack_col = first_slack_col_ + RowToColIndex(row);
3943 const VariableStatus variable_status =
3944 solution->variable_statuses[slack_col];
3945 ConstraintStatus constraint_status = ConstraintStatus::FREE;
3946 // The slack variables have reversed bounds - if the value of the variable
3947 // is at one bound, the value of the constraint is at the opposite bound.
3948 switch (variable_status) {
3950 constraint_status = ConstraintStatus::AT_UPPER_BOUND;
3951 break;
3953 constraint_status = ConstraintStatus::AT_LOWER_BOUND;
3954 break;
3955 default:
3956 constraint_status = VariableToConstraintStatus(variable_status);
3957 break;
3958 }
3959 solution->constraint_statuses[row] = constraint_status;
3960 }
3961
3962 // Drop the primal values and variable statuses for slack variables.
3963 solution->primal_values.resize(first_slack_col_, 0.0);
3964 solution->variable_statuses.resize(first_slack_col_, VariableStatus::FREE);
3965}
3966
3967} // namespace glop
3968} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:495
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:892
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:702
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:889
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
#define VLOG(verboselevel)
Definition: base/logging.h:983
iterator erase(const_iterator pos)
iterator insert(const_iterator pos, const value_type &x)
void resize(size_type new_size)
size_type size() const
bool empty() const
void push_back(const value_type &x)
void swap(StrongVector &x)
void Add(const FpNumber &value)
Definition: accurate_sum.h:29
void SetLogToStdOut(bool enable)
Definition: util/logging.h:45
A simple class to enforce both an elapsed time limit and a deterministic time limit in the same threa...
Definition: time_limit.h:106
void RecoverSolution(ProblemSolution *solution) const final
void MarkColumnForDeletionWithState(ColIndex col, Fractional value, VariableStatus status)
const DenseBooleanRow & GetMarkedColumns() const
Definition: preprocessor.h:200
void RestoreDeletedColumns(ProblemSolution *solution) const
const SparseColumn & SavedOrEmptyColumn(ColIndex col) const
void SaveColumnIfNotAlreadyDone(ColIndex col, const SparseColumn &column)
void SaveColumn(ColIndex col, const SparseColumn &column)
const SparseColumn & SavedColumn(ColIndex col) const
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
ProblemStatus ChangeStatusToDualStatus(ProblemStatus status) const
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
::operations_research::glop::GlopParameters_SolverBehavior solve_dual_problem() const
static constexpr SolverBehavior LET_SOLVER_DECIDE
::operations_research::glop::GlopParameters_ScalingAlgorithm scaling_method() const
static constexpr SolverBehavior NEVER_DO
::operations_research::glop::GlopParameters_CostScalingAlgorithm cost_scaling() const
void RecoverSolution(ProblemSolution *solution) const final
SparseMatrix * GetMutableTransposeSparseMatrix()
Definition: lp_data.cc:386
void SetObjectiveScalingFactor(Fractional objective_scaling_factor)
Definition: lp_data.cc:336
void SetVariableBounds(ColIndex col, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:249
DenseColumn * mutable_constraint_upper_bounds()
Definition: lp_data.h:553
const SparseMatrix & GetTransposeSparseMatrix() const
Definition: lp_data.cc:376
void SetObjectiveOffset(Fractional objective_offset)
Definition: lp_data.cc:331
Fractional ScaleObjective(GlopParameters::CostScalingAlgorithm method)
Definition: lp_data.cc:1188
const DenseColumn & constraint_lower_bounds() const
Definition: lp_data.h:215
Fractional GetObjectiveCoefficientForMinimizationVersion(ColIndex col) const
Definition: lp_data.cc:419
void SetConstraintBounds(RowIndex row, Fractional lower_bound, Fractional upper_bound)
Definition: lp_data.cc:309
void Swap(LinearProgram *linear_program)
Definition: lp_data.cc:1031
const DenseRow & variable_upper_bounds() const
Definition: lp_data.h:232
DenseColumn * mutable_constraint_lower_bounds()
Definition: lp_data.h:550
SparseColumn * GetMutableSparseColumn(ColIndex col)
Definition: lp_data.cc:413
const DenseColumn & constraint_upper_bounds() const
Definition: lp_data.h:218
const DenseRow & objective_coefficients() const
Definition: lp_data.h:223
void AddSlackVariablesWhereNecessary(bool detect_integer_constraints)
Definition: lp_data.cc:697
bool IsVariableInteger(ColIndex col) const
Definition: lp_data.cc:295
void SetObjectiveCoefficient(ColIndex col, Fractional value)
Definition: lp_data.cc:326
void DeleteRows(const DenseBooleanColumn &rows_to_delete)
Definition: lp_data.cc:1258
void DeleteColumns(const DenseBooleanRow &columns_to_delete)
Definition: lp_data.cc:1065
void PopulateFromDual(const LinearProgram &dual, RowToColMapping *duplicated_rows)
Definition: lp_data.cc:764
const SparseMatrix & GetSparseMatrix() const
Definition: lp_data.h:175
const DenseRow & variable_lower_bounds() const
Definition: lp_data.h:229
Fractional objective_scaling_factor() const
Definition: lp_data.h:261
void SetMaximizationProblem(bool maximize)
Definition: lp_data.cc:343
const SparseColumn & GetSparseColumn(ColIndex col) const
Definition: lp_data.cc:409
void RecoverSolution(ProblemSolution *solution) const override
void DestructiveRecoverSolution(ProblemSolution *solution)
bool IsSmallerWithinPreprocessorZeroTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:84
Preprocessor(const GlopParameters *parameters)
Definition: preprocessor.cc:48
const GlopParameters & parameters_
Definition: preprocessor.h:92
bool IsSmallerWithinFeasibilityTolerance(Fractional a, Fractional b) const
Definition: preprocessor.h:80
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RestoreDeletedRows(ProblemSolution *solution) const
const DenseBooleanColumn & GetMarkedRows() const
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void RecoverSolution(ProblemSolution *solution) const final
void Undo(const GlopParameters &parameters, const SparseColumn &saved_column, const SparseColumn &saved_row, ProblemSolution *solution) const
SingletonUndo(OperationType type, const LinearProgram &lp, MatrixEntry e, ConstraintStatus status)
SparseColumn * mutable_column(ColIndex col)
Definition: sparse.h:182
const SparseColumn & column(ColIndex col) const
Definition: sparse.h:181
void ScaleColumnVector(bool up, DenseColumn *column_vector) const
void ScaleRowVector(bool up, DenseRow *row_vector) const
Fractional LookUpCoefficient(Index index) const
void RemoveNearZeroEntriesWithWeights(Fractional threshold, const DenseVector &weights)
void AddMultipleToSparseVectorAndDeleteCommonIndex(Fractional multiplier, Index removed_common_index, Fractional drop_tolerance, SparseVector *accumulator_vector) const
void assign(IntType size, const T &v)
Definition: lp_types.h:278
Fractional SumWithoutUb(Fractional c) const
Fractional SumWithoutLb(Fractional c) const
void RecoverSolution(ProblemSolution *solution) const final
void RemoveZeroCostUnconstrainedVariable(ColIndex col, Fractional target_bound, LinearProgram *lp)
void RecoverSolution(ProblemSolution *solution) const final
int64_t b
int64_t a
SatParameters parameters
ModelSharedTimeLimit * time_limit
const std::string name
int64_t value
absl::Status status
Definition: g_gurobi.cc:35
double upper_bound
double lower_bound
RowIndex row
Definition: markowitz.cc:182
ReverseView< Container > reversed_view(const Container &c)
const RowIndex kInvalidRow(-1)
Fractional ScalarProduct(const DenseRowOrColumn1 &u, const DenseRowOrColumn2 &v)
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
StrictITIVector< ColIndex, Fractional > DenseRow
Definition: lp_types.h:303
std::string GetProblemStatusString(ProblemStatus problem_status)
Definition: lp_types.cc:19
void FixConstraintWithFixedStatuses(const DenseColumn &row_lower_bounds, const DenseColumn &row_upper_bounds, ProblemSolution *solution)
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
bool IsFinite(Fractional value)
Definition: lp_types.h:91
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
ConstraintStatus VariableToConstraintStatus(VariableStatus status)
Definition: lp_types.cc:109
ColMapping FindProportionalColumns(const SparseMatrix &matrix, Fractional tolerance)
void Scale(LinearProgram *lp, SparseMatrixScaler *scaler)
const double kInfinity
Definition: lp_types.h:84
const ColIndex kInvalidCol(-1)
void swap(IdMap< K, V > &a, IdMap< K, V > &b)
Definition: id_map.h:262
std::function< int64_t(const Model &)> Value(IntegerVariable v)
Definition: integer.h:1673
Collection of objects used to extend the Constraint Solver library.
bool IsSmallerWithinTolerance(FloatType x, FloatType y, FloatType tolerance)
Definition: fp_utils.h:157
bool IsIntegerWithinTolerance(FloatType x, FloatType tolerance)
Definition: fp_utils.h:165
BeginEndReverseIteratorWrapper< Container > Reverse(const Container &c)
Definition: iterators.h:98
int index
Definition: pack.cc:509
EntryIndex num_entries
Fractional scaled_cost
ColIndex col
ColIndex representative
#define RUN_PREPROCESSOR(name)
Definition: preprocessor.cc:60
int64_t delta
Definition: resource.cc:1692
#define RETURN_IF_NULL(x)
Definition: return_macros.h:20
#define RETURN_VALUE_IF_NULL(x, v)
Definition: return_macros.h:26
Fractional target_bound
int64_t bound
int64_t coefficient
int64_t cost
std::vector< double > lower_bounds
std::vector< double > upper_bounds
std::optional< int64_t > end
#define SCOPED_INSTRUCTION_COUNT(time_limit)
Definition: stats.h:439
ConstraintStatusColumn constraint_statuses
Definition: lp_data.h:686
#define SOLVER_LOG(logger,...)
Definition: util/logging.h:69
const double coeff