OR-Tools  9.0
reduced_costs.cc
Go to the documentation of this file.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <random>
17 
18 #ifdef OMP
19 #include <omp.h>
20 #endif
21 
23 
24 namespace operations_research {
25 namespace glop {
26 
28  const DenseRow& objective,
29  const RowToColMapping& basis,
30  const VariablesInfo& variables_info,
31  const BasisFactorization& basis_factorization,
32  absl::BitGenRef random)
33  : matrix_(matrix),
34  objective_(objective),
35  basis_(basis),
36  variables_info_(variables_info),
37  basis_factorization_(basis_factorization),
38  random_(random),
39  parameters_(),
40  stats_(),
41  must_refactorize_basis_(false),
42  recompute_basic_objective_left_inverse_(true),
43  recompute_basic_objective_(true),
44  recompute_reduced_costs_(true),
45  are_reduced_costs_precise_(false),
46  are_reduced_costs_recomputed_(false),
47  basic_objective_(),
48  reduced_costs_(),
49  basic_objective_left_inverse_(),
50  dual_feasibility_tolerance_() {}
51 
53  return must_refactorize_basis_;
54 }
55 
57  ColIndex entering_col, const ScatteredColumn& direction,
58  Fractional* reduced_cost) {
59  SCOPED_TIME_STAT(&stats_);
60  if (recompute_basic_objective_) {
61  ComputeBasicObjective();
62  }
63  const Fractional old_reduced_cost = reduced_costs_[entering_col];
64  const Fractional precise_reduced_cost =
65  objective_[entering_col] + cost_perturbations_[entering_col] -
66  PreciseScalarProduct(basic_objective_, direction);
67 
68  // Update the reduced cost of the entering variable with the precise version.
69  reduced_costs_[entering_col] = precise_reduced_cost;
70  *reduced_cost = precise_reduced_cost;
71 
72  if (!IsValidPrimalEnteringCandidate(entering_col)) {
73  VLOG(1) << "Entering candidate is not valid under precise reduced costs.";
74 
75  // If we don't have the reduced cost with maximum precision, we
76  // return false and the next ChooseEnteringColumn() will recompute them.
77  // If they are already precise, we will skip this one (since it is no
78  // longer a candidate).
79  if (!are_reduced_costs_precise_) {
81  }
82  return false;
83  }
84 
85  // At this point, we have an entering variable that will move the objective in
86  // the good direction. We check the precision of the reduced cost and edges
87  // norm, but even if they are imprecise, we finish this pivot and will
88  // recompute them during the next call to ChooseEnteringColumn().
89 
90  // Estimate the accuracy of the reduced costs using the entering variable.
91  if (!recompute_reduced_costs_) {
92  const Fractional estimated_reduced_costs_accuracy =
93  old_reduced_cost - precise_reduced_cost;
94  const Fractional scale =
95  (std::abs(precise_reduced_cost) <= 1.0) ? 1.0 : precise_reduced_cost;
96  stats_.reduced_costs_accuracy.Add(estimated_reduced_costs_accuracy / scale);
97  if (std::abs(estimated_reduced_costs_accuracy) / scale >
98  parameters_.recompute_reduced_costs_threshold()) {
99  VLOG(1) << "Recomputing reduced costs, value = " << precise_reduced_cost
100  << " error = "
101  << std::abs(precise_reduced_cost - old_reduced_cost);
103  }
104  }
105  return true;
106 }
107 
109  SCOPED_TIME_STAT(&stats_);
110  DCHECK(!recompute_reduced_costs_);
111  if (recompute_reduced_costs_) return 0.0;
112 
113  // The current reduced costs of the slack columns are the opposite of the dual
114  // values. Note that they are updated by UpdateBeforeBasisPivot().
115  const RowIndex num_rows = matrix_.num_rows();
116  const ColIndex first_slack_col = matrix_.num_cols() - RowToColIndex(num_rows);
117  DenseRow dual_values(RowToColIndex(num_rows), 0.0);
118  for (RowIndex row(0); row < num_rows; ++row) {
119  const ColIndex row_as_col = RowToColIndex(row);
120  const ColIndex slack_col = first_slack_col + row_as_col;
121  dual_values[row_as_col] = objective_[slack_col] +
122  cost_perturbations_[slack_col] -
123  reduced_costs_[slack_col];
124  }
125  Fractional dual_residual_error(0.0);
126  for (RowIndex row(0); row < num_rows; ++row) {
127  const ColIndex basic_col = basis_[row];
128  const Fractional residual =
129  objective_[basic_col] + cost_perturbations_[basic_col] -
130  matrix_.ColumnScalarProduct(basic_col, dual_values);
131  dual_residual_error = std::max(dual_residual_error, std::abs(residual));
132  }
133  return dual_residual_error;
134 }
135 
137  SCOPED_TIME_STAT(&stats_);
138  DCHECK(!recompute_reduced_costs_);
139  if (recompute_reduced_costs_) return 0.0;
140  Fractional maximum_dual_infeasibility = 0.0;
141  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
142  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
143  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
144  const Fractional rc = reduced_costs_[col];
145  if ((can_increase.IsSet(col) && rc < 0.0) ||
146  (can_decrease.IsSet(col) && rc > 0.0)) {
147  maximum_dual_infeasibility =
148  std::max(maximum_dual_infeasibility, std::abs(rc));
149  }
150  }
151  return maximum_dual_infeasibility;
152 }
153 
155  const {
156  SCOPED_TIME_STAT(&stats_);
157  Fractional maximum_dual_infeasibility = 0.0;
158  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
159  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
160  const DenseBitRow& is_boxed = variables_info_.GetNonBasicBoxedVariables();
161  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
162  if (is_boxed[col]) continue;
163  const Fractional rc = reduced_costs_[col];
164  if ((can_increase.IsSet(col) && rc < 0.0) ||
165  (can_decrease.IsSet(col) && rc > 0.0)) {
166  maximum_dual_infeasibility =
167  std::max(maximum_dual_infeasibility, std::abs(rc));
168  }
169  }
170  return maximum_dual_infeasibility;
171 }
172 
174  SCOPED_TIME_STAT(&stats_);
175  DCHECK(!recompute_reduced_costs_);
176  if (recompute_reduced_costs_) return 0.0;
177  Fractional dual_infeasibility_sum = 0.0;
178  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
179  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
180  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
181  const Fractional rc = reduced_costs_[col];
182  if ((can_increase.IsSet(col) && rc < 0.0) ||
183  (can_decrease.IsSet(col) && rc > 0.0)) {
184  dual_infeasibility_sum += std::abs(std::abs(rc));
185  }
186  }
187  return dual_infeasibility_sum;
188 }
189 
190 void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col,
191  RowIndex leaving_row,
192  const ScatteredColumn& direction,
193  UpdateRow* update_row) {
194  SCOPED_TIME_STAT(&stats_);
195  const ColIndex leaving_col = basis_[leaving_row];
196  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
197  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));
198 
199  // If we are recomputing everything when requested, no need to update.
200  if (!recompute_reduced_costs_) {
201  UpdateReducedCosts(entering_col, leaving_col, leaving_row,
202  direction[leaving_row], update_row);
203  }
204 
205  // Note that it is important to update basic_objective_ AFTER calling
206  // UpdateReducedCosts().
207  UpdateBasicObjective(entering_col, leaving_row);
208 }
209 
211  Fractional* current_cost) {
212  DCHECK_NE(variables_info_.GetStatusRow()[col], VariableStatus::BASIC);
213  DCHECK_EQ(current_cost, &objective_[col]);
214  reduced_costs_[col] -= objective_[col];
215  *current_cost = 0.0;
216 }
217 
218 void ReducedCosts::SetParameters(const GlopParameters& parameters) {
219  parameters_ = parameters;
220 }
221 
223  SCOPED_TIME_STAT(&stats_);
224  recompute_basic_objective_ = true;
225  recompute_basic_objective_left_inverse_ = true;
226  are_reduced_costs_precise_ = false;
227  SetRecomputeReducedCostsAndNotifyWatchers();
228 }
229 
231  SCOPED_TIME_STAT(&stats_);
232  recompute_basic_objective_ = true;
233  recompute_basic_objective_left_inverse_ = true;
234 }
235 
237  SCOPED_TIME_STAT(&stats_);
238  if (are_reduced_costs_precise_) return;
239  must_refactorize_basis_ = true;
240  recompute_basic_objective_left_inverse_ = true;
241  SetRecomputeReducedCostsAndNotifyWatchers();
242 }
243 
245  SCOPED_TIME_STAT(&stats_);
246  VLOG(1) << "Perturbing the costs ... ";
247 
248  Fractional max_cost_magnitude = 0.0;
249  const ColIndex structural_size =
250  matrix_.num_cols() - RowToColIndex(matrix_.num_rows());
251  for (ColIndex col(0); col < structural_size; ++col) {
252  max_cost_magnitude =
253  std::max(max_cost_magnitude, std::abs(objective_[col]));
254  }
255 
256  cost_perturbations_.AssignToZero(matrix_.num_cols());
257  for (ColIndex col(0); col < structural_size; ++col) {
258  const Fractional objective = objective_[col];
259  const Fractional magnitude =
260  (1.0 + std::uniform_real_distribution<double>()(random_)) *
261  (parameters_.relative_cost_perturbation() * std::abs(objective) +
262  parameters_.relative_max_cost_perturbation() * max_cost_magnitude);
263  DCHECK_GE(magnitude, 0.0);
264 
265  // The perturbation direction is such that a dual-feasible solution stays
266  // feasible. This is important.
267  const VariableType type = variables_info_.GetTypeRow()[col];
268  switch (type) {
270  break;
272  break;
274  cost_perturbations_[col] = magnitude;
275  break;
277  cost_perturbations_[col] = -magnitude;
278  break;
280  // Here we don't necessarily maintain the dual-feasibility of a dual
281  // feasible solution, however we can always shift the variable to its
282  // other bound (because it is boxed) to restore dual-feasibility. This is
283  // done by MakeBoxedVariableDualFeasible() at the end of the dual
284  // phase-I algorithm.
285  if (objective > 0.0) {
286  cost_perturbations_[col] = magnitude;
287  } else if (objective < 0.0) {
288  cost_perturbations_[col] = -magnitude;
289  }
290  break;
291  }
292  }
293 }
294 
295 void ReducedCosts::ShiftCostIfNeeded(bool increasing_rc_is_needed,
296  ColIndex col) {
297  SCOPED_TIME_STAT(&stats_);
298 
299  // We always want a minimum step size, so if we have a negative step or
300  // a step that is really small, we will shift the cost of the given column.
301  const Fractional minimum_delta =
302  parameters_.degenerate_ministep_factor() * dual_feasibility_tolerance_;
303  if (increasing_rc_is_needed && reduced_costs_[col] <= -minimum_delta) return;
304  if (!increasing_rc_is_needed && reduced_costs_[col] >= minimum_delta) return;
305 
306  const Fractional delta =
307  increasing_rc_is_needed ? minimum_delta : -minimum_delta;
308  IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + delta));
309  cost_perturbations_[col] -= reduced_costs_[col] + delta;
310  reduced_costs_[col] = -delta;
311  has_cost_shift_ = true;
312 }
313 
314 bool ReducedCosts::StepIsDualDegenerate(bool increasing_rc_is_needed,
315  ColIndex col) {
316  if (increasing_rc_is_needed && reduced_costs_[col] >= 0.0) return true;
317  if (!increasing_rc_is_needed && reduced_costs_[col] <= 0.0) return true;
318  return false;
319 }
320 
322  SCOPED_TIME_STAT(&stats_);
323  has_cost_shift_ = false;
324  cost_perturbations_.AssignToZero(matrix_.num_cols());
325  recompute_basic_objective_ = true;
326  recompute_basic_objective_left_inverse_ = true;
327  are_reduced_costs_precise_ = false;
328  SetRecomputeReducedCostsAndNotifyWatchers();
329 }
330 
332  SCOPED_TIME_STAT(&stats_);
333  if (!are_reduced_costs_recomputed_) {
334  SetRecomputeReducedCostsAndNotifyWatchers();
335  }
336  return GetReducedCosts();
337 }
338 
340  SCOPED_TIME_STAT(&stats_);
341  if (basis_factorization_.IsRefactorized()) {
342  must_refactorize_basis_ = false;
343  }
344  if (recompute_reduced_costs_) {
345  ComputeReducedCosts();
346  }
347  return reduced_costs_;
348 }
349 
351  SCOPED_TIME_STAT(&stats_);
352  ComputeBasicObjectiveLeftInverse();
353  return Transpose(basic_objective_left_inverse_.values);
354 }
355 
356 void ReducedCosts::ComputeBasicObjective() {
357  SCOPED_TIME_STAT(&stats_);
358  const ColIndex num_cols_in_basis = RowToColIndex(matrix_.num_rows());
359  cost_perturbations_.resize(matrix_.num_cols(), 0.0);
360  basic_objective_.resize(num_cols_in_basis, 0.0);
361  for (ColIndex col(0); col < num_cols_in_basis; ++col) {
362  const ColIndex basis_col = basis_[ColToRowIndex(col)];
363  basic_objective_[col] =
364  objective_[basis_col] + cost_perturbations_[basis_col];
365  }
366  recompute_basic_objective_ = false;
367  recompute_basic_objective_left_inverse_ = true;
368 }
369 
370 void ReducedCosts::ComputeReducedCosts() {
371  SCOPED_TIME_STAT(&stats_);
372  if (recompute_basic_objective_left_inverse_) {
373  ComputeBasicObjectiveLeftInverse();
374  }
375  Fractional dual_residual_error(0.0);
376  const ColIndex num_cols = matrix_.num_cols();
377 
378  reduced_costs_.resize(num_cols, 0.0);
379  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
380 #ifdef OMP
381  const int num_omp_threads = parameters_.num_omp_threads();
382 #else
383  const int num_omp_threads = 1;
384 #endif
385  if (num_omp_threads == 1) {
386  for (ColIndex col(0); col < num_cols; ++col) {
387  reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
388  matrix_.ColumnScalarProduct(
389  col, basic_objective_left_inverse_.values);
390 
391  // We also compute the dual residual error y.B - c_B.
392  if (is_basic.IsSet(col)) {
393  dual_residual_error =
394  std::max(dual_residual_error, std::abs(reduced_costs_[col]));
395  }
396  }
397  } else {
398 #ifdef OMP
399  // In the multi-threaded case, perform the same computation as in the
400  // single-threaded case above.
401  std::vector<Fractional> thread_local_dual_residual_error(num_omp_threads,
402  0.0);
403  const int parallel_loop_size = num_cols.value();
404 #pragma omp parallel for num_threads(num_omp_threads)
405  for (int i = 0; i < parallel_loop_size; i++) {
406  const ColIndex col(i);
407  reduced_costs_[col] = objective_[col] + objective_perturbation_[col] -
408  matrix_.ColumnScalarProduct(
409  col, basic_objective_left_inverse_.values);
410 
411  if (is_basic.IsSet(col)) {
412  thread_local_dual_residual_error[omp_get_thread_num()] =
413  std::max(thread_local_dual_residual_error[omp_get_thread_num()],
414  std::abs(reduced_costs_[col]));
415  }
416  }
417  // end of omp parallel for
418  for (int i = 0; i < num_omp_threads; i++) {
419  dual_residual_error =
420  std::max(dual_residual_error, thread_local_dual_residual_error[i]);
421  }
422 #endif // OMP
423  }
424 
425  deterministic_time_ +=
427  recompute_reduced_costs_ = false;
428  are_reduced_costs_recomputed_ = true;
429  are_reduced_costs_precise_ = basis_factorization_.IsRefactorized();
430 
431  // It is not resonable to have a dual tolerance lower than the current
432  // dual_residual_error, otherwise we may never terminate (This is happening on
433  // dfl001.mps with a low dual_feasibility_tolerance). Note that since we
434  // recompute the reduced costs with maximum precision before really exiting,
435  // it is fine to do a couple of iterations with a high zero tolerance.
436  dual_feasibility_tolerance_ = parameters_.dual_feasibility_tolerance();
437  if (dual_residual_error > dual_feasibility_tolerance_) {
438  VLOG(2) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
439  dual_feasibility_tolerance_ = dual_residual_error;
440  }
441 }
442 
443 void ReducedCosts::ComputeBasicObjectiveLeftInverse() {
444  SCOPED_TIME_STAT(&stats_);
445  if (recompute_basic_objective_) {
446  ComputeBasicObjective();
447  }
448  basic_objective_left_inverse_.values = basic_objective_;
449  basic_objective_left_inverse_.non_zeros.clear();
450  basis_factorization_.LeftSolve(&basic_objective_left_inverse_);
451  recompute_basic_objective_left_inverse_ = false;
452  IF_STATS_ENABLED(stats_.basic_objective_left_inverse_density.Add(
453  Density(basic_objective_left_inverse_.values)));
454 
455  // TODO(user): Estimate its accuracy by a few scalar products, and refactorize
456  // if it is above a threshold?
457 }
458 
459 // Note that the update is such than the entering reduced cost is always set to
460 // 0.0. In particular, because of this we can step in the wrong direction for
461 // the dual method if the reduced cost is slightly infeasible.
void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
                                      ColIndex leaving_col,
                                      RowIndex leaving_row, Fractional pivot,
                                      UpdateRow* update_row) {
  DCHECK_NE(entering_col, leaving_col);
  DCHECK_NE(pivot, 0.0);
  // Nothing to maintain incrementally when a full recomputation is pending.
  if (recompute_reduced_costs_) return;

  // Note that this is precise because of the CheckPrecision().
  const Fractional entering_reduced_cost = reduced_costs_[entering_col];

  // Nothing to do if the entering reduced cost is 0.0.
  // This corresponds to a dual degenerate pivot.
  if (entering_reduced_cost == 0.0) {
    VLOG(2) << "Reduced costs didn't change.";

    // TODO(user): the reduced costs may still be "precise" in this case, but
    // other parts of the code assume that if they are precise then the basis
    // was just refactorized in order to recompute them which is not the case
    // here. Clean this up.
    are_reduced_costs_precise_ = false;
    return;
  }

  are_reduced_costs_recomputed_ = false;
  update_row->ComputeUpdateRow(leaving_row);
  SCOPED_TIME_STAT(&stats_);

  // Columns at or after first_slack_col are the slack columns; they are
  // handled separately below via the unit row left inverse.
  const ColIndex first_slack_col =
      matrix_.num_cols() - RowToColIndex(matrix_.num_rows());

  // Update the leaving variable reduced cost.
  // '-pivot' is the value of the entering_edge at 'leaving_row'.
  // The edge of the 'leaving_col' in the new basis is equal to
  // 'entering_edge / -pivot'.
  const Fractional new_leaving_reduced_cost = entering_reduced_cost / -pivot;
  // Apply the rank-one update to the structural (non-slack) columns touched
  // by the update row.
  for (const ColIndex col : update_row->GetNonZeroPositions()) {
    // Because the columns are in order, it is safe to break here.
    if (col >= first_slack_col) break;
    const Fractional coeff = update_row->GetCoefficient(col);
    reduced_costs_[col] += new_leaving_reduced_cost * coeff;
  }
  // The incremental update accumulates floating-point error, so the values
  // are no longer considered precise.
  are_reduced_costs_precise_ = false;

  // Always update the slack variable position so we have the dual values and
  // we can use them in ComputeCurrentDualResidualError().
  const ScatteredRow& unit_row_left_inverse =
      update_row->GetUnitRowLeftInverse();
  if (unit_row_left_inverse.non_zeros.empty()) {
    // Dense representation: iterate over every position.
    const ColIndex num_cols = unit_row_left_inverse.values.size();
    for (ColIndex col(0); col < num_cols; ++col) {
      const ColIndex slack_col = first_slack_col + col;
      const Fractional coeff = unit_row_left_inverse[col];
      reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
    }
  } else {
    // Sparse representation: only the recorded non-zeros matter.
    for (const ColIndex col : unit_row_left_inverse.non_zeros) {
      const ColIndex slack_col = first_slack_col + col;
      const Fractional coeff = unit_row_left_inverse[col];
      reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
    }
  }
  reduced_costs_[leaving_col] = new_leaving_reduced_cost;

  // In the dual, since we compute the update before selecting the entering
  // variable, this cost is still in the update_position_list, so we make sure
  // it is 0 here.
  reduced_costs_[entering_col] = 0.0;
}
531 
533  const Fractional reduced_cost = reduced_costs_[col];
534  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
535  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
536  const Fractional tolerance = dual_feasibility_tolerance_;
537  return (can_increase.IsSet(col) && (reduced_cost < -tolerance)) ||
538  (can_decrease.IsSet(col) && (reduced_cost > tolerance));
539 }
540 
541 void ReducedCosts::UpdateBasicObjective(ColIndex entering_col,
542  RowIndex leaving_row) {
543  SCOPED_TIME_STAT(&stats_);
544  basic_objective_[RowToColIndex(leaving_row)] =
545  objective_[entering_col] + cost_perturbations_[entering_col];
546  recompute_basic_objective_left_inverse_ = true;
547 }
548 
549 void ReducedCosts::SetRecomputeReducedCostsAndNotifyWatchers() {
550  recompute_reduced_costs_ = true;
551  for (bool* watcher : watchers_) *watcher = true;
552 }
553 
// Constructor. Prices are derived from the reduced costs and the primal edge
// norms, so this registers a recomputation watcher on both: whenever either
// input is recomputed from scratch, recompute_ is raised and the cached
// prices are rebuilt lazily on the next query.
PrimalPrices::PrimalPrices(absl::BitGenRef random,
                           const VariablesInfo& variables_info,
                           PrimalEdgeNorms* primal_edge_norms,
                           ReducedCosts* reduced_costs)
    : prices_(random),
      variables_info_(variables_info),
      primal_edge_norms_(primal_edge_norms),
      reduced_costs_(reduced_costs) {
  reduced_costs_->AddRecomputationWatcher(&recompute_);
  primal_edge_norms->AddRecomputationWatcher(&recompute_);
}
565 
566 void PrimalPrices::UpdateBeforeBasisPivot(ColIndex entering_col,
567  UpdateRow* update_row) {
568  // If we are recomputing everything when requested, no need to update.
569  if (recompute_) return;
570 
571  // Note that the set of positions works because both the reduced costs
572  // and the primal edge norms are updated on the same positions which are
573  // given by the update_row.
574  UpdateEnteringCandidates</*from_clean_state=*/false>(
575  update_row->GetNonZeroPositions());
576 }
577 
579  if (recompute_) return;
580  if (reduced_costs_->IsValidPrimalEnteringCandidate(col)) {
581  const DenseRow& squared_norms = primal_edge_norms_->GetSquaredNorms();
582  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
583  DCHECK_NE(0.0, squared_norms[col]);
584  prices_.AddOrUpdate(col, Square(reduced_costs[col]) / squared_norms[col]);
585  } else {
586  prices_.Remove(col);
587  }
588 }
589 
591  // If we need a recomputation, we cannot assumes that the reduced costs are
592  // valid until we are about to recompute the prices.
593  if (recompute_) return;
594 
595  DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
596  prices_.Remove(col);
597 }
598 
600  if (recompute_) {
601  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
602  prices_.ClearAndResize(reduced_costs.size());
603  UpdateEnteringCandidates</*from_clean_state=*/true>(
604  variables_info_.GetIsRelevantBitRow());
605  recompute_ = false;
606  }
607  return prices_.GetMaximum();
608 }
609 
610 // A variable is an entering candidate if it can move in a direction that
611 // minimizes the objective. That is, its value needs to increase if its
612 // reduced cost is negative or it needs to decrease if its reduced cost is
613 // positive (see the IsValidPrimalEnteringCandidate() function). Note that
614 // this is the same as a dual-infeasible variable.
template <bool from_clean_state, typename ColumnsToUpdate>
void PrimalPrices::UpdateEnteringCandidates(const ColumnsToUpdate& cols) {
  const Fractional tolerance = reduced_costs_->GetDualFeasibilityTolerance();
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const DenseRow& squared_norms = primal_edge_norms_->GetSquaredNorms();
  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];

    // Optimization for speed (The function is about 30% faster than the code in
    // IsValidPrimalEnteringCandidate() or a switch() on variable_status[col]).
    // This relies on the fact that (double1 > double2) returns a 1 or 0 result
    // when converted to an int. It also uses an XOR (which appears to be
    // faster) since the two conditions on the reduced cost are exclusive.
    const bool is_dual_infeasible = Bitset64<ColIndex>::ConditionalXorOfTwoBits(
        col, reduced_cost > tolerance, can_decrease, reduced_cost < -tolerance,
        can_increase);
    if (is_dual_infeasible) {
      // The bit-trick above must agree with the readable predicate.
      DCHECK(reduced_costs_->IsValidPrimalEnteringCandidate(col));
      // Price = rc^2 / ||edge||^2, i.e. steepest-edge style ratio.
      const Fractional price = Square(reduced_cost) / squared_norms[col];
      prices_.AddOrUpdate(col, price);
    } else {
      DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
      // From a clean state the queue starts empty, so there is nothing to
      // remove; otherwise drop the no-longer-valid candidate.
      if (!from_clean_state) prices_.Remove(col);
    }
  }
}
643 
644 } // namespace glop
645 } // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:894
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:897
#define DCHECK(condition)
Definition: base/logging.h:892
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:893
#define VLOG(verboselevel)
Definition: base/logging.h:986
bool IsSet(IndexType i) const
Definition: bitset.h:485
static uint64_t ConditionalXorOfTwoBits(IndexType i, uint64_t use1, const Bitset64< IndexType > &set1, uint64_t use2, const Bitset64< IndexType > &set2)
Definition: bitset.h:643
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition: sparse.h:382
void AddOrUpdate(Index position, Fractional value)
Definition: pricing.h:185
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
PrimalPrices(absl::BitGenRef random, const VariablesInfo &variables_info, PrimalEdgeNorms *primal_edge_norms, ReducedCosts *reduced_costs)
ReducedCosts(const CompactSparseMatrix &matrix_, const DenseRow &objective, const RowToColMapping &basis, const VariablesInfo &variables_info, const BasisFactorization &basis_factorization, absl::BitGenRef random)
bool TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction, Fractional *reduced_cost)
bool IsValidPrimalEnteringCandidate(ColIndex col) const
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables() const
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
Fractional GetDualFeasibilityTolerance() const
Fractional ComputeMaximumDualInfeasibility() const
Fractional ComputeSumOfDualInfeasibilities() const
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
void SetParameters(const GlopParameters &parameters)
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:168
const DenseBitRow & GetIsBasicBitRow() const
const DenseBitRow & GetNonBasicBoxedVariables() const
const DenseBitRow & GetCanIncreaseBitRow() const
const DenseBitRow & GetCanDecreaseBitRow() const
const VariableTypeRow & GetTypeRow() const
const DenseBitRow & GetNotBasicBitRow() const
const VariableStatusRow & GetStatusRow() const
const DenseBitRow & GetIsRelevantBitRow() const
SatParameters parameters
ColIndex col
Definition: markowitz.cc:183
RowIndex row
Definition: markowitz.cc:182
Fractional Square(Fractional f)
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
double Density(const DenseRow &row)
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
const DenseRow & Transpose(const DenseColumn &col)
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:324
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:380
Collection of objects used to extend the Constraint Solver library.
int64_t delta
Definition: resource.cc:1692
IntVar *const objective_
Definition: search.cc:2966
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
StrictITIVector< Index, Fractional > values