OR-Tools  9.1
reduced_costs.cc
Go to the documentation of this file.
1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
15 
16 #include <random>
17 
18 #ifdef OMP
19 #include <omp.h>
20 #endif
21 
23 
24 namespace operations_research {
25 namespace glop {
26 
28  const DenseRow& objective,
29  const RowToColMapping& basis,
30  const VariablesInfo& variables_info,
31  const BasisFactorization& basis_factorization,
32  absl::BitGenRef random)
33  : matrix_(matrix),
34  objective_(objective),
35  basis_(basis),
36  variables_info_(variables_info),
37  basis_factorization_(basis_factorization),
38  random_(random),
39  parameters_(),
40  stats_(),
41  must_refactorize_basis_(false),
42  recompute_basic_objective_left_inverse_(true),
43  recompute_basic_objective_(true),
44  recompute_reduced_costs_(true),
45  are_reduced_costs_precise_(false),
46  are_reduced_costs_recomputed_(false),
47  basic_objective_(),
48  reduced_costs_(),
49  basic_objective_left_inverse_(),
50  dual_feasibility_tolerance_() {}
51 
53  return must_refactorize_basis_;
54 }
55 
57  ColIndex entering_col, const ScatteredColumn& direction) {
58  SCOPED_TIME_STAT(&stats_);
59  if (recompute_basic_objective_) {
60  ComputeBasicObjective();
61  }
62  const Fractional old_reduced_cost = reduced_costs_[entering_col];
63  const Fractional precise_reduced_cost =
64  objective_[entering_col] + cost_perturbations_[entering_col] -
65  PreciseScalarProduct(basic_objective_, direction);
66 
67  // Update the reduced cost of the entering variable with the precise version.
68  reduced_costs_[entering_col] = precise_reduced_cost;
69 
70  // At this point, we have an entering variable that will move the objective in
71  // the good direction. We check the precision of the reduced cost and edges
72  // norm, but even if they are imprecise, we finish this pivot and will
73  // recompute them during the next call to ChooseEnteringColumn().
74 
75  // Estimate the accuracy of the reduced costs using the entering variable.
76  if (!recompute_reduced_costs_) {
77  const Fractional estimated_reduced_costs_accuracy =
78  old_reduced_cost - precise_reduced_cost;
79  const Fractional scale =
80  (std::abs(precise_reduced_cost) <= 1.0) ? 1.0 : precise_reduced_cost;
81  stats_.reduced_costs_accuracy.Add(estimated_reduced_costs_accuracy / scale);
82  if (std::abs(estimated_reduced_costs_accuracy) / scale >
83  parameters_.recompute_reduced_costs_threshold()) {
84  VLOG(1) << "Recomputing reduced costs, value = " << precise_reduced_cost
85  << " error = "
86  << std::abs(precise_reduced_cost - old_reduced_cost);
88  }
89  }
90 
91  return precise_reduced_cost;
92 }
93 
95  SCOPED_TIME_STAT(&stats_);
96  Fractional dual_residual_error(0.0);
97  const RowIndex num_rows = matrix_.num_rows();
98  const DenseRow& dual_values = Transpose(GetDualValues());
99  for (RowIndex row(0); row < num_rows; ++row) {
100  const ColIndex basic_col = basis_[row];
101  const Fractional residual =
102  objective_[basic_col] + cost_perturbations_[basic_col] -
103  matrix_.ColumnScalarProduct(basic_col, dual_values);
104  dual_residual_error = std::max(dual_residual_error, std::abs(residual));
105  }
106  return dual_residual_error;
107 }
108 
110  SCOPED_TIME_STAT(&stats_);
111 
112  // Trigger a recomputation if needed so that reduced_costs_ is valid.
113  GetReducedCosts();
114 
115  Fractional maximum_dual_infeasibility = 0.0;
116  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
117  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
118  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
119  const Fractional rc = reduced_costs_[col];
120  if ((can_increase.IsSet(col) && rc < 0.0) ||
121  (can_decrease.IsSet(col) && rc > 0.0)) {
122  maximum_dual_infeasibility =
123  std::max(maximum_dual_infeasibility, std::abs(rc));
124  }
125  }
126  return maximum_dual_infeasibility;
127 }
128 
130  SCOPED_TIME_STAT(&stats_);
131 
132  // Trigger a recomputation if needed so that reduced_costs_ is valid.
133  GetReducedCosts();
134 
135  Fractional maximum_dual_infeasibility = 0.0;
136  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
137  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
138  const DenseBitRow& is_boxed = variables_info_.GetNonBasicBoxedVariables();
139  for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
140  if (is_boxed[col]) continue;
141  const Fractional rc = reduced_costs_[col];
142  if ((can_increase.IsSet(col) && rc < 0.0) ||
143  (can_decrease.IsSet(col) && rc > 0.0)) {
144  maximum_dual_infeasibility =
145  std::max(maximum_dual_infeasibility, std::abs(rc));
146  }
147  }
148  return maximum_dual_infeasibility;
149 }
150 
152  SCOPED_TIME_STAT(&stats_);
153 
154  // Trigger a recomputation if needed so that reduced_costs_ is valid.
155  GetReducedCosts();
156 
157  Fractional dual_infeasibility_sum = 0.0;
158  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
159  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
160  for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
161  const Fractional rc = reduced_costs_[col];
162  if ((can_increase.IsSet(col) && rc < 0.0) ||
163  (can_decrease.IsSet(col) && rc > 0.0)) {
164  dual_infeasibility_sum += std::abs(std::abs(rc));
165  }
166  }
167  return dual_infeasibility_sum;
168 }
169 
// Updates this class's state before the basis pivot that makes entering_col
// basic and makes the variable currently basic at leaving_row non-basic.
void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col,
                                          RowIndex leaving_row,
                                          const ScatteredColumn& direction,
                                          UpdateRow* update_row) {
  SCOPED_TIME_STAT(&stats_);
  const ColIndex leaving_col = basis_[leaving_row];
  // Sanity: the entering variable must be non-basic, the leaving one basic.
  DCHECK(!variables_info_.GetIsBasicBitRow().IsSet(entering_col));
  DCHECK(variables_info_.GetIsBasicBitRow().IsSet(leaving_col));

  // If we are recomputing everything when requested, no need to update.
  if (!recompute_reduced_costs_) {
    UpdateReducedCosts(entering_col, leaving_col, leaving_row,
                       direction[leaving_row], update_row);
  }

  // Note that it is important to update basic_objective_ AFTER calling
  // UpdateReducedCosts().
  UpdateBasicObjective(entering_col, leaving_row);
}
189 
191  Fractional* current_cost) {
192  DCHECK_NE(variables_info_.GetStatusRow()[col], VariableStatus::BASIC);
193  DCHECK_EQ(current_cost, &objective_[col]);
194  reduced_costs_[col] -= objective_[col];
195  *current_cost = 0.0;
196 }
197 
199  parameters_ = parameters;
200 }
201 
203  SCOPED_TIME_STAT(&stats_);
204  recompute_basic_objective_ = true;
205  recompute_basic_objective_left_inverse_ = true;
206  are_reduced_costs_precise_ = false;
207  SetRecomputeReducedCostsAndNotifyWatchers();
208 }
209 
211  SCOPED_TIME_STAT(&stats_);
212  recompute_basic_objective_ = true;
213  recompute_basic_objective_left_inverse_ = true;
214 }
215 
217  SCOPED_TIME_STAT(&stats_);
218  if (are_reduced_costs_precise_) return;
219  must_refactorize_basis_ = true;
220  recompute_basic_objective_left_inverse_ = true;
221  SetRecomputeReducedCostsAndNotifyWatchers();
222 }
223 
225  SCOPED_TIME_STAT(&stats_);
226  VLOG(1) << "Perturbing the costs ... ";
227 
228  Fractional max_cost_magnitude = 0.0;
229  const ColIndex structural_size =
230  matrix_.num_cols() - RowToColIndex(matrix_.num_rows());
231  for (ColIndex col(0); col < structural_size; ++col) {
232  max_cost_magnitude =
233  std::max(max_cost_magnitude, std::abs(objective_[col]));
234  }
235 
236  cost_perturbations_.AssignToZero(matrix_.num_cols());
237  for (ColIndex col(0); col < structural_size; ++col) {
238  const Fractional objective = objective_[col];
239  const Fractional magnitude =
240  (1.0 + std::uniform_real_distribution<double>()(random_)) *
241  (parameters_.relative_cost_perturbation() * std::abs(objective) +
242  parameters_.relative_max_cost_perturbation() * max_cost_magnitude);
243  DCHECK_GE(magnitude, 0.0);
244 
245  // The perturbation direction is such that a dual-feasible solution stays
246  // feasible. This is important.
247  const VariableType type = variables_info_.GetTypeRow()[col];
248  switch (type) {
250  break;
252  break;
254  cost_perturbations_[col] = magnitude;
255  break;
257  cost_perturbations_[col] = -magnitude;
258  break;
260  // Here we don't necessarily maintain the dual-feasibility of a dual
261  // feasible solution, however we can always shift the variable to its
262  // other bound (because it is boxed) to restore dual-feasiblity. This is
263  // done by MakeBoxedVariableDualFeasible() at the end of the dual
264  // phase-I algorithm.
265  if (objective > 0.0) {
266  cost_perturbations_[col] = magnitude;
267  } else if (objective < 0.0) {
268  cost_perturbations_[col] = -magnitude;
269  }
270  break;
271  }
272  }
273 }
274 
275 void ReducedCosts::ShiftCostIfNeeded(bool increasing_rc_is_needed,
276  ColIndex col) {
277  SCOPED_TIME_STAT(&stats_);
278 
279  // We always want a minimum step size, so if we have a negative step or
280  // a step that is really small, we will shift the cost of the given column.
281  const Fractional minimum_delta =
282  parameters_.degenerate_ministep_factor() * dual_feasibility_tolerance_;
283  if (increasing_rc_is_needed && reduced_costs_[col] <= -minimum_delta) return;
284  if (!increasing_rc_is_needed && reduced_costs_[col] >= minimum_delta) return;
285 
286  const Fractional delta =
287  increasing_rc_is_needed ? minimum_delta : -minimum_delta;
288  IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + delta));
289  cost_perturbations_[col] -= reduced_costs_[col] + delta;
290  reduced_costs_[col] = -delta;
291  has_cost_shift_ = true;
292 }
293 
294 bool ReducedCosts::StepIsDualDegenerate(bool increasing_rc_is_needed,
295  ColIndex col) {
296  if (increasing_rc_is_needed && reduced_costs_[col] >= 0.0) return true;
297  if (!increasing_rc_is_needed && reduced_costs_[col] <= 0.0) return true;
298  return false;
299 }
300 
302  SCOPED_TIME_STAT(&stats_);
303  has_cost_shift_ = false;
304  cost_perturbations_.AssignToZero(matrix_.num_cols());
305  recompute_basic_objective_ = true;
306  recompute_basic_objective_left_inverse_ = true;
307  are_reduced_costs_precise_ = false;
308  SetRecomputeReducedCostsAndNotifyWatchers();
309 }
310 
312  SCOPED_TIME_STAT(&stats_);
313  if (!are_reduced_costs_recomputed_) {
314  SetRecomputeReducedCostsAndNotifyWatchers();
315  }
316  return GetReducedCosts();
317 }
318 
320  SCOPED_TIME_STAT(&stats_);
321  if (basis_factorization_.IsRefactorized()) {
322  must_refactorize_basis_ = false;
323  }
324  if (recompute_reduced_costs_) {
325  ComputeReducedCosts();
326  }
327  return reduced_costs_;
328 }
329 
331  SCOPED_TIME_STAT(&stats_);
332  ComputeBasicObjectiveLeftInverse();
333  return Transpose(basic_objective_left_inverse_.values);
334 }
335 
336 void ReducedCosts::ComputeBasicObjective() {
337  SCOPED_TIME_STAT(&stats_);
338  const ColIndex num_cols_in_basis = RowToColIndex(matrix_.num_rows());
339  cost_perturbations_.resize(matrix_.num_cols(), 0.0);
340  basic_objective_.resize(num_cols_in_basis, 0.0);
341  for (ColIndex col(0); col < num_cols_in_basis; ++col) {
342  const ColIndex basis_col = basis_[ColToRowIndex(col)];
343  basic_objective_[col] =
344  objective_[basis_col] + cost_perturbations_[basis_col];
345  }
346  recompute_basic_objective_ = false;
347  recompute_basic_objective_left_inverse_ = true;
348 }
349 
350 void ReducedCosts::ComputeReducedCosts() {
351  SCOPED_TIME_STAT(&stats_);
352  if (recompute_basic_objective_left_inverse_) {
353  ComputeBasicObjectiveLeftInverse();
354  }
355  Fractional dual_residual_error(0.0);
356  const ColIndex num_cols = matrix_.num_cols();
357 
358  reduced_costs_.resize(num_cols, 0.0);
359  const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow();
360 #ifdef OMP
361  const int num_omp_threads = parameters_.num_omp_threads();
362 #else
363  const int num_omp_threads = 1;
364 #endif
365  if (num_omp_threads == 1) {
366  for (ColIndex col(0); col < num_cols; ++col) {
367  reduced_costs_[col] = objective_[col] + cost_perturbations_[col] -
368  matrix_.ColumnScalarProduct(
369  col, basic_objective_left_inverse_.values);
370 
371  // We also compute the dual residual error y.B - c_B.
372  if (is_basic.IsSet(col)) {
373  dual_residual_error =
374  std::max(dual_residual_error, std::abs(reduced_costs_[col]));
375  }
376  }
377  } else {
378 #ifdef OMP
379  // In the multi-threaded case, perform the same computation as in the
380  // single-threaded case above.
381  std::vector<Fractional> thread_local_dual_residual_error(num_omp_threads,
382  0.0);
383  const int parallel_loop_size = num_cols.value();
384 #pragma omp parallel for num_threads(num_omp_threads)
385  for (int i = 0; i < parallel_loop_size; i++) {
386  const ColIndex col(i);
387  reduced_costs_[col] = objective_[col] + objective_perturbation_[col] -
388  matrix_.ColumnScalarProduct(
389  col, basic_objective_left_inverse_.values);
390 
391  if (is_basic.IsSet(col)) {
392  thread_local_dual_residual_error[omp_get_thread_num()] =
393  std::max(thread_local_dual_residual_error[omp_get_thread_num()],
394  std::abs(reduced_costs_[col]));
395  }
396  }
397  // end of omp parallel for
398  for (int i = 0; i < num_omp_threads; i++) {
399  dual_residual_error =
400  std::max(dual_residual_error, thread_local_dual_residual_error[i]);
401  }
402 #endif // OMP
403  }
404 
405  deterministic_time_ +=
407  recompute_reduced_costs_ = false;
408  are_reduced_costs_recomputed_ = true;
409  are_reduced_costs_precise_ = basis_factorization_.IsRefactorized();
410 
411  // It is not resonable to have a dual tolerance lower than the current
412  // dual_residual_error, otherwise we may never terminate (This is happening on
413  // dfl001.mps with a low dual_feasibility_tolerance). Note that since we
414  // recompute the reduced costs with maximum precision before really exiting,
415  // it is fine to do a couple of iterations with a high zero tolerance.
416  dual_feasibility_tolerance_ = parameters_.dual_feasibility_tolerance();
417  if (dual_residual_error > dual_feasibility_tolerance_) {
418  VLOG(2) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
419  dual_feasibility_tolerance_ = dual_residual_error;
420  }
421 }
422 
// Computes basic_objective_left_inverse_ = c_B.B^{-1} by left-solving the
// current factorized basis against the basic objective. Refreshes
// basic_objective_ first if it is stale.
void ReducedCosts::ComputeBasicObjectiveLeftInverse() {
  SCOPED_TIME_STAT(&stats_);
  if (recompute_basic_objective_) {
    ComputeBasicObjective();
  }
  // Seed the scattered vector with a dense copy; the cleared non_zeros marks
  // it as dense for the solve.
  basic_objective_left_inverse_.values = basic_objective_;
  basic_objective_left_inverse_.non_zeros.clear();
  basis_factorization_.LeftSolve(&basic_objective_left_inverse_);
  recompute_basic_objective_left_inverse_ = false;
  IF_STATS_ENABLED(stats_.basic_objective_left_inverse_density.Add(
      Density(basic_objective_left_inverse_.values)));

  // TODO(user): Estimate its accuracy by a few scalar products, and refactorize
  // if it is above a threshold?
}
438 
// Incrementally updates all reduced costs after the pivot
// entering_col <-> leaving_col (at leaving_row) with the given pivot value.
// Note that the update is such that the entering reduced cost is always set to
// 0.0. In particular, because of this we can step in the wrong direction for
// the dual method if the reduced cost is slightly infeasible.
void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
                                      ColIndex leaving_col,
                                      RowIndex leaving_row, Fractional pivot,
                                      UpdateRow* update_row) {
  DCHECK_NE(entering_col, leaving_col);
  DCHECK_NE(pivot, 0.0);
  // A full recomputation is pending; an incremental update would be wasted.
  if (recompute_reduced_costs_) return;

  // Note that this is precise because of the CheckPrecision().
  const Fractional entering_reduced_cost = reduced_costs_[entering_col];

  // Nothing to do if the entering reduced cost is 0.0.
  // This correspond to a dual degenerate pivot.
  if (entering_reduced_cost == 0.0) {
    VLOG(2) << "Reduced costs didn't change.";

    // TODO(user): the reduced costs may still be "precise" in this case, but
    // other parts of the code assume that if they are precise then the basis
    // was just refactorized in order to recompute them which is not the case
    // here. Clean this up.
    are_reduced_costs_precise_ = false;
    return;
  }

  are_reduced_costs_recomputed_ = false;
  are_reduced_costs_precise_ = false;
  update_row->ComputeUpdateRow(leaving_row);
  SCOPED_TIME_STAT(&stats_);

  // Update the leaving variable reduced cost.
  // '-pivot' is the value of the entering_edge at 'leaving_row'.
  // The edge of the 'leaving_col' in the new basis is equal to
  // 'entering_edge / -pivot'.
  const Fractional new_leaving_reduced_cost = entering_reduced_cost / -pivot;
  for (const ColIndex col : update_row->GetNonZeroPositions()) {
    const Fractional coeff = update_row->GetCoefficient(col);
    reduced_costs_[col] += new_leaving_reduced_cost * coeff;
  }
  reduced_costs_[leaving_col] = new_leaving_reduced_cost;

  // In the dual, since we compute the update before selecting the entering
  // variable, this cost is still in the update_position_list, so we make sure
  // it is 0 here.
  reduced_costs_[entering_col] = 0.0;
}
487 
489  const Fractional reduced_cost = reduced_costs_[col];
490  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
491  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
492  const Fractional tolerance = dual_feasibility_tolerance_;
493  return (can_increase.IsSet(col) && (reduced_cost < -tolerance)) ||
494  (can_decrease.IsSet(col) && (reduced_cost > tolerance));
495 }
496 
497 void ReducedCosts::UpdateBasicObjective(ColIndex entering_col,
498  RowIndex leaving_row) {
499  SCOPED_TIME_STAT(&stats_);
500  basic_objective_[RowToColIndex(leaving_row)] =
501  objective_[entering_col] + cost_perturbations_[entering_col];
502  recompute_basic_objective_left_inverse_ = true;
503 }
504 
505 void ReducedCosts::SetRecomputeReducedCostsAndNotifyWatchers() {
506  recompute_reduced_costs_ = true;
507  for (bool* watcher : watchers_) *watcher = true;
508 }
509 
// Builds a PrimalPrices tied to the given reduced costs and edge norms.
// Registers its recompute_ flag as a watcher on both, so any recomputation
// of the reduced costs or of the norms invalidates the cached prices.
PrimalPrices::PrimalPrices(absl::BitGenRef random,
                           const VariablesInfo& variables_info,
                           PrimalEdgeNorms* primal_edge_norms,
                           ReducedCosts* reduced_costs)
    : prices_(random),
      variables_info_(variables_info),
      primal_edge_norms_(primal_edge_norms),
      reduced_costs_(reduced_costs) {
  reduced_costs_->AddRecomputationWatcher(&recompute_);
  primal_edge_norms->AddRecomputationWatcher(&recompute_);
}
521 
// Incrementally refreshes the prices of the candidates touched by the pivot.
// Note that entering_col itself is not used here: the touched columns come
// from the update row.
void PrimalPrices::UpdateBeforeBasisPivot(ColIndex entering_col,
                                          UpdateRow* update_row) {
  // If we are recomputing everything when requested, no need to update.
  if (recompute_) return;

  // Note that the set of positions works because both the reduced costs
  // and the primal edge norms are updated on the same positions which are
  // given by the update_row.
  UpdateEnteringCandidates</*from_clean_state=*/false>(
      update_row->GetNonZeroPositions());
}
533 
535  if (recompute_) return;
536  if (reduced_costs_->IsValidPrimalEnteringCandidate(col)) {
537  const DenseRow& squared_norms = primal_edge_norms_->GetSquaredNorms();
538  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
539  DCHECK_NE(0.0, squared_norms[col]);
540  prices_.AddOrUpdate(col, Square(reduced_costs[col]) / squared_norms[col]);
541  } else {
542  prices_.Remove(col);
543  }
544 }
545 
547  // If we need a recomputation, we cannot assumes that the reduced costs are
548  // valid until we are about to recompute the prices.
549  if (recompute_) return;
550 
551  DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
552  prices_.Remove(col);
553 }
554 
556  if (recompute_) {
557  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
558  prices_.ClearAndResize(reduced_costs.size());
559  UpdateEnteringCandidates</*from_clean_state=*/true>(
560  variables_info_.GetIsRelevantBitRow());
561  recompute_ = false;
562  }
563  return prices_.GetMaximum();
564 }
565 
// A variable is an entering candidate if it can move in a direction that
// minimizes the objective. That is, its value needs to increase if its
// reduced cost is negative or it needs to decrease if its reduced cost is
// positive (see the IsValidPrimalEnteringCandidate() function). Note that
// this is the same as a dual-infeasible variable.
//
// When from_clean_state is true, prices_ was just cleared, so there is no
// stale entry to Remove() for non-candidates.
template <bool from_clean_state, typename ColumnsToUpdate>
void PrimalPrices::UpdateEnteringCandidates(const ColumnsToUpdate& cols) {
  const Fractional tolerance = reduced_costs_->GetDualFeasibilityTolerance();
  const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
  const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
  const DenseRow& squared_norms = primal_edge_norms_->GetSquaredNorms();
  const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts();
  for (const ColIndex col : cols) {
    const Fractional reduced_cost = reduced_costs[col];

    // Optimization for speed (The function is about 30% faster than the code in
    // IsValidPrimalEnteringCandidate() or a switch() on variable_status[col]).
    // This relies on the fact that (double1 > double2) returns a 1 or 0 result
    // when converted to an int. It also uses an XOR (which appears to be
    // faster) since the two conditions on the reduced cost are exclusive.
    const bool is_dual_infeasible = Bitset64<ColIndex>::ConditionalXorOfTwoBits(
        col, reduced_cost > tolerance, can_decrease, reduced_cost < -tolerance,
        can_increase);
    if (is_dual_infeasible) {
      DCHECK(reduced_costs_->IsValidPrimalEnteringCandidate(col));
      // Price = squared reduced cost scaled by the squared edge norm.
      const Fractional price = Square(reduced_cost) / squared_norms[col];
      prices_.AddOrUpdate(col, price);
    } else {
      DCHECK(!reduced_costs_->IsValidPrimalEnteringCandidate(col));
      if (!from_clean_state) prices_.Remove(col);
    }
  }
}
599 
600 } // namespace glop
601 } // namespace operations_research
const DenseRow & Transpose(const DenseColumn &col)
ColIndex RowToColIndex(RowIndex row)
Definition: lp_types.h:49
const DenseBitRow & GetIsBasicBitRow() const
Fractional ComputeMaximumDualInfeasibilityOnNonBoxedVariables()
void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col)
const DenseBitRow & GetCanDecreaseBitRow() const
ReducedCosts(const CompactSparseMatrix &matrix_, const DenseRow &objective, const RowToColMapping &basis, const VariablesInfo &variables_info, const BasisFactorization &basis_factorization, absl::BitGenRef random)
void SetNonBasicVariableCostToZero(ColIndex col, Fractional *current_cost)
static uint64_t ConditionalXorOfTwoBits(IndexType i, uint64_t use1, const Bitset64< IndexType > &set1, uint64_t use2, const Bitset64< IndexType > &set2)
Definition: bitset.h:643
const DenseBitRow & GetNotBasicBitRow() const
const DenseBitRow & GetCanIncreaseBitRow() const
#define VLOG(verboselevel)
Definition: base/logging.h:979
Fractional GetDualFeasibilityTolerance() const
ColIndex col
Definition: markowitz.cc:183
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
RowIndex row
Definition: markowitz.cc:182
Fractional PreciseScalarProduct(const DenseRowOrColumn &u, const DenseRowOrColumn2 &v)
Fractional TestEnteringReducedCostPrecision(ColIndex entering_col, const ScatteredColumn &direction)
void SetParameters(const GlopParameters &parameters)
int64_t max
Definition: alldiff_cst.cc:140
PrimalPrices(absl::BitGenRef random, const VariablesInfo &variables_info, PrimalEdgeNorms *primal_edge_norms, ReducedCosts *reduced_costs)
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row, const ScatteredColumn &direction, UpdateRow *update_row)
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:887
StrictITIVector< Index, Fractional > values
void UpdateBeforeBasisPivot(ColIndex entering_col, UpdateRow *update_row)
IntVar *const objective_
Definition: search.cc:2966
Fractional ColumnScalarProduct(ColIndex col, const DenseRow &vector) const
Definition: sparse.h:387
void AddOrUpdate(Index position, Fractional value)
Definition: pricing.h:185
const VariableTypeRow & GetTypeRow() const
const DenseBitRow & GetNonBasicBoxedVariables() const
RowIndex ColToRowIndex(ColIndex col)
Definition: lp_types.h:52
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:890
const ColIndexVector & GetNonZeroPositions() const
Definition: update_row.cc:167
double Density(const DenseRow &row)
int64_t delta
Definition: resource.cc:1692
Bitset64< ColIndex > DenseBitRow
Definition: lp_types.h:327
::PROTOBUF_NAMESPACE_ID::int32 num_omp_threads() const
#define DCHECK(condition)
Definition: base/logging.h:885
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:886
bool IsValidPrimalEnteringCandidate(ColIndex col) const
bool IsSet(IndexType i) const
Definition: bitset.h:485
Fractional Square(Fractional f)
Collection of objects used to extend the Constraint Solver library.
const VariableStatusRow & GetStatusRow() const
SatParameters parameters
static double DeterministicTimeForFpOperations(int64_t n)
Definition: lp_types.h:383
const DenseBitRow & GetIsRelevantBitRow() const
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
void SetAndDebugCheckThatColumnIsDualFeasible(ColIndex col)
bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col)