diff --git a/ortools/glop/entering_variable.cc b/ortools/glop/entering_variable.cc index 8492d479a8..5fdc4ccc8e 100644 --- a/ortools/glop/entering_variable.cc +++ b/ortools/glop/entering_variable.cc @@ -89,9 +89,8 @@ Status EnteringVariable::PrimalChooseEnteringColumn(ColIndex* entering_col) { Status EnteringVariable::DualChooseEnteringColumn( bool nothing_to_recompute, const UpdateRow& update_row, Fractional cost_variation, std::vector* bound_flip_candidates, - ColIndex* entering_col, Fractional* step) { + ColIndex* entering_col) { GLOP_RETURN_ERROR_IF_NULL(entering_col); - GLOP_RETURN_ERROR_IF_NULL(step); const DenseRow& update_coefficient = update_row.GetCoefficients(); const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); @@ -122,6 +121,12 @@ Status EnteringVariable::DualChooseEnteringColumn( reduced_costs_->GetDualFeasibilityTolerance(); Fractional harris_ratio = std::numeric_limits::max(); + // Like for the primal, we always allow a positive ministep, even if a + // variable is already infeasible by more than the tolerance. + const Fractional minimum_delta = + parameters_.degenerate_ministep_factor() * + reduced_costs_->GetDualFeasibilityTolerance(); + num_operations_ += 10 * update_row.GetNonZeroPositions().size(); for (const ColIndex col : update_row.GetNonZeroPositions()) { // We will add ratio * coeff to this column with a ratio positive or zero. 
@@ -137,7 +142,7 @@ Status EnteringVariable::DualChooseEnteringColumn( if (-reduced_costs[col] > harris_ratio * coeff) continue; harris_ratio = std::min( harris_ratio, (-reduced_costs[col] + harris_tolerance) / coeff); - harris_ratio = std::max(0.0, harris_ratio); + harris_ratio = std::max(minimum_delta / coeff, harris_ratio); } breakpoints_.push_back(ColWithRatio(col, -reduced_costs[col], coeff)); continue; @@ -150,7 +155,7 @@ Status EnteringVariable::DualChooseEnteringColumn( if (reduced_costs[col] > harris_ratio * -coeff) continue; harris_ratio = std::min( harris_ratio, (reduced_costs[col] + harris_tolerance) / -coeff); - harris_ratio = std::max(0.0, harris_ratio); + harris_ratio = std::max(minimum_delta / -coeff, harris_ratio); } breakpoints_.push_back(ColWithRatio(col, reduced_costs[col], -coeff)); continue; @@ -177,6 +182,7 @@ Status EnteringVariable::DualChooseEnteringColumn( *entering_col = kInvalidCol; bound_flip_candidates->clear(); + Fractional step = 0.0; Fractional best_coeff = -1.0; Fractional variation_magnitude = std::abs(cost_variation); equivalent_entering_choices_.clear(); @@ -222,9 +228,10 @@ Status EnteringVariable::DualChooseEnteringColumn( // negative. In this case we set it to 0.0, allowing any infeasible // position to enter the basis. This is quite important because its // helps in the choice of a stable pivot. - harris_ratio = std::max(harris_ratio, 0.0); + harris_ratio = + std::max(harris_ratio, minimum_delta / top.coeff_magnitude); - if (top.coeff_magnitude == best_coeff && top.ratio == *step) { + if (top.coeff_magnitude == best_coeff && top.ratio == step) { DCHECK_NE(*entering_col, kInvalidCol); equivalent_entering_choices_.push_back(top.col); } else { @@ -234,7 +241,7 @@ Status EnteringVariable::DualChooseEnteringColumn( // Note that the step is not directly used, so it is okay to leave it // negative. 
- *step = top.ratio; + step = top.ratio; } } @@ -268,48 +275,18 @@ Status EnteringVariable::DualChooseEnteringColumn( VLOG(1) << "Used bound flip to avoid bad pivot. Before: " << best_coeff << " now: " << std::abs(update_coefficient[col]); - const Fractional coeff = (cost_variation > 0.0) - ? update_coefficient[col] - : -update_coefficient[col]; *entering_col = col; - if (can_decrease.IsSet(col) && coeff > threshold) { - ColWithRatio temp(col, -reduced_costs[col], coeff); - *step = temp.ratio; - } - if (can_increase.IsSet(col) && coeff < -threshold) { - ColWithRatio temp(col, reduced_costs[col], -coeff); - *step = temp.ratio; - } break; } } - // If the step is 0.0, we make sure the reduced cost is 0.0 so - // UpdateReducedCosts() will not take a step that goes in the wrong way (a few - // experiments seems to indicate that this is not a good idea). See comment - // at the top of UpdateReducedCosts(). - // - // Note that ShiftCost() actually shifts the cost a bit more in order to do a - // non-zero step. This helps on degenerate problems. See the comment of - // ShiftCost() for more detail. - // - // TODO(user): Do not do that if we do not end up using this pivot? - if (*step <= 0.0) { - // In order to be mathematically consistent, we shift the cost of the - // entering column in such a way that its reduced cost is indeed zero. This - // is called cost-shifting or perturbation in the literature and it does - // really help on degenerate problems. The pertubation will be removed once - // the pertubed problem is solved to the optimal. 
- reduced_costs_->ShiftCost(*entering_col); - } return Status::OK(); } Status EnteringVariable::DualPhaseIChooseEnteringColumn( bool nothing_to_recompute, const UpdateRow& update_row, - Fractional cost_variation, ColIndex* entering_col, Fractional* step) { + Fractional cost_variation, ColIndex* entering_col) { GLOP_RETURN_ERROR_IF_NULL(entering_col); - GLOP_RETURN_ERROR_IF_NULL(step); const DenseRow& update_coefficient = update_row.GetCoefficients(); const DenseRow& reduced_costs = reduced_costs_->GetReducedCosts(); SCOPED_TIME_STAT(&stats_); @@ -319,12 +296,16 @@ Status EnteringVariable::DualPhaseIChooseEnteringColumn( breakpoints_.clear(); breakpoints_.reserve(update_row.GetNonZeroPositions().size()); - // Ratio test. const Fractional threshold = nothing_to_recompute ? parameters_.minimum_acceptable_pivot() : parameters_.ratio_test_zero_threshold(); const Fractional dual_feasibility_tolerance = reduced_costs_->GetDualFeasibilityTolerance(); + const Fractional harris_tolerance = + parameters_.harris_tolerance_ratio() * dual_feasibility_tolerance; + const Fractional minimum_delta = + parameters_.degenerate_ministep_factor() * dual_feasibility_tolerance; + const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow(); const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow(); const VariableTypeRow& variable_type = variables_info_.GetTypeRow(); @@ -350,25 +331,31 @@ Status EnteringVariable::DualPhaseIChooseEnteringColumn( : -update_coefficient[col]; // Only proceed if there is a transition, note that if reduced_costs[col] - // is close to zero, then the variable is supposed to be dual-feasible. + // is close to zero, then the variable is counted as dual-feasible. if (std::abs(reduced_costs[col]) <= dual_feasibility_tolerance) { // Continue if the variation goes in the dual-feasible direction. 
if (coeff > 0 && !can_decrease.IsSet(col)) continue; if (coeff < 0 && !can_increase.IsSet(col)) continue; - // Note that here, a variable which is already dual-infeasible will still - // have a positive ratio. This may sounds weird, but the idea is to put - // first in the sorted breakpoint list a variable which has a reduced - // costs close to zero in order to minimize the magnitude of a step in the - // wrong direction. + // For an already dual-infeasible variable, we allow to push it until + // the harris_tolerance. But if it is past that or close to it, we also + // always enforce a minimum push. + if (coeff * reduced_costs[col] > 0.0) { + breakpoints_.push_back(ColWithRatio( + col, + std::max(minimum_delta, + harris_tolerance - std::abs(reduced_costs[col])), + std::abs(coeff))); + continue; + } } else { // If the two are of the same sign, there is no transition, skip. - if (coeff * reduced_costs[col] > 0) continue; + if (coeff * reduced_costs[col] > 0.0) continue; } // We are sure there is a transition, add it to the set of breakpoints. - breakpoints_.push_back( - ColWithRatio(col, std::abs(reduced_costs[col]), std::abs(coeff))); + breakpoints_.push_back(ColWithRatio( + col, std::abs(reduced_costs[col]) + harris_tolerance, std::abs(coeff))); } // Process the breakpoints in priority order. @@ -382,17 +369,17 @@ Status EnteringVariable::DualPhaseIChooseEnteringColumn( // Select the last breakpoint that still improves the infeasibility and has a // numerically stable pivot. *entering_col = kInvalidCol; - *step = -1.0; + Fractional step = -1.0; Fractional improvement = std::abs(cost_variation); while (!breakpoints_.empty()) { const ColWithRatio top = breakpoints_.front(); // We keep the greatest coeff_magnitude for the same ratio. 
- DCHECK(top.ratio > *step || - (top.ratio == *step && top.coeff_magnitude <= pivot_magnitude)); - if (top.ratio > *step && top.coeff_magnitude >= pivot_magnitude) { + DCHECK(top.ratio > step || + (top.ratio == step && top.coeff_magnitude <= pivot_magnitude)); + if (top.ratio > step && top.coeff_magnitude >= pivot_magnitude) { *entering_col = top.col; - *step = top.ratio; + step = top.ratio; pivot_magnitude = top.coeff_magnitude; } improvement -= top.coeff_magnitude; diff --git a/ortools/glop/entering_variable.h b/ortools/glop/entering_variable.h index e92a521986..0295a70621 100644 --- a/ortools/glop/entering_variable.h +++ b/ortools/glop/entering_variable.h @@ -72,7 +72,7 @@ class EnteringVariable { ABSL_MUST_USE_RESULT Status DualChooseEnteringColumn( bool nothing_to_recompute, const UpdateRow& update_row, Fractional cost_variation, std::vector* bound_flip_candidates, - ColIndex* entering_col, Fractional* step); + ColIndex* entering_col); // Dual feasibility phase (i.e. phase I) ratio test. // Similar to the optimization phase test, but allows a step that increases @@ -80,7 +80,7 @@ class EnteringVariable { // the one that minimize the sum of infeasibilities when applied. ABSL_MUST_USE_RESULT Status DualPhaseIChooseEnteringColumn( bool nothing_to_recompute, const UpdateRow& update_row, - Fractional cost_variation, ColIndex* entering_col, Fractional* step); + Fractional cost_variation, ColIndex* entering_col); // Sets the pricing parameters. This does not change the pricing rule. 
void SetParameters(const GlopParameters& parameters); diff --git a/ortools/glop/reduced_costs.cc b/ortools/glop/reduced_costs.cc index 97c6c9dbeb..53637ce87a 100644 --- a/ortools/glop/reduced_costs.cc +++ b/ortools/glop/reduced_costs.cc @@ -307,19 +307,35 @@ void ReducedCosts::PerturbCosts() { } } -void ReducedCosts::ShiftCost(ColIndex col) { +void ReducedCosts::ShiftCostIfNeeded(bool increasing_rc_is_needed, + ColIndex col) { SCOPED_TIME_STAT(&stats_); - const Fractional kToleranceFactor = parameters_.degenerate_ministep_factor(); - const Fractional small_step = - dual_feasibility_tolerance_ * - (reduced_costs_[col] > 0.0 ? kToleranceFactor : -kToleranceFactor); - IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + small_step)); - cost_perturbations_[col] -= reduced_costs_[col] + small_step; - reduced_costs_[col] = -small_step; + + // We always want a minimum step size, so if we have a negative step or + // a step that is really small, we will shift the cost of the given column. + const Fractional minimum_delta = + parameters_.degenerate_ministep_factor() * dual_feasibility_tolerance_; + if (increasing_rc_is_needed && reduced_costs_[col] <= -minimum_delta) return; + if (!increasing_rc_is_needed && reduced_costs_[col] >= minimum_delta) return; + + const Fractional delta = + increasing_rc_is_needed ? 
minimum_delta : -minimum_delta; + IF_STATS_ENABLED(stats_.cost_shift.Add(reduced_costs_[col] + delta)); + cost_perturbations_[col] -= reduced_costs_[col] + delta; + reduced_costs_[col] = -delta; + has_cost_shift_ = true; +} + +bool ReducedCosts::StepIsDualDegenerate(bool increasing_rc_is_needed, + ColIndex col) { + if (increasing_rc_is_needed && reduced_costs_[col] >= 0.0) return true; + if (!increasing_rc_is_needed && reduced_costs_[col] <= 0.0) return true; + return false; } void ReducedCosts::ClearAndRemoveCostShifts() { SCOPED_TIME_STAT(&stats_); + has_cost_shift_ = false; cost_perturbations_.AssignToZero(matrix_.num_cols()); recompute_basic_objective_ = true; recompute_basic_objective_left_inverse_ = true; diff --git a/ortools/glop/reduced_costs.h b/ortools/glop/reduced_costs.h index 88a29a271f..e63b0154bd 100644 --- a/ortools/glop/reduced_costs.h +++ b/ortools/glop/reduced_costs.h @@ -133,15 +133,23 @@ class ReducedCosts { void PerturbCosts(); // Shifts the cost of the given non-basic column such that its current reduced - // cost becomes 0.0. Actually, this shifts the cost a bit more, so that the - // reduced cost becomes slightly of the other sign. + // cost becomes 0.0. Actually, this shifts the cost a bit more according to + // the increasing_rc_is_needed parameter. // // This is explained in Koberstein's thesis (section 6.2.2.3) and helps on // degenerate problems. As of july 2013, this allowed to pass dano3mip and // dbic1 without cycling forever. Note that contrary to what is explained in // the thesis, we do not shift any other variable costs. If any becomes // infeasible, it will be selected and shifted in subsequent iterations. - void ShiftCost(ColIndex col); + void ShiftCostIfNeeded(bool increasing_rc_is_needed, ColIndex col); + + // Returns true if ShiftCostIfNeeded() was applied since the last + // ClearAndRemoveCostShifts(). 
+ bool HasCostShift() const { return has_cost_shift_; } + + // Returns true if this step direction makes the given column even more + // infeasible. This is just used for reporting stats. + bool StepIsDualDegenerate(bool increasing_rc_is_needed, ColIndex col); // Removes any cost shift and cost perturbation. This also lazily forces a // recomputation of all the derived quantities. This effectively resets the @@ -263,6 +271,8 @@ class ReducedCosts { bool are_reduced_costs_precise_; bool are_reduced_costs_recomputed_; + bool has_cost_shift_ = false; + // Values of the objective on the columns of the basis. The order is given by // the basis_ mapping. It is usually denoted as 'c_B' in the literature . DenseRow basic_objective_; diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 56003e4836..7c61846f09 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -438,9 +438,25 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { problem_status_ = ProblemStatus::IMPRECISE; } } else if (primal_infeasibility > primal_tolerance) { + if (num_optims == parameters_.max_number_of_reoptimizations()) { + if (log_info) { + LOG(INFO) << "The primal infeasibility is still higher than the " "requested internal tolerance, but the maximum " "number of optimization is reached."; + } + break; + } if (log_info) LOG(INFO) << "Re-optimizing with dual simplex ... "; problem_status_ = ProblemStatus::DUAL_FEASIBLE; } else if (dual_infeasibility > dual_tolerance) { + if (num_optims == parameters_.max_number_of_reoptimizations()) { + if (log_info) { + LOG(INFO) << "The dual infeasibility is still higher than the " "requested internal tolerance, but the maximum " "number of optimization is reached."; + } + break; + } if (log_info) LOG(INFO) << "Re-optimizing with primal simplex ... 
"; problem_status_ = ProblemStatus::PRIMAL_FEASIBLE; } @@ -450,7 +466,7 @@ Status RevisedSimplex::Solve(const LinearProgram& lp, TimeLimit* time_limit) { // Check that the return status is "precise". // - // TODO(user): we curretnly skip the DUAL_INFEASIBLE status because the + // TODO(user): we currently skip the DUAL_INFEASIBLE status because the // quantities are not up to date in this case. if (parameters_.change_status_to_imprecise() && problem_status_ != ProblemStatus::DUAL_INFEASIBLE) { @@ -2778,7 +2794,6 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, // Entering variable. ColIndex entering_col; - Fractional ratio; while (true) { // TODO(user): we may loop a bit more than the actual number of iteration. @@ -2876,8 +2891,12 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, &leaving_row, &cost_variation, &target_bound)); } if (leaving_row == kInvalidRow) { - if (!basis_factorization_.IsRefactorized()) { + // TODO(user): integrate this with the main "re-optimization" loop. + // Also distinguish cost perturbation and shifts? 
+ if (!basis_factorization_.IsRefactorized() || + reduced_costs_.HasCostShift()) { VLOG(1) << "Optimal reached, double checking."; + reduced_costs_.ClearAndRemoveCostShifts(); refactorize = true; continue; } @@ -2889,6 +2908,7 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, if (num_dual_infeasible_positions_ == 0) { problem_status_ = ProblemStatus::DUAL_FEASIBLE; } else { + VLOG(1) << "DUAL infeasible in dual phase I."; problem_status_ = ProblemStatus::DUAL_INFEASIBLE; } } else { @@ -2901,11 +2921,11 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, if (feasibility_phase) { GLOP_RETURN_IF_ERROR(entering_variable_.DualPhaseIChooseEnteringColumn( reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation, - &entering_col, &ratio)); + &entering_col)); } else { GLOP_RETURN_IF_ERROR(entering_variable_.DualChooseEnteringColumn( reduced_costs_.AreReducedCostsPrecise(), update_row_, cost_variation, - &bound_flip_candidates_, &entering_col, &ratio)); + &bound_flip_candidates_, &entering_col)); } // No entering_col: dual unbounded (i.e. primal infeasible). @@ -2977,8 +2997,22 @@ Status RevisedSimplex::DualMinimize(bool feasibility_phase, return Status::OK(); } + // Before we update the reduced costs, if its sign is already dual + // infeasible and the update direction will make it worse we make sure the + // reduced cost is 0.0 so UpdateReducedCosts() will not take a step that + // goes in the wrong direction (a few experiments seem to indicate that + // this is not a good idea). See comment at the top of UpdateReducedCosts(). + // + // Note that ShiftCostIfNeeded() actually shifts the cost a bit more in + // order to do a non-zero step. This helps on degenerate problems. Like the + // perturbation, we will remove all these shifts at the end. 
+ const bool increasing_rc_is_needed = + (cost_variation > 0.0) == (entering_coeff > 0.0); + reduced_costs_.ShiftCostIfNeeded(increasing_rc_is_needed, entering_col); + IF_STATS_ENABLED({ - if (ratio == 0.0) { + if (reduced_costs_.StepIsDualDegenerate(increasing_rc_is_needed, + entering_col)) { timer.AlsoUpdate(&iteration_stats_.degenerate); } else { timer.AlsoUpdate(&iteration_stats_.normal);