Scale the objective before applying the revised simplex algorithm.

This commit is contained in:
Vincent Furnon
2015-05-30 01:12:55 +02:00
parent 41b4799646
commit 50e00a3b10
11 changed files with 437 additions and 240 deletions

View File

@@ -438,6 +438,9 @@ BopOptimizerBase::Status LinearRelaxation::Optimize(
problem_already_solved_ = true;
if (lp_status == glop::ProblemStatus::INIT) {
return BopOptimizerBase::LIMIT_REACHED;
}
if (lp_status != glop::ProblemStatus::OPTIMAL &&
lp_status != glop::ProblemStatus::IMPRECISE &&
lp_status != glop::ProblemStatus::PRIMAL_FEASIBLE) {
@@ -473,10 +476,9 @@ BopOptimizerBase::Status LinearRelaxation::Optimize(
CHECK(learned_info->solution.IsFeasible());
return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
}
return BopOptimizerBase::INFORMATION_FOUND;
}
return BopOptimizerBase::LIMIT_REACHED;
return BopOptimizerBase::INFORMATION_FOUND;
}
// TODO(user): It is possible to stop the search earlier using the glop

View File

@@ -32,9 +32,10 @@
#include "util/fp_utils.h"
#include "util/proto_tools.h"
DEFINE_bool(lp_solver_enable_fp_exceptions, true,
"NaNs and division by zero produce errors. "
"It is recommended to turn this off for production usage.");
DEFINE_bool(lp_solver_enable_fp_exceptions, false,
"When true, NaNs and division by zero produce errors. "
"This is very useful for debugging, but incompatible with LLVM. "
"It is recommended to set this to false for production usage.");
DEFINE_bool(lp_dump_to_proto_file, false,
"Tells whether to dump the problem to a protobuf file.");
DEFINE_bool(lp_dump_compressed_file, true,
@@ -138,6 +139,7 @@ ProblemStatus LPSolver::Solve(const LinearProgram& lp) {
// Make an internal copy of the problem for the preprocessing.
VLOG(1) << "Initial problem: " << lp.GetDimensionString();
VLOG(1) << "Objective stats: " << lp.GetObjectiveStatsString();
initial_num_entries_ = lp.num_entries();
initial_num_rows_ = lp.num_constraints();
initial_num_cols_ = lp.num_variables();
@@ -163,6 +165,22 @@ void LPSolver::Clear() {
revised_simplex_.reset(nullptr);
}
namespace {
// Computes the "real" problem objective from the one without offset nor
// scaling.
Fractional ProblemObjectiveValue(const LinearProgram& lp, Fractional value) {
return lp.objective_scaling_factor() * (value + lp.objective_offset());
}
// Returns the allowed error magnitude for something that should evaluate to
// value under the given tolerance.
Fractional AllowedError(Fractional tolerance, Fractional value) {
return tolerance * std::max(1.0, std::abs(value));
}
} // namespace
// TODO(user): Try to also check the precision of an INFEASIBLE or UNBOUNDED
// return status.
ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram& lp,
const ProblemSolution& solution) {
if (!IsProblemSolutionConsistent(lp, solution)) {
@@ -178,47 +196,61 @@ ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram& lp,
constraint_statuses_ = solution.constraint_statuses;
status_ = solution.status;
// Objective before possibly moving the primal/dual values inside their
// bounds.
ComputeReducedCosts(lp);
const Fractional primal_objective_value = ComputeObjective(lp);
const Fractional dual_objective_value = ComputeDualObjective(lp);
VLOG(1) << "Primal objective (before moving primal/dual values) = "
<< StringPrintf("%.15E",
ProblemObjectiveValue(lp, primal_objective_value));
VLOG(1) << "Dual objective (before moving primal/dual values) = "
<< StringPrintf("%.15E",
ProblemObjectiveValue(lp, dual_objective_value));
// If needed, move the primal/dual values inside their bounds.
if (status_ == ProblemStatus::OPTIMAL &&
parameters_.provide_strong_optimal_guarantee()) {
MovePrimalValuesWithinBounds(lp);
MoveDualValuesWithinBounds(lp);
}
// The reported objective to the user.
problem_objective_value_ = ProblemObjectiveValue(lp, ComputeObjective(lp));
VLOG(1) << "Primal objective (after moving primal/dual values) = "
<< StringPrintf("%.15E", problem_objective_value_);
ComputeReducedCosts(lp);
// Note that this needs primal_values_ and reduced_costs_ to be up to date.
if (status_ == ProblemStatus::OPTIMAL &&
parameters_.provide_strong_optimal_guarantee()) {
Fractional cost_perturbation =
GetMaxCostPerturbationToEnforceOptimality(lp);
VLOG(1) << "Cost perturbation = " << cost_perturbation;
if (cost_perturbation > parameters_.solution_feasibility_tolerance()) {
VLOG(1) << "The needed cost perturbation is too high !!";
status_ = ProblemStatus::IMPRECISE;
}
}
ComputeConstraintActivities(lp);
const double primal_infeasibility = ComputePrimalValueInfeasibility(lp);
const double dual_infeasibility = ComputeDualValueInfeasibility(lp);
const double primal_residual = ComputeActivityInfeasibility(lp);
const double dual_residual = ComputeReducedCostInfeasibility(lp);
// These will be set to true if the associated "infeasibility" is too large.
//
// The tolerance used is the parameter solution_feasibility_tolerance. To be
// somewhat independent of the original problem scaling, the thresholds used
// depend of the quantity involved and of its coordinates:
// - tolerance * std::max(1.0, abs(cost[col])) when a reduced cost is infeasible.
// - tolerance * std::max(1.0, abs(bound)) when a bound is crossed.
// - tolerance for an infeasible dual value (because the limit is always 0.0).
bool rhs_perturbation_is_too_large = false;
bool cost_perturbation_is_too_large = false;
bool primal_infeasibility_is_too_large = false;
bool dual_infeasibility_is_too_large = false;
bool primal_residual_is_too_large = false;
bool dual_residual_is_too_large = false;
const double objective_value = ComputeObjective(lp);
const double dual_objective_value = ComputeDualObjective(lp);
objective_value_with_offset_ = objective_value + lp.objective_offset();
// If the primal/dual values were moved to the bounds, then the primal/dual
// infeasibilities should be exactly zero (but not the residuals).
if (status_ == ProblemStatus::OPTIMAL &&
parameters_.provide_strong_optimal_guarantee()) {
if (primal_infeasibility != 0.0 || dual_infeasibility != 0.0) {
VLOG(1) << "If you see this, there is a bug in "
"MovePrimalValuesWithinBounds() or in "
"MoveDualValuesWithinBounds().";
}
}
// Compute all the infeasibilities and update the Booleans above.
ComputeMaxRhsPerturbationToEnforceOptimality(lp,
&rhs_perturbation_is_too_large);
ComputeMaxCostPerturbationToEnforceOptimality(
lp, &cost_perturbation_is_too_large);
const double primal_infeasibility =
ComputePrimalValueInfeasibility(lp, &primal_infeasibility_is_too_large);
const double dual_infeasibility =
ComputeDualValueInfeasibility(lp, &dual_infeasibility_is_too_large);
const double primal_residual =
ComputeActivityInfeasibility(lp, &primal_residual_is_too_large);
const double dual_residual =
ComputeReducedCostInfeasibility(lp, &dual_residual_is_too_large);
// TODO(user): the name is not really consistent since in practice those are
// the "residual" since the primal/dual infeasibility are zero when
@@ -227,49 +259,60 @@ ProblemStatus LPSolver::LoadAndVerifySolution(const LinearProgram& lp,
std::max(primal_infeasibility, primal_residual);
max_absolute_dual_infeasibility_ =
std::max(dual_infeasibility, dual_residual);
// Check precision and optimality of the result. See Chvatal pp. 61-62.
//
// For that we check that:
// - The primal solution is primal-feasible (within a small tolerance).
// - The dual solution is dual-feasible (within a small tolerance).
// - The difference between the primal and dual objectives is small.
//
// Note that measuring infeasibilities using an absolute error makes sense
// because what is important is by how much a given bound was crossed.
// However, for the objective gap, it is trickier because if the objective
// value is for example 1E10, then we cannot hope for an absolute precision
// better than 1E-6. Note that this relative precision is computed without
// taking into account a possibly non-zero objective offset.
VLOG(1) << "Max. primal infeasibility = "
<< max_absolute_primal_infeasibility_;
VLOG(1) << "Max. dual infeasibility = " << max_absolute_dual_infeasibility_;
VLOG(1) << "Objective (with offset) = " << objective_value_with_offset_;
VLOG(1) << "Objective gap = " << objective_value - dual_objective_value;
// Now that all the relevant quantities are computed, we check the precision
// and optimality of the result. See Chvatal pp. 61-62. If any of the tests
// fail, we return the IMPRECISE status.
const double objective_error_ub = ComputeMaxExpectedObjectiveError(lp);
VLOG(1) << "Objective error <= " << objective_error_ub;
if (status_ == ProblemStatus::OPTIMAL &&
parameters_.provide_strong_optimal_guarantee()) {
// If the primal/dual values were moved to the bounds, then the primal/dual
// infeasibilities should be exactly zero (but not the residuals).
if (primal_infeasibility != 0.0 || dual_infeasibility != 0.0) {
LOG(ERROR) << "Primal/dual values have been moved to their bounds. "
<< "Therefore the primal/dual infeasibilities should be "
<< "exactly zero (but not the residuals). If this message "
<< "appears, there is probably a bug in "
<< "MovePrimalValuesWithinBounds() or in "
<< "MoveDualValuesWithinBounds().";
}
if (rhs_perturbation_is_too_large) {
VLOG(1) << "The needed rhs perturbation is too large !!";
status_ = ProblemStatus::IMPRECISE;
}
if (cost_perturbation_is_too_large) {
VLOG(1) << "The needed cost perturbation is too large !!";
status_ = ProblemStatus::IMPRECISE;
}
}
// Note that we compare the values without offset nor scaling. We also need to
// compare them before we move the primal/dual values, otherwise we lose some
// precision since the values are modified independently of each other.
if (status_ == ProblemStatus::OPTIMAL) {
if (std::abs(primal_objective_value - dual_objective_value) >
objective_error_ub) {
VLOG(1) << "The objective gap of the final solution is too large.";
status_ = ProblemStatus::IMPRECISE;
}
}
if ((status_ == ProblemStatus::OPTIMAL ||
status_ == ProblemStatus::PRIMAL_FEASIBLE) &&
(max_absolute_primal_infeasibility_ >
parameters_.solution_feasibility_tolerance())) {
VLOG(1) << "The final solution primal infeasibility is too high !!";
(primal_residual_is_too_large || primal_infeasibility_is_too_large)) {
VLOG(1) << "The primal infeasibility of the final solution is too large.";
status_ = ProblemStatus::IMPRECISE;
}
if ((status_ == ProblemStatus::OPTIMAL ||
status_ == ProblemStatus::DUAL_FEASIBLE) &&
(max_absolute_dual_infeasibility_ >
parameters_.solution_feasibility_tolerance())) {
VLOG(1) << "The final solution dual infeasibility is too high !!";
(dual_residual_is_too_large || dual_infeasibility_is_too_large)) {
VLOG(1) << "The dual infeasibility of the final solution is too large.";
status_ = ProblemStatus::IMPRECISE;
}
if (status_ == ProblemStatus::OPTIMAL) {
if (!AreWithinAbsoluteOrRelativeTolerances(
objective_value, dual_objective_value,
parameters_.solution_objective_gap_tolerance(),
parameters_.solution_objective_gap_tolerance())) {
VLOG(1) << "The final solution objective gap is too high !!";
status_ = ProblemStatus::IMPRECISE;
}
}
may_have_multiple_solutions_ =
(status_ == ProblemStatus::OPTIMAL) ?
@@ -314,7 +357,7 @@ bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram& lp) {
}
Fractional LPSolver::GetObjectiveValue() const {
return objective_value_with_offset_;
return problem_objective_value_;
}
Fractional LPSolver::GetMaximumPrimalInfeasibility() const {
@@ -341,33 +384,42 @@ double LPSolver::DeterministicTime() const {
void LPSolver::MovePrimalValuesWithinBounds(const LinearProgram& lp) {
const ColIndex num_cols = lp.num_variables();
DCHECK_EQ(num_cols, primal_values_.size());
Fractional error = 0.0;
for (ColIndex col(0); col < num_cols; ++col) {
const Fractional lower_bound = lp.variable_lower_bounds()[col];
const Fractional upper_bound = lp.variable_upper_bounds()[col];
DCHECK_LE(lower_bound, upper_bound);
error = std::max(error, primal_values_[col] - upper_bound);
error = std::max(error, lower_bound - primal_values_[col]);
primal_values_[col] = std::min(primal_values_[col], upper_bound);
primal_values_[col] = std::max(primal_values_[col], lower_bound);
}
VLOG(1) << "Max. primal values move = " << error;
}
void LPSolver::MoveDualValuesWithinBounds(const LinearProgram& lp) {
const RowIndex num_rows = lp.num_constraints();
DCHECK_EQ(num_rows, dual_values_.size());
const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
Fractional error = 0.0;
for (RowIndex row(0); row < num_rows; ++row) {
const Fractional lower_bound = lp.constraint_lower_bounds()[row];
const Fractional upper_bound = lp.constraint_upper_bounds()[row];
// For a minimization problem, we want a lower bound.
Fractional minimization_dual_value = optimization_sign * dual_values_[row];
if (lower_bound == -kInfinity) {
if (minimization_dual_value > 0.0) minimization_dual_value = 0.0;
if (lower_bound == -kInfinity && minimization_dual_value > 0.0) {
error = std::max(error, minimization_dual_value);
minimization_dual_value = 0.0;
}
if (upper_bound == kInfinity) {
if (minimization_dual_value < 0.0) minimization_dual_value = 0.0;
if (upper_bound == kInfinity && minimization_dual_value < 0.0) {
error = std::max(error, -minimization_dual_value);
minimization_dual_value = 0.0;
}
dual_values_[row] = optimization_sign * minimization_dual_value;
}
VLOG(1) << "Max. dual values move = " << error;
}
void LPSolver::ResizeSolution(RowIndex num_rows, ColIndex num_cols) {
@@ -384,6 +436,8 @@ void LPSolver::ResizeSolution(RowIndex num_rows, ColIndex num_cols) {
RunAndPushIfRelevant(std::unique_ptr<Preprocessor>(new name()), #name, \
time_limit)
// TODO(user): This should probably be moved to a MainLpPreprocessor class so it
// can be used in other places.
void LPSolver::RunPreprocessors(const TimeLimit& time_limit) {
if (parameters_.use_preprocessing()) {
RUN_PREPROCESSOR(ShiftVariableBoundsPreprocessor);
@@ -423,6 +477,10 @@ void LPSolver::RunPreprocessors(const TimeLimit& time_limit) {
// If DualizerPreprocessor was run, we need to do some extra preprocessing.
// This is because it currently adds a lot of zero-cost singleton columns.
const int old_stack_size = preprocessors_.size();
// TODO(user): We probably want to scale the costs before and after this
// preprocessor so that the rhs/objective of the dual are with a good
// magnitude.
RUN_PREPROCESSOR(DualizerPreprocessor);
if (old_stack_size != preprocessors_.size()) {
RUN_PREPROCESSOR(SingletonPreprocessor);
@@ -633,6 +691,11 @@ bool LPSolver::IsProblemSolutionConsistent(
}
break;
case ConstraintStatus::FREE:
if (dual_value != 0.0) {
VLOG(1) << "Constraint " << row << " is FREE, but its dual value is "
<< dual_value << " instead of 0.";
return false;
}
if (lb != -kInfinity || ub != kInfinity) {
LogConstraintStatusError(row, status, lb, ub);
return false;
@@ -650,77 +713,71 @@ bool LPSolver::IsProblemSolutionConsistent(
return true;
}
Fractional LPSolver::GetMaxCostPerturbationToEnforceOptimality(
const LinearProgram& lp) {
// This computes by how much the objective must be perturbed to enforce the
// following complementary slackness conditions:
// - Reduced cost is exactly zero for FREE and BASIC variables.
// - Reduced cost is of the correct sign for variables at their bounds.
Fractional LPSolver::ComputeMaxCostPerturbationToEnforceOptimality(
const LinearProgram& lp, bool* is_too_large) {
Fractional max_cost_correction = 0.0;
const ColIndex num_cols = lp.num_variables();
const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
for (ColIndex col(0); col < num_cols; ++col) {
const Fractional variable_value = primal_values_[col];
const Fractional lower_bound = lp.variable_lower_bounds()[col];
const Fractional upper_bound = lp.variable_upper_bounds()[col];
DCHECK_GE(variable_value, lower_bound);
DCHECK_LE(variable_value, upper_bound);
// We correct the reduced cost, so we have a minimization problem and
// thus the dual objective value will be a lower bound of the primal
// objective.
const Fractional reduced_cost = optimization_sign * reduced_costs_[col];
// Perturbation needed to move the cost to 0.0
const Fractional cost_delta = std::abs(reduced_cost);
// We are enforcing the complementary slackness conditions, see the comment
// in ComputeDualObjective() for more explanations. By default, we move the
// cost to zero, and if we decide to move the variable to the bound, then we
// continue the loop.
if (upper_bound == kInfinity) {
if (lower_bound != -kInfinity) {
if (reduced_cost > 0) {
// We move the variable to the bound OR we move the cost to zero.
const Fractional variable_delta = primal_values_[col] - lower_bound;
if (variable_delta < cost_delta) {
primal_values_[col] = lower_bound;
continue;
}
}
}
} else {
if (lower_bound == -kInfinity) {
if (reduced_cost < 0) {
// We move the variable to the bound OR we move the cost to zero.
const Fractional variable_delta = upper_bound - primal_values_[col];
if (variable_delta < cost_delta) {
primal_values_[col] = upper_bound;
continue;
}
}
} else {
// Boxed variable, 3 options.
const Fractional variable_delta_up = upper_bound - variable_value;
const Fractional variable_delta_down = variable_value - lower_bound;
if (variable_delta_down < cost_delta ||
variable_delta_up < cost_delta) {
if (variable_delta_up < variable_delta_down) {
primal_values_[col] = upper_bound;
} else {
primal_values_[col] = lower_bound;
}
continue;
}
}
const VariableStatus status = variable_statuses_[col];
if (status == VariableStatus::BASIC || status == VariableStatus::FREE ||
(status == VariableStatus::AT_UPPER_BOUND && reduced_cost > 0.0) ||
(status == VariableStatus::AT_LOWER_BOUND && reduced_cost < 0.0)) {
max_cost_correction =
std::max(max_cost_correction, std::abs(reduced_cost));
*is_too_large |=
std::abs(reduced_cost) >
AllowedError(tolerance, lp.objective_coefficients()[col]);
}
max_cost_correction = std::max(max_cost_correction, cost_delta);
}
VLOG(1) << "Max. cost perturbation = " << max_cost_correction;
return max_cost_correction;
}
// This computes by how much the rhs must be perturbed to enforce the fact that
// the constraint activities exactly reflect their status.
Fractional LPSolver::ComputeMaxRhsPerturbationToEnforceOptimality(
const LinearProgram& lp, bool* is_too_large) {
Fractional max_rhs_correction = 0.0;
const RowIndex num_rows = lp.num_constraints();
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
for (RowIndex row(0); row < num_rows; ++row) {
const Fractional lower_bound = lp.constraint_lower_bounds()[row];
const Fractional upper_bound = lp.constraint_upper_bounds()[row];
const Fractional activity = constraint_activities_[row];
const ConstraintStatus status = constraint_statuses_[row];
Fractional rhs_error = 0.0;
Fractional allowed_error = 0.0;
if (status == ConstraintStatus::AT_LOWER_BOUND || activity < lower_bound) {
rhs_error = std::abs(activity - lower_bound);
allowed_error = AllowedError(tolerance, lower_bound);
} else if (status == ConstraintStatus::AT_UPPER_BOUND ||
activity > upper_bound) {
rhs_error = std::abs(activity - upper_bound);
allowed_error = AllowedError(tolerance, upper_bound);
}
max_rhs_correction = std::max(max_rhs_correction, rhs_error);
*is_too_large |= rhs_error > allowed_error;
}
VLOG(1) << "Max. rhs perturbation = " << max_rhs_correction;
return max_rhs_correction;
}
void LPSolver::ComputeConstraintActivities(const LinearProgram& lp) {
const RowIndex num_rows = lp.num_constraints();
const ColIndex num_cols = lp.num_variables();
constraint_activities_.assign(num_rows, 0.0);
DCHECK_EQ(num_cols, primal_values_.size());
constraint_activities_.assign(num_rows, 0.0);
for (ColIndex col(0); col < num_cols; ++col) {
lp.GetSparseColumn(col)
.AddMultipleToDenseVector(primal_values_[col], &constraint_activities_);
@@ -748,10 +805,23 @@ double LPSolver::ComputeObjective(const LinearProgram& lp) {
return sum.Value();
}
// By the duality theorem, the dual "objective" is a bound on the primal
// objective obtained by taking the linear combination of the constraints
// given by dual_values_.
//
// As it is written now, this has no real precise meaning since we ignore
// infeasible reduced costs. This is almost the same as computing the objective
// of the perturbed problem, but then we don't use the perturbed rhs. It is
// just here as an extra "consistency" check.
//
// Note(user): We could actually compute an EXACT lower bound for the cost of
// the non-cost perturbed problem. The idea comes from "Safe bounds in linear
// and mixed-integer linear programming", Arnold Neumaier, Oleg Shcherbina,
// Math Prog, 2003. Note that this requires having some variable bounds that may
// not be in the original problem so that the current dual solution is always
// feasible. It also involves changing the rounding mode to obtain exact
// confidence intervals on the reduced costs.
double LPSolver::ComputeDualObjective(const LinearProgram& lp) {
// By the duality theorem, the dual objective is a bound on the primal
// objective obtained by taking the linear combination of the constraints
// given by dual_values_.
KahanSum dual_objective;
// Compute the part coming from the row constraints.
@@ -763,28 +833,27 @@ double LPSolver::ComputeDualObjective(const LinearProgram& lp) {
// We correct the optimization_sign so we have to compute a lower bound.
const Fractional corrected_value = optimization_sign * dual_values_[row];
if (corrected_value > 0 && lower_bound != -kInfinity) {
if (corrected_value > 0.0 && lower_bound != -kInfinity) {
dual_objective.Add(dual_values_[row] * lower_bound);
}
if (corrected_value < 0 && upper_bound != kInfinity) {
if (corrected_value < 0.0 && upper_bound != kInfinity) {
dual_objective.Add(dual_values_[row] * upper_bound);
}
}
// We also need to take into account the reduced costs: for a given column
// associated to a variable x, we want to find a lower bound for c.x (where c
// is the objective coefficient for this column). If we write a.x the linear
// combination of the constraints at this column we have:
// For a given column associated to a variable x, we want to find a lower
// bound for c.x (where c is the objective coefficient for this column). If we
// write a.x the linear combination of the constraints at this column we have:
// (c + a - c) * x = a * x, and so
// c * x = a * x + (c - a) * x
// Now, if we suppose for example that the reduced cost 'c - a' is positive
// and that x is lower-bounded by 'l' then the best bound we can get is
// c * x >= a * x + (c - a) * l.
// and that x is lower-bounded by 'lb' then the best bound we can get is
// c * x >= a * x + (c - a) * lb.
//
// Note: when summing over all variables, the left side is the primal
// objective and the right side is the dual objective. In particular, a
// necessary and sufficient condition for both objectives to be the same is
// that all the single variable inequalities above be equalities.
// objective and the right side is a lower bound to the objective. In
// particular, a necessary and sufficient condition for both objectives to be
// the same is that all the single variable inequalities above be equalities.
// This is possible only if c == a or if x is at its bound (modulo the
// optimization_sign of the reduced cost), or both (this is one side of the
// complementary slackness conditions, see Chvatal p. 62).
@@ -797,38 +866,43 @@ double LPSolver::ComputeDualObjective(const LinearProgram& lp) {
// thus a dual objective that is a lower bound of the primal objective.
const Fractional reduced_cost = optimization_sign * reduced_costs_[col];
// We do not do any correction if the reduced cost is 'infeasible', which is
// the same as computing the objective of the perturbed problem.
Fractional correction = 0.0;
if (lower_bound != -kInfinity && reduced_cost > 0.0) {
// Do not do any correction if the reduced cost is 'infeasible', which is
// the same as computing the objective of the perturbed problem.
if (variable_statuses_[col] == VariableStatus::AT_LOWER_BOUND &&
reduced_cost > 0.0) {
correction = reduced_cost * lower_bound;
} else if (
upper_bound != kInfinity && reduced_cost < 0.0
#ifdef MEMORY_SANITIZER
// Note(user): The redundant condition with isfinite() below is a
// workaround for Memory Sanitizer breaking the code and triggering a
// SIGFPE. The SIGFPE disappears if the data are printed. This condition
// seems to bring the data "back into sight" for Memory Sanitizer. This
// workaround is brittle as any change in the code itself. For example,
// changing the code a bit makes it necessary to change the variable to
// pass to isfinite(). (!)
// TODO(user): Remove this when Memory Sanitizer matures.
&&
std::isfinite(upper_bound)
#endif
) {
// Same note as above.
} else if (variable_statuses_[col] == VariableStatus::AT_UPPER_BOUND &&
reduced_cost < 0.0) {
correction = reduced_cost * upper_bound;
} else if (variable_statuses_[col] == VariableStatus::FIXED_VALUE) {
correction = reduced_cost * upper_bound;
}
// Now apply the correction in the right direction!
dual_objective.Add(optimization_sign * correction);
}
return dual_objective.Value();
}
double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp) {
double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram& lp) {
const ColIndex num_cols = lp.num_variables();
DCHECK_EQ(num_cols, primal_values_.size());
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
Fractional primal_objective_error = 0.0;
for (ColIndex col(0); col < num_cols; ++col) {
// TODO(user): Be more precise since the non-BASIC variables are exactly at
// their bounds, so for them the error bound is just the term magnitude
// times std::numeric_limits<double>::epsilon() with KahanSum.
primal_objective_error += std::abs(lp.objective_coefficients()[col]) *
AllowedError(tolerance, primal_values_[col]);
}
return primal_objective_error;
}
double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp,
bool* is_too_large) {
double infeasibility = 0.0;
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
const ColIndex num_cols = lp.num_variables();
for (ColIndex col(0); col < num_cols; ++col) {
const Fractional lower_bound = lp.variable_lower_bounds()[col];
@@ -836,35 +910,40 @@ double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp) {
DCHECK(IsFinite(primal_values_[col]));
if (lower_bound == upper_bound) {
infeasibility =
std::max(infeasibility, std::abs(primal_values_[col] - upper_bound));
const Fractional error = std::abs(primal_values_[col] - upper_bound);
infeasibility = std::max(infeasibility, error);
*is_too_large |= error > AllowedError(tolerance, upper_bound);
continue;
}
if (primal_values_[col] > upper_bound) {
infeasibility =
std::max(infeasibility, primal_values_[col] - upper_bound);
const Fractional error = primal_values_[col] - upper_bound;
infeasibility = std::max(infeasibility, error);
*is_too_large |= error > AllowedError(tolerance, upper_bound);
}
if (primal_values_[col] < lower_bound) {
infeasibility =
std::max(infeasibility, lower_bound - primal_values_[col]);
const Fractional error = lower_bound - primal_values_[col];
infeasibility = std::max(infeasibility, error);
*is_too_large |= error > AllowedError(tolerance, lower_bound);
}
}
return infeasibility;
}
double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp) {
double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp,
bool* is_too_large) {
double infeasibility = 0.0;
int num_problematic_rows(0);
const RowIndex num_rows = lp.num_constraints();
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
for (RowIndex row(0); row < num_rows; ++row) {
const Fractional activity = constraint_activities_[row];
const Fractional lower_bound = lp.constraint_lower_bounds()[row];
const Fractional upper_bound = lp.constraint_upper_bounds()[row];
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
DCHECK(IsFinite(activity));
if (lower_bound == upper_bound) {
if (std::abs(activity - upper_bound) > tolerance) {
if (std::abs(activity - upper_bound) >
AllowedError(tolerance, upper_bound)) {
VLOG(2) << "Row " << row.value() << " has activity " << activity
<< " which is different from " << upper_bound << " by "
<< activity - upper_bound;
@@ -875,7 +954,7 @@ double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp) {
}
if (activity > upper_bound) {
const Fractional row_excess = activity - upper_bound;
if (row_excess > tolerance) {
if (row_excess > AllowedError(tolerance, upper_bound)) {
VLOG(2) << "Row " << row.value() << " has activity " << activity
<< ", exceeding its upper bound " << upper_bound << " by "
<< row_excess;
@@ -885,7 +964,7 @@ double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp) {
}
if (activity < lower_bound) {
const Fractional row_deficit = lower_bound - activity;
if (row_deficit > tolerance) {
if (row_deficit > AllowedError(tolerance, lower_bound)) {
VLOG(2) << "Row " << row.value() << " has activity " << activity
<< ", below its lower bound " << lower_bound << " by "
<< row_deficit;
@@ -894,11 +973,16 @@ double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp) {
infeasibility = std::max(infeasibility, row_deficit);
}
}
VLOG(1) << "Number of problematic rows: " << num_problematic_rows;
if (num_problematic_rows > 0) {
*is_too_large = true;
VLOG(1) << "Number of infeasible rows = " << num_problematic_rows;
}
return infeasibility;
}
double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp) {
double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp,
bool* is_too_large) {
const Fractional allowed_error = parameters_.solution_feasibility_tolerance();
const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
double infeasibility = 0.0;
const RowIndex num_rows = lp.num_constraints();
@@ -909,19 +993,23 @@ double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp) {
DCHECK(IsFinite(dual_value));
const Fractional minimization_dual_value = optimization_sign * dual_value;
if (lower_bound == -kInfinity) {
*is_too_large |= minimization_dual_value > allowed_error;
infeasibility = std::max(infeasibility, minimization_dual_value);
}
if (upper_bound == kInfinity) {
*is_too_large |= -minimization_dual_value > allowed_error;
infeasibility = std::max(infeasibility, -minimization_dual_value);
}
}
return infeasibility;
}
double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp) {
double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp,
bool* is_too_large) {
const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0;
double infeasibility = 0.0;
const ColIndex num_cols = lp.num_variables();
const Fractional tolerance = parameters_.solution_feasibility_tolerance();
for (ColIndex col(0); col < num_cols; ++col) {
const Fractional reduced_cost = reduced_costs_[col];
const Fractional lower_bound = lp.variable_lower_bounds()[col];
@@ -929,10 +1017,14 @@ double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp) {
DCHECK(IsFinite(reduced_cost));
const Fractional minimization_reduced_cost =
optimization_sign * reduced_cost;
const Fractional allowed_error =
AllowedError(tolerance, lp.objective_coefficients()[col]);
if (lower_bound == -kInfinity) {
*is_too_large |= minimization_reduced_cost > allowed_error;
infeasibility = std::max(infeasibility, minimization_reduced_cost);
}
if (upper_bound == kInfinity) {
*is_too_large |= -minimization_reduced_cost > allowed_error;
infeasibility = std::max(infeasibility, -minimization_reduced_cost);
}
}

View File

@@ -70,7 +70,7 @@ class LPSolver {
ProblemStatus LoadAndVerifySolution(const LinearProgram& lp,
const ProblemSolution& solution);
// Returns the objective value of the solution with its offset.
// Returns the objective value of the solution with its offset and scaling.
Fractional GetObjectiveValue() const;
// Accessors to information related to variables.
@@ -83,6 +83,9 @@ class LPSolver {
// Accessors to information related to constraints. The activity of a
// constraint is the sum of its linear terms evaluated with variables taking
// their values at the current solution.
//
// Note that the dual_values() do not take into account an eventual objective
// scaling of the solved LinearProgram.
const DenseColumn& dual_values() const { return dual_values_; }
const DenseColumn& constraint_activities() const {
return constraint_activities_;
@@ -166,24 +169,6 @@ class LPSolver {
// its bounds (a.x = r or a.x = l) and has its dual value close to zero.
bool IsOptimalSolutionOnFacet(const LinearProgram& lp);
// For each variable, either moves it to one of its bounds or computes by how
// much the cost on this variable needs to change to enforce optimality of the
// solution. Returns the infinity norm of the cost perturbation.
// This requires that:
// - The primal variables are within their bounds.
// - The dual variables are within their bounds.
// Once the primal variable values have been perturbed, we need to compute by
// how much the right hand side needs to be perturbed to get a FEASIBLE primal
// solution.
//
// If we perturb the cost by the first perturbation and the right hand side by
// the second perturbation, then the pair (primal values, dual values) is an
// OPTIMAL solution of the perturbed problem.
//
// TODO(user): Now that we have the correct status information, we no longer
// need that. We also need to revisit how we compute the infeasibilities.
Fractional GetMaxCostPerturbationToEnforceOptimality(const LinearProgram& lp);
// Computes derived quantities from the solution.
void ComputeReducedCosts(const LinearProgram& lp);
void ComputeConstraintActivities(const LinearProgram& lp);
@@ -193,13 +178,46 @@ class LPSolver {
double ComputeObjective(const LinearProgram& lp);
double ComputeDualObjective(const LinearProgram& lp);
// Given a relative precision on the primal values of up to
// solution_feasibility_tolerance(), this returns an upper bound on the
// expected precision of the objective.
double ComputeMaxExpectedObjectiveError(const LinearProgram& lp);
// Returns the max absolute cost perturbation (resp. rhs perturbation) so that
// the pair (primal values, dual values) is an EXACT optimal solution to the
// perturbed problem. Note that this assumes that
// MovePrimalValuesWithinBounds() and MoveDualValuesWithinBounds() have
// already been called. The Boolean is_too_large is set to true if any of the
// perturbations exceed the tolerance (which depends on the coordinate).
//
// These bounds are computed using the variable and constraint statuses by
// enforcing the complementary slackness optimal conditions. Note that they
// are almost the same as ComputeActivityInfeasibility() and
// ComputeReducedCostInfeasibility() but look for optimality rather than just
// feasibility.
//
// Note(user): We could get EXACT bounds on these perturbations by changing
// the rounding mode appropriately during these computations. But this is
// probably not needed.
Fractional ComputeMaxCostPerturbationToEnforceOptimality(
const LinearProgram& lp, bool* is_too_large);
Fractional ComputeMaxRhsPerturbationToEnforceOptimality(
const LinearProgram& lp, bool* is_too_large);
// Computes the maximum of the infeasibilities associated with each value.
// The returned infeasibilities are the maximum of the "absolute" errors of
// each vector coefficients.
double ComputePrimalValueInfeasibility(const LinearProgram& lp);
double ComputeActivityInfeasibility(const LinearProgram& lp);
double ComputeDualValueInfeasibility(const LinearProgram& lp);
double ComputeReducedCostInfeasibility(const LinearProgram& lp);
//
// These functions also set is_too_large to true if any infeasibility is
// greater than the tolerance (which depends on the coordinate).
double ComputePrimalValueInfeasibility(const LinearProgram& lp,
bool* is_too_large);
double ComputeActivityInfeasibility(const LinearProgram& lp,
bool* is_too_large);
double ComputeDualValueInfeasibility(const LinearProgram& lp,
bool* is_too_large);
double ComputeReducedCostInfeasibility(const LinearProgram& lp,
bool* is_too_large);
// Dimension of the linear program given to the last Solve().
// This is used for displaying purpose only.
@@ -232,7 +250,7 @@ class LPSolver {
// Quantities computed from the solution and the linear program.
DenseRow reduced_costs_;
DenseColumn constraint_activities_;
Fractional objective_value_with_offset_;
Fractional problem_objective_value_;
bool may_have_multiple_solutions_;
Fractional max_absolute_primal_infeasibility_;
Fractional max_absolute_dual_infeasibility_;

View File

@@ -124,10 +124,11 @@ message GlopParameters {
//
// Note that this value can temporarily increase during the execution of the
// algorithm if the estimated precision of the reduced costs is higher than
// this tolerance.
// this tolerance. Note also that we always scale the costs so that the
// maximum cost magnitude is 1.0 (in the presolve step).
//
// This is also known as the optimality tolerance in other solvers.
optional double dual_feasibility_tolerance = 11 [default = 1e-8];
optional double dual_feasibility_tolerance = 11 [default = 1e-10];
// During the primal simplex (resp. dual simplex), the coefficients of the
// direction (resp. update row) with a magnitude lower than this threshold are
@@ -188,31 +189,34 @@ message GlopParameters {
// of variables.
optional double dualizer_threshold = 21 [default = 1.5];
// When the problem status is OPTIMAL, we check the infeasibility of the
// solution and change the status to ABNORMAL_BASIS if the primal/dual max
// absolute infeasibility is greater than the solution_feasibility_tolerance
// or if the relative difference between the primal/dual objective is greater
// than the solution_objective_gap_tolerance.
// When the problem status is OPTIMAL, we check the optimality using this
// relative tolerance and change the status to IMPRECISE if an issue is
// detected.
//
// The tolerance is "relative" in the sense that our thresholds are:
// - tolerance * max(1.0, abs(bound)) for crossing a given bound.
// - tolerance * max(1.0, abs(cost)) for an infeasible reduced cost.
// - tolerance for an infeasible dual value.
optional double solution_feasibility_tolerance = 22 [default = 1e-6];
optional double solution_objective_gap_tolerance = 23 [default = 1e-6];
// If true, then when the solver returns a solution with an OPTIMAL status,
// we can guarantee that:
// - The primal variables are in their bounds.
// - The dual variables are in their bounds.
// - If we modify each component of the right-hand side by at most epsilon,
// and each component of the objective function by at most epsilon, then
// the pair (primal values, dual values) is an EXACT optimal solution of
// the perturbed problem.
// - The epsilon above is smaller than solution_feasibility_tolerance (*).
// - If we modify each component of the right-hand side a bit and each
// component of the objective function a bit, then the pair (primal values,
// dual values) is an EXACT optimal solution of the perturbed problem.
// - The modifications above are smaller than the associated tolerances as
// defined in the comment for solution_feasibility_tolerance (*).
//
// (*): This is the only place where the guarantee is not tight since we
// compute the upper bounds with scalar product of the primal/dual
// solution and the initial problem coefficients with only double precision.
//
// Note that whether or not this option is true, we still check the
// primal/dual infeasibility and objective gap as explained in the comment of
// solution_feasibility_tolerance and solution_objective_gap_tolerance.
// primal/dual infeasibility and objective gap. However if it is false, we
// don't move the primal/dual values within their bounds and leave them
// untouched.
optional bool provide_strong_optimal_guarantee = 24 [default = true];
// Threshold for LU-factorization: for stability reasons, the magnitude of the

View File

@@ -3110,20 +3110,45 @@ void ShiftVariableBoundsPreprocessor::StoreSolution(
// ScalingPreprocessor
// --------------------------------------------------------
bool ScalingPreprocessor::Run(LinearProgram* linear_program) {
RETURN_VALUE_IF_NULL(linear_program, false);
bool ScalingPreprocessor::Run(LinearProgram* lp) {
RETURN_VALUE_IF_NULL(lp, false);
if (!parameters_.use_scaling()) return false;
// Save the linear program bounds before scaling them.
const ColIndex num_cols = linear_program->num_variables();
const ColIndex num_cols = lp->num_variables();
variable_lower_bounds_.assign(num_cols, 0.0);
variable_upper_bounds_.assign(num_cols, 0.0);
for (ColIndex col(0); col < num_cols; ++col) {
variable_lower_bounds_[col] = linear_program->variable_lower_bounds()[col];
variable_upper_bounds_[col] = linear_program->variable_upper_bounds()[col];
variable_lower_bounds_[col] = lp->variable_lower_bounds()[col];
variable_upper_bounds_[col] = lp->variable_upper_bounds()[col];
}
linear_program->Scale(&scaler_);
lp->Scale(&scaler_);
// We scale the costs to always have a maximum cost magnitude of 1.0. Note
// that this makes a lot of sense since internally we use absolute tolerances.
// We don't want to have a completely different behavior just because the user
// changed the units in the objective for instance.
cost_scaling_factor_ = 0.0;
for (ColIndex col(0); col < num_cols; ++col) {
cost_scaling_factor_ = std::max(
cost_scaling_factor_, std::abs(lp->objective_coefficients()[col]));
}
VLOG(1) << "Objective stats (before objective scaling): "
<< lp->GetObjectiveStatsString();
if (cost_scaling_factor_ == 0.0) {
// This is needed for pure feasibility problems.
cost_scaling_factor_ = 1.0;
} else {
for (ColIndex col(0); col < num_cols; ++col) {
lp->SetObjectiveCoefficient(
col, lp->objective_coefficients()[col] / cost_scaling_factor_);
}
}
VLOG(1) << "Objective stats: " << lp->GetObjectiveStatsString();
lp->SetObjectiveScalingFactor(lp->objective_scaling_factor() *
cost_scaling_factor_);
lp->SetObjectiveOffset(lp->objective_offset() / cost_scaling_factor_);
return true;
}
@@ -3131,6 +3156,9 @@ void ScalingPreprocessor::StoreSolution(ProblemSolution* solution) const {
RETURN_IF_NULL(solution);
scaler_.ScaleRowVector(false, &(solution->primal_values));
scaler_.ScaleColumnVector(false, &(solution->dual_values));
for (RowIndex row(0); row < solution->dual_values.size(); ++row) {
solution->dual_values[row] *= cost_scaling_factor_;
}
// Make sure the variables are at their exact bounds according to their status.
// This just removes a really low error (about 1e-15) but allows keeping the

View File

@@ -878,6 +878,7 @@ class ScalingPreprocessor : public Preprocessor {
private:
DenseRow variable_lower_bounds_;
DenseRow variable_upper_bounds_;
Fractional cost_scaling_factor_;
SparseMatrixScaler scaler_;
DISALLOW_COPY_AND_ASSIGN(ScalingPreprocessor);

View File

@@ -353,12 +353,12 @@ void ReducedCosts::ComputeReducedCosts() {
// It is not reasonable to have a dual tolerance lower than the current
// dual_residual_error, otherwise we may never terminate (This is happening on
// dfl001.mps with a high dual_feasibility_tolerance). Note that since we
// dfl001.mps with a low dual_feasibility_tolerance). Note that since we
// recompute the reduced costs with maximum precision before really exiting,
// it is fine to do a couple of iterations with a high zero tolerance.
dual_feasibility_tolerance_ = parameters_.dual_feasibility_tolerance();
if (dual_residual_error > dual_feasibility_tolerance_) {
VLOG(1) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
VLOG(2) << "Changing dual_feasibility_tolerance to " << dual_residual_error;
dual_feasibility_tolerance_ = dual_residual_error;
}
}

View File

@@ -672,31 +672,39 @@ bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
}
objective_[col] = 0.0;
}
// Sets the members needed to display the objective correctly.
objective_offset_ = lp.objective_offset();
is_maximization_problem_ = lp.IsMaximizationProblem();
objective_scaling_factor_ = lp.objective_scaling_factor();
if (lp.IsMaximizationProblem()) {
objective_offset_ = -objective_offset_;
objective_scaling_factor_ = -objective_scaling_factor_;
}
return is_objective_unchanged;
}
void RevisedSimplex::InitializeObjectiveLimit(const LinearProgram& lp) {
const Fractional shifted_lower_limit =
parameters_.objective_lower_limit() - objective_offset_;
parameters_.objective_lower_limit() / lp.objective_scaling_factor() -
lp.objective_offset();
const Fractional shifted_upper_limit =
parameters_.objective_upper_limit() - objective_offset_;
parameters_.objective_upper_limit() / lp.objective_scaling_factor() -
lp.objective_offset();
if (lp.IsMaximizationProblem()) {
if (parameters_.use_dual_simplex()) {
objective_limit_ = -shifted_lower_limit *
(1.0 + parameters_.solution_objective_gap_tolerance());
(1.0 + parameters_.solution_feasibility_tolerance());
} else {
objective_limit_ = -shifted_upper_limit *
(1.0 - parameters_.solution_objective_gap_tolerance());
(1.0 - parameters_.solution_feasibility_tolerance());
}
} else {
if (parameters_.use_dual_simplex()) {
objective_limit_ = shifted_upper_limit *
(1.0 + parameters_.solution_objective_gap_tolerance());
(1.0 + parameters_.solution_feasibility_tolerance());
} else {
objective_limit_ = shifted_lower_limit *
(1.0 - parameters_.solution_objective_gap_tolerance());
(1.0 - parameters_.solution_feasibility_tolerance());
}
}
}
@@ -2588,7 +2596,7 @@ Fractional RevisedSimplex::ComputeInitialProblemObjectiveValue() const {
SCOPED_TIME_STAT(&function_stats_);
const Fractional sum = PreciseScalarProduct(
objective_, Transpose(variable_values_.GetDenseRow()));
return (is_maximization_problem_ ? -sum : sum) + objective_offset_;
return objective_scaling_factor_ * (sum + objective_offset_);
}
void RevisedSimplex::SetParameters(const GlopParameters& parameters) {

View File

@@ -271,7 +271,8 @@ class RevisedSimplex {
Fractional ComputeObjectiveValue() const;
// Returns the current objective of the linear program given to Solve() using
// the initial costs, maximization direction and objective offset.
// the initial costs, maximization direction, objective offset and objective
// scaling factor.
Fractional ComputeInitialProblemObjectiveValue() const;
// Assigns names to variables. Variables in the input will be named
@@ -564,12 +565,11 @@ class RevisedSimplex {
// Indexed by column number. Used in Phase-II.
DenseRow objective_;
// Objective offset and optimization direction of the linear program given to
// Solve(). This is used to display the correct objective values in the logs
// (see ComputeInitialProblemObjectiveValue()) and to compute the correct
// objective_limit_ (see InitializeObjectiveLimit()).
// Objective offset and scaling factor of the linear program given to Solve().
// This is used to display the correct objective values in the logs with
// ComputeInitialProblemObjectiveValue().
Fractional objective_offset_;
bool is_maximization_problem_;
Fractional objective_scaling_factor_;
// Array of values representing variable bounds. Indexed by column number.
DenseRow lower_bound_;

View File

@@ -120,6 +120,7 @@ LinearProgram::LinearProgram()
variable_table_(),
constraint_table_(),
objective_offset_(0.0),
objective_scaling_factor_(1.0),
maximize_(false),
columns_are_known_to_be_clean_(true),
transpose_matrix_is_consistent_(true),
@@ -146,6 +147,7 @@ void LinearProgram::Clear() {
maximize_ = false;
objective_offset_ = 0.0;
objective_scaling_factor_ = 1.0;
columns_are_known_to_be_clean_ = true;
transpose_matrix_is_consistent_ = true;
integer_variables_list_is_consistent_ = true;
@@ -290,6 +292,13 @@ void LinearProgram::SetObjectiveOffset(Fractional objective_offset) {
objective_offset_ = objective_offset;
}
// Records the objective scaling factor. The value must be finite and
// strictly positive (IsValid() rejects anything else): the real objective
// of the problem is factor * (objective + offset), so a non-positive
// factor would make it meaningless.
void LinearProgram::SetObjectiveScalingFactor(
    Fractional objective_scaling_factor) {
  DCHECK(IsFinite(objective_scaling_factor));
  DCHECK_GT(objective_scaling_factor, 0.0);
  objective_scaling_factor_ = objective_scaling_factor;
}
// Defines the optimization direction. When maximize is true (resp. false),
// the objective is maximized (resp. minimized).
void LinearProgram::SetMaximizationProblem(bool maximize) {
  maximize_ = maximize;
}
@@ -374,6 +383,25 @@ std::string LinearProgram::GetDimensionString() const {
num_entries().value());
}
std::string LinearProgram::GetObjectiveStatsString() const {
int64 num_non_zeros = 0;
Fractional min_value = +kInfinity;
Fractional max_value = -kInfinity;
for (ColIndex col(0); col < objective_coefficients_.size(); ++col) {
const Fractional value = objective_coefficients_[col];
if (value == 0) continue;
min_value = std::min(min_value, value);
max_value = std::max(max_value, value);
++num_non_zeros;
}
if (num_non_zeros == 0) {
return "No objective term. This is a pure feasibility problem.";
} else {
return StringPrintf("%lld non-zeros, range [%e, %e]", num_non_zeros,
min_value, max_value);
}
}
bool LinearProgram::IsSolutionFeasible(
const DenseRow& solution, const Fractional absolute_tolerance) const {
if (solution.size() != num_variables()) return false;
@@ -554,8 +582,10 @@ void LinearProgram::PopulateFromDual(const LinearProgram& dual,
// be a maximization problem.
SetMaximizationProblem(true);
// The objective offset is the same.
// Taking the dual does not change the offset nor the objective scaling
// factor.
SetObjectiveOffset(dual.objective_offset());
SetObjectiveScalingFactor(dual.objective_scaling_factor());
// Create the dual variables y, with bounds depending on the type
// of constraints in the primal.
@@ -700,6 +730,7 @@ void LinearProgram::PopulateNameObjectiveAndVariablesFromLinearProgram(
maximize_ = linear_program.maximize_;
objective_offset_ = linear_program.objective_offset_;
objective_scaling_factor_ = linear_program.objective_scaling_factor_;
columns_are_known_to_be_clean_ =
linear_program.columns_are_known_to_be_clean_;
name_ = linear_program.name_;
@@ -777,6 +808,8 @@ void LinearProgram::Swap(LinearProgram* linear_program) {
std::swap(maximize_, linear_program->maximize_);
std::swap(objective_offset_, linear_program->objective_offset_);
std::swap(objective_scaling_factor_,
linear_program->objective_scaling_factor_);
std::swap(columns_are_known_to_be_clean_,
linear_program->columns_are_known_to_be_clean_);
std::swap(transpose_matrix_is_consistent_,
@@ -893,6 +926,8 @@ void LinearProgram::DeleteRows(const DenseBooleanColumn& row_to_delete) {
bool LinearProgram::IsValid() const {
if (!IsFinite(objective_offset_)) return false;
if (!IsFinite(objective_scaling_factor_)) return false;
if (objective_scaling_factor_ <= 0.0) return false;
const ColIndex num_cols = num_variables();
for (ColIndex col(0); col < num_cols; ++col) {
if (!AreBoundsValid(variable_lower_bounds()[col],

View File

@@ -111,8 +111,11 @@ class LinearProgram {
// It is set to 0.0 by default.
void SetObjectiveCoefficient(ColIndex col, Fractional value);
// Defines the objective offset. It is 0.0 by default.
// Define the objective offset (0.0 by default) and scaling factor (positive
// and equal to 1.0 by default). This is mainly used for displaying purpose
// and the real objective is factor * (objective + offset).
void SetObjectiveOffset(Fractional objective_offset);
void SetObjectiveScalingFactor(Fractional objective_scaling_factor);
// Defines the optimization direction. When maximize is true (resp. false),
// the objective is maximized (resp. minimized). The default is false.
@@ -222,9 +225,11 @@ class LinearProgram {
// maximization problem.
Fractional GetObjectiveCoefficientForMinimizationVersion(ColIndex col) const;
// Returns the objective offset, i.e. value of the objective when all
// variables are set to zero.
// Returns the objective offset and scaling factor.
Fractional objective_offset() const { return objective_offset_; }
Fractional objective_scaling_factor() const {
return objective_scaling_factor_;
}
// Tests if the solution is LP-feasible within the given tolerance,
// i.e., satisfies all linear constraints within the absolute tolerance level.
@@ -237,6 +242,9 @@ class LinearProgram {
// A short std::string with the problem dimension.
std::string GetDimensionString() const;
// A short line with some stats on the objective coefficients.
std::string GetObjectiveStatsString() const;
// Returns a stringified LinearProgram. We use the LP file format used by
// lp_solve (see http://lpsolve.sourceforge.net/5.1/index.htm).
std::string Dump() const;
@@ -450,6 +458,7 @@ class LinearProgram {
// Offset of the objective, i.e. value of the objective when all variables
// are set to zero.
Fractional objective_offset_;
Fractional objective_scaling_factor_;
// Boolean true (resp. false) when the problem is a maximization
// (resp. minimization) problem.