reorganize info sharing in parallel search

This commit is contained in:
Laurent Perron
2018-12-21 13:59:58 +01:00
parent 769d9d3ff4
commit 3055dcaa50
11 changed files with 366 additions and 293 deletions

View File

@@ -1622,6 +1622,7 @@ objs/sat/integer_search.$O: ortools/sat/integer_search.cc \
ortools/glop/variable_values.h ortools/lp_data/lp_print_utils.h \
ortools/lp_data/sparse_row.h ortools/lp_data/matrix_scaler.h \
ortools/sat/cuts.h ortools/sat/linear_constraint.h \
ortools/sat/integer_expr.h ortools/sat/precedences.h \
ortools/sat/linear_constraint_manager.h ortools/sat/util.h | $(OBJ_DIR)/sat
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Ssat$Sinteger_search.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Sinteger_search.$O
@@ -1715,7 +1716,8 @@ objs/sat/linear_programming_constraint.$O: \
ortools/sat/restart.h ortools/sat/sat_decision.h \
ortools/util/integer_pq.h ortools/util/rev.h \
ortools/util/saturated_arithmetic.h ortools/util/sorted_interval_list.h \
ortools/sat/linear_constraint.h ortools/sat/linear_constraint_manager.h \
ortools/sat/linear_constraint.h ortools/sat/integer_expr.h \
ortools/sat/precedences.h ortools/sat/linear_constraint_manager.h \
ortools/glop/preprocessor.h \
ortools/graph/strongly_connected_components.h | $(OBJ_DIR)/sat
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Ssat$Slinear_programming_constraint.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Slinear_programming_constraint.$O

View File

@@ -1178,7 +1178,6 @@ CpSolverResponse SolveCpModelInternal(
const CpModelProto& model_proto, bool is_real_solve,
const std::function<void(const CpSolverResponse&)>&
external_solution_observer,
bool watch_objective_lower_bound,
SharedBoundsManager* shared_bounds_manager, Model* model) {
// Timing.
WallTimer wall_timer;
@@ -1401,9 +1400,15 @@ CpSolverResponse SolveCpModelInternal(
external_solution_observer(response);
};
if (watch_objective_lower_bound && model_proto.has_objective()) {
// Objective bounds reporting and sharing.
ObjectiveSynchronizationHelper* helper =
model->GetOrCreate<ObjectiveSynchronizationHelper>();
const bool worker_is_in_parallel_search = model->Get<WorkerInfo>() != nullptr;
if (!model->GetOrCreate<SatParameters>()->use_lns() &&
model_proto.has_objective()) {
// Detect sequential mode, register callbacks in that case.
if (model->Get<WorkerInfo>() == nullptr) { // Sequential mode.
if (!worker_is_in_parallel_search) {
model->GetOrCreate<WorkerInfo>()->global_timer = &wall_timer;
model->GetOrCreate<WorkerInfo>()->worker_id = 0;
auto* integer_trail = model->Get<IntegerTrail>();
@@ -1428,15 +1433,31 @@ CpSolverResponse SolveCpModelInternal(
.value());
};
ObjectiveSynchronizationHelper* helper =
model->GetOrCreate<ObjectiveSynchronizationHelper>();
helper->get_external_best_objective = std::move(get_objective_value);
helper->get_external_best_bound = std::move(get_objective_best_bound);
helper->broadcast_lower_bound = false;
}
RegisterVariableBoundsLevelZeroWatcher(
&model_proto, external_solution_observer, VLOG_IS_ON(1), objective_var,
shared_bounds_manager, model);
// Watch improved objective best bounds in regular search, or core based
// search. It should be disabled for LNS.
RegisterObjectiveBestBoundExport(model_proto, external_solution_observer,
VLOG_IS_ON(1), objective_var, model);
}
// Import objective bounds.
// TODO(user): Support bounds import in LNS and Core based search.
if (model->GetOrCreate<SatParameters>()->share_objective_bounds() &&
worker_is_in_parallel_search) {
RegisterObjectiveBoundsImport(objective_var, model);
}
// Level zero variable bounds sharing.
if (shared_bounds_manager != nullptr) {
RegisterVariableBoundsLevelZeroExport(model_proto, shared_bounds_manager,
model);
RegisterVariableBoundsLevelZeroImport(model_proto, shared_bounds_manager,
model);
}
// Load solution hint.
@@ -1606,7 +1627,6 @@ void PostsolveResponse(const CpModelProto& model_proto,
}
const CpSolverResponse postsolve_response = SolveCpModelInternal(
mapping_proto, false, [](const CpSolverResponse&) {},
/*watch_objective_lower_bound=*/false,
/*shared_bounds_manager=*/nullptr, &postsolve_model);
CHECK_EQ(postsolve_response.status(), CpSolverStatus::FEASIBLE);
@@ -1819,7 +1839,6 @@ CpSolverResponse SolveCpModelWithLNS(
} else {
response =
SolveCpModelInternal(model_proto, /*is_real_solve=*/true, observer,
/*watch_objective_lower_bound=*/false,
/*shared_bounds_manager=*/nullptr, model);
}
if (response.status() != CpSolverStatus::FEASIBLE) {
@@ -1918,7 +1937,6 @@ CpSolverResponse SolveCpModelWithLNS(
local_response = SolveCpModelInternal(
local_problem, /*is_real_solve=*/true,
[](const CpSolverResponse& response) {},
/*watch_objective_lower_bound=*/false,
/*shared_bounds_manager=*/nullptr, &local_model);
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping,
&local_response);
@@ -2044,7 +2062,6 @@ CpSolverResponse SolveCpModelParallel(
stopped);
const CpSolverResponse local_response = SolveCpModelInternal(
model_proto, true, [](const CpSolverResponse& response) {},
/*watch_objective_lower_bound=*/false,
/*shared_bounds_manager=*/nullptr, &local_model);
absl::MutexLock lock(&mutex);
@@ -2141,7 +2158,8 @@ CpSolverResponse SolveCpModelParallel(
std::move(objective_synchronization);
helper->get_external_best_bound =
std::move(objective_bound_synchronization);
helper->broadcast_lower_bound = true;
helper->broadcast_lower_bound =
local_model.GetOrCreate<SatParameters>()->share_objective_bounds();
CpSolverResponse thread_response;
if (local_params.use_lns()) {
@@ -2154,7 +2172,6 @@ CpSolverResponse SolveCpModelParallel(
} else {
thread_response =
SolveCpModelInternal(model_proto, true, solution_observer,
/*watch_objective_lower_bound=*/true,
shared_bounds_manager.get(), &local_model);
}
@@ -2343,10 +2360,9 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
const int random_seed = model->GetOrCreate<SatParameters>()->random_seed();
response = SolveCpModelWithLNS(new_model, observer_function, 1, random_seed,
model);
} else {
} else { // Normal sequential run.
response = SolveCpModelInternal(new_model, /*is_real_solve=*/true,
observer_function,
/*watch_objective_lower_bound=*/true,
/*shared_bounds_manager=*/nullptr, model);
}

View File

@@ -1437,11 +1437,12 @@ void GenericLiteralWatcher::UpdateCallingNeeds(Trail* trail) {
}
}
if (trail->CurrentDecisionLevel() == 0 &&
level_zero_modified_variable_callback_ != nullptr &&
!modified_vars_.PositionsSetAtLeastOnce().empty()) {
level_zero_modified_variable_callback_(
modified_vars_.PositionsSetAtLeastOnce());
if (trail->CurrentDecisionLevel() == 0) {
const std::vector<IntegerVariable>& modified_vars =
modified_vars_.PositionsSetAtLeastOnce();
for (const auto& callback : level_zero_modified_variable_callback_) {
callback(modified_vars);
}
}
modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables());

View File

@@ -973,7 +973,7 @@ class GenericLiteralWatcher : public SatPropagator {
// cycle if we fix variables in "stages".
void RegisterLevelZeroModifiedVariablesCallback(
const std::function<void(const std::vector<IntegerVariable>&)> cb) {
level_zero_modified_variable_callback_ = cb;
level_zero_modified_variable_callback_.push_back(cb);
}
private:
@@ -1008,8 +1008,8 @@ class GenericLiteralWatcher : public SatPropagator {
std::vector<int> id_to_priority_;
std::vector<int> id_to_idempotence_;
std::function<void(const std::vector<IntegerVariable>&)>
level_zero_modified_variable_callback_ = nullptr;
std::vector<std::function<void(const std::vector<IntegerVariable>&)>>
level_zero_modified_variable_callback_;
DISALLOW_COPY_AND_ASSIGN(GenericLiteralWatcher);
};

View File

@@ -86,8 +86,10 @@ bool IntegerSumLE::Propagate() {
// unassigned enforcement literal.
if (num_unassigned_enforcement_literal > 1) return true;
// Save the current number of fixed variables.
rev_integer_value_repository_->SaveState(&rev_lb_fixed_vars_);
// Save the current sum of fixed variables.
if (is_registered_) {
rev_integer_value_repository_->SaveState(&rev_lb_fixed_vars_);
}
// Compute the new lower bound and update the reversible structures.
IntegerValue lb_unfixed_vars = IntegerValue(0);
@@ -135,11 +137,9 @@ bool IntegerSumLE::Propagate() {
for (int i = rev_num_fixed_vars_; i < num_vars; ++i) {
const IntegerVariable var = vars_[i];
const IntegerValue coeff = coeffs_[i];
const IntegerValue var_slack =
integer_trail_->UpperBound(var) - integer_trail_->LowerBound(var);
if (var_slack * coeff > slack) {
const IntegerValue div = slack / coeff;
const IntegerValue new_ub = integer_trail_->LowerBound(var) + div;
const IntegerValue div = slack / coeff;
const IntegerValue new_ub = integer_trail_->LowerBound(var) + div;
if (new_ub < integer_trail_->UpperBound(var)) {
const IntegerValue propagation_slack = (div + 1) * coeff - slack - 1;
if (!integer_trail_->Enqueue(
IntegerLiteral::LowerOrEqual(var, new_ub),
@@ -179,6 +179,7 @@ bool IntegerSumLE::Propagate() {
}
void IntegerSumLE::RegisterWith(GenericLiteralWatcher* watcher) {
is_registered_ = true;
const int id = watcher->Register(this);
for (const IntegerVariable& var : vars_) {
watcher->WatchLowerBound(var, id);

View File

@@ -77,6 +77,7 @@ class IntegerSumLE : public PropagatorInterface {
RevIntegerValueRepository* rev_integer_value_repository_;
// Reversible sum of the lower bound of the fixed variables.
bool is_registered_ = false;
IntegerValue rev_lb_fixed_vars_;
// Reversible number of fixed variables.

View File

@@ -392,7 +392,8 @@ SatSolver::Status SolveProblemWithPortfolioSearch(
const bool synchronize_objective =
solver->AssumptionLevel() == 0 && helper != nullptr &&
helper->get_external_best_objective != nullptr &&
helper->objective_var != kNoIntegerVariable;
helper->objective_var != kNoIntegerVariable &&
model->GetOrCreate<SatParameters>()->share_objective_bounds();
// Note that it is important to do the level-zero propagation if it wasn't
// already done because EnqueueDecisionAndBackjumpOnConflict() assumes that
@@ -415,21 +416,11 @@ SatSolver::Status SolveProblemWithPortfolioSearch(
policy_index = (policy_index + 1) % num_policies;
}
if (solver->CurrentDecisionLevel() == 0) {
auto* level_zero_callbacks =
model->GetOrCreate<LevelZeroCallbackHelper>();
for (const auto& cb : level_zero_callbacks->callbacks) {
if (!cb()) {
return SatSolver::INFEASIBLE;
}
}
}
// Check external objective, and restart if a better one is supplied.
// This code has to be run before the level_zero_propagate_callbacks are
// triggered, as one of them will actually import the new objective bounds.
// TODO(user): Maybe do not check this at each decision.
// TODO(user): Split the code in 2:
// - Restart part
// - Bounds application as a level_zero_callbacks.
// TODO(user): Move restart code to the restart part?
if (synchronize_objective) {
const double external_bound = helper->get_external_best_objective();
CHECK(helper->get_external_best_bound != nullptr);
@@ -452,24 +443,16 @@ SatSolver::Status SolveProblemWithPortfolioSearch(
if (!solver->RestoreSolverToAssumptionLevel()) {
return solver->UnsatStatus();
}
DCHECK_EQ(solver->CurrentDecisionLevel(), 0);
if (new_objective_upper_bound < current_objective_upper_bound &&
!integer_trail->Enqueue(
IntegerLiteral::LowerOrEqual(helper->objective_var,
new_objective_upper_bound),
{}, {})) {
}
}
if (solver->CurrentDecisionLevel() == 0) {
auto* level_zero_callbacks =
model->GetOrCreate<LevelZeroCallbackHelper>();
for (const auto& cb : level_zero_callbacks->callbacks) {
if (!cb()) {
return SatSolver::INFEASIBLE;
}
if (new_objective_lower_bound > current_objective_lower_bound &&
!integer_trail->Enqueue(
IntegerLiteral::GreaterOrEqual(helper->objective_var,
new_objective_lower_bound),
{}, {})) {
return SatSolver::INFEASIBLE;
}
if (!solver->FinishPropagation()) {
return solver->UnsatStatus();
}
}
}

View File

@@ -43,6 +43,7 @@ const double LinearProgrammingConstraint::kLpEpsilon = 1e-6;
// a constraint was added will have no effect on this class.
LinearProgrammingConstraint::LinearProgrammingConstraint(Model* model)
: sat_parameters_(*(model->GetOrCreate<SatParameters>())),
model_(model),
time_limit_(model->GetOrCreate<TimeLimit>()),
integer_trail_(model->GetOrCreate<IntegerTrail>()),
trail_(model->GetOrCreate<Trail>()),
@@ -207,9 +208,11 @@ void LinearProgrammingConstraint::RegisterWith(Model* model) {
// Registering it with the trail make sure this class is always in sync when
// it is used in the decision heuristics.
integer_trail_->RegisterReversibleClass(this);
watcher->RegisterReversibleInt(watcher_id, &rev_optimal_constraints_size_);
}
void LinearProgrammingConstraint::SetLevel(int level) {
optimal_constraints_.resize(rev_optimal_constraints_size_);
if (lp_solution_is_set_ && level < lp_solution_level_) {
lp_solution_is_set_ = false;
}
@@ -255,6 +258,11 @@ bool LinearProgrammingConstraint::IncrementalPropagate(
// TODO(user): The saved lp solution is still valid given the current variable
// bounds, so the LP optimal didn't change. However we might still want to add
// new cuts or new lazy constraints?
//
// TODO(user): Propagate the last optimal_constraint? Note that we need
// to be careful since the reversible int in IntegerSumLE are not registered.
// However, because we delete "optimalconstraints" on backtrack, we might not
// care.
return true;
}
@@ -423,24 +431,19 @@ bool LinearProgrammingConstraint::Propagate() {
// TODO(user): Maybe do a bit less computation when we cannot propagate
// anything.
const IntegerValue old_lb = integer_trail_->LowerBound(objective_cp_);
IntegerValue new_lb;
if (sat_parameters_.use_exact_lp_reason()) {
new_lb = ExactLpReasonning();
if (!ExactLpReasonning()) return false;
// A difference of 1 happens relatively often, so we just display when
// there is more. Note that when we are over the objective upper bound,
// we relax new_lb for a better reason, so we ignore this case.
if (new_lb <= kMinIntegerValue) {
VLOG(2) << "Overflow during exact LP reasoning.";
} else if (new_lb <= integer_trail_->UpperBound(objective_cp_) &&
std::abs((approximate_new_lb - new_lb).value()) > 1) {
// there is more.
const IntegerValue propagated_lb =
integer_trail_->LowerBound(objective_cp_);
if (std::abs((approximate_new_lb - propagated_lb).value()) > 1) {
VLOG(2) << "LP objective lower bound approx = " << approximate_new_lb;
VLOG(2) << " exact = " << new_lb;
VLOG(2) << " exact = " << propagated_lb;
}
} else {
FillReducedCostsReason();
new_lb = approximate_new_lb;
const double objective_cp_ub =
ToDouble(integer_trail_->UpperBound(objective_cp_));
ReducedCostStrengtheningDeductions(objective_cp_ub -
@@ -450,26 +453,26 @@ bool LinearProgrammingConstraint::Propagate() {
deductions_reason_.push_back(
integer_trail_->UpperBoundAsLiteral(objective_cp_));
}
}
// Push new objective lb.
if (old_lb < new_lb) {
const IntegerLiteral deduction =
IntegerLiteral::GreaterOrEqual(objective_cp_, new_lb);
if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
return false;
}
}
// Push reduced cost strengthening bounds.
if (!deductions_.empty()) {
const int trail_index_with_same_reason = integer_trail_->Index();
for (const IntegerLiteral deduction : deductions_) {
if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
trail_index_with_same_reason)) {
// Push new objective lb.
if (approximate_new_lb > integer_trail_->LowerBound(objective_cp_)) {
const IntegerLiteral deduction =
IntegerLiteral::GreaterOrEqual(objective_cp_, approximate_new_lb);
if (!integer_trail_->Enqueue(deduction, {}, integer_reason_)) {
return false;
}
}
// Push reduced cost strengthening bounds.
if (!deductions_.empty()) {
const int trail_index_with_same_reason = integer_trail_->Index();
for (const IntegerLiteral deduction : deductions_) {
if (!integer_trail_->Enqueue(deduction, {}, deductions_reason_,
trail_index_with_same_reason)) {
return false;
}
}
}
}
}
@@ -585,6 +588,28 @@ IntegerValue LinearProgrammingConstraint::GetImpliedLowerBound(
return lower_bound;
}
// Conservatively tests whether propagating the linear constraint
// sum(coeffs[i] * vars[i]) <= ub could overflow int64 arithmetic.
//
// Using saturated arithmetic (CapProd/CapAdd), it accumulates the minimum
// activity of the constraint: for each term it takes the variable bound that
// minimizes coeff * var (lower bound for a positive coefficient, upper bound
// for a negative one). If any intermediate product or sum saturates to
// kint64min/kint64max, an overflow is possible and the function returns true.
// Finally it checks that the quantity (min_activity - ub) can be computed
// without saturating as well. Returns false when all computations fit.
bool LinearProgrammingConstraint::PossibleOverflow(
    const std::vector<IntegerVariable>& vars,
    const std::vector<IntegerValue>& coeffs, IntegerValue ub) {
  IntegerValue lower_bound(0);
  const int size = vars.size();
  for (int i = 0; i < size; ++i) {
    const IntegerVariable var = vars[i];
    const IntegerValue coeff = coeffs[i];
    CHECK_NE(coeff, 0);
    // Pick the bound that minimizes the term coeff * var.
    const IntegerValue bound = coeff > 0 ? integer_trail_->LowerBound(var)
                                         : integer_trail_->UpperBound(var);
    const int64 prod = CapProd(bound.value(), coeff.value());
    // A saturated product means the real value does not fit in an int64.
    if (prod == kint64min || prod == kint64max) return true;
    const int64 new_lb = CapAdd(lower_bound.value(), prod);
    // Same for a saturated running sum.
    if (new_lb == kint64min || new_lb == kint64max) return true;
    lower_bound = new_lb;
  }
  // The propagation also needs (min_activity - ub); make sure that fits too.
  const int64 slack = CapAdd(lower_bound.value(), -ub.value());
  if (slack == kint64min || slack == kint64max) return true;
  return false;
}
// TODO(user): combine this with RelaxLinearReason() to avoid the extra
// magnitude vector and the weird precondition of RelaxLinearReason().
void LinearProgrammingConstraint::SetImpliedLowerBoundReason(
@@ -728,19 +753,17 @@ bool LinearProgrammingConstraint::ComputeNewLinearConstraint(
//
// Given any INTEGER linear combination of the LP constraints, we can create a
// new integer constraint that is valid (its computation must not overflow
// though). Lets call this new_constraint. We can then always write for the
// objective linear expression:
// objective = (objective - new_constraint) + new_constraint
// And we can compute a lower bound as folllow:
// objective >= ImpliedLB(objective - new_constraint) + new_constraint_lb
// though). Lets call this "linear_combination <= ub". We can then always add to
// it the inequality "objective_terms <= objective_var", so we get:
// ImpliedLB(objective_terms + linear_combination) - ub <= objective_var.
// where ImpliedLB() is computed from the variable current bounds.
//
// Now, if we use for the linear combination and approximation of the optimal
// dual LP values (by scaling them and rounding them to integer), we will get an
// EXACT objective lower bound that is more or less the same as the inexact
// bound given by the LP relaxation. This allows to derive exact reasons for any
// propagation done by this constraint.
IntegerValue LinearProgrammingConstraint::ExactLpReasonning() {
// negated dual LP values (by scaling them and rounding them to integer), we
// will get an EXACT objective lower bound that is more or less the same as the
// inexact bound given by the LP relaxation. This allows to derive exact reasons
// for any propagation done by this constraint.
bool LinearProgrammingConstraint::ExactLpReasonning() {
// Clear old reason and deductions.
integer_reason_.clear();
deductions_.clear();
@@ -757,120 +780,59 @@ IntegerValue LinearProgrammingConstraint::ExactLpReasonning() {
Fractional scaling;
gtl::ITIVector<ColIndex, IntegerValue> reduced_costs;
IntegerValue negative_lb;
IntegerValue rc_ub;
if (!ComputeNewLinearConstraint(/*take_objective_into_account=*/true,
lp_multipliers, &scaling, &reduced_costs,
&negative_lb)) {
return kMinIntegerValue; // Overflow.
&rc_ub)) {
VLOG(2) << "Overflow during exact LP reasoning.";
return true;
}
// The "objective constraint" behave like if the unscaled cp multiplier was
// 1.0, so we will multiply it by this number and add it to reduced_costs.
const IntegerValue obj_scale(std::round(scaling));
if (obj_scale == 0) {
return kMinIntegerValue; // Overflow.
VLOG(2) << "Overflow during exact LP reasoning.";
return true;
}
if (!AddLinearExpressionMultiple(obj_scale, integer_objective_,
&reduced_costs)) {
return kMinIntegerValue; // Overflow.
VLOG(2) << "Overflow during exact LP reasoning.";
return true;
}
// TODO(user): We could correct little imprecision by heuristically computing
// for each row the best multiple to improve the scaled_objective_lb below
// while keeping the reduced_costs of the same sign. This should improve the
// while keeping the coefficient of the same sign. This should improve the
// objective lower bound.
// Compute the objective lower bound, and the reason for it.
const LinearExpression new_constraint =
GetSparseRepresentation(reduced_costs);
const IntegerValue lb = GetImpliedLowerBound(new_constraint);
if (lb == kMinIntegerValue) return kMinIntegerValue; // Overflow.
const IntegerValue scaled_objective_lb(
CapAdd(lb.value(), -negative_lb.value()));
if (scaled_objective_lb == kint64min || scaled_objective_lb == kint64max) {
return kMinIntegerValue;
}
const IntegerValue objective_ub = integer_trail_->UpperBound(objective_cp_);
const IntegerValue scaled_objective_ub(
CapProd(objective_ub.value(), obj_scale.value()));
if (scaled_objective_ub == kint64min || scaled_objective_ub == kint64max) {
return kMinIntegerValue; // Overflow.
}
IntegerValue exact_objective_lb(CeilRatio(scaled_objective_lb, obj_scale));
if (exact_objective_lb > objective_ub) {
// We will have a conflict, so we can relax more!
exact_objective_lb = objective_ub + 1;
} else {
// Reduced cost strenghtening.
//
// Remark: This is nothing else than basic bound propagation of the
// new_constraint with the given feasibility_slack between its implied lower
// bound and its upper bound.
IntegerValue explanation_slack = kMaxIntegerValue;
const IntegerValue feasibility_slack = IntegerValue(
CapSub(scaled_objective_ub.value(), scaled_objective_lb.value()));
CHECK_GE(feasibility_slack, 0);
if (feasibility_slack != kint64max) {
for (const auto& term : new_constraint) {
const IntegerVariable var = integer_variables_[term.first.value()];
const IntegerValue coeff = term.second;
CHECK_NE(coeff, 0);
// Any change by more than this will make scaled_objective_lb go past
// the objective upper bound
const IntegerValue allowed_change(FloorRatio(
feasibility_slack, IntegerValue(std::abs(coeff.value()))));
CHECK_GE(allowed_change, 0);
if (coeff > 0) {
const IntegerValue new_ub =
integer_trail_->LowerBound(var) + allowed_change;
if (new_ub < integer_trail_->UpperBound(var)) {
explanation_slack =
std::min(explanation_slack,
(allowed_change + 1) * coeff - feasibility_slack - 1);
deductions_.push_back(IntegerLiteral::LowerOrEqual(var, new_ub));
}
} else { // coeff < 0
const IntegerValue new_lb =
integer_trail_->UpperBound(var) - allowed_change;
if (new_lb > integer_trail_->LowerBound(var)) {
explanation_slack =
std::min(explanation_slack,
(allowed_change + 1) * -coeff - feasibility_slack - 1);
deductions_.push_back(IntegerLiteral::GreaterOrEqual(var, new_lb));
}
}
}
}
if (!deductions_.empty()) {
// TODO(user): Instead of taking explanation_slack as the min of the slack
// of all deductions, we could use different reason for each push instead.
// Experiment! Maybe there is some tradeoff depending on the number of
// push.
//
// TODO(user): The individual reason are even smaller because we can
// ignore the term corresponding to the variable we push.
//
// TODO(user): The proper fix might be to add a lazy reason code
// that can reconstruct the relaxed reason on demand from a base one.
// So we have better reason, and not more work at propagation time.
// Also, this code should be shared with the one in IntegerSumLE since
// they are the same, and it will facilitate unit-testing.
SetImpliedLowerBoundReason(new_constraint, explanation_slack);
deductions_reason_ = integer_reason_;
deductions_reason_.push_back(
integer_trail_->UpperBoundAsLiteral(objective_cp_));
// Create the IntegerSumLE that will allow to propagate the objective and more
// generally do the reduced cost fixing.
//
// TODO(user): Make sure there cannot be any overflow if we want to reuse the
// constraint for different lower-bounds of the variables later.
std::vector<IntegerVariable> vars;
std::vector<IntegerValue> coeffs;
for (ColIndex col(0); col < reduced_costs.size(); ++col) {
if (reduced_costs[col] != 0) {
vars.push_back(integer_variables_[col.value()]);
coeffs.push_back(reduced_costs[col]);
}
}
vars.push_back(objective_cp_);
coeffs.push_back(-obj_scale);
// Relax the lower bound reason.
const IntegerValue min_value = (exact_objective_lb - 1) * obj_scale + 1;
const IntegerValue slack = scaled_objective_lb - min_value;
SetImpliedLowerBoundReason(new_constraint, slack);
return exact_objective_lb;
// Check for possible overflow in IntegerSumLE::Propagate().
if (PossibleOverflow(vars, coeffs, rc_ub)) {
VLOG(2) << "Overflow during exact LP reasoning.";
return true;
}
IntegerSumLE* cp_constraint =
new IntegerSumLE({}, vars, coeffs, rc_ub, model_);
optimal_constraints_.emplace_back(cp_constraint);
rev_optimal_constraints_size_ = optimal_constraints_.size();
return cp_constraint->Propagate();
}
bool LinearProgrammingConstraint::FillExactDualRayReason() {

View File

@@ -25,6 +25,7 @@
#include "ortools/lp_data/matrix_scaler.h"
#include "ortools/sat/cuts.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/integer_expr.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/linear_constraint_manager.h"
#include "ortools/sat/model.h"
@@ -168,10 +169,8 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// Use the dual optimal lp values to compute an EXACT lower bound on the
// objective. Fills its reason and perform reduced cost strenghtening.
// Returns the exact objective lower bound.
//
// TODO(user): Split into subfunctions.
IntegerValue ExactLpReasonning();
// Returns false in case of conflict.
bool ExactLpReasonning();
// Same as FillReducedCostReason() but for the case of a DUAL_UNBOUNDED
// problem. This exploits the dual ray as a reason for the primal infeasibility.
@@ -205,6 +204,12 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// current variable bound. Return kMinIntegerValue in case of overflow.
IntegerValue GetImpliedLowerBound(const LinearExpression& terms) const;
// Tests for possible overflow in the propagation of the given linear
// constraint.
bool PossibleOverflow(const std::vector<IntegerVariable>& vars,
const std::vector<IntegerValue>& coeffs,
IntegerValue ub);
// Fills integer_reason_ with the reason for the implied lower bound of the
// given linear expression. We relax the reason if we have some slack.
void SetImpliedLowerBoundReason(const LinearExpression& terms,
@@ -270,6 +275,7 @@ class LinearProgrammingConstraint : public PropagatorInterface,
// Singletons from Model.
const SatParameters& sat_parameters_;
Model* model_;
TimeLimit* time_limit_;
IntegerTrail* integer_trail_;
Trail* trail_;
@@ -284,6 +290,14 @@ class LinearProgrammingConstraint : public PropagatorInterface,
std::vector<IntegerLiteral> deductions_;
std::vector<IntegerLiteral> deductions_reason_;
// Repository of IntegerSumLE that needs to be kept around for the lazy
// reasons. Those are new integer constraint that are created each time we
// solve the LP to a dual-feasible solution. Propagating these constraints
// both improve the objective lower bound but also perform reduced cost
// fixing.
int rev_optimal_constraints_size_ = 0;
std::vector<std::unique_ptr<IntegerSumLE>> optimal_constraints_;
// Last OPTIMAL solution found by a call to the underlying LP solver.
// On IncrementalPropagate(), if the bound updates do not invalidate this
// solution, Propagate() will not find domain reductions, no need to call it.

View File

@@ -74,7 +74,7 @@ void SharedBoundsManager::ReportPotentialNewBounds(
}
}
if (fixed_domains > 0 || modified_domains > 0) {
VLOG(1) << "Worker " << worker_id << ": fixed domains=" << fixed_domains
VLOG(2) << "Worker " << worker_id << ": fixed domains=" << fixed_domains
<< ", modified domains=" << modified_domains << " out of "
<< variables.size() << " events";
}
@@ -102,108 +102,68 @@ void SharedBoundsManager::GetChangedBounds(
}
}
void RegisterVariableBoundsLevelZeroWatcher(
const CpModelProto* model_proto,
const std::function<void(const CpSolverResponse&)>&
external_solution_observer,
bool log_progress, IntegerVariable objective_var,
SharedBoundsManager* shared_bounds_manager, Model* model) {
const bool share_objective =
model->GetOrCreate<SatParameters>()->share_objective_bounds();
const auto broadcast_lower_bound =
[model_proto, external_solution_observer, objective_var, model,
shared_bounds_manager, log_progress,
share_objective](const std::vector<IntegerVariable>& modified_vars) {
void RegisterVariableBoundsLevelZeroExport(
const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager,
Model* model) {
CHECK(shared_bounds_manager != nullptr);
const auto broadcast_level_zero_bounds =
[&model_proto, model, shared_bounds_manager](
const std::vector<IntegerVariable>& modified_vars) {
auto* integer_trail = model->Get<IntegerTrail>();
const WorkerInfo* const worker_info = model->GetOrCreate<WorkerInfo>();
CpModelMapping* const mapping = model->GetOrCreate<CpModelMapping>();
// Share bounds of modified variables.
if (shared_bounds_manager != nullptr) {
std::vector<int> model_variables;
std::vector<int64> new_lower_bounds;
std::vector<int64> new_upper_bounds;
absl::flat_hash_set<int> visited_variables;
for (const IntegerVariable& var : modified_vars) {
const IntegerVariable positive_var = PositiveVariable(var);
const int model_var =
mapping->GetProtoVariableFromIntegerVariable(positive_var);
if (model_var == -1 ||
gtl::ContainsKey(visited_variables, model_var)) {
continue;
} else {
visited_variables.insert(model_var);
}
const IntegerVariableProto& var_proto =
model_proto->variables(model_var);
const int64 new_lb =
integer_trail->LevelZeroLowerBound(positive_var).value();
const int64 new_ub =
integer_trail->LevelZeroUpperBound(positive_var).value();
// TODO(user): We could imagine an API based on atomic<int64>
// that could preemptively check if this new bounds are improving.
model_variables.push_back(model_var);
new_lower_bounds.push_back(new_lb);
new_upper_bounds.push_back(new_ub);
if (!var_proto.name().empty()) {
VLOG(2) << worker_info->worker_name << " write "
<< var_proto.name() << "(" << model_var << ")[" << new_lb
<< ", " << new_ub << "]";
} else {
VLOG(2) << worker_info->worker_name << " write anonymous_var("
<< model_var << ")[" << new_lb << ", " << new_ub << "]";
}
std::vector<int> model_variables;
std::vector<int64> new_lower_bounds;
std::vector<int64> new_upper_bounds;
absl::flat_hash_set<int> visited_variables;
for (const IntegerVariable& var : modified_vars) {
const IntegerVariable positive_var = PositiveVariable(var);
const int model_var =
mapping->GetProtoVariableFromIntegerVariable(positive_var);
if (model_var == -1 ||
gtl::ContainsKey(visited_variables, model_var)) {
continue;
} else {
visited_variables.insert(model_var);
}
if (!model_variables.empty()) {
shared_bounds_manager->ReportPotentialNewBounds(
worker_info->worker_id, model_variables, new_lower_bounds,
new_upper_bounds);
const IntegerVariableProto& var_proto =
model_proto.variables(model_var);
const int64 new_lb =
integer_trail->LevelZeroLowerBound(positive_var).value();
const int64 new_ub =
integer_trail->LevelZeroUpperBound(positive_var).value();
// TODO(user): We could imagine an API based on atomic<int64>
// that could preemptively check if this new bounds are improving.
model_variables.push_back(model_var);
new_lower_bounds.push_back(new_lb);
new_upper_bounds.push_back(new_ub);
if (!var_proto.name().empty()) {
VLOG(2) << worker_info->worker_name << " write " << var_proto.name()
<< "(" << model_var << ")[" << new_lb << ", " << new_ub
<< "]";
} else {
VLOG(2) << worker_info->worker_name << " write anonymous_var("
<< model_var << ")[" << new_lb << ", " << new_ub << "]";
}
}
if (share_objective) {
const ObjectiveSynchronizationHelper* const helper =
model->GetOrCreate<ObjectiveSynchronizationHelper>();
const CpObjectiveProto& obj = model_proto->objective();
const double new_best_bound = ScaleObjectiveValue(
obj, integer_trail->LevelZeroLowerBound(objective_var).value());
const double current_best_bound = helper->get_external_best_bound();
const double current_objective_value =
helper->get_external_best_objective();
// TODO(user): Unit test this lambda.
if ((helper->scaling_factor >= 0 && // Unset -> = 0.0 -> minimize.
new_best_bound > current_best_bound) ||
(helper->scaling_factor < 0 &&
new_best_bound < current_best_bound)) {
if (log_progress) {
if (new_best_bound > current_best_bound) { // minimization.
LogNewSolution("ObjLb", worker_info->global_timer->Get(),
new_best_bound, current_objective_value,
worker_info->worker_name);
} else {
LogNewSolution("ObjUb", worker_info->global_timer->Get(),
current_objective_value, new_best_bound,
worker_info->worker_name);
}
}
if (helper->broadcast_lower_bound) {
// Broadcast a best bound improving solution.
CpSolverResponse lb_response;
lb_response.set_status(CpSolverStatus::UNKNOWN);
lb_response.set_objective_value(current_objective_value);
lb_response.set_best_objective_bound(new_best_bound);
external_solution_observer(lb_response);
}
}
if (!model_variables.empty()) {
shared_bounds_manager->ReportPotentialNewBounds(
worker_info->worker_id, model_variables, new_lower_bounds,
new_upper_bounds);
}
};
model->GetOrCreate<GenericLiteralWatcher>()
->RegisterLevelZeroModifiedVariablesCallback(broadcast_lower_bound);
->RegisterLevelZeroModifiedVariablesCallback(broadcast_level_zero_bounds);
}
if (shared_bounds_manager == nullptr) return;
const auto& import_lower_bounds = [model_proto, shared_bounds_manager,
void RegisterVariableBoundsLevelZeroImport(
const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager,
Model* model) {
CHECK(shared_bounds_manager != nullptr);
const auto& import_lower_bounds = [&model_proto, shared_bounds_manager,
model]() {
auto* integer_trail = model->GetOrCreate<IntegerTrail>();
const WorkerInfo* const worker_info = model->GetOrCreate<WorkerInfo>();
@@ -222,7 +182,7 @@ void RegisterVariableBoundsLevelZeroWatcher(
const IntegerValue new_lb(new_lower_bounds[i]);
const IntegerValue new_ub(new_upper_bounds[i]);
VLOG(2) << worker_info->worker_name << " read "
<< model_proto->variables(model_variables[i]).name() << "["
<< model_proto.variables(model_variables[i]).name() << "["
<< new_lb << ", " << new_ub << "]";
if (!integer_trail->Enqueue(IntegerLiteral::GreaterOrEqual(var, new_lb),
{}, {})) {
@@ -242,5 +202,114 @@ void RegisterVariableBoundsLevelZeroWatcher(
import_lower_bounds);
}
void RegisterObjectiveBestBoundExport(
const CpModelProto& model_proto,
const std::function<void(const CpSolverResponse&)>&
external_solution_observer,
bool log_progress, IntegerVariable objective_var, Model* model) {
const auto broadcast_objective_lower_bound =
[&model_proto, external_solution_observer, objective_var, model,
log_progress](const std::vector<IntegerVariable>& modified_vars) {
auto* integer_trail = model->Get<IntegerTrail>();
const WorkerInfo* const worker_info = model->GetOrCreate<WorkerInfo>();
const ObjectiveSynchronizationHelper* const helper =
model->GetOrCreate<ObjectiveSynchronizationHelper>();
const CpObjectiveProto& obj = model_proto.objective();
const double new_best_bound = ScaleObjectiveValue(
obj, integer_trail->LevelZeroLowerBound(objective_var).value());
const double current_best_bound = helper->get_external_best_bound();
const double current_objective_value =
helper->get_external_best_objective();
// TODO(user): Unit test this lambda.
if ((helper->scaling_factor >= 0 && // Unset -> = 0.0 -> minimize.
new_best_bound > current_best_bound) ||
(helper->scaling_factor < 0 &&
new_best_bound < current_best_bound)) {
if (log_progress) {
if (new_best_bound > current_best_bound) { // minimization.
LogNewSolution("ObjLb", worker_info->global_timer->Get(),
new_best_bound, current_objective_value,
worker_info->worker_name);
} else {
LogNewSolution("ObjUb", worker_info->global_timer->Get(),
current_objective_value, new_best_bound,
worker_info->worker_name);
}
}
if (helper->broadcast_lower_bound) {
// Broadcast a best bound improving solution.
CpSolverResponse lb_response;
lb_response.set_status(CpSolverStatus::UNKNOWN);
lb_response.set_objective_value(current_objective_value);
lb_response.set_best_objective_bound(new_best_bound);
external_solution_observer(lb_response);
}
}
};
model->GetOrCreate<GenericLiteralWatcher>()
->RegisterLevelZeroModifiedVariablesCallback(
broadcast_objective_lower_bound);
}
// Registers a level-zero callback that imports externally published objective
// bounds (via the ObjectiveSynchronizationHelper accessors) and tightens this
// worker's objective variable domain accordingly. Returns false from the
// callback when the import leads to a conflict.
void RegisterObjectiveBoundsImport(IntegerVariable objective_var,
                                   Model* model) {
  const auto import_objective_bounds = [objective_var, model]() {
    SatSolver* const solver = model->GetOrCreate<SatSolver>();
    // Only import when no assumptions are active: the imported bounds are
    // global facts and must be enqueued at the root of the search.
    if (solver->AssumptionLevel() != 0) return true;
    const WorkerInfo* const worker_info = model->GetOrCreate<WorkerInfo>();
    const ObjectiveSynchronizationHelper* const helper =
        model->GetOrCreate<ObjectiveSynchronizationHelper>();
    CHECK(helper->get_external_best_bound != nullptr);
    // External best objective (an incumbent solution) caps the upper bound;
    // external best bound (a proven bound) raises the lower bound.
    const double external_bound = helper->get_external_best_objective();
    const double external_best_bound = helper->get_external_best_bound();
    IntegerTrail* const integer_trail = model->GetOrCreate<IntegerTrail>();
    const IntegerValue current_objective_upper_bound(
        integer_trail->UpperBound(helper->objective_var));
    const IntegerValue current_objective_lower_bound(
        integer_trail->LowerBound(helper->objective_var));
    // Non-finite external values mean "nothing published yet"; keep the
    // current bound. The "- 1" asks for a strictly better solution than the
    // external incumbent.
    const IntegerValue new_objective_upper_bound(
        std::isfinite(external_bound)
            ? helper->UnscaledObjective(external_bound) - 1
            : current_objective_upper_bound.value());
    const IntegerValue new_objective_lower_bound(
        std::isfinite(external_best_bound)
            ? helper->UnscaledObjective(external_best_bound)
            : current_objective_lower_bound.value());
    if (new_objective_upper_bound < current_objective_upper_bound ||
        new_objective_lower_bound > current_objective_lower_bound) {
      VLOG(1) << worker_info->worker_name << " imports objective bounds ["
              << std::max(new_objective_lower_bound.value(),
                          current_objective_lower_bound.value())
              << ", "
              << std::min(new_objective_upper_bound.value(),
                          current_objective_upper_bound.value())
              << "]";
    }
    // Enqueue each strictly improving bound. A false Enqueue at level zero
    // signals a conflict (the objective domain became empty).
    if (new_objective_upper_bound < current_objective_upper_bound &&
        !integer_trail->Enqueue(
            IntegerLiteral::LowerOrEqual(helper->objective_var,
                                         new_objective_upper_bound),
            {}, {})) {
      return false;
    }
    if (new_objective_lower_bound > current_objective_lower_bound &&
        !integer_trail->Enqueue(
            IntegerLiteral::GreaterOrEqual(helper->objective_var,
                                           new_objective_lower_bound),
            {}, {})) {
      return false;
    }
    if (!solver->FinishPropagation()) {
      return false;
    }
    return true;
  };
  model->GetOrCreate<LevelZeroCallbackHelper>()->callbacks.push_back(
      import_objective_bounds);
}
} // namespace sat
} // namespace operations_research

View File

@@ -58,12 +58,36 @@ class SharedBoundsManager {
absl::Mutex mutex_;
};
void RegisterVariableBoundsLevelZeroWatcher(
const CpModelProto* model_proto,
// Registers a callback to import new variables bounds stored in the
// shared_bounds_manager. These bounds are imported at level 0 of the search
// in the linear scan minimize function.
void RegisterVariableBoundsLevelZeroImport(
const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager,
Model* model);
// Registers a callback that will export variable bounds fixed at level 0 of
// the search. This should not be registered to an LNS search.
void RegisterVariableBoundsLevelZeroExport(
const CpModelProto& model_proto, SharedBoundsManager* shared_bounds_manager,
Model* model);
// Registers a callback to import new objective bounds. It will use callbacks
// stored in the ObjectiveSynchronizationHelper to query external objective
// bounds.
//
// Currently, standard search works fine with it.
// LNS search and Core-based search do not support it.
void RegisterObjectiveBoundsImport(IntegerVariable objective_var, Model* model);
// Registers a callback that will report improving objective best bound.
// If synchronization_helper->broadcast_lower_bound is true, it will create a
// fake CpSolverResponse (status UNKNOWN, new best bound, best known objective
// value) and call the external_solution_observer with that response.
void RegisterObjectiveBestBoundExport(
const CpModelProto& model_proto,
const std::function<void(const CpSolverResponse&)>&
external_solution_observer,
bool log_progress, IntegerVariable objective_var,
SharedBoundsManager* shared_bounds_manager, Model* model);
bool log_progress, IntegerVariable objective_var, Model* model);
// Stores information on the worker in the parallel context.
struct WorkerInfo {