lot of work on sat/maxsat

This commit is contained in:
lperron@google.com
2014-06-11 20:11:19 +00:00
parent b892531843
commit a7055d7c16
10 changed files with 1347 additions and 230 deletions

View File

@@ -219,12 +219,23 @@ std::string LinearBooleanProblemToCnfString(const LinearBooleanProblem& problem)
int64 hard_weigth = 1;
if (is_wcnf) {
int i = 0;
for (const int64 weight : objective.coefficients()) {
CHECK_GT(weight, 0); // Assumption.
for (int64 weight : objective.coefficients()) {
CHECK_NE(weight, 0);
int signed_literal = objective.literals(i);
// There is no direct support for an objective offset in the wcnf format.
// So this is not a perfect translation of the objective. It is however
// possible to achieve the same effect by adding a new variable x, and two
// soft clauses: x with weight offset, and -x with weight offset.
//
// TODO(user): implement this trick.
if (weight < 0) {
signed_literal = -signed_literal;
weight = -weight;
}
literal_to_weight[objective.literals(i)] = weight;
if (Literal(objective.literals(i)).Variable() < first_slack_variable) {
non_slack_objective.push_back(
std::make_pair(objective.literals(i), weight));
if (Literal(signed_literal).Variable() < first_slack_variable) {
non_slack_objective.push_back(std::make_pair(signed_literal, weight));
}
hard_weigth += weight;
++i;
@@ -447,6 +458,21 @@ Graph* GenerateGraphForSymmetryDetection(
}
void MakeAllLiteralsPositive(LinearBooleanProblem* problem) {
// Objective.
LinearObjective* mutable_objective = problem->mutable_objective();
int64 objective_offset = 0;
for (int i = 0; i < mutable_objective->literals_size(); ++i) {
const int signed_literal = mutable_objective->literals(i);
if (signed_literal < 0) {
const int64 coefficient = mutable_objective->coefficients(i);
mutable_objective->set_literals(i, -signed_literal);
mutable_objective->set_coefficients(i, -coefficient);
objective_offset += coefficient;
}
}
mutable_objective->set_offset(mutable_objective->offset() + objective_offset);
// Constraints.
for (LinearBooleanConstraint& constraint :
*(problem->mutable_constraints())) {
int64 sum = 0;

View File

@@ -70,8 +70,6 @@ void ExtractSubproblem(const LinearBooleanProblem& problem,
// Modifies the given LinearBooleanProblem so that all the literals appearing
// inside are positive.
//
// TODO(user): Also do that on the objective literals + tests.
void MakeAllLiteralsPositive(LinearBooleanProblem* problem);
// Returns a list of generators of the symmetry group of the given problem. Each

View File

@@ -46,6 +46,163 @@ std::string CnfObjectiveLine(const LinearBooleanProblem& problem,
return StringPrintf("o %lld", static_cast<int64>(scaled_objective));
}
// Small value type tying a blocking literal to the index of the core in which
// it was introduced. Used by FuMalikSymmetryBreaker below to track, per
// assumption index, which cores already placed a blocking literal there.
struct LiteralWithCoreIndex {
  LiteralWithCoreIndex(Literal l, int i) : literal(l), core_index(i) {}
  Literal literal;  // The blocking literal.
  int core_index;   // Index of the core that introduced it.
};
// Deletes the given indices from a vector. The given indices must be sorted in
// increasing order. The order of the non-deleted entries in the vector is
// preserved.
//
// Runs in O(v->size()) with no extra memory: the entries to keep are compacted
// in place towards the front of the vector before it is resized.
template <typename Vector>
void DeleteVectorIndices(const std::vector<int>& indices, Vector* v) {
  int new_size = 0;
  int indices_index = 0;  // Position in indices of the next index to delete.
  for (int i = 0; i < static_cast<int>(v->size()); ++i) {
    if (indices_index < static_cast<int>(indices.size()) &&
        i == indices[indices_index]) {
      // Entry i is deleted: just skip it.
      ++indices_index;
    } else {
      (*v)[new_size] = (*v)[i];
      ++new_size;
    }
  }
  v->resize(new_size);
}
// In the Fu & Malik algorithm (or in WPM1), when two cores overlap, we
// artifically introduce symmetries. More precisely:
//
// The picture below shows two cores with index 0 and 1, with one blocking
// variable per '-' and with the variables ordered from left to right (by their
// assumptions index). The blocking variables will be the one added to "relax"
// the core for the next iteration.
//
// 1: -------------------------------
// 0: ------------------------------------
//
// The 2 following assignment of the blocking variables are equivalent.
// Remember that exactly one blocking variable per core must be assigned to 1.
//
// 1: ----------------------1--------
// 0: --------1---------------------------
//
// and
//
// 1: ---------------------------1---
// 0: ---1--------------------------------
//
// This class allows to add binary constraints excluding the second possibility.
// Basically, each time a new core is added, if two of its blocking variables
// (b1, b2) have the same assumption index of two blocking variables from
// another core (c1, c2), then we forbid the assignment c1 true and b2 true.
//
// Reference: C Ansótegui, ML Bonet, J Levy, "Sat-based maxsat algorithms",
// Artificial Intelligence, 2013 - Elsevier.
class FuMalikSymmetryBreaker {
public:
FuMalikSymmetryBreaker() {}
// Must be called before a new core is processed.
void StartResolvingNewCore(int new_core_index) {
literal_by_core_.resize(new_core_index);
for (int i = 0; i < new_core_index; ++i) {
literal_by_core_[i].clear();
}
}
// This should be called for each blocking literal b of the new core. The
// assumption_index identify the soft clause associated to the given blocking
// literal. Note that between two StartResolvingNewCore() calls,
// ProcessLiteral() is assumed to be called with different assumption_index.
//
// Changing the order of the calls will not change the correctness, but will
// change the symmetry-breaking clause produced.
//
// Returns a set of literals which can't be true at the same time as b (under
// symmetry breaking).
std::vector<Literal> ProcessLiteral(int assumption_index, Literal b) {
if (assumption_index >= info_by_assumption_index_.size()) {
info_by_assumption_index_.resize(assumption_index + 1);
}
// Compute the function result.
// info_by_assumption_index_[assumption_index] will contain all the pairs
// (blocking_literal, core) of the previous resolved cores at the same
// assumption index as b.
std::vector<Literal> result;
for (LiteralWithCoreIndex data :
info_by_assumption_index_[assumption_index]) {
// literal_by_core_ will contain all the blocking literal of a given core
// with an assumption_index that was used in one of the ProcessLiteral()
// calls since the last StartResolvingNewCore().
//
// Note that there can be only one such literal by core, so we will not
// add duplicates.
result.insert(result.end(), literal_by_core_[data.core_index].begin(),
literal_by_core_[data.core_index].end());
}
// Update the internal data structure.
for (LiteralWithCoreIndex data :
info_by_assumption_index_[assumption_index]) {
literal_by_core_[data.core_index].push_back(data.literal);
}
info_by_assumption_index_[assumption_index].push_back(
LiteralWithCoreIndex(b, literal_by_core_.size()));
return result;
}
// Deletes the given assumption indices.
void DeleteIndices(const std::vector<int>& indices) {
DeleteVectorIndices(indices, &info_by_assumption_index_);
}
// This is only used in WPM1 to forget all the information related to a given
// assumption_index.
void ClearInfo(int assumption_index) {
CHECK_LE(assumption_index, info_by_assumption_index_.size());
info_by_assumption_index_[assumption_index].clear();
}
// This is only used in WPM1 when a new assumption_index is created.
void AddInfo(int assumption_index, Literal b) {
CHECK_GE(assumption_index, info_by_assumption_index_.size());
info_by_assumption_index_.resize(assumption_index + 1);
info_by_assumption_index_[assumption_index].push_back(
LiteralWithCoreIndex(b, literal_by_core_.size()));
}
private:
std::vector<std::vector<LiteralWithCoreIndex>> info_by_assumption_index_;
std::vector<std::vector<Literal>> literal_by_core_;
DISALLOW_COPY_AND_ASSIGN(FuMalikSymmetryBreaker);
};
// Minimize the given UNSAT core with a really simple heuristic.
// The idea is to remove literals that are consequences of others in the core.
// We already know that in the initial order, no literal is propagated by the
// one before it, so we just look for propagation in the reverse order.
//
// The given core must be a currently-valid unsat core for the solver; this is
// enforced by the CHECK below.
void MinimizeCore(SatSolver* solver, std::vector<Literal>* core) {
  std::vector<Literal> temp = *core;
  // Qualified std::reverse for consistency with the call below (the original
  // unqualified call only resolved through ADL on the std iterators).
  std::reverse(temp.begin(), temp.end());
  solver->Backtrack(0);

  // Note that this Solve() is really fast, since the solver should detect that
  // the assumptions are unsat with unit propagation only. This is just a
  // convenient way to remove assumptions that are propagated by the one before
  // them.
  CHECK_EQ(solver->ResetAndSolveWithGivenAssumptions(temp),
           SatSolver::ASSUMPTIONS_UNSAT);
  temp = solver->GetLastIncompatibleDecisions();
  if (temp.size() < core->size()) {
    // Strictly smaller core: adopt it, restored to the original order.
    LOG(INFO) << "old core size " << core->size();
    std::reverse(temp.begin(), temp.end());
    *core = temp;
  }
}
} // namespace
// This algorithm works by exploiting the unsat core returned by the SAT solver
@@ -53,10 +210,11 @@ std::string CnfObjectiveLine(const LinearBooleanProblem& problem,
// where all the objective variables are set to their value with minimal cost,
// and relax in each step some of these fixed variables until the problem
// becomes satisfiable.
SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution,
LogBehavior log) {
SatSolver::Status SolveWithFuMalik(LogBehavior log,
const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution) {
Logger logger(log);
FuMalikSymmetryBreaker symmetry;
CHECK_EQ(problem.type(), LinearBooleanProblem::MINIMIZATION);
// blocking_clauses will contains a set of clauses that are currently added to
@@ -84,15 +242,15 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
// Initialize blocking_clauses and assumptions.
const LinearObjective& objective = problem.objective();
CHECK_GT(objective.coefficients_size(), 0);
const Coefficient unique_objective_coeff(objective.coefficients(0));
const Coefficient unique_objective_coeff(std::abs(objective.coefficients(0)));
for (int i = 0; i < objective.literals_size(); ++i) {
CHECK_EQ(objective.coefficients(i), unique_objective_coeff)
CHECK_EQ(std::abs(objective.coefficients(i)), unique_objective_coeff)
<< "The basic Fu & Malik algorithm needs constant objective coeffs.";
const Literal literal(objective.literals(i));
// We want to minimize the cost when this literal is true.
const Literal min_literal =
unique_objective_coeff > 0 ? literal.Negated() : literal;
objective.coefficients(i) > 0 ? literal.Negated() : literal;
blocking_clauses.push_back(std::vector<Literal>(1, min_literal));
// Note that initialy, we do not create any extra variables.
@@ -100,7 +258,9 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
}
// Print the number of variable with a non-zero cost.
logger.Log(StringPrintf("c #weigths:%zu", assumptions.size()));
logger.Log(StringPrintf("c #weigths:%zu #vars:%d #constraints:%d",
assumptions.size(), problem.num_variables(),
problem.constraints_size()));
// Starts the algorithm. Each loop will solve the problem under the given
// assumptions, and if unsat, will relax exactly one of the objective
@@ -124,7 +284,8 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
// variable appearing in the core. Moreover, we will only relax as little
// as possible (to not miss the optimal), so we will enforce that the sum
// of the b_i is exactly one.
const std::vector<Literal> core = solver->GetLastIncompatibleDecisions();
std::vector<Literal> core = solver->GetLastIncompatibleDecisions();
MinimizeCore(solver, &core);
solver->Backtrack(0);
// Print the search progress.
@@ -139,9 +300,7 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
assumptions.begin();
CHECK_LT(index, assumptions.size());
// Fix it. We also fix all the associated blocking variables if any. Note
// that if blocking_clauses[index] contains just one variable, it will be
// fixed twice, but that is ok.
// Fix it. We also fix all the associated blocking variables if any.
if (!solver->AddUnitClause(core[0].Negated()))
return SatSolver::MODEL_UNSAT;
for (Literal b : blocking_clauses[index]) {
@@ -149,12 +308,23 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
}
// Erase this entry from the current "objective"
assumptions.erase(assumptions.begin() + index);
blocking_clauses.erase(blocking_clauses.begin() + index);
std::vector<int> to_delete(1, index);
DeleteVectorIndices(to_delete, &assumptions);
DeleteVectorIndices(to_delete, &blocking_clauses);
symmetry.DeleteIndices(to_delete);
} else {
symmetry.StartResolvingNewCore(iter);
// We will add 2 * |core.size()| variables.
const int old_num_variables = solver->NumVariables();
solver->SetNumVariables(old_num_variables + 2 * core.size());
if (core.size() == 2) {
// Special case. If core.size() == 2, we can use only one blocking
// variable (the other one being its negation). This actually does happen
// quite often in practice, so it is worth it.
solver->SetNumVariables(old_num_variables + 3);
} else {
solver->SetNumVariables(old_num_variables + 2 * core.size());
}
// Temporary vectors for the constraint (sum new blocking variable == 1).
std::vector<LiteralWithCoeff> at_most_one_constraint;
@@ -176,60 +346,38 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
CHECK_LT(index, assumptions.size());
// The new blocking and assumption variables for this core entry.
const Literal b(VariableIndex(old_num_variables + i), true);
const Literal a(VariableIndex(old_num_variables + core.size() + i),
true);
// There are two ways to encode the algorithm in SAT.
// TODO(user): The first algo seems better, but experiment more.
if (true) {
// First, fix the old "assumption" variable to false, which has the
// effect of deleting the old clause from the solver.
if (assumptions[index].Variable() >= problem.num_variables()) {
CHECK(solver->AddUnitClause(assumptions[index].Negated()));
}
// Add the new blocking variable.
blocking_clauses[index].push_back(b);
// Add the new clause to the solver. Temporary including the
// assumption, but removing it right afterwards.
blocking_clauses[index].push_back(a);
ok &= solver->AddProblemClause(blocking_clauses[index]);
blocking_clauses[index].pop_back();
} else {
// a false & b false => previous assumptions (which was false).
const Literal old_a = assumptions[index];
std::vector<Literal> vec;
vec.push_back(a);
vec.push_back(b);
vec.push_back(old_a);
ok &= solver->AddProblemClause(vec);
// Also add the two implications a => x and b => x where x is the
// negation of the previous assumption variable.
vec.clear();
vec.push_back(a.Negated());
vec.push_back(old_a.Negated());
ok &= solver->AddProblemClause(vec);
vec.clear();
vec.push_back(b.Negated());
vec.push_back(old_a.Negated());
ok &= solver->AddProblemClause(vec);
// TODO(user): Also add the exclusion between a and b?
if (false) {
vec.clear();
vec.push_back(a.Negated());
vec.push_back(b.Negated());
ok &= solver->AddProblemClause(vec);
vec.clear();
vec.push_back(b.Negated());
vec.push_back(a.Negated());
ok &= solver->AddProblemClause(vec);
}
const Literal a(VariableIndex(old_num_variables + i), true);
Literal b(VariableIndex(old_num_variables + core.size() + i), true);
if (core.size() == 2) {
b = Literal(VariableIndex(old_num_variables + 2), true);
if (i == 1) b = b.Negated();
}
// Symmetry breaking clauses.
for (Literal l : symmetry.ProcessLiteral(index, b)) {
ok &= solver->AddBinaryClause(l.Negated(), b.Negated());
}
// Note(user): There is more than one way to encode the algorithm in
// SAT. Here we "delete" the old blocking clause and add a new one. In
// the WPM1 algorithm below, the blocking clause is decomposed into
// 3-SAT and we don't need to delete anything.
// First, fix the old "assumption" variable to false, which has the
// effect of deleting the old clause from the solver.
if (assumptions[index].Variable() >= problem.num_variables()) {
CHECK(solver->AddUnitClause(assumptions[index].Negated()));
}
// Add the new blocking variable.
blocking_clauses[index].push_back(b);
// Add the new clause to the solver. Temporary including the
// assumption, but removing it right afterwards.
blocking_clauses[index].push_back(a);
ok &= solver->AddProblemClause(blocking_clauses[index]);
blocking_clauses[index].pop_back();
// For the "== 1" constraint on the blocking literals.
at_most_one_constraint.push_back(LiteralWithCoeff(b, 1.0));
at_least_one_constraint.push_back(b);
@@ -256,6 +404,302 @@ SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
}
}
// Solves a MINIMIZATION LinearBooleanProblem with the WPM1 algorithm: a
// core-guided generalization of Fu & Malik to weighted problems, augmented
// here with "hardening" and "stratification" (see the comments in the main
// loop below). The problem is assumed to be already loaded into *solver.
SatSolver::Status SolveWithWPM1(LogBehavior log,
                                const LinearBooleanProblem& problem,
                                SatSolver* solver, std::vector<bool>* solution) {
  Logger logger(log);
  FuMalikSymmetryBreaker symmetry;
  CHECK_EQ(problem.type(), LinearBooleanProblem::MINIMIZATION);

  // The current lower_bound on the cost.
  // It will be correct after the initialization.
  Coefficient lower_bound(problem.objective().offset());
  Coefficient upper_bound(kint64max);

  // The assumption literals and their associated cost. reference[i] keeps the
  // original problem literal from which assumptions[i] was derived.
  std::vector<Literal> assumptions;
  std::vector<Coefficient> costs;
  std::vector<Literal> reference;

  // Initialization.
  const LinearObjective& objective = problem.objective();
  CHECK_GT(objective.coefficients_size(), 0);
  for (int i = 0; i < objective.literals_size(); ++i) {
    const Literal literal(objective.literals(i));
    const Coefficient coeff(objective.coefficients(i));

    // We want to minimize the cost when the assumption is true.
    // Note that initially, we do not create any extra variables.
    if (coeff > 0) {
      assumptions.push_back(literal.Negated());
      costs.push_back(coeff);
    } else {
      // Negative coefficient: the cheap polarity is the literal itself; the
      // (negative) coefficient is folded into the lower bound.
      assumptions.push_back(literal);
      costs.push_back(-coeff);
      lower_bound += coeff;
    }
  }
  reference = assumptions;

  // This is used by the "stratified" approach: only assumptions with a cost
  // at least this bound are passed to the solver, and the bound is lowered
  // progressively.
  Coefficient stratified_lower_bound =
      *std::max_element(costs.begin(), costs.end());

  // Print the number of variables with a non-zero cost.
  logger.Log(StringPrintf("c #weigths:%zu #vars:%d #constraints:%d",
                          assumptions.size(), problem.num_variables(),
                          problem.constraints_size()));

  for (int iter = 0;; ++iter) {
    // This is called "hardening" in the literature.
    // Basically, we know that there is only hardening_threshold weight left
    // to distribute, so any assumption with a greater cost than this can never
    // be false. We fix it instead of treating it as an assumption.
    solver->Backtrack(0);
    const Coefficient hardening_threshold = upper_bound - lower_bound;
    CHECK_GE(hardening_threshold, 0);
    std::vector<int> to_delete;
    int num_above_threshold = 0;
    for (int i = 0; i < assumptions.size(); ++i) {
      if (costs[i] > hardening_threshold) {
        if (!solver->AddUnitClause(assumptions[i])) {
          return SatSolver::MODEL_UNSAT;
        }
        to_delete.push_back(i);
        ++num_above_threshold;
      } else {
        // Already-true assumptions are removed too; this impacts the
        // stratification heuristic.
        if (solver->Assignment().IsLiteralTrue(assumptions[i])) {
          to_delete.push_back(i);
        }
      }
    }
    if (!to_delete.empty()) {
      logger.Log(StringPrintf("c fixed %zu assumptions, %d with cost > %lld",
                              to_delete.size(), num_above_threshold,
                              hardening_threshold.value()));
      // Keep the four parallel structures in sync.
      DeleteVectorIndices(to_delete, &assumptions);
      DeleteVectorIndices(to_delete, &costs);
      DeleteVectorIndices(to_delete, &reference);
      symmetry.DeleteIndices(to_delete);
    }

    // This is the "stratification" part.
    // Extract the assumptions with a cost >= stratified_lower_bound.
    std::vector<Literal> assumptions_subset;
    for (int i = 0; i < assumptions.size(); ++i) {
      if (costs[i] >= stratified_lower_bound) {
        assumptions_subset.push_back(assumptions[i]);
      }
    }

    const SatSolver::Status result =
        solver->ResetAndSolveWithGivenAssumptions(assumptions_subset);
    if (result == SatSolver::MODEL_SAT) {
      // If not all assumptions were taken, continue with a lower stratified
      // bound. Otherwise we have an optimal solution!
      //
      // TODO(user): Try more advanced variant where the bound is lowered by
      // more than this minimal amount.
      const Coefficient old_lower_bound = stratified_lower_bound;
      for (Coefficient cost : costs) {
        if (cost < old_lower_bound) {
          // Pick the largest cost strictly below the old bound.
          if (stratified_lower_bound == old_lower_bound ||
              cost > stratified_lower_bound) {
            stratified_lower_bound = cost;
          }
        }
      }
      ExtractAssignment(problem, *solver, solution);
      DCHECK(IsAssignmentValid(problem, *solution));
      Coefficient objective = ComputeObjectiveValue(problem, *solution);
      if (objective + problem.objective().offset() < upper_bound) {
        logger.Log(CnfObjectiveLine(problem, objective));
        upper_bound = objective + problem.objective().offset();
      }
      if (stratified_lower_bound < old_lower_bound) continue;
      return SatSolver::MODEL_SAT;
    }
    if (result != SatSolver::ASSUMPTIONS_UNSAT) return result;

    // The interesting case: we have an unsat core.
    //
    // We need to add new "blocking" variables b_i for all the objective
    // variables appearing in the core. Moreover, we will only relax as little
    // as possible (to not miss the optimal), so we will enforce that the sum
    // of the b_i is exactly one.
    std::vector<Literal> core = solver->GetLastIncompatibleDecisions();
    MinimizeCore(solver, &core);
    solver->Backtrack(0);

    // Compute the min cost of all the assumptions in the core.
    // The lower bound will be updated by that much.
    Coefficient min_cost = kCoefficientMax;
    {
      int index = 0;
      for (int i = 0; i < core.size(); ++i) {
        // The core entries appear in assumption order, so the scan can resume
        // from the previous match.
        index =
            std::find(assumptions.begin() + index, assumptions.end(), core[i]) -
            assumptions.begin();
        CHECK_LT(index, assumptions.size());
        min_cost = std::min(min_cost, costs[index]);
      }
    }
    lower_bound += min_cost;

    // Print the search progress.
    logger.Log(
        StringPrintf("c iter:%d core:%zu lb:%lld min_cost:%lld strat:%lld",
                     iter, core.size(), lower_bound.value(), min_cost.value(),
                     stratified_lower_bound.value()));

    // This simple line helps a lot on the packup-wpms instances!
    //
    // TODO(user): That was because of a bug before in the way
    // stratified_lower_bound was decremented, not sure it helps that much now.
    if (min_cost > stratified_lower_bound) {
      stratified_lower_bound = min_cost;
    }

    // Special case for a singleton core.
    if (core.size() == 1) {
      // Find the index of the "objective" variable that need to be fixed in
      // its "costly" state.
      const int index =
          std::find(assumptions.begin(), assumptions.end(), core[0]) -
          assumptions.begin();
      CHECK_LT(index, assumptions.size());

      // Fix it.
      if (!solver->AddUnitClause(core[0].Negated())) {
        return SatSolver::MODEL_UNSAT;
      }

      // Erase this entry from the current "objective".
      std::vector<int> to_delete(1, index);
      DeleteVectorIndices(to_delete, &assumptions);
      DeleteVectorIndices(to_delete, &costs);
      DeleteVectorIndices(to_delete, &reference);
      symmetry.DeleteIndices(to_delete);
    } else {
      symmetry.StartResolvingNewCore(iter);

      // We will add 2 * |core.size()| variables.
      const int old_num_variables = solver->NumVariables();
      if (core.size() == 2) {
        // Special case. If core.size() == 2, we can use only one blocking
        // variable (the other one being its negation). This actually does
        // happen quite often in practice, so it is worth it.
        solver->SetNumVariables(old_num_variables + 3);
      } else {
        solver->SetNumVariables(old_num_variables + 2 * core.size());
      }

      // Temporary vectors for the constraint (sum new blocking variable == 1).
      std::vector<LiteralWithCoeff> at_most_one_constraint;
      std::vector<Literal> at_least_one_constraint;

      // This will be set to false if the problem becomes unsat while adding a
      // new clause. This is unlikely, but may be possible.
      bool ok = true;

      // Loop over the core.
      int index = 0;
      for (int i = 0; i < core.size(); ++i) {
        // Since the assumptions appear in order in the core, we can find the
        // relevant "objective" variable efficiently with a simple linear scan
        // in the assumptions vector (done with index).
        index =
            std::find(assumptions.begin() + index, assumptions.end(), core[i]) -
            assumptions.begin();
        CHECK_LT(index, assumptions.size());

        // The new blocking and assumption variables for this core entry.
        const Literal a(VariableIndex(old_num_variables + i), true);
        Literal b(VariableIndex(old_num_variables + core.size() + i), true);
        if (core.size() == 2) {
          // See the SetNumVariables() special case above: the second blocking
          // literal is just the negation of the first one.
          b = Literal(VariableIndex(old_num_variables + 2), true);
          if (i == 1) b = b.Negated();
        }

        // a false & b false => previous assumptions (which was false).
        const Literal old_a = assumptions[index];
        ok &= solver->AddTernaryClause(a, b, old_a);

        // Optional. Also add the two implications a => x and b => x where x is
        // the negation of the previous assumption variable.
        ok &= solver->AddBinaryClause(a.Negated(), old_a.Negated());
        ok &= solver->AddBinaryClause(b.Negated(), old_a.Negated());

        // Optional. Also add the implication a => not(b).
        ok &= solver->AddBinaryClause(a.Negated(), b.Negated());

        // This is the difference with the Fu & Malik algorithm.
        // If the soft clause protected by old_a has a cost greater than
        // min_cost then:
        // - its cost is diminished by min_cost.
        // - an identical clause with cost min_cost is artificially added to
        //   the problem.
        CHECK_GE(costs[index], min_cost);
        if (costs[index] == min_cost) {
          // The new assumption variable replaces the old one.
          assumptions[index] = a.Negated();

          // Symmetry breaking clauses.
          for (Literal l : symmetry.ProcessLiteral(index, b)) {
            ok &= solver->AddBinaryClause(l.Negated(), b.Negated());
          }
        } else {
          // Since the cost of the given index changes, we need to start a new
          // "equivalence" class for the symmetry breaking algo and clear the
          // old one.
          symmetry.AddInfo(assumptions.size(), b);
          symmetry.ClearInfo(index);

          // Reduce the cost of the old assumption.
          costs[index] -= min_cost;

          // We add the new assumption with a cost of min_cost.
          //
          // Note(user): I think it is nice that these are added after old_a
          // because assuming old_a will imply all the derived assumptions to
          // be true, and thus they will never appear in a core until old_a is
          // not an assumption anymore.
          assumptions.push_back(a.Negated());
          costs.push_back(min_cost);
          reference.push_back(reference[index]);
        }

        // For the "<= 1" constraint on the blocking literals.
        // Note(user): we don't add the ">= 1" side because it is not needed
        // for the correctness and it doesn't seem to help.
        at_most_one_constraint.push_back(LiteralWithCoeff(b, 1.0));

        // Because we have a core, we know that at least one of the initial
        // problem variables must be true. This seems to help a bit.
        //
        // TODO(user): Experiment more.
        at_least_one_constraint.push_back(reference[index].Negated());
      }

      // Add the "<= 1" side of the "== 1" constraint.
      ok &= solver->AddLinearConstraint(false, Coefficient(0), true,
                                        Coefficient(1.0),
                                        &at_most_one_constraint);

      // Optional. Add the ">= 1" constraint on the initial problem variables.
      ok &= solver->AddProblemClause(at_least_one_constraint);

      if (!ok) {
        LOG(INFO) << "Unsat while adding a clause.";
        return SatSolver::MODEL_UNSAT;
      }
    }
  }
}
void RandomizeDecisionHeuristic(MTRandom* random, SatParameters* parameters) {
// Random preferred variable order.
const google::protobuf::EnumDescriptor* order_d =
@@ -272,23 +716,14 @@ void RandomizeDecisionHeuristic(MTRandom* random, SatParameters* parameters) {
// Other random parameters.
parameters->set_use_phase_saving(random->OneIn(2));
std::vector<double> ratios;
ratios.push_back(0.0);
ratios.push_back(0.0);
ratios.push_back(0.0);
ratios.push_back(0.01);
ratios.push_back(1.0);
parameters->set_random_polarity_ratio(ratios[random->Uniform(ratios.size())]);
// IMPORTANT: SetParameters() will reinitialize the seed, so we must change
// it.
parameters->set_random_seed(parameters->random_seed() + 1);
parameters->set_random_polarity_ratio(random->OneIn(2) ? 0.01 : 0.0);
parameters->set_random_branches_ratio(random->OneIn(2) ? 0.01 : 0.0);
}
SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
SatSolver::Status SolveWithRandomParameters(LogBehavior log,
const LinearBooleanProblem& problem,
int num_times, SatSolver* solver,
std::vector<bool>* solution,
LogBehavior log) {
std::vector<bool>* solution) {
Logger logger(log);
CHECK_EQ(problem.type(), LinearBooleanProblem::MINIMIZATION);
const SatParameters initial_parameters = solver->parameters();
@@ -297,9 +732,10 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
SatParameters parameters = initial_parameters;
TimeLimit time_limit(parameters.max_time_in_seconds());
// We start with a low limit (increased on each SatSolver::LIMIT_REACHED).
// We start with a low conflict limit and increase it until we are able to
// solve the problem at least once. After this, the limit stays the same.
int max_number_of_conflicts = 5;
parameters.set_log_search_progress(false);
parameters.set_max_number_of_conflicts(10);
Coefficient min_seen(std::numeric_limits<int64>::max());
Coefficient max_seen(std::numeric_limits<int64>::min());
@@ -307,7 +743,10 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
for (int i = 0; i < num_times; ++i) {
solver->Backtrack(0);
RandomizeDecisionHeuristic(&random, &parameters);
parameters.set_max_number_of_conflicts(max_number_of_conflicts);
parameters.set_max_time_in_seconds(time_limit.GetTimeLeft());
parameters.set_random_seed(i);
solver->SetParameters(parameters);
solver->ResetDecisionHeuristic();
@@ -315,10 +754,16 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
if (use_obj) UseObjectiveForSatAssignmentPreference(problem, solver);
const SatSolver::Status result = solver->Solve();
if (result == SatSolver::MODEL_UNSAT) return SatSolver::MODEL_UNSAT;
if (result == SatSolver::MODEL_UNSAT) {
// If the problem is UNSAT after we over-constrained the objective, then
// we found an optimal solution, otherwise, even the decision problem is
// UNSAT.
if (best == kCoefficientMax) return SatSolver::MODEL_UNSAT;
return SatSolver::MODEL_SAT;
}
if (result == SatSolver::LIMIT_REACHED) {
parameters.set_max_number_of_conflicts(
static_cast<int64>(1.1 * parameters.max_number_of_conflicts()));
// We augment the number of conflict until we have one feasible solution.
if (best == kCoefficientMax) ++max_number_of_conflicts;
if (time_limit.LimitReached()) return SatSolver::LIMIT_REACHED;
continue;
}
@@ -332,6 +777,13 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
*solution = candidate;
best = objective;
logger.Log(CnfObjectiveLine(problem, objective));
// Overconstrain the objective.
solver->Backtrack(0);
if (!AddObjectiveConstraint(problem, false, Coefficient(0), true,
objective - 1, solver)) {
return SatSolver::MODEL_SAT;
}
}
min_seen = std::min(min_seen, objective);
max_seen = std::max(max_seen, objective);
@@ -349,9 +801,10 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
return SatSolver::LIMIT_REACHED;
}
SatSolver::Status SolveWithLinearScan(const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution,
LogBehavior log) {
SatSolver::Status SolveWithLinearScan(LogBehavior log,
const LinearBooleanProblem& problem,
SatSolver* solver,
std::vector<bool>* solution) {
Logger logger(log);
CHECK_EQ(problem.type(), LinearBooleanProblem::MINIMIZATION);

View File

@@ -52,18 +52,30 @@ enum LogBehavior { DEFAULT_LOG, STDOUT_LOG };
//
// TODO(user): double-check the correctness if the objective coefficients are
// negative.
SatSolver::Status SolveWithFuMalik(const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution,
LogBehavior log);
SatSolver::Status SolveWithFuMalik(LogBehavior log,
const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution);
// The WPM1 algorithm is a generalization of the Fu & Malik algorithm to
// weighted problems. Note that if all objective weights are the same, this is
// almost the same as SolveWithFuMalik() but the encoding of the constraints is
// slightly different.
//
// Ansotegui, C., Bonet, M.L., Levy, J.: Solving (weighted) partial MaxSAT
// through satisfiability testing. In: Proc. of the 12th Int. Conf. on Theory and
// Applications of Satisfiability Testing (SAT09). pp. 427-440 (2009)
SatSolver::Status SolveWithWPM1(LogBehavior log,
const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution);
// Solves num_times the decision version of the given problem with different
// random parameters. Keep the best solution (regarding the objective) and
// returns it in solution. The problem is assumed to be already loaded into the
// given solver.
SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
SatSolver::Status SolveWithRandomParameters(LogBehavior log,
const LinearBooleanProblem& problem,
int num_times, SatSolver* solver,
std::vector<bool>* solution,
LogBehavior log);
std::vector<bool>* solution);
// Starts by solving the decision version of the given LinearBooleanProblem and
// then simply add a constraint to find a lower objective that the current best
@@ -72,9 +84,10 @@ SatSolver::Status SolveWithRandomParameters(const LinearBooleanProblem& problem,
// The problem is assumed to be already loaded into the given solver. If
// solution is initially a feasible solution, the search will starts from there.
// solution will be updated with the best solution found so far.
SatSolver::Status SolveWithLinearScan(const LinearBooleanProblem& problem,
SatSolver* solver, std::vector<bool>* solution,
LogBehavior log);
SatSolver::Status SolveWithLinearScan(LogBehavior log,
const LinearBooleanProblem& problem,
SatSolver* solver,
std::vector<bool>* solution);
} // namespace sat
} // namespace operations_research

View File

@@ -197,6 +197,8 @@ bool CanonicalBooleanLinearProblem::AddConstraint(
// Resets this constraint to the empty state (all coefficients zero, zero
// rhs and max_sum) while resizing its internal storage for num_variables.
void MutableUpperBoundedLinearConstraint::ClearAndResize(int num_variables) {
  rhs_ = 0;
  max_sum_ = 0;
  non_zeros_.ClearAndResize(VariableIndex(num_variables));
  terms_.assign(num_variables, Coefficient(0));
}
void MutableUpperBoundedLinearConstraint::ClearAll() {
@@ -206,26 +208,24 @@ void MutableUpperBoundedLinearConstraint::ClearAll() {
}
non_zeros_.ClearAll();
rhs_ = 0;
max_sum_ = 0;
}
// TODO(user): Also reduce the trivially false literal when coeff > rhs_ ?
void MutableUpperBoundedLinearConstraint::ReduceCoefficients() {
// TODO(user): avoid the max_sum computation? we could maintain it
// incrementally.
Coefficient max_sum(0);
CHECK_LT(rhs_, max_sum_) << "Trivially sat.";
Coefficient removed_sum(0);
const Coefficient bound = max_sum_ - rhs_;
for (VariableIndex var : PossibleNonZeros()) {
CHECK(SafeAddInto(GetCoefficient(var), &max_sum));
}
CHECK_LT(rhs_, max_sum) << "Trivially sat.";
const Coefficient bound = max_sum - rhs_;
for (VariableIndex var : PossibleNonZeros()) {
const Coefficient coeff = GetCoefficient(var);
if (coeff > bound) {
rhs_ -= coeff - bound;
const Coefficient diff = GetCoefficient(var) - bound;
if (diff > 0) {
removed_sum += diff;
terms_[var] = (terms_[var] > 0) ? bound : -bound;
}
}
// TODO(user): Also reduce the trivially false literal when coeff > rhs_ ?
rhs_ -= removed_sum;
max_sum_ -= removed_sum;
DCHECK_EQ(max_sum_, ComputeMaxSum());
}
std::string MutableUpperBoundedLinearConstraint::DebugString() {
@@ -242,24 +242,54 @@ std::string MutableUpperBoundedLinearConstraint::DebugString() {
// TODO(user): Keep this for DCHECK(), but maintain the slack incrementally
// instead of recomputing it.
Coefficient MutableUpperBoundedLinearConstraint::ComputeSlackForTrailPrefix(
const Trail& trail, int trail_index) {
Coefficient sum(0);
const Trail& trail, int trail_index) const {
Coefficient activity(0);
for (VariableIndex var : PossibleNonZeros()) {
if (GetCoefficient(var) == 0) continue;
if (trail.Assignment().IsLiteralTrue(GetLiteral(var)) &&
trail.Info(var).trail_index < trail_index) {
sum += GetCoefficient(var);
CHECK_GE(sum, 0) << "Overflow!";
activity += GetCoefficient(var);
}
}
return rhs_ - sum;
return rhs_ - activity;
}
void MutableUpperBoundedLinearConstraint::ReduceSlackTo(const Trail& trail,
int trail_index,
Coefficient target) {
// Combined ReduceCoefficients() + ComputeSlackForTrailPrefix() in a single
// pass over the terms. Assumes the resulting slack is negative (see the
// CHECK below); under that assumption only the coefficients of literals
// assigned to true before trail_index can exceed the reduction bound.
//
// Returns rhs_ - activity, where activity is computed on the already-reduced
// coefficients.
Coefficient MutableUpperBoundedLinearConstraint::
    ReduceCoefficientsAndComputeSlackForTrailPrefix(const Trail& trail,
                                                    int trail_index) {
  Coefficient activity(0);
  Coefficient removed_sum(0);
  // Any coefficient larger than bound can be clamped to bound without
  // changing the constraint (same bound as in ReduceCoefficients()).
  const Coefficient bound = max_sum_ - rhs_;
  for (VariableIndex var : PossibleNonZeros()) {
    if (GetCoefficient(var) == 0) continue;
    const Coefficient diff = GetCoefficient(var) - bound;
    if (trail.Assignment().IsLiteralTrue(GetLiteral(var)) &&
        trail.Info(var).trail_index < trail_index) {
      if (diff > 0) {
        // Clamp the coefficient magnitude to bound, keeping its sign.
        removed_sum += diff;
        terms_[var] = (terms_[var] > 0) ? bound : -bound;
      }
      // Note: accumulated AFTER the clamping above, so activity is measured
      // on the reduced coefficients.
      activity += GetCoefficient(var);
    } else {
      // Because we assume the slack (rhs - activity) to be negative, we have
      //   coeff + rhs - max_sum_ <= coeff + rhs - (activity + coeff)
      //                          <= slack
      //                          < 0
      CHECK_LE(diff, 0);
    }
  }
  // Clamping by removed_sum lowers both the rhs and the max sum by the same
  // amount (as in ReduceCoefficients()).
  rhs_ -= removed_sum;
  max_sum_ -= removed_sum;
  DCHECK_EQ(max_sum_, ComputeMaxSum());
  return rhs_ - activity;
}
void MutableUpperBoundedLinearConstraint::ReduceSlackTo(
const Trail& trail, int trail_index, Coefficient initial_slack,
Coefficient target) {
// Positive slack.
const Coefficient slack = ComputeSlackForTrailPrefix(trail, trail_index);
const Coefficient slack = initial_slack;
DCHECK_EQ(slack, ComputeSlackForTrailPrefix(trail, trail_index));
CHECK_LE(target, slack);
CHECK_GE(target, 0);
@@ -282,10 +312,13 @@ void MutableUpperBoundedLinearConstraint::ReduceSlackTo(const Trail& trail,
}
if (GetCoefficient(var) > diff) {
terms_[var] = (terms_[var] > 0) ? terms_[var] - diff : terms_[var] + diff;
max_sum_ -= diff;
} else {
max_sum_ -= GetCoefficient(var);
terms_[var] = 0;
}
}
DCHECK_EQ(max_sum_, ComputeMaxSum());
}
void MutableUpperBoundedLinearConstraint::CopyIntoVector(
@@ -300,9 +333,21 @@ void MutableUpperBoundedLinearConstraint::CopyIntoVector(
std::sort(output->begin(), output->end(), CoeffComparator);
}
// Recomputes from scratch the sum of the absolute term coefficients.
// Only used to validate the incrementally maintained max_sum_ in DCHECKs.
Coefficient MutableUpperBoundedLinearConstraint::ComputeMaxSum() const {
  Coefficient sum(0);
  for (const VariableIndex var : non_zeros_.PositionsSetAtLeastOnce()) {
    sum += GetCoefficient(var);
  }
  return sum;
}
UpperBoundedLinearConstraint::UpperBoundedLinearConstraint(
const std::vector<LiteralWithCoeff>& cst, ResolutionNode* node)
: node_(node) {
: is_marked_for_deletion_(false),
is_learned_(false),
first_reason_trail_index_(-1),
activity_(0.0),
node_(node) {
DCHECK(!cst.empty());
DCHECK(std::is_sorted(cst.begin(), cst.end(), CoeffComparator));
literals_.reserve(cst.size());
@@ -364,6 +409,7 @@ bool UpperBoundedLinearConstraint::InitializeRhs(Coefficient rhs,
const int last_level = trail->CurrentDecisionLevel();
std::vector<Coefficient> sum_at_previous_level(last_level + 2, Coefficient(0));
int max_relevant_trail_index = 0;
if (trail_index > 0) {
int literal_index = 0;
int coeff_index = 0;
@@ -372,6 +418,8 @@ bool UpperBoundedLinearConstraint::InitializeRhs(Coefficient rhs,
const Coefficient coeff = coeffs_[coeff_index];
if (trail->Assignment().IsLiteralTrue(literal) &&
trail->Info(var).trail_index < trail_index) {
max_relevant_trail_index =
std::max(max_relevant_trail_index, trail->Info(var).trail_index);
slack -= coeff;
sum_at_previous_level[trail->Info(var).level + 1] += coeff;
}
@@ -405,13 +453,18 @@ bool UpperBoundedLinearConstraint::InitializeRhs(Coefficient rhs,
}
// Initial propagation.
// TODO(user): The trail_index for the propagation reason can be higher than
// necessary, fix this.
//
// TODO(user): The source trail index for the propagation reason (i.e.
// max_relevant_trail_index) may be higher than necessary (for some of the
// propagated literals). Currently this works with FillReason(), but it was a
// source of a really nasty bug (see CL 68906167) because of the (rhs == 1)
// optim. Find a good way to test the logic.
index_ = coeffs_.size() - 1;
already_propagated_end_ = literals_.size();
Update(slack, threshold);
return *threshold < 0 ? Propagate(trail_index - 1, threshold, trail, conflict)
: true;
return *threshold < 0
? Propagate(max_relevant_trail_index, threshold, trail, conflict)
: true;
}
bool UpperBoundedLinearConstraint::Propagate(int trail_index,
@@ -438,6 +491,9 @@ bool UpperBoundedLinearConstraint::Propagate(int trail_index,
} else {
// Propagation.
if (first_propagated_variable < 0) {
if (first_reason_trail_index_ == -1) {
first_reason_trail_index_ = trail->Index();
}
trail->EnqueueWithPbReason(literals_[i].Negated(), trail_index, this);
first_propagated_variable = literals_[i].Variable();
} else {
@@ -458,9 +514,11 @@ void UpperBoundedLinearConstraint::FillReason(const Trail& trail,
int source_trail_index,
VariableIndex propagated_variable,
std::vector<Literal>* reason) {
reason->clear();
// Optimization for an "at most one" constraint.
// Note that the source_trail_index set by InitializeRhs() is ok in this case.
if (rhs_ == 1) {
reason->clear();
reason->push_back(trail[source_trail_index].Negated());
return;
}
@@ -469,7 +527,7 @@ void UpperBoundedLinearConstraint::FillReason(const Trail& trail,
const bool include_level_zero = trail.NeedFixedLiteralsInReason();
// Optimization: This will be set to the index of the last literal in the
// reason, that is the one with smallest indices.
// reason.
int last_i = 0;
int last_coeff_index = 0;
@@ -477,7 +535,6 @@ void UpperBoundedLinearConstraint::FillReason(const Trail& trail,
// constraint that were assigned to true at the time of the propagation.
// We remove literals with a level of 0 since they are not needed.
// We also compute the slack at the time.
reason->clear();
Coefficient slack = rhs_;
Coefficient propagated_variable_coefficient(0);
int coeff_index = coeffs_.size() - 1;
@@ -528,9 +585,27 @@ void UpperBoundedLinearConstraint::FillReason(const Trail& trail,
DCHECK_GE(limit, 1);
}
// Computes the total "cancelation" amount (see CancelationAmount()) that
// adding *this into the given conflict would produce, ignoring the literals
// assigned with a trail index strictly smaller than trail_index.
//
// Note(user): Currently only used in DCHECKs.
Coefficient UpperBoundedLinearConstraint::ComputeCancelation(
    const Trail& trail, int trail_index,
    const MutableUpperBoundedLinearConstraint& conflict) {
  Coefficient result(0);
  // literals_ is grouped by coefficient: starts_[c + 1] is the index of the
  // first literal NOT using coeffs_[c], so coeff_index advances in lockstep
  // with literal_index.
  int literal_index = 0;
  int coeff_index = 0;
  for (Literal literal : literals_) {
    // Only count literals that are unassigned or assigned at trail_index or
    // later.
    if (!trail.Assignment().IsVariableAssigned(literal.Variable()) ||
        trail.Info(literal.Variable()).trail_index >= trail_index) {
      result += conflict.CancelationAmount(literal, coeffs_[coeff_index]);
    }
    ++literal_index;
    if (literal_index == starts_[coeff_index + 1]) ++coeff_index;
  }
  return result;
}
void UpperBoundedLinearConstraint::ResolvePBConflict(
const Trail& trail, VariableIndex var,
MutableUpperBoundedLinearConstraint* conflict) {
MutableUpperBoundedLinearConstraint* conflict,
Coefficient* conflict_slack) {
const int limit_trail_index = trail.Info(var).trail_index;
// Compute the constraint activity at the time and the coefficient of the
@@ -569,6 +644,9 @@ void UpperBoundedLinearConstraint::ResolvePBConflict(
// processed by PropagateNext().
conflict->ClearAll();
AddToConflict(conflict);
*conflict_slack = rhs_ - activity;
DCHECK_EQ(*conflict_slack,
conflict->ComputeSlackForTrailPrefix(trail, limit_trail_index));
return;
}
@@ -577,25 +655,29 @@ void UpperBoundedLinearConstraint::ResolvePBConflict(
CHECK_GE(slack, 0);
// This is the slack of the conflict at the same level.
// TODO(user): This is potentially called again in ReduceSlackTo(), fix.
const Coefficient conflict_slack =
conflict->ComputeSlackForTrailPrefix(trail, limit_trail_index);
DCHECK_EQ(*conflict_slack,
conflict->ComputeSlackForTrailPrefix(trail, limit_trail_index));
// TODO(user): If there is more "cancelation" than the min_coeffs below when
// we add the two constraints, the resulting slack may be even lower. Taking
// that into account is probably good.
const Coefficient cancelation =
DEBUG_MODE ? ComputeCancelation(trail, limit_trail_index, *conflict)
: Coefficient(0);
// When we add the two constraints together, the slack of the result for the
// trail < limit_trail_index - 1 must be negative. We know that its value is
// <= slack1 + slack2 - std::min(coeffs), so we have nothing to do if this bound is
// already negative.
//
// TODO(user): If there is more "cancelation" (like it is the case for var)
// when we add the two constraints, the resulting slack may be even lower.
// Taking that into account is probably good.
const Coefficient conflict_var_coeff = conflict->GetCoefficient(var);
const Coefficient min_coeffs = std::min(var_coeff, conflict_var_coeff);
const Coefficient new_slack_ub = slack + conflict_slack - min_coeffs;
CHECK_LT(conflict_slack, conflict_var_coeff);
const Coefficient new_slack_ub = slack + *conflict_slack - min_coeffs;
CHECK_LT(*conflict_slack, conflict_var_coeff);
CHECK_LT(slack, var_coeff);
if (new_slack_ub < 0) {
AddToConflict(conflict);
DCHECK_EQ(*conflict_slack + slack - cancelation,
conflict->ComputeSlackForTrailPrefix(trail, limit_trail_index));
return;
}
@@ -624,15 +706,16 @@ void UpperBoundedLinearConstraint::ResolvePBConflict(
//
// TODO(user): The best will be to relax as little as possible.
diff = slack;
conflict_diff = conflict_slack;
conflict_diff = *conflict_slack;
}
// Relax the conflict.
CHECK_GE(conflict_diff, 0);
CHECK_LE(conflict_diff, conflict_slack);
CHECK_LE(conflict_diff, *conflict_slack);
if (conflict_diff > 0) {
conflict->ReduceSlackTo(trail, limit_trail_index,
conflict_slack - conflict_diff);
conflict->ReduceSlackTo(trail, limit_trail_index, *conflict_slack,
*conflict_slack - conflict_diff);
*conflict_slack -= conflict_diff;
}
// We apply the same algorithm as the one in ReduceSlackTo() but on
@@ -654,6 +737,8 @@ void UpperBoundedLinearConstraint::ResolvePBConflict(
} else {
const Coefficient new_coeff = coeffs_[coeff_index] - diff;
if (new_coeff > 0) {
// TODO(user): track the cancelation here so we can update
// *conflict_slack properly.
conflict->AddTerm(literal, new_coeff);
}
}
@@ -665,10 +750,14 @@ void UpperBoundedLinearConstraint::ResolvePBConflict(
conflict->AddToRhs(rhs_ - diff);
}
void UpperBoundedLinearConstraint::Untrail(Coefficient* threshold) {
void UpperBoundedLinearConstraint::Untrail(Coefficient* threshold,
int trail_index) {
const Coefficient slack = GetSlackFromThreshold(*threshold);
while (index_ + 1 < coeffs_.size() && coeffs_[index_ + 1] <= slack) ++index_;
Update(slack, threshold);
if (first_reason_trail_index_ >= trail_index) {
first_reason_trail_index_ = -1;
}
}
// TODO(user): This is relatively slow. Take the "transpose" all at once, and
@@ -686,14 +775,14 @@ bool PbConstraints::AddConstraint(const std::vector<LiteralWithCoeff>& cst,
// Optimization if the constraint terms are the same as the one of the last
// added constraint.
if (!constraints_.empty() && constraints_.back().HasIdenticalTerms(cst)) {
if (rhs < constraints_.back().Rhs()) {
if (!constraints_.empty() && constraints_.back()->HasIdenticalTerms(cst)) {
if (rhs < constraints_.back()->Rhs()) {
// The new constraint is tighter, so we also replace the ResolutionNode.
// TODO(user): The old one could be unlocked at this point.
constraints_.back().ChangeResolutionNode(node);
return constraints_.back().InitializeRhs(rhs, propagation_trail_index_,
&thresholds_.back(), trail_,
&conflict_scratchpad_);
constraints_.back()->ChangeResolutionNode(node);
return constraints_.back()->InitializeRhs(rhs, propagation_trail_index_,
&thresholds_.back(), trail_,
&conflict_scratchpad_);
} else {
// The constraint is redundant, so there is nothing to do.
return true;
@@ -701,16 +790,17 @@ bool PbConstraints::AddConstraint(const std::vector<LiteralWithCoeff>& cst,
}
const ConstraintIndex cst_index(constraints_.size());
constraints_.emplace_back(UpperBoundedLinearConstraint(cst, node));
constraints_.emplace_back(new UpperBoundedLinearConstraint(cst, node));
thresholds_.push_back(Coefficient(0));
if (!constraints_.back().InitializeRhs(rhs, propagation_trail_index_,
&thresholds_.back(), trail_,
&conflict_scratchpad_)) {
if (!constraints_.back()->InitializeRhs(rhs, propagation_trail_index_,
&thresholds_.back(), trail_,
&conflict_scratchpad_)) {
thresholds_.pop_back();
constraints_.pop_back();
return false;
}
for (LiteralWithCoeff term : cst) {
DCHECK_LT(term.literal.Index(), to_update_.size());
to_update_[term.literal.Index()].push_back(ConstraintIndexWithCoeff(
trail_->Assignment().IsVariableAssigned(term.literal.Variable()),
cst_index, term.coefficient));
@@ -718,6 +808,21 @@ bool PbConstraints::AddConstraint(const std::vector<LiteralWithCoeff>& cst,
return true;
}
// Same as AddConstraint(), but marks the newly added constraint as learned so
// that it becomes a candidate for deletion during the cleanup phase. May
// trigger a cleanup of old learned constraints before adding the new one.
//
// Returns the same value as AddConstraint() (false when adding the
// constraint detects an inconsistency).
bool PbConstraints::AddLearnedConstraint(const std::vector<LiteralWithCoeff>& cst,
                                         Coefficient rhs,
                                         ResolutionNode* node) {
  DeleteSomeLearnedConstraintIfNeeded();
  // Use the container's size type to avoid a size_t -> int narrowing and the
  // signed/unsigned comparison in the test below.
  const size_t old_num_constraints = constraints_.size();
  const bool result = AddConstraint(cst, rhs, node);

  // The second test is to avoid marking a problem constraint as learned
  // because of the "reuse last constraint" optimization in AddConstraint():
  // in that case no new constraint was actually appended.
  if (result && constraints_.size() > old_num_constraints) {
    constraints_.back()->set_is_learned(true);
  }
  return result;
}
bool PbConstraints::PropagateNext() {
SCOPED_TIME_STAT(&stats_);
DCHECK(PropagationNeeded());
@@ -736,13 +841,17 @@ bool PbConstraints::PropagateNext() {
if (threshold < 0 && !conflict) {
update.need_untrail_inspection = true;
++num_constraint_lookups_;
if (!constraints_[update.index.value()].Propagate(
if (!constraints_[update.index.value()]->Propagate(
order, &thresholds_[update.index], trail_,
&conflict_scratchpad_)) {
trail_->SetFailingClause(ClauseRef(conflict_scratchpad_));
trail_->SetFailingResolutionNode(
constraints_[update.index.value()].ResolutionNodePointer());
constraints_[update.index.value()]->ResolutionNodePointer());
conflicting_constraint_index_ = update.index;
conflict = true;
// We bump the activity of the conflict.
BumpActivity(constraints_[update.index.value()].get());
}
}
}
@@ -767,7 +876,147 @@ void PbConstraints::Untrail(int trail_index) {
}
}
for (ConstraintIndex cst_index : to_untrail_.PositionsSetAtLeastOnce()) {
constraints_[cst_index.value()].Untrail(&(thresholds_[cst_index]));
constraints_[cst_index.value()]->Untrail(&(thresholds_[cst_index]),
trail_index);
}
}
// TODO(user): Because num_constraints also include problem constraints, the
// policy may not be what we want if there is a big number of problem
// constraints. Fix this.
void PbConstraints::ComputeNewLearnedConstraintLimit() {
const int num_constraints = constraints_.size();
target_number_of_learned_constraint_ =
num_constraints + parameters_.pb_cleanup_increment();
num_learned_constraint_before_cleanup_ =
target_number_of_learned_constraint_ / parameters_.pb_cleanup_ratio() -
num_constraints;
}
// Periodically deletes the learned constraints with the lowest activity so
// that roughly target_number_of_learned_constraint_ constraints remain.
// Constraints currently used as a reason are never deleted.
void PbConstraints::DeleteSomeLearnedConstraintIfNeeded() {
  if (num_learned_constraint_before_cleanup_ == 0) {
    // First time: just initialize the cleanup counters.
    ComputeNewLearnedConstraintLimit();
    return;
  }
  --num_learned_constraint_before_cleanup_;
  if (num_learned_constraint_before_cleanup_ > 0) return;
  SCOPED_TIME_STAT(&stats_);

  // Mark the constraints that need to be deleted.
  // We do that in two passes: first we extract the activities of all the
  // deletable constraints (learned and not used as a reason).
  std::vector<double> activities;
  for (int i = 0; i < constraints_.size(); ++i) {
    const UpperBoundedLinearConstraint& constraint = *(constraints_[i].get());
    if (constraint.is_learned() && !constraint.is_used_as_a_reason()) {
      activities.push_back(constraint.activity());
    }
  }

  // Then we compute the cutoff threshold.
  // Note that we can't delete a constraint used as a reason!
  std::sort(activities.begin(), activities.end());
  const int num_constraints_to_delete =
      constraints_.size() - target_number_of_learned_constraint_;
  CHECK_GT(num_constraints_to_delete, 0);
  if (num_constraints_to_delete >= activities.size()) {
    // Unlikely, but may happen. In this case we just delete all the
    // constraints that can possibly be deleted.
    for (int i = 0; i < constraints_.size(); ++i) {
      UpperBoundedLinearConstraint& constraint = *(constraints_[i].get());
      if (constraint.is_learned() && !constraint.is_used_as_a_reason()) {
        constraint.MarkForDeletion();
      }
    }
  } else {
    const double limit_activity = activities[num_constraints_to_delete];
    // Count how many constraints at exactly limit_activity must be KEPT so
    // that we delete exactly num_constraints_to_delete of them.
    int num_constraint_at_limit_activity = 0;
    for (int i = num_constraints_to_delete; i >= 0; --i) {
      if (activities[i] == limit_activity) {
        ++num_constraint_at_limit_activity;
      } else {
        break;
      }
    }

    // Mark for deletion all the constraints under this threshold.
    // We only keep the most recent constraints amongst the ones with an
    // activity exactly equal to limit_activity, which is why this loop is in
    // reverse order.
    for (int i = constraints_.size() - 1; i >= 0; --i) {
      UpperBoundedLinearConstraint& constraint = *(constraints_[i].get());
      if (constraint.is_learned() && !constraint.is_used_as_a_reason()) {
        if (constraint.activity() <= limit_activity) {
          if (constraint.activity() == limit_activity &&
              num_constraint_at_limit_activity > 0) {
            --num_constraint_at_limit_activity;
          } else {
            constraint.MarkForDeletion();
          }
        }
      }
    }
  }

  // Finally, delete the marked constraints and compute the next limit.
  DeleteConstraintMarkedForDeletion();
  ComputeNewLearnedConstraintLimit();
}
// Increases the activity of a learned constraint by the current increment.
// If the new activity exceeds the configured maximum, all activities are
// rescaled to stay in range.
void PbConstraints::BumpActivity(UpperBoundedLinearConstraint* constraint) {
  // Problem (non-learned) constraints are never deleted, so their activity
  // is irrelevant.
  if (!constraint->is_learned()) return;
  const double bumped =
      constraint->activity() + constraint_activity_increment_;
  constraint->set_activity(bumped);
  if (bumped > parameters_.max_clause_activity_value()) {
    RescaleActivities(1.0 / parameters_.max_clause_activity_value());
  }
}
void PbConstraints::RescaleActivities(double scaling_factor) {
constraint_activity_increment_ *= scaling_factor;
for (int i = 0; i < constraints_.size(); ++i) {
constraints_[i]->set_activity(constraints_[i]->activity() * scaling_factor);
}
}
// Geometrically increases the activity increment, so that more recent bumps
// weigh more than older ones (same decay scheme as for clause activities).
void PbConstraints::UpdateActivityIncrement() {
  const double decay = parameters_.clause_activity_decay();
  constraint_activity_increment_ *= 1.0 / decay;
}
// Deletes every constraint for which is_marked_for_deletion() is true.
// The surviving constraints are compacted in place (keeping their relative
// order, together with their thresholds_ entry) and all the ConstraintIndex
// stored in to_update_ are remapped to the new indices. This is O(total
// number of terms in all constraints).
void PbConstraints::DeleteConstraintMarkedForDeletion() {
  // index_mapping[i] is the new index of constraint i, or -1 if deleted.
  ITIVector<ConstraintIndex, ConstraintIndex> index_mapping(
      constraints_.size(), ConstraintIndex(-1));
  ConstraintIndex new_index(0);
  for (ConstraintIndex i(0); i < constraints_.size(); ++i) {
    if (!constraints_[i.value()]->is_marked_for_deletion()) {
      index_mapping[i] = new_index;
      if (new_index < i) {
        constraints_[new_index.value()].reset(
            constraints_[i.value()].release());
        thresholds_[new_index] = thresholds_[i];
      }
      ++new_index;
    }
  }
  constraints_.resize(new_index.value());
  thresholds_.resize(new_index.value());

  // This is the slow part: we need to remap all the ConstraintIndex to the
  // new ones. Renamed from new_index to num_kept to avoid shadowing the
  // outer ConstraintIndex variable of the same name.
  for (LiteralIndex lit(0); lit < to_update_.size(); ++lit) {
    std::vector<ConstraintIndexWithCoeff>& updates = to_update_[lit];
    int num_kept = 0;
    for (int j = 0; j < updates.size(); ++j) {
      const ConstraintIndex m = index_mapping[updates[j].index];
      if (m != -1) {
        // Compact in place, dropping entries of deleted constraints.
        updates[num_kept] = updates[j];
        updates[num_kept].index = m;
        ++num_kept;
      }
    }
    updates.resize(num_kept);
  }
}

View File

@@ -16,6 +16,7 @@
#include <deque>
#include <limits>
#include "sat/sat_base.h"
#include "sat/sat_parameters.pb.h"
#include "util/stats.h"
namespace operations_research {
@@ -172,9 +173,34 @@ class MutableUpperBoundedLinearConstraint {
// Note that this operation also changes the original rhs of the constraint.
void ReduceCoefficients();
// Same as ReduceCoefficients() but only consider the coefficient of the given
// variable.
void ReduceGivenCoefficient(VariableIndex var) {
  // Same clamping bound as in ReduceCoefficients().
  const Coefficient bound = max_sum_ - rhs_;
  const Coefficient excess = GetCoefficient(var) - bound;
  if (excess <= 0) return;
  // Clamp the coefficient magnitude to bound (keeping its sign) and lower
  // both the rhs and the max sum accordingly.
  rhs_ -= excess;
  max_sum_ -= excess;
  terms_[var] = (terms_[var] > 0) ? bound : -bound;
}
// Compute the constraint slack assuming that only the variables with index <
// trail_index are assigned.
Coefficient ComputeSlackForTrailPrefix(const Trail& trail, int trail_index);
Coefficient ComputeSlackForTrailPrefix(const Trail& trail,
int trail_index) const;
// Same as ReduceCoefficients() followed by ComputeSlackForTrailPrefix(). It
// allows to loop only once over all the terms of the constraint instead of
// doing it twice. This helps since doing that can be the main bottleneck.
//
// Note that this function assumes that the returned slack will be negative.
// This allow to DCHECK some assumptions on what coefficients can be reduced
// or not.
//
// TODO(user): Ideally the slack should be maintainable incrementally.
Coefficient ReduceCoefficientsAndComputeSlackForTrailPrefix(
const Trail& trail, int trail_index);
// Relaxes the constraint so that:
// - ComputeSlackForTrailPrefix(trail, trail_index) == target;
@@ -198,17 +224,19 @@ class MutableUpperBoundedLinearConstraint {
// P1 <= rhs_ - slack <= rhs_ - diff is always true. If at least one of the
// P2' variable is true, then P2 >= P2' + diff and we have
// P1 + P2' + diff <= P1 + P2 <= rhs_.
void ReduceSlackTo(const Trail& trail, int trail_index, Coefficient target);
void ReduceSlackTo(const Trail& trail, int trail_index,
Coefficient initial_slack, Coefficient target);
// Copies this constraint into a std::vector<LiteralWithCoeff> representation.
void CopyIntoVector(std::vector<LiteralWithCoeff>* output);
// Adds a positive value to this constraint Rhs().
// Adds a non-negative value to this constraint Rhs().
void AddToRhs(Coefficient value) {
CHECK_GT(value, 0);
CHECK_GE(value, 0);
rhs_ += value;
}
Coefficient Rhs() const { return rhs_; }
Coefficient MaxSum() const { return max_sum_; }
// Adds a term to this constraint. This is in the .h for efficiency.
// The encoding used internally is described below in the terms_ comment.
@@ -217,23 +245,31 @@ class MutableUpperBoundedLinearConstraint {
const VariableIndex var = literal.Variable();
const Coefficient term_encoding = literal.IsPositive() ? coeff : -coeff;
if (literal != GetLiteral(var)) {
// The two terms are of opposite sign.
// The two terms are of opposite sign, a "cancelation" happens.
// We need to change the encoding of the lower magnitude term.
// - If term > 0, term . x -> term . (x - 1) + term
// - If term < 0, term . (x - 1) -> term . x - term
// In both cases, rhs -= abs(term).
const Coefficient old_rhs = rhs_;
rhs_ -= std::min(AbsCoefficient(term_encoding), AbsCoefficient(terms_[var]));
CHECK_LE(rhs_, old_rhs) << "Overflow!";
rhs_ -= std::min(coeff, AbsCoefficient(terms_[var]));
max_sum_ += AbsCoefficient(term_encoding + terms_[var]) -
AbsCoefficient(terms_[var]);
} else {
// Both terms are of the same sign (or terms_[var] is zero).
// We test for overflow.
CHECK_EQ(term_encoding > 0, terms_[var] + term_encoding > 0);
max_sum_ += coeff;
}
CHECK_GE(max_sum_, 0) << "Overflow";
terms_[var] += term_encoding;
non_zeros_.Set(var);
}
// Returns the "cancelation" amount that AddTerm(literal, coeff) would
// produce, i.e. zero when the stored term has the same sign as literal, and
// the smaller of the two magnitudes otherwise.
Coefficient CancelationAmount(Literal literal, Coefficient coeff) const {
  DCHECK_GT(coeff, 0);
  const VariableIndex var = literal.Variable();
  if (literal != GetLiteral(var)) {
    // Opposite signs: the overlap cancels out.
    return std::min(coeff, AbsCoefficient(terms_[var]));
  }
  return Coefficient(0);
}
// Returns a set of positions that contains all the non-zeros terms of the
// constraint. Note that this set can also contains some zero terms.
const std::vector<VariableIndex>& PossibleNonZeros() const {
@@ -246,6 +282,9 @@ class MutableUpperBoundedLinearConstraint {
private:
Coefficient AbsCoefficient(Coefficient a) const { return a > 0 ? a : -a; }
// Only used for DCHECK_EQ(max_sum_, ComputeMaxSum());
Coefficient ComputeMaxSum() const;
// The encoding is special:
// - If terms_[x] > 0, then the associated term is 'terms_[x] . x'
// - If terms_[x] < 0, then the associated term is 'terms_[x] . (x - 1)'
@@ -254,6 +293,10 @@ class MutableUpperBoundedLinearConstraint {
// The right hand side of the constraint (sum terms <= rhs_).
Coefficient rhs_;
// The constraint maximum sum (i.e. sum of the absolute term coefficients).
// Note that checking the integer overflow on this sum is enough.
Coefficient max_sum_;
// Contains the possibly non-zeros terms_ value.
SparseBitset<VariableIndex> non_zeros_;
};
@@ -309,7 +352,7 @@ class UpperBoundedLinearConstraint {
// Propagate(). Each time a literal in unassigned, the threshold value must
// have been increased by its coefficient. This update the threshold to its
// new value.
void Untrail(Coefficient* threshold);
void Untrail(Coefficient* threshold, int trail_index);
// Provided that the literal with given source_trail_index was the one that
// propagated the conflict or the literal we wants to explain, then this will
@@ -332,7 +375,8 @@ class UpperBoundedLinearConstraint {
// Same operation as SatSolver::ResolvePBConflict(), the only difference is
// that here the reason for var is *this.
void ResolvePBConflict(const Trail& trail, VariableIndex var,
MutableUpperBoundedLinearConstraint* conflict);
MutableUpperBoundedLinearConstraint* conflict,
Coefficient* conflict_slack);
// Adds this pb constraint into the given mutable one.
//
@@ -341,6 +385,15 @@ class UpperBoundedLinearConstraint {
// MutableUpperBoundedLinearConstraint.
void AddToConflict(MutableUpperBoundedLinearConstraint* conflict);
// Compute the sum of the "cancelation" in AddTerm() if *this is added to
// the given conflict. The sum doesn't take into account literal assigned with
// a trail index smaller than the given one.
//
// Note(user): Currently, this is only used in DCHECKs.
Coefficient ComputeCancelation(
const Trail& trail, int trail_index,
const MutableUpperBoundedLinearConstraint& conflict);
// Returns the resolution node associated to this constraint. Note that it can
// be nullptr if the solver is not configured to compute the reason for an
// unsatisfiable problem or if this constraint is not relevant for the current
@@ -348,6 +401,21 @@ class UpperBoundedLinearConstraint {
ResolutionNode* ResolutionNodePointer() const { return node_; }
void ChangeResolutionNode(ResolutionNode* node) { node_ = node; }
// API to mark a constraint for deletion before actually deleting it.
void MarkForDeletion() { is_marked_for_deletion_ = true; }
bool is_marked_for_deletion() const { return is_marked_for_deletion_; }
// Only learned constraint are considered for deletion during the constraint
// cleanup phase. We also can't delete variables used as a reason.
void set_is_learned(bool is_learned) { is_learned_ = is_learned; }
bool is_learned() const { return is_learned_; }
bool is_used_as_a_reason() const { return first_reason_trail_index_ != -1; }
// Activity of the constraint. Only low activity constraint will be deleted
// during the constraint cleanup phase.
void set_activity(double activity) { activity_ = activity; }
double activity() const { return activity_; }
private:
Coefficient GetSlackFromThreshold(Coefficient threshold) {
return (index_ < 0) ? threshold : coeffs_[index_] + threshold;
@@ -357,9 +425,16 @@ class UpperBoundedLinearConstraint {
already_propagated_end_ = starts_[index_ + 1];
}
// Constraint management fields.
// TODO(user): Rearrange and specify bit size to minimize memory usage.
bool is_marked_for_deletion_;
bool is_learned_;
int first_reason_trail_index_;
double activity_;
// Constraint propagation fields.
int index_;
int already_propagated_end_;
Coefficient rhs_;
// In the internal representation, we merge the terms with the same
// coefficient.
@@ -371,7 +446,9 @@ class UpperBoundedLinearConstraint {
std::vector<Coefficient> coeffs_;
std::vector<int> starts_;
std::vector<Literal> literals_;
Coefficient rhs_;
// This is only used for UNSAT core computation.
ResolutionNode* node_;
};
@@ -383,6 +460,8 @@ class PbConstraints {
: trail_(trail),
propagation_trail_index_(0),
conflicting_constraint_index_(-1),
num_learned_constraint_before_cleanup_(0),
constraint_activity_increment_(1.0),
stats_("PbConstraints"),
num_constraint_lookups_(0),
num_threshold_updates_(0) {}
@@ -397,6 +476,11 @@ class PbConstraints {
// Changes the number of variables.
void Resize(int num_variables) { to_update_.resize(num_variables << 1); }
// Parameters management.
void SetParameters(const SatParameters& parameters) {
parameters_ = parameters;
}
// Adds a constraint in canonical form to the set of managed constraints.
//
// There are some preconditions, and the function will return false if they
@@ -409,6 +493,13 @@ class PbConstraints {
// same as the one we are trying to add.
bool AddConstraint(const std::vector<LiteralWithCoeff>& cst, Coefficient rhs,
ResolutionNode* node);
// Same as AddConstraint(), but also marks the added constraint as learned
// so that it can be deleted during the constraint cleanup phase.
bool AddLearnedConstraint(const std::vector<LiteralWithCoeff>& cst,
Coefficient rhs, ResolutionNode* node);
// Returns the number of constraints managed by this class.
int NumberOfConstraints() const { return constraints_.size(); }
// If some literals enqueued on the trail haven't been processed by this class
@@ -444,14 +535,42 @@ class PbConstraints {
void ClearConflictingConstraint() { conflicting_constraint_index_ = -1; }
UpperBoundedLinearConstraint* ConflictingConstraint() {
if (conflicting_constraint_index_ == -1) return nullptr;
return &(constraints_[conflicting_constraint_index_.value()]);
return constraints_[conflicting_constraint_index_.value()].get();
}
// Activity update functions.
// TODO(user): Remove duplication with other activity update functions.
void BumpActivity(UpperBoundedLinearConstraint* constraint);
void RescaleActivities(double scaling_factor);
void UpdateActivityIncrement();
// Only used for testing.
//
// Deletes the constraint at the given index by marking it and then
// immediately running the full deletion pass, which is relatively slow
// (O(number of terms in all constraints) — see
// DeleteConstraintMarkedForDeletion()).
void DeleteConstraint(int index) {
constraints_[index]->MarkForDeletion();
DeleteConstraintMarkedForDeletion();
}
private:
// Same function as the clause-related one in SatSolver().
// TODO(user): Remove duplication.
void ComputeNewLearnedConstraintLimit();
void DeleteSomeLearnedConstraintIfNeeded();
// Deletes all the UpperBoundedLinearConstraint for which
// is_marked_for_deletion() is true. This is relatively slow in O(number of
// terms in all constraints).
void DeleteConstraintMarkedForDeletion();
// Each constraint managed by this class is associated with an index.
// The set of indices is always [0, num_constraints_).
//
// Note(user): this complicates things during deletion, but the propagation is
// about two times faster with this implementation than one with direct
// pointer to an UpperBoundedLinearConstraint. The main reason for this is
// probably that the thresholds_ vector is a lot more efficient cache-wise.
DEFINE_INT_TYPE(ConstraintIndex, int32);
struct ConstraintIndexWithCoeff {
ConstraintIndexWithCoeff() {} // Needed for vector.resize()
ConstraintIndexWithCoeff(bool n, ConstraintIndex i, Coefficient c)
: need_untrail_inspection(n), index(i), coefficient(c) {}
bool need_untrail_inspection;
@@ -470,9 +589,8 @@ class PbConstraints {
// Temporary vector to hold the last conflict of a pseudo-Boolean propagation.
mutable std::vector<Literal> conflict_scratchpad_;
// We use a dequeue to store the pseudo-Boolean constraint because we want
// pointers to its elements to be still valid after more push_back().
std::deque<UpperBoundedLinearConstraint> constraints_;
// The set of all pseudo-boolean constraint managed by this class.
std::vector<std::unique_ptr<UpperBoundedLinearConstraint>> constraints_;
// The current value of the threshold for each constraints.
ITIVector<ConstraintIndex, Coefficient> thresholds_;
@@ -488,6 +606,14 @@ class PbConstraints {
// ClearConflictingConstraint() is called.
ConstraintIndex conflicting_constraint_index_;
// Used for the constraint cleaning policy.
int target_number_of_learned_constraint_;
int num_learned_constraint_before_cleanup_;
double constraint_activity_increment_;
// Algorithm parameters.
SatParameters parameters_;
// Some statistics.
mutable StatsGroup stats_;
int64 num_constraint_lookups_;

View File

@@ -385,6 +385,16 @@ class Trail {
info_[var].resolution_node = node;
}
// Returns a space-separated string of the literals currently on the trail,
// in assignment order.
std::string DebugString() {
  std::string result;
  for (int position = 0; position < trail_index_; ++position) {
    // Only insert a separator once something has been written.
    if (!result.empty()) result.append(" ");
    result.append(trail_[position].DebugString());
  }
  return result;
}
private:
int64 num_enqueues_;
int trail_index_;

View File

@@ -96,6 +96,10 @@ message SatParameters {
// Deletes this ratio of clauses during each cleanup.
optional double clause_cleanup_ratio = 13 [default = 0.5];
// Same as for the clauses, but for the learned pseudo-Boolean constraints.
optional double pb_cleanup_increment = 46 [default = 200];
optional double pb_cleanup_ratio = 47 [default = 0.5];
// Variable activity parameters.
//
// Each time a conflict is found, the activities of some variables are
@@ -196,10 +200,26 @@ message SatParameters {
// down the solver a bit.
optional bool unsat_proof = 42 [default = false];
// Highly experimental!
// Whether to use pseudo-Boolean resolution to analyze a conflict. Note that
// this option only makes sense if your problem is modeled using
// pseudo-Boolean constraints. If you only have clauses, this shouldn't change
// anything (except slow the solver down).
optional bool use_pb_resolution = 43 [default = false];
// A different algorithm during PB resolution. It minimizes the number of
// calls to ReduceCoefficients() which can be time consuming. However, the
// search space will be different and if the coefficients are large, this may
// lead to integer overflows that could otherwise be prevented.
optional bool minimize_reduction_during_pb_resolution = 48 [default = false];
// Whether or not the assumption levels are taken into account during the LBD
// computation. According to the reference below, not counting them improves
// the solver in some situations. Note that this only impacts solves under
// assumptions.
//
// Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for
// Incremental SAT Solving with Assumptions: Application to MUS Extraction"
// Theory and Applications of Satisfiability Testing SAT 2013, Lecture Notes
// in Computer Science Volume 7962, 2013, pp 309-317.
optional bool count_assumption_levels_in_lbd = 49 [default = true];
}

View File

@@ -127,6 +127,7 @@ void SatSolver::SetParameters(const SatParameters& parameters) {
SCOPED_TIME_STAT(&stats_);
parameters_ = parameters;
watched_clauses_.SetParameters(parameters);
pb_constraints_.SetParameters(parameters);
trail_.SetNeedFixedLiteralsInReason(parameters.unsat_proof());
random_.Reset(parameters_.random_seed());
InitRestart();
@@ -165,6 +166,29 @@ bool SatSolver::AddUnitClause(Literal true_literal) {
return true;
}
// Adds the clause (a OR b), encoded as the pseudo-Boolean constraint
// a + b >= 1. Returns false if the problem is detected to be UNSAT.
bool SatSolver::AddBinaryClause(Literal a, Literal b) {
  SCOPED_TIME_STAT(&stats_);
  tmp_pb_constraint_.clear();
  for (const Literal literal : {a, b}) {
    tmp_pb_constraint_.push_back(LiteralWithCoeff(literal, 1));
  }
  return AddLinearConstraint(
      /*use_lower_bound=*/true, /*lower_bound=*/Coefficient(1),
      /*use_upper_bound=*/false, /*upper_bound=*/Coefficient(0),
      &tmp_pb_constraint_);
}
// Adds the clause (a OR b OR c), encoded as the pseudo-Boolean constraint
// a + b + c >= 1. Returns false if the problem is detected to be UNSAT.
bool SatSolver::AddTernaryClause(Literal a, Literal b, Literal c) {
  SCOPED_TIME_STAT(&stats_);
  tmp_pb_constraint_.clear();
  for (const Literal literal : {a, b, c}) {
    tmp_pb_constraint_.push_back(LiteralWithCoeff(literal, 1));
  }
  return AddLinearConstraint(
      /*use_lower_bound=*/true, /*lower_bound=*/Coefficient(1),
      /*use_upper_bound=*/false, /*upper_bound=*/Coefficient(0),
      &tmp_pb_constraint_);
}
bool SatSolver::AddProblemClause(const std::vector<Literal>& literals) {
SCOPED_TIME_STAT(&stats_);
@@ -176,8 +200,8 @@ bool SatSolver::AddProblemClause(const std::vector<Literal>& literals) {
tmp_pb_constraint_.push_back(LiteralWithCoeff(lit, 1));
}
return AddLinearConstraint(
/*has_lower_bound=*/true, /*lower_bound=*/Coefficient(1),
/*has_lower_bound=*/false, /*upper_bound=*/Coefficient(0),
/*use_lower_bound=*/true, /*lower_bound=*/Coefficient(1),
/*use_upper_bound=*/false, /*upper_bound=*/Coefficient(0),
&tmp_pb_constraint_);
}
@@ -346,6 +370,59 @@ void SatSolver::AddLearnedClauseAndEnqueueUnitPropagation(
}
}
namespace {
// Returns the UpperBoundedLinearConstraint used as a reason if var was
// propagated by such constraint, or nullptr otherwise. The SAME_REASON_AS
// case is followed one level: the referenced variable's reason is inspected.
UpperBoundedLinearConstraint* PBReasonOrNull(VariableIndex var,
                                             const Trail& trail) {
  const AssignmentInfo& info = trail.Info(var);
  switch (trail.InitialAssignmentType(var)) {
    case AssignmentInfo::PB_PROPAGATION:
      return info.pb_constraint;
    case AssignmentInfo::SAME_REASON_AS:
      if (trail.InitialAssignmentType(info.reference_var) ==
          AssignmentInfo::PB_PROPAGATION) {
        return trail.Info(info.reference_var).pb_constraint;
      }
      break;
    default:
      break;
  }
  return nullptr;
}
} // namespace
void SatSolver::SaveDebugAssignment() {
debug_assignment_.Resize(num_variables_.value());
for (VariableIndex i(0); i < num_variables_; ++i) {
debug_assignment_.AssignFromTrueLiteral(
trail_.Assignment().GetTrueLiteralForAssignedVariable(i));
}
}
bool SatSolver::ClauseIsValidUnderDebugAssignement(
const std::vector<Literal>& clause) const {
for (Literal l : clause) {
if (l.Variable() >= debug_assignment_.NumberOfVariables() ||
debug_assignment_.IsLiteralTrue(l)) {
return true;
}
}
return false;
}
bool SatSolver::PBConstraintIsValidUnderDebugAssignment(
const std::vector<LiteralWithCoeff>& cst, const Coefficient rhs) const {
Coefficient sum(0.0);
for (LiteralWithCoeff term : cst) {
if (term.literal.Variable() >= debug_assignment_.NumberOfVariables())
continue;
if (debug_assignment_.IsLiteralTrue(term.literal)) {
sum += term.coefficient;
}
}
return sum <= rhs;
}
int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
SCOPED_TIME_STAT(&stats_);
CHECK_EQ(propagation_trail_index_, trail_.Index());
@@ -387,6 +464,7 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
return kUnsatTrailIndex;
}
DCHECK(IsConflictValid(learned_conflict_));
DCHECK(ClauseIsValidUnderDebugAssignement(learned_conflict_));
// Update the activity of all the variables in the first UIP clause.
// Also update the activity of the last level variables expanded (and
@@ -410,6 +488,7 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
// Decay the activities.
UpdateVariableActivityIncrement();
UpdateClauseActivityIncrement();
pb_constraints_.UpdateActivityIncrement();
// Decrement the restart counter if needed.
if (conflicts_until_next_restart_ > 0) {
@@ -427,10 +506,27 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
}
// PB resolution.
//
// TODO(user): Note that we use the clause above to update the activities.
// There is no point using this if the conflict and all the reasons involved
// in its resolution were clauses.
bool compute_pb_conflict = false;
if (parameters_.use_pb_resolution()) {
compute_pb_conflict =
(pb_constraints_.ConflictingConstraint() != nullptr);
if (!compute_pb_conflict) {
for (Literal lit : reason_used_to_infer_the_conflict_) {
if (PBReasonOrNull(lit.Variable(), trail_) != nullptr) {
compute_pb_conflict = true;
break;
}
}
}
}
// TODO(user): Note that we use the clause above to update the variable
// activities and not the pb conflict. Experiment.
if (compute_pb_conflict) {
pb_conflict_.ClearAll();
Coefficient initial_slack(-1);
if (pb_constraints_.ConflictingConstraint() == nullptr) {
// Generic clause case.
Coefficient num_literals(0);
@@ -443,10 +539,13 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
// We have a pseudo-Boolean conflict, so we start from there.
pb_constraints_.ConflictingConstraint()->AddToConflict(&pb_conflict_);
pb_constraints_.ClearConflictingConstraint();
initial_slack = pb_conflict_.ComputeSlackForTrailPrefix(
trail_, max_trail_index + 1);
}
int pb_backjump_level;
ComputePBConflict(max_trail_index, &pb_conflict_, &pb_backjump_level);
ComputePBConflict(max_trail_index, initial_slack, &pb_conflict_,
&pb_backjump_level);
if (pb_backjump_level == -1) {
is_model_unsat_ = true;
return kUnsatTrailIndex;
@@ -455,6 +554,7 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
// Convert the conflict into the std::vector<LiteralWithCoeff> form.
std::vector<LiteralWithCoeff> cst;
pb_conflict_.CopyIntoVector(&cst);
DCHECK(PBConstraintIsValidUnderDebugAssignment(cst, pb_conflict_.Rhs()));
// Check if the learned PB conflict is just a clause:
// all its coefficient must be 1, and the rhs must be its size minus 1.
@@ -473,7 +573,8 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
CHECK_LT(pb_backjump_level, CurrentDecisionLevel());
Backtrack(pb_backjump_level);
first_propagation_index = trail_.Index();
CHECK(pb_constraints_.AddConstraint(cst, pb_conflict_.Rhs(), nullptr));
CHECK(pb_constraints_.AddLearnedConstraint(cst, pb_conflict_.Rhs(),
nullptr));
CHECK_GT(trail_.Index(), first_propagation_index);
counters_.num_learned_pb_literals_ += cst.size();
continue;
@@ -483,9 +584,11 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
// if it has a lower backjump level.
if (pb_backjump_level < ComputeBacktrackLevel(learned_conflict_)) {
learned_conflict_.clear();
is_marked_.ClearAndResize(num_variables_);
int max_level = 0;
int max_index = 0;
for (LiteralWithCoeff term : cst) {
DCHECK(Assignment().IsLiteralTrue(term.literal));
DCHECK_EQ(term.coefficient, 1);
const int level = trail_.Info(term.literal.Variable()).level;
if (level == 0) continue;
@@ -494,6 +597,10 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
max_index = learned_conflict_.size();
}
learned_conflict_.push_back(term.literal.Negated());
// The minimization functions below expect the conflict to be marked!
// TODO(user): This is error prone, find a better way?
is_marked_.Set(term.literal.Variable());
}
CHECK(!learned_conflict_.empty());
std::swap(learned_conflict_.front(), learned_conflict_[max_index]);
@@ -506,6 +613,7 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
// this way. Second, more variables may be marked (in is_marked_) and
// MinimizeConflict() can take advantage of that. Because of this, the
// LBD of the learned conflict can change.
DCHECK(ClauseIsValidUnderDebugAssignement(learned_conflict_));
if (binary_implication_graph_.NumberOfImplications() != 0 &&
parameters_.binary_minimization_algorithm() ==
SatParameters::BINARY_MINIMIZATION_FIRST) {
@@ -552,6 +660,8 @@ int SatSolver::EnqueueDecisionAndBackjumpOnConflict(Literal true_literal) {
counters_.num_literals_learned += learned_conflict_.size();
Backtrack(ComputeBacktrackLevel(learned_conflict_));
first_propagation_index = trail_.Index();
DCHECK(ClauseIsValidUnderDebugAssignement(learned_conflict_));
AddLearnedClauseAndEnqueueUnitPropagation(learned_conflict_, node);
}
return first_propagation_index;
@@ -562,6 +672,7 @@ SatSolver::Status SatSolver::ReapplyDecisionsUpTo(
SCOPED_TIME_STAT(&stats_);
int decision_index = current_decision_level_;
while (decision_index <= max_level) {
DCHECK_GE(decision_index, current_decision_level_);
const Literal previous_decision = decisions_[decision_index].literal;
++decision_index;
if (Assignment().IsLiteralTrue(previous_decision)) continue;
@@ -691,6 +802,21 @@ SatSolver::Status SatSolver::StatusWithLog(Status status) {
return status;
}
namespace {
// A simple RAII class to reset the given int to 0 upon destruction.
// Used below to guarantee that assumption_level_ is back to 0 whenever
// SolveInternal() returns.
class ScopedIntReset {
public:
// The pointee must outlive this object; it is set to 0 in the destructor.
explicit ScopedIntReset(int* p) : p_(p) {}
~ScopedIntReset() { *p_ = 0; }
private:
int* p_;  // Not owned.
DISALLOW_COPY_AND_ASSIGN(ScopedIntReset);
};
} // namespace
SatSolver::Status SatSolver::SolveInternal(int initial_assumption_level) {
SCOPED_TIME_STAT(&stats_);
if (is_model_unsat_) return MODEL_UNSAT;
@@ -733,7 +859,12 @@ SatSolver::Status SatSolver::SolveInternal(int initial_assumption_level) {
// Note that this can change if assumptions are (or become) consequences of
// the ones before them.
int assumption_level = initial_assumption_level;
assumption_level_ = initial_assumption_level;
// We always want the assumption_level_ to be 0 when the Solve() is done.
// This is because we don't want to mess-up the ComputeLbd() for users
// that directly use the EnqueueDecision*() functions.
ScopedIntReset resetter(&assumption_level_);
// Starts search.
for (;;) {
@@ -774,11 +905,12 @@ SatSolver::Status SatSolver::SolveInternal(int initial_assumption_level) {
}
// We need to reapply any assumptions that are not currently applied.
if (CurrentDecisionLevel() < assumption_level) {
if (CurrentDecisionLevel() < assumption_level_) {
int unused = 0;
const Status status = ReapplyDecisionsUpTo(assumption_level - 1, &unused);
const Status status =
ReapplyDecisionsUpTo(assumption_level_ - 1, &unused);
if (status != MODEL_SAT) return StatusWithLog(status);
assumption_level = CurrentDecisionLevel();
assumption_level_ = CurrentDecisionLevel();
}
// At a leaf?
@@ -791,10 +923,11 @@ SatSolver::Status SatSolver::SolveInternal(int initial_assumption_level) {
restart_count_++;
conflicts_until_next_restart_ =
parameters_.restart_period() * SUniv(restart_count_ + 1);
Backtrack(assumption_level);
Backtrack(assumption_level_);
}
// Choose the next decision variable.
DCHECK_GE(CurrentDecisionLevel(), assumption_level_);
const Literal next_branch = NextBranch();
if (EnqueueDecisionAndBackjumpOnConflict(next_branch) == -1) {
return StatusWithLog(MODEL_UNSAT);
@@ -869,9 +1002,15 @@ void SatSolver::BumpReasonActivities(const std::vector<Literal>& literals) {
SCOPED_TIME_STAT(&stats_);
for (const Literal literal : literals) {
const VariableIndex var = literal.Variable();
if (DecisionLevel(var) > 0 &&
trail_.Info(var).type == AssignmentInfo::CLAUSE_PROPAGATION) {
BumpClauseActivity(trail_.Info(var).sat_clause);
if (DecisionLevel(var) > 0) {
if (trail_.Info(var).type == AssignmentInfo::CLAUSE_PROPAGATION) {
BumpClauseActivity(trail_.Info(var).sat_clause);
} else if (trail_.InitialAssignmentType(var) ==
AssignmentInfo::PB_PROPAGATION) {
// TODO(user): Because one pb constraint may propagate many literals,
// this may bias the constraint activity... investigate other policy.
pb_constraints_.BumpActivity(trail_.Info(var).pb_constraint);
}
}
}
}
@@ -961,6 +1100,9 @@ int SatSolver::ComputeBacktrackLevel(const std::vector<Literal>& literals) {
template <typename LiteralList>
int SatSolver::ComputeLbd(const LiteralList& conflict) {
SCOPED_TIME_STAT(&stats_);
const int limit =
parameters_.count_assumption_levels_in_lbd() ? 0 : assumption_level_;
// We know that the first literal of the conflict is always of the highest
// level.
is_level_marked_.ClearAndResize(
@@ -968,7 +1110,7 @@ int SatSolver::ComputeLbd(const LiteralList& conflict) {
for (const Literal literal : conflict) {
const SatDecisionLevel level(DecisionLevel(literal.Variable()));
DCHECK_GE(level, 0);
if (level > 0 && !is_level_marked_[level]) {
if (level > limit && !is_level_marked_[level]) {
is_level_marked_.Set(level);
}
}
@@ -1221,21 +1363,20 @@ ClauseRef SatSolver::Reason(VariableIndex var) {
}
}
void SatSolver::ResolvePBConflict(
VariableIndex var, MutableUpperBoundedLinearConstraint* conflict) {
bool SatSolver::ResolvePBConflict(VariableIndex var,
MutableUpperBoundedLinearConstraint* conflict,
Coefficient* slack) {
const AssignmentInfo& info = trail_.Info(var);
// Special pseudo-Boolean cases.
if (trail_.InitialAssignmentType(var) == AssignmentInfo::PB_PROPAGATION) {
info.pb_constraint->ResolvePBConflict(trail_, var, conflict);
return;
}
if (trail_.InitialAssignmentType(var) == AssignmentInfo::SAME_REASON_AS &&
trail_.InitialAssignmentType(info.reference_var) ==
AssignmentInfo::PB_PROPAGATION) {
const AssignmentInfo& ref_info = trail_.Info(info.reference_var);
ref_info.pb_constraint->ResolvePBConflict(trail_, var, conflict);
return;
// This is the slack of the conflict < info.trail_index
DCHECK_EQ(*slack,
conflict->ComputeSlackForTrailPrefix(trail_, info.trail_index));
// Pseudo-Boolean case.
UpperBoundedLinearConstraint* pb_reason = PBReasonOrNull(var, trail_);
if (pb_reason != nullptr) {
pb_reason->ResolvePBConflict(trail_, var, conflict, slack);
return false;
}
// Generic clause case.
@@ -1247,12 +1388,11 @@ void SatSolver::ResolvePBConflict(
case 1:
// We reduce the conflict slack to 0 before adding the clause.
// The advantage of this method is that the coefficients stay small.
conflict->ReduceSlackTo(trail_, info.trail_index, Coefficient(0));
conflict->ReduceSlackTo(trail_, info.trail_index, *slack, Coefficient(0));
break;
case 2:
// No reduction, we add the lower possible multiple.
multiplier =
conflict->ComputeSlackForTrailPrefix(trail_, info.trail_index) + 1;
multiplier = *slack + 1;
break;
default:
// No reduction, the multiple is chosen to cancel var.
@@ -1264,14 +1404,23 @@ void SatSolver::ResolvePBConflict(
trail_.Assignment().GetTrueLiteralForAssignedVariable(var).Negated(),
multiplier);
for (Literal literal : Reason(var)) {
DCHECK_NE(literal.Variable(), var);
DCHECK(Assignment().IsLiteralFalse(literal));
conflict->AddTerm(literal.Negated(), multiplier);
++num_literals;
}
conflict->AddToRhs((num_literals - 1) * multiplier);
// All the algorithms above result in a new slack of -1.
*slack = -1;
DCHECK_EQ(*slack,
conflict->ComputeSlackForTrailPrefix(trail_, info.trail_index));
return true;
}
void SatSolver::NewDecision(Literal literal) {
SCOPED_TIME_STAT(&stats_);
CHECK(!Assignment().IsVariableAssigned(literal.Variable()));
counters_.num_branches++;
decisions_[current_decision_level_] = Decision(trail_.Index(), literal);
++current_decision_level_;
@@ -1673,6 +1822,7 @@ void SatSolver::ComputeFirstUIPConflict(
// TODO(user): Remove the literals assigned at level 0.
void SatSolver::ComputePBConflict(int max_trail_index,
Coefficient initial_slack,
MutableUpperBoundedLinearConstraint* conflict,
int* pb_backjump_level) {
SCOPED_TIME_STAT(&stats_);
@@ -1680,8 +1830,9 @@ void SatSolver::ComputePBConflict(int max_trail_index,
// First compute the slack of the current conflict for the assignment up to
// trail_index. It must be negative since this is a conflict.
Coefficient slack =
conflict->ComputeSlackForTrailPrefix(trail_, trail_index + 1);
Coefficient slack = initial_slack;
DCHECK_EQ(slack,
conflict->ComputeSlackForTrailPrefix(trail_, trail_index + 1));
CHECK_LT(slack, 0) << "We don't have a conflict!";
// Iterate backward over the trail.
@@ -1692,13 +1843,21 @@ void SatSolver::ComputePBConflict(int max_trail_index,
if (conflict->GetCoefficient(var) > 0 &&
trail_.Assignment().IsLiteralTrue(conflict->GetLiteral(var))) {
// This can't happen at the beginning, but may happen later.
if (conflict->GetCoefficient(var) + slack < 0) {
// Even without var assigned, we still have a conflict.
slack += conflict->GetCoefficient(var);
continue;
if (parameters_.minimize_reduction_during_pb_resolution()) {
// When this parameter is true, we don't call ReduceCoefficients() at
// every loop. However, it is still important to reduce the "current"
// variable coefficient, because this can impact the value of the new
// slack below.
conflict->ReduceGivenCoefficient(var);
}
// This is the slack one level before (< Info(var).trail_index).
slack += conflict->GetCoefficient(var);
// This can't happen at the beginning, but may happen later.
// It means that even without var assigned, we still have a conflict.
if (slack < 0) continue;
// At this point, just removing the last assignment lift the conflict.
// So we can abort if the true assignment before that is at a lower level
// TODO(user): Somewhat inefficient.
@@ -1720,18 +1879,39 @@ void SatSolver::ComputePBConflict(int max_trail_index,
}
// We can't abort, So resolve the current variable.
CHECK_NE(trail_.Info(var).type, AssignmentInfo::SEARCH_DECISION);
ResolvePBConflict(var, conflict);
// Note(user): this seems actually quite important, since it may
// impact the ComputeSlackForTrailPrefix() because it lower the rhs...
conflict->ReduceCoefficients();
DCHECK_NE(trail_.Info(var).type, AssignmentInfo::SEARCH_DECISION);
const bool clause_used = ResolvePBConflict(var, conflict, &slack);
// At this point, we have a negative slack. Note that ReduceCoefficients()
// will not change it. However it may change the slack value of the next
// iteration (when we will no longer take into account the true literal
// with highest trail index).
//
// Note that the trail_index has already been decremented, it is why
// we need the +1 here.
slack = conflict->ComputeSlackForTrailPrefix(trail_, trail_index + 1);
CHECK_LT(slack, 0);
// we need the +1 in the slack computation.
const Coefficient slack_only_for_debug =
DEBUG_MODE
? conflict->ComputeSlackForTrailPrefix(trail_, trail_index + 1)
: Coefficient(0);
if (clause_used) {
// If a clause was used, we know that slack has the correct value.
if (!parameters_.minimize_reduction_during_pb_resolution()) {
conflict->ReduceCoefficients();
}
} else {
// TODO(user): The function below can take most of the running time on
// some instances. The goal is to have slack updated to its new value
// incrementally, but we are not here yet.
if (parameters_.minimize_reduction_during_pb_resolution()) {
slack = conflict->ComputeSlackForTrailPrefix(trail_, trail_index + 1);
} else {
slack = conflict->ReduceCoefficientsAndComputeSlackForTrailPrefix(
trail_, trail_index + 1);
}
}
DCHECK_EQ(slack, slack_only_for_debug);
CHECK_LT(slack, 0);
if (conflict->Rhs() < 0) {
*pb_backjump_level = -1;
return;
@@ -1739,6 +1919,12 @@ void SatSolver::ComputePBConflict(int max_trail_index,
}
}
// Reduce the conflict coefficients if it is not already done.
// This is important to avoid integer overflow.
if (!parameters_.minimize_reduction_during_pb_resolution()) {
conflict->ReduceCoefficients();
}
// Double check.
// The sum of the literal with level <= backjump_level must propagate.
std::vector<Coefficient> sum_for_le_level(backjump_level + 2, Coefficient(0));

View File

@@ -76,8 +76,17 @@ class SatSolver {
// Returns false if the problem is detected to be UNSAT.
bool AddUnitClause(Literal true_literal);
// Same as AddProblemClause() below, but for small clauses.
//
// TODO(user): Remove this and AddUnitClause() when initializer lists can be
// used in the open-source code like in AddClause({a, b}).
bool AddBinaryClause(Literal a, Literal b);
bool AddTernaryClause(Literal a, Literal b, Literal c);
// Adds a clause to the problem. Returns false if the problem is detected to
// be UNSAT.
//
// TODO(user): Rename this to AddClause().
bool AddProblemClause(const std::vector<Literal>& literals);
// Adds a pseudo-Boolean constraint to the problem. Returns false if the
@@ -232,6 +241,9 @@ class SatSolver {
// In any case, the new decisions stack will be the largest valid "prefix"
// of the old stack. Note that decisions that are now consequence of the ones
// before them will no longer be decisions.
//
// Note(user): This function can be called with an already assigned literal,
// in which case, it will just do nothing.
int EnqueueDecisionAndBacktrackOnConflict(Literal true_literal);
// Tries to enqueue the given decision and performs the propagation.
@@ -264,7 +276,21 @@ class SatSolver {
int64 num_failures() const;
int64 num_propagations() const;
// Only used for debugging. Save the current assignment in debug_assignment_.
// The idea is that if we know that a given assignment is satisfiable, then
// all the learned clauses or PB constraints must be satisfiable by it. In
// debug mode, and after this is called, all the learned clauses are tested to
// satisfy this saved assignment.
void SaveDebugAssignment();
private:
// See SaveDebugAssignment(). Note that these functions only consider the
// variables at the time the debug_assignment_ was saved. If new variables
// where added since that time, they will be considered unassigned.
bool ClauseIsValidUnderDebugAssignement(const std::vector<Literal>& clause) const;
bool PBConstraintIsValidUnderDebugAssignment(
const std::vector<LiteralWithCoeff>& cst, const Coefficient rhs) const;
// Logs the given status if parameters_.log_search_progress() is true.
// Also returns it.
Status StatusWithLog(Status status);
@@ -322,8 +348,12 @@ class SatSolver {
// - The conflict propagates it to not(l)
// The goal of the operation is to combine the two constraints in order to
// have a new conflict at a lower trail_index.
void ResolvePBConflict(VariableIndex var,
MutableUpperBoundedLinearConstraint* conflict);
//
// Returns true if the reason for var was a normal clause. In this case,
// the *slack is updated to its new value.
bool ResolvePBConflict(VariableIndex var,
MutableUpperBoundedLinearConstraint* conflict,
Coefficient* slack);
// Returns true if the clause is the reason for an assigned variable or was
// the reason the last time a variable was assigned.
@@ -426,7 +456,7 @@ class SatSolver {
// time ResolvePBConflict() on the current conflict until we have a conflict
// that allow us to propagate more at a lower decision level. This level
// is the one returned in backjump_level.
void ComputePBConflict(int max_trail_index,
void ComputePBConflict(int max_trail_index, Coefficient initial_slack,
MutableUpperBoundedLinearConstraint* conflict,
int* backjump_level);
@@ -549,6 +579,9 @@ class SatSolver {
// The solver trail.
Trail trail_;
// Used for debugging only. See SaveDebugAssignment().
VariablesAssignment debug_assignment_;
// The stack of decisions taken by the solver. They are stored in [0,
// current_decision_level_). The vector is of size num_variables_ so it can
// store all the decisions. This is done this way because in some situation we
@@ -556,6 +589,9 @@ class SatSolver {
int current_decision_level_;
std::vector<Decision> decisions_;
// The assumption level. See SolveWithAssumptions().
int assumption_level_;
// The index of the first non-propagated literal on the trail. The first index
// is for non-binary clauses propagation and the second index is for binary
// clauses propagation.