improve cp-sat internals
This commit is contained in:
@@ -95,6 +95,18 @@ bool InsertOrUpdate(Collection* const collection, const Key& key,
|
||||
return true;
|
||||
}
|
||||
|
||||
// Insert a new key into a set.
|
||||
// If the key is not present in the set the key is
|
||||
// inserted, otherwise nothing happens. True indicates that an insert
|
||||
// took place, false indicates the key was already present.
|
||||
template <class Collection>
|
||||
bool InsertIfNotPresent(Collection* const collection,
|
||||
const typename Collection::value_type& value) {
|
||||
std::pair<typename Collection::iterator, bool> ret =
|
||||
collection->insert(value);
|
||||
return ret.second;
|
||||
}
|
||||
|
||||
// Insert a new key and value into a map or std::unordered_map.
|
||||
// If the key is not present in the map the key and value are
|
||||
// inserted, otherwise nothing happens. True indicates that an insert
|
||||
@@ -107,6 +119,15 @@ bool InsertIfNotPresent(Collection* const collection, const Key& key,
|
||||
return ret.second;
|
||||
}
|
||||
|
||||
// Inserts a new std::pair<key,value> into a map or std::unordered_map.
|
||||
// Insert a new key into a set or std::unordered_set.
|
||||
// Dies if the key is already present.
|
||||
template <class Collection>
|
||||
void InsertOrDieNoPrint(Collection* const collection,
|
||||
const typename Collection::value_type& value) {
|
||||
CHECK(collection->insert(value).second);
|
||||
}
|
||||
|
||||
// Inserts a new std::pair<key,value> into a map or std::unordered_map.
|
||||
// Insert a new key into a set or std::unordered_set.
|
||||
// Dies if the key is already present.
|
||||
|
||||
@@ -905,14 +905,13 @@ void BinaryImplicationGraph::TransformIntoMaxCliques(
|
||||
clique = ExpandAtMostOne(clique);
|
||||
}
|
||||
std::sort(clique.begin(), clique.end());
|
||||
if (max_cliques.count(clique)) {
|
||||
if (!gtl::InsertIfNotPresent(&max_cliques, clique)) {
|
||||
++num_removed;
|
||||
clique.clear();
|
||||
continue;
|
||||
}
|
||||
|
||||
const int clique_index = max_cliques.size();
|
||||
max_cliques.insert(clique);
|
||||
for (const Literal l : clique) {
|
||||
max_cliques_containing[l.Index()].push_back(clique_index);
|
||||
}
|
||||
|
||||
@@ -86,8 +86,7 @@ std::string ValidateIntegerVariable(const CpModelProto& model, int v) {
|
||||
std::string ValidateArgumentReferencesInConstraint(const CpModelProto& model,
|
||||
int c) {
|
||||
const ConstraintProto& ct = model.constraints(c);
|
||||
IndexReferences references;
|
||||
AddReferencesUsedByConstraint(ct, &references);
|
||||
IndexReferences references = GetReferencesUsedByConstraint(ct);
|
||||
for (const int v : references.variables) {
|
||||
if (!VariableReferenceIsValid(model, v)) {
|
||||
return absl::StrCat("Out of bound integer variable ", v,
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
|
||||
#include <numeric>
|
||||
|
||||
#include "ortools/sat/cp_model.pb.h"
|
||||
#include "ortools/sat/cp_model_utils.h"
|
||||
#include "ortools/util/random_engine.h"
|
||||
|
||||
@@ -85,31 +86,38 @@ bool NeighborhoodGeneratorHelper::IsConstant(int var) const {
|
||||
model_proto_.variables(var).domain(1);
|
||||
}
|
||||
|
||||
CpModelProto NeighborhoodGeneratorHelper::FixGivenVariables(
|
||||
Neighborhood NeighborhoodGeneratorHelper::FixGivenVariables(
|
||||
const CpSolverResponse& initial_solution,
|
||||
const std::vector<int>& variables_to_fix) const {
|
||||
CpModelProto result = model_proto_;
|
||||
CHECK_EQ(initial_solution.solution_size(), result.variables_size());
|
||||
Neighborhood neighborhood;
|
||||
neighborhood.is_reduced = !variables_to_fix.empty();
|
||||
neighborhood.cp_model = model_proto_;
|
||||
if (!neighborhood.is_reduced) return neighborhood;
|
||||
CHECK_EQ(initial_solution.solution_size(),
|
||||
neighborhood.cp_model.variables_size());
|
||||
for (const int var : variables_to_fix) {
|
||||
result.mutable_variables(var)->clear_domain();
|
||||
result.mutable_variables(var)->add_domain(initial_solution.solution(var));
|
||||
result.mutable_variables(var)->add_domain(initial_solution.solution(var));
|
||||
neighborhood.cp_model.mutable_variables(var)->clear_domain();
|
||||
neighborhood.cp_model.mutable_variables(var)->add_domain(
|
||||
initial_solution.solution(var));
|
||||
neighborhood.cp_model.mutable_variables(var)->add_domain(
|
||||
initial_solution.solution(var));
|
||||
}
|
||||
|
||||
// Set the current solution as a hint.
|
||||
result.clear_solution_hint();
|
||||
for (int var = 0; var < result.variables_size(); ++var) {
|
||||
result.mutable_solution_hint()->add_vars(var);
|
||||
result.mutable_solution_hint()->add_values(initial_solution.solution(var));
|
||||
neighborhood.cp_model.clear_solution_hint();
|
||||
for (int var = 0; var < neighborhood.cp_model.variables_size(); ++var) {
|
||||
neighborhood.cp_model.mutable_solution_hint()->add_vars(var);
|
||||
neighborhood.cp_model.mutable_solution_hint()->add_values(
|
||||
initial_solution.solution(var));
|
||||
}
|
||||
|
||||
// TODO(user): force better objective? Note that this is already done when the
|
||||
// hint above is sucessfully loaded (i.e. if it passes the presolve correctly)
|
||||
// since the solver will try to find better solution than the current one.
|
||||
return result;
|
||||
return neighborhood;
|
||||
}
|
||||
|
||||
CpModelProto NeighborhoodGeneratorHelper::RelaxGivenVariables(
|
||||
Neighborhood NeighborhoodGeneratorHelper::RelaxGivenVariables(
|
||||
const CpSolverResponse& initial_solution,
|
||||
const std::vector<int>& relaxed_variables) const {
|
||||
std::vector<bool> relaxed_variables_set(model_proto_.variables_size(), false);
|
||||
@@ -138,7 +146,7 @@ void GetRandomSubset(int seed, double relative_size, std::vector<int>* base) {
|
||||
|
||||
} // namespace
|
||||
|
||||
CpModelProto SimpleNeighborhoodGenerator::Generate(
|
||||
Neighborhood SimpleNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
std::vector<int> fixed_variables = helper_.ActiveVariables();
|
||||
@@ -146,13 +154,18 @@ CpModelProto SimpleNeighborhoodGenerator::Generate(
|
||||
return helper_.FixGivenVariables(initial_solution, fixed_variables);
|
||||
}
|
||||
|
||||
CpModelProto VariableGraphNeighborhoodGenerator::Generate(
|
||||
Neighborhood VariableGraphNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
const int num_active_vars = helper_.ActiveVariables().size();
|
||||
const int num_model_vars = helper_.ModelProto().variables_size();
|
||||
const int target_size = std::ceil(difficulty * num_active_vars);
|
||||
if (target_size == num_active_vars) return helper_.ModelProto();
|
||||
if (target_size == num_active_vars) {
|
||||
Neighborhood neighborhood;
|
||||
neighborhood.is_reduced = false;
|
||||
neighborhood.cp_model = helper_.ModelProto();
|
||||
return neighborhood;
|
||||
}
|
||||
CHECK_GT(target_size, 0);
|
||||
|
||||
random_engine_t random;
|
||||
@@ -198,15 +211,19 @@ CpModelProto VariableGraphNeighborhoodGenerator::Generate(
|
||||
return helper_.RelaxGivenVariables(initial_solution, relaxed_variables);
|
||||
}
|
||||
|
||||
CpModelProto ConstraintGraphNeighborhoodGenerator::Generate(
|
||||
Neighborhood ConstraintGraphNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
const int num_active_vars = helper_.ActiveVariables().size();
|
||||
const int num_model_vars = helper_.ModelProto().variables_size();
|
||||
const int target_size = std::ceil(difficulty * num_active_vars);
|
||||
const int num_constraints = helper_.ConstraintToVar().size();
|
||||
if (num_constraints == 0) return helper_.ModelProto();
|
||||
if (target_size == num_active_vars) return helper_.ModelProto();
|
||||
if (num_constraints == 0 || target_size == num_active_vars) {
|
||||
Neighborhood neighborhood;
|
||||
neighborhood.is_reduced = false;
|
||||
neighborhood.cp_model = helper_.ModelProto();
|
||||
return neighborhood;
|
||||
}
|
||||
CHECK_GT(target_size, 0);
|
||||
|
||||
random_engine_t random;
|
||||
@@ -260,24 +277,24 @@ CpModelProto ConstraintGraphNeighborhoodGenerator::Generate(
|
||||
return helper_.RelaxGivenVariables(initial_solution, relaxed_variables);
|
||||
}
|
||||
|
||||
CpModelProto SchedulingNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
std::set<int> intervals_to_relax;
|
||||
{
|
||||
const auto span = helper_.TypeToConstraints(ConstraintProto::kInterval);
|
||||
std::vector<int> v(span.begin(), span.end());
|
||||
GetRandomSubset(seed, difficulty, &v);
|
||||
intervals_to_relax.insert(v.begin(), v.end());
|
||||
}
|
||||
|
||||
CpModelProto copy = helper_.ModelProto();
|
||||
Neighborhood GenerateSchedulingNeighborhoodForRelaxation(
|
||||
const absl::Span<const int> intervals_to_relax,
|
||||
const CpSolverResponse& initial_solution,
|
||||
const NeighborhoodGeneratorHelper& helper) {
|
||||
Neighborhood neighborhood;
|
||||
neighborhood.is_reduced =
|
||||
(intervals_to_relax.size() <
|
||||
helper.TypeToConstraints(ConstraintProto::kInterval).size());
|
||||
neighborhood.cp_model = helper.ModelProto();
|
||||
// We will extend the set with some interval that we cannot fix.
|
||||
std::set<int> ignored_intervals(intervals_to_relax.begin(),
|
||||
intervals_to_relax.end());
|
||||
|
||||
// Fix the presence/absence of non-relaxed intervals.
|
||||
for (const int i : helper_.TypeToConstraints(ConstraintProto::kInterval)) {
|
||||
if (intervals_to_relax.count(i)) continue;
|
||||
for (const int i : helper.TypeToConstraints(ConstraintProto::kInterval)) {
|
||||
if (ignored_intervals.count(i)) continue;
|
||||
|
||||
const ConstraintProto& interval_ct = copy.constraints(i);
|
||||
const ConstraintProto& interval_ct = neighborhood.cp_model.constraints(i);
|
||||
if (interval_ct.enforcement_literal().empty()) continue;
|
||||
|
||||
CHECK_EQ(interval_ct.enforcement_literal().size(), 1);
|
||||
@@ -286,23 +303,24 @@ CpModelProto SchedulingNeighborhoodGenerator::Generate(
|
||||
const int value = initial_solution.solution(enforcement_var);
|
||||
|
||||
// Fix the value.
|
||||
copy.mutable_variables(enforcement_var)->clear_domain();
|
||||
copy.mutable_variables(enforcement_var)->add_domain(value);
|
||||
copy.mutable_variables(enforcement_var)->add_domain(value);
|
||||
neighborhood.cp_model.mutable_variables(enforcement_var)->clear_domain();
|
||||
neighborhood.cp_model.mutable_variables(enforcement_var)->add_domain(value);
|
||||
neighborhood.cp_model.mutable_variables(enforcement_var)->add_domain(value);
|
||||
|
||||
// If the interval is ignored, skip for the loop below as there is no
|
||||
// point adding precedence on it.
|
||||
if (RefIsPositive(enforcement_ref) == (value == 0)) {
|
||||
intervals_to_relax.insert(i);
|
||||
ignored_intervals.insert(i);
|
||||
}
|
||||
}
|
||||
|
||||
for (const int c : helper_.TypeToConstraints(ConstraintProto::kNoOverlap)) {
|
||||
for (const int c : helper.TypeToConstraints(ConstraintProto::kNoOverlap)) {
|
||||
// Sort all non-relaxed intervals of this constraint by current start time.
|
||||
std::vector<std::pair<int64, int>> start_interval_pairs;
|
||||
for (const int i : copy.constraints(c).no_overlap().intervals()) {
|
||||
if (intervals_to_relax.count(i)) continue;
|
||||
const ConstraintProto& interval_ct = copy.constraints(i);
|
||||
for (const int i :
|
||||
neighborhood.cp_model.constraints(c).no_overlap().intervals()) {
|
||||
if (ignored_intervals.count(i)) continue;
|
||||
const ConstraintProto& interval_ct = neighborhood.cp_model.constraints(i);
|
||||
|
||||
// TODO(user): we ignore size zero for now.
|
||||
const int size_var = interval_ct.interval().size();
|
||||
@@ -317,14 +335,18 @@ CpModelProto SchedulingNeighborhoodGenerator::Generate(
|
||||
// Add precedence between the remaining intervals, forcing their order.
|
||||
for (int i = 0; i + 1 < start_interval_pairs.size(); ++i) {
|
||||
const int before_var =
|
||||
copy.constraints(start_interval_pairs[i].second).interval().end();
|
||||
const int after_var = copy.constraints(start_interval_pairs[i + 1].second)
|
||||
.interval()
|
||||
.start();
|
||||
neighborhood.cp_model.constraints(start_interval_pairs[i].second)
|
||||
.interval()
|
||||
.end();
|
||||
const int after_var =
|
||||
neighborhood.cp_model.constraints(start_interval_pairs[i + 1].second)
|
||||
.interval()
|
||||
.start();
|
||||
CHECK_LE(initial_solution.solution(before_var),
|
||||
initial_solution.solution(after_var));
|
||||
|
||||
LinearConstraintProto* linear = copy.add_constraints()->mutable_linear();
|
||||
LinearConstraintProto* linear =
|
||||
neighborhood.cp_model.add_constraints()->mutable_linear();
|
||||
linear->add_domain(kint64min);
|
||||
linear->add_domain(0);
|
||||
linear->add_vars(before_var);
|
||||
@@ -337,13 +359,54 @@ CpModelProto SchedulingNeighborhoodGenerator::Generate(
|
||||
// Set the current solution as a hint.
|
||||
//
|
||||
// TODO(user): Move to common function?
|
||||
copy.clear_solution_hint();
|
||||
for (int var = 0; var < copy.variables_size(); ++var) {
|
||||
copy.mutable_solution_hint()->add_vars(var);
|
||||
copy.mutable_solution_hint()->add_values(initial_solution.solution(var));
|
||||
neighborhood.cp_model.clear_solution_hint();
|
||||
for (int var = 0; var < neighborhood.cp_model.variables_size(); ++var) {
|
||||
neighborhood.cp_model.mutable_solution_hint()->add_vars(var);
|
||||
neighborhood.cp_model.mutable_solution_hint()->add_values(
|
||||
initial_solution.solution(var));
|
||||
}
|
||||
|
||||
return copy;
|
||||
return neighborhood;
|
||||
}
|
||||
|
||||
Neighborhood SchedulingNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
const auto span = helper_.TypeToConstraints(ConstraintProto::kInterval);
|
||||
std::vector<int> intervals_to_relax(span.begin(), span.end());
|
||||
GetRandomSubset(seed, difficulty, &intervals_to_relax);
|
||||
|
||||
return GenerateSchedulingNeighborhoodForRelaxation(intervals_to_relax,
|
||||
initial_solution, helper_);
|
||||
}
|
||||
|
||||
Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate(
|
||||
const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const {
|
||||
std::vector<std::pair<int64, int>> start_interval_pairs;
|
||||
for (const int i : helper_.TypeToConstraints(ConstraintProto::kInterval)) {
|
||||
const ConstraintProto& interval_ct = helper_.ModelProto().constraints(i);
|
||||
|
||||
const int start_var = interval_ct.interval().start();
|
||||
const int64 start_value = initial_solution.solution(start_var);
|
||||
start_interval_pairs.push_back({start_value, i});
|
||||
}
|
||||
std::sort(start_interval_pairs.begin(), start_interval_pairs.end());
|
||||
const int relaxed_size = std::floor(difficulty * start_interval_pairs.size());
|
||||
random_engine_t random;
|
||||
random.seed(seed);
|
||||
|
||||
std::uniform_int_distribution<int> random_var(
|
||||
0, start_interval_pairs.size() - relaxed_size - 1);
|
||||
const int random_start_index = random_var(random);
|
||||
std::vector<int> intervals_to_relax;
|
||||
// TODO(user,user): Consider relaxing more than one time window intervals.
|
||||
// This seems to help with Giza models.
|
||||
for (int i = random_start_index; i < relaxed_size; ++i) {
|
||||
intervals_to_relax.push_back(start_interval_pairs[i].second);
|
||||
}
|
||||
return GenerateSchedulingNeighborhoodForRelaxation(intervals_to_relax,
|
||||
initial_solution, helper_);
|
||||
}
|
||||
|
||||
} // namespace sat
|
||||
|
||||
@@ -23,6 +23,18 @@
|
||||
namespace operations_research {
|
||||
namespace sat {
|
||||
|
||||
// Neighborhood returned by Neighborhood generators.
|
||||
struct Neighborhood {
|
||||
// True if the CpModelProto below is not the same as the base model.
|
||||
// This is not expected to happen often but allows to handle this case
|
||||
// properly.
|
||||
bool is_reduced;
|
||||
|
||||
// Relaxed model. Any feasible solution to this "local" model should be a
|
||||
// feasible solution to the base model too.
|
||||
CpModelProto cp_model;
|
||||
};
|
||||
|
||||
// Contains pre-computed information about a given CpModelProto that is meant
|
||||
// to be used to generate LNS neighborhood. This class can be shared between
|
||||
// more than one generator in order to reduce memory usage.
|
||||
@@ -35,13 +47,13 @@ class NeighborhoodGeneratorHelper {
|
||||
|
||||
// Returns the LNS fragment where the given variables are fixed to the value
|
||||
// they take in the given solution.
|
||||
CpModelProto FixGivenVariables(
|
||||
Neighborhood FixGivenVariables(
|
||||
const CpSolverResponse& initial_solution,
|
||||
const std::vector<int>& variables_to_fix) const;
|
||||
|
||||
// Returns the LNS fragment which will relax all inactive variables and all
|
||||
// variables in relaxed_variables.
|
||||
CpModelProto RelaxGivenVariables(
|
||||
Neighborhood RelaxGivenVariables(
|
||||
const CpSolverResponse& initial_solution,
|
||||
const std::vector<int>& relaxed_variables) const;
|
||||
|
||||
@@ -114,7 +126,7 @@ class NeighborhoodGenerator {
|
||||
// CPModelProto should also be valid solution to the same initial model.
|
||||
//
|
||||
// This function should be thread-safe.
|
||||
virtual CpModelProto Generate(const CpSolverResponse& initial_solution,
|
||||
virtual Neighborhood Generate(const CpSolverResponse& initial_solution,
|
||||
int64 seed, double difficulty) const = 0;
|
||||
|
||||
// Returns a short description of the generator.
|
||||
@@ -131,7 +143,7 @@ class SimpleNeighborhoodGenerator : public NeighborhoodGenerator {
|
||||
explicit SimpleNeighborhoodGenerator(
|
||||
NeighborhoodGeneratorHelper const* helper, const std::string& name)
|
||||
: NeighborhoodGenerator(name, helper) {}
|
||||
CpModelProto Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
Neighborhood Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const final;
|
||||
};
|
||||
|
||||
@@ -144,7 +156,7 @@ class VariableGraphNeighborhoodGenerator : public NeighborhoodGenerator {
|
||||
explicit VariableGraphNeighborhoodGenerator(
|
||||
NeighborhoodGeneratorHelper const* helper, const std::string& name)
|
||||
: NeighborhoodGenerator(name, helper) {}
|
||||
CpModelProto Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
Neighborhood Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const final;
|
||||
};
|
||||
|
||||
@@ -157,10 +169,18 @@ class ConstraintGraphNeighborhoodGenerator : public NeighborhoodGenerator {
|
||||
explicit ConstraintGraphNeighborhoodGenerator(
|
||||
NeighborhoodGeneratorHelper const* helper, const std::string& name)
|
||||
: NeighborhoodGenerator(name, helper) {}
|
||||
CpModelProto Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
Neighborhood Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const final;
|
||||
};
|
||||
|
||||
// Helper method for the scheduling neighborhood generators. Returns the model
|
||||
// as neighborhood for the given set of intervals to relax. For each no_overlap
|
||||
// constraints, it adds strict relation order between the non-relaxed intervals.
|
||||
Neighborhood GenerateSchedulingNeighborhoodForRelaxation(
|
||||
const absl::Span<const int> intervals_to_relax,
|
||||
const CpSolverResponse& initial_solution,
|
||||
const NeighborhoodGeneratorHelper& helper);
|
||||
|
||||
// Only make sense for scheduling problem. This select a random set of interval
|
||||
// of the problem according to the difficulty. Then, for each no_overlap
|
||||
// constraints, it adds strict relation order between the non-relaxed intervals.
|
||||
@@ -172,7 +192,19 @@ class SchedulingNeighborhoodGenerator : public NeighborhoodGenerator {
|
||||
NeighborhoodGeneratorHelper const* helper, const std::string& name)
|
||||
: NeighborhoodGenerator(name, helper) {}
|
||||
|
||||
CpModelProto Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
Neighborhood Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const final;
|
||||
};
|
||||
|
||||
// Similar to SchedulingNeighborhoodGenerator except the set of intervals that
|
||||
// are relaxed are from a specific random time interval.
|
||||
class SchedulingTimeWindowNeighborhoodGenerator : public NeighborhoodGenerator {
|
||||
public:
|
||||
explicit SchedulingTimeWindowNeighborhoodGenerator(
|
||||
NeighborhoodGeneratorHelper const* helper, const std::string& name)
|
||||
: NeighborhoodGenerator(name, helper) {}
|
||||
|
||||
Neighborhood Generate(const CpSolverResponse& initial_solution, int64 seed,
|
||||
double difficulty) const final;
|
||||
};
|
||||
|
||||
|
||||
@@ -109,23 +109,28 @@ void CpModelMapping::CreateVariables(const CpModelProto& model_proto,
|
||||
}
|
||||
} else {
|
||||
// Compute the integer variable references used by the model.
|
||||
IndexReferences references;
|
||||
absl::flat_hash_set<int> used_variables;
|
||||
|
||||
IndexReferences refs;
|
||||
for (int c = 0; c < model_proto.constraints_size(); ++c) {
|
||||
const ConstraintProto& ct = model_proto.constraints(c);
|
||||
AddReferencesUsedByConstraint(ct, &references);
|
||||
refs = GetReferencesUsedByConstraint(ct);
|
||||
for (const int ref : refs.variables) {
|
||||
used_variables.insert(PositiveRef(ref));
|
||||
}
|
||||
}
|
||||
|
||||
// Add the objectives and search heuristics variables that needs to be
|
||||
// referenceable as integer even if they are only used as Booleans.
|
||||
if (model_proto.has_objective()) {
|
||||
for (const int obj_var : model_proto.objective().vars()) {
|
||||
references.variables.insert(obj_var);
|
||||
used_variables.insert(PositiveRef(obj_var));
|
||||
}
|
||||
}
|
||||
for (const DecisionStrategyProto& strategy :
|
||||
model_proto.search_strategy()) {
|
||||
for (const int var : strategy.variables()) {
|
||||
references.variables.insert(var);
|
||||
used_variables.insert(PositiveRef(var));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,17 +138,13 @@ void CpModelMapping::CreateVariables(const CpModelProto& model_proto,
|
||||
// considered "used".
|
||||
for (int i = 0; i < num_proto_variables; ++i) {
|
||||
if (booleans_[i] == kNoBooleanVariable) {
|
||||
references.variables.insert(i);
|
||||
used_variables.insert(i);
|
||||
}
|
||||
}
|
||||
|
||||
// We want the variable in the problem order.
|
||||
// Warning: references.variables also contains negative reference.
|
||||
var_to_instantiate_as_integer.assign(references.variables.begin(),
|
||||
references.variables.end());
|
||||
for (int& ref : var_to_instantiate_as_integer) {
|
||||
if (!RefIsPositive(ref)) ref = PositiveRef(ref);
|
||||
}
|
||||
var_to_instantiate_as_integer.assign(used_variables.begin(),
|
||||
used_variables.end());
|
||||
gtl::STLSortAndRemoveDuplicates(&var_to_instantiate_as_integer);
|
||||
}
|
||||
integers_.resize(num_proto_variables, kNoIntegerVariable);
|
||||
@@ -435,7 +436,8 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto,
|
||||
// properly exploit that afterwards though. Do some research!
|
||||
const int num_proto_variables = model_proto.variables_size();
|
||||
std::vector<bool> already_seen(num_proto_variables, false);
|
||||
std::vector<std::set<int>> enforcement_intersection(num_proto_variables);
|
||||
std::vector<std::vector<int>> enforcement_intersection(num_proto_variables);
|
||||
std::set<int> literals_set;
|
||||
for (int c = 0; c < model_proto.constraints_size(); ++c) {
|
||||
const ConstraintProto& ct = model_proto.constraints(c);
|
||||
if (ct.enforcement_literal().empty()) {
|
||||
@@ -444,21 +446,23 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto,
|
||||
enforcement_intersection[var].clear();
|
||||
}
|
||||
} else {
|
||||
const std::set<int> literals{ct.enforcement_literal().begin(),
|
||||
ct.enforcement_literal().end()};
|
||||
literals_set.clear();
|
||||
literals_set.insert(ct.enforcement_literal().begin(),
|
||||
ct.enforcement_literal().end());
|
||||
for (const int var : UsedVariables(ct)) {
|
||||
if (!already_seen[var]) {
|
||||
enforcement_intersection[var] = literals;
|
||||
enforcement_intersection[var].assign(ct.enforcement_literal().begin(),
|
||||
ct.enforcement_literal().end());
|
||||
} else {
|
||||
// Take the intersection.
|
||||
for (auto it = enforcement_intersection[var].begin();
|
||||
it != enforcement_intersection[var].end();) {
|
||||
if (!gtl::ContainsKey(literals, *it)) {
|
||||
it = enforcement_intersection[var].erase(it);
|
||||
} else {
|
||||
++it;
|
||||
std::vector<int>& vector_ref = enforcement_intersection[var];
|
||||
int new_size = 0;
|
||||
for (const int literal : vector_ref) {
|
||||
if (gtl::ContainsKey(literals_set, literal)) {
|
||||
vector_ref[new_size++] = literal;
|
||||
}
|
||||
}
|
||||
vector_ref.resize(new_size);
|
||||
}
|
||||
already_seen[var] = true;
|
||||
}
|
||||
@@ -478,7 +482,7 @@ void CpModelMapping::DetectOptionalVariables(const CpModelProto& model_proto,
|
||||
|
||||
++num_optionals;
|
||||
integer_trail->MarkIntegerVariableAsOptional(
|
||||
Integer(var), Literal(*enforcement_intersection[var].begin()));
|
||||
Integer(var), Literal(enforcement_intersection[var].front()));
|
||||
}
|
||||
VLOG(2) << "Auto-detected " << num_optionals << " optional variables.";
|
||||
}
|
||||
@@ -590,7 +594,11 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) {
|
||||
IntegerValue min_sum(0);
|
||||
IntegerValue max_sum(0);
|
||||
auto* integer_trail = m->GetOrCreate<IntegerTrail>();
|
||||
bool all_booleans = true;
|
||||
for (int i = 0; i < vars.size(); ++i) {
|
||||
if (all_booleans && !mapping->IsBoolean(ct.linear().vars(i))) {
|
||||
all_booleans = false;
|
||||
}
|
||||
const IntegerValue term_a = coeffs[i] * integer_trail->LowerBound(vars[i]);
|
||||
const IntegerValue term_b = coeffs[i] * integer_trail->UpperBound(vars[i]);
|
||||
min_sum += std::min(term_a, term_b);
|
||||
@@ -604,20 +612,14 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) {
|
||||
if (max_sum <= ub) ub = kint64max;
|
||||
|
||||
if (!HasEnforcementLiteral(ct)) {
|
||||
// Detect if there is only Booleans in order to use a more efficient
|
||||
// propagator. TODO(user): we should probably also implement an
|
||||
// half-reified version of this constraint.
|
||||
bool all_booleans = true;
|
||||
std::vector<LiteralWithCoeff> cst;
|
||||
for (int i = 0; i < vars.size(); ++i) {
|
||||
const int ref = ct.linear().vars(i);
|
||||
if (!mapping->IsBoolean(ref)) {
|
||||
all_booleans = false;
|
||||
continue;
|
||||
}
|
||||
cst.push_back({mapping->Literal(ref), coeffs[i]});
|
||||
}
|
||||
if (all_booleans) {
|
||||
// TODO(user): we should probably also implement an
|
||||
// half-reified version of this constraint.
|
||||
std::vector<LiteralWithCoeff> cst;
|
||||
for (int i = 0; i < vars.size(); ++i) {
|
||||
const int ref = ct.linear().vars(i);
|
||||
cst.push_back({mapping->Literal(ref), coeffs[i]});
|
||||
}
|
||||
m->Add(BooleanLinearConstraint(lb, ub, &cst));
|
||||
} else {
|
||||
if (lb != kint64min) {
|
||||
|
||||
@@ -141,7 +141,7 @@ bool PresolveContext::DomainContains(int ref, int64 value) const {
|
||||
}
|
||||
|
||||
bool PresolveContext::IntersectDomainWith(int ref, const Domain& domain) {
|
||||
CHECK(!DomainIsEmpty(ref));
|
||||
DCHECK(!DomainIsEmpty(ref));
|
||||
const int var = PositiveRef(ref);
|
||||
|
||||
if (RefIsPositive(ref)) {
|
||||
@@ -939,18 +939,17 @@ void DivideLinearByGcd(ConstraintProto* ct, PresolveContext* context) {
|
||||
|
||||
bool CanonicalizeLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
// First regroup the terms on the same variables and sum the fixed ones.
|
||||
// Note that we use a map to sort the variables and because we expect most
|
||||
// constraints to be small.
|
||||
//
|
||||
// TODO(user): move the map in context to reuse its memory. Add a quick pass
|
||||
// TODO(user): move terms in context to reuse its memory? Add a quick pass
|
||||
// to skip most of the work below if the constraint is already in canonical
|
||||
// form (strictly increasing var, no-fixed var, gcd = 1).
|
||||
std::map<int, int64> var_to_coeff;
|
||||
std::vector<std::pair<int, int64>> terms;
|
||||
const bool was_affine = gtl::ContainsKey(context->affine_constraints, ct);
|
||||
|
||||
int64 sum_of_fixed_terms = 0;
|
||||
bool remapped = false;
|
||||
const int num_vars = ct->linear().vars().size();
|
||||
DCHECK_EQ(num_vars, ct->linear().coeffs().size());
|
||||
for (int i = 0; i < num_vars; ++i) {
|
||||
const int ref = ct->linear().vars(i);
|
||||
const int var = PositiveRef(ref);
|
||||
@@ -968,13 +967,9 @@ bool CanonicalizeLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
remapped = true;
|
||||
sum_of_fixed_terms += coeff * r.offset;
|
||||
}
|
||||
var_to_coeff[r.representative] += coeff * r.coeff;
|
||||
if (var_to_coeff[r.representative] == 0) {
|
||||
var_to_coeff.erase(r.representative);
|
||||
}
|
||||
terms.push_back({r.representative, coeff * r.coeff});
|
||||
} else {
|
||||
var_to_coeff[var] += coeff;
|
||||
if (var_to_coeff[var] == 0) var_to_coeff.erase(var);
|
||||
terms.push_back({var, coeff});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -986,10 +981,25 @@ bool CanonicalizeLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
|
||||
ct->mutable_linear()->clear_vars();
|
||||
ct->mutable_linear()->clear_coeffs();
|
||||
for (const auto entry : var_to_coeff) {
|
||||
std::sort(terms.begin(), terms.end());
|
||||
int current_var = 0;
|
||||
int64 current_coeff = 0;
|
||||
for (const auto entry : terms) {
|
||||
CHECK(RefIsPositive(entry.first));
|
||||
ct->mutable_linear()->add_vars(entry.first);
|
||||
ct->mutable_linear()->add_coeffs(entry.second);
|
||||
if (entry.first == current_var) {
|
||||
current_coeff += entry.second;
|
||||
} else {
|
||||
if (current_coeff != 0) {
|
||||
ct->mutable_linear()->add_vars(current_var);
|
||||
ct->mutable_linear()->add_coeffs(current_coeff);
|
||||
}
|
||||
current_var = entry.first;
|
||||
current_coeff = entry.second;
|
||||
}
|
||||
}
|
||||
if (current_coeff != 0) {
|
||||
ct->mutable_linear()->add_vars(current_var);
|
||||
ct->mutable_linear()->add_coeffs(current_coeff);
|
||||
}
|
||||
DivideLinearByGcd(ct, context);
|
||||
|
||||
@@ -998,7 +1008,7 @@ bool CanonicalizeLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
context->UpdateRuleStats("linear: remapped using affine relations");
|
||||
var_constraint_graph_changed = true;
|
||||
}
|
||||
if (var_to_coeff.size() < num_vars) {
|
||||
if (ct->linear().vars().size() < num_vars) {
|
||||
context->UpdateRuleStats("linear: fixed or dup variables");
|
||||
var_constraint_graph_changed = true;
|
||||
}
|
||||
@@ -1196,36 +1206,6 @@ bool PresolveLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Fixes the variable at 'var_index' to 'fixed_value' in the constraint and
|
||||
// returns the modified RHS Domain.
|
||||
Domain FixVariableInLinearConstraint(const int var_index,
|
||||
const int64 fixed_value,
|
||||
ConstraintProto* ct,
|
||||
PresolveContext* context) {
|
||||
auto* arg = ct->mutable_linear();
|
||||
const int num_vars = arg->vars_size();
|
||||
CHECK_LT(var_index, num_vars);
|
||||
const int ref = arg->vars(var_index);
|
||||
CHECK(context->DomainOf(ref).Contains(fixed_value));
|
||||
const int64 coeff = arg->coeffs(var_index);
|
||||
// Subtract the fixed term from the domain.
|
||||
const Domain term_domain(coeff * fixed_value);
|
||||
const Domain rhs_domain = ReadDomainFromProto(ct->linear());
|
||||
const Domain new_rhs_domain = rhs_domain.AdditionWith(term_domain.Negation());
|
||||
// Copy coefficients of all variables except the fixed one.
|
||||
int new_size = 0;
|
||||
for (int i = 0; i < num_vars; ++i) {
|
||||
if (i == var_index) continue;
|
||||
arg->set_vars(new_size, arg->vars(i));
|
||||
arg->set_coeffs(new_size, arg->coeffs(i));
|
||||
++new_size;
|
||||
}
|
||||
arg->mutable_vars()->Truncate(new_size);
|
||||
arg->mutable_coeffs()->Truncate(new_size);
|
||||
FillDomainInProto(new_rhs_domain, arg);
|
||||
return new_rhs_domain;
|
||||
}
|
||||
|
||||
// Identify Boolean variable that makes the constraint always true when set to
|
||||
// true or false. Moves such literal to the constraint enforcement literals
|
||||
// list.
|
||||
@@ -1254,6 +1234,10 @@ void ExtractEnforcementLiteralFromLinearConstraint(ConstraintProto* ct,
|
||||
const bool not_upper_bounded = max_sum <= rhs_domain.Max();
|
||||
if (not_lower_bounded == not_upper_bounded) return;
|
||||
|
||||
// To avoid a quadratic loop, we will rewrite the linear expression at the
|
||||
// same time as we extract enforcement literals.
|
||||
int new_size = 0;
|
||||
LinearConstraintProto* mutable_arg = ct->mutable_linear();
|
||||
for (int i = 0; i < arg.vars_size(); ++i) {
|
||||
// Only work with binary variables.
|
||||
//
|
||||
@@ -1262,56 +1246,61 @@ void ExtractEnforcementLiteralFromLinearConstraint(ConstraintProto* ct,
|
||||
// variable at is min/max" and using this literal in the enforcement list.
|
||||
// It is thus a bit more involved, and might not be as useful.
|
||||
const int ref = arg.vars(i);
|
||||
if (context->MinOf(ref) != 0) continue;
|
||||
if (context->MaxOf(ref) != 1) continue;
|
||||
const int64 coeff = arg.coeffs(i);
|
||||
if (not_lower_bounded) {
|
||||
if (max_sum - std::abs(coeff) <= rhs_domain.front().end) {
|
||||
if (coeff > 0) {
|
||||
// Fix the variable to 1 in the constraint and add it as enforcement
|
||||
// literal.
|
||||
rhs_domain = FixVariableInLinearConstraint(i, 1, ct, context);
|
||||
ct->add_enforcement_literal(ref);
|
||||
// 'min_sum' remains unaffected.
|
||||
max_sum -= coeff;
|
||||
} else {
|
||||
// Fix the variable to 0 in the constraint and add its negation as
|
||||
// enforcement literal.
|
||||
rhs_domain = FixVariableInLinearConstraint(i, 0, ct, context);
|
||||
ct->add_enforcement_literal(NegatedRef(ref));
|
||||
// 'max_sum' remains unaffected.
|
||||
min_sum -= coeff;
|
||||
if (context->MinOf(ref) == 0 && context->MaxOf(ref) == 1) {
|
||||
const int64 coeff = arg.coeffs(i);
|
||||
if (not_lower_bounded) {
|
||||
if (max_sum - std::abs(coeff) <= rhs_domain.front().end) {
|
||||
if (coeff > 0) {
|
||||
// Fix the variable to 1 in the constraint and add it as enforcement
|
||||
// literal.
|
||||
rhs_domain = rhs_domain.AdditionWith(Domain(-coeff));
|
||||
ct->add_enforcement_literal(ref);
|
||||
// 'min_sum' remains unaffected.
|
||||
max_sum -= coeff;
|
||||
} else {
|
||||
// Fix the variable to 0 in the constraint and add its negation as
|
||||
// enforcement literal.
|
||||
ct->add_enforcement_literal(NegatedRef(ref));
|
||||
// 'max_sum' remains unaffected.
|
||||
min_sum -= coeff;
|
||||
}
|
||||
context->UpdateRuleStats(
|
||||
"linear: extracted enforcement literal from constraint");
|
||||
continue;
|
||||
}
|
||||
context->UpdateRuleStats(
|
||||
"linear: extracted enforcement literal from constraint");
|
||||
--i;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
DCHECK(not_upper_bounded);
|
||||
if (min_sum + std::abs(coeff) >= rhs_domain.back().start) {
|
||||
if (coeff > 0) {
|
||||
// Fix the variable to 0 in the constraint and add its negation as
|
||||
// enforcement literal.
|
||||
rhs_domain = FixVariableInLinearConstraint(i, 0, ct, context);
|
||||
ct->add_enforcement_literal(NegatedRef(ref));
|
||||
// 'min_sum' remains unaffected.
|
||||
max_sum -= coeff;
|
||||
} else {
|
||||
// Fix the variable to 1 in the constraint and add it as enforcement
|
||||
// literal.
|
||||
rhs_domain = FixVariableInLinearConstraint(i, 1, ct, context);
|
||||
ct->add_enforcement_literal(ref);
|
||||
// 'max_sum' remains unaffected.
|
||||
min_sum -= coeff;
|
||||
} else {
|
||||
DCHECK(not_upper_bounded);
|
||||
if (min_sum + std::abs(coeff) >= rhs_domain.back().start) {
|
||||
if (coeff > 0) {
|
||||
// Fix the variable to 0 in the constraint and add its negation as
|
||||
// enforcement literal.
|
||||
ct->add_enforcement_literal(NegatedRef(ref));
|
||||
// 'min_sum' remains unaffected.
|
||||
max_sum -= coeff;
|
||||
} else {
|
||||
// Fix the variable to 1 in the constraint and add it as enforcement
|
||||
// literal.
|
||||
rhs_domain = rhs_domain.AdditionWith(Domain(-coeff));
|
||||
ct->add_enforcement_literal(ref);
|
||||
// 'max_sum' remains unaffected.
|
||||
min_sum -= coeff;
|
||||
}
|
||||
context->UpdateRuleStats(
|
||||
"linear: extracted enforcement literal from constraint");
|
||||
continue;
|
||||
}
|
||||
context->UpdateRuleStats(
|
||||
"linear: extracted enforcement literal from constraint");
|
||||
--i;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// We keep this term.
|
||||
mutable_arg->set_vars(new_size, mutable_arg->vars(i));
|
||||
mutable_arg->set_coeffs(new_size, mutable_arg->coeffs(i));
|
||||
++new_size;
|
||||
}
|
||||
|
||||
mutable_arg->mutable_vars()->Truncate(new_size);
|
||||
mutable_arg->mutable_coeffs()->Truncate(new_size);
|
||||
FillDomainInProto(rhs_domain, mutable_arg);
|
||||
}
|
||||
|
||||
void ExtractAtMostOneFromLinear(ConstraintProto* ct, PresolveContext* context) {
|
||||
@@ -1659,7 +1648,7 @@ bool PresolveElement(ConstraintProto* ct, PresolveContext* context) {
|
||||
context->UpdateRuleStats("element: reduced index domain");
|
||||
}
|
||||
if (context->IntersectDomainWith(target_ref, infered_domain)) {
|
||||
if (context->DomainOf(target_ref).IsEmpty()) return true;
|
||||
if (context->DomainIsEmpty(target_ref)) return true;
|
||||
context->UpdateRuleStats("element: reduced target domain");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -880,7 +880,8 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto,
|
||||
LinearRelaxation relaxation;
|
||||
|
||||
// Linearize the constraints.
|
||||
IndexReferences refs;
|
||||
absl::flat_hash_set<int> used_integer_variable;
|
||||
|
||||
auto* mapping = m->GetOrCreate<CpModelMapping>();
|
||||
auto* encoder = m->GetOrCreate<IntegerEncoder>();
|
||||
for (const auto& ct : model_proto.constraints()) {
|
||||
@@ -900,9 +901,7 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto,
|
||||
// view is worth it.
|
||||
//
|
||||
// TODO(user): It should be possible to speed this up if needed.
|
||||
refs.variables.clear();
|
||||
refs.literals.clear();
|
||||
AddReferencesUsedByConstraint(ct, &refs);
|
||||
const IndexReferences refs = GetReferencesUsedByConstraint(ct);
|
||||
bool ok = true;
|
||||
for (const int literal_ref : refs.literals) {
|
||||
const Literal literal = mapping->Literal(literal_ref);
|
||||
@@ -1917,8 +1916,11 @@ CpSolverResponse SolveCpModelWithLNS(
|
||||
generators.push_back(absl::make_unique<ConstraintGraphNeighborhoodGenerator>(
|
||||
&helper, "cst_lns"));
|
||||
if (!helper.TypeToConstraints(ConstraintProto::kNoOverlap).empty()) {
|
||||
generators.push_back(
|
||||
absl::make_unique<SchedulingTimeWindowNeighborhoodGenerator>(
|
||||
&helper, "scheduling_time_window_lns"));
|
||||
generators.push_back(absl::make_unique<SchedulingNeighborhoodGenerator>(
|
||||
&helper, "scheduling_lns"));
|
||||
&helper, "scheduling_random_lns"));
|
||||
}
|
||||
|
||||
// The "optimal" difficulties do not have to be the same for different
|
||||
@@ -1966,8 +1968,9 @@ CpSolverResponse SolveCpModelWithLNS(
|
||||
difficulties[seed % generators.size()];
|
||||
const double saved_difficulty = difficulty.value();
|
||||
const int selected_generator = seed % generators.size();
|
||||
CpModelProto local_problem = generators[selected_generator]->Generate(
|
||||
Neighborhood neighborhood = generators[selected_generator]->Generate(
|
||||
response, num_workers * seed + worker_id, saved_difficulty);
|
||||
CpModelProto& local_problem = neighborhood.cp_model;
|
||||
const std::string solution_info = absl::StrFormat(
|
||||
"%s(d=%0.2f s=%i t=%0.2f)", generators[selected_generator]->name(),
|
||||
saved_difficulty, seed, deterministic_time);
|
||||
@@ -2010,57 +2013,64 @@ CpSolverResponse SolveCpModelWithLNS(
|
||||
wall_timer, &local_response);
|
||||
}
|
||||
|
||||
return [&num_no_progress, &model_proto, &response, &difficulty,
|
||||
&deterministic_time, saved_difficulty, local_response,
|
||||
&observer, limit, solution_info]() {
|
||||
// TODO(user): This is not ideal in multithread because even though
|
||||
// the saved_difficulty will be the same for all thread, we will
|
||||
// Increase()/Decrease() the difficuty sequentially more than once.
|
||||
if (local_response.status() == CpSolverStatus::OPTIMAL ||
|
||||
local_response.status() == CpSolverStatus::INFEASIBLE) {
|
||||
difficulty.Increase();
|
||||
} else {
|
||||
difficulty.Decrease();
|
||||
}
|
||||
if (local_response.status() == CpSolverStatus::FEASIBLE ||
|
||||
local_response.status() == CpSolverStatus::OPTIMAL) {
|
||||
// If the objective are the same, we override the solution,
|
||||
// otherwise we just ignore this local solution and increment
|
||||
// num_no_progress.
|
||||
double coeff = model_proto.objective().scaling_factor();
|
||||
if (coeff == 0.0) coeff = 1.0;
|
||||
if (local_response.objective_value() * coeff >=
|
||||
response.objective_value() * coeff) {
|
||||
if (local_response.objective_value() * coeff >
|
||||
response.objective_value() * coeff) {
|
||||
return;
|
||||
const bool neighborhood_is_reduced = neighborhood.is_reduced;
|
||||
return
|
||||
[neighborhood_is_reduced, &num_no_progress, &model_proto, &response,
|
||||
&difficulty, local_response, &observer, limit, solution_info]() {
|
||||
// TODO(user): This is not ideal in multithread because even
|
||||
// though the saved_difficulty will be the same for all thread, we
|
||||
// will Increase()/Decrease() the difficuty sequentially more than
|
||||
// once.
|
||||
if (local_response.status() == CpSolverStatus::OPTIMAL ||
|
||||
local_response.status() == CpSolverStatus::INFEASIBLE) {
|
||||
if (neighborhood_is_reduced) {
|
||||
difficulty.Increase();
|
||||
} else {
|
||||
// We solved the full model here.
|
||||
response = local_response;
|
||||
}
|
||||
} else {
|
||||
difficulty.Decrease();
|
||||
}
|
||||
++num_no_progress;
|
||||
} else {
|
||||
num_no_progress = 0;
|
||||
}
|
||||
if (local_response.status() == CpSolverStatus::FEASIBLE ||
|
||||
local_response.status() == CpSolverStatus::OPTIMAL) {
|
||||
// If the objective are the same, we override the solution,
|
||||
// otherwise we just ignore this local solution and increment
|
||||
// num_no_progress.
|
||||
double coeff = model_proto.objective().scaling_factor();
|
||||
if (coeff == 0.0) coeff = 1.0;
|
||||
if (local_response.objective_value() * coeff >=
|
||||
response.objective_value() * coeff) {
|
||||
if (local_response.objective_value() * coeff >
|
||||
response.objective_value() * coeff) {
|
||||
return;
|
||||
}
|
||||
++num_no_progress;
|
||||
} else {
|
||||
num_no_progress = 0;
|
||||
}
|
||||
|
||||
// Update the global response.
|
||||
*(response.mutable_solution()) = local_response.solution();
|
||||
response.set_objective_value(local_response.objective_value());
|
||||
response.set_wall_time(limit->GetElapsedTime());
|
||||
response.set_user_time(response.user_time() +
|
||||
local_response.user_time());
|
||||
response.set_deterministic_time(
|
||||
response.deterministic_time() +
|
||||
local_response.deterministic_time());
|
||||
if (DEBUG_MODE || FLAGS_cp_model_check_intermediate_solutions) {
|
||||
CHECK(SolutionIsFeasible(
|
||||
model_proto,
|
||||
std::vector<int64>(local_response.solution().begin(),
|
||||
local_response.solution().end())));
|
||||
}
|
||||
if (num_no_progress == 0) { // Improving solution.
|
||||
response.set_solution_info(solution_info);
|
||||
observer(response);
|
||||
}
|
||||
}
|
||||
};
|
||||
// Update the global response.
|
||||
*(response.mutable_solution()) = local_response.solution();
|
||||
response.set_objective_value(local_response.objective_value());
|
||||
response.set_wall_time(limit->GetElapsedTime());
|
||||
response.set_user_time(response.user_time() +
|
||||
local_response.user_time());
|
||||
response.set_deterministic_time(
|
||||
response.deterministic_time() +
|
||||
local_response.deterministic_time());
|
||||
if (DEBUG_MODE || FLAGS_cp_model_check_intermediate_solutions) {
|
||||
CHECK(SolutionIsFeasible(
|
||||
model_proto,
|
||||
std::vector<int64>(local_response.solution().begin(),
|
||||
local_response.solution().end())));
|
||||
}
|
||||
if (num_no_progress == 0) { // Improving solution.
|
||||
response.set_solution_info(solution_info);
|
||||
observer(response);
|
||||
}
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
if (response.status() == CpSolverStatus::FEASIBLE) {
|
||||
|
||||
@@ -33,90 +33,91 @@ void AddIndices(const IntList& indices, std::vector<int>* output) {
|
||||
|
||||
} // namespace
|
||||
|
||||
void AddReferencesUsedByConstraint(const ConstraintProto& ct,
|
||||
IndexReferences* output) {
|
||||
IndexReferences GetReferencesUsedByConstraint(const ConstraintProto& ct) {
|
||||
IndexReferences output;
|
||||
switch (ct.constraint_case()) {
|
||||
case ConstraintProto::ConstraintCase::kBoolOr:
|
||||
AddIndices(ct.bool_or().literals(), &output->literals);
|
||||
AddIndices(ct.bool_or().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kBoolAnd:
|
||||
AddIndices(ct.bool_and().literals(), &output->literals);
|
||||
AddIndices(ct.bool_and().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kAtMostOne:
|
||||
AddIndices(ct.at_most_one().literals(), &output->literals);
|
||||
AddIndices(ct.at_most_one().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kBoolXor:
|
||||
AddIndices(ct.bool_xor().literals(), &output->literals);
|
||||
AddIndices(ct.bool_xor().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kIntDiv:
|
||||
output->variables.insert(ct.int_div().target());
|
||||
AddIndices(ct.int_div().vars(), &output->variables);
|
||||
output.variables.push_back(ct.int_div().target());
|
||||
AddIndices(ct.int_div().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kIntMod:
|
||||
output->variables.insert(ct.int_mod().target());
|
||||
AddIndices(ct.int_mod().vars(), &output->variables);
|
||||
output.variables.push_back(ct.int_mod().target());
|
||||
AddIndices(ct.int_mod().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kIntMax:
|
||||
output->variables.insert(ct.int_max().target());
|
||||
AddIndices(ct.int_max().vars(), &output->variables);
|
||||
output.variables.push_back(ct.int_max().target());
|
||||
AddIndices(ct.int_max().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kIntMin:
|
||||
output->variables.insert(ct.int_min().target());
|
||||
AddIndices(ct.int_min().vars(), &output->variables);
|
||||
output.variables.push_back(ct.int_min().target());
|
||||
AddIndices(ct.int_min().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kIntProd:
|
||||
output->variables.insert(ct.int_prod().target());
|
||||
AddIndices(ct.int_prod().vars(), &output->variables);
|
||||
output.variables.push_back(ct.int_prod().target());
|
||||
AddIndices(ct.int_prod().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kLinear:
|
||||
AddIndices(ct.linear().vars(), &output->variables);
|
||||
AddIndices(ct.linear().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kAllDiff:
|
||||
AddIndices(ct.all_diff().vars(), &output->variables);
|
||||
AddIndices(ct.all_diff().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kElement:
|
||||
output->variables.insert(ct.element().index());
|
||||
output->variables.insert(ct.element().target());
|
||||
AddIndices(ct.element().vars(), &output->variables);
|
||||
output.variables.push_back(ct.element().index());
|
||||
output.variables.push_back(ct.element().target());
|
||||
AddIndices(ct.element().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kCircuit:
|
||||
AddIndices(ct.circuit().literals(), &output->literals);
|
||||
AddIndices(ct.circuit().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kRoutes:
|
||||
AddIndices(ct.routes().literals(), &output->literals);
|
||||
AddIndices(ct.routes().literals(), &output.literals);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kCircuitCovering:
|
||||
AddIndices(ct.circuit_covering().nexts(), &output->variables);
|
||||
AddIndices(ct.circuit_covering().nexts(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kInverse:
|
||||
AddIndices(ct.inverse().f_direct(), &output->variables);
|
||||
AddIndices(ct.inverse().f_inverse(), &output->variables);
|
||||
AddIndices(ct.inverse().f_direct(), &output.variables);
|
||||
AddIndices(ct.inverse().f_inverse(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kReservoir:
|
||||
AddIndices(ct.reservoir().times(), &output->variables);
|
||||
AddIndices(ct.reservoir().times(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kTable:
|
||||
AddIndices(ct.table().vars(), &output->variables);
|
||||
AddIndices(ct.table().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kAutomaton:
|
||||
AddIndices(ct.automaton().vars(), &output->variables);
|
||||
AddIndices(ct.automaton().vars(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kInterval:
|
||||
output->variables.insert(ct.interval().start());
|
||||
output->variables.insert(ct.interval().end());
|
||||
output->variables.insert(ct.interval().size());
|
||||
output.variables.push_back(ct.interval().start());
|
||||
output.variables.push_back(ct.interval().end());
|
||||
output.variables.push_back(ct.interval().size());
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kNoOverlap:
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kNoOverlap2D:
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::kCumulative:
|
||||
output->variables.insert(ct.cumulative().capacity());
|
||||
AddIndices(ct.cumulative().demands(), &output->variables);
|
||||
output.variables.push_back(ct.cumulative().capacity());
|
||||
AddIndices(ct.cumulative().demands(), &output.variables);
|
||||
break;
|
||||
case ConstraintProto::ConstraintCase::CONSTRAINT_NOT_SET:
|
||||
break;
|
||||
}
|
||||
return output;
|
||||
}
|
||||
|
||||
#define APPLY_TO_SINGULAR_FIELD(ct_name, field_name) \
|
||||
@@ -387,23 +388,19 @@ std::string ConstraintCaseName(
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(user): Optimize this function, it appear in the presolve profile.
|
||||
// We could get rid of AddReferencesUsedByConstraint().
|
||||
std::vector<int> UsedVariables(const ConstraintProto& ct) {
|
||||
IndexReferences references;
|
||||
AddReferencesUsedByConstraint(ct, &references);
|
||||
std::vector<int> used_variables;
|
||||
for (const int var : references.variables) {
|
||||
used_variables.push_back(PositiveRef(var));
|
||||
IndexReferences references = GetReferencesUsedByConstraint(ct);
|
||||
for (int& ref : references.variables) {
|
||||
ref = PositiveRef(ref);
|
||||
}
|
||||
for (const int lit : references.literals) {
|
||||
used_variables.push_back(PositiveRef(lit));
|
||||
references.variables.push_back(PositiveRef(lit));
|
||||
}
|
||||
for (const int lit : ct.enforcement_literal()) {
|
||||
used_variables.push_back(PositiveRef(lit));
|
||||
references.variables.push_back(PositiveRef(lit));
|
||||
}
|
||||
gtl::STLSortAndRemoveDuplicates(&used_variables);
|
||||
return used_variables;
|
||||
gtl::STLSortAndRemoveDuplicates(&references.variables);
|
||||
return references.variables;
|
||||
}
|
||||
|
||||
std::vector<int> UsedIntervals(const ConstraintProto& ct) {
|
||||
|
||||
@@ -43,15 +43,13 @@ inline int EnforcementLiteral(const ConstraintProto& ct) {
|
||||
|
||||
// Collects all the references used by a constraint. This function is used in a
|
||||
// few places to have a "generic" code dealing with constraints. Note that the
|
||||
// enforcement_literal is NOT counted here.
|
||||
//
|
||||
// TODO(user): replace this by constant version of the Apply...() functions?
|
||||
// enforcement_literal is NOT counted here and that the vectors can have
|
||||
// duplicates.
|
||||
struct IndexReferences {
|
||||
absl::flat_hash_set<int> variables;
|
||||
absl::flat_hash_set<int> literals;
|
||||
std::vector<int> variables;
|
||||
std::vector<int> literals;
|
||||
};
|
||||
void AddReferencesUsedByConstraint(const ConstraintProto& ct,
|
||||
IndexReferences* output);
|
||||
IndexReferences GetReferencesUsedByConstraint(const ConstraintProto& ct);
|
||||
|
||||
// Applies the given function to all variables/literals/intervals indices of the
|
||||
// constraint. This function is used in a few places to have a "generic" code
|
||||
|
||||
@@ -119,42 +119,36 @@ IntegerEncoder::PartialDomainEncoding(IntegerVariable var) const {
|
||||
// Note that by not inserting the literal in "order" we can in the worst case
|
||||
// use twice as much implication (2 by literals) instead of only one between
|
||||
// consecutive literals.
|
||||
void IntegerEncoder::AddImplications(IntegerLiteral i_lit,
|
||||
Literal associated_lit) {
|
||||
if (i_lit.var >= encoding_by_var_.size()) {
|
||||
encoding_by_var_.resize(i_lit.var.value() + 1);
|
||||
}
|
||||
void IntegerEncoder::AddImplications(
|
||||
const std::map<IntegerValue, Literal>& map,
|
||||
std::map<IntegerValue, Literal>::const_iterator it,
|
||||
Literal associated_lit) {
|
||||
if (!add_implications_) return;
|
||||
DCHECK_EQ(it->second, associated_lit);
|
||||
|
||||
std::map<IntegerValue, Literal>& map_ref =
|
||||
encoding_by_var_[IntegerVariable(i_lit.var)];
|
||||
CHECK(!gtl::ContainsKey(map_ref, i_lit.bound));
|
||||
|
||||
if (add_implications_) {
|
||||
auto after_it = map_ref.lower_bound(i_lit.bound);
|
||||
if (after_it != map_ref.end()) {
|
||||
// Literal(after) => associated_lit
|
||||
if (sat_solver_->CurrentDecisionLevel() == 0) {
|
||||
sat_solver_->AddBinaryClause(after_it->second.Negated(),
|
||||
associated_lit);
|
||||
} else {
|
||||
sat_solver_->AddBinaryClauseDuringSearch(after_it->second.Negated(),
|
||||
associated_lit);
|
||||
}
|
||||
}
|
||||
if (after_it != map_ref.begin()) {
|
||||
// associated_lit => Literal(before)
|
||||
if (sat_solver_->CurrentDecisionLevel() == 0) {
|
||||
sat_solver_->AddBinaryClause(associated_lit.Negated(),
|
||||
(--after_it)->second);
|
||||
} else {
|
||||
sat_solver_->AddBinaryClauseDuringSearch(associated_lit.Negated(),
|
||||
(--after_it)->second);
|
||||
}
|
||||
// Literal(after) => associated_lit
|
||||
auto after_it = it;
|
||||
++after_it;
|
||||
if (after_it != map.end()) {
|
||||
if (sat_solver_->CurrentDecisionLevel() == 0) {
|
||||
sat_solver_->AddBinaryClause(after_it->second.Negated(), associated_lit);
|
||||
} else {
|
||||
sat_solver_->AddBinaryClauseDuringSearch(after_it->second.Negated(),
|
||||
associated_lit);
|
||||
}
|
||||
}
|
||||
|
||||
// Add the new entry.
|
||||
map_ref[i_lit.bound] = associated_lit;
|
||||
// associated_lit => Literal(before)
|
||||
if (it != map.begin()) {
|
||||
auto before_it = it;
|
||||
--before_it;
|
||||
if (sat_solver_->CurrentDecisionLevel() == 0) {
|
||||
sat_solver_->AddBinaryClause(associated_lit.Negated(), before_it->second);
|
||||
} else {
|
||||
sat_solver_->AddBinaryClauseDuringSearch(associated_lit.Negated(),
|
||||
before_it->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void IntegerEncoder::AddAllImplicationsBetweenAssociatedLiterals() {
|
||||
@@ -280,10 +274,13 @@ void IntegerEncoder::AssociateToIntegerEqualValue(Literal literal,
|
||||
}
|
||||
}
|
||||
|
||||
const std::pair<IntegerVariable, IntegerValue> key{var, value};
|
||||
if (gtl::ContainsKey(equality_to_associated_literal_, key)) {
|
||||
// We use the "do not insert if present" behavior of .insert() to do just one
|
||||
// lookup.
|
||||
const auto insert_result =
|
||||
equality_to_associated_literal_.insert({{var, value}, literal});
|
||||
if (!insert_result.second) {
|
||||
// If this key is already associated, make the two literals equal.
|
||||
const Literal representative = equality_to_associated_literal_[key];
|
||||
const Literal representative = insert_result.first->second;
|
||||
if (representative != literal) {
|
||||
DCHECK_EQ(sat_solver_->CurrentDecisionLevel(), 0);
|
||||
sat_solver_->AddBinaryClause(literal, representative.Negated());
|
||||
@@ -291,8 +288,8 @@ void IntegerEncoder::AssociateToIntegerEqualValue(Literal literal,
|
||||
}
|
||||
return;
|
||||
}
|
||||
equality_to_associated_literal_[key] = literal;
|
||||
equality_to_associated_literal_[{NegationOf(var), -value}] = literal;
|
||||
gtl::InsertOrDieNoPrint(&equality_to_associated_literal_,
|
||||
{{NegationOf(var), -value}, literal});
|
||||
|
||||
// Fix literal for value outside the domain or for singleton domain.
|
||||
if (!domain.Contains(value.value())) {
|
||||
@@ -354,8 +351,13 @@ void IntegerEncoder::HalfAssociateGivenLiteral(IntegerLiteral i_lit,
|
||||
}
|
||||
|
||||
// Associate the new literal to i_lit.
|
||||
if (!LiteralIsAssociated(i_lit)) {
|
||||
AddImplications(i_lit, literal);
|
||||
if (i_lit.var >= encoding_by_var_.size()) {
|
||||
encoding_by_var_.resize(i_lit.var.value() + 1);
|
||||
}
|
||||
auto& var_encoding = encoding_by_var_[i_lit.var];
|
||||
auto insert_result = var_encoding.insert({i_lit.bound, literal});
|
||||
if (insert_result.second) { // New item.
|
||||
AddImplications(var_encoding, insert_result.first, literal);
|
||||
if (sat_solver_->Assignment().LiteralIsTrue(literal)) {
|
||||
CHECK_EQ(sat_solver_->CurrentDecisionLevel(), 0);
|
||||
newly_fixed_integer_literals_.push_back(i_lit);
|
||||
@@ -365,7 +367,7 @@ void IntegerEncoder::HalfAssociateGivenLiteral(IntegerLiteral i_lit,
|
||||
reverse_encoding_[literal.Index()].push_back(i_lit);
|
||||
full_reverse_encoding_[literal.Index()].push_back(i_lit);
|
||||
} else {
|
||||
const Literal associated(GetAssociatedLiteral(i_lit));
|
||||
const Literal associated(insert_result.first->second);
|
||||
if (associated != literal) {
|
||||
DCHECK_EQ(sat_solver_->CurrentDecisionLevel(), 0);
|
||||
sat_solver_->AddBinaryClause(literal, associated.Negated());
|
||||
|
||||
@@ -412,9 +412,16 @@ class IntegerEncoder {
|
||||
// literal equivalent.
|
||||
void HalfAssociateGivenLiteral(IntegerLiteral i_lit, Literal literal);
|
||||
|
||||
// Adds the new associated_lit to encoding_by_var_.
|
||||
// Adds the implications: Literal(before) <= associated_lit <= Literal(after).
|
||||
void AddImplications(IntegerLiteral i, Literal associated_lit);
|
||||
// Adds the implications:
|
||||
// Literal(before) <= associated_lit <= Literal(after).
|
||||
// Arguments:
|
||||
// - map is just encoding_by_var_[associated_lit.var] and is passed as a
|
||||
// slight optimization.
|
||||
// - 'it' is the current position of associated_lit in map, i.e we must have
|
||||
// it->second == associated_lit.
|
||||
void AddImplications(const std::map<IntegerValue, Literal>& map,
|
||||
std::map<IntegerValue, Literal>::const_iterator it,
|
||||
Literal associated_lit);
|
||||
|
||||
SatSolver* sat_solver_;
|
||||
IntegerDomains* domains_;
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include "ortools/sat/integer_search.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <functional>
|
||||
|
||||
#include "ortools/sat/integer.h"
|
||||
#include "ortools/sat/linear_programming_constraint.h"
|
||||
@@ -274,29 +275,96 @@ std::function<LiteralIndex()> RandomizeOnRestartHeuristic(Model* model) {
|
||||
SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
|
||||
SatDecisionPolicy* decision_policy = model->GetOrCreate<SatDecisionPolicy>();
|
||||
|
||||
// The duplication increase the probability of the first heuristics. This is
|
||||
// wanted because when we randomize the sat parameters, we have more than one
|
||||
// heuristic for choosing the phase of the decision.
|
||||
//
|
||||
// TODO(user): Add other policy and perform more experiments.
|
||||
std::function<LiteralIndex()> sat_policy = SatSolverHeuristic(model);
|
||||
std::vector<std::function<LiteralIndex()>> policies{
|
||||
sat_policy, sat_policy, ExploitLpSolution(sat_policy, model),
|
||||
ExploitLpSolution(SequentialSearch({PseudoCost(model), sat_policy}),
|
||||
model)};
|
||||
sat_policy, SequentialSearch({PseudoCost(model), sat_policy})};
|
||||
// The higher weight for the sat policy is because this policy actually
|
||||
// contains a lot of variation as we randomize the sat parameters.
|
||||
// TODO(user,user): Do more experiments to find better distribution.
|
||||
std::discrete_distribution<int> var_dist{3 /*sat_policy*/, 1 /*Pseudo cost*/};
|
||||
|
||||
// Value selection.
|
||||
std::vector<std::function<LiteralIndex(IntegerVariable)>>
|
||||
value_selection_heuristics;
|
||||
std::vector<int> value_selection_weight;
|
||||
|
||||
// LP Based value.
|
||||
value_selection_heuristics.push_back([model](IntegerVariable var) {
|
||||
if (LpSolutionIsExploitable(model)) {
|
||||
return SplitAroundLpValue(PositiveVariable(var), model);
|
||||
}
|
||||
return kNoLiteralIndex;
|
||||
});
|
||||
value_selection_weight.push_back(8);
|
||||
|
||||
// Solution based value.
|
||||
value_selection_heuristics.push_back([model](IntegerVariable var) {
|
||||
return SplitDomainUsingBestSolutionValue(var, model);
|
||||
});
|
||||
value_selection_weight.push_back(5);
|
||||
|
||||
// Middle value.
|
||||
value_selection_heuristics.push_back([model](IntegerVariable var) {
|
||||
return GreaterOrEqualToMiddleValue(var, model);
|
||||
});
|
||||
value_selection_weight.push_back(1);
|
||||
|
||||
// Min value.
|
||||
value_selection_heuristics.push_back(
|
||||
[model](IntegerVariable var) { return AtMinValue(var, model); });
|
||||
value_selection_weight.push_back(1);
|
||||
|
||||
// Special case: Don't change the decision value.
|
||||
value_selection_weight.push_back(10);
|
||||
|
||||
// TODO(user): These distribution values are just guessed values. They need
|
||||
// to be tuned.
|
||||
std::discrete_distribution<int> val_dist(value_selection_weight.begin(),
|
||||
value_selection_weight.end());
|
||||
|
||||
int policy_index = 0;
|
||||
return
|
||||
[sat_solver, decision_policy, policies, policy_index, model]() mutable {
|
||||
if (sat_solver->CurrentDecisionLevel() == 0) {
|
||||
RandomizeDecisionHeuristic(model->GetOrCreate<ModelRandomGenerator>(),
|
||||
model->GetOrCreate<SatParameters>());
|
||||
decision_policy->ResetDecisionHeuristic();
|
||||
std::uniform_int_distribution<int> dist(0, policies.size() - 1);
|
||||
policy_index = dist(*(model->GetOrCreate<ModelRandomGenerator>()));
|
||||
}
|
||||
return policies[policy_index]();
|
||||
};
|
||||
auto* encoder = model->GetOrCreate<IntegerEncoder>();
|
||||
auto* integer_trail = model->GetOrCreate<IntegerTrail>();
|
||||
int val_policy_index = 0;
|
||||
return [=]() mutable {
|
||||
if (sat_solver->CurrentDecisionLevel() == 0) {
|
||||
auto* random = model->GetOrCreate<ModelRandomGenerator>();
|
||||
RandomizeDecisionHeuristic(random, model->GetOrCreate<SatParameters>());
|
||||
decision_policy->ResetDecisionHeuristic();
|
||||
|
||||
// Select the variable selection heuristic.
|
||||
policy_index = var_dist(*(random));
|
||||
|
||||
// Select the value selection heuristic.
|
||||
val_policy_index = val_dist(*(random));
|
||||
}
|
||||
|
||||
// Get the current decision.
|
||||
const LiteralIndex current_decision = policies[policy_index]();
|
||||
if (current_decision == kNoLiteralIndex) return kNoLiteralIndex;
|
||||
|
||||
// Special case: Don't override the decision value.
|
||||
if (val_policy_index >= value_selection_heuristics.size()) {
|
||||
return current_decision;
|
||||
}
|
||||
|
||||
// Decode the decision and get the variable.
|
||||
for (const IntegerLiteral l :
|
||||
encoder->GetAllIntegerLiterals(Literal(current_decision))) {
|
||||
if (integer_trail->IsCurrentlyIgnored(l.var)) continue;
|
||||
|
||||
// Try the selected policy.
|
||||
const LiteralIndex new_decision =
|
||||
value_selection_heuristics[val_policy_index](l.var);
|
||||
if (new_decision != kNoLiteralIndex) {
|
||||
return new_decision;
|
||||
}
|
||||
}
|
||||
|
||||
// Selected policy failed. Revert back to original decision.
|
||||
return current_decision;
|
||||
};
|
||||
}
|
||||
|
||||
std::function<LiteralIndex()> FollowHint(
|
||||
|
||||
@@ -18,6 +18,84 @@
|
||||
namespace operations_research {
|
||||
namespace sat {
|
||||
|
||||
void LinearConstraintBuilder::AddTerm(IntegerVariable var, IntegerValue coeff) {
  // Canonicalize on the positive variable: var and NegationOf(var) denote the
  // same quantity with opposite sign, so -x * c is recorded as x * (-c).
  const bool is_positive = VariableIsPositive(var);
  const IntegerVariable canonical_var = is_positive ? var : NegationOf(var);
  const IntegerValue canonical_coeff = is_positive ? coeff : -coeff;
  terms_.push_back({canonical_var, canonical_coeff});
}
||||
|
||||
ABSL_MUST_USE_RESULT bool LinearConstraintBuilder::AddLiteralTerm(
    Literal lit, IntegerValue coeff) {
  // A literal fixed at true contributes the constant `coeff`, which is folded
  // into the bounds (the "sticky" kMin/kMax bounds are left untouched).
  if (assignment_.LiteralIsTrue(lit)) {
    if (lb_ > kMinIntegerValue) lb_ -= coeff;
    if (ub_ < kMaxIntegerValue) ub_ -= coeff;
    return true;
  }
  // A literal fixed at false contributes nothing.
  if (assignment_.LiteralIsFalse(lit)) return true;

  const IntegerVariable direct_view = encoder_.GetLiteralView(lit);
  const IntegerVariable opposite_view = encoder_.GetLiteralView(lit.Negated());

  // If a literal has both views, we want to always keep the same
  // representative: the smallest IntegerVariable. Note that AddTerm() will
  // also make sure to use the associated positive variable.
  const bool use_direct_view =
      direct_view != kNoIntegerVariable &&
      (opposite_view == kNoIntegerVariable || direct_view <= opposite_view);

  if (use_direct_view) {
    AddTerm(direct_view, coeff);
    return true;
  }
  if (opposite_view != kNoIntegerVariable) {
    // lit == 1 - lit.Negated(), so lit * coeff is rewritten as
    // -coeff * opposite_view plus a constant folded into the bounds.
    AddTerm(opposite_view, -coeff);
    if (lb_ > kMinIntegerValue) lb_ -= coeff;
    if (ub_ < kMaxIntegerValue) ub_ -= coeff;
    return true;
  }

  // The literal has no integer view at all: the term cannot be expressed.
  return false;
}
||||
|
||||
LinearConstraint LinearConstraintBuilder::Build() {
  LinearConstraint result;
  result.lb = lb_;
  result.ub = ub_;

  // Sort the terms by variable so duplicates become adjacent, then merge the
  // coefficients of identical variables. A merged coefficient of zero means
  // the variable cancelled out and is dropped from the output.
  std::sort(terms_.begin(), terms_.end());
  IntegerVariable last_var = kNoIntegerVariable;
  IntegerValue merged_coeff(0);
  const auto flush_term = [&]() {
    if (merged_coeff != 0) {
      result.vars.push_back(last_var);
      result.coeffs.push_back(merged_coeff);
    }
  };
  for (const auto& term : terms_) {
    if (term.first == last_var) {
      merged_coeff += term.second;
    } else {
      flush_term();
      last_var = term.first;
      merged_coeff = term.second;
    }
  }
  flush_term();
  return result;
}
||||
|
||||
double ComputeActivity(const LinearConstraint& constraint,
|
||||
const gtl::ITIVector<IntegerVariable, double>& values) {
|
||||
double activity = 0;
|
||||
|
||||
@@ -77,9 +77,6 @@ struct LinearConstraint {
|
||||
|
||||
// Allow to build a LinearConstraint while making sure there is no duplicate
|
||||
// variables.
|
||||
//
|
||||
// TODO(user): Storing all coeff in the vector then sorting and merging
|
||||
// duplicates might be more efficient. Change if required.
|
||||
class LinearConstraintBuilder {
|
||||
public:
|
||||
// We support "sticky" kMinIntegerValue for lb and kMaxIntegerValue for ub
|
||||
@@ -90,75 +87,21 @@ class LinearConstraintBuilder {
|
||||
lb_(lb),
|
||||
ub_(ub) {}
|
||||
|
||||
int size() const { return terms_.size(); }
|
||||
bool IsEmpty() const { return terms_.empty(); }
|
||||
|
||||
// Adds var * coeff to the constraint.
|
||||
void AddTerm(IntegerVariable var, IntegerValue coeff) {
|
||||
// We can either add var or NegationOf(var), and we always choose the
|
||||
// positive one.
|
||||
if (VariableIsPositive(var)) {
|
||||
terms_[var] += coeff;
|
||||
if (terms_[var] == 0) terms_.erase(var);
|
||||
} else {
|
||||
const IntegerVariable minus_var = NegationOf(var);
|
||||
terms_[minus_var] -= coeff;
|
||||
if (terms_[minus_var] == 0) terms_.erase(minus_var);
|
||||
}
|
||||
}
|
||||
void AddTerm(IntegerVariable var, IntegerValue coeff);
|
||||
|
||||
// Add literal * coeff to the constraint. Returns false and do nothing if the
|
||||
// given literal didn't have an integer view.
|
||||
ABSL_MUST_USE_RESULT bool AddLiteralTerm(Literal lit, IntegerValue coeff) {
|
||||
if (assignment_.LiteralIsTrue(lit)) {
|
||||
if (lb_ > kMinIntegerValue) lb_ -= coeff;
|
||||
if (ub_ < kMaxIntegerValue) ub_ -= coeff;
|
||||
return true;
|
||||
}
|
||||
if (assignment_.LiteralIsFalse(lit)) {
|
||||
return true;
|
||||
}
|
||||
ABSL_MUST_USE_RESULT bool AddLiteralTerm(Literal lit, IntegerValue coeff);
|
||||
|
||||
bool has_direct_view = encoder_.GetLiteralView(lit) != kNoIntegerVariable;
|
||||
bool has_opposite_view =
|
||||
encoder_.GetLiteralView(lit.Negated()) != kNoIntegerVariable;
|
||||
|
||||
// If a literal has both views, we want to always keep the same
|
||||
// representative: the smallest IntegerVariable. Note that AddTerm() will
|
||||
// also make sure to use the associated positive variable.
|
||||
if (has_direct_view && has_opposite_view) {
|
||||
if (encoder_.GetLiteralView(lit) <=
|
||||
encoder_.GetLiteralView(lit.Negated())) {
|
||||
has_direct_view = true;
|
||||
has_opposite_view = false;
|
||||
} else {
|
||||
has_direct_view = false;
|
||||
has_opposite_view = true;
|
||||
}
|
||||
}
|
||||
if (has_direct_view) {
|
||||
AddTerm(encoder_.GetLiteralView(lit), coeff);
|
||||
return true;
|
||||
}
|
||||
if (has_opposite_view) {
|
||||
AddTerm(encoder_.GetLiteralView(lit.Negated()), -coeff);
|
||||
if (lb_ > kMinIntegerValue) lb_ -= coeff;
|
||||
if (ub_ < kMaxIntegerValue) ub_ -= coeff;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
LinearConstraint Build() {
|
||||
LinearConstraint result;
|
||||
result.lb = lb_;
|
||||
result.ub = ub_;
|
||||
for (const auto entry : terms_) {
|
||||
result.vars.push_back(entry.first);
|
||||
result.coeffs.push_back(entry.second);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
// Builds and return the corresponding constraint in a canonical form.
|
||||
// All the IntegerVariable will be positive and appear in increasing index
|
||||
// order.
|
||||
//
|
||||
// TODO(user): this doesn't invalidate the builder object, but if one wants
|
||||
// to do a lot of dynamic editing to the constraint, then the underlying
|
||||
// algorithm needs to be optimized for that.
|
||||
LinearConstraint Build();
|
||||
|
||||
private:
|
||||
const VariablesAssignment& assignment_;
|
||||
@@ -166,7 +109,10 @@ class LinearConstraintBuilder {
|
||||
IntegerValue lb_;
|
||||
IntegerValue ub_;
|
||||
IntegerValue offset_;
|
||||
std::map<IntegerVariable, IntegerValue> terms_;
|
||||
|
||||
// Initially we push all AddTerm() here, and during Build() we merge terms
|
||||
// on the same variable.
|
||||
std::vector<std::pair<IntegerVariable, IntegerValue>> terms_;
|
||||
};
|
||||
|
||||
// Returns the activity of the given constraint. That is the current value of
|
||||
|
||||
@@ -108,9 +108,6 @@ void LinearConstraintManager::Add(const LinearConstraint& ct) {
|
||||
}
|
||||
++num_merged_constraints_;
|
||||
} else {
|
||||
for (const IntegerVariable var : canonicalized.vars) {
|
||||
used_variables_.insert(var);
|
||||
}
|
||||
constraint_l2_norms_.push_back(ComputeL2Norm(canonicalized));
|
||||
equiv_constraints_[terms] = constraints_.size();
|
||||
constraint_is_in_lp_.push_back(false);
|
||||
|
||||
@@ -85,9 +85,6 @@ class LinearConstraintManager {
|
||||
|
||||
SatParameters sat_parameters_;
|
||||
|
||||
// The set of variables that appear in at least one constraint.
|
||||
std::set<IntegerVariable> used_variables_;
|
||||
|
||||
// Set at true by Add() and at false by ChangeLp().
|
||||
bool some_lp_constraint_bounds_changed_ = false;
|
||||
|
||||
|
||||
@@ -76,7 +76,8 @@ void LinearProgrammingConstraint::AddLinearConstraint(
|
||||
glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
|
||||
IntegerVariable positive_variable) {
|
||||
DCHECK(VariableIsPositive(positive_variable));
|
||||
if (!gtl::ContainsKey(mirror_lp_variable_, positive_variable)) {
|
||||
const auto it = mirror_lp_variable_.find(positive_variable);
|
||||
if (it == mirror_lp_variable_.end()) {
|
||||
const glop::ColIndex col(integer_variables_.size());
|
||||
mirror_lp_variable_[positive_variable] = col;
|
||||
integer_variables_.push_back(positive_variable);
|
||||
@@ -91,7 +92,7 @@ glop::ColIndex LinearProgrammingConstraint::GetOrCreateMirrorVariable(
|
||||
}
|
||||
return col;
|
||||
}
|
||||
return mirror_lp_variable_[positive_variable];
|
||||
return it->second;
|
||||
}
|
||||
|
||||
void LinearProgrammingConstraint::SetObjectiveCoefficient(IntegerVariable ivar,
|
||||
|
||||
@@ -42,15 +42,8 @@ bool AppendFullEncodingRelaxation(IntegerVariable var, const Model& model,
|
||||
if (!encoding_ct.AddLiteralTerm(lit, -value_literal.value)) return false;
|
||||
}
|
||||
|
||||
// TODO(user): also skip if var is fixed and there is just one term. Or more
|
||||
// generally, always add all constraints, but do not add the trivial ones to
|
||||
// the LP.
|
||||
if (!at_least_one.IsEmpty()) {
|
||||
relaxation->linear_constraints.push_back(at_least_one.Build());
|
||||
}
|
||||
if (!encoding_ct.IsEmpty()) {
|
||||
relaxation->linear_constraints.push_back(encoding_ct.Build());
|
||||
}
|
||||
relaxation->linear_constraints.push_back(at_least_one.Build());
|
||||
relaxation->linear_constraints.push_back(encoding_ct.Build());
|
||||
relaxation->at_most_ones.push_back(at_most_one);
|
||||
return true;
|
||||
}
|
||||
@@ -144,12 +137,8 @@ void AppendPartialEncodingRelaxation(IntegerVariable var, const Model& model,
|
||||
CHECK(
|
||||
encoding_ct.AddLiteralTerm(lit, IntegerValue(-value_literal.value)));
|
||||
}
|
||||
if (exactly_one_ct.size() > 1) {
|
||||
relaxation->linear_constraints.push_back(exactly_one_ct.Build());
|
||||
}
|
||||
if (encoding_ct.size() > 1) {
|
||||
relaxation->linear_constraints.push_back(encoding_ct.Build());
|
||||
}
|
||||
relaxation->linear_constraints.push_back(exactly_one_ct.Build());
|
||||
relaxation->linear_constraints.push_back(encoding_ct.Build());
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -171,15 +160,10 @@ void AppendPartialEncodingRelaxation(IntegerVariable var, const Model& model,
|
||||
d_max - value_literal.value));
|
||||
}
|
||||
|
||||
if (at_most_one_ct.size() > 1 && encoded_values.size() > 1) {
|
||||
relaxation->at_most_ones.push_back(at_most_one_ct);
|
||||
}
|
||||
if (lower_bound_ct.size() > 1) {
|
||||
relaxation->linear_constraints.push_back(lower_bound_ct.Build());
|
||||
}
|
||||
if (upper_bound_ct.size() > 1) {
|
||||
relaxation->linear_constraints.push_back(upper_bound_ct.Build());
|
||||
}
|
||||
// Note that empty/trivial constraints will be filtered later.
|
||||
relaxation->at_most_ones.push_back(at_most_one_ct);
|
||||
relaxation->linear_constraints.push_back(lower_bound_ct.Build());
|
||||
relaxation->linear_constraints.push_back(upper_bound_ct.Build());
|
||||
}
|
||||
|
||||
void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
|
||||
@@ -217,9 +201,7 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
|
||||
prev_used_bound = entry.first;
|
||||
prev_literal_index = literal_index;
|
||||
}
|
||||
if (!lb_constraint.IsEmpty()) {
|
||||
relaxation->linear_constraints.push_back(lb_constraint.Build());
|
||||
}
|
||||
relaxation->linear_constraints.push_back(lb_constraint.Build());
|
||||
}
|
||||
|
||||
// Do the same for the var <= side by using NegationOfVar().
|
||||
@@ -238,9 +220,7 @@ void AppendPartialGreaterThanEncodingRelaxation(IntegerVariable var,
|
||||
if (!lb_constraint.AddLiteralTerm(entry.second, diff)) continue;
|
||||
prev_used_bound = entry.first;
|
||||
}
|
||||
if (!lb_constraint.IsEmpty()) {
|
||||
relaxation->linear_constraints.push_back(lb_constraint.Build());
|
||||
}
|
||||
relaxation->linear_constraints.push_back(lb_constraint.Build());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -44,7 +44,36 @@ using operations_research::MPConstraintProto;
|
||||
using operations_research::MPModelProto;
|
||||
using operations_research::MPVariableProto;
|
||||
|
||||
bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
std::vector<double> ScaleContinuousVariables(double scaling,
|
||||
MPModelProto* mp_model) {
|
||||
const int num_variables = mp_model->variable_size();
|
||||
std::vector<double> var_scaling(num_variables, 1.0);
|
||||
for (int i = 0; i < num_variables; ++i) {
|
||||
const MPVariableProto& mp_var = mp_model->variable(i);
|
||||
if (mp_var.is_integer()) continue;
|
||||
|
||||
const double old_lb = mp_var.lower_bound();
|
||||
const double old_ub = mp_var.upper_bound();
|
||||
const double old_obj = mp_var.objective_coefficient();
|
||||
|
||||
var_scaling[i] = scaling;
|
||||
mp_model->mutable_variable(i)->set_lower_bound(old_lb * scaling);
|
||||
mp_model->mutable_variable(i)->set_upper_bound(old_ub * scaling);
|
||||
mp_model->mutable_variable(i)->set_objective_coefficient(old_obj / scaling);
|
||||
}
|
||||
for (MPConstraintProto& mp_constraint : *mp_model->mutable_constraint()) {
|
||||
const int num_terms = mp_constraint.coefficient_size();
|
||||
for (int i = 0; i < num_terms; ++i) {
|
||||
const int var_index = mp_constraint.var_index(i);
|
||||
mp_constraint.set_coefficient(
|
||||
i, mp_constraint.coefficient(i) / var_scaling[var_index]);
|
||||
}
|
||||
}
|
||||
return var_scaling;
|
||||
}
|
||||
|
||||
bool ConvertMPModelProtoToCpModelProto(const SatParameters& params,
|
||||
const MPModelProto& mp_model,
|
||||
CpModelProto* cp_model) {
|
||||
const double kInfinity = std::numeric_limits<double>::infinity();
|
||||
CHECK(cp_model != nullptr);
|
||||
@@ -63,12 +92,12 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
// similar condition in disguise because problem with a difference of more
|
||||
// than 6 magnitudes between the variable values will likely run into numeric
|
||||
// trouble.
|
||||
const int64 kMaxVariableBound = static_cast<int64>(1e7);
|
||||
const int64 kMaxVariableBound = static_cast<int64>(params.mip_max_bound());
|
||||
|
||||
int num_truncated_bounds = 0;
|
||||
int num_small_domains = 0;
|
||||
const int64 kSmallDomainSize = 1000;
|
||||
const double kWantedCoefficientPrecision = 1e-6;
|
||||
const double kWantedPrecision = params.mip_wanted_precision();
|
||||
|
||||
// Add the variables.
|
||||
const int num_variables = mp_model.variable_size();
|
||||
@@ -109,12 +138,13 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
|
||||
// Note that the cast is "perfect" because we forbid large values.
|
||||
cp_var->add_domain(
|
||||
static_cast<int64>(lower ? std::ceil(bound) : std::floor(bound)));
|
||||
static_cast<int64>(lower ? std::ceil(bound - kWantedPrecision)
|
||||
: std::floor(bound + kWantedPrecision)));
|
||||
}
|
||||
|
||||
// Notify if a continuous variable has a small domain as this is likely to
|
||||
// make an all integer solution far from a continuous one.
|
||||
if (!mp_var.is_integer() &&
|
||||
if (!mp_var.is_integer() && cp_var->domain(0) != cp_var->domain(1) &&
|
||||
cp_var->domain(1) - cp_var->domain(0) < kSmallDomainSize) {
|
||||
++num_small_domains;
|
||||
}
|
||||
@@ -129,7 +159,7 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
|
||||
// Variables needed to scale the double coefficients into int64.
|
||||
double max_relative_coeff_error = 0.0;
|
||||
double max_scaled_sum_error = 0.0;
|
||||
double max_sum_error = 0.0;
|
||||
double max_scaling_factor = 0.0;
|
||||
double relative_coeff_error = 0.0;
|
||||
double scaled_sum_error = 0.0;
|
||||
@@ -138,10 +168,7 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
std::vector<double> lower_bounds;
|
||||
std::vector<double> upper_bounds;
|
||||
|
||||
// TODO(user): we could use up to kint64max here, but our code is not as
|
||||
// defensive as it should be regarding integer overflow. So we use the
|
||||
// precision of a double.
|
||||
const int64 kScalingTarget = 1LL << 53;
|
||||
const int64 kScalingTarget = 1LL << params.mip_max_activity_exponent();
|
||||
|
||||
// Add the constraints. We scale each of them individually.
|
||||
for (const MPConstraintProto& mp_constraint : mp_model.constraint()) {
|
||||
@@ -168,16 +195,25 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
scaling_factor = GetBestScalingOfDoublesToInt64(
|
||||
coefficients, lower_bounds, upper_bounds, kScalingTarget);
|
||||
|
||||
// Returns the smallest factor of the form 2^i that gives us a relative
|
||||
// coefficient precision of kWantedCoefficientPrecision and still make sure
|
||||
// we will have no integer overflow.
|
||||
// We use an absolute precision if the constraint domain contains a point in
|
||||
// [-1, 1], otherwise we use a relative error to the minimum absolute value
|
||||
// in the domain.
|
||||
Fractional lb = mp_constraint.lower_bound();
|
||||
Fractional ub = mp_constraint.upper_bound();
|
||||
double relative_ref = 1.0;
|
||||
if (lb > 1.0) relative_ref = lb;
|
||||
if (ub < -1.0) relative_ref = -ub;
|
||||
|
||||
// Returns the smallest factor of the form 2^i that gives us a relative sum
|
||||
// error of kWantedPrecision and still make sure we will have no integer
|
||||
// overflow.
|
||||
//
|
||||
// TODO(user): Make this faster.
|
||||
double x = std::min(scaling_factor, 1.0);
|
||||
for (; x <= scaling_factor; x *= 2) {
|
||||
ComputeScalingErrors(coefficients, lower_bounds, upper_bounds, x,
|
||||
&relative_coeff_error, &scaled_sum_error);
|
||||
if (relative_coeff_error < kWantedCoefficientPrecision) break;
|
||||
if (scaled_sum_error < kWantedPrecision * x * relative_ref) break;
|
||||
}
|
||||
scaling_factor = x;
|
||||
|
||||
@@ -186,22 +222,30 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
std::max(relative_coeff_error, max_relative_coeff_error);
|
||||
max_scaling_factor = std::max(scaling_factor / gcd, max_scaling_factor);
|
||||
|
||||
// We do not relax the constraint bound if all variables are integer and
|
||||
// we made no error at all during our scaling.
|
||||
bool relax_bound = scaled_sum_error > 0;
|
||||
|
||||
for (int i = 0; i < num_coeffs; ++i) {
|
||||
const double scaled_value = mp_constraint.coefficient(i) * scaling_factor;
|
||||
const int64 value = static_cast<int64>(std::round(scaled_value)) / gcd;
|
||||
if (value != 0) {
|
||||
if (!mp_model.variable(mp_constraint.var_index(i)).is_integer()) {
|
||||
relax_bound = true;
|
||||
}
|
||||
arg->add_vars(mp_constraint.var_index(i));
|
||||
arg->add_coeffs(value);
|
||||
}
|
||||
}
|
||||
max_scaled_sum_error =
|
||||
std::max(max_scaled_sum_error, scaled_sum_error / scaling_factor);
|
||||
max_sum_error = std::max(
|
||||
max_sum_error, scaled_sum_error / (scaling_factor * relative_ref));
|
||||
|
||||
// Add the constraint bounds. Because we are sure the scaled constraint fit
|
||||
// on an int64, if the scaled bounds are too large, the constraint is either
|
||||
// always true or always false.
|
||||
Fractional lb = mp_constraint.lower_bound();
|
||||
lb -= std::max(1.0, std::abs(lb)) * 1e-8;
|
||||
if (relax_bound) {
|
||||
lb -= std::max(1.0, std::abs(lb)) * kWantedPrecision;
|
||||
}
|
||||
const Fractional scaled_lb = std::ceil(lb * scaling_factor);
|
||||
if (lb == -kInfinity || scaled_lb <= kint64min) {
|
||||
arg->add_domain(kint64min);
|
||||
@@ -210,8 +254,10 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
IntegerValue(gcd))
|
||||
.value());
|
||||
}
|
||||
Fractional ub = mp_constraint.upper_bound();
|
||||
ub += std::max(1.0, std::abs(ub)) * 1e-8;
|
||||
|
||||
if (relax_bound) {
|
||||
ub += std::max(1.0, std::abs(ub)) * kWantedPrecision;
|
||||
}
|
||||
const Fractional scaled_ub = std::floor(ub * scaling_factor);
|
||||
if (ub == kInfinity || scaled_ub >= kint64max) {
|
||||
arg->add_domain(kint64max);
|
||||
@@ -221,8 +267,8 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
.value());
|
||||
}
|
||||
|
||||
// TODO(user): checks feasibility (contains zero) or support that in the
|
||||
// solver!
|
||||
// TODO(user): check feasibility (contains zero) or support that in the
|
||||
// solver.
|
||||
if (arg->vars_size() == 0) constraint->Clear();
|
||||
}
|
||||
|
||||
@@ -230,7 +276,7 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
VLOG(1) << "Maximum constraint coefficient relative error: "
|
||||
<< max_relative_coeff_error;
|
||||
VLOG(1) << "Maximum constraint worst-case sum absolute error: "
|
||||
<< max_scaled_sum_error;
|
||||
<< max_sum_error;
|
||||
VLOG(1) << "Maximum constraint scaling factor: " << max_scaling_factor;
|
||||
|
||||
// Add the objective.
|
||||
@@ -249,16 +295,16 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
scaling_factor = GetBestScalingOfDoublesToInt64(
|
||||
coefficients, lower_bounds, upper_bounds, kScalingTarget);
|
||||
|
||||
// Returns the smallest factor of the form 2^i that gives us a relative
|
||||
// coefficient precision of kWantedCoefficientPrecision and still make sure
|
||||
// we will have no integer overflow.
|
||||
// Returns the smallest factor of the form 2^i that gives us an absolute
|
||||
// error of kWantedPrecision and still make sure we will have no integer
|
||||
// overflow.
|
||||
//
|
||||
// TODO(user): Make this faster.
|
||||
double x = std::min(scaling_factor, 1.0);
|
||||
for (; x <= scaling_factor; x *= 2) {
|
||||
ComputeScalingErrors(coefficients, lower_bounds, upper_bounds, x,
|
||||
&relative_coeff_error, &scaled_sum_error);
|
||||
if (relative_coeff_error < kWantedCoefficientPrecision) break;
|
||||
if (scaled_sum_error < kWantedPrecision * x) break;
|
||||
}
|
||||
scaling_factor = x;
|
||||
|
||||
@@ -294,10 +340,12 @@ bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
}
|
||||
|
||||
// Test the precision of the conversion.
|
||||
const double kRelativeTolerance = 1e-4;
|
||||
if (max_relative_coeff_error > kRelativeTolerance) {
|
||||
const double allowed_error =
|
||||
std::max(params.mip_check_precision(), params.mip_wanted_precision());
|
||||
if (max_sum_error > allowed_error) {
|
||||
LOG(WARNING) << "The relative error during double -> int64 conversion "
|
||||
<< "is too high! error:" << max_relative_coeff_error;
|
||||
<< "is too high! error:" << max_sum_error
|
||||
<< " check_tolerance:" << allowed_error;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
||||
@@ -20,24 +20,29 @@
|
||||
#include "ortools/lp_data/lp_data.h"
|
||||
#include "ortools/sat/boolean_problem.pb.h"
|
||||
#include "ortools/sat/cp_model.pb.h"
|
||||
#include "ortools/sat/sat_parameters.pb.h"
|
||||
#include "ortools/sat/sat_solver.h"
|
||||
|
||||
namespace operations_research {
|
||||
namespace sat {
|
||||
|
||||
// Multiplies all continuous variable by the given scaling parameters and change
|
||||
// the rest of the model accordingly. The returned vector contains the scaling
|
||||
// of each variable (currently either 1.0 or scaling) and can be used to recover
|
||||
// a solution of the unscaled problem from one of the new scaled problems by
|
||||
// dividing the variable values.
|
||||
//
|
||||
// TODO(user): Also scale the solution hint if any.
|
||||
std::vector<double> ScaleContinuousVariables(double scaling,
|
||||
MPModelProto* mp_model);
|
||||
|
||||
// Converts a MIP problem to a CpModel. Returns false if the coefficients
|
||||
// couldn't be converted to integers with a good enough precision.
|
||||
//
|
||||
// Caveats:
|
||||
// - We do not support bound larger than or equal to 2^30.
|
||||
// - We cap unbounded variable at 2^30.
|
||||
// - Non-integer variable must have integer bounds.
|
||||
// - We do not scale the variable bounds, so by assuming that a non-integer
|
||||
// variable is integer, we may change the problem significantly if the
|
||||
// domain is small (like [0.0, 1.0]).
|
||||
//
|
||||
// TODO(user): Try to remove some of the restrictions.
|
||||
bool ConvertMPModelProtoToCpModelProto(const MPModelProto& mp_model,
|
||||
// There is a bunch of caveats and you can find more details on the
|
||||
// SatParameters proto documentation for the mip_* parameters.
|
||||
bool ConvertMPModelProtoToCpModelProto(const SatParameters& params,
|
||||
const MPModelProto& mp_model,
|
||||
CpModelProto* cp_model);
|
||||
|
||||
// Converts an integer program with only binary variables to a Boolean
|
||||
|
||||
@@ -21,7 +21,7 @@ option java_multiple_files = true;
|
||||
// Contains the definitions for all the sat algorithm parameters and their
|
||||
// default values.
|
||||
//
|
||||
// NEXT TAG: 124
|
||||
// NEXT TAG: 129
|
||||
message SatParameters {
|
||||
// ==========================================================================
|
||||
// Branching and polarity
|
||||
@@ -685,4 +685,52 @@ message SatParameters {
|
||||
// compute EXACT propagation on the IP. So with this option, there is no
|
||||
// numerical imprecision issues.
|
||||
optional bool use_exact_lp_reason = 109 [default = true];
|
||||
|
||||
// ==========================================================================
|
||||
// MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are
|
||||
// used by our automatic "scaling" algorithm.
|
||||
//
|
||||
// Note that it is hard to do a meaningful conversion automatically and if
|
||||
// you have a model with continuous variables, it is best if you scale the
|
||||
// domain of the variable yourself so that you have a relevant precision for
|
||||
// the application at hand. Same for the coefficients and constraint bounds.
|
||||
// ==========================================================================
|
||||
|
||||
// We need to bound the maximum magnitude of the variables for CP-SAT, and
|
||||
// that is the bound we use. If the MIP model expect larger variable value in
|
||||
// the solution, then the converted model will likely not be relevant.
|
||||
optional double mip_max_bound = 124 [default = 1e7];
|
||||
|
||||
// All continuous variable of the problem will be multiplied by this factor.
|
||||
// By default, we don't do any variable scaling and rely on the MIP model to
|
||||
// specify continuous variable domain with the wanted precision.
|
||||
optional double mip_var_scaling = 125 [default = 1.0];
|
||||
|
||||
// When scaling constraint with double coefficients to integer coefficients,
|
||||
// we will multiply by a power of 2 and round the coefficients. We will choose
|
||||
// the lowest power such that we have this relative precision on each of the
|
||||
// constraint (resp. objective) coefficient.
|
||||
//
|
||||
// We also use this to decide by how much we relax the constraint bounds so
|
||||
// that we can have a feasible integer solution of constraints involving
|
||||
// continuous variable. This is required for instance when you have an == rhs
|
||||
// constraint as in many situation you cannot have a perfect equality with
|
||||
// integer variables and coefficients.
|
||||
optional double mip_wanted_precision = 126 [default = 1e-6];
|
||||
|
||||
// To avoid integer overflow, we always force the maximum possible constraint
|
||||
// activity (and objective value) according to the initial variable domain to
|
||||
// be smaller than 2 to this given power. Because of this, we cannot always
|
||||
// reach the "mip_wanted_precision" parameter above.
|
||||
//
|
||||
// This can go as high as 62, but some internal algo currently abort early if
|
||||
// they might run into integer overflow, so it is better to keep it a bit
|
||||
// lower than this.
|
||||
optional int32 mip_max_activity_exponent = 127 [default = 53];
|
||||
|
||||
// As explained in mip_wanted_precision and mip_max_activity_exponent, we
|
||||
// always reach the wanted coefficient precision during scaling. When we
|
||||
// cannot, we will report MODEL_INVALID if the relative precision is larger
|
||||
// than this parameter.
|
||||
optional double mip_check_precision = 128 [default = 1e-4];
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ SatPostsolver::SatPostsolver(int num_variables)
|
||||
}
|
||||
|
||||
void SatPostsolver::Add(Literal x, absl::Span<const Literal> clause) {
|
||||
CHECK(!clause.empty());
|
||||
DCHECK(!clause.empty());
|
||||
DCHECK(std::find(clause.begin(), clause.end(), x) != clause.end());
|
||||
associated_literal_.push_back(ApplyReverseMapping(x));
|
||||
clauses_start_.push_back(clauses_literals_.size());
|
||||
@@ -73,7 +73,7 @@ void SatPostsolver::ApplyMapping(
|
||||
new_mapping.resize(image.value() + 1, kNoBooleanVariable);
|
||||
}
|
||||
new_mapping[image] = reverse_mapping_[v];
|
||||
CHECK_NE(new_mapping[image], kNoBooleanVariable);
|
||||
DCHECK_NE(new_mapping[image], kNoBooleanVariable);
|
||||
}
|
||||
}
|
||||
std::swap(new_mapping, reverse_mapping_);
|
||||
@@ -89,7 +89,7 @@ Literal SatPostsolver::ApplyReverseMapping(Literal l) {
|
||||
}
|
||||
DCHECK_NE(reverse_mapping_[l.Variable()], kNoBooleanVariable);
|
||||
const Literal result(reverse_mapping_[l.Variable()], l.IsPositive());
|
||||
CHECK(!assignment_.LiteralIsAssigned(result));
|
||||
DCHECK(!assignment_.LiteralIsAssigned(result));
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -127,7 +127,7 @@ std::vector<bool> SatPostsolver::ExtractAndPostsolveSolution(
|
||||
const SatSolver& solver) {
|
||||
std::vector<bool> solution(solver.NumVariables());
|
||||
for (BooleanVariable var(0); var < solver.NumVariables(); ++var) {
|
||||
CHECK(solver.Assignment().VariableIsAssigned(var));
|
||||
DCHECK(solver.Assignment().VariableIsAssigned(var));
|
||||
solution[var.value()] =
|
||||
solver.Assignment().LiteralIsTrue(Literal(var, true));
|
||||
}
|
||||
@@ -137,9 +137,9 @@ std::vector<bool> SatPostsolver::ExtractAndPostsolveSolution(
|
||||
std::vector<bool> SatPostsolver::PostsolveSolution(
|
||||
const std::vector<bool>& solution) {
|
||||
for (BooleanVariable var(0); var < solution.size(); ++var) {
|
||||
CHECK_LT(var, reverse_mapping_.size());
|
||||
CHECK_NE(reverse_mapping_[var], kNoBooleanVariable);
|
||||
CHECK(!assignment_.VariableIsAssigned(reverse_mapping_[var]));
|
||||
DCHECK_LT(var, reverse_mapping_.size());
|
||||
DCHECK_NE(reverse_mapping_[var], kNoBooleanVariable);
|
||||
DCHECK(!assignment_.VariableIsAssigned(reverse_mapping_[var]));
|
||||
assignment_.AssignFromTrueLiteral(
|
||||
Literal(reverse_mapping_[var], solution[var.value()]));
|
||||
}
|
||||
@@ -156,7 +156,7 @@ std::vector<bool> SatPostsolver::PostsolveSolution(
|
||||
void SatPresolver::AddBinaryClause(Literal a, Literal b) { AddClause({a, b}); }
|
||||
|
||||
void SatPresolver::AddClause(absl::Span<const Literal> clause) {
|
||||
CHECK_GT(clause.size(), 0) << "Added an empty clause to the presolver";
|
||||
DCHECK_GT(clause.size(), 0) << "Added an empty clause to the presolver";
|
||||
const ClauseIndex ci(clauses_.size());
|
||||
clauses_.push_back(std::vector<Literal>(clause.begin(), clause.end()));
|
||||
in_clause_to_process_.push_back(true);
|
||||
@@ -187,6 +187,10 @@ void SatPresolver::AddClause(absl::Span<const Literal> clause) {
|
||||
}
|
||||
}
|
||||
|
||||
// This needs to be done after the basic canonicalization above.
|
||||
signatures_.push_back(ComputeSignatureOfClauseVariables(ci));
|
||||
DCHECK_EQ(signatures_.size(), clauses_.size());
|
||||
|
||||
if (drat_proof_handler_ != nullptr && changed) {
|
||||
drat_proof_handler_->AddClause(clause_ref);
|
||||
drat_proof_handler_->DeleteClause(clause);
|
||||
@@ -218,18 +222,21 @@ void SatPresolver::AddClauseInternal(std::vector<Literal>* clause) {
|
||||
if (drat_proof_handler_ != nullptr) drat_proof_handler_->AddClause(*clause);
|
||||
|
||||
DCHECK(std::is_sorted(clause->begin(), clause->end()));
|
||||
CHECK_GT(clause->size(), 0) << "TODO(fdid): Unsat during presolve?";
|
||||
DCHECK_GT(clause->size(), 0) << "TODO(fdid): Unsat during presolve?";
|
||||
const ClauseIndex ci(clauses_.size());
|
||||
clauses_.push_back(std::vector<Literal>());
|
||||
clauses_.back().swap(*clause);
|
||||
in_clause_to_process_.push_back(true);
|
||||
clause_to_process_.push_back(ci);
|
||||
for (Literal e : clauses_.back()) {
|
||||
for (const Literal e : clauses_.back()) {
|
||||
literal_to_clauses_[e.Index()].push_back(ci);
|
||||
literal_to_clause_sizes_[e.Index()]++;
|
||||
UpdatePriorityQueue(e.Variable());
|
||||
UpdateBvaPriorityQueue(e.Index());
|
||||
}
|
||||
|
||||
signatures_.push_back(ComputeSignatureOfClauseVariables(ci));
|
||||
DCHECK_EQ(signatures_.size(), clauses_.size());
|
||||
}
|
||||
|
||||
gtl::ITIVector<BooleanVariable, BooleanVariable> SatPresolver::VariableMapping()
|
||||
@@ -256,6 +263,7 @@ void SatPresolver::LoadProblemIntoSatSolver(SatSolver* solver) {
|
||||
in_clause_to_process_.clear();
|
||||
clause_to_process_.clear();
|
||||
literal_to_clauses_.clear();
|
||||
signatures_.clear();
|
||||
|
||||
const gtl::ITIVector<BooleanVariable, BooleanVariable> mapping =
|
||||
VariableMapping();
|
||||
@@ -269,7 +277,7 @@ void SatPresolver::LoadProblemIntoSatSolver(SatSolver* solver) {
|
||||
for (std::vector<Literal>& clause_ref : clauses_) {
|
||||
temp.clear();
|
||||
for (Literal l : clause_ref) {
|
||||
CHECK_NE(mapping[l.Variable()], kNoBooleanVariable);
|
||||
DCHECK_NE(mapping[l.Variable()], kNoBooleanVariable);
|
||||
temp.push_back(Literal(mapping[l.Variable()], l.IsPositive()));
|
||||
}
|
||||
if (!temp.empty()) solver->AddProblemClause(temp);
|
||||
@@ -393,8 +401,8 @@ void SatPresolver::SimpleBva(LiteralIndex l) {
|
||||
new_m_lit_size * new_m_cls_size - new_m_cls_size - new_m_lit_size;
|
||||
|
||||
if (new_reduction <= reduction) break;
|
||||
CHECK_NE(1, new_m_lit_size);
|
||||
CHECK_NE(1, new_m_cls_size);
|
||||
DCHECK_NE(1, new_m_lit_size);
|
||||
DCHECK_NE(1, new_m_cls_size);
|
||||
|
||||
// TODO(user): Instead of looping and recomputing p_ again, we can instead
|
||||
// simply intersect the clause indices in p_. This should be a lot faster.
|
||||
@@ -420,7 +428,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) {
|
||||
// reduce the overall number of clauses by that much. Here we can control
|
||||
// what kind of reduction we want to apply.
|
||||
if (reduction <= parameters_.presolve_bva_threshold()) return;
|
||||
CHECK_GT(m_lit_.size(), 1);
|
||||
DCHECK_GT(m_lit_.size(), 1);
|
||||
|
||||
// Create a new variable.
|
||||
const int old_size = literal_to_clauses_.size();
|
||||
@@ -440,7 +448,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) {
|
||||
}
|
||||
for (const ClauseIndex ci : m_cls_) {
|
||||
tmp_new_clause_ = clauses_[ci];
|
||||
CHECK(!tmp_new_clause_.empty());
|
||||
DCHECK(!tmp_new_clause_.empty());
|
||||
for (Literal& ref : tmp_new_clause_) {
|
||||
if (ref.Index() == l) {
|
||||
ref = Literal(x_false);
|
||||
@@ -463,7 +471,7 @@ void SatPresolver::SimpleBva(LiteralIndex l) {
|
||||
// we want it to be as fast as possible.
|
||||
for (const ClauseIndex c : m_cls_) {
|
||||
const std::vector<Literal>& clause = clauses_[c];
|
||||
CHECK(!clause.empty());
|
||||
DCHECK(!clause.empty());
|
||||
const LiteralIndex l_min =
|
||||
FindLiteralWithShortestOccurrenceListExcluding(clause, Literal(l));
|
||||
for (const LiteralIndex lit : m_lit_) {
|
||||
@@ -491,12 +499,90 @@ void SatPresolver::SimpleBva(LiteralIndex l) {
|
||||
AddToBvaPriorityQueue(l);
|
||||
}
|
||||
|
||||
uint64 SatPresolver::ComputeSignatureOfClauseVariables(ClauseIndex ci) {
|
||||
uint64 signature = 0;
|
||||
for (const Literal l : clauses_[ci]) {
|
||||
signature |= (1ULL << (l.Variable().value() % 64));
|
||||
}
|
||||
DCHECK_EQ(signature == 0, clauses_[ci].empty());
|
||||
return signature;
|
||||
}
|
||||
|
||||
// We are looking for clause that contains lit and contains a superset of the
|
||||
// literals in clauses_[clauses_index] or a superset with just one literal of
|
||||
// clauses_[clause_index] negated.
|
||||
bool SatPresolver::ProcessClauseToSimplifyOthersUsingLiteral(
|
||||
ClauseIndex clause_index, Literal lit) {
|
||||
const std::vector<Literal>& clause = clauses_[clause_index];
|
||||
const uint64 clause_signature = signatures_[clause_index];
|
||||
LiteralIndex opposite_literal;
|
||||
|
||||
// Try to simplify the clauses containing 'lit'. We take advantage of this
|
||||
// loop to also detect if there is any empty clause, in which case we will
|
||||
// trigger a "cleaning" below.
|
||||
bool need_cleaning = false;
|
||||
for (const ClauseIndex ci : literal_to_clauses_[lit.Index()]) {
|
||||
const uint64 ci_signature = signatures_[ci];
|
||||
|
||||
// This allows to check for empty clause without fetching the memory at
|
||||
// clause_[ci]. It can have a huge time impact on large problems.
|
||||
DCHECK_EQ(ci_signature, ComputeSignatureOfClauseVariables(ci));
|
||||
if (ci_signature == 0) {
|
||||
need_cleaning = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Note that SimplifyClause() can return true only if the variables in
|
||||
// 'a' are a subset of the one in 'b'. We use the signatures to abort
|
||||
// early as a speed optimization.
|
||||
if (ci != clause_index && (clause_signature & ~ci_signature) == 0 &&
|
||||
SimplifyClause(clause, &clauses_[ci], &opposite_literal)) {
|
||||
if (opposite_literal == kNoLiteralIndex) {
|
||||
need_cleaning = true;
|
||||
Remove(ci);
|
||||
continue;
|
||||
} else {
|
||||
DCHECK_NE(opposite_literal, lit.Index());
|
||||
if (clauses_[ci].empty()) return false; // UNSAT.
|
||||
if (drat_proof_handler_ != nullptr) {
|
||||
// TODO(user): remove the old clauses_[ci] afterwards.
|
||||
drat_proof_handler_->AddClause(clauses_[ci]);
|
||||
}
|
||||
|
||||
// Recompute signature.
|
||||
signatures_[ci] = ComputeSignatureOfClauseVariables(ci);
|
||||
|
||||
// Remove ci from the occurrence list. Note that opposite_literal
|
||||
// cannot be literal or literal.Negated().
|
||||
gtl::STLEraseAllFromSequence(&literal_to_clauses_[opposite_literal],
|
||||
ci);
|
||||
--literal_to_clause_sizes_[opposite_literal];
|
||||
UpdatePriorityQueue(Literal(opposite_literal).Variable());
|
||||
|
||||
if (!in_clause_to_process_[ci]) {
|
||||
in_clause_to_process_[ci] = true;
|
||||
clause_to_process_.push_back(ci);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (need_cleaning) {
|
||||
int new_index = 0;
|
||||
auto& occurrence_list_ref = literal_to_clauses_[lit.Index()];
|
||||
for (const ClauseIndex ci : occurrence_list_ref) {
|
||||
if (signatures_[ci] != 0) occurrence_list_ref[new_index++] = ci;
|
||||
}
|
||||
occurrence_list_ref.resize(new_index);
|
||||
DCHECK_EQ(literal_to_clause_sizes_[lit.Index()], new_index);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO(user): Binary clauses are really common, and we can probably do this
|
||||
// more efficiently for them. For instance, we could just take the intersection
|
||||
// of two sorted lists to get the simplified clauses.
|
||||
//
|
||||
// TODO(user): SimplifyClause can returns true only if the variables in 'a' are
|
||||
// a subset of the one in 'b'. Use an int64 signature for speeding up the test.
|
||||
bool SatPresolver::ProcessClauseToSimplifyOthers(ClauseIndex clause_index) {
|
||||
const std::vector<Literal>& clause = clauses_[clause_index];
|
||||
if (clause.empty()) return true;
|
||||
@@ -505,71 +591,46 @@ bool SatPresolver::ProcessClauseToSimplifyOthers(ClauseIndex clause_index) {
|
||||
LiteralIndex opposite_literal;
|
||||
const Literal lit = FindLiteralWithShortestOccurrenceList(clause);
|
||||
|
||||
// Try to simplify the clauses containing 'lit'. We take advantage of this
|
||||
// loop to also remove the empty sets from the list.
|
||||
{
|
||||
int new_index = 0;
|
||||
std::vector<ClauseIndex>& occurrence_list_ref =
|
||||
literal_to_clauses_[lit.Index()];
|
||||
for (ClauseIndex ci : occurrence_list_ref) {
|
||||
if (clauses_[ci].empty()) continue;
|
||||
if (ci != clause_index &&
|
||||
SimplifyClause(clause, &clauses_[ci], &opposite_literal)) {
|
||||
if (opposite_literal == LiteralIndex(-1)) {
|
||||
Remove(ci);
|
||||
continue;
|
||||
} else {
|
||||
CHECK_NE(opposite_literal, lit.Index());
|
||||
if (clauses_[ci].empty()) return false; // UNSAT.
|
||||
if (drat_proof_handler_ != nullptr) {
|
||||
// TODO(user): remove the old clauses_[ci] afterwards.
|
||||
drat_proof_handler_->AddClause(clauses_[ci]);
|
||||
}
|
||||
|
||||
// Remove ci from the occurrence list. Note that the occurrence list
|
||||
// can't be shortest_list or its negation.
|
||||
auto iter =
|
||||
std::find(literal_to_clauses_[opposite_literal].begin(),
|
||||
literal_to_clauses_[opposite_literal].end(), ci);
|
||||
DCHECK(iter != literal_to_clauses_[opposite_literal].end());
|
||||
literal_to_clauses_[opposite_literal].erase(iter);
|
||||
|
||||
--literal_to_clause_sizes_[opposite_literal];
|
||||
UpdatePriorityQueue(Literal(opposite_literal).Variable());
|
||||
|
||||
if (!in_clause_to_process_[ci]) {
|
||||
in_clause_to_process_[ci] = true;
|
||||
clause_to_process_.push_back(ci);
|
||||
}
|
||||
}
|
||||
}
|
||||
occurrence_list_ref[new_index] = ci;
|
||||
++new_index;
|
||||
}
|
||||
occurrence_list_ref.resize(new_index);
|
||||
CHECK_EQ(literal_to_clause_sizes_[lit.Index()], new_index);
|
||||
literal_to_clause_sizes_[lit.Index()] = new_index;
|
||||
if (!ProcessClauseToSimplifyOthersUsingLiteral(clause_index, lit)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Now treat clause containing lit.Negated().
|
||||
// TODO(user): choose a potentially smaller list.
|
||||
{
|
||||
// If there is another "short" occurrence list, use it. Otherwise use the
|
||||
// one corresponding to the negation of lit.
|
||||
const LiteralIndex other_lit_index =
|
||||
FindLiteralWithShortestOccurrenceListExcluding(clause, lit);
|
||||
if (other_lit_index != kNoLiteralIndex &&
|
||||
literal_to_clause_sizes_[other_lit_index] <
|
||||
literal_to_clause_sizes_[lit.NegatedIndex()]) {
|
||||
return ProcessClauseToSimplifyOthersUsingLiteral(clause_index,
|
||||
Literal(other_lit_index));
|
||||
} else {
|
||||
// Treat the clauses containing lit.Negated().
|
||||
int new_index = 0;
|
||||
bool something_removed = false;
|
||||
std::vector<ClauseIndex>& occurrence_list_ref =
|
||||
literal_to_clauses_[lit.NegatedIndex()];
|
||||
for (ClauseIndex ci : occurrence_list_ref) {
|
||||
if (clauses_[ci].empty()) continue;
|
||||
auto& occurrence_list_ref = literal_to_clauses_[lit.NegatedIndex()];
|
||||
const uint64 clause_signature = signatures_[clause_index];
|
||||
for (const ClauseIndex ci : occurrence_list_ref) {
|
||||
const uint64 ci_signature = signatures_[ci];
|
||||
DCHECK_EQ(ci_signature, ComputeSignatureOfClauseVariables(ci));
|
||||
if (ci_signature == 0) continue;
|
||||
|
||||
// TODO(user): not super optimal since we could abort earlier if
|
||||
// opposite_literal is not the negation of shortest_list.
|
||||
if (SimplifyClause(clause, &clauses_[ci], &opposite_literal)) {
|
||||
CHECK_EQ(opposite_literal, lit.NegatedIndex());
|
||||
// opposite_literal is not the negation of shortest_list. Note that this
|
||||
// applies to the second call to
|
||||
// ProcessClauseToSimplifyOthersUsingLiteral() above too.
|
||||
if ((clause_signature & ~ci_signature) == 0 &&
|
||||
SimplifyClause(clause, &clauses_[ci], &opposite_literal)) {
|
||||
DCHECK_EQ(opposite_literal, lit.NegatedIndex());
|
||||
if (clauses_[ci].empty()) return false; // UNSAT.
|
||||
if (drat_proof_handler_ != nullptr) {
|
||||
// TODO(user): remove the old clauses_[ci] afterwards.
|
||||
drat_proof_handler_->AddClause(clauses_[ci]);
|
||||
}
|
||||
|
||||
// Recompute signature.
|
||||
signatures_[ci] = ComputeSignatureOfClauseVariables(ci);
|
||||
|
||||
if (!in_clause_to_process_[ci]) {
|
||||
in_clause_to_process_[ci] = true;
|
||||
clause_to_process_.push_back(ci);
|
||||
@@ -688,6 +749,7 @@ bool SatPresolver::CrossProduct(Literal x) {
|
||||
}
|
||||
|
||||
void SatPresolver::Remove(ClauseIndex ci) {
|
||||
signatures_[ci] = 0;
|
||||
for (Literal e : clauses_[ci]) {
|
||||
literal_to_clause_sizes_[e.Index()]--;
|
||||
UpdatePriorityQueue(e.Variable());
|
||||
@@ -707,12 +769,14 @@ void SatPresolver::RemoveAndRegisterForPostsolve(ClauseIndex ci, Literal x) {
|
||||
|
||||
Literal SatPresolver::FindLiteralWithShortestOccurrenceList(
|
||||
const std::vector<Literal>& clause) {
|
||||
CHECK(!clause.empty());
|
||||
DCHECK(!clause.empty());
|
||||
Literal result = clause.front();
|
||||
int best_size = literal_to_clause_sizes_[result.Index()];
|
||||
for (const Literal l : clause) {
|
||||
if (literal_to_clause_sizes_[l.Index()] <
|
||||
literal_to_clause_sizes_[result.Index()]) {
|
||||
const int size = literal_to_clause_sizes_[l.Index()];
|
||||
if (size < best_size) {
|
||||
result = l;
|
||||
best_size = size;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
@@ -720,7 +784,7 @@ Literal SatPresolver::FindLiteralWithShortestOccurrenceList(
|
||||
|
||||
LiteralIndex SatPresolver::FindLiteralWithShortestOccurrenceListExcluding(
|
||||
const std::vector<Literal>& clause, Literal to_exclude) {
|
||||
CHECK(!clause.empty());
|
||||
DCHECK(!clause.empty());
|
||||
LiteralIndex result = kNoLiteralIndex;
|
||||
int num_occurrences = std::numeric_limits<int>::max();
|
||||
for (const Literal l : clause) {
|
||||
@@ -759,7 +823,7 @@ void SatPresolver::InitializePriorityQueue() {
|
||||
|
||||
void SatPresolver::UpdateBvaPriorityQueue(LiteralIndex lit) {
|
||||
if (bva_pq_elements_.empty()) return; // not initialized.
|
||||
CHECK_LT(lit, bva_pq_elements_.size());
|
||||
DCHECK_LT(lit, bva_pq_elements_.size());
|
||||
BvaPqElement* element = &bva_pq_elements_[lit.value()];
|
||||
element->weight = literal_to_clause_sizes_[lit];
|
||||
if (bva_pq_.Contains(element)) {
|
||||
@@ -769,10 +833,10 @@ void SatPresolver::UpdateBvaPriorityQueue(LiteralIndex lit) {
|
||||
|
||||
void SatPresolver::AddToBvaPriorityQueue(LiteralIndex lit) {
|
||||
if (bva_pq_elements_.empty()) return; // not initialized.
|
||||
CHECK_LT(lit, bva_pq_elements_.size());
|
||||
DCHECK_LT(lit, bva_pq_elements_.size());
|
||||
BvaPqElement* element = &bva_pq_elements_[lit.value()];
|
||||
element->weight = literal_to_clause_sizes_[lit];
|
||||
CHECK(!bva_pq_.Contains(element));
|
||||
DCHECK(!bva_pq_.Contains(element));
|
||||
if (element->weight > 2) bva_pq_.Add(element);
|
||||
}
|
||||
|
||||
@@ -834,8 +898,8 @@ bool SimplifyClause(const std::vector<Literal>& a, std::vector<Literal>* b,
|
||||
|
||||
int num_diff = 0;
|
||||
std::vector<Literal>::const_iterator ia = a.begin();
|
||||
std::vector<Literal>::iterator ib = b->begin();
|
||||
std::vector<Literal>::iterator to_remove = b->begin();
|
||||
std::vector<Literal>::const_iterator ib = b->begin();
|
||||
std::vector<Literal>::const_iterator to_remove;
|
||||
|
||||
// Because we abort early when size_diff becomes negative, the second test
|
||||
// in the while loop is not needed.
|
||||
@@ -1057,11 +1121,11 @@ void ProbeAndFindEquivalentLiteral(
|
||||
|
||||
// We rely on the fact that the representative of a literal x and the one
|
||||
// of its negation are the same variable.
|
||||
CHECK_EQ(Literal(LiteralIndex(partition.GetRootAndCompressPath(
|
||||
representative.Index().value()))),
|
||||
Literal(LiteralIndex(partition.GetRootAndCompressPath(
|
||||
representative.NegatedIndex().value())))
|
||||
.Negated());
|
||||
DCHECK_EQ(Literal(LiteralIndex(partition.GetRootAndCompressPath(
|
||||
representative.Index().value()))),
|
||||
Literal(LiteralIndex(partition.GetRootAndCompressPath(
|
||||
representative.NegatedIndex().value())))
|
||||
.Negated());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -134,8 +134,7 @@ class SatPostsolver {
|
||||
// appearing in pseudo-Boolean constraints.
|
||||
class SatPresolver {
|
||||
public:
|
||||
// TODO(user): use IntType? not sure because that complexify the code, and
|
||||
// it is not really needed here.
|
||||
// TODO(user): use IntType!
|
||||
typedef int32 ClauseIndex;
|
||||
|
||||
explicit SatPresolver(SatPostsolver* postsolver)
|
||||
@@ -214,6 +213,10 @@ class SatPresolver {
|
||||
}
|
||||
|
||||
private:
|
||||
// Internal function used by ProcessClauseToSimplifyOthers().
|
||||
bool ProcessClauseToSimplifyOthersUsingLiteral(ClauseIndex clause_index,
|
||||
Literal lit);
|
||||
|
||||
// Internal function to add clauses generated during the presolve. The clause
|
||||
// must already be sorted with the default Literal order and will be cleared
|
||||
// after this call.
|
||||
@@ -252,6 +255,11 @@ class SatPresolver {
|
||||
// Display some statistics on the current clause database.
|
||||
void DisplayStats(double elapsed_seconds);
|
||||
|
||||
// Returns a hash of the given clause variables (not literal) in such a way
|
||||
// that hash1 & not(hash2) == 0 iff the set of variable of clause 1 is a
|
||||
// subset of the one of clause2.
|
||||
uint64 ComputeSignatureOfClauseVariables(ClauseIndex ci);
|
||||
|
||||
// The "active" variables on which we want to call CrossProduct() are kept
|
||||
// in a priority queue so that we process first the ones that occur the least
|
||||
// often in the clause database.
|
||||
@@ -318,6 +326,9 @@ class SatPresolver {
|
||||
// An empty clause means that it has been removed.
|
||||
std::vector<std::vector<Literal>> clauses_; // Indexed by ClauseIndex
|
||||
|
||||
// The cached value of ComputeSignatureOfClauseVariables() for each clause.
|
||||
std::vector<uint64> signatures_; // Indexed by ClauseIndex
|
||||
|
||||
// Occurrence list. For each literal, contains the ClauseIndex of the clause
|
||||
// that contains it (ordered by clause index).
|
||||
gtl::ITIVector<LiteralIndex, std::vector<ClauseIndex>> literal_to_clauses_;
|
||||
|
||||
@@ -209,6 +209,10 @@ class ThetaLambdaTree {
|
||||
std::vector<IntegerType> tree_max_of_energy_delta_;
|
||||
};
|
||||
|
||||
// Explicit instantiations in theta_Tree.cc.
|
||||
extern template class ThetaLambdaTree<IntegerValue>;
|
||||
extern template class ThetaLambdaTree<int64>;
|
||||
|
||||
} // namespace sat
|
||||
} // namespace operations_research
|
||||
|
||||
|
||||
@@ -33,31 +33,29 @@ void ComputeScalingErrors(const std::vector<double>& input,
|
||||
const std::vector<double>& ub, double scaling_factor,
|
||||
double* max_relative_coeff_error,
|
||||
double* max_scaled_sum_error) {
|
||||
const double kInfinity = std::numeric_limits<double>::infinity();
|
||||
|
||||
double max_positive_error = 0.0;
|
||||
double max_negative_error = 0.0;
|
||||
double max_error = 0.0;
|
||||
double min_error = 0.0;
|
||||
*max_relative_coeff_error = 0.0;
|
||||
const int size = input.size();
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const double x = input[i];
|
||||
if (x == 0.0) continue;
|
||||
if (use_bounds && lb[i] == 0 && ub[i] == 0) continue;
|
||||
const double scaled = std::abs(x * scaling_factor);
|
||||
const double scaled = x * scaling_factor;
|
||||
|
||||
if (scaled == 0.0) {
|
||||
*max_relative_coeff_error = kInfinity;
|
||||
*max_scaled_sum_error = kInfinity;
|
||||
return;
|
||||
*max_relative_coeff_error = std::numeric_limits<double>::infinity();
|
||||
} else {
|
||||
*max_relative_coeff_error = std::max(
|
||||
*max_relative_coeff_error, std::abs(std::round(scaled) / scaled - 1));
|
||||
}
|
||||
*max_relative_coeff_error = std::max(
|
||||
*max_relative_coeff_error, std::abs(std::round(scaled) / scaled - 1));
|
||||
|
||||
const double error = std::round(scaled) - scaled;
|
||||
const double error_a = error * (use_bounds ? x * lb[i] : -x);
|
||||
const double error_b = error * (use_bounds ? x * ub[i] : x);
|
||||
max_positive_error += std::max(0.0, std::max(error_a, error_b));
|
||||
max_negative_error += std::max(0.0, std::max(-error_a, -error_b));
|
||||
const double error_lb = (use_bounds ? error * lb[i] : -error);
|
||||
const double error_ub = (use_bounds ? error * ub[i] : error);
|
||||
max_error += std::max(error_lb, error_ub);
|
||||
min_error += std::min(error_lb, error_ub);
|
||||
}
|
||||
*max_scaled_sum_error = std::max(max_positive_error, max_negative_error);
|
||||
*max_scaled_sum_error = std::max(std::abs(max_error), std::abs(min_error));
|
||||
}
|
||||
|
||||
template <bool use_bounds>
|
||||
@@ -192,7 +190,7 @@ int64 ComputeGcdOfRoundedDoubles(const std::vector<double>& x,
|
||||
double scaling_factor) {
|
||||
int64 gcd = 0;
|
||||
for (int i = 0; i < x.size() && gcd != 1; ++i) {
|
||||
int64 value = round(fabs(x[i] * scaling_factor));
|
||||
int64 value = std::abs(std::round(x[i] * scaling_factor));
|
||||
DCHECK_GE(value, 0);
|
||||
if (value == 0) continue;
|
||||
if (gcd == 0) {
|
||||
|
||||
Reference in New Issue
Block a user