// OR-Tools 9.1 — sat/encoding.h
// (This text was extracted from the generated Doxygen documentation of this
// file; see the online documentation for the browsable version.)
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Algorithms to encode constraints into their SAT representation. Currently,
// this contains one possible encoding of a cardinality constraint as used by
// the core-based optimization algorithm in optimization.h.
17 
18 #ifndef OR_TOOLS_SAT_ENCODING_H_
19 #define OR_TOOLS_SAT_ENCODING_H_
20 
#include <cstdint>
#include <deque>
#include <vector>

#include "ortools/base/int_type.h"
#include "ortools/base/integral_types.h"
#include "ortools/base/logging.h"
#include "ortools/base/macros.h"
#include "ortools/sat/boolean_problem.pb.h"
#include "ortools/sat/pb_constraint.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_solver.h"
33 
34 namespace operations_research {
35 namespace sat {
36 
37 // This class represents a number in [0, ub]. The encoding uses ub binary
38 // variables x_i with i in [0, ub) where x_i means that the number is > i. It is
39 // called an EncodingNode, because it represents one node of the tree used to
40 // encode a cardinality constraint.
41 //
42 // In practice, not all literals are explicitly created:
43 // - Only the literals in [lb, current_ub) are "active" at a given time.
44 // - The represented number is known to be >= lb.
45 // - It may be greater than current_ub, but the extra literals will be only
46 // created lazily. In all our solves, the literal current_ub - 1, will always
47 // be assumed to false (i.e. the number will be <= current_ub - 1).
48 // - Note that lb may increase and ub decrease as more information is learned
49 // about this node by the sat solver.
50 //
51 // This is roughly based on the cardinality constraint encoding described in:
52 // Bailleux and Yacine Boufkhad, "Efficient CNF Encoding of Boolean Cardinality
53 // Constraints", In Proc. of CP 2003, pages 108-122, 2003.
54 class EncodingNode {
55  public:
57 
58  // Constructs a EncodingNode of size one, just formed by the given literal.
59  explicit EncodingNode(Literal l);
60 
61  // Creates a "full" encoding node on n new variables, the represented number
62  // beeing in [lb, ub = lb + n). The variables are added to the given solver
63  // with the basic implications linking them:
64  // literal(0) >= ... >= literal(n-1)
66  SatSolver* solver);
67 
68  // Creates a "lazy" encoding node representing the sum of a and b.
69  // Only one literals will be created by this operation. Note that no clauses
70  // linking it with a or b are added by this function.
72 
73  // Returns a literal with the meaning 'this node number is > i'.
74  // The given i must be in [lb_, current_ub).
75  Literal GreaterThan(int i) const { return literal(i - lb_); }
76 
77  // Accessors to size() and literals in [lb, current_ub).
78  int size() const { return literals_.size(); }
79  Literal literal(int i) const {
80  CHECK_GE(i, 0);
81  CHECK_LT(i, literals_.size());
82  return literals_[i];
83  }
84 
85  // Sort by decreasing depth first and then by increasing variable index.
86  // This is meant to be used by the priority queue in MergeAllNodesWithPQ().
87  bool operator<(const EncodingNode& other) const {
88  return depth_ > other.depth_ ||
89  (depth_ == other.depth_ && other.for_sorting_ > for_sorting_);
90  }
91 
92  // Creates a new literals and increases current_ub.
93  // Returns false if we were already at the upper bound for this node.
94  bool IncreaseCurrentUB(SatSolver* solver);
95 
96  // Removes the left-side literals fixed to 1 and returns the number of
97  // literals removed this way. Note that this increases lb_ and reduces the
98  // number of active literals. It also removes any right-side literals fixed to
99  // 0. If such a literal exists, ub is updated accordingly.
100  int Reduce(const SatSolver& solver);
101 
102  // Fix the right-side variables with indices >= to the given upper_bound to
103  // false.
104  void ApplyUpperBound(int64_t upper_bound, SatSolver* solver);
105 
106  void set_weight(Coefficient w) { weight_ = w; }
107  Coefficient weight() const { return weight_; }
108 
109  int depth() const { return depth_; }
110  int lb() const { return lb_; }
111  int current_ub() const { return lb_ + literals_.size(); }
112  int ub() const { return ub_; }
113  EncodingNode* child_a() const { return child_a_; }
114  EncodingNode* child_b() const { return child_b_; }
115 
116  private:
117  int depth_;
118  int lb_;
119  int ub_;
120  BooleanVariable for_sorting_;
121 
122  Coefficient weight_;
123  EncodingNode* child_a_;
124  EncodingNode* child_b_;
125 
126  // The literals of this node in order.
127  std::vector<Literal> literals_;
128 };
129 
130 // Note that we use <= because on 32 bits architecture, the size will actually
131 // be smaller than 64 bytes. One exception is with visual studio on windows, in
132 // debug mode, where the struct is bigger.
133 #if defined(_M_X64) && defined(_DEBUG)
134 // In debug, with msvc, std::Vector<T> is 32
135 static_assert(sizeof(EncodingNode) == 72,
136  "ERROR_EncodingNode_is_not_well_compacted");
137 #else
138 // Note that we use <= because on 32 bits architecture, the size will actually
139 // be smaller than 64 bytes.
140 static_assert(sizeof(EncodingNode) <= 64,
141  "ERROR_EncodingNode_is_not_well_compacted");
142 #endif
143 
144 // Merges the two given EncodingNodes by creating a new node that corresponds to
145 // the sum of the two given ones. Only the left-most binary variable is created
146 // for the parent node, the other ones will be created later when needed.
147 EncodingNode LazyMerge(EncodingNode* a, EncodingNode* b, SatSolver* solver);
148 
149 // Increases the size of the given node by one. To keep all the needed relations
150 // with its children, we also need to increase their size by one, and so on
151 // recursively. Also adds all the necessary clauses linking the newly added
152 // literals.
153 void IncreaseNodeSize(EncodingNode* node, SatSolver* solver);
154 
155 // Merges the two given EncodingNode by creating a new node that corresponds to
156 // the sum of the two given ones. The given upper_bound is interpreted as a
157 // bound on this sum, and allows creating fewer binary variables.
158 EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a,
159  EncodingNode* b, SatSolver* solver);
160 
161 // Merges all the given nodes two by two until there is only one left. Returns
162 // the final node which encodes the sum of all the given nodes.
164  const std::vector<EncodingNode*>& nodes,
165  SatSolver* solver,
166  std::deque<EncodingNode>* repository);
167 
168 // Same as MergeAllNodesWithDeque() but use a priority queue to merge in
169 // priority nodes with smaller sizes.
170 EncodingNode* LazyMergeAllNodeWithPQ(const std::vector<EncodingNode*>& nodes,
171  SatSolver* solver,
172  std::deque<EncodingNode>* repository);
173 
174 // Returns a vector with one new EncodingNode by variable in the given
175 // objective. Sets the offset to the negated sum of the negative coefficient,
176 // because in this case we negate the literals to have only positive
177 // coefficients.
178 std::vector<EncodingNode*> CreateInitialEncodingNodes(
179  const std::vector<Literal>& literals,
180  const std::vector<Coefficient>& coeffs, Coefficient* offset,
181  std::deque<EncodingNode>* repository);
182 std::vector<EncodingNode*> CreateInitialEncodingNodes(
183  const LinearObjective& objective_proto, Coefficient* offset,
184  std::deque<EncodingNode>* repository);
185 
186 // Reduces the nodes using the now fixed literals, update the lower-bound, and
187 // returns the set of assumptions for the next round of the core-based
188 // algorithm. Returns an empty set of assumptions if everything is fixed.
189 std::vector<Literal> ReduceNodesAndExtractAssumptions(
190  Coefficient upper_bound, Coefficient stratified_lower_bound,
191  Coefficient* lower_bound, std::vector<EncodingNode*>* nodes,
192  SatSolver* solver);
193 
194 // Returns the minimum weight of the nodes in the core. Note that the literal in
195 // the core must appear in the same order as the one in nodes.
196 Coefficient ComputeCoreMinWeight(const std::vector<EncodingNode*>& nodes,
197  const std::vector<Literal>& core);
198 
199 // Returns the maximum node weight under the given upper_bound. Returns zero if
200 // no such weight exist (note that a node weight is strictly positive, so this
201 // make sense).
202 Coefficient MaxNodeWeightSmallerThan(const std::vector<EncodingNode*>& nodes,
204 
205 // Updates the encoding using the given core. The literals in the core must
206 // match the order in nodes.
207 void ProcessCore(const std::vector<Literal>& core, Coefficient min_weight,
208  std::deque<EncodingNode>* repository,
209  std::vector<EncodingNode*>* nodes, SatSolver* solver);
210 
211 } // namespace sat
212 } // namespace operations_research
213 
214 #endif // OR_TOOLS_SAT_ENCODING_H_
// ---------------------------------------------------------------------------
// Doxygen cross-reference residue from the documentation extraction, kept
// below as a comment so it cannot be mistaken for code. It records where each
// entity above is defined:
//
//   void InitializeLazyNode(EncodingNode* a, EncodingNode* b,
//       SatSolver* solver)                          -> encoding.cc:56
//   #define CHECK_GE(val1, val2)                    -> base/logging.h:702
//   EncodingNode* MergeAllNodesWithDeque(Coefficient upper_bound,
//       const std::vector<EncodingNode*>& nodes, SatSolver* solver,
//       std::deque<EncodingNode>* repository)       -> encoding.cc:264
//   bool operator<(const EncodingNode& other) const -> encoding.h:87
//   void ApplyUpperBound(int64_t upper_bound, SatSolver* solver)
//                                                   -> encoding.cc:98
//   Literal GreaterThan(int i) const                -> encoding.h:75
//   EncodingNode* LazyMergeAllNodeWithPQ(
//       const std::vector<EncodingNode*>& nodes, SatSolver* solver,
//       std::deque<EncodingNode>* repository)       -> encoding.cc:286
//   Coefficient ComputeCoreMinWeight(
//       const std::vector<EncodingNode*>& nodes,
//       const std::vector<Literal>& core)           -> encoding.cc:419
//   void InitializeFullNode(int n, EncodingNode* a, EncodingNode* b,
//       SatSolver* solver)                          -> encoding.cc:36
//   #define CHECK_LT(val1, val2)                    -> base/logging.h:701
//   bool IncreaseCurrentUB(SatSolver* solver)       -> encoding.cc:72
//   void IncreaseNodeSize(EncodingNode* node, SatSolver* solver)
//                                                   -> encoding.cc:117
//   EncodingNode FullMerge(Coefficient upper_bound, EncodingNode* a,
//       EncodingNode* b, SatSolver* solver)         -> encoding.cc:213
//   EncodingNode* child_a() const                   -> encoding.h:113
//   Coefficient MaxNodeWeightSmallerThan(
//       const std::vector<EncodingNode*>& nodes,
//       Coefficient upper_bound)                    -> encoding.cc:434
//   std::vector<EncodingNode*> CreateInitialEncodingNodes(
//       const std::vector<Literal>& literals,
//       const std::vector<Coefficient>& coeffs, Coefficient* offset,
//       std::deque<EncodingNode>* repository)       -> encoding.cc:303
//   Literal literal(int i) const                    -> encoding.h:79
//   void ProcessCore(const std::vector<Literal>& core,
//       Coefficient min_weight, std::deque<EncodingNode>* repository,
//       std::vector<EncodingNode*>* nodes, SatSolver* solver)
//                                                   -> encoding.cc:446
//   int Reduce(const SatSolver& solver)             -> encoding.cc:82
//   EncodingNode LazyMerge(EncodingNode* a, EncodingNode* b,
//       SatSolver* solver)                          -> encoding.cc:107
//   EncodingNode* child_b() const                   -> encoding.h:114
//   std::vector<Literal> ReduceNodesAndExtractAssumptions(
//       Coefficient upper_bound, Coefficient stratified_lower_bound,
//       Coefficient* lower_bound, std::vector<EncodingNode*>* nodes,
//       SatSolver* solver)                          -> encoding.cc:367
// ---------------------------------------------------------------------------