OR-Tools  9.1
encoding.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
15
16#include <algorithm>
17#include <cstdint>
18#include <deque>
19#include <memory>
20#include <queue>
21
23
24namespace operations_research {
25namespace sat {
26
// EncodingNode leaf constructor (the signature line `EncodingNode::EncodingNode(Literal l)`
// was lost in extraction). Builds a leaf node representing the single literal
// `l`: its value is in [0, 1], it has no children, and it sorts by variable.
28 : depth_(0),
29 lb_(0),
30 ub_(1),
31 for_sorting_(l.Variable()),
32 child_a_(nullptr),
33 child_b_(nullptr),
34 literals_(1, l) {}
35
// EncodingNode::InitializeFullNode (the first signature line, with parameters
// `int n, EncodingNode* a, EncodingNode* b`, was lost in extraction).
// Eagerly creates all n literals of this node over fresh solver variables and
// chains them with the unary "order" encoding. Also sets the bounds, depth and
// children from a and b.
37 SatSolver* solver) {
38 CHECK(literals_.empty()) << "Already initialized";
39 CHECK_GT(n, 0);
40 const BooleanVariable first_var_index(solver->NumVariables());
41 solver->SetNumVariables(solver->NumVariables() + n);
42 for (int i = 0; i < n; ++i) {
43 literals_.push_back(Literal(first_var_index + i, true));
44 if (i > 0) {
// literal(i) true => literal(i - 1) true: the unary "order" chain.
45 solver->AddBinaryClause(literal(i - 1), literal(i).Negated());
46 }
47 }
// The node can represent values in [lb_, lb_ + n].
48 lb_ = a->lb_ + b->lb_;
49 ub_ = lb_ + n;
50 depth_ = 1 + std::max(a->depth_, b->depth_);
51 child_a_ = a;
52 child_b_ = b;
53 for_sorting_ = first_var_index;
54}
55
// EncodingNode::InitializeLazyNode (the first signature line, with parameters
// `EncodingNode* a, EncodingNode* b`, was lost in extraction).
// Creates only the first literal of the node; further literals are added on
// demand by IncreaseCurrentUB(). Bounds and depth are derived from a and b.
57 SatSolver* solver) {
58 CHECK(literals_.empty()) << "Already initialized";
59 const BooleanVariable first_var_index(solver->NumVariables());
60 solver->SetNumVariables(solver->NumVariables() + 1);
61 literals_.emplace_back(first_var_index, true);
62 child_a_ = a;
63 child_b_ = b;
64 ub_ = a->ub_ + b->ub_;
65 lb_ = a->lb_ + b->lb_;
66 depth_ = 1 + std::max(a->depth_, b->depth_);
67
68 // Merging the nodes of the same depth in order seems to help a bit.
69 for_sorting_ = std::min(a->for_sorting_, b->for_sorting_);
70}
71
// EncodingNode::IncreaseCurrentUB (the signature line `bool
// EncodingNode::IncreaseCurrentUB(SatSolver* solver)` was lost in extraction).
// Appends one fresh literal to the node, chained to the previous one
// (new literal => previous literal). Returns false when the node already
// reached its upper bound ub_ and cannot grow.
73 CHECK(!literals_.empty());
74 if (current_ub() == ub_) return false;
75 literals_.emplace_back(BooleanVariable(solver->NumVariables()), true);
76 solver->SetNumVariables(solver->NumVariables() + 1);
// Keep the "order" encoding invariant: literal(i) => literal(i - 1).
77 solver->AddBinaryClause(literals_.back().Negated(),
78 literals_[literals_.size() - 2]);
79 return true;
80}
81
82int EncodingNode::Reduce(const SatSolver& solver) {
83 int i = 0;
84 while (i < literals_.size() &&
85 solver.Assignment().LiteralIsTrue(literals_[i])) {
86 ++i;
87 ++lb_;
88 }
89 literals_.erase(literals_.begin(), literals_.begin() + i);
90 while (!literals_.empty() &&
91 solver.Assignment().LiteralIsFalse(literals_.back())) {
92 literals_.pop_back();
93 ub_ = lb_ + literals_.size();
94 }
95 return i;
96}
97
// EncodingNode::ApplyUpperBound (the signature line `void
// EncodingNode::ApplyUpperBound(int64_t upper_bound, SatSolver* solver)` was
// lost in extraction). Fixes to false every literal at index >= upper_bound
// and shrinks the node so that its size never exceeds upper_bound.
99 if (size() <= upper_bound) return;
100 for (int i = upper_bound; i < size(); ++i) {
101 solver->AddUnitClause(literal(i).Negated());
102 }
103 literals_.resize(upper_bound);
104 ub_ = lb_ + literals_.size();
105}
106
// LazyMerge (the signature line `EncodingNode LazyMerge(EncodingNode* a,
// EncodingNode* b, SatSolver* solver)` was lost in extraction).
// Creates a lazily-encoded node n over children a and b, and encodes
// n.literal(0) as the "or" of the children's first literals:
//   a0 => n0,  b0 => n0,  n0 => (a0 or b0).
108 EncodingNode n;
109 n.InitializeLazyNode(a, b, solver);
110 solver->AddBinaryClause(a->literal(0).Negated(), n.literal(0));
111 solver->AddBinaryClause(b->literal(0).Negated(), n.literal(0));
112 solver->AddTernaryClause(n.literal(0).Negated(), a->literal(0),
113 b->literal(0));
114 return n;
115}
116
// IncreaseNodeSize (the signature line `void IncreaseNodeSize(EncodingNode*
// node, SatSolver* solver)` was lost in extraction).
// Adds one literal to `node` (via IncreaseCurrentUB()) and then walks down the
// encoding tree, growing the children where needed and adding the clauses that
// relate each new literal n->GreaterThan(target) to the children's sums.
118 if (!node->IncreaseCurrentUB(solver)) return;
119 std::vector<EncodingNode*> to_process;
120 to_process.push_back(node);
121
122 // Only one side of the constraint is mandatory (the one propagating the ones
123 // to the top of the encoding tree), and it seems more efficient not to encode
124 // the other side.
125 //
126 // TODO(user): Experiment more.
127 const bool complete_encoding = false;
128
129 while (!to_process.empty()) {
130 EncodingNode* n = to_process.back();
131 EncodingNode* a = n->child_a();
132 EncodingNode* b = n->child_b();
133 to_process.pop_back();
134
135 // Note that since we were able to increase its size, n must have children.
136 // n->GreaterThan(target) is the new literal of n.
137 CHECK(a != nullptr);
138 CHECK(b != nullptr);
139 CHECK_GE(n->size(), 2);
140 const int target = n->current_ub() - 1;
141
142 // Add a literal to a if needed.
143 // That is, now that the node n can go up to it new current_ub, if we need
144 // to increase the current_ub of a.
145 if (a->current_ub() != a->ub()) {
146 CHECK_GE(a->current_ub() - 1 + b->lb(), target - 1);
147 if (a->current_ub() - 1 + b->lb() < target) {
148 CHECK(a->IncreaseCurrentUB(solver));
149 to_process.push_back(a);
150 }
151 }
152
153 // Add a literal to b if needed.
154 if (b->current_ub() != b->ub()) {
155 CHECK_GE(b->current_ub() - 1 + a->lb(), target - 1);
156 if (b->current_ub() - 1 + a->lb() < target) {
157 CHECK(b->IncreaseCurrentUB(solver));
158 to_process.push_back(b);
159 }
160 }
161
162 // Wire the new literal of n correctly with its two children.
163 for (int ia = a->lb(); ia < a->current_ub(); ++ia) {
164 const int ib = target - ia;
165 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
166 // if x <= ia and y <= ib then x + y <= ia + ib.
167 solver->AddTernaryClause(n->GreaterThan(target).Negated(),
168 a->GreaterThan(ia), b->GreaterThan(ib));
169 }
170 if (complete_encoding && ib == b->ub()) {
171 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
172 a->GreaterThan(ia));
173 }
174
// Here b->GreaterThan(ib - 1) is trivially true (ib - 1 == b->lb() - 1),
// so the ternary clause below degenerates to a binary one.
175 if (ib - 1 == b->lb() - 1) {
176 solver->AddBinaryClause(n->GreaterThan(target),
177 a->GreaterThan(ia).Negated());
178 }
179 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
180 // if x > ia and y > ib - 1 then x + y > ia + ib.
181 solver->AddTernaryClause(n->GreaterThan(target),
182 a->GreaterThan(ia).Negated(),
183 b->GreaterThan(ib - 1).Negated());
184 }
185 }
186
187 // Case ia = a->lb() - 1; a->GreaterThan(ia) always true.
188 {
189 const int ib = target - (a->lb() - 1);
190 if ((ib - 1) == b->lb() - 1) {
191 solver->AddUnitClause(n->GreaterThan(target));
192 }
193 if ((ib - 1) >= b->lb() && (ib - 1) < b->current_ub()) {
194 solver->AddBinaryClause(n->GreaterThan(target),
195 b->GreaterThan(ib - 1).Negated());
196 }
197 }
198
199 // case ia == a->ub; a->GreaterThan(ia) always false.
200 {
201 const int ib = target - a->ub();
202 if (complete_encoding && ib >= b->lb() && ib < b->current_ub()) {
203 solver->AddBinaryClause(n->GreaterThan(target).Negated(),
204 b->GreaterThan(ib));
205 }
206 if (ib == b->ub()) {
207 solver->AddUnitClause(n->GreaterThan(target).Negated());
208 }
209 }
210 }
211}
212
// FullMerge (the first signature line, `EncodingNode FullMerge(Coefficient
// upper_bound, EncodingNode* a,`, was lost in extraction).
// Eagerly encodes n = a + b, truncated at upper_bound: creates all the
// literals of n up front and adds every channeling clause between the
// children's literals and n's literals.
214 EncodingNode* b, SatSolver* solver) {
215 EncodingNode n;
// The merged node never needs more than upper_bound literals.
216 const int size =
217 std::min(Coefficient(a->size() + b->size()), upper_bound).value();
218 n.InitializeFullNode(size, a, b, solver);
// Channel a's literals with n's (shifted by b->size() on the upper side).
219 for (int ia = 0; ia < a->size(); ++ia) {
220 if (ia + b->size() < size) {
221 solver->AddBinaryClause(n.literal(ia + b->size()).Negated(),
222 a->literal(ia));
223 }
224 if (ia < size) {
225 solver->AddBinaryClause(n.literal(ia), a->literal(ia).Negated());
226 } else {
227 // Fix the variable to false because of the given upper_bound.
228 solver->AddUnitClause(a->literal(ia).Negated());
229 }
230 }
// Same channeling for b's literals, symmetrically.
231 for (int ib = 0; ib < b->size(); ++ib) {
232 if (ib + a->size() < size) {
233 solver->AddBinaryClause(n.literal(ib + a->size()).Negated(),
234 b->literal(ib));
235 }
236 if (ib < size) {
237 solver->AddBinaryClause(n.literal(ib), b->literal(ib).Negated());
238 } else {
239 // Fix the variable to false because of the given upper_bound.
240 solver->AddUnitClause(b->literal(ib).Negated());
241 }
242 }
// Pairwise clauses encoding the sum in both directions.
243 for (int ia = 0; ia < a->size(); ++ia) {
244 for (int ib = 0; ib < b->size(); ++ib) {
245 if (ia + ib < size) {
246 // if x <= ia and y <= ib, then x + y <= ia + ib.
247 solver->AddTernaryClause(n.literal(ia + ib).Negated(), a->literal(ia),
248 b->literal(ib));
249 }
250 if (ia + ib + 1 < size) {
251 // if x > ia and y > ib, then x + y > ia + ib + 1.
252 solver->AddTernaryClause(n.literal(ia + ib + 1),
253 a->literal(ia).Negated(),
254 b->literal(ib).Negated());
255 } else {
// The pair would exceed the truncation size: forbid it outright.
256 solver->AddBinaryClause(a->literal(ia).Negated(),
257 b->literal(ib).Negated());
258 }
259 }
260 }
261 return n;
262}
263
// MergeAllNodesWithDeque (the first signature line, `EncodingNode*
// MergeAllNodesWithDeque(Coefficient upper_bound,`, was lost in extraction).
// Repeatedly FullMerge()s the two front nodes and pushes the result at the
// back, producing a roughly balanced merge tree. Merged nodes live in
// *repository so the returned pointer stays valid.
// NOTE(review): assumes `nodes` is non-empty — dq.front() on an empty deque
// would be undefined behavior; confirm all callers guarantee this.
265 const std::vector<EncodingNode*>& nodes,
266 SatSolver* solver,
267 std::deque<EncodingNode>* repository) {
268 std::deque<EncodingNode*> dq(nodes.begin(), nodes.end());
269 while (dq.size() > 1) {
270 EncodingNode* a = dq.front();
271 dq.pop_front();
272 EncodingNode* b = dq.front();
273 dq.pop_front();
274 repository->push_back(FullMerge(upper_bound, a, b, solver));
275 dq.push_back(&repository->back());
276 }
277 return dq.front();
278}
279
280namespace {
281struct SortEncodingNodePointers {
282 bool operator()(EncodingNode* a, EncodingNode* b) const { return *a < *b; }
283};
284} // namespace
285
286EncodingNode* LazyMergeAllNodeWithPQ(const std::vector<EncodingNode*>& nodes,
287 SatSolver* solver,
288 std::deque<EncodingNode>* repository) {
289 std::priority_queue<EncodingNode*, std::vector<EncodingNode*>,
290 SortEncodingNodePointers>
291 pq(nodes.begin(), nodes.end());
292 while (pq.size() > 1) {
293 EncodingNode* a = pq.top();
294 pq.pop();
295 EncodingNode* b = pq.top();
296 pq.pop();
297 repository->push_back(LazyMerge(a, b, solver));
298 pq.push(&repository->back());
299 }
300 return pq.top();
301}
302
303std::vector<EncodingNode*> CreateInitialEncodingNodes(
304 const std::vector<Literal>& literals,
305 const std::vector<Coefficient>& coeffs, Coefficient* offset,
306 std::deque<EncodingNode>* repository) {
307 CHECK_EQ(literals.size(), coeffs.size());
308 *offset = 0;
309 std::vector<EncodingNode*> nodes;
310 for (int i = 0; i < literals.size(); ++i) {
311 // We want to maximize the cost when this literal is true.
312 if (coeffs[i] > 0) {
313 repository->emplace_back(literals[i]);
314 nodes.push_back(&repository->back());
315 nodes.back()->set_weight(coeffs[i]);
316 } else {
317 repository->emplace_back(literals[i].Negated());
318 nodes.push_back(&repository->back());
319 nodes.back()->set_weight(-coeffs[i]);
320
321 // Note that this increase the offset since the coeff is negative.
322 *offset -= coeffs[i];
323 }
324 }
325 return nodes;
326}
327
328std::vector<EncodingNode*> CreateInitialEncodingNodes(
329 const LinearObjective& objective_proto, Coefficient* offset,
330 std::deque<EncodingNode>* repository) {
331 *offset = 0;
332 std::vector<EncodingNode*> nodes;
333 for (int i = 0; i < objective_proto.literals_size(); ++i) {
334 const Literal literal(objective_proto.literals(i));
335
336 // We want to maximize the cost when this literal is true.
337 if (objective_proto.coefficients(i) > 0) {
338 repository->emplace_back(literal);
339 nodes.push_back(&repository->back());
340 nodes.back()->set_weight(Coefficient(objective_proto.coefficients(i)));
341 } else {
342 repository->emplace_back(literal.Negated());
343 nodes.push_back(&repository->back());
344 nodes.back()->set_weight(Coefficient(-objective_proto.coefficients(i)));
345
346 // Note that this increase the offset since the coeff is negative.
347 *offset -= objective_proto.coefficients(i);
348 }
349 }
350 return nodes;
351}
352
353namespace {
354
355bool EncodingNodeByWeight(const EncodingNode* a, const EncodingNode* b) {
356 return a->weight() < b->weight();
357}
358
359bool EncodingNodeByDepth(const EncodingNode* a, const EncodingNode* b) {
360 return a->depth() < b->depth();
361}
362
363bool EmptyEncodingNode(const EncodingNode* a) { return a->size() == 0; }
364
365} // namespace
366
// ReduceNodesAndExtractAssumptions (the first signature line, the opening
// brace of the gap-scope block, and the switch case labels
// (DEFAULT_ASSUMPTION_ORDER / ORDER_ASSUMPTION_BY_DEPTH /
// ORDER_ASSUMPTION_BY_WEIGHT) were lost in extraction).
// Simplifies all nodes w.r.t. the current root-level assignment, prunes them
// with the objective gap, sorts them per the solver parameters, and returns
// the assumption literals for nodes at or above stratified_lower_bound.
368 Coefficient upper_bound, Coefficient stratified_lower_bound,
369 Coefficient* lower_bound, std::vector<EncodingNode*>* nodes,
370 SatSolver* solver) {
371 // Remove the left-most variables fixed to one from each node.
372 // Also update the lower_bound. Note that Reduce() needs the solver to be
373 // at the root node in order to work.
374 solver->Backtrack(0);
375 for (EncodingNode* n : *nodes) {
376 *lower_bound += n->Reduce(*solver) * n->weight();
377 }
378
379 // Fix the nodes right-most variables that are above the gap.
381 const Coefficient gap = upper_bound - *lower_bound;
382 if (gap <= 0) return {};
383 for (EncodingNode* n : *nodes) {
384 n->ApplyUpperBound((gap / n->weight()).value(), solver);
385 }
386 }
387
388 // Remove the empty nodes.
389 nodes->erase(std::remove_if(nodes->begin(), nodes->end(), EmptyEncodingNode),
390 nodes->end());
391
392 // Sort the nodes.
393 switch (solver->parameters().max_sat_assumption_order()) {
395 break;
397 std::sort(nodes->begin(), nodes->end(), EncodingNodeByDepth);
398 break;
400 std::sort(nodes->begin(), nodes->end(), EncodingNodeByWeight);
401 break;
402 }
404 // TODO(user): with DEFAULT_ASSUMPTION_ORDER, this will lead to a somewhat
405 // weird behavior, since we will reverse the nodes at each iteration...
406 std::reverse(nodes->begin(), nodes->end());
407 }
408
409 // Extract the assumptions from the nodes.
410 std::vector<Literal> assumptions;
411 for (EncodingNode* n : *nodes) {
412 if (n->weight() >= stratified_lower_bound) {
413 assumptions.push_back(n->literal(0).Negated());
414 }
415 }
416 return assumptions;
417}
418
419Coefficient ComputeCoreMinWeight(const std::vector<EncodingNode*>& nodes,
420 const std::vector<Literal>& core) {
421 Coefficient min_weight = kCoefficientMax;
422 int index = 0;
423 for (int i = 0; i < core.size(); ++i) {
424 for (;
425 index < nodes.size() && nodes[index]->literal(0).Negated() != core[i];
426 ++index) {
427 }
428 CHECK_LT(index, nodes.size());
429 min_weight = std::min(min_weight, nodes[index]->weight());
430 }
431 return min_weight;
432}
433
// MaxNodeWeightSmallerThan (the second signature line, `Coefficient
// upper_bound) {`, was lost in extraction).
// Returns the largest node weight strictly smaller than upper_bound, or 0
// if no node qualifies. CHECKs that every weight is positive.
434Coefficient MaxNodeWeightSmallerThan(const std::vector<EncodingNode*>& nodes,
436 Coefficient result(0);
437 for (EncodingNode* n : nodes) {
438 CHECK_GT(n->weight(), 0);
439 if (n->weight() < upper_bound) {
440 result = std::max(result, n->weight());
441 }
442 }
443 return result;
444}
445
// Handles one unsatisfiable core returned by the SAT solver: the nodes whose
// assumption literals appear in `core` are removed from *nodes, merged into a
// single new EncodingNode of weight min_weight (stored in *repository), and
// the merged node is forced to be >= 1 via a unit clause on its first
// literal. A single-literal core instead just grows the matching node.
446void ProcessCore(const std::vector<Literal>& core, Coefficient min_weight,
447 std::deque<EncodingNode>* repository,
448 std::vector<EncodingNode*>* nodes, SatSolver* solver) {
449 // Backtrack to be able to add new constraints.
450 solver->Backtrack(0);
451
452 if (core.size() == 1) {
453 // The core will be reduced at the beginning of the next loop.
454 // Find the associated node, and call IncreaseNodeSize() on it.
455 CHECK(solver->Assignment().LiteralIsFalse(core[0]));
456 for (EncodingNode* n : *nodes) {
457 if (n->literal(0).Negated() == core[0]) {
458 IncreaseNodeSize(n, solver);
459 return;
460 }
461 }
462 LOG(FATAL) << "Node with literal " << core[0] << " not found!";
463 }
464
465 // Remove from nodes the EncodingNode in the core, merge them, and add the
466 // resulting EncodingNode at the back.
// The vector is compacted in place: `index` scans the old content while
// `new_node_index` writes back the nodes that are kept.
467 int index = 0;
468 int new_node_index = 0;
469 std::vector<EncodingNode*> to_merge;
470 for (int i = 0; i < core.size(); ++i) {
471 // Since the nodes appear in order in the core, we can find the
472 // relevant "objective" variable efficiently with a simple linear scan
473 // in the nodes vector (done with index).
474 for (; (*nodes)[index]->literal(0).Negated() != core[i]; ++index) {
475 CHECK_LT(index, nodes->size());
476 (*nodes)[new_node_index] = (*nodes)[index];
477 ++new_node_index;
478 }
479 CHECK_LT(index, nodes->size());
480 to_merge.push_back((*nodes)[index]);
481
482 // Special case if the weight > min_weight. we keep it, but reduce its
483 // cost. This is the same "trick" as in WPM1 used to deal with weight.
484 // We basically split a clause with a larger weight in two identical
485 // clauses, one with weight min_weight that will be merged and one with
486 // the remaining weight.
487 if ((*nodes)[index]->weight() > min_weight) {
488 (*nodes)[index]->set_weight((*nodes)[index]->weight() - min_weight);
489 (*nodes)[new_node_index] = (*nodes)[index];
490 ++new_node_index;
491 }
492 ++index;
493 }
// Keep all the nodes that come after the last core literal.
494 for (; index < nodes->size(); ++index) {
495 (*nodes)[new_node_index] = (*nodes)[index];
496 ++new_node_index;
497 }
498 nodes->resize(new_node_index);
// Merge the extracted core nodes and require their sum to be at least one.
499 nodes->push_back(LazyMergeAllNodeWithPQ(to_merge, solver, repository));
500 IncreaseNodeSize(nodes->back(), solver);
501 nodes->back()->set_weight(min_weight);
502 CHECK(solver->AddUnitClause(nodes->back()->literal(0)));
503}
504
505} // namespace sat
506} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define CHECK(condition)
Definition: base/logging.h:491
#define CHECK_LT(val1, val2)
Definition: base/logging.h:701
#define CHECK_EQ(val1, val2)
Definition: base/logging.h:698
#define CHECK_GE(val1, val2)
Definition: base/logging.h:702
#define CHECK_GT(val1, val2)
Definition: base/logging.h:703
#define LOG(severity)
Definition: base/logging.h:416
int Reduce(const SatSolver &solver)
Definition: encoding.cc:82
bool IncreaseCurrentUB(SatSolver *solver)
Definition: encoding.cc:72
EncodingNode * child_b() const
Definition: encoding.h:114
void ApplyUpperBound(int64_t upper_bound, SatSolver *solver)
Definition: encoding.cc:98
void InitializeLazyNode(EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition: encoding.cc:56
void InitializeFullNode(int n, EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition: encoding.cc:36
Literal literal(int i) const
Definition: encoding.h:79
EncodingNode * child_a() const
Definition: encoding.h:113
Literal GreaterThan(int i) const
Definition: encoding.h:75
::PROTOBUF_NAMESPACE_ID::int64 coefficients(int index) const
::PROTOBUF_NAMESPACE_ID::int32 literals(int index) const
static constexpr MaxSatAssumptionOrder ORDER_ASSUMPTION_BY_DEPTH
::operations_research::sat::SatParameters_MaxSatAssumptionOrder max_sat_assumption_order() const
static constexpr MaxSatAssumptionOrder DEFAULT_ASSUMPTION_ORDER
static constexpr MaxSatAssumptionOrder ORDER_ASSUMPTION_BY_WEIGHT
void SetNumVariables(int num_variables)
Definition: sat_solver.cc:65
bool AddTernaryClause(Literal a, Literal b, Literal c)
Definition: sat_solver.cc:192
const SatParameters & parameters() const
Definition: sat_solver.cc:111
const VariablesAssignment & Assignment() const
Definition: sat_solver.h:363
bool AddBinaryClause(Literal a, Literal b)
Definition: sat_solver.cc:181
void Backtrack(int target_level)
Definition: sat_solver.cc:889
bool AddUnitClause(Literal true_literal)
Definition: sat_solver.cc:165
bool LiteralIsTrue(Literal literal) const
Definition: sat_base.h:151
bool LiteralIsFalse(Literal literal) const
Definition: sat_base.h:148
int64_t b
int64_t a
double upper_bound
double lower_bound
const int FATAL
Definition: log_severity.h:32
std::tuple< int64_t, int64_t, const double > Coefficient
Coefficient ComputeCoreMinWeight(const std::vector< EncodingNode * > &nodes, const std::vector< Literal > &core)
Definition: encoding.cc:419
EncodingNode * MergeAllNodesWithDeque(Coefficient upper_bound, const std::vector< EncodingNode * > &nodes, SatSolver *solver, std::deque< EncodingNode > *repository)
Definition: encoding.cc:264
std::vector< Literal > ReduceNodesAndExtractAssumptions(Coefficient upper_bound, Coefficient stratified_lower_bound, Coefficient *lower_bound, std::vector< EncodingNode * > *nodes, SatSolver *solver)
Definition: encoding.cc:367
EncodingNode * LazyMergeAllNodeWithPQ(const std::vector< EncodingNode * > &nodes, SatSolver *solver, std::deque< EncodingNode > *repository)
Definition: encoding.cc:286
void IncreaseNodeSize(EncodingNode *node, SatSolver *solver)
Definition: encoding.cc:117
EncodingNode LazyMerge(EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition: encoding.cc:107
EncodingNode FullMerge(Coefficient upper_bound, EncodingNode *a, EncodingNode *b, SatSolver *solver)
Definition: encoding.cc:213
void ProcessCore(const std::vector< Literal > &core, Coefficient min_weight, std::deque< EncodingNode > *repository, std::vector< EncodingNode * > *nodes, SatSolver *solver)
Definition: encoding.cc:446
Coefficient MaxNodeWeightSmallerThan(const std::vector< EncodingNode * > &nodes, Coefficient upper_bound)
Definition: encoding.cc:434
std::vector< EncodingNode * > CreateInitialEncodingNodes(const std::vector< Literal > &literals, const std::vector< Coefficient > &coeffs, Coefficient *offset, std::deque< EncodingNode > *repository)
Definition: encoding.cc:303
const Coefficient kCoefficientMax(std::numeric_limits< Coefficient::ValueType >::max())
Collection of objects used to extend the Constraint Solver library.
Literal literal
Definition: optimization.cc:85
int64_t weight
Definition: pack.cc:510
int index
Definition: pack.cc:509
int nodes