OR-Tools  9.2
sat/util.cc
Go to the documentation of this file.
1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6// http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14#include "ortools/sat/util.h"
15
16#include <algorithm>
17#include <cmath>
18#include <cstdint>
19
20#include "absl/numeric/int128.h"
23
24namespace operations_research {
25namespace sat {
26
namespace {
// Computes both the quotient and remainder of a / b (truncated toward zero,
// as C++ integer division does).
//
// Compilers fold the two operations into a single division instruction. The
// helper is kept as an indirection because of the aliased r[i ^ 1] inputs
// used by ModularInverse below, where relying on the optimization inline
// seemed less safe.
void QuotientAndRemainder(int64_t a, int64_t b, int64_t& q, int64_t& r) {
  q = a / b;
  r = a % b;
}
}  // namespace
37
// Randomly perturbs the branching/polarity-related fields of the SAT
// parameters (used to diversify parallel workers).
//
// NOTE(review): several source lines of this function appear to be elided in
// this extraction — including the `SatParameters* parameters` argument of the
// signature and the set_preferred_variable_order()/set_initial_polarity()
// calls that consume `order_d` and `polarity_d`. The fragments below are kept
// as-is; confirm against the original file before editing.
void RandomizeDecisionHeuristic(absl::BitGenRef random,
#if !defined(__PORTABLE_PLATFORM__)
  // Random preferred variable order, drawn uniformly from the proto enum's
  // descriptor so every declared value is equally likely.
  const google::protobuf::EnumDescriptor* order_d =
      order_d->value(absl::Uniform(random, 0, order_d->value_count()))
          ->number()));

  // Random polarity initial value, drawn the same way.
  const google::protobuf::EnumDescriptor* polarity_d =
      polarity_d->value(absl::Uniform(random, 0, polarity_d->value_count()))
          ->number()));
#endif  // __PORTABLE_PLATFORM__
  // Other random parameters.
  parameters->set_use_phase_saving(absl::Bernoulli(random, 0.5));
  // Half the time, enable a small (1%) amount of random polarity/branching.
  parameters->set_random_polarity_ratio(absl::Bernoulli(random, 0.5) ? 0.01
                                                                     : 0.0);
  parameters->set_random_branches_ratio(absl::Bernoulli(random, 0.5) ? 0.01
                                                                     : 0.0);
}
63
// Using the extended Euclidean algo, we find a and b such that a x + b m = gcd.
65// https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
66int64_t ModularInverse(int64_t x, int64_t m) {
67 DCHECK_GE(x, 0);
68 DCHECK_LT(x, m);
69
70 int64_t r[2] = {m, x};
71 int64_t t[2] = {0, 1};
72 int64_t q;
73
74 // We only keep the last two terms of the sequences with the "^1" trick:
75 //
76 // q = r[i-2] / r[i-1]
77 // r[i] = r[i-2] % r[i-1]
78 // t[i] = t[i-2] - t[i-1] * q
79 //
80 // We always have:
81 // - gcd(r[i], r[i - 1]) = gcd(r[i - 1], r[i - 2])
82 // - x * t[i] + m * t[i - 1] = r[i]
83 int i = 0;
84 for (; r[i ^ 1] != 0; i ^= 1) {
85 QuotientAndRemainder(r[i], r[i ^ 1], q, r[i]);
86 t[i] -= t[i ^ 1] * q;
87 }
88
89 // If the gcd is not one, there is no inverse, we returns 0.
90 if (r[i] != 1) return 0;
91
92 // Correct the result so that it is in [0, m). Note that abs(t[i]) is known to
93 // be less than or equal to x / 2, and we have thorough unit-tests.
94 if (t[i] < 0) t[i] += m;
95
96 return t[i];
97}
98
// Returns x mod m in the mathematical sense, i.e. always in [0, m).
int64_t PositiveMod(int64_t x, int64_t m) {
  // C++'s % truncates toward zero, so the remainder carries the sign of x;
  // shift negative remainders up by m to land in [0, m).
  const int64_t remainder = x % m;
  if (remainder < 0) return remainder + m;
  return remainder;
}
103
104int64_t ProductWithModularInverse(int64_t coeff, int64_t mod, int64_t rhs) {
105 DCHECK_NE(coeff, 0);
106 DCHECK_NE(mod, 0);
107
108 mod = std::abs(mod);
109 if (rhs == 0 || mod == 1) return 0;
110 DCHECK_EQ(std::gcd(std::abs(coeff), mod), 1);
111
112 // Make both in [0, mod).
113 coeff = PositiveMod(coeff, mod);
114 rhs = PositiveMod(rhs, mod);
115
116 // From X * coeff % mod = rhs
117 // We deduce that X % mod = rhs * inverse % mod
118 const int64_t inverse = ModularInverse(coeff, mod);
119 CHECK_NE(inverse, 0);
120
121 // We make the operation in 128 bits to be sure not to have any overflow here.
122 const absl::int128 p = absl::int128{inverse} * absl::int128{rhs};
123 return static_cast<int64_t>(p % absl::int128{mod});
124}
125
// Solves a * X + b * Y = cte over the integers. On success, (a, b, cte) are
// divided in place by gcd(|a|, |b|) and one particular solution is written to
// (x0, y0). Returns false when no integer solution exists, i.e. when cte is
// not divisible by the gcd.
bool SolveDiophantineEquationOfSizeTwo(int64_t& a, int64_t& b, int64_t& cte,
                                       int64_t& x0, int64_t& y0) {
  CHECK_NE(a, 0);
  CHECK_NE(b, 0);
  // NOTE(review): two source lines appear to be elided here in this
  // extraction — confirm against the original file.

  // An integer solution exists iff gcd(|a|, |b|) divides cte.
  const int64_t gcd = std::gcd(std::abs(a), std::abs(b));
  if (cte % gcd != 0) return false;
  // Normalize the equation; a and b are coprime afterwards.
  a /= gcd;
  b /= gcd;
  cte /= gcd;

  // The simple case where (0, 0) is a solution.
  if (cte == 0) {
    x0 = y0 = 0;
    return true;
  }

  // We solve a * X + b * Y = cte
  // We take a valid x0 in [0, b) by considering the equation mod b.
  x0 = ProductWithModularInverse(a, b, cte);

  // We choose x0 of the same sign as cte.
  if (cte < 0 && x0 != 0) x0 -= std::abs(b);

  // By plugging X = x0 + b * Z
  // We have a * (x0 + b * Z) + b * Y = cte
  // so a * b * Z + b * Y = cte - a * x0;
  // and y0 = (cte - a * x0) / b (with an exact division by construction).
  const absl::int128 t = absl::int128{cte} - absl::int128{a} * absl::int128{x0};
  DCHECK_EQ(t % absl::int128{b}, absl::int128{0});

  // Overflow-wise, there is two cases for cte > 0:
  // - a * x0 <= cte, in this case y0 will not overflow (<= cte).
  // - a * x0 > cte, in this case y0 will be in (-a, 0].
  const absl::int128 r = t / absl::int128{b};
  // NOTE(review): source lines appear to be elided here (possibly DCHECKs
  // that r fits in int64) — confirm against the original file.

  y0 = static_cast<int64_t>(r);
  return true;
}
169
// TODO(user): Find better implementation? In practice passing via double is
171// almost always correct, but the CapProd() might be a bit slow. However this
172// is only called when we do propagate something.
173int64_t FloorSquareRoot(int64_t a) {
174 int64_t result =
175 static_cast<int64_t>(std::floor(std::sqrt(static_cast<double>(a))));
176 while (CapProd(result, result) > a) --result;
177 while (CapProd(result + 1, result + 1) <= a) ++result;
178 return result;
179}
180
181// TODO(user): Find better implementation?
182int64_t CeilSquareRoot(int64_t a) {
183 int64_t result =
184 static_cast<int64_t>(std::ceil(std::sqrt(static_cast<double>(a))));
185 while (CapProd(result, result) < a) ++result;
186 while ((result - 1) * (result - 1) >= a) --result;
187 return result;
188}
189
// Reorders a suffix of *literals so that already-processed literals come
// before unprocessed ones (hence an unprocessed literal ends up last), and
// returns the size of the untouched prefix. Returns -1 when the vector is
// empty or contains no unprocessed literal in the scanned suffix.
int MoveOneUnprocessedLiteralLast(const std::set<LiteralIndex>& processed,
                                  int relevant_prefix_size,
                                  std::vector<Literal>* literals) {
  if (literals->empty()) return -1;
  // Fast path: the last literal is already unprocessed, nothing to move.
  if (!gtl::ContainsKey(processed, literals->back().Index())) {
    return std::min<int>(relevant_prefix_size, literals->size());
  }

  // To get O(n log n) size of suffixes, we will first process the last n/2
  // literals, we then move all of them first and process the n/2 literals left.
  // We use the same algorithm recursively. The sum of the suffixes' size S(n)
  // is thus S(n/2) + n + S(n/2). That gives us the correct complexity. The code
  // below simulates one step of this algorithm and is made to be "robust" when
  // from one call to the next, some literals have been removed (but the order
  // of literals is preserved).
  //
  // Scan backward until the suffix holds at least as many unprocessed
  // literals as processed ones; target_prefix_size tracks the position of the
  // last unprocessed literal seen.
  int num_processed = 0;
  int num_not_processed = 0;
  int target_prefix_size = literals->size() - 1;
  for (int i = literals->size() - 1; i >= 0; i--) {
    if (gtl::ContainsKey(processed, (*literals)[i].Index())) {
      ++num_processed;
    } else {
      ++num_not_processed;
      target_prefix_size = i;
    }
    if (num_not_processed >= num_processed) break;
  }
  // Every scanned literal was processed: nothing useful to move.
  if (num_not_processed == 0) return -1;
  target_prefix_size = std::min(target_prefix_size, relevant_prefix_size);

  // Once a prefix size has been decided, it is always better to
  // enqueue the literal already processed first.
  std::stable_partition(literals->begin() + target_prefix_size, literals->end(),
                        [&processed](Literal l) {
                          return gtl::ContainsKey(processed, l.Index());
                        });
  return target_prefix_size;
}
228
229void IncrementalAverage::Reset(double reset_value) {
230 num_records_ = 0;
231 average_ = reset_value;
232}
233
234void IncrementalAverage::AddData(double new_record) {
235 num_records_++;
236 average_ += (new_record - average_) / num_records_;
237}
238
239void ExponentialMovingAverage::AddData(double new_record) {
240 num_records_++;
241 average_ = (num_records_ == 1)
242 ? new_record
243 : (new_record + decaying_factor_ * (average_ - new_record));
244}
245
246void Percentile::AddRecord(double record) {
247 records_.push_front(record);
248 if (records_.size() > record_limit_) {
249 records_.pop_back();
250 }
251}
252
253double Percentile::GetPercentile(double percent) {
254 CHECK_GT(records_.size(), 0);
255 CHECK_LE(percent, 100.0);
256 CHECK_GE(percent, 0.0);
257 std::vector<double> sorted_records(records_.begin(), records_.end());
258 std::sort(sorted_records.begin(), sorted_records.end());
259 const int num_records = sorted_records.size();
260
261 const double percentile_rank =
262 static_cast<double>(num_records) * percent / 100.0 - 0.5;
263 if (percentile_rank <= 0) {
264 return sorted_records.front();
265 } else if (percentile_rank >= num_records - 1) {
266 return sorted_records.back();
267 }
268 // Interpolate.
269 DCHECK_GE(num_records, 2);
270 DCHECK_LT(percentile_rank, num_records - 1);
271 const int lower_rank = static_cast<int>(std::floor(percentile_rank));
272 DCHECK_LT(lower_rank, num_records - 1);
273 return sorted_records[lower_rank] +
274 (percentile_rank - lower_rank) *
275 (sorted_records[lower_rank + 1] - sorted_records[lower_rank]);
276}
277
// Compresses a list of tuples in place: whenever, for some column i, a group
// of tuples identical everywhere except column i covers the full domain of
// variable i, the group is replaced by a single tuple with any_value in that
// column.
void CompressTuples(absl::Span<const int64_t> domain_sizes, int64_t any_value,
                    std::vector<std::vector<int64_t>>* tuples) {
  if (tuples->empty()) return;

  // Remove duplicates if any.
  // NOTE(review): the statement implementing this (likely
  // STLSortAndRemoveDuplicates(tuples)) appears to be elided in this
  // extraction — confirm against the original file.

  const int num_vars = (*tuples)[0].size();

  std::vector<int> to_remove;
  std::vector<int64_t> tuple_minus_var_i(num_vars - 1);
  for (int i = 0; i < num_vars; ++i) {
    const int domain_size = domain_sizes[i];
    // A variable with a single value cannot be compressed further.
    if (domain_size == 1) continue;
    // Group tuple indices by their projection on all columns but i.
    absl::flat_hash_map<const std::vector<int64_t>, std::vector<int>>
        masked_tuples_to_indices;
    for (int t = 0; t < tuples->size(); ++t) {
      int out = 0;
      for (int j = 0; j < num_vars; ++j) {
        if (i == j) continue;
        tuple_minus_var_i[out++] = (*tuples)[t][j];
      }
      masked_tuples_to_indices[tuple_minus_var_i].push_back(t);
    }
    to_remove.clear();
    for (const auto& it : masked_tuples_to_indices) {
      // Only groups whose size equals the domain size of variable i (i.e.
      // every value appears, assuming deduplicated tuples) are compressed.
      if (it.second.size() != domain_size) continue;
      // Keep one representative with the "any" marker, drop the others.
      (*tuples)[it.second.front()][i] = any_value;
      to_remove.insert(to_remove.end(), it.second.begin() + 1, it.second.end());
    }
    // Erase by swapping with the back, in decreasing index order so earlier
    // indices in to_remove stay valid.
    std::sort(to_remove.begin(), to_remove.end(), std::greater<int>());
    for (const int t : to_remove) {
      (*tuples)[t] = tuples->back();
      tuples->pop_back();
    }
  }
}
315
316} // namespace sat
317} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:892
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
#define CHECK_GE(val1, val2)
Definition: base/logging.h:706
#define CHECK_GT(val1, val2)
Definition: base/logging.h:707
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
#define CHECK_NE(val1, val2)
Definition: base/logging.h:703
#define DCHECK_LT(val1, val2)
Definition: base/logging.h:893
#define CHECK_LE(val1, val2)
Definition: base/logging.h:704
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
double GetPercentile(double percent)
Definition: sat/util.cc:253
void set_initial_polarity(::operations_research::sat::SatParameters_Polarity value)
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * Polarity_descriptor()
void set_preferred_variable_order(::operations_research::sat::SatParameters_VariableOrder value)
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor * VariableOrder_descriptor()
int64_t b
int64_t a
SatParameters parameters
void STLSortAndRemoveDuplicates(T *v, const LessFunc &less_func)
Definition: stl_util.h:58
bool ContainsKey(const Collection &collection, const Key &key)
Definition: map_util.h:200
void RandomizeDecisionHeuristic(absl::BitGenRef random, SatParameters *parameters)
Definition: sat/util.cc:38
int MoveOneUnprocessedLiteralLast(const std::set< LiteralIndex > &processed, int relevant_prefix_size, std::vector< Literal > *literals)
Definition: sat/util.cc:190
void CompressTuples(absl::Span< const int64_t > domain_sizes, int64_t any_value, std::vector< std::vector< int64_t > > *tuples)
Definition: sat/util.cc:278
int64_t PositiveMod(int64_t x, int64_t m)
Definition: sat/util.cc:99
int64_t CeilSquareRoot(int64_t a)
Definition: sat/util.cc:182
bool SolveDiophantineEquationOfSizeTwo(int64_t &a, int64_t &b, int64_t &cte, int64_t &x0, int64_t &y0)
Definition: sat/util.cc:126
int64_t FloorSquareRoot(int64_t a)
Definition: sat/util.cc:173
int64_t ModularInverse(int64_t x, int64_t m)
Definition: sat/util.cc:66
int64_t ProductWithModularInverse(int64_t coeff, int64_t mod, int64_t rhs)
Definition: sat/util.cc:104
Collection of objects used to extend the Constraint Solver library.
int64_t CapProd(int64_t x, int64_t y)
const double coeff