cleanup; move parallel code from fz to sat

This commit is contained in:
Laurent Perron
2018-07-03 13:01:37 +02:00
parent bd04f656b3
commit 05d9faf4d7
15 changed files with 632 additions and 447 deletions

View File

@@ -29,6 +29,7 @@ BASE_DEPS = \
$(SRC_DIR)/ortools/base/memory.h \
$(SRC_DIR)/ortools/base/murmur.h \
$(SRC_DIR)/ortools/base/mutex.h \
$(SRC_DIR)/ortools/base/notification.h \
$(SRC_DIR)/ortools/base/numbers.h \
$(SRC_DIR)/ortools/base/port.h \
$(SRC_DIR)/ortools/base/protoutil.h \
@@ -63,6 +64,7 @@ BASE_LIB_OBJS = \
$(OBJ_DIR)/base/file.$O \
$(OBJ_DIR)/base/join.$O \
$(OBJ_DIR)/base/mutex.$O \
$(OBJ_DIR)/base/notification.$O \
$(OBJ_DIR)/base/numbers.$O \
$(OBJ_DIR)/base/random.$O \
$(OBJ_DIR)/base/recordio.$O \
@@ -239,6 +241,11 @@ $(OBJ_DIR)/base/mutex.$O: \
$(SRC_DIR)/ortools/base/mutex.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Sbase$Smutex.cc $(OBJ_OUT)$(OBJ_DIR)$Sbase$Smutex.$O
$(OBJ_DIR)/base/notification.$O: \
$(SRC_DIR)/ortools/base/notification.cc \
$(SRC_DIR)/ortools/base/notification.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Sortools$Sbase$Snotification.cc $(OBJ_OUT)$(OBJ_DIR)$Sbase$Snotification.$O
$(OBJ_DIR)/base/numbers.$O: \
$(SRC_DIR)/ortools/base/numbers.cc \
$(SRC_DIR)/ortools/base/numbers.h
@@ -789,6 +796,7 @@ $(SRC_DIR)/ortools/lp_data/mps_reader.h: \
$(SRC_DIR)/ortools/lp_data/lp_types.h
$(SRC_DIR)/ortools/lp_data/permutation.h: \
$(SRC_DIR)/ortools/base/random.h \
$(SRC_DIR)/ortools/lp_data/lp_types.h \
$(SRC_DIR)/ortools/util/return_macros.h
@@ -2133,6 +2141,7 @@ $(OBJ_DIR)/sat/cp_model_presolve.$O: \
$(OBJ_DIR)/sat/cp_model_search.$O: \
$(SRC_DIR)/ortools/sat/cp_model_search.cc \
$(SRC_DIR)/ortools/base/stringprintf.h \
$(SRC_DIR)/ortools/sat/cp_model_search.h \
$(SRC_DIR)/ortools/sat/cp_model_utils.h \
$(SRC_DIR)/ortools/sat/util.h
@@ -2140,6 +2149,7 @@ $(OBJ_DIR)/sat/cp_model_search.$O: \
$(OBJ_DIR)/sat/cp_model_solver.$O: \
$(SRC_DIR)/ortools/sat/cp_model_solver.cc \
$(SRC_DIR)/ortools/base/cleanup.h \
$(SRC_DIR)/ortools/base/commandlineflags.h \
$(SRC_DIR)/ortools/base/int_type.h \
$(SRC_DIR)/ortools/base/int_type_indexed_vector.h \
@@ -2148,7 +2158,9 @@ $(OBJ_DIR)/sat/cp_model_solver.$O: \
$(SRC_DIR)/ortools/base/logging.h \
$(SRC_DIR)/ortools/base/map_util.h \
$(SRC_DIR)/ortools/base/memory.h \
$(SRC_DIR)/ortools/base/notification.h \
$(SRC_DIR)/ortools/base/stl_util.h \
$(SRC_DIR)/ortools/base/stringprintf.h \
$(SRC_DIR)/ortools/base/timer.h \
$(SRC_DIR)/ortools/graph/connectivity.h \
$(SRC_DIR)/ortools/port/proto_utils.h \

View File

@@ -0,0 +1,49 @@
// Copyright 2010-2017 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/base/notification.h"
#include <atomic>
#include <condition_variable> // NOLINT
#include <mutex> // NOLINT
namespace absl {
// Flips the "notified" flag to true and wakes every thread currently blocked
// in WaitForNotification(). Both the store and the broadcast happen while
// holding mutex_, so a waiter that checked the flag under the same mutex
// cannot miss the wake-up.
void Notification::Notify() {
  std::lock_guard<std::mutex> guard(mutex_);
  notified_yet_.store(true, std::memory_order_release);
  condition_.notify_all();
}
// Blocks until any in-flight Notify() has released mutex_, guaranteeing the
// notifying thread no longer touches this object once destruction proceeds.
Notification::~Notification() {
  std::lock_guard<std::mutex> guard(mutex_);
}
// Lock-free read of the notification flag. The acquire load pairs with the
// release store done in Notification::Notify().
static inline bool HasBeenNotifiedInternal(
    const std::atomic<bool> *notification_flag) {
  return notification_flag->load(std::memory_order_acquire);
}
bool Notification::HasBeenNotified() const {
return HasBeenNotifiedInternal(&this->notified_yet_);
}
void Notification::WaitForNotification() {
if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
std::unique_lock<std::mutex> mutex_lock(mutex_);
condition_.wait(mutex_lock);
}
}
} // namespace absl

View File

@@ -0,0 +1,62 @@
// Copyright 2010-2017 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BASE_NOTIFICATION_H_
#define OR_TOOLS_BASE_NOTIFICATION_H_
#include <atomic>
#include <condition_variable> // NOLINT
#include <mutex> // NOLINT
namespace absl {
// -----------------------------------------------------------------------------
// Notification
// -----------------------------------------------------------------------------
// A simple one-shot notification: one thread calls Notify() exactly once,
// any number of threads may call WaitForNotification() / HasBeenNotified().
// All member functions are safe to call concurrently with each other.
class Notification {
 public:
  // Initializes the "notified" state to unnotified.
  Notification() : notified_yet_(false) {}
  Notification(const Notification&) = delete;
  Notification& operator=(const Notification&) = delete;
  // Blocks until any concurrently-running Notify() call has finished, so the
  // notifying thread never touches a destroyed object.
  ~Notification();

  // Notification::HasBeenNotified()
  //
  // Returns the value of the notification's internal "notified" state.
  bool HasBeenNotified() const;

  // Notification::WaitForNotification()
  //
  // Blocks the calling thread until the notification's "notified" state is
  // `true`. Note that if `Notify()` has been previously called on this
  // notification, this function will immediately return.
  void WaitForNotification();

  // Notification::Notify()
  //
  // Sets the "notified" state of this notification to `true` and wakes waiting
  // threads. Note: do not call `Notify()` multiple times on the same
  // `Notification`; calling `Notify()` more than once on the same notification
  // results in undefined behavior.
  void Notify();

 private:
  std::mutex mutex_;                // guards condition_ and the flag store
  std::condition_variable condition_;
  std::atomic<bool> notified_yet_;  // written under mutex_
};
} // namespace absl
#endif // OR_TOOLS_BASE_NOTIFICATION_H_

View File

@@ -21,9 +21,7 @@
#if defined(__APPLE__) && defined(__GNUC__)
#include <mach/mach_time.h>
#endif
#include <chrono> // NOLINT
#include <ctime>
#include <thread> // NOLINT
namespace absl {
@@ -51,10 +49,4 @@ int64 GetCurrentTimeNanos() {
#endif
}
// Sleeps for the given Duration (seconds, see ToDoubleSeconds() which treats
// Duration as a double). Nonpositive durations return immediately.
void SleepFor(Duration duration) {
  // Hand the double-seconds value to chrono directly instead of converting
  // through an int millisecond count: the old cast truncated sub-millisecond
  // durations to zero and could overflow `int` for very long sleeps.
  std::this_thread::sleep_for(
      std::chrono::duration<double>(duration));  // NOLINT
}
} // namespace absl

View File

@@ -47,15 +47,6 @@ inline double ToDoubleSeconds(Duration x) { return x; }
inline int64 ToInt64Milliseconds(Duration x) { return x * 1e3; }
inline Duration ZeroDuration() { return Duration(0); }
// SleepFor()
//
// Sleeps for the specified duration, expressed as an `absl::Duration`.
//
// Notes:
// * Signal interruptions will not reduce the sleep duration.
// * Returns immediately when passed a nonpositive duration.
void SleepFor(absl::Duration duration);
} // namespace absl
// Temporary support for the legacy "base::" namespace

View File

@@ -810,36 +810,6 @@ void LogInFlatzincFormat(const std::string& multi_line_input) {
}
}
// Runs one SAT search over `cp_model` with the given `parameters` inside a
// fresh sat Model, and returns the solver response. Used both for the
// sequential search and as the body of each parallel worker.
//
// - `solution_observer`, if non-null, is invoked on every feasible solution
//   found during the search.
// - `solution_synchronization` / `objective_synchronization`, if non-null,
//   are installed on the model so this worker can import solutions/bounds
//   shared by other workers.
// - `stopped` is registered as an external boolean limit: the search aborts
//   once the pointee becomes true. Must outlive the call.
// NOTE(review): `p` and `worker_id` are not used in this body — presumably
// kept so all call sites share one signature; confirm before removing.
CpSolverResponse WorkerSearch(
    const CpModelProto& cp_model, const SatParameters& parameters,
    const fz::FlatzincParameters& p,
    const std::function<void(const CpSolverResponse&)>& solution_observer,
    std::function<CpSolverResponse()> solution_synchronization,
    std::function<double()> objective_synchronization, int worker_id,
    bool* stopped) {
  Model sat_model;
  sat_model.Add(NewSatParameters(parameters));
  sat_model.GetOrCreate<TimeLimit>()->RegisterExternalBooleanAsLimit(stopped);

  // Add solution observer.
  if (solution_observer != nullptr) {
    sat_model.Add(NewFeasibleSolutionObserver(solution_observer));
  }
  if (solution_synchronization != nullptr) {
    SetSynchronizationFunction(std::move(solution_synchronization), &sat_model);
  }
  if (objective_synchronization != nullptr) {
    SetObjectiveSynchronizationFunction(std::move(objective_synchronization),
                                        &sat_model);
  }

  // Solve.
  const CpSolverResponse response = SolveCpModel(cp_model, &sat_model);

  // Report the solution found.
  return response;
}
} // namespace
void SolveFzWithCpModelProto(const fz::Model& fz_model,
@@ -935,8 +905,14 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model,
if (p.time_limit_in_ms > 0) {
m.parameters.set_max_time_in_seconds(p.time_limit_in_ms * 1e-3);
}
if (p.threads > 0) {
m.parameters.set_num_search_workers(p.threads);
// We don't support enumerating all solution in parallel for a SAT problem.
// But note that we do support it for an optimization problem since the
// meaning of p.all_solutions is not the same in this case.
if (p.all_solutions && fz_model.objective() == nullptr) {
m.parameters.set_num_search_workers(1);
} else {
m.parameters.set_num_search_workers(std::max(1, p.threads));
}
// The order is important, we want the flag parameters to overwrite anything
@@ -947,228 +923,63 @@ void SolveFzWithCpModelProto(const fz::Model& fz_model,
<< sat_params;
m.parameters.MergeFrom(flag_parameters);
const int num_search_workers = std::max(1, p.threads);
bool stopped = false;
SigintHandler handler;
handler.Register([&stopped]() { stopped = true; });
CpSolverResponse best_response;
if (fz_model.objective() != nullptr) {
const double kInfinity = std::numeric_limits<double>::infinity();
if (fz_model.maximize()) {
best_response.set_objective_value(-kInfinity);
best_response.set_best_objective_bound(kInfinity);
} else {
best_response.set_objective_value(kInfinity);
best_response.set_best_objective_bound(-kInfinity);
}
}
int solution_count = 1; // Start at 1 as in the sat solver output.
// Solve the problem.
if (!p.all_solutions && fz_model.objective() == nullptr && p.threads > 1) {
// SAT problem solved in parallel, without solution enumeration.
absl::Mutex mutex;
FZLOG << "Starting parallel search with " << num_search_workers
<< " workers" << FZENDL;
ThreadPool pool("Parallel_FlatZinc_sat", num_search_workers);
pool.StartWorkers();
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
std::string worker_name;
const SatParameters local_params = DiversifySearchParameters(
m.parameters, m.proto, worker_id, &worker_name);
pool.Schedule([&fz_model, &m, &p, &stopped, local_params, worker_id,
&best_response, &mutex]() {
const CpSolverResponse local_response =
WorkerSearch(m.proto, local_params, p, nullptr, nullptr, nullptr,
worker_id, &stopped);
absl::MutexLock lock(&mutex);
if (best_response.status() == CpSolverStatus::UNKNOWN) {
best_response = local_response;
}
if (local_response.status() != CpSolverStatus::UNKNOWN) {
CHECK_EQ(local_response.status(), best_response.status());
stopped = true;
}
});
}
} else if (fz_model.objective() != nullptr && p.threads > 1) {
// Optimization problem solved in parallel.
absl::Mutex mutex;
const bool maximize = fz_model.maximize();
FZLOG << "Starting parallel search with " << num_search_workers
<< " workers" << FZENDL;
{
ThreadPool pool("Parallel_FlatZinc_sat", num_search_workers);
pool.StartWorkers();
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
const auto solution_synchronization = [&mutex, &best_response]() {
absl::MutexLock lock(&mutex);
return best_response;
};
const auto objective_synchronization = [&mutex, &best_response]() {
absl::MutexLock lock(&mutex);
if (best_response.status() == CpSolverStatus::FEASIBLE) {
return best_response.objective_value();
} else {
return std::numeric_limits<double>::infinity();
}
};
std::string worker_name;
const SatParameters local_params = DiversifySearchParameters(
m.parameters, m.proto, worker_id, &worker_name);
const auto solution_observer = [maximize, &solution_count, worker_id,
&mutex, &p, &fz_model, &m,
&best_response, &timer, worker_name](
const CpSolverResponse& r) {
absl::MutexLock lock(&mutex);
// Check if the new solution actually improves upon the best
// solution found so far.
if (MergeOptimizationSolution(r, maximize, &best_response)) {
if (p.all_solutions) {
if (maximize) {
FZLOG << absl::StrFormat(
"#%-5i %-7s %8.2fs [%0.0f, %0.0f] %s",
solution_count++, worker_name.c_str(), timer.Get(),
best_response.objective_value(),
best_response.best_objective_bound(),
r.solution_info().c_str())
<< FZENDL;
} else {
FZLOG << absl::StrFormat(
"#%-5i %-7s %8.2fs [%0.0f, %0.0f] %s",
solution_count++, worker_name.c_str(), timer.Get(),
best_response.best_objective_bound(),
best_response.objective_value(),
r.solution_info().c_str())
<< FZENDL;
}
if (FLAGS_use_flatzinc_format) {
const std::string solution_string =
SolutionString(fz_model, [&m, &r](fz::IntegerVariable* v) {
return r.solution(gtl::FindOrDie(m.fz_var_to_index, v));
});
std::cout << solution_string << std::endl;
}
}
}
};
pool.Schedule([&fz_model, &m, &p, solution_observer,
solution_synchronization, objective_synchronization,
&stopped, local_params, worker_id, &mutex,
&best_response, maximize, worker_name]() {
const CpSolverResponse local_response =
WorkerSearch(m.proto, local_params, p, solution_observer,
solution_synchronization, objective_synchronization,
worker_id, &stopped);
// Process final solution. Decide which worker has the 'best'
// solution. Note that the solution observer may or may not have been
// called.
//
// TODO(user): If the solution is SAT, we can pull the best lower
// bound.
absl::MutexLock lock(&mutex);
FZVLOG << "Worker " << worker_id << " terminates with status "
<< ProtoEnumToString<CpSolverStatus>(local_response.status())
<< " and an objective value of "
<< local_response.objective_value() << FZENDL;
MergeOptimizationSolution(local_response, maximize, &best_response);
if (best_response.status() == CpSolverStatus::OPTIMAL ||
best_response.status() == CpSolverStatus::INFEASIBLE) {
stopped = true;
}
});
}
}
// Fix wall time.
best_response.set_wall_time(timer.Get());
} else if (p.all_solutions) { // Sequential case with observer.
const bool maximize = fz_model.maximize();
auto solution_observer = [&fz_model, &solution_count, &m, maximize,
&timer](const CpSolverResponse& r) {
if (fz_model.objective() == nullptr) {
FZLOG << absl::StrFormat(
"#%-5i %8.2fs %s",
solution_count++, timer.Get(), r.solution_info().c_str()) << FZENDL;
} else {
if (maximize) {
FZLOG << absl::StrFormat(
"#%-5i %8.2fs [%0.0f, %0.0f] %s",
solution_count++, timer.Get(),
r.objective_value(),
r.best_objective_bound(),
r.solution_info().c_str())
<< FZENDL;
} else {
FZLOG << absl::StrFormat(
"#%-5i %8.2fs [%0.0f, %0.0f] %s",
solution_count++, timer.Get(),
r.best_objective_bound(),
r.objective_value(),
r.solution_info().c_str())
<< FZENDL;
}
}
if (FLAGS_use_flatzinc_format) {
const std::string solution_string =
SolutionString(fz_model, [&m, &r](fz::IntegerVariable* v) {
return r.solution(gtl::FindOrDie(m.fz_var_to_index, v));
});
std::cout << solution_string << std::endl;
}
// We only need an observer if 'p.all_solutions' is true.
std::function<void(const CpSolverResponse&)> solution_observer = nullptr;
if (p.all_solutions && FLAGS_use_flatzinc_format) {
solution_observer = [&fz_model, &m](const CpSolverResponse& r) {
const std::string solution_string =
SolutionString(fz_model, [&m, &r](fz::IntegerVariable* v) {
return r.solution(gtl::FindOrDie(m.fz_var_to_index, v));
});
std::cout << solution_string << std::endl;
};
best_response = WorkerSearch(m.proto, m.parameters, p, solution_observer,
nullptr, nullptr, 0, &stopped);
} else { // Sequential case, no observer.
best_response = WorkerSearch(m.proto, m.parameters, p, nullptr, nullptr,
nullptr, 0, &stopped);
}
Model sat_model;
sat_model.Add(NewSatParameters(m.parameters));
sat_model.GetOrCreate<TimeLimit>()->RegisterExternalBooleanAsLimit(&stopped);
if (solution_observer != nullptr) {
sat_model.Add(NewFeasibleSolutionObserver(solution_observer));
}
const CpSolverResponse response = SolveCpModel(m.proto, &sat_model);
// Check the returned solution with the fz model checker.
if (best_response.status() == CpSolverStatus::FEASIBLE ||
best_response.status() == CpSolverStatus::OPTIMAL) {
CHECK(CheckSolution(fz_model, [&best_response, &m](fz::IntegerVariable* v) {
return best_response.solution(gtl::FindOrDie(m.fz_var_to_index, v));
if (response.status() == CpSolverStatus::FEASIBLE ||
response.status() == CpSolverStatus::OPTIMAL) {
CHECK(CheckSolution(fz_model, [&response, &m](fz::IntegerVariable* v) {
return response.solution(gtl::FindOrDie(m.fz_var_to_index, v));
}));
}
// Output the solution in the flatzinc official format.
if (FLAGS_use_flatzinc_format) {
if (best_response.status() == CpSolverStatus::FEASIBLE ||
best_response.status() == CpSolverStatus::OPTIMAL) {
if (response.status() == CpSolverStatus::FEASIBLE ||
response.status() == CpSolverStatus::OPTIMAL) {
if (!p.all_solutions) { // Already printed otherwise.
const std::string solution_string = SolutionString(
fz_model, [&best_response, &m](fz::IntegerVariable* v) {
return best_response.solution(
gtl::FindOrDie(m.fz_var_to_index, v));
const std::string solution_string =
SolutionString(fz_model, [&response, &m](fz::IntegerVariable* v) {
return response.solution(gtl::FindOrDie(m.fz_var_to_index, v));
});
std::cout << solution_string << std::endl;
}
if (best_response.status() == CpSolverStatus::OPTIMAL ||
best_response.all_solutions_were_found()) {
if (response.status() == CpSolverStatus::OPTIMAL ||
response.all_solutions_were_found()) {
std::cout << "==========" << std::endl;
}
} else if (best_response.status() == CpSolverStatus::INFEASIBLE) {
} else if (response.status() == CpSolverStatus::INFEASIBLE) {
std::cout << "=====UNSATISFIABLE=====" << std::endl;
} else {
std::cout << "%% TIMEOUT" << std::endl;
}
if (p.statistics) {
LogInFlatzincFormat(CpSolverResponseStats(best_response));
LogInFlatzincFormat(CpSolverResponseStats(response));
}
} else {
LOG(INFO) << CpSolverResponseStats(best_response);
LOG(INFO) << CpSolverResponseStats(response);
}
}

View File

@@ -158,7 +158,7 @@ class MatrixNonZeroPattern {
// Returns the set of non-zeros of the given row (unsorted).
// Call RemoveDeletedColumnsFromRow(row) to clean the row first.
// This is only valid for the row indices still in the residual matrix.
const std::vector<ColIndex>& RowNonZero(RowIndex row) const {
const absl::InlinedVector<ColIndex, 6>& RowNonZero(RowIndex row) const {
return row_non_zero_[row];
}
@@ -174,7 +174,14 @@ class MatrixNonZeroPattern {
// non-sorted version. Investigate more.
void MergeIntoSorted(RowIndex pivot_row, RowIndex row);
gtl::ITIVector<RowIndex, std::vector<ColIndex>> row_non_zero_;
// Using InlinedVector helps because we usually have many rows with just a few
// non-zeros. Note that on a 64-bit computer we get exactly 6 inlined int32
// elements with no extra space, and the inlined vector itself occupies 4
// times 64 bits.
//
// TODO(user): We could be even more efficient since an int32 size field is
// enough for us, and the inlined/not-inlined representations could share it.
gtl::ITIVector<RowIndex, absl::InlinedVector<ColIndex, 6>> row_non_zero_;
StrictITIVector<RowIndex, int32> row_degree_;
StrictITIVector<ColIndex, int32> col_degree_;
DenseBooleanRow deleted_columns_;

View File

@@ -66,20 +66,19 @@
// - OutgoingArcs(node): All the forward arcs leaving the node.
// - IncomingArcs(node): All the forward arcs arriving at the node.
//
// And a more involved one:
// And two more involved ones:
//
// - OutgoingOrOppositeIncomingArcs(node): This returns both the forward arcs
// leaving the node (i.e. OutgoingArcs(node)) and the reverse arcs leaving the
// node (i.e. the opposite arcs of the ones returned by IncomingArcs(node)).
// - OppositeIncomingArcs(node): This returns the reverse arcs leaving the node.
//
// Note on iteration efficiency: When re-indexing the arcs it is not possible to
// have both the outgoing arcs and the incoming ones form a consecutive range.
//
// It is however possible to do so for the outgoing arcs and the opposite
// incoming arcs. It is why the OutgoingOrOppositeIncomingArcs() and
// OutgoingArcs() iterations are more efficient than the IncomingArcs() one. If
// needed, we can add a OppositeIncomingArcs() function to quickly iterate on
// the opposite incoming arcs (and their heads).
// OutgoingArcs() iterations are more efficient than the IncomingArcs() one.
//
// If you know the graph size in advance, this already set the number of nodes,
// reserve space for the arcs and check in DEBUG mode that you don't go over the
@@ -482,6 +481,7 @@ class ReverseArcListGraph
// Do not use directly. See instead the arc iteration functions below.
class OutgoingOrOppositeIncomingArcIterator;
class OppositeIncomingArcIterator;
class IncomingArcIterator;
class OutgoingArcIterator;
class OutgoingHeadIterator;
@@ -500,6 +500,8 @@ class ReverseArcListGraph
BeginEndWrapper<IncomingArcIterator> IncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcs(
NodeIndexType node) const;
BeginEndWrapper<OutgoingArcIterator> OutgoingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
BeginEndWrapper<IncomingArcIterator> IncomingArcsStartingFrom(
@@ -507,6 +509,8 @@ class ReverseArcListGraph
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcsStartingFrom(NodeIndexType node,
ArcIndexType from) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
// This loops over the heads of the OutgoingArcs(node). It is just a more
// convenient way to achieve this. Moreover this interface is used by some
@@ -562,6 +566,7 @@ class ReverseArcStaticGraph
// Deprecated.
class OutgoingOrOppositeIncomingArcIterator;
class OppositeIncomingArcIterator;
class IncomingArcIterator;
class OutgoingArcIterator;
@@ -573,6 +578,8 @@ class ReverseArcStaticGraph
BeginEndWrapper<IncomingArcIterator> IncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcs(
NodeIndexType node) const;
BeginEndWrapper<OutgoingArcIterator> OutgoingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
BeginEndWrapper<IncomingArcIterator> IncomingArcsStartingFrom(
@@ -580,6 +587,8 @@ class ReverseArcStaticGraph
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcsStartingFrom(NodeIndexType node,
ArcIndexType from) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
// This loops over the heads of the OutgoingArcs(node). It is just a more
// convenient way to achieve this. Moreover this interface is used by some
@@ -645,6 +654,7 @@ class ReverseArcMixedGraph
// Deprecated.
class OutgoingOrOppositeIncomingArcIterator;
class OppositeIncomingArcIterator;
class IncomingArcIterator;
class OutgoingArcIterator;
@@ -655,6 +665,8 @@ class ReverseArcMixedGraph
BeginEndWrapper<IncomingArcIterator> IncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcs(NodeIndexType node) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcs(
NodeIndexType node) const;
BeginEndWrapper<OutgoingArcIterator> OutgoingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
BeginEndWrapper<IncomingArcIterator> IncomingArcsStartingFrom(
@@ -662,6 +674,8 @@ class ReverseArcMixedGraph
BeginEndWrapper<OutgoingOrOppositeIncomingArcIterator>
OutgoingOrOppositeIncomingArcsStartingFrom(NodeIndexType node,
ArcIndexType from) const;
BeginEndWrapper<OppositeIncomingArcIterator> OppositeIncomingArcsStartingFrom(
NodeIndexType node, ArcIndexType from) const;
// This loops over the heads of the OutgoingArcs(node). It is just a more
// convenient way to achieve this. Moreover this interface is used by some
@@ -842,10 +856,14 @@ class SVector {
T* new_storage = static_cast<T*>(malloc(2LL * new_capacity * sizeof(T)));
CHECK(new_storage != nullptr);
T* new_base = new_storage + new_capacity;
std::move(base_ - size_, base_ + size_, new_base - size_);
if (capacity_ > 0) {
free(base_ - capacity_);
// TODO(user): in C++17 we could use std::uninitialized_move instead
// of this loop.
for (int i = -size_; i < size_; ++i) {
new (new_base + i) T(std::move(base_[i]));
}
int saved_size = size_;
clear_and_dealloc();
size_ = saved_size;
base_ = new_base;
capacity_ = new_capacity;
}
@@ -1035,7 +1053,8 @@ void BaseGraph<NodeIndexType, ArcIndexType, HasReverseArcs>::
// The parameters are:
// - c: the class name.
// - t: the iteration type (Outgoing, Incoming or OutgoingOrOppositeIncoming).
// - t: the iteration type (Outgoing, Incoming, OutgoingOrOppositeIncoming
// or OppositeIncoming).
// - e: the "end" ArcIndexType.
#define DEFINE_RANGE_BASED_ARC_ITERATION(c, t, e) \
template <typename NodeIndexType, typename ArcIndexType> \
@@ -1061,13 +1080,13 @@ void BaseGraph<NodeIndexType, ArcIndexType, HasReverseArcs>::
using reference = const ArcIndexType&; \
using value_type = ArcIndexType; \
bool operator!=(const iterator_class_name& other) const { \
return index_ != other.index_; \
return this->index_ != other.index_; \
} \
bool operator==(const iterator_class_name& other) const { \
return index_ == other.index_; \
return this->index_ == other.index_; \
} \
ArcIndexType operator*() const { return Index(); } \
void operator++() { Next(); }
ArcIndexType operator*() const { return this->Index(); } \
void operator++() { this->Next(); }
// ListGraph implementation ----------------------------------------------------
@@ -1402,6 +1421,8 @@ DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcListGraph, Outgoing, Base::kNilArc);
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcListGraph, Incoming, Base::kNilArc);
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcListGraph,
OutgoingOrOppositeIncoming, Base::kNilArc);
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcListGraph, OppositeIncoming,
Base::kNilArc);
template <typename NodeIndexType, typename ArcIndexType>
BeginEndWrapper<typename ReverseArcListGraph<
@@ -1425,7 +1446,7 @@ template <typename NodeIndexType, typename ArcIndexType>
ArcIndexType ReverseArcListGraph<NodeIndexType, ArcIndexType>::InDegree(
NodeIndexType node) const {
ArcIndexType degree(0);
for (auto arc ATTRIBUTE_UNUSED : IncomingArcs(node)) ++degree;
for (auto arc ATTRIBUTE_UNUSED : OppositeIncomingArcs(node)) ++degree;
return degree;
}
@@ -1528,37 +1549,58 @@ class ReverseArcListGraph<NodeIndexType, ArcIndexType>::OutgoingArcIterator {
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcListGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator {
class ReverseArcListGraph<NodeIndexType,
ArcIndexType>::OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node)
OppositeIncomingArcIterator(const ReverseArcListGraph& graph,
NodeIndexType node)
: graph_(graph), index_(graph.reverse_start_[node]) {
DCHECK(graph.IsNodeValid(node));
}
IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node,
ArcIndexType arc)
: graph_(graph),
index_(arc == Base::kNilArc ? Base::kNilArc : graph.OppositeArc(arc)) {
OppositeIncomingArcIterator(const ReverseArcListGraph& graph,
NodeIndexType node, ArcIndexType arc)
: graph_(graph), index_(arc) {
DCHECK(graph.IsNodeValid(node));
DCHECK(arc == Base::kNilArc || arc >= 0);
DCHECK(arc == Base::kNilArc || graph.Head(arc) == node);
DCHECK(arc == Base::kNilArc || arc < 0);
DCHECK(arc == Base::kNilArc || graph.Tail(arc) == node);
}
bool Ok() const { return index_ != Base::kNilArc; }
ArcIndexType Index() const {
return index_ == Base::kNilArc ? Base::kNilArc : graph_.OppositeArc(index_);
}
ArcIndexType Index() const { return index_; }
void Next() {
DCHECK(Ok());
index_ = graph_.next_[index_];
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
DEFINE_STL_ITERATOR_FUNCTIONS(OppositeIncomingArcIterator);
private:
protected:
const ReverseArcListGraph& graph_;
ArcIndexType index_;
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcListGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator
: public OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node)
: OppositeIncomingArcIterator(graph, node) {}
IncomingArcIterator(const ReverseArcListGraph& graph, NodeIndexType node,
ArcIndexType arc)
: OppositeIncomingArcIterator(
graph, node,
arc == Base::kNilArc ? Base::kNilArc : graph.OppositeArc(arc)) {}
// We overwrite OppositeIncomingArcIterator::Index() here.
ArcIndexType Index() const {
return this->index_ == Base::kNilArc
? Base::kNilArc
: this->graph_.OppositeArc(this->index_);
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcListGraph<NodeIndexType,
ArcIndexType>::OutgoingOrOppositeIncomingArcIterator {
@@ -1635,6 +1677,8 @@ DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph, Incoming,
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph,
OutgoingOrOppositeIncoming,
DirectArcLimit(node));
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcStaticGraph, OppositeIncoming,
ReverseArcLimit(node));
template <typename NodeIndexType, typename ArcIndexType>
ArcIndexType ReverseArcStaticGraph<NodeIndexType, ArcIndexType>::OutDegree(
@@ -1782,42 +1826,62 @@ class ReverseArcStaticGraph<NodeIndexType, ArcIndexType>::OutgoingArcIterator {
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcStaticGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator {
class ReverseArcStaticGraph<NodeIndexType,
ArcIndexType>::OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node)
OppositeIncomingArcIterator(const ReverseArcStaticGraph& graph,
NodeIndexType node)
: graph_(graph),
limit_(graph.ReverseArcLimit(node)),
index_(graph.reverse_start_[node]) {
DCHECK(graph.IsNodeValid(node));
DCHECK_LE(index_, limit_);
}
IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node,
ArcIndexType arc)
: graph_(graph),
limit_(graph.ReverseArcLimit(node)),
index_(arc == limit_ ? limit_ : graph_.OppositeArc(arc)) {
OppositeIncomingArcIterator(const ReverseArcStaticGraph& graph,
NodeIndexType node, ArcIndexType arc)
: graph_(graph), limit_(graph.ReverseArcLimit(node)), index_(arc) {
DCHECK(graph.IsNodeValid(node));
DCHECK_GE(index_, graph.reverse_start_[node]);
DCHECK_LE(index_, limit_);
}
bool Ok() const { return index_ < limit_; }
ArcIndexType Index() const {
return index_ == limit_ ? limit_ : graph_.OppositeArc(index_);
}
ArcIndexType Index() const { return index_; }
void Next() {
DCHECK(Ok());
index_++;
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
DEFINE_STL_ITERATOR_FUNCTIONS(OppositeIncomingArcIterator);
private:
protected:
const ReverseArcStaticGraph& graph_;
const ArcIndexType limit_;
ArcIndexType index_;
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcStaticGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator
: public OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node)
: OppositeIncomingArcIterator(graph, node) {}
IncomingArcIterator(const ReverseArcStaticGraph& graph, NodeIndexType node,
ArcIndexType arc)
: OppositeIncomingArcIterator(graph, node,
arc == graph.ReverseArcLimit(node)
? graph.ReverseArcLimit(node)
: graph.OppositeArc(arc)) {}
ArcIndexType Index() const {
return this->index_ == this->limit_
? this->limit_
: this->graph_.OppositeArc(this->index_);
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcStaticGraph<
NodeIndexType, ArcIndexType>::OutgoingOrOppositeIncomingArcIterator {
@@ -1870,6 +1934,8 @@ DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcMixedGraph, Incoming, Base::kNilArc);
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcMixedGraph,
OutgoingOrOppositeIncoming,
DirectArcLimit(node));
DEFINE_RANGE_BASED_ARC_ITERATION(ReverseArcMixedGraph, OppositeIncoming,
Base::kNilArc);
template <typename NodeIndexType, typename ArcIndexType>
ArcIndexType ReverseArcMixedGraph<NodeIndexType, ArcIndexType>::OutDegree(
@@ -1881,7 +1947,7 @@ template <typename NodeIndexType, typename ArcIndexType>
ArcIndexType ReverseArcMixedGraph<NodeIndexType, ArcIndexType>::InDegree(
NodeIndexType node) const {
ArcIndexType degree(0);
for (auto arc ATTRIBUTE_UNUSED : IncomingArcs(node)) ++degree;
for (auto arc ATTRIBUTE_UNUSED : OppositeIncomingArcs(node)) ++degree;
return degree;
}
@@ -2000,40 +2066,57 @@ class ReverseArcMixedGraph<NodeIndexType, ArcIndexType>::OutgoingArcIterator {
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcMixedGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator {
class ReverseArcMixedGraph<NodeIndexType,
ArcIndexType>::OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node)
OppositeIncomingArcIterator(const ReverseArcMixedGraph& graph,
NodeIndexType node)
: graph_(&graph) {
DCHECK(graph.is_built_);
DCHECK(graph.IsNodeValid(node));
index_ = graph.reverse_start_[node];
}
IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node,
ArcIndexType arc)
: graph_(&graph) {
OppositeIncomingArcIterator(const ReverseArcMixedGraph& graph,
NodeIndexType node, ArcIndexType arc)
: graph_(&graph), index_(arc) {
DCHECK(graph.is_built_);
DCHECK(graph.IsNodeValid(node));
DCHECK(arc == Base::kNilArc || arc >= 0);
DCHECK(arc == Base::kNilArc || graph.Head(arc) == node);
index_ = arc == Base::kNilArc ? arc : graph.OppositeArc(arc);
DCHECK(arc == Base::kNilArc || arc < 0);
DCHECK(arc == Base::kNilArc || graph.Tail(arc) == node);
}
bool Ok() const { return index_ != Base::kNilArc; }
ArcIndexType Index() const {
return index_ == Base::kNilArc ? Base::kNilArc
: graph_->OppositeArc(index_);
}
ArcIndexType Index() const { return index_; }
void Next() {
DCHECK(Ok());
index_ = graph_->next_[~index_];
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
DEFINE_STL_ITERATOR_FUNCTIONS(OppositeIncomingArcIterator);
private:
protected:
const ReverseArcMixedGraph* graph_;
ArcIndexType index_;
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcMixedGraph<NodeIndexType, ArcIndexType>::IncomingArcIterator
: public OppositeIncomingArcIterator {
public:
IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node)
: OppositeIncomingArcIterator(graph, node) {}
IncomingArcIterator(const ReverseArcMixedGraph& graph, NodeIndexType node,
ArcIndexType arc)
: OppositeIncomingArcIterator(
graph, node, arc == Base::kNilArc ? arc : graph.OppositeArc(arc)) {}
ArcIndexType Index() const {
return this->index_ == Base::kNilArc
? Base::kNilArc
: this->graph_->OppositeArc(this->index_);
}
DEFINE_STL_ITERATOR_FUNCTIONS(IncomingArcIterator);
};
template <typename NodeIndexType, typename ArcIndexType>
class ReverseArcMixedGraph<
NodeIndexType, ArcIndexType>::OutgoingOrOppositeIncomingArcIterator {

View File

@@ -241,10 +241,10 @@ inline std::ostream& operator<<(std::ostream& os, ConstraintStatus status) {
// Returns the ConstraintStatus corresponding to a given VariableStatus.
ConstraintStatus VariableToConstraintStatus(VariableStatus status);
// Wrapper around an gtl::ITIVector to allow (and enforce) creation/resize/assign
// Wrapper around an ITIVector to allow (and enforce) creation/resize/assign
// to use the index type for the size.
//
// TODO(user): This should probably move into gtl::ITIVector, but note that this
// TODO(user): This should probably move into ITIVector, but note that this
// version is more strict and does not allow any other size types.
template <typename IntType, typename T>
class StrictITIVector : public gtl::ITIVector<IntType, T> {

View File

@@ -143,7 +143,7 @@ void Permutation<IndexType>::PopulateFromIdentity() {
template <typename IndexType>
void Permutation<IndexType>::PopulateRandomly() {
PopulateFromIdentity();
std::random_shuffle(perm_.begin(), perm_.end());
std::shuffle(perm_.begin(), perm_.end());
}
template <typename IndexType>

View File

@@ -350,8 +350,8 @@ class CompactSparseMatrix {
// const RowIndex row = compact_matrix_.EntryRow(i);
// const Fractional coefficient = compact_matrix_.EntryCoefficient(i);
// }
util::IntegerRange<EntryIndex> Column(ColIndex col) const {
return util::IntegerRange<EntryIndex>(starts_[col], starts_[col + 1]);
::util::IntegerRange<EntryIndex> Column(ColIndex col) const {
return ::util::IntegerRange<EntryIndex>(starts_[col], starts_[col + 1]);
}
Fractional EntryCoefficient(EntryIndex i) const { return coefficients_[i]; }
RowIndex EntryRow(EntryIndex i) const { return rows_[i]; }

View File

@@ -26,11 +26,14 @@
#include "ortools/base/commandlineflags.h"
#include "ortools/base/logging.h"
#include "ortools/base/mutex.h"
#include "ortools/base/stringprintf.h"
#include "ortools/base/timer.h"
#if !defined(__PORTABLE_PLATFORM__)
#include "google/protobuf/text_format.h"
#include "ortools/base/notification.h"
#endif // __PORTABLE_PLATFORM__
#include "ortools/base/cleanup.h"
#include "ortools/base/int_type.h"
#include "ortools/base/int_type_indexed_vector.h"
#include "ortools/base/iterator_adaptors.h"
@@ -38,7 +41,6 @@
#include "ortools/base/map_util.h"
#include "ortools/base/memory.h"
#include "ortools/base/stl_util.h"
#include "ortools/base/time_support.h"
#include "ortools/graph/connectivity.h"
#include "ortools/port/proto_utils.h"
#include "ortools/sat/all_different.h"
@@ -426,7 +428,7 @@ void ModelWithMapping::ExtractEncoding(const CpModelProto& model_proto) {
}
}
if (!inequalities.empty()) {
VLOG(1) << num_inequalities << " literals associated to VAR >= value (cts: "
VLOG(2) << num_inequalities << " literals associated to VAR >= value (cts: "
<< inequalities.size() << ")";
}
@@ -473,15 +475,15 @@ void ModelWithMapping::ExtractEncoding(const CpModelProto& model_proto) {
}
}
if (num_constraints > 0) {
VLOG(1) << num_equalities
VLOG(2) << num_equalities
<< " literals associated to VAR == value (cts: " << num_constraints
<< ")";
}
if (num_fully_encoded > 0) {
VLOG(1) << "num_fully_encoded_variables: " << num_fully_encoded;
VLOG(2) << "num_fully_encoded_variables: " << num_fully_encoded;
}
if (num_partially_encoded > 0) {
VLOG(1) << "num_partially_encoded_variables: " << num_partially_encoded;
VLOG(2) << "num_partially_encoded_variables: " << num_partially_encoded;
}
}
@@ -2039,10 +2041,10 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto,
linear_constraints.resize(new_size);
}
VLOG(1) << "num_full_encoding_relaxations: " << num_full_encoding_relaxations;
VLOG(1) << "num_integer_encoding_constraints: " << num_extra_constraints;
VLOG(1) << linear_constraints.size() << " constraints in the LP relaxation.";
VLOG(1) << cut_generators.size() << " cuts generators.";
VLOG(2) << "num_full_encoding_relaxations: " << num_full_encoding_relaxations;
VLOG(2) << "num_integer_encoding_constraints: " << num_extra_constraints;
VLOG(2) << linear_constraints.size() << " constraints in the LP relaxation.";
VLOG(2) << cut_generators.size() << " cuts generators.";
// The bipartite graph of LP constraints might be disconnected:
// make a partition of the variables into connected components.
@@ -2174,11 +2176,11 @@ IntegerVariable AddLPConstraints(const CpModelProto& model_proto,
// Register LP constraints. Note that this needs to be done after all the
// constraints have been added.
for (auto* lp_constraint : lp_constraints) {
VLOG(1) << "LP constraint: " << lp_constraint->DimensionString() << ".";
VLOG(2) << "LP constraint: " << lp_constraint->DimensionString() << ".";
lp_constraint->RegisterWith(m->model());
}
VLOG(1) << top_level_cp_terms.size()
VLOG(2) << top_level_cp_terms.size()
<< " terms in the main objective linear equation ("
<< num_components_containing_objective << " from LP constraints).";
return main_objective_var;
@@ -2327,19 +2329,19 @@ CpSolverResponse SolveCpModelInternal(
Trail* trail = model->GetOrCreate<Trail>();
const int old_num_fixed = trail->Index();
if (trail->Index() > old_num_fixed) {
VLOG(1) << "Constraint fixed " << trail->Index() - old_num_fixed
VLOG(2) << "Constraint fixed " << trail->Index() - old_num_fixed
<< " Boolean variable(s): " << ProtobufDebugString(ct);
}
}
if (model->GetOrCreate<SatSolver>()->IsModelUnsat()) {
VLOG(1) << "UNSAT during extraction (after adding '"
VLOG(2) << "UNSAT during extraction (after adding '"
<< ConstraintCaseName(ct.constraint_case()) << "'). "
<< ProtobufDebugString(ct);
break;
}
}
if (num_ignored_constraints > 0) {
VLOG(1) << num_ignored_constraints << " constraints where skipped.";
VLOG(2) << num_ignored_constraints << " constraints where skipped.";
}
if (!unsupported_types.empty()) {
VLOG(1) << "There is unsuported constraints types in this model: ";
@@ -2405,15 +2407,15 @@ CpSolverResponse SolveCpModelInternal(
const auto automatic_domain =
model->GetOrCreate<IntegerTrail>()->InitialVariableDomain(
objective_var);
VLOG(1) << "Objective offset:" << model_proto.objective().offset()
VLOG(2) << "Objective offset:" << model_proto.objective().offset()
<< " scaling_factor:" << model_proto.objective().scaling_factor();
VLOG(1) << "Automatic internal objective domain: " << automatic_domain;
VLOG(1) << "User specified internal objective domain: " << user_domain;
VLOG(2) << "Automatic internal objective domain: " << automatic_domain;
VLOG(2) << "User specified internal objective domain: " << user_domain;
CHECK_NE(objective_var, kNoIntegerVariable);
const bool ok = model->GetOrCreate<IntegerTrail>()->UpdateInitialDomain(
objective_var, user_domain);
if (!ok) {
VLOG(1) << "UNSAT due to the objective domain.";
VLOG(2) << "UNSAT due to the objective domain.";
model->GetOrCreate<SatSolver>()->NotifyThatModelIsUnsat();
}
@@ -2494,9 +2496,9 @@ CpSolverResponse SolveCpModelInternal(
status =
SolveProblemWithPortfolioSearch(decision_policies, {no_restart}, model);
if (status == SatSolver::Status::FEASIBLE) {
VLOG(1) << "Solution hint: success, feasible solution found.";
VLOG(2) << "Solution hint: success, feasible solution found.";
} else {
VLOG(1) << "Solution: failure, no feasible solution found.";
VLOG(2) << "Solution: failure, no feasible solution found.";
}
model->GetOrCreate<SatParameters>()->set_max_number_of_conflicts(
old_conflict_limit);
@@ -2526,8 +2528,8 @@ CpSolverResponse SolveCpModelInternal(
} else {
// Optimization problem.
const CpObjectiveProto& obj = model_proto.objective();
VLOG(1) << obj.vars_size() << " terms in the proto objective.";
VLOG(1) << "Initial num_bool: " << model->Get<SatSolver>()->NumVariables();
VLOG(2) << obj.vars_size() << " terms in the proto objective.";
VLOG(2) << "Initial num_bool: " << model->Get<SatSolver>()->NumVariables();
const auto solution_observer = [&model_proto, &response, &num_solutions,
&obj, &m, &external_solution_observer,
objective_var, &fill_response_statistics](
@@ -2546,9 +2548,6 @@ CpSolverResponse SolveCpModelInternal(
response.set_best_objective_bound(ScaleObjectiveValue(
obj, integer_trail->LevelZeroBound(objective_var).value()));
external_solution_observer(response);
VLOG(1) << "Solution #" << num_solutions
<< " obj:" << response.objective_value()
<< " num_bool:" << sat_model.Get<SatSolver>()->NumVariables();
};
if (parameters.optimize_with_core()) {
@@ -2557,11 +2556,11 @@ CpSolverResponse SolveCpModelInternal(
ExtractLinearObjective(model_proto, &m, &linear_vars, &linear_coeffs);
if (parameters.optimize_with_max_hs()) {
status = MinimizeWithHittingSetAndLazyEncoding(
VLOG_IS_ON(1), objective_var, linear_vars, linear_coeffs,
VLOG_IS_ON(2), objective_var, linear_vars, linear_coeffs,
next_decision, solution_observer, model);
} else {
status = MinimizeWithCoreAndLazyEncoding(
VLOG_IS_ON(1), objective_var, linear_vars, linear_coeffs,
VLOG_IS_ON(2), objective_var, linear_vars, linear_coeffs,
next_decision, solution_observer, model);
}
} else {
@@ -2844,125 +2843,42 @@ CpSolverResponse SolvePureSatModel(const CpModelProto& model_proto,
return response;
}
// The model_proto is just used for solution checking and reconstruction of the
// solution to the original model.
CpSolverResponse PresolveAndSolve(const CpModelProto& model_proto,
CpModelProto* current_model, Model* model) {
// Solve without presolving ?
const SatParameters& params = *model->GetOrCreate<SatParameters>();
const auto& observers = model->GetOrCreate<SolutionObservers>()->observers;
const int num_original_variables = model_proto.variables_size();
if (!params.cp_model_presolve() || params.enumerate_all_solutions()) {
CpSolverResponse response = SolveCpModelInternal(
*current_model, true,
[&](const CpSolverResponse& intermediate_response) {
if (observers.empty()) return;
// Truncate the solution in case model expansion added more variables.
CpSolverResponse truncated_response = intermediate_response;
truncated_response.mutable_solution()->Truncate(
num_original_variables);
DCHECK(SolutionIsFeasible(
model_proto,
std::vector<int64>(truncated_response.solution().begin(),
truncated_response.solution().end())));
for (const auto& observer : observers) {
observer(truncated_response);
}
},
model);
if (response.status() == CpSolverStatus::FEASIBLE ||
response.status() == CpSolverStatus::OPTIMAL) {
// Truncate the solution in case model expansion added more variables.
if (response.solution_size() > 0) {
response.mutable_solution()->Truncate(num_original_variables);
} else if (response.solution_lower_bounds_size() > 0) {
response.mutable_solution_lower_bounds()->Truncate(
num_original_variables);
response.mutable_solution_upper_bounds()->Truncate(
num_original_variables);
}
if (!response.solution().empty()) {
CHECK(SolutionIsFeasible(
model_proto, std::vector<int64>(response.solution().begin(),
response.solution().end())));
}
}
return response;
}
// Do the actual presolve.
CpModelProto mapping_proto;
std::vector<int> postsolve_mapping;
PresolveCpModel(current_model, &mapping_proto, &postsolve_mapping);
VLOG(1) << CpModelStats(*current_model);
// Note that it is okay to use the initial model_proto in the postsolve even
// though we called PresolveCpModel() on the expanded proto. This is because
// PostsolveResponse() only uses the proto to know the number of variables to
// fill in the response and to check the solution feasibility of these
// variables.
CpSolverResponse response = SolveCpModelInternal(
*current_model, true,
[&](const CpSolverResponse& response) {
if (observers.empty()) return;
CpSolverResponse copy = response;
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping, &copy);
for (const auto& observer : observers) {
observer(copy);
}
},
model);
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping, &response);
return response;
}
CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
const CpModelProto& expanded_model,
Model* model) {
CpSolverResponse SolveCpModelWithLNS(
const CpModelProto& model_proto,
const std::function<void(const CpSolverResponse&)>& observer,
Model* model) {
SatParameters* parameters = model->GetOrCreate<SatParameters>();
const auto& observers = model->GetOrCreate<SolutionObservers>()->observers;
parameters->set_stop_after_first_solution(true);
CpSolverResponse response;
auto* const synchro = model->Get<SynchronizationFunction>();
TimeLimit* const limit = model->GetOrCreate<TimeLimit>();
auto* synchro = model->Get<SynchronizationFunction>();
if (synchro != nullptr && synchro->f != nullptr) {
while (!limit->LimitReached()) {
response = synchro->f();
if (response.status() != CpSolverStatus::UNKNOWN) break;
absl::SleepFor(absl::Milliseconds(50));
}
response = synchro->f();
} else {
CpModelProto copy = expanded_model;
response = PresolveAndSolve(model_proto, &copy, model);
response = SolveCpModelInternal(model_proto, /*is_real_solve=*/true,
observer, model);
}
if (response.status() != CpSolverStatus::FEASIBLE) {
return response;
}
VLOG(1) << "LNS First solution: " << response.objective_value();
const bool focus_on_decision_variables =
parameters->lns_focus_on_decision_variables();
// For now we will just alternate between our possible neighborhoods.
//
// TODO(user): work on the presolved global problem rather than just the
// expanded problem?
NeighborhoodGeneratorHelper helper(&expanded_model,
focus_on_decision_variables);
NeighborhoodGeneratorHelper helper(&model_proto, focus_on_decision_variables);
std::vector<std::unique_ptr<NeighborhoodGenerator>> generators;
generators.push_back(
absl::make_unique<SimpleNeighborhoodGenerator>(&helper, "rnd_lns"));
generators.push_back(absl::make_unique<VariableGraphNeighborhoodGenerator>(
&helper, "var_lns"));
generators.push_back(absl::make_unique<ConstraintGraphNeighborhoodGenerator>(
&helper, "cts_lns"));
&helper, "cst_lns"));
// The "optimal" difficulties do not have to be the same for different
// generators. TODO(user): move this inside the generator API?
std::vector<AdaptiveParameterValue> difficulties(generators.size(),
AdaptiveParameterValue(0.5));
TimeLimit* limit = model->GetOrCreate<TimeLimit>();
double deterministic_time = 0.1;
int num_no_progress = 0;
@@ -2971,6 +2887,7 @@ CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
num_threads,
[&]() {
// Synchronize with external world.
auto* synchro = model->Get<SynchronizationFunction>();
if (synchro != nullptr && synchro->f != nullptr) {
const CpSolverResponse candidate_response = synchro->f();
if (!candidate_response.solution().empty()) {
@@ -3016,12 +2933,23 @@ CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
local_limit->RegisterExternalBooleanAsLimit(
limit->ExternalBooleanAsLimit());
}
const CpSolverResponse local_response =
PresolveAndSolve(model_proto, &local_problem, &local_model);
// Presolve and solve the LNS fragment.
CpSolverResponse local_response;
{
CpModelProto mapping_proto;
std::vector<int> postsolve_mapping;
PresolveCpModel(&local_problem, &mapping_proto, &postsolve_mapping);
local_response = SolveCpModelInternal(
local_problem, true, [](const CpSolverResponse& response) {},
&local_model);
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping,
&local_response);
}
return [&num_no_progress, &model_proto, &response, &difficulty,
&deterministic_time, saved_difficulty, local_response,
&observers, limit, solution_info]() {
&observer, limit, solution_info]() {
// TODO(user): This is not ideal in multithread because even though
// the saved_difficulty will be the same for all threads, we will
// Increase()/Decrease() the difficulty sequentially more than once.
@@ -3033,8 +2961,6 @@ CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
}
if (local_response.status() == CpSolverStatus::FEASIBLE ||
local_response.status() == CpSolverStatus::OPTIMAL) {
VLOG(1) << solution_info;
// If the objective are the same, we override the solution,
// otherwise we just ignore this local solution and increment
// num_no_progress.
@@ -3066,9 +2992,7 @@ CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
local_response.solution().end())));
if (num_no_progress == 0) { // Improving solution.
response.set_solution_info(solution_info);
for (const auto& observer : observers) {
observer(response);
}
observer(response);
}
}
};
@@ -3083,6 +3007,172 @@ CpSolverResponse SolveCpModelWithLNS(const CpModelProto& model_proto,
return response;
}
#if !defined(__PORTABLE_PLATFORM__)
CpSolverResponse SolveCpModelParallel(
const CpModelProto& model_proto,
const std::function<void(const CpSolverResponse&)>& observer,
Model* model) {
WallTimer timer;
timer.Start();
const SatParameters& params = *model->GetOrCreate<SatParameters>();
CHECK(!params.enumerate_all_solutions());
// This is a bit hacky. If the provided TimeLimit has a "stopped" Boolean, we
// use this one instead.
bool stopped_boolean = false;
bool* stopped = &stopped_boolean;
if (model->GetOrCreate<TimeLimit>()->ExternalBooleanAsLimit() != nullptr) {
stopped = model->GetOrCreate<TimeLimit>()->ExternalBooleanAsLimit();
}
const bool maximize = model_proto.objective().scaling_factor() < 0.0;
CpSolverResponse best_response;
if (model_proto.has_objective()) {
const double kInfinity = std::numeric_limits<double>::infinity();
if (maximize) {
best_response.set_objective_value(-kInfinity);
best_response.set_best_objective_bound(kInfinity);
} else {
best_response.set_objective_value(kInfinity);
best_response.set_best_objective_bound(-kInfinity);
}
}
// Fix the walltime before returning the response.
auto fix_walltime = ::operations_research::util::MakeCleanup(
[&timer, &best_response]() { best_response.set_wall_time(timer.Get()); });
absl::Mutex mutex;
const int num_search_workers = params.num_search_workers();
VLOG(1) << "Starting parallel search with " << num_search_workers
<< " workers.";
ThreadPool pool("Parallel_search", num_search_workers);
pool.StartWorkers();
// In the LNS threads, we wait for this notification before starting work.
absl::Notification first_solution_found_or_search_finished;
if (!model_proto.has_objective()) {
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
std::string worker_name;
const SatParameters local_params = DiversifySearchParameters(
params, model_proto, worker_id, &worker_name);
pool.Schedule([&model_proto, stopped, local_params, &best_response,
&mutex, worker_name]() {
Model local_model;
local_model.Add(NewSatParameters(local_params));
local_model.GetOrCreate<TimeLimit>()->RegisterExternalBooleanAsLimit(
stopped);
const CpSolverResponse local_response = SolveCpModelInternal(
model_proto, true, [](const CpSolverResponse& response) {},
&local_model);
absl::MutexLock lock(&mutex);
if (best_response.status() == CpSolverStatus::UNKNOWN) {
best_response = local_response;
}
if (local_response.status() != CpSolverStatus::UNKNOWN) {
CHECK_EQ(local_response.status(), best_response.status());
VLOG(1) << "Solution found by worker '" << worker_name << "'.";
*stopped = true;
}
});
}
return best_response;
}
// Optimization problem.
const auto objective_synchronization = [&mutex, &best_response]() {
absl::MutexLock lock(&mutex);
return best_response.objective_value();
};
const auto solution_synchronization = [&mutex, &best_response]() {
absl::MutexLock lock(&mutex);
return best_response;
};
int num_solutions = 1;
for (int worker_id = 0; worker_id < num_search_workers; ++worker_id) {
std::string worker_name;
const SatParameters local_params =
DiversifySearchParameters(params, model_proto, worker_id, &worker_name);
const auto solution_observer =
[maximize, &num_solutions, worker_id, worker_name, &mutex,
&best_response, &observer, &timer,
&first_solution_found_or_search_finished](const CpSolverResponse& r) {
absl::MutexLock lock(&mutex);
// Check if the new solution is actually improving upon the best
// solution found so far.
if (MergeOptimizationSolution(r, maximize, &best_response)) {
if (!first_solution_found_or_search_finished.HasBeenNotified()) {
first_solution_found_or_search_finished.Notify();
}
VLOG(1) << absl::StrFormat(
"#%-5i %-6s %8.2fs obj:[%0.0f,%0.0f] %s", num_solutions++,
worker_name.c_str(), timer.Get(),
maximize ? best_response.objective_value()
: best_response.best_objective_bound(),
maximize ? best_response.best_objective_bound()
: best_response.objective_value(),
r.solution_info().c_str());
observer(best_response);
}
};
pool.Schedule([&model_proto, solution_observer, solution_synchronization,
objective_synchronization, stopped, local_params, worker_id,
&mutex, &best_response,
&first_solution_found_or_search_finished, maximize,
worker_name]() {
Model local_model;
local_model.Add(NewSatParameters(local_params));
local_model.GetOrCreate<TimeLimit>()->RegisterExternalBooleanAsLimit(
stopped);
SetSynchronizationFunction(std::move(solution_synchronization),
&local_model);
SetObjectiveSynchronizationFunction(std::move(objective_synchronization),
&local_model);
CpSolverResponse thread_response;
if (local_params.use_lns()) {
first_solution_found_or_search_finished.WaitForNotification();
thread_response =
SolveCpModelWithLNS(model_proto, solution_observer, &local_model);
} else {
thread_response = SolveCpModelInternal(model_proto, true,
solution_observer, &local_model);
}
// Process final solution. Decide which worker has the 'best'
// solution. Note that the solution observer may or may not have been
// called.
absl::MutexLock lock(&mutex);
VLOG(1) << "Worker '" << worker_name << "' terminates with status "
<< ProtoEnumToString<CpSolverStatus>(thread_response.status())
<< " and an objective value of "
<< thread_response.objective_value();
MergeOptimizationSolution(thread_response, maximize, &best_response);
// TODO(user): For now we assume that each worker only terminates when
// the time limit is reached or when the problem is solved, so we just
// abort all other threads and return.
*stopped = true;
if (!first_solution_found_or_search_finished.HasBeenNotified()) {
first_solution_found_or_search_finished.Notify();
}
});
}
return best_response;
}
#endif // __PORTABLE_PLATFORM__
} // namespace
CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
@@ -3146,11 +3236,95 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
}
// Starts by expanding some constraints if needed.
CpModelProto expanded_proto = ExpandCpModel(model_proto);
if (params.use_lns() && model_proto.has_objective()) {
return SolveCpModelWithLNS(model_proto, expanded_proto, model);
CpModelProto new_model = ExpandCpModel(model_proto);
// Presolve?
std::function<void(CpSolverResponse * response)> postprocess_solution;
if (params.cp_model_presolve() && !params.enumerate_all_solutions()) {
// Do the actual presolve.
CpModelProto mapping_proto;
std::vector<int> postsolve_mapping;
PresolveCpModel(&new_model, &mapping_proto, &postsolve_mapping);
VLOG(1) << CpModelStats(new_model);
postprocess_solution = [&model_proto, mapping_proto,
postsolve_mapping](CpSolverResponse* response) {
// Note that it is okay to use the initial model_proto in the postsolve
// even though we called PresolveCpModel() on the expanded proto. This is
// because PostsolveResponse() only uses the proto to know the number of
// variables to fill in the response and to check the solution feasibility
// of these variables.
PostsolveResponse(model_proto, mapping_proto, postsolve_mapping,
response);
};
} else {
const int initial_size = model_proto.variables_size();
postprocess_solution = [initial_size](CpSolverResponse* response) {
// Truncate the solution in case model expansion added more variables.
if (response->solution_size() > 0) {
response->mutable_solution()->Truncate(initial_size);
} else if (response->solution_lower_bounds_size() > 0) {
response->mutable_solution_lower_bounds()->Truncate(initial_size);
response->mutable_solution_upper_bounds()->Truncate(initial_size);
}
};
}
return PresolveAndSolve(model_proto, &expanded_proto, model);
const auto& observers = model->GetOrCreate<SolutionObservers>()->observers;
std::function<void(const CpSolverResponse&)> observer_function =
[&model_proto, &observers,
&postprocess_solution](const CpSolverResponse& response) {
if (observers.empty()) return;
CpSolverResponse copy = response;
postprocess_solution(&copy);
if (!copy.solution().empty()) {
DCHECK(SolutionIsFeasible(model_proto,
std::vector<int64>(copy.solution().begin(),
copy.solution().end())));
}
for (const auto& observer : observers) {
observer(copy);
}
};
CpSolverResponse response;
if (params.num_search_workers() > 1) {
#if !defined(__PORTABLE_PLATFORM__)
response = SolveCpModelParallel(new_model, observer_function, model);
#endif // __PORTABLE_PLATFORM__
} else if (params.use_lns() && new_model.has_objective() &&
!params.enumerate_all_solutions()) {
int num_solutions = 1;
response =
SolveCpModelWithLNS(new_model,
[&num_solutions, &model, &observer_function](
const CpSolverResponse& response) {
observer_function(response);
VLOG(1) << "Solution #" << num_solutions++
<< " obj:" << response.objective_value()
<< " " << response.solution_info();
},
model);
} else {
int num_solutions = 1;
response = SolveCpModelInternal(
new_model, /*is_real_solve=*/true,
[&num_solutions, &model,
&observer_function](const CpSolverResponse& response) {
observer_function(response);
VLOG(1) << "Solution #" << num_solutions++
<< " obj:" << response.objective_value()
<< " num_bool:" << model->Get<SatSolver>()->NumVariables();
},
model);
}
postprocess_solution(&response);
if (!response.solution().empty()) {
CHECK(SolutionIsFeasible(model_proto,
std::vector<int64>(response.solution().begin(),
response.solution().end())));
}
return response;
}
} // namespace sat

View File

@@ -654,7 +654,7 @@ class IntegerTrail : public SatPropagator {
// Helper function to return the "dependencies" of a bound assignment.
// All the TrailEntry at these indices are part of the reason for this
// assignment.
util::BeginEndWrapper<std::vector<IntegerLiteral>::const_iterator>
::util::BeginEndWrapper<std::vector<IntegerLiteral>::const_iterator>
Dependencies(int trail_index) const;
// Helper function to append the Literal part of the reason for this bound

View File

@@ -571,6 +571,9 @@ message SatParameters {
// Specify the number of parallel workers to use during search.
// A number <= 1 means no parallelism.
//
// WARNING: For now the code is non-deterministic. There are plans to make it
// deterministic (or provide an option) soon.
optional int32 num_search_workers = 100 [default = 0];
// LNS parameters.

View File

@@ -17,6 +17,7 @@
#include "ortools/base/stringprintf.h"
#include "ortools/base/stl_util.h"
#include "ortools/base/stringprintf.h"
#include "ortools/port/sysinfo.h"
#include "ortools/port/utf8.h"
@@ -204,7 +205,7 @@ void TimeDistribution::AddTimeInCycles(double cycles) {
}
std::string TimeDistribution::ValueAsString() const {
return StringPrintf(
return absl::StrFormat(
"%8llu [%8s, %8s] %8s %8s %8s\n", num_, PrintCyclesAsTime(min_).c_str(),
PrintCyclesAsTime(max_).c_str(), PrintCyclesAsTime(Average()).c_str(),
PrintCyclesAsTime(StdDeviation()).c_str(),
@@ -217,16 +218,16 @@ void RatioDistribution::Add(double value) {
}
std::string RatioDistribution::ValueAsString() const {
return StringPrintf("%8llu [%7.2lf%%, %7.2lf%%] %7.2lf%% %7.2lf%%\n", num_,
100.0 * min_, 100.0 * max_, 100.0 * Average(),
100.0 * StdDeviation());
return absl::StrFormat("%8llu [%7.2lf%%, %7.2lf%%] %7.2lf%% %7.2lf%%\n", num_,
100.0 * min_, 100.0 * max_, 100.0 * Average(),
100.0 * StdDeviation());
}
void DoubleDistribution::Add(double value) { AddToDistribution(value); }
std::string DoubleDistribution::ValueAsString() const {
return StringPrintf("%8llu [%8.1e, %8.1e] %8.1e %8.1e\n", num_, min_, max_,
Average(), StdDeviation());
return absl::StrFormat("%8llu [%8.1e, %8.1e] %8.1e %8.1e\n", num_, min_, max_,
Average(), StdDeviation());
}
void IntegerDistribution::Add(int64 value) {
@@ -234,8 +235,8 @@ void IntegerDistribution::Add(int64 value) {
}
std::string IntegerDistribution::ValueAsString() const {
return StringPrintf("%8llu [%8.lf, %8.lf] %8.2lf %8.2lf %8.lf\n", num_, min_,
max_, Average(), StdDeviation(), sum_);
return absl::StrFormat("%8llu [%8.lf, %8.lf] %8.2lf %8.2lf %8.lf\n", num_, min_,
max_, Average(), StdDeviation(), sum_);
}
#ifdef HAS_PERF_SUBSYSTEM