improve linear relaxation for breaks in routing
This commit is contained in:
@@ -15,7 +15,10 @@
|
||||
|
||||
#include <numeric>
|
||||
|
||||
#include "absl/container/flat_hash_set.h"
|
||||
#include "absl/time/time.h"
|
||||
#include "ortools/base/integral_types.h"
|
||||
#include "ortools/base/map_util.h"
|
||||
#include "ortools/constraint_solver/routing.h"
|
||||
#include "ortools/glop/lp_solver.h"
|
||||
#include "ortools/lp_data/lp_types.h"
|
||||
@@ -57,6 +60,22 @@ bool SetVariableBounds(glop::LinearProgram* linear_program,
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetCumulBoundsWithOffset(const IntVar& cumul_var, int64 cumul_offset,
|
||||
int64* lower_bound, int64* upper_bound) {
|
||||
DCHECK(lower_bound != nullptr);
|
||||
DCHECK(upper_bound != nullptr);
|
||||
*lower_bound = std::max<int64>(0, CapSub(cumul_var.Min(), cumul_offset));
|
||||
*upper_bound = cumul_var.Max();
|
||||
if (*upper_bound < kint64max) {
|
||||
*upper_bound = CapSub(*upper_bound, cumul_offset);
|
||||
if (*upper_bound < 0) {
|
||||
// The cumul must be less than the cumul_offset, which is impossible.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
LocalDimensionCumulOptimizer::LocalDimensionCumulOptimizer(
|
||||
@@ -115,6 +134,177 @@ bool LocalDimensionCumulOptimizer::ComputePackedRouteCumuls(
|
||||
lp_solver_[vehicle].get(), packed_cumuls);
|
||||
}
|
||||
|
||||
// The propagation graph has two nodes per cumul variable: node 2*i carries the
// lower bound of cumul i, and node 2*i+1 the lower bound of -cumul i (i.e. its
// negated upper bound), hence num_nodes_ = 2 * #cumuls.
CumulBoundsPropagator::CumulBoundsPropagator(const RoutingDimension* dimension)
    : dimension_(*dimension), num_nodes_(2 * dimension->cumuls().size()) {
  outgoing_arcs_.resize(num_nodes_);
  node_in_queue_.assign(num_nodes_, false);
  tree_parent_node_of_.assign(num_nodes_, kNoParent);
}
||||
|
||||
void CumulBoundsPropagator::AddArcs(int first_index, int second_index,
|
||||
int64 offset) {
|
||||
// Add arc first_index + offset <= second_index
|
||||
outgoing_arcs_[PositiveNode(first_index)].push_back(
|
||||
{PositiveNode(second_index), offset});
|
||||
AddNodeToQueue(PositiveNode(first_index));
|
||||
// Add arc -second_index + transit <= -first_index
|
||||
outgoing_arcs_[NegativeNode(second_index)].push_back(
|
||||
{NegativeNode(first_index), offset});
|
||||
AddNodeToQueue(NegativeNode(second_index));
|
||||
}
|
||||
|
||||
// Clears outgoing_arcs_, fills *initial_lower_bounds with the offset bounds of
// every cumul variable, then adds the precedence arcs induced by the routes
// (via next_accessor and the per-vehicle transit evaluators) and by the
// dimension's explicit node precedences.
// Returns false iff some cumul variable's domain is incompatible with
// cumul_offset (see GetCumulBoundsWithOffset()).
bool CumulBoundsPropagator::InitializeArcsAndBounds(
    const std::function<int64(int64)>& next_accessor, int64 cumul_offset,
    std::vector<int64>* initial_lower_bounds) {
  DCHECK(initial_lower_bounds != nullptr);
  // For each cumul var V in [l, u], we use one variable for V with lower bound
  // l, and one other for -V with lower bound -u.
  // kint64min means "no lower bound"; nodes left at kint64min after the route
  // loop below correspond to unperformed nodes.
  initial_lower_bounds->assign(num_nodes_, kint64min);

  for (std::vector<ArcInfo>& arcs : outgoing_arcs_) {
    arcs.clear();
  }

  RoutingModel* const model = dimension_.model();
  std::vector<int64>& lower_bounds = *initial_lower_bounds;

  // Walk each vehicle's route, setting bounds and adding a transit arc between
  // consecutive nodes.
  for (int vehicle = 0; vehicle < model->vehicles(); vehicle++) {
    const std::function<int64(int64, int64)>& transit_accessor =
        dimension_.transit_evaluator(vehicle);

    int node = model->Start(vehicle);
    while (true) {
      int64 cumul_lb, cumul_ub;
      if (!GetCumulBoundsWithOffset(*dimension_.CumulVar(node), cumul_offset,
                                    &cumul_lb, &cumul_ub)) {
        return false;
      }
      lower_bounds[PositiveNode(node)] = cumul_lb;
      // A kint64max upper bound means "unbounded": leave the negated-bound
      // node at kint64min (no lower bound on -cumul).
      if (cumul_ub < kint64max) {
        lower_bounds[NegativeNode(node)] = -cumul_ub;
      }

      if (model->IsEnd(node)) {
        break;
      }

      const int next = next_accessor(node);
      const int64 transit = transit_accessor(node, next);
      // TODO(user): Also consider slack variables' bounds. The real
      // relation is node + transit + slack_var == next, which is equivalent to
      // node + transit + slack_min <= next, and
      // node + transit + slack_max >= next.
      AddArcs(node, next, transit);

      node = next;
    }
  }

  // Add the dimension's explicit precedence rules between pairs of nodes.
  for (const RoutingDimension::NodePrecedence& precedence :
       dimension_.GetNodePrecedences()) {
    const int first_index = precedence.first_node;
    const int second_index = precedence.second_node;
    if (lower_bounds[PositiveNode(first_index)] == kint64min ||
        lower_bounds[PositiveNode(second_index)] == kint64min) {
      // One of the nodes is unperformed, so the precedence rule doesn't apply.
      continue;
    }
    AddArcs(first_index, second_index, precedence.offset);
  }

  return true;
}
|
||||
|
||||
bool CumulBoundsPropagator::UpdateCurrentLowerBoundOfNode(
|
||||
int node, int64 new_lb, std::vector<int64>* current_lower_bounds) {
|
||||
(*current_lower_bounds)[node] = new_lb;
|
||||
|
||||
// Test that the lower/upper bounds do not cross each other.
|
||||
const int cumul_var_index = node / 2;
|
||||
const int64 cumul_lower_bound =
|
||||
(*current_lower_bounds)[PositiveNode(cumul_var_index)];
|
||||
|
||||
const int64 negated_cumul_upper_bound =
|
||||
(*current_lower_bounds)[NegativeNode(cumul_var_index)];
|
||||
|
||||
// We do the test this way to avoid integer overflow.
|
||||
DCHECK_NE(cumul_lower_bound, kint64min);
|
||||
return -cumul_lower_bound >= negated_cumul_upper_bound;
|
||||
}
|
||||
|
||||
bool CumulBoundsPropagator::DisassembleSubtree(int source, int target) {
|
||||
tmp_dfs_stack_.clear();
|
||||
tmp_dfs_stack_.push_back(source);
|
||||
while (!tmp_dfs_stack_.empty()) {
|
||||
const int tail = tmp_dfs_stack_.back();
|
||||
tmp_dfs_stack_.pop_back();
|
||||
for (const ArcInfo& arc : outgoing_arcs_[tail]) {
|
||||
const int child_node = arc.head;
|
||||
if (tree_parent_node_of_[child_node] != tail) continue;
|
||||
if (child_node == target) return false;
|
||||
tree_parent_node_of_[child_node] = kParentToBePropagated;
|
||||
tmp_dfs_stack_.push_back(child_node);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Runs lower-bound propagation over the precedence graph built by
// InitializeArcsAndBounds(), using the Bellman-Ford algorithm with Tarjan's
// subtree-disassembly improvement. Returns false iff the bounds become
// infeasible or a positive cycle is detected; in both cases the queue state is
// reset via CleanupAndReturnFalse() before returning.
bool CumulBoundsPropagator::PropagateCumulBounds(
    const std::function<int64(int64)>& next_accessor, int64 cumul_offset,
    std::vector<int64>* propagated_bounds) {
  tree_parent_node_of_.assign(num_nodes_, kNoParent);
  // The queue must be empty from any previous call (CleanupAndReturnFalse()
  // and the normal loop exit both guarantee this).
  DCHECK(std::none_of(node_in_queue_.begin(), node_in_queue_.end(),
                      [](bool b) { return b; }));
  DCHECK(bf_queue_.empty());

  if (!InitializeArcsAndBounds(next_accessor, cumul_offset,
                               propagated_bounds)) {
    return CleanupAndReturnFalse();
  }

  std::vector<int64>& current_lb = *propagated_bounds;

  // Bellman-Ford-Tarjan algorithm.
  while (!bf_queue_.empty()) {
    const int node = bf_queue_.front();
    bf_queue_.pop_front();
    node_in_queue_[node] = false;

    if (tree_parent_node_of_[node] == kParentToBePropagated) {
      // The parent of this node is still in the queue, so no need to process
      // node now, since it will be re-enqued when its parent is processed.
      continue;
    }

    // Try to tighten the lower bound of every successor of node.
    const int64 lower_bound = current_lb[node];
    for (const ArcInfo& arc : outgoing_arcs_[node]) {
      // NOTE: kint64min as a lower bound means no lower bound at all, so we
      // don't use this value to propagate.
      const int64 induced_lb = (lower_bound == kint64min)
                                   ? kint64min
                                   : CapAdd(lower_bound, arc.offset);

      const int head_node = arc.head;
      if (induced_lb <= current_lb[head_node]) {
        // No update necessary for the head_node, continue to next children of
        // node.
        continue;
      }
      if (!UpdateCurrentLowerBoundOfNode(head_node, induced_lb,
                                         propagated_bounds) ||
          !DisassembleSubtree(head_node, node)) {
        // The new lower bound is infeasible, or a positive cycle was detected
        // in the precedence graph by DisassembleSubtree().
        return CleanupAndReturnFalse();
      }

      // head_node's bound was improved via node: record the tree edge and
      // re-enqueue head_node so the improvement is propagated further.
      tree_parent_node_of_[head_node] = node;
      AddNodeToQueue(head_node);
    }
  }
  return true;
}
|
||||
|
||||
bool DimensionCumulOptimizerCore::OptimizeSingleRoute(
|
||||
int vehicle, const std::function<int64(int64)>& next_accessor,
|
||||
glop::LinearProgram* linear_program, glop::LPSolver* lp_solver,
|
||||
|
||||
@@ -22,6 +22,85 @@ namespace operations_research {
|
||||
// Classes to solve dimension cumul placement (aka scheduling) problems using
|
||||
// linear programming.
|
||||
|
||||
// Utility class used in the core optimizer to tighten the cumul bounds as much
|
||||
// as possible based on the model precedences.
|
||||
class CumulBoundsPropagator {
|
||||
public:
|
||||
explicit CumulBoundsPropagator(const RoutingDimension* dimension);
|
||||
|
||||
// Tightens the cumul bounds starting from the current cumul var min/max,
|
||||
// and propagating the precedences resulting from the next_accessor, and the
|
||||
// dimension's precedence rules.
|
||||
// Returns false iff the precedences are infeasible with the given routes.
|
||||
// Otherwise, for each node index n, propagated_bounds[2*n] and
|
||||
// -propagated_bounds[2*n+1] are respectively the propagated lower and upper
|
||||
// bounds of n's cumul variable.
|
||||
bool PropagateCumulBounds(const std::function<int64(int64)>& next_accessor,
|
||||
int64 cumul_offset,
|
||||
std::vector<int64>* propagated_bounds);
|
||||
|
||||
private:
|
||||
// An arc "tail --offset--> head" represents the relation
|
||||
// tail + offset <= head.
|
||||
// As arcs are stored by tail, we don't store it in the struct.
|
||||
struct ArcInfo {
|
||||
int head;
|
||||
int64 offset;
|
||||
};
|
||||
static constexpr int kNoParent = -2;
|
||||
static constexpr int kParentToBePropagated = -1;
|
||||
|
||||
// Return the node corresponding to the lower bound of the cumul of index and
|
||||
// -index respectively.
|
||||
int PositiveNode(int index) { return 2 * index; }
|
||||
int NegativeNode(int index) { return 2 * index + 1; }
|
||||
|
||||
void AddNodeToQueue(int node) {
|
||||
if (!node_in_queue_[node]) {
|
||||
bf_queue_.push_back(node);
|
||||
node_in_queue_[node] = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Adds the relation first_index + offset <= second_index, by adding arcs
|
||||
// first_index --offset--> second_index and
|
||||
// -second_index --offset--> -first_index.
|
||||
void AddArcs(int first_index, int second_index, int64 offset);
|
||||
|
||||
bool InitializeArcsAndBounds(const std::function<int64(int64)>& next_accessor,
|
||||
int64 cumul_offset,
|
||||
std::vector<int64>* initial_lower_bounds);
|
||||
|
||||
bool UpdateCurrentLowerBoundOfNode(int node, int64 new_lb,
|
||||
std::vector<int64>* current_lower_bounds);
|
||||
|
||||
bool DisassembleSubtree(int source, int target);
|
||||
|
||||
bool CleanupAndReturnFalse() {
|
||||
// We clean-up node_in_queue_ for future calls, and return false.
|
||||
for (int node_to_cleanup : bf_queue_) {
|
||||
node_in_queue_[node_to_cleanup] = false;
|
||||
}
|
||||
bf_queue_.clear();
|
||||
return false;
|
||||
}
|
||||
|
||||
const RoutingDimension& dimension_;
|
||||
const int64 num_nodes_;
|
||||
|
||||
// TODO(user): Investigate if all arcs for a given tail can be created
|
||||
// at the same time, in which case outgoing_arcs_ could point to an absl::Span
|
||||
// for each tail index.
|
||||
std::vector<std::vector<ArcInfo>> outgoing_arcs_;
|
||||
|
||||
std::deque<int> bf_queue_;
|
||||
std::vector<bool> node_in_queue_;
|
||||
std::vector<int> tree_parent_node_of_;
|
||||
|
||||
// Vector used in DisassembleSubtree() to avoid memory reallocation.
|
||||
std::vector<int> tmp_dfs_stack_;
|
||||
};
|
||||
|
||||
// Utility class used in Local/GlobalDimensionCumulOptimizer to set the LP
|
||||
// constraints and solve the problem.
|
||||
class DimensionCumulOptimizerCore {
|
||||
|
||||
@@ -42,6 +42,9 @@ struct Arc {
|
||||
return a.tail == b.tail && a.head == b.head;
|
||||
}
|
||||
friend bool operator!=(const Arc& a, const Arc& b) { return !(a == b); }
|
||||
friend bool operator<(const Arc& a, const Arc& b) {
|
||||
return a.tail == b.tail ? a.head < b.head : a.tail < b.tail;
|
||||
}
|
||||
friend std::ostream& operator<<(std::ostream& strm, const Arc& arc) {
|
||||
return strm << "{" << arc.tail << ", " << arc.head << "}";
|
||||
}
|
||||
@@ -51,7 +54,7 @@ struct Arc {
|
||||
}
|
||||
};
|
||||
|
||||
using ArcVarMap = absl::flat_hash_map<Arc, int>;
|
||||
using ArcVarMap = std::map<Arc, int>; // needs to be stable when iterating
|
||||
|
||||
// Adds all dimensions to a CpModelProto. Only adds path cumul constraints and
|
||||
// cumul bounds.
|
||||
|
||||
Reference in New Issue
Block a user