more graph cleaning; add floating point version of min cost flow

This commit is contained in:
Laurent Perron
2025-01-10 22:24:24 +01:00
parent c34026b101
commit f0e0741d91
16 changed files with 1525 additions and 573 deletions

View File

@@ -565,6 +565,35 @@ cc_test(
],
)
# Min Cost Flow with floating point flow.
cc_library(
name = "fp_min_cost_flow",
srcs = ["fp_min_cost_flow.cc"],
hdrs = ["fp_min_cost_flow.h"],
deps = [
":min_cost_flow",
"//ortools/util:fp_roundtrip_conv",
"//ortools/util:saturated_arithmetic",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
],
)
# cc_test(
# name = "fp_min_cost_flow_test",
# srcs = ["fp_min_cost_flow_test.cc"],
# deps = [
# ":fp_min_cost_flow",
# ":min_cost_flow",
#        "@com_google_absl//absl/base:no_destructor",
# "//ortools/base:gmock_main",
# "//ortools/util:fp_roundtrip_conv",
# "@com_google_absl//absl/base:log_severity",
# "@com_google_absl//absl/strings",
# ],
# )
# Flow-problem solver
cc_binary(
name = "solve_flow_model",

View File

@@ -43,6 +43,7 @@ target_link_libraries(${NAME} PRIVATE
if(BUILD_TESTING)
file(GLOB _TEST_SRCS "*_test.cc")
list(FILTER _TEST_SRCS EXCLUDE REGEX "max_flow_test.cc")
list(FILTER _TEST_SRCS EXCLUDE REGEX "fp_min_cost_flow_test.cc")
foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS)
get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE)
get_filename_component(_FILE_NAME ${_FULL_FILE_NAME} NAME)

View File

@@ -87,6 +87,7 @@
%unignore operations_research::SimpleMinCostFlow::SimpleMinCostFlow;
%unignore operations_research::SimpleMinCostFlow::~SimpleMinCostFlow;
%unignore operations_research::SimpleMinCostFlow::AddArcWithCapacityAndUnitCost;
%unignore operations_research::SimpleMinCostFlow::SetArcCapacity;
%unignore operations_research::SimpleMinCostFlow::SetNodeSupply;
%unignore operations_research::SimpleMinCostFlow::Solve;
%unignore operations_research::SimpleMinCostFlow::SolveMaxFlowWithMinCost;

View File

@@ -20,19 +20,8 @@
// 30(6):513-519 (June 1987).
// http://portal.acm.org/citation.cfm?id=214769
//
// In this file there are three representations that have much in
// common. The general one, called simply EbertGraph, contains both
// forward- and backward-star representations. The other, called
// ForwardEbertGraph, contains only the forward-star representation of
// the graph, and is appropriate for applications where the reverse
// arcs are not needed.
//
// The point of including all the representations in this one file is
// to capitalize, where possible, on the commonalities among them, and
// those commonalities are mostly factored out into base classes as
// described below. Despite the commonalities, however, each of the
// three representations presents a somewhat different interface
// because of their different underlying semantics.
// This file defines a graph representation called EbertGraph, which contains
// both forward- and backward-star representations.
//
// Many clients are expected to use the interfaces to the graph
// objects directly, but some clients are parameterized by graph type
@@ -43,9 +32,7 @@
// AnnotatedGraphBuildManager<> template, which provides a uniform
// interface for building the various types of graphs; and the
// TailArrayManager<> template, which provides a uniform interface for
// applications that need to map from arc indices to arc tail nodes,
// accounting for the fact that such a mapping has to be requested
// explicitly from the ForwardStarGraph representation.
// applications that need to map from arc indices to arc tail nodes.
//
// There are two base class templates, StarGraphBase, and
// EbertGraphBase; their purpose is to hold methods and data
@@ -61,9 +48,9 @@
// / |
// / |
// (EbertGraphBase) |
// / \ |
// / \ |
// EbertGraph ForwardEbertGraph |
// / |
// / |
// EbertGraph |
//
// In the general EbertGraph case, the graph is represented with three
// arrays.
@@ -127,26 +114,6 @@
// * TODO(user) it is possible to implement arc deletion and garbage collection
// in an efficient (relatively) manner. For the time being we haven't seen an
// application for this.
//
// The ForwardEbertGraph representation is like the EbertGraph case described
// above, with the following modifications:
// * The part of the head_[] array with negative indices is absent. In its
// place is a pointer tail_ which, if assigned, points to an array of tail
// nodes indexed by (nonnegative) arc index. In typical usage tail_ is NULL
// and the memory for the tail nodes need not be allocated.
// * The array of arc tails can be allocated as needed and populated from the
// adjacency lists of the graph.
// * Representing only the forward star of each node implies that the graph
// cannot be serialized directly nor rebuilt from scratch from just the head_
// array. Rebuilding from scratch requires constructing the array of arc
// tails from the adjacency lists first, and serialization can be done either
// by first constructing the array of arc tails from the adjacency lists, or
// by serializing directly from the adjacency lists.
// * The memory consumption is: m * sizeof(NodeIndexType)
// + m * sizeof(ArcIndexType)
// + n * sizeof(ArcIndexType)
// plus a small constant when the array of arc tails is absent. Allocating
// the arc tail array adds another m * sizeof(NodeIndexType).
#include <algorithm>
#include <cstddef>
@@ -170,21 +137,17 @@ namespace operations_research {
// Forward declarations.
template <typename NodeIndexType, typename ArcIndexType>
class EbertGraph;
template <typename NodeIndexType, typename ArcIndexType>
class ForwardEbertGraph;
// Standard instantiation of ForwardEbertGraph (named 'ForwardStarGraph') of
// EbertGraph (named 'StarGraph'); and relevant type shortcuts. Unless their use
// cases prevent them from doing so, users are encouraged to use StarGraph or
// ForwardStarGraph according to whether or not they require reverse arcs to be
// represented explicitly. Along with either graph representation, the other
// type shortcuts here will often come in handy.
// Standard instantiation of EbertGraph (named 'StarGraph'); and relevant type
// shortcuts. Unless their use cases prevent them from doing so, users are
// encouraged to use StarGraph, which represents reverse arcs explicitly. Along
// with this graph representation, the other type shortcuts here will often
// come in handy.
typedef int32_t NodeIndex;
typedef int32_t ArcIndex;
typedef int64_t FlowQuantity;
typedef int64_t CostValue;
typedef EbertGraph<NodeIndex, ArcIndex> StarGraph;
typedef ForwardEbertGraph<NodeIndex, ArcIndex> ForwardStarGraph;
// Adapt our old iteration style to support range-based for loops. Add typedefs
// required by std::iterator_traits.
@@ -545,19 +508,14 @@ const ArcIndexType
StarGraphBase<NodeIndexType, ArcIndexType, DerivedGraph>::kMaxNumArcs =
std::numeric_limits<ArcIndexType>::max();
// A template for the base class that holds the functionality that exists in
// common between the EbertGraph<> template and the ForwardEbertGraph<>
// template.
//
// This template is for internal use only, and this is enforced by making all
// constructors for this class template protected. Clients should use the
// derived-class template. Most clients will not even use it directly,
// but will use the StarGraph and ForwardStarGraph typenames declared above.
// but will use the StarGraph typenames declared above.
//
// The DerivedGraph template argument must be the type of the class (typically
// itself built from a template) that:
// 1. implements the full interface expected for either ForwardEbertGraph or
// EbertGraph, and
// 1. implements the full interface expected for EbertGraph, and
// 2. inherits from an instance of this template.
// The base class needs access to some members of the derived class such as, for
// example, NextOutgoingArc(), and it gets this access via the DerivedGraph
@@ -1207,288 +1165,6 @@ class ABSL_DEPRECATED("Use `::util::ListGraph<>` instead.") EbertGraph
}
};
// A forward-star-only graph representation for greater efficiency in
// those algorithms that don't need reverse arcs.
template <typename NodeIndexType, typename ArcIndexType>
class ABSL_DEPRECATED("Use `::util::ListGraph<>` instead.") ForwardEbertGraph
: public EbertGraphBase<NodeIndexType, ArcIndexType,
ForwardEbertGraph<NodeIndexType, ArcIndexType> > {
typedef EbertGraphBase<NodeIndexType, ArcIndexType,
ForwardEbertGraph<NodeIndexType, ArcIndexType> >
Base;
friend class EbertGraphBase<NodeIndexType, ArcIndexType,
ForwardEbertGraph<NodeIndexType, ArcIndexType> >;
friend class StarGraphBase<NodeIndexType, ArcIndexType,
ForwardEbertGraph<NodeIndexType, ArcIndexType> >;
using Base::ArcDebugString;
using Base::Initialize;
using Base::NextAdjacentArc;
using Base::NodeDebugString;
using Base::first_incident_arc_;
using Base::head_;
using Base::max_num_arcs_;
using Base::max_num_nodes_;
using Base::next_adjacent_arc_;
using Base::num_arcs_;
using Base::num_nodes_;
using Base::representation_clean_;
public:
#if !SWIG
using Base::Head;
using Base::IsNodeValid;
using Base::kFirstArc;
using Base::kFirstNode;
using Base::kNilArc;
using Base::kNilNode;
#endif // SWIG
typedef NodeIndexType NodeIndex;
typedef ArcIndexType ArcIndex;
ForwardEbertGraph() {}
ForwardEbertGraph(NodeIndexType max_num_nodes, ArcIndexType max_num_arcs) {
Initialize(max_num_nodes, max_num_arcs);
}
~ForwardEbertGraph() {}
// Utility function to check that an arc index is within the bounds.
// It is exported so that users of the ForwardEbertGraph class can use it.
// To be used in a DCHECK.
bool CheckArcBounds(const ArcIndexType arc) const {
return (arc == kNilArc) || (arc >= kFirstArc && arc < max_num_arcs_);
}
// Utility function to check that an arc index is within the bounds AND
// different from kNilArc.
// It is exported so that users of the ForwardEbertGraph class can use it.
// To be used in a DCHECK.
bool CheckArcValidity(const ArcIndexType arc) const {
return (arc != kNilArc) && (arc >= kFirstArc && arc < max_num_arcs_);
}
// Returns true if arc is a valid index into the (*tail_) array.
bool CheckTailIndexValidity(const ArcIndexType arc) const {
return (tail_ != nullptr) && (arc >= kFirstArc) &&
(arc <= tail_->max_index());
}
// Returns the tail or start-node of arc.
NodeIndexType Tail(const ArcIndexType arc) const {
DCHECK(CheckArcValidity(arc));
DCHECK(CheckTailIndexValidity(arc));
return (*tail_)[arc];
}
// Returns true if arc is incoming to node.
bool IsIncoming(ArcIndexType arc, NodeIndexType node) const {
return IsDirect(arc) && Head(arc) == node;
}
// Recreates the next_adjacent_arc_ and first_incident_arc_
// variables from the arrays head_ and tail_ in O(n + m) time. This
// is useful if the head_ and tail_ arrays have been sorted
// according to a given criterion, for example.
void BuildRepresentation() {
first_incident_arc_.SetAll(kNilArc);
DCHECK(TailArrayComplete());
for (ArcIndexType arc = kFirstArc; arc < max_num_arcs_; ++arc) {
DCHECK(CheckTailIndexValidity(arc));
Attach((*tail_)[arc], arc);
}
representation_clean_ = true;
}
bool BuildTailArray() {
// If (*tail_) is already allocated, we have the invariant that
// its contents are canonical, so we do not need to do anything
// here in that case except return true.
if (tail_ == nullptr) {
if (!representation_clean_) {
// We have been asked to build the (*tail_) array, but we have
// no valid information from which to build it. The graph is
// in an unrecoverable, inconsistent state.
return false;
}
// Reallocate (*tail_) and rebuild its contents from the
// adjacency lists.
tail_.reset(new ZVector<NodeIndexType>);
tail_->Reserve(kFirstArc, max_num_arcs_ - 1);
typename Base::NodeIterator node_it(*this);
for (; node_it.Ok(); node_it.Next()) {
NodeIndexType node = node_it.Index();
typename Base::OutgoingArcIterator arc_it(*this, node);
for (; arc_it.Ok(); arc_it.Next()) {
(*tail_)[arc_it.Index()] = node;
}
}
}
DCHECK(TailArrayComplete());
return true;
}
void ReleaseTailArray() { tail_.reset(nullptr); }
// To be used in a DCHECK().
bool TailArrayComplete() const {
CHECK(tail_);
for (ArcIndexType arc = kFirstArc; arc < num_arcs_; ++arc) {
CHECK(CheckTailIndexValidity(arc));
CHECK(IsNodeValid((*tail_)[arc]));
}
return true;
}
// Returns a debug string containing all the information contained in the
// data structure in raw form.
std::string DebugString() const {
DCHECK(representation_clean_);
std::string result = "Arcs:(node, next arc) :\n";
for (ArcIndexType arc = kFirstArc; arc < num_arcs_; ++arc) {
result += " " + ArcDebugString(arc) + ":(" + NodeDebugString(head_[arc]) +
"," + ArcDebugString(next_adjacent_arc_[arc]) + ")\n";
}
result += "Node:First arc :\n";
for (NodeIndexType node = kFirstNode; node < num_nodes_; ++node) {
result += " " + NodeDebugString(node) + ":" +
ArcDebugString(first_incident_arc_[node]) + "\n";
}
return result;
}
private:
// Reserves space for the (*tail_) array.
//
// This method is separate from ReserveInternal() because our
// practice of making the (*tail_) array optional implies that the
// tail_ pointer might not be constructed when the ReserveInternal()
// method is called. Therefore we have this method also, and we
// ensure that it is called only when tail_ is guaranteed to have
// been initialized.
void ReserveTailArray(ArcIndexType new_max_num_arcs) {
if (tail_ != nullptr) {
// The (*tail_) values are already canonical, so we're just
// reserving additional space for new arcs that haven't been
// added yet.
if (tail_->Reserve(kFirstArc, new_max_num_arcs - 1)) {
for (ArcIndexType arc = tail_->max_index() + 1; arc < new_max_num_arcs;
++arc) {
tail_->Set(arc, kNilNode);
}
}
}
}
// Reserves space for the arrays indexed by arc indices, except
// (*tail_) even if it is present. We cannot grow the (*tail_) array
// in this method because this method is called from
// Base::Reserve(), which in turn is called from the base template
// class constructor. That base class constructor is called on *this
// before tail_ is constructed. Hence when this method is called,
// tail_ might contain garbage. This method can safely refer only to
// fields of the base template class, not to fields of *this outside
// the base template class.
//
// The strange situation in which this method of a derived class can
// refer only to members of the base class arises because different
// derived classes use the data members of the base class in
// slightly different ways. The purpose of this derived class
// method, then, is only to encode the derived-class-specific
// conventions for how the derived class uses the data members of
// the base class.
//
// To be specific, the forward-star graph representation, lacking
// reverse arcs, allocates only the positive index range for the
// head_ and next_adjacent_arc_ arrays, while the general
// representation allocates space for both positive- and
// negative-indexed arcs (i.e., both forward and reverse arcs).
void ReserveInternal(NodeIndexType new_max_num_nodes,
ArcIndexType new_max_num_arcs) {
head_.Reserve(kFirstArc, new_max_num_arcs - 1);
next_adjacent_arc_.Reserve(kFirstArc, new_max_num_arcs - 1);
for (ArcIndexType arc = max_num_arcs_; arc < new_max_num_arcs; ++arc) {
head_.Set(arc, kNilNode);
next_adjacent_arc_.Set(arc, kNilArc);
}
ReserveTailArray(new_max_num_arcs);
}
// Handles the part of AddArc() that is not in common with other
// graph classes based on the EbertGraphBase template.
void RecordArc(ArcIndexType arc, NodeIndexType tail, NodeIndexType head) {
head_.Set(arc, head);
Attach(tail, arc);
}
// Using the SetTail() method implies that the BuildRepresentation()
// method must be called to restore consistency before the graph is
// used.
void SetTail(const ArcIndexType arc, const NodeIndexType tail) {
DCHECK(CheckTailIndexValidity(arc));
CHECK(tail_);
representation_clean_ = false;
tail_->Set(arc, tail);
}
// Utility method to attach a new arc.
void Attach(NodeIndexType tail, ArcIndexType arc) {
DCHECK(CheckArcValidity(arc));
DCHECK(IsNodeValid(tail));
next_adjacent_arc_.Set(arc, first_incident_arc_[tail]);
first_incident_arc_.Set(tail, arc);
const NodeIndexType head = head_[arc];
DCHECK(IsNodeValid(head));
// Because Attach() is a public method, keeping (*tail_) canonical
// requires us to record the new arc's tail here.
if (tail_ != nullptr) {
DCHECK(CheckTailIndexValidity(arc));
tail_->Set(arc, tail);
}
}
// Utility method that finds the next outgoing arc.
ArcIndexType FindNextOutgoingArc(ArcIndexType arc) const {
DCHECK(CheckArcBounds(arc));
return arc;
}
private:
// Always returns true because for any ForwardEbertGraph, only
// direct arcs are represented, so all valid arc indices refer to
// arcs that are outgoing from their tail nodes.
bool IsOutgoing(const ArcIndex unused_arc,
const NodeIndex unused_node) const {
return true;
}
// Always returns true because for any ForwardEbertGraph, only
// outgoing arcs are represented, so all valid arc indices refer to
// direct arcs.
bool IsDirect(const ArcIndex unused_arc) const { return true; }
// Array of node indices, not always present. (*tail_)[i] contains
// the tail node of arc i. This array is not needed for normal graph
// traversal operations, but is used in optimizing the graph's
// layout so arcs are grouped by tail node, and can be used in one
// approach to serializing the graph.
//
// Invariants: At any time when we are not executing a method of
// this class, either tail_ == NULL or the tail_ array's contents
// are kept canonical. If tail_ != NULL, any method that modifies
// adjacency lists must also ensure (*tail_) is modified
// correspondingly. The converse does not hold: Modifications to
// (*tail_) are allowed without updating the adjacency lists. If
// such modifications take place, representation_clean_ must be set
// to false, of course, to indicate that the adjacency lists are no
// longer current.
std::unique_ptr<ZVector<NodeIndexType> > tail_;
};
// Traits for EbertGraphBase types, for use in testing and clients
// that work with both forward-only and forward/reverse graphs.
//
@@ -1501,85 +1177,6 @@ struct graph_traits {
static constexpr bool is_dynamic = true;
};
template <typename NodeIndexType, typename ArcIndexType>
struct graph_traits<ForwardEbertGraph<NodeIndexType, ArcIndexType> > {
static constexpr bool has_reverse_arcs = false;
static constexpr bool is_dynamic = true;
};
namespace or_internal {
// The TailArrayBuilder class template is not expected to be used by
// clients. It is a helper for the TailArrayManager template.
//
// The TailArrayBuilder for graphs with reverse arcs does nothing.
template <typename GraphType, bool has_reverse_arcs>
struct TailArrayBuilder {
explicit TailArrayBuilder(GraphType* unused_graph) {}
bool BuildTailArray() const { return true; }
};
// The TailArrayBuilder for graphs without reverse arcs calls the
// appropriate method on the graph from the TailArrayBuilder
// constructor.
template <typename GraphType>
struct TailArrayBuilder<GraphType, false> {
explicit TailArrayBuilder(GraphType* graph) : graph_(graph) {}
bool BuildTailArray() const { return graph_->BuildTailArray(); }
GraphType* const graph_;
};
// The TailArrayReleaser class template is not expected to be used by
// clients. It is a helper for the TailArrayManager template.
//
// The TailArrayReleaser for graphs with reverse arcs does nothing.
template <typename GraphType, bool has_reverse_arcs>
struct TailArrayReleaser {
explicit TailArrayReleaser(GraphType* unused_graph) {}
void ReleaseTailArray() const {}
};
// The TailArrayReleaser for graphs without reverse arcs calls the
// appropriate method on the graph from the TailArrayReleaser
// constructor.
template <typename GraphType>
struct TailArrayReleaser<GraphType, false> {
explicit TailArrayReleaser(GraphType* graph) : graph_(graph) {}
void ReleaseTailArray() const { graph_->ReleaseTailArray(); }
GraphType* const graph_;
};
} // namespace or_internal
template <typename GraphType>
class TailArrayManager {
public:
explicit TailArrayManager(GraphType* g) : graph_(g) {}
bool BuildTailArrayFromAdjacencyListsIfForwardGraph() const {
or_internal::TailArrayBuilder<GraphType,
graph_traits<GraphType>::has_reverse_arcs>
tail_array_builder(graph_);
return tail_array_builder.BuildTailArray();
}
void ReleaseTailArrayIfForwardGraph() const {
or_internal::TailArrayReleaser<GraphType,
graph_traits<GraphType>::has_reverse_arcs>
tail_array_releaser(graph_);
tail_array_releaser.ReleaseTailArray();
}
private:
GraphType* graph_;
};
template <typename GraphType>
class ArcFunctorOrderingByTailAndHead {
public:
@@ -1687,11 +1284,8 @@ class GraphBuilderFromArcs<GraphType, true> {
GraphType* Graph(PermutationCycleHandler<typename GraphType::ArcIndex>*
client_cycle_handler) {
if (sort_arcs_) {
TailArrayManager<GraphType> tail_array_manager(graph_);
tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph();
ArcFunctorOrderingByTailAndHead<GraphType> arc_ordering(*graph_);
graph_->GroupForwardArcsByFunctor(arc_ordering, client_cycle_handler);
tail_array_manager.ReleaseTailArrayIfForwardGraph();
}
GraphType* result = graph_;
delete this;

View File

@@ -195,8 +195,7 @@ void TestEbertGraph(const GraphType& graph,
template <typename GraphType>
class DebugStringEbertGraphTest : public ::testing::Test {};
typedef ::testing::Types<EbertGraph<int16_t, int16_t>,
ForwardEbertGraph<int16_t, int16_t> >
typedef ::testing::Types<EbertGraph<int16_t, int16_t> >
EbertGraphTypesForDebugStringTesting;
TYPED_TEST_SUITE(DebugStringEbertGraphTest,
@@ -298,20 +297,6 @@ TYPED_TEST(DebugStringEbertGraphTest, Test1) {
TestEbertGraph(graph, kGraphArcList, kExpectedAdjacencyList,
kExpectedIncomingArcList, kExpectedOutgoingArcList,
kExpectedDebugString, kExpectedForwardDebugString, "");
// The graph representation is already built, but nothing forbids us from
// testing that it can be rebuilt. To test this for forward graphs, we must
// first collect arc tail information from the adjacency lists, because those
// lists are the only source of information from which we can rebuild a
// forward graph if we haven't maintained its optional arc tail information
// until now.
TailArrayManager<TypeParam> tail_array_manager(&graph);
tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph();
graph.BuildRepresentation();
TestEbertGraph(graph, kGraphArcList, kExpectedAdjacencyList,
kExpectedIncomingArcList, kExpectedOutgoingArcList,
kExpectedDebugString, kExpectedForwardDebugString, "");
}
// Unfortunately, this class template has to be defined outside the Test2 method
@@ -442,8 +427,6 @@ TYPED_TEST(DebugStringEbertGraphTest, Test2) {
kExpectedIncomingArcList, kExpectedOutgoingArcList,
kExpectedDebugString, kExpectedForwardDebugString, "");
TailArrayManager<TypeParam> tail_array_manager(&graph);
tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph();
graph.GroupForwardArcsByFunctor(ArcFunctorByHead<TypeParam>(graph), nullptr);
const std::string kGraphHeadGroupedArcList =
@@ -631,8 +614,7 @@ TYPED_TEST(DebugStringEbertGraphTest, Test2) {
template <typename GraphType>
class DebugStringTestWithGraphBuildManager : public ::testing::Test {};
typedef ::testing::Types<EbertGraph<int16_t, int16_t>,
ForwardEbertGraph<int16_t, int16_t> >
typedef ::testing::Types<EbertGraph<int16_t, int16_t> >
GraphTypesForDebugStringTestWithGraphBuildManager;
TYPED_TEST_SUITE(DebugStringTestWithGraphBuildManager,
@@ -1017,8 +999,7 @@ TYPED_TEST(DebugStringTestWithGraphBuildManager, SortedArcsWithoutAnnotation) {
template <typename GraphType>
class TinyEbertGraphTest : public ::testing::Test {};
typedef ::testing::Types<EbertGraph<int8_t, int8_t>,
ForwardEbertGraph<int8_t, int8_t> >
typedef ::testing::Types<EbertGraph<int8_t, int8_t> >
TinyEbertGraphTypesForTesting;
TYPED_TEST_SUITE(TinyEbertGraphTest, TinyEbertGraphTypesForTesting);
@@ -1035,9 +1016,8 @@ TYPED_TEST(TinyEbertGraphTest, CheckDeathOnBadBounds) {
template <typename GraphType>
class SmallEbertGraphTest : public ::testing::Test {};
typedef ::testing::Types<
EbertGraph<int8_t, int8_t>, EbertGraph<int16_t, int16_t>,
ForwardEbertGraph<int8_t, int8_t>, ForwardEbertGraph<int16_t, int16_t> >
typedef ::testing::Types<EbertGraph<int8_t, int8_t>,
EbertGraph<int16_t, int16_t> >
SmallEbertGraphTypesForTesting;
TYPED_TEST_SUITE(SmallEbertGraphTest, SmallEbertGraphTypesForTesting);
@@ -1067,29 +1047,6 @@ TEST(EbertGraphTest, CheckBounds) {
EXPECT_TRUE(g.CheckArcValidity(g.Opposite(SmallStarGraph::kMaxNumArcs - 1)));
}
TEST(ForwardEbertGraphTest, CheckBounds) {
typedef ForwardEbertGraph<int16_t, int16_t> SmallStarGraph;
SmallStarGraph g(SmallStarGraph::kMaxNumNodes, SmallStarGraph::kMaxNumArcs);
EXPECT_TRUE(g.CheckArcBounds(SmallStarGraph::kNilArc));
EXPECT_FALSE(g.CheckArcValidity(SmallStarGraph::kNilArc));
EXPECT_FALSE(g.CheckArcValidity(SmallStarGraph::kMaxNumArcs));
EXPECT_TRUE(g.CheckArcValidity(g.SmallStarGraph::kMaxNumArcs - 1));
}
TEST(ForwardEbertGraphTest, ImpossibleBuildTailArray) {
typedef ForwardEbertGraph<int16_t, int16_t> SmallStarGraph;
SmallStarGraph g(3, 3);
ArcIndex arc = g.AddArc(0, 1);
// The SetHead() method is the easiest way to dirty the representation.
// Alternatively, since this is a FRIEND_TEST of the EbertGraphBase<>
// template, we could just set g.representation_clean_ = false. Once the
// representation is dirty, the adjacency lists are invalid and we haven't
// bothered to allocate and maintain the optional array of arc tails, so
// rebuilding that optional tail array is impossible.
g.SetHead(arc, 2);
EXPECT_FALSE(g.BuildTailArray());
}
template <typename GraphType, bool sort_arcs>
static void BM_RandomArcs(benchmark::State& state) {
const int kRandomSeed = 0;
@@ -1110,10 +1067,8 @@ static void BM_RandomArcs(benchmark::State& state) {
}
BENCHMARK_TEMPLATE2(BM_RandomArcs, StarGraph, false);
BENCHMARK_TEMPLATE2(BM_RandomArcs, ForwardStarGraph, false);
BENCHMARK_TEMPLATE2(BM_RandomArcs, StarGraph, true);
BENCHMARK_TEMPLATE2(BM_RandomArcs, ForwardStarGraph, true);
template <typename GraphType, bool sort_arcs>
static void BM_RandomAnnotatedArcs(benchmark::State& state) {
@@ -1139,10 +1094,8 @@ static void BM_RandomAnnotatedArcs(benchmark::State& state) {
}
BENCHMARK_TEMPLATE2(BM_RandomAnnotatedArcs, StarGraph, false);
BENCHMARK_TEMPLATE2(BM_RandomAnnotatedArcs, ForwardStarGraph, false);
BENCHMARK_TEMPLATE2(BM_RandomAnnotatedArcs, StarGraph, true);
BENCHMARK_TEMPLATE2(BM_RandomAnnotatedArcs, ForwardStarGraph, true);
template <typename GraphType>
static void BM_AddRandomArcsAndDoNotRetrieveGraph(benchmark::State& state) {
@@ -1164,6 +1117,5 @@ static void BM_AddRandomArcsAndDoNotRetrieveGraph(benchmark::State& state) {
}
BENCHMARK_TEMPLATE(BM_AddRandomArcsAndDoNotRetrieveGraph, StarGraph);
BENCHMARK_TEMPLATE(BM_AddRandomArcsAndDoNotRetrieveGraph, ForwardStarGraph);
} // namespace operations_research

View File

@@ -0,0 +1,374 @@
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/fp_min_cost_flow.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <optional>
#include <ostream>
#include <type_traits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "ortools/base/logging.h"
#include "ortools/graph/min_cost_flow.h"
#include "ortools/util/fp_roundtrip_conv.h"
#include "ortools/util/saturated_arithmetic.h"
namespace operations_research {
namespace {
// Largest representable integer flow quantity; saturated sums that reach this
// value are treated as overflow.
constexpr SimpleMinCostFlow::FlowQuantity kMaxFlowQuantity =
    std::numeric_limits<SimpleMinCostFlow::FlowQuantity>::max();
// Largest representable floating-point flow quantity.
constexpr SimpleFloatingPointMinCostFlow::FPFlowQuantity kMaxFPFlow =
    std::numeric_limits<SimpleFloatingPointMinCostFlow::FPFlowQuantity>::max();
using ::operations_research::internal::ScaleFlow;
// Returns 2^log2_scale, the multiplicative scaling factor derived from
// log2_scale. Exact for all binary floating-point values.
double Scale(const int log2_scale) {
  return std::scalbn(1.0, log2_scale);
}
// Returns 2^(-log2_scale), the reciprocal of the scaling factor derived from
// log2_scale. Exact for all binary floating-point values.
double InvScale(const int log2_scale) {
  return std::scalbn(1.0, -log2_scale);
}
// Returns true if the max in-flow or the max out-flow are >= kMaxFlowQuantity.
//
// A node's max in-flow (resp. out-flow) is the saturated sum of its demand
// (resp. supply) and the capacities of its incoming (resp. outgoing) arcs.
// Because CapAdd() saturates at kMaxFlowQuantity, a node whose sum equals
// kMaxFlowQuantity indicates a (potential) overflow.
bool AreInOrOutFlowsOverflowing(const SimpleMinCostFlow& min_cost_flow) {
  const SimpleMinCostFlow::NodeIndex num_nodes = min_cost_flow.NumNodes();
  const SimpleMinCostFlow::ArcIndex num_arcs = min_cost_flow.NumArcs();
  if (num_nodes == 0) {
    return false;
  }
  std::vector<SimpleMinCostFlow::FlowQuantity> max_node_in_flow(num_nodes);
  std::vector<SimpleMinCostFlow::FlowQuantity> max_node_out_flow(num_nodes);
  for (SimpleMinCostFlow::NodeIndex node = 0; node < num_nodes; ++node) {
    SimpleMinCostFlow::FlowQuantity supply = min_cost_flow.Supply(node);
    // A negative supply is a demand, i.e. an in-flow for this node.
    if (supply < 0) {
      max_node_in_flow[node] = -supply;
    } else {
      max_node_out_flow[node] = supply;
    }
  }
  for (SimpleMinCostFlow::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    const SimpleMinCostFlow::NodeIndex head = min_cost_flow.Head(arc);
    const SimpleMinCostFlow::NodeIndex tail = min_cost_flow.Tail(arc);
    const SimpleMinCostFlow::FlowQuantity capacity =
        min_cost_flow.Capacity(arc);
    static_assert(std::is_same_v<SimpleMinCostFlow::FlowQuantity, int64_t>,
                  "CapAdd() only exists for int64_t");
    // CapAdd() saturates at kMaxFlowQuantity instead of overflowing.
    max_node_in_flow[head] = CapAdd(max_node_in_flow[head], capacity);
    max_node_out_flow[tail] = CapAdd(max_node_out_flow[tail], capacity);
  }
  // It is safe to assume absl::c_max_element() returns a valid iterator as we
  // exit this function immediately when `num_nodes == 0`.
  const SimpleMinCostFlow::FlowQuantity max_nodes_in_or_out_flow =
      std::max(*absl::c_max_element(max_node_in_flow),
               *absl::c_max_element(max_node_out_flow));
  return max_nodes_in_or_out_flow == kMaxFlowQuantity;
}
} // namespace
// Pre-reserves storage in the underlying integer solver and in the
// floating-point mirrors of supplies, capacities and flows; avoids
// reallocations while the problem is being built.
SimpleFloatingPointMinCostFlow::SimpleFloatingPointMinCostFlow(
    const NodeIndex reserve_num_nodes, const ArcIndex reserve_num_arcs)
    : integer_flow_(/*reserve_num_nodes=*/reserve_num_nodes,
                    /*reserve_num_arcs=*/reserve_num_arcs) {
  if (reserve_num_nodes > 0) {
    node_supply_.reserve(reserve_num_nodes);
  }
  if (reserve_num_arcs > 0) {
    arc_capacity_.reserve(reserve_num_arcs);
    arc_flow_.reserve(reserve_num_arcs);
  }
}
// Adds an arc tail->head with a floating-point capacity and an integer unit
// cost, and returns the index of the new arc.
SimpleFloatingPointMinCostFlow::ArcIndex
SimpleFloatingPointMinCostFlow::AddArcWithCapacityAndUnitCost(
    const NodeIndex tail, const NodeIndex head, const FPFlowQuantity capacity,
    const CostValue unit_cost) {
  // Add an arc in the integer flow with a temporary capacity of 0. We will
  // update it when SolveMaxFlowWithMinCost() is called.
  const ArcIndex arc = integer_flow_.AddArcWithCapacityAndUnitCost(
      /*tail=*/tail, /*head=*/head,
      /*capacity=*/0, /*unit_cost=*/unit_cost);
  // Arcs are created consecutively, so the new index must equal the current
  // size of the floating-point mirror arrays.
  CHECK_EQ(arc, arc_capacity_.size());
  arc_capacity_.push_back(capacity);
  arc_flow_.push_back(0.0);
  // AddArcWithCapacityAndUnitCost() may have added new nodes based on tail and
  // head; we need to take them into account.
  const NodeIndex new_num_nodes = integer_flow_.NumNodes();
  if (new_num_nodes > node_supply_.size()) {
    // New nodes get a default supply of 0.
    node_supply_.resize(new_num_nodes);
  }
  return arc;
}
// Records the floating-point supply of `node` (negative values are demands).
void SimpleFloatingPointMinCostFlow::SetNodeSupply(
    const NodeIndex node, const FPFlowQuantity supply) {
  // Register the node in the integer flow with a placeholder supply of 0; the
  // actual scaled integer supply is computed at solve time (see
  // ScaleSupplyAndCapacity()).
  integer_flow_.SetNodeSupply(node, 0);
  if (node >= node_supply_.size()) {
    node_supply_.resize(node + 1);
  }
  node_supply_[node] = supply;
}
// Solves the max-flow-with-min-cost problem: scales the floating-point
// supplies/capacities into integer_flow_, runs the integer solver, then maps
// the integer solution back to floating-point arc flows.
SimpleFloatingPointMinCostFlow::Status
SimpleFloatingPointMinCostFlow::SolveMaxFlowWithMinCost() {
  if (!ScaleSupplyAndCapacity()) {
    // Resets the previously computed flow.
    absl::c_fill(arc_flow_, 0.0);
    return Status::BAD_CAPACITY_RANGE;
  }
  const Status solve_status = integer_flow_.SolveMaxFlowWithMinCost();
  // Convert the integer solution back into floating-point arc flows.
  UpdateFlowFromIntegerFlow(solve_status);
  return solve_status;
}
std::optional<SimpleFloatingPointMinCostFlow::FPFlowQuantity>
SimpleFloatingPointMinCostFlow::ComputeMaxInOrOutFlow() {
const NodeIndex num_nodes = integer_flow_.NumNodes();
const ArcIndex num_arcs = integer_flow_.NumArcs();
CHECK_EQ(num_nodes, node_supply_.size());
CHECK_EQ(num_arcs, arc_capacity_.size());
if (num_nodes == 0) {
return 0.0;
}
// Compute the max in-flow and max out-flow for each node.
std::vector<FPFlowQuantity> max_node_in_flow(num_nodes, 0.0);
std::vector<FPFlowQuantity> max_node_out_flow(num_nodes, 0.0);
for (NodeIndex node = 0; node < num_nodes; ++node) {
const FPFlowQuantity node_supply = node_supply_[node];
if (!std::isfinite(node_supply)) {
LOG(ERROR) << "Node " << node << " supply is not finite: " << node_supply;
return std::nullopt;
}
if (node_supply < 0.0) {
// Negative supply is demand, thus an input.
max_node_in_flow[node] = -node_supply;
} else {
max_node_out_flow[node] = node_supply;
}
}
for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
const FPFlowQuantity arc_capacity = arc_capacity_[arc];
if (!std::isfinite(arc_capacity)) {
LOG(ERROR) << "Arc " << arc
<< " capacity is not finite: " << arc_capacity;
return std::nullopt;
}
// Negative capacities are considered zero.
if (arc_capacity <= 0.0) {
continue;
}
max_node_in_flow[integer_flow_.Head(arc)] += arc_capacity_[arc];
max_node_out_flow[integer_flow_.Tail(arc)] += arc_capacity_[arc];
}
// We already exited when num_nodes == 0 so we know that returned iterators
// are valid.
return std::max(*absl::c_max_element(max_node_in_flow),
*absl::c_max_element(max_node_out_flow));
}
// Picks the largest power-of-two scale such that the scaled supplies and
// capacities do not overflow any node's integer in/out flow, then installs
// the scaled integer values on integer_flow_. Returns false on failure.
bool SimpleFloatingPointMinCostFlow::ScaleSupplyAndCapacity() {
  const NodeIndex num_nodes = integer_flow_.NumNodes();
  const ArcIndex num_arcs = integer_flow_.NumArcs();
  CHECK_EQ(num_nodes, node_supply_.size());
  CHECK_EQ(num_arcs, arc_capacity_.size());
  // Compute the scaling factor for flows.
  //
  // We choose to use the largest scaling that would not produce an integer
  // overflow when solving integer_flow_.
  //
  // We could use a smaller scaling though, as long as it would not lose any
  // non-zero bits. This would be a bit more complex though. On top of that
  // always using the largest value may help finding overflow bugs even with
  // simple test data.
  //
  // We want to make sure that the resulting integer flow does not produce a
  // BAD_CAPACITY_RANGE. To do so we must make sure that for each node:
  // * the sum of incoming arcs capacities + the max(0, node_supply),
  // * and the sum of outgoing arcs capacities + the max(0, -node_supply)
  // are less (<) than the max value of FlowQuantity.
  //
  // We thus compute these maximum values using floating point computations and
  // use them to compute a scaling factor. Since floating point computations are
  // rounded the end result may not be correct and the integer sum may still
  // overflow. When that is the case we simply divide the scale by 2 and
  // retry. We could do better by changing the rounding mode of floating point
  // computation to FE_UPWARD instead of FE_TONEAREST to get an upper-bound on
  // the sums but this requires the compiler to properly handle changing
  // rounding modes, which is not reliable.
  num_tested_scales_ = 0;  // Always reset.
  if (num_nodes == 0) {
    // If we don't have nodes, we don't have arcs either. Thus we have nothing
    // to scale.
    log2_scale_ = 0;
    return true;
  }
  const std::optional<FPFlowQuantity> max_nodes_in_or_out_flow =
      ComputeMaxInOrOutFlow();
  if (!max_nodes_in_or_out_flow.has_value()) {
    // A LOG(ERROR) already occurred in ComputeMaxInOrOutFlow().
    return false;
  }
  if (!std::isfinite(max_nodes_in_or_out_flow.value())) {
    // Note that here we could scale down the floating point values to make the
    // sum not overflow. But in practice the caller should avoid this situation.
    LOG(ERROR) << "The computed max node in or out flow is not finite: "
               << max_nodes_in_or_out_flow.value();
    return false;
  }
  // Compute the initial scale based on the max in or out flow over all nodes.
  // We want that:
  //
  //   static_cast<SimpleMinCostFlow::FlowQuantity>(
  //       std::round(Scale(log2_scale_) * max_nodes_in_or_out_flow))
  //   < kMaxFlowQuantity
  //
  // To make scaling and unscaling more precise, we will only use power-of-two
  // values for scale_. Thus we compute p such that:
  //
  //   2^p <= kMaxFlowQuantity / max_nodes_in_or_out_flow
  //
  // `f = std::frexp(x, &p)` returns the floating point number of the form `f *
  // 2^e` such that:
  //
  //   2^(e-1) ≤ x < 2^e
  //
  // Thus we can use it to compute a good starting candidate for the scaling
  // factor. Note though that since the division and the computation of
  // max_nodes_in_or_out_flow have been subject to rounding, this starting value
  // may still lead to overflow of the scaled values. We will thus have a loop
  // to try to lower p until we find a value of the scale that works.
  if (max_nodes_in_or_out_flow == 0.0) {
    // When we have no flow on any node, we can simply scale with 2^0 = 1.
    log2_scale_ = 0;
  } else {
    // Note that if max_nodes_in_or_out_flow is a denormal number (< 2^-970)
    // then the following division can overflow. If this is the case we simply
    // replace the scale by the max possible value.
    double scale_upper_bound = static_cast<double>(kMaxFlowQuantity) /
                               max_nodes_in_or_out_flow.value();
    if (!std::isfinite(scale_upper_bound)) {
      scale_upper_bound = kMaxFPFlow;
    }
    const double f = std::frexp(scale_upper_bound, &log2_scale_);
    // The result of std::frexp() is such that:
    //   2^(p-1) <= scale_upper_bound < 2^p
    // we thus need to decrement the resulting value to get the lower bound.
    //
    // When f == 0.5, this means that actually scale_upper_bound == 2^(p-1). In
    // that case we know that using this value as the scale will lead to an
    // overflow so we simply decrement it by 2 instead.
    log2_scale_ -= f == 0.5 ? 2 : 1;
  }
  // Iterate on values of p until we find one that does not overflow in
  // integers. We use saturated_arithmetic.h and detect the issue when we
  // overflow FlowQuantity.
  //
  // Note that we don't expect to do more than two loops usually. If we reach
  // cases where we need more loops, then maybe we should use a binary search
  // approach here. Note that in any case we do a maximum of ~2000 loops (from
  // the highest representable power-of-two to the smallest one).
  const int initial_log2_scale = log2_scale_;
  // We stop when the scale inverse is not representable anymore in a `double`
  // (which occurs when we reach denormal number, a.k.a. very close to zero).
  for (/*empty*/; std::isfinite(InvScale(log2_scale_)); --log2_scale_) {
    const double scale = Scale(log2_scale_);
    ++num_tested_scales_;
    // Sets the integer supply quantities and capacities.
    for (NodeIndex node = 0; node < num_nodes; ++node) {
      integer_flow_.SetNodeSupply(node, ScaleFlow(node_supply_[node], scale));
    }
    for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
      // Note that here it is safe to use std::max() as we have tested above
      // that capacities are not NaN.
      integer_flow_.SetArcCapacity(
          arc, ScaleFlow(std::max(0.0, arc_capacity_[arc]), scale));
    }
    // Test the loop end condition.
    if (!AreInOrOutFlowsOverflowing(integer_flow_)) {
      return true;
    }
    VLOG(1) << "scale = " << RoundTripDoubleFormat(scale) << " (i.e. 2^"
            << log2_scale_
            << ") lead to an integer overflow; decrementing log2_scale_ and "
               "trying again";
  }
  // It may not be possible to reach this code. If we ever do, we still want
  // to treat this as an error.
  LOG(ERROR) << "Failed to compute a positive scale that works; started with "
                "log2_scale_ = "
             << initial_log2_scale
             << " and stopped at log2_scale_ = " << log2_scale_
             << " with scale_ = " << RoundTripDoubleFormat(Scale(log2_scale_))
             << " 1.0/scale_ = "
             << RoundTripDoubleFormat(InvScale(log2_scale_));
  return false;
}
void SimpleFloatingPointMinCostFlow::UpdateFlowFromIntegerFlow(
    const Status solve_status) {
  // On failure SimpleMinCostFlow::arc_flow_ is usually left unset; report a
  // zero flow everywhere instead of exposing stale values.
  if (solve_status != Status::OPTIMAL && solve_status != Status::FEASIBLE) {
    absl::c_fill(arc_flow_, 0.0);
    return;
  }
  CHECK_EQ(integer_flow_.NumArcs(), arc_flow_.size());
  // ScaleSupplyAndCapacity() only selects log2_scale_ values for which
  // InvScale() is finite.
  const double inv_scale = InvScale(log2_scale_);
  for (ArcIndex arc = 0; arc < integer_flow_.NumArcs(); ++arc) {
    arc_flow_[arc] = inv_scale * integer_flow_.Flow(arc);
  }
}
SimpleFloatingPointMinCostFlow::SolveStats
SimpleFloatingPointMinCostFlow::LastSolveStats() const {
  // Snapshot of the scaling data gathered by the last solve.
  SolveStats stats;
  stats.scale = Scale(log2_scale_);
  stats.num_tested_scales = num_tested_scales_;
  return stats;
}
std::ostream& operator<<(
    std::ostream& out,
    const SimpleFloatingPointMinCostFlow::SolveStats& stats) {
  // Single-line, brace-delimited rendering of all the stats fields.
  out << "{ scale: " << RoundTripDoubleFormat(stats.scale);
  out << ", num_tested_scales: " << stats.num_tested_scales << " }";
  return out;
}
} // namespace operations_research

View File

@@ -0,0 +1,226 @@
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_GRAPH_FP_MIN_COST_FLOW_H_
#define OR_TOOLS_GRAPH_FP_MIN_COST_FLOW_H_
#include <cmath>
#include <limits>
#include <optional>
#include <ostream>
#include <vector>
#include "absl/log/check.h"
#include "ortools/graph/min_cost_flow.h"
namespace operations_research {
// An implementation of an approximate min-cost-max-flow algorithm supporting
// floating point numbers for flow capacities.
//
// It uses the integer algorithm of SimpleMinCostFlow by scaling and rounding
// floating point supply quantities and capacities to make them fit on
// integers. This can be seen as using fixed-point arithmetic.
//
// Note that this only supports min-cost-max-flow and not min-cost-flow. The
// reason is that with floating point numbers it is harder to define that all
// demand and supply are met. This would require some tolerances and testing
// those tolerances would require solving the max-flow anyway.
class SimpleFloatingPointMinCostFlow {
 public:
  using NodeIndex = SimpleMinCostFlow::NodeIndex;
  using ArcIndex = SimpleMinCostFlow::ArcIndex;
  using CostValue = SimpleMinCostFlow::CostValue;
  using FPFlowQuantity = double;
  using Status = SimpleMinCostFlow::Status;
  // Statistics associated with a call to SolveMaxFlowWithMinCost().
  //
  // Returned by LastSolveStats().
  struct SolveStats {
    // The scaling factor used to convert the FPFlowQuantity to a
    // SimpleMinCostFlow::FlowQuantity.
    double scale = 1.0;
    // The number of values tested for the scaling factor.
    //
    // Internally SolveMaxFlowWithMinCost() will compute a first scaling factor
    // with floating point arithmetic. Due to the approximate nature of this
    // computation, it may still be too high. If the resulting integer numbers
    // lead to integer overflows, a new lower scaling factor is computed.
    int num_tested_scales = 0;
    // Prints all the stats values on a single line.
    friend std::ostream& operator<<(std::ostream& out, const SolveStats& stats);
  };
  explicit SimpleFloatingPointMinCostFlow(NodeIndex reserve_num_nodes = 0,
                                          ArcIndex reserve_num_arcs = 0);
  // This type is neither copyable nor movable.
  SimpleFloatingPointMinCostFlow(const SimpleFloatingPointMinCostFlow&) =
      delete;
  SimpleFloatingPointMinCostFlow& operator=(
      const SimpleFloatingPointMinCostFlow&) = delete;
  // Adds a directed arc from tail to head to the underlying graph with
  // a given capacity and cost per unit of flow.
  // * Node indices must be non-negative (>= 0).
  // * The capacity must be finite. When not, SolveMaxFlowWithMinCost() returns
  //   BAD_CAPACITY_RANGE. Negative values are OK and will be considered zero
  //   (which is useful when computed values are close to zero but negative).
  // * The unit cost can take any integer value (even negative).
  // * Self-looping and duplicate arcs are supported.
  // * After the method finishes, NumArcs() == the returned ArcIndex + 1.
  ArcIndex AddArcWithCapacityAndUnitCost(NodeIndex tail, NodeIndex head,
                                         FPFlowQuantity capacity,
                                         CostValue unit_cost);
  // Sets the supply of the given node. The node index must be non-negative (>=
  // 0). Nodes implicitly created will have a default supply set to 0. A demand
  // is modeled as a negative supply.
  //
  // The supply quantity must be finite. When not, SolveMaxFlowWithMinCost()
  // returns BAD_CAPACITY_RANGE.
  void SetNodeSupply(NodeIndex node, FPFlowQuantity supply);
  // Computes a maximum-flow with minimum cost.
  //
  // This function returns the Status of the underlying
  // SimpleMinCostFlow::SolveMaxFlowWithMinCost().
  //
  // It also returns BAD_CAPACITY_RANGE:
  // * either when the arc capacities or node supply quantities are NaN or
  //   infinite,
  // * or when the computed in-flow or out-flow of a node results in an infinite
  //   value,
  // * or if it failed to find a scale factor to make capacities and supply
  //   quantities fit in integers.
  // In case of failure, a LOG(ERROR) should contain the explanation.
  Status SolveMaxFlowWithMinCost();
  // Returns the flow on arc; this only makes sense for a successful
  // SolveMaxFlowWithMinCost().
  //
  // If called before SolveMaxFlowWithMinCost(), it will return 0.0.
  //
  // Note: It is possible that there is more than one optimal solution. The
  // algorithm is deterministic so it will always return the same solution for
  // a given problem. However, there is no guarantee of this from one code
  // version to the next (but the code does not change often).
  FPFlowQuantity Flow(ArcIndex arc) const { return arc_flow_[arc]; }
  // Returns the statistics of the last call to SolveMaxFlowWithMinCost().
  SolveStats LastSolveStats() const;
  // Accessors for the user given data. The implementation will crash if "arc"
  // is not in [0, NumArcs()) or "node" is not in [0, NumNodes()).
  NodeIndex NumNodes() const { return integer_flow_.NumNodes(); }
  ArcIndex NumArcs() const { return integer_flow_.NumArcs(); }
  NodeIndex Tail(ArcIndex arc) const { return integer_flow_.Tail(arc); }
  NodeIndex Head(ArcIndex arc) const { return integer_flow_.Head(arc); }
  FPFlowQuantity Capacity(ArcIndex arc) const { return arc_capacity_[arc]; }
  FPFlowQuantity Supply(NodeIndex node) const { return node_supply_[node]; }
  CostValue UnitCost(ArcIndex arc) const { return integer_flow_.UnitCost(arc); }
 private:
  // Returns the max value over all in-flows and out-flows of each node. If
  // some nodes or arcs have non-finite supply or capacities, returns
  // std::nullopt after the rationale has been printed to LOG(ERROR).
  //
  // Precisely, returns max(max(in_flow(n) ∀ n), max(out_flow(n) ∀ n)). Returns
  // 0.0 when there are no nodes.
  std::optional<FPFlowQuantity> ComputeMaxInOrOutFlow();
  // Sets the integer supply and capacity values on integer_flow_ from
  // node_supply_ and arc_capacity_ floating point values after computing the
  // log2_scale_. It will also update num_tested_scales_.
  //
  // The integer quantities are computed by:
  //
  //   static_cast<FlowQuantity>(
  //       std::round(Scale(log2_scale_) * fp_flow_quantity));
  //
  // Returns true in case of success, false otherwise (after the rationale was
  // printed to LOG(ERROR)).
  bool ScaleSupplyAndCapacity();
  // Updates arc_flow_ using the values of integer_flow_ and the status of the
  // solve.
  void UpdateFlowFromIntegerFlow(Status solve_status);
  // The underlying integer min-cost-max-flow solver.
  SimpleMinCostFlow integer_flow_;
  // The log2 of the scale applied to FPFlowQuantity values to get the integer
  // FlowQuantity ones in integer_flow_. When ScaleSupplyAndCapacity() succeeds
  // this will contain a value for which both Scale() and InvScale() are finite
  // and non-zero.
  //
  // See Scale() and InvScale().
  int log2_scale_ = 0;
  // The number of values of scale_ tested during the call to
  // ScaleSupplyAndCapacity().
  int num_tested_scales_ = 0;
  // Invariant on the following vectors: their size matches
  // integer_flow_.NumNodes() or integer_flow_.NumArcs().
  std::vector<FPFlowQuantity> arc_capacity_;
  std::vector<FPFlowQuantity> node_supply_;
  std::vector<FPFlowQuantity> arc_flow_;
};
// This namespace contains internal functions only there for tests.
namespace internal {
// Scale the input floating point flow to the integer flow, clamping the
// result to [-kMaxFlowQuantity, kMaxFlowQuantity].
//
// The scale and fp_flow must be finite (CHECKed).
//
// This function is internal. It is only exposed since by construction the input
// fp_flow and scale of ScaleFlow() should never trigger the code paths that
// lead to overflows. But we still want to make sure that if they are ever
// triggered they work as expected.
inline SimpleMinCostFlow::FlowQuantity ScaleFlow(
    const SimpleFloatingPointMinCostFlow::FPFlowQuantity fp_flow,
    const double scale) {
  CHECK(std::isfinite(scale)) << scale;
  CHECK(std::isfinite(fp_flow)) << fp_flow;
  const double rounded_scaled_flow = std::round(scale * fp_flow);
  // Note that we compare to kMaxFlowQuantity with >= and not > as:
  // * the comparison will convert kMaxFlowQuantity to `double` first (see
  //   https://en.cppreference.com/w/cpp/language/usual_arithmetic_conversions),
  // * and kMaxFlowQuantity (2^63 - 1) is not exactly representable in a
  //   `double` (that only has 53 bits of mantissa),
  // * thus it will be rounded to the nearest `double`, i.e. 2^63,
  // * so if we were to compare with > only, we would not reject the `double`
  //   2^63 that can't fit in an int64_t; comparing with >= will reject this
  //   number and only accept `double` values strictly below 2^63, which all
  //   fit in an int64_t.
  constexpr SimpleMinCostFlow::FlowQuantity kMaxFlowQuantity =
      std::numeric_limits<SimpleMinCostFlow::FlowQuantity>::max();
  if (rounded_scaled_flow >= kMaxFlowQuantity) {
    return kMaxFlowQuantity;
  }
  if (rounded_scaled_flow <= -kMaxFlowQuantity) {
    return -kMaxFlowQuantity;
  }
  return static_cast<SimpleMinCostFlow::FlowQuantity>(rounded_scaled_flow);
}
} // namespace internal
} // namespace operations_research
#endif // OR_TOOLS_GRAPH_FP_MIN_COST_FLOW_H_

View File

@@ -0,0 +1,604 @@
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/fp_min_cost_flow.h"
#include <cmath>
#include <limits>
#include <ostream>
#include <sstream>
#include <string>
#include <vector>
#include "absl/base/log_severity.h"
#include "absl/base/no_destructor.h"
#include "absl/strings/escaping.h"
#include "gtest/gtest.h"
#include "ortools/base/gmock.h"
#include "ortools/graph/min_cost_flow.h"
#include "ortools/util/fp_roundtrip_conv.h"
#include "testing/base/public/mock-log.h"
namespace operations_research {
namespace {
using ::testing::AnyNumber;
using ::testing::DoubleNear;
using ::testing::HasSubstr;
using ::testing::kDoNotCaptureLogsYet;
using ::testing::Mock;
using ::testing::ScopedMockLog;
using Status = SimpleFloatingPointMinCostFlow::Status;
using FPFlowQuantity = SimpleFloatingPointMinCostFlow::FPFlowQuantity;
using CostValue = SimpleFloatingPointMinCostFlow::CostValue;
using NodeIndex = SimpleFloatingPointMinCostFlow::NodeIndex;
using ArcIndex = SimpleFloatingPointMinCostFlow::ArcIndex;
using ::operations_research::internal::ScaleFlow;
// Common sentinel values shared by the tests below.
constexpr double kInf = std::numeric_limits<double>::infinity();
constexpr double kNaN = std::numeric_limits<double>::quiet_NaN();
constexpr SimpleFloatingPointMinCostFlow::CostValue kMaxCostValue =
    std::numeric_limits<SimpleFloatingPointMinCostFlow::CostValue>::max();
// constexpr (not just const) for consistency with the siblings above/below;
// numeric_limits<double>::max() is a constant expression.
constexpr SimpleFloatingPointMinCostFlow::FPFlowQuantity kMaxFPFlow =
    std::numeric_limits<SimpleFloatingPointMinCostFlow::FPFlowQuantity>::max();
constexpr SimpleFloatingPointMinCostFlow::FPFlowQuantity kMinFPFlow =
    std::numeric_limits<
        SimpleFloatingPointMinCostFlow::FPFlowQuantity>::denorm_min();
// Returns the path of the source file that emits LOG(ERROR) in case of failure.
//
// It is meant to be used for log mocking assertions. It needs to be a `const
// std::string&` and can't be an absl::string_view. Thus this function returns a
// reference on a static.
const std::string& SourceFilePath() {
  // NoDestructor avoids static-destruction-order issues at program exit.
  static const absl::NoDestructor<std::string> kModule(
      "ortools/graph/fp_min_cost_flow.cc");
  return *kModule;
}
// An empty problem is trivially optimal and creates no nodes or arcs.
TEST(SimpleFloatingPointMinCostFlowTest, Empty) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  EXPECT_EQ(fp_min_cost_flow.NumNodes(), 0);
  EXPECT_EQ(fp_min_cost_flow.NumArcs(), 0);
}
// SetNodeSupply() on a not-yet-existing node must implicitly create all nodes
// up to that index, each with a zero default supply.
TEST(SimpleFloatingPointMinCostFlowTest, SetNodeSupplyCreateNodes) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // Setting the supply of node 3 should create four nodes, even if the supply
  // is 0.0.
  fp_min_cost_flow.SetNodeSupply(/*node=*/3, /*supply=*/0.0);
  ASSERT_EQ(fp_min_cost_flow.NumNodes(), 4);
  EXPECT_EQ(fp_min_cost_flow.Supply(0), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(1), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(2), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(3), 0.0);
  // Setting arbitrary positive and negative values.
  fp_min_cost_flow.SetNodeSupply(/*node=*/1, /*supply=*/13.75);
  fp_min_cost_flow.SetNodeSupply(/*node=*/0, /*supply=*/-28.125);
  ASSERT_EQ(fp_min_cost_flow.NumNodes(), 4);
  EXPECT_EQ(fp_min_cost_flow.Supply(0), -28.125);
  EXPECT_EQ(fp_min_cost_flow.Supply(1), 13.75);
  EXPECT_EQ(fp_min_cost_flow.Supply(2), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(3), 0.0);
}
// Adding arcs implicitly creates all nodes in [0, max(tail, head)], each with
// a zero default supply, while recording the arc endpoints, capacity and cost.
TEST(SimpleFloatingPointMinCostFlowTest, AddArcCreateNodes) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // Adding an arc between nodes 1 and 3 should create four nodes, with zero
  // supply.
  const ArcIndex arc0 = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/3, /*head=*/1, /*capacity=*/0.0, /*unit_cost=*/0);
  const ArcIndex arc1 = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/15.25, /*unit_cost=*/3);
  ASSERT_EQ(fp_min_cost_flow.NumNodes(), 4);
  EXPECT_EQ(fp_min_cost_flow.Supply(0), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(1), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(2), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Supply(3), 0.0);
  ASSERT_EQ(fp_min_cost_flow.NumArcs(), 2);
  EXPECT_EQ(fp_min_cost_flow.Tail(arc0), 3);
  EXPECT_EQ(fp_min_cost_flow.Head(arc0), 1);
  EXPECT_EQ(fp_min_cost_flow.Capacity(arc0), 0.0);
  // UnitCost() returns an integer CostValue; compare with an integer literal
  // (was 0.0) for consistency with the arc1 check below.
  EXPECT_EQ(fp_min_cost_flow.UnitCost(arc0), 0);
  EXPECT_EQ(fp_min_cost_flow.Tail(arc1), 0);
  EXPECT_EQ(fp_min_cost_flow.Head(arc1), 1);
  EXPECT_EQ(fp_min_cost_flow.Capacity(arc1), 15.25);
  EXPECT_EQ(fp_min_cost_flow.UnitCost(arc1), 3);
}
// Value-parameterized test of invalid node supply values.
class BadNodeSupplyTest : public testing::TestWithParam<double> {};
// A non-finite supply must make the solve fail with BAD_CAPACITY_RANGE and
// emit an explanatory LOG(ERROR).
TEST_P(BadNodeSupplyTest, FailingSolve) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  fp_min_cost_flow.SetNodeSupply(/*node=*/0, /*supply=*/GetParam());
  ScopedMockLog log(kDoNotCaptureLogsYet);
  EXPECT_CALL(log, Log).Times(AnyNumber());  // Ignore unexpected logs.
  EXPECT_CALL(log, Log(base_logging::ERROR, SourceFilePath(),
                       HasSubstr("supply is not finite")));
  log.StartCapturingLogs();
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::BAD_CAPACITY_RANGE);
  log.StopCapturingLogs();
  Mock::VerifyAndClearExpectations(&log);
}
// Tests that failures properly reset computed arc flows before returning.
TEST_P(BadNodeSupplyTest, FailureResetsArcFlows) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // First make a successful solve.
  const ArcIndex arc = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/5.25, /*unit_cost=*/0);
  fp_min_cost_flow.SetNodeSupply(0, 10.0);
  fp_min_cost_flow.SetNodeSupply(1, -10.0);
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::OPTIMAL);
  EXPECT_EQ(fp_min_cost_flow.Flow(arc), 5.25);
  // Then set the invalid supply; the next solve should fail and reset the arc
  // flow.
  fp_min_cost_flow.SetNodeSupply(/*node=*/0, /*supply=*/GetParam());
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::BAD_CAPACITY_RANGE);
  EXPECT_EQ(fp_min_cost_flow.Flow(arc), 0.0);
}
// Exercise both BadNodeSupplyTest cases with +inf, -inf and NaN supplies.
INSTANTIATE_TEST_SUITE_P(All, BadNodeSupplyTest,
                         testing::Values(kInf, -kInf, kNaN));
// Value-parameterized test of invalid arc capacity values.
class BadArcCapacityTest : public testing::TestWithParam<double> {};
// A non-finite arc capacity must make the solve fail with BAD_CAPACITY_RANGE
// and emit an explanatory LOG(ERROR).
TEST_P(BadArcCapacityTest, FailingSolve) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/GetParam(), /*unit_cost=*/0);
  ScopedMockLog log(kDoNotCaptureLogsYet);
  EXPECT_CALL(log, Log).Times(AnyNumber());  // Ignore unexpected logs.
  EXPECT_CALL(log, Log(base_logging::ERROR, SourceFilePath(),
                       HasSubstr("capacity is not finite")));
  log.StartCapturingLogs();
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::BAD_CAPACITY_RANGE);
  log.StopCapturingLogs();
  Mock::VerifyAndClearExpectations(&log);
}
// Tests that failures properly reset computed arc flows before returning.
TEST_P(BadArcCapacityTest, FailureResetsArcFlows) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // First make a successful solve.
  const ArcIndex arc = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/5.25, /*unit_cost=*/0);
  fp_min_cost_flow.SetNodeSupply(0, 10.0);
  fp_min_cost_flow.SetNodeSupply(1, -10.0);
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::OPTIMAL);
  EXPECT_EQ(fp_min_cost_flow.Flow(arc), 5.25);
  // Then add an arc with the invalid capacity; the next solve should fail and
  // reset the arc flow.
  const ArcIndex bad_arc = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/GetParam(), /*unit_cost=*/0);
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            SimpleFloatingPointMinCostFlow::Status::BAD_CAPACITY_RANGE);
  EXPECT_EQ(fp_min_cost_flow.Flow(arc), 0.0);
  EXPECT_EQ(fp_min_cost_flow.Flow(bad_arc), 0.0);
}
// Exercise both BadArcCapacityTest cases with +inf, -inf and NaN capacities.
INSTANTIATE_TEST_SUITE_P(All, BadArcCapacityTest,
                         testing::Values(kInf, -kInf, kNaN));
// Parameters for the TwoNodesOneArcTest value-parameterized test.
struct TwoNodesOneArcTestParams {
  // Name of the test; this will be used as a suffix to the test name and thus
  // should be snake_case with no spaces.
  std::string name;
  // Supply (positive) or demand (negative) of the arc's head node.
  FPFlowQuantity head_node_supply = 0.0;
  // Supply (positive) or demand (negative) of the arc's tail node.
  FPFlowQuantity tail_node_supply = 0.0;
  // Capacity of the single arc.
  FPFlowQuantity arc_capacity = 0.0;
  // Unit cost of the single arc.
  CostValue arc_unit_cost = 0;
  // The expected computed flow on the arc; it is tested using
  // expected_flow_tolerance absolute tolerance.
  FPFlowQuantity expected_flow = 0.0;
  FPFlowQuantity expected_flow_tolerance = 0.0;
  // Prints all fields on a single line; used by gtest to describe a failing
  // parameterized case.
  friend std::ostream& operator<<(std::ostream& out,
                                  const TwoNodesOneArcTestParams& params) {
    out << "{ name: '" << absl::CEscape(params.name) << "', head_node_supply: "
        << RoundTripDoubleFormat(params.head_node_supply)
        << ", tail_node_supply: "
        << RoundTripDoubleFormat(params.tail_node_supply)
        << ", arc_capacity: " << RoundTripDoubleFormat(params.arc_capacity)
        << ", arc_unit_cost: " << params.arc_unit_cost
        << ", expected_flow: " << RoundTripDoubleFormat(params.expected_flow)
        << ", expected_flow_tolerance: "
        << RoundTripDoubleFormat(params.expected_flow_tolerance) << " }";
    return out;
  }
};
// Value-parameterized tests with a graph containing a single arc and two nodes.
// The parameter fully describes the instance and its expected solution.
class TwoNodesOneArcTest
    : public testing::TestWithParam<TwoNodesOneArcTestParams> {};
// Solves the two-node/one-arc instance described by the parameters and checks
// the resulting arc flow within the requested absolute tolerance.
TEST_P(TwoNodesOneArcTest, SolveMaxFlowWithMinCost) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  const NodeIndex kTailNode = 0;
  const NodeIndex kHeadNode = 1;
  const ArcIndex arc = fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/kTailNode, /*head=*/kHeadNode,
      /*capacity=*/GetParam().arc_capacity,
      /*unit_cost=*/GetParam().arc_unit_cost);
  fp_min_cost_flow.SetNodeSupply(kTailNode, GetParam().tail_node_supply);
  fp_min_cost_flow.SetNodeSupply(kHeadNode, GetParam().head_node_supply);
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  EXPECT_THAT(
      fp_min_cost_flow.Flow(arc),
      DoubleNear(GetParam().expected_flow, GetParam().expected_flow_tolerance));
}
INSTANTIATE_TEST_SUITE_P(
All, TwoNodesOneArcTest,
testing::Values(
// All parameters at 0. Expect no flow.
TwoNodesOneArcTestParams{.name = "empty"},
// A flow of 1.0 with small integer capacities and supply.
TwoNodesOneArcTestParams{
.name = "small_integers",
.head_node_supply = -1.0,
.tail_node_supply = 2.0,
.arc_capacity = 3.0,
.arc_unit_cost = 0,
.expected_flow = 1.0,
.expected_flow_tolerance = 0.0,
},
// Some big but not huge capacity and supply numbers.
TwoNodesOneArcTestParams{
.name = "big_numbers",
.head_node_supply = -1.0e18,
.tail_node_supply = 2.0e16,
.arc_capacity = 5.0e16,
.arc_unit_cost = 0,
.expected_flow = 2.0e16,
.expected_flow_tolerance = 0.0,
},
// Big capacity and small supply. We use -1.1 so that we have a mantissa
// with a lot of non-zeros (in base-2 -1.1 is -1.0001100110011...).
//
// With a value of 5.3e10, the largest scaling factor so that:
//
// std::round(2^p * 5.3e10) < (2^63 - 1)
//
// is 2**27. With this value, the 1.1 theoretical flow after scaling and
// unscaling will be:
//
// std::round(1.1 * 2.0^27) * 2.0^-27 = 1.1000000014901161
//
// the error is thus:
//
// 1.4901160305669237e-09
//
// We simply use 1.5e-9 as tolerance.
TwoNodesOneArcTestParams{
.name = "big_capacity_small_supply",
.head_node_supply = -1.1,
.tail_node_supply = 2.5,
.arc_capacity = 5.3e10,
.arc_unit_cost = 0,
.expected_flow = 1.1,
.expected_flow_tolerance = 1.5e-9, // See comment above.
},
// Small capacity and big supply. See previous test for rationale.
TwoNodesOneArcTestParams{
.name = "small_capacity_big_supply",
.head_node_supply = -5.3e10,
.tail_node_supply = 4.2e8,
.arc_capacity = 1.1,
.arc_unit_cost = 0,
.expected_flow = 1.1,
.expected_flow_tolerance = 1.0e-5, // See comment above.
},
// tiny capacity (1.5) and huge supply (2^100). We expect scaling to
// result in a 0 integer capacity as the scale factor that makes 2^100
// fit into an int64_t will make the small capacity close to zero.
TwoNodesOneArcTestParams{
.name = "tiny_capacity_huge_supply",
.head_node_supply = -std::pow(2.0, 100.0),
.tail_node_supply = std::pow(2.0, 80.0),
.arc_capacity = 1.5,
.arc_unit_cost = 0,
.expected_flow = 0,
.expected_flow_tolerance = 0.0,
},
// Huge capacity (2^100) and tiny supply values (1.5). See previous test
// for rationale.
TwoNodesOneArcTestParams{
.name = "huge_capacity_tiny_supply",
.head_node_supply = -1.5,
.tail_node_supply = 1.5,
.arc_capacity = std::pow(2.0, 100.0),
.arc_unit_cost = 0,
.expected_flow = 0,
.expected_flow_tolerance = 0.0,
},
      // A negative capacity should be considered as zero flow.
TwoNodesOneArcTestParams{
.name = "negative_capacity",
.head_node_supply = -1.0,
.tail_node_supply = 2.0,
.arc_capacity = -3.0,
.arc_unit_cost = 0,
.expected_flow = 0.0,
.expected_flow_tolerance = 0.0,
},
// Test scaling when the numbers are the smallest possible. The computed
// flow is likely 0. We use 4 * kMinFPFlow as a reasonable tolerance.
TwoNodesOneArcTestParams{
.name = "min_denormal_flow",
.head_node_supply = -kMinFPFlow,
.tail_node_supply = kMinFPFlow,
.arc_capacity = kMinFPFlow,
.arc_unit_cost = 0,
.expected_flow = kMinFPFlow,
.expected_flow_tolerance = 4 * kMinFPFlow,
},
// Test when supply are the smallest possible and capacity is zero (so
// that for each node we only have kMinFPFlow in or out flow).
TwoNodesOneArcTestParams{
.name = "min_denormal_supply_no_flow",
.head_node_supply = -kMinFPFlow,
.tail_node_supply = kMinFPFlow,
.arc_capacity = 0.0,
.arc_unit_cost = 0,
.expected_flow = 0.0,
.expected_flow_tolerance = 0.0,
}, ),
[](const testing::TestParamInfo<TwoNodesOneArcTestParams>& info) {
return info.param.name;
});
// Verifies that a failure of the underlying integer solver is propagated:
// the status is returned as-is and the arc flow stays at zero.
TEST(SimpleFloatingPointMinCostFlowTest, IntegerSolverFailing) {
  SimpleFloatingPointMinCostFlow solver;
  solver.SetNodeSupply(0, 1.0);
  solver.SetNodeSupply(1, -1.0);
  // A unit cost of kMaxCostValue is out of range for the integer solver and
  // makes it fail with BAD_COST_RANGE.
  const ArcIndex bad_arc = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1,
      /*capacity=*/1.0,
      /*unit_cost=*/kMaxCostValue);
  EXPECT_EQ(solver.SolveMaxFlowWithMinCost(), Status::BAD_COST_RANGE);
  EXPECT_EQ(solver.Flow(bad_arc), 0.0);
}
// Verifies that a failing solve clears the flows computed by a previous
// successful solve instead of leaving stale values behind.
TEST(SimpleFloatingPointMinCostFlowTest, IntegerSolverFailingResetsFlow) {
  SimpleFloatingPointMinCostFlow solver;
  // Start with a solve that succeeds and puts a non-zero flow on the arc.
  const ArcIndex good_arc = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1,
      /*capacity=*/1.0,
      /*unit_cost=*/0);
  solver.SetNodeSupply(0, 1.0);
  solver.SetNodeSupply(1, -1.0);
  EXPECT_EQ(solver.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  EXPECT_EQ(solver.Flow(good_arc), 1.0);
  // Now add an arc whose unit cost is out of range: the next solve must fail
  // and reset every arc flow, including the previously non-zero one.
  const ArcIndex bad_arc = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1, /*capacity=*/1.0, /*unit_cost=*/kMaxCostValue);
  EXPECT_EQ(solver.SolveMaxFlowWithMinCost(), Status::BAD_COST_RANGE);
  EXPECT_EQ(solver.Flow(good_arc), 0.0);
  EXPECT_EQ(solver.Flow(bad_arc), 0.0);
}
// Verifies that an overflow while summing arc capacities in `double` is
// detected, reported as BAD_CAPACITY_RANGE, and logged as an ERROR.
TEST(SimpleFloatingPointMinCostFlowTest, FloatingPointSumOverflow) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // We use two arcs pointing the same node with floating point capacities that
  // overflows the `double` exponent range. This will result in an overflow when
  // computing the maximum intake of the node which should result in an error.
  //
  // Note that we could update SimpleFloatingPointMinCostFlow to deal with that
  // by scaling the numbers down eventually.
  fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1,
      /*capacity=*/kMaxFPFlow,
      /*unit_cost=*/0);
  fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/1,
      /*capacity=*/kMaxFPFlow,
      /*unit_cost=*/0);
  // Expect exactly one ERROR log from this file mentioning "not finite";
  // any other log lines are tolerated. The expectation order matters: the
  // catch-all must be registered first so the specific one takes precedence.
  ScopedMockLog log(kDoNotCaptureLogsYet);
  EXPECT_CALL(log, Log).Times(AnyNumber());  // Ignore unexpected logs.
  EXPECT_CALL(
      log, Log(base_logging::ERROR, SourceFilePath(), HasSubstr("not finite")));
  log.StartCapturingLogs();
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(),
            Status::BAD_CAPACITY_RANGE);
  log.StopCapturingLogs();
  // Verify the ERROR log was emitted before `log` leaves scope.
  Mock::VerifyAndClearExpectations(&log);
}
// Verifies the scale-retry logic: when the first scale derived from the
// in/out-flow bounds overflows the integer supply range, the solver must try
// a smaller scale and still return the optimal solution.
TEST(SimpleFloatingPointMinCostFlowTest, FirstScaleFailed) {
  SimpleFloatingPointMinCostFlow fp_min_cost_flow;
  // We build a corner case where the evaluation of the in/out-flow of nodes in
  // double would lead to a starting scale value that is too high.
  //
  // The chosen `supply` value is such that adding arc capacities one by one
  // (each arc has 1.0 capacity) does not change it. Thus the computed in-flow
  // and out-flow of both nodes is `supply`.
  //
  // On top of that computing the initial scale will lead to a scale of 2, which
  // would be OK as 2 * supply is a valid integer supply. The issue is that each
  // arc integer capacity is 2 and we have 2048 of them. And now:
  //
  // 2 * supply + 2 * 2048 > 2^63 - 1
  //
  // We thus will have to loop and try using a scale of 1.0 which will work.
  //
  // Note that this test relies on the implementation adding nodes supply
  // first, then arc capacities.
  const FPFlowQuantity supply = std::nextafter(std::ldexp(1.0, 62), -kInf);
  fp_min_cost_flow.SetNodeSupply(/*node=*/0, supply);
  fp_min_cost_flow.SetNodeSupply(/*node=*/1, -supply);
  std::vector<ArcIndex> arcs;
  for (int i = 0; i < 2048; ++i) {
    arcs.push_back(fp_min_cost_flow.AddArcWithCapacityAndUnitCost(
        /*tail=*/0, /*head=*/1,
        /*capacity=*/1.0, /*unit_cost=*/0));
  }
  EXPECT_EQ(fp_min_cost_flow.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  // Scale 2 overflowed and the retry with scale 1 succeeded: two scales tried.
  EXPECT_EQ(fp_min_cost_flow.LastSolveStats().num_tested_scales, 2);
  for (const ArcIndex arc : arcs) {
    // Use ASSERT_EQ() here to prevent having too many errors. All arcs are
    // equivalent so if there is an error it is likely to happen for each arc
    // anyway.
    ASSERT_EQ(fp_min_cost_flow.Flow(arc), 1.0) << "arc: " << arc;
  }
}
// Verifies that when the maximum in/out-flow of a node is an exact power of
// two, the initially computed scale is already valid and exactly one scale is
// tested.
TEST(SimpleFloatingPointMinCostFlowTest, FirstScaleSucceeds) {
  SimpleFloatingPointMinCostFlow solver;
  solver.SetNodeSupply(/*node=*/0, 2.0);
  EXPECT_EQ(solver.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  EXPECT_EQ(solver.LastSolveStats().num_tested_scales, 1);
}
// Checks that unit costs are honored on a small transportation instance:
//
//   supplies: n0=4, n1=3, n2=4; demands: n3=5, n4=4.
//   arcs: n0->n3 (cap 5.1, cost 2), n1->n3 (cap 4, cost 1),
//         n1->n4 (cap 5, cost 2),   n2->n4 (cap 3, cost 3).
//
// Expected optimal flows:
//   * n3's demand of 5 is served by n1 first (cheapest inbound arc), then n0.
//   * n4's demand of 4 is served by n1 first, then n2.
//   * n1 sends only 2 units to n3 (not 3): sending more would starve n4,
//     since n2 can provide at most 3 units.
TEST(SimpleFloatingPointMinCostFlowTest, Assignment) {
  SimpleFloatingPointMinCostFlow solver;
  const FPFlowQuantity supplies[] = {4.0, 3.0, 4.0, -5.0, -4.0};
  for (int node = 0; node < 5; ++node) {
    solver.SetNodeSupply(node, supplies[node]);
  }
  const ArcIndex a03 = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/0, /*head=*/3, /*capacity=*/5.1, /*unit_cost=*/2);
  const ArcIndex a13 = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/1, /*head=*/3, /*capacity=*/4.0, /*unit_cost=*/1);
  const ArcIndex a14 = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/1, /*head=*/4, /*capacity=*/5.0, /*unit_cost=*/2);
  const ArcIndex a24 = solver.AddArcWithCapacityAndUnitCost(
      /*tail=*/2, /*head=*/4, /*capacity=*/3.0, /*unit_cost=*/3);
  EXPECT_EQ(solver.SolveMaxFlowWithMinCost(), Status::OPTIMAL);
  EXPECT_EQ(solver.Flow(a03), 3);
  EXPECT_EQ(solver.Flow(a13), 2);
  EXPECT_EQ(solver.Flow(a14), 1);
  EXPECT_EQ(solver.Flow(a24), 3);
}
// Exercises ScaleFlow() on boundary values: out-of-range products must
// saturate at +/-kMaxFlowQuantity, and integer-representable doubles must
// round-trip exactly.
TEST(ScaleFlowTest, AllValues) {
  constexpr SimpleMinCostFlow::FlowQuantity kMaxFlowQuantity =
      std::numeric_limits<SimpleMinCostFlow::FlowQuantity>::max();
  // Any product exceeding the integer range saturates at +/-kMaxFlowQuantity.
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/kMaxFPFlow,
                      /*scale=*/std::numeric_limits<double>::max()),
            kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/-kMaxFPFlow,
                      /*scale=*/std::numeric_limits<double>::max()),
            -kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/kMaxFPFlow, /*scale=*/1.0), kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/-kMaxFPFlow, /*scale=*/1.0),
            -kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/2.0 * kMaxFlowQuantity, /*scale=*/1.0),
            kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/-2.0 * kMaxFlowQuantity, /*scale=*/1.0),
            -kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/kMaxFlowQuantity, /*scale=*/1.0),
            kMaxFlowQuantity);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/-kMaxFlowQuantity, /*scale=*/1.0),
            -kMaxFlowQuantity);
  // The integer that is the `double` just before the rounded value of
  // kMaxFlowQuantity. This rounded value does not fit in an integer but its
  // predecessor will.
  const FPFlowQuantity kMaxFlowQuantityPredecessor =
      std::nextafter(static_cast<FPFlowQuantity>(kMaxFlowQuantity), 0.0);
  const SimpleMinCostFlow::FlowQuantity kMaxFlowQuantityPredecessorAsInt =
      static_cast<SimpleMinCostFlow::FlowQuantity>(kMaxFlowQuantityPredecessor);
  ASSERT_LT(kMaxFlowQuantityPredecessorAsInt, kMaxFlowQuantity);
  // Compare against the *integer* value: ScaleFlow() returns an integer
  // FlowQuantity, and comparing it with the `double` operand would silently
  // convert the result back to floating point before the comparison.
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/kMaxFlowQuantityPredecessor, /*scale=*/1.0),
            kMaxFlowQuantityPredecessorAsInt);
  EXPECT_EQ(ScaleFlow(/*fp_flow=*/-kMaxFlowQuantityPredecessor, /*scale=*/1.0),
            -kMaxFlowQuantityPredecessorAsInt);
}
// ScaleFlow() requires finite arguments: a NaN or infinite `scale` or
// `fp_flow` is a programming error and is expected to terminate the process
// with a message naming the offending argument.
TEST(ScaleFlowDeathTest, NaNScale) {
  EXPECT_DEATH(ScaleFlow(/*fp_flow=*/1.0, /*scale=*/kNaN), "scale");
}

TEST(ScaleFlowDeathTest, InfScale) {
  EXPECT_DEATH(ScaleFlow(/*fp_flow=*/1.0, /*scale=*/kInf), "scale");
}

TEST(ScaleFlowDeathTest, NaNFlow) {
  EXPECT_DEATH(ScaleFlow(/*fp_flow=*/kNaN, /*scale=*/1.0), "fp_flow");
}

TEST(ScaleFlowDeathTest, InfFlow) {
  EXPECT_DEATH(ScaleFlow(/*fp_flow=*/kInf, /*scale=*/1.0), "fp_flow");
}
// Checks the human-readable streaming format of SolveStats.
TEST(SolveStatsTest, Stream) {
  const SimpleFloatingPointMinCostFlow::SolveStats stats = {
      .scale = 0.1,
      .num_tested_scales = 4,
  };
  std::ostringstream stream;
  stream << stats;
  EXPECT_EQ(stream.str(), "{ scale: 0.1, num_tested_scales: 4 }");
}
} // namespace
} // namespace operations_research

View File

@@ -100,6 +100,7 @@
%unignore operations_research::SimpleMinCostFlow::~SimpleMinCostFlow;
%rename (addArcWithCapacityAndUnitCost)
operations_research::SimpleMinCostFlow::AddArcWithCapacityAndUnitCost;
%rename (setArcCapacity) operations_research::SimpleMinCostFlow::SetArcCapacity;
%rename (setNodeSupply) operations_research::SimpleMinCostFlow::SetNodeSupply;
%rename (solve) operations_research::SimpleMinCostFlow::Solve;
%rename (solveMaxFlowWithMinCost)

View File

@@ -230,7 +230,8 @@ class LinearSumAssignment {
// Constructor for the case in which we will build the graph
// incrementally as we discover arc costs, as might be done with any
// of the dynamic graph representations such as StarGraph or ForwardStarGraph.
// of the dynamic graph representations such as `ReverseArcListGraph` or
// `ForwardStarGraph`.
LinearSumAssignment(const GraphType& graph, NodeIndex num_left_nodes);
// Constructor for the case in which the underlying graph cannot be built
@@ -266,21 +267,6 @@ class LinearSumAssignment {
operations_research::PermutationCycleHandler<typename GraphType::ArcIndex>*
ArcAnnotationCycleHandler();
// Optimizes the layout of the graph for the access pattern our
// implementation will use.
//
// REQUIRES for LinearSumAssignment template instantiation if a call
// to the OptimizeGraphLayout() method is compiled: GraphType is a
// dynamic graph, i.e., one that implements the
// GroupForwardArcsByFunctor() member template method.
//
// If analogous optimization is needed for LinearSumAssignment
// instances based on static graphs, the graph layout should be
// constructed such that each node's outgoing arcs are sorted by
// head node index before the
// LinearSumAssignment<GraphType>::SetGraph() method is called.
void OptimizeGraphLayout(GraphType* graph);
// Allows tests, iterators, etc., to inspect our underlying graph.
inline const GraphType& Graph() const { return *graph_; }
@@ -1088,22 +1074,6 @@ LinearSumAssignment<GraphType, CostValue>::ArcAnnotationCycleHandler() {
&scaled_arc_cost_);
}
template <typename GraphType, typename CostValue>
void LinearSumAssignment<GraphType, CostValue>::OptimizeGraphLayout(
GraphType* graph) {
// The graph argument is only to give us a non-const-qualified
// handle on the graph we already have. Any different graph is
// nonsense.
DCHECK_EQ(graph_, graph);
const ArcIndexOrderingByTailNode<GraphType> compare(*graph_);
CostValueCycleHandler<typename GraphType::ArcIndex, CostValue> cycle_handler(
&scaled_arc_cost_);
TailArrayManager<GraphType> tail_array_manager(graph);
tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph();
graph->GroupForwardArcsByFunctor(compare, &cycle_handler);
tail_array_manager.ReleaseTailArrayIfForwardGraph();
}
template <typename GraphType, typename CostValue>
CostValue LinearSumAssignment<GraphType, CostValue>::NewEpsilon(
const CostValue current_epsilon) const {

View File

@@ -101,11 +101,10 @@ struct AssignmentProblemSetup {
template <typename GraphType>
class LinearSumAssignmentTestWithGraphBuilder : public ::testing::Test {};
typedef ::testing::Types<
EbertGraph<int16_t, int16_t>, ForwardEbertGraph<int16_t, int16_t>,
EbertGraph<int16_t, ArcIndex>, ForwardEbertGraph<int16_t, ArcIndex>,
EbertGraph<NodeIndex, int16_t>, ForwardEbertGraph<NodeIndex, int16_t>,
StarGraph, ForwardStarGraph, util::ListGraph<>, util::ReverseArcListGraph<>>
typedef ::testing::Types<EbertGraph<int16_t, int16_t>,
EbertGraph<int16_t, ArcIndex>,
EbertGraph<NodeIndex, int16_t>, StarGraph,
util::ListGraph<>, util::ReverseArcListGraph<>>
GraphTypesForAssignmentTestingWithGraphBuilder;
TYPED_TEST_SUITE(LinearSumAssignmentTestWithGraphBuilder,
@@ -384,49 +383,14 @@ static ArcIndex CreateArcWithCost(
template <typename GraphType>
class LinearSumAssignmentTestWithDynamicGraph : public ::testing::Test {};
typedef ::testing::Types<
EbertGraph<int16_t, int16_t>, ForwardEbertGraph<int16_t, int16_t>,
EbertGraph<int16_t, ArcIndex>, ForwardEbertGraph<NodeIndex, int16_t>,
EbertGraph<NodeIndex, ArcIndex>, ForwardEbertGraph<NodeIndex, ArcIndex>>
typedef ::testing::Types<EbertGraph<int16_t, int16_t>,
EbertGraph<int16_t, ArcIndex>,
EbertGraph<NodeIndex, ArcIndex>>
DynamicGraphTypesForAssignmentTesting;
TYPED_TEST_SUITE(LinearSumAssignmentTestWithDynamicGraph,
DynamicGraphTypesForAssignmentTesting);
TYPED_TEST(LinearSumAssignmentTestWithDynamicGraph, GraphLayoutTest) {
// A complete bipartite 3x3 graph (9 edges).
TypeParam g(6, 9);
LinearSumAssignment<TypeParam> a(g, 3);
// We add arcs in a higgledy-piggledy order, with costs that indicate the
// order the arcs should have after the layout is optimized.
CreateArcWithCost(0, 3, 1, &g, &a); // in cycle [0]
CreateArcWithCost(2, 5, 9, &g, &a); // in cycle [1 8 3]
CreateArcWithCost(1, 5, 6, &g, &a); // in cycle [2 5]
CreateArcWithCost(0, 4, 2, &g, &a); // in cycle [1 8 3]
CreateArcWithCost(1, 4, 5, &g, &a); // in cycle [4]
CreateArcWithCost(0, 5, 3, &g, &a); // in cycle [2 5]
CreateArcWithCost(2, 4, 8, &g, &a); // in cycle [6 7]
CreateArcWithCost(2, 3, 7, &g, &a); // in cycle [6 7]
CreateArcWithCost(1, 3, 4, &g, &a); // in cycle [1 8 3]
EXPECT_TRUE(a.ComputeAssignment());
EXPECT_EQ(1 + 5 + 9, a.GetCost());
a.OptimizeGraphLayout(&g);
EXPECT_TRUE(a.ComputeAssignment());
EXPECT_EQ(1 + 5 + 9, a.GetCost());
// The optimized graph layout is supposed to group arcs by their tail nodes
// and sequence them within each group by their head nodes.
TailArrayManager<TypeParam> tail_array_manager(&g);
tail_array_manager.BuildTailArrayFromAdjacencyListsIfForwardGraph();
for (int i = 0; i < 9; ++i) {
EXPECT_EQ(i + 1, a.ArcCost(i));
EXPECT_EQ(i / 3, g.Tail(i));
EXPECT_EQ(3 + i % 3, g.Head(i));
}
tail_array_manager.ReleaseTailArrayIfForwardGraph();
}
// The EpsilonOptimal test and the PrecisionWarning test cannot be parameterized
// by the type of the underlying graph because doing so is not supported by the
// FRIEND_TEST() macro used in the LinearSumAssignment class template to grant
@@ -449,11 +413,11 @@ TEST(LinearSumAssignmentFriendTest, EpsilonOptimal) {
#if LARGE
TEST(LinearSumAssignmentPrecisionTest, PrecisionWarning) {
const NodeIndex kNumLeftNodes = 10000000;
ForwardStarGraph g(2 * kNumLeftNodes, 2 * kNumLeftNodes);
LinearSumAssignment<ForwardStarGraph> a(g, kNumLeftNodes);
util::ListGraph<> g(2 * kNumLeftNodes, 2 * kNumLeftNodes);
LinearSumAssignment<util::ListGraph<>> a(g, kNumLeftNodes);
int64_t node_count = 0;
for (NodeIndex left_node = ForwardStarGraph::kFirstNode;
node_count < kNumLeftNodes; ++node_count, ++left_node) {
for (NodeIndex left_node = 0; node_count < kNumLeftNodes;
++node_count, ++left_node) {
CreateArcWithCost(left_node, kNumLeftNodes + left_node, kNumLeftNodes, &g,
&a);
}
@@ -592,11 +556,7 @@ void BM_ConstructRandomAssignmentProblem(benchmark::State& state) {
}
BENCHMARK_TEMPLATE2(BM_ConstructRandomAssignmentProblem, StarGraph, false);
BENCHMARK_TEMPLATE2(BM_ConstructRandomAssignmentProblem, ForwardStarGraph,
false);
BENCHMARK_TEMPLATE2(BM_ConstructRandomAssignmentProblem, StarGraph, true);
BENCHMARK_TEMPLATE2(BM_ConstructRandomAssignmentProblem, ForwardStarGraph,
true);
template <typename GraphType>
void BM_ConstructRandomAssignmentProblemWithNewGraphApi(
@@ -632,16 +592,14 @@ void BM_SolveRandomAssignmentProblem(benchmark::State& state) {
kLeftNodes, kAverageDegree, kCostLimit, &graph, &assignment);
for (auto _ : state) {
assignment->ComputeAssignment();
EXPECT_EQ(65849286, assignment->GetCost());
EXPECT_EQ(65415697, assignment->GetCost());
}
state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
kLeftNodes * kAverageDegree);
}
BENCHMARK_TEMPLATE2(BM_SolveRandomAssignmentProblem, StarGraph, false);
BENCHMARK_TEMPLATE2(BM_SolveRandomAssignmentProblem, ForwardStarGraph, false);
BENCHMARK_TEMPLATE2(BM_SolveRandomAssignmentProblem, StarGraph, true);
BENCHMARK_TEMPLATE2(BM_SolveRandomAssignmentProblem, ForwardStarGraph, true);
template <typename GraphType>
void BM_SolveRandomAssignmentProblemWithNewGraphApi(benchmark::State& state) {
@@ -654,7 +612,7 @@ void BM_SolveRandomAssignmentProblemWithNewGraphApi(benchmark::State& state) {
kLeftNodes, kAverageDegree, kCostLimit, &graph, &assignment);
for (auto _ : state) {
assignment->ComputeAssignment();
EXPECT_EQ(65849286, assignment->GetCost());
EXPECT_EQ(65415697, assignment->GetCost());
}
state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
kLeftNodes * kAverageDegree);
@@ -678,7 +636,7 @@ void BM_ConstructAndSolveRandomAssignmentProblem(benchmark::State& state) {
ConstructRandomAssignment<GraphType, optimize_layout>(
kLeftNodes, kAverageDegree, kCostLimit, &graph, &assignment);
assignment->ComputeAssignment();
EXPECT_EQ(65849286, assignment->GetCost());
EXPECT_EQ(65415697, assignment->GetCost());
}
state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
kLeftNodes * kAverageDegree);
@@ -686,12 +644,8 @@ void BM_ConstructAndSolveRandomAssignmentProblem(benchmark::State& state) {
BENCHMARK_TEMPLATE2(BM_ConstructAndSolveRandomAssignmentProblem, StarGraph,
false);
BENCHMARK_TEMPLATE2(BM_ConstructAndSolveRandomAssignmentProblem,
ForwardStarGraph, false);
BENCHMARK_TEMPLATE2(BM_ConstructAndSolveRandomAssignmentProblem, StarGraph,
true);
BENCHMARK_TEMPLATE2(BM_ConstructAndSolveRandomAssignmentProblem,
ForwardStarGraph, true);
template <typename GraphType>
void BM_ConstructAndSolveRandomAssignmentProblemWithNewGraphApi(
@@ -705,7 +659,7 @@ void BM_ConstructAndSolveRandomAssignmentProblemWithNewGraphApi(
ConstructRandomAssignmentForNewGraphApi<GraphType>(
kLeftNodes, kAverageDegree, kCostLimit, &graph, &assignment);
assignment->ComputeAssignment();
EXPECT_EQ(65849286, assignment->GetCost());
EXPECT_EQ(65415697, assignment->GetCost());
}
state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
kLeftNodes * kAverageDegree);

View File

@@ -1043,6 +1043,10 @@ SimpleMinCostFlow::ArcIndex SimpleMinCostFlow::AddArcWithCapacityAndUnitCost(
return arc;
}
// Overwrites the capacity of an existing arc. `arc` must be an index
// previously returned by AddArcWithCapacityAndUnitCost().
// NOTE(review): capacities are stored under the user-facing arc index; any
// internal arc permutation appears to be handled elsewhere via PermutedArc()
// -- confirm before relying on this after a solve.
void SimpleMinCostFlow::SetArcCapacity(ArcIndex arc, FlowQuantity capacity) {
  arc_capacity_[arc] = capacity;
}
SimpleMinCostFlow::ArcIndex SimpleMinCostFlow::PermutedArc(ArcIndex arc) {
return arc < arc_permutation_.size() ? arc_permutation_[arc] : arc;
}

View File

@@ -287,6 +287,11 @@ class SimpleMinCostFlow : public MinCostFlowBase {
FlowQuantity capacity,
CostValue unit_cost);
// Modifies the capacity of the given arc. The arc index must be non-negative
// (>= 0); it must be an index returned by a previous call to
// AddArcWithCapacityAndUnitCost().
void SetArcCapacity(ArcIndex arc, FlowQuantity capacity);
// Sets the supply of the given node. The node index must be non-negative (>=
// 0). Nodes implicitly created will have a default supply set to 0. A demand
// is modeled as a negative supply.
@@ -683,11 +688,15 @@ extern template class GenericMinCostFlow<
/*ArcFlowType=*/int16_t,
/*ArcScaledCostType=*/int32_t>;
// Default MinCostFlow instance that uses StarGraph.
// New clients should use SimpleMinCostFlow if they can.
class MinCostFlow : public GenericMinCostFlow<StarGraph> {
public:
explicit MinCostFlow(const StarGraph* graph) : GenericMinCostFlow(graph) {}
// TODO(b/385094969): Remove this alias after 2025-07-01 to give or-tools users
// a grace period.
struct MinCostFlow : public MinCostFlowBase {
template <typename = void>
MinCostFlow() {
static_assert(false,
"MinCostFlow is deprecated. Use `SimpleMinCostFlow` or "
"`GenericMinCostFlow` with a specific graph type instead.");
}
};
#endif // SWIG

View File

@@ -0,0 +1,91 @@
// Copyright 2010-2025 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/fp_min_cost_flow.h"
#include <sstream>
#include "ortools/graph/min_cost_flow.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
using ::operations_research::MinCostFlowBase;
using ::operations_research::SimpleFloatingPointMinCostFlow;
using ::pybind11::arg;
// Python bindings for SimpleFloatingPointMinCostFlow. Scalar methods are
// exposed under snake_case names; `pybind11::vectorize` additionally exposes
// numpy-array variants of the per-arc/per-node methods.
PYBIND11_MODULE(fp_min_cost_flow, m) {
  pybind11::class_<SimpleFloatingPointMinCostFlow> smcf(
      m, "SimpleFloatingPointMinCostFlow");
  smcf.def(pybind11::init<>());
  smcf.def("add_arc_with_capacity_and_unit_cost",
           &SimpleFloatingPointMinCostFlow::AddArcWithCapacityAndUnitCost,
           arg("tail"), arg("head"), arg("capacity"), arg("unit_cost"));
  // Vectorized variant: takes arrays and returns an array of arc indices.
  smcf.def("add_arcs_with_capacity_and_unit_cost",
           pybind11::vectorize(
               &SimpleFloatingPointMinCostFlow::AddArcWithCapacityAndUnitCost),
           arg("tails"), arg("heads"), arg("capacities"), arg("unit_costs"));
  smcf.def("set_node_supply", &SimpleFloatingPointMinCostFlow::SetNodeSupply,
           arg("node"), arg("supply"));
  // Vectorized variant of set_node_supply.
  smcf.def("set_nodes_supplies",
           pybind11::vectorize(&SimpleFloatingPointMinCostFlow::SetNodeSupply),
           arg("nodes"), arg("supplies"));
  // Read-only accessors on the problem data.
  smcf.def("num_nodes", &SimpleFloatingPointMinCostFlow::NumNodes);
  smcf.def("num_arcs", &SimpleFloatingPointMinCostFlow::NumArcs);
  smcf.def("tail", &SimpleFloatingPointMinCostFlow::Tail, arg("arc"));
  smcf.def("head", &SimpleFloatingPointMinCostFlow::Head, arg("arc"));
  smcf.def("capacity", &SimpleFloatingPointMinCostFlow::Capacity, arg("arc"));
  smcf.def("supply", &SimpleFloatingPointMinCostFlow::Supply, arg("node"));
  smcf.def("unit_cost", &SimpleFloatingPointMinCostFlow::UnitCost, arg("arc"));
  smcf.def("solve_max_flow_with_min_cost",
           &SimpleFloatingPointMinCostFlow::SolveMaxFlowWithMinCost,
           // Release the Python GIL during the solve. This function does not
           // callback Python code so that is OK.
           pybind11::call_guard<pybind11::gil_scoped_release>());
  smcf.def("flow", &SimpleFloatingPointMinCostFlow::Flow, arg("arc"));
  smcf.def("flows", pybind11::vectorize(&SimpleFloatingPointMinCostFlow::Flow),
           arg("arcs"));
  smcf.def("last_solve_stats", &SimpleFloatingPointMinCostFlow::LastSolveStats);
  // Statistics of the last solve, exposed as a nested SolveStats class with
  // Python-friendly __repr__ / __str__ based on the C++ stream operator.
  pybind11::class_<SimpleFloatingPointMinCostFlow::SolveStats> solve_stats(
      smcf, "SolveStats");
  solve_stats.def(pybind11::init<>());
  solve_stats.def_readwrite("scale",
                            &SimpleFloatingPointMinCostFlow::SolveStats::scale);
  solve_stats.def_readwrite(
      "num_tested_scales",
      &SimpleFloatingPointMinCostFlow::SolveStats::num_tested_scales);
  solve_stats.def("__repr__",
                  [](const SimpleFloatingPointMinCostFlow::SolveStats& stats) {
                    std::ostringstream oss;
                    oss << "SolveStats" << stats;
                    return oss.str();
                  });
  solve_stats.def("__str__",
                  [](const SimpleFloatingPointMinCostFlow::SolveStats& stats) {
                    std::ostringstream oss;
                    oss << stats;
                    return oss.str();
                  });
  // Nested Status enum mirroring MinCostFlowBase::Status.
  pybind11::enum_<SimpleFloatingPointMinCostFlow::Status>(smcf, "Status")
      .value("BAD_COST_RANGE", MinCostFlowBase::Status::BAD_COST_RANGE)
      .value("BAD_CAPACITY_RANGE", MinCostFlowBase::Status::BAD_CAPACITY_RANGE)
      .value("BAD_RESULT", MinCostFlowBase::Status::BAD_RESULT)
      .value("FEASIBLE", MinCostFlowBase::Status::FEASIBLE)
      .value("INFEASIBLE", MinCostFlowBase::Status::INFEASIBLE)
      .value("NOT_SOLVED", MinCostFlowBase::Status::NOT_SOLVED)
      .value("OPTIMAL", MinCostFlowBase::Status::OPTIMAL)
      .value("UNBALANCED", MinCostFlowBase::Status::UNBALANCED)
      .export_values();
}

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
# Copyright 2010-2025 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of the fp_min_cost_flow Python wrapper."""
import math
import numpy as np
from absl.testing import absltest
from ortools.graph.python import fp_min_cost_flow
class FpMinCostFlowTest(absltest.TestCase):
    """Tests for the fp_min_cost_flow Python bindings."""

    def test_success(self) -> None:
        """Test a successful solve.

        We use the same test as scenario as the
        SimpleFloatingPointMinCostFlowTest.Assignment test in
        fp_min_cost_flow_test.cc.
        """
        smcf = fp_min_cost_flow.SimpleFloatingPointMinCostFlow()
        for node, supply in enumerate([4.0, 3.0, 4.0, -5.0, -4.0]):
            smcf.set_node_supply(node=node, supply=supply)
        a03 = smcf.add_arc_with_capacity_and_unit_cost(
            tail=0, head=3, capacity=5.1, unit_cost=2
        )
        a13 = smcf.add_arc_with_capacity_and_unit_cost(
            tail=1, head=3, capacity=4.0, unit_cost=1
        )
        a14 = smcf.add_arc_with_capacity_and_unit_cost(
            tail=1, head=4, capacity=5.0, unit_cost=2
        )
        a24 = smcf.add_arc_with_capacity_and_unit_cost(
            tail=2, head=4, capacity=3.0, unit_cost=3
        )
        self.assertEqual(
            smcf.solve_max_flow_with_min_cost(),
            fp_min_cost_flow.SimpleFloatingPointMinCostFlow.Status.OPTIMAL,
        )
        self.assertEqual(smcf.flow(a03), 3)
        self.assertEqual(smcf.flow(a13), 2)
        self.assertEqual(smcf.flow(a14), 1)
        self.assertEqual(smcf.flow(a24), 3)

    def test_success_with_numpy(self) -> None:
        """Test a successful solve using numpy inputs/outputs.

        Same test as test_success() but with numpy vector functions.
        """
        smcf = fp_min_cost_flow.SimpleFloatingPointMinCostFlow()
        smcf.set_nodes_supplies(
            nodes=np.arange(5), supplies=np.array([4.0, 3.0, 4.0, -5.0, -4.0])
        )
        arcs = smcf.add_arcs_with_capacity_and_unit_cost(
            tails=np.array([0, 1, 1, 2]),
            heads=np.array([3, 3, 4, 4]),
            capacities=np.array([5.1, 4.0, 5.0, 3.0]),
            unit_costs=np.array([2, 1, 2, 3]),
        )
        self.assertEqual(
            smcf.solve_max_flow_with_min_cost(),
            fp_min_cost_flow.SimpleFloatingPointMinCostFlow.Status.OPTIMAL,
        )
        np.testing.assert_array_equal(
            smcf.flows(arcs), np.array([3.0, 2.0, 1.0, 3.0])
        )

    def test_failure(self) -> None:
        """Test a failing solve.

        We use an infinite supply on a node, which is not supported.
        """
        smcf = fp_min_cost_flow.SimpleFloatingPointMinCostFlow()
        smcf.set_node_supply(node=0, supply=math.inf)
        self.assertEqual(
            smcf.solve_max_flow_with_min_cost(),
            fp_min_cost_flow.SimpleFloatingPointMinCostFlow.Status.BAD_CAPACITY_RANGE,
        )

    def test_solve_stats(self) -> None:
        """Checks the repr() and str() formatting of SolveStats."""
        stats = fp_min_cost_flow.SimpleFloatingPointMinCostFlow.SolveStats()
        stats.scale = 0.1
        stats.num_tested_scales = 3
        self.assertEqual(
            repr(stats), "SolveStats{ scale: 0.1, num_tested_scales: 3 }"
        )
        self.assertEqual(str(stats), "{ scale: 0.1, num_tested_scales: 3 }")

    def test_last_solve_stats(self) -> None:
        """Test getting the last solve statistics."""
        smcf = fp_min_cost_flow.SimpleFloatingPointMinCostFlow()
        smcf.set_node_supply(node=0, supply=2.0)
        self.assertEqual(
            smcf.solve_max_flow_with_min_cost(),
            fp_min_cost_flow.SimpleFloatingPointMinCostFlow.Status.OPTIMAL,
        )
        stats = smcf.last_solve_stats()
        self.assertIsInstance(
            stats, fp_min_cost_flow.SimpleFloatingPointMinCostFlow.SolveStats
        )
        # 2.0**61 is the last power of two such as:
        #
        #   (2.0 * 2.0**61) < 2**63 - 1
        #
        # This is the largest scale we expect to use.
        self.assertEqual(stats.scale, 2.0**61)
        # The code should have directly tested the correct largest scale.
        self.assertEqual(stats.num_tested_scales, 1)


if __name__ == "__main__":
    absltest.main()

View File

@@ -29,11 +29,18 @@ PYBIND11_MODULE(min_cost_flow, m) {
arg("head"), arg("capacity"), arg("unit_cost"));
smcf.def(
"add_arcs_with_capacity_and_unit_cost",
pybind11::vectorize(&SimpleMinCostFlow::AddArcWithCapacityAndUnitCost));
pybind11::vectorize(&SimpleMinCostFlow::AddArcWithCapacityAndUnitCost),
arg("tails"), arg("heads"), arg("capacities"), arg("unit_costs"));
smcf.def("set_arc_capacity", &SimpleMinCostFlow::SetArcCapacity, arg("arc"),
arg("capacity"));
smcf.def("set_arc_capacities",
pybind11::vectorize(&SimpleMinCostFlow::SetArcCapacity), arg("arcs"),
arg("capacities"));
smcf.def("set_node_supply", &SimpleMinCostFlow::SetNodeSupply, arg("node"),
arg("supply"));
smcf.def("set_nodes_supplies",
pybind11::vectorize(&SimpleMinCostFlow::SetNodeSupply));
pybind11::vectorize(&SimpleMinCostFlow::SetNodeSupply), arg("nodes"),
arg("supplies"));
smcf.def("num_nodes", &SimpleMinCostFlow::NumNodes);
smcf.def("num_arcs", &SimpleMinCostFlow::NumArcs);
smcf.def("tail", &SimpleMinCostFlow::Tail, arg("arc"));
@@ -47,7 +54,7 @@ PYBIND11_MODULE(min_cost_flow, m) {
smcf.def("optimal_cost", &SimpleMinCostFlow::OptimalCost);
smcf.def("maximum_flow", &SimpleMinCostFlow::MaximumFlow);
smcf.def("flow", &SimpleMinCostFlow::Flow, arg("arc"));
smcf.def("flows", pybind11::vectorize(&SimpleMinCostFlow::Flow));
smcf.def("flows", pybind11::vectorize(&SimpleMinCostFlow::Flow), arg("arcs"));
pybind11::enum_<SimpleMinCostFlow::Status>(smcf, "Status")
.value("BAD_COST_RANGE", MinCostFlowBase::Status::BAD_COST_RANGE)