reorganize graph code

This commit is contained in:
Laurent Perron
2024-12-18 17:56:38 +01:00
parent 81451ab96e
commit de72e8620b
13 changed files with 2543 additions and 2522 deletions

View File

@@ -96,6 +96,27 @@ cc_test(
],
)
# Minimum vertex cover on bipartite graphs, solved via max-flow.
cc_library(
    name = "minimum_vertex_cover",
    srcs = ["minimum_vertex_cover.cc"],
    hdrs = ["minimum_vertex_cover.h"],
    deps = [
        ":ebert_graph",
        ":max_flow",
        "@com_google_absl//absl/log:check",
    ],
)

# Unit tests for :minimum_vertex_cover.
cc_test(
    name = "minimum_vertex_cover_test",
    srcs = ["minimum_vertex_cover_test.cc"],
    deps = [
        ":minimum_vertex_cover",
        "//ortools/base:gmock_main",
        "@com_google_absl//absl/algorithm:container",
    ],
)
cc_library(
name = "multi_dijkstra",
hdrs = ["multi_dijkstra.h"],
@@ -443,40 +464,57 @@ cc_library(
srcs = ["max_flow.cc"],
hdrs = ["max_flow.h"],
deps = [
":ebert_graph",
":flow_problem_cc_proto",
":generic_max_flow",
":graph",
":graphs",
"//ortools/base",
"//ortools/util:stats",
"//ortools/util:zvector",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
],
)
# Tests for the simple max-flow interface (:max_flow); cross-checks results
# against an LP formulation, hence the //ortools/linear_solver dependency.
cc_test(
    name = "max_flow_test",
    size = "medium",
    srcs = ["max_flow_test.cc"],
    data = ["//ortools/graph/testdata:max_flow_test1.pb.txt"],
    deps = [
        ":graph",
        ":graphs",
        ":ebert_graph",
        ":flow_problem_cc_proto",
        ":max_flow",
        "//ortools/base",
        "//ortools/base:gmock_main",
        "//ortools/base:path",
        "//ortools/linear_solver",
        "//ortools/util:file_util",
        "@com_google_absl//absl/algorithm:container",
        "@com_google_protobuf//:protobuf",
    ],
)
# Header-only generic push-relabel max-flow, templated on the graph type
# (split out of :max_flow by this reorganization).
cc_library(
    name = "generic_max_flow",
    hdrs = ["generic_max_flow.h"],
    deps = [
        ":ebert_graph",
        ":flow_problem_cc_proto",
        ":graphs",
        "//ortools/base",
        "//ortools/util:stats",
        "//ortools/util:zvector",
        "@com_google_absl//absl/strings",
    ],
)
# Tests and benchmarks for :generic_max_flow across all supported graph
# implementations; uses GLOP (//ortools/linear_solver) as a reference solver
# and google/benchmark for the BM_* benchmarks.
cc_test(
    name = "generic_max_flow_test",
    size = "medium",
    srcs = ["generic_max_flow_test.cc"],
    deps = [
        ":ebert_graph",
        ":generic_max_flow",
        ":graph",
        ":graphs",
        "//ortools/base",
        "//ortools/base:gmock_main",
        "//ortools/linear_solver",
        "@com_google_absl//absl/random",
        "@com_google_absl//absl/strings:str_format",
        "@com_google_absl//absl/types:span",
        "@com_google_benchmark//:benchmark",
        "@com_google_protobuf//:protobuf",
    ],
)
@@ -496,14 +534,13 @@ cc_library(
":graph",
":graphs",
":max_flow",
"//ortools/base",
"//ortools/base:dump_vars",
"//ortools/base:mathutil",
"//ortools/base:types",
"//ortools/util:saturated_arithmetic",
"//ortools/util:stats",
"//ortools/util:zvector",
"@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
],

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,685 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/generic_max_flow.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
#include "ortools/base/gmock.h"
#include "ortools/base/logging.h"
#include "ortools/graph/ebert_graph.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
#include "ortools/linear_solver/linear_solver.h"
namespace operations_research {
namespace {
using ::testing::ContainerEq;
using ::testing::WhenSorted;
// Builds a graph from the parallel arrays `tail`/`head` (num_arcs entries),
// solves max flow from node 0 to node num_nodes - 1, and checks:
//   - per-arc capacities round-trip through SetArcCapacity/Capacity,
//   - on OPTIMAL: the total flow and every per-arc flow match expectations,
//   - optionally, the sorted source-side and sink-side min-cut node sets.
// Returns the solver status so callers can also assert non-OPTIMAL outcomes
// (e.g. INT_OVERFLOW).
template <typename Graph>
typename GenericMaxFlow<Graph>::Status MaxFlowTester(
    const typename Graph::NodeIndex num_nodes,
    const typename Graph::ArcIndex num_arcs,
    const typename Graph::NodeIndex* tail,
    const typename Graph::NodeIndex* head, const FlowQuantity* capacity,
    const FlowQuantity* expected_flow, const FlowQuantity expected_total_flow,
    const std::vector<typename Graph::NodeIndex>* expected_source_min_cut =
        nullptr,
    const std::vector<typename Graph::NodeIndex>* expected_sink_min_cut =
        nullptr) {
  Graph graph(num_nodes, num_arcs);
  for (int i = 0; i < num_arcs; ++i) {
    graph.AddArc(tail[i], head[i]);
  }
  // Build() may renumber arcs; these tests rely on the identity permutation
  // so that `capacity`/`expected_flow` line up with the arc indices.
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  GenericMaxFlow<Graph> max_flow(&graph, 0, num_nodes - 1);
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    max_flow.SetArcCapacity(arc, capacity[arc]);
    EXPECT_EQ(max_flow.Capacity(arc), capacity[arc]);
  }
  EXPECT_TRUE(max_flow.Solve());
  if (max_flow.status() == GenericMaxFlow<Graph>::OPTIMAL) {
    const FlowQuantity total_flow = max_flow.GetOptimalFlow();
    EXPECT_EQ(expected_total_flow, total_flow);
    for (int i = 0; i < num_arcs; ++i) {
      EXPECT_EQ(expected_flow[i], max_flow.Flow(i)) << " i = " << i;
    }
  }
  // Tests the min-cut functions. Cuts are compared as sorted node sets since
  // the extraction order is unspecified.
  if (expected_source_min_cut != nullptr) {
    std::vector<NodeIndex> cut;
    max_flow.GetSourceSideMinCut(&cut);
    std::sort(cut.begin(), cut.end());
    EXPECT_THAT(*expected_source_min_cut, WhenSorted(ContainerEq(cut)));
  }
  if (expected_sink_min_cut != nullptr) {
    std::vector<NodeIndex> cut;
    max_flow.GetSinkSideMinCut(&cut);
    std::sort(cut.begin(), cut.end());
    EXPECT_THAT(*expected_sink_min_cut, WhenSorted(ContainerEq(cut)));
  }
  return max_flow.status();
}
// Typed-test fixture: every TYPED_TEST below runs once per graph
// implementation listed in GraphTypes.
template <typename Graph>
class GenericMaxFlowTest : public ::testing::Test {};

// The graph implementations under test.
typedef ::testing::Types<StarGraph, util::ReverseArcListGraph<>,
                         util::ReverseArcStaticGraph<>,
                         util::ReverseArcMixedGraph<> >
    GraphTypes;

TYPED_TEST_SUITE(GenericMaxFlowTest, GraphTypes);
// Simple chain 0->1->2->3: the path bottleneck (8) saturates every arc.
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow1) {
  const int kNumNodes = 4;
  const int kNumArcs = 3;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3};
  const FlowQuantity kCapacity[kNumArcs] = {8, 10, 8};
  const FlowQuantity kExpectedFlow[kNumArcs] = {8, 8, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Small DAG with zero-capacity arcs (arcs 3 and 6) that cannot carry flow.
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow2) {
  const int kNumNodes = 6;
  const int kNumArcs = 9;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1,
                                                         2, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3,
                                                         4, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {6, 8, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {1, 4, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedTotalFlow = 10;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Chain 0->1->2->3->4 where every hop is doubled (parallel arcs): the flow
// must split across both copies of each hop.
TYPED_TEST(GenericMaxFlowTest, FeasibleFlowWithMultipleArcs) {
  const int kNumNodes = 5;
  const int kNumArcs = 8;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 1,
                                                         2, 2, 3, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 1, 2, 2,
                                                         3, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Arcs at the FlowQuantity max value must not overflow when the interior
// arcs (capacities 5 and 3) bound the total flow to 8.
TYPED_TEST(GenericMaxFlowTest, HugeCapacity) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax, 5, 3,
                                            kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4, 3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Total flow lands exactly on the FlowQuantity max value (kHalfLow +
// kHalfHigh == kCapacityMax): the largest instance that must still be OPTIMAL.
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflowLimitCase) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const FlowQuantity kHalfLow = kCapacityMax / 2;
  const FlowQuantity kHalfHigh = kCapacityMax - kHalfLow;
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kHalfLow, kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kHalfLow, kHalfHigh, kHalfLow,
                                                kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Two disjoint max-capacity paths: the true max flow (2 * kCapacityMax)
// does not fit in FlowQuantity, so the solver must report INT_OVERFLOW.
// No cut check: MaxFlowTester skips the per-arc checks on non-OPTIMAL status.
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflow) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 4;
  const int kNumArcs = 4;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kCapacityMax, kCapacityMax,
                                                kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  EXPECT_EQ(
      GenericMaxFlow<TypeParam>::INT_OVERFLOW,
      MaxFlowTester<TypeParam>(kNumNodes, kNumArcs, kTail, kHead, kCapacity,
                               kExpectedFlow, kExpectedTotalFlow));
}
// The source has a direct arc to the sink (arc 1) in addition to two
// two-hop paths; the direct arc must be fully saturated.
TYPED_TEST(GenericMaxFlowTest, DirectArcFromSourceToSink) {
  const int kNumNodes = 4;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 3, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 2, 2};
  const FlowQuantity kExpectedFlow[kNumArcs] = {2, 8, 2, 2, 2};
  const FlowQuantity kExpectedTotalFlow = 12;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// The sink (node 5) has no incoming arc: max flow is 0 and the source-side
// cut is everything reachable from the source.
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph1) {
  const int kNumNodes = 6;
  const int kNumArcs = 7;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 3, 4, 5, 6};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  std::vector<int> source_cut({0, 1, 2, 3, 4});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Source and sink live in two disjoint components ({0,1,2} and {3,4,5}):
// max flow is 0 and each side's cut is its own component.
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph2) {
  const int kNumNodes = 6;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 6, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3, 4, 5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Appends a super-source and a super-sink to a bipartite graph whose "tail"
// side is nodes [0, num_tails) and whose "head" side is
// [num_tails, num_tails + num_heads). The source (index num_tails + num_heads)
// gets an arc to every tail node; every head node gets an arc to the sink
// (index num_tails + num_heads + 1).
template <typename Graph>
void AddSourceAndSink(const typename Graph::NodeIndex num_tails,
                      const typename Graph::NodeIndex num_heads, Graph* graph) {
  using Node = typename Graph::NodeIndex;
  const Node source = num_tails + num_heads;
  const Node sink = source + 1;
  for (Node t = 0; t < num_tails; ++t) {
    graph->AddArc(source, t);
  }
  for (Node h = 0; h < num_heads; ++h) {
    graph->AddArc(num_tails + h, sink);
  }
}
// Builds a complete bipartite graph (num_tails x num_heads arcs, tail->head)
// and then attaches a super-source/super-sink via AddSourceAndSink().
template <typename Graph>
void GenerateCompleteGraph(const typename Graph::NodeIndex num_tails,
                           const typename Graph::NodeIndex num_heads,
                           Graph* graph) {
  using Node = typename Graph::NodeIndex;
  // +2 nodes for source/sink; +num_tails+num_heads arcs for their links.
  const Node num_nodes = num_tails + num_heads + 2;
  const typename Graph::ArcIndex num_arcs =
      num_tails * num_heads + num_tails + num_heads;
  graph->Reserve(num_nodes, num_arcs);
  for (Node left = 0; left < num_tails; ++left) {
    for (Node right = 0; right < num_heads; ++right) {
      graph->AddArc(left, num_tails + right);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Builds a sparse random bipartite graph: each tail node gets `degree`
// outgoing arcs to uniformly drawn head nodes (duplicates are possible),
// then a super-source/super-sink is attached. The generator uses a fixed
// seed so the graph is reproducible across runs.
template <typename Graph>
void GeneratePartialRandomGraph(const typename Graph::NodeIndex num_tails,
                                const typename Graph::NodeIndex num_heads,
                                const typename Graph::NodeIndex degree,
                                Graph* graph) {
  using Node = typename Graph::NodeIndex;
  const Node num_nodes = num_tails + num_heads + 2;
  const typename Graph::ArcIndex num_arcs =
      num_tails * degree + num_tails + num_heads;
  graph->Reserve(num_nodes, num_arcs);
  std::mt19937 randomizer(0);  // Fixed seed: deterministic graphs.
  for (Node tail = 0; tail < num_tails; ++tail) {
    for (Node i = 0; i < degree; ++i) {
      const Node head = absl::Uniform(randomizer, 0, num_heads);
      graph->AddArc(tail, num_tails + head);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Fills `arc_valuation` with one pseudo-random value in [0, max_range) per
// arc of `graph`. The generator is seeded with a constant so the valuations
// are reproducible across runs.
template <typename Graph>
void GenerateRandomArcValuations(const Graph& graph, const int64_t max_range,
                                 std::vector<int64_t>* arc_valuation) {
  // Cache the arc count once and use it consistently for both sizing and
  // iteration (the original mixed the cached value with graph.num_arcs()).
  const typename Graph::ArcIndex num_arcs = graph.num_arcs();
  arc_valuation->resize(num_arcs);
  std::mt19937 randomizer(0);  // Fixed seed: deterministic valuations.
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    (*arc_valuation)[arc] = absl::Uniform(randomizer, 0, max_range);
  }
}
// Copies `arc_capacity` into `max_flow`, one capacity per arc of its graph.
template <typename Graph>
void SetUpNetworkData(absl::Span<const int64_t> arc_capacity,
                      GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    max_flow->SetArcCapacity(arc, arc_capacity[arc]);
  }
}
// Solves `max_flow` with the push-relabel implementation, checks that the
// status is OPTIMAL and that the computed flow is feasible (0 <= flow <=
// capacity on each direct arc; non-positive flow and zero capacity on each
// opposite arc), then returns the optimal flow value.
template <typename Graph>
FlowQuantity SolveMaxFlow(GenericMaxFlow<Graph>* max_flow) {
  EXPECT_TRUE(max_flow->Solve());
  EXPECT_EQ(GenericMaxFlow<Graph>::OPTIMAL, max_flow->status());
  const Graph* graph = max_flow->graph();
  for (typename Graph::ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    // The reverse arc carries the negated flow and must have no capacity.
    EXPECT_LE(max_flow->Flow(Graphs<Graph>::OppositeArc(*graph, arc)), 0)
        << arc;
    EXPECT_EQ(0, max_flow->Capacity(Graphs<Graph>::OppositeArc(*graph, arc)))
        << arc;
    EXPECT_LE(0, max_flow->Flow(arc)) << arc;
    EXPECT_LE(max_flow->Flow(arc), max_flow->Capacity(arc)) << arc;
  }
  return max_flow->GetOptimalFlow();
}
// Reference solver: models the same max-flow instance as an LP (GLOP) and
// returns the rounded optimal objective, used to cross-check SolveMaxFlow().
// Assumes the graph comes from Generate*Graph(): the source is node
// num_nodes - 2 and the sink is node num_nodes - 1.
template <typename Graph>
FlowQuantity SolveMaxFlowWithLP(GenericMaxFlow<Graph>* max_flow) {
  MPSolver solver("LPSolver", MPSolver::GLOP_LINEAR_PROGRAMMING);
  const double infinity = solver.infinity();
  const Graph* graph = max_flow->graph();
  const typename Graph::NodeIndex num_nodes = graph->num_nodes();
  const typename Graph::ArcIndex num_arcs = graph->num_arcs();
  const typename Graph::NodeIndex source_index = num_nodes - 2;
  // One flow-conservation constraint per node; source and sink (the last two
  // node indices) are left unconstrained.
  std::unique_ptr<MPConstraint*[]> constraint(new MPConstraint*[num_nodes]);
  for (typename Graph::NodeIndex node = 0; node < graph->num_nodes(); ++node) {
    constraint[node] = solver.MakeRowConstraint();
    if (node < source_index) {  // Node is neither source nor sink.
      constraint[node]->SetBounds(0.0, 0.0);  // Flow is conserved.
    } else {
      constraint[node]->SetBounds(-infinity, +infinity);
    }
  }
  // One variable per arc, bounded by the arc capacity; each arc contributes
  // +1 to its tail's balance and -1 to its head's.
  std::unique_ptr<MPVariable*[]> var(new MPVariable*[num_arcs]);
  for (typename Graph::ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    var[arc] = solver.MakeNumVar(0.0, max_flow->Capacity(arc),
                                 absl::StrFormat("v%d", arc));
    constraint[graph->Tail(arc)]->SetCoefficient(var[arc], 1.0);
    constraint[graph->Head(arc)]->SetCoefficient(var[arc], -1.0);
  }
  // Maximize the flow out of the source by minimizing its negation.
  MPObjective* const objective = solver.MutableObjective();
  for (typename Graph::OutgoingArcIterator arc_it(*graph, source_index);
       arc_it.Ok(); arc_it.Next()) {
    const typename Graph::ArcIndex arc = arc_it.Index();
    objective->SetCoefficient(var[arc], -1.0);
  }
  solver.Solve();
  // Round the floating-point LP objective to the nearest integer.
  return static_cast<FlowQuantity>(-objective->Value() + .5);
}
template <typename Graph>
struct MaxFlowSolver {
typedef FlowQuantity (*Solver)(GenericMaxFlow<Graph>* max_flow);
};
// Solves a unit-capacity assignment problem on a complete bipartite graph
// with solver `f` and checks the optimal flow value. `expected_flow2` is
// unused here; it is part of the signature shared with the *Flow scenarios.
template <typename Graph>
void FullRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                          typename Graph::NodeIndex num_tails,
                          typename Graph::NodeIndex num_heads,
                          FlowQuantity expected_flow1,
                          FlowQuantity expected_flow2) {
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  Graphs<Graph>::Build(&graph);
  const std::vector<int64_t> unit_capacities(graph.num_arcs(), 1);
  auto max_flow = std::make_unique<GenericMaxFlow<Graph> >(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(unit_capacities, max_flow.get());
  EXPECT_EQ(expected_flow1, f(max_flow.get()));
}
// Solves a unit-capacity assignment problem on a sparse random bipartite
// graph (degree 10 per tail node) with solver `f` and checks the optimal
// flow value. `expected_flow2` is unused here; it is part of the signature
// shared with the *Flow scenarios.
template <typename Graph>
void PartialRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                             typename Graph::NodeIndex num_tails,
                             typename Graph::NodeIndex num_heads,
                             FlowQuantity expected_flow1,
                             FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  Graphs<Graph>::Build(&graph);
  CHECK_EQ(graph.num_arcs(), num_tails * kDegree + num_tails + num_heads);
  const std::vector<int64_t> unit_capacities(graph.num_arcs(), 1);
  auto max_flow = std::make_unique<GenericMaxFlow<Graph> >(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(unit_capacities, max_flow.get());
  EXPECT_EQ(expected_flow1, f(max_flow.get()));
}
// Resets every arc capacity of `max_flow` to max(arc_capacity[arc] - delta,
// 0). Called with delta > 0 to shrink capacities between incremental solves,
// and with delta == 0 to restore the originals.
template <typename Graph>
void ChangeCapacities(absl::Span<const int64_t> arc_capacity,
                      FlowQuantity delta, GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    const int64_t reduced = std::max(arc_capacity[arc] - delta, int64_t{0});
    max_flow->SetArcCapacity(arc, reduced);
  }
}
// Solves a sparse random instance with random capacities three times:
// as generated (expected_flow1), with all capacities reduced by 1000
// (expected_flow2), and restored (expected_flow1 again). This exercises
// re-solving after capacity changes.
template <typename Graph>
void PartialRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                       typename Graph::NodeIndex num_tails,
                       typename Graph::NodeIndex num_heads,
                       FlowQuantity expected_flow1,
                       FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  // Capacities are drawn before Build(), so they must be permuted along with
  // the arcs if Build() renumbers them.
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Same shrink-then-restore scenario as PartialRandomFlow, but on a complete
// bipartite graph with random capacities.
template <typename Graph>
void FullRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                    typename Graph::NodeIndex num_tails,
                    typename Graph::NodeIndex num_heads,
                    FlowQuantity expected_flow1, FlowQuantity expected_flow2) {
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  // Capacities are drawn before Build(), so they must be permuted along with
  // the arcs if Build() renumbers them.
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Instantiates one scenario three times: solved with the LP reference
// implementation (StarGraph) and with the push-relabel solver on both
// StarGraph and ReverseArcStaticGraph. All three must agree.
#define LP_AND_FLOW_TEST(test_name, size, expected_flow1, expected_flow2) \
  LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2)           \
  FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2)         \
  FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2)
#define LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
  TEST(LPMaxFlowTest, test_name##size) {                              \
    test_name<StarGraph>(SolveMaxFlowWithLP<StarGraph>, size, size,   \
                         expected_flow1, expected_flow2);             \
  }
#define FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
  TEST(MaxFlowTest, test_name##size) {                                  \
    test_name<StarGraph>(SolveMaxFlow, size, size, expected_flow1,      \
                         expected_flow2);                               \
  }
#define FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2) \
  TEST(MaxFlowTestStaticGraph, test_name##size) {                          \
    test_name<util::ReverseArcStaticGraph<> >(SolveMaxFlow, size, size,    \
                                              expected_flow1,              \
                                              expected_flow2);             \
  }
// The non-trivial expected values below are golden results produced by the
// fixed random seeds in the generators above.
LP_AND_FLOW_TEST(FullRandomAssignment, 300, 300, 300);
LP_AND_FLOW_TEST(PartialRandomAssignment, 100, 100, 100);
LP_AND_FLOW_TEST(PartialRandomAssignment, 1000, 1000, 1000);
LP_AND_FLOW_TEST(PartialRandomFlow, 400, 1898664, 1515203);
LP_AND_FLOW_TEST(FullRandomFlow, 100, 482391, 386587);
// LARGE must be defined from the build command line to test larger instances.
#ifdef LARGE
LP_AND_FLOW_TEST(PartialRandomAssignment, 10000, 10000, 10000);
#endif
// Benchmarks the push-relabel solver on a 3000x3000 complete bipartite
// unit-capacity assignment problem.
template <typename Graph>
static void BM_FullRandomAssignment(benchmark::State& state) {
  constexpr int kProblemSize = 3000;
  for (auto _ : state) {
    FullRandomAssignment<Graph>(SolveMaxFlow, kProblemSize, kProblemSize,
                                kProblemSize, kProblemSize);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
                          kProblemSize);
}
// Benchmarks the push-relabel solver on a 10100x10100 sparse random
// unit-capacity assignment problem.
template <typename Graph>
static void BM_PartialRandomAssignment(benchmark::State& state) {
  constexpr int kProblemSize = 10100;
  for (auto _ : state) {
    PartialRandomAssignment<Graph>(SolveMaxFlow, kProblemSize, kProblemSize,
                                   kProblemSize, kProblemSize);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
                          kProblemSize);
}
// Benchmarks the shrink-then-restore flow scenario on an 800x800 sparse
// random graph (golden flow values from the fixed seed).
template <typename Graph>
static void BM_PartialRandomFlow(benchmark::State& state) {
  constexpr int kProblemSize = 800;
  for (auto _ : state) {
    PartialRandomFlow<Graph>(SolveMaxFlow, kProblemSize, kProblemSize, 3884850,
                             3112123);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
                          kProblemSize);
}
// Benchmarks the shrink-then-restore flow scenario on an 800x800 complete
// bipartite graph (golden flow values from the fixed seed).
template <typename Graph>
static void BM_FullRandomFlow(benchmark::State& state) {
  constexpr int kProblemSize = 800;
  for (auto _ : state) {
    FullRandomFlow<Graph>(SolveMaxFlow, kProblemSize, kProblemSize, 4000549,
                          3239512);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) *
                          kProblemSize);
}
// Register each benchmark for all four graph implementations.
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcMixedGraph<>);
// One iteration of each of the following tests is slow.
BENCHMARK_TEMPLATE(BM_FullRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcMixedGraph<>);
// The test macros are only needed above; keep them file-local.
#undef LP_AND_FLOW_TEST
#undef LP_ONLY_TEST
#undef FLOW_ONLY_TEST
#undef FLOW_ONLY_TEST_SG
// ----------------------------------------------------------
// PriorityQueueWithRestrictedPush tests.
// ----------------------------------------------------------
// Pop() returns elements from highest priority to lowest; Clear() empties
// the queue.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehavior) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  queue.Push("D", 10);
  queue.Push("E", 9);
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  EXPECT_FALSE(queue.IsEmpty());
  queue.Clear();
  EXPECT_TRUE(queue.IsEmpty());
}
// Interleaves Push() and Pop(): priority order still holds across the
// interleavings.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehaviorWithMixedPushPop) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  queue.Push("D", 1);
  queue.Push("E", 0);
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("E", 1);
  EXPECT_FALSE(queue.IsEmpty());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
}
// Randomized stress test: 10k elements with duplicate and gapped priorities
// are pushed in (almost) sorted order, then popped with occasional re-pushes,
// and the pop order is checked against a stable sort of the same data.
TEST(PriorityQueueWithRestrictedPushTest, RandomPushPop) {
  struct ElementWithPriority {
    ElementWithPriority(int _element, int _priority)
        : element(_element), priority(_priority) {}
    bool operator<(const ElementWithPriority& other) const {
      return priority < other.priority;
    }
    int element;
    int priority;
  };
  std::vector<ElementWithPriority> pairs;
  std::mt19937 randomizer(1);
  const int kNumElement = 10000;
  const int kMaxPriority = 10000;  // We want duplicates and gaps.
  for (int i = 0; i < kNumElement; ++i) {
    pairs.push_back(
        ElementWithPriority(i, absl::Uniform(randomizer, 0, kMaxPriority)));
  }
  std::sort(pairs.begin(), pairs.end());
  // Randomly add +1 and push to the queue. The +1 keeps the pushed priorities
  // within the queue's restricted-push contract.
  PriorityQueueWithRestrictedPush<int, int> queue;
  for (int i = 0; i < pairs.size(); ++i) {
    pairs[i].priority += absl::Bernoulli(randomizer, 1.0 / 2) ? 1 : 0;
    queue.Push(pairs[i].element, pairs[i].priority);
  }
  // Stable sort the pairs for checking (the queue order is stable).
  std::stable_sort(pairs.begin(), pairs.end());
  // Random Push() and Pop() with more Pop(). The push branch re-inserts the
  // most recently popped pair, which is always a legal priority.
  int current = pairs.size();
  while (!queue.IsEmpty()) {
    EXPECT_GT(current, 0);
    if (absl::Bernoulli(randomizer, 1.0 / 4) && current < pairs.size()) {
      queue.Push(pairs[current].element, pairs[current].priority);
      ++current;
    } else {
      --current;
      EXPECT_EQ(pairs[current].element, queue.Pop());
    }
  }
}
// The queue DCHECKs on misuse: Pop() on an empty queue, and Push() with a
// priority below what the restricted-push contract allows (here, after
// pushing priorities 10 and 9, pushes at 4 and 8 both die — presumably only
// priorities >= highest - 1 are accepted; see the class header to confirm).
TEST(PriorityQueueWithRestrictedPushDeathTest, DCHECK) {
  // Don't run this test in opt mode.
  if (!DEBUG_MODE) GTEST_SKIP();
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  ASSERT_DEATH(queue.Pop(), "");
  queue.Push("A", 10);
  queue.Push("B", 9);
  ASSERT_DEATH(queue.Push("C", 4), "");
  ASSERT_DEATH(queue.Push("C", 8), "");
}
} // namespace
} // namespace operations_research

File diff suppressed because it is too large Load Diff

View File

@@ -11,137 +11,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// An implementation of a push-relabel algorithm for the max flow problem.
//
// In the following, we consider a graph G = (V,E,s,t) where V denotes the set
// of nodes (vertices) in the graph, E denotes the set of arcs (edges). s and t
// denote distinguished nodes in G called source and target. n = |V| denotes the
// number of nodes in the graph, and m = |E| denotes the number of arcs in the
// graph.
//
// Each arc (v,w) is associated a capacity c(v,w).
//
// A flow is a function from E to R such that:
//
// a) f(v,w) <= c(v,w) for all (v,w) in E (capacity constraint.)
//
// b) f(v,w) = -f(w,v) for all (v,w) in E (flow antisymmetry constraint.)
//
// c) sum on v f(v,w) = 0 (flow conservation.)
//
// The goal of this algorithm is to find the maximum flow from s to t, i.e.
// for example to maximize sum v f(s,v).
//
// The starting reference for this class of algorithms is:
// A.V. Goldberg and R.E. Tarjan. A new approach to the maximum flow problem.
// ACM Symposium on Theory of Computing, pp. 136-146.
// http://portal.acm.org/citation.cfm?id=12144.
//
// The basic idea of the algorithm is to handle preflows instead of flows,
// and to refine preflows until a maximum flow is obtained.
// A preflow is like a flow, except that the inflow can be larger than the
// outflow. If it is the case at a given node v, it is said that there is an
// excess at node v, and inflow = outflow + excess.
//
// More formally, a preflow is a function f such that:
//
// 1) f(v,w) <= c(v,w) for all (v,w) in E (capacity constraint). c(v,w) is a
// value representing the maximum capacity for arc (v,w).
//
// 2) f(v,w) = -f(w,v) for all (v,w) in E (flow antisymmetry constraint)
//
// 3) excess(v) = sum on u f(u,v) >= 0 is the excess at node v, the
// algebraic sum of all the incoming preflows at this node.
//
// Each node has an associated "height", in addition to its excess. The
// height of the source is defined to be equal to n, and cannot change. The
// height of the target is defined to be zero, and cannot change either. The
// height of all the other nodes is initialized at zero and is updated during
// the algorithm (see below). For those who want to know the details, the height
// of a node, corresponds to a reduced cost, and this enables one to prove that
// the algorithm actually computes the max flow. Note that the height of a node
// can be initialized to the distance to the target node in terms of number of
// nodes. This has not been tried in this implementation.
//
// A node v is said to be *active* if excess(v) > 0.
//
// In this case the following operations can be applied to it:
//
// - if there are *admissible* incident arcs, i.e. arcs which are not saturated,
// and whose head's height is lower than the height of the active node
// considered, a PushFlow operation can be applied. It consists in sending as
// much flow as both the excess at the node and the capacity of the arc
// permit.
// - if there are no admissible arcs, the active node considered is relabeled,
// i.e. its height is increased to 1 + the minimum height of its neighboring
// nodes on admissible arcs.
// This is implemented in Discharge, which itself calls PushFlow and Relabel.
//
// Before running Discharge, it is necessary to initialize the algorithm with a
// preflow. This is done in InitializePreflow, which saturates all the arcs
// leaving the source node, and sets the excess at the heads of those arcs
// accordingly.
//
// The algorithm terminates when there are no remaining active nodes, i.e. all
// the excesses at all nodes are equal to zero. In this case, a maximum flow is
// obtained.
//
// The complexity of this algorithm depends amongst other things on the choice
// of the next active node. It has been shown, for example in:
// L. Tuncel, "On the Complexity of Preflow-Push Algorithms for Maximum-Flow
// Problems", Algorithmica 11(4): 353-359 (1994).
// and
// J. Cheriyan and K. Mehlhorn, "An analysis of the highest-level selection rule
// in the preflow-push max-flow algorithm", Information processing letters,
// 69(5):239-242 (1999).
// http://www.math.uwaterloo.ca/~jcheriya/PS_files/me3.0.ps
//
// ...that choosing the active node with the highest level yields a
// complexity of O(n^2 * sqrt(m)).
//
// TODO(user): implement the above active node choice rule.
//
// This has been validated experimentally in:
// R.K. Ahuja, M. Kodialam, A.K. Mishra, and J.B. Orlin, "Computational
// Investigations of Maximum Flow Algorithms", EJOR 97:509-542(1997).
// http://jorlin.scripts.mit.edu/docs/publications/58-comput%20investigations%20of.pdf.
//
//
// TODO(user): an alternative would be to evaluate:
// A.V. Goldberg, "The Partial Augment-Relabel Algorithm for the Maximum Flow
// Problem.” In Proceedings of Algorithms ESA, LNCS 5193:466-477, Springer 2008.
// http://www.springerlink.com/index/5535k2j1mt646338.pdf
//
// An interesting general reference on network flows is:
// R. K. Ahuja, T. L. Magnanti, J. B. Orlin, "Network Flows: Theory, Algorithms,
// and Applications," Prentice Hall, 1993, ISBN: 978-0136175490,
// http://www.amazon.com/dp/013617549X
//
// Keywords: Push-relabel, max-flow, network, graph, Goldberg, Tarjan, Dinic,
// Dinitz.
#ifndef OR_TOOLS_GRAPH_MAX_FLOW_H_
#define OR_TOOLS_GRAPH_MAX_FLOW_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "ortools/base/logging.h"
#include "ortools/graph/ebert_graph.h"
#include "ortools/graph/flow_problem.pb.h"
#include "ortools/graph/generic_max_flow.h"
#include "ortools/graph/graph.h"
#include "ortools/util/stats.h"
#include "ortools/util/zvector.h"
namespace operations_research {
// Forward declaration.
template <typename Graph>
class GenericMaxFlow;
// A simple and efficient max-flow interface. This is as fast as
// GenericMaxFlow<ReverseArcStaticGraph>, which is the fastest, but uses
// more memory in order to hide the somewhat involved construction of the
@@ -150,6 +32,10 @@ class GenericMaxFlow;
// TODO(user): If the need arises, extend this interface to support warm start.
class SimpleMaxFlow {
public:
typedef int32_t NodeIndex;
typedef int32_t ArcIndex;
typedef int64_t FlowQuantity;
// The constructor takes no size.
// New node indices will be created lazily by AddArcWithCapacity().
SimpleMaxFlow();
@@ -246,514 +132,9 @@ class SimpleMaxFlow {
// instance that uses it.
typedef ::util::ReverseArcStaticGraph<NodeIndex, ArcIndex> Graph;
std::unique_ptr<Graph> underlying_graph_;
std::unique_ptr<GenericMaxFlow<Graph>> underlying_max_flow_;
std::unique_ptr<GenericMaxFlow<Graph> > underlying_max_flow_;
};
// Specific but efficient priority queue implementation. The priority type must
// be an integer. The queue allows to retrieve the element with highest priority
// but only allows pushes with a priority greater or equal to the highest
// priority in the queue minus one. All operations are in O(1) and the memory is
// in O(num elements in the queue). Elements with the same priority are
// retrieved with LIFO order.
//
// Note(user): As far as I know, this is an original idea and this is the only
// code that uses it in the Maximum Flow context. Papers usually refer to a
// height-indexed array of simple linked lists of active nodes with the same
// height. Even worse, sometimes they use doubly-linked lists to allow arbitrary
// height updates in order to detect missing heights (used for the Gap
// heuristic). But this can actually be implemented a lot more efficiently by
// just maintaining the height distribution of all the nodes in the graph.
template <typename Element, typename IntegerPriority>
class PriorityQueueWithRestrictedPush {
 public:
  PriorityQueueWithRestrictedPush() : even_queue_(), odd_queue_() {}
#ifndef SWIG
  // This type is neither copyable nor movable.
  PriorityQueueWithRestrictedPush(const PriorityQueueWithRestrictedPush&) =
      delete;
  PriorityQueueWithRestrictedPush& operator=(
      const PriorityQueueWithRestrictedPush&) = delete;
#endif
  // Is the queue empty?
  bool IsEmpty() const;
  // Clears the queue.
  void Clear();
  // Push a new element in the queue. Its priority must be greater or equal to
  // the highest priority present in the queue, minus one. This condition is
  // DCHECKed, and violating it yields erroneous queue behavior in NDEBUG mode.
  void Push(Element element, IntegerPriority priority);
  // Returns the element with highest priority and remove it from the queue.
  // IsEmpty() must be false, this condition is DCHECKed.
  Element Pop();
 private:
  // Helper function to get the last element of a vector and pop it.
  Element PopBack(std::vector<std::pair<Element, IntegerPriority>>* queue);
  // This is the heart of the algorithm: we split the elements by the parity of
  // their priority, and the precondition on Push() ensures that both vectors
  // are always sorted by increasing priority. The maximum is thus the larger
  // of the two vectors' last elements.
  std::vector<std::pair<Element, IntegerPriority>> even_queue_;
  std::vector<std::pair<Element, IntegerPriority>> odd_queue_;
};
// We want an enum for the Status of a max flow run, and we want this
// enum to be scoped under GenericMaxFlow<>. Unfortunately, swig
// doesn't handle templated enums very well, so we need a base,
// untemplated class to hold it.
class MaxFlowStatusClass {
 public:
  // Status of a max-flow computation; returned by GenericMaxFlow::status().
  enum Status {
    NOT_SOLVED,    // The problem was not solved, or its data were edited.
    OPTIMAL,       // Solve() was called and found an optimal solution.
    INT_OVERFLOW,  // There is a feasible flow > max possible flow.
    BAD_INPUT,     // The input is inconsistent.
    BAD_RESULT     // There was an error.
  };
};
// Generic MaxFlow (there is a default MaxFlow specialization defined below)
// that works with StarGraph and all the reverse arc graphs from graph.h, see
// the end of max_flow.cc for the exact types this class is compiled for.
template <typename Graph>
class GenericMaxFlow : public MaxFlowStatusClass {
 public:
  typedef typename Graph::NodeIndex NodeIndex;
  typedef typename Graph::ArcIndex ArcIndex;
  typedef typename Graph::OutgoingArcIterator OutgoingArcIterator;
  typedef typename Graph::OutgoingOrOppositeIncomingArcIterator
      OutgoingOrOppositeIncomingArcIterator;
  typedef typename Graph::IncomingArcIterator IncomingArcIterator;
  typedef ZVector<ArcIndex> ArcIndexArray;
  // The height of a node never exceeds 2 times the number of nodes, so we
  // use the same type as a Node index.
  // Note: FlowQuantity and QuantityArray below come from
  // ortools/graph/ebert_graph.h, included by this header.
  typedef NodeIndex NodeHeight;
  typedef ZVector<NodeHeight> NodeHeightArray;
  // Initialize a MaxFlow instance on the given graph. The graph does not need
  // to be fully built yet, but its capacity reservations are used to
  // initialize the memory of this class. source and sink must also be valid
  // nodes of the graph.
  GenericMaxFlow(const Graph* graph, NodeIndex source, NodeIndex sink);
#ifndef SWIG
  // This type is neither copyable nor movable.
  GenericMaxFlow(const GenericMaxFlow&) = delete;
  GenericMaxFlow& operator=(const GenericMaxFlow&) = delete;
#endif
  virtual ~GenericMaxFlow() {}
  // Returns the graph associated to the current object.
  const Graph* graph() const { return graph_; }
  // Returns the status of last call to Solve(). NOT_SOLVED is returned if
  // Solve() has never been called or if the problem has been modified in such a
  // way that the previous solution becomes invalid.
  Status status() const { return status_; }
  // Returns the index of the node corresponding to the source of the network.
  NodeIndex GetSourceNodeIndex() const { return source_; }
  // Returns the index of the node corresponding to the sink of the network.
  NodeIndex GetSinkNodeIndex() const { return sink_; }
  // Sets the capacity for arc to new_capacity.
  void SetArcCapacity(ArcIndex arc, FlowQuantity new_capacity);
  // Sets the flow for arc.
  void SetArcFlow(ArcIndex arc, FlowQuantity new_flow);
  // Returns true if a maximum flow was solved.
  bool Solve();
  // Returns the total flow found by the algorithm.
  FlowQuantity GetOptimalFlow() const { return node_excess_[sink_]; }
  // Returns the flow on arc using the equations given in the comment on
  // residual_arc_capacity_.
  FlowQuantity Flow(ArcIndex arc) const {
    if (IsArcDirect(arc)) {
      return residual_arc_capacity_[Opposite(arc)];
    } else {
      return -residual_arc_capacity_[arc];
    }
  }
  // Returns the capacity of arc using the equations given in the comment on
  // residual_arc_capacity_.
  FlowQuantity Capacity(ArcIndex arc) const {
    if (IsArcDirect(arc)) {
      return residual_arc_capacity_[arc] +
             residual_arc_capacity_[Opposite(arc)];
    } else {
      // By convention, a reverse arc has capacity 0.
      return 0;
    }
  }
  // Returns the nodes reachable from the source in the residual graph, the
  // outgoing arcs of this set form a minimum cut.
  void GetSourceSideMinCut(std::vector<NodeIndex>* result);
  // Returns the nodes that can reach the sink in the residual graph, the
  // outgoing arcs of this set form a minimum cut. Note that if this is the
  // complement of GetNodeReachableFromSource(), then the min-cut is unique.
  //
  // TODO(user): In the two-phases algorithm, we can get this minimum cut
  // without doing the second phase. Add an option for this if there is a need
  // to, note that the second phase is pretty fast so the gain will be small.
  void GetSinkSideMinCut(std::vector<NodeIndex>* result);
  // Checks the consistency of the input, i.e. that capacities on the arcs are
  // non-negative.
  bool CheckInputConsistency() const;
  // Checks whether the result is valid, i.e. that node excesses are all equal
  // to zero (we have a flow) and that residual capacities are all
  // non-negative.
  bool CheckResult() const;
  // Returns true if there exists a path from the source to the sink with
  // remaining capacity. This allows us to easily check at the end that the flow
  // we computed is indeed optimal (provided that all the conditions tested by
  // CheckResult() also hold).
  bool AugmentingPathExists() const;
  // Sets the different algorithm options. All default to true.
  // See the corresponding variable declaration below for more details.
  void SetUseGlobalUpdate(bool value) {
    use_global_update_ = value;
    // process_node_by_height_ requires the global update heuristic.
    if (!use_global_update_) process_node_by_height_ = false;
  }
  void SetUseTwoPhaseAlgorithm(bool value) { use_two_phase_algorithm_ = value; }
  void SetCheckInput(bool value) { check_input_ = value; }
  void SetCheckResult(bool value) { check_result_ = value; }
  void ProcessNodeByHeight(bool value) {
    process_node_by_height_ = value && use_global_update_;
  }
  // Returns the protocol buffer representation of the current problem.
  FlowModelProto CreateFlowModel();
 protected:
  // Returns true if arc is admissible, i.e. it is not saturated and it goes
  // "downhill" by exactly one height unit.
  bool IsAdmissible(ArcIndex arc) const {
    return residual_arc_capacity_[arc] > 0 &&
           node_potential_[Tail(arc)] == node_potential_[Head(arc)] + 1;
  }
  // Returns true if node is active, i.e. if its excess is positive and it
  // is neither the source nor the sink of the graph.
  bool IsActive(NodeIndex node) const {
    return (node != source_) && (node != sink_) && (node_excess_[node] > 0);
  }
  // Sets the capacity of arc to 'capacity' and clears the flow on arc.
  void SetCapacityAndClearFlow(ArcIndex arc, FlowQuantity capacity) {
    residual_arc_capacity_.Set(arc, capacity);
    residual_arc_capacity_.Set(Opposite(arc), 0);
  }
  // Returns true if a precondition for Relabel is met, i.e. the outgoing arcs
  // of node are all either saturated or the heights of their heads are greater
  // or equal to the height of node.
  bool CheckRelabelPrecondition(NodeIndex node) const;
  // Returns context concatenated with information about arc
  // in a human-friendly way.
  std::string DebugString(absl::string_view context, ArcIndex arc) const;
  // Initializes the container active_nodes_.
  void InitializeActiveNodeContainer();
  // Get the first element from the active node container.
  NodeIndex GetAndRemoveFirstActiveNode() {
    if (process_node_by_height_) return active_node_by_height_.Pop();
    const NodeIndex node = active_nodes_.back();
    active_nodes_.pop_back();
    return node;
  }
  // Push element to the active node container.
  void PushActiveNode(const NodeIndex& node) {
    if (process_node_by_height_) {
      active_node_by_height_.Push(node, node_potential_[node]);
    } else {
      active_nodes_.push_back(node);
    }
  }
  // Check the emptiness of the container.
  bool IsEmptyActiveNodeContainer() {
    if (process_node_by_height_) {
      return active_node_by_height_.IsEmpty();
    } else {
      return active_nodes_.empty();
    }
  }
  // Performs optimization step.
  void Refine();
  void RefineWithGlobalUpdate();
  // Discharges an active node node by saturating its admissible adjacent arcs,
  // if any, and by relabelling it when it becomes inactive.
  void Discharge(NodeIndex node);
  // Initializes the preflow to a state that enables to run Refine.
  void InitializePreflow();
  // Clears the flow excess at each node by pushing the flow back to the source:
  // - Do a depth-first search from the source in the direct graph to cancel
  //   flow cycles.
  // - Then, return flow excess along the depth-first search tree (by pushing
  //   the flow in the reverse dfs topological order).
  // The theoretical complexity is O(mn), but it is a lot faster in practice.
  void PushFlowExcessBackToSource();
  // Computes the best possible node potential given the current flow using a
  // reverse breadth-first search from the sink in the reverse residual graph.
  // This is an implementation of the global update heuristic mentioned in many
  // max-flow papers. See for instance: B.V. Cherkassky, A.V. Goldberg, "On
  // implementing push-relabel methods for the maximum flow problem",
  // Algorithmica, 19:390-410, 1997.
  // ftp://reports.stanford.edu/pub/cstr/reports/cs/tr/94/1523/CS-TR-94-1523.pdf
  void GlobalUpdate();
  // Tries to saturate all the outgoing arcs from the source that can reach the
  // sink. Most of the time, we can do that in one go, except when more flow
  // than kMaxFlowQuantity can be pushed out of the source in which case we
  // have to be careful. Returns true if some flow was pushed.
  bool SaturateOutgoingArcsFromSource();
  // Pushes flow on arc, i.e. consumes flow on residual_arc_capacity_[arc],
  // and consumes -flow on residual_arc_capacity_[Opposite(arc)]. Updates
  // node_excess_ at the tail and head of arc accordingly.
  void PushFlow(FlowQuantity flow, ArcIndex arc);
  // Relabels a node, i.e. increases its height by the minimum necessary amount.
  // This version of Relabel is relaxed in a way such that if an admissible arc
  // exists at the current node height, then the node is not relabeled. This
  // enables us to deal with wrong values of first_admissible_arc_[node] when
  // updating it is too costly.
  void Relabel(NodeIndex node);
  // Handy member functions to make the code more compact.
  NodeIndex Head(ArcIndex arc) const { return graph_->Head(arc); }
  NodeIndex Tail(ArcIndex arc) const { return graph_->Tail(arc); }
  ArcIndex Opposite(ArcIndex arc) const;
  bool IsArcDirect(ArcIndex arc) const;
  bool IsArcValid(ArcIndex arc) const;
  // Returns the set of nodes reachable from start in the residual graph or in
  // the reverse residual graph (if reverse is true).
  template <bool reverse>
  void ComputeReachableNodes(NodeIndex start, std::vector<NodeIndex>* result);
  // Maximum manageable flow.
  static const FlowQuantity kMaxFlowQuantity;
  // A pointer to the graph passed as argument.
  const Graph* graph_;
  // An array representing the excess for each node in graph_.
  QuantityArray node_excess_;
  // An array representing the height function for each node in graph_. For a
  // given node, this is a lower bound on the shortest path length from this
  // node to the sink in the residual network. The height of a node always goes
  // up during the course of a Solve().
  //
  // Since initially we saturate all the outgoing arcs of the source, we can
  // never reach the sink from the source in the residual graph. Initially we
  // set the height of the source to n (the number of nodes of the graph) and
  // it never changes. If a node has a height >= n, then this node can't reach
  // the sink and its height minus n is a lower bound on the shortest path
  // length from this node to the source in the residual graph.
  NodeHeightArray node_potential_;
  // An array representing the residual_capacity for each arc in graph_.
  // Residual capacities enable one to represent the capacity and flow for all
  // arcs in the graph in the following manner.
  // For all arc, residual_arc_capacity_[arc] = capacity[arc] - flow[arc]
  // Moreover, for reverse arcs, capacity[arc] = 0 by definition,
  // Also flow[Opposite(arc)] = -flow[arc] by definition.
  // Therefore:
  //  - for a direct arc:
  //    flow[arc] = 0 - flow[Opposite(arc)]
  //              = capacity[Opposite(arc)] - flow[Opposite(arc)]
  //              = residual_arc_capacity_[Opposite(arc)]
  //  - for a reverse arc:
  //    flow[arc] = -residual_arc_capacity_[arc]
  // Using these facts enables one to only maintain residual_arc_capacity_,
  // instead of both capacity and flow, for each direct and indirect arc. This
  // reduces the amount of memory for this information by a factor 2.
  QuantityArray residual_arc_capacity_;
  // An array representing the first admissible arc for each node in graph_.
  ArcIndexArray first_admissible_arc_;
  // A stack used for managing active nodes in the algorithm.
  // Note that the papers cited above recommend the use of a queue, but
  // benchmarking so far has not proved it is better. In particular, processing
  // nodes in LIFO order has better cache locality.
  std::vector<NodeIndex> active_nodes_;
  // A priority queue used for managing active nodes in the algorithm. It allows
  // to select the active node with highest height before each Discharge().
  // Moreover, since all pushes from this node will be to nodes with height
  // greater or equal to the initial discharged node height minus one, the
  // PriorityQueueWithRestrictedPush is a perfect fit.
  PriorityQueueWithRestrictedPush<NodeIndex, NodeHeight> active_node_by_height_;
  // The index of the source node in graph_.
  NodeIndex source_;
  // The index of the sink node in graph_.
  NodeIndex sink_;
  // The status of the problem.
  Status status_;
  // BFS queue used by the GlobalUpdate() function. We do not use a C++ queue
  // because we need access to the vector for different optimizations.
  std::vector<bool> node_in_bfs_queue_;
  std::vector<NodeIndex> bfs_queue_;
  // Whether or not to use GlobalUpdate().
  bool use_global_update_;
  // Whether or not we use a two-phase algorithm:
  // 1/ Only deal with nodes that can reach the sink. At the end we know the
  //    value of the maximum flow and we have a min-cut.
  // 2/ Call PushFlowExcessBackToSource() to obtain a max-flow. This is usually
  //    a lot faster than the first phase.
  bool use_two_phase_algorithm_;
  // Whether or not we use the PriorityQueueWithRestrictedPush to process the
  // active nodes rather than a simple queue. This can only be true if
  // use_global_update_ is true.
  //
  // Note(user): using a template will be slightly faster, but since we test
  // this in a non-critical path, this only has a minor impact.
  bool process_node_by_height_;
  // Whether or not we check the input, this is a small price to pay for
  // robustness. Disable only if you know the input is valid because an invalid
  // input can cause the algorithm to run into an infinite loop!
  bool check_input_;
  // Whether or not we check the result.
  // TODO(user): Make the check more exhaustive by checking the optimality?
  bool check_result_;
  // Statistics about this class.
  mutable StatsGroup stats_;
};
#if !SWIG
// Note: SWIG does not seem to understand explicit template specialization and
// instantiation declarations.
//
// The specializations and explicit instantiations below are defined in
// max_flow.cc; declaring them here avoids instantiating GenericMaxFlow in
// every translation unit that includes this header.
template <>
const FlowQuantity GenericMaxFlow<StarGraph>::kMaxFlowQuantity;
template <>
const FlowQuantity
    GenericMaxFlow<::util::ReverseArcListGraph<>>::kMaxFlowQuantity;
template <>
const FlowQuantity
    GenericMaxFlow<::util::ReverseArcStaticGraph<>>::kMaxFlowQuantity;
template <>
const FlowQuantity
    GenericMaxFlow<::util::ReverseArcMixedGraph<>>::kMaxFlowQuantity;
extern template class GenericMaxFlow<StarGraph>;
extern template class GenericMaxFlow<::util::ReverseArcListGraph<>>;
extern template class GenericMaxFlow<::util::ReverseArcStaticGraph<>>;
extern template class GenericMaxFlow<::util::ReverseArcMixedGraph<>>;
// This method computes a minimum vertex cover for the bipartite graph.
//
// If we define num_left=left_to_right_arcs.size(), the "left" nodes are
// integers in [0, num_left), and the "right" nodes are integers in [num_left,
// num_left + num_right).
//
// Returns a vector of size num_left + num_right, such that element #i is true
// if node #i is part of the minimum vertex cover and false if it is part of
// the maximum independent set (one is the complement of the other).
std::vector<bool> BipartiteMinimumVertexCover(
    const std::vector<std::vector<int>>& left_to_right_arcs, int num_right);
// Default instance MaxFlow that uses StarGraph. Note that we cannot just use a
// typedef because of dependent code expecting MaxFlow to be a real class.
// TODO(user): Modify this code and remove it.
class MaxFlow : public GenericMaxFlow<StarGraph> {
 public:
  // Forwards to GenericMaxFlow; "target" is the sink node of the network.
  MaxFlow(const StarGraph* graph, NodeIndex source, NodeIndex target)
      : GenericMaxFlow(graph, source, target) {}
};
#endif // SWIG
template <typename Element, typename IntegerPriority>
bool PriorityQueueWithRestrictedPush<Element, IntegerPriority>::IsEmpty()
    const {
  // The queue is empty iff neither parity bucket holds an element.
  return odd_queue_.empty() && even_queue_.empty();
}
template <typename Element, typename IntegerPriority>
void PriorityQueueWithRestrictedPush<Element, IntegerPriority>::Clear() {
  // Drop the contents of both parity buckets.
  odd_queue_.clear();
  even_queue_.clear();
}
template <typename Element, typename IntegerPriority>
void PriorityQueueWithRestrictedPush<Element, IntegerPriority>::Push(
    Element element, IntegerPriority priority) {
  // Since users may rely on it, we DCHECK the exact documented contract:
  // the new priority is at least the current maximum minus one.
  DCHECK(even_queue_.empty() || priority >= even_queue_.back().second - 1);
  DCHECK(odd_queue_.empty() || priority >= odd_queue_.back().second - 1);
  // Dispatch on the parity of the priority. The DCHECK below is less
  // restrictive than the ones above but checks the necessary and sufficient
  // condition for correct queue behavior: each bucket stays sorted by
  // increasing priority.
  std::vector<std::pair<Element, IntegerPriority>>& bucket =
      (priority & 1) ? odd_queue_ : even_queue_;
  DCHECK(bucket.empty() || priority >= bucket.back().second);
  bucket.push_back(std::make_pair(element, priority));
}
template <typename Element, typename IntegerPriority>
Element PriorityQueueWithRestrictedPush<Element, IntegerPriority>::Pop() {
  DCHECK(!IsEmpty());
  // If one bucket is empty, pop from the other. Otherwise pop from whichever
  // bucket currently ends with the higher priority (ties go to the even one).
  if (even_queue_.empty()) return PopBack(&odd_queue_);
  if (odd_queue_.empty()) return PopBack(&even_queue_);
  const bool odd_is_higher =
      odd_queue_.back().second > even_queue_.back().second;
  return odd_is_higher ? PopBack(&odd_queue_) : PopBack(&even_queue_);
}
template <typename Element, typename IntegerPriority>
Element PriorityQueueWithRestrictedPush<Element, IntegerPriority>::PopBack(
    std::vector<std::pair<Element, IntegerPriority>>* queue) {
  DCHECK(!queue->empty());
  // Copy the element out before shrinking the vector: pop_back() would
  // otherwise destroy it.
  Element result = queue->back().first;
  queue->pop_back();
  return result;
}
} // namespace operations_research
#endif // OR_TOOLS_GRAPH_MAX_FLOW_H_

View File

@@ -13,38 +13,22 @@
#include "ortools/graph/max_flow.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "benchmark/benchmark.h"
#include "google/protobuf/text_format.h"
#include "gtest/gtest.h"
#include "ortools/base/gmock.h"
#include "ortools/base/logging.h"
#include "ortools/base/path.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
#include "ortools/linear_solver/linear_solver.h"
#include "ortools/graph/ebert_graph.h"
#include "ortools/graph/flow_problem.pb.h"
#include "ortools/util/file_util.h"
#if not defined(ROOT_DIR)
#define ROOT_DIR "_main/"
#endif
namespace operations_research {
namespace {
using ::testing::ContainerEq;
using ::testing::WhenSorted;
#if not defined(ROOT_DIR)
#define ROOT_DIR "_main/"
#endif
TEST(SimpleMaxFlowTest, EmptyWithValidSourceAndSink) {
SimpleMaxFlow max_flow;
@@ -198,681 +182,5 @@ TEST(SimpleMaxFlowTest, ProblematicProblemWithMaxCapacity) {
EXPECT_EQ(10290243, solver.OptimalFlow());
}
// Builds a graph from the parallel tail/head/capacity arrays (num_arcs
// entries over num_nodes nodes), then solves the max-flow between node 0 and
// node num_nodes - 1 under every combination of the three algorithm options.
// For each combination it checks the input consistency, the per-arc flows,
// the optimal flow value and, when the expected cuts are provided, the
// source-side and sink-side min-cuts. Returns the status of the first run and
// EXPECTs that all other option combinations agree on it.
template <typename Graph>
typename GenericMaxFlow<Graph>::Status MaxFlowTester(
    const typename Graph::NodeIndex num_nodes,
    const typename Graph::ArcIndex num_arcs,
    const typename Graph::NodeIndex* tail,
    const typename Graph::NodeIndex* head, const FlowQuantity* capacity,
    const FlowQuantity* expected_flow, const FlowQuantity expected_total_flow,
    const std::vector<typename Graph::NodeIndex>* expected_source_min_cut =
        nullptr,
    const std::vector<typename Graph::NodeIndex>* expected_sink_min_cut =
        nullptr) {
  Graph graph(num_nodes, num_arcs);
  for (int i = 0; i < num_arcs; ++i) {
    graph.AddArc(tail[i], head[i]);
  }
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  typename GenericMaxFlow<Graph>::Status result =
      GenericMaxFlow<Graph>::OPTIMAL;
  // Iterate over all 2^3 combinations of the three boolean options, one bit
  // per option.
  for (int options = 0; options < 8; ++options) {
    GenericMaxFlow<Graph> max_flow(&graph, 0, num_nodes - 1);
    max_flow.SetUseGlobalUpdate(options & 1);
    max_flow.SetUseTwoPhaseAlgorithm(options & 2);
    // Bug fix: this used to be `options & 3`, which overlaps the two bits
    // above, so bit 2 was never exercised and several option combinations
    // were never tested.
    max_flow.ProcessNodeByHeight(options & 4);
    for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
      max_flow.SetArcCapacity(arc, capacity[arc]);
      EXPECT_EQ(max_flow.Capacity(arc), capacity[arc]);
    }
    EXPECT_TRUE(max_flow.CheckInputConsistency());
    EXPECT_TRUE(max_flow.Solve());
    if (max_flow.status() == GenericMaxFlow<Graph>::OPTIMAL) {
      const FlowQuantity total_flow = max_flow.GetOptimalFlow();
      EXPECT_EQ(expected_total_flow, total_flow);
      for (int i = 0; i < num_arcs; ++i) {
        EXPECT_EQ(expected_flow[i], max_flow.Flow(i)) << " i = " << i;
      }
    }
    EXPECT_TRUE(max_flow.CheckResult());
    if (options == 0) {
      result = max_flow.status();
    } else {
      // All option combinations must agree on the final status.
      EXPECT_EQ(result, max_flow.status());
    }
    // Tests the min-cut functions.
    if (expected_source_min_cut != nullptr) {
      std::vector<NodeIndex> cut;
      max_flow.GetSourceSideMinCut(&cut);
      std::sort(cut.begin(), cut.end());
      EXPECT_THAT(*expected_source_min_cut, WhenSorted(ContainerEq(cut)));
    }
    if (expected_sink_min_cut != nullptr) {
      std::vector<NodeIndex> cut;
      max_flow.GetSinkSideMinCut(&cut);
      std::sort(cut.begin(), cut.end());
      EXPECT_THAT(*expected_sink_min_cut, WhenSorted(ContainerEq(cut)));
    }
  }
  return result;
}
// Typed fixture so the generic tests below run once per graph implementation.
template <typename Graph>
class GenericMaxFlowTest : public ::testing::Test {};

// All the graph implementations the generic algorithm is compiled for.
using GraphTypes =
    ::testing::Types<StarGraph, util::ReverseArcListGraph<>,
                     util::ReverseArcStaticGraph<>,
                     util::ReverseArcMixedGraph<>>;
TYPED_TEST_SUITE(GenericMaxFlowTest, GraphTypes);
// A simple 0->1->2->3 path: the max flow is the bottleneck capacity (8).
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow1) {
  const int kNumNodes = 4;
  const int kNumArcs = 3;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3};
  const FlowQuantity kCapacity[kNumArcs] = {8, 10, 8};
  const FlowQuantity kExpectedFlow[kNumArcs] = {8, 8, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// A small DAG with some zero-capacity arcs; checks flows and both min-cuts.
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow2) {
  const int kNumNodes = 6;
  const int kNumArcs = 9;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1,
                                                         2, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3,
                                                         4, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {6, 8, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {1, 4, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedTotalFlow = 10;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Parallel (duplicate) arcs between consecutive nodes must all carry flow.
TYPED_TEST(GenericMaxFlowTest, FeasibleFlowWithMultipleArcs) {
  const int kNumNodes = 5;
  const int kNumArcs = 8;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 1,
                                                         2, 2, 3, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 1, 2, 2,
                                                         3, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Arcs at the maximum representable capacity must not overflow when the
// actual max-flow (8) is small.
TYPED_TEST(GenericMaxFlowTest, HugeCapacity) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax, 5, 3,
                                            kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4, 3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// The total flow is exactly the maximum representable FlowQuantity: this is
// the largest instance that must still be solved as OPTIMAL (no overflow).
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflowLimitCase) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const FlowQuantity kHalfLow = kCapacityMax / 2;
  const FlowQuantity kHalfHigh = kCapacityMax - kHalfLow;
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kHalfLow, kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kHalfLow, kHalfHigh, kHalfLow,
                                                kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Two max-capacity parallel paths: the true max flow exceeds the
// representable range, so the solver must report INT_OVERFLOW.
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflow) {
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 4;
  const int kNumArcs = 4;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kCapacityMax, kCapacityMax,
                                                kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  EXPECT_EQ(
      GenericMaxFlow<TypeParam>::INT_OVERFLOW,
      MaxFlowTester<TypeParam>(kNumNodes, kNumArcs, kTail, kHead, kCapacity,
                               kExpectedFlow, kExpectedTotalFlow));
}
// A direct source->sink arc must be saturated in addition to the indirect
// paths.
TYPED_TEST(GenericMaxFlowTest, DirectArcFromSourceToSink) {
  const int kNumNodes = 4;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 3, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 2, 2};
  const FlowQuantity kExpectedFlow[kNumArcs] = {2, 8, 2, 2, 2};
  const FlowQuantity kExpectedTotalFlow = 12;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// The sink (node 5) has no incoming arc: the max flow must be zero and every
// arc must carry no flow.
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph1) {
  const int kNumNodes = 6;
  const int kNumArcs = 7;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 3, 4, 5, 6};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  std::vector<int> source_cut({0, 1, 2, 3, 4});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// The graph splits into two components: {0, 1, 2} containing the source and
// {3, 4, 5} containing the sink, with no arc between them, so max flow is 0.
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph2) {
  const int kNumNodes = 6;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 6, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  // Each side of the cut is exactly one connected component.
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3, 4, 5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Turns a bipartite graph into a flow network: node num_tails + num_heads
// becomes the source, with an arc to every left node, and the node after it
// becomes the sink, with an arc from every right node.
template <typename Graph>
void AddSourceAndSink(const typename Graph::NodeIndex num_tails,
                      const typename Graph::NodeIndex num_heads, Graph* graph) {
  using NodeIndex = typename Graph::NodeIndex;
  const NodeIndex source_node = num_tails + num_heads;
  const NodeIndex sink_node = source_node + 1;
  for (NodeIndex left = 0; left < num_tails; ++left) {
    graph->AddArc(source_node, left);
  }
  for (NodeIndex right = 0; right < num_heads; ++right) {
    graph->AddArc(num_tails + right, sink_node);
  }
}
// Builds a complete bipartite flow network: every one of the num_tails left
// nodes is connected to every one of the num_heads right nodes, then a
// source and a sink are appended (see AddSourceAndSink()).
template <typename Graph>
void GenerateCompleteGraph(const typename Graph::NodeIndex num_tails,
                           const typename Graph::NodeIndex num_heads,
                           Graph* graph) {
  using NodeIndex = typename Graph::NodeIndex;
  // Two extra nodes hold the source and the sink.
  const NodeIndex total_nodes = num_tails + num_heads + 2;
  // One arc per (left, right) pair plus the source/sink attachment arcs.
  const typename Graph::ArcIndex total_arcs =
      num_tails * num_heads + num_tails + num_heads;
  graph->Reserve(total_nodes, total_arcs);
  for (NodeIndex left = 0; left < num_tails; ++left) {
    for (NodeIndex right = 0; right < num_heads; ++right) {
      graph->AddArc(left, num_tails + right);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Builds a sparse random bipartite flow network: each of the num_tails left
// nodes gets `degree` arcs to uniformly drawn right nodes (duplicate arcs
// are possible), then a source and a sink are appended. The generator is
// seeded with 0, so the resulting graph is deterministic across runs.
template <typename Graph>
void GeneratePartialRandomGraph(const typename Graph::NodeIndex num_tails,
                                const typename Graph::NodeIndex num_heads,
                                const typename Graph::NodeIndex degree,
                                Graph* graph) {
  using NodeIndex = typename Graph::NodeIndex;
  const NodeIndex total_nodes = num_tails + num_heads + 2;
  const typename Graph::ArcIndex total_arcs =
      num_tails * degree + num_tails + num_heads;
  graph->Reserve(total_nodes, total_arcs);
  std::mt19937 rng(0);
  for (NodeIndex left = 0; left < num_tails; ++left) {
    for (NodeIndex i = 0; i < degree; ++i) {
      const NodeIndex right = absl::Uniform(rng, 0, num_heads);
      graph->AddArc(left, num_tails + right);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Fills arc_valuation with one pseudo-random value in [0, max_range) per arc
// of `graph`. Seeded with 0, so the valuation is deterministic.
template <typename Graph>
void GenerateRandomArcValuations(const Graph& graph, const int64_t max_range,
                                 std::vector<int64_t>* arc_valuation) {
  std::mt19937 rng(0);
  arc_valuation->resize(graph.num_arcs());
  for (typename Graph::ArcIndex arc = 0; arc < graph.num_arcs(); ++arc) {
    (*arc_valuation)[arc] = absl::Uniform(rng, 0, max_range);
  }
}
// Copies the given capacities onto the arcs of max_flow's underlying graph.
template <typename Graph>
void SetUpNetworkData(absl::Span<const int64_t> arc_capacity,
                      GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    max_flow->SetArcCapacity(arc, arc_capacity[arc]);
  }
}
// Runs max_flow->Solve(), checks that it reports OPTIMAL and that the
// computed flow is well formed (non-negative and within capacity on every
// direct arc, mirrored non-positive flow and zero capacity on the opposite
// arc), then returns the optimal flow value.
template <typename Graph>
FlowQuantity SolveMaxFlow(GenericMaxFlow<Graph>* max_flow) {
  EXPECT_TRUE(max_flow->Solve());
  EXPECT_EQ(GenericMaxFlow<Graph>::OPTIMAL, max_flow->status());
  const Graph& graph = *max_flow->graph();
  for (typename Graph::ArcIndex arc = 0; arc < graph.num_arcs(); ++arc) {
    const typename Graph::ArcIndex opposite =
        Graphs<Graph>::OppositeArc(graph, arc);
    EXPECT_LE(max_flow->Flow(opposite), 0) << arc;
    EXPECT_EQ(0, max_flow->Capacity(opposite)) << arc;
    EXPECT_LE(0, max_flow->Flow(arc)) << arc;
    EXPECT_LE(max_flow->Flow(arc), max_flow->Capacity(arc)) << arc;
  }
  return max_flow->GetOptimalFlow();
}
// Reference implementation: solves the same max-flow instance as an LP with
// GLOP and returns the (rounded) optimal flow value. The LP has one flow
// variable per arc, a conservation constraint for every internal node, and
// maximizes the total flow leaving the source (node num_nodes - 2 by
// construction of the test networks).
template <typename Graph>
FlowQuantity SolveMaxFlowWithLP(GenericMaxFlow<Graph>* max_flow) {
  MPSolver solver("LPSolver", MPSolver::GLOP_LINEAR_PROGRAMMING);
  const double infinity = solver.infinity();
  const Graph* graph = max_flow->graph();
  const typename Graph::NodeIndex num_nodes = graph->num_nodes();
  const typename Graph::ArcIndex num_arcs = graph->num_arcs();
  const typename Graph::NodeIndex source_index = num_nodes - 2;
  std::vector<MPConstraint*> constraint(num_nodes);
  for (typename Graph::NodeIndex node = 0; node < num_nodes; ++node) {
    constraint[node] = solver.MakeRowConstraint();
    if (node < source_index) {  // Node is neither source nor sink.
      constraint[node]->SetBounds(0.0, 0.0);  // Flow is conserved.
    } else {
      constraint[node]->SetBounds(-infinity, +infinity);
    }
  }
  std::vector<MPVariable*> var(num_arcs);
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    var[arc] = solver.MakeNumVar(0.0, max_flow->Capacity(arc),
                                 absl::StrFormat("v%d", arc));
    constraint[graph->Tail(arc)]->SetCoefficient(var[arc], 1.0);
    constraint[graph->Head(arc)]->SetCoefficient(var[arc], -1.0);
  }
  // Maximize the flow out of the source, i.e. minimize its negation.
  MPObjective* const objective = solver.MutableObjective();
  for (typename Graph::OutgoingArcIterator arc_it(*graph, source_index);
       arc_it.Ok(); arc_it.Next()) {
    objective->SetCoefficient(var[arc_it.Index()], -1.0);
  }
  // The solver status used to be ignored; fail loudly instead of silently
  // returning a meaningless objective value when the LP does not solve.
  EXPECT_EQ(MPSolver::OPTIMAL, solver.Solve());
  return static_cast<FlowQuantity>(-objective->Value() + .5);
}
template <typename Graph>
struct MaxFlowSolver {
typedef FlowQuantity (*Solver)(GenericMaxFlow<Graph>* max_flow);
};
// Solves a unit-capacity assignment on a complete bipartite graph with the
// given solver and checks the optimal flow value.
//
// expected_flow2 is unused here; it is part of the signature shared (via the
// test macros) with the *Flow drivers that solve a second, perturbed
// instance.
template <typename Graph>
void FullRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                          typename Graph::NodeIndex num_tails,
                          typename Graph::NodeIndex num_heads,
                          FlowQuantity expected_flow1,
                          FlowQuantity expected_flow2) {
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  Graphs<Graph>::Build(&graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  // By construction the source and sink are the last two nodes.
  auto max_flow = std::make_unique<GenericMaxFlow<Graph>>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(arc_capacity, max_flow.get());
  const FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Solves a unit-capacity assignment on a sparse random bipartite graph
// (degree 10 per left node) with the given solver and checks the optimal
// flow value.
//
// expected_flow2 is unused here; it is part of the signature shared (via the
// test macros) with the *Flow drivers that solve a second, perturbed
// instance.
template <typename Graph>
void PartialRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                             typename Graph::NodeIndex num_tails,
                             typename Graph::NodeIndex num_heads,
                             FlowQuantity expected_flow1,
                             FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  Graphs<Graph>::Build(&graph);
  CHECK_EQ(graph.num_arcs(), num_tails * kDegree + num_tails + num_heads);
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  // By construction the source and sink are the last two nodes.
  auto max_flow = std::make_unique<GenericMaxFlow<Graph>>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(arc_capacity, max_flow.get());
  const FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Re-assigns every arc capacity to max(original - delta, 0). Calling this
// with delta == 0 restores the capacities recorded in arc_capacity.
template <typename Graph>
void ChangeCapacities(absl::Span<const int64_t> arc_capacity,
                      FlowQuantity delta, GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    const int64_t reduced = std::max(arc_capacity[arc] - delta, int64_t{0});
    max_flow->SetArcCapacity(arc, reduced);
  }
}
// Solves a sparse random-capacity instance three times: as generated
// (expected_flow1), with every capacity reduced by kCapacityDelta
// (expected_flow2), and with the original capacities restored
// (expected_flow1 again). The expected values are golden numbers for the
// fixed RNG seed used by the generators.
template <typename Graph>
void PartialRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                       typename Graph::NodeIndex num_tails,
                       typename Graph::NodeIndex num_heads,
                       FlowQuantity expected_flow1,
                       FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  // Build() may renumber the arcs: apply the same permutation to the
  // capacities so each value stays attached to its arc.
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  // By construction the source and sink are the last two nodes.
  auto max_flow = std::make_unique<GenericMaxFlow<Graph>>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  // Tighten all capacities and re-solve on the same solver instance.
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  // Restore the original capacities: the first optimum must be recovered.
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Same protocol as PartialRandomFlow() but on a complete bipartite graph:
// solve as generated, with all capacities reduced by kCapacityDelta, then
// restored. The expected values are golden numbers for the fixed RNG seed.
template <typename Graph>
void FullRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                    typename Graph::NodeIndex num_tails,
                    typename Graph::NodeIndex num_heads,
                    FlowQuantity expected_flow1, FlowQuantity expected_flow2) {
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  // Build() may renumber the arcs: apply the same permutation to the
  // capacities so each value stays attached to its arc.
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  // By construction the source and sink are the last two nodes.
  auto max_flow = std::make_unique<GenericMaxFlow<Graph>>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1);
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  // Tighten all capacities and re-solve on the same solver instance.
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  // Restore the original capacities: the first optimum must be recovered.
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// LP_AND_FLOW_TEST(name, size, f1, f2) expands to three TESTs running the
// driver `name` with num_tails == num_heads == size and expected flows
// f1/f2: once against the LP reference solver, once with GenericMaxFlow on
// StarGraph, and once on ReverseArcStaticGraph.
#define LP_AND_FLOW_TEST(test_name, size, expected_flow1, expected_flow2) \
  LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2)           \
  FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2)         \
  FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2)
#define LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
  TEST(LPMaxFlowTest, test_name##size) {                              \
    test_name<StarGraph>(SolveMaxFlowWithLP<StarGraph>, size, size,   \
                         expected_flow1, expected_flow2);             \
  }
#define FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
  TEST(MaxFlowTest, test_name##size) {                                  \
    test_name<StarGraph>(SolveMaxFlow, size, size, expected_flow1,      \
                         expected_flow2);                               \
  }
#define FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2) \
  TEST(MaxFlowTestStaticGraph, test_name##size) {                          \
    test_name<util::ReverseArcStaticGraph<>>(SolveMaxFlow, size, size,     \
                                             expected_flow1, expected_flow2); \
  }
// The expected flow values below are golden numbers tied to the fixed RNG
// seeds used by the graph/valuation generators.
LP_AND_FLOW_TEST(FullRandomAssignment, 300, 300, 300);
LP_AND_FLOW_TEST(PartialRandomAssignment, 100, 100, 100);
LP_AND_FLOW_TEST(PartialRandomAssignment, 1000, 1000, 1000);
LP_AND_FLOW_TEST(PartialRandomFlow, 400, 1898664, 1515203);
LP_AND_FLOW_TEST(FullRandomFlow, 100, 482391, 386587);
// LARGE must be defined from the build command line to test larger instances.
#ifdef LARGE
LP_AND_FLOW_TEST(PartialRandomAssignment, 10000, 10000, 10000);
#endif
// Benchmarks the max-flow solver on a complete bipartite assignment problem
// with 3000 nodes per side and unit capacities.
template <typename Graph>
static void BM_FullRandomAssignment(benchmark::State& state) {
  const int kSize = 3000;
  for (auto _ : state) {
    FullRandomAssignment<Graph>(SolveMaxFlow, kSize, kSize, kSize, kSize);
  }
  // One "item" per left-side node of each solved instance.
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Benchmarks the max-flow solver on a sparse (degree-10) random assignment
// problem with 10100 nodes per side and unit capacities.
template <typename Graph>
static void BM_PartialRandomAssignment(benchmark::State& state) {
  const int kSize = 10100;
  for (auto _ : state) {
    PartialRandomAssignment<Graph>(SolveMaxFlow, kSize, kSize, kSize, kSize);
  }
  // One "item" per left-side node of each solved instance.
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Benchmarks the solve/perturb/restore cycle of PartialRandomFlow() on an
// 800-per-side sparse instance. The flow values are golden numbers for the
// generators' fixed RNG seed.
template <typename Graph>
static void BM_PartialRandomFlow(benchmark::State& state) {
  const int kSize = 800;
  for (auto _ : state) {
    PartialRandomFlow<Graph>(SolveMaxFlow, kSize, kSize, 3884850, 3112123);
  }
  // One "item" per left-side node of each solved instance.
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Benchmarks the solve/perturb/restore cycle of FullRandomFlow() on an
// 800-per-side complete bipartite instance. The flow values are golden
// numbers for the generators' fixed RNG seed.
template <typename Graph>
static void BM_FullRandomFlow(benchmark::State& state) {
  const int kSize = 800;
  for (auto _ : state) {
    FullRandomFlow<Graph>(SolveMaxFlow, kSize, kSize, 4000549, 3239512);
  }
  // One "item" per left-side node of each solved instance.
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Register every benchmark for each of the four graph representations.
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcMixedGraph<>);
// One iteration of each of the following tests is slow.
BENCHMARK_TEMPLATE(BM_FullRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcMixedGraph<>);
// The test macros are only needed above; avoid leaking them.
#undef LP_AND_FLOW_TEST
#undef LP_ONLY_TEST
#undef FLOW_ONLY_TEST
#undef FLOW_ONLY_TEST_SG
// ----------------------------------------------------------
// PriorityQueueWithRestrictedPush tests.
// ----------------------------------------------------------
// Pop() returns elements in decreasing priority order regardless of
// insertion order, and Clear() empties the queue.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehavior) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  queue.Push("D", 10);
  queue.Push("E", 9);
  // Highest priority first: 10, 9, 2, 1, 0.
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  EXPECT_FALSE(queue.IsEmpty());
  queue.Clear();
  EXPECT_TRUE(queue.IsEmpty());
}
// Same decreasing-priority contract, but with Push() and Pop() interleaved:
// elements pushed after some pops are still returned in priority order,
// with earlier equal-priority elements ("B" at 0) coming out last.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehaviorWithMixedPushPop) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  queue.Push("D", 1);
  queue.Push("E", 0);
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("E", 1);
  EXPECT_FALSE(queue.IsEmpty());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
}
// Randomized stress test: pushes 10000 elements whose priorities are sorted
// then randomly bumped by +1 (so each push is at, or one above, the current
// top priority), and checks that a random interleaving of pushes and pops
// returns elements in stable decreasing-priority order.
TEST(PriorityQueueWithRestrictedPushTest, RandomPushPop) {
  struct ElementWithPriority {
    ElementWithPriority(int _element, int _priority)
        : element(_element), priority(_priority) {}
    bool operator<(const ElementWithPriority& other) const {
      return priority < other.priority;
    }
    int element;
    int priority;
  };
  std::vector<ElementWithPriority> pairs;
  std::mt19937 randomizer(1);
  const int kNumElement = 10000;
  const int kMaxPriority = 10000;  // We want duplicates and gaps.
  for (int i = 0; i < kNumElement; ++i) {
    pairs.push_back(
        ElementWithPriority(i, absl::Uniform(randomizer, 0, kMaxPriority)));
  }
  std::sort(pairs.begin(), pairs.end());
  // Randomly add +1 and push to the queue.
  PriorityQueueWithRestrictedPush<int, int> queue;
  // size_t index: the original `int i` triggered a signed/unsigned
  // comparison against pairs.size().
  for (size_t i = 0; i < pairs.size(); ++i) {
    pairs[i].priority += absl::Bernoulli(randomizer, 1.0 / 2) ? 1 : 0;
    queue.Push(pairs[i].element, pairs[i].priority);
  }
  // Stable sort the pairs for checking (the queue order is stable).
  std::stable_sort(pairs.begin(), pairs.end());
  // Random Push() and Pop() with more Pop(). `current` is the number of
  // elements still in the queue; re-pushing pairs[current] re-inserts the
  // element that was just popped.
  size_t current = pairs.size();
  while (!queue.IsEmpty()) {
    EXPECT_GT(current, 0u);
    if (absl::Bernoulli(randomizer, 1.0 / 4) && current < pairs.size()) {
      queue.Push(pairs[current].element, pairs[current].priority);
      ++current;
    } else {
      --current;
      EXPECT_EQ(pairs[current].element, queue.Pop());
    }
  }
}
// 4 left nodes connected to right nodes 4..7: a minimum vertex cover picks
// node 5 (covering the edges of left nodes 0 and 2) plus left nodes 1 and 3,
// so exactly 3 of the 8 result entries are true.
TEST(BipartiteMinimumVertexCoverTest, BasicBehavior) {
  const int num_right = 4;
  const std::vector<std::vector<int>> left_to_right = {
      {5}, {4, 5, 6}, {5}, {5, 6, 7}};
  // Compute the cover once instead of re-running the solver per assertion.
  const auto cover = BipartiteMinimumVertexCover(left_to_right, num_right);
  EXPECT_EQ(absl::c_count(cover, true), 3);
  EXPECT_EQ(absl::c_count(cover, false), 5);
}
// With no edges at all, the empty set is already a cover: none of the
// 6 result entries (2 left + 4 right nodes) is selected.
TEST(BipartiteMinimumVertexCoverTest, Empty) {
  const int num_right = 4;
  const std::vector<std::vector<int>> left_to_right = {{}, {}};
  const auto cover = BipartiteMinimumVertexCover(left_to_right, num_right);
  EXPECT_EQ(absl::c_count(cover, false), 6);
}
// Verifies the DCHECKs guarding the queue's contract: Pop() on an empty
// queue dies, and a push with too low a priority dies.
// NOTE(review): from these cases, a push appears to be allowed only at the
// highest priority seen or one below it (after pushing 10 and 9, both 4 and
// 8 die) — confirm against the queue's documentation.
TEST(PriorityQueueWithRestrictedPushDeathTest, DCHECK) {
  // Don't run this test in opt mode.
  if (!DEBUG_MODE) GTEST_SKIP();
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  ASSERT_DEATH(queue.Pop(), "");
  queue.Push("A", 10);
  queue.Push("B", 9);
  ASSERT_DEATH(queue.Push("C", 4), "");
  ASSERT_DEATH(queue.Push("C", 8), "");
}
} // namespace
} // namespace operations_research

View File

@@ -22,9 +22,10 @@
#include <vector>
#include "absl/flags/flag.h"
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "ortools/base/dump_vars.h"
#include "ortools/base/logging.h"
#include "ortools/base/mathutil.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
@@ -39,8 +40,6 @@ ABSL_FLAG(bool, min_cost_flow_check_feasibility, true,
"Check that the graph has enough capacity to send all supplies "
"and serve all demands. Also check that the sum of supplies "
"is equal to the sum of demands.");
ABSL_FLAG(bool, min_cost_flow_check_balance, true,
"Check that the sum of supplies is equal to the sum of demands.");
ABSL_FLAG(bool, min_cost_flow_check_result, true,
"Check that the result is valid.");
@@ -59,7 +58,6 @@ GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::GenericMinCostFlow(
node_potential_.assign(max_num_nodes, 0);
node_excess_.assign(max_num_nodes, 0);
initial_node_excess_.assign(max_num_nodes, 0);
feasible_node_excess_.assign(max_num_nodes, 0);
}
const ArcIndex max_num_arcs = Graphs<Graph>::ArcReservation(*graph_);
if (max_num_arcs > 0) {
@@ -127,49 +125,97 @@ void GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::SetArcCapacity(
}
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
void GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::SetArcFlow(
ArcIndex arc, ArcFlowType new_flow) {
DCHECK(IsArcValid(arc));
const FlowQuantity capacity = Capacity(arc);
DCHECK_GE(capacity, new_flow);
residual_arc_capacity_.Set(Opposite(arc), new_flow);
residual_arc_capacity_.Set(arc, capacity - new_flow);
status_ = NOT_SOLVED;
feasibility_checked_ = false;
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
bool GenericMinCostFlow<Graph, ArcFlowType,
ArcScaledCostType>::CheckInputConsistency() {
FlowQuantity total_supply = 0;
uint64_t max_capacity = 0; // uint64_t because it is positive and will be
// used to check against FlowQuantity overflows.
for (ArcIndex arc = 0; arc < graph_->num_arcs(); ++arc) {
const uint64_t capacity =
static_cast<uint64_t>(residual_arc_capacity_[arc]);
max_capacity = std::max(capacity, max_capacity);
}
uint64_t total_flow = 0; // uint64_t for the same reason as max_capacity.
const FlowQuantity kMaxFlow = std::numeric_limits<FlowQuantity>::max();
// First lets make sure supply == demand and the total supply/demand do not
// overflow.
FlowQuantity sum_of_supplies = 0;
FlowQuantity sum_of_demands = 0;
for (NodeIndex node = 0; node < graph_->num_nodes(); ++node) {
const FlowQuantity excess = node_excess_[node];
total_supply += excess;
if (excess > 0) {
total_flow += excess;
if (std::numeric_limits<FlowQuantity>::max() <
max_capacity + total_flow) {
status_ = BAD_COST_RANGE;
LOG(ERROR) << "Input consistency error: max capacity + flow exceed "
<< "precision";
sum_of_supplies = CapAdd(sum_of_supplies, excess);
if (sum_of_supplies >= kMaxFlow) {
status_ = BAD_CAPACITY_RANGE;
LOG(ERROR) << "Input consistency error: sum of supplies overflow";
return false;
}
} else if (excess < 0) {
sum_of_demands = CapAdd(sum_of_demands, -excess);
if (sum_of_demands >= kMaxFlow) {
status_ = BAD_CAPACITY_RANGE;
LOG(ERROR) << "Input consistency error: sum of demands overflow";
return false;
}
}
}
if (total_supply != 0) {
if (sum_of_supplies != sum_of_demands) {
status_ = UNBALANCED;
LOG(ERROR) << "Input consistency error: unbalanced problem";
return false;
}
std::vector<FlowQuantity> max_node_excess = node_excess_;
std::vector<FlowQuantity> min_node_excess = node_excess_;
for (ArcIndex arc = 0; arc < graph_->num_arcs(); ++arc) {
const FlowQuantity capacity = residual_arc_capacity_[arc];
CHECK_GE(capacity, 0);
if (capacity == 0) continue;
const int tail = graph_->Tail(arc);
const int head = graph_->Head(arc);
min_node_excess[tail] = CapSub(min_node_excess[tail], capacity);
max_node_excess[head] = CapAdd(max_node_excess[head], capacity);
}
const int num_nodes = graph_->num_nodes();
for (NodeIndex node = 0; node < num_nodes; ++node) {
if (max_node_excess[node] >= kMaxFlow ||
min_node_excess[node] <= -kMaxFlow) {
// Try to fix it.
// Some user just use arc with infinite capacity out of the source/sink.
// This is why we use CappedCapacity() for a name.
if (max_node_excess[node] < std::numeric_limits<ArcFlowType>::max()) {
// There is no point having an outgoing arc with more than this.
const ArcFlowType upper_bound =
std::max<ArcFlowType>(0, max_node_excess[node]);
// Adjust and recompute min_node_excess[node].
min_node_excess[node] = node_excess_[node];
for (OutgoingArcIterator it(*graph_, node); it.Ok(); it.Next()) {
const int arc = it.Index();
residual_arc_capacity_[arc] =
std::min(residual_arc_capacity_[arc], upper_bound);
min_node_excess[node] =
CapSub(min_node_excess[node], residual_arc_capacity_[arc]);
}
if (min_node_excess[node] > -kMaxFlow) continue;
}
if (min_node_excess[node] > -std::numeric_limits<ArcFlowType>::max()) {
// There is no point having an incoming arc with more than this.
const ArcFlowType upper_bound =
std::max<ArcFlowType>(0, -min_node_excess[node]);
// Adjust and recompute max_node_excess[node].
max_node_excess[node] = node_excess_[node];
for (IncomingArcIterator it(*graph_, node); it.Ok(); it.Next()) {
const int arc = it.Index();
residual_arc_capacity_[arc] =
std::min(residual_arc_capacity_[arc], upper_bound);
max_node_excess[node] =
CapAdd(max_node_excess[node], residual_arc_capacity_[arc]);
}
if (max_node_excess[node] < kMaxFlow) continue;
}
status_ = BAD_CAPACITY_RANGE;
LOG(ERROR) << "Maximum in or out flow of node + excess " << node
<< " overflow the FlowQuantity type (int64_t).";
return false;
}
}
return true;
}
@@ -248,9 +294,8 @@ GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::DebugString(
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::
CheckFeasibility(std::vector<NodeIndex>* const infeasible_supply_node,
std::vector<NodeIndex>* const infeasible_demand_node) {
bool GenericMinCostFlow<Graph, ArcFlowType,
ArcScaledCostType>::CheckFeasibility() {
SCOPED_TIME_STAT(&stats_);
// Create a new graph, which is a copy of graph_, with the following
// modifications:
@@ -276,8 +321,6 @@ bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::
const NodeIndex sink = num_nodes_in_max_flow - 1;
StarGraph checker_graph(num_nodes_in_max_flow, num_arcs_in_max_flow);
MaxFlow checker(&checker_graph, source, sink);
checker.SetCheckInput(false);
checker.SetCheckResult(false);
// Copy graph_ to checker_graph.
for (ArcIndex arc = 0; arc < graph_->num_arcs(); ++arc) {
const ArcIndex new_arc =
@@ -310,44 +353,10 @@ bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::
return false;
}
const FlowQuantity optimal_max_flow = checker.GetOptimalFlow();
feasible_node_excess_.assign(checker_graph.num_nodes(), 0);
for (StarGraph::OutgoingArcIterator it(checker_graph, source); it.Ok();
it.Next()) {
const ArcIndex arc = it.Index();
const NodeIndex node = checker_graph.Head(arc);
const FlowQuantity flow = checker.Flow(arc);
feasible_node_excess_[node] = flow;
if (infeasible_supply_node != nullptr) {
infeasible_supply_node->push_back(node);
}
}
for (StarGraph::IncomingArcIterator it(checker_graph, sink); it.Ok();
it.Next()) {
const ArcIndex arc = it.Index();
const NodeIndex node = checker_graph.Tail(arc);
const FlowQuantity flow = checker.Flow(arc);
feasible_node_excess_[node] = -flow;
if (infeasible_demand_node != nullptr) {
infeasible_demand_node->push_back(node);
}
}
feasibility_checked_ = true;
return optimal_max_flow == total_supply;
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::MakeFeasible() {
if (!feasibility_checked_) {
return false;
}
for (NodeIndex node = 0; node < graph_->num_nodes(); ++node) {
const FlowQuantity excess = feasible_node_excess_[node];
node_excess_[node] = excess;
initial_node_excess_[node] = excess;
}
return true;
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
FlowQuantity GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::Flow(
ArcIndex arc) const {
@@ -385,20 +394,6 @@ FlowQuantity GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::Supply(
return node_excess_[node];
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
FlowQuantity
GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::InitialSupply(
NodeIndex node) const {
return initial_node_excess_[node];
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
FlowQuantity
GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::FeasibleSupply(
NodeIndex node) const {
return feasible_node_excess_[node];
}
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::IsAdmissible(
ArcIndex arc) const {
@@ -452,17 +447,17 @@ GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::
template <typename Graph, typename ArcFlowType, typename ArcScaledCostType>
bool GenericMinCostFlow<Graph, ArcFlowType, ArcScaledCostType>::Solve() {
if (absl::GetFlag(FLAGS_min_cost_flow_check_balance) &&
!CheckInputConsistency()) {
if (!CheckInputConsistency()) {
return false;
}
if (check_feasibility_ && !CheckFeasibility(nullptr, nullptr)) {
if (check_feasibility_ && !CheckFeasibility()) {
status_ = INFEASIBLE;
return false;
}
status_ = NOT_SOLVED;
node_potential_.assign(node_potential_.size(), 0);
ResetFirstAdmissibleArcs();
if (!ScaleCosts()) return false;
if (!Optimize()) return false;
@@ -1170,6 +1165,7 @@ SimpleMinCostFlow::Status SimpleMinCostFlow::SolveWithPossibleAdjustment(
arc_flow_[arc] = min_cost_flow.Flow(PermutedArc(arc));
}
}
return min_cost_flow.status();
}

View File

@@ -201,9 +201,6 @@ class MinCostFlowBase {
// execution.
//
// Some details on how to deal with this:
// - The sum of all incoming/outgoing capacity at each node should not
// overflow. TODO(user): this is not always properly checked and probably
// deserve a different return status.
// - Since we scale cost, each arc unit cost times (num_nodes + 1) should
// not overflow. We detect that at the beginning of the Solve().
// - This is however not sufficient as the node potential depends on the
@@ -225,7 +222,28 @@ class MinCostFlowBase {
// deal with since we will still have the proper flow on each arc. It is
// thus possible to recompute the total cost in double or using
// absl::int128 if the need arise.
BAD_COST_RANGE
BAD_COST_RANGE,
// This is returned in our initial check if the arc capacity are too large.
// For each node these quantity should not overflow the FlowQuantity type
// which is int64_t by default.
// - Its initial excess (+supply or -demand) + sum incoming arc capacity
// - Its initial excess (+supply or -demand) - sum outgoing arc capacity.
//
// Note that these are upper bounds that guarantee that no overflow will
// take place during the algorithm execution. It is possible to go over that
// and still encounter no overflow, but since we cannot guarantee this, we
// rather check early and return a proper status.
//
// Note that we might cap the capacity of the arcs if we can detect it
// is too large in order to avoid returning this status for simple case,
// like if a client used int64_t max for all arcs out of the source.
//
// TODO(user): Not sure this is a good idea, probably better to make sure
// client use reasonable capacities. Also we should template by FlowQuantity
// and allow use of absl::int128 so we never have issue if the input is
// int64_t.
BAD_CAPACITY_RANGE,
};
};
@@ -379,6 +397,7 @@ class GenericMinCostFlow : public MinCostFlowBase {
public:
typedef typename Graph::NodeIndex NodeIndex;
typedef typename Graph::ArcIndex ArcIndex;
typedef typename Graph::IncomingArcIterator IncomingArcIterator;
typedef typename Graph::OutgoingArcIterator OutgoingArcIterator;
typedef typename Graph::OutgoingOrOppositeIncomingArcIterator
OutgoingOrOppositeIncomingArcIterator;
@@ -413,31 +432,9 @@ class GenericMinCostFlow : public MinCostFlowBase {
// Sets the capacity for the given arc.
void SetArcCapacity(ArcIndex arc, ArcFlowType new_capacity);
// Sets the flow for the given arc. Note that new_flow must be smaller than
// the capacity of the arc.
void SetArcFlow(ArcIndex arc, ArcFlowType new_flow);
// Solves the problem, returning true if a min-cost flow could be found.
bool Solve();
// Checks for feasibility, i.e., that all the supplies and demands can be
// matched without exceeding bottlenecks in the network.
// If infeasible_supply_node (resp. infeasible_demand_node) are not NULL,
// they are populated with the indices of the nodes where the initial supplies
// (resp. demands) are too large. Feasible values for the supplies and
// demands are accessible through FeasibleSupply.
// Note that CheckFeasibility is called by Solve() when the flag
// min_cost_flow_check_feasibility is set to true (which is the default.)
bool CheckFeasibility(std::vector<NodeIndex>* infeasible_supply_node,
std::vector<NodeIndex>* infeasible_demand_node);
// Makes the min-cost flow problem solvable by truncating supplies and
// demands to a level acceptable by the network. There may be several ways to
// do it. In our case, the levels are computed from the result of the max-flow
// algorithm run in CheckFeasibility().
// MakeFeasible returns false if CheckFeasibility() was not called before.
bool MakeFeasible();
// Returns the cost of the minimum-cost flow found by the algorithm. This
// works in O(num_arcs). This will only work if the last Solve() call was
// successful and returned true, otherwise it will return 0. Note that the
@@ -450,24 +447,18 @@ class GenericMinCostFlow : public MinCostFlowBase {
FlowQuantity Flow(ArcIndex arc) const;
// Returns the capacity of the given arc.
//
// Warning: If the capacity were close to ArcFlowType::max() we might have
// adjusted them in order to avoid overflow.
FlowQuantity Capacity(ArcIndex arc) const;
// Returns the unscaled cost for the given arc.
CostValue UnitCost(ArcIndex arc) const;
// Returns the supply at a given node. Demands are modelled as negative
// supplies.
// Returns the supply at a given node.
// Demands are modelled as negative supplies.
FlowQuantity Supply(NodeIndex node) const;
// Returns the initial supply at a given node.
FlowQuantity InitialSupply(NodeIndex node) const;
// Returns the largest supply (if > 0) or largest demand in absolute value
// (if < 0) admissible at node. If the problem is not feasible, some of these
// values will be smaller (in absolute value) than the initial supplies
// and demand given as input.
FlowQuantity FeasibleSupply(NodeIndex node) const;
// Whether to check the feasibility of the problem with a max-flow, prior to
// solving it. This uses about twice as much memory, but detects infeasible
// problems (where the flow can't be satisfied) and makes Solve() return
@@ -481,6 +472,12 @@ class GenericMinCostFlow : public MinCostFlowBase {
void SetPriceScaling(bool value) { scale_prices_ = value; }
private:
// Checks for feasibility, i.e., that all the supplies and demands can be
// matched without exceeding bottlenecks in the network.
// Note that CheckFeasibility is called by Solve() when SetCheckFeasibility()
// is set to true, which is the default.
bool CheckFeasibility();
// Returns true if the given arc is admissible i.e. if its residual capacity
// is strictly positive, and its reduced cost strictly negative, i.e., pushing
// more flow into it will result in a reduction of the total cost.
@@ -643,12 +640,6 @@ class GenericMinCostFlow : public MinCostFlowBase {
// node. This is used to create the max-flow-based feasibility checker.
std::vector<FlowQuantity> initial_node_excess_;
// An array containing the best acceptable excesses for each of the
// nodes. These excesses are imposed by the result of the max-flow-based
// feasibility checker for the nodes with an initial supply != 0. For the
// other nodes, the excess is simply 0.
std::vector<FlowQuantity> feasible_node_excess_;
// Statistics about this class.
StatsGroup stats_;

View File

@@ -34,6 +34,73 @@
#include "ortools/linear_solver/linear_solver.h"
namespace operations_research {
namespace {
// Solve() must succeed on a problem whose arc capacities are huge: this
// instance can be "corrected" by the solver's simple capacity-adjustment
// heuristic, so the expected status is OPTIMAL.
TEST(SolveTest, CapacityTooLarge) {
  using Graph = ::util::ReverseArcListGraph<int64_t, int64_t>;
  using Solver =
      ::operations_research::GenericMinCostFlow<Graph, int64_t, int64_t>;
  const int kNumNodes = 6;
  const int kNumArcs = 10;
  Graph graph(kNumNodes, kNumArcs);
  Solver solver(&graph);
  const int64_t kTails[kNumArcs] = {1, 2, 3, 4, 5, 0, 1, 2, 3, 4};
  const int64_t kHeads[kNumArcs] = {0, 1, 2, 3, 4, 5, 5, 5, 5, 5};
  const int64_t kCapacities[kNumArcs] = {
      3184525836262886912, 3184525836262886912, 3184525836262886912,
      3184525836262886912, 3184525836262886912, 1025,
      3184525836262886914, 3184525836262886914, 3184525836262886914,
      3184525836262886914,
  };
  const int64_t kSupplies[kNumNodes] = {-3184525836262885888, 1, 1, 1, 1,
                                        3184525836262885884};
  for (int arc = 0; arc < kNumArcs; ++arc) {
    graph.AddArc(kTails[arc], kHeads[arc]);
    solver.SetArcCapacity(arc, kCapacities[arc]);
    solver.SetArcUnitCost(arc, /*unit_cost=*/0);  // All arc costs are zero.
  }
  for (int node = 0; node < kNumNodes; ++node) {
    solver.SetNodeSupply(node, kSupplies[node]);
  }
  // This one can actually be "corrected" by our simple heuristic.
  EXPECT_TRUE(solver.Solve());
  EXPECT_EQ(solver.status(), Solver::OPTIMAL);
}
// Solve() must fail with BAD_CAPACITY_RANGE when the capacities are too
// large to fix: a doubled 3-cycle where the incoming/outgoing flow of a node
// cannot be easily bounded.
TEST(SolveTest, CapacityTooLarge2) {
  using Graph = ::util::ReverseArcListGraph<int64_t, int64_t>;
  using Solver =
      ::operations_research::GenericMinCostFlow<Graph, int64_t, int64_t>;
  const int kNumNodes = 3;
  const int kNumArcs = 6;
  Graph graph(kNumNodes, kNumArcs);
  Solver solver(&graph);
  const int64_t kLarge = std::numeric_limits<int64_t>::max() - 10;
  const int64_t kTails[kNumArcs] = {0, 0, 1, 1, 2, 2};
  const int64_t kHeads[kNumArcs] = {1, 1, 2, 2, 0, 0};
  const int64_t kSupplies[kNumNodes] = {-kLarge, kLarge, 0};
  for (int arc = 0; arc < kNumArcs; ++arc) {
    graph.AddArc(kTails[arc], kHeads[arc]);
    solver.SetArcCapacity(arc, /*new_capacity=*/kLarge);
    solver.SetArcUnitCost(arc, /*unit_cost=*/0);  // All arc costs are zero.
  }
  for (int node = 0; node < kNumNodes; ++node) {
    solver.SetNodeSupply(node, kSupplies[node]);
  }
  EXPECT_FALSE(solver.Solve());
  EXPECT_EQ(solver.status(), Solver::BAD_CAPACITY_RANGE);
}
template <typename Graph>
void GenericMinCostFlowTester(
@@ -77,11 +144,6 @@ void GenericMinCostFlowTester(
}
} else if (expected_status == GenericMinCostFlow<Graph>::INFEASIBLE) {
EXPECT_FALSE(ok);
for (int node = 0; node < graph.num_nodes(); ++node) {
FlowQuantity delta = min_cost_flow.InitialSupply(node) -
min_cost_flow.FeasibleSupply(node);
EXPECT_EQ(0, delta) << "at node " << node;
}
}
}
}
@@ -169,66 +231,6 @@ TYPED_TEST(GenericMinCostFlowTest, Test3) {
kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// An infeasible instance: the total supply (20) exceeds what the network can
// route (16). CheckFeasibility() must report the offending nodes, Solve()
// must return INFEASIBLE, and after MakeFeasible() truncates the supplies a
// second Solve() must succeed.
TYPED_TEST(GenericMinCostFlowTest, Infeasible) {
  const int kNumNodes = 6;
  const int kNumArcs = 9;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 0, 0, 0, 0, -20};
  // Expected per-node difference between initial and feasible supply.
  const FlowQuantity kNodeInfeasibility[kNumNodes] = {4, 0, 0, 0, 0, -4};
  const NodeIndex kTail[kNumArcs] = {0, 0, 1, 1, 1, 2, 3, 4, 4};
  const NodeIndex kHead[kNumArcs] = {1, 2, 1, 3, 4, 3, 5, 3, 5};
  const CostValue kCost[kNumArcs] = {1, 6, 3, 5, 7, 3, 1, 6, 9};
  const FlowQuantity kCapacity[kNumArcs] = {10, 10, 10, 6, 6, 6, 10, 10, 10};
  const NodeIndex kInfeasibleSupplyNode[] = {0};
  const NodeIndex kInfeasibleDemandNode[] = {5};
  // Fixed: these hold flow amounts, so declare them as FlowQuantity rather
  // than NodeIndex.
  const FlowQuantity kFeasibleSupply[] = {16};
  const FlowQuantity kFeasibleDemand[] = {-16};
  TypeParam graph(kNumNodes, kNumArcs);
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    graph.AddArc(kTail[arc], kHead[arc]);
  }
  std::vector<ArcIndex> permutation;
  Graphs<TypeParam>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  GenericMinCostFlow<TypeParam> min_cost_flow(&graph);
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    min_cost_flow.SetArcUnitCost(arc, kCost[arc]);
    min_cost_flow.SetArcCapacity(arc, kCapacity[arc]);
  }
  // Fixed: this loop iterates over nodes, so use a NodeIndex named "node"
  // instead of the previous "ArcIndex arc".
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    min_cost_flow.SetNodeSupply(node, kNodeSupply[node]);
  }
  std::vector<NodeIndex> infeasible_supply_node;
  std::vector<NodeIndex> infeasible_demand_node;
  const bool feasible = min_cost_flow.CheckFeasibility(&infeasible_supply_node,
                                                       &infeasible_demand_node);
  EXPECT_FALSE(feasible);
  for (size_t i = 0; i < infeasible_supply_node.size(); ++i) {
    const NodeIndex node = infeasible_supply_node[i];
    EXPECT_EQ(node, kInfeasibleSupplyNode[i]);
    EXPECT_EQ(min_cost_flow.FeasibleSupply(node), kFeasibleSupply[i]);
  }
  for (size_t i = 0; i < infeasible_demand_node.size(); ++i) {
    const NodeIndex node = infeasible_demand_node[i];
    EXPECT_EQ(node, kInfeasibleDemandNode[i]);
    EXPECT_EQ(min_cost_flow.FeasibleSupply(node), kFeasibleDemand[i]);
  }
  bool ok = min_cost_flow.Solve();
  EXPECT_FALSE(ok);
  EXPECT_EQ(GenericMinCostFlow<TypeParam>::INFEASIBLE, min_cost_flow.status());
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    const FlowQuantity delta =
        min_cost_flow.InitialSupply(node) - min_cost_flow.FeasibleSupply(node);
    EXPECT_EQ(kNodeInfeasibility[node], delta);
  }
  // A failed Solve() reports a zero optimal cost.
  EXPECT_EQ(min_cost_flow.GetOptimalCost(), 0);
  // Truncate supplies/demands to feasible levels and re-solve.
  min_cost_flow.MakeFeasible();
  ok = min_cost_flow.Solve();
  EXPECT_TRUE(ok);
  EXPECT_EQ(GenericMinCostFlow<TypeParam>::OPTIMAL, min_cost_flow.status());
}
// Test on a 4x4 matrix. Example taken from
// http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm
TYPED_TEST(GenericMinCostFlowTest, Small4x4Matrix) {
@@ -275,7 +277,7 @@ TYPED_TEST(GenericMinCostFlowTest, Small4x4Matrix) {
TYPED_TEST(GenericMinCostFlowTest, TotalFlowCostOverflow) {
const int kNumNodes = 2;
const int kNumArcs = 1;
const FlowQuantity kNodeSupply[kNumNodes] = {1LL << 61, -1LL << 61};
const FlowQuantity kNodeSupply[kNumNodes] = {1LL << 61, -(1LL << 61)};
const NodeIndex kTail[kNumArcs] = {0};
const NodeIndex kHead[kNumArcs] = {1};
const CostValue kCost[kNumArcs] = {10};
@@ -298,7 +300,7 @@ TEST(GenericMinCostFlowTest, OverflowPrevention1) {
mcf.SetNodeSupply(1, -std::numeric_limits<int64_t>::max());
EXPECT_FALSE(mcf.Solve());
EXPECT_EQ(mcf.status(), MinCostFlowBase::BAD_COST_RANGE);
EXPECT_EQ(mcf.status(), MinCostFlowBase::BAD_CAPACITY_RANGE);
}
TEST(GenericMinCostFlowTest, OverflowPrevention2) {
@@ -976,4 +978,5 @@ static void BM_MinCostFlowOnMultiMatchingProblem(benchmark::State& state) {
BENCHMARK(BM_MinCostFlowOnMultiMatchingProblem);
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,103 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/minimum_vertex_cover.h"
#include <vector>
#include "absl/log/check.h"
#include "ortools/graph/ebert_graph.h"
#include "ortools/graph/max_flow.h"
namespace operations_research {
std::vector<bool> BipartiteMinimumVertexCover(
const std::vector<std::vector<int>>& left_to_right_arcs, int num_right) {
// This algorithm first uses the maximum flow to find a maximum matching. Then
// it uses the same method outlined in the proof of Konig's theorem to
// transform the maximum matching into a minimum vertex cover.
//
// More concretely, it uses a DFS starting with unmatched nodes and
// alternating matched/unmatched edges to find a minimum vertex cover.
SimpleMaxFlow max_flow;
const int num_left = left_to_right_arcs.size();
std::vector<ArcIndex> arcs;
for (int i = 0; i < num_left; ++i) {
for (const int right_node : left_to_right_arcs[i]) {
DCHECK_GE(right_node, num_left);
DCHECK_LT(right_node, num_right + num_left);
arcs.push_back(max_flow.AddArcWithCapacity(i, right_node, 1));
}
}
std::vector<std::vector<int>> adj_list = left_to_right_arcs;
adj_list.resize(num_left + num_right);
for (int i = 0; i < num_left; ++i) {
for (const int right_node : left_to_right_arcs[i]) {
adj_list[right_node].push_back(i);
}
}
const int sink = num_left + num_right;
const int source = num_left + num_right + 1;
for (int i = 0; i < num_left; ++i) {
max_flow.AddArcWithCapacity(source, i, 1);
}
for (int i = 0; i < num_right; ++i) {
max_flow.AddArcWithCapacity(i + num_left, sink, 1);
}
CHECK(max_flow.Solve(source, sink) == SimpleMaxFlow::OPTIMAL);
std::vector<int> maximum_matching(num_left + num_right, -1);
for (const ArcIndex arc : arcs) {
if (max_flow.Flow(arc) > 0) {
maximum_matching[max_flow.Tail(arc)] = max_flow.Head(arc);
maximum_matching[max_flow.Head(arc)] = max_flow.Tail(arc);
}
}
// We do a DFS starting with unmatched nodes and alternating matched/unmatched
// edges.
std::vector<bool> in_alternating_path(num_left + num_right, false);
std::vector<int> to_visit;
for (int i = 0; i < num_left; ++i) {
if (maximum_matching[i] == -1) {
to_visit.push_back(i);
}
}
while (!to_visit.empty()) {
const int current = to_visit.back();
to_visit.pop_back();
if (in_alternating_path[current]) {
continue;
}
in_alternating_path[current] = true;
for (const int j : adj_list[current]) {
if (current < num_left && maximum_matching[current] != j) {
to_visit.push_back(j);
} else if (current >= num_left && maximum_matching[current] == j) {
to_visit.push_back(j);
}
}
}
std::vector<bool> minimum_vertex_cover(num_left + num_right, false);
for (int i = 0; i < num_left; ++i) {
if (!in_alternating_path[i]) {
minimum_vertex_cover[i] = true;
}
}
for (int i = num_left; i < num_left + num_right; ++i) {
if (in_alternating_path[i]) {
minimum_vertex_cover[i] = true;
}
}
return minimum_vertex_cover;
}
} // namespace operations_research

View File

@@ -0,0 +1,35 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_GRAPH_MINIMUM_VERTEX_COVER_H_
#define OR_TOOLS_GRAPH_MINIMUM_VERTEX_COVER_H_
#include <vector>
namespace operations_research {
// This method computes a minimum vertex cover for the bipartite graph.
//
// If we define num_left=left_to_right_arcs.size(), the "left" nodes are
// integers in [0, num_left), and the "right" nodes are integers in [num_left,
// num_left + num_right).
//
// Returns a vector of size num_left+num_right, such that element #l is true if
// it is part of the minimum vertex cover and false if it is part of the maximum
// independent set (one is the complement of the other).
std::vector<bool> BipartiteMinimumVertexCover(
const std::vector<std::vector<int>>& left_to_right_arcs, int num_right);
} // namespace operations_research
#endif // OR_TOOLS_GRAPH_MINIMUM_VERTEX_COVER_H_

View File

@@ -0,0 +1,78 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/minimum_vertex_cover.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "gtest/gtest.h"
namespace operations_research {
namespace {
// Creates the complete bipartite graph K(n, m).
// Creates the complete bipartite graph K(num_left, num_right): every left
// node is adjacent to all right nodes {num_left, ..., num_left+num_right-1}.
std::vector<std::vector<int>> MakeCompleteBipartiteGraph(int num_left,
                                                         int num_right) {
  std::vector<int> right_nodes;
  right_nodes.reserve(num_right);
  for (int i = 0; i < num_right; ++i) {
    right_nodes.push_back(num_left + i);
  }
  return std::vector<std::vector<int>>(num_left, right_nodes);
}
// 4 left nodes {0..3} and 4 right nodes {4..7}; the minimum cover has size 3,
// leaving 5 nodes in the complementary maximum independent set.
TEST(BipartiteMinimumVertexCoverTest, BasicBehavior) {
  const std::vector<std::vector<int>> left_to_right = {
      {5}, {4, 5, 6}, {5}, {5, 6, 7}};
  const std::vector<bool> cover =
      BipartiteMinimumVertexCover(left_to_right, /*num_right=*/4);
  EXPECT_EQ(absl::c_count(cover, true), 3);
  EXPECT_EQ(absl::c_count(cover, false), 5);
}
// K(1, 4): the single left node alone covers every edge.
TEST(BipartiteMinimumVertexCoverTest, StarGraph) {
  const std::vector<bool> cover =
      BipartiteMinimumVertexCover(MakeCompleteBipartiteGraph(1, 4),
                                  /*num_right=*/4);
  EXPECT_EQ(absl::c_count(cover, true), 1);
  EXPECT_EQ(absl::c_count(cover, false), 4);
}
// K(3, 3): taking one whole side is a minimum cover of size 3.
TEST(BipartiteMinimumVertexCoverTest, UtilityGraph) {
  const std::vector<bool> cover =
      BipartiteMinimumVertexCover(MakeCompleteBipartiteGraph(3, 3),
                                  /*num_right=*/3);
  EXPECT_EQ(absl::c_count(cover, true), 3);
  EXPECT_EQ(absl::c_count(cover, false), 3);
}
// Parallel (duplicate) edges must not change the result: this is the same
// instance as BasicBehavior with every edge repeated several times.
TEST(BipartiteMinimumVertexCoverTest, DuplicateEdges) {
  const int num_right = 4;
  const std::vector<std::vector<int>> left_to_right = {
      {5, 5}, {4, 4, 5, 6}, {5, 5, 5}, {5, 5, 5, 6, 6, 7}};
  // Compute the cover once instead of calling the function twice; both
  // expectations inspect the same deterministic result.
  const std::vector<bool> cover =
      BipartiteMinimumVertexCover(left_to_right, num_right);
  EXPECT_EQ(absl::c_count(cover, true), 3);
  EXPECT_EQ(absl::c_count(cover, false), 5);
}
// A graph with no edges: the empty set is a valid cover, so none of the
// 2 + 4 nodes should be selected.
TEST(BipartiteMinimumVertexCoverTest, Empty) {
  const int num_right = 4;
  const std::vector<std::vector<int>> left_to_right = {{}, {}};
  const std::vector<bool> cover =
      BipartiteMinimumVertexCover(left_to_right, num_right);
  // Strengthened: also check that no node is selected, not just that all
  // six are unselected.
  EXPECT_EQ(absl::c_count(cover, true), 0);
  EXPECT_EQ(absl::c_count(cover, false), 6);
}
} // namespace
} // namespace operations_research

View File

@@ -51,6 +51,7 @@ PYBIND11_MODULE(min_cost_flow, m) {
pybind11::enum_<SimpleMinCostFlow::Status>(smcf, "Status")
.value("BAD_COST_RANGE", MinCostFlowBase::Status::BAD_COST_RANGE)
.value("BAD_CAPACITY_RANGE", MinCostFlowBase::Status::BAD_CAPACITY_RANGE)
.value("BAD_RESULT", MinCostFlowBase::Status::BAD_RESULT)
.value("FEASIBLE", MinCostFlowBase::Status::FEASIBLE)
.value("INFEASIBLE", MinCostFlowBase::Status::INFEASIBLE)