// OR-Tools 9.2 — max_flow.cc
// (Doxygen navigation text removed; this listing was scraped from the
// generated documentation page for this file.)
// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/graph/max_flow.h"

#include <algorithm>
#include <limits>
#include <string>

#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
24namespace operations_research {
25
26SimpleMaxFlow::SimpleMaxFlow() : num_nodes_(0) {}
27
30 const ArcIndex num_arcs = arc_tail_.size();
31 num_nodes_ = std::max(num_nodes_, tail + 1);
32 num_nodes_ = std::max(num_nodes_, head + 1);
33 arc_tail_.push_back(tail);
34 arc_head_.push_back(head);
35 arc_capacity_.push_back(capacity);
36 return num_arcs;
37}
38
39NodeIndex SimpleMaxFlow::NumNodes() const { return num_nodes_; }
40
41ArcIndex SimpleMaxFlow::NumArcs() const { return arc_tail_.size(); }
42
43NodeIndex SimpleMaxFlow::Tail(ArcIndex arc) const { return arc_tail_[arc]; }
44
45NodeIndex SimpleMaxFlow::Head(ArcIndex arc) const { return arc_head_[arc]; }
46
48 return arc_capacity_[arc];
49}
50
52 arc_capacity_[arc] = capacity;
53}
54
56 const ArcIndex num_arcs = arc_capacity_.size();
57 arc_flow_.assign(num_arcs, 0);
58 underlying_max_flow_.reset();
59 underlying_graph_.reset();
60 optimal_flow_ = 0;
61 if (source == sink || source < 0 || sink < 0) {
62 return BAD_INPUT;
63 }
64 if (source >= num_nodes_ || sink >= num_nodes_) {
65 return OPTIMAL;
66 }
67 underlying_graph_ = absl::make_unique<Graph>(num_nodes_, num_arcs);
68 underlying_graph_->AddNode(source);
69 underlying_graph_->AddNode(sink);
70 for (int arc = 0; arc < num_arcs; ++arc) {
71 underlying_graph_->AddArc(arc_tail_[arc], arc_head_[arc]);
72 }
73 underlying_graph_->Build(&arc_permutation_);
74 underlying_max_flow_ = absl::make_unique<GenericMaxFlow<Graph>>(
75 underlying_graph_.get(), source, sink);
76 for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
77 ArcIndex permuted_arc =
78 arc < arc_permutation_.size() ? arc_permutation_[arc] : arc;
79 underlying_max_flow_->SetArcCapacity(permuted_arc, arc_capacity_[arc]);
80 }
81 if (underlying_max_flow_->Solve()) {
82 optimal_flow_ = underlying_max_flow_->GetOptimalFlow();
83 for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
84 ArcIndex permuted_arc =
85 arc < arc_permutation_.size() ? arc_permutation_[arc] : arc;
86 arc_flow_[arc] = underlying_max_flow_->Flow(permuted_arc);
87 }
88 }
89 // Translate the GenericMaxFlow::Status. It is different because NOT_SOLVED
90 // does not make sense in the simple api.
91 switch (underlying_max_flow_->status()) {
93 return BAD_RESULT;
95 return OPTIMAL;
97 return POSSIBLE_OVERFLOW;
99 return BAD_INPUT;
101 return BAD_RESULT;
102 }
103 return BAD_RESULT;
104}
105
106FlowQuantity SimpleMaxFlow::OptimalFlow() const { return optimal_flow_; }
107
108FlowQuantity SimpleMaxFlow::Flow(ArcIndex arc) const { return arc_flow_[arc]; }
109
110void SimpleMaxFlow::GetSourceSideMinCut(std::vector<NodeIndex>* result) {
111 if (underlying_max_flow_ == nullptr) return;
112 underlying_max_flow_->GetSourceSideMinCut(result);
113}
114
115void SimpleMaxFlow::GetSinkSideMinCut(std::vector<NodeIndex>* result) {
116 if (underlying_max_flow_ == nullptr) return;
117 underlying_max_flow_->GetSinkSideMinCut(result);
118}
119
121 NodeIndex sink) const {
123 model.set_problem_type(FlowModelProto::MAX_FLOW);
124 for (int n = 0; n < num_nodes_; ++n) {
125 FlowNodeProto* node = model.add_nodes();
126 node->set_id(n);
127 if (n == source) node->set_supply(1);
128 if (n == sink) node->set_supply(-1);
129 }
130 for (int a = 0; a < arc_tail_.size(); ++a) {
131 FlowArcProto* arc = model.add_arcs();
132 arc->set_tail(Tail(a));
133 arc->set_head(Head(a));
134 arc->set_capacity(Capacity(a));
135 }
136 return model;
137}
138
139template <typename Graph>
141 NodeIndex sink)
142 : graph_(graph),
143 node_excess_(),
144 node_potential_(),
145 residual_arc_capacity_(),
146 first_admissible_arc_(),
147 active_nodes_(),
148 source_(source),
149 sink_(sink),
150 use_global_update_(true),
151 use_two_phase_algorithm_(true),
152 process_node_by_height_(true),
153 check_input_(true),
154 check_result_(true),
155 stats_("MaxFlow") {
157 DCHECK(graph->IsNodeValid(source));
158 DCHECK(graph->IsNodeValid(sink));
159 const NodeIndex max_num_nodes = Graphs<Graph>::NodeReservation(*graph_);
160 if (max_num_nodes > 0) {
161 node_excess_.Reserve(0, max_num_nodes - 1);
163 node_potential_.Reserve(0, max_num_nodes - 1);
165 first_admissible_arc_.Reserve(0, max_num_nodes - 1);
166 first_admissible_arc_.SetAll(Graph::kNilArc);
167 bfs_queue_.reserve(max_num_nodes);
168 active_nodes_.reserve(max_num_nodes);
169 }
170 const ArcIndex max_num_arcs = Graphs<Graph>::ArcReservation(*graph_);
171 if (max_num_arcs > 0) {
172 residual_arc_capacity_.Reserve(-max_num_arcs, max_num_arcs - 1);
174 }
175}
176
177template <typename Graph>
179 SCOPED_TIME_STAT(&stats_);
180 bool ok = true;
181 for (ArcIndex arc = 0; arc < graph_->num_arcs(); ++arc) {
182 if (residual_arc_capacity_[arc] < 0) {
183 ok = false;
184 }
185 }
186 return ok;
187}
188
189template <typename Graph>
191 FlowQuantity new_capacity) {
192 SCOPED_TIME_STAT(&stats_);
193 DCHECK_LE(0, new_capacity);
194 DCHECK(IsArcDirect(arc));
195 const FlowQuantity free_capacity = residual_arc_capacity_[arc];
196 const FlowQuantity capacity_delta = new_capacity - Capacity(arc);
197 if (capacity_delta == 0) {
198 return; // Nothing to do.
199 }
200 status_ = NOT_SOLVED;
201 if (free_capacity + capacity_delta >= 0) {
202 // The above condition is true if one of the two conditions is true:
203 // 1/ (capacity_delta > 0), meaning we are increasing the capacity
204 // 2/ (capacity_delta < 0 && free_capacity + capacity_delta >= 0)
205 // meaning we are reducing the capacity, but that the capacity
206 // reduction is not larger than the free capacity.
207 DCHECK((capacity_delta > 0) ||
208 (capacity_delta < 0 && free_capacity + capacity_delta >= 0));
209 residual_arc_capacity_.Set(arc, free_capacity + capacity_delta);
210 DCHECK_LE(0, residual_arc_capacity_[arc]);
211 } else {
212 // Note that this breaks the preflow invariants but it is currently not an
213 // issue since we restart from scratch on each Solve() and we set the status
214 // to NOT_SOLVED.
215 //
216 // TODO(user): The easiest is probably to allow negative node excess in
217 // other places than the source, but the current implementation does not
218 // deal with this.
219 SetCapacityAndClearFlow(arc, new_capacity);
220 }
221}
222
223template <typename Graph>
225 SCOPED_TIME_STAT(&stats_);
226 DCHECK(IsArcValid(arc));
227 DCHECK_GE(new_flow, 0);
228 const FlowQuantity capacity = Capacity(arc);
229 DCHECK_GE(capacity, new_flow);
230
231 // Note that this breaks the preflow invariants but it is currently not an
232 // issue since we restart from scratch on each Solve() and we set the status
233 // to NOT_SOLVED.
234 residual_arc_capacity_.Set(Opposite(arc), -new_flow);
235 residual_arc_capacity_.Set(arc, capacity - new_flow);
236 status_ = NOT_SOLVED;
237}
238
239template <typename Graph>
241 std::vector<NodeIndex>* result) {
242 ComputeReachableNodes<false>(source_, result);
243}
244
245template <typename Graph>
246void GenericMaxFlow<Graph>::GetSinkSideMinCut(std::vector<NodeIndex>* result) {
247 ComputeReachableNodes<true>(sink_, result);
248}
249
250template <typename Graph>
252 SCOPED_TIME_STAT(&stats_);
253 bool ok = true;
254 if (node_excess_[source_] != -node_excess_[sink_]) {
255 LOG(DFATAL) << "-node_excess_[source_] = " << -node_excess_[source_]
256 << " != node_excess_[sink_] = " << node_excess_[sink_];
257 ok = false;
258 }
259 for (NodeIndex node = 0; node < graph_->num_nodes(); ++node) {
260 if (node != source_ && node != sink_) {
261 if (node_excess_[node] != 0) {
262 LOG(DFATAL) << "node_excess_[" << node << "] = " << node_excess_[node]
263 << " != 0";
264 ok = false;
265 }
266 }
267 }
268 for (ArcIndex arc = 0; arc < graph_->num_arcs(); ++arc) {
269 const ArcIndex opposite = Opposite(arc);
270 const FlowQuantity direct_capacity = residual_arc_capacity_[arc];
271 const FlowQuantity opposite_capacity = residual_arc_capacity_[opposite];
272 if (direct_capacity < 0) {
273 LOG(DFATAL) << "residual_arc_capacity_[" << arc
274 << "] = " << direct_capacity << " < 0";
275 ok = false;
276 }
277 if (opposite_capacity < 0) {
278 LOG(DFATAL) << "residual_arc_capacity_[" << opposite
279 << "] = " << opposite_capacity << " < 0";
280 ok = false;
281 }
282 // The initial capacity of the direct arcs is non-negative.
283 if (direct_capacity + opposite_capacity < 0) {
284 LOG(DFATAL) << "initial capacity [" << arc
285 << "] = " << direct_capacity + opposite_capacity << " < 0";
286 ok = false;
287 }
288 }
289 return ok;
290}
291
292template <typename Graph>
294 SCOPED_TIME_STAT(&stats_);
295
296 // We simply compute the reachability from the source in the residual graph.
297 const NodeIndex num_nodes = graph_->num_nodes();
298 std::vector<bool> is_reached(num_nodes, false);
299 std::vector<NodeIndex> to_process;
300
301 to_process.push_back(source_);
302 is_reached[source_] = true;
303 while (!to_process.empty()) {
304 const NodeIndex node = to_process.back();
305 to_process.pop_back();
306 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node); it.Ok();
307 it.Next()) {
308 const ArcIndex arc = it.Index();
309 if (residual_arc_capacity_[arc] > 0) {
310 const NodeIndex head = graph_->Head(arc);
311 if (!is_reached[head]) {
312 is_reached[head] = true;
313 to_process.push_back(head);
314 }
315 }
316 }
317 }
318 return is_reached[sink_];
319}
320
321template <typename Graph>
323 DCHECK(IsActive(node));
324 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node); it.Ok();
325 it.Next()) {
326 const ArcIndex arc = it.Index();
327 DCHECK(!IsAdmissible(arc)) << DebugString("CheckRelabelPrecondition:", arc);
328 }
329 return true;
330}
331
332template <typename Graph>
333std::string GenericMaxFlow<Graph>::DebugString(const std::string& context,
334 ArcIndex arc) const {
335 const NodeIndex tail = Tail(arc);
336 const NodeIndex head = Head(arc);
337 return absl::StrFormat(
338 "%s Arc %d, from %d to %d, "
339 "Capacity = %d, Residual capacity = %d, "
340 "Flow = residual capacity for reverse arc = %d, "
341 "Height(tail) = %d, Height(head) = %d, "
342 "Excess(tail) = %d, Excess(head) = %d",
343 context, arc, tail, head, Capacity(arc), residual_arc_capacity_[arc],
344 Flow(arc), node_potential_[tail], node_potential_[head],
345 node_excess_[tail], node_excess_[head]);
346}
347
348template <typename Graph>
350 status_ = NOT_SOLVED;
351 if (check_input_ && !CheckInputConsistency()) {
352 status_ = BAD_INPUT;
353 return false;
354 }
355 InitializePreflow();
356
357 // Deal with the case when source_ or sink_ is not inside graph_.
358 // Since they are both specified independently of the graph, we do need to
359 // take care of this corner case.
360 const NodeIndex num_nodes = graph_->num_nodes();
361 if (sink_ >= num_nodes || source_ >= num_nodes) {
362 // Behave like a normal graph where source_ and sink_ are disconnected.
363 // Note that the arc flow is set to 0 by InitializePreflow().
364 status_ = OPTIMAL;
365 return true;
366 }
367 if (use_global_update_) {
368 RefineWithGlobalUpdate();
369 } else {
370 Refine();
371 }
372 if (check_result_) {
373 if (!CheckResult()) {
374 status_ = BAD_RESULT;
375 return false;
376 }
377 if (GetOptimalFlow() < kMaxFlowQuantity && AugmentingPathExists()) {
378 LOG(ERROR) << "The algorithm terminated, but the flow is not maximal!";
379 status_ = BAD_RESULT;
380 return false;
381 }
382 }
383 DCHECK_EQ(node_excess_[sink_], -node_excess_[source_]);
384 status_ = OPTIMAL;
385 if (GetOptimalFlow() == kMaxFlowQuantity && AugmentingPathExists()) {
386 // In this case, we are sure that the flow is > kMaxFlowQuantity.
387 status_ = INT_OVERFLOW;
388 }
389 IF_STATS_ENABLED(VLOG(1) << stats_.StatString());
390 return true;
391}
392
393template <typename Graph>
396 // InitializePreflow() clears the whole flow that could have been computed
397 // by a previous Solve(). This is not optimal in terms of complexity.
398 // TODO(user): find a way to make the re-solving incremental (not an obvious
399 // task, and there has not been a lot of literature on the subject.)
400 node_excess_.SetAll(0);
401 const ArcIndex num_arcs = graph_->num_arcs();
402 for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
403 SetCapacityAndClearFlow(arc, Capacity(arc));
405
406 // All the initial heights are zero except for the source whose height is
407 // equal to the number of nodes and will never change during the algorithm.
408 node_potential_.SetAll(0);
409 node_potential_.Set(source_, graph_->num_nodes());
411 // Initially no arcs are admissible except maybe the one leaving the source,
412 // but we treat the source in a special way, see
413 // SaturateOutgoingArcsFromSource().
414 const NodeIndex num_nodes = graph_->num_nodes();
415 for (NodeIndex node = 0; node < num_nodes; ++node) {
416 first_admissible_arc_[node] = Graph::kNilArc;
417 }
418}
419
420// Note(user): Calling this function will break the property on the node
421// potentials because of the way we cancel flow on cycle. However, we only call
422// that at the end of the algorithm, or just before a GlobalUpdate() that will
423// restore the precondition on the node potentials.
424template <typename Graph>
427 const NodeIndex num_nodes = graph_->num_nodes();
428
429 // We implement a variation of Tarjan's strongly connected component algorithm
430 // to detect cycles published in: Tarjan, R. E. (1972), "Depth-first search
431 // and linear graph algorithms", SIAM Journal on Computing. A description can
432 // also be found in wikipedia.
433
434 // Stored nodes are settled nodes already stored in the
435 // reverse_topological_order (except the sink_ that we do not actually store).
436 std::vector<bool> stored(num_nodes, false);
437 stored[sink_] = true;
438
439 // The visited nodes that are not yet stored are all the nodes from the
440 // source_ to the current node in the current dfs branch.
441 std::vector<bool> visited(num_nodes, false);
442 visited[sink_] = true;
443
444 // Stack of arcs to explore in the dfs search.
445 // The current node is Head(arc_stack.back()).
446 std::vector<ArcIndex> arc_stack;
447
448 // Increasing list of indices into the arc_stack that correspond to the list
449 // of arcs in the current dfs branch from the source_ to the current node.
450 std::vector<int> index_branch;
451
452 // Node in reverse_topological_order in the final dfs tree.
453 std::vector<NodeIndex> reverse_topological_order;
455 // We start by pushing all the outgoing arcs from the source on the stack to
456 // avoid special conditions in the code. As a result, source_ will not be
457 // stored in reverse_topological_order, and this is what we want.
458 for (OutgoingArcIterator it(*graph_, source_); it.Ok(); it.Next()) {
459 const ArcIndex arc = it.Index();
460 const FlowQuantity flow = Flow(arc);
461 if (flow > 0) {
462 arc_stack.push_back(arc);
463 }
464 }
465 visited[source_] = true;
466
467 // Start the dfs on the subgraph formed by the direct arcs with positive flow.
468 while (!arc_stack.empty()) {
469 const NodeIndex node = Head(arc_stack.back());
470
471 // If the node is visited, it means we have explored all its arcs and we
472 // have just backtracked in the dfs. Store it if it is not already stored
473 // and process the next arc on the stack.
474 if (visited[node]) {
475 if (!stored[node]) {
476 stored[node] = true;
477 reverse_topological_order.push_back(node);
478 DCHECK(!index_branch.empty());
479 index_branch.pop_back();
480 }
481 arc_stack.pop_back();
482 continue;
483 }
484
485 // The node is a new unexplored node, add all its outgoing arcs with
486 // positive flow to the stack and go deeper in the dfs.
487 DCHECK(!stored[node]);
488 DCHECK(index_branch.empty() ||
489 (arc_stack.size() - 1 > index_branch.back()));
490 visited[node] = true;
491 index_branch.push_back(arc_stack.size() - 1);
492
493 for (OutgoingArcIterator it(*graph_, node); it.Ok(); it.Next()) {
494 const ArcIndex arc = it.Index();
495 const FlowQuantity flow = Flow(arc);
496 const NodeIndex head = Head(arc);
497 if (flow > 0 && !stored[head]) {
498 if (!visited[head]) {
499 arc_stack.push_back(arc);
500 } else {
501 // There is a cycle.
502 // Find the first index to consider,
503 // arc_stack[index_branch[cycle_begin]] will be the first arc on the
504 // cycle.
505 int cycle_begin = index_branch.size();
506 while (cycle_begin > 0 &&
507 Head(arc_stack[index_branch[cycle_begin - 1]]) != head) {
508 --cycle_begin;
509 }
510
511 // Compute the maximum flow that can be canceled on the cycle and the
512 // min index such that arc_stack[index_branch[i]] will be saturated.
513 FlowQuantity max_flow = flow;
514 int first_saturated_index = index_branch.size();
515 for (int i = index_branch.size() - 1; i >= cycle_begin; --i) {
516 const ArcIndex arc_on_cycle = arc_stack[index_branch[i]];
517 if (Flow(arc_on_cycle) <= max_flow) {
518 max_flow = Flow(arc_on_cycle);
519 first_saturated_index = i;
520 }
521 }
523 // This is just here for a DCHECK() below.
524 const FlowQuantity excess = node_excess_[head];
525
526 // Cancel the flow on the cycle, and set visited[node] = false for
527 // the node that will be backtracked over.
528 PushFlow(-max_flow, arc);
529 for (int i = index_branch.size() - 1; i >= cycle_begin; --i) {
530 const ArcIndex arc_on_cycle = arc_stack[index_branch[i]];
531 PushFlow(-max_flow, arc_on_cycle);
532 if (i >= first_saturated_index) {
533 DCHECK(visited[Head(arc_on_cycle)]);
534 visited[Head(arc_on_cycle)] = false;
535 } else {
536 DCHECK_GT(Flow(arc_on_cycle), 0);
537 }
538 }
539
540 // This is a simple check that the flow was pushed properly.
541 DCHECK_EQ(excess, node_excess_[head]);
542
543 // Backtrack the dfs just before index_branch[first_saturated_index].
544 // If the current node is still active, there is nothing to do.
545 if (first_saturated_index < index_branch.size()) {
546 arc_stack.resize(index_branch[first_saturated_index]);
547 index_branch.resize(first_saturated_index);
548
549 // We backtracked over the current node, so there is no need to
550 // continue looping over its arcs.
551 break;
552 }
553 }
554 }
555 }
556 }
557 DCHECK(arc_stack.empty());
558 DCHECK(index_branch.empty());
559
560 // Return the flow to the sink. Note that the sink_ and the source_ are not
561 // stored in reverse_topological_order.
562 for (int i = 0; i < reverse_topological_order.size(); i++) {
563 const NodeIndex node = reverse_topological_order[i];
564 if (node_excess_[node] == 0) continue;
565 for (IncomingArcIterator it(*graph_, node); it.Ok(); it.Next()) {
566 const ArcIndex opposite_arc = Opposite(it.Index());
567 if (residual_arc_capacity_[opposite_arc] > 0) {
568 const FlowQuantity flow =
569 std::min(node_excess_[node], residual_arc_capacity_[opposite_arc]);
570 PushFlow(flow, opposite_arc);
571 if (node_excess_[node] == 0) break;
572 }
573 }
574 DCHECK_EQ(0, node_excess_[node]);
575 }
576 DCHECK_EQ(-node_excess_[source_], node_excess_[sink_]);
577}
578
579template <typename Graph>
581 SCOPED_TIME_STAT(&stats_);
582 bfs_queue_.clear();
583 int queue_index = 0;
584 const NodeIndex num_nodes = graph_->num_nodes();
585 node_in_bfs_queue_.assign(num_nodes, false);
586 node_in_bfs_queue_[sink_] = true;
587 node_in_bfs_queue_[source_] = true;
588
589 // We do two BFS in the reverse residual graph, one from the sink and one from
590 // the source. Because all the arcs from the source are saturated (except in
591 // presence of integer overflow), the source cannot reach the sink in the
592 // residual graph. However, we still want to relabel all the nodes that cannot
593 // reach the sink but can reach the source (because if they have excess, we
594 // need to push it back to the source).
595 //
596 // Note that the second pass is not needed here if we use a two-pass algorithm
597 // to return the flow to the source after we found the min cut.
598 const int num_passes = use_two_phase_algorithm_ ? 1 : 2;
599 for (int pass = 0; pass < num_passes; ++pass) {
600 if (pass == 0) {
601 bfs_queue_.push_back(sink_);
602 } else {
603 bfs_queue_.push_back(source_);
604 }
605
606 while (queue_index != bfs_queue_.size()) {
607 const NodeIndex node = bfs_queue_[queue_index];
608 ++queue_index;
609 const NodeIndex candidate_distance = node_potential_[node] + 1;
610 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node); it.Ok();
611 it.Next()) {
612 const ArcIndex arc = it.Index();
613 const NodeIndex head = Head(arc);
614
615 // Skip the arc if the height of head was already set to the correct
616 // value (Remember we are doing reverse BFS).
617 if (node_in_bfs_queue_[head]) continue;
618
619 // TODO(user): By using more memory we can speed this up quite a bit by
620 // avoiding to take the opposite arc here, too options:
621 // - if (residual_arc_capacity_[arc] != arc_capacity_[arc])
622 // - if (opposite_arc_is_admissible_[arc]) // need updates.
623 // Experiment with the first option shows more than 10% gain on this
624 // function running time, which is the bottleneck on many instances.
625 const ArcIndex opposite_arc = Opposite(arc);
626 if (residual_arc_capacity_[opposite_arc] > 0) {
627 // Note(user): We used to have a DCHECK_GE(candidate_distance,
628 // node_potential_[head]); which is always true except in the case
629 // where we can push more than kMaxFlowQuantity out of the source. The
630 // problem comes from the fact that in this case, we call
631 // PushFlowExcessBackToSource() in the middle of the algorithm. The
632 // later call will break the properties of the node potential. Note
633 // however, that this function will recompute a good node potential
634 // for all the nodes and thus fix the issue.
635
636 // If head is active, we can steal some or all of its excess.
637 // This brings a huge gain on some problems.
638 // Note(user): I haven't seen this anywhere in the literature.
639 // TODO(user): Investigate more and maybe write a publication :)
640 if (node_excess_[head] > 0) {
641 const FlowQuantity flow = std::min(
642 node_excess_[head], residual_arc_capacity_[opposite_arc]);
643 PushFlow(flow, opposite_arc);
644
645 // If the arc became saturated, it is no longer in the residual
646 // graph, so we do not need to consider head at this time.
647 if (residual_arc_capacity_[opposite_arc] == 0) continue;
648 }
649
650 // Note that there is no need to touch first_admissible_arc_[node]
651 // because of the relaxed Relabel() we use.
652 node_potential_[head] = candidate_distance;
653 node_in_bfs_queue_[head] = true;
654 bfs_queue_.push_back(head);
655 }
656 }
657 }
658 }
659
660 // At the end of the search, some nodes may not be in the bfs_queue_. Such
661 // nodes cannot reach the sink_ or source_ in the residual graph, so there is
662 // no point trying to push flow toward them. We obtain this effect by setting
663 // their height to something unreachable.
664 //
665 // Note that this also prevents cycling due to our anti-overflow procedure.
666 // For instance, suppose there is an edge s -> n outgoing from the source. If
667 // node n has no other connection and some excess, we will push the flow back
668 // to the source, but if we don't update the height of n
669 // SaturateOutgoingArcsFromSource() will push the flow to n again.
670 // TODO(user): This is another argument for another anti-overflow algorithm.
671 for (NodeIndex node = 0; node < num_nodes; ++node) {
672 if (!node_in_bfs_queue_[node]) {
673 node_potential_[node] = 2 * num_nodes - 1;
674 }
675 }
676
677 // Reset the active nodes. Doing it like this pushes the nodes in increasing
678 // order of height. Note that bfs_queue_[0] is the sink_ so we skip it.
679 DCHECK(IsEmptyActiveNodeContainer());
680 for (int i = 1; i < bfs_queue_.size(); ++i) {
681 const NodeIndex node = bfs_queue_[i];
682 if (node_excess_[node] > 0) {
683 DCHECK(IsActive(node));
684 PushActiveNode(node);
685 }
686 }
687}
688
689template <typename Graph>
691 SCOPED_TIME_STAT(&stats_);
692 const NodeIndex num_nodes = graph_->num_nodes();
693
694 // If sink_ or source_ already have kMaxFlowQuantity, then there is no
695 // point pushing more flow since it will cause an integer overflow.
696 if (node_excess_[sink_] == kMaxFlowQuantity) return false;
697 if (node_excess_[source_] == -kMaxFlowQuantity) return false;
698
699 bool flow_pushed = false;
700 for (OutgoingArcIterator it(*graph_, source_); it.Ok(); it.Next()) {
701 const ArcIndex arc = it.Index();
702 const FlowQuantity flow = residual_arc_capacity_[arc];
703
704 // This is a special IsAdmissible() condition for the source.
705 if (flow == 0 || node_potential_[Head(arc)] >= num_nodes) continue;
706
707 // We are careful in case the sum of the flow out of the source is greater
708 // than kMaxFlowQuantity to avoid overflow.
709 const FlowQuantity current_flow_out_of_source = -node_excess_[source_];
710 DCHECK_GE(flow, 0) << flow;
711 DCHECK_GE(current_flow_out_of_source, 0) << current_flow_out_of_source;
712 const FlowQuantity capped_flow =
713 kMaxFlowQuantity - current_flow_out_of_source;
714 if (capped_flow < flow) {
715 // We push as much flow as we can so the current flow on the network will
716 // be kMaxFlowQuantity.
717
718 // Since at the beginning of the function, current_flow_out_of_source
719 // was different from kMaxFlowQuantity, we are sure to have pushed some
720 // flow before if capped_flow is 0.
721 if (capped_flow == 0) return true;
722 PushFlow(capped_flow, arc);
723 return true;
724 }
725 PushFlow(flow, arc);
726 flow_pushed = true;
727 }
728 DCHECK_LE(node_excess_[source_], 0);
729 return flow_pushed;
730}
731
732template <typename Graph>
734 SCOPED_TIME_STAT(&stats_);
735 // TODO(user): Do not allow a zero flow after fixing the UniformMaxFlow code.
736 DCHECK_GE(residual_arc_capacity_[Opposite(arc)] + flow, 0);
737 DCHECK_GE(residual_arc_capacity_[arc] - flow, 0);
738
739 // node_excess_ should be always greater than or equal to 0 except for the
740 // source where it should always be smaller than or equal to 0. Note however
741 // that we cannot check this because when we cancel the flow on a cycle in
742 // PushFlowExcessBackToSource(), we may break this invariant during the
743 // operation even if it is still valid at the end.
744
745 // Update the residual capacity of the arc and its opposite arc.
746 residual_arc_capacity_[arc] -= flow;
747 residual_arc_capacity_[Opposite(arc)] += flow;
748
749 // Update the excesses at the tail and head of the arc.
750 node_excess_[Tail(arc)] -= flow;
751 node_excess_[Head(arc)] += flow;
752}
753
754template <typename Graph>
756 SCOPED_TIME_STAT(&stats_);
757 DCHECK(IsEmptyActiveNodeContainer());
758 const NodeIndex num_nodes = graph_->num_nodes();
759 for (NodeIndex node = 0; node < num_nodes; ++node) {
760 if (IsActive(node)) {
761 if (use_two_phase_algorithm_ && node_potential_[node] >= num_nodes) {
762 continue;
763 }
764 PushActiveNode(node);
765 }
766 }
767}
768
769template <typename Graph>
771 SCOPED_TIME_STAT(&stats_);
772 // Usually SaturateOutgoingArcsFromSource() will saturate all the arcs from
773 // the source in one go, and we will loop just once. But in case we can push
774 // more than kMaxFlowQuantity out of the source the loop is as follow:
775 // - Push up to kMaxFlowQuantity out of the source on the admissible outgoing
776 // arcs. Stop if no flow was pushed.
777 // - Compute the current max-flow. This will push some flow back to the
778 // source and render more outgoing arcs from the source not admissible.
779 //
780 // TODO(user): This may not be the most efficient algorithm if we need to loop
781 // many times. An alternative may be to handle the source like the other nodes
782 // in the algorithm, initially putting an excess of kMaxFlowQuantity on it,
783 // and making the source active like any other node with positive excess. To
784 // investigate.
785 //
786 // TODO(user): The code below is buggy when more than kMaxFlowQuantity can be
787 // pushed out of the source (i.e. when we loop more than once in the while()).
788 // This is not critical, since this code is not used in the default algorithm
789 // computation. The issue is twofold:
790 // - InitializeActiveNodeContainer() doesn't push the nodes in
791 // the correct order.
792 // - PushFlowExcessBackToSource() may break the node potential properties, and
793 // we will need a call to GlobalUpdate() to fix that.
794 while (SaturateOutgoingArcsFromSource()) {
795 DCHECK(IsEmptyActiveNodeContainer());
796 InitializeActiveNodeContainer();
797 while (!IsEmptyActiveNodeContainer()) {
798 const NodeIndex node = GetAndRemoveFirstActiveNode();
799 if (node == source_ || node == sink_) continue;
800 Discharge(node);
801 }
802 if (use_two_phase_algorithm_) {
803 PushFlowExcessBackToSource();
804 }
805 }
806}
807
808template <typename Graph>
810 SCOPED_TIME_STAT(&stats_);
811
812 // TODO(user): This should be graph_->num_nodes(), but ebert graph does not
813 // have a correct size if the highest index nodes have no arcs.
814 const NodeIndex num_nodes = Graphs<Graph>::NodeReservation(*graph_);
815 std::vector<int> skip_active_node;
816
817 while (SaturateOutgoingArcsFromSource()) {
818 int num_skipped;
819 do {
820 num_skipped = 0;
821 skip_active_node.assign(num_nodes, 0);
822 skip_active_node[sink_] = 2;
823 skip_active_node[source_] = 2;
824 GlobalUpdate();
825 while (!IsEmptyActiveNodeContainer()) {
826 const NodeIndex node = GetAndRemoveFirstActiveNode();
827 if (skip_active_node[node] > 1) {
828 if (node != sink_ && node != source_) ++num_skipped;
829 continue;
830 }
831 const NodeIndex old_height = node_potential_[node];
832 Discharge(node);
833
834 // The idea behind this is that if a node height augments by more than
835 // one, then it is likely to push flow back the way it came. This can
836 // lead to very costly loops. A bad case is: source -> n1 -> n2 and n2
837 // just recently isolated from the sink. Then n2 will push flow back to
838 // n1, and n1 to n2 and so on. The height of each node will increase by
839 // steps of two until the height of the source is reached, which can
840 // take a long time. If the chain is longer, the situation is even
841 // worse. The behavior of this heuristic is related to the Gap
842 // heuristic.
843 //
844 // Note that the global update will fix all such cases efficiently. So
845 // the idea is to discharge the active node as much as possible, and
846 // then do a global update.
847 //
848 // We skip a node when this condition was true 2 times to avoid doing a
849 // global update too frequently.
850 if (node_potential_[node] > old_height + 1) {
851 ++skip_active_node[node];
852 }
853 }
854 } while (num_skipped > 0);
855 if (use_two_phase_algorithm_) {
856 PushFlowExcessBackToSource();
857 }
858 }
859}
860
861template <typename Graph>
863 SCOPED_TIME_STAT(&stats_);
864 const NodeIndex num_nodes = graph_->num_nodes();
865 while (true) {
866 DCHECK(IsActive(node));
867 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node,
868 first_admissible_arc_[node]);
869 it.Ok(); it.Next()) {
870 const ArcIndex arc = it.Index();
871 if (IsAdmissible(arc)) {
872 DCHECK(IsActive(node));
873 const NodeIndex head = Head(arc);
874 if (node_excess_[head] == 0) {
875 // The push below will make the node active for sure. Note that we may
876 // push the sink_, but that is handled properly in Refine().
877 PushActiveNode(head);
878 }
879 const FlowQuantity delta =
880 std::min(node_excess_[node], residual_arc_capacity_[arc]);
881 PushFlow(delta, arc);
882 if (node_excess_[node] == 0) {
883 first_admissible_arc_[node] = arc; // arc may still be admissible.
884 return;
885 }
886 }
887 }
888 Relabel(node);
889 if (use_two_phase_algorithm_ && node_potential_[node] >= num_nodes) break;
890 }
891}
892
893template <typename Graph>
895 SCOPED_TIME_STAT(&stats_);
896 // Because we use a relaxed version, this is no longer true if the
897 // first_admissible_arc_[node] was not actually the first arc!
898 // DCHECK(CheckRelabelPrecondition(node));
900 ArcIndex first_admissible_arc = Graph::kNilArc;
901 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node); it.Ok();
902 it.Next()) {
903 const ArcIndex arc = it.Index();
904 if (residual_arc_capacity_[arc] > 0) {
905 // Update min_height only for arcs with available capacity.
906 NodeHeight head_height = node_potential_[Head(arc)];
907 if (head_height < min_height) {
908 min_height = head_height;
909 first_admissible_arc = arc;
910
911 // We found an admissible arc at the current height, just stop there.
912 // This is the true first_admissible_arc_[node].
913 if (min_height + 1 == node_potential_[node]) break;
914 }
915 }
916 }
917 DCHECK_NE(first_admissible_arc, Graph::kNilArc);
918 node_potential_[node] = min_height + 1;
919
920 // Note that after a Relabel(), the loop will continue in Discharge(), and
921 // we are sure that all the arcs before first_admissible_arc are not
922 // admissible since their height is > min_height.
923 first_admissible_arc_[node] = first_admissible_arc;
924}
925
926template <typename Graph>
928 return Graphs<Graph>::OppositeArc(*graph_, arc);
929}
930
931template <typename Graph>
933 return IsArcValid(arc) && arc >= 0;
934}
935
936template <typename Graph>
938 return Graphs<Graph>::IsArcValid(*graph_, arc);
939}
940
941template <typename Graph>
944
945template <typename Graph>
946template <bool reverse>
948 NodeIndex start, std::vector<NodeIndex>* result) {
949 // If start is not a valid node index, it can reach only itself.
950 // Note(user): This is needed because source and sink are given independently
951 // of the graph and sometimes before it is even constructed.
952 const NodeIndex num_nodes = graph_->num_nodes();
953 if (start >= num_nodes) {
954 result->clear();
955 result->push_back(start);
956 return;
957 }
958 bfs_queue_.clear();
959 node_in_bfs_queue_.assign(num_nodes, false);
960
961 int queue_index = 0;
962 bfs_queue_.push_back(start);
963 node_in_bfs_queue_[start] = true;
964 while (queue_index != bfs_queue_.size()) {
965 const NodeIndex node = bfs_queue_[queue_index];
966 ++queue_index;
967 for (OutgoingOrOppositeIncomingArcIterator it(*graph_, node); it.Ok();
968 it.Next()) {
969 const ArcIndex arc = it.Index();
970 const NodeIndex head = Head(arc);
971 if (node_in_bfs_queue_[head]) continue;
972 if (residual_arc_capacity_[reverse ? Opposite(arc) : arc] == 0) continue;
973 node_in_bfs_queue_[head] = true;
974 bfs_queue_.push_back(head);
975 }
976 }
977 *result = bfs_queue_;
978}
979
980template <typename Graph>
983 model.set_problem_type(FlowModelProto::MAX_FLOW);
984 for (int n = 0; n < graph_->num_nodes(); ++n) {
985 FlowNodeProto* node = model.add_nodes();
986 node->set_id(n);
987 if (n == source_) node->set_supply(1);
988 if (n == sink_) node->set_supply(-1);
989 }
990 for (int a = 0; a < graph_->num_arcs(); ++a) {
991 FlowArcProto* arc = model.add_arcs();
992 arc->set_tail(graph_->Tail(a));
993 arc->set_head(graph_->Head(a));
994 arc->set_capacity(Capacity(a));
995 }
996 return model;
997}
998
999// Explicit instantiations that can be used by a client.
1000//
1001// TODO(user): moves this code out of a .cc file and include it at the end of
1002// the header so it can work with any graph implementation ?
1003template <>
1006template <>
1007const FlowQuantity
1010template <>
1011const FlowQuantity
1014template <>
1015const FlowQuantity
1018
1019template class GenericMaxFlow<StarGraph>;
1023
1024} // namespace operations_research
int64_t max
Definition: alldiff_cst.cc:140
int64_t min
Definition: alldiff_cst.cc:139
#define DCHECK_LE(val1, val2)
Definition: base/logging.h:892
#define DCHECK_NE(val1, val2)
Definition: base/logging.h:891
#define DCHECK_GE(val1, val2)
Definition: base/logging.h:894
#define DCHECK_GT(val1, val2)
Definition: base/logging.h:895
#define LOG(severity)
Definition: base/logging.h:420
#define DCHECK(condition)
Definition: base/logging.h:889
#define DCHECK_EQ(val1, val2)
Definition: base/logging.h:890
#define VLOG(verboselevel)
Definition: base/logging.h:983
static constexpr ProblemType MAX_FLOW
Graph::OutgoingArcIterator OutgoingArcIterator
Definition: max_flow.h:319
void Relabel(NodeIndex node)
Definition: max_flow.cc:894
std::vector< NodeIndex > active_nodes_
Definition: max_flow.h:590
void SetArcCapacity(ArcIndex arc, FlowQuantity new_capacity)
Definition: max_flow.cc:190
std::string DebugString(const std::string &context, ArcIndex arc) const
Definition: max_flow.cc:333
Graph::OutgoingOrOppositeIncomingArcIterator OutgoingOrOppositeIncomingArcIterator
Definition: max_flow.h:321
bool CheckRelabelPrecondition(NodeIndex node) const
Definition: max_flow.cc:322
std::vector< NodeIndex > bfs_queue_
Definition: max_flow.h:611
void SetArcFlow(ArcIndex arc, FlowQuantity new_flow)
Definition: max_flow.cc:224
void GetSourceSideMinCut(std::vector< NodeIndex > *result)
Definition: max_flow.cc:240
const Graph * graph() const
Definition: max_flow.h:338
void PushFlow(FlowQuantity flow, ArcIndex arc)
Definition: max_flow.cc:733
void ComputeReachableNodes(NodeIndex start, std::vector< NodeIndex > *result)
Definition: max_flow.cc:947
bool IsArcValid(ArcIndex arc) const
Definition: max_flow.cc:937
GenericMaxFlow(const Graph *graph, NodeIndex source, NodeIndex sink)
Definition: max_flow.cc:140
void GetSinkSideMinCut(std::vector< NodeIndex > *result)
Definition: max_flow.cc:246
ArcIndex Opposite(ArcIndex arc) const
Definition: max_flow.cc:927
bool IsArcDirect(ArcIndex arc) const
Definition: max_flow.cc:932
void Discharge(NodeIndex node)
Definition: max_flow.cc:862
FlowModelProto CreateFlowModelProto(NodeIndex source, NodeIndex sink) const
Definition: max_flow.cc:120
FlowQuantity Flow(ArcIndex arc) const
Definition: max_flow.cc:108
void GetSourceSideMinCut(std::vector< NodeIndex > *result)
Definition: max_flow.cc:110
Status Solve(NodeIndex source, NodeIndex sink)
Definition: max_flow.cc:55
FlowQuantity OptimalFlow() const
Definition: max_flow.cc:106
ArcIndex AddArcWithCapacity(NodeIndex tail, NodeIndex head, FlowQuantity capacity)
Definition: max_flow.cc:28
FlowQuantity Capacity(ArcIndex arc) const
Definition: max_flow.cc:47
NodeIndex Head(ArcIndex arc) const
Definition: max_flow.cc:45
NodeIndex Tail(ArcIndex arc) const
Definition: max_flow.cc:43
void SetArcCapacity(ArcIndex arc, FlowQuantity capacity)
Definition: max_flow.cc:51
void GetSinkSideMinCut(std::vector< NodeIndex > *result)
Definition: max_flow.cc:115
bool Reserve(int64_t new_min_index, int64_t new_max_index)
Definition: zvector.h:98
int64_t a
GRBmodel * model
GurobiMPCallbackContext * context
Collection of objects used to extend the Constraint Solver library.
ListGraph Graph
Definition: graph.h:2362
int64_t delta
Definition: resource.cc:1692
int64_t capacity
int64_t tail
int64_t head
int64_t start
#define IF_STATS_ENABLED(instructions)
Definition: stats.h:437
#define SCOPED_TIME_STAT(stats)
Definition: stats.h:438
static ArcIndex ArcReservation(const Graph &graph)
Definition: graphs.h:39
static NodeIndex NodeReservation(const Graph &graph)
Definition: graphs.h:36
static bool IsArcValid(const Graph &graph, ArcIndex arc)
Definition: graphs.h:33
static ArcIndex OppositeArc(const Graph &graph, ArcIndex arc)
Definition: graphs.h:30