notebook: regenerate all

This commit is contained in:
Corentin Le Molgat
2025-06-03 18:10:28 +02:00
parent 62bf1f278a
commit 0703f09de9
18 changed files with 313 additions and 433 deletions

View File

@@ -0,0 +1,270 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2025 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# permutation_flow_shop"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/contrib/permutation_flow_shop.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/examples/contrib/permutation_flow_shop.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"%pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"\n",
"This model implements the permutation flow shop problem (PFSP).\n",
"\n",
"In the PFSP, a set of jobs has to be processed on a set of machines. Each job\n",
"must be processed on each machine in sequence and all jobs have to be processed\n",
"in the same order on every machine. The objective is to minimize the makespan.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"from typing import Sequence\n",
"from dataclasses import dataclass\n",
"from itertools import product\n",
"\n",
"import numpy as np\n",
"\n",
"from ortools.sat.colab import flags\n",
"from google.protobuf import text_format\n",
"from ortools.sat.python import cp_model\n",
"\n",
"_PARAMS = flags.define_string(\n",
"    \"params\",\n",
"    \"num_search_workers:16\",\n",
"    \"Sat solver parameters.\",\n",
")\n",
"\n",
"_TIME_LIMIT = flags.define_float(\n",
"    \"time_limit\",\n",
"    60.0,\n",
"    \"Time limit in seconds. Default is 60s.\",\n",
")\n",
"\n",
"_LOG = flags.define_boolean(\n",
"    \"log\",\n",
"    False,\n",
"    \"Whether to log the solver output.\",\n",
")\n",
"\n",
"\n",
"@dataclass\n",
"class TaskType:\n",
"    \"\"\"\n",
"    Small wrapper to hold the start, end, and interval variables of a task.\n",
"    \"\"\"\n",
"\n",
"    start: cp_model.IntVar\n",
"    end: cp_model.IntVar\n",
"    interval: cp_model.IntervalVar\n",
"\n",
"\n",
"def permutation_flow_shop(\n",
"    processing_times: np.ndarray,\n",
"    time_limit: float,\n",
"    log: bool,\n",
"    params: str\n",
"):\n",
"    \"\"\"\n",
"    Solves the given permutation flow shop problem instance with OR-Tools.\n",
"\n",
"    Parameters\n",
"    ----------\n",
"    processing_times\n",
"        An n-by-m matrix of processing times of the jobs on the machines.\n",
"    time_limit\n",
"        The time limit in seconds. If not set, the solver runs until an\n",
"        optimal solution is found.\n",
"    log\n",
"        Whether to log the solver output. Default is False.\n",
"    params\n",
"        CP-SAT solver parameters in text-format protobuf, e.g.\n",
"        \"num_search_workers:16\". An empty string keeps solver defaults.\n",
"    \"\"\"\n",
"    m = cp_model.CpModel()\n",
"    num_jobs, num_machines = processing_times.shape\n",
"    # Sum of all durations is a trivially valid horizon (schedule jobs\n",
"    # back to back on a single timeline).\n",
"    horizon = processing_times.sum()\n",
"\n",
"    # Create interval variables for all tasks (each job/machine pair).\n",
"    tasks = {}\n",
"    for job, machine in product(range(num_jobs), range(num_machines)):\n",
"        start = m.new_int_var(0, horizon, \"\")\n",
"        end = m.new_int_var(0, horizon, \"\")\n",
"        duration = processing_times[job][machine]\n",
"        interval = m.new_interval_var(start, duration, end, \"\")\n",
"        tasks[job, machine] = TaskType(start, end, interval)\n",
"\n",
"    # No overlap for all job intervals on this machine.\n",
"    for machine in range(num_machines):\n",
"        intervals = [tasks[job, machine].interval for job in range(num_jobs)]\n",
"        m.add_no_overlap(intervals)\n",
"\n",
"    # Add precedence constraints between tasks of the same job.\n",
"    for job, machine in product(range(num_jobs), range(num_machines - 1)):\n",
"        pred = tasks[job, machine]\n",
"        succ = tasks[job, machine + 1]\n",
"        m.add(pred.end <= succ.start)\n",
"\n",
"    # Create arcs for circuit constraints. Node 0 is a dummy start/end node;\n",
"    # job j is node j + 1.\n",
"    arcs = []\n",
"    for idx1 in range(num_jobs):\n",
"        arcs.append((0, idx1 + 1, m.new_bool_var(\"start\")))\n",
"        arcs.append((idx1 + 1, 0, m.new_bool_var(\"end\")))\n",
"\n",
"    lits = {}\n",
"    for idx1, idx2 in product(range(num_jobs), repeat=2):\n",
"        if idx1 != idx2:\n",
"            lit = m.new_bool_var(f\"{idx1} -> {idx2}\")\n",
"            lits[idx1, idx2] = lit\n",
"            arcs.append((idx1 + 1, idx2 + 1, lit))\n",
"\n",
"    m.add_circuit(arcs)\n",
"\n",
"    # Enforce that the permutation of jobs is the same on all machines.\n",
"    for machine in range(num_machines):\n",
"        starts = [tasks[job, machine].start for job in range(num_jobs)]\n",
"        ends = [tasks[job, machine].end for job in range(num_jobs)]\n",
"\n",
"        for idx1, idx2 in product(range(num_jobs), repeat=2):\n",
"            if idx1 == idx2:\n",
"                continue\n",
"\n",
"            # Since all machines share the same arc literals, if the literal\n",
"            # i -> j is True, this enforces that job i is always scheduled\n",
"            # before job j on all machines.\n",
"            lit = lits[idx1, idx2]\n",
"            m.add(ends[idx1] <= starts[idx2]).only_enforce_if(lit)\n",
"\n",
"    # Set minimizing makespan as objective: the max of the completion times\n",
"    # of all jobs on the last machine.\n",
"    obj_var = m.new_int_var(0, horizon, \"makespan\")\n",
"    completion_times = [\n",
"        tasks[(job, num_machines - 1)].end for job in range(num_jobs)\n",
"    ]\n",
"    m.add_max_equality(obj_var, completion_times)\n",
"    m.minimize(obj_var)\n",
"\n",
"    solver = cp_model.CpSolver()\n",
"    if params:\n",
"        text_format.Parse(params, solver.parameters)\n",
"    solver.parameters.log_search_progress = log\n",
"    solver.parameters.max_time_in_seconds = time_limit\n",
"\n",
"    status_code = solver.Solve(m)\n",
"    status = solver.StatusName(status_code)\n",
"\n",
"    print(f\"Status: {status}\")\n",
"    print(f\"Makespan: {solver.ObjectiveValue()}\")\n",
"\n",
"    if status in [\"OPTIMAL\", \"FEASIBLE\"]:\n",
"        # The job order is recovered by sorting jobs by their start time on\n",
"        # the first machine (1-based job numbering for display).\n",
"        start = [solver.Value(tasks[job, 0].start) for job in range(num_jobs)]\n",
"        solution = np.argsort(start) + 1\n",
"        print(f\"Solution: {solution}\")\n",
"\n",
"\n",
"def main(argv: Sequence[str]) -> None:\n",
"    \"\"\"Creates the data and calls the solving procedure.\"\"\"\n",
"    # VRF_10_5_2 instance from http://soa.iti.es/problem-instances.\n",
"    # Optimal makespan is 698.\n",
"    processing_times = [\n",
"        [79, 67, 10, 48, 52],\n",
"        [40, 40, 57, 21, 54],\n",
"        [48, 93, 49, 11, 79],\n",
"        [16, 23, 19, 2, 38],\n",
"        [38, 90, 57, 73, 3],\n",
"        [76, 13, 99, 98, 55],\n",
"        [73, 85, 40, 20, 85],\n",
"        [34, 6, 27, 53, 21],\n",
"        [38, 6, 35, 28, 44],\n",
"        [32, 11, 11, 34, 27],\n",
"    ]\n",
"\n",
"    permutation_flow_shop(\n",
"        np.array(processing_times), _TIME_LIMIT.value, _LOG.value, _PARAMS.value\n",
"    )\n",
"\n",
"\n",
"# `app` is never imported in this notebook, so `app.run(main)` would raise a\n",
"# NameError. Call main() directly instead; its argv argument is unused.\n",
"main(None)\n"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -91,8 +91,6 @@
"outputs": [],
"source": [
"import collections\n",
"import time\n",
"from typing import Optional\n",
"\n",
"from ortools.sat.colab import flags\n",
"from google.protobuf import text_format\n",
@@ -111,24 +109,6 @@
" \"Whether we encode the makespan using an interval or not.\",\n",
")\n",
"_HORIZON = flags.define_integer(\"horizon\", -1, \"Force horizon.\")\n",
"_ADD_REDUNDANT_ENERGETIC_CONSTRAINTS = flags.define_bool(\n",
" \"add_redundant_energetic_constraints\",\n",
" False,\n",
" \"add redundant energetic constraints on the pairs of tasks extracted from\"\n",
" + \" precedence graph.\",\n",
")\n",
"_DELAY_TIME_LIMIT = flags.define_float(\n",
" \"pairwise_delay_total_time_limit\",\n",
" 120.0,\n",
" \"Total time limit when computing min delay between tasks.\"\n",
" + \" A non-positive time limit disable min delays computation.\",\n",
")\n",
"_PREEMPTIVE_LB_TIME_LIMIT = flags.define_float(\n",
" \"preemptive_lb_time_limit\",\n",
" 0.0,\n",
" \"Time limit when computing a preemptive schedule lower bound.\"\n",
" + \" A non-positive time limit disable this computation.\",\n",
")\n",
"\n",
"\n",
"def print_problem_statistics(problem: rcpsp_pb2.RcpspProblem):\n",
@@ -176,85 +156,6 @@
" print(f\" - {tasks_with_delay} tasks with successor delays\")\n",
"\n",
"\n",
"def analyse_dependency_graph(\n",
" problem: rcpsp_pb2.RcpspProblem,\n",
") -> tuple[list[tuple[int, int, list[int]]], dict[int, list[int]]]:\n",
" \"\"\"Analyses the dependency graph to improve the model.\n",
"\n",
" Args:\n",
" problem: the protobuf of the problem to solve.\n",
"\n",
" Returns:\n",
" a list of (task1, task2, in_between_tasks) with task2 and indirect successor\n",
" of task1, and in_between_tasks being the list of all tasks after task1 and\n",
" before task2.\n",
" \"\"\"\n",
"\n",
" num_nodes = len(problem.tasks)\n",
" print(f\"Analysing the dependency graph over {num_nodes} nodes\")\n",
"\n",
" ins = collections.defaultdict(list)\n",
" outs = collections.defaultdict(list)\n",
" after = collections.defaultdict(set)\n",
" before = collections.defaultdict(set)\n",
"\n",
" # Build the transitive closure of the precedences.\n",
" # This algorithm has the wrong complexity (n^4), but is OK for the psplib\n",
" # as the biggest example has 120 nodes.\n",
" for n in range(num_nodes):\n",
" for s in problem.tasks[n].successors:\n",
" ins[s].append(n)\n",
" outs[n].append(s)\n",
"\n",
" for a in list(after[s]) + [s]:\n",
" for b in list(before[n]) + [n]:\n",
" after[b].add(a)\n",
" before[a].add(b)\n",
"\n",
" # Search for pair of tasks, containing at least two parallel branch between\n",
" # them in the precedence graph.\n",
" num_candidates = 0\n",
" result: list[tuple[int, int, list[int]]] = []\n",
" for source, start_outs in outs.items():\n",
" if len(start_outs) <= 1:\n",
" # Starting with the unique successor of source will be as good.\n",
" continue\n",
" for sink, end_ins in ins.items():\n",
" if len(end_ins) <= 1:\n",
" # Ending with the unique predecessor of sink will be as good.\n",
" continue\n",
" if sink == source:\n",
" continue\n",
" if sink not in after[source]:\n",
" continue\n",
"\n",
" num_active_outgoing_branches = 0\n",
" num_active_incoming_branches = 0\n",
" for succ in outs[source]:\n",
" if sink in after[succ]:\n",
" num_active_outgoing_branches += 1\n",
" for pred in ins[sink]:\n",
" if source in before[pred]:\n",
" num_active_incoming_branches += 1\n",
"\n",
" if num_active_outgoing_branches <= 1 or num_active_incoming_branches <= 1:\n",
" continue\n",
"\n",
" common = after[source].intersection(before[sink])\n",
" if len(common) <= 1:\n",
" continue\n",
" num_candidates += 1\n",
" result.append((source, sink, common))\n",
"\n",
" # Sort entries lexicographically by (len(common), source, sink)\n",
" def price(entry):\n",
" return num_nodes * num_nodes * len(entry[2]) + num_nodes * entry[0] + entry[1]\n",
"\n",
" result.sort(key=price)\n",
" print(f\" - created {len(result)} pairs of nodes to examine\", flush=True)\n",
" return result, after\n",
"\n",
"\n",
"def solve_rcpsp(\n",
" problem: rcpsp_pb2.RcpspProblem,\n",
" proto_file: str,\n",
@@ -262,12 +163,7 @@
" active_tasks: set[int],\n",
" source: int,\n",
" sink: int,\n",
" intervals_of_tasks: list[tuple[int, int, list[int]]],\n",
" delays: dict[tuple[int, int], tuple[int, int]],\n",
" in_main_solve: bool = False,\n",
" initial_solution: Optional[rcpsp_pb2.RcpspAssignment] = None,\n",
" lower_bound: int = 0,\n",
") -> tuple[int, int, Optional[rcpsp_pb2.RcpspAssignment]]:\n",
") -> None:\n",
" \"\"\"Parse and solve a given RCPSP problem in proto format.\n",
"\n",
" The model will only look at the tasks {source} + {sink} + active_tasks, and\n",
@@ -281,13 +177,6 @@
" active_tasks: the set of active tasks to consider.\n",
" source: the source task in the graph. Its end will be forced to 0.\n",
" sink: the sink task of the graph. Its start is the makespan of the problem.\n",
" intervals_of_tasks: a heuristic lists of (task1, task2, tasks) used to add\n",
" redundant energetic equations to the model.\n",
" delays: a list of (task1, task2, min_delays) used to add extended precedence\n",
" constraints (start(task2) >= end(task1) + min_delay).\n",
" in_main_solve: indicates if this is the main solve procedure.\n",
" initial_solution: A valid assignment used to hint the search.\n",
" lower_bound: A valid lower bound of the makespan objective.\n",
"\n",
" Returns:\n",
" (lower_bound of the objective, best solution found, assignment)\n",
@@ -305,8 +194,6 @@
" horizon = problem.deadline if problem.deadline != -1 else problem.horizon\n",
" if _HORIZON.value > 0:\n",
" horizon = _HORIZON.value\n",
" elif delays and in_main_solve and (source, sink) in delays:\n",
" horizon = delays[(source, sink)][1]\n",
" elif horizon == -1: # Naive computation.\n",
" horizon = sum(max(r.duration for r in t.recipes) for t in problem.tasks)\n",
" if problem.is_rcpsp_max:\n",
@@ -315,8 +202,7 @@
" for rd in sd.recipe_delays:\n",
" for d in rd.min_delays:\n",
" horizon += abs(d)\n",
" if in_main_solve:\n",
" print(f\"Horizon = {horizon}\", flush=True)\n",
" print(f\"Horizon = {horizon}\", flush=True)\n",
"\n",
" # Containers.\n",
" task_starts = {}\n",
@@ -342,7 +228,6 @@
" start_var = model.new_int_var(0, horizon, f\"start_of_task_{t}\")\n",
" end_var = model.new_int_var(0, horizon, f\"end_of_task_{t}\")\n",
"\n",
" literals = []\n",
" if num_recipes > 1:\n",
" # Create one literal per recipe.\n",
" literals = [model.new_bool_var(f\"is_present_{t}_{r}\") for r in all_recipes]\n",
@@ -418,7 +303,7 @@
" )\n",
"\n",
" # Create makespan variable\n",
" makespan = model.new_int_var(lower_bound, horizon, \"makespan\")\n",
" makespan = model.new_int_var(0, horizon, \"makespan\")\n",
" makespan_size = model.new_int_var(1, horizon, \"interval_makespan_size\")\n",
" interval_makespan = model.new_interval_var(\n",
" makespan,\n",
@@ -526,22 +411,6 @@
"\n",
" model.minimize(objective)\n",
"\n",
" # Add min delay constraints.\n",
" if delays is not None:\n",
" for (local_start, local_end), (min_delay, _) in delays.items():\n",
" if local_start == source and local_end in active_tasks:\n",
" model.add(task_starts[local_end] >= min_delay)\n",
" elif local_start in active_tasks and local_end == sink:\n",
" model.add(makespan >= task_ends[local_start] + min_delay)\n",
" elif local_start in active_tasks and local_end in active_tasks:\n",
" model.add(task_starts[local_end] >= task_ends[local_start] + min_delay)\n",
"\n",
" problem_is_single_mode = True\n",
" for t in all_active_tasks:\n",
" if len(task_to_presence_literals[t]) > 1:\n",
" problem_is_single_mode = False\n",
" break\n",
"\n",
" # Add sentinels.\n",
" task_starts[source] = 0\n",
" task_ends[source] = 0\n",
@@ -549,53 +418,6 @@
" task_starts[sink] = makespan\n",
" task_to_presence_literals[sink].append(True)\n",
"\n",
" # For multi-mode problems, add a redundant energetic constraint:\n",
" # for every (start, end, in_between_tasks) extracted from the precedence\n",
" # graph, it add the energetic relaxation:\n",
" # (start_var('end') - end_var('start')) * capacity_max >=\n",
" # sum of linearized energies of all tasks from 'in_between_tasks'\n",
" if (\n",
" not problem.is_resource_investment\n",
" and not problem.is_consumer_producer\n",
" and _ADD_REDUNDANT_ENERGETIC_CONSTRAINTS.value\n",
" and in_main_solve\n",
" and not problem_is_single_mode\n",
" ):\n",
" added_constraints = 0\n",
" ignored_constraits = 0\n",
" for local_start, local_end, common in intervals_of_tasks:\n",
" for res in all_resources:\n",
" resource = problem.resources[res]\n",
" if not resource.renewable:\n",
" continue\n",
" c = resource.max_capacity\n",
" if delays and (local_start, local_end) in delays:\n",
" min_delay, _ = delays[local_start, local_end]\n",
" sum_of_max_energies = sum(\n",
" task_resource_to_max_energy[(t, res)] for t in common\n",
" )\n",
" if sum_of_max_energies <= c * min_delay:\n",
" ignored_constraits += 1\n",
" continue\n",
" model.add(\n",
" c * (task_starts[local_end] - task_ends[local_start])\n",
" >= sum(task_resource_to_energy[(t, res)] for t in common)\n",
" )\n",
" added_constraints += 1\n",
" print(\n",
" f\"Added {added_constraints} redundant energetic constraints, and \"\n",
" + f\"ignored {ignored_constraits} constraints.\",\n",
" flush=True,\n",
" )\n",
"\n",
" # Add solution hint.\n",
" if initial_solution:\n",
" for t in all_active_tasks:\n",
" model.add_hint(task_starts[t], initial_solution.start_of_task[t])\n",
" if len(task_to_presence_literals[t]) > 1:\n",
" selected = initial_solution.selected_recipe_of_task[t]\n",
" model.add_hint(task_to_presence_literals[t][selected], 1)\n",
"\n",
" # Write model to file.\n",
" if proto_file:\n",
" print(f\"Writing proto to{proto_file}\")\n",
@@ -608,243 +430,19 @@
" if params:\n",
" text_format.Parse(params, solver.parameters)\n",
"\n",
" # Favor objective_shaving_search over objective_lb_search.\n",
" # Favor objective_shaving over objective_lb_search.\n",
" if solver.parameters.num_workers >= 16 and solver.parameters.num_workers < 24:\n",
" solver.parameters.ignore_subsolvers.append(\"objective_lb_search\")\n",
" solver.parameters.extra_subsolvers.append(\"objective_shaving_search\")\n",
" solver.parameters.extra_subsolvers.append(\"objective_shaving\")\n",
"\n",
" # Experimental: Specify the fact that the objective is a makespan\n",
" solver.parameters.push_all_tasks_toward_start = True\n",
"\n",
" # Enable logging in the main solve.\n",
" solver.parameters.log_search_progress = True\n",
"\n",
" if in_main_solve:\n",
" solver.parameters.log_search_progress = True\n",
" #\n",
" status = solver.solve(model)\n",
" if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n",
" assignment = rcpsp_pb2.RcpspAssignment()\n",
" for t, _ in enumerate(problem.tasks):\n",
" if t in task_starts:\n",
" assignment.start_of_task.append(solver.value(task_starts[t]))\n",
" for r, recipe_literal in enumerate(task_to_presence_literals[t]):\n",
" if solver.boolean_value(recipe_literal):\n",
" assignment.selected_recipe_of_task.append(r)\n",
" break\n",
" else: # t is not an active task.\n",
" assignment.start_of_task.append(0)\n",
" assignment.selected_recipe_of_task.append(0)\n",
" return (\n",
" int(solver.best_objective_bound),\n",
" int(solver.objective_value),\n",
" assignment,\n",
" )\n",
" return -1, -1, None\n",
"\n",
"\n",
"def compute_delays_between_nodes(\n",
" problem: rcpsp_pb2.RcpspProblem,\n",
" task_intervals: list[tuple[int, int, list[int]]],\n",
") -> tuple[\n",
" dict[tuple[int, int], tuple[int, int]],\n",
" Optional[rcpsp_pb2.RcpspAssignment],\n",
" bool,\n",
"]:\n",
" \"\"\"Computes the min delays between all pairs of tasks in 'task_intervals'.\n",
"\n",
" Args:\n",
" problem: The protobuf of the model.\n",
" task_intervals: The output of the AnalysePrecedenceGraph().\n",
"\n",
" Returns:\n",
" a list of (task1, task2, min_delay_between_task1_and_task2)\n",
" \"\"\"\n",
" print(\"Computing the minimum delay between pairs of intervals\")\n",
" delays = {}\n",
" if (\n",
" problem.is_resource_investment\n",
" or problem.is_consumer_producer\n",
" or problem.is_rcpsp_max\n",
" or _DELAY_TIME_LIMIT.value <= 0.0\n",
" ):\n",
" return delays, None, False\n",
"\n",
" time_limit = _DELAY_TIME_LIMIT.value\n",
" complete_problem_assignment = None\n",
" num_optimal_delays = 0\n",
" num_delays_not_found = 0\n",
" optimal_found = True\n",
" for start_task, end_task, active_tasks in task_intervals:\n",
" if time_limit <= 0:\n",
" optimal_found = False\n",
" print(f\" - #timeout ({_DELAY_TIME_LIMIT.value}s) reached\", flush=True)\n",
" break\n",
"\n",
" start_time = time.time()\n",
" min_delay, feasible_delay, assignment = solve_rcpsp(\n",
" problem,\n",
" \"\",\n",
" f\"num_search_workers:16,max_time_in_seconds:{time_limit}\",\n",
" set(active_tasks),\n",
" start_task,\n",
" end_task,\n",
" [],\n",
" delays,\n",
" )\n",
" time_limit -= time.time() - start_time\n",
"\n",
" if min_delay != -1:\n",
" delays[(start_task, end_task)] = min_delay, feasible_delay\n",
" if start_task == 0 and end_task == len(problem.tasks) - 1:\n",
" complete_problem_assignment = assignment\n",
" if min_delay == feasible_delay:\n",
" num_optimal_delays += 1\n",
" else:\n",
" optimal_found = False\n",
" else:\n",
" num_delays_not_found += 1\n",
" optimal_found = False\n",
"\n",
" print(f\" - #optimal delays = {num_optimal_delays}\", flush=True)\n",
" if num_delays_not_found:\n",
" print(f\" - #not computed delays = {num_delays_not_found}\", flush=True)\n",
"\n",
" return delays, complete_problem_assignment, optimal_found\n",
"\n",
"\n",
"def accept_new_candidate(\n",
" problem: rcpsp_pb2.RcpspProblem,\n",
" after: dict[int, list[int]],\n",
" demand_map: dict[tuple[int, int], int],\n",
" current: list[int],\n",
" candidate: int,\n",
") -> bool:\n",
" \"\"\"Check if candidate is compatible with the tasks in current.\"\"\"\n",
" for c in current:\n",
" if candidate in after[c] or c in after[candidate]:\n",
" return False\n",
"\n",
" all_resources = range(len(problem.resources))\n",
" for res in all_resources:\n",
" resource = problem.resources[res]\n",
" if not resource.renewable:\n",
" continue\n",
" if (\n",
" sum(demand_map[(t, res)] for t in current) + demand_map[(candidate, res)]\n",
" > resource.max_capacity\n",
" ):\n",
" return False\n",
"\n",
" return True\n",
"\n",
"\n",
"def compute_preemptive_lower_bound(\n",
" problem: rcpsp_pb2.RcpspProblem,\n",
" after: dict[int, list[int]],\n",
" lower_bound: int,\n",
") -> int:\n",
" \"\"\"Computes a preemtive lower bound for the makespan statically.\n",
"\n",
" For this, it breaks all intervals into a set of intervals of size one.\n",
" Then it will try to assign all of them in a minimum number of configurations.\n",
" This is a standard complete set covering using column generation approach\n",
" where each column is a possible combination of itervals of size one.\n",
"\n",
" Args:\n",
" problem: The probuf of the model.\n",
" after: a task to list of task dict that contains all tasks after a given\n",
" task.\n",
" lower_bound: A valid lower bound of the problem. It can be 0.\n",
"\n",
" Returns:\n",
" a valid lower bound of the problem.\n",
" \"\"\"\n",
" # Check this is a single mode problem.\n",
" if (\n",
" problem.is_rcpsp_max\n",
" or problem.is_resource_investment\n",
" or problem.is_consumer_producer\n",
" ):\n",
" return lower_bound\n",
"\n",
" demand_map = collections.defaultdict(int)\n",
" duration_map = {}\n",
" all_active_tasks = list(range(1, len(problem.tasks) - 1))\n",
" max_duration = 0\n",
" sum_of_demands = 0\n",
"\n",
" for t in all_active_tasks:\n",
" task = problem.tasks[t]\n",
" if len(task.recipes) > 1:\n",
" return 0\n",
" recipe = task.recipes[0]\n",
" duration_map[t] = recipe.duration\n",
" for demand, resource in zip(recipe.demands, recipe.resources):\n",
" demand_map[(t, resource)] = demand\n",
" max_duration = max(max_duration, recipe.duration)\n",
" sum_of_demands += demand\n",
"\n",
" print(\n",
" f\"Compute a bin-packing lower bound with {len(all_active_tasks)}\"\n",
" + \" active tasks\",\n",
" flush=True,\n",
" )\n",
" all_combinations = []\n",
"\n",
" for t in all_active_tasks:\n",
" new_combinations = [[t]]\n",
"\n",
" for c in all_combinations:\n",
" if accept_new_candidate(problem, after, demand_map, c, t):\n",
" new_combinations.append(c + [t])\n",
"\n",
" all_combinations.extend(new_combinations)\n",
"\n",
" print(f\" - created {len(all_combinations)} combinations\")\n",
" if len(all_combinations) > 5000000:\n",
" return lower_bound # Abort if too large.\n",
"\n",
" # solve the selection model.\n",
"\n",
" # TODO(user): a few possible improvements:\n",
" # 1/ use \"dominating\" columns, i.e. if you can add a task to a column, then\n",
" # do not use that column.\n",
" # 2/ Merge all task with exactly same demands into one.\n",
" model = cp_model.CpModel()\n",
" model.name = f\"lower_bound_{problem.name}\"\n",
"\n",
" vars_per_task = collections.defaultdict(list)\n",
" all_vars = []\n",
" for c in all_combinations:\n",
" min_duration = max_duration\n",
" for t in c:\n",
" min_duration = min(min_duration, duration_map[t])\n",
" count = model.new_int_var(0, min_duration, f\"count_{c}\")\n",
" all_vars.append(count)\n",
" for t in c:\n",
" vars_per_task[t].append(count)\n",
"\n",
" # Each task must be performed.\n",
" for t in all_active_tasks:\n",
" model.add(sum(vars_per_task[t]) >= duration_map[t])\n",
"\n",
" # Objective\n",
" objective_var = model.new_int_var(lower_bound, sum_of_demands, \"objective_var\")\n",
" model.add(objective_var == sum(all_vars))\n",
"\n",
" model.minimize(objective_var)\n",
"\n",
" # solve model.\n",
" solver = cp_model.CpSolver()\n",
" solver.parameters.num_search_workers = 16\n",
" solver.parameters.max_time_in_seconds = _PREEMPTIVE_LB_TIME_LIMIT.value\n",
" status = solver.solve(model)\n",
" if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n",
" status_str = \"optimal\" if status == cp_model.OPTIMAL else \"\"\n",
" lower_bound = max(lower_bound, int(solver.best_objective_bound))\n",
" print(f\" - {status_str} static lower bound = {lower_bound}\", flush=True)\n",
"\n",
" return lower_bound\n",
" # Solve the model.\n",
" solver.solve(model)\n",
"\n",
"\n",
"def main(_):\n",
@@ -854,16 +452,7 @@
" problem = rcpsp_parser.problem()\n",
" print_problem_statistics(problem)\n",
"\n",
" intervals_of_tasks, after = analyse_dependency_graph(problem)\n",
" delays, initial_solution, optimal_found = compute_delays_between_nodes(\n",
" problem, intervals_of_tasks\n",
" )\n",
"\n",
" last_task = len(problem.tasks) - 1\n",
" key = (0, last_task)\n",
" lower_bound = delays[key][0] if key in delays else 0\n",
" if not optimal_found and _PREEMPTIVE_LB_TIME_LIMIT.value > 0.0:\n",
" lower_bound = compute_preemptive_lower_bound(problem, after, lower_bound)\n",
"\n",
" solve_rcpsp(\n",
" problem=problem,\n",
@@ -872,11 +461,6 @@
" active_tasks=set(range(1, last_task)),\n",
" source=0,\n",
" sink=last_task,\n",
" intervals_of_tasks=intervals_of_tasks,\n",
" delays=delays,\n",
" in_main_solve=True,\n",
" initial_solution=initial_solution,\n",
" lower_bound=lower_bound,\n",
" )\n",
"\n",
"\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # Data\n",
" costs = [\n",

View File

@@ -90,6 +90,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # Data\n",
" data_str = \"\"\"\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # Data\n",
" costs = [\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # Data\n",
" costs = [\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" \"\"\"Showcases assumptions.\"\"\"\n",
" # Creates the model.\n",

View File

@@ -90,6 +90,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame]:\n",
" \"\"\"Create the data for the example.\"\"\"\n",
"\n",

View File

@@ -83,6 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
"import copy\n",
"\n",
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
@@ -109,15 +111,24 @@
" if status == cp_model.OPTIMAL:\n",
" print(\"Optimal value of the original model: {}\".format(solver.objective_value))\n",
"\n",
" # Clones the model.\n",
" copy = model.clone()\n",
" # Creates a dictionary holding the model and the variables you want to use.\n",
" to_clone = {\n",
" \"model\": model,\n",
" \"x\": x,\n",
" \"y\": y,\n",
" \"z\": z,\n",
" }\n",
"\n",
" copy_x = copy.get_int_var_from_proto_index(x.index)\n",
" copy_y = copy.get_int_var_from_proto_index(y.index)\n",
" # Deep copy the dictionary.\n",
" clone = copy.deepcopy(to_clone)\n",
"\n",
" copy.add(copy_x + copy_y <= 1)\n",
" # Retrieve the cloned model and variables.\n",
" cloned_model: cp_model.CpModel = clone[\"model\"]\n",
" cloned_x = clone[\"x\"]\n",
" cloned_y = clone[\"y\"]\n",
" cloned_model.add(cloned_x + cloned_y <= 1)\n",
"\n",
" status = solver.solve(copy)\n",
" status = solver.solve(cloned_model)\n",
"\n",
" if status == cp_model.OPTIMAL:\n",
" print(\"Optimal value of the modified model: {}\".format(solver.objective_value))\n",

View File

@@ -91,6 +91,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"class VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback):\n",
" \"\"\"Print intermediate solutions.\"\"\"\n",
"\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" \"\"\"Minimal CP-SAT example to showcase calling the solver.\"\"\"\n",
" # Creates the model.\n",

View File

@@ -87,6 +87,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" \"\"\"Minimal jobshop problem.\"\"\"\n",
" # Data.\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" data = {}\n",
" data[\"weights\"] = [48, 30, 42, 36, 36, 48, 42, 42, 36, 24, 30, 30, 42, 36, 36]\n",

View File

@@ -88,6 +88,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"class NQueenSolutionPrinter(cp_model.CpSolverSolutionCallback):\n",
" \"\"\"Print intermediate solutions.\"\"\"\n",
"\n",
@@ -122,6 +123,7 @@
"\n",
"\n",
"\n",
"\n",
"def main(board_size: int) -> None:\n",
" # Creates the solver.\n",
" model = cp_model.CpModel()\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # Data.\n",
" num_nurses = 4\n",

View File

@@ -88,6 +88,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def main() -> None:\n",
" # This program tries to find an optimal assignment of nurses to shifts\n",
" # (3 shifts per day, for 7 days), subject to some constraints (see below).\n",

View File

@@ -86,6 +86,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"\n",
"def simple_sat_program():\n",
" \"\"\"Minimal CP-SAT example to showcase calling the solver.\"\"\"\n",
" # Creates the model.\n",

View File

@@ -41,10 +41,10 @@
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/algorithms/set_cover.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/set_cover/set_cover.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/algorithms/samples/set_cover.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/set_cover/samples/set_cover.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
@@ -83,7 +83,7 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.algorithms.python import set_cover\n",
"from ortools.set_cover.python import set_cover\n",
"\n",
"\n",
"def main():\n",