notebook: sync

This commit is contained in:
Corentin Le Molgat
2024-07-12 16:21:51 +02:00
parent 22f931251c
commit 7ad3909af3
5 changed files with 669 additions and 47 deletions

View File

@@ -72,8 +72,8 @@
"id": "description",
"metadata": {},
"source": [
"Cutting stock problem with the objective to minimize wasted space.\n",
"\n"
"\n",
"Cutting stock problem with the objective to minimize wasted space.\n"
]
},
{
@@ -85,9 +85,10 @@
"source": [
"import collections\n",
"import time\n",
"import numpy as np\n",
"\n",
"from ortools.sat.colab import flags\n",
"import numpy as np\n",
"\n",
"from google.protobuf import text_format\n",
"from ortools.linear_solver.python import model_builder as mb\n",
"from ortools.sat.python import cp_model\n",
@@ -95,13 +96,14 @@
"FLAGS = flags.FLAGS\n",
"\n",
"_OUTPUT_PROTO = flags.define_string(\n",
" 'output_proto', '', 'Output file to write the cp_model proto to.')\n",
" \"output_proto\", \"\", \"Output file to write the cp_model proto to.\"\n",
")\n",
"_PARAMS = flags.define_string(\n",
" 'params',\n",
" 'num_search_workers:8,log_search_progress:true,max_time_in_seconds:10',\n",
" 'Sat solver parameters.')\n",
"_SOLVER = flags.define_string(\n",
" 'solver', 'sat', 'Method used to solve: sat, mip.')\n",
" \"params\",\n",
" \"num_search_workers:8,log_search_progress:true,max_time_in_seconds:10\",\n",
" \"Sat solver parameters.\",\n",
")\n",
"_SOLVER = flags.define_string(\"solver\", \"sat\", \"Method used to solve: sat, mip.\")\n",
"\n",
"\n",
"DESIRED_LENGTHS = [\n",
@@ -173,9 +175,9 @@
" states.append(new_state)\n",
" state_to_index[new_state] = new_state_index\n",
" # Add the transition\n",
" transitions.append([\n",
" current_state_index, new_state_index, item_index, card + 1\n",
" ])\n",
" transitions.append(\n",
" [current_state_index, new_state_index, item_index, card + 1]\n",
" )\n",
"\n",
" return states, transitions\n",
"\n",
@@ -183,14 +185,19 @@
"def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: str):\n",
" \"\"\"Solve the cutting stock with arc-flow and the CP-SAT solver.\"\"\"\n",
" items = regroup_and_count(DESIRED_LENGTHS)\n",
" print('Items:', items)\n",
" print(\"Items:\", items)\n",
" num_items = len(DESIRED_LENGTHS)\n",
"\n",
" max_capacity = max(POSSIBLE_CAPACITIES)\n",
" states, transitions = create_state_graph(items, max_capacity)\n",
"\n",
" print('Dynamic programming has generated', len(states), 'states and',\n",
" len(transitions), 'transitions')\n",
" print(\n",
" \"Dynamic programming has generated\",\n",
" len(states),\n",
" \"states and\",\n",
" len(transitions),\n",
" \"transitions\",\n",
" )\n",
"\n",
" incoming_vars = collections.defaultdict(list)\n",
" outgoing_vars = collections.defaultdict(list)\n",
@@ -208,8 +215,8 @@
" count = items[item_index][1]\n",
" max_count = count // card\n",
" count_var = model.NewIntVar(\n",
" 0, max_count,\n",
" 'i%i_f%i_t%i_C%s' % (item_index, incoming, outgoing, card))\n",
" 0, max_count, \"i%i_f%i_t%i_C%s\" % (item_index, incoming, outgoing, card)\n",
" )\n",
" incoming_vars[incoming].append(count_var)\n",
" outgoing_vars[outgoing].append(count_var)\n",
" item_vars[item_index].append(count_var)\n",
@@ -219,7 +226,7 @@
" for state_index, state in enumerate(states):\n",
" if state_index == 0:\n",
" continue\n",
" exit_var = model.NewIntVar(0, num_items, 'e%i' % state_index)\n",
" exit_var = model.NewIntVar(0, num_items, \"e%i\" % state_index)\n",
" outgoing_vars[state_index].append(exit_var)\n",
" incoming_sink_vars.append(exit_var)\n",
" price = price_usage(state, POSSIBLE_CAPACITIES)\n",
@@ -228,8 +235,7 @@
"\n",
" # Flow conservation\n",
" for state_index in range(1, len(states)):\n",
" model.Add(\n",
" sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index]))\n",
" model.Add(sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index]))\n",
"\n",
" # Flow going out of the source must go in the sink\n",
" model.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars))\n",
@@ -238,13 +244,17 @@
" for item_index, size_and_count in enumerate(items):\n",
" num_arcs = len(item_vars[item_index])\n",
" model.Add(\n",
" sum(item_vars[item_index][i] * item_coeffs[item_index][i]\n",
" for i in range(num_arcs)) == size_and_count[1])\n",
" sum(\n",
" item_vars[item_index][i] * item_coeffs[item_index][i]\n",
" for i in range(num_arcs)\n",
" )\n",
" == size_and_count[1]\n",
" )\n",
"\n",
" # Objective is the sum of waste\n",
" model.Minimize(\n",
" sum(objective_vars[i] * objective_coeffs[i]\n",
" for i in range(len(objective_vars))))\n",
" sum(objective_vars[i] * objective_coeffs[i] for i in range(len(objective_vars)))\n",
" )\n",
"\n",
" # Output model proto to file.\n",
" if output_proto_file:\n",
@@ -261,13 +271,18 @@
"def solve_cutting_stock_with_arc_flow_and_mip():\n",
" \"\"\"Solve the cutting stock with arc-flow and a MIP solver.\"\"\"\n",
" items = regroup_and_count(DESIRED_LENGTHS)\n",
" print('Items:', items)\n",
" print(\"Items:\", items)\n",
" num_items = len(DESIRED_LENGTHS)\n",
" max_capacity = max(POSSIBLE_CAPACITIES)\n",
" states, transitions = create_state_graph(items, max_capacity)\n",
"\n",
" print('Dynamic programming has generated', len(states), 'states and',\n",
" len(transitions), 'transitions')\n",
" print(\n",
" \"Dynamic programming has generated\",\n",
" len(states),\n",
" \"states and\",\n",
" len(transitions),\n",
" \"transitions\",\n",
" )\n",
"\n",
" incoming_vars = collections.defaultdict(list)\n",
" outgoing_vars = collections.defaultdict(list)\n",
@@ -285,8 +300,10 @@
" for outgoing, incoming, item_index, card in transitions:\n",
" count = items[item_index][1]\n",
" count_var = model.new_int_var(\n",
" 0, count, 'a%i_i%i_f%i_t%i_c%i' % (var_index, item_index, incoming,\n",
" outgoing, card))\n",
" 0,\n",
" count,\n",
" \"a%i_i%i_f%i_t%i_c%i\" % (var_index, item_index, incoming, outgoing, card),\n",
" )\n",
" var_index += 1\n",
" incoming_vars[incoming].append(count_var)\n",
" outgoing_vars[outgoing].append(count_var)\n",
@@ -296,7 +313,7 @@
" for state_index, state in enumerate(states):\n",
" if state_index == 0:\n",
" continue\n",
" exit_var = model.new_int_var(0, num_items, 'e%i' % state_index)\n",
" exit_var = model.new_int_var(0, num_items, \"e%i\" % state_index)\n",
" outgoing_vars[state_index].append(exit_var)\n",
" incoming_sink_vars.append(exit_var)\n",
" price = price_usage(state, POSSIBLE_CAPACITIES)\n",
@@ -306,41 +323,49 @@
" # Flow conservation\n",
" for state_index in range(1, len(states)):\n",
" model.add(\n",
" mb.LinearExpr.sum(incoming_vars[state_index]) == mb.LinearExpr.sum(\n",
" outgoing_vars[state_index]))\n",
" mb.LinearExpr.sum(incoming_vars[state_index])\n",
" == mb.LinearExpr.sum(outgoing_vars[state_index])\n",
" )\n",
"\n",
" # Flow going out of the source must go in the sink\n",
" model.add(\n",
" mb.LinearExpr.sum(outgoing_vars[0]) == mb.LinearExpr.sum(\n",
" incoming_sink_vars))\n",
" mb.LinearExpr.sum(outgoing_vars[0]) == mb.LinearExpr.sum(incoming_sink_vars)\n",
" )\n",
"\n",
" # Items must be placed\n",
" for item_index, size_and_count in enumerate(items):\n",
" num_arcs = len(item_vars[item_index])\n",
" model.add(\n",
" mb.LinearExpr.sum([item_vars[item_index][i] * item_coeffs[item_index][i]\n",
" for i in range(num_arcs)]) == size_and_count[1])\n",
" mb.LinearExpr.sum(\n",
" [\n",
" item_vars[item_index][i] * item_coeffs[item_index][i]\n",
" for i in range(num_arcs)\n",
" ]\n",
" )\n",
" == size_and_count[1]\n",
" )\n",
"\n",
" # Objective is the sum of waste\n",
" model.minimize(np.dot(objective_vars, objective_coeffs))\n",
"\n",
" solver = mb.ModelSolver('scip')\n",
" solver = mb.ModelSolver(\"scip\")\n",
" solver.enable_output(True)\n",
" status = solver.solve(model)\n",
"\n",
" ### Output the solution.\n",
" if status == mb.SolveStatus.OPTIMAL or status == mb.SolveStatus.FEASIBLE:\n",
" print('Objective value = %f found in %.2f s' %\n",
" (solver.objective_value, time.time() - start_time))\n",
" print(\n",
" \"Objective value = %f found in %.2f s\"\n",
" % (solver.objective_value, time.time() - start_time)\n",
" )\n",
" else:\n",
" print('No solution')\n",
" print(\"No solution\")\n",
"\n",
"\n",
"def main(_):\n",
" \"\"\"Main function\"\"\"\n",
" if _SOLVER.value == 'sat':\n",
" solve_cutting_stock_with_arc_flow_and_sat(_OUTPUT_PROTO.value,\n",
" _PARAMS.value)\n",
" \"\"\"Main function.\"\"\"\n",
" if _SOLVER.value == \"sat\":\n",
" solve_cutting_stock_with_arc_flow_and_sat(_OUTPUT_PROTO.value, _PARAMS.value)\n",
" else: # 'mip'\n",
" solve_cutting_stock_with_arc_flow_and_mip()\n",
"\n",

View File

@@ -0,0 +1,161 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2023 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# all_different_except_zero_sample_sat"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/sat/all_different_except_zero_sample_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/sat/samples/all_different_except_zero_sample_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"%pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"\n",
"Implements AllDifferentExcept0 using atomic constraints.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"import collections\n",
"\n",
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"def all_different_except_0():\n",
" \"\"\"Encode the AllDifferentExcept0 constraint.\"\"\"\n",
"\n",
" # Model.\n",
" model = cp_model.CpModel()\n",
"\n",
" # Declare our primary variable.\n",
" x = [model.new_int_var(0, 10, f\"x{i}\") for i in range(5)]\n",
"\n",
" # Expand the AllDifferentExcept0 constraint.\n",
" variables_per_value = collections.defaultdict(list)\n",
" all_values = set()\n",
"\n",
" for var in x:\n",
" all_encoding_literals = []\n",
" # Domains of variables are represented by flat intervals.\n",
" for i in range(0, len(var.proto.domain), 2):\n",
" start = var.proto.domain[i]\n",
" end = var.proto.domain[i + 1]\n",
" for value in range(start, end + 1): # Intervals are inclusive.\n",
" # Create the literal attached to var == value.\n",
" bool_var = model.new_bool_var(f\"{var} == {value}\")\n",
" model.add(var == value).only_enforce_if(bool_var)\n",
"\n",
" # Collect all encoding literals for a given variable.\n",
" all_encoding_literals.append(bool_var)\n",
"\n",
" # Collect all encoding literals for a given value.\n",
" variables_per_value[value].append(bool_var)\n",
"\n",
" # Collect all different values.\n",
" all_values.add(value)\n",
"\n",
" # One variable must have exactly one value.\n",
" model.add_exactly_one(all_encoding_literals)\n",
"\n",
" # Add the all_different constraints.\n",
" for value, literals in variables_per_value.items():\n",
" if value == 0:\n",
" continue\n",
" model.add_at_most_one(literals)\n",
"\n",
" model.add(x[0] == 0)\n",
" model.add(x[1] == 0)\n",
"\n",
" model.maximize(sum(x))\n",
"\n",
" # Create a solver and solve.\n",
" solver = cp_model.CpSolver()\n",
" status = solver.solve(model)\n",
"\n",
" # Checks and prints the output.\n",
" if status == cp_model.OPTIMAL:\n",
" print(f\"Optimal solution: {solver.objective_value}, expected: 27.0\")\n",
" elif status == cp_model.FEASIBLE:\n",
" print(f\"Feasible solution: {solver.objective_value}, optimal 27.0\")\n",
" elif status == cp_model.INFEASIBLE:\n",
" print(\"The model is infeasible\")\n",
" else:\n",
" print(\"Something went wrong. Please check the status and the log\")\n",
"\n",
"\n",
"all_different_except_0()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,156 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2023 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# interval_relations_sample_sat"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/sat/interval_relations_sample_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/sat/samples/interval_relations_sample_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"%pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"\n",
"Builds temporal relations between intervals.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"def interval_relations_sample_sat():\n",
" \"\"\"Showcases how to build temporal relations between intervals.\"\"\"\n",
" model = cp_model.CpModel()\n",
" horizon = 100\n",
"\n",
" # An interval can be created from three 1-var affine expressions.\n",
" start_var = model.new_int_var(0, horizon, \"start\")\n",
"    duration = 10  # Python CP-SAT code accepts integer variables or constants.\n",
" end_var = model.new_int_var(0, horizon, \"end\")\n",
" interval_var = model.new_interval_var(start_var, duration, end_var, \"interval\")\n",
"\n",
" # If the size is fixed, a simpler version uses the start expression and the\n",
" # size.\n",
" fixed_size_start_var = model.new_int_var(0, horizon, \"fixed_start\")\n",
" fixed_size_duration = 10\n",
" fixed_size_interval_var = model.new_fixed_size_interval_var(\n",
" fixed_size_start_var,\n",
" fixed_size_duration,\n",
" \"fixed_size_interval_var\",\n",
" )\n",
"\n",
" # An optional interval can be created from three 1-var affine expressions and\n",
" # a literal.\n",
" opt_start_var = model.new_int_var(0, horizon, \"opt_start\")\n",
" opt_duration = model.new_int_var(2, 6, \"opt_size\")\n",
" opt_end_var = model.new_int_var(0, horizon, \"opt_end\")\n",
" opt_presence_var = model.new_bool_var(\"opt_presence\")\n",
" opt_interval_var = model.new_optional_interval_var(\n",
" opt_start_var, opt_duration, opt_end_var, opt_presence_var, \"opt_interval\"\n",
" )\n",
"\n",
" # If the size is fixed, a simpler version uses the start expression, the\n",
" # size, and the presence literal.\n",
" opt_fixed_size_start_var = model.new_int_var(0, horizon, \"opt_fixed_start\")\n",
" opt_fixed_size_duration = 10\n",
" opt_fixed_size_presence_var = model.new_bool_var(\"opt_fixed_presence\")\n",
" opt_fixed_size_interval_var = model.new_optional_fixed_size_interval_var(\n",
" opt_fixed_size_start_var,\n",
" opt_fixed_size_duration,\n",
" opt_fixed_size_presence_var,\n",
" \"opt_fixed_size_interval_var\",\n",
" )\n",
"\n",
" # Simple precedence between two non optional intervals.\n",
" model.add(interval_var.start_expr() >= fixed_size_interval_var.end_expr())\n",
"\n",
" # Synchronize start between two intervals (one optional, one not)\n",
" model.add(\n",
" interval_var.start_expr() == opt_interval_var.start_expr()\n",
" ).only_enforce_if(opt_presence_var)\n",
"\n",
" # Exact delay between two optional intervals.\n",
" exact_delay: int = 5\n",
" model.add(\n",
" opt_interval_var.start_expr()\n",
" == opt_fixed_size_interval_var.end_expr() + exact_delay\n",
" ).only_enforce_if(opt_presence_var, opt_fixed_size_presence_var)\n",
"\n",
"\n",
"interval_relations_sample_sat()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -95,7 +95,7 @@
" durations: Sequence[int],\n",
" presences: Sequence[cp_model.IntVar],\n",
" ranks: Sequence[cp_model.IntVar],\n",
"):\n",
") -> None:\n",
" \"\"\"This method uses a circuit constraint to rank tasks.\n",
"\n",
" This method assumes that all starts are disjoint, meaning that all tasks have\n",
@@ -105,7 +105,7 @@
" To implement this ranking, we will create a dense graph with num_tasks + 1\n",
" nodes.\n",
" The extra node (with id 0) will be used to decide which task is first with\n",
" its only outgoing arc, and whhich task is last with its only incoming arc.\n",
" its only outgoing arc, and which task is last with its only incoming arc.\n",
" Each task i will be associated with id i + 1, and an arc between i + 1 and j +\n",
" 1 indicates that j is the immediate successor of i.\n",
"\n",
@@ -171,7 +171,7 @@
" model.add_circuit(arcs)\n",
"\n",
"\n",
"def ranking_sample_sat():\n",
"def ranking_sample_sat() -> None:\n",
" \"\"\"Ranks tasks in a NoOverlap constraint.\"\"\"\n",
"\n",
" model = cp_model.CpModel()\n",

View File

@@ -0,0 +1,280 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2023 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# transitions_in_no_overlap_sample_sat"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/sat/transitions_in_no_overlap_sample_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/sat/samples/transitions_in_no_overlap_sample_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"%pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"\n",
"Implements transition times and costs in a no_overlap constraint.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"from typing import Dict, List, Sequence, Tuple, Union\n",
"\n",
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"def transitive_reduction_with_circuit_delays_and_penalties(\n",
" model: cp_model.CpModel,\n",
" starts: Sequence[cp_model.IntVar],\n",
" durations: Sequence[int],\n",
" presences: Sequence[Union[cp_model.IntVar, bool]],\n",
" penalties: Dict[Tuple[int, int], int],\n",
" delays: Dict[Tuple[int, int], int],\n",
") -> Sequence[Tuple[cp_model.IntVar, int]]:\n",
" \"\"\"This method uses a circuit constraint to rank tasks.\n",
"\n",
" This method assumes that all starts are disjoint, meaning that all tasks have\n",
" a strictly positive duration, and they appear in the same NoOverlap\n",
" constraint.\n",
"\n",
" The extra node (with id 0) will be used to decide which task is first with\n",
" its only outgoing arc, and which task is last with its only incoming arc.\n",
" Each task i will be associated with id i + 1, and an arc between i + 1 and j +\n",
" 1 indicates that j is the immediate successor of i.\n",
"\n",
" The circuit constraint ensures there is at most 1 hamiltonian cycle of\n",
" length > 1. If no such path exists, then no tasks are active.\n",
" We also need to enforce that any hamiltonian cycle of size > 1 must contain\n",
" the node 0. And thus, there is a self loop on node 0 iff the circuit is empty.\n",
"\n",
" Args:\n",
" model: The CpModel to add the constraints to.\n",
" starts: The array of starts variables of all tasks.\n",
" durations: the durations of all tasks.\n",
" presences: The array of presence variables of all tasks.\n",
"      penalties: the dict mapping the arc (`tail_index`, `head_index`) to the\n",
"        `penalty` to add to the cost if task `head_index` is the immediate\n",
"        successor of task `tail_index`.\n",
"      delays: the dict mapping the arc (`tail_index`, `head_index`) to the\n",
"        extra `delay` to insert between the end of task `tail_index` and the\n",
"        start of task `head_index` when `head_index` is the immediate\n",
"        successor of `tail_index`.\n",
"\n",
" Returns:\n",
" The list of pairs (Boolean variables, penalty) to be added to the objective.\n",
" \"\"\"\n",
"\n",
" num_tasks = len(starts)\n",
" all_tasks = range(num_tasks)\n",
"\n",
" arcs: List[cp_model.ArcT] = []\n",
" penalty_terms = []\n",
" for i in all_tasks:\n",
" # if node i is first.\n",
" start_lit = model.new_bool_var(f\"start_{i}\")\n",
" arcs.append((0, i + 1, start_lit))\n",
"\n",
" # As there are no other constraints on the problem, we can add this\n",
" # redundant constraint.\n",
" model.add(starts[i] == 0).only_enforce_if(start_lit)\n",
"\n",
" # if node i is last.\n",
" end_lit = model.new_bool_var(f\"end_{i}\")\n",
" arcs.append((i + 1, 0, end_lit))\n",
"\n",
" for j in all_tasks:\n",
" if i == j:\n",
" arcs.append((i + 1, i + 1, ~presences[i]))\n",
" else:\n",
" literal = model.new_bool_var(f\"arc_{i}_to_{j}\")\n",
" arcs.append((i + 1, j + 1, literal))\n",
"\n",
" # To perform the transitive reduction from precedences to successors,\n",
" # we need to tie the starts of the tasks with 'literal'.\n",
" # In a pure problem, the following inequality could be an equality.\n",
" # It is not true in general.\n",
" #\n",
" # Note that we could use this literal to penalize the transition, add an\n",
" # extra delay to the precedence.\n",
" min_delay = 0\n",
" key = (i, j)\n",
" if key in delays:\n",
" min_delay = delays[key]\n",
" model.add(\n",
" starts[j] >= starts[i] + durations[i] + min_delay\n",
" ).only_enforce_if(literal)\n",
"\n",
" # Create the penalties.\n",
" if key in penalties:\n",
" penalty_terms.append((literal, penalties[key]))\n",
"\n",
" # Manage the empty circuit\n",
" empty = model.new_bool_var(\"empty\")\n",
" arcs.append((0, 0, empty))\n",
"\n",
" for i in all_tasks:\n",
" model.add_implication(empty, ~presences[i])\n",
"\n",
" # Add the circuit constraint.\n",
" model.add_circuit(arcs)\n",
"\n",
" return penalty_terms\n",
"\n",
"\n",
"def transitions_in_no_overlap_sample_sat():\n",
" \"\"\"Implement transitions in a NoOverlap constraint.\"\"\"\n",
"\n",
" model = cp_model.CpModel()\n",
" horizon = 40\n",
" num_tasks = 4\n",
"\n",
" # Breaking the natural sequence induces a fixed penalty.\n",
" penalties = {\n",
" (1, 0): 10,\n",
" (2, 0): 10,\n",
" (3, 0): 10,\n",
" (2, 1): 10,\n",
" (3, 1): 10,\n",
" (3, 2): 10,\n",
" }\n",
"\n",
" # Switching from an odd to even or even to odd task indices induces a delay.\n",
" delays = {\n",
" (1, 0): 10,\n",
" (0, 1): 10,\n",
" (3, 0): 10,\n",
" (0, 3): 10,\n",
" (1, 2): 10,\n",
" (2, 1): 10,\n",
" (3, 2): 10,\n",
" (2, 3): 10,\n",
" }\n",
"\n",
" all_tasks = range(num_tasks)\n",
"\n",
" starts = []\n",
" durations = []\n",
" intervals = []\n",
" presences = []\n",
"\n",
" # Creates intervals, all present. But the cost is robust w.r.t. optional\n",
" # intervals.\n",
" for t in all_tasks:\n",
" start = model.new_int_var(0, horizon, f\"start[{t}]\")\n",
" duration = 5\n",
" presence = True\n",
" interval = model.new_optional_fixed_size_interval_var(\n",
" start, duration, presence, f\"opt_interval[{t}]\"\n",
" )\n",
"\n",
" starts.append(start)\n",
" durations.append(duration)\n",
" intervals.append(interval)\n",
" presences.append(presence)\n",
"\n",
" # Adds NoOverlap constraint.\n",
" model.add_no_overlap(intervals)\n",
"\n",
" # Adds ranking constraint.\n",
" penalty_terms = transitive_reduction_with_circuit_delays_and_penalties(\n",
" model, starts, durations, presences, penalties, delays\n",
" )\n",
"\n",
"    # Minimize the sum of penalties.\n",
" model.minimize(sum(var * penalty for var, penalty in penalty_terms))\n",
"\n",
"    # In practice, only one penalty can happen. Thus the two even tasks are\n",
" # together, same for the two odd tasks.\n",
" # Because of the penalties, the optimal sequence is 0 -> 2 -> 1 -> 3\n",
" # which induces one penalty and one delay.\n",
"\n",
"    # Solves the model.\n",
" solver = cp_model.CpSolver()\n",
" status = solver.solve(model)\n",
"\n",
" if status == cp_model.OPTIMAL:\n",
" # Prints out the makespan and the start times and ranks of all tasks.\n",
" print(f\"Optimal cost: {solver.objective_value}\")\n",
" for t in all_tasks:\n",
" if solver.value(presences[t]):\n",
" print(f\"Task {t} starts at {solver.value(starts[t])} \")\n",
" else:\n",
"            print(f\"Task {t} is not performed\")\n",
" else:\n",
" print(f\"Solver exited with nonoptimal status: {status}\")\n",
"\n",
"\n",
"transitions_in_no_overlap_sample_sat()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}