362 lines
13 KiB
Plaintext
362 lines
13 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "google",
|
|
"metadata": {},
|
|
"source": [
|
|
"##### Copyright 2025 Google LLC."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "apache",
|
|
"metadata": {},
|
|
"source": [
|
|
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
|
|
"you may not use this file except in compliance with the License.\n",
|
|
"You may obtain a copy of the License at\n",
|
|
"\n",
|
|
" http://www.apache.org/licenses/LICENSE-2.0\n",
|
|
"\n",
|
|
"Unless required by applicable law or agreed to in writing, software\n",
|
|
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
|
|
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
|
|
"See the License for the specific language governing permissions and\n",
|
|
"limitations under the License.\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "basename",
|
|
"metadata": {},
|
|
"source": [
|
|
"# cumulative_variable_profile_sample_sat"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "link",
|
|
"metadata": {},
|
|
"source": [
|
|
"<table align=\"left\">\n",
|
|
"<td>\n",
|
|
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/sat/cumulative_variable_profile_sample_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
|
|
"</td>\n",
|
|
"<td>\n",
|
|
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/sat/samples/cumulative_variable_profile_sample_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
|
|
"</td>\n",
|
|
"</table>"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "doc",
|
|
"metadata": {},
|
|
"source": [
|
|
"First, you must install the [ortools](https://pypi.org/project/ortools/) package in this colab."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "install",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"%pip install ortools"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "description",
|
|
"metadata": {},
|
|
"source": [
|
|
"\n",
|
|
"Solves a scheduling problem with a min and max profile for the work load.\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "code",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import io\n",
|
|
"\n",
|
|
"import pandas as pd\n",
|
|
"\n",
|
|
"from ortools.sat.python import cp_model\n",
|
|
"\n",
|
|
"\n",
|
|
def create_data_model() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the dataframes that describe the model.

    Returns:
      A (max_load_df, min_load_df, tasks_df) tuple.  The two load frames map
      the start hour of each 2h period to a bound on the work load; tasks_df
      is indexed by task name and carries duration, load and priority columns.
    """

    def parse(raw: str, **kwargs) -> pd.DataFrame:
        # Every inline table below is whitespace separated.
        return pd.read_table(io.StringIO(raw), sep=r"\s+", **kwargs)

    raw_max_load = """
      start_hour  max_load
      0           0
      2           0
      4           3
      6           6
      8           8
      10          12
      12          8
      14          12
      16          10
      18          6
      20          4
      22          0
    """

    raw_min_load = """
      start_hour  min_load
      0           0
      2           0
      4           0
      6           0
      8           3
      10          3
      12          1
      14          3
      16          3
      18          1
      20          1
      22          0
    """

    raw_tasks = """
      name  duration  load  priority
      t1    60        3     2
      t2    180       2     1
      t3    240       5     3
      t4    90        4     2
      t5    120       3     1
      t6    300       3     3
      t7    120       1     2
      t8    100       5     2
      t9    110       2     1
      t10   300       5     3
      t11   90        4     2
      t12   120       3     1
      t13   250       3     3
      t14   120       1     2
      t15   40        5     3
      t16   70        4     2
      t17   90        8     1
      t18   40        3     3
      t19   120       5     2
      t20   60        3     2
      t21   180       2     1
      t22   240       5     3
      t23   90        4     2
      t24   120       3     1
      t25   300       3     3
      t26   120       1     2
      t27   100       5     2
      t28   110       2     1
      t29   300       5     3
      t30   90        4     2
    """

    # Tasks are indexed by their name; the load profiles keep a plain range index.
    return parse(raw_max_load), parse(raw_min_load), parse(raw_tasks, index_col=0)
|
|
"\n",
|
|
"\n",
|
|
def check_solution(
    tasks: list[tuple[int, int, int]],
    min_load_df: pd.DataFrame,
    max_load_df: pd.DataFrame,
    period_length: int,
    horizon: int,
) -> bool:
    """Checks the solution validity against the min and max load constraints.

    Args:
      tasks: one (start, duration, load) tuple per performed task.
      min_load_df: per-period minimum load, keyed by start_hour.
      max_load_df: per-period maximum load, keyed by start_hour.
      period_length: length of one profile period, in minutes.
      horizon: number of minutes covered by the schedule.

    Returns:
      True if the accumulated load stays within [min, max] at every minute,
      False otherwise (with a diagnostic printed for the first violation).
    """
    minutes_per_hour = 60
    actual = [0] * horizon
    lower = [0] * horizon
    upper = [0] * horizon

    # The complexity of the checker is linear in the number of time points, and
    # should be improved.
    # Accumulate each task's load over every minute it is running.
    for start, duration, load in tasks:
        for offset in range(duration):
            actual[start + offset] += load
    # Expand the hourly profile rows into per-minute bounds.
    for row in max_load_df.itertuples():
        begin = row.start_hour * minutes_per_hour
        for offset in range(period_length):
            upper[begin + offset] = row.max_load
    for row in min_load_df.itertuples():
        begin = row.start_hour * minutes_per_hour
        for offset in range(period_length):
            lower[begin + offset] = row.min_load

    for time, load in enumerate(actual):
        if load > upper[time]:
            print(
                f"actual load {load} at time {time} is greater"
                f" than max load {upper[time]}"
            )
            return False
        if load < lower[time]:
            print(
                f"actual load {load} at time {time} is"
                f" less than min load {lower[time]}"
            )
            return False
    return True
|
|
"\n",
|
|
"\n",
|
|
def main(_=None) -> None:
    """Creates the model and solves it.

    The argument is unused; it keeps the absl-style ``main(argv)`` signature
    while its default makes the bare ``main()`` call below valid.  (The
    original ``def main(_)`` raised TypeError when invoked as ``main()``.)
    """
    max_load_df, min_load_df, tasks_df = create_data_model()

    # Create the model.
    model = cp_model.CpModel()

    # Get the max capacity from the capacity dataframe.
    max_load = max_load_df.max_load.max()
    print(f"Max capacity = {max_load}")
    print(f"#tasks = {len(tasks_df)}")

    minutes_per_hour: int = 60
    horizon: int = 24 * 60

    # Variables: one optional task per row of tasks_df, each with a start
    # time and a "performed" literal controlling its presence.
    starts = model.new_int_var_series(
        name="starts",
        lower_bounds=0,
        upper_bounds=horizon - tasks_df.duration,
        index=tasks_df.index,
    )
    performed = model.new_bool_var_series(name="performed", index=tasks_df.index)

    intervals = model.new_optional_fixed_size_interval_var_series(
        name="intervals",
        index=tasks_df.index,
        starts=starts,
        sizes=tasks_df.duration,
        are_present=performed,
    )

    # Set up the max profile. We use fixed (intervals, demands) to fill in the
    # space between the actual max load profile and the max capacity.
    time_period_max_intervals = model.new_fixed_size_interval_var_series(
        name="time_period_max_intervals",
        index=max_load_df.index,
        starts=max_load_df.start_hour * minutes_per_hour,
        sizes=minutes_per_hour * 2,
    )
    time_period_max_heights = max_load - max_load_df.max_load

    # Cumulative constraint for the max profile.
    model.add_cumulative(
        intervals.to_list() + time_period_max_intervals.to_list(),
        tasks_df.load.to_list() + time_period_max_heights.to_list(),
        max_load,
    )

    # Set up complemented intervals (from 0 to start, and from start + size to
    # horizon).
    prefix_intervals = model.new_optional_interval_var_series(
        name="prefix_intervals",
        index=tasks_df.index,
        starts=0,
        sizes=starts,
        ends=starts,
        are_present=performed,
    )

    suffix_intervals = model.new_optional_interval_var_series(
        name="suffix_intervals",
        index=tasks_df.index,
        starts=starts + tasks_df.duration,
        sizes=horizon - starts - tasks_df.duration,
        ends=horizon,
        are_present=performed,
    )

    # Set up the min profile. We use complemented intervals to maintain the
    # complement of the work load, and fixed intervals to enforce the min
    # number of active workers per time period.
    #
    # Note that this works only if the max load cumulative is also added to the
    # model.
    time_period_min_intervals = model.new_fixed_size_interval_var_series(
        name="time_period_min_intervals",
        index=min_load_df.index,
        starts=min_load_df.start_hour * minutes_per_hour,
        sizes=minutes_per_hour * 2,
    )
    time_period_min_heights = min_load_df.min_load

    # We take into account optional intervals. The actual capacity of the min load
    # cumulative is the sum of all the active demands.
    sum_of_demands = sum(tasks_df.load)
    complement_capacity = model.new_int_var(0, sum_of_demands, "complement_capacity")
    model.add(complement_capacity == performed.dot(tasks_df.load))

    # Cumulative constraint for the min profile.
    model.add_cumulative(
        prefix_intervals.to_list()
        + suffix_intervals.to_list()
        + time_period_min_intervals.to_list(),
        tasks_df.load.to_list()
        + tasks_df.load.to_list()
        + time_period_min_heights.to_list(),
        complement_capacity,
    )

    # Objective: maximize the value of performed intervals.
    # 1 is the max priority.
    max_priority = max(tasks_df.priority)
    model.maximize(sum(performed * (max_priority + 1 - tasks_df.priority)))

    # Create the solver and solve the model.
    solver = cp_model.CpSolver()
    # solver.parameters.log_search_progress = True  # Uncomment to see the logs.
    solver.parameters.num_workers = 16
    solver.parameters.max_time_in_seconds = 30.0
    status = solver.solve(model)

    if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
        start_values = solver.values(starts)
        performed_values = solver.boolean_values(performed)
        tasks: list[tuple[int, int, int]] = []
        for task in tasks_df.index:
            if performed_values[task]:
                print(
                    f'task {task} duration={tasks_df["duration"][task]} '
                    f'load={tasks_df["load"][task]} starts at {start_values[task]}'
                )
                tasks.append(
                    (start_values[task], tasks_df.duration[task], tasks_df.load[task])
                )
            else:
                print(f"task {task} is not performed")
        # Re-check the solver's answer with an independent per-minute checker.
        assert check_solution(
            tasks=tasks,
            min_load_df=min_load_df,
            max_load_df=max_load_df,
            period_length=2 * minutes_per_hour,
            horizon=horizon,
        )
    elif status == cp_model.INFEASIBLE:
        print("No solution found")
    else:
        print("Something is wrong, check the status and the log of the solve")


main()
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"language_info": {
|
|
"name": "python"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|