This commit is contained in:
lperron@google.com
2015-01-16 17:02:32 +00:00
parent 7c8441ee74
commit 1aaf2814d7
39 changed files with 7760 additions and 20 deletions

View File

@@ -7,11 +7,15 @@ DYNAMIC_BASE_LIBS = \
DYNAMIC_LP_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)linear_solver.$(DYNAMIC_LIB_SUFFIX) \
$(LIB_DIR)/$(LIBPREFIX)bop.$(DYNAMIC_LIB_SUFFIX) \
$(LIB_DIR)/$(LIBPREFIX)glop.$(DYNAMIC_LIB_SUFFIX)
DYNAMIC_ALGORITHMS_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)algorithms.$(DYNAMIC_LIB_SUFFIX)
DYNAMIC_SPLIT_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)split.$(DYNAMIC_LIB_SUFFIX)
DYNAMIC_CP_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)constraint_solver.$(DYNAMIC_LIB_SUFFIX)
@@ -48,6 +52,7 @@ DYNAMIC_GRAPH_DEPS = $(DYNAMIC_GRAPH_LIBS) \
DYNAMIC_LP_DEPS = $(DYNAMIC_LP_LIBS) \
$(GEN_DIR)/linear_solver/linear_solver2.pb.h \
$(DYNAMIC_SPLIT_LIBS) \
$(DYNAMIC_BASE_DEPS)
DYNAMIC_ALGORITHMS_DEPS = $(DYNAMIC_ALGORITHMS_LIBS)\
@@ -55,6 +60,7 @@ DYNAMIC_ALGORITHMS_DEPS = $(DYNAMIC_ALGORITHMS_LIBS)\
$(DYNAMIC_LP_DEPS)
DYNAMIC_SAT_DEPS = $(DYNAMIC_SAT_LIBS) \
$(DYNAMIC_SPLIT_LIBS) \
$(DYNAMIC_ALGORITHMS_DEPS)
DYNAMIC_CP_DEPS = $(DYNAMIC_CP_LIBS) \
@@ -99,7 +105,9 @@ DYNAMIC_GRAPH_LNK = \
DYNAMIC_LP_LNK = \
$(DYNAMIC_PRE_LIB)linear_solver$(DYNAMIC_POST_LIB) \
$(DYNAMIC_PRE_LIB)bop$(DYNAMIC_POST_LIB) \
$(DYNAMIC_PRE_LIB)glop$(DYNAMIC_POST_LIB) \
$(DYNAMIC_PRE_LIB)split$(DYNAMIC_POST_LIB) \
$(DYNAMIC_BASE_LNK) \
$(DYNAMIC_LD_LP_DEPS) # Third party linear solvers.
@@ -109,6 +117,7 @@ DYNAMIC_ALGORITHMS_LNK = \
DYNAMIC_SAT_LNK = \
$(DYNAMIC_PRE_LIB)sat$(DYNAMIC_POST_LIB) \
$(DYNAMIC_PRE_LIB)split$(DYNAMIC_POST_LIB) \
$(DYNAMIC_ALGORITHMS_LNK)
DYNAMIC_CP_LNK = \
@@ -156,11 +165,15 @@ STATIC_BASE_LIBS = \
STATIC_LP_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)linear_solver.$(STATIC_LIB_SUFFIX) \
$(LIB_DIR)/$(LIBPREFIX)bop.$(STATIC_LIB_SUFFIX) \
$(LIB_DIR)/$(LIBPREFIX)glop.$(STATIC_LIB_SUFFIX)
STATIC_ALGORITHMS_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)algorithms.$(STATIC_LIB_SUFFIX)
STATIC_SPLIT_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)split.$(STATIC_LIB_SUFFIX)
STATIC_CP_LIBS = \
$(LIB_DIR)/$(LIBPREFIX)constraint_solver.$(STATIC_LIB_SUFFIX)
@@ -187,6 +200,7 @@ STATIC_GRAPH_DEPS = $(STATIC_GRAPH_LIBS) \
$(STATIC_BASE_DEPS)
STATIC_LP_DEPS = $(STATIC_LP_LIBS) \
$(STATIC_SPLIT_LIBS) \
$(STATIC_BASE_DEPS)
STATIC_ALGORITHMS_DEPS = $(STATIC_ALGORITHMS_LIBS) \
@@ -219,6 +233,7 @@ STATIC_ALL_DEPS = \
$(STATIC_GRAPH_LIBS) \
$(STATIC_LP_LIBS) \
$(STATIC_ALGORITHMS_LIBS) \
$(STATIC_SPLIT_LIBS) \
$(STATIC_SAT_LIBS) \
$(STATIC_CP_LIBS) \
$(STATIC_ROUTING_LIBS)
@@ -235,6 +250,7 @@ STATIC_GRAPH_LNK = \
STATIC_LP_LNK = \
$(STATIC_PRE_LIB)linear_solver$(STATIC_POST_LIB) \
$(STATIC_PRE_LIB)bop$(STATIC_POST_LIB) \
$(STATIC_PRE_LIB)glop$(STATIC_POST_LIB) \
$(STATIC_BASE_LNK) \
$(STATIC_LD_LP_DEPS) # Third party linear solvers.
@@ -245,7 +261,8 @@ STATIC_ALGORITHMS_LNK = \
STATIC_SAT_LNK = \
$(STATIC_PRE_LIB)sat$(STATIC_POST_LIB) \
$(STATIC_ALGORITHMS_LNK)
$(STATIC_PRE_LIB)split$(STATIC_POST_LIB) \
$(STATIC_ALGORITHMS_LNK) \
STATIC_CP_LNK = \
$(STATIC_PRE_LIB)constraint_solver$(STATIC_POST_LIB) \
@@ -319,6 +336,7 @@ clean_cc:
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)util.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)constraint_solver.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)linear_solver.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)bop.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)glop.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)graph.$(DYNAMIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)routing.$(DYNAMIC_LIB_SUFFIX)
@@ -334,6 +352,7 @@ clean_cc:
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)util.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)constraint_solver.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)linear_solver.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)bop.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)glop.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)graph.$(STATIC_LIB_SUFFIX)
-$(DEL) $(LIB_DIR)$S$(LIBPREFIX)routing.$(STATIC_LIB_SUFFIX)
@@ -348,6 +367,7 @@ clean_cc:
-$(DEL) $(OBJ_DIR)$Sbase$S*.$O
-$(DEL) $(OBJ_DIR)$Sold_flatzinc$S*.$O
-$(DEL) $(OBJ_DIR)$Sflatzinc$S*.$O
-$(DEL) $(OBJ_DIR)$Sbop$S*.$O
-$(DEL) $(OBJ_DIR)$Sglop$S*.$O
-$(DEL) $(OBJ_DIR)$Slp_data$S*.$O
-$(DEL) $(OBJ_DIR)$Sgraph$S*.$O
@@ -364,6 +384,7 @@ clean_cc:
-$(DEL) $(LPBINARIES)
-$(DEL) $(GEN_DIR)$Sconstraint_solver$S*.pb.*
-$(DEL) $(GEN_DIR)$Slinear_solver$S*.pb.*
-$(DEL) $(GEN_DIR)$Sbop$S*.pb.*
-$(DEL) $(GEN_DIR)$Sglop$S*.pb.*
-$(DEL) $(GEN_DIR)$Sold_flatzinc$Sflatzinc*
-$(DEL) $(GEN_DIR)$Sold_flatzinc$Sparser*
@@ -386,7 +407,7 @@ clean_compat:
# Individual targets.
algorithmslibs: $(DYNAMIC_ALGORITHMS_DEPS) $(STATIC_ALGORITHMS_DEPS)
algorithmslibs: $(DYNAMIC_ALGORITHMS_DEPS) $(STATIC_ALGORITHMS_DEPS) $(DYNAMIC_SPLIT_DEPS) $(STATIC_SPLIT_DEPS)
cpexe: $(CPBINARIES)
@@ -612,15 +633,17 @@ endif
# Linear Solver Library
LINEAR_SOLVER_LIB_OBJS = \
$(OBJ_DIR)/linear_solver/bop_interface.$O \
$(OBJ_DIR)/linear_solver/glop_interface.$O \
$(OBJ_DIR)/linear_solver/cbc_interface.$O \
$(OBJ_DIR)/linear_solver/cplex_interface.$O \
$(OBJ_DIR)/linear_solver/cplex_interface.$O \
$(OBJ_DIR)/linear_solver/clp_interface.$O \
$(OBJ_DIR)/linear_solver/glpk_interface.$O \
$(OBJ_DIR)/linear_solver/gurobi_interface.$O \
$(OBJ_DIR)/linear_solver/linear_solver.$O \
$(OBJ_DIR)/linear_solver/linear_solver2.pb.$O \
$(OBJ_DIR)/linear_solver/model_exporter.$O \
# $(OBJ_DIR)/linear_solver/proto_tools.$O \
$(OBJ_DIR)/linear_solver/scip_interface.$O \
$(OBJ_DIR)/linear_solver/sulum_interface.$O
@@ -631,6 +654,9 @@ $(OBJ_DIR)/linear_solver/cbc_interface.$O:$(SRC_DIR)/linear_solver/cbc_interface
$(OBJ_DIR)/linear_solver/clp_interface.$O:$(SRC_DIR)/linear_solver/clp_interface.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/linear_solver/clp_interface.cc $(OBJ_OUT)$(OBJ_DIR)$Slinear_solver$Sclp_interface.$O
$(OBJ_DIR)/linear_solver/bop_interface.$O:$(SRC_DIR)/linear_solver/bop_interface.cc $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slinear_solver$Sbop_interface.cc $(OBJ_OUT)$(OBJ_DIR)$Slinear_solver$Sbop_interface.$O
$(OBJ_DIR)/linear_solver/cplex_interface.$O:$(SRC_DIR)/linear_solver/cplex_interface.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/linear_solver/cplex_interface.cc $(OBJ_OUT)$(OBJ_DIR)$Slinear_solver$Scplex_interface.$O
@@ -643,7 +669,7 @@ $(OBJ_DIR)/linear_solver/glpk_interface.$O:$(SRC_DIR)/linear_solver/glpk_interfa
$(OBJ_DIR)/linear_solver/gurobi_interface.$O:$(SRC_DIR)/linear_solver/gurobi_interface.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slinear_solver$Sgurobi_interface.cc $(OBJ_OUT)$(OBJ_DIR)$Slinear_solver$Sgurobi_interface.$O
$(OBJ_DIR)/linear_solver/linear_solver.$O:$(SRC_DIR)/linear_solver/linear_solver.cc $(GEN_DIR)/linear_solver/linear_solver2.pb.h
$(OBJ_DIR)/linear_solver/linear_solver.$O:$(SRC_DIR)/linear_solver/linear_solver.cc $(GEN_DIR)/linear_solver/linear_solver2.pb.h $(GEN_DIR)/glop/parameters.pb.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slinear_solver$Slinear_solver.cc $(OBJ_OUT)$(OBJ_DIR)$Slinear_solver$Slinear_solver.$O
$(OBJ_DIR)/linear_solver/linear_solver2.pb.$O:$(GEN_DIR)/linear_solver/linear_solver2.pb.cc
@@ -679,6 +705,7 @@ endif
UTIL_LIB_OBJS=\
$(OBJ_DIR)/util/bitset.$O \
$(OBJ_DIR)/util/cached_log.$O \
$(OBJ_DIR)/util/fp_utils.$O \
$(OBJ_DIR)/util/graph_export.$O \
$(OBJ_DIR)/util/piecewise_linear_function.$O \
$(OBJ_DIR)/util/rational_approximation.$O \
@@ -693,6 +720,9 @@ $(OBJ_DIR)/util/bitset.$O:$(SRC_DIR)/util/bitset.cc
$(OBJ_DIR)/util/cached_log.$O:$(SRC_DIR)/util/cached_log.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/util/cached_log.cc $(OBJ_OUT)$(OBJ_DIR)$Sutil$Scached_log.$O
$(OBJ_DIR)/util/fp_utils.$O:$(SRC_DIR)/util/fp_utils.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/util/fp_utils.cc $(OBJ_OUT)$(OBJ_DIR)$Sutil$Sfp_utils.$O
$(OBJ_DIR)/util/graph_export.$O:$(SRC_DIR)/util/graph_export.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/util/graph_export.cc $(OBJ_OUT)$(OBJ_DIR)$Sutil$Sgraph_export.$O
@@ -801,14 +831,16 @@ endif
# Algorithms library.
ALGORITHMS_LIB_OBJS=\
$(OBJ_DIR)/algorithms/hungarian.$O \
$(OBJ_DIR)/algorithms/knapsack_solver.$O \
SPLIT_LIB_OBJS=\
$(OBJ_DIR)/algorithms/dynamic_partition.$O \
$(OBJ_DIR)/algorithms/dynamic_permutation.$O \
$(OBJ_DIR)/algorithms/sparse_permutation.$O \
$(OBJ_DIR)/algorithms/find_graph_symmetries.$O
ALGORITHMS_LIB_OBJS=\
$(OBJ_DIR)/algorithms/hungarian.$O \
$(OBJ_DIR)/algorithms/knapsack_solver.$O
$(OBJ_DIR)/algorithms/hungarian.$O:$(SRC_DIR)/algorithms/hungarian.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)/algorithms/hungarian.cc $(OBJ_OUT)$(OBJ_DIR)$Salgorithms$Shungarian.$O
@@ -835,6 +867,14 @@ $(LIB_DIR)/$(LIBPREFIX)algorithms.$(STATIC_LIB_SUFFIX): $(ALGORITHMS_LIB_OBJS)
$(STATIC_LINK_CMD) $(STATIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)algorithms.$(STATIC_LIB_SUFFIX) $(ALGORITHMS_LIB_OBJS)
endif
$(LIB_DIR)/$(LIBPREFIX)split.$(DYNAMIC_LIB_SUFFIX): $(SPLIT_LIB_OBJS)
$(DYNAMIC_LINK_CMD) $(DYNAMIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)split.$(DYNAMIC_LIB_SUFFIX) $(SPLIT_LIB_OBJS)
ifneq ($(SYSTEM),win)
$(LIB_DIR)/$(LIBPREFIX)split.$(STATIC_LIB_SUFFIX): $(SPLIT_LIB_OBJS)
$(STATIC_LINK_CMD) $(STATIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)split.$(STATIC_LIB_SUFFIX) $(SPLIT_LIB_OBJS)
endif
# Base library.
BASE_LIB_OBJS=\
@@ -903,6 +943,7 @@ endif
LP_DATA_OBJS= \
$(OBJ_DIR)/lp_data/lp_data.$O \
$(OBJ_DIR)/lp_data/lp_decomposer.$O \
$(OBJ_DIR)/lp_data/lp_print_utils.$O \
$(OBJ_DIR)/lp_data/lp_types.$O \
$(OBJ_DIR)/lp_data/lp_utils.$O \
@@ -915,6 +956,9 @@ LP_DATA_OBJS= \
$(OBJ_DIR)/lp_data/lp_data.$O:$(SRC_DIR)/lp_data/lp_data.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slp_data$Slp_data.cc $(OBJ_OUT)$(OBJ_DIR)$Slp_data$Slp_data.$O
$(OBJ_DIR)/lp_data/lp_decomposer.$O:$(SRC_DIR)/lp_data/lp_decomposer.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slp_data$Slp_decomposer.cc $(OBJ_OUT)$(OBJ_DIR)$Slp_data$Slp_decomposer.$O
$(OBJ_DIR)/lp_data/lp_print_utils.$O:$(SRC_DIR)/lp_data/lp_print_utils.cc
$(CCC) $(CFLAGS) -c $(SRC_DIR)$Slp_data$Slp_print_utils.cc $(OBJ_OUT)$(OBJ_DIR)$Slp_data$Slp_print_utils.$O
@@ -1471,6 +1515,7 @@ SAT_LIB_OBJS = \
$(OBJ_DIR)/sat/boolean_problem.pb.$O \
$(OBJ_DIR)/sat/clause.$O\
$(OBJ_DIR)/sat/encoding.$O\
$(OBJ_DIR)/sat/lp_utils.$O\
$(OBJ_DIR)/sat/optimization.$O\
$(OBJ_DIR)/sat/pb_constraint.$O\
$(OBJ_DIR)/sat/sat_parameters.pb.$O\
@@ -1484,6 +1529,9 @@ satlibs: $(DYNAMIC_SAT_DEPS) $(STATIC_SAT_DEPS)
$(OBJ_DIR)/sat/sat_solver.$O: $(SRC_DIR)/sat/sat_solver.cc $(SRC_DIR)/sat/sat_solver.h $(SRC_DIR)/sat/sat_base.h $(SRC_DIR)/sat/clause.h $(SRC_DIR)/sat/encoding.h $(SRC_DIR)/sat/unsat_proof.h $(GEN_DIR)/sat/sat_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/sat/sat_solver.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Ssat_solver.$O
$(OBJ_DIR)/sat/lp_utils.$O: $(SRC_DIR)/sat/lp_utils.cc $(SRC_DIR)/sat/lp_utils.h $(SRC_DIR)/sat/sat_solver.h $(GEN_DIR)/sat/sat_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/sat/lp_utils.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Slp_utils.$O
$(OBJ_DIR)/sat/simplification.$O: $(SRC_DIR)/sat/simplification.cc $(SRC_DIR)/sat/simplification.h $(SRC_DIR)/sat/sat_base.h $(GEN_DIR)/sat/sat_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/sat/simplification.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Ssimplification.$O
@@ -1532,22 +1580,86 @@ $(LIB_DIR)/$(LIBPREFIX)sat.$(STATIC_LIB_SUFFIX): $(SAT_LIB_OBJS)
$(STATIC_LINK_CMD) $(STATIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)sat.$(STATIC_LIB_SUFFIX) $(SAT_LIB_OBJS)
endif
$(OBJ_DIR)/sat/sat_runner.$O:$(EX_DIR)/cpp/sat_runner.cc $(SRC_DIR)/sat/sat_solver.h $(EX_DIR)/cpp/opb_reader.h $(EX_DIR)/cpp/sat_cnf_reader.h $(GEN_DIR)/sat/sat_parameters.pb.h $(GEN_DIR)/sat/boolean_problem.pb.h $(SRC_DIR)/sat/boolean_problem.h $(SRC_DIR)/sat/sat_base.h
$(OBJ_DIR)/sat/sat_runner.$O:$(EX_DIR)/cpp/sat_runner.cc $(SRC_DIR)/sat/sat_solver.h $(EX_DIR)/cpp/opb_reader.h $(EX_DIR)/cpp/sat_cnf_reader.h $(GEN_DIR)/sat/sat_parameters.pb.h $(GEN_DIR)/sat/boolean_problem.pb.h $(SRC_DIR)/sat/boolean_problem.h $(SRC_DIR)/sat/sat_base.h $(SRC_DIR)/sat/simplification.h
$(CCC) $(CFLAGS) -c $(EX_DIR)$Scpp$Ssat_runner.cc $(OBJ_OUT)$(OBJ_DIR)$Ssat$Ssat_runner.$O
$(BIN_DIR)/sat_runner$E: $(STATIC_SAT_DEPS) $(OBJ_DIR)/sat/sat_runner.$O
$(CCC) $(CFLAGS) $(FZ_STATIC) $(OBJ_DIR)$Ssat$Ssat_runner.$O $(STATIC_SAT_LNK) $(STATIC_LD_FLAGS) $(EXE_OUT)$(BIN_DIR)$Ssat_runner$E
# Bop solver
BOP_LIB_OBJS = \
$(OBJ_DIR)/bop/bop_base.$O\
$(OBJ_DIR)/bop/bop_fs.$O\
$(OBJ_DIR)/bop/bop_lns.$O\
$(OBJ_DIR)/bop/bop_ls.$O\
$(OBJ_DIR)/bop/bop_parameters.pb.$O\
$(OBJ_DIR)/bop/bop_portfolio.$O\
$(OBJ_DIR)/bop/bop_solution.$O\
$(OBJ_DIR)/bop/bop_solver.$O\
$(OBJ_DIR)/bop/bop_util.$O\
$(OBJ_DIR)/bop/complete_optimizer.$O\
$(OBJ_DIR)/bop/integral_solver.$O
boplibs: $(DYNAMIC_BOP_DEPS) $(STATIC_BOP_DEPS)
$(OBJ_DIR)/bop/bop_base.$O: $(SRC_DIR)/bop/bop_base.cc $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_base.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_base.$O
$(OBJ_DIR)/bop/bop_fs.$O: $(SRC_DIR)/bop/bop_fs.cc $(SRC_DIR)/bop/bop_fs.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_fs.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_fs.$O
$(OBJ_DIR)/bop/bop_lns.$O: $(SRC_DIR)/bop/bop_lns.cc $(SRC_DIR)/bop/bop_lns.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_lns.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_lns.$O
$(OBJ_DIR)/bop/bop_ls.$O: $(SRC_DIR)/bop/bop_ls.cc $(SRC_DIR)/bop/bop_ls.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_ls.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_ls.$O
$(GEN_DIR)/bop/bop_parameters.pb.cc: $(SRC_DIR)/bop/bop_parameters.proto
$(PROTOBUF_DIR)/bin/protoc --proto_path=$(INC_DIR) --cpp_out=$(GEN_DIR) $(SRC_DIR)/bop/bop_parameters.proto
$(GEN_DIR)/bop/bop_parameters.pb.h: $(GEN_DIR)/bop/bop_parameters.pb.cc
$(OBJ_DIR)/bop/bop_parameters.pb.$O: $(GEN_DIR)/bop/bop_parameters.pb.cc $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(GEN_DIR)/bop/bop_parameters.pb.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_parameters.pb.$O
$(OBJ_DIR)/bop/bop_portfolio.$O: $(SRC_DIR)/bop/bop_portfolio.cc $(SRC_DIR)/bop/bop_portfolio.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_fs.h $(SRC_DIR)/bop/bop_ls.h $(SRC_DIR)/bop/bop_lns.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_portfolio.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_portfolio.$O
$(OBJ_DIR)/bop/bop_solver.$O: $(SRC_DIR)/bop/bop_solver.cc $(SRC_DIR)/bop/bop_solver.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_fs.h $(SRC_DIR)/bop/bop_ls.h $(SRC_DIR)/bop/bop_lns.h $(SRC_DIR)/bop/bop_portfolio.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_solver.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_solver.$O
$(OBJ_DIR)/bop/bop_solution.$O: $(SRC_DIR)/bop/bop_solution.cc $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_solution.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_solution.$O
$(OBJ_DIR)/bop/bop_util.$O: $(SRC_DIR)/bop/bop_util.cc $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/bop_util.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sbop_util.$O
$(OBJ_DIR)/bop/complete_optimizer.$O: $(SRC_DIR)/bop/complete_optimizer.cc $(SRC_DIR)/bop/complete_optimizer.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/complete_optimizer.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Scomplete_optimizer.$O
$(OBJ_DIR)/bop/integral_solver.$O: $(SRC_DIR)/bop/integral_solver.cc $(SRC_DIR)/bop/integral_solver.h $(SRC_DIR)/bop/bop_solver.h $(SRC_DIR)/bop/bop_util.h $(SRC_DIR)/bop/bop_types.h $(SRC_DIR)/bop/bop_base.h $(SRC_DIR)/bop/bop_fs.h $(SRC_DIR)/bop/bop_ls.h $(SRC_DIR)/bop/bop_lns.h $(SRC_DIR)/bop/bop_portfolio.h $(SRC_DIR)/bop/bop_solution.h $(GEN_DIR)/bop/bop_parameters.pb.h
$(CCC) $(CFLAGS) -c $(SRC_DIR)/bop/integral_solver.cc $(OBJ_OUT)$(OBJ_DIR)$Sbop$Sintegral_solver.$O
$(LIB_DIR)/$(LIBPREFIX)bop.$(DYNAMIC_LIB_SUFFIX): $(BOP_LIB_OBJS)
$(DYNAMIC_LINK_CMD) $(DYNAMIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)bop.$(DYNAMIC_LIB_SUFFIX) $(BOP_LIB_OBJS)
ifneq ($(SYSTEM),win)
$(LIB_DIR)/$(LIBPREFIX)bop.$(STATIC_LIB_SUFFIX): $(BOP_LIB_OBJS)
$(STATIC_LINK_CMD) $(STATIC_LINK_PREFIX)$(LIB_DIR)$S$(LIBPREFIX)bop.$(STATIC_LIB_SUFFIX) $(BOP_LIB_OBJS)
endif
# OR Tools unique library.
$(LIB_DIR)/$(LIBPREFIX)ortools.$(DYNAMIC_LIB_SUFFIX): $(CONSTRAINT_SOLVER_LIB_OBJS) $(LINEAR_SOLVER_LIB_OBJS) $(UTIL_LIB_OBJS) $(GRAPH_LIB_OBJS) $(SHORTESTPATHS_LIB_OBJS) $(ROUTING_LIB_OBJS) $(GLOP_LIB_OBJS) $(ALGORITHMS_LIB_OBJS) $(SAT_LIB_OBJS) $(BASE_LIB_OBJS)
$(LIB_DIR)/$(LIBPREFIX)ortools.$(DYNAMIC_LIB_SUFFIX): $(CONSTRAINT_SOLVER_LIB_OBJS) $(LINEAR_SOLVER_LIB_OBJS) $(UTIL_LIB_OBJS) $(GRAPH_LIB_OBJS) $(SHORTESTPATHS_LIB_OBJS) $(ROUTING_LIB_OBJS) $(BOP_LIB_OBJS) $(GLOP_LIB_OBJS) $(ALGORITHMS_LIB_OBJS) $(SPLIT_LIB_OBJS) $(SAT_LIB_OBJS) $(BASE_LIB_OBJS)
$(DYNAMIC_LINK_CMD) \
$(LDOUT)$(LIB_DIR)$S$(LIBPREFIX)ortools.$(DYNAMIC_LIB_SUFFIX) \
$(ALGORITHMS_LIB_OBJS) \
$(SPLIT_LIB_OBJS) \
$(BASE_LIB_OBJS) \
$(CONSTRAINT_SOLVER_LIB_OBJS) \
$(GRAPH_LIB_OBJS) \
$(LINEAR_SOLVER_LIB_OBJS) \
$(BOP_LIB_OBJS) \
$(GLOP_LIB_OBJS) \
$(ROUTING_LIB_OBJS) \
$(SAT_LIB_OBJS) \
@@ -1588,6 +1700,8 @@ cc_archive: $(LIB_DIR)/$(LIBPREFIX)ortools.$(DYNAMIC_LIB_SUFFIX)
copy src\\constraint_solver\\*.h temp\\or-tools.$(PORT)\\include\\constraint_solver
copy src\\gen\\constraint_solver\\*.pb.h temp\\or-tools.$(PORT)\\include\\constraint_solver
copy src\\graph\\*.h temp\\or-tools.$(PORT)\\include\\graph
copy src\\bop\\*.h temp\\or-tools.$(PORT)\\include\\bop
copy src\\gen\\bop\\*.pb.h temp\\or-tools.$(PORT)\\include\\bop
copy src\\glop\\*.h temp\\or-tools.$(PORT)\\include\\glop
copy src\\gen\\glop\\*.h temp\\or-tools.$(PORT)\\include\\glop
copy src\\linear_solver\\*.h temp\\or-tools.$(PORT)\\include\\linear_solver
@@ -1612,6 +1726,7 @@ cc_archive: $(LIB_DIR)/$(LIBPREFIX)ortools.$(DYNAMIC_LIB_SUFFIX)
mkdir temp/or-tools.$(PORT)/include/base
mkdir temp/or-tools.$(PORT)/include/constraint_solver
mkdir temp/or-tools.$(PORT)/include/gflags
mkdir temp/or-tools.$(PORT)/include/bop
mkdir temp/or-tools.$(PORT)/include/glop
mkdir temp/or-tools.$(PORT)/include/google
mkdir temp/or-tools.$(PORT)/include/graph
@@ -1633,6 +1748,8 @@ endif
cp src/base/*.h temp/or-tools.$(PORT)/include/base
cp src/constraint_solver/*.h temp/or-tools.$(PORT)/include/constraint_solver
cp src/gen/constraint_solver/*.pb.h temp/or-tools.$(PORT)/include/constraint_solver
cp src/bop/*.h temp/or-tools.$(PORT)/include/bop
cp src/gen/bop/*.pb.h temp/or-tools.$(PORT)/include/bop
cp src/glop/*.h temp/or-tools.$(PORT)/include/glop
cp src/gen/glop/*.pb.h temp/or-tools.$(PORT)/include/glop
cp src/graph/*.h temp/or-tools.$(PORT)/include/graph

View File

@@ -178,6 +178,27 @@ template <>
struct hash<std::string> {
size_t operator()(const std::string& x) const { return hash<const std::string>()(x); }
};
// Hash functor specialization for std::array<T, N>.
// Folds the per-element hashes together with Hash64NumWithSeed, starting from
// an arbitrary non-zero seed (71) so that empty arrays hash consistently.
template <class T, std::size_t N>
struct hash<std::array<T, N>> {
 public:
  size_t operator()(const std::array<T, N>& t) const {
    uint64 current = 71;
    // Use std::size_t for the loop index: it matches the type of N and avoids
    // a signed/unsigned comparison (and truncation for very large N).
    for (std::size_t index = 0; index < N; ++index) {
      const T& elem = t[index];
      const uint64 new_hash = hash<T>()(elem);
      current = operations_research::Hash64NumWithSeed(current, new_hash);
    }
    return current;
  }
  // Less-than operator: MSVC's hash_compare-style containers require the
  // hash functor to double as a comparator.
  bool operator()(const std::array<T, N>& a, const std::array<T, N>& b) const {
    return a < b;
  }
  static const size_t bucket_size = 4;  // These are required by MSVC
  static const size_t min_buckets = 8;  // 4 and 8 are defaults.
};
#endif // STLPORT
} // namespace HASH_NAMESPACE
@@ -329,6 +350,27 @@ class PairPointerIntTypeHasher : public stdext::hash_compare<std::pair<T*, U> >
};
#endif
// MSVC (stdext) hasher for std::array<T, N>; same hashing scheme as the
// hash<std::array<T, N>> specialization used on other platforms.
template <class T, std::size_t N>
struct StdArrayHasher : public stdext::hash_compare<std::array<T, N>> {
 public:
  size_t operator()(const std::array<T, N>& t) const {
    uint64 current = 71;
    // Use std::size_t for the loop index: it matches the type of N and avoids
    // a signed/unsigned comparison (and truncation for very large N).
    for (std::size_t index = 0; index < N; ++index) {
      const T& elem = t[index];
      const uint64 new_hash = hash<T>()(elem);
      current = operations_research::Hash64NumWithSeed(current, new_hash);
    }
    return current;
  }
  // Less-than operator: stdext::hash_compare requires the functor to double
  // as a comparator.
  bool operator()(const std::array<T, N>& a, const std::array<T, N>& b) const {
    return a < b;
  }
  static const size_t bucket_size = 4;  // These are required by MSVC
  static const size_t min_buckets = 8;  // 4 and 8 are defaults.
};
using std::hash;
using stdext::hash_map;
using stdext::hash_set;

View File

@@ -64,5 +64,9 @@ class CondVar {
// Checking macros.
// No-op stubs: these names match Clang's thread-safety-analysis annotation
// macros, and expanding them to nothing lets annotated code compile when the
// analysis is unavailable — TODO confirm against the annotated call sites.
#define EXCLUSIVE_LOCK_FUNCTION(x)
#define UNLOCK_FUNCTION(x)
#define LOCKS_EXCLUDED(x)
#define EXCLUSIVE_LOCKS_REQUIRED(x)
#define NO_THREAD_SAFETY_ANALYSIS
#define GUARDED_BY(x)
} // namespace operations_research
#endif // OR_TOOLS_BASE_MUTEX_H_

View File

@@ -52,6 +52,10 @@ class ACMRandom {
// Alias for RndDouble().
double RandDouble() { return RndDouble(); }

// Returns x times a uniform double in [0, 1) (so [0, x) for positive x).
double UniformDouble(double x) {
  return RandDouble() * x;
}
// Returns a double in [a, b). The distribution is uniform.
double UniformDouble(double a, double b) { return a + (b - a) * RndDouble(); }

283
src/bop/bop_base.cc Normal file
View File

@@ -0,0 +1,283 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_base.h"
#include <string>
#include <vector>
#include "sat/boolean_problem.h"
using operations_research::LinearBooleanProblem;
namespace operations_research {
namespace bop {
//------------------------------------------------------------------------------
// ProblemState
//------------------------------------------------------------------------------
// Initializes the state from the original problem: no variable fixed, no LP
// values yet, and the "AllZero" assignment as the initial solution (which may
// be infeasible).
ProblemState::ProblemState(const LinearBooleanProblem& problem)
    : original_problem_(problem),
      parameters_(),
      is_fixed_(problem.num_variables(), false),
      fixed_values_(problem.num_variables(), false),
      lp_values_(),
      solution_(problem, "AllZero"),
      lower_bound_(kint64min),
      upper_bound_(kint64max) {
  // TODO(user): Extract to a function used by all solvers.
  // Compute trivial unscaled lower bound: the sum of the negative objective
  // coefficients (the best any assignment could possibly achieve).
  const LinearObjective& objective = problem.objective();
  lower_bound_ = 0;
  for (int i = 0; i < objective.coefficients_size(); ++i) {
    lower_bound_ += std::min<int64>(0LL, objective.coefficients(i));
  }
  // The all-zero solution provides an initial upper bound only when feasible.
  upper_bound_ = solution_.IsFeasible() ? solution_.GetCost() : kint64max;
}
// Merges the learned information into the current state. Returns true when
// the merge changed anything: new LP values, new binary clauses, a better
// solution, a better lower bound, newly fixed variables — or a proof of
// infeasibility from contradictory fixings.
bool ProblemState::MergeLearnedInfo(const LearnedInfo& learned_info) {
  const std::string kIndent(25, ' ');  // Indent used to align log messages.

  // LP values: when the learned info carries any, they replace the current
  // ones wholesale (regardless of cost).
  bool new_lp_values = false;
  if (!learned_info.lp_values.empty()) {
    if (lp_values_ != learned_info.lp_values) {
      lp_values_ = learned_info.lp_values;
      new_lp_values = true;
      LOG(INFO) << kIndent + "New LP values.";
    }
  }

  // Binary clauses: only keep clauses whose two literals are over variables
  // of the original problem; clauses on search-added variables are dropped.
  bool new_binary_clauses = false;
  if (!learned_info.binary_clauses.empty()) {
    const int old_num = binary_clause_manager_.NumClauses();
    for (sat::BinaryClause c : learned_info.binary_clauses) {
      sat::VariableIndex num_vars(original_problem_.num_variables());
      if (c.a.Variable() < num_vars && c.b.Variable() < num_vars) {
        binary_clause_manager_.Add(c);
      }
    }
    if (binary_clause_manager_.NumClauses() > old_num) {
      new_binary_clauses = true;
      LOG(INFO) << kIndent + "Num binary clauses: "
                << binary_clause_manager_.NumClauses();
    }
  }

  // Solution: adopt the learned one iff it is feasible and strictly better
  // than the current one (or no feasible solution was known yet).
  bool new_solution = false;
  if (learned_info.solution.IsFeasible() &&
      (!solution_.IsFeasible() ||
       learned_info.solution.GetCost() < solution_.GetCost())) {
    solution_ = learned_info.solution;
    new_solution = true;
    LOG(INFO) << kIndent + "New solution.";
  }

  // Lower bound: only ever increases.
  bool new_lower_bound = false;
  if (learned_info.lower_bound > lower_bound()) {
    lower_bound_ = learned_info.lower_bound;
    new_lower_bound = true;
    LOG(INFO) << kIndent + "New lower bound.";
  }

  // Upper bound and optimality: with a feasible solution, tighten the upper
  // bound and declare optimality when the gap is closed or within the
  // parameters' relative gap limit.
  if (solution_.IsFeasible()) {
    upper_bound_ = std::min(upper_bound(), solution_.GetCost());
    if (upper_bound() <= lower_bound() ||
        (upper_bound() - lower_bound() <=
         parameters_.relative_gap_limit() *
             std::max(std::abs(upper_bound()), std::abs(lower_bound())))) {
      // The lower bound might be greater that the cost of a feasible solution
      // due to rounding errors in the problem scaling and Glop.
      // As a feasible solution was found, the solution is proved optimal.
      MarkAsOptimal();
    }
  }

  // Merge fixed variables. Note that variables added during search, i.e. not
  // in the original problem, are ignored.
  int num_newly_fixed_variables = 0;
  for (const sat::Literal literal : learned_info.fixed_literals) {
    const VariableIndex var(literal.Variable().value());
    if (var >= original_problem_.num_variables()) {
      continue;
    }
    const bool value = literal.IsPositive();
    if (is_fixed_[var]) {
      if (fixed_values_[var] != value) {
        // A variable fixed to both values proves infeasibility; that is a
        // state change, so report true.
        MarkAsInfeasible();
        return true;
      }
    } else {
      is_fixed_[var] = true;
      fixed_values_[var] = value;
      ++num_newly_fixed_variables;
    }
  }
  if (num_newly_fixed_variables > 0) {
    // Recount all fixed variables for logging; when every variable is fixed
    // and the solution is feasible, the state is optimal.
    int num_fixed_variables = 0;
    for (const bool is_fixed : is_fixed_) {
      if (is_fixed) {
        ++num_fixed_variables;
      }
    }
    LOG(INFO) << kIndent << num_newly_fixed_variables
              << " newly fixed variables (" << num_fixed_variables << " / "
              << is_fixed_.size() << ").";
    if (num_fixed_variables == is_fixed_.size() && solution_.IsFeasible()) {
      MarkAsOptimal();
      LOG(INFO) << kIndent << "Optimal";
    }
  }
  return new_lp_values || new_binary_clauses || new_solution ||
         new_lower_bound || num_newly_fixed_variables > 0;
}
// Snapshots everything learned so far into a LearnedInfo: the fixed literals,
// the best solution, the lower bound, the LP values, and the binary clauses
// added since the last SynchronizationDone() call.
LearnedInfo ProblemState::GetLearnedInfo() const {
  LearnedInfo learned_info(original_problem_);
  for (VariableIndex var(0); var < is_fixed_.size(); ++var) {
    if (is_fixed_[var]) {
      // Translate each fixed variable back into a sat::Literal carrying its
      // fixed value.
      learned_info.fixed_literals.push_back(
          sat::Literal(sat::VariableIndex(var.value()), fixed_values_[var]));
    }
  }
  learned_info.solution = solution_;
  learned_info.lower_bound = lower_bound();
  learned_info.lp_values = lp_values_;
  learned_info.binary_clauses = NewlyAddedBinaryClauses();
  return learned_info;
}
// Closes the optimality gap: requires a feasible solution (CHECKed), then
// raises the lower bound to match the upper bound.
void ProblemState::MarkAsOptimal() {
  CHECK(solution_.IsFeasible());
  lower_bound_ = upper_bound();
}
// Marks the problem state as infeasible by making lower_bound() strictly
// greater than upper_bound(), which is exactly what IsInfeasible() tests.
// Only valid while no feasible solution is known (CHECKed).
void ProblemState::MarkAsInfeasible() {
  // Mark as infeasible, i.e. set a lower_bound greater than the upper_bound.
  CHECK(!solution_.IsFeasible());
  if (upper_bound() == kint64max) {
    // Can't raise the lower bound above kint64max, so lower the upper bound
    // instead.
    lower_bound_ = kint64max;
    upper_bound_ = kint64max - 1;
  } else {
    // BUG FIX: this used to be "upper_bound_ - 1", which leaves
    // lower_bound() <= upper_bound() and therefore does NOT make
    // IsInfeasible() true. "+ 1" establishes the invariant; it cannot
    // overflow since upper_bound_ < kint64max in this branch.
    lower_bound_ = upper_bound_ + 1;
  }
}
// Returns the binary clauses added since the last SynchronizationDone() call.
const std::vector<sat::BinaryClause>& ProblemState::NewlyAddedBinaryClauses() const {
  return binary_clause_manager_.newly_added();
}

// Resets the "newly added" clause set; subsequent NewlyAddedBinaryClauses()
// calls only report clauses added after this point.
void ProblemState::SynchronizationDone() {
  binary_clause_manager_.ClearNewlyAdded();
}
//------------------------------------------------------------------------------
// StampedLearnedInfo
//------------------------------------------------------------------------------
// Starts empty, with the "no more stamps will come" flag unset.
StampedLearnedInfo::StampedLearnedInfo()
    : learned_infos_(), last_stamp_reached_(false), mutex_() {}

// Appends the learned info for the given stamp. Stamps must arrive in strict
// sequential order: stamp must equal the current number of entries (CHECKed).
// NOTE(review): the MutexLock is commented out, so despite the mutex_ member
// this method is currently not synchronized — confirm this is intentional.
void StampedLearnedInfo::AddLearnedInfo(SolverTimeStamp stamp,
                                        const LearnedInfo& learned_info) {
  // MutexLock mutex_lock(&mutex_);
  CHECK_EQ(stamp, learned_infos_.size());
  learned_infos_.push_back(learned_info);
}
// Fetches into *learned_info the info recorded for the given stamp.
// Returns true when the stamp's entry exists; returns false when the last
// stamp was reached without ever producing this one.
// NOTE(review): every mutex operation below is commented out, so the
// "block until the stamp arrives" path (LockWhen on SatisfiesStampCondition)
// is disabled and the method is not thread-safe; with locking off, a missing
// stamp falls straight through to the LastStampReached() re-check and the
// final CHECK. Confirm this is intentional.
bool StampedLearnedInfo::GetLearnedInfo(SolverTimeStamp stamp,
                                        LearnedInfo* learned_info) {
  CHECK_LE(0, stamp);
  CHECK(nullptr != learned_info);
  learned_info->Clear();
  {
    // MutexLock mutex_lock(&mutex_);
    // Fast path: the entry already exists, or no more entries will come.
    if (ContainsStamp(stamp)) {
      *learned_info = learned_infos_[stamp];
      return true;
    }
    if (LastStampReached()) {
      return false;
    }
  }
  // MutexConditionInfo m(this, stamp);
  // mutex_.LockWhen(Condition(&SatisfiesStampCondition, &m));
  // mutex_.AssertHeld();
  if (LastStampReached()) {
    // mutex_.Unlock();
    return false;
  }
  CHECK(ContainsStamp(stamp));
  *learned_info = learned_infos_[stamp];
  // mutex_.Unlock();
  return true;
}
// Signals that no further stamps will ever be added.
void StampedLearnedInfo::MarkLastStampReached() {
  // MutexLock mutex_lock(&mutex_);
  last_stamp_reached_ = true;
}

// Returns true once MarkLastStampReached() has been called.
bool StampedLearnedInfo::LastStampReached() const {
  // mutex_.AssertHeld();
  return last_stamp_reached_;
}

// Returns true when an entry for the given stamp has already been recorded.
bool StampedLearnedInfo::ContainsStamp(SolverTimeStamp stamp) const {
  // mutex_.AssertHeld();
  return stamp < learned_infos_.size();
}

// Wake-up condition for the (currently disabled) blocking GetLearnedInfo()
// path: true as soon as the requested stamp is available or no more stamps
// will come.
bool StampedLearnedInfo::SatisfiesStampCondition(MutexConditionInfo* m) {
  CHECK(nullptr != m);
  return m->learned_infos->ContainsStamp(m->stamp) ||
         m->learned_infos->LastStampReached();
}
//------------------------------------------------------------------------------
// BopOptimizerBase
//------------------------------------------------------------------------------
// Records the optimizer name and creates the per-optimizer stats group named
// after it.
BopOptimizerBase::BopOptimizerBase(const std::string& name)
    : name_(name), stats_(name) {
  SCOPED_TIME_STAT(&stats_);
}

BopOptimizerBase::~BopOptimizerBase() {
  // Dump the collected time statistics, when stats are compiled in.
  IF_STATS_ENABLED(LOG(INFO) << stats_.StatString());
}
// Returns a human-readable name for the given Status value.
std::string BopOptimizerBase::GetStatusString(Status status) {
  switch (status) {
    case OPTIMAL_SOLUTION_FOUND: return "OPTIMAL_SOLUTION_FOUND";
    case SOLUTION_FOUND:         return "SOLUTION_FOUND";
    case INFEASIBLE:             return "INFEASIBLE";
    case LIMIT_REACHED:          return "LIMIT_REACHED";
    case CONTINUE:               return "CONTINUE";
    case ABORT:                  return "ABORT";
  }
  // The switch above deliberately has no "default:" so that the compiler
  // flags any enum value left unhandled; this fallback only fires on a
  // corrupted / out-of-range value.
  LOG(DFATAL) << "Invalid Status " << static_cast<int>(status);
  return "UNKNOWN Status";
}
} // namespace bop
} // namespace operations_research

301
src/bop/bop_base.h Normal file
View File

@@ -0,0 +1,301 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_BASE_H_
#define OR_TOOLS_BOP_BOP_BASE_H_
#include <string>
#include "base/basictypes.h"
#include "base/mutex.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "lp_data/lp_types.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_base.h"
#include "sat/clause.h"
#include "util/stats.h"
#include "util/time_limit.h"
namespace operations_research {
namespace bop {
// Forward declaration.
struct LearnedInfo;
// This class represents the current state of the problem with all the
// information that the solver learned about it at a given time.
// This class represents the current state of the problem with all the
// information that the solver learned about it at a given time.
// It acts as the synchronization point between optimizers: they read it in
// Synchronize() and produce LearnedInfo that is merged back into it.
class ProblemState {
 public:
  explicit ProblemState(const LinearBooleanProblem& problem);

  // Sets parameters, used for instance to get the tolerance, the gap limit...
  void SetParameters(const BopParameters& parameters) {
    parameters_ = parameters;
  }

  // Merges the learned information with the current problem state. For
  // instance, if variables x, and y are fixed in the current state, and z is
  // learned to be fixed, the result of the merge will be x, y, and z being
  // fixed in the problem state.
  // Note that the LP values contained in the learned information (if any)
  // will replace the LP values of the problem state, whatever the cost is.
  // Returns true when the merge has changed the problem state.
  bool MergeLearnedInfo(const LearnedInfo& learned_info);

  // Returns all the information learned so far.
  // TODO(user): In the current implementation the learned information only
  // contains binary clauses added since the last call to
  // SynchronizationDone().
  // Add an iterator on the sat::BinaryClauseManager.
  LearnedInfo GetLearnedInfo() const;

  // Marks the problem state as optimal.
  void MarkAsOptimal();

  // Marks the problem state as infeasible.
  void MarkAsInfeasible();

  // Returns true when the current state is proved to be optimal, i.e. a
  // feasible solution whose cost matches the lower bound is known. In such a
  // case solution() returns the optimal solution.
  bool IsOptimal() const {
    return solution_.IsFeasible() && solution_.GetCost() == lower_bound();
  }

  // Returns true when the problem is proved to be infeasible, i.e. when the
  // two bounds have crossed.
  bool IsInfeasible() const { return lower_bound() > upper_bound(); }

  // Returns true when the variable var is fixed in the current problem state.
  // The value of the fixed variable is returned by GetVariableFixedValue(var).
  bool IsVariableFixed(VariableIndex var) const { return is_fixed_[var]; }
  const ITIVector<VariableIndex, bool>& is_fixed() const { return is_fixed_; }

  // Returns the value of the fixed variable var. Should be only called on
  // fixed variables (CHECKed).
  bool GetVariableFixedValue(VariableIndex var) const {
    return fixed_values_[var];
  }
  const ITIVector<VariableIndex, bool>& fixed_values() const {
    return fixed_values_;
  }

  // Returns the values of the LP relaxation of the problem. Returns an empty
  // vector when the LP has not been populated.
  const glop::DenseRow& lp_values() const { return lp_values_; }

  // Returns the solution to the current state problem.
  // Note that the solution might not be feasible because until we find one, it
  // will just be the all-false assignment.
  const BopSolution& solution() const { return solution_; }

  // Returns the original problem. Note that the current problem might be
  // different, e.g. fixed variables, but equivalent, i.e. a solution to one
  // should be a solution to the other too.
  const LinearBooleanProblem& original_problem() const {
    return original_problem_;
  }

  // Returns the current lower (resp. upper) bound of the objective cost.
  // For internal use only: this is the unscaled version of the lower (resp.
  // upper) bound, and so should be compared only to the unscaled cost given by
  // solution.GetCost().
  int64 lower_bound() const { return lower_bound_; }
  int64 upper_bound() const { return upper_bound_; }

  // Returns the scaled lower bound of the original problem, i.e. the internal
  // lower bound mapped back to the original objective space.
  double GetScaledLowerBound() const {
    return lower_bound() * original_problem_.objective().scaling_factor() +
           original_problem_.objective().offset();
  }

  // Returns the newly added binary clause since the last
  // SynchronizationDone().
  const std::vector<sat::BinaryClause>& NewlyAddedBinaryClauses() const;

  // Resets what is considered "new" information. This is meant to be called
  // once all the optimizers have been synchronized.
  void SynchronizationDone();

 private:
  const LinearBooleanProblem& original_problem_;
  BopParameters parameters_;
  // Per-variable fixed status and, when fixed, the value (parallel vectors).
  ITIVector<VariableIndex, bool> is_fixed_;
  ITIVector<VariableIndex, bool> fixed_values_;
  glop::DenseRow lp_values_;
  BopSolution solution_;
  int64 lower_bound_;
  int64 upper_bound_;

  // Manage the set of the problem binary clauses (including the learned ones).
  sat::BinaryClauseManager binary_clause_manager_;

  DISALLOW_COPY_AND_ASSIGN(ProblemState);
};
// This struct represents what has been learned on the problem state by
// running an optimizer. The goal is then to merge the learned information
// with the problem state in order to get a more constrained problem to be used
// by the next called optimizer.
// This struct represents what has been learned on the problem state by
// running an optimizer. The goal is then to merge the learned information
// with the problem state in order to get a more constrained problem to be used
// by the next called optimizer.
struct LearnedInfo {
  explicit LearnedInfo(const LinearBooleanProblem& problem)
      : fixed_literals(),
        solution(problem, "AllZero"),
        lower_bound(kint64min),
        lp_values(),
        binary_clauses() {}

  // Clears the learned containers and resets the lower bound. This can be
  // used to reduce the number of creation / deletion of objects.
  // Note that 'solution' is deliberately left unchanged: it can only be
  // rebuilt from the problem, and callers overwrite it when a new solution is
  // found.
  void Clear() {
    fixed_literals.clear();
    lower_bound = kint64min;
    lp_values.clear();
    binary_clauses.clear();
  }

  // Vector of all literals that have been fixed.
  std::vector<sat::Literal> fixed_literals;

  // New solution. Note that the solution might be infeasible.
  BopSolution solution;

  // A lower bound (for multi-threading purpose).
  int64 lower_bound;

  // An assignment for the relaxed linear programming problem (can be empty).
  // This is meant to be the optimal LP solution, but can just be a feasible
  // solution or any floating point assignment if the LP solver didn't solve
  // the relaxed problem optimally.
  glop::DenseRow lp_values;

  // New binary clauses.
  std::vector<sat::BinaryClause> binary_clauses;
};
// Thread-safe storage of LearnedInfo for each stamp. This class is used in
// a multi-threading context to synchronize learned information between solvers.
// Thread-safe storage of LearnedInfo for each stamp. This class is used in
// a multi-threading context to synchronize learned information between
// solvers.
class StampedLearnedInfo {
 public:
  StampedLearnedInfo();

  // Adds the learned_info at position stamp.
  // Requires the learned_infos to be added sequentially, i.e. the stamp should
  // not already exist, and should be +1 of the current highest stored stamp.
  void AddLearnedInfo(SolverTimeStamp stamp, const LearnedInfo& learned_info)
      LOCKS_EXCLUDED(mutex_);

  // Gets the learned_info corresponding to the given stamp.
  // Returns false when the info doesn't and won't exist because the search
  // was stopped.
  // Note that when the search is not marked as stopped, this method will wait
  // (block) for the given stamp to be added.
  bool GetLearnedInfo(SolverTimeStamp stamp, LearnedInfo* learned_info)
      LOCKS_EXCLUDED(mutex_);

  // Marks the search as stopped, the last stamp is reached.
  void MarkLastStampReached() LOCKS_EXCLUDED(mutex_);

 private:
  // Returns true when the last stamp has been reached, i.e. no more stamps
  // will be stored. This might be due to a reached time limit.
  // The caller must hold the mutex_ (Asserted).
  bool LastStampReached() const EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // The caller must hold the mutex_ (Asserted).
  bool ContainsStamp(SolverTimeStamp stamp) const
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Bundles the object and the requested stamp so the pair can be passed to
  // the static Mutex condition below.
  struct MutexConditionInfo {
    MutexConditionInfo(StampedLearnedInfo* i, SolverTimeStamp s)
        : learned_infos(i), stamp(s) {}
    const StampedLearnedInfo* learned_infos;
    SolverTimeStamp stamp;
  };

  // Static function used as a Condition to wait for available info at a given
  // stamp.
  // Returns true when the info is available.
  static bool SatisfiesStampCondition(MutexConditionInfo* const m)
      NO_THREAD_SAFETY_ANALYSIS;

  ITIVector<SolverTimeStamp, LearnedInfo> learned_infos_ GUARDED_BY(mutex_);
  bool last_stamp_reached_ GUARDED_BY(mutex_);
  // TODO(user): Use ReadMutex and WriteMutex?
  mutable Mutex mutex_;
};
// Base class used to optimize a ProblemState.
// Optimizers implementing this class are used in a sort of portfolio and
// are run sequentially or concurrently. See for instance BopRandomLNSOptimizer.
// Base class used to optimize a ProblemState.
// Optimizers implementing this class are used in a sort of portfolio and
// are run sequentially or concurrently. See for instance
// BopRandomLNSOptimizer.
class BopOptimizerBase {
 public:
  explicit BopOptimizerBase(const std::string& name);
  virtual ~BopOptimizerBase();

  // Returns true if it's useless to run the optimizer several times on the
  // same solution.
  virtual bool RunOncePerSolution() const = 0;

  // Returns true if the optimizer needs a feasible solution to run.
  virtual bool NeedAFeasibleSolution() const = 0;

  // TODO(user): To redesign, some are not needed anymore thanks to the
  // problem state, e.g. IsOptimal().
  enum Status {
    OPTIMAL_SOLUTION_FOUND,
    SOLUTION_FOUND,
    INFEASIBLE,
    LIMIT_REACHED,
    CONTINUE,
    ABORT
  };

  // Resets all internal structures to start a new round of Optimize() calls
  // with a new problem state.
  // Returns the status of the optimizer for the current problem state, e.g.
  // returns INFEASIBLE when the problem is proved UNSAT, ABORT when the
  // optimizer already knows it can't optimize the problem state...
  // Note that the underlying problem state object might change during search;
  // If possible copy needed objects, keep a pointer on them only with great
  // caution.
  virtual Status Synchronize(const ProblemState& problem_state) = 0;

  // Tries to infer more information about the problem state, i.e. reduces the
  // gap by increasing the lower bound or finding a better solution.
  // Returns SOLUTION_FOUND when a new solution with a better objective cost is
  // found before a time limit.
  // The learned information is cleared and then filled with any new
  // information about the problem, e.g. a new lower bound.
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit) = 0;

  std::string name() const { return name_; }

  // Returns a std::string describing the status.
  static std::string GetStatusString(Status status);

 protected:
  const std::string name_;
  mutable StatsGroup stats_;
};
// Streams the human-readable name of a BopOptimizerBase::Status.
inline std::ostream& operator<<(std::ostream& os,
                                BopOptimizerBase::Status status) {
  return os << BopOptimizerBase::GetStatusString(status);
}
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_BASE_H_

625
src/bop/bop_fs.cc Normal file
View File

@@ -0,0 +1,625 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_fs.h"
#include <string>
#include <vector>
#include "base/commandlineflags.h"
#include "base/stringprintf.h"
#include "google/protobuf/text_format.h"
#include "base/stl_util.h"
#include "glop/lp_solver.h"
#include "lp_data/lp_print_utils.h"
#include "sat/boolean_problem.h"
#include "sat/lp_utils.h"
#include "sat/optimization.h"
#include "sat/sat_solver.h"
#include "util/bitset.h"
using operations_research::glop::ColIndex;
using operations_research::glop::DenseRow;
using operations_research::glop::GlopParameters;
using operations_research::glop::LinearProgram;
using operations_research::glop::LPSolver;
using operations_research::glop::RowIndex;
using operations_research::LinearBooleanProblem;
using operations_research::LinearBooleanConstraint;
using operations_research::LinearObjective;
namespace operations_research {
namespace bop {
namespace {
// Maps a (possibly infeasible) solution and the known lower bound to the
// optimizer status to report.
BopOptimizerBase::Status SolutionStatus(const BopSolution& solution,
                                        int64 lower_bound) {
  if (!solution.IsFeasible()) return BopOptimizerBase::LIMIT_REACHED;
  // The lower bound might be greater than the cost of a feasible solution due
  // to rounding errors in the problem scaling and Glop, hence the <=.
  return solution.GetCost() <= lower_bound
             ? BopOptimizerBase::OPTIMAL_SOLUTION_FOUND
             : BopOptimizerBase::SOLUTION_FOUND;
}
// Returns true when every LP value is within 'tolerance' of 0.0 or 1.0.
// Note that this test is correct because in this part of the code, Bop
// only deals with boolean variables.
bool AllIntegralValues(const DenseRow& values, double tolerance) {
  for (const glop::Fractional value : values) {
    const bool near_zero = value < tolerance;
    const bool near_one = value + tolerance >= 1.0;
    if (!near_zero && !near_one) return false;
  }
  return true;
}
void DenseRowToBopSolution(const DenseRow& values, BopSolution* solution) {
CHECK(solution != nullptr);
CHECK_EQ(solution->Size(), values.size());
for (VariableIndex var(0); var < solution->Size(); ++var) {
solution->SetValue(var, round(values[ColIndex(var.value())]));
}
}
} // anonymous namespace
//------------------------------------------------------------------------------
// BopSatObjectiveFirstSolutionGenerator
//------------------------------------------------------------------------------
// Note: the SAT solver is empty here; the problem is loaded into it lazily in
// Synchronize(). Bounds start at their widest possible values.
BopSatObjectiveFirstSolutionGenerator::BopSatObjectiveFirstSolutionGenerator(
    const std::string& name, double time_limit_ratio)
    : BopOptimizerBase(name),
      time_limit_ratio_(time_limit_ratio),
      first_solve_(true),
      sat_solver_(),
      lower_bound_(kint64min),
      upper_bound_(kint64max) {}

BopSatObjectiveFirstSolutionGenerator::
    ~BopSatObjectiveFirstSolutionGenerator() {}
BopOptimizerBase::Status BopSatObjectiveFirstSolutionGenerator::Synchronize(
    const ProblemState& problem_state) {
  // As long as no feasible solution is known, keep trying from scratch.
  if (!problem_state.solution().IsFeasible()) first_solve_ = true;
  if (!first_solve_) return BopOptimizerBase::ABORT;

  const BopOptimizerBase::Status status =
      LoadStateProblemToSatSolver(problem_state, &sat_solver_);
  if (status != BopOptimizerBase::CONTINUE) return status;

  // Branch first toward assignments that look good for the objective of the
  // original problem.
  UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                         &sat_solver_);
  lower_bound_ = problem_state.lower_bound();
  upper_bound_ = problem_state.upper_bound();
  return BopOptimizerBase::CONTINUE;
}
// Runs the SAT solver once under a fraction of the remaining time budget.
// Returns OPTIMAL_SOLUTION_FOUND / INFEASIBLE on UNSAT (depending on whether
// a feasible solution was already known), the SolutionStatus() on SAT, and
// LIMIT_REACHED otherwise.
BopOptimizerBase::Status BopSatObjectiveFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  if (!first_solve_) return BopOptimizerBase::ABORT;
  first_solve_ = false;

  const double initial_deterministic_time = sat_solver_.deterministic_time();
  // Only a time_limit_ratio_ fraction of the remaining budget is given to the
  // SAT solver.
  sat::SatParameters sat_parameters;
  sat_parameters.set_max_time_in_seconds(
      std::min(time_limit->GetTimeLeft(),
               time_limit_ratio_ * time_limit->GetTimeLeft()));
  sat_parameters.set_max_deterministic_time(
      std::min(time_limit->GetDeterministicTimeLeft(),
               time_limit_ratio_ * time_limit->GetDeterministicTimeLeft()));
  sat_solver_.SetParameters(sat_parameters);

  // Double the time limit ratio for the next call to Optimize().
  // Bug fix: this used to be guarded by 'if (first_solve_)', which is always
  // false at this point (first_solve_ is cleared above), so the doubling
  // documented here never happened.
  time_limit_ratio_ *= 2.0;

  const sat::SatSolver::Status sat_status = sat_solver_.Solve();
  time_limit->AdvanceDeterministicTime(sat_solver_.deterministic_time() -
                                       initial_deterministic_time);

  if (sat_status == sat::SatSolver::MODEL_UNSAT) {
    if (upper_bound_ != kint64max) {
      // As the solution in the state problem is feasible, it is proved
      // optimal.
      learned_info->lower_bound = upper_bound_;
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    // No feasible solution is known: the problem is proved infeasible.
    return BopOptimizerBase::INFEASIBLE;
  }
  if (sat_status == sat::SatSolver::MODEL_SAT) {
    ExtractLearnedInfoFromSatSolver(&sat_solver_, learned_info);
    SatAssignmentToBopSolution(sat_solver_.Assignment(),
                               &learned_info->solution);
    return SolutionStatus(learned_info->solution, lower_bound_);
  }
  return BopOptimizerBase::LIMIT_REACHED;
}
//------------------------------------------------------------------------------
// BopSatLpFirstSolutionGenerator
//------------------------------------------------------------------------------
// Note: the SAT solver and the LP values are populated lazily in
// Synchronize(). Bounds start at their widest possible values.
BopSatLpFirstSolutionGenerator::BopSatLpFirstSolutionGenerator(
    const std::string& name, double time_limit_ratio)
    : BopOptimizerBase(name),
      time_limit_ratio_(time_limit_ratio),
      first_solve_(true),
      sat_solver_(),
      lower_bound_(kint64min),
      upper_bound_(kint64max),
      lp_values_() {}

BopSatLpFirstSolutionGenerator::~BopSatLpFirstSolutionGenerator() {}
BopOptimizerBase::Status BopSatLpFirstSolutionGenerator::Synchronize(
    const ProblemState& problem_state) {
  // As long as no feasible solution is known, keep trying from scratch.
  if (!problem_state.solution().IsFeasible()) first_solve_ = true;

  // This generator only runs once per solution and needs LP values.
  if (!first_solve_ || problem_state.lp_values().empty()) {
    return BopOptimizerBase::ABORT;
  }

  lower_bound_ = problem_state.lower_bound();
  upper_bound_ = problem_state.upper_bound();
  lp_values_ = problem_state.lp_values();

  const BopOptimizerBase::Status status =
      LoadStateProblemToSatSolver(problem_state, &sat_solver_);
  if (status != BopOptimizerBase::CONTINUE) return status;

  // Seed the SAT branching with the LP relaxation: prefer the rounded LP
  // value, with a weight growing as the value gets more integral.
  for (ColIndex col(0); col < lp_values_.size(); ++col) {
    const double lp_value = lp_values_[col];
    const bool preferred_value = round(lp_value) == 1;
    sat_solver_.SetAssignmentPreference(
        sat::Literal(sat::VariableIndex(col.value()), preferred_value),
        1 - fabs(lp_value - round(lp_value)));
  }
  return BopOptimizerBase::CONTINUE;
}
// Runs the SAT solver once, with branching preferences already seeded from
// the LP relaxation in Synchronize(), under a fraction of the remaining time
// budget.
BopOptimizerBase::Status BopSatLpFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  if (!first_solve_ || lp_values_.empty()) return BopOptimizerBase::ABORT;
  first_solve_ = false;

  const double sat_deterministic_time = sat_solver_.deterministic_time();
  // Only a time_limit_ratio_ fraction of the remaining budget is given to the
  // SAT solver.
  sat::SatParameters sat_parameters;
  sat_parameters.set_max_time_in_seconds(
      std::min(time_limit->GetTimeLeft(),
               time_limit_ratio_ * time_limit->GetTimeLeft()));
  sat_parameters.set_max_deterministic_time(
      std::min(time_limit->GetDeterministicTimeLeft(),
               time_limit_ratio_ * time_limit->GetDeterministicTimeLeft()));
  sat_solver_.SetParameters(sat_parameters);

  // Double the time limit ratio for the next call to Optimize().
  // Bug fix: this used to be guarded by 'if (first_solve_)', which is always
  // false at this point (first_solve_ is cleared above), so the doubling
  // documented here never happened.
  time_limit_ratio_ *= 2.0;

  const sat::SatSolver::Status sat_status = sat_solver_.Solve();
  time_limit->AdvanceDeterministicTime(sat_solver_.deterministic_time() -
                                       sat_deterministic_time);

  if (sat_status == sat::SatSolver::MODEL_UNSAT) {
    if (upper_bound_ != kint64max) {
      // As the solution in the state problem is feasible, it is proved
      // optimal.
      learned_info->lower_bound = upper_bound_;
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    // No feasible solution is known: the problem is proved infeasible.
    return BopOptimizerBase::INFEASIBLE;
  }
  if (sat_status == sat::SatSolver::MODEL_SAT) {
    ExtractLearnedInfoFromSatSolver(&sat_solver_, learned_info);
    SatAssignmentToBopSolution(sat_solver_.Assignment(),
                               &learned_info->solution);
    return SolutionStatus(learned_info->solution, lower_bound_);
  }
  return BopOptimizerBase::LIMIT_REACHED;
}
//------------------------------------------------------------------------------
// BopRandomFirstSolutionGenerator
//------------------------------------------------------------------------------
// Note: 'random_' is seeded once here; 'sat_seed_' is incremented at each
// randomized SAT run in Optimize(). The problem and the initial solution are
// populated in Synchronize().
BopRandomFirstSolutionGenerator::BopRandomFirstSolutionGenerator(
    int random_seed, const std::string& name, double time_limit_ratio)
    : BopOptimizerBase(name),
      time_limit_ratio_(time_limit_ratio),
      problem_(nullptr),
      initial_solution_(),
      random_(random_seed),
      sat_solver_(),
      sat_seed_(0),
      first_solve_(true) {}

BopRandomFirstSolutionGenerator::~BopRandomFirstSolutionGenerator() {}
BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Synchronize(
    const ProblemState& problem_state) {
  const BopOptimizerBase::Status load_status =
      LoadStateProblemToSatSolver(problem_state, &sat_solver_);
  if (load_status != BopOptimizerBase::CONTINUE) return load_status;

  // Cache everything Optimize() needs from the state: the original problem,
  // the current solution and the LP relaxation values (possibly empty).
  problem_ = &problem_state.original_problem();
  initial_solution_.reset(new BopSolution(problem_state.solution()));
  lp_values_ = problem_state.lp_values();

  // As long as no feasible solution is known, restart from scratch.
  if (!problem_state.solution().IsFeasible()) first_solve_ = true;
  return BopOptimizerBase::CONTINUE;
}
// Repeatedly runs the SAT solver with randomized decision heuristics and a
// small per-run conflict budget, each time constraining the solution to be
// strictly better than the best one found so far. Stops on the local time
// limit or when the conflict budget is exhausted.
BopOptimizerBase::Status BopRandomFirstSolutionGenerator::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  // We want to check both the local time limit and the global time limit, as
  // during first solution phase the local time limit can be more restrictive.
  TimeLimit local_time_limit(
      std::min(time_limit->GetTimeLeft(),
               time_limit_ratio_ * time_limit->GetTimeLeft()),
      std::min(time_limit->GetDeterministicTimeLeft(),
               time_limit_ratio_ * time_limit->GetDeterministicTimeLeft()));

  // Each individual SAT run is capped at a few conflicts; the diversity comes
  // from randomizing the heuristic across runs, not from long runs.
  const int kMaxNumConflicts = 10;
  learned_info->solution = *initial_solution_;
  learned_info->solution.set_name(
      initial_solution_->IsFeasible()
          ? StringPrintf("RandomFirstSolutionFrom_%lld",
                         initial_solution_->GetCost())
          : "RandomFirstSolution");
  int64 best_cost = initial_solution_->IsFeasible()
                        ? initial_solution_->GetCost()
                        : kint64max;
  // On the very first solve there is no conflict budget; on subsequent calls
  // the budget comes from the parameters.
  int64 remaining_num_conflicts =
      first_solve_
          ? kint64max
          : parameters.max_number_of_conflicts_in_random_solution_generation();
  first_solve_ = false;
  int64 old_num_failures = 0;
  while (remaining_num_conflicts > 0 && !local_time_limit.LimitReached()) {
    ++sat_seed_;
    sat_solver_.Backtrack(0);
    old_num_failures = sat_solver_.num_failures();

    // Use a fresh, randomized decision heuristic for this run.
    sat::SatParameters sat_parameters;
    sat::RandomizeDecisionHeuristic(&random_, &sat_parameters);
    sat_parameters.set_max_number_of_conflicts(kMaxNumConflicts);
    sat_parameters.set_random_seed(sat_seed_);
    sat_solver_.SetParameters(sat_parameters);
    sat_solver_.ResetDecisionHeuristic();

    // Constrain the solution to be better than the current one.
    if (best_cost == kint64max ||
        AddObjectiveConstraint(*problem_, false, sat::Coefficient(0), true,
                               sat::Coefficient(best_cost) - 1, &sat_solver_)) {
      // Special assignment preference parameters: 0 -> objective-driven,
      // 1 -> LP-driven (when LP values are available), 2-3 -> none.
      const int preference = random_.Uniform(4);
      if (preference == 0) {
        UseObjectiveForSatAssignmentPreference(*problem_, &sat_solver_);
      } else if (preference == 1 && !lp_values_.empty()) {
        // Assign SAT assignment preference based on the LP solution.
        for (ColIndex col(0); col < lp_values_.size(); ++col) {
          const double value = lp_values_[col];
          sat_solver_.SetAssignmentPreference(
              sat::Literal(sat::VariableIndex(col.value()), round(value) == 1),
              1 - fabs(value - round(value)));
        }
      }
      const sat::SatSolver::Status sat_status =
          sat_solver_.SolveWithTimeLimit(&local_time_limit);
      if (sat_status == sat::SatSolver::MODEL_SAT) {
        SatAssignmentToBopSolution(sat_solver_.Assignment(),
                                   &learned_info->solution);
        // The objective constraint above guarantees strict improvement.
        CHECK_LT(learned_info->solution.GetCost(), best_cost);
        best_cost = learned_info->solution.GetCost();
      } else if (sat_status == sat::SatSolver::MODEL_UNSAT) {
        // No strictly better solution exists: the current one is proved
        // optimal (if any).
        learned_info->lower_bound = best_cost;
        return best_cost == kint64max
                   ? BopOptimizerBase::INFEASIBLE
                   : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
      }
    } else {
      // The "strictly better" objective constraint is trivially UNSAT: the
      // solution is proved optimal (if any).
      learned_info->lower_bound = best_cost;
      return best_cost == kint64max ? BopOptimizerBase::INFEASIBLE
                                    : BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    // The number of failures is a good approximation of the number of
    // conflicts. Note that the number of failures of the SAT solver is not
    // reinitialized.
    remaining_num_conflicts -= sat_solver_.num_failures() - old_num_failures;
  }

  ExtractLearnedInfoFromSatSolver(&sat_solver_, learned_info);
  time_limit->AdvanceDeterministicTime(
      local_time_limit.GetElapsedDeterministicTime());
  return learned_info->solution.IsFeasible() &&
                 (!initial_solution_->IsFeasible() ||
                  learned_info->solution.GetCost() <
                      initial_solution_->GetCost())
             ? BopOptimizerBase::SOLUTION_FOUND
             : BopOptimizerBase::LIMIT_REACHED;
}
//------------------------------------------------------------------------------
// LinearRelaxation
//------------------------------------------------------------------------------
// Note: the LP model is built lazily in Synchronize(). num_fixed_variables_
// starts at -1 so the first Synchronize() always sees an increase and solves.
LinearRelaxation::LinearRelaxation(const BopParameters& parameters,
                                   const std::string& name,
                                   double time_limit_ratio)
    : BopOptimizerBase(name),
      parameters_(parameters),
      time_limit_ratio_(time_limit_ratio),
      lp_model_loaded_(false),
      lp_model_(),
      lp_solver_(),
      scaling_(1),
      offset_(0),
      num_fixed_variables_(-1),
      problem_already_solved_(false),
      scaled_solution_cost_(glop::kInfinity) {}

LinearRelaxation::~LinearRelaxation() {}
// Rebuilds / updates the LP model from the problem state: fixed variables
// become tight bounds and newly learned binary clauses become linear
// constraints. Aborts when nothing changed since the last solve.
BopOptimizerBase::Status LinearRelaxation::Synchronize(
    const ProblemState& problem_state) {
  // Check if the number of fixed variables is greater than last time.
  // TODO(user): Consider checking changes in number of conflicts too.
  int num_fixed_variables = 0;
  for (const bool is_fixed : problem_state.is_fixed()) {
    if (is_fixed) {
      ++num_fixed_variables;
    }
  }
  problem_already_solved_ =
      problem_already_solved_ && num_fixed_variables_ >= num_fixed_variables;
  if (problem_already_solved_) return BopOptimizerBase::ABORT;

  // Create the LP model based on the current problem state.
  num_fixed_variables = num_fixed_variables;
  num_fixed_variables_ = num_fixed_variables;
  if (!lp_model_loaded_) {
    lp_model_.Clear();
    sat::ConvertBooleanProblemToLinearProgram(problem_state.original_problem(),
                                              &lp_model_);
    lp_model_loaded_ = true;
  }
  // Propagate the fixed variables to the LP as tight bounds.
  for (VariableIndex var(0); var < problem_state.is_fixed().size(); ++var) {
    if (problem_state.IsVariableFixed(var)) {
      const glop::Fractional value =
          problem_state.GetVariableFixedValue(var) ? 1.0 : 0.0;
      lp_model_.SetVariableBounds(ColIndex(var.value()), value, value);
    }
  }

  // Add learned binary clauses. A clause (a or b) is encoded as the linear
  // constraint coefficient_a * a + coefficient_b * b >= rhs, where a negated
  // literal flips the coefficient sign and decreases the rhs by one.
  if (parameters_.use_learned_binary_clauses_in_lp()) {
    for (const sat::BinaryClause& clause :
         problem_state.NewlyAddedBinaryClauses()) {
      const RowIndex constraint_index = lp_model_.CreateNewConstraint();
      const int64 coefficient_a = clause.a.IsPositive() ? 1 : -1;
      const int64 coefficient_b = clause.b.IsPositive() ? 1 : -1;
      const int64 rhs = 1 + (clause.a.IsPositive() ? 0 : -1) +
                        (clause.b.IsPositive() ? 0 : -1);
      const ColIndex col_a(clause.a.Variable().value());
      const ColIndex col_b(clause.b.Variable().value());
      lp_model_.SetConstraintName(
          constraint_index,
          StringPrintf((clause.a.IsPositive() ? "%s" : "not(%s)"),
                       lp_model_.GetVariableName(col_a).c_str()) +
              " or " + StringPrintf((clause.b.IsPositive() ? "%s" : "not(%s)"),
                                    lp_model_.GetVariableName(col_b).c_str()));
      lp_model_.SetCoefficient(constraint_index, col_a, coefficient_a);
      lp_model_.SetCoefficient(constraint_index, col_b, coefficient_b);
      lp_model_.SetConstraintBounds(constraint_index, rhs, glop::kInfinity);
    }
  }

  scaling_ = problem_state.original_problem().objective().scaling_factor();
  offset_ = problem_state.original_problem().objective().offset();
  // When no feasible solution is known, use the worst possible cost for the
  // current optimization direction.
  scaled_solution_cost_ =
      problem_state.solution().IsFeasible()
          ? problem_state.solution().GetScaledCost()
          : (lp_model_.IsMaximizationProblem() ? -glop::kInfinity
                                               : glop::kInfinity);
  return BopOptimizerBase::CONTINUE;
}
// Solves the LP relaxation once. On an OPTIMAL LP status, derives an integer
// lower bound (optionally tightened by strong branching) and, if the LP
// solution happens to be integral, returns it as an optimal solution.
BopOptimizerBase::Status LinearRelaxation::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  if (problem_already_solved_) return BopOptimizerBase::ABORT;

  const glop::ProblemStatus lp_status = Solve(false, time_limit);
  // NOTE(review): unconditional LOG(INFO) in an optimizer loop may be
  // noisy — consider VLOG; confirm intended verbosity.
  LOG(INFO) << " LP: "
            << StringPrintf("%.6f", lp_solver_.GetObjectiveValue())
            << " status: " << GetProblemStatusString(lp_status);
  problem_already_solved_ = true;

  if (lp_status != glop::ProblemStatus::OPTIMAL &&
      lp_status != glop::ProblemStatus::IMPRECISE &&
      lp_status != glop::ProblemStatus::PRIMAL_FEASIBLE) {
    return BopOptimizerBase::ABORT;
  }
  learned_info->lp_values = lp_solver_.variable_values();

  if (lp_status == glop::ProblemStatus::OPTIMAL) {
    // The lp returns the objective with the offset and scaled, so we need to
    // unscale it and then remove the offset.
    double lower_bound = lp_solver_.GetObjectiveValue();
    if (parameters_.use_lp_strong_branching()) {
      lower_bound =
          ComputeLowerBoundUsingStrongBranching(learned_info, time_limit);
      LOG(INFO) << " LP: "
                << StringPrintf("%.6f", lower_bound)
                << " using strong branching.";
    }

    // Relax the bound by the feasibility tolerance in the safe direction
    // (which depends on the sign of the scaling factor) before rounding up,
    // so that LP rounding errors cannot cut off the true optimum.
    const int tolerance_sign = scaling_ < 0 ? 1 : -1;
    const double unscaled_cost =
        (lower_bound +
         tolerance_sign *
             lp_solver_.GetParameters().solution_feasibility_tolerance()) /
            scaling_ -
        offset_;
    learned_info->lower_bound = static_cast<int64>(ceil(unscaled_cost));

    if (AllIntegralValues(
            learned_info->lp_values,
            lp_solver_.GetParameters().primal_feasibility_tolerance())) {
      DenseRowToBopSolution(learned_info->lp_values, &learned_info->solution);
      CHECK(learned_info->solution.IsFeasible());
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
  }
  return BopOptimizerBase::LIMIT_REACHED;
}
// TODO(user): It is possible to stop the search earlier using the glop
// parameter objective_lower_limit / objective_upper_limit. That
// can be used when a feasible solution is known, or when the false
// best bound is computed.
// TODO(user): It is possible to stop the search earlier using the glop
//              parameter objective_lower_limit / objective_upper_limit. That
//              can be used when a feasible solution is known, or when the
//              false best bound is computed.
glop::ProblemStatus LinearRelaxation::Solve(bool incremental_solve,
                                            TimeLimit* time_limit) {
  GlopParameters params;
  // Incremental solves reuse the previous basis: dual simplex without
  // preprocessing, while still allowing the algorithm to switch.
  if (incremental_solve) {
    params.set_use_dual_simplex(true);
    params.set_allow_simplex_algorithm_change(true);
    params.set_use_preprocessing(false);
  }
  // Only a time_limit_ratio_ fraction of the remaining budget goes to Glop.
  params.set_max_time_in_seconds(
      std::min(time_limit->GetTimeLeft(),
               time_limit_ratio_ * time_limit->GetTimeLeft()));
  params.set_max_deterministic_time(
      std::min(time_limit->GetDeterministicTimeLeft(),
               time_limit_ratio_ * time_limit->GetDeterministicTimeLeft()));
  lp_solver_.SetParameters(params);

  const double deterministic_time_before = lp_solver_.DeterministicTime();
  const glop::ProblemStatus status = lp_solver_.Solve(lp_model_);
  time_limit->AdvanceDeterministicTime(lp_solver_.DeterministicTime() -
                                       deterministic_time_before);
  return status;
}
// For each non-fixed variable, solves the LP with the variable forced to 1
// and then to 0, and uses the worse of the two objectives to tighten the
// global LP bound. Variables whose branch is provably worse than the current
// solution are fixed to the other value and reported in learned_info.
double LinearRelaxation::ComputeLowerBoundUsingStrongBranching(
    LearnedInfo* learned_info, TimeLimit* time_limit) {
  const glop::DenseRow initial_lp_values = lp_solver_.variable_values();
  const double tolerance =
      lp_solver_.GetParameters().primal_feasibility_tolerance();
  double best_lp_objective = lp_solver_.GetObjectiveValue();
  for (glop::ColIndex col(0); col < initial_lp_values.size(); ++col) {
    // TODO(user): Order the variables by some meaningful quantity (probably
    //             the cost variation when we snap it to one of its bound) so
    //             we can try the one that seems the most promising first.
    //             That way we can stop the strong branching earlier.
    if (time_limit->LimitReached()) break;

    // Skip fixed variables.
    if (lp_model_.variable_lower_bounds()[col] ==
        lp_model_.variable_upper_bounds()[col]) {
      continue;
    }
    // All non-fixed variables are expected to be booleans in [0, 1].
    CHECK_EQ(0.0, lp_model_.variable_lower_bounds()[col]);
    CHECK_EQ(1.0, lp_model_.variable_upper_bounds()[col]);

    // Note(user): Experiments show that iterating on all variables can be
    // costly and doesn't lead to better solutions when a SAT optimizer is used
    // afterward, e.g. BopSatLpFirstSolutionGenerator, and no feasible
    // solutions are available.
    // No variables are skipped when a feasible solution is known as the best
    // bound / cost comparison can be used to deduce fixed variables, and be
    // useful for other optimizers.
    if ((scaled_solution_cost_ == glop::kInfinity ||
         scaled_solution_cost_ == -glop::kInfinity) &&
        (initial_lp_values[col] < tolerance ||
         initial_lp_values[col] + tolerance > 1)) {
      continue;
    }

    double objective_true = best_lp_objective;
    double objective_false = best_lp_objective;

    // Set to true.
    lp_model_.SetVariableBounds(col, 1.0, 1.0);
    const glop::ProblemStatus status_true = Solve(true, time_limit);
    // TODO(user): Deal with PRIMAL_INFEASIBLE, DUAL_INFEASIBLE and
    //             INFEASIBLE_OR_UNBOUNDED statuses. In all cases, if the
    //             original lp was feasible, this means that the variable can
    //             be fixed to the other bound.
    if (status_true == glop::ProblemStatus::OPTIMAL ||
        status_true == glop::ProblemStatus::DUAL_FEASIBLE) {
      objective_true = lp_solver_.GetObjectiveValue();

      // Set to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      const glop::ProblemStatus status_false = Solve(true, time_limit);
      if (status_false == glop::ProblemStatus::OPTIMAL ||
          status_false == glop::ProblemStatus::DUAL_FEASIBLE) {
        objective_false = lp_solver_.GetObjectiveValue();

        // Compute the new min: one of the two branches must hold, so the
        // bound can move to the worse of the two branch objectives.
        best_lp_objective =
            lp_model_.IsMaximizationProblem()
                ? std::min(best_lp_objective,
                           std::max(objective_true, objective_false))
                : std::max(best_lp_objective,
                           std::min(objective_true, objective_false));
      }
    }

    if (CostIsWorseThanSolution(objective_true, tolerance)) {
      // Having variable col set to true can't possibly lead to any better
      // solution than the current one. Set the variable to false.
      lp_model_.SetVariableBounds(col, 0.0, 0.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::VariableIndex(col.value()), false));
    } else if (CostIsWorseThanSolution(objective_false, tolerance)) {
      // Having variable col set to false can't possibly lead to any better
      // solution than the current one. Set the variable to true.
      lp_model_.SetVariableBounds(col, 1.0, 1.0);
      learned_info->fixed_literals.push_back(
          sat::Literal(sat::VariableIndex(col.value()), true));
    } else {
      // Unset. This is safe to use 0.0 and 1.0 as the variable is not fixed.
      lp_model_.SetVariableBounds(col, 0.0, 1.0);
    }
  }
  return best_lp_objective;
}
// Returns true when 'scaled_cost' is provably worse than the cost of the
// current solution. "Worse" depends on the optimization direction, and the
// tolerance absorbs LP rounding errors.
bool LinearRelaxation::CostIsWorseThanSolution(double scaled_cost,
                                               double tolerance) const {
  if (lp_model_.IsMaximizationProblem()) {
    return scaled_cost + tolerance < scaled_solution_cost_;
  }
  return scaled_cost > scaled_solution_cost_ + tolerance;
}
} // namespace bop
} // namespace operations_research

157
src/bop/bop_fs.h Normal file
View File

@@ -0,0 +1,157 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_FS_H_
#define OR_TOOLS_BOP_BOP_FS_H_
#include <string>
#include "base/basictypes.h"
#include "base/integral_types.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/int_type_indexed_vector.h"
#include "base/int_type.h"
#include "bop/bop_base.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "bop/bop_util.h"
#include "glop/lp_solver.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_solver.h"
#include "util/time_limit.h"
namespace operations_research {
namespace bop {
// Optimizer that uses a SAT solver to produce a first solution; it does not
// require an initial feasible solution (NeedAFeasibleSolution() is false) and
// may be run repeatedly (RunOncePerSolution() is false). See bop_fs.cc for the
// exact search strategy.
class BopSatObjectiveFirstSolutionGenerator : public BopOptimizerBase {
 public:
  BopSatObjectiveFirstSolutionGenerator(const std::string& name,
                                        double time_limit_ratio);
  virtual ~BopSatObjectiveFirstSolutionGenerator();
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return false; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
 private:
  // Fraction of the remaining global time this optimizer may use per run.
  double time_limit_ratio_;
  // True until the first solve happened — TODO confirm exact use in bop_fs.cc.
  bool first_solve_;
  sat::SatSolver sat_solver_;
  // Objective bounds maintained across Synchronize()/Optimize() calls.
  int64 lower_bound_;
  int64 upper_bound_;
  bool problem_already_solved_;
};
// Optimizer that uses a SAT solver guided by LP relaxation values (see
// lp_values_) to produce a first solution. It does not require an initial
// feasible solution and may be run repeatedly.
class BopSatLpFirstSolutionGenerator : public BopOptimizerBase {
 public:
  BopSatLpFirstSolutionGenerator(const std::string& name, double time_limit_ratio);
  virtual ~BopSatLpFirstSolutionGenerator();
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return false; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
 private:
  // Fraction of the remaining global time this optimizer may use per run.
  double time_limit_ratio_;
  bool first_solve_;
  sat::SatSolver sat_solver_;
  // Objective bounds maintained across Synchronize()/Optimize() calls.
  int64 lower_bound_;
  int64 upper_bound_;
  // Relaxed (possibly fractional) variable values from the LP relaxation.
  glop::DenseRow lp_values_;
};
// This class implements an optimizer that looks for a better solution than the
// initial solution, if any, by randomly generating new solutions using the
// SAT solver.
// It can be used to both generate a first solution and improve an existing one.
// TODO(user): Coupled with some Local Search it might be use to diversify
// the solutions. To try.
class BopRandomFirstSolutionGenerator : public BopOptimizerBase {
 public:
  BopRandomFirstSolutionGenerator(int random_seed, const std::string& name,
                                  double time_limit_ratio);
  virtual ~BopRandomFirstSolutionGenerator();
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return false; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
 private:
  // Fraction of the remaining global time this optimizer may use per run.
  double time_limit_ratio_;
  // Problem and solution state captured by Synchronize(); not owned.
  const LinearBooleanProblem* problem_;
  std::unique_ptr<BopSolution> initial_solution_;
  // Relaxed LP values, presumably used to bias the random assignment —
  // TODO confirm against bop_fs.cc.
  glop::DenseRow lp_values_;
  MTRandom random_;
  sat::SatSolver sat_solver_;
  // Seed used to randomize the SAT solver between runs.
  uint32 sat_seed_;
  bool first_solve_;
};
// This class computes the linear relaxation of the state problem.
// Optimize() fills the relaxed values (possibly floating values) that can be
// used by other optimizers as BopSatLpFirstSolutionGenerator for instance,
// and the lower bound.
class LinearRelaxation : public BopOptimizerBase {
 public:
  LinearRelaxation(const BopParameters& parameters, const std::string& name,
                   double time_limit_ratio);
  virtual ~LinearRelaxation();
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return false; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
 private:
  // Runs Glop to solve the current lp_model_.
  // Updates the time limit and returns the status of the solve.
  // Note that when the solve is incremental, the preprocessor is deactivated,
  // and the dual simplex is used.
  glop::ProblemStatus Solve(bool incremental_solve, TimeLimit* time_limit);
  // Computes and returns a better best bound using strong branching, i.e.
  // doing a what-if analysis on each variable v: compute the best bound when
  // v is assigned to true, compute the best bound when v is assigned to false,
  // and then use those best bounds to improve the overall best bound.
  // As a side effect, it might fix some variables.
  double ComputeLowerBoundUsingStrongBranching(LearnedInfo* learned_info,
                                               TimeLimit* time_limit);
  // Returns true when the cost is worse than the cost of the current solution.
  // If they are within the given tolerance, returns false.
  bool CostIsWorseThanSolution(double scaled_cost, double tolerance) const;
  const BopParameters parameters_;
  // Fraction of the remaining global time this optimizer may use per run.
  const double time_limit_ratio_;
  // True once lp_model_ has been built from the problem state.
  bool lp_model_loaded_;
  glop::LinearProgram lp_model_;
  glop::LPSolver lp_solver_;
  // Scaling factor and offset mapping LP objective values back to the
  // integer objective of the Boolean problem.
  double scaling_;
  double offset_;
  int num_fixed_variables_;
  bool problem_already_solved_;
  // Cost of the current best solution, in the scaled LP objective space.
  double scaled_solution_cost_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_FS_H_

673
src/bop/bop_lns.cc Normal file
View File

@@ -0,0 +1,673 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_lns.h"

#include <algorithm>
#include <string>
#include <vector>

#include "base/commandlineflags.h"
#include "base/stringprintf.h"
#include "google/protobuf/text_format.h"
#include "base/stl_util.h"
#include "glop/lp_solver.h"
#include "lp_data/lp_print_utils.h"
#include "sat/boolean_problem.h"
#include "sat/lp_utils.h"
#include "sat/sat_solver.h"
#include "util/bitset.h"
using operations_research::glop::ColIndex;
using operations_research::glop::DenseRow;
using operations_research::glop::GlopParameters;
using operations_research::glop::LinearProgram;
using operations_research::glop::LPSolver;
using operations_research::LinearBooleanProblem;
using operations_research::LinearBooleanConstraint;
using operations_research::LinearObjective;
namespace operations_research {
namespace bop {
namespace {
// Creates and solves a LP model which enforces a solution with a better cost,
// and minimizes the distance to the initial solution.
// Builds in 'lp' a model with 2 * num_variables [0,1] variables:
//   - columns [0, n): one LP variable per Boolean variable of 'problem';
//   - columns [n, 2n): one "agreement" indicator per variable, constrained to
//     be 1 exactly when the corresponding LP variable matches its value in
//     'initial_solution'.
// The problem constraints are translated directly (a negative literal -x is
// rewritten as 1 - x, which shifts the constraint bounds by its coefficient),
// the objective is turned into a constraint forcing a cost strictly better
// than 'initial_solution' (bound initial_cost - offset - 1, valid because the
// objective is integral), and the LP objective maximizes the sum of the
// agreement indicators, i.e. minimizes the Hamming distance to the initial
// solution.
void CreateLinearProgramFromCurrentBopSolution(
    const LinearBooleanProblem& problem, const BopSolution& initial_solution,
    glop::LinearProgram* lp) {
  const int64 initial_cost = initial_solution.GetCost();
  lp->Clear();
  // First n columns mirror the problem variables, last n columns are the
  // agreement indicators.
  for (int i = 0; i < 2 * problem.num_variables(); ++i) {
    const glop::ColIndex col = lp->CreateNewVariable();
    lp->SetVariableBounds(col, 0.0, 1.0);
  }
  glop::RowIndex constraint_index(0);
  for (const LinearBooleanConstraint& constraint : problem.constraints()) {
    // 'sum' accumulates the constant part introduced by rewriting each
    // negative literal -x as (1 - x); the bounds are shifted by it below.
    double sum = 0.0;
    for (int i = 0; i < constraint.literals_size(); ++i) {
      const int literal = constraint.literals(i);
      const double coeff = constraint.coefficients(i);
      const glop::ColIndex variable_index = glop::ColIndex(abs(literal) - 1);
      if (literal < 0) {
        sum += coeff;
        lp->SetCoefficient(constraint_index, variable_index, -coeff);
      } else {
        lp->SetCoefficient(constraint_index, variable_index, coeff);
      }
    }
    lp->SetConstraintBounds(
        constraint_index,
        constraint.has_lower_bound() ? constraint.lower_bound() - sum
                                     : -glop::kInfinity,
        constraint.has_upper_bound() ? constraint.upper_bound() - sum
                                     : glop::kInfinity);
    ++constraint_index;
  }
  // Encode the objective as one more constraint, using the same negative
  // literal rewriting as above.
  const LinearObjective& objective = problem.objective();
  double offset = objective.offset();
  for (int i = 0; i < objective.literals_size(); ++i) {
    const int literal = objective.literals(i);
    const double coeff = objective.coefficients(i);
    const glop::ColIndex variable_index = glop::ColIndex(abs(literal) - 1);
    if (literal < 0) {
      offset += coeff;
      lp->SetCoefficient(constraint_index, variable_index, -coeff);
    } else {
      lp->SetCoefficient(constraint_index, variable_index, coeff);
    }
  }
  // Force a strictly better cost than the initial solution.
  const int64 objective_bound = initial_cost - offset - 1;
  lp->SetConstraintBounds(constraint_index, -glop::kInfinity, objective_bound);
  ++constraint_index;
  // Add constraint for each new variable: tie indicator y to variable x so
  // that y == 1 iff x equals its value in the initial solution
  // (x - y == 0 when the solution value is true, x + y == 1 when false).
  for (glop::ColIndex var_id(problem.num_variables());
       var_id < 2 * problem.num_variables(); ++var_id) {
    const glop::ColIndex lp_var_id(var_id - problem.num_variables());
    const VariableIndex bop_var_id(lp_var_id.value());
    const bool solution_value = initial_solution.Value(bop_var_id);
    if (solution_value) {
      lp->SetCoefficient(constraint_index, lp_var_id, 1);
      lp->SetCoefficient(constraint_index, var_id, -1);
      lp->SetConstraintBounds(constraint_index, 0, 0);
    } else {
      lp->SetCoefficient(constraint_index, lp_var_id, 1);
      lp->SetCoefficient(constraint_index, var_id, 1);
      lp->SetConstraintBounds(constraint_index, 1, 1);
    }
    ++constraint_index;
  }
  // Objective: maximize the number of variables matching the initial
  // solution, i.e. minimize the distance to it.
  for (glop::ColIndex var_id(problem.num_variables());
       var_id < 2 * problem.num_variables(); ++var_id) {
    lp->SetObjectiveCoefficient(var_id, 1);
  }
  lp->SetMaximizationProblem(true);
}
void ComputeVariablesToFixFromToRelax(
const BopSolution& initial_solution,
const ITIVector<SparseIndex, BopConstraintTerm>& objective_terms,
const SparseBitset<VariableIndex>& to_relax, std::vector<sat::Literal>* output) {
output->clear();
for (BopConstraintTerm term : objective_terms) {
if (!to_relax[term.var_id] &&
((initial_solution.Value(term.var_id) && term.weight < 0) ||
(!initial_solution.Value(term.var_id) && term.weight > 0))) {
output->push_back(sat::Literal(sat::VariableIndex(term.var_id.value()),
initial_solution.Value(term.var_id)));
}
}
}
// Outcome of an attempt to pick variables to fix for an LNS neighbourhood.
enum DifficultyType {
  OK,                // A usable set of variables to fix was computed.
  NOTHING_FIXED,     // No variable could be fixed (target difficulty too high).
  EVERYTHING_FIXED,  // The target leaves nothing to relax (difficulty too low).
  UNSAT,             // The underlying problem was proved infeasible.
};
// Uses the SAT solver to decide, in the given random order, which literals to
// fix so that roughly target_difficulty * random_order.size() of the marked
// variables (those true in 'is_in_random_order') remain free. On success,
// 'output' receives the whole resulting trail (decisions and propagations)
// and the solver is backtracked to level 0 — but its phase saving keeps the
// decisions "warm" for an immediate re-solve.
DifficultyType ComputeVariablesToFixFromSat(
    const std::vector<bool>& is_in_random_order,
    const std::vector<sat::Literal>& random_order, double target_difficulty,
    sat::SatSolver* sat_solver, std::vector<sat::Literal>* output) {
  // We will use the sat solver to fix some variables of 'random_order' as
  // long as the number of propagated variables marked in is_in_random_order
  // stays above the number we want to relax.
  sat_solver->Backtrack(0);
  int num_relaxed_variables = random_order.size();
  const int target_num_relaxed_variables =
      round(target_difficulty * random_order.size());
  if (target_num_relaxed_variables == 0) {
    return DifficultyType::EVERYTHING_FIXED;
  }
  // Remove fixed variables from the picture.
  for (int i = 0; i < sat_solver->LiteralTrail().Index(); ++i) {
    if (is_in_random_order[sat_solver->LiteralTrail()[i].Variable().value()]) {
      --num_relaxed_variables;
    }
  }
  for (int i = 0; i < random_order.size(); ++i) {
    if (sat_solver->Assignment().IsVariableAssigned(
            random_order[i].Variable())) {
      continue;
    }
    // Note that since we only enqueue variable from a feasible solution,
    // they can only lead to a conflict because of the new objective
    // constraint...
    const int current_decision_level = sat_solver->CurrentDecisionLevel();
    int first_trail_index =
        sat_solver->EnqueueDecisionAndBackjumpOnConflict(random_order[i]);
    if (first_trail_index == sat::kUnsatTrailIndex) {
      return DifficultyType::UNSAT;
    }
    if (sat_solver->CurrentDecisionLevel() <= current_decision_level) {
      // There was a conflict and the solver backjumped; the trail content
      // below first_trail_index may have changed, so recompute
      // num_relaxed_variables from scratch over the whole trail.
      num_relaxed_variables = random_order.size();
      first_trail_index = 0;
    }
    // Account for everything assigned (decision + propagations) since
    // first_trail_index.
    for (int index = first_trail_index;
         index < sat_solver->LiteralTrail().Index(); ++index) {
      if (is_in_random_order
              [sat_solver->LiteralTrail()[index].Variable().value()]) {
        --num_relaxed_variables;
      }
    }
    if (num_relaxed_variables == target_num_relaxed_variables) break;
    if (num_relaxed_variables < target_num_relaxed_variables) {
      // Overshot the target: undo the last decision and stop.
      sat_solver->Backtrack(std::max(0, sat_solver->CurrentDecisionLevel() - 1));
      break;
    }
  }
  // When the SAT decision level is zero it means that we didn't fix any
  // variables. This happens when the target_difficulty is too high.
  if (sat_solver->CurrentDecisionLevel() == 0) {
    sat_solver->Backtrack(0);
    return DifficultyType::NOTHING_FIXED;
  }
  output->clear();
  for (int i = 0; i < sat_solver->LiteralTrail().Index(); ++i) {
    output->push_back(sat_solver->LiteralTrail()[i]);
  }
  sat_solver->Backtrack(0);
  return DifficultyType::OK;
}
// Solves the LNS problem obtained by fixing 'fixed_variables' in 'problem'
// with a fresh SAT solver, looking for a solution strictly better than
// 'initial_solution'. On success, fills 'solution' and returns
// SOLUTION_FOUND; otherwise returns LIMIT_REACHED (which also covers the
// "proved no better solution in the neighbourhood" case). Always reports the
// number of conflicts used in 'num_conflicts_used' and charges the
// deterministic time to 'time_limit'.
BopOptimizerBase::Status SolveLNSProblem(
    const LinearBooleanProblem& problem, const BopSolution& initial_solution,
    const BopParameters& bop_parameters,
    const std::vector<sat::Literal>& fixed_variables, BopSolution* solution,
    TimeLimit* time_limit, int* num_conflicts_used) {
  CHECK(solution != nullptr);
  CHECK(time_limit != nullptr);
  const int64 initial_cost = initial_solution.GetCost();
  sat::SatParameters sat_parameters;
  sat_parameters.set_max_number_of_conflicts(
      bop_parameters.max_number_of_conflicts_in_random_lns());
  sat::SatSolver sat_solver;
  sat_parameters.set_max_time_in_seconds(time_limit->GetTimeLeft());
  sat_parameters.set_max_deterministic_time(
      time_limit->GetDeterministicTimeLeft());
  sat_solver.SetParameters(sat_parameters);
  // Starts by adding the unit clauses to fix the variables.
  sat_solver.SetNumVariables(problem.num_variables());
  for (sat::Literal literal : fixed_variables) {
    CHECK(sat_solver.AddUnitClause(literal));
  }
  // Then load the rest of the problem. Since we fixed variables from a
  // feasible assignment, this should always be true.
  if (!LoadBooleanProblem(problem, &sat_solver)) {
    return BopOptimizerBase::INFEASIBLE;
  }
  UseObjectiveForSatAssignmentPreference(problem, &sat_solver);
  // Add the objective constraint and solve the problem. Note that it is
  // possible that the problem is detected to be unsat as soon as this
  // constraint is added.
  sat::SatSolver::Status sat_status = sat::SatSolver::MODEL_UNSAT;
  if (AddObjectiveUpperBound(problem, sat::Coefficient(initial_cost) - 1,
                             &sat_solver)) {
    sat_status = sat_solver.Solve();
  }
  if (sat_status == sat::SatSolver::MODEL_SAT) {
    SatAssignmentToBopSolution(sat_solver.Assignment(), solution);
  }
  *num_conflicts_used = sat_solver.num_failures();
  time_limit->AdvanceDeterministicTime(sat_solver.deterministic_time());
  return sat_status == sat::SatSolver::MODEL_SAT
             ? BopOptimizerBase::SOLUTION_FOUND
             : BopOptimizerBase::LIMIT_REACHED;
}
// Same as SolveLNSProblem() but reuses an already-loaded incremental SAT
// solver: the fixed variables are enqueued as assumptions (unless
// 'setup_already_done' says they are already on the trail), the solve runs
// with temporary conflict/time limits, and the caller's parameters are
// restored afterwards.
BopOptimizerBase::Status SolveLNSProblemWithSatPropagator(
    const std::vector<sat::Literal>& fixed_variables, bool setup_already_done,
    const BopParameters& bop_parameters, sat::SatSolver* sat_solver,
    BopSolution* solution, TimeLimit* time_limit, int* num_conflicts_used) {
  // Swap in temporary limits; the original parameters are restored below.
  const sat::SatParameters old_parameters = sat_solver->parameters();
  sat::SatParameters new_parameters = old_parameters;
  new_parameters.set_max_number_of_conflicts(
      bop_parameters.max_number_of_conflicts_in_random_lns());
  new_parameters.set_max_time_in_seconds(time_limit->GetTimeLeft());
  new_parameters.set_max_deterministic_time(
      time_limit->GetDeterministicTimeLeft());
  sat_solver->SetParameters(new_parameters);
  if (!setup_already_done) {
    // Adds the assumptions before the Solve().
    sat_solver->Backtrack(0);
    for (sat::Literal literal : fixed_variables) {
      if (!sat_solver->Assignment().IsVariableAssigned(literal.Variable())) {
        const int current_decision_level = sat_solver->CurrentDecisionLevel();
        sat_solver->EnqueueDecisionAndBackjumpOnConflict(literal);
        // Enqueuing a variable from a feasible assignment must not conflict.
        CHECK_GT(sat_solver->CurrentDecisionLevel(), current_decision_level);
      }
    }
  }
  // Solve it.
  const int old_num_conflicts = sat_solver->num_failures();
  sat_solver->SetAssumptionLevel(sat_solver->CurrentDecisionLevel());
  sat::SatSolver::Status sat_status = sat_solver->Solve();
  sat_solver->SetAssumptionLevel(0);
  if (sat_status == sat::SatSolver::MODEL_SAT) {
    SatAssignmentToBopSolution(sat_solver->Assignment(), solution);
  }
  *num_conflicts_used = sat_solver->num_failures() - old_num_conflicts;
  sat_solver->SetParameters(old_parameters);
  return sat_status == sat::SatSolver::MODEL_SAT
             ? BopOptimizerBase::SOLUTION_FOUND
             : BopOptimizerBase::LIMIT_REACHED;
}
} // anonymous namespace.
//------------------------------------------------------------------------------
// SatPropagator
//------------------------------------------------------------------------------
// Keeps a reference to 'problem' (which must outlive this object); the
// problem itself is loaded lazily by LoadBooleanProblem().
SatPropagator::SatPropagator(const LinearBooleanProblem& problem)
    : problem_(problem),
      sat_solver_(),
      problem_loaded_(false),
      propagated_by_(problem.num_variables()) {}
// Loads the problem into the underlying SAT solver, at most once.
// Returns false when loading fails (e.g. the problem is detected UNSAT while
// its clauses are added).
bool SatPropagator::LoadBooleanProblem() {
  if (problem_loaded_) return true;
  if (!sat::LoadBooleanProblem(problem_, &sat_solver_)) {
    // Do not mark the problem as loaded on failure: the original code set the
    // flag before loading, which made every subsequent call return true even
    // though the solver was never successfully loaded.
    return false;
  }
  problem_loaded_ = true;
  UseObjectiveForSatAssignmentPreference(problem_, &sat_solver_);
  return true;
}
bool SatPropagator::OverConstrainObjective(
const BopSolution& current_solution) {
if (sat_solver_.CurrentDecisionLevel() > 0) {
sat_solver_.Backtrack(0);
}
return AddObjectiveUpperBound(
problem_, sat::Coefficient(current_solution.GetCost() - 1), &sat_solver_);
}
// Records that deciding 'decision_literal' propagates the variable 'var_id'.
// The literals propagating a given variable are kept sorted and
// duplicate-free so a binary search can be used for insertion.
void SatPropagator::AddPropagationRelation(sat::Literal decision_literal,
                                           VariableIndex var_id) {
  std::vector<sat::Literal>& propagators = propagated_by_[var_id];
  // Qualify std::lower_bound explicitly: the original relied on ADL through
  // the iterator's namespace, and bound a const reference to the returned
  // temporary iterator.
  const std::vector<sat::Literal>::iterator iter = std::lower_bound(
      propagators.begin(), propagators.end(), decision_literal);
  if (iter == propagators.end()) {
    // All recorded literals are smaller: append at the end.
    propagators.push_back(decision_literal);
  } else if (iter->Index() != decision_literal.Index()) {
    propagators.insert(iter, decision_literal);
  }
}
//------------------------------------------------------------------------------
// BopCompleteLNSOptimizer
//------------------------------------------------------------------------------
// Keeps a reference to 'objective_terms' and a non-owning pointer to
// 'sat_solver'; both must outlive this optimizer. The problem and initial
// solution are captured later, in Synchronize().
BopCompleteLNSOptimizer::BopCompleteLNSOptimizer(
    const std::string& name, const BopConstraintTerms& objective_terms,
    sat::SatSolver* sat_solver)
    : BopOptimizerBase(name),
      problem_(),
      initial_solution_(),
      sat_solver_(sat_solver),
      objective_terms_(objective_terms) {}
BopCompleteLNSOptimizer::~BopCompleteLNSOptimizer() {}
// Takes a private copy of the problem and of the current solution, so that
// Optimize() can freely append its relaxation constraint to the copy.
BopOptimizerBase::Status BopCompleteLNSOptimizer::Synchronize(
    const ProblemState& problem_state) {
  initial_solution_.reset(new BopSolution(problem_state.solution()));
  problem_ = problem_state.original_problem();
  return BopOptimizerBase::CONTINUE;
}
// Solves a "complete" LNS: instead of fixing variables, it appends to its
// private copy of the problem one constraint forcing all but
// parameters.num_relaxed_vars() of the objective-improving assignments to
// keep their current value, then runs a full SAT solve with no variable
// fixed. Note that this mutates problem_; Synchronize() restores a fresh
// copy before the next run (RunOncePerSolution() is true).
BopOptimizerBase::Status BopCompleteLNSOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  LinearBooleanConstraint* constraint = problem_.add_constraints();
  // One literal per objective term whose current value improves the cost:
  // the constraint counts how many of them keep that value.
  for (BopConstraintTerm term : objective_terms_) {
    if (initial_solution_->Value(term.var_id) && term.weight < 0) {
      constraint->add_literals(term.var_id.value() + 1);
      constraint->add_coefficients(1);
    } else if (!initial_solution_->Value(term.var_id) && term.weight > 0) {
      constraint->add_literals(-(term.var_id.value() + 1));
      constraint->add_coefficients(1);
    }
  }
  // At most num_relaxed_vars of those literals may flip.
  constraint->set_lower_bound(constraint->literals_size() -
                              parameters.num_relaxed_vars());
  // Relax all variables of the objective.
  std::vector<sat::Literal> fixed_variables;
  int num_conflicts_used = 0;
  return SolveLNSProblem(problem_, *initial_solution_, parameters,
                         fixed_variables, &learned_info->solution, time_limit,
                         &num_conflicts_used);
}
//------------------------------------------------------------------------------
// BopRandomLNSOptimizer
//------------------------------------------------------------------------------
// Keeps a reference to 'objective_terms' and a non-owning pointer to
// 'sat_propagator' (checked non-null); both must outlive this optimizer.
BopRandomLNSOptimizer::BopRandomLNSOptimizer(
    const std::string& name, const BopConstraintTerms& objective_terms,
    int random_seed, bool use_sat_to_choose_lns_neighbourhood,
    SatPropagator* sat_propagator)
    : BopOptimizerBase(name),
      problem_(nullptr),
      initial_solution_(),
      use_sat_to_choose_lns_neighbourhood_(use_sat_to_choose_lns_neighbourhood),
      sat_propagator_(sat_propagator),
      objective_terms_(objective_terms),
      random_(random_seed),
      adaptive_difficulty_(),
      to_relax_() {
  CHECK(sat_propagator != nullptr);
}
BopRandomLNSOptimizer::~BopRandomLNSOptimizer() {}
// Captures the current problem state and precomputes literals_ /
// is_in_literals_: one literal per objective term whose current assignment
// improves the cost (true with negative weight, false with positive weight),
// each asserting its value in the current solution. These are the candidate
// variables to fix/relax in the LNS neighbourhoods.
BopOptimizerBase::Status BopRandomLNSOptimizer::Synchronize(
    const ProblemState& problem_state) {
  problem_ = &problem_state.original_problem();
  initial_solution_.reset(new BopSolution(problem_state.solution()));
  literals_.clear();
  is_in_literals_.assign(initial_solution_->Size(), false);
  for (BopConstraintTerm term : objective_terms_) {
    if ((initial_solution_->Value(term.var_id) && term.weight < 0) ||
        (!initial_solution_->Value(term.var_id) && term.weight > 0)) {
      is_in_literals_[term.var_id.value()] = true;
      literals_.push_back(sat::Literal(sat::VariableIndex(term.var_id.value()),
                                       initial_solution_->Value(term.var_id)));
    }
  }
  // Restart the adaptive difficulty for the new solution.
  adaptive_difficulty_.Reset();
  return BopOptimizerBase::CONTINUE;
}
// Uses the shared SAT propagator to pick the variables to fix for the next
// LNS neighbourhood. On a successful pick (DifficultyType::OK) it immediately
// attempts a quick solve with a very low conflict limit; a LIMIT_REACHED
// return then tells the caller to re-solve the neighbourhood with the normal
// limit, while CONTINUE means "skip this try".
BopOptimizerBase::Status BopRandomLNSOptimizer::GenerateProblemUsingSat(
    const BopSolution& initial_solution, double target_difficulty,
    TimeLimit* time_limit, BopParameters* parameters, BopSolution* solution,
    std::vector<sat::Literal>* fixed_variables) {
  sat::SatSolver* sat_solver = sat_propagator_->sat_solver();
  const DifficultyType type = ComputeVariablesToFixFromSat(
      is_in_literals_, literals_, target_difficulty, sat_solver,
      fixed_variables);
  VLOG(1) << "num_decisions: " << sat_solver->CurrentDecisionLevel();
  switch (type) {
    case DifficultyType::NOTHING_FIXED:
      // Our adaptive parameters strategy resulted in an LNS problem where
      // nothing is fixed. Better to abort the LNS and use the full SAT solver
      // with no conflicts limit.
      LOG(INFO) << "Aborting LNS.";
      return BopOptimizerBase::ABORT;
    case DifficultyType::EVERYTHING_FIXED:
      // We can't solve anything. In this case we double the base number of
      // conflicts. This heuristic allows to solve "protfold.mps" to optimality.
      if (adaptive_difficulty_.BoostLuby()) {
        LOG(INFO) << "Aborting LNS (boost too high).";
        return BopOptimizerBase::ABORT;
      }
      return BopOptimizerBase::CONTINUE;
    case DifficultyType::UNSAT:
      LOG(INFO) << "Aborting LNS (the current problem has been proved UNSAT).";
      return BopOptimizerBase::INFEASIBLE;
    case DifficultyType::OK:
      // Since everything is already set-up, we try the sat_solver with
      // a really low conflict limit. This allow to quickly skip over UNSAT
      // cases without the costly new problem setup.
      int num_conflicts_used = 0;
      parameters->set_max_number_of_conflicts_in_random_lns(
          parameters->max_number_of_conflicts_for_quick_check());
      return SolveLNSProblemWithSatPropagator(
          *fixed_variables, /*setup_already_done=*/true, *parameters,
          sat_solver, solution, time_limit, &num_conflicts_used);
  }
  // Not supposed to be here as the switch is exhaustive.
  return BopOptimizerBase::ABORT;
}
// Picks the variables to fix for the next LNS neighbourhood without the SAT
// propagator: the first target_difficulty * literals_.size() literals of the
// (already shuffled) literals_ are relaxed, together with everything their
// decisions are known to propagate. Returns LIMIT_REACHED as the "problem
// generated, go solve it" signal (see the caller in Optimize()), CONTINUE to
// skip this try, and ABORT when the target leaves nothing fixed.
BopOptimizerBase::Status BopRandomLNSOptimizer::GenerateProblem(
    const BopSolution& initial_solution, double target_difficulty,
    TimeLimit* time_limit, std::vector<sat::Literal>* fixed_variables) {
  const int target_parameter = round(target_difficulty * literals_.size());
  if (target_parameter == literals_.size()) {
    // Everything would be relaxed: nothing is fixed, abort the LNS.
    LOG(INFO) << "Aborting LNS.";
    return BopOptimizerBase::ABORT;
  } else if (target_parameter == 0) {
    // Nothing would be relaxed; boost the conflict budget and retry.
    if (adaptive_difficulty_.BoostLuby()) {
      LOG(INFO) << "Aborting LNS (boost too high).";
      return BopOptimizerBase::ABORT;
    }
    return BopOptimizerBase::CONTINUE;
  }
  to_relax_.ClearAndResize(VariableIndex(initial_solution.Size()));
  for (int i = 0; i < target_parameter; ++i) {
    const VariableIndex var(literals_[i].Variable().value());
    RelaxVariable(var, initial_solution);
  }
  ComputeVariablesToFixFromToRelax(initial_solution, objective_terms_,
                                   to_relax_, fixed_variables);
  return BopOptimizerBase::LIMIT_REACHED;
}
// Main LNS loop: repeatedly shuffles the candidate literals, generates a
// neighbourhood (via SAT propagation or directly), and solves it with a
// conflict limit scaled by a Luby sequence, adapting the neighbourhood size
// to how hard the solves turn out to be. Stops at the first improving
// solution, on an abort/infeasibility signal, or when a limit is reached.
BopOptimizerBase::Status BopRandomLNSOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  // For the SAT conflicts limit of each LNS, we follow a luby sequence times
  // the base number of conflicts (num_conflicts_). Note that the numbers of the
  // Luby sequence are always power of two.
  //
  // We dynamically change the size of the neighbourhood depending on the
  // difficulty of the problem. There is one "target" difficulty for each
  // different numbers in the Luby sequence. Note that the initial value is
  // reused from the last run.
  BopParameters local_parameters = parameters;
  const int default_num_conflicts =
      parameters.max_number_of_conflicts_in_random_lns();
  sat::SatSolver* const sat_solver = sat_propagator_->sat_solver();
  int num_tries = 0;
  while (!time_limit->LimitReached() &&
         num_tries < local_parameters.num_random_lns_tries()) {
    ++num_tries;
    std::random_shuffle(literals_.begin(), literals_.end(), random_);
    // Compute the target problem difficulty.
    adaptive_difficulty_.UpdateLuby();
    const double difficulty = adaptive_difficulty_.GetParameterValue();
    std::vector<sat::Literal> fixed_variables;
    // Charge the deterministic time spent generating the neighbourhood.
    const double initial_deterministic_time = sat_solver->deterministic_time();
    BopOptimizerBase::Status status =
        use_sat_to_choose_lns_neighbourhood_
            ? GenerateProblemUsingSat(*initial_solution_, difficulty,
                                      time_limit, &local_parameters,
                                      &learned_info->solution, &fixed_variables)
            : GenerateProblem(*initial_solution_, difficulty, time_limit,
                              &fixed_variables);
    time_limit->AdvanceDeterministicTime(sat_solver->deterministic_time() -
                                         initial_deterministic_time);
    if (status == BopOptimizerBase::CONTINUE) {
      continue;
    } else if (status == BopOptimizerBase::ABORT ||
               status == BopOptimizerBase::INFEASIBLE) {
      return status;
    }
    bool used_long_limit = false;
    int num_conflicts_used = 0;
    if (status == BopOptimizerBase::LIMIT_REACHED) {
      // The quick check didn't conclude (or GenerateProblem() handed back a
      // neighbourhood): solve it with the Luby-scaled conflict limit.
      used_long_limit = true;
      const int new_num_conflicts =
          adaptive_difficulty_.luby_value() * default_num_conflicts;
      local_parameters.set_max_number_of_conflicts_in_random_lns(
          new_num_conflicts);
      status = SolveLNSProblem(*problem_, *initial_solution_, local_parameters,
                               fixed_variables, &learned_info->solution,
                               time_limit, &num_conflicts_used);
      // Easy neighbourhood: enlarge it next time; hard one: shrink it.
      if (num_conflicts_used < new_num_conflicts / 2) {
        adaptive_difficulty_.IncreaseParameter();
      } else {
        adaptive_difficulty_.DecreaseParameter();
      }
    }
    VLOG(1) << num_tries << " size (" << literals_.size() << ", "
            << objective_terms_.size() << ", " << initial_solution_->Size()
            << ")"
            << " " << difficulty << " fixed (" << fixed_variables.size() << ") "
            << num_conflicts_used << (used_long_limit ? " *" : "");
    if (status == BopOptimizerBase::SOLUTION_FOUND) {
      return BopOptimizerBase::SOLUTION_FOUND;
    }
  }
  return BopOptimizerBase::LIMIT_REACHED;
}
// Marks 'var_id' for relaxation, together with every variable whose decision
// is recorded (in the SatPropagator) as propagating it: relaxing only the
// propagated variable would be pointless if its propagators stayed fixed.
// Note that 'solution' is currently unused.
void BopRandomLNSOptimizer::RelaxVariable(VariableIndex var_id,
                                          const BopSolution& solution) {
  to_relax_.Set(var_id);
  for (const sat::Literal decision : sat_propagator_->propagated_by(var_id)) {
    const VariableIndex decision_var(decision.Variable().value());
    to_relax_.Set(decision_var);
  }
}
//------------------------------------------------------------------------------
// BopRandomConstraintLNSOptimizer
//------------------------------------------------------------------------------
// Keeps a reference to 'objective_terms', which must outlive this optimizer.
// The problem and initial solution are captured in Synchronize().
BopRandomConstraintLNSOptimizer::BopRandomConstraintLNSOptimizer(
    const std::string& name, const BopConstraintTerms& objective_terms,
    int random_seed)
    : BopOptimizerBase(name),
      problem_(nullptr),
      initial_solution_(),
      objective_terms_(objective_terms),
      random_(random_seed),
      to_relax_() {}
BopRandomConstraintLNSOptimizer::~BopRandomConstraintLNSOptimizer() {}
// Captures (non-owning) the current problem and a copy of its best solution.
BopOptimizerBase::Status BopRandomConstraintLNSOptimizer::Synchronize(
    const ProblemState& problem_state) {
  problem_ = &problem_state.original_problem();
  initial_solution_.reset(new BopSolution(problem_state.solution()));
  return BopOptimizerBase::CONTINUE;
}
// LNS whose neighbourhoods are built from random constraints: relaxes all the
// variables of a random subset of constraints (until roughly 10% of the
// variables are relaxed), fixes the remaining objective-improving variables,
// and solves the resulting problem. Stops at the first improving solution or
// when a limit is reached.
BopOptimizerBase::Status BopRandomConstraintLNSOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  std::vector<int> ct_ids(problem_->constraints_size(), 0);
  for (int ct_id = 0; ct_id < problem_->constraints_size(); ++ct_id) {
    ct_ids[ct_id] = ct_id;
  }
  int num_tries = 0;
  while (!time_limit->LimitReached() &&
         num_tries < parameters.num_random_lns_tries()) {
    std::random_shuffle(ct_ids.begin(), ct_ids.end(), random_);
    ++num_tries;
    to_relax_.ClearAndResize(VariableIndex(initial_solution_->Size()));
    for (int i = 0; i < ct_ids.size(); ++i) {
      const LinearBooleanConstraint& constraint =
          problem_->constraints(ct_ids[i]);
      for (int j = 0; j < constraint.literals_size(); ++j) {
        // Literals are signed (a negative value is the variable's negation);
        // take abs() to get the variable index, as done everywhere else in
        // this file. The previous code used the raw literal, which would
        // produce a negative VariableIndex for negated literals.
        const VariableIndex var_id(abs(constraint.literals(j)) - 1);
        to_relax_.Set(var_id);
      }
      // TODO(user): Use the auto-adaptive code of the RandomLNS instead of
      // this hard-coded 10% logic.
      const double kHardCodedTenPercent = 0.1;
      if (to_relax_.PositionsSetAtLeastOnce().size() >
          initial_solution_->Size() * kHardCodedTenPercent) {
        break;
      }
    }
    std::vector<sat::Literal> fixed_variables;
    ComputeVariablesToFixFromToRelax(*initial_solution_, objective_terms_,
                                     to_relax_, &fixed_variables);
    // Initialized for consistency with the other SolveLNSProblem() call sites.
    int num_conflicts_used = 0;
    const BopOptimizerBase::Status status = SolveLNSProblem(
        *problem_, *initial_solution_, parameters, fixed_variables,
        &learned_info->solution, time_limit, &num_conflicts_used);
    if (status == BopOptimizerBase::SOLUTION_FOUND) {
      return BopOptimizerBase::SOLUTION_FOUND;
    }
  }
  return BopOptimizerBase::LIMIT_REACHED;
}
} // namespace bop
} // namespace operations_research

147
src/bop/bop_lns.h Normal file
View File

@@ -0,0 +1,147 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_LNS_H_
#define OR_TOOLS_BOP_BOP_LNS_H_
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/integral_types.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/int_type_indexed_vector.h"
#include "base/int_type.h"
#include "bop/bop_base.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "bop/bop_util.h"
#include "glop/lp_solver.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_solver.h"
#include "util/stats.h"
#include "util/time_limit.h"
namespace operations_research {
namespace bop {
// TODO(user): Remove this class.
// Wraps a SAT solver lazily loaded with the given problem, and records which
// decision literals propagate which variables (used by the LNS optimizers to
// grow their relaxation sets).
class SatPropagator {
 public:
  // Keeps a reference to 'problem'; it must outlive this object.
  explicit SatPropagator(const LinearBooleanProblem& problem);
  // Loads the problem into the solver (at most once); false on failure.
  bool LoadBooleanProblem();
  // Constrains the objective to be strictly better than current_solution's
  // cost; false when this proves the problem infeasible.
  bool OverConstrainObjective(const BopSolution& current_solution);
  // Records that deciding 'decision_literal' propagates 'var_id'; the list
  // for each variable is kept sorted and duplicate-free.
  void AddPropagationRelation(sat::Literal decision_literal,
                              VariableIndex var_id);
  sat::SatSolver* sat_solver() { return &sat_solver_; }
  const std::vector<sat::Literal>& propagated_by(VariableIndex var_id) const {
    return propagated_by_[var_id];
  }
 private:
  const LinearBooleanProblem& problem_;
  sat::SatSolver sat_solver_;
  bool problem_loaded_;
  // For each variable, the sorted decision literals known to propagate it.
  ITIVector<VariableIndex, std::vector<sat::Literal> > propagated_by_;
};
// LNS optimizer that relaxes the whole problem at once: it adds a constraint
// forcing all but num_relaxed_vars of the objective-improving assignments to
// keep their current value, then runs a full SAT solve (see bop_lns.cc).
// Requires a feasible solution and runs once per solution.
class BopCompleteLNSOptimizer : public BopOptimizerBase {
 public:
  explicit BopCompleteLNSOptimizer(const std::string& name,
                                   const BopConstraintTerms& objective_terms,
                                   sat::SatSolver* sat_solver);
  virtual ~BopCompleteLNSOptimizer();
 protected:
  virtual bool RunOncePerSolution() const { return true; }
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
  // Private copy of the problem; Optimize() appends its constraint to it.
  LinearBooleanProblem problem_;
  std::unique_ptr<BopSolution> initial_solution_;
  sat::SatSolver* const sat_solver_;  // Not owned.
  const BopConstraintTerms& objective_terms_;  // Not owned.
};
// LNS optimizer that fixes a random subset of the objective-improving
// variables (chosen either directly or through SAT propagation) and solves
// the rest, adapting the neighbourhood size with a Luby-based strategy (see
// bop_lns.cc). Requires a feasible solution and can be run repeatedly.
class BopRandomLNSOptimizer : public BopOptimizerBase {
 public:
  explicit BopRandomLNSOptimizer(const std::string& name,
                                 const BopConstraintTerms& objective_terms,
                                 int random_seed,
                                 bool use_sat_to_choose_lns_neighbourhood,
                                 SatPropagator* sat_propagator);
  virtual ~BopRandomLNSOptimizer();
 private:
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
  // Picks the variables to fix using the SAT propagator and immediately
  // attempts a quick solve with a low conflict limit.
  Status GenerateProblemUsingSat(const BopSolution& initial_solution,
                                 double target_difficulty,
                                 TimeLimit* time_limit,
                                 BopParameters* parameters,
                                 BopSolution* solution,
                                 std::vector<sat::Literal>* fixed_variables);
  // Picks the variables to fix from the shuffled candidate literals only.
  Status GenerateProblem(const BopSolution& initial_solution,
                         double target_difficulty, TimeLimit* time_limit,
                         std::vector<sat::Literal>* fixed_variables);
  // Marks var_id (and everything known to propagate it) for relaxation.
  void RelaxVariable(VariableIndex var_id, const BopSolution& solution);
  const LinearBooleanProblem* problem_;  // Not owned.
  std::unique_ptr<BopSolution> initial_solution_;
  const bool use_sat_to_choose_lns_neighbourhood_;
  SatPropagator* const sat_propagator_;  // Not owned.
  const BopConstraintTerms& objective_terms_;  // Not owned.
  MTRandom random_;
  // RandomLNS parameter whose values are kept from one run to the next.
  std::vector<sat::Literal> literals_;
  std::vector<bool> is_in_literals_;
  LubyAdaptiveParameterValue adaptive_difficulty_;
  SparseBitset<VariableIndex> to_relax_;
};
// LNS optimizer that relaxes the variables of randomly chosen constraints.
// Can be run repeatedly on the same solution.
class BopRandomConstraintLNSOptimizer : public BopOptimizerBase {
 public:
  explicit BopRandomConstraintLNSOptimizer(
      const std::string& name, const BopConstraintTerms& objective_terms,
      int random_seed);
  virtual ~BopRandomConstraintLNSOptimizer();

 private:
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);

  const LinearBooleanProblem* problem_;
  std::unique_ptr<BopSolution> initial_solution_;
  const BopConstraintTerms& objective_terms_;
  MTRandom random_;
  // Variables to relax in the next generated sub-problem.
  SparseBitset<VariableIndex> to_relax_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_LNS_H_

797
src/bop/bop_ls.cc Normal file
View File

@@ -0,0 +1,797 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_ls.h"
#include "bop/bop_util.h"
#include "sat/boolean_problem.h"
namespace operations_research {
namespace bop {
//------------------------------------------------------------------------------
// LocalSearchOptimizer
//------------------------------------------------------------------------------
// The assignment iterator is created lazily on the first Synchronize() call
// (it needs a ProblemState); the unique_ptr starts out null by default.
LocalSearchOptimizer::LocalSearchOptimizer(const std::string& name,
                                           int max_num_decisions)
    : BopOptimizerBase(name), max_num_decisions_(max_num_decisions) {}

LocalSearchOptimizer::~LocalSearchOptimizer() {}
// Resets the iterator on the new problem state and over-constrains the
// objective by 1 so that only strictly improving assignments are accepted.
BopOptimizerBase::Status LocalSearchOptimizer::Synchronize(
    const ProblemState& problem_state) {
  if (assignment_iterator_ == nullptr) {
    // Lazy creation: the iterator needs a ProblemState to be constructed.
    assignment_iterator_.reset(
        new LocalSearchAssignmentIterator(problem_state, max_num_decisions_));
  } else {
    assignment_iterator_->Synchronize(problem_state);
  }
  // Delta of 1: any accepted assignment must beat the current cost.
  assignment_iterator_->OverConstrainObjective(
      problem_state.solution().GetCost(), 1);
  return BopOptimizerBase::CONTINUE;
}
// Explores up to max_number_of_explored_assignments_per_try_in_ls new
// assignments (or until the time limit) and reports any improving feasible
// solution found. Returns:
//   SOLUTION_FOUND  - the iterator landed on a feasible improving assignment.
//   LIMIT_REACHED   - time limit hit before exhausting the search space.
//   CONTINUE        - assignment budget used up; call Optimize() again.
//   ABORT           - the whole neighborhood was explored without success.
BopOptimizerBase::Status LocalSearchOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  double prev_deterministic_time = assignment_iterator_->deterministic_time();
  assignment_iterator_->UseTranspositionTable(
      parameters.use_transposition_table_in_ls());
  int64 num_assignments_to_explore =
      parameters.max_number_of_explored_assignments_per_try_in_ls();
  while (!time_limit->LimitReached() && num_assignments_to_explore > 0 &&
         assignment_iterator_->NextAssignment()) {
    // Report only the delta of deterministic time since the last iteration.
    time_limit->AdvanceDeterministicTime(
        assignment_iterator_->deterministic_time() - prev_deterministic_time);
    prev_deterministic_time = assignment_iterator_->deterministic_time();
    --num_assignments_to_explore;
  }
  assignment_iterator_->ExtractLearnedInfo(learned_info);

  if (assignment_iterator_->AssignmentIsFeasible()) {
    CHECK(assignment_iterator_->Assignment().IsFeasible());
    learned_info->solution = assignment_iterator_->Assignment();
    return BopOptimizerBase::SOLUTION_FOUND;
  }

  if (time_limit->LimitReached()) {
    // The time limit is reached without finding a solution.
    return BopOptimizerBase::LIMIT_REACHED;
  }

  if (num_assignments_to_explore <= 0) {
    // Explore the remaining assignments in a future call to Optimize().
    return BopOptimizerBase::CONTINUE;
  }

  // All assignments reachable in max_num_decisions_ or less have been explored,
  // don't call optimize() with the same initial solution again.
  return BopOptimizerBase::ABORT;
}
//------------------------------------------------------------------------------
// BacktrackableIntegerSet
//------------------------------------------------------------------------------
// Resets the set to empty over the domain [0, n). O(n) because of in_stack_.
template <typename IntType>
void BacktrackableIntegerSet<IntType>::ClearAndResize(IntType n) {
  size_ = 0;
  saved_sizes_.clear();
  saved_stack_sizes_.clear();
  stack_.clear();
  in_stack_.assign(n.value(), false);
}

// Flips the membership of i. Precondition (not checked): should_be_inside is
// the opposite of i's current state, otherwise size_ becomes wrong.
template <typename IntType>
void BacktrackableIntegerSet<IntType>::ChangeState(IntType i,
                                                   bool should_be_inside) {
  size_ += should_be_inside ? 1 : -1;
  // stack_ records each touched element once; it is a superset of the set.
  if (!in_stack_[i.value()]) {
    in_stack_[i.value()] = true;
    stack_.push_back(i);
  }
}

// Saves the current state so that BacktrackOneLevel() can restore it.
template <typename IntType>
void BacktrackableIntegerSet<IntType>::AddBacktrackingLevel() {
  saved_stack_sizes_.push_back(stack_.size());
  saved_sizes_.push_back(size_);
}

// Restores the state saved by the last AddBacktrackingLevel(); falls back to
// BacktrackAll() when no level was saved.
template <typename IntType>
void BacktrackableIntegerSet<IntType>::BacktrackOneLevel() {
  if (saved_stack_sizes_.empty()) {
    BacktrackAll();
  } else {
    // Un-mark the elements pushed since the saved level.
    for (int i = saved_stack_sizes_.back(); i < stack_.size(); ++i) {
      in_stack_[stack_[i].value()] = false;
    }
    stack_.resize(saved_stack_sizes_.back());
    saved_stack_sizes_.pop_back();
    size_ = saved_sizes_.back();
    saved_sizes_.pop_back();
  }
}

// Restores the state right after the last ClearAndResize(): empty set, no
// saved levels. O(|stack_|), cheaper than ClearAndResize().
template <typename IntType>
void BacktrackableIntegerSet<IntType>::BacktrackAll() {
  for (int i = 0; i < stack_.size(); ++i) {
    in_stack_[stack_[i].value()] = false;
  }
  stack_.clear();
  saved_stack_sizes_.clear();
  size_ = 0;
  saved_sizes_.clear();
}

// Explicit instantiation of BacktrackableIntegerSet.
// TODO(user): move the code in a separate .h and -inl.h to avoid this.
template class BacktrackableIntegerSet<ConstraintIndex>;
//------------------------------------------------------------------------------
// AssignmentAndConstraintFeasibilityMaintainer
//------------------------------------------------------------------------------
// Builds the sparse by-variable representation of the problem. Constraint 0
// is always the objective (bounds set later by SetReferenceSolution() /
// OverConstrainObjective()); the problem's own constraints follow. Binary
// constraints are skipped: they are handled by the SAT propagator.
AssignmentAndConstraintFeasibilityMaintainer::
    AssignmentAndConstraintFeasibilityMaintainer(
        const LinearBooleanProblem& problem)
    : by_variable_matrix_(problem.num_variables()),
      constraint_lower_bounds_(),
      constraint_upper_bounds_(),
      reference_solution_(problem, "ReferenceSolution"),
      assignment_(problem, "Assignment"),
      is_assigned_(problem.num_variables(), false),
      constraint_values_(),
      literals_applied_stack_() {
  // Add the objective constraint as the first constraint.
  ConstraintIndex num_constraints(0);
  const LinearObjective& objective = problem.objective();
  CHECK_EQ(objective.literals_size(), objective.coefficients_size());
  for (int i = 0; i < objective.literals_size(); ++i) {
    CHECK_GT(objective.literals(i), 0);
    CHECK_NE(objective.coefficients(i), 0);
    // Literals in the proto are 1-based; VariableIndex is 0-based.
    const VariableIndex var(objective.literals(i) - 1);
    const int64 weight = objective.coefficients(i);
    by_variable_matrix_[var].push_back(
        ConstraintEntry(num_constraints, weight));
  }
  // The objective starts unbounded; the upper bound is tightened later.
  constraint_lower_bounds_.push_back(kint64min);
  constraint_values_.push_back(0);
  constraint_upper_bounds_.push_back(kint64max);

  // Add each constraint.
  for (const LinearBooleanConstraint& constraint : problem.constraints()) {
    if (constraint.literals_size() <= 2) {
      // Infeasible binary constraints are automatically repaired by propagation
      // (when possible). Then there is no need to consider the binary
      // constraints here, the propagation is delegated to the SAT propagator.
      continue;
    }

    ++num_constraints;
    CHECK_EQ(constraint.literals_size(), constraint.coefficients_size());
    for (int i = 0; i < constraint.literals_size(); ++i) {
      const VariableIndex var(constraint.literals(i) - 1);
      const int64 weight = constraint.coefficients(i);
      by_variable_matrix_[var].push_back(
          ConstraintEntry(num_constraints, weight));
    }
    constraint_lower_bounds_.push_back(
        constraint.has_lower_bound() ? constraint.lower_bound() : kint64min);
    constraint_values_.push_back(0);
    constraint_upper_bounds_.push_back(
        constraint.has_upper_bound() ? constraint.upper_bound() : kint64max);
  }

  // Initialize infeasible_constraint_set_;
  infeasible_constraint_set_.ClearAndResize(
      ConstraintIndex(constraint_values_.size()));

  CHECK_EQ(constraint_values_.size(), constraint_lower_bounds_.size());
  CHECK_EQ(constraint_values_.size(), constraint_upper_bounds_.size());
}

// The objective is always stored as constraint 0.
const ConstraintIndex
    AssignmentAndConstraintFeasibilityMaintainer::kObjectiveConstraint(0);
// Resets the maintainer around a new feasible reference solution: clears all
// assignments and backtracking levels, recomputes every constraint value from
// scratch, and bounds the objective by the reference cost.
void AssignmentAndConstraintFeasibilityMaintainer::SetReferenceSolution(
    const BopSolution& reference_solution) {
  CHECK(reference_solution.IsFeasible());
  infeasible_constraint_set_.BacktrackAll();

  reference_solution_ = reference_solution;
  assignment_ = reference_solution;
  is_assigned_.assign(reference_solution_.Size(), false);
  literals_applied_stack_.clear();
  AddBacktrackingLevel();  // To handle initial propagation.

  // Recompute the value of all constraints.
  constraint_values_.assign(NumConstraints(), 0);
  for (VariableIndex var(0); var < reference_solution_.Size(); ++var) {
    if (reference_solution_.Value(var)) {
      for (const ConstraintEntry& entry : by_variable_matrix_[var]) {
        constraint_values_[entry.constraint] += entry.weight;
      }
    }
  }

  // Update the objective constraint upper bound.
  constraint_upper_bounds_[kObjectiveConstraint] = reference_solution.GetCost();
}

// Tightens the objective upper bound by delta. This makes the objective
// constraint infeasible by construction (the reference solution meets the old
// bound exactly), which is what drives the local search to repair it.
void AssignmentAndConstraintFeasibilityMaintainer::OverConstrainObjective(
    int64 delta) {
  constraint_upper_bounds_[kObjectiveConstraint] -= delta;
  infeasible_constraint_set_.BacktrackAll();
  infeasible_constraint_set_.ChangeState(ConstraintIndex(0), true);
  CHECK(!ConstraintIsFeasible(ConstraintIndex(0)));
  if (DEBUG_MODE) {
    // All non-objective constraints must still be feasible.
    for (ConstraintIndex ct(1); ct < NumConstraints(); ++ct) {
      CHECK(ConstraintIsFeasible(ct));
    }
  }
}
// Applies the given literals at the current backtracking level, incrementally
// updating constraint values and the infeasible-constraint set. Each variable
// must not already be assigned. Constraint values only change when the new
// value differs from the reference solution's value.
void AssignmentAndConstraintFeasibilityMaintainer::Assign(
    const std::vector<sat::Literal>& literals) {
  // Add all literals at the current backtrack level.
  literals_applied_stack_.back().insert(literals_applied_stack_.back().end(),
                                        literals.begin(), literals.end());

  // Apply each literal.
  for (const sat::Literal& literal : literals) {
    const VariableIndex var(literal.Variable().value());
    const bool value = literal.IsPositive();
    CHECK(!is_assigned_[var]);
    is_assigned_[var] = true;
    if (reference_solution_.Value(var) != value) {
      assignment_.SetValue(var, value);
      for (const ConstraintEntry& entry : by_variable_matrix_[var]) {
        const bool was_feasible = ConstraintIsFeasible(entry.constraint);
        constraint_values_[entry.constraint] +=
            value ? entry.weight : -entry.weight;
        // Track constraints whose feasibility just flipped.
        if (ConstraintIsFeasible(entry.constraint) != was_feasible) {
          infeasible_constraint_set_.ChangeState(entry.constraint,
                                                 was_feasible);
        }
      }
    }
  }
}

// Opens a new backtracking level; literals applied from now on are undone
// together by the matching BacktrackOneLevel() call.
void AssignmentAndConstraintFeasibilityMaintainer::AddBacktrackingLevel() {
  literals_applied_stack_.push_back({});
  infeasible_constraint_set_.AddBacktrackingLevel();
}

// Undoes all literals applied at the last backtracking level, restoring each
// variable to its reference value and reverting constraint values.
void AssignmentAndConstraintFeasibilityMaintainer::BacktrackOneLevel() {
  // Backtrack each literal of the last level.
  for (const sat::Literal& literal : literals_applied_stack_.back()) {
    const VariableIndex var(literal.Variable().value());
    const bool ref_value = reference_solution_.Value(var);
    CHECK(is_assigned_[var]);
    is_assigned_[var] = false;
    if (assignment_.Value(var) != ref_value) {
      assignment_.SetValue(var, ref_value);
      for (const ConstraintEntry& entry : by_variable_matrix_[var]) {
        constraint_values_[entry.constraint] +=
            ref_value ? entry.weight : -entry.weight;
      }
    }
  }
  literals_applied_stack_.pop_back();
  infeasible_constraint_set_.BacktrackOneLevel();
}
std::string AssignmentAndConstraintFeasibilityMaintainer::DebugString() const {
std::string str;
str += "ref: ";
for (bool value : reference_solution_) {
str += value ? " 1 " : " 0 ";
}
str += "\ncurr: ";
for (bool value : assignment_) {
str += value ? " 1 " : " 0 ";
}
str += "\n ";
for (const bool is_assigned : is_assigned_) {
str += is_assigned ? " 1 " : " 0 ";
}
str += "\nApplied Literals: ";
for (std::vector<sat::Literal> literals : literals_applied_stack_) {
str += "\n ";
for (sat::Literal literal : literals) {
str += " " + literal.DebugString();
}
}
str += "\nmin curr max\n";
for (ConstraintIndex ct(0); ct < constraint_values_.size(); ++ct) {
if (constraint_lower_bounds_[ct] == kint64min) {
str += StringPrintf("- %lld %lld\n", constraint_values_[ct],
constraint_upper_bounds_[ct]);
} else {
str += StringPrintf("%lld %lld %lld\n", constraint_lower_bounds_[ct],
constraint_values_[ct], constraint_upper_bounds_[ct]);
}
}
return str;
}
//------------------------------------------------------------------------------
// OneFlipConstraintRepairer
//------------------------------------------------------------------------------
// Builds the by-constraint view of the problem (transpose of the maintainer's
// by-variable view). Constraint 0 is the objective; binary constraints are
// skipped, matching AssignmentAndConstraintFeasibilityMaintainer's indexing.
OneFlipConstraintRepairer::OneFlipConstraintRepairer(
    const LinearBooleanProblem& problem,
    const AssignmentAndConstraintFeasibilityMaintainer& maintainer)
    : by_constraint_matrix_(problem.constraints_size() + 1),
      maintainer_(maintainer) {
  // Add the objective constraint as the first constraint.
  ConstraintIndex num_constraint(0);
  const LinearObjective& objective = problem.objective();
  CHECK_EQ(objective.literals_size(), objective.coefficients_size());
  for (int i = 0; i < objective.literals_size(); ++i) {
    CHECK_GT(objective.literals(i), 0);
    CHECK_NE(objective.coefficients(i), 0);
    // Proto literals are 1-based; VariableIndex is 0-based.
    const VariableIndex var(objective.literals(i) - 1);
    const int64 weight = objective.coefficients(i);
    by_constraint_matrix_[num_constraint].push_back(
        ConstraintTerm(var, weight));
  }

  // Add each constraint.
  for (const LinearBooleanConstraint& constraint : problem.constraints()) {
    if (constraint.literals_size() <= 2) {
      // Infeasible binary constraints are automatically repaired by propagation
      // (when possible). Then there is no need to consider the binary
      // constraints here, the propagation is delegated to the SAT propagator.
      continue;
    }

    ++num_constraint;
    CHECK_EQ(constraint.literals_size(), constraint.coefficients_size());
    for (int i = 0; i < constraint.literals_size(); ++i) {
      const VariableIndex var(constraint.literals(i) - 1);
      const int64 weight = constraint.coefficients(i);
      by_constraint_matrix_[num_constraint].push_back(
          ConstraintTerm(var, weight));
    }
  }

  // Order terms by decreasing impact on the objective cost.
  SortTerms(problem.num_variables());
}

// Sentinels used by the search to mark "no constraint" / "initial term" /
// "no more repairing terms".
const ConstraintIndex OneFlipConstraintRepairer::kInvalidConstraint(-1);
const TermIndex OneFlipConstraintRepairer::kInitTerm(-1);
const TermIndex OneFlipConstraintRepairer::kInvalidTerm(-2);
// Returns the infeasible constraint with the fewest one-flip repairs
// (fewest branches), or kInvalidConstraint when no infeasible constraint can
// be repaired by flipping a single unassigned variable. Ties keep the first
// constraint encountered.
ConstraintIndex OneFlipConstraintRepairer::ConstraintToRepair() const {
  ConstraintIndex selected_ct = kInvalidConstraint;
  int32 selected_num_branches = kint32max;
  for (const ConstraintIndex& ct :
       maintainer_.PossiblyInfeasibleConstraints()) {
    // The iterated set is only a superset of the infeasible constraints; skip
    // feasible ones before doing any other work (the previous version fetched
    // the constraint value even for constraints that were then skipped).
    if (maintainer_.ConstraintIsFeasible(ct)) continue;
    const int64 constraint_value = maintainer_.ConstraintValue(ct);

    // Count the unassigned variables whose flip brings the constraint value
    // back inside its bounds.
    int32 num_branches = 0;
    for (const ConstraintTerm& term : by_constraint_matrix_[ct]) {
      if (maintainer_.IsAssigned(term.var)) {
        continue;
      }
      const bool value = maintainer_.Assignment(term.var);
      const int64 new_sum =
          constraint_value + (value ? -term.weight : term.weight);
      if (new_sum >= maintainer_.ConstraintLowerBound(ct) &&
          new_sum <= maintainer_.ConstraintUpperBound(ct)) {
        ++num_branches;
      }
    }

    if (num_branches == 0) {
      // The constraint can't be repaired in one decision.
      continue;
    }
    if (selected_num_branches > num_branches) {
      selected_ct = ct;
      selected_num_branches = num_branches;
    }
  }
  return selected_ct;
}
// Returns the next term (after start_term_index, in circular order starting
// just after init_term_index) whose single flip repairs the constraint, or
// kInvalidTerm when the circular scan wraps back to init_term_index without
// finding one. The modular arithmetic implements the circular iteration.
TermIndex OneFlipConstraintRepairer::NextRepairingTerm(
    ConstraintIndex constraint, TermIndex init_term_index,
    TermIndex start_term_index) const {
  const ITIVector<TermIndex, ConstraintTerm>& terms =
      by_constraint_matrix_[constraint];
  const int64 constraint_value = maintainer_.ConstraintValue(constraint);

  // end_term_index is init_term_index + 1, shifted by terms.size() so that the
  // loop counter stays increasing; term_index below is taken modulo size.
  const TermIndex end_term_index(terms.size() + init_term_index + 1);
  for (TermIndex loop_term_index(
           start_term_index + 1 +
           (start_term_index < init_term_index ? terms.size() : 0));
       loop_term_index < end_term_index; ++loop_term_index) {
    const TermIndex term_index(loop_term_index % terms.size());
    const ConstraintTerm term = terms[term_index];
    if (maintainer_.IsAssigned(term.var)) {
      continue;
    }
    // Would flipping this variable bring the constraint within its bounds?
    const bool value = maintainer_.Assignment(term.var);
    const int64 new_sum =
        constraint_value + (value ? -term.weight : term.weight);
    if (new_sum >= maintainer_.ConstraintLowerBound(constraint) &&
        new_sum <= maintainer_.ConstraintUpperBound(constraint)) {
      return term_index;
    }
  }
  return kInvalidTerm;
}
// Builds the decision literal that flips the given term's variable: positive
// when the variable is currently assigned false, and vice versa.
sat::Literal OneFlipConstraintRepairer::Literal(ConstraintIndex constraint,
                                                TermIndex term_index) const {
  const ConstraintTerm& term = by_constraint_matrix_[constraint][term_index];
  const bool current_value = maintainer_.Assignment(term.var);
  return sat::Literal(sat::VariableIndex(term.var.value()), !current_value);
}
namespace {
// Orders constraint terms by decreasing objective coefficient of their
// variable, so that repairs touching high-impact variables are tried first.
struct CompareConstraintTermByDecreasingImpactOnObjectiveCost {
  explicit CompareConstraintTermByDecreasingImpactOnObjectiveCost(
      const ITIVector<VariableIndex, int64>& o)
      : objective(o) {}

  bool operator()(const OneFlipConstraintRepairer::ConstraintTerm& a,
                  const OneFlipConstraintRepairer::ConstraintTerm& b) const {
    return objective[a.var] > objective[b.var];
  }

  const ITIVector<VariableIndex, int64>& objective;
};
}  // anonymous namespace

// Sorts the terms of every constraint by decreasing objective weight of their
// variable. Variables absent from the objective get weight 0.
void OneFlipConstraintRepairer::SortTerms(int num_variables) {
  ITIVector<VariableIndex, int64> objective(num_variables, 0);
  const ConstraintIndex kObjectiveConstraint(0);
  for (const ConstraintTerm& term :
       by_constraint_matrix_[kObjectiveConstraint]) {
    objective[term.var] = term.weight;
  }
  CompareConstraintTermByDecreasingImpactOnObjectiveCost compare_object(
      objective);
  for (ITIVector<TermIndex, ConstraintTerm>& terms : by_constraint_matrix_) {
    std::sort(terms.begin(), terms.end(), compare_object);
  }
}
//------------------------------------------------------------------------------
// SatWrapper
//------------------------------------------------------------------------------
// Loads the problem into the embedded SAT solver and biases the solver's
// variable-assignment preferences toward the objective. unsat_ is set when
// loading already proves infeasibility.
SatWrapper::SatWrapper(const ProblemState& problem_state)
    : problem_(problem_state.original_problem()), sat_solver_(), unsat_(false) {
  unsat_ = !LoadStateProblemToSatSolver(problem_state, &sat_solver_);
  if (!unsat_) {
    UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                           &sat_solver_);
  }
}

// Restarts from the root node and reloads the (possibly tightened) problem
// state into the SAT solver.
void SatWrapper::Synchronize(const ProblemState& problem_state) {
  sat_solver_.Backtrack(0);
  unsat_ = !LoadStateProblemToSatSolver(problem_state, &sat_solver_);
}

// Adds the bound objective <= reference_cost - delta at the root node and
// returns all literals fixed by root-level propagation.
std::vector<sat::Literal> SatWrapper::OverConstrainObjective(int64 reference_cost,
                                                             int64 delta) {
  // Backtrack to the root node.
  sat_solver_.Backtrack(0);
  unsat_ =
      !AddObjectiveUpperBound(
          problem_, sat::Coefficient(reference_cost - delta), &sat_solver_);

  // Return the list of propagated literals at the root node.
  std::vector<sat::Literal> propagated_literals;
  const sat::Trail& propagation_trail = sat_solver_.LiteralTrail();
  for (int trail_index = 0; trail_index < propagation_trail.Index();
       ++trail_index) {
    propagated_literals.push_back(propagation_trail[trail_index]);
  }
  return propagated_literals;
}

// Enqueues decision_literal and propagates. Fills propagated_literals with
// everything assigned by this decision. Returns the number of decision levels
// backtracked by conflict resolution (0 when no conflict occurred); a return
// > 0 means the caller has to pop that many of its own levels.
int SatWrapper::ApplyDecision(sat::Literal decision_literal,
                              std::vector<sat::Literal>* propagated_literals) {
  CHECK(!sat_solver_.Assignment().IsVariableAssigned(
      decision_literal.Variable()));
  CHECK(propagated_literals != nullptr);

  propagated_literals->clear();
  const int old_decision_level = sat_solver_.CurrentDecisionLevel();
  const int new_trail_index =
      sat_solver_.EnqueueDecisionAndBackjumpOnConflict(decision_literal);
  unsat_ = unsat_ || new_trail_index == sat::kUnsatTrailIndex;
  if (unsat_) {
    return old_decision_level + 1;
  }

  // Return the propagated literals, whenever there is a conflict or not.
  // In case of conflict, these literals will have to be added to the last
  // decision point after backtrack.
  const sat::Trail& propagation_trail = sat_solver_.LiteralTrail();
  for (int trail_index = new_trail_index;
       trail_index < propagation_trail.Index(); ++trail_index) {
    propagated_literals->push_back(propagation_trail[trail_index]);
  }

  return old_decision_level + 1 - sat_solver_.CurrentDecisionLevel();
}

// Pops one decision level; no-op at the root node.
void SatWrapper::BacktrackOneLevel() {
  const int old_decision_level = sat_solver_.CurrentDecisionLevel();
  if (old_decision_level > 0) {
    sat_solver_.Backtrack(old_decision_level - 1);
  }
}

// Exports learned binary clauses / conflicts from the SAT solver into info.
void SatWrapper::ExtractLearnedInfo(LearnedInfo* info) {
  bop::ExtractLearnedInfoFromSatSolver(&sat_solver_, info);
}

// Deterministic time consumed by the embedded SAT solver.
double SatWrapper::deterministic_time() const {
  return sat_solver_.deterministic_time();
}
//------------------------------------------------------------------------------
// LocalSearchAssignmentIterator
//------------------------------------------------------------------------------
// Builds the iterator around the current problem state and sets its feasible
// solution as the local-search reference point. initial_term_index_ has one
// entry per constraint (objective included, hence the +1).
LocalSearchAssignmentIterator::LocalSearchAssignmentIterator(
    const ProblemState& problem_state, int max_num_decisions)
    : max_num_decisions_(max_num_decisions),
      maintainer_(problem_state.original_problem()),
      sat_wrapper_(problem_state),
      repairer_(problem_state.original_problem(), maintainer_),
      search_nodes_(),
      initial_term_index_(
          problem_state.original_problem().constraints_size() + 1,
          OneFlipConstraintRepairer::kInitTerm),
      num_nodes_(0),
      num_skipped_nodes_(0) {
  maintainer_.SetReferenceSolution(problem_state.solution());
}

// Restarts the search from the new problem state. The term index reached for
// each constraint in the aborted search is kept as the next starting point,
// so later searches do not re-explore the same branches first.
void LocalSearchAssignmentIterator::Synchronize(
    const ProblemState& problem_state) {
  maintainer_.SetReferenceSolution(problem_state.solution());
  sat_wrapper_.Synchronize(problem_state);
  for (const SearchNode& node : search_nodes_) {
    initial_term_index_[node.constraint] = node.term_index;
  }
  search_nodes_.clear();
  transposition_table_.clear();
  num_nodes_ = 0;
  num_skipped_nodes_ = 0;
}

// Tightens the objective in both the SAT wrapper and the maintainer, and
// applies the literals fixed by SAT's root-level propagation.
void LocalSearchAssignmentIterator::OverConstrainObjective(int64 reference_cost,
                                                           int64 delta) {
  const std::vector<sat::Literal> propagated_literals =
      sat_wrapper_.OverConstrainObjective(reference_cost, delta);
  maintainer_.OverConstrainObjective(delta);
  maintainer_.Assign(propagated_literals);
}
// Advances the search by one node (depth-first with backtracking). Returns
// false when the iteration is over: either the current assignment is feasible
// (an improving solution was found), the problem became UNSAT, or the whole
// neighborhood has been explored.
bool LocalSearchAssignmentIterator::NextAssignment() {
  if (AssignmentIsFeasible() || sat_wrapper_.unsat()) {
    return false;
  }

  // If possible, go deeper, i.e. take one more decision.
  if (!GoDeeper()) {
    // If not, backtrack to the first node that still has untried way to fix
    // its associated constraint. Update it to the next untried way.
    Backtrack();
  }

  // All nodes have been explored.
  if (search_nodes_.empty()) {
    LOG(INFO) << std::string(25, ' ') + "LS finished."
              << " #explored:" << num_nodes_
              << " #stored:" << transposition_table_.size()
              << " #skipped:" << num_skipped_nodes_;
    return false;
  }

  // Apply the next decision, i.e. the literal of the flipped variable.
  const SearchNode last_node = search_nodes_.back();
  const sat::Literal literal =
      repairer_.Literal(last_node.constraint, last_node.term_index);
  ApplyDecision(literal);
  return !AssignmentIsFeasible();
}
// Forwards the SAT solver's learned information (e.g. binary clauses) to info.
void LocalSearchAssignmentIterator::ExtractLearnedInfo(LearnedInfo* info) {
  sat_wrapper_.ExtractLearnedInfo(info);
}

// TODO(user): The 1.2 multiplier is an approximation only based on the time
//              spent in the SAT wrapper. So far experiments show a good
//              correlation with real time, but we might want to be more
//              accurate.
double LocalSearchAssignmentIterator::deterministic_time() const {
  return sat_wrapper_.deterministic_time() * 1.2;
}

// Lists the current decision stack: one line per search node with its
// constraint index and term index.
std::string LocalSearchAssignmentIterator::DebugString() const {
  std::string str = "Search nodes:\n";
  for (int i = 0; i < search_nodes_.size(); ++i) {
    str +=
        StringPrintf("  %d: %d  %d\n", i, search_nodes_[i].constraint.value(),
                     search_nodes_[i].term_index.value());
  }
  return str;
}
// Applies the decision literal through the SAT wrapper and keeps the
// maintainer and the search-node stack in sync with any conflict-driven
// backjump performed by SAT.
void LocalSearchAssignmentIterator::ApplyDecision(sat::Literal literal) {
  ++num_nodes_;
  std::vector<sat::Literal> propagated_literals;
  const int num_backtracks =
      sat_wrapper_.ApplyDecision(literal, &propagated_literals);

  // Sync the maintainer with SAT.
  if (num_backtracks == 0) {
    // No conflict: open a new level and apply the propagated literals.
    maintainer_.AddBacktrackingLevel();
    maintainer_.Assign(propagated_literals);
  } else {
    CHECK_GT(num_backtracks, 0);
    CHECK_LE(num_backtracks, search_nodes_.size());

    // Only backtrack -1 decisions as the last one has not been pushed yet.
    for (int i = 0; i < num_backtracks - 1; ++i) {
      maintainer_.BacktrackOneLevel();
    }
    maintainer_.Assign(propagated_literals);
    // Drop the decisions invalidated by the backjump.
    search_nodes_.resize(search_nodes_.size() - num_backtracks);
  }
}
void LocalSearchAssignmentIterator::InitializeTranspostionTableKey(
std::array<int32, kStoredMaxDecisions>* a) {
int i = 0;
for (const SearchNode& n : search_nodes_) {
// Negated because we already fixed it to its opposite value!
(*a)[i] = -repairer_.Literal(n.constraint, n.term_index).SignedValue();
++i;
}
// 'a' is not zero-initialized, so we need to complete it with zeros.
while (i < kStoredMaxDecisions) {
(*a)[i] = 0;
++i;
}
}
// Returns true when the decision set "current stack + l" was already explored
// (and stored). Keys are sorted so that permutations of the same decisions
// collide. States deeper than kStoredMaxDecisions are never stored, so they
// are reported as new.
bool LocalSearchAssignmentIterator::NewStateIsInTranspositionTable(
    sat::Literal l) {
  if (search_nodes_.size() + 1 > kStoredMaxDecisions) return false;

  // Fill the transposition table element, i.e the array 'a' of decisions.
  std::array<int32, kStoredMaxDecisions> a;
  InitializeTranspostionTableKey(&a);
  a[search_nodes_.size()] = l.SignedValue();
  std::sort(a.begin(), a.begin() + 1 + search_nodes_.size());

  if (transposition_table_.find(a) == transposition_table_.end()) {
    return false;
  } else {
    ++num_skipped_nodes_;
    return true;
  }
}

// Stores the current decision stack in the transposition table so the same
// decision set is not explored again (in any order).
void LocalSearchAssignmentIterator::InsertInTranspositionTable() {
  // If there are more decisions than kStoredMaxDecisions, do nothing.
  if (search_nodes_.size() > kStoredMaxDecisions) return;

  // Fill the transposition table element, i.e the array 'a' of decisions.
  std::array<int32, kStoredMaxDecisions> a;
  InitializeTranspostionTableKey(&a);
  std::sort(a.begin(), a.begin() + search_nodes_.size());

  transposition_table_.insert(a);
}
// Pushes a new search node for the next repairing term of ct_to_repair that
// is not already in the transposition table, scanning circularly from
// term_index. Returns false when the scan wraps around to the initial term,
// i.e. when every repairing term has been tried.
bool LocalSearchAssignmentIterator::EnqueueNextRepairingTermIfAny(
    ConstraintIndex ct_to_repair, TermIndex term_index) {
  if (term_index == initial_term_index_[ct_to_repair]) return false;
  if (term_index == OneFlipConstraintRepairer::kInvalidTerm) {
    // Fresh exploration of this constraint: start from the recorded initial
    // term index.
    term_index = initial_term_index_[ct_to_repair];
  }
  while (true) {
    term_index = repairer_.NextRepairingTerm(
        ct_to_repair, initial_term_index_[ct_to_repair], term_index);
    if (term_index == OneFlipConstraintRepairer::kInvalidTerm) return false;
    if (!use_transposition_table_ ||
        !NewStateIsInTranspositionTable(
            repairer_.Literal(ct_to_repair, term_index))) {
      search_nodes_.push_back(SearchNode(ct_to_repair, term_index));
      return true;
    }
    if (term_index == initial_term_index_[ct_to_repair]) return false;
  }
}
// Tries to add one more decision to the stack: picks the infeasible
// constraint that is easiest to repair and enqueues its first untried
// repairing term. Returns false when the decision budget is exhausted or no
// constraint is repairable in one flip.
bool LocalSearchAssignmentIterator::GoDeeper() {
  // Can we add one more decision?
  if (search_nodes_.size() >= max_num_decisions_) {
    return false;
  }

  // Can we find a constraint that can be repaired in one decision?
  const ConstraintIndex ct_to_repair = repairer_.ConstraintToRepair();
  if (ct_to_repair == OneFlipConstraintRepairer::kInvalidConstraint) {
    return false;
  }

  // Add the new decision.
  //
  // TODO(user): Store the last explored term index to not start from -1 each
  // time. This will be very useful when a backtrack occurred due to the SAT
  // propagator. Note however that this behavior is already enforced when we use
  // the transposition table, since we will not explore again the branches
  // already explored.
  return EnqueueNextRepairingTermIfAny(ct_to_repair,
                                       OneFlipConstraintRepairer::kInvalidTerm);
}
// Pops fully-explored nodes (recording them in the transposition table) until
// one of them still has an untried repairing term, which becomes the next
// decision. Leaves search_nodes_ empty when the whole tree is exhausted.
void LocalSearchAssignmentIterator::Backtrack() {
  while (!search_nodes_.empty()) {
    // We finished exploring this node. Store it in the transposition table so
    // that the same decisions will not be explored again. Note that the SAT
    // solver may have learned more the second time the exact same decisions are
    // seen, but we assume that it is not worth exploring again.
    if (use_transposition_table_) InsertInTranspositionTable();

    const SearchNode last_node = search_nodes_.back();
    search_nodes_.pop_back();
    maintainer_.BacktrackOneLevel();
    sat_wrapper_.BacktrackOneLevel();
    if (EnqueueNextRepairingTermIfAny(last_node.constraint,
                                      last_node.term_index)) {
      return;
    }
  }
}
} // namespace bop
} // namespace operations_research

525
src/bop/bop_ls.h Normal file
View File

@@ -0,0 +1,525 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file defines the needed classes to efficiently perform Local Search in
// Bop.
// Local Search is a technique used to locally improve an existing solution by
// flipping a limited number of variables. To be successful the produced
// solution have to satisfy all constraints of the problem and improve the
// objective cost.
//
// The class BopLocalSearchOptimizer is the only public interface for Local
// Search in Bop. For unit-testing purpose this file also contains the four
// internal classes AssignmentAndConstraintFeasibilityMaintainer,
// OneFlipConstraintRepairer, SatWrapper and LocalSearchAssignmentIterator.
// They are implementation details and should not be used outside of bop_ls.
#ifndef OR_TOOLS_BOP_BOP_LS_H_
#define OR_TOOLS_BOP_BOP_LS_H_
#include "base/hash.h"
#include "base/hash.h"
#include "bop/bop_base.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_solver.h"
namespace operations_research {
namespace bop {
// Forward declaration.
class LocalSearchAssignmentIterator;
// This class defines a Local Search optimizer. The goal is to find a new
// solution with a better cost than the given solution by iterating on all
// assignments that can be reached in max_num_decisions decisions or less.
// The bop parameter max_number_of_explored_assignments_per_try_in_ls can be
// used to specify the number of new assignments to iterate on each time the
// method Optimize() is called. Limiting that parameter allows to reduce the
// time spent in the Optimize() method at once, and still explore all the
// reachable assignments (if Optimize() is called enough times).
// Note that due to propagation, the number of variables with a different value
// in the new solution can be greater than max_num_decisions.
class LocalSearchOptimizer : public BopOptimizerBase {
 public:
  explicit LocalSearchOptimizer(const std::string& name, int max_num_decisions);
  virtual ~LocalSearchOptimizer();

 protected:
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);

  // Maximum number of decisions the Local Search can take.
  // Note that there is no limit on the number of changed variables due to
  // propagation.
  const int max_num_decisions_;

  // Iterator on all reachable assignments.
  // Note that this iterator is only reset when Synchronize() is called, i.e.
  // the iterator continues its iteration of the next assignments each time
  // Optimize() is called until everything is explored or a solution is found.
  std::unique_ptr<LocalSearchAssignmentIterator> assignment_iterator_;
};
//------------------------------------------------------------------------------
// Implementation details. The declarations of those utility classes are in
// the .h for testing reasons.
//------------------------------------------------------------------------------
// Maintains some information on a sparse set of integers in [0, n). More
// specifically this class:
//   - Allows to dynamically add/remove element from the set.
//   - Has a backtracking support.
//   - Maintains the number of elements in the set.
//   - Maintains a superset of the elements of the set that contains all the
//     modified elements.
template <typename IntType>
class BacktrackableIntegerSet {
 public:
  // Initializes size_ so that size() is well defined (and does not read an
  // uninitialized value) even before the first call to ClearAndResize().
  BacktrackableIntegerSet() : size_(0) {}

  // Prepares the class for integers in [0, n) and initializes the set to the
  // empty one. Note that this run in O(n). Once resized, it is better to call
  // BacktrackAll() instead of this to clear the set.
  void ClearAndResize(IntType n);

  // Changes the state of the given integer i to be either inside or outside
  // the set. Important: this should only be called with the opposite state of
  // the current one, otherwise size() will not be correct.
  void ChangeState(IntType i, bool should_be_inside);

  // Returns the current number of elements in the set.
  // Note that this is not its maximum size n.
  int size() const { return size_; }

  // Returns a superset of the current set of integers: every element that had
  // its state changed at least once since the last ClearAndResize().
  const std::vector<IntType>& Superset() const { return stack_; }

  // BacktrackOneLevel() backtracks to the state the class was in when the
  // last AddBacktrackingLevel() was called. BacktrackAll() just restore the
  // class to its state just after the last ClearAndResize().
  void AddBacktrackingLevel();
  void BacktrackOneLevel();
  void BacktrackAll();

 private:
  int size_;

  // Contains the elements whose status has been changed at least once.
  std::vector<IntType> stack_;
  std::vector<bool> in_stack_;

  // Used for backtracking. Contains the size_ and the stack_.size() at the
  // time of each call to AddBacktrackingLevel() that is not yet backtracked
  // over.
  std::vector<int> saved_sizes_;
  std::vector<int> saved_stack_sizes_;
};
// This class is used to incrementally maintain an assignment and the
// feasibility of the constraints of a given LinearBooleanProblem.
//
// The current assignment is initialized using a feasible reference solution,
// i.e. the reference solution satisfies all the constraints of the problem.
// The current assignment is updated using the Assign() method.
//
// Note that the current assignment is not a solution in the sense that it
// might not be feasible, i.e. it may violate some constraints.
//
// The assignment can be accessed at any time using Assignment().
// The set of infeasible constraints can be accessed at any time using
// InfeasibleConstraints().
//
// Note that this class is reversible, i.e. it is possible to backtrack to
// previously added backtracking levels.
// Consider for instance variables a, b, c, and d:
//     Method called             Assigned after method call
//  1- Assign({a, b})            a b
//  2- AddBacktrackingLevel()    a b |
//  3- Assign({c})               a b | c
//  4- Assign({d})               a b | c d
//  5- BacktrackOneLevel()       a b
//  6- Assign({c})               a b c
//  7- BacktrackOneLevel()
class AssignmentAndConstraintFeasibilityMaintainer {
 public:
  AssignmentAndConstraintFeasibilityMaintainer(
      const LinearBooleanProblem& problem);

  // Constraint index reserved for the objective; see the indexing convention
  // described in OneFlipConstraintRepairer::ConstraintToRepair().
  static const ConstraintIndex kObjectiveConstraint;

  // Sets a new reference solution and reverts all internal structures to
  // their initial state, e.g. is_assigned_[i] == false.
  // Note that the reference solution has to be feasible.
  void SetReferenceSolution(const BopSolution& reference_solution);

  // Over-constrains the objective cost by delta. Note that calling this
  // method right after setting the reference solution, i.e. no calls to
  // Assign(), will break the objective constraint.
  void OverConstrainObjective(int64 delta);

  // Assigns all literals. That updates the assignment, the constraint values,
  // and the infeasible constraints.
  // Note that the assignment of those literals can be reverted thanks to
  // AddBacktrackingLevel() and BacktrackOneLevel().
  // Note that a variable can't be assigned twice, even for the same literal.
  void Assign(const std::vector<sat::Literal>& literals);

  // Adds a new backtracking level to specify the state that will be restored
  // by BacktrackOneLevel().
  // See the example in the class comment.
  void AddBacktrackingLevel();

  // Backtracks internal structures to the previous level defined by
  // AddBacktrackingLevel(). As a consequence the state will be exactly as
  // before the previous call to AddBacktrackingLevel().
  // Note that backtracking the initial state has no effect.
  void BacktrackOneLevel();

  // Returns true when the variable var has been set by Assign().
  bool IsAssigned(VariableIndex var) const { return is_assigned_[var]; }

  // Returns true if there is no infeasible constraint in the current state.
  bool IsFeasible() const { return infeasible_constraint_set_.size() == 0; }

  // Returns a superset of all the infeasible constraints in the current
  // state (see BacktrackableIntegerSet::Superset()).
  const std::vector<ConstraintIndex>& PossiblyInfeasibleConstraints() const {
    return infeasible_constraint_set_.Superset();
  }

  // Returns the number of constraints of the problem, objective included,
  // i.e. the number of constraints in the problem + 1.
  size_t NumConstraints() const { return constraint_lower_bounds_.size(); }

  // Returns the value of the var in the assignment.
  // As the assignment is initialized with the reference solution, if the
  // variable has not been assigned through Assign(), the returned value is
  // the value of the variable in the reference solution.
  bool Assignment(VariableIndex var) const { return assignment_.Value(var); }

  // Returns the assignment as a BopSolution. Note that the resulting
  // solution might not be feasible.
  const BopSolution& assignment() const { return assignment_; }

  // Returns the lower bound of the constraint.
  int64 ConstraintLowerBound(ConstraintIndex constraint) const {
    return constraint_lower_bounds_[constraint];
  }

  // Returns the upper bound of the constraint.
  int64 ConstraintUpperBound(ConstraintIndex constraint) const {
    return constraint_upper_bounds_[constraint];
  }

  // Returns the value of the constraint. The value is computed using the
  // variable values in the assignment.
  // Note that a constraint is feasible iff its value is between its two
  // bounds (inclusive).
  int64 ConstraintValue(ConstraintIndex constraint) const {
    return constraint_values_[constraint];
  }

  // Returns true if the given constraint is currently feasible.
  bool ConstraintIsFeasible(ConstraintIndex constraint) const {
    const int64 value = ConstraintValue(constraint);
    return value >= ConstraintLowerBound(constraint) &&
           value <= ConstraintUpperBound(constraint);
  }

  std::string DebugString() const;

 private:
  // Local structure to represent the sparse matrix by variable used for fast
  // update of the constraint values.
  struct ConstraintEntry {
    ConstraintEntry(ConstraintIndex c, int64 w) : constraint(c), weight(w) {}
    ConstraintIndex constraint;
    int64 weight;
  };

  ITIVector<VariableIndex, ITIVector<EntryIndex, ConstraintEntry> >
      by_variable_matrix_;
  ITIVector<ConstraintIndex, int64> constraint_lower_bounds_;
  ITIVector<ConstraintIndex, int64> constraint_upper_bounds_;

  BopSolution reference_solution_;
  BopSolution assignment_;
  ITIVector<VariableIndex, bool> is_assigned_;
  ITIVector<ConstraintIndex, int64> constraint_values_;
  std::vector<std::vector<sat::Literal> > literals_applied_stack_;
  BacktrackableIntegerSet<ConstraintIndex> infeasible_constraint_set_;

  DISALLOW_COPY_AND_ASSIGN(AssignmentAndConstraintFeasibilityMaintainer);
};
// This class is an utility class used to select which infeasible constraint
// to repair and identify one variable to flip to actually repair the
// constraint.
// A constraint 'lb <= sum_i(w_i * x_i) <= ub', with 'lb' the lower bound,
// 'ub' the upper bound, 'w_i' the weight of the i-th term and 'x_i' the
// boolean variable appearing in the i-th term, is infeasible for a given
// assignment iff its value 'sum_i(w_i * x_i)' is outside of the bounds.
// Repairing-a-constraint-in-one-flip means making the constraint feasible by
// just flipping the value of one unassigned variable of the current
// assignment from the AssignmentAndConstraintFeasibilityMaintainer.
// For performance reasons, the pairs weight / variable (w_i, x_i) are stored
// in a sparse manner as a vector of terms (w_i, x_i). In the following the
// TermIndex term_index refers to the position of the term in the vector.
class OneFlipConstraintRepairer {
 public:
  OneFlipConstraintRepairer(
      const LinearBooleanProblem& problem,
      const AssignmentAndConstraintFeasibilityMaintainer& maintainer);

  // Special constraint / term indices used by the methods below, e.g. to
  // report that no repairing constraint or term exists.
  static const ConstraintIndex kInvalidConstraint;
  static const TermIndex kInitTerm;
  static const TermIndex kInvalidTerm;

  // Returns the index of a constraint to repair. Returns kInvalidConstraint
  // if no constraint can be repaired with only one flip.
  // Note that indices follows the convention in
  // AssignmentAndConstraintFeasibilityMaintainer, i.e. '0' corresponds to the
  // objective constraint and 'i' corresponds to the 'i - 1' constraint in
  // the problem.
  ConstraintIndex ConstraintToRepair() const;

  // Returns the index of the next term which repairs the constraint when the
  // value of its variable is flipped. This method explores terms with an
  // index strictly greater than start_term_index and then terms with an index
  // smaller than or equal to init_term_index if any.
  // Returns kInvalidTerm when no repairing terms are found.
  //
  // Note that if init_term_index == start_term_index, then all the terms will
  // be explored. Both TermIndex arguments can take values in [-1, constraint
  // size).
  TermIndex NextRepairingTerm(ConstraintIndex constraint,
                              TermIndex init_term_index,
                              TermIndex start_term_index) const;

  // Returns the literal corresponding to the term_index in the constraint.
  sat::Literal Literal(ConstraintIndex constraint, TermIndex term_index) const;

  // Local structure to represent the sparse matrix by constraint used for
  // fast lookups.
  struct ConstraintTerm {
    ConstraintTerm(VariableIndex v, int64 w) : var(v), weight(w) {}
    VariableIndex var;
    int64 weight;
  };

 private:
  // Sorts the terms of each constraints to iterate on most promising
  // variables first.
  void SortTerms(int num_variables);

  ITIVector<ConstraintIndex, ITIVector<TermIndex, ConstraintTerm> >
      by_constraint_matrix_;
  const AssignmentAndConstraintFeasibilityMaintainer& maintainer_;

  DISALLOW_COPY_AND_ASSIGN(OneFlipConstraintRepairer);
};
// This class is used to ease the connection with the SAT solver.
//
// TODO(user): Consider a tighter integration where more objects of the SAT
// solver are shared, e.g. the trail stack, the assignment...
class SatWrapper {
 public:
  explicit SatWrapper(const ProblemState& problem_state);

  // Backtracks the SAT solver to its root level and resets internal
  // structure, and synchronizes with the problem state (e.g. new fixed
  // variables).
  void Synchronize(const ProblemState& problem_state);

  // Constrains the underlying SAT solver to find a solution with a cost
  // better than the given reference_cost by at least delta.
  // Returns the newly propagated literals.
  // In other words, in case of minimization the objective is constrained to
  // be smaller than or equal to 'reference_cost - delta', and in case of
  // maximization the objective is constrained to be greater than
  // reference_cost + delta.
  std::vector<sat::Literal> OverConstrainObjective(int64 reference_cost,
                                                   int64 delta);

  // Returns true if the problem is UNSAT.
  // Note that an UNSAT problem might not be marked as UNSAT at first because
  // the SAT solver is not able to prove it; After some decisions / learned
  // conflicts, the SAT solver might be able to prove UNSAT and so this will
  // return true.
  bool unsat() const { return unsat_; }

  // Applies the decision that makes the given literal true and returns the
  // number of decisions to backtrack due to conflicts if any.
  // Two cases:
  //   - No conflicts: Returns 0 and fills the propagated_literals with the
  //     literals that have been propagated due to the decision including the
  //     decision itself.
  //   - Conflicts: Returns the number of decisions to backtrack (the current
  //     decision included, i.e. returned value > 0) and fills the
  //     propagated_literals with the literals that the conflicts propagated.
  // Note that the decision variable should not be already assigned in SAT.
  int ApplyDecision(sat::Literal decision_literal,
                    std::vector<sat::Literal>* propagated_literals);

  // Backtracks the last decision if any.
  void BacktrackOneLevel();

  // Extracts any new information learned during the search.
  void ExtractLearnedInfo(LearnedInfo* info);

  // Returns a deterministic number that should be correlated with the time
  // spent in the SAT wrapper. The order of magnitude should be close to the
  // time in seconds.
  double deterministic_time() const;

 private:
  const LinearBooleanProblem& problem_;
  sat::SatSolver sat_solver_;
  bool unsat_;

  DISALLOW_COPY_AND_ASSIGN(SatWrapper);
};
// This class is used to iterate on all assignments that can be obtained by
// deliberately flipping 'n' variables from the reference solution, 'n' being
// smaller than or equal to max_num_decisions.
// Note that one deliberate variable flip may lead to many other flips due to
// constraint propagation, those additional flips are not counted in 'n'.
class LocalSearchAssignmentIterator {
 public:
  LocalSearchAssignmentIterator(const ProblemState& problem_state,
                                int max_num_decisions);

  // Sets whether or not a transposition table is used.
  void UseTranspositionTable(bool v) { use_transposition_table_ = v; }

  // Synchronizes the iterator with the problem state, e.g. set fixed
  // variables, set the reference solution.
  // Note that the iteration will continue where it stopped.
  void Synchronize(const ProblemState& problem_state);

  // Constrains to look for a feasible solution that improves the objective
  // cost by at least delta.
  void OverConstrainObjective(int64 reference_cost, int64 delta);

  // Move to the next assignment. Returns false when the search is finished.
  bool NextAssignment();

  // Returns true if the current assignment corresponds to a feasible
  // solution with a better objective cost than the reference solution.
  bool AssignmentIsFeasible() const {
    // TODO(user): change the second term in a DCHECK(), it shouldn't be
    // needed.
    return maintainer_.IsFeasible() && maintainer_.assignment().IsFeasible();
  }

  // Returns the current assignment.
  // Note that this assignment is a feasible solution only when the method
  // AssignmentIsFeasible() returns true.
  const BopSolution& Assignment() const { return maintainer_.assignment(); }

  // Extracts any new information learned during the search.
  void ExtractLearnedInfo(LearnedInfo* info);

  // Returns a deterministic number that should be correlated with the time
  // spent in the iterator. The order of magnitude should be close to the
  // time in seconds.
  double deterministic_time() const;

  std::string DebugString() const;

 private:
  // See transposition_table_ below.
  static const size_t kStoredMaxDecisions = 4;

  // Internal structure used to represent a node of the search tree during
  // local search.
  struct SearchNode {
    SearchNode()
        : constraint(OneFlipConstraintRepairer::kInvalidConstraint),
          term_index(OneFlipConstraintRepairer::kInvalidTerm) {}
    SearchNode(ConstraintIndex c, TermIndex t) : constraint(c), term_index(t) {}
    ConstraintIndex constraint;
    TermIndex term_index;
  };

  // Applies the decision. Automatically backtracks when SAT detects
  // conflicts.
  void ApplyDecision(sat::Literal literal);

  // Adds one more decision to repair infeasible constraints.
  // Returns true in case of success.
  bool GoDeeper();

  // Backtracks and moves to the next decision in the search tree.
  void Backtrack();

  // Looks if the current decisions (in search_nodes_) plus the new one (given
  // by l) lead to a position already present in transposition_table_.
  bool NewStateIsInTranspositionTable(sat::Literal l);

  // Inserts the current set of decisions in transposition_table_.
  void InsertInTranspositionTable();

  // Initializes the given array with the current decisions in search_nodes_
  // and by filling the other positions with 0.
  // TODO(user): fix the "Transpostion" typo in the method name.
  void InitializeTranspostionTableKey(
      std::array<int32, kStoredMaxDecisions>* a);

  // Looks for the next repairing term in the given constraints while skipping
  // the position already present in transposition_table_. A given TermIndex
  // of -1 means that this is the first time we explore this constraint.
  bool EnqueueNextRepairingTermIfAny(ConstraintIndex ct_to_repair,
                                     TermIndex index);

  const int max_num_decisions_;
  AssignmentAndConstraintFeasibilityMaintainer maintainer_;
  SatWrapper sat_wrapper_;
  OneFlipConstraintRepairer repairer_;
  std::vector<SearchNode> search_nodes_;
  ITIVector<ConstraintIndex, TermIndex> initial_term_index_;

  // For each set of explored decisions, we store it in this table so that we
  // don't explore decisions (a, b) and later (b, a) for instance. The
  // decisions are converted to int32, sorted and padded with 0 before being
  // inserted here.
  //
  // TODO(user): We may still miss some equivalent states because it is
  // possible that completely different decisions lead to exactly the same
  // state. However this is more time consuming to detect because we must
  // apply the last decision first before trying to compare the states.
  //
  // TODO(user): Currently, we only store kStoredMaxDecisions or less
  // decisions. Ideally, this should be related to the maximum number of
  // decisions in the LS, but that requires templating the whole LS optimizer.
  bool use_transposition_table_;
#if defined(_MSC_VER)
  hash_set<std::array<int32, kStoredMaxDecisions>,
           StdArrayHasher<int32, kStoredMaxDecisions>> transposition_table_;
#else
  hash_set<std::array<int32, kStoredMaxDecisions>> transposition_table_;
#endif

  // The number of explored nodes.
  int64 num_nodes_;

  // The number of skipped nodes thanks to the transposition table.
  int64 num_skipped_nodes_;

  DISALLOW_COPY_AND_ASSIGN(LocalSearchAssignmentIterator);
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_LS_H_

View File

@@ -0,0 +1,223 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package operations_research.bop;
// Method used to optimize a solution in Bop and its associated initial score.
message BopOptimizerMethod {
  enum OptimizerType {
    SAT_CORE_BASED = 0;
    LINEAR_RELAXATION = 1;
    LOCAL_SEARCH = 2;
    RANDOM_FIRST_SOLUTION = 3;
    RANDOM_CONSTRAINT_LNS = 4;
    RANDOM_LNS_PROPAGATION = 5;
    RANDOM_LNS_SAT = 6;
    COMPLETE_LNS = 7;
    LP_FIRST_SOLUTION = 8;
    OBJECTIVE_FIRST_SOLUTION = 9;
  }
  optional OptimizerType type = 1;

  // The score is used as an initial input to the adaptive optimizer selector
  // (see PortfolioOptimizer).
  optional double initial_score = 2 [default = 0.1];

  // The ratio is used to compute the max time the method might spend to look
  // for a solution. For instance, with a ratio of 0.05 and a time limit of
  // 300 seconds, the method can spend up to 15 seconds.
  // Note that this is only used by *_FIRST_SOLUTION optimizers in the
  // current implementation.
  optional double time_limit_ratio = 3 [default = 0.05];
}
// Set of optimizer methods to be run by an instance of the portfolio
// optimizer. Note that in the current implementation, all the methods
// specified in the repeated field methods will run on the same solver /
// thread.
message BopSolverOptimizerSet {
  // The methods to run; see BopOptimizerMethod for the per-method settings.
  repeated BopOptimizerMethod methods = 1;
}
// Contains the definitions for all the bop algorithm parameters and their
// default values.
//
// NEXT TAG: 30
message BopParameters {
  // Maximum time allowed in seconds to solve a problem.
  // The counter starts as soon as Solve() is called.
  optional double max_time_in_seconds = 1 [default = inf];

  // Maximum time allowed in deterministic time to solve a problem.
  // The deterministic time should be correlated with the real time used by
  // the solver, the time unit being roughly the order of magnitude of a
  // second.
  // The counter starts as soon as SetParameters() or SolveWithTimeLimit()
  // is called.
  optional double max_deterministic_time = 27 [default = inf];

  // Limit used to stop the optimization as soon as the relative gap is
  // smaller than the given value.
  // The relative gap is defined as:
  //   abs(solution_cost - best_bound)
  //     / max(abs(solution_cost), abs(best_bound)).
  optional double relative_gap_limit = 28 [default = 1e-4];

  // Maximum number of cascading decisions the solver might use to repair the
  // current solution in the LS.
  optional int32 max_num_decisions = 2 [default = 4];

  // Compute estimated impact at each iteration when true; only once when
  // false.
  optional bool compute_estimated_impact = 3 [default = true];

  // Avoid exploring both branches (b, a, ...) and (a, b, ...).
  optional bool prune_search_tree = 4 [default = false];

  // Sort constraints by increasing total number of terms instead of number
  // of contributing terms.
  optional bool sort_constraints_by_num_terms = 5 [default = false];

  // Use the random Large Neighborhood Search instead of the exhaustive one.
  optional bool use_random_lns = 6 [default = true];

  // The seed used to initialize the random generator.
  optional int32 random_seed = 7 [default = 0];

  // Number of variables to relax in the exhaustive Large Neighborhood Search.
  optional int32 num_relaxed_vars = 8 [default = 10];

  // The number of conflicts the SAT solver has to solve a random LNS
  // subproblem.
  optional int32 max_number_of_conflicts_in_random_lns = 9 [default = 2500];

  // Number of tries in the random lns before calling the SAT solver on the
  // full problem.
  optional int32 num_random_lns_tries = 10 [default = 1];

  // Maximum number of backtracks times the number of variables in Local
  // Search, i.e. max num backtracks == max_number_of_backtracks_in_ls /
  // num variables.
  optional int64 max_number_of_backtracks_in_ls = 11 [default = 100000000];

  // Use Large Neighborhood Search based on the LP relaxation.
  optional bool use_lp_lns = 12 [default = true];

  // Whether we use sat propagation to choose the lns neighbourhood.
  optional bool use_sat_to_choose_lns_neighbourhood = 15 [default = true];

  // The number of conflicts the SAT solver has to solve a random LNS
  // subproblem for the quick check of infeasibility.
  optional int32 max_number_of_conflicts_for_quick_check = 16 [default = 10];

  // If true, find and exploit the eventual symmetries of the problem.
  //
  // TODO(user): turn this on by default once the symmetry finder becomes
  // fast enough to be negligible for most problems. Or at least support a
  // time limit.
  optional bool use_symmetry = 17 [default = false];

  // The number of conflicts the SAT solver has to generate a random solution.
  optional int32 max_number_of_conflicts_in_random_solution_generation = 20
      [default = 500];

  // The maximum number of assignments the Local Search iterates on during
  // one try. Note that if the Local Search is called again on the same
  // solution it will not restart from scratch but will iterate on the next
  // max_number_of_explored_assignments_per_try_in_ls assignments.
  optional int64 max_number_of_explored_assignments_per_try_in_ls = 21
      [default = 2500];

  // Whether we use a hash set during the LS to avoid exploring more than
  // once the "same" state. Note that because the underlying SAT solver may
  // learn information in the middle of the LS, this may make the LS slightly
  // less "complete", but it should be faster.
  optional bool use_transposition_table_in_ls = 22 [default = true];

  // Whether we use the learned binary clauses in the Linear Relaxation.
  optional bool use_learned_binary_clauses_in_lp = 23 [default = true];

  // The number of solvers used to run Bop. Note that one thread will be
  // created per solver. The type of communication between solvers is
  // specified by the synchronization_type parameter.
  optional int32 number_of_solvers = 24 [default = 1];

  // Defines how the different solvers are synchronized during the search.
  // Note that the synchronization (if any) occurs before each call to an
  // optimizer (the smallest granularity of the solver in a parallel
  // context).
  enum ThreadSynchronizationType {
    // No synchronization. The solvers run independently until the time limit
    // is reached; Then learned information from each solver are aggregated.
    // The final solution is the best of all found solutions.
    // Pros: - No need to wait for another solver to complete its task,
    //       - Adding a new solver always improves the final solution (In the
    //         current implementation it still depends on the machine load
    //         and the time limit).
    // Cons: - No learning between solvers.
    NO_SYNCHRONIZATION = 0;

    // Synchronize all solvers. Each solver waits for all other solvers to
    // complete the previous optimizer run, before running again.
    // The final solution is the best of all found solutions.
    // Pros: - Full learning between solvers.
    // Cons: - A lot of waiting time when solvers don't run at the exact same
    //         speed,
    //       - The quality of the final solution depends on the number of
    //         solvers, adding one more solver might lead to poorer results
    //         because the search goes on a different path.
    SYNCHRONIZE_ALL = 1;

    // Solver i synchronizes with solvers 0..i-1.
    // This is a good tradeoff between NO_SYNCHRONIZATION and SYNCHRONIZE_ALL:
    // it allows communication while keeping a relative determinism on the
    // result even when the number of solvers increases.
    // The final solution is the best of all found solutions.
    // Pros: - Solver i learns from i different solvers,
    //       - Adding a new solver always improves the final solution (In the
    //         current implementation it still depends on the machine load
    //         and the time limit).
    // Cons: - No full learning,
    //       - Some solvers need to wait for synchronization.
    SYNCHRONIZE_ON_RIGHT = 2;
  }
  optional ThreadSynchronizationType synchronization_type = 25
      [default = NO_SYNCHRONIZATION];

  // List of set of optimizers to be run by the solvers.
  // Note that the i_th solver will run the
  // min(i, solver_optimizer_sets_size())_th optimizer set.
  // The default is (only one set):
  //   {
  //     type:LINEAR_RELAXATION initial_score:1.0 time_limit_ratio:0.15
  //     type:LP_FIRST_SOLUTION initial_score:0.1
  //     type:OBJECTIVE_FIRST_SOLUTION initial_score:0.1
  //     type:RANDOM_FIRST_SOLUTION initial_score:0.1
  //     type:SAT_CORE_BASED initial_score:1.0
  //     type:LOCAL_SEARCH initial_score:2.0
  //     type:RANDOM_CONSTRAINT_LNS initial_score:0.1
  //     type:RANDOM_LNS_PROPAGATION initial_score:1.0
  //     type:RANDOM_LNS_SAT initial_score:1.0
  //     type:COMPLETE_LNS initial_score:0.1
  //   }
  repeated BopSolverOptimizerSet solver_optimizer_sets = 26;

  // Use strong branching in the linear relaxation optimizer.
  // The strong branching is a what-if analysis on each variable v, i.e.
  // compute the best bound when v is assigned to true, compute the best
  // bound when v is assigned to false, and then use those best bounds to
  // improve the overall best bound.
  // This is useful to improve the best_bound, but also to fix some variables
  // during search.
  // Note that using probing might be time consuming as it runs
  // 2 * num_variables times the LP solver.
  optional bool use_lp_strong_branching = 29 [default = false];
}

298
src/bop/bop_portfolio.cc Normal file
View File

@@ -0,0 +1,298 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_portfolio.h"
#include "bop/bop_fs.h"
#include "bop/bop_lns.h"
#include "bop/bop_ls.h"
#include "bop/bop_util.h"
#include "bop/complete_optimizer.h"
#include "sat/boolean_problem.h"
namespace operations_research {
namespace bop {
namespace {
// Converts the objective of the given problem into a flat list of
// (variable, weight) terms appended to *objective_terms (cleared first).
// CHECK-fails if the objective is malformed: literals and coefficients must
// have the same size, literals must be positive (non-negated variables) and
// coefficients must be non-zero.
void BuildObjectiveTerms(const LinearBooleanProblem& problem,
                         BopConstraintTerms* objective_terms) {
  CHECK(objective_terms != nullptr);
  objective_terms->clear();
  const LinearObjective& objective = problem.objective();
  // Proto repeated-field *_size() accessors return int; using int here avoids
  // a signed/unsigned mismatch in the CHECK_EQ and the loop below.
  const int num_objective_terms = objective.literals_size();
  CHECK_EQ(num_objective_terms, objective.coefficients_size());
  for (int i = 0; i < num_objective_terms; ++i) {
    CHECK_GT(objective.literals(i), 0);
    CHECK_NE(objective.coefficients(i), 0);
    // Literal i > 0 encodes the non-negated variable with 1-based index i.
    const VariableIndex var_id(objective.literals(i) - 1);
    const int64 weight = objective.coefficients(i);
    objective_terms->push_back(BopConstraintTerm(var_id, weight));
  }
}
} // anonymous namespace
//------------------------------------------------------------------------------
// PortfolioOptimizer
//------------------------------------------------------------------------------
// Builds the portfolio and all its underlying optimizers from the given
// optimizer set. Bounds start at +/- infinity and are refined by
// Synchronize(); no solution is known yet (feasible_solution_ == false).
PortfolioOptimizer::PortfolioOptimizer(
    const ProblemState& problem_state, const BopParameters& parameters,
    const BopSolverOptimizerSet& optimizer_set, const std::string& name)
    : BopOptimizerBase(name),
      random_(parameters.random_seed()),
      objective_terms_(),
      selector_(),
      optimizers_(),
      optimizer_initial_scores_(),
      sat_propagator_(problem_state.original_problem()),
      feasible_solution_(false),
      lower_bound_(-glop::kInfinity),
      upper_bound_(glop::kInfinity) {
  CreateOptimizers(problem_state, parameters, optimizer_set);
}
// Out-of-line empty destructor; the members clean up themselves.
PortfolioOptimizer::~PortfolioOptimizer() {}
// Propagates the new problem state to the SAT propagator and to every
// sub-optimizer, and caches the feasibility / bound information that
// Optimize() relies on. Returns early when SAT proves infeasibility or
// optimality.
BopOptimizerBase::Status PortfolioOptimizer::Synchronize(
    const ProblemState& problem_state) {
  // A failed load means SAT detected the problem to be infeasible.
  if (!sat_propagator_.LoadBooleanProblem()) {
    return BopOptimizerBase::INFEASIBLE;
  }
  // Set a new constraint on the objective cost to only look for better
  // solutions. If that constraint can't be added, the current solution is
  // proved optimal.
  if (problem_state.solution().IsFeasible() &&
      !sat_propagator_.OverConstrainObjective(problem_state.solution())) {
    // Stop optimization as SAT proved optimality.
    return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
  }
  // Synchronize only the optimizers that can run in the current state (those
  // needing a feasible solution are skipped until one exists); stop as soon
  // as one of them proves optimality or infeasibility.
  for (const std::unique_ptr<BopOptimizerBase>& optimizer : optimizers_) {
    if (problem_state.solution().IsFeasible() ||
        !optimizer->NeedAFeasibleSolution()) {
      const BopOptimizerBase::Status sync_status =
          optimizer->Synchronize(problem_state);
      if (sync_status == BopOptimizerBase::OPTIMAL_SOLUTION_FOUND ||
          sync_status == BopOptimizerBase::INFEASIBLE) {
        return sync_status;
      }
    }
  }
  // Cache the state used by Optimize() for selection and logging.
  feasible_solution_ = problem_state.solution().IsFeasible();
  lower_bound_ = problem_state.GetScaledLowerBound();
  upper_bound_ = feasible_solution_ ? problem_state.solution().GetScaledCost()
                                    : glop::kInfinity;
  return BopOptimizerBase::CONTINUE;
}
// Selects one sub-optimizer using the adaptive selector, runs it once, and
// updates the selector with the outcome. Returns ABORT when no optimizer is
// selectable, CONTINUE otherwise.
BopOptimizerBase::Status PortfolioOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();
  if (!feasible_solution_) {
    // Mark optimizers that can't run because they need an initial solution.
    // Use an int index to match the selector's item-id API and avoid a
    // signed/unsigned comparison with optimizers_.size().
    const int num_optimizers = static_cast<int>(optimizers_.size());
    for (int i = 0; i < num_optimizers; ++i) {
      if (optimizers_[i]->NeedAFeasibleSolution()) {
        selector_->MarkItemNonSelectable(i);
      }
    }
  }
  // No selectable optimizer left: nothing more can be done in this state.
  if (!selector_->SelectItem()) {
    return BopOptimizerBase::ABORT;
  }
  const int selected_optimizer_id = selector_->selected_item_id();
  const std::unique_ptr<BopOptimizerBase>& selected_optimizer =
      optimizers_[selected_optimizer_id];
  LOG(INFO) << "             " << lower_bound_ << " .. " << upper_bound_ << " "
            << name() << " - " << selected_optimizer->name()
            << ". Time limit: " << time_limit->GetTimeLeft() << " -- "
            << time_limit->GetDeterministicTimeLeft();
  const BopOptimizerBase::Status optimization_status =
      selected_optimizer->Optimize(parameters, learned_info, time_limit);
  // Reward the optimizer when it found a solution; keep it selectable unless
  // it is a run-once optimizer or it aborted.
  selector_->UpdateSelectedItem(
      optimization_status == BopOptimizerBase::SOLUTION_FOUND,
      !selected_optimizer->RunOncePerSolution() &&
          optimization_status != BopOptimizerBase::ABORT);
  if (optimization_status == BopOptimizerBase::SOLUTION_FOUND) {
    // A new solution resets the selection round for all optimizers.
    selector_->StartNewRoundOfSelections();
  }
  return BopOptimizerBase::CONTINUE;
}
// Instantiates the optimizer described by optimizer_method and appends it
// (with its initial selection score) to optimizers_ /
// optimizer_initial_scores_. LOCAL_SEARCH expands into
// parameters.max_num_decisions() optimizers with halving initial scores and
// records its scores inline; all other types record their score at the end.
void PortfolioOptimizer::AddOptimizer(
    const BopParameters& parameters,
    const BopOptimizerMethod& optimizer_method) {
  switch (optimizer_method.type()) {
    case BopOptimizerMethod::SAT_CORE_BASED:
      optimizers_.emplace_back(
          new SatCoreBasedOptimizer("SatCoreBasedOptimizer"));
      break;
    case BopOptimizerMethod::LINEAR_RELAXATION:
      optimizers_.emplace_back(new LinearRelaxation(
          parameters, "LinearRelaxation", optimizer_method.time_limit_ratio()));
      break;
    case BopOptimizerMethod::LOCAL_SEARCH: {
      // One local-search optimizer per number of decisions, each with half
      // the initial score of the previous one.
      double initial_local_search_score = optimizer_method.initial_score();
      for (int i = 1; i <= parameters.max_num_decisions(); ++i) {
        optimizers_.emplace_back(
            new LocalSearchOptimizer(StringPrintf("LS_%d", i), i));
        optimizer_initial_scores_.push_back(initial_local_search_score);
        initial_local_search_score /= 2.0;
      }
    } break;
    case BopOptimizerMethod::RANDOM_FIRST_SOLUTION:
      optimizers_.emplace_back(new BopRandomFirstSolutionGenerator(
          parameters.random_seed(), "SATRandomFirstSolution",
          optimizer_method.time_limit_ratio()));
      break;
    case BopOptimizerMethod::RANDOM_CONSTRAINT_LNS:
      optimizers_.emplace_back(new BopRandomConstraintLNSOptimizer(
          "Random_Constraint_LNS", objective_terms_, parameters.random_seed()));
      break;
    case BopOptimizerMethod::RANDOM_LNS_PROPAGATION:
      optimizers_.emplace_back(new BopRandomLNSOptimizer(
          "Random_LNS_Using_Variables_Connections", objective_terms_,
          parameters.random_seed(), false, &sat_propagator_));
      break;
    case BopOptimizerMethod::RANDOM_LNS_SAT:
      optimizers_.emplace_back(new BopRandomLNSOptimizer(
          "Random_LNS_Using_SAT", objective_terms_, parameters.random_seed(),
          true, &sat_propagator_));
      break;
    case BopOptimizerMethod::COMPLETE_LNS:
      optimizers_.emplace_back(new BopCompleteLNSOptimizer(
          "LNS", objective_terms_, sat_propagator_.sat_solver()));
      break;
    case BopOptimizerMethod::LP_FIRST_SOLUTION:
      optimizers_.emplace_back(new BopSatLpFirstSolutionGenerator(
          "SATLPFirstSolution", optimizer_method.time_limit_ratio()));
      break;
    case BopOptimizerMethod::OBJECTIVE_FIRST_SOLUTION:
      optimizers_.emplace_back(new BopSatObjectiveFirstSolutionGenerator(
          "SATObjectiveFirstSolution", optimizer_method.time_limit_ratio()));
      break;
    default:
      LOG(FATAL) << "Unknown optimizer type.";
  }
  // LOCAL_SEARCH already pushed one score per created optimizer above.
  if (optimizer_method.type() != BopOptimizerMethod::LOCAL_SEARCH) {
    optimizer_initial_scores_.push_back(optimizer_method.initial_score());
  }
}
// Builds the portfolio: precomputes the objective terms, optionally adds the
// problem symmetries to the SAT solver, instantiates one optimizer per
// method of optimizer_set, and creates the adaptative selector seeded with
// the collected initial scores.
void PortfolioOptimizer::CreateOptimizers(
    const ProblemState& problem_state, const BopParameters& parameters,
    const BopSolverOptimizerSet& optimizer_set) {
  BuildObjectiveTerms(problem_state.original_problem(), &objective_terms_);
  if (parameters.use_symmetry()) {
    LOG(INFO) << "Finding symmetries of the problem.";
    std::vector<std::unique_ptr<SparsePermutation>> generators;
    sat::FindLinearBooleanProblemSymmetries(problem_state.original_problem(),
                                            &generators);
    sat_propagator_.sat_solver()->AddSymmetries(&generators);
  }
  for (const BopOptimizerMethod& optimizer_method : optimizer_set.methods()) {
    AddOptimizer(parameters, optimizer_method);
  }
  // The selector must be created after AddOptimizer() has filled in all the
  // initial scores.
  selector_.reset(new AdaptativeItemSelector(parameters.random_seed(),
                                             optimizer_initial_scores_));
}
//------------------------------------------------------------------------------
// AdaptativeItemSelector
//------------------------------------------------------------------------------
// Creates one Item per initial score and opens the first selection round.
AdaptativeItemSelector::AdaptativeItemSelector(
    int random_seed, const std::vector<double>& initial_scores)
    : random_(random_seed), selected_item_id_(kNoSelection), items_() {
  items_.reserve(initial_scores.size());
  for (size_t i = 0; i < initial_scores.size(); ++i) {
    items_.push_back(Item(initial_scores[i]));
  }
  StartNewRoundOfSelections();
}
// Sentinel item id meaning no item is currently selected.
const int AdaptativeItemSelector::kNoSelection(-1);
// Weight of the most recent success/score when blending scores (see
// StartNewRoundOfSelections() and UpdateSelectedItem()).
const double AdaptativeItemSelector::kErosion(0.2);
// Floor applied to every score so that no item ever drops to zero
// probability of being selected.
const double AdaptativeItemSelector::kScoreMin(1e-10);
void AdaptativeItemSelector::StartNewRoundOfSelections() {
for (int item_id = 0; item_id < items_.size(); ++item_id) {
Item& item = items_[item_id];
if (item.num_selections > 0) {
item.round_score = std::max(kScoreMin, (1 - kErosion) * item.round_score +
kErosion * item.num_successes);
}
item.current_score = item.round_score;
item.can_be_selected = true;
item.num_selections = 0;
item.num_successes = 0;
}
}
bool AdaptativeItemSelector::SelectItem() {
// Compute the sum score of selectable items.
double score_sum = 0.0;
for (const Item& item : items_) {
if (item.can_be_selected) {
score_sum += item.current_score;
}
}
// Select item.
const double selected_score_sum = random_.UniformDouble(score_sum);
double selection_sum = 0.0;
selected_item_id_ = kNoSelection;
for (int item_id = 0; item_id < items_.size(); ++item_id) {
const Item& item = items_[item_id];
if (item.can_be_selected) {
selection_sum += item.current_score;
}
if (selection_sum > selected_score_sum) {
selected_item_id_ = item_id;
break;
}
}
if (selected_item_id_ != kNoSelection) {
items_[selected_item_id_].num_selections++;
return true;
}
return false;
}
// Records the outcome of running the currently selected item: bumps its
// success count on success, updates its selectability, and erodes its
// current score towards the (0/1) success signal.
void AdaptativeItemSelector::UpdateSelectedItem(bool success,
                                                bool can_still_be_selected) {
  Item& item = items_[selected_item_id_];
  if (success) {
    ++item.num_successes;
  }
  item.can_be_selected = can_still_be_selected;
  item.current_score = std::max(kScoreMin, (1 - kErosion) * item.current_score +
                                               (success ? kErosion : 0));
}
// Excludes the given item from selection until the next round (see
// StartNewRoundOfSelections(), which resets can_be_selected).
void AdaptativeItemSelector::MarkItemNonSelectable(int item) {
  items_[item].can_be_selected = false;
}
} // namespace bop
} // namespace operations_research

124
src/bop/bop_portfolio.h Normal file
View File

@@ -0,0 +1,124 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_PORTFOLIO_H_
#define OR_TOOLS_BOP_BOP_PORTFOLIO_H_
#include "bop/bop_base.h"
#include "bop/bop_lns.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "glop/lp_solver.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_solver.h"
#include "util/stats.h"
#include "util/time_limit.h"
namespace operations_research {
namespace bop {
// Forward declaration.
class AdaptativeItemSelector;
// This class implements a portfolio optimizer.
// The portfolio currently includes all the following optimizers:
// - SatCoreBasedOptimizer
// - LinearRelaxation
// - LocalSearchOptimizer
// - BopRandomFirstSolutionGenerator
// - BopRandomConstraintLNSOptimizer
// - BopRandomLNSOptimizer.
// At each call of Optimize(), the portfolio optimizer selects the next
// optimizer to run and runs it. The selection is auto-adaptative, meaning that
// optimizers that succeeded more in the previous calls to Optimizer() are more
// likely to be selected.
class PortfolioOptimizer : public BopOptimizerBase {
 public:
  explicit PortfolioOptimizer(const ProblemState& problem_state,
                              const BopParameters& parameters,
                              const BopSolverOptimizerSet& optimizer_set,
                              const std::string& name);
  virtual ~PortfolioOptimizer();
  // The portfolio itself can always run again and without a solution; the
  // per-optimizer restrictions are handled internally.
  virtual bool RunOncePerSolution() const { return false; }
  virtual bool NeedAFeasibleSolution() const { return false; }
  // Propagates the shared problem state to all sub-optimizers.
  virtual Status Synchronize(const ProblemState& problem_state);
  // Selects one sub-optimizer (adaptatively) and runs it once.
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);
 private:
  // Instantiates one optimizer (or several, for LOCAL_SEARCH) from a method
  // description and records its initial selection score.
  void AddOptimizer(const BopParameters& parameters,
                    const BopOptimizerMethod& optimizer_method);
  // Builds all optimizers of optimizer_set and the adaptative selector.
  void CreateOptimizers(const ProblemState& problem_state,
                        const BopParameters& parameters,
                        const BopSolverOptimizerSet& optimizer_set);
  MTRandom random_;
  BopConstraintTerms objective_terms_;
  // Adaptative selector choosing which optimizer to run next.
  std::unique_ptr<AdaptativeItemSelector> selector_;
  // Parallel vectors: optimizers_[i] has initial score
  // optimizer_initial_scores_[i].
  std::vector<std::unique_ptr<BopOptimizerBase>> optimizers_;
  std::vector<double> optimizer_initial_scores_;
  SatPropagator sat_propagator_;
  // Cached from the last Synchronize(), used for logging and to disable
  // optimizers requiring a feasible solution.
  bool feasible_solution_;
  double lower_bound_;
  double upper_bound_;
};
// This class provides a way to iteratively select an item among n items in
// an adaptative way.
// TODO(user): Document and move to util?
class AdaptativeItemSelector {
 public:
  AdaptativeItemSelector(int random_seed, const std::vector<double>& initial_scores);
  // Sentinel id for "nothing selected".
  static const int kNoSelection;
  // Blending weight used when updating scores with new success information.
  static const double kErosion;
  // Lower bound applied to all scores.
  static const double kScoreMin;
  // Resets per-round state and makes every item selectable again.
  void StartNewRoundOfSelections();
  // Picks an item with probability proportional to its current score.
  // Returns false when no item is selectable.
  bool SelectItem();
  bool has_selected_element() const {
    return selected_item_id_ != kNoSelection;
  }
  int selected_item_id() const { return selected_item_id_; }
  // Reports the outcome of running the selected item.
  void UpdateSelectedItem(bool success, bool can_still_be_selected);
  // Excludes an item from selection until the next round.
  void MarkItemNonSelectable(int item);
 private:
  struct Item {
    explicit Item(double initial_score)
        : round_score(std::max(kScoreMin, initial_score)),
          current_score(std::max(kScoreMin, initial_score)),
          can_be_selected(true),
          num_selections(0),
          num_successes(0) {}
    double round_score;    // Long-term score carried across rounds.
    double current_score;  // Score used for the current round's selection.
    bool can_be_selected;
    int num_selections;    // Times selected during the current round.
    int num_successes;     // Successful runs during the current round.
  };
  MTRandom random_;
  int selected_item_id_;
  std::vector<Item> items_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_PORTFOLIO_H_

78
src/bop/bop_solution.cc Normal file
View File

@@ -0,0 +1,78 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_solution.h"
namespace operations_research {
namespace bop {
//------------------------------------------------------------------------------
// BopSolution
//------------------------------------------------------------------------------
// Builds a solution initialized with the "lucky" assignment: each objective
// variable is set so that its contribution to the cost is non-positive
// (optimal if it happens to be feasible); all other variables are false.
// Cost and feasibility are computed lazily (see GetCost()/IsFeasible()).
BopSolution::BopSolution(const LinearBooleanProblem& problem,
                         const std::string& name)
    : problem_(&problem),
      name_(name),
      values_(problem.num_variables(), false),
      recompute_cost_(true),
      recompute_is_feasible_(true),
      cost_(0),
      is_feasible_(false) {
  // Try the lucky assignment, i.e. the optimal one if feasible.
  const LinearObjective& objective = problem.objective();
  // Keep the same invariant checks as ComputeCost(): the literals and
  // coefficients arrays must be parallel, and negative literals are not
  // supported yet.
  CHECK_EQ(objective.literals_size(), objective.coefficients_size());
  for (int i = 0; i < objective.coefficients_size(); ++i) {
    CHECK_GT(objective.literals(i), 0);
    const VariableIndex var(objective.literals(i) - 1);
    values_[var] = objective.coefficients(i) < 0;
  }
}
// Recomputes the objective cost of the current assignment: the sum of the
// coefficients of the objective variables set to true. Runs in the size of
// the objective. Clears the recompute flag used by the lazy GetCost().
int64 BopSolution::ComputeCost() const {
  recompute_cost_ = false;
  int64 sum = 0;
  const LinearObjective& objective = problem_->objective();
  // Proto *_size() accessors return int; using int here avoids a
  // signed/unsigned comparison in CHECK_EQ and in the loop condition.
  const int num_sparse_vars = objective.literals_size();
  CHECK_EQ(num_sparse_vars, objective.coefficients_size());
  for (int i = 0; i < num_sparse_vars; ++i) {
    // The solver doesn't support negative literals yet, so the abs() the
    // previous version applied after this CHECK was redundant.
    CHECK_GT(objective.literals(i), 0);
    const VariableIndex var(objective.literals(i) - 1);
    if (values_[var]) {
      sum += objective.coefficients(i);
    }
  }
  return sum;
}
// Recomputes feasibility: evaluates every linear constraint against the
// current assignment and returns false on the first violated bound. Runs in
// the size of the problem. Clears the recompute flag used by IsFeasible().
bool BopSolution::ComputeIsFeasible() const {
  recompute_is_feasible_ = false;
  for (const LinearBooleanConstraint& constraint : problem_->constraints()) {
    int64 sum = 0;
    // Proto *_size() accessors return int; using int avoids a
    // signed/unsigned comparison in CHECK_EQ and in the loop condition.
    const int num_sparse_vars = constraint.literals_size();
    CHECK_EQ(num_sparse_vars, constraint.coefficients_size());
    for (int i = 0; i < num_sparse_vars; ++i) {
      // The solver doesn't support negative literals yet, so the abs() the
      // previous version applied after this CHECK was redundant.
      CHECK_GT(constraint.literals(i), 0);
      const VariableIndex var(constraint.literals(i) - 1);
      if (values_[var]) {
        sum += constraint.coefficients(i);
      }
    }
    if ((constraint.has_upper_bound() && sum > constraint.upper_bound()) ||
        (constraint.has_lower_bound() && sum < constraint.lower_bound())) {
      return false;
    }
  }
  return true;
}
} // namespace bop
} // namespace operations_research

112
src/bop/bop_solution.h Normal file
View File

@@ -0,0 +1,112 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_SOLUTION_H_
#define OR_TOOLS_BOP_BOP_SOLUTION_H_
#include "bop/bop_types.h"
#include "sat/boolean_problem.h"
#include "sat/boolean_problem.pb.h"
namespace operations_research {
namespace bop {
// A Bop solution is a boolean assignment for each variable of the
// problem. The cost value associated to the solution is the instantiation
// of the objective cost of the problem.
// Note that a solution might not be a feasible solution, i.e. might violate
// some constraints of the problem. The Check() method can be used to
// test the feasibility.
class BopSolution {
 public:
  BopSolution(const LinearBooleanProblem& problem, const std::string& name);
  // Sets a variable value and invalidates the cached cost and feasibility.
  void SetValue(VariableIndex var, bool value) {
    recompute_cost_ = true;
    recompute_is_feasible_ = true;
    values_[var] = value;
  }
  size_t Size() const { return values_.size(); }
  bool Value(VariableIndex var) const { return values_[var]; }
  const std::string& name() const { return name_; }
  void set_name(const std::string& name) { name_ = name; }
  // Returns the objective cost of the solution.
  // Note that this code is lazy but not incremental and might run in the
  // problem size. Use with care during search.
  int64 GetCost() const {
    if (recompute_cost_) {
      cost_ = ComputeCost();
    }
    return cost_;
  }
  // Returns the objective cost of the solution taking into account the problem
  // cost scaling and offset. This is mainly useful for displaying the current
  // problem cost, while internally, the algorithm works directly with the
  // integer version of the cost returned by GetCost().
  double GetScaledCost() const {
    return sat::AddOffsetAndScaleObjectiveValue(*problem_,
                                                sat::Coefficient(GetCost()));
  }
  // Returns true iff the solution is feasible.
  // Note that this code is lazy but not incremental and might run in the
  // problem size. Use with care during search.
  bool IsFeasible() const {
    if (recompute_is_feasible_) {
      is_feasible_ = ComputeIsFeasible();
    }
    return is_feasible_;
  }
  // For range based iteration, i.e. for (const bool value : solution) {...}.
  ITIVector<VariableIndex, bool>::const_iterator begin() const {
    return values_.begin();
  }
  ITIVector<VariableIndex, bool>::const_iterator end() const {
    return values_.end();
  }
  // Strict weak ordering used to sort solutions: a feasible solution compares
  // before an infeasible one, and among solutions with the same feasibility
  // the one with the smaller cost compares first.
  bool operator<(const BopSolution& solution) const {
    return IsFeasible() == solution.IsFeasible()
               ? GetCost() < solution.GetCost()
               : IsFeasible() > solution.IsFeasible();
  }
 private:
  bool ComputeIsFeasible() const;
  int64 ComputeCost() const;
  const LinearBooleanProblem* problem_;
  std::string name_;
  ITIVector<VariableIndex, bool> values_;
  // Those are mutable because they behave as const values for a given solution
  // but for performance reasons we want to be lazy on their computation,
  // e.g. not compute the cost each time set_value() is called.
  mutable bool recompute_cost_;
  mutable bool recompute_is_feasible_;
  mutable int64 cost_;
  mutable bool is_feasible_;
  // Note that assign/copy are defined to allow usage of
  // STL collections / algorithms.
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_SOLUTION_H_

389
src/bop/bop_solver.cc Normal file
View File

@@ -0,0 +1,389 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_solver.h"
#include <string>
#include <vector>
#include "base/commandlineflags.h"
#include "base/stringprintf.h"
#include "google/protobuf/text_format.h"
#include "base/threadpool.h"
#include "base/stl_util.h"
#include "bop/bop_fs.h"
#include "bop/bop_lns.h"
#include "bop/bop_ls.h"
#include "bop/bop_portfolio.h"
#include "bop/bop_util.h"
#include "bop/complete_optimizer.h"
#include "glop/lp_solver.h"
#include "lp_data/lp_print_utils.h"
#include "sat/boolean_problem.h"
#include "sat/lp_utils.h"
#include "sat/sat_solver.h"
#include "util/bitset.h"
using operations_research::glop::ColIndex;
using operations_research::glop::DenseRow;
using operations_research::glop::GlopParameters;
using operations_research::glop::LinearProgram;
using operations_research::glop::LPSolver;
using operations_research::LinearBooleanProblem;
using operations_research::LinearBooleanConstraint;
using operations_research::LinearObjective;
using operations_research::glop::ProblemStatus;
namespace operations_research {
namespace bop {
namespace {
// Updates the problem_state when the solution is proved optimal or the problem
// is proved infeasible.
// Returns true when the problem_state has been changed.
// Marks the problem state optimal or infeasible when the given optimizer
// status proves it. Returns true iff the problem_state was changed.
bool UpdateProblemStateBasedOnStatus(BopOptimizerBase::Status status,
                                     ProblemState* problem_state) {
  CHECK(nullptr != problem_state);
  switch (status) {
    case BopOptimizerBase::OPTIMAL_SOLUTION_FOUND:
      problem_state->MarkAsOptimal();
      return true;
    case BopOptimizerBase::INFEASIBLE:
      problem_state->MarkAsInfeasible();
      return true;
    default:
      return false;
  }
}
//------------------------------------------------------------------------------
// SolverSynchronizer
//------------------------------------------------------------------------------
// This internal class is a safety layer that defines the API the solvers have
// to use to synchronize with other solvers.
class SolverSynchronizer {
 public:
  // Per-solver bundle: its (seed-adjusted) parameters, its own problem state,
  // and the time-stamped learned information it publishes to other solvers.
  struct Info {
    Info(const BopParameters& params, const LinearBooleanProblem& problem)
        : parameters(params), problem_state(problem), learned_infos() {
      problem_state.SetParameters(params);
    }
    BopParameters parameters;
    ProblemState problem_state;
    StampedLearnedInfo learned_infos;
  };
  SolverSynchronizer(
      int solver_index, const LinearBooleanProblem& problem,
      std::vector<std::unique_ptr<SolverSynchronizer::Info>>* all_infos);
  // Adds the learned info at the given stamp to the solver.
  void AddLearnedInfo(SolverTimeStamp stamp, const LearnedInfo& learned_info);
  // Synchronizes the problem state of the solver as defined by the
  // BopParameters synchronization_type.
  // Returns *problem_changed true when the state problem has changed due to
  // synchronization.
  // Returns *stop_solver true when the solver should be stopped as the solvers
  // it might wait for have stopped.
  void SynchronizeSolverInfos(SolverTimeStamp stamp, bool* problem_changed,
                              bool* stop_solver);
  // Marks the solver as done, i.e. it reached its last stamp.
  void MarkLastStampReached();
  // Returns the mutable problem state of the solver.
  ProblemState* GetMutableProblemState() const;
  // Returns the parameters of the solver.
  const BopParameters& GetParameters() const;
  // Returns the index of the solver.
  int solver_index() const { return solver_index_; }
 private:
  int solver_index_;
  // Shared (not owned) infos of all solvers; this object only writes to the
  // entry at solver_index_.
  std::vector<std::unique_ptr<SolverSynchronizer::Info>>* all_infos_;
  // Indices of the solvers this solver reads learned info from; filled in the
  // constructor according to the synchronization type.
  std::vector<int> solvers_to_sync_;
  // Scratch buffer reused by SynchronizeSolverInfos().
  LearnedInfo learned_info_;
};
// Builds the synchronizer for solver solver_index and precomputes the set of
// solvers it synchronizes with, based on the solver's synchronization_type:
//   NO_SYNCHRONIZATION: only itself;
//   SYNCHRONIZE_ALL: every solver;
//   SYNCHRONIZE_ON_RIGHT: all solvers with index <= solver_index.
SolverSynchronizer::SolverSynchronizer(
    int solver_index, const LinearBooleanProblem& problem,
    std::vector<std::unique_ptr<SolverSynchronizer::Info>>* all_infos)
    : solver_index_(solver_index),
      all_infos_(all_infos),
      solvers_to_sync_(),
      learned_info_(problem) {
  CHECK(nullptr != all_infos_);
  CHECK_LT(solver_index_, all_infos_->size());
  switch ((*all_infos_)[solver_index]->parameters.synchronization_type()) {
    case BopParameters::NO_SYNCHRONIZATION:
      solvers_to_sync_.push_back(solver_index);
      break;
    case BopParameters::SYNCHRONIZE_ALL:
      for (int index = 0; index < all_infos_->size(); ++index) {
        solvers_to_sync_.push_back(index);
      }
      break;
    case BopParameters::SYNCHRONIZE_ON_RIGHT:
      for (int index = 0; index <= solver_index; ++index) {
        solvers_to_sync_.push_back(index);
      }
      break;
    default:
      LOG(FATAL) << "Unknown synchronization type.";
  }
}
// Publishes this solver's learned info at the given stamp so that other
// solvers can pick it up in SynchronizeSolverInfos().
void SolverSynchronizer::AddLearnedInfo(SolverTimeStamp stamp,
                                        const LearnedInfo& learned_info) {
  (*all_infos_)[solver_index_]->learned_infos.AddLearnedInfo(stamp,
                                                             learned_info);
}
// Merges, at the given stamp, the learned info of every solver this solver
// synchronizes with into its own problem state. Sets *problem_changed when
// any merge modified the state, and *stop_solver (returning early) when one
// of the watched solvers has no info at this stamp anymore, i.e. it stopped.
void SolverSynchronizer::SynchronizeSolverInfos(SolverTimeStamp stamp,
                                                bool* problem_changed,
                                                bool* stop_solver) {
  CHECK(nullptr != problem_changed);
  CHECK(nullptr != stop_solver);
  *problem_changed = false;
  *stop_solver = false;
  for (const int index : solvers_to_sync_) {
    learned_info_.Clear();
    const bool has_info = (*all_infos_)[index]->learned_infos.GetLearnedInfo(
        stamp, &learned_info_);
    if (!has_info) {
      // Limit reached on solver index, stop search.
      *stop_solver = true;
      return;
    }
    const bool merged =
        GetMutableProblemState()->MergeLearnedInfo(learned_info_);
    *problem_changed = *problem_changed || merged;
  }
}
// Marks this solver's stream of learned infos as complete so that solvers
// waiting on it do not block (see SynchronizeSolverInfos()).
void SolverSynchronizer::MarkLastStampReached() {
  (*all_infos_)[solver_index_]->learned_infos.MarkLastStampReached();
}
// Returns this solver's own parameters (the per-solver copy stored in Info).
const BopParameters& SolverSynchronizer::GetParameters() const {
  return (*all_infos_)[solver_index_]->parameters;
}
// Returns this solver's own problem state (mutable; owned by Info).
ProblemState* SolverSynchronizer::GetMutableProblemState() const {
  return &((*all_infos_)[solver_index_]->problem_state);
}
// Runs a new solver until the time limit is reached. The result of the run
// is submitted to the SolverSynchronizer.
void RunOptimizer(const std::string& name, SolverSynchronizer* solver_sync) {
  CHECK(nullptr != solver_sync);
  const BopParameters& parameters = solver_sync->GetParameters();
  ProblemState* const problem_state = solver_sync->GetMutableProblemState();
  TimeLimit time_limit(parameters.max_time_in_seconds(),
                       parameters.max_deterministic_time());
  LOG(INFO) << "limit: " << parameters.max_time_in_seconds() << " -- "
            << parameters.max_deterministic_time();
  const int solver_index = solver_sync->solver_index();
  CHECK_LT(0, parameters.solver_optimizer_sets_size());
  // Use the last defined method set if needed.
  const BopSolverOptimizerSet& solver_optimizer_sets =
      solver_index >= parameters.solver_optimizer_sets_size()
          ? parameters.solver_optimizer_sets(
                parameters.solver_optimizer_sets_size() - 1)
          : parameters.solver_optimizer_sets(solver_index);
  PortfolioOptimizer optimizer(*problem_state, parameters,
                               solver_optimizer_sets,
                               StringPrintf("Portfolio_%d", solver_index));
  // Initial synchronization; this may already prove optimality/infeasibility.
  const BopOptimizerBase::Status sync_status =
      optimizer.Synchronize(*problem_state);
  if (UpdateProblemStateBasedOnStatus(sync_status, problem_state)) return;
  LearnedInfo learned_info(problem_state->original_problem());
  SolverTimeStamp stamp(0);
  // Main loop: run one portfolio step per stamp, publish the learned info,
  // and merge in the other solvers' infos at the same stamp.
  while (!time_limit.LimitReached()) {
    const BopOptimizerBase::Status optimization_status =
        optimizer.Optimize(parameters, &learned_info, &time_limit);
    solver_sync->AddLearnedInfo(stamp, learned_info);
    bool problem_changed = false;
    bool stop_solver = false;
    solver_sync->SynchronizeSolverInfos(stamp, &problem_changed, &stop_solver);
    if (stop_solver || problem_state->IsOptimal() ||
        problem_state->IsInfeasible()) {
      return;
    }
    if (optimization_status == BopOptimizerBase::SOLUTION_FOUND) {
      CHECK(problem_state->solution().IsFeasible());
      LOG(INFO) << problem_state->solution().GetScaledCost()
                << "  New solution! " << name;
    }
    if (problem_changed) {
      // The problem has changed, re-synchronize the optimizer.
      const BopOptimizerBase::Status sync_status =
          optimizer.Synchronize(*problem_state);
      if (UpdateProblemStateBasedOnStatus(sync_status, problem_state)) return;
      problem_state->SynchronizationDone();
    }
    // ABORT means the portfolio has no selectable optimizer left.
    if (optimization_status == BopOptimizerBase::ABORT) {
      break;
    }
    ++stamp;
  }
  // The search is finished for this run, mark the last stamped reached
  // to not block other runs waiting for updates on the learned_infos.
  solver_sync->MarkLastStampReached();
}
} // anonymous namespace
//------------------------------------------------------------------------------
// BopSolver
//------------------------------------------------------------------------------
// Keeps a reference to the problem (which must outlive the solver) and
// validates it; dies on an invalid problem.
BopSolver::BopSolver(const LinearBooleanProblem& problem)
    : problem_(problem),
      problem_state_(problem),
      parameters_(),
      stats_("BopSolver") {
  SCOPED_TIME_STAT(&stats_);
  CHECK(sat::ValidateBooleanProblem(problem).ok());
}
// Dumps the accumulated timing stats when stats are enabled.
BopSolver::~BopSolver() { IF_STATS_ENABLED(LOG(INFO) << stats_.StatString()); }
// Runs the optimization: builds one SolverSynchronizer::Info per configured
// solver, runs the (currently single-threaded) portfolio, then merges all
// solver states back into problem_state_ and maps it to a BopSolveStatus.
BopSolveStatus BopSolver::Solve() {
  SCOPED_TIME_STAT(&stats_);
  UpdateParameters();
  // Create parameters, learned info and problem states for each solver.
  const int num_solvers = parameters_.number_of_solvers();
  LearnedInfo learned_info = problem_state_.GetLearnedInfo();
  std::vector<std::unique_ptr<SolverSynchronizer::Info>> all_infos;
  for (int index = 0; index < num_solvers; ++index) {
    all_infos.emplace_back(new SolverSynchronizer::Info(parameters_, problem_));
    // Each solver gets a distinct seed so their searches diverge.
    all_infos.back()->parameters.set_random_seed(parameters_.random_seed() +
                                                 index);
    all_infos.back()->problem_state.MergeLearnedInfo(learned_info);
  }
  // Build dedicated synchronizers to forbid unsafe access to the memory of the
  // other solvers.
  std::vector<SolverSynchronizer> synchronizers;
  for (int index = 0; index < num_solvers; ++index) {
    synchronizers.push_back(SolverSynchronizer(index, problem_, &all_infos));
  }
  if (num_solvers > 1) {
    LOG(FATAL) << "Multi threading is not supported";
    // ThreadPool thread_pool("ParallelSolve", num_solvers);
    // thread_pool.StartWorkers();
    // for (int index = 0; index < num_solvers; ++index) {
    //   const std::string solver_name = StringPrintf("Solver_%d", index);
    //   SolverSynchronizer* const solver_sync = &synchronizers[index];
    //   thread_pool.Schedule([solver_name, solver_sync]() {
    //     RunOptimizer(solver_name, solver_sync);
    //   });
    // }
  } else {
    // TODO(user): Consider having a dedicated method to solve with only one
    //              solver.
    const std::string solver_name = StringPrintf("Solver_%d", 0);
    SolverSynchronizer* const solver_sync = &synchronizers[0];
    RunOptimizer(solver_name, solver_sync);
  }
  // Synchronize the BopSolver::problem_state_ from the solvers' problem states.
  for (int index = 0; index < num_solvers; ++index) {
    LOG(INFO) << "Solution of solver " << index << ": "
              << all_infos[index]->problem_state.solution().GetScaledCost();
    learned_info.Clear();
    learned_info.solution = all_infos[index]->problem_state.solution();
    learned_info.lower_bound = all_infos[index]->problem_state.lower_bound();
    problem_state_.MergeLearnedInfo(learned_info);
    if (all_infos[index]->problem_state.IsInfeasible()) {
      problem_state_.MarkAsInfeasible();
    }
  }
  // Map the final state to the public status enum.
  return problem_state_.solution().IsFeasible()
             ? (problem_state_.IsOptimal()
                    ? BopSolveStatus::OPTIMAL_SOLUTION_FOUND
                    : BopSolveStatus::FEASIBLE_SOLUTION_FOUND)
             : (problem_state_.IsInfeasible()
                    ? BopSolveStatus::INFEASIBLE_PROBLEM
                    : BopSolveStatus::NO_SOLUTION_FOUND);
}
// Solves starting from a known feasible solution: merges it into the problem
// state first (which may already prove optimality), then runs the regular
// Solve().
BopSolveStatus BopSolver::Solve(const BopSolution& first_solution) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(first_solution.IsFeasible());
  LearnedInfo learned_info(problem_);
  learned_info.solution = first_solution;
  const bool state_changed = problem_state_.MergeLearnedInfo(learned_info);
  if (state_changed && problem_state_.IsOptimal()) {
    return BopSolveStatus::OPTIMAL_SOLUTION_FOUND;
  }
  CHECK(problem_state_.solution().IsFeasible());
  return Solve();
}
// Returns the lower bound mapped back to the original (scaled, offset)
// objective space.
double BopSolver::GetScaledBestBound() const {
  return sat::AddOffsetAndScaleObjectiveValue(
      problem_, sat::Coefficient(problem_state_.lower_bound()));
}
// Returns the relative gap, in percent, between the best solution's scaled
// cost and the scaled best bound.
// NOTE(review): divides by the scaled solution cost, so this yields inf/nan
// when that cost is exactly 0 — confirm callers only use it for display.
double BopSolver::GetScaledGap() const {
  return 100.0 * std::abs(problem_state_.solution().GetScaledCost() -
                          GetScaledBestBound()) /
         std::abs(problem_state_.solution().GetScaledCost());
}
// Installs the default portfolio (one optimizer set parsed from the text
// protos below) when the user configured none, then pushes the parameters to
// the problem state.
void BopSolver::UpdateParameters() {
  if (parameters_.solver_optimizer_sets_size() == 0) {
    // No user defined optimizers: Implement the default behavior.
    const std::vector<std::string> default_optimizers = {
        "type:LINEAR_RELAXATION initial_score:1.0 time_limit_ratio:0.15",
        "type:LP_FIRST_SOLUTION initial_score:0.1",
        "type:OBJECTIVE_FIRST_SOLUTION initial_score:0.1",
        "type:RANDOM_FIRST_SOLUTION initial_score:0.1",
        "type:SAT_CORE_BASED initial_score:1.0",
        "type:LOCAL_SEARCH initial_score:2.0",
        "type:RANDOM_CONSTRAINT_LNS initial_score:0.1",
        "type:RANDOM_LNS_PROPAGATION initial_score:1.0",
        "type:RANDOM_LNS_SAT initial_score:1.0",
        "type:COMPLETE_LNS initial_score:0.1"};
    BopSolverOptimizerSet* const solver_methods =
        parameters_.add_solver_optimizer_sets();
    for (const std::string& default_optimizer : default_optimizers) {
      // Dies if a default above is not a valid BopOptimizerMethod text proto.
      CHECK(::google::protobuf::TextFormat::ParseFromString(
          default_optimizer, solver_methods->add_methods()));
    }
  }
  problem_state_.SetParameters(parameters_);
}
} // namespace bop
} // namespace operations_research

96
src/bop/bop_solver.h Normal file
View File

@@ -0,0 +1,96 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_SOLVER_H_
#define OR_TOOLS_BOP_BOP_SOLVER_H_
// Solver for Boolean Optimization Problems built on top of the SAT solver.
// To optimize a problem the solver uses several optimization strategies like
// Local Search (LS), Large Neighborhood Search (LNS), and Linear
// Programming (LP). See bop_parameters.proto to tune the strategies.
//
// Note that the BopSolver usage is limited to:
// - Boolean variables,
// - Linear constraints and linear optimization objective,
// - Integral weights for both constraints and objective,
// - Minimization.
// To deal with maximization, integral variables and floating weights, one can
// use the bop::IntegralSolver.
//
// Usage example:
// const LinearBooleanProblem problem = BuildProblem();
// BopSolver bop_solver(problem);
// BopParameters bop_parameters;
// bop_parameters.set_max_deterministic_time(10);
// bop_solver.SetParameters(bop_parameters);
// const BopSolveStatus solve_status = bop_solver.Solve();
// if (solve_status == BopSolveStatus::OPTIMAL_SOLUTION_FOUND) { ... }
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/integral_types.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/int_type_indexed_vector.h"
#include "base/int_type.h"
#include "bop/bop_base.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "glop/lp_solver.h"
#include "sat/boolean_problem.pb.h"
#include "sat/sat_solver.h"
#include "util/stats.h"
#include "util/time_limit.h"
namespace operations_research {
namespace bop {
// Solver of Boolean Optimization Problems based on Local Search.
class BopSolver {
 public:
  explicit BopSolver(const LinearBooleanProblem& problem);
  virtual ~BopSolver();
  // Parameters management. Must be called before Solve() to take effect.
  void SetParameters(const BopParameters& parameters) {
    parameters_ = parameters;
  }
  // Returns the status of the optimization.
  BopSolveStatus Solve();
  // Same as Solve() but warm-started from a feasible solution.
  BopSolveStatus Solve(const BopSolution& first_solution);
  const BopSolution& best_solution() const { return problem_state_.solution(); }
  // Returns the value of var_id in the best solution found so far.
  bool GetSolutionValue(VariableIndex var_id) const {
    return problem_state_.solution().Value(var_id);
  }
  // Returns the scaled best bound.
  // In case of minimization (resp. maximization), the best bound is defined as
  // the lower bound (resp. upper bound).
  double GetScaledBestBound() const;
  // Returns the relative gap (in percent) between the best solution and the
  // best bound.
  double GetScaledGap() const;
 private:
  // Installs the default optimizer set when none was configured.
  void UpdateParameters();
  const LinearBooleanProblem& problem_;  // Not owned; must outlive the solver.
  ProblemState problem_state_;
  BopParameters parameters_;
  mutable StatsGroup stats_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_SOLVER_H_

89
src/bop/bop_types.h Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_TYPES_H_
#define OR_TOOLS_BOP_BOP_TYPES_H_
#include "base/basictypes.h"
#include "base/int_type.h"
#include "base/int_type_indexed_vector.h"
namespace operations_research {
namespace bop {
// Strongly-typed integer indices (see base/int_type.h) used to avoid
// accidentally mixing the different index spaces manipulated by Bop.
DEFINE_INT_TYPE(ConstraintIndex, int);
DEFINE_INT_TYPE(EntryIndex, int);
DEFINE_INT_TYPE(SearchIndex, int);
DEFINE_INT_TYPE(TermIndex, int);
DEFINE_INT_TYPE(VariableIndex, int);
DEFINE_INT_TYPE(SolverTimeStamp, int64);
// Status of the solve of Bop.
enum class BopSolveStatus {
  // The solver found the proven optimal solution.
  OPTIMAL_SOLUTION_FOUND,
  // The solver found a solution, but it is not proven to be the optimal
  // solution.
  FEASIBLE_SOLUTION_FOUND,
  // The solver didn't find any solution.
  NO_SOLUTION_FOUND,
  // The problem is infeasible.
  INFEASIBLE_PROBLEM,
  // The problem is invalid.
  INVALID_PROBLEM
};

// Returns the name of the given status, e.g. "OPTIMAL_SOLUTION_FOUND".
inline std::string GetSolveStatusString(BopSolveStatus status) {
  // Intentionally no "default:" case so the compiler complains when a new
  // enum value is added without being handled here.
  switch (status) {
    case BopSolveStatus::OPTIMAL_SOLUTION_FOUND:
      return "OPTIMAL_SOLUTION_FOUND";
    case BopSolveStatus::FEASIBLE_SOLUTION_FOUND:
      return "FEASIBLE_SOLUTION_FOUND";
    case BopSolveStatus::NO_SOLUTION_FOUND:
      return "NO_SOLUTION_FOUND";
    case BopSolveStatus::INFEASIBLE_PROBLEM:
      return "INFEASIBLE_PROBLEM";
    case BopSolveStatus::INVALID_PROBLEM:
      return "INVALID_PROBLEM";
  }
  // Unreachable for valid enum values; kept as a safe fallback.
  return "UNKNOWN Status";
}
// Streams the human-readable name of the status.
inline std::ostream& operator<<(std::ostream& os, BopSolveStatus status) {
  return os << GetSolveStatusString(status);
}
// TODO(user): Remove.
DEFINE_INT_TYPE(SparseIndex, int);

// One term (variable, weight) of a Bop constraint, also carrying the id of
// the search associated with it (initially 0).
struct BopConstraintTerm {
  BopConstraintTerm(VariableIndex _var_id, int64 _weight)
      : var_id(_var_id), search_id(0), weight(_weight) {}

  VariableIndex var_id;
  SearchIndex search_id;
  int64 weight;

  // Note that terms are ordered by search id only, not by variable or weight.
  bool operator<(const BopConstraintTerm& other) const {
    return search_id < other.search_id;
  }
};
typedef ITIVector<SparseIndex, BopConstraintTerm> BopConstraintTerms;
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_TYPES_H_

191
src/bop/bop_util.cc Normal file
View File

@@ -0,0 +1,191 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/bop_util.h"
#include <vector>
#include "base/basictypes.h"
#include "base/integral_types.h"
#include "bop/bop_base.h"
#include "bop/bop_solution.h"
#include "sat/boolean_problem.h"
#include "sat/sat_solver.h"
namespace operations_research {
namespace bop {
namespace {
// Number of entries in the Luby sequence (and thus of per-magnitude
// difficulty parameters) used by LubyAdaptiveParameterValue.
static const int kMaxLubyIndex = 30;
// Maximum Luby boost; see LubyAdaptiveParameterValue::BoostLuby().
static const int kMaxBoost = 30;

// Loads the problem state into the SAT solver. If the problem has already been
// loaded in the sat_solver, fixed variables and objective bounds are updated.
// Returns false when the problem is proved UNSAT.
bool InternalLoadStateProblemToSatSolver(const ProblemState& problem_state,
                                         sat::SatSolver* sat_solver) {
  // Load the problem if not done yet; NumVariables() == 0 is used as the
  // "not yet loaded" marker.
  if (sat_solver->NumVariables() == 0 &&
      !LoadBooleanProblem(problem_state.original_problem(), sat_solver)) {
    return false;
  }

  // Backtrack the solver to be able to add new constraints.
  sat_solver->Backtrack(0);
  sat_solver->TrackBinaryClauses(true);

  // Constrain the objective cost to be greater or equal to the lower bound,
  // and to be smaller than the upper bound. If enforcing the stricter upper
  // bound constraint leads to an UNSAT problem, it means the current solution
  // is proved optimal (if the solution is feasible, else the problem is proved
  // infeasible).
  if (!AddObjectiveConstraint(problem_state.original_problem(),
                              problem_state.lower_bound() != kint64min,
                              sat::Coefficient(problem_state.lower_bound()),
                              problem_state.upper_bound() != kint64max,
                              sat::Coefficient(problem_state.upper_bound() - 1),
                              sat_solver)) {
    return false;
  }

  // Set fixed variables by adding the corresponding unit clauses.
  for (VariableIndex var(0); var < problem_state.is_fixed().size(); ++var) {
    if (problem_state.is_fixed()[var]) {
      if (!sat_solver->AddUnitClause(
              sat::Literal(sat::VariableIndex(var.value()),
                           problem_state.fixed_values()[var]))) {
        return false;
      }
    }
  }

  // Adds the new binary clauses from the problem state, and acknowledges them.
  if (!sat_solver->AddBinaryClauses(problem_state.NewlyAddedBinaryClauses())) {
    return false;
  }
  sat_solver->ClearNewlyAddedBinaryClauses();
  return true;
}
}  // anonymous namespace
// Loads the problem state into the given SAT solver and translates the result
// into an optimizer status: CONTINUE on success. When the load proves the
// model UNSAT, the current solution (if feasible) is optimal; otherwise the
// problem itself is infeasible.
BopOptimizerBase::Status LoadStateProblemToSatSolver(
    const ProblemState& problem_state, sat::SatSolver* sat_solver) {
  const bool loaded =
      InternalLoadStateProblemToSatSolver(problem_state, sat_solver);
  if (!loaded) {
    if (problem_state.solution().IsFeasible()) {
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }
    return BopOptimizerBase::INFEASIBLE;
  }
  return BopOptimizerBase::CONTINUE;
}
// Copies into *info what the SAT solver has learned: the literals fixed at the
// root level and the newly added binary clauses (which are then cleared on the
// solver side, hence the non-const solver).
void ExtractLearnedInfoFromSatSolver(sat::SatSolver* solver,
                                     LearnedInfo* info) {
  CHECK(nullptr != solver);
  CHECK(nullptr != info);

  // Fixed variables: the prefix of the trail before the first decision only
  // contains root-level propagations, i.e. fixed literals.
  info->fixed_literals.clear();
  const sat::Trail& propagation_trail = solver->LiteralTrail();
  const int root_size = solver->CurrentDecisionLevel() == 0
                            ? propagation_trail.Index()
                            : solver->Decisions().front().trail_index;
  for (int trail_index = 0; trail_index < root_size; ++trail_index) {
    info->fixed_literals.push_back(propagation_trail[trail_index]);
  }

  // Binary clauses. Extracting clears what is considered "new" on the solver.
  info->binary_clauses = solver->NewlyAddedBinaryClauses();
  solver->ClearNewlyAddedBinaryClauses();
}
// Copies the SAT assignment of the first solution->Size() variables into the
// given Bop solution. All copied variables must already be assigned.
void SatAssignmentToBopSolution(const sat::VariablesAssignment& assignment,
                                BopSolution* solution) {
  CHECK(solution != nullptr);
  // The assignment may contain more variables than the initial problem (e.g.
  // encoding variables); only the variables of the initial problem are kept.
  CHECK_LE(solution->Size(), assignment.NumberOfVariables());
  for (sat::VariableIndex sat_var(0); sat_var < solution->Size(); ++sat_var) {
    CHECK(assignment.IsVariableAssigned(sat_var));
    solution->SetValue(VariableIndex(sat_var.value()),
                       assignment.IsLiteralTrue(sat::Literal(sat_var, true)));
  }
}
//------------------------------------------------------------------------------
// AdaptiveParameterValue
//------------------------------------------------------------------------------
AdaptiveParameterValue::AdaptiveParameterValue(double initial_value)
    : value_(initial_value), num_changes_(0) {}

// Only resets the adaptation counter; the current value is kept.
void AdaptiveParameterValue::Reset() { num_changes_ = 0; }

// Increases value_ by a multiplicative factor that shrinks as num_changes_
// grows, so the parameter moves quickly at first and then stabilizes. Taking
// the min of the two expressions keeps the value below 1.0 (for values
// in [0, 1]).
void AdaptiveParameterValue::Increase() {
  ++num_changes_;
  const double factor = 1.0 + 1.0 / (num_changes_ / 2.0 + 1);
  value_ = std::min(1.0 - (1.0 - value_) / factor, value_ * factor);
}

// Mirror of Increase(); taking the max keeps the value above 0.0.
void AdaptiveParameterValue::Decrease() {
  ++num_changes_;
  const double factor = 1.0 + 1.0 / (num_changes_ / 2.0 + 1);
  value_ = std::max(value_ / factor, 1.0 - (1.0 - value_) * factor);
}
//------------------------------------------------------------------------------
// LubyAdaptiveParameterValue
//------------------------------------------------------------------------------
// One difficulty parameter per possible magnitude of luby_value_, each
// starting at 0.5.
LubyAdaptiveParameterValue::LubyAdaptiveParameterValue()
    : luby_id_(0),
      luby_boost_(0),
      luby_value_(0),
      difficulties_(kMaxLubyIndex, AdaptiveParameterValue(0.5)) {
  Reset();
}
void LubyAdaptiveParameterValue::Reset() {
luby_id_ = 0;
luby_boost_ = 0;
luby_value_ = 0;
for (int i = 0; i < difficulties_.size(); ++i) {
difficulties_[i].Reset();
}
}
// The difficulty parameter in use is selected by the magnitude (most
// significant bit position) of the current Luby value.
// NOTE(review): this assumes luby_value_ > 0, i.e. UpdateLuby() was called
// after the last Reset() -- TODO confirm.
void LubyAdaptiveParameterValue::IncreaseParameter() {
  const int luby_msb = MostSignificantBitPosition64(luby_value_);
  difficulties_[luby_msb].Increase();
}

void LubyAdaptiveParameterValue::DecreaseParameter() {
  const int luby_msb = MostSignificantBitPosition64(luby_value_);
  difficulties_[luby_msb].Decrease();
}

double LubyAdaptiveParameterValue::GetParameterValue() const {
  const int luby_msb = MostSignificantBitPosition64(luby_value_);
  return difficulties_[luby_msb].value();
}

// Increments the boost and returns true when kMaxBoost is reached.
bool LubyAdaptiveParameterValue::BoostLuby() {
  ++luby_boost_;
  return luby_boost_ >= kMaxBoost;
}

// Advances to the next element of the Luby sequence, scaled by 2^luby_boost_.
// NOTE(review): luby_value_ is an int; large boosts could overflow -- TODO
// confirm the bound kMaxBoost keeps this in range.
void LubyAdaptiveParameterValue::UpdateLuby() {
  ++luby_id_;
  luby_value_ = sat::SUniv(luby_id_) << luby_boost_;
}
} // namespace bop
} // namespace operations_research

89
src/bop/bop_util.h Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_BOP_UTIL_H_
#define OR_TOOLS_BOP_BOP_UTIL_H_
#include <vector>
#include "base/basictypes.h"
#include "base/integral_types.h"
#include "bop/bop_base.h"
#include "bop/bop_solution.h"
#include "sat/sat_solver.h"
namespace operations_research {
namespace bop {
// Loads the problem state into the sat_solver. If the problem has already been
// loaded in the sat_solver, fixed variables and objective bounds are updated.
// Returns the status of the load:
// - CONTINUE: State problem successfully loaded.
// - OPTIMAL_SOLUTION_FOUND: Solution is proved optimal.
// If a feasible solution exists, this load function imposes the solution
// to be strictly better. Then when SAT proves the problem is UNSAT, that
// actually means that the current solution is optimal.
// - INFEASIBLE: The problem is proved to be infeasible.
// Note that the sat_solver will be backtracked to the root level in order
// to add new constraints.
BopOptimizerBase::Status LoadStateProblemToSatSolver(
const ProblemState& problem_state, sat::SatSolver* sat_solver);
// Extracts from the sat solver any new information about the problem. Note that
// the solver is not const because this function clears what is considered
// "new".
void ExtractLearnedInfoFromSatSolver(sat::SatSolver* solver, LearnedInfo* info);
void SatAssignmentToBopSolution(const sat::VariablesAssignment& assignment,
BopSolution* solution);
// A double parameter in [0, 1] that adapts itself: each Increase()/Decrease()
// moves it by a factor that shrinks as the number of changes grows.
class AdaptiveParameterValue {
 public:
  // Initial value is in [0..1].
  explicit AdaptiveParameterValue(double initial_value);

  // Resets the adaptation counter; the current value is kept.
  void Reset();

  void Increase();
  void Decrease();

  double value() const { return value_; }

 private:
  double value_;
  // Number of Increase()/Decrease() calls since the last Reset(); used to
  // dampen later changes.
  int num_changes_;
};
// Maintains one AdaptiveParameterValue per magnitude of the current Luby
// sequence value, together with the Luby sequence state itself.
class LubyAdaptiveParameterValue {
 public:
  LubyAdaptiveParameterValue();

  void Reset();

  // Adapts the difficulty parameter associated with the current Luby value.
  void IncreaseParameter();
  void DecreaseParameter();
  double GetParameterValue() const;

  // Advances to the next (boosted) Luby value.
  void UpdateLuby();
  // Increments the boost; returns true when the maximum boost is reached.
  bool BoostLuby();

  int luby_value() const { return luby_value_; }

 private:
  int luby_id_;
  int luby_boost_;
  int luby_value_;
  std::vector<AdaptiveParameterValue> difficulties_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_BOP_UTIL_H_

View File

@@ -0,0 +1,280 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/complete_optimizer.h"
#include "bop/bop_util.h"
#include "sat/optimization.h"
#include "sat/boolean_problem.h"
namespace operations_research {
namespace bop {
// No solution is known at construction time: start with an infinite cost.
SatLinearScanOptimizer::SatLinearScanOptimizer(const std::string& name)
    : BopOptimizerBase(name),
      initial_solution_cost_(kint64max) {}

SatLinearScanOptimizer::~SatLinearScanOptimizer() {}
// Synchronizes the internal SAT solver with the given problem state and
// constrains the objective to be strictly smaller than the cost of the current
// solution, so any model found by Optimize() is an improvement.
BopOptimizerBase::Status SatLinearScanOptimizer::Synchronize(
    const ProblemState& problem_state) {
  SCOPED_TIME_STAT(&stats_);
  const BopOptimizerBase::Status status =
      LoadStateProblemToSatSolver(problem_state, &solver_);
  if (status != BopOptimizerBase::CONTINUE) return status;

  UseObjectiveForSatAssignmentPreference(problem_state.original_problem(),
                                         &solver_);

  initial_solution_cost_ = problem_state.solution().GetCost();
  solver_.Backtrack(0);
  // objective <= initial_solution_cost_ - 1, i.e. strictly better than the
  // current solution. NOTE(review): the return value is ignored here; if the
  // constraint makes the model UNSAT this presumably surfaces as MODEL_UNSAT
  // in Optimize() -- TODO confirm.
  AddObjectiveConstraint(
      problem_state.original_problem(), false, sat::Coefficient(0), true,
      sat::Coefficient(initial_solution_cost_) - 1, &solver_);
  return BopOptimizerBase::CONTINUE;
}
// Runs the SAT solver within the given limits. Returns:
//   - OPTIMAL_SOLUTION_FOUND when the solver proves UNSAT: the objective was
//     constrained to be strictly better (see Synchronize()), so the current
//     solution is optimal.
//   - SOLUTION_FOUND when a (strictly better) model was found.
//   - CONTINUE when a limit was reached before reaching a conclusion.
BopOptimizerBase::Status SatLinearScanOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  // Set the conflict budget for this call.
  // NOTE(review): reuses the "random lns" conflict limit parameter -- TODO
  // confirm this is intended for the linear scan too.
  sat::SatParameters sat_params = solver_.parameters();
  sat_params.set_max_number_of_conflicts(
      parameters.max_number_of_conflicts_in_random_lns());
  solver_.SetParameters(sat_params);

  const sat::SatSolver::Status sat_status =
      solver_.SolveWithTimeLimit(time_limit);
  ExtractLearnedInfoFromSatSolver(&solver_, learned_info);
  // No assumptions are used here, so this status is impossible.
  CHECK_NE(sat_status, sat::SatSolver::ASSUMPTIONS_UNSAT);

  if (sat_status == sat::SatSolver::MODEL_UNSAT) {
    // No strictly better solution exists: the current cost is a lower bound.
    learned_info->lower_bound = initial_solution_cost_;
    return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
  }
  if (sat_status == sat::SatSolver::MODEL_SAT) {
    SatAssignmentToBopSolution(solver_.Assignment(), &learned_info->solution);
    return BopOptimizerBase::SOLUTION_FOUND;
  }
  return BopOptimizerBase::CONTINUE;
}
SatCoreBasedOptimizer::SatCoreBasedOptimizer(const std::string& name)
    : BopOptimizerBase(name),
      initialized_(false),
      initial_solution_(),
      assumptions_already_added_(false) {
  // These bounds are in terms of the encoding objective (number of variables
  // not at their minimal value), not the original objective.
  lower_bound_ = sat::Coefficient(0);
  upper_bound_ = sat::Coefficient(kint64max);
}

SatCoreBasedOptimizer::~SatCoreBasedOptimizer() {}
// Synchronizes with the problem state: (re)loads the problem into the SAT
// solver, lazily builds the initial encoding nodes, and refreshes the internal
// upper bound from the cost of the current solution.
BopOptimizerBase::Status SatCoreBasedOptimizer::Synchronize(
    const ProblemState& problem_state) {
  const BopOptimizerBase::Status status =
      LoadStateProblemToSatSolver(problem_state, &solver_);
  if (status != BopOptimizerBase::CONTINUE) return status;

  if (!initialized_) {
    // Initialize the algorithm: build the encoding nodes from the objective.
    nodes_ = sat::CreateInitialEncodingNodes(
        problem_state.original_problem().objective(), &offset_, &repository_);
    initialized_ = true;

    // This is used by the "stratified" approach: start by only assuming the
    // nodes with the largest weight (see SolveWithAssumptions()).
    stratified_lower_bound_ = sat::Coefficient(0);
    for (sat::EncodingNode* n : nodes_) {
      stratified_lower_bound_ = std::max(stratified_lower_bound_, n->weight());
    }
  }
  initial_solution_.reset(new BopSolution(problem_state.solution()));

  // Extract the new upper bound from the current solution cost.
  upper_bound_ = initial_solution_->GetCost() + offset_;
  return BopOptimizerBase::CONTINUE;
}
// Resets the solver and solves under the stratification assumptions: one
// assumption per non-empty encoding node whose weight is at least the current
// stratified threshold.
sat::SatSolver::Status SatCoreBasedOptimizer::SolveWithAssumptions() {
  solver_.Backtrack(0);
  // Account in the lower bound for what Reduce() removed from each node.
  for (sat::EncodingNode* n : nodes_) {
    lower_bound_ += n->Reduce(solver_) * n->weight();
  }
  if (upper_bound_ != sat::kCoefficientMax) {
    const sat::Coefficient gap = upper_bound_ - lower_bound_;
    if (gap == 0) return sat::SatSolver::MODEL_SAT;
    // A node cannot contribute more than gap / weight without exceeding the
    // upper bound.
    for (sat::EncodingNode* n : nodes_) {
      n->ApplyUpperBound((gap / n->weight()).value(), &solver_);
    }
  }

  std::vector<sat::Literal> assumptions;
  {
    // Compact nodes_ in place by dropping the empty nodes, and collect the
    // assumptions of the nodes above the stratification threshold.
    int new_node_index = 0;
    for (sat::EncodingNode* n : nodes_) {
      if (n->size() > 0) {
        if (n->weight() >= stratified_lower_bound_) {
          assumptions.push_back(n->literal(0).Negated());
        }
        nodes_[new_node_index] = n;
        ++new_node_index;
      }
    }
    nodes_.resize(new_node_index);
  }
  CHECK_LE(assumptions.size(), nodes_.size());
  return solver_.ResetAndSolveWithGivenAssumptions(assumptions);
}
// Core-guided optimization loop. Each iteration runs the SAT solver under the
// stratification assumptions. Depending on the outcome:
//  - UNSAT of the whole model: the initial solution is proved optimal.
//  - SAT: either lower the stratification threshold and continue, or conclude
//    optimality when all strata were considered.
//  - Assumptions UNSAT: extract and minimize the core, then transform the
//    encoding (merge the core nodes into a new node) before iterating.
BopOptimizerBase::Status SatCoreBasedOptimizer::Optimize(
    const BopParameters& parameters, LearnedInfo* learned_info,
    TimeLimit* time_limit) {
  SCOPED_TIME_STAT(&stats_);
  CHECK(learned_info != nullptr);
  CHECK(time_limit != nullptr);
  learned_info->Clear();

  // Shared conflict budget across all iterations of this call.
  int64 conflict_limit = parameters.max_number_of_conflicts_in_random_lns();
  double deterministic_time_at_last_sync = solver_.deterministic_time();
  while (true) {
    sat::SatParameters sat_params = solver_.parameters();
    sat_params.set_max_time_in_seconds(time_limit->GetTimeLeft());
    sat_params.set_max_deterministic_time(
        time_limit->GetDeterministicTimeLeft());
    sat_params.set_max_number_of_conflicts(conflict_limit);
    solver_.SetParameters(sat_params);

    const int64 old_num_conflicts = solver_.num_failures();
    const sat::SatSolver::Status sat_status =
        assumptions_already_added_ ? solver_.Solve() : SolveWithAssumptions();
    // Charge the deterministic time spent in the SAT solver to the time limit.
    time_limit->AdvanceDeterministicTime(solver_.deterministic_time() -
                                         deterministic_time_at_last_sync);
    deterministic_time_at_last_sync = solver_.deterministic_time();
    assumptions_already_added_ = true;
    conflict_limit -= solver_.num_failures() - old_num_conflicts;
    learned_info->lower_bound = lower_bound_.value() - offset_.value();
    ExtractLearnedInfoFromSatSolver(&solver_, learned_info);

    // This is possible because we over-constrain the objective.
    if (sat_status == sat::SatSolver::MODEL_UNSAT) {
      learned_info->solution = *initial_solution_;
      learned_info->lower_bound = learned_info->solution.GetCost();
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }

    // Stop (and resume later) when the time or conflict budget is exhausted.
    if (sat_status == sat::SatSolver::LIMIT_REACHED || conflict_limit < 0) {
      return BopOptimizerBase::CONTINUE;
    }

    if (sat_status == sat::SatSolver::MODEL_SAT) {
      // Lower the stratification threshold to the largest node weight that is
      // strictly smaller than the current threshold, if any.
      const sat::Coefficient old_lower_bound = stratified_lower_bound_;
      for (sat::EncodingNode* n : nodes_) {
        if (n->weight() < old_lower_bound) {
          if (stratified_lower_bound_ == old_lower_bound ||
              n->weight() > stratified_lower_bound_) {
            stratified_lower_bound_ = n->weight();
          }
        }
      }
      if (stratified_lower_bound_ < old_lower_bound) {
        // More strata remain: report the model if it improves the current
        // solution, otherwise keep iterating with the lowered threshold.
        assumptions_already_added_ = false;
        BopSolution new_solution = *initial_solution_;
        SatAssignmentToBopSolution(solver_.Assignment(), &new_solution);
        if (new_solution.GetCost() < initial_solution_->GetCost()) {
          learned_info->solution = new_solution;
          return BopOptimizerBase::SOLUTION_FOUND;
        } else {
          continue;
        }
      }

      // All strata were considered: the result is optimal.
      if (upper_bound_ == lower_bound_) {
        // The initial solution is proved optimal thanks to the bounds.
        // Note that the SAT solver might not have a current valid solution.
        learned_info->solution = *initial_solution_;
      } else {
        SatAssignmentToBopSolution(solver_.Assignment(),
                                   &learned_info->solution);
      }
      learned_info->lower_bound = learned_info->solution.GetCost();
      return BopOptimizerBase::OPTIMAL_SOLUTION_FOUND;
    }

    // The interesting case: we have a core.
    // TODO(user): Check that this cannot fail because of the conflict limit.
    std::vector<sat::Literal> core = solver_.GetLastIncompatibleDecisions();
    sat::MinimizeCore(&solver_, &core);

    // Compute the minimum weight among the core nodes. The scan assumes the
    // core literals appear in the same order as in nodes_.
    // TODO(user): make a function.
    sat::Coefficient min_weight = sat::kCoefficientMax;
    {
      int index = 0;
      for (int i = 0; i < core.size(); ++i) {
        for (; index < nodes_.size() &&
               nodes_[index]->literal(0).Negated() != core[i];
             ++index) {
        }
        CHECK_LT(index, nodes_.size());
        min_weight = std::min(min_weight, nodes_[index]->weight());
      }
    }

    solver_.Backtrack(0);
    assumptions_already_added_ = false;
    int new_node_index = 0;
    if (core.size() == 1) {
      // A unit core fixes a single assumption to false: grow that node.
      CHECK(solver_.Assignment().IsLiteralFalse(core[0]));
      for (sat::EncodingNode* n : nodes_) {
        if (n->literal(0).Negated() == core[0]) {
          sat::IncreaseNodeSize(n, &solver_);
          continue;
        }
      }
    } else {
      // Merge the core nodes into a new node of weight min_weight. A core node
      // whose weight is larger than min_weight keeps the remainder as a
      // separate node; non-core nodes are compacted in place.
      int index = 0;
      std::vector<sat::EncodingNode*> to_merge;
      for (int i = 0; i < core.size(); ++i) {
        for (; nodes_[index]->literal(0).Negated() != core[i]; ++index) {
          CHECK_LT(index, nodes_.size());
          nodes_[new_node_index] = nodes_[index];
          ++new_node_index;
        }
        CHECK_LT(index, nodes_.size());
        to_merge.push_back(nodes_[index]);
        if (nodes_[index]->weight() > min_weight) {
          nodes_[index]->set_weight(nodes_[index]->weight() - min_weight);
          nodes_[new_node_index] = nodes_[index];
          ++new_node_index;
        }
        ++index;
      }
      for (; index < nodes_.size(); ++index) {
        nodes_[new_node_index] = nodes_[index];
        ++new_node_index;
      }
      nodes_.resize(new_node_index);
      nodes_.push_back(
          sat::LazyMergeAllNodeWithPQ(to_merge, &solver_, &repository_));
      sat::IncreaseNodeSize(nodes_.back(), &solver_);
      nodes_.back()->set_weight(min_weight);
      CHECK(solver_.AddUnitClause(nodes_.back()->literal(0)));
    }
  }
}
} // namespace bop
} // namespace operations_research

View File

@@ -0,0 +1,85 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file contains some BopOptimizerBase implementations that are "complete"
// solvers. That is, they work on the full problem, and can solve the problem
// (and prove optimality) by themselves. Moreover, they can be run for short
// period of time and resumed later from the state they were left off.
//
// The idea is that it is worthwhile spending some time in these algorithms,
// because in some situation they can improve the current upper/lower bound or
// even solve the problem to optimality.
#ifndef OR_TOOLS_BOP_COMPLETE_OPTIMIZER_H_
#define OR_TOOLS_BOP_COMPLETE_OPTIMIZER_H_
#include "bop/bop_base.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "sat/encoding.h"
#include "sat/sat_solver.h"
#include "sat/boolean_problem.pb.h"
namespace operations_research {
namespace bop {
// Complete optimizer that asks the SAT solver for a solution strictly better
// than the current one (see the matching .cc: Synchronize() over-constrains
// the objective, Optimize() then runs the solver).
class SatLinearScanOptimizer : public BopOptimizerBase {
 public:
  // TODO(user): change api to deal with error when loading the problem?
  explicit SatLinearScanOptimizer(const std::string& name);
  virtual ~SatLinearScanOptimizer();

 protected:
  virtual bool RunOncePerSolution() const { return false; }
  // TODO(user): The scan optimizer doesn't need a solution.
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);

 private:
  // Cost of the solution at the last Synchronize(); used to over-constrain
  // the objective and to report the lower bound on UNSAT.
  int64 initial_solution_cost_;
  sat::SatSolver solver_;
};
// TODO(user): Merge this with the code in sat/optimization.cc
// Complete optimizer implementing a core-based approach: it solves under
// assumptions and exploits the returned UNSAT cores to increase the lower
// bound until the gap with the upper bound is closed.
class SatCoreBasedOptimizer : public BopOptimizerBase {
 public:
  explicit SatCoreBasedOptimizer(const std::string& name);
  virtual ~SatCoreBasedOptimizer();

 protected:
  virtual bool RunOncePerSolution() const { return false; }
  // TODO(user): The core based optimizer doesn't need a solution.
  virtual bool NeedAFeasibleSolution() const { return true; }
  virtual Status Synchronize(const ProblemState& problem_state);
  virtual Status Optimize(const BopParameters& parameters,
                          LearnedInfo* learned_info, TimeLimit* time_limit);

 private:
  bool initialized_;
  // Solution at the last Synchronize(); used as the reference to beat.
  std::unique_ptr<BopSolution> initial_solution_;
  sat::SatSolver::Status SolveWithAssumptions();
  bool assumptions_already_added_;
  sat::SatSolver solver_;
  // Offset and bounds expressed in the encoding's coefficient space.
  sat::Coefficient offset_;
  sat::Coefficient lower_bound_;
  sat::Coefficient upper_bound_;
  sat::Coefficient stratified_lower_bound_;
  // Owns the lazily created encoding nodes referenced by nodes_.
  std::vector<std::unique_ptr<sat::EncodingNode>> repository_;
  std::vector<sat::EncodingNode*> nodes_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_COMPLETE_OPTIMIZER_H_

795
src/bop/integral_solver.cc Normal file
View File

@@ -0,0 +1,795 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "bop/integral_solver.h"
#include <math.h>
#include <vector>
#include "bop/bop_solver.h"
#include "lp_data/lp_decomposer.h"
namespace operations_research {
namespace bop {
using operations_research::glop::ColIndex;
using operations_research::glop::DenseRow;
using operations_research::glop::Fractional;
using operations_research::glop::LinearProgram;
using operations_research::glop::LPDecomposer;
using operations_research::glop::SparseColumn;
using operations_research::glop::SparseMatrix;
using operations_research::glop::RowIndex;
using operations_research::glop::kInfinity;
namespace {
// TODO(user): Use the one in MIP or move this one to util.
// Returns true when x is within 1e-10 of its nearest integer.
bool IsIntegerWithinTolerance(Fractional x) {
  const double kTolerance = 1e-10;
  const double distance_to_nearest_integer = std::abs(x - round(x));
  return distance_to_nearest_integer <= kTolerance;
}
//------------------------------------------------------------------------------
// IntegralVariable
//------------------------------------------------------------------------------
// Model an integral variable using boolean variables.
// TODO(user): Enable discrete representation by value, i.e. use three boolean
// variables when only possible values are 10, 12, 32.
// In the same way, when only two consecutive values are possible
// use only one boolean variable with an offset.
class IntegralVariable {
 public:
  IntegralVariable();

  // Creates the minimal number of boolean variables to represent an integral
  // variable with range [lower_bound, upper_bound]. start_var_index corresponds
  // to the next available boolean variable index. If three boolean variables
  // are needed to model the integral variable, the used variables will have
  // indices start_var_index, start_var_index +1, and start_var_index +2.
  void BuildFromRange(int start_var_index, Fractional lower_bound,
                      Fractional upper_bound);

  // Restores the variable to its freshly-constructed (empty) state.
  void Clear();

  void set_offset(int64 offset) { offset_ = offset; }
  // Despite the name, this APPENDS one weighted boolean variable to the
  // representation (see the definition); it does not overwrite anything.
  void set_weight(VariableIndex var, int64 weight);

  int GetNumberOfBooleanVariables() const { return bits_.size(); }

  const std::vector<VariableIndex>& bits() const { return bits_; }
  const std::vector<int64>& weights() const { return weights_; }
  int64 offset() const { return offset_; }

  // Returns the value of the integral variable based on the boolean conversion
  // solution and the boolean solution to the problem.
  int64 GetSolutionValue(const BopSolution& solution) const;

  std::string DebugString() const;

 private:
  // The value of the integral variable is expressed as
  //   sum_i(weights[i] * Value(bits[i])) + offset.
  // Note that weights can be negative to represent negative values.
  std::vector<VariableIndex> bits_;
  std::vector<int64> weights_;
  int64 offset_;
};
IntegralVariable::IntegralVariable() : bits_(), weights_(), offset_(0) {}

void IntegralVariable::BuildFromRange(int start_var_index,
                                      Fractional lower_bound,
                                      Fractional upper_bound) {
  Clear();

  // Integral variable. Split the variable into the minimum number of bits
  // required to model the upper bound: the variable is encoded as
  //   integral_lower_bound + sum_i(2^i * bit_i),
  // so the offset is the rounded-up lower bound. Both bounds must be finite.
  CHECK_NE(-kInfinity, lower_bound);
  CHECK_NE(kInfinity, upper_bound);
  const int64 integral_lower_bound = static_cast<int64>(ceil(lower_bound));
  const int64 integral_upper_bound = static_cast<int64>(floor(upper_bound));
  offset_ = integral_lower_bound;
  const int64 delta = integral_upper_bound - integral_lower_bound;
  // NOTE(review): when delta == 0 the variable is constant and presumably no
  // bit is created; depends on MostSignificantBitPosition64(0) -- TODO confirm.
  const int num_used_bits = MostSignificantBitPosition64(delta) + 1;
  for (int i = 0; i < num_used_bits; ++i) {
    bits_.push_back(VariableIndex(start_var_index + i));
    weights_.push_back(1ULL << i);
  }
}
void IntegralVariable::Clear() {
  bits_.clear();
  weights_.clear();
  offset_ = 0;
}

// Appends the term (weight * var) to the representation of this variable.
void IntegralVariable::set_weight(VariableIndex var, int64 weight) {
  bits_.push_back(var);
  weights_.push_back(weight);
}
// Decodes sum_i(weights_[i] * bit_i) + offset_ from the boolean solution.
int64 IntegralVariable::GetSolutionValue(const BopSolution& solution) const {
  int64 decoded_value = offset_;
  for (size_t i = 0; i < bits_.size(); ++i) {
    decoded_value += weights_[i] * solution.Value(bits_[i]);
  }
  return decoded_value;
}
// Returns a string of the form "w0 [b0] w1 [b1] ...  Offset: o" listing each
// weight with its boolean variable index.
std::string IntegralVariable::DebugString() const {
  std::string str;
  CHECK_EQ(bits_.size(), weights_.size());
  for (int i = 0; i < bits_.size(); ++i) {
    str += StringPrintf("%lld [%d] ", weights_[i], bits_[i].value());
  }
  str += StringPrintf(" Offset: %lld", offset_);
  return str;
}
//------------------------------------------------------------------------------
// IntegralProblemConverter
//------------------------------------------------------------------------------
// This class is used to convert a LinearProblem containing integral variables
// into a LinearBooleanProblem that Bop can consume.
// The converter tries to reuse existing boolean variables as much as possible,
// but there are no guarantees to model all integral variables using the total
// minimal number of boolean variables.
// Consider for instance the constraint "x - 2 * y = 0".
// Depending on the declaration order, two different outcomes are possible:
//   - When x is considered first, the converter will generate new variables
//     for both x and y as we only consider integral weights, i.e. y = x / 2.
//   - When y is considered first, the converter will reuse boolean variables
//     from y to model x as x = 2 * y (integral weight).
//
// Note that the converter only deals with integral constraint and variable
// coefficients, and integral variables, i.e. no continuous variables.
class IntegralProblemConverter {
 public:
  explicit IntegralProblemConverter(const LinearProgram& linear_problem);

  // Converts the LinearProgram into a LinearBooleanProblem.
  // Returns false when the conversion fails.
  bool ConvertToBooleanProblem();

  // Returns the value of the integral variable based on the boolean conversion
  // and the boolean solution to the problem.
  int64 GetSolutionValue(ColIndex col, const BopSolution& solution) const;

  // Returns the converted problem. Only meaningful after a successful call
  // to ConvertToBooleanProblem().
  const LinearBooleanProblem& problem() const { return problem_; }

 private:
  // Returns true when the linear_problem_ can be converted into a boolean
  // problem. Note that floating weigths and continuous variables are not
  // supported.
  bool CheckProblem() const;

  // Initializes the type of each variable of the linear_problem_.
  void InitVariableTypes();

  // Converts all variables of the problem.
  void ConvertAllVariables();

  // Adds all variables constraints, i.e. lower and upper bounds of variables.
  void AddVariableConstraints();

  // Converts all constraints from LinearProgram to boolean problem.
  void ConvertAllConstraints();

  // Converts the objective from LinearProgram to boolean problem.
  void ConvertObjective();

  // Converts the integral variable represented by col in the linear_problem_
  // into an IntegralVariable using existing boolean variables.
  // Returns false when existing boolean variables are not enough to model
  // the integral variable.
  bool ConvertUsingExistingBooleans(ColIndex col,
                                    IntegralVariable* integral_var);

  // Creates the integral_var using the given linear_problem_ constraint.
  // The constraint is an equality constraint and contains only one integral
  // variable (already the case in the model or thanks to previous
  // booleanization of other integral variables), i.e.
  //   bound <= w * integral_var + sum(w_i * b_i) <= bound
  // The remaining integral variable can then be expressed:
  //   integral_var == (bound + sum(-w_i * b_i)) / w
  // Note that all divisions by w have to be integral as Bop only deals with
  // integral coefficients.
  bool CreateVariableUsingConstraint(RowIndex constraint,
                                     IntegralVariable* integral_var);

  // Adds weighted integral variable represented by col to the current dense
  // constraint. Returns the constant contribution (weight * offset of the
  // booleanized variable) to be folded into the constraint bounds.
  Fractional AddWeightedIntegralVariable(
      ColIndex col, Fractional weight,
      ITIVector<VariableIndex, Fractional>* dense_weights);

  // Scales weights and adds all non-zero scaled weights and literals to t.
  // t is a constraint or the objective.
  // Returns the bound error due to the scaling.
  // The weight is scaled using:
  //   static_cast<int64>(round(weight * scaling_factor)) / gcd;
  template <class T>
  double ScaleAndSparsifyWeights(
      double scaling_factor, int64 gcd,
      const ITIVector<VariableIndex, Fractional>& dense_weights, T* t);

  // Returns true when at least one element is non-zero.
  bool HasNonZeroWeigths(
      const ITIVector<VariableIndex, Fractional>& dense_weights) const;

  // The input problem (referenced, not owned).
  const LinearProgram& linear_problem_;
  // The output boolean problem, filled by ConvertToBooleanProblem().
  LinearBooleanProblem problem_;

  // boolean_variables_[i] represents the boolean variable index in Bop; when
  // negative -boolean_variables_[i] - 1 represents the index of the
  // integral variable in integral_variables_.
  ITIVector<glop::ColIndex, int> boolean_variables_;
  std::vector<IntegralVariable> integral_variables_;
  // Columns of linear_problem_ that are integral but not boolean; filled by
  // InitVariableTypes() and consumed by ConvertAllVariables().
  std::vector<ColIndex> integral_indices_;
  // Number of boolean variables created so far in problem_.
  int num_boolean_variables_;

  // Per-column classification: BOOLEAN for domains included in [0, 1],
  // INTEGRAL before booleanization, INTEGRAL_EXPRESSED_AS_BOOLEAN afterwards.
  enum VariableType { BOOLEAN, INTEGRAL, INTEGRAL_EXPRESSED_AS_BOOLEAN };
  ITIVector<glop::ColIndex, VariableType> variable_types_;
};
// Classifies every column right away (see InitVariableTypes()); members with
// default constructors are left to default-initialize.
IntegralProblemConverter::IntegralProblemConverter(
    const LinearProgram& linear_problem)
    : linear_problem_(linear_problem),
      boolean_variables_(linear_problem.num_variables().value(), 0),
      num_boolean_variables_(0),
      variable_types_(linear_problem.num_variables().value(), INTEGRAL) {
  InitVariableTypes();
}
// Drives the whole conversion: variables first, then their bound
// constraints, the linear constraints, and finally the objective.
// Returns false when the input problem cannot be handled (see CheckProblem()).
bool IntegralProblemConverter::ConvertToBooleanProblem() {
  if (!CheckProblem()) return false;

  ConvertAllVariables();
  problem_.set_name(linear_problem_.name());
  problem_.set_num_variables(num_boolean_variables_);
  AddVariableConstraints();
  ConvertAllConstraints();
  ConvertObjective();

  // A BooleanLinearProblem is always in the minimization form.
  if (linear_problem_.IsMaximizationProblem()) {
    sat::ChangeOptimizationDirection(&problem_);
  }
  return true;
}
// Maps a column of the original problem to its value in the boolean solution.
int64 IntegralProblemConverter::GetSolutionValue(
    ColIndex col, const BopSolution& solution) const {
  const int encoded = boolean_variables_[col];
  if (encoded >= 0) {
    // The column maps directly to one Bop boolean variable.
    return solution.Value(VariableIndex(encoded));
  }
  // Negative values encode -(index + 1) into integral_variables_.
  return integral_variables_[-encoded - 1].GetSolutionValue(solution);
}
bool IntegralProblemConverter::CheckProblem() const {
for (ColIndex col(0); col < linear_problem_.num_variables(); ++col) {
if (!linear_problem_.is_variable_integer()[col]) {
LOG(ERROR) << "Variable " << linear_problem_.GetVariableName(col)
<< " is continuous. This is not supported by BOP.";
return false;
}
if (linear_problem_.variable_lower_bounds()[col] == -kInfinity) {
LOG(ERROR) << "Variable " << linear_problem_.GetVariableName(col)
<< " has no lower bound. This is not supported by BOP.";
return false;
}
if (linear_problem_.variable_upper_bounds()[col] == kInfinity) {
LOG(ERROR) << "Variable " << linear_problem_.GetVariableName(col)
<< " has no upper bound. This is not supported by BOP.";
return false;
}
}
return true;
}
void IntegralProblemConverter::InitVariableTypes() {
for (ColIndex col(0); col < linear_problem_.num_variables(); ++col) {
const Fractional lower_bound = linear_problem_.variable_lower_bounds()[col];
const Fractional upper_bound = linear_problem_.variable_upper_bounds()[col];
if (lower_bound > -1.0 && upper_bound < 2.0) {
// Boolean variable.
variable_types_[col] = BOOLEAN;
boolean_variables_[col] = num_boolean_variables_;
++num_boolean_variables_;
problem_.add_var_names(linear_problem_.GetVariableName(col));
} else {
// Integral variable.
variable_types_[col] = INTEGRAL;
integral_indices_.push_back(col);
}
}
}
// Booleanizes every queued integral variable, reusing existing booleans when
// an equality constraint allows it, and creating fresh boolean variables
// covering the variable's range otherwise.
void IntegralProblemConverter::ConvertAllVariables() {
  for (const ColIndex col : integral_indices_) {
    CHECK_EQ(INTEGRAL, variable_types_[col]);
    IntegralVariable integral_var;
    const bool reused_existing_booleans =
        ConvertUsingExistingBooleans(col, &integral_var);
    if (!reused_existing_booleans) {
      // Create dedicated booleans spanning [lower_bound, upper_bound].
      integral_var.BuildFromRange(num_boolean_variables_,
                                  linear_problem_.variable_lower_bounds()[col],
                                  linear_problem_.variable_upper_bounds()[col]);
      num_boolean_variables_ += integral_var.GetNumberOfBooleanVariables();
      const std::string base_name = linear_problem_.GetVariableName(col);
      for (int bit = 0; bit < integral_var.bits().size(); ++bit) {
        problem_.add_var_names(base_name + StringPrintf("_%d", bit));
      }
    }
    integral_variables_.push_back(integral_var);
    // Encode the integral variable as a negative index (see
    // boolean_variables_ documentation).
    boolean_variables_[col] = -integral_variables_.size();
    variable_types_[col] = INTEGRAL_EXPRESSED_AS_BOOLEAN;
  }
}
// Translates each LinearProgram constraint into a LinearBooleanConstraint:
// integral variables are substituted by their boolean decomposition, the
// resulting floating-point weights are scaled to int64, and the bounds are
// shifted by the substitution offset and relaxed by the rounding error.
void IntegralProblemConverter::ConvertAllConstraints() {
  // TODO(user): This is the way it's done in glop/proto_utils.cc but having
  // to transpose looks unnecessary costly.
  glop::SparseMatrix transpose;
  transpose.PopulateFromTranspose(linear_problem_.GetSparseMatrix());

  // The max_* accumulators below are diagnostics only; nothing in this
  // function reports them.
  double max_relative_error = 0.0;
  double max_bound_error = 0.0;
  double max_scaling_factor = 0.0;
  double relative_error = 0.0;
  double scaling_factor = 0.0;
  std::vector<double> coefficients;
  for (RowIndex row(0); row < linear_problem_.num_constraints(); ++row) {
    // offset accumulates the constant contribution of booleanized integral
    // variables; it is subtracted from the bounds below.
    Fractional offset = 0.0;
    ITIVector<VariableIndex, Fractional> dense_weights(num_boolean_variables_,
                                                       0.0);
    for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
      // Cast in ColIndex due to the transpose.
      offset += AddWeightedIntegralVariable(RowToColIndex(e.row()),
                                            e.coefficient(), &dense_weights);
    }
    if (!HasNonZeroWeigths(dense_weights)) {
      // All weights cancelled out; the constraint involves no variable.
      continue;
    }

    // Compute the scaling for non-integral weights.
    coefficients.clear();
    for (VariableIndex var(0); var < num_boolean_variables_; ++var) {
      if (dense_weights[var] != 0.0) {
        coefficients.push_back(dense_weights[var]);
      }
    }
    GetBestScalingOfDoublesToInt64(coefficients, kint64max, &scaling_factor,
                                   &relative_error);
    const int64 gcd = ComputeGcdOfRoundedDoubles(coefficients, scaling_factor);
    max_relative_error = std::max(relative_error, max_relative_error);
    max_scaling_factor = std::max(scaling_factor / gcd, max_scaling_factor);

    LinearBooleanConstraint* constraint = problem_.add_constraints();
    constraint->set_name(linear_problem_.GetConstraintName(row));
    // bound_error is the total coefficient rounding error; the bounds are
    // relaxed by this amount below to stay conservative.
    const double bound_error =
        ScaleAndSparsifyWeights(scaling_factor, gcd, dense_weights, constraint);
    max_bound_error = std::max(max_bound_error, bound_error);

    const Fractional lower_bound =
        linear_problem_.constraint_lower_bounds()[row];
    if (lower_bound != -kInfinity) {
      const Fractional offset_lower_bound = lower_bound - offset;
      if (offset_lower_bound * scaling_factor >
          static_cast<double>(kint64max)) {
        // NOTE(review): this early return also stops converting all the
        // remaining constraints without reporting a failure to the caller;
        // confirm this is intended.
        LOG(WARNING) << "A constraint is trivially unsatisfiable.";
        return;
      }
      if (offset_lower_bound * scaling_factor >
          -static_cast<double>(kint64max)) {
        // Otherwise, the constraint is not needed.
        constraint->set_lower_bound(
            static_cast<int64>(
                round(offset_lower_bound * scaling_factor - bound_error)) /
            gcd);
      }
    }
    const Fractional upper_bound =
        linear_problem_.constraint_upper_bounds()[row];
    if (upper_bound != kInfinity) {
      const Fractional offset_upper_bound = upper_bound - offset;
      if (offset_upper_bound * scaling_factor <
          -static_cast<double>(kint64max)) {
        // NOTE(review): same early-return concern as for the lower bound.
        LOG(WARNING) << "A constraint is trivially unsatisfiable.";
        return;
      }
      if (offset_upper_bound * scaling_factor <
          static_cast<double>(kint64max)) {
        // Otherwise, the constraint is not needed.
        constraint->set_upper_bound(
            static_cast<int64>(
                round(offset_upper_bound * scaling_factor + bound_error)) /
            gcd);
      }
    }
  }
}
// Translates the objective: integral variables are substituted by their
// boolean decomposition, then the weights are scaled to int64 and the
// scaling factor/offset are stored so the true objective value can be
// recovered from the scaled one.
void IntegralProblemConverter::ConvertObjective() {
  LinearObjective* objective = problem_.mutable_objective();
  Fractional offset = 0.0;
  ITIVector<VariableIndex, Fractional> dense_weights(num_boolean_variables_,
                                                     0.0);
  // Compute the objective weights for the binary variable model.
  for (ColIndex col(0); col < linear_problem_.num_variables(); ++col) {
    offset += AddWeightedIntegralVariable(
        col, linear_problem_.objective_coefficients()[col], &dense_weights);
  }

  // Compute the scaling for non-integral weights.
  std::vector<double> coefficients;
  for (VariableIndex var(0); var < num_boolean_variables_; ++var) {
    if (dense_weights[var] != 0.0) {
      coefficients.push_back(dense_weights[var]);
    }
  }
  double scaling_factor = 0.0;
  // max_relative_error is a diagnostic only; only relative_error is logged.
  double max_relative_error = 0.0;
  double relative_error = 0.0;
  GetBestScalingOfDoublesToInt64(coefficients, kint64max, &scaling_factor,
                                 &relative_error);
  const int64 gcd = ComputeGcdOfRoundedDoubles(coefficients, scaling_factor);
  max_relative_error = std::max(relative_error, max_relative_error);
  LOG(INFO) << "objective relative error: " << relative_error;
  LOG(INFO) << "objective scaling factor: " << scaling_factor / gcd;

  ScaleAndSparsifyWeights(scaling_factor, gcd, dense_weights, objective);

  // Note that here we set the scaling factor for the inverse operation of
  // getting the "true" objective value from the scaled one. Hence the inverse.
  objective->set_scaling_factor(1.0 / scaling_factor * gcd);
  // The stored offset includes both the problem offset and the constant
  // contribution of the booleanized integral variables.
  objective->set_offset((linear_problem_.objective_offset() + offset) *
                        scaling_factor / gcd);
}
// Encodes the variable bounds of the original problem as boolean constraints:
// fixed boolean variables get a [v, v] unit constraint; booleanized integral
// variables get one constraint bounding their weighted-bit decomposition.
void IntegralProblemConverter::AddVariableConstraints() {
  for (ColIndex col(0); col < linear_problem_.num_variables(); ++col) {
    const Fractional lower_bound = linear_problem_.variable_lower_bounds()[col];
    const Fractional upper_bound = linear_problem_.variable_upper_bounds()[col];
    const int pos = boolean_variables_[col];
    if (pos >= 0) {
      // Boolean variable.
      CHECK_EQ(BOOLEAN, variable_types_[col]);
      // The domain is included in [0, 1]; the variable is fixed when the
      // bounds only leave one integral value: [0, 0] or [1, 1].
      const bool is_fixed = (lower_bound > -1.0 && upper_bound < 1.0) ||
                            (lower_bound > 0.0 && upper_bound < 2.0);
      if (is_fixed) {
        // Set the variable through an equality constraint on a single
        // literal (Bop literals are 1-based, hence pos + 1).
        const int fixed_value = lower_bound > -1.0 && upper_bound < 1.0 ? 0 : 1;
        LinearBooleanConstraint* constraint = problem_.add_constraints();
        constraint->set_lower_bound(fixed_value);
        constraint->set_upper_bound(fixed_value);
        constraint->add_literals(pos + 1);
        constraint->add_coefficients(1);
      }
    } else {
      CHECK_EQ(INTEGRAL_EXPRESSED_AS_BOOLEAN, variable_types_[col]);
      // Integral variable: bound its decomposition
      //   sum(w_i * b_i) in [lower_bound - offset, upper_bound - offset].
      if (lower_bound != -kInfinity || upper_bound != kInfinity) {
        const IntegralVariable& integral_var = integral_variables_[-pos - 1];
        LinearBooleanConstraint* constraint = problem_.add_constraints();
        for (int i = 0; i < integral_var.bits().size(); ++i) {
          constraint->add_literals(integral_var.bits()[i].value() + 1);
          constraint->add_coefficients(integral_var.weights()[i]);
        }
        if (lower_bound != -kInfinity) {
          constraint->set_lower_bound(static_cast<int64>(ceil(lower_bound)) -
                                      integral_var.offset());
        }
        if (upper_bound != kInfinity) {
          constraint->set_upper_bound(static_cast<int64>(floor(upper_bound)) -
                                      integral_var.offset());
        }
      }
    }
  }
}
// Tries to express col through an equality constraint whose other variables
// are all already booleanized. Returns true (and fills *integral_var) on the
// first constraint for which this works; clears *integral_var and returns
// false otherwise.
bool IntegralProblemConverter::ConvertUsingExistingBooleans(
    ColIndex col, IntegralVariable* integral_var) {
  CHECK(nullptr != integral_var);
  CHECK_EQ(INTEGRAL, variable_types_[col]);

  const SparseMatrix& matrix = linear_problem_.GetSparseMatrix();
  const SparseMatrix& transpose = linear_problem_.GetTransposeSparseMatrix();
  for (const SparseColumn::Entry var_entry : matrix.column(col)) {
    const RowIndex constraint = var_entry.row();
    // Only an equality constraint can define the variable exactly.
    if (linear_problem_.constraint_lower_bounds()[constraint] !=
        linear_problem_.constraint_upper_bounds()[constraint]) {
      continue;
    }
    // The constraint must not involve another not-yet-booleanized
    // integral variable.
    bool has_other_integral = false;
    for (const SparseColumn::Entry constraint_entry :
         transpose.column(RowToColIndex(constraint))) {
      const ColIndex other_col = RowToColIndex(constraint_entry.row());
      if (other_col != col && variable_types_[other_col] == INTEGRAL) {
        has_other_integral = true;
        break;
      }
    }
    if (!has_other_integral &&
        CreateVariableUsingConstraint(constraint, integral_var)) {
      return true;
    }
  }
  integral_var->Clear();
  return false;
}
// Isolates the single INTEGRAL variable of the given equality constraint
//   scale * integral_var + sum(w_i * b_i) == lb
// as integral_var == (lb + sum(-w_i * b_i)) / scale, failing whenever a
// division by scale is not integral (Bop only supports integral weights).
bool IntegralProblemConverter::CreateVariableUsingConstraint(
    RowIndex constraint, IntegralVariable* integral_var) {
  CHECK(nullptr != integral_var);
  integral_var->Clear();

  const SparseMatrix& transpose = linear_problem_.GetTransposeSparseMatrix();
  ITIVector<VariableIndex, Fractional> dense_weights(num_boolean_variables_,
                                                     0.0);
  Fractional scale = 1.0;
  int64 variable_offset = 0;

  // Accumulate -w_i for every boolean (or booleanized) term; constant parts
  // of booleanized integral variables go to variable_offset.
  for (const SparseColumn::Entry constraint_entry :
       transpose.column(RowToColIndex(constraint))) {
    const ColIndex col = RowToColIndex(constraint_entry.row());
    if (variable_types_[col] == INTEGRAL) {
      // This is the (unique) integral variable being expressed.
      scale = constraint_entry.coefficient();
    } else if (variable_types_[col] == BOOLEAN) {
      const int pos = boolean_variables_[col];
      CHECK_LE(0, pos);
      dense_weights[VariableIndex(pos)] -= constraint_entry.coefficient();
    } else {
      CHECK_EQ(INTEGRAL_EXPRESSED_AS_BOOLEAN, variable_types_[col]);
      const int pos = boolean_variables_[col];
      CHECK_GT(0, pos);
      // Expand an already-booleanized integral variable into its bits.
      const IntegralVariable& local_integral_var =
          integral_variables_[-pos - 1];
      variable_offset -=
          constraint_entry.coefficient() * local_integral_var.offset();
      for (int i = 0; i < local_integral_var.bits().size(); ++i) {
        dense_weights[local_integral_var.bits()[i]] -=
            constraint_entry.coefficient() * local_integral_var.weights()[i];
      }
    }
  }

  // Rescale using the weight of the integral variable.
  const Fractional lb = linear_problem_.constraint_lower_bounds()[constraint];
  const Fractional offset = (lb + variable_offset) / scale;
  if (!IsIntegerWithinTolerance(offset)) {
    return false;
  }
  integral_var->set_offset(static_cast<int64>(offset));

  for (VariableIndex var(0); var < dense_weights.size(); ++var) {
    if (dense_weights[var] != 0.0) {
      const Fractional weight = dense_weights[var] / scale;
      if (!IsIntegerWithinTolerance(weight)) {
        return false;
      }
      integral_var->set_weight(var, static_cast<int64>(weight));
    }
  }
  return true;
}
// Adds weight * col to the dense boolean weight vector, expanding a
// booleanized integral variable into its bits. Returns the constant
// contribution (weight * offset) of the variable, zero for plain booleans.
Fractional IntegralProblemConverter::AddWeightedIntegralVariable(
    ColIndex col, Fractional weight,
    ITIVector<VariableIndex, Fractional>* dense_weights) {
  CHECK(nullptr != dense_weights);
  if (weight == 0.0) return 0;

  const int pos = boolean_variables_[col];
  if (pos >= 0) {
    // Plain boolean variable: accumulate the weight directly.
    (*dense_weights)[VariableIndex(pos)] += weight;
    return 0;
  }
  // Booleanized integral variable: distribute the weight over its bits and
  // report the contribution of its constant offset.
  const IntegralVariable& integral_var = integral_variables_[-pos - 1];
  for (int i = 0; i < integral_var.bits().size(); ++i) {
    (*dense_weights)[integral_var.bits()[i]] +=
        integral_var.weights()[i] * weight;
  }
  return weight * integral_var.offset();
}
template <class T>
double IntegralProblemConverter::ScaleAndSparsifyWeights(
double scaling_factor, int64 gcd,
const ITIVector<VariableIndex, Fractional>& dense_weights, T* t) {
CHECK(nullptr != t);
double bound_error = 0.0;
for (VariableIndex var(0); var < dense_weights.size(); ++var) {
if (dense_weights[var] != 0.0) {
const double scaled_weight = dense_weights[var] * scaling_factor;
bound_error += fabs(round(scaled_weight) - scaled_weight);
t->add_literals(var.value() + 1);
t->add_coefficients(static_cast<int64>(round(scaled_weight)) / gcd);
}
}
return bound_error;
}
bool IntegralProblemConverter::HasNonZeroWeigths(
const ITIVector<VariableIndex, Fractional>& dense_weights) const {
for (const Fractional weight : dense_weights) {
if (weight != 0.0) {
return true;
}
}
return false;
}
// Verifies that variable_values respects every variable bound and every
// constraint of linear_problem. Logs and returns false on the first
// violation found.
bool CheckSolution(const LinearProgram& linear_problem,
                   const glop::DenseRow& variable_values) {
  glop::DenseColumn activities(linear_problem.num_constraints(), 0);
  const SparseMatrix& matrix = linear_problem.GetSparseMatrix();
  for (ColIndex col(0); col < linear_problem.num_variables(); ++col) {
    const Fractional value = variable_values[col];
    const Fractional var_lb = linear_problem.variable_lower_bounds()[col];
    const Fractional var_ub = linear_problem.variable_upper_bounds()[col];
    if (value < var_lb || value > var_ub) {
      LOG(ERROR) << "Variable " << col << " out of bound: " << value
                 << " should be in " << var_lb << " .. " << var_ub;
      return false;
    }
    // Accumulate this variable's contribution to every constraint it hits.
    for (const SparseColumn::Entry entry : matrix.column(col)) {
      activities[entry.row()] += entry.coefficient() * value;
    }
  }

  for (RowIndex row(0); row < linear_problem.num_constraints(); ++row) {
    const Fractional activity = activities[row];
    const Fractional lb = linear_problem.constraint_lower_bounds()[row];
    const Fractional ub = linear_problem.constraint_upper_bounds()[row];
    if (activity < lb || activity > ub) {
      LOG(ERROR) << "Constraint " << row << " out of bound: " << activity
                 << " should be in " << lb << " .. " << ub;
      return false;
    }
  }
  return true;
}
} // anonymous namespace
// Initializes all members. Note that best_bound_ was previously left out of
// the initializer list, so best_bound() could return an uninitialized double
// when read before a (successful) Solve(); it is now zero-initialized.
IntegralSolver::IntegralSolver()
    : parameters_(),
      variable_values_(),
      objective_value_(0.0),
      best_bound_(0.0) {}
// TODO(user): Parallelize the solve of sub-problems.
//
// Tries to decompose the problem into independent sub-problems; if that
// succeeds, solves each sub-problem with a proportional share of the time
// budget and sums the resulting objectives/bounds, otherwise solves the
// problem as a whole.
BopSolveStatus IntegralSolver::Solve(const LinearProgram& linear_problem) {
  {  // Limited scope to release the decomposer memory when not used.
    LPDecomposer decomposer;
    decomposer.Decompose(&linear_problem);
    const int num_sub_problems = decomposer.GetNumberOfProblems();
    if (num_sub_problems > 1) {
      // The problem can be decomposed. Solve each sub-problem and aggregate the
      // result.
      BopSolveStatus status = BopSolveStatus::OPTIMAL_SOLUTION_FOUND;
      LinearProgram sub_problem;
      std::vector<DenseRow> local_variable_values(num_sub_problems);

      // TODO(user): Investigate a better approximation of the time needed to
      // solve the problem than just the number of variables.
      // Each sub-problem gets a time budget proportional to its variable
      // count (max() guards against a division by zero).
      const double total_num_variables = std::max(
          1.0, static_cast<double>(linear_problem.num_variables().value()));
      const double time_per_variable =
          parameters_.max_time_in_seconds() / total_num_variables;
      const double deterministic_time_per_variable =
          parameters_.max_deterministic_time() / total_num_variables;

      // Objective values and best bounds of independent sub-problems add up.
      objective_value_ = linear_problem.objective_offset();
      best_bound_ = 0.0;
      for (int i = 0; i < num_sub_problems; ++i) {
        decomposer.BuildProblem(i, &sub_problem);
        Fractional local_objective_value = 0.0;
        Fractional local_best_bound = 0.0;
        BopParameters local_parameters = parameters_;
        const int num_variables =
            std::max(1, sub_problem.num_variables().value());
        local_parameters.set_max_time_in_seconds(time_per_variable *
                                                 num_variables);
        local_parameters.set_max_deterministic_time(
            deterministic_time_per_variable * num_variables);
        const BopSolveStatus local_status = InternalSolve(
            sub_problem, local_parameters, &(local_variable_values[i]),
            &local_objective_value, &local_best_bound);
        // Any failing sub-problem invalidates the whole solve.
        if (local_status == BopSolveStatus::NO_SOLUTION_FOUND ||
            local_status == BopSolveStatus::INFEASIBLE_PROBLEM ||
            local_status == BopSolveStatus::INVALID_PROBLEM) {
          return local_status;
        }
        // The aggregated result is only optimal when every sub-solve is.
        if (local_status == BopSolveStatus::FEASIBLE_SOLUTION_FOUND) {
          status = BopSolveStatus::FEASIBLE_SOLUTION_FOUND;
        }
        objective_value_ += local_objective_value;
        best_bound_ += local_best_bound;
      }
      variable_values_ = decomposer.AggregateAssignments(local_variable_values);
      // NOTE(review): CheckSolution() only logs violations here; its return
      // value is discarded. Confirm this is intended.
      CheckSolution(linear_problem, variable_values_);
      return status;
    }
  }

  // The problem can't be decomposed, solve the original problem.
  return InternalSolve(linear_problem, parameters_, &variable_values_,
                       &objective_value_, &best_bound_);
}
// Converts linear_problem to a boolean problem, solves it with BopSolver and,
// when a solution exists, maps the boolean assignment back to the integral
// variables and recomputes the objective and best bound.
BopSolveStatus IntegralSolver::InternalSolve(
    const LinearProgram& linear_problem, const BopParameters& parameters,
    DenseRow* variable_values, Fractional* objective_value,
    Fractional* best_bound) {
  CHECK(variable_values != nullptr);
  CHECK(objective_value != nullptr);
  CHECK(best_bound != nullptr);

  // Those values will only make sense when a solution is found, however we
  // resize here such that one can access the values even if they don't mean
  // anything.
  variable_values->resize(linear_problem.num_variables(), 0);

  IntegralProblemConverter converter(linear_problem);
  if (!converter.ConvertToBooleanProblem()) {
    return BopSolveStatus::INVALID_PROBLEM;
  }

  BopSolver bop_solver(converter.problem());
  bop_solver.SetParameters(parameters);
  const BopSolveStatus status = bop_solver.Solve();
  if (status == BopSolveStatus::OPTIMAL_SOLUTION_FOUND ||
      status == BopSolveStatus::FEASIBLE_SOLUTION_FOUND) {
    // Compute objective value.
    const BopSolution& solution = bop_solver.best_solution();
    CHECK(solution.IsFeasible());

    *objective_value = linear_problem.objective_offset();
    for (ColIndex col(0); col < linear_problem.num_variables(); ++col) {
      // Map the boolean assignment back to an integral value per column.
      const int64 value = converter.GetSolutionValue(col, solution);
      (*variable_values)[col] = value;
      *objective_value += value * linear_problem.objective_coefficients()[col];
    }
    // NOTE(review): CheckSolution() only logs violations; its return value is
    // discarded here. Confirm this is intended.
    CheckSolution(linear_problem, *variable_values);

    // TODO(user): Check that the scaled best bound from Bop is a valid one
    // even after conversion. If yes, remove the optimality test.
    *best_bound = status == BopSolveStatus::OPTIMAL_SOLUTION_FOUND
                      ? *objective_value
                      : bop_solver.GetScaledBestBound();
  }
  return status;
}
} // namespace bop
} // namespace operations_research

73
src/bop/integral_solver.h Normal file
View File

@@ -0,0 +1,73 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_BOP_INTEGRAL_SOLVER_H_
#define OR_TOOLS_BOP_INTEGRAL_SOLVER_H_
#include <map>
#include <vector>
#include "bop/bop_base.h"
#include "bop/bop_parameters.pb.h"
#include "bop/bop_solution.h"
#include "bop/bop_types.h"
#include "lp_data/lp_data.h"
namespace operations_research {
namespace bop {
// This class implements an Integer Programming solver, i.e. the solver solves
// problems with both integral and boolean variables, linear constraint and
// linear objective function.
// Note that the current implementation is limited to integral coefficient.
// TODO(user): Add scaling to deal with any coefficient.
class IntegralSolver {
 public:
  IntegralSolver();

  // Sets the solver parameters.
  // See the proto for an extensive documentation.
  void SetParameters(const BopParameters& parameters) {
    parameters_ = parameters;
  }
  // Returns a copy of the current parameters.
  BopParameters parameters() const { return parameters_; }

  // Solves the given linear program and returns the solve status.
  BopSolveStatus Solve(const glop::LinearProgram& linear_problem)
      MUST_USE_RESULT;

  // Returns the objective value of the solution with its offset.
  glop::Fractional objective_value() const { return objective_value_; }

  // Returns the best bound found so far.
  glop::Fractional best_bound() const { return best_bound_; }

  // Returns the solution values. Note that the values only make sense when a
  // solution is found.
  const glop::DenseRow& variable_values() const { return variable_values_; }

 private:
  // Solves the given linear program and returns the solve status.
  BopSolveStatus InternalSolve(const glop::LinearProgram& linear_problem,
                               const BopParameters& parameters,
                               glop::DenseRow* variable_values,
                               glop::Fractional* objective_value,
                               glop::Fractional* best_bound) MUST_USE_RESULT;

  BopParameters parameters_;
  // Solution data from the last Solve(); see the accessors above for when
  // each value is meaningful.
  glop::DenseRow variable_values_;
  glop::Fractional objective_value_;
  glop::Fractional best_bound_;
};
} // namespace bop
} // namespace operations_research
#endif // OR_TOOLS_BOP_INTEGRAL_SOLVER_H_

View File

@@ -178,8 +178,10 @@ class RangeBipartiteMatching {
// This method sorts the min_sorted_ and max_sorted_ arrays and fill
// the bounds_ array (and set the active_size_ counter).
void SortArray() {
std::sort(min_sorted_.get(), min_sorted_.get() + size_, CompareIntervalMin());
std::sort(max_sorted_.get(), max_sorted_.get() + size_, CompareIntervalMax());
std::sort(min_sorted_.get(), min_sorted_.get() + size_,
CompareIntervalMin());
std::sort(max_sorted_.get(), max_sorted_.get() + size_,
CompareIntervalMax());
int64 min = min_sorted_[0]->min;
int64 max = max_sorted_[0]->max + 1;

View File

@@ -45,10 +45,10 @@ class LPSolver {
// Incrementality: From one Solve() call to the next, the internal state is
// not cleared and the solver may take advantage of its current state if the
// given lp is only slightly modified. If the modification is too important,
// or if the solver do not see how to reuse the previous state efficiently, it
// will just solve the problem from scrath. On the other hand, if the lp is
// the same, calling Solve() again should basically resume the solve from the
// last position. To disable this behavior, simply call Clear() before.
// or if the solver does not see how to reuse the previous state efficiently,
// it will just solve the problem from scratch. On the other hand, if the lp
// is the same, calling Solve() again should basically resume the solve from
// the last position. To disable this behavior, simply call Clear() before.
ProblemStatus Solve(const LinearProgram& lp) MUST_USE_RESULT;
// Puts the solver in a clean state.

View File

@@ -0,0 +1,367 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include <vector>
#include <fstream>
#include "base/commandlineflags.h"
#include "base/integral_types.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "base/file.h"
#include "google/protobuf/text_format.h"
#include "base/hash.h"
#include "bop/integral_solver.h"
#include "bop/bop_parameters.pb.h"
#include "linear_solver/linear_solver.h"
#if defined(USE_BOP)
namespace operations_research {
namespace {
// Maps a Bop solve status onto the corresponding MPSolver result status.
// Any value outside the enumerated cases is a programming error and is
// reported as ABNORMAL.
MPSolver::ResultStatus TranslateProblemStatus(bop::BopSolveStatus status) {
  switch (status) {
    case bop::BopSolveStatus::INVALID_PROBLEM:
      return MPSolver::ABNORMAL;
    case bop::BopSolveStatus::INFEASIBLE_PROBLEM:
      return MPSolver::INFEASIBLE;
    case bop::BopSolveStatus::NO_SOLUTION_FOUND:
      return MPSolver::NOT_SOLVED;
    case bop::BopSolveStatus::FEASIBLE_SOLUTION_FOUND:
      return MPSolver::FEASIBLE;
    case bop::BopSolveStatus::OPTIMAL_SOLUTION_FOUND:
      return MPSolver::OPTIMAL;
  }
  LOG(DFATAL) << "Invalid bop::BopSolveStatus";
  return MPSolver::ABNORMAL;
}
} // Anonymous namespace
// MPSolver interface backed by the Bop integral solver. This interface is
// not incremental: every model modification invalidates the extraction and
// the whole model is re-extracted at the next Solve().
class BopInterface : public MPSolverInterface {
 public:
  explicit BopInterface(MPSolver* const solver);
  virtual ~BopInterface();

  // ----- Solve -----
  virtual MPSolver::ResultStatus Solve(const MPSolverParameters& param);

  // ----- Model modifications and extraction -----
  // All of these delegate to NonIncrementalChange(); see Solve(), which
  // resets and re-extracts the model.
  virtual void Reset();
  virtual void SetOptimizationDirection(bool maximize);
  virtual void SetVariableBounds(int index, double lb, double ub);
  virtual void SetVariableInteger(int index, bool integer);
  virtual void SetConstraintBounds(int index, double lb, double ub);
  virtual void AddRowConstraint(MPConstraint* const ct);
  virtual void AddVariable(MPVariable* const var);
  virtual void SetCoefficient(MPConstraint* const constraint,
                              const MPVariable* const variable,
                              double new_value, double old_value);
  virtual void ClearConstraint(MPConstraint* const constraint);
  virtual void SetObjectiveCoefficient(const MPVariable* const variable,
                                       double coefficient);
  virtual void SetObjectiveOffset(double value);
  virtual void ClearObjective();

  // ------ Query statistics on the solution and the solve ------
  virtual int64 iterations() const;
  virtual int64 nodes() const;
  virtual double best_objective_bound() const;
  virtual MPSolver::BasisStatus row_status(int constraint_index) const;
  virtual MPSolver::BasisStatus column_status(int variable_index) const;

  // ----- Misc -----
  virtual bool IsContinuous() const;
  virtual bool IsLP() const;
  virtual bool IsMIP() const;
  virtual std::string SolverVersion() const;
  virtual void* underlying_solver();

  virtual void ExtractNewVariables();
  virtual void ExtractNewConstraints();
  virtual void ExtractObjective();
  virtual void SetParameters(const MPSolverParameters& param);
  virtual void SetRelativeMipGap(double value);
  virtual void SetPrimalTolerance(double value);
  virtual void SetDualTolerance(double value);
  virtual void SetPresolveMode(int value);
  virtual void SetScalingMode(int value);
  virtual void SetLpAlgorithm(int value);
  virtual bool ReadParameterFile(const std::string& filename);

 private:
  // Called on every model modification; definition not shown in this chunk.
  void NonIncrementalChange();

  // Model extracted from the MPSolver and handed to bop_solver_.
  glop::LinearProgram linear_program_;
  bop::IntegralSolver bop_solver_;
  std::vector<MPSolver::BasisStatus> column_status_;
  std::vector<MPSolver::BasisStatus> row_status_;
  bop::BopParameters parameters_;
  // Best bound stored by the last feasible/optimal Solve().
  double best_objective_bound_;
};
// Initializes all members. Note that best_objective_bound_ was previously
// left out of the initializer list and therefore uninitialized until a
// feasible/optimal Solve(); it is now zero-initialized.
BopInterface::BopInterface(MPSolver* const solver)
    : MPSolverInterface(solver),
      linear_program_(),
      bop_solver_(),
      column_status_(),
      row_status_(),
      parameters_(),
      best_objective_bound_(0.0) {}
// Re-extracts the whole model, runs the Bop integral solver, and copies the
// results (objective, bound, variable values) back into the MPSolver objects.
MPSolver::ResultStatus BopInterface::Solve(const MPSolverParameters& param) {
  // Reset extraction as this interface is not incremental yet.
  Reset();
  ExtractModel();
  SetParameters(param);

  linear_program_.SetMaximizationProblem(maximize_);
  linear_program_.CleanUp();

  // Time limit.
  if (solver_->time_limit()) {
    VLOG(1) << "Setting time limit = " << solver_->time_limit() << " ms.";
    parameters_.set_max_time_in_seconds(
        static_cast<double>(solver_->time_limit()) / 1000.0);
  }
  solver_->SetSolverSpecificParametersAsString(
      solver_->solver_specific_parameter_string_);

  bop_solver_.SetParameters(parameters_);
  const bop::BopSolveStatus status = bop_solver_.Solve(linear_program_);

  // The solution must be marked as synchronized even when no solution exists.
  sync_status_ = SOLUTION_SYNCHRONIZED;
  result_status_ = TranslateProblemStatus(status);
  if (result_status_ == MPSolver::FEASIBLE ||
      result_status_ == MPSolver::OPTIMAL) {
    // Get the results.
    objective_value_ = bop_solver_.objective_value();
    best_objective_bound_ = bop_solver_.best_bound();

    // TODO(user): Implement the column status.
    // NOTE(review): var_id (int) is compared to num_vars (size_t) below;
    // signed/unsigned mix, harmless for realistic sizes.
    const size_t num_vars = solver_->variables_.size();
    column_status_.resize(num_vars, MPSolver::FREE);
    for (int var_id = 0; var_id < num_vars; ++var_id) {
      MPVariable* const var = solver_->variables_[var_id];
      const glop::ColIndex lp_solver_var_id(var->index());
      const glop::Fractional solution_value =
          bop_solver_.variable_values()[lp_solver_var_id];
      var->set_solution_value(static_cast<double>(solution_value));
    }

    // TODO(user): Implement the row activity and row status.
    // Activities are not computed yet; report 0 for every constraint.
    const size_t num_constraints = solver_->constraints_.size();
    row_status_.resize(num_constraints, MPSolver::FREE);
    for (int ct_id = 0; ct_id < num_constraints; ++ct_id) {
      MPConstraint* const ct = solver_->constraints_[ct_id];
      ct->set_activity(static_cast<double>(0));
    }
  }
  return result_status_;
}
// Drops both the extraction bookkeeping and the extracted model; the next
// Solve() rebuilds everything from the MPSolver data.
void BopInterface::Reset() {
  ResetExtractionInformation();
  linear_program_.Clear();
}

// The interface is not incremental: all the model-modification callbacks
// below delegate to NonIncrementalChange(), and the whole model is
// re-extracted at the next Solve().
void BopInterface::SetOptimizationDirection(bool maximize) {
  NonIncrementalChange();
}

void BopInterface::SetVariableBounds(int index, double lb, double ub) {
  NonIncrementalChange();
}

void BopInterface::SetVariableInteger(int index, bool integer) {
  NonIncrementalChange();
}

void BopInterface::SetConstraintBounds(int index, double lb, double ub) {
  NonIncrementalChange();
}

void BopInterface::AddRowConstraint(MPConstraint* const ct) {
  NonIncrementalChange();
}

void BopInterface::AddVariable(MPVariable* const var) {
  NonIncrementalChange();
}

void BopInterface::SetCoefficient(MPConstraint* const constraint,
                                  const MPVariable* const variable,
                                  double new_value, double old_value) {
  NonIncrementalChange();
}

void BopInterface::ClearConstraint(MPConstraint* const constraint) {
  NonIncrementalChange();
}

void BopInterface::SetObjectiveCoefficient(const MPVariable* const variable,
                                           double coefficient) {
  NonIncrementalChange();
}

void BopInterface::SetObjectiveOffset(double value) { NonIncrementalChange(); }

void BopInterface::ClearObjective() { NonIncrementalChange(); }
// Bop does not expose a simplex-style iteration count.
int64 BopInterface::iterations() const {
  LOG(DFATAL) << "Number of iterations not available";
  return kUnknownNumberOfIterations;
}
// Bop does not expose a branch-and-bound node count.
int64 BopInterface::nodes() const {
  LOG(DFATAL) << "Number of nodes not available";
  return kUnknownNumberOfNodes;
}
// Returns the best objective bound found by the solver, or a trivial bound
// when no synchronized solution/bound is available.
double BopInterface::best_objective_bound() const {
  if (!CheckSolutionIsSynchronized() || !CheckBestObjectiveBoundExists()) {
    return trivial_worst_objective_bound();
  }
  return best_objective_bound_;
}
// Basis statuses are not computed by Bop: Solve() fills both vectors with
// MPSolver::FREE, so these accessors currently always return FREE.
MPSolver::BasisStatus BopInterface::row_status(int constraint_index) const {
  return row_status_[constraint_index];
}
MPSolver::BasisStatus BopInterface::column_status(int variable_index) const {
  return column_status_[variable_index];
}
// Bop is a pure 0-1 integer solver: never continuous, never an LP.
bool BopInterface::IsContinuous() const { return false; }
bool BopInterface::IsLP() const { return false; }
bool BopInterface::IsMIP() const { return true; }
std::string BopInterface::SolverVersion() const {
  // TODO(user): Decide how to version bop.
  return "Bop-0.0";
}
// Exposes the underlying bop::IntegralSolver for advanced users.
void* BopInterface::underlying_solver() { return &bop_solver_; }
// TODO(user): remove duplication with GlopInterface.
void BopInterface::ExtractNewVariables() {
DCHECK_EQ(0, last_variable_index_);
DCHECK_EQ(0, last_constraint_index_);
const glop::ColIndex num_cols(solver_->variables_.size());
for (glop::ColIndex col(last_variable_index_); col < num_cols; ++col) {
MPVariable* const var = solver_->variables_[col.value()];
const glop::ColIndex new_col =
linear_program_.FindOrCreateVariable(var->name());
DCHECK_EQ(new_col, col);
var->set_index(col.value());
linear_program_.SetVariableBounds(col, var->lb(), var->ub());
linear_program_.SetVariableIntegrality(col, var->integer());
}
}
// TODO(user): remove duplication with GlopInterface.
void BopInterface::ExtractNewConstraints() {
DCHECK_EQ(0, last_constraint_index_);
const glop::RowIndex num_rows(solver_->constraints_.size());
for (glop::RowIndex row(0); row < num_rows; ++row) {
MPConstraint* const ct = solver_->constraints_[row.value()];
ct->set_index(row.value());
const double lb = ct->lb();
const double ub = ct->ub();
const glop::RowIndex new_row =
linear_program_.FindOrCreateConstraint(ct->name());
DCHECK_EQ(new_row, row);
linear_program_.SetConstraintBounds(row, lb, ub);
for (CoeffEntry entry : ct->coefficients_) {
const int var_index = entry.first->index();
DCHECK_NE(kNoIndex, var_index);
const glop::ColIndex col(var_index);
const double coeff = entry.second;
linear_program_.SetCoefficient(row, col, coeff);
}
}
}
// TODO(user): remove duplication with GlopInterface.
// Copies the objective offset and every objective term into linear_program_.
// Variables without an explicit term keep their default 0.0 coefficient.
void BopInterface::ExtractObjective() {
  linear_program_.SetObjectiveOffset(solver_->Objective().offset());
  for (CoeffEntry entry : solver_->objective_->coefficients_) {
    const glop::ColIndex col(entry.first->index());
    linear_program_.SetObjectiveCoefficient(col, entry.second);
  }
}
// Resets parameters_ to its defaults, then applies the generic
// MPSolverParameters on top of them via SetCommonParameters().
void BopInterface::SetParameters(const MPSolverParameters& param) {
  parameters_.Clear();
  SetCommonParameters(param);
}
// All these have no effect.
// These LP-specific settings are intentionally ignored by Bop.
void BopInterface::SetPrimalTolerance(double value) {}
void BopInterface::SetDualTolerance(double value) {}
void BopInterface::SetScalingMode(int value) {}
void BopInterface::SetLpAlgorithm(int value) {}
void BopInterface::SetRelativeMipGap(double value) {}
// Presolve is not yet wired through to BopParameters: the two documented
// values are accepted silently, any other non-default value is reported as
// unsupported.
void BopInterface::SetPresolveMode(int value) {
  if (value == MPSolverParameters::PRESOLVE_OFF ||
      value == MPSolverParameters::PRESOLVE_ON) {
    // TODO(user): add this to BopParameters.
    return;
  }
  if (value != MPSolverParameters::kDefaultIntegerParamValue) {
    SetIntegerParamToUnsupportedValue(MPSolverParameters::PRESOLVE, value);
  }
}
bool BopInterface::ReadParameterFile(const std::string& filename) {
std::string params;
if (!file::GetContents(filename, &params, file::Defaults()).ok()) {
return false;
}
const bool ok = google::protobuf::TextFormat::MergeFromString(params, &parameters_);
bop_solver_.SetParameters(parameters_);
return ok;
}
// Marks the extracted model as stale so the next Solve() reloads everything.
void BopInterface::NonIncrementalChange() {
  // The current implementation is not incremental.
  sync_status_ = MUST_RELOAD;
}
// Register BOP in the global linear solver factory.
// Factory hooked into the MPSolver machinery; the caller owns the result.
MPSolverInterface* BuildBopInterface(MPSolver* const solver) {
  return new BopInterface(solver);
}
} // namespace operations_research
#endif // #if defined(USE_BOP)

View File

@@ -208,6 +208,9 @@ class MPSolver {
#if defined(USE_CPLEX)
CPLEX_MIXED_INTEGER_PROGRAMMING = 11,
#endif
#if defined(USE_BOP)
BOP_INTEGER_PROGRAMMING = 12,
#endif
};
MPSolver(const std::string& name, OptimizationProblemType problem_type);
@@ -532,6 +535,7 @@ class MPSolver {
friend class SLMInterface;
friend class MPSolverInterface;
friend class GLOPInterface;
friend class BopInterface;
// Debugging: verify that the given MPVariable* belongs to this solver.
bool OwnsVariable(const MPVariable* var) const;
@@ -662,6 +666,7 @@ class MPObjective {
friend class GurobiInterface;
friend class CplexInterface;
friend class GLOPInterface;
friend class BopInterface;
// Constructor. An objective points to a single MPSolverInterface
// that is specified in the constructor. An objective cannot belong
@@ -735,6 +740,7 @@ class MPVariable {
friend class CplexInterface;
friend class GLOPInterface;
friend class MPVariableSolutionValueTest;
friend class BopInterface;
// Constructor. A variable points to a single MPSolverInterface that
// is specified in the constructor. A variable cannot belong to
@@ -835,6 +841,7 @@ class MPConstraint {
friend class GurobiInterface;
friend class CplexInterface;
friend class GLOPInterface;
friend class BopInterface;
// Constructor. A constraint points to a single MPSolverInterface
// that is specified in the constructor. A constraint cannot belong

View File

@@ -143,6 +143,7 @@ message MPModelRequest {
CBC_MIXED_INTEGER_PROGRAMMING = 5;
GUROBI_MIXED_INTEGER_PROGRAMMING = 7; // Commercial, needs a valid license.
CPLEX_MIXED_INTEGER_PROGRAMMING = 11; // Commercial, needs a valid license.
BOP_INTEGER_PROGRAMMING = 12;
}
optional SolverType solver_type = 2;

View File

@@ -43,6 +43,9 @@
DECLARE_double(solver_timeout_in_seconds);
DECLARE_string(solver_write_model);
DEFINE_bool(scip_feasibility_emphasis, false,
"When true, emphasize search towards feasibility. This may or"
" may not result in speedups in some problems.");
namespace operations_research {
@@ -168,6 +171,12 @@ void SCIPInterface::Reset() {
void SCIPInterface::CreateSCIP() {
ORTOOLS_SCIP_CALL(SCIPcreate(&scip_));
ORTOOLS_SCIP_CALL(SCIPincludeDefaultPlugins(scip_));
// Set the emphasis to enum SCIP_PARAMEMPHASIS_FEASIBILITY. Do not print
// the new parameter (quiet = true).
if (FLAGS_scip_feasibility_emphasis) {
ORTOOLS_SCIP_CALL(SCIPsetEmphasis(scip_, SCIP_PARAMEMPHASIS_FEASIBILITY,
/*quiet = */ true));
}
// Default clock type (1: CPU user seconds, 2: wall clock time). We use
// wall clock time because getting CPU user seconds involves calling
// times() which is very expensive.

View File

@@ -0,0 +1,149 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lp_data/lp_decomposer.h"
#include <vector>
#include "algorithms/dynamic_partition.h"
#include "lp_data/lp_data.h"
#include "lp_data/lp_utils.h"
namespace operations_research {
namespace glop {
//------------------------------------------------------------------------------
// LPDecomposer
//------------------------------------------------------------------------------
// Starts with no problem attached; Decompose() must be called before any
// other method.
LPDecomposer::LPDecomposer()
    : original_problem_(nullptr), clusters_(), local_to_global_vars_() {}
void LPDecomposer::Decompose(const LinearProgram* linear_problem) {
original_problem_ = linear_problem;
clusters_.clear();
local_to_global_vars_.clear();
const SparseMatrix& transposed_matrix =
original_problem_->GetTransposeSparseMatrix();
MergingPartition partition;
partition.Reset(original_problem_->num_variables().value());
// Iterate on all constraints, and merge all variables of each constraint.
const ColIndex num_ct = RowToColIndex(original_problem_->num_constraints());
for (ColIndex ct(0); ct < num_ct; ++ct) {
const SparseColumn& sparse_constraint = transposed_matrix.column(ct);
if (sparse_constraint.num_entries() > 1) {
const RowIndex first_row = sparse_constraint.GetFirstRow();
for (EntryIndex e(1); e < sparse_constraint.num_entries(); ++e) {
partition.MergePartsOf(first_row.value(),
sparse_constraint.EntryRow(e).value());
}
}
}
std::vector<int> classes;
const int num_classes = partition.FillEquivalenceClasses(&classes);
clusters_.resize(num_classes);
for (int i = 0; i < classes.size(); ++i) {
clusters_[classes[i]].push_back(ColIndex(i));
}
for (int i = 0; i < num_classes; ++i) {
std::sort(clusters_[i].begin(), clusters_[i].end());
}
local_to_global_vars_.resize(clusters_.size());
}
// Builds the problem_index^th independent sub-problem into lp: creates one
// local variable per cluster member, then copies every constraint touching
// the cluster. Also records the local->global column mapping used later by
// AggregateAssignments().
void LPDecomposer::BuildProblem(int problem_index, LinearProgram* lp) {
  CHECK(lp != nullptr);
  CHECK_GE(problem_index, 0);
  CHECK_LT(problem_index, clusters_.size());
  lp->Clear();
  const std::vector<ColIndex>& cluster = clusters_[problem_index];
  // Maps original columns to columns of lp (kInvalidCol when not in cluster).
  StrictITIVector<ColIndex, ColIndex> global_to_local_vars(
      original_problem_->num_variables(), kInvalidCol);
  // Set of original constraints that mention at least one cluster variable.
  SparseBitset<RowIndex> constraints_to_use(
      original_problem_->num_constraints());
  StrictITIVector<ColIndex, ColIndex> local_to_global(ColIndex(cluster.size()),
                                                      kInvalidCol);
  lp->SetMaximizationProblem(original_problem_->IsMaximizationProblem());
  // Create variables and get all constraints of the cluster.
  const SparseMatrix& original_matrix = original_problem_->GetSparseMatrix();
  const SparseMatrix& transposed_matrix =
      original_problem_->GetTransposeSparseMatrix();
  for (int i = 0; i < cluster.size(); ++i) {
    const ColIndex global_col = cluster[i];
    const ColIndex local_col = lp->CreateNewVariable();
    CHECK(global_to_local_vars[global_col] == kInvalidCol ||
          global_to_local_vars[global_col] == local_col)
        << "If the mapping is already assigned it has to be the same.";
    global_to_local_vars[global_col] = local_col;
    CHECK_EQ(kInvalidCol, local_to_global[local_col]);
    local_to_global[local_col] = global_col;
    // Copy name, integrality, bounds and objective of the variable.
    lp->SetVariableName(local_col,
                        original_problem_->GetVariableName(global_col));
    lp->SetVariableIntegrality(
        local_col, original_problem_->is_variable_integer()[global_col]);
    lp->SetVariableBounds(
        local_col, original_problem_->variable_lower_bounds()[global_col],
        original_problem_->variable_upper_bounds()[global_col]);
    lp->SetObjectiveCoefficient(
        local_col, original_problem_->objective_coefficients()[global_col]);
    for (const SparseColumn::Entry e : original_matrix.column(global_col)) {
      constraints_to_use.Set(e.row());
    }
  }
  // Create the constraints.
  for (const RowIndex global_row :
       constraints_to_use.PositionsSetAtLeastOnce()) {
    const RowIndex local_row = lp->CreateNewConstraint();
    lp->SetConstraintName(local_row,
                          original_problem_->GetConstraintName(global_row));
    lp->SetConstraintBounds(
        local_row, original_problem_->constraint_lower_bounds()[global_row],
        original_problem_->constraint_upper_bounds()[global_row]);
    // Every variable of a kept constraint belongs to this cluster by
    // construction (Decompose() merged them), so the mapping is valid.
    for (const SparseColumn::Entry e :
         transposed_matrix.column(RowToColIndex(global_row))) {
      const ColIndex global_col = RowToColIndex(e.row());
      const ColIndex local_col = global_to_local_vars[global_col];
      lp->SetCoefficient(local_row, local_col, e.coefficient());
    }
  }
  local_to_global_vars_[problem_index] = local_to_global;
}
// Scatters each sub-problem's variable values back into a single assignment
// indexed by the original problem's columns. Expects one DenseRow per
// sub-problem, in the order they were built.
DenseRow LPDecomposer::AggregateAssignments(
    const std::vector<DenseRow>& assignments) const {
  CHECK_EQ(assignments.size(), local_to_global_vars_.size());
  DenseRow global_values(original_problem_->num_variables(), Fractional(0.0));
  for (int problem = 0; problem < assignments.size(); ++problem) {
    const StrictITIVector<ColIndex, ColIndex>& local_to_global =
        local_to_global_vars_[problem];
    const DenseRow& local_values = assignments[problem];
    for (ColIndex local_col(0); local_col < local_values.size(); ++local_col) {
      global_values[local_to_global[local_col]] = local_values[local_col];
    }
  }
  return global_values;
}
} // namespace glop
} // namespace operations_research

View File

@@ -0,0 +1,81 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_LP_DATA_LP_DECOMPOSER_H_
#define OR_TOOLS_LP_DATA_LP_DECOMPOSER_H_
#include "base/unique_ptr.h"
#include <vector>
#include "lp_data/lp_data.h"
#include "lp_data/lp_types.h"
namespace operations_research {
namespace glop {
// This class is used to decompose an existing LinearProgram into several
// independent LinearPrograms. Problems are independent when none of their
// variables are connected, i.e. appear in the same constraints.
// Consider for instance the following problem:
// min: x + 2 y + 3 z + 4 t + 5 u
// c1: 0 <= x + z <= 1;
// c2: 0 <= y + t <= 1;
// c3: 0 <= x + u <= 1;
// int: x, y, z, t, u
// Variables x, z and u are connected by constraints c1 and c3.
// Variables y and t are connected by constraints c2.
// The problem can be decomposed into two independent problems:
// min: x + 3 z + 5 u
// c1: 0 <= x + z <= 1;
// c3: 0 <= x + u <= 1;
// int: x, z, u
// and
// min: 2 y + 4 t
// c2: 0 <= y + t <= 1;
// int: y, t
//
// Note that a solution to those two independent problems is a solution to the
// original problem.
class LPDecomposer {
 public:
  LPDecomposer();
  // Decomposes the problem into independent problems.
  // Note that a pointer is kept (no copy) on the linear_problem, so the problem
  // should not change during the life of the LPDecomposer object.
  void Decompose(const LinearProgram* linear_problem);
  // Returns the number of independent problems generated by Decompose().
  int GetNumberOfProblems() const { return clusters_.size(); }
  // Fills lp with the problem_index^th independent problem generated by
  // Decompose().
  // Note that this method runs in O(num-entries-in-generated-problem).
  void BuildProblem(int problem_index, LinearProgram* lp);
  // Returns an assignment to the original problem based on the assignments
  // to the independent problems.
  DenseRow AggregateAssignments(const std::vector<DenseRow>& assignments) const;
 private:
  // Problem given to Decompose(); not owned. Null until Decompose() is called.
  const LinearProgram* original_problem_;
  // clusters_[i] is the sorted list of original column indices forming the
  // i-th independent sub-problem.
  std::vector<std::vector<ColIndex>> clusters_;
  // local_to_global_vars_[i][local_col] is the original column of variable
  // local_col in the i-th sub-problem; filled by BuildProblem().
  std::vector<StrictITIVector<ColIndex, ColIndex>> local_to_global_vars_;
  DISALLOW_COPY_AND_ASSIGN(LPDecomposer);
};
} // namespace glop
} // namespace operations_research
#endif // OR_TOOLS_LP_DATA_LP_DECOMPOSER_H_

View File

@@ -346,10 +346,14 @@ void MPSReader::ProcessColumnsSection() {
const std::string& row1_name = GetField(start_index, 1);
const std::string& row1_value = GetField(start_index, 2);
const ColIndex col = data_->FindOrCreateVariable(column_name);
is_binary_by_default_.resize(col + 1, false);
if (in_integer_section_) {
data_->SetVariableIntegrality(col, true);
// The default bounds for integer variables are [0, 1].
data_->SetVariableBounds(col, 0.0, 1.0);
is_binary_by_default_[col] = true;
} else {
data_->SetVariableBounds(col, 0.0, kInfinity);
}
StoreCoefficient(col, row1_name, row1_value);
if (fields_.size() - start_index >= 4) {
@@ -482,15 +486,25 @@ void MPSReader::StoreBound(const std::string& bound_type_mnemonic,
if (integer_type_names_set_.count(bound_type_mnemonic) != 0) {
data_->SetVariableIntegrality(col, true);
}
Fractional lower_bound(-kInfinity);
Fractional upper_bound(kInfinity);
// Resize the is_binary_by_default_ in case it is the first time this column
// is encountered.
is_binary_by_default_.resize(col + 1, false);
// Check that "binary by default" implies "integer".
DCHECK(!is_binary_by_default_[col] || data_->is_variable_integer()[col]);
Fractional lower_bound = data_->variable_lower_bounds()[col];
Fractional upper_bound = data_->variable_upper_bounds()[col];
// If a variable is binary by default, its status is reset if any bound
// is set on it. We take care to restore the default bounds for general
// integer variables.
if (is_binary_by_default_[col]) {
lower_bound = Fractional(0.0);
upper_bound = kInfinity;
}
switch (bound_type_id) {
case LOWER_BOUND:
lower_bound = Fractional(GetDoubleFromString(bound_value));
upper_bound = data_->variable_upper_bounds()[col];
break;
case UPPER_BOUND:
lower_bound = data_->variable_lower_bounds()[col];
upper_bound = Fractional(GetDoubleFromString(bound_value));
break;
case FIXED_VARIABLE: {
@@ -500,13 +514,16 @@ void MPSReader::StoreBound(const std::string& bound_type_mnemonic,
break;
}
case FREE_VARIABLE:
// No constraint to add.
lower_bound = -kInfinity;
upper_bound = +kInfinity;
break;
case NEGATIVE:
lower_bound = -kInfinity;
upper_bound = Fractional(0.0);
break;
case POSITIVE:
lower_bound = Fractional(0.0);
upper_bound = +kInfinity;
break;
case BINARY:
lower_bound = Fractional(0.0);
@@ -520,6 +537,7 @@ void MPSReader::StoreBound(const std::string& bound_type_mnemonic,
<< ". (Line contents: " << line_ << ").";
parse_success_ = false;
}
is_binary_by_default_[col] = false;
data_->SetVariableBounds(col, lower_bound, upper_bound);
}

View File

@@ -220,6 +220,10 @@ class MPSReader {
// The current line in the file being parsed.
std::string line_;
// A row of Booleans. is_binary_by_default_[col] is true if col
// appeared within a scope started by INTORG and ended with INTEND markers.
DenseBooleanRow is_binary_by_default_;
// True if the problem contains lazy constraints (LAZYCONS).
bool has_lazy_constraints_;

338
src/sat/lp_utils.cc Normal file
View File

@@ -0,0 +1,338 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sat/lp_utils.h"
#include "glop/lp_solver.h"
#include "lp_data/lp_print_utils.h"
#include "sat/boolean_problem.h"
#include "util/fp_utils.h"
namespace operations_research {
namespace sat {
using glop::ColIndex;
using glop::Fractional;
using glop::RowIndex;
using glop::kInfinity;
using operations_research::new_proto::MPConstraintProto;
using operations_research::new_proto::MPModelProto;
using operations_research::new_proto::MPVariableProto;
// Converts an integer program whose variables are all 0/1 integers into a
// LinearBooleanProblem, scaling the double coefficients to int64.
//
// Returns false (after logging a warning) when:
//  - a variable is not a binary integer variable (or has an empty domain), or
//  - a constraint becomes trivially unsatisfiable once scaled, or
//  - the double -> int64 scaling loses too much relative precision.
bool ConvertBinaryMPModelProtoToBooleanProblem(
    const new_proto::MPModelProto& mp_model, LinearBooleanProblem* problem) {
  CHECK(problem != nullptr);
  problem->Clear();
  problem->set_name(mp_model.name());
  const int num_variables = mp_model.variable_size();
  problem->set_num_variables(num_variables);
  // Test if the variables are binary variables.
  // Add constraints for the fixed variables.
  for (int var_id = 0; var_id < num_variables; ++var_id) {
    const MPVariableProto& mp_var = mp_model.variable(var_id);
    problem->add_var_names(mp_var.name());
    // This will be changed to false as soon as we detect the variable to be
    // non-binary. This is done this way so we can display a nice error message
    // before aborting the function and returning false.
    bool is_binary = mp_var.is_integer();
    const Fractional lb = mp_var.lower_bound();
    const Fractional ub = mp_var.upper_bound();
    if (lb <= -1.0) is_binary = false;
    if (ub >= 2.0) is_binary = false;
    if (is_binary) {
      // 4 cases.
      if (lb <= 0.0 && ub >= 1.0) {
        // Binary variable. Ok.
      } else if (lb <= 1.0 && ub >= 1.0) {
        // Fixed variable at 1: force the literal with the constraint x == 1.
        LinearBooleanConstraint* constraint = problem->add_constraints();
        constraint->set_lower_bound(1);
        constraint->set_upper_bound(1);
        constraint->add_literals(var_id + 1);
        constraint->add_coefficients(1);
      } else if (lb <= 0.0 && ub >= 0.0) {
        // Fixed variable at 0: force x == 0 with bounds [0, 0].
        // (Bounds [1, 1] on the positive literal would wrongly force the
        // variable to 1 instead of 0.)
        LinearBooleanConstraint* constraint = problem->add_constraints();
        constraint->set_lower_bound(0);
        constraint->set_upper_bound(0);
        constraint->add_literals(var_id + 1);
        constraint->add_coefficients(1);
      } else {
        // No possible integer value!
        is_binary = false;
      }
    }
    // Abort if the variable is not binary.
    if (!is_binary) {
      LOG(WARNING) << "The variable #" << var_id << " with name "
                   << mp_var.name() << " is not binary. "
                   << "lb: " << lb << " ub: " << ub;
      return false;
    }
  }
  // Variables needed to scale the double coefficients into int64.
  const int64 kInt64Max = std::numeric_limits<int64>::max();
  double max_relative_error = 0.0;
  double max_bound_error = 0.0;
  double max_scaling_factor = 0.0;
  double relative_error = 0.0;
  double scaling_factor = 0.0;
  std::vector<double> coefficients;
  // Add all constraints.
  for (const MPConstraintProto& mp_constraint : mp_model.constraint()) {
    LinearBooleanConstraint* constraint = problem->add_constraints();
    constraint->set_name(mp_constraint.name());
    // First scale the coefficients of the constraints so their sum of absolute
    // values fits on an int64, then divide out their common factor.
    coefficients.clear();
    const int num_coeffs = mp_constraint.coefficient_size();
    for (int i = 0; i < num_coeffs; ++i) {
      coefficients.push_back(mp_constraint.coefficient(i));
    }
    GetBestScalingOfDoublesToInt64(coefficients, kInt64Max, &scaling_factor,
                                   &relative_error);
    const int64 gcd = ComputeGcdOfRoundedDoubles(coefficients, scaling_factor);
    max_relative_error = std::max(relative_error, max_relative_error);
    max_scaling_factor = std::max(scaling_factor / gcd, max_scaling_factor);
    // bound_error accumulates the total rounding error, so that the scaled
    // bounds below can be relaxed by that amount and stay conservative.
    double bound_error = 0.0;
    for (int i = 0; i < num_coeffs; ++i) {
      const double scaled_value = mp_constraint.coefficient(i) * scaling_factor;
      bound_error += fabs(round(scaled_value) - scaled_value);
      const int64 value = static_cast<int64>(round(scaled_value)) / gcd;
      if (value != 0) {
        // Literal indices are 1-based in LinearBooleanProblem.
        constraint->add_literals(mp_constraint.var_index(i) + 1);
        constraint->add_coefficients(value);
      }
    }
    max_bound_error = std::max(max_bound_error, bound_error);
    // Add the bounds. Note that we do not pass them to
    // GetBestScalingOfDoublesToInt64() because we know that the sum of absolute
    // coefficients of the constraint fit on an int64. If one of the scaled
    // bound overflows, we don't care by how much because in this case the
    // constraint is just trivial or unsatisfiable.
    const Fractional lb = mp_constraint.lower_bound();
    if (lb != -kInfinity) {
      if (lb * scaling_factor > static_cast<double>(kInt64Max)) {
        LOG(WARNING) << "A constraint is trivially unsatisfiable.";
        return false;
      }
      if (lb * scaling_factor > -static_cast<double>(kInt64Max)) {
        // Otherwise, the constraint is not needed.
        constraint->set_lower_bound(
            static_cast<int64>(round(lb * scaling_factor - bound_error)) / gcd);
      }
    }
    const Fractional ub = mp_constraint.upper_bound();
    if (ub != kInfinity) {
      if (ub * scaling_factor < -static_cast<double>(kInt64Max)) {
        LOG(WARNING) << "A constraint is trivially unsatisfiable.";
        return false;
      }
      if (ub * scaling_factor < static_cast<double>(kInt64Max)) {
        // Otherwise, the constraint is not needed.
        constraint->set_upper_bound(
            static_cast<int64>(round(ub * scaling_factor + bound_error)) / gcd);
      }
    }
  }
  // Display the error/scaling without taking into account the objective first.
  LOG(INFO) << "Maximum constraint relative error: " << max_relative_error;
  LOG(INFO) << "Maximum constraint bound error: " << max_bound_error;
  LOG(INFO) << "Maximum constraint scaling factor: " << max_scaling_factor;
  // Add the objective, scaled the same way as the constraints.
  coefficients.clear();
  for (int var_id = 0; var_id < num_variables; ++var_id) {
    const MPVariableProto& mp_var = mp_model.variable(var_id);
    coefficients.push_back(mp_var.objective_coefficient());
  }
  GetBestScalingOfDoublesToInt64(coefficients, kInt64Max, &scaling_factor,
                                 &relative_error);
  const int64 gcd = ComputeGcdOfRoundedDoubles(coefficients, scaling_factor);
  max_relative_error = std::max(relative_error, max_relative_error);
  // Display the objective error/scaling.
  LOG(INFO) << "objective relative error: " << relative_error;
  LOG(INFO) << "objective scaling factor: " << scaling_factor / gcd;
  LinearObjective* objective = problem->mutable_objective();
  objective->set_offset(mp_model.objective_offset() * scaling_factor / gcd);
  // Note that here we set the scaling factor for the inverse operation of
  // getting the "true" objective value from the scaled one. Hence the inverse.
  objective->set_scaling_factor(1.0 / scaling_factor * gcd);
  for (int var_id = 0; var_id < num_variables; ++var_id) {
    const MPVariableProto& mp_var = mp_model.variable(var_id);
    const int64 value = static_cast<int64>(round(
                            mp_var.objective_coefficient() * scaling_factor)) /
                        gcd;
    if (value != 0) {
      objective->add_literals(var_id + 1);
      objective->add_coefficients(value);
    }
  }
  // If the problem was a maximization one, we need to modify the objective.
  if (mp_model.maximize()) ChangeOptimizationDirection(problem);
  // Test the precision of the conversion.
  const double kRelativeTolerance = 1e-8;
  if (max_relative_error > kRelativeTolerance) {
    LOG(WARNING) << "The relative error during double -> int64 conversion "
                 << "is too high!";
    return false;
  }
  return true;
}
// Builds the lp relaxation of a LinearBooleanProblem: one [0, 1] integer
// variable per Boolean variable, one linear constraint per Boolean constraint,
// and the (unscaled) objective.
void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem& problem,
                                          glop::LinearProgram* lp) {
  lp->Clear();
  for (int i = 0; i < problem.num_variables(); ++i) {
    const ColIndex col = lp->CreateNewVariable();
    lp->SetVariableIntegrality(col, true);
    lp->SetVariableBounds(col, 0.0, 1.0);
  }
  // Variables name are optional.
  if (problem.var_names_size() != 0) {
    CHECK_EQ(problem.var_names_size(), problem.num_variables());
    for (int i = 0; i < problem.num_variables(); ++i) {
      lp->SetVariableName(ColIndex(i), problem.var_names(i));
    }
  }
  for (const LinearBooleanConstraint& constraint : problem.constraints()) {
    const RowIndex constraint_index = lp->CreateNewConstraint();
    lp->SetConstraintName(constraint_index, constraint.name());
    // A negative literal with coefficient c contributes c * (1 - x): negate
    // the coefficient on x and fold the constant c (accumulated in 'sum')
    // into the constraint bounds.
    double sum = 0.0;
    for (int i = 0; i < constraint.literals_size(); ++i) {
      const int literal = constraint.literals(i);
      const double coeff = constraint.coefficients(i);
      // Literal indices are 1-based; column indices are 0-based.
      const ColIndex variable_index = ColIndex(abs(literal) - 1);
      if (literal < 0) {
        sum += coeff;
        lp->SetCoefficient(constraint_index, variable_index, -coeff);
      } else {
        lp->SetCoefficient(constraint_index, variable_index, coeff);
      }
    }
    lp->SetConstraintBounds(
        constraint_index,
        constraint.has_lower_bound() ? constraint.lower_bound() - sum
                                     : -kInfinity,
        constraint.has_upper_bound() ? constraint.upper_bound() - sum
                                     : kInfinity);
  }
  // Objective.
  {
    double sum = 0.0;
    const LinearObjective& objective = problem.objective();
    const double scaling_factor = objective.scaling_factor();
    for (int i = 0; i < objective.literals_size(); ++i) {
      const int literal = objective.literals(i);
      const double coeff =
          static_cast<double>(objective.coefficients(i)) * scaling_factor;
      const ColIndex variable_index = ColIndex(abs(literal) - 1);
      if (literal < 0) {
        sum += coeff;
        lp->SetObjectiveCoefficient(variable_index, -coeff);
      } else {
        lp->SetObjectiveCoefficient(variable_index, coeff);
      }
    }
    // NOTE(review): 'sum' already includes scaling_factor (it accumulates the
    // scaled 'coeff' values), so multiplying (sum + offset) by scaling_factor
    // again looks like it double-scales 'sum' — verify against the offset
    // encoding produced by ConvertBinaryMPModelProtoToBooleanProblem().
    lp->SetObjectiveOffset((sum + objective.offset()) * scaling_factor);
    lp->SetMaximizationProblem(scaling_factor < 0);
  }
  lp->CleanUp();
}
// Tightens the lp variable bounds to match the literals the SAT solver has
// permanently fixed (i.e. assigned at decision level 0). Returns the number
// of variables fixed this way.
int FixVariablesFromSat(const SatSolver& solver, glop::LinearProgram* lp) {
  int num_fixed_variables = 0;
  const Trail& trail = solver.LiteralTrail();
  for (int i = 0; i < trail.Index(); ++i) {
    const VariableIndex var = trail[i].Variable();
    // Only level-0 assignments are permanent; deeper ones may be backtracked.
    if (trail.Info(var).level == 0) {
      ++num_fixed_variables;
      // Use integer literals: the original '? 1.0 : 0.0' silently narrowed
      // double constants to int.
      const int value = trail[i].IsPositive() ? 1 : 0;
      lp->SetVariableBounds(ColIndex(var.value()), value, value);
    }
  }
  return num_fixed_variables;
}
// Solves the lp (with a time limit) and, on success, seeds the SAT solver's
// polarity heuristic: each variable prefers the Boolean value nearest its lp
// value, weighted by how close the lp value is to an integer.
bool SolveLpAndUseSolutionForSatAssignmentPreference(
    const glop::LinearProgram& lp, SatSolver* sat_solver,
    double max_time_in_seconds) {
  glop::GlopParameters parameters;
  parameters.set_max_time_in_seconds(max_time_in_seconds);
  glop::LPSolver lp_solver;
  lp_solver.SetParameters(parameters);
  const glop::ProblemStatus status = lp_solver.Solve(lp);
  const bool solution_usable = status == glop::ProblemStatus::OPTIMAL ||
                               status == glop::ProblemStatus::IMPRECISE ||
                               status == glop::ProblemStatus::PRIMAL_FEASIBLE;
  if (!solution_usable) return false;
  for (ColIndex col(0); col < lp.num_variables(); ++col) {
    const Fractional value = lp_solver.variable_values()[col];
    const bool prefer_true = round(value) == 1;
    sat_solver->SetAssignmentPreference(
        Literal(VariableIndex(col.value()), prefer_true),
        1 - fabs(value - round(value)));
  }
  return true;
}
// Solves the lp and appends to problem one fixing constraint (x == 0 or
// x == 1) for each variable whose lp value is within 1e-5 of an integer.
// Returns false when the lp could not be solved to (primal) feasibility.
bool SolveLpAndUseIntegerVariableToStartLNS(const glop::LinearProgram& lp,
                                            LinearBooleanProblem* problem) {
  glop::LPSolver lp_solver;
  const glop::ProblemStatus status = lp_solver.Solve(lp);
  if (status != glop::ProblemStatus::OPTIMAL &&
      status != glop::ProblemStatus::PRIMAL_FEASIBLE) {
    return false;
  }
  const Fractional tolerance = 1e-5;
  int num_variable_fixed = 0;
  for (ColIndex col(0); col < lp.num_variables(); ++col) {
    const Fractional value = lp_solver.variable_values()[col];
    // Only near-integer lp values produce a fixing constraint.
    int fixed_value = -1;
    if (value > 1 - tolerance) {
      fixed_value = 1;
    } else if (value < tolerance) {
      fixed_value = 0;
    } else {
      continue;
    }
    ++num_variable_fixed;
    LinearBooleanConstraint* constraint = problem->add_constraints();
    constraint->set_lower_bound(fixed_value);
    constraint->set_upper_bound(fixed_value);
    constraint->add_coefficients(1);
    // Literal indices are 1-based in LinearBooleanProblem.
    constraint->add_literals(col.value() + 1);
  }
  LOG(INFO) << "LNS with " << num_variable_fixed << " fixed variables.";
  return true;
}
} // namespace sat
} // namespace operations_research

60
src/sat/lp_utils.h Normal file
View File

@@ -0,0 +1,60 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Utility functions to interact with an lp solver from the SAT context.
#ifndef OR_TOOLS_SAT_LP_UTILS_H_
#define OR_TOOLS_SAT_LP_UTILS_H_
#include "sat/boolean_problem.pb.h"
#include "linear_solver/linear_solver2.pb.h"
#include "lp_data/lp_data.h"
#include "sat/sat_solver.h"
namespace operations_research {
namespace sat {
// Converts an integer program with only binary variables to a Boolean
// optimization problem. Returns false if the problem does not contain only
// binary integer variables, or if the coefficients could not be converted to
// integers with good enough precision.
bool ConvertBinaryMPModelProtoToBooleanProblem(
const new_proto::MPModelProto& mp_model, LinearBooleanProblem* problem);
// Converts a Boolean optimization problem to its lp formulation.
void ConvertBooleanProblemToLinearProgram(const LinearBooleanProblem& problem,
glop::LinearProgram* lp);
// Changes the variable bounds of the lp to reflect the variables that have been
// fixed by the SAT solver (i.e. assigned at decision level 0). Returns the
// number of variables fixed this way.
int FixVariablesFromSat(const SatSolver& solver, glop::LinearProgram* lp);
// Solves the given lp problem and uses the lp solution to drive the SAT solver
// polarity choices. The variable must have the same index in the solved lp
// problem and in SAT for this to make sense.
//
// Returns false if a problem occured while trying to solve the lp.
bool SolveLpAndUseSolutionForSatAssignmentPreference(
const glop::LinearProgram& lp, SatSolver* sat_solver,
double max_time_in_seconds);
// Solves the lp and add constraints to fix the integer variable of the lp in
// the LinearBoolean problem.
bool SolveLpAndUseIntegerVariableToStartLNS(const glop::LinearProgram& lp,
LinearBooleanProblem* problem);
} // namespace sat
} // namespace operations_research
#endif // OR_TOOLS_SAT_LP_UTILS_H_

107
src/util/fp_utils.cc Normal file
View File

@@ -0,0 +1,107 @@
// Copyright 2010-2014 Google
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "util/fp_utils.h"
#include <cmath>
#include "util/bitset.h"
namespace operations_research {
// Computes the largest power-of-two scaling factor 2^e such that the sum of
// the absolute values of round(x * 2^e) over all x in input stays at most
// max_absolute_sum, and reports the worst relative rounding error incurred by
// this scaling.
//
// On invalid input (a non-finite value in input, or max_absolute_sum < 0) the
// function returns in the "error" state: *scaling_factor == 0 and
// *relative_error == +infinity.
void GetBestScalingOfDoublesToInt64(const std::vector<double>& input,
                                    int64 max_absolute_sum,
                                    double* scaling_factor,
                                    double* relative_error) {
  const double kInfinity = std::numeric_limits<double>::infinity();
  // We start by initializing the returned values to the "error" state.
  *scaling_factor = 0;
  *relative_error = kInfinity;
  // Abort in the "error" state if max_absolute_sum doesn't make sense.
  if (max_absolute_sum < 0) return;
  // Our scaling factor will be 2^factor_exponent.
  //
  // TODO(user): Consider using 'max_abs_sum / abs_sum' as a factor if the
  // error can't be zero? Note however that using a power of two has the extra
  // advantage that a subsequent int64 -> double -> scaled back to int64
  // round-trip loses no extra information.
  int factor_exponent = 0;
  uint64 abs_sum = 0;
  bool is_first_value = true;
  const int msb = MostSignificantBitPosition64(max_absolute_sum);
  for (const double x : input) {
    // A value of zero can just be skipped (and needs to be, because the code
    // below doesn't handle it correctly).
    if (x == 0.0) continue;
    // If x is not finite, then abort in the "error" state.
    if (!(x > -kInfinity && x < kInfinity)) return;
    // Compute the greatest candidate such that
    // round(fabs(x) * 2^candidate) <= max_absolute_sum.
    int candidate = msb - ilogb(x);
    if (round(ldexp(fabs(x), candidate)) > max_absolute_sum) --candidate;
    DCHECK_LE(round(ldexp(fabs(x), candidate)), max_absolute_sum);
    // Update factor_exponent, which is the min of all the candidates.
    if (is_first_value || candidate < factor_exponent) {
      if (!is_first_value) {
        // Rescale the current sum to the new, smaller exponent. The shift
        // count is positive here (candidate < factor_exponent); a shift of 64
        // or more must be handled explicitly because shifting a uint64 by its
        // full width (or more) is undefined behavior -- the intended result of
        // such a large shift is simply zero.
        const int shift = factor_exponent - candidate;
        abs_sum = shift >= 64 ? 0 : abs_sum >> shift;
      }
      is_first_value = false;
      factor_exponent = candidate;
    }
    // Update the sum of absolute values of the numbers seen so far, lowering
    // the exponent by one if the capacity max_absolute_sum is exceeded.
    abs_sum += round(fabs(ldexp(x, factor_exponent)));
    if (abs_sum > max_absolute_sum) {
      abs_sum >>= 1;
      factor_exponent--;
    }
    DCHECK_LE(abs_sum, max_absolute_sum);
  }
  // Compute the worst relative_error over all the scaled values.
  *relative_error = 0.0;
  *scaling_factor = ldexp(1.0, factor_exponent);
  for (const double x : input) {
    if (x == 0.0) continue;
    const double scaled = fabs(ldexp(x, factor_exponent));
    *relative_error =
        std::max(*relative_error, fabs(round(scaled) / scaled - 1));
  }
}
// Returns the greatest common divisor of round(fabs(v * scaling_factor)) over
// all entries v of x, ignoring the entries that round to zero. If no entry
// contributes (empty input or all zeros), returns 1 so that the result is
// always a usable divisor.
int64 ComputeGcdOfRoundedDoubles(const std::vector<double>& x,
                                 double scaling_factor) {
  int64 result = 0;
  for (const double v : x) {
    // A GCD of 1 can never change again, so stop scanning early.
    if (result == 1) break;
    int64 current = round(fabs(v * scaling_factor));
    DCHECK_GE(current, 0);
    if (current == 0) continue;
    if (result == 0) {
      // First non-zero value: it is the GCD so far.
      result = current;
    } else {
      // Euclid's algorithm: GCD(result, current) = GCD(current, result % current).
      while (current != 0) {
        const int64 remainder = result % current;
        result = current;
        current = remainder;
      }
    }
  }
  DCHECK_GE(result, 0);
  return result > 0 ? result : 1;
}
} // namespace operations_research