def test_stab():
    """Solve a tiny max-weight stable set instance end-to-end.

    A 5-cycle with unit weights has a maximum stable set of size 2,
    so the optimal objective value must be exactly 2.
    """
    graph = nx.cycle_graph(5)
    weights = [1.0] * 5
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    assert instance.model.OBJ() == 2.0
def test_lazy_before():
    """Check LazyConstraintsComponent.before_solve behavior.

    The component must query each constraint classifier with the instance
    features, then generate and add lazy constraints only for constraints
    whose predicted violation probability exceeds the threshold.
    """
    instances, models = get_training_instances_and_models()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = LazyConstraintsComponent(threshold=0.10)
    component.classifiers = {
        "a": Mock(spec=Classifier),
        "b": Mock(spec=Classifier),
    }
    # "a" is predicted unlikely to be violated (0.05 < threshold);
    # "b" is predicted likely to be violated (0.80 > threshold).
    component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])

    component.before_solve(solver, instances[0], models[0])

    # Should ask classifier likelihood of each constraint being violated
    expected_features = np.array([[67.0, 21.75, 1287.92]])
    for name in ("a", "b"):
        actual = component.classifiers[name].predict_proba.call_args[0][0]
        assert norm(expected_features - actual) < E

    # Should ask instance to generate cut for constraints whose likelihood
    # of being violated exceeds the threshold
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")

    # Should ask internal solver to add generated constraint
    solver.internal_solver.add_constraint.assert_called_once_with("c1")
def test_ml():
    """Benchmark ML-enhanced solvers against the stored baseline results."""
    logger.info("Loading instances...")
    train_instances = load(f"{basepath}/train_instances.bin")
    test_instances = load(f"{basepath}/test_instances.bin")

    # Compare exact mode (ML warm starts, provable optimality) against
    # heuristic mode (ML-fixed variables, faster but not exact).
    solvers = {
        "ml-exact": LearningSolver(
            time_limit=time_limit,
            solver=internal_solver,
        ),
        "ml-heuristic": LearningSolver(
            time_limit=time_limit,
            solver=internal_solver,
            mode="heuristic",
        ),
    }
    benchmark = BenchmarkRunner(solvers)

    logger.info("Loading results...")
    benchmark.load_results(f"{basepath}/benchmark_baseline.csv")

    logger.info("Fitting...")
    benchmark.fit(train_instances)

    logger.info("Solving...")
    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
    benchmark.save_results(f"{basepath}/benchmark_ml.csv")
def test_parallel_solve():
    """Solve ten instances in parallel and verify every solution came back."""
    instances = [_get_instance() for _ in range(10)]
    solver = LearningSolver()
    results = solver.parallel_solve(instances, n_jobs=3)
    assert len(results) == 10
    # Each knapsack instance has four variables; all must be populated.
    for inst in instances:
        assert len(inst.solution["x"].keys()) == 4
def train():
    """Solve the training instances of a benchmark challenge and save them.

    The challenge is addressed as "<problem>/<ChallengeName>" on the command
    line; the problem module is imported dynamically from miplearn.problems.
    """
    problem_name, challenge_name = args["<challenge>"].split("/")
    pkg = importlib.import_module(f"miplearn.problems.{problem_name}")
    challenge = getattr(pkg, challenge_name)()

    train_instances = challenge.training_instances
    test_instances = challenge.test_instances

    # components={} disables all ML components: we want plain solves here,
    # producing unbiased training data.
    solver = LearningSolver(
        time_limit=time_limit,
        solver=internal_solver,
        components={},
    )
    solver.parallel_solve(train_instances, n_jobs=n_jobs)

    save(train_instances, f"{basepath}/train_instances.bin")
    save(test_instances, f"{basepath}/test_instances.bin")
def _get_instances():
    """Return five solved knapsack training instances and their models.

    Bug fix: the original built the list with ``[KnapsackInstance(...)] * 5``,
    which creates five references to the SAME instance object — every call to
    ``solver.solve`` then mutated that one shared instance (solution, bounds,
    features), so the "five" training samples were aliases of each other.
    Build five independent instances instead.

    Returns:
        (instances, models): parallel lists of solved instances and the
        concrete optimization models built from them.
    """
    instances = [
        KnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )
        for _ in range(5)
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models
def test_solver():
    """Solve one knapsack instance under every solver/mode combination.

    Verifies the MIP solution, bounds, LP relaxation values, and that the
    solver can be re-fit on its own output and solve again.
    """
    instance = _get_instance()
    for mode in ["exact", "heuristic"]:
        for internal_solver in ["cplex", "gurobi"]:
            solver = LearningSolver(
                time_limit=300,
                gap_tolerance=1e-3,
                threads=1,
                solver=internal_solver,
                mode=mode,
            )
            solver.solve(instance)

            # Optimal MIP solution: pick items 0, 2 and 3.
            assert instance.solution["x"][0] == 1.0
            assert instance.solution["x"][1] == 0.0
            assert instance.solution["x"][2] == 1.0
            assert instance.solution["x"][3] == 1.0
            assert instance.lower_bound == 1183.0
            assert instance.upper_bound == 1183.0

            # LP relaxation: item 1 is fractional.
            assert round(instance.lp_solution["x"][0], 3) == 1.000
            assert round(instance.lp_solution["x"][1], 3) == 0.923
            assert round(instance.lp_solution["x"][2], 3) == 1.000
            assert round(instance.lp_solution["x"][3], 3) == 0.000
            assert round(instance.lp_value, 3) == 1287.923

            # Re-training on the solved instance must not break solving.
            solver.fit([instance])
            solver.solve(instance)
def test_learning_solver():
    """Solve a knapsack instance across solvers/modes; check all outputs.

    Beyond the solution and bounds, also checks that no lazy-constraint
    violations are recorded, the solver log is captured, and the solver
    remains picklable after fitting.
    """
    instance = _get_instance()
    for mode in ["exact", "heuristic"]:
        for internal_solver in ["cplex", "gurobi", GurobiSolver]:
            solver = LearningSolver(
                time_limit=300,
                gap_tolerance=1e-3,
                threads=1,
                solver=internal_solver,
                mode=mode,
            )
            solver.solve(instance)

            # Optimal MIP solution: items 0, 2 and 3 selected.
            assert instance.solution["x"][0] == 1.0
            assert instance.solution["x"][1] == 0.0
            assert instance.solution["x"][2] == 1.0
            assert instance.solution["x"][3] == 1.0
            assert instance.lower_bound == 1183.0
            assert instance.upper_bound == 1183.0

            # LP relaxation values (item 1 fractional at ~0.923).
            assert round(instance.lp_solution["x"][0], 3) == 1.000
            assert round(instance.lp_solution["x"][1], 3) == 0.923
            assert round(instance.lp_solution["x"][2], 3) == 1.000
            assert round(instance.lp_solution["x"][3], 3) == 0.000
            assert round(instance.lp_value, 3) == 1287.923

            assert instance.found_violations == []
            assert len(instance.solver_log) > 100

            solver.fit([instance])
            solver.solve(instance)

            # Assert solver is picklable
            with tempfile.TemporaryFile() as file:
                pickle.dump(solver, file)
def get_training_instances_and_models():
    """Return two distinct solved knapsack instances and their models."""
    instances = [
        KnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        ),
        KnapsackInstance(
            weights=[25.0, 30.0, 22.0, 18.0],
            prices=[500.0, 365.0, 420.0, 150.0],
            capacity=70.0,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    # Solving populates each instance with training data (solution, bounds).
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models
def _get_instances():
    """Return two small solved knapsack instances and their models."""
    instances = [
        KnapsackInstance(
            weights=[1.0, 2.0, 3.0],
            prices=[10.0, 20.0, 30.0],
            capacity=2.5,
        ),
        KnapsackInstance(
            weights=[3.0, 4.0, 5.0],
            prices=[20.0, 30.0, 40.0],
            capacity=4.5,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    # Solve each instance so it carries training data for the components.
    for instance, model in zip(instances, models):
        solver.solve(instance, model)
    return instances, models
def test_baseline():
    """Solve the test instances with a plain (no-ML) solver and save results.

    Produces the baseline CSV that later ML benchmarks compare against.
    """
    test_instances = load(f"{basepath}/test_instances.bin")
    solvers = {
        "baseline": LearningSolver(
            time_limit=test_time_limit,
            solver=internal_solver,
        ),
    }
    benchmark = BenchmarkRunner(solvers)
    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
    benchmark.save_results(f"{basepath}/benchmark_baseline.csv")
def test_instance():
    """Solve a symmetric 4-city TSP and verify the optimal tour.

    The distance matrix admits a tour 0-1-2-3-0 of total length 4; the
    selected edges and both bounds are checked for each internal solver.
    """
    n_cities = 4
    distances = np.array([
        [0.0, 1.0, 2.0, 1.0],
        [1.0, 0.0, 1.0, 2.0],
        [2.0, 1.0, 0.0, 1.0],
        [1.0, 2.0, 1.0, 0.0],
    ])
    instance = TravelingSalesmanInstance(n_cities, distances)
    expected_edges = {
        (0, 1): 1.0,
        (0, 2): 0.0,
        (0, 3): 1.0,
        (1, 2): 1.0,
        (1, 3): 0.0,
        (2, 3): 1.0,
    }
    for solver_name in ["gurobi", "cplex"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        for edge, value in expected_edges.items():
            assert x[edge] == value
        assert instance.lower_bound == 4.0
        assert instance.upper_bound == 4.0
def test_benchmark():
    """Exercise BenchmarkRunner: fit, parallel solve, save and reload results."""
    # Generate training and test instances
    train_instances = MaxWeightStableSetGenerator(
        n=randint(low=25, high=26)).generate(5)
    test_instances = MaxWeightStableSetGenerator(
        n=randint(low=25, high=26)).generate(3)

    # Training phase...
    training_solver = LearningSolver()
    training_solver.parallel_solve(train_instances, n_jobs=10)

    # Test phase...
    test_solvers = {
        "Strategy A": LearningSolver(),
        "Strategy B": LearningSolver(),
    }
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
    # 3 instances x 2 solvers x 2 trials = 12 result rows, 16 columns.
    assert benchmark.raw_results().values.shape == (12, 16)

    # Results must round-trip through CSV unchanged in shape.
    benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")

    benchmark = BenchmarkRunner(test_solvers)
    benchmark.load_results("/tmp/benchmark.csv")
    assert benchmark.raw_results().values.shape == (12, 16)
def test_subtour():
    """Solve a 6-city TSP whose LP solution contains subtours.

    The city layout (two rows of points) makes subtour elimination
    constraints necessary; the asserted edges form the single optimal tour
    0-1-2-3-5-4-0.
    """
    n_cities = 6
    cities = np.array([
        [0.0, 0.0],
        [1.0, 0.0],
        [2.0, 0.0],
        [3.0, 0.0],
        [0.0, 1.0],
        [3.0, 1.0],
    ])
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    tour_edges = [(0, 1), (0, 4), (1, 2), (2, 3), (3, 5), (4, 5)]
    for solver_name in ["gurobi", "cplex"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        for edge in tour_edges:
            assert x[edge] == 1.0
def test_add_components():
    """Adding the same component type twice must keep only one copy."""
    solver = LearningSolver(components=[])
    solver.add(BranchPriorityComponent())
    solver.add(BranchPriorityComponent())  # replaces the first, no duplicate
    assert len(solver.components) == 1
    assert "BranchPriorityComponent" in solver.components