Example #1
    def advance(self, evaluator: Evaluator):
        self.iteration += 1
        logger.info("Starting Random Walk iteration %d" % self.iteration)

        parameters = [np.array([
            bounds[0] + self.random.random() * (bounds[1] - bounds[0]) # TODO: Not deterministic!
            for bounds in self.bounds
        ]) for k in range(self.parallel)]

        identifiers = [evaluator.submit(p) for p in parameters]

        evaluator.wait(identifiers)
        evaluator.clean()
Example #2
def test_nelder_mead_quadratic():
    problem = QuadraticProblem([2.0, 1.0], [0.0, 0.0])

    evaluator = Evaluator(simulator=QuadraticSimulator(), problem=problem)

    algorithm = NelderMead(problem)

    assert Loop(threshold=1e-4).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (2.0, 1.0), 1e-2)
Example #3
def test_random_walk():
    evaluator = Evaluator(simulator=QuadraticSimulator(),
                          problem=QuadraticProblem([2.0, 1.0]))

    for seed in (1000, 2000, 3000, 4000):
        algorithm = RandomWalk(evaluator, seed=seed)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (2.0, 1.0), 1e-1)
Example #4
def test_cobyla_quadratic():
    problem = QuadraticProblem([2.0, 1.0], [0.0, 0.0])

    evaluator = Evaluator(simulator=QuadraticSimulator(), problem=problem)

    algorithm = ScipyAlgorithm(problem, method="COBYLA")

    assert Loop(threshold=1e-4).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (2.0, 1.0), 1e-2)
Example #5
def test_scipy_traffic():
    problem = TrafficProblem()

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    algorithm = ScipyAlgorithm(problem, method="Nelder-Mead")

    assert Loop(threshold=5.0).run(evaluator=evaluator,
                                   algorithm=algorithm) == pytest.approx(
                                       (510.0, 412.0), 1.0)
Example #6
def test_random_walk_traffic():
    problem = TrafficProblem()

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    algorithm = RandomWalk(problem, seed=1000)

    assert Loop(threshold=10.0).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (510.0, 412.0), 10.0)
Example #7
def test_random_walk_quadratic():
    problem = QuadraticProblem([2.0, 1.0])

    evaluator = Evaluator(simulator=QuadraticSimulator(), problem=problem)

    algorithm = RandomWalk(problem, seed=1000)

    assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (2.0, 1.0), 1e-1)
Example #8
def test_cmaes_quadratic():
    problem = QuadraticProblem([2.0, 1.0], [0.0, 0.0])

    evaluator = Evaluator(simulator=QuadraticSimulator(), problem=problem)

    for seed in (1000, 2000, 3000, 4000):
        algorithm = CMAES(problem, initial_step_size=0.1, seed=seed)

        assert Loop(threshold=1e-4).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (2.0, 1.0), 1e-2)
Example #9
def test_cmaes_traffic():
    problem = TrafficProblem()

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    for seed in (1000, 2000, 3000, 4000):
        algorithm = CMAES(problem, initial_step_size=10.0, seed=seed)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (510.0, 412.0), 1.0)
Example #10
def test_cma_es():
    for seed in (1000, 2000, 3000, 4000):
        evaluator = Evaluator(simulator=CongestionSimulator(),
                              problem=CongestionProblem(0.3, iterations=200))

        algorithm = CMAES(evaluator, initial_step_size=50, seed=seed)

        assert abs(
            np.round(
                Loop(threshold=1e-4).run(evaluator=evaluator,
                                         algorithm=algorithm)) - 230) < 10
Example #11
def __test_sis_single_fidelity():
    evaluator = Evaluator(simulator=SISSimulator(), problem=SISProblem(0.6))

    algorithm = BatchBayesianOptimization(evaluator, batch_size=4)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np.random.seed(0)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            0.625, 1e-1)
Example #12
def test_nelder_mead_traffic():
    problem = TrafficProblem()
    problem.use_bounds([[400.0, 600.0]] * 2)

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    algorithm = NelderMead(problem)

    assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (510.0, 412.0), 1.0)
Example #13
def test_scipy():
    evaluator = Evaluator(
        simulator = QuadraticSimulator(),
        problem = QuadraticProblem([2.0, 1.0], [0.0, 0.0])
    )

    algorithm = ScipyAlgorithm(evaluator, method = "COBYLA")

    assert Loop(threshold = 1e-3).run(
        evaluator = evaluator,
        algorithm = algorithm
    ) == pytest.approx((2.0, 1.0), 1e-3)
Example #14
def __test_bayesian_optimization():
    evaluator = Evaluator(simulator=QuadraticSimulator(),
                          problem=QuadraticProblem([2.0, 1.0]))

    algorithm = BatchBayesianOptimization(evaluator, batch_size=4)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np.random.seed(0)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (2.0, 1.0), 1e-1)
Example #15
def test_spsa():
    evaluator = Evaluator(simulator=QuadraticSimulator(),
                          problem=QuadraticProblem([2.0, 1.0], [0.0, 0.0]))

    for seed in (1000, 2000, 3000, 4000):
        algorithm = SPSA(evaluator,
                         perturbation_factor=2e-2,
                         gradient_factor=0.2,
                         seed=seed)

        assert Loop(threshold=1e-4).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (2.0, 1.0), 1e-2)
Example #16
def test_spsa_traffic():
    problem = TrafficProblem()

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    for seed in (1000, 2000, 3000, 4000):
        algorithm = SPSA(problem,
                         perturbation_factor=10.0,
                         gradient_factor=1.0,
                         seed=seed)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (510.0, 412.0), 1.0)
Example #17
def test_fdsa_quadratic():
    problem = QuadraticProblem([2.0, 1.0], [0.0, 0.0])

    evaluator = Evaluator(simulator=QuadraticSimulator(), problem=problem)

    algorithm = FDSA(
        problem,
        perturbation_factor=2e-2,
        gradient_factor=0.2,
    )

    assert Loop(threshold=1e-4).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (2.0, 1.0), 1e-2)
Example #18
def test_fdsa_traffic():
    problem = TrafficProblem()

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    algorithm = FDSA(
        problem,
        perturbation_factor=10.0,
        gradient_factor=1.0,
    )

    assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                    algorithm=algorithm) == pytest.approx(
                                        (510.0, 412.0), 1.0)
Example #19
def test_nelder_mead_rosenbrock():
    evaluator = Evaluator(
        simulator = RosenbrockSimulator(),
        problem = RosenbrockProblem(2)
    )

    algorithm = NelderMead(evaluator, bounds = [
        [-6.0, 6.0],
        [-6.0, 6.0]
    ])

    assert Loop(threshold = 1e-6).run(
        evaluator = evaluator,
        algorithm = algorithm
    ) == pytest.approx((1.0, 1.0), 1e-2)
Example #20
def test_opdyts_traffic():
    problem = TrafficProblem(iterations=40)

    evaluator = Evaluator(simulator=TrafficSimulator(), problem=problem)

    for seed in (1000, 2000, 3000, 4000):
        algorithm = Opdyts(problem,
                           candidate_set_size=16,
                           number_of_transitions=10,
                           perturbation_length=50,
                           seed=seed)

        assert Loop(threshold=1e-2).run(evaluator=evaluator,
                                        algorithm=algorithm) == pytest.approx(
                                            (510.0, 412.0), 1.0)
Example #21
def test_opdyts():
    evaluator = Evaluator(simulator=CongestionSimulator(),
                          problem=CongestionProblem(0.3, iterations=10))

    algorithm = Opdyts(evaluator,
                       candidate_set_size=16,
                       number_of_transitions=20,
                       perturbation_length=50,
                       seed=0)

    assert np.round(
        Loop(threshold=1e-3,
             maximum_cost=10000).run(evaluator=evaluator,
                                     algorithm=algorithm)) == 231
Example #22
    def advance(self, evaluator: Evaluator):
        self.iteration += 1
        logger.info("Starting SPSA iteration %d." % self.iteration)

        if self.parameters is None:
            self.parameters = evaluator.problem.initial

        # Calculate objective
        if self.compute_objective:
            annotations = { "type": "objective" }
            objective_identifier = evaluator.submit(self.parameters, annotations = annotations)

        # Update step lengths
        gradient_length = self.gradient_factor / (self.iteration + self.gradient_offset)**self.gradient_exponent
        perturbation_length = self.perturbation_factor / self.iteration**self.perturbation_exponent
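        # These are the standard decreasing SPSA gain sequences a_k = a / (k + A)^alpha and c_k = c / k^gamma.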

        # Sample direction from Rademacher distribution
        direction = self.random.randint(0, 2, len(self.parameters)) - 0.5
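        # Each component of the direction is -0.5 or +0.5 (a scaled Rademacher draw).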

        annotations = {
            "gradient_length": gradient_length,
            "perturbation_length": perturbation_length,
            "direction": direction,
            "type": "gradient"
        }

        # Schedule samples
        positive_parameters = np.copy(self.parameters)
        positive_parameters += direction * perturbation_length
        annotations = deep_merge.merge(annotations, { "type": "positive_gradient" })
        positive_identifier = evaluator.submit(positive_parameters, annotations = annotations)

        negative_parameters = np.copy(self.parameters)
        negative_parameters -= direction * perturbation_length
        annotations = deep_merge.merge(annotations, { "type": "negative_gradient" })
        negative_identifier = evaluator.submit(negative_parameters, annotations = annotations)

        # Wait for gradient run results
        evaluator.wait()

        if self.compute_objective:
            evaluator.clean(objective_identifier)

        positive_objective, positive_state = evaluator.get(positive_identifier)
        evaluator.clean(positive_identifier)

        negative_objective, negative_state = evaluator.get(negative_identifier)
        evaluator.clean(negative_identifier)

        g_k = (positive_objective - negative_objective) / (2.0 * perturbation_length)
        g_k *= direction**-1
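        # Simultaneous-perturbation gradient estimate: the same two runs provide all components,
        # divided elementwise by the perturbation direction.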

        # Update state
        self.parameters -= gradient_length * g_k
Example #23
    def advance(self, evaluator: Evaluator):
        if self.iteration == 0:
            logger.info("Initializing Opdyts")

            self.initial_identifier = evaluator.submit(self.initial_parameters,
                { "iterations": 1 }, { "type": "initial", "transient": True }
            )
            self.initial_objective, self.initial_state = evaluator.get(self.initial_identifier)

        self.iteration += 1
        logger.info("Starting Opdyts iteration %d" % self.iteration)

        # Create new set of candidate parameters
        candidate_parameters = np.zeros((self.candidate_set_size, self.number_of_parameters))

        for c in range(0, self.candidate_set_size, 2):
            direction = self.random.random_sample(size = (self.number_of_parameters,)) * 2.0 - 1.0
            candidate_parameters[c] = self.initial_parameters + direction * self.perturbation_length
            candidate_parameters[c + 1] = self.initial_parameters - direction * self.perturbation_length

        # Find initial candidate states
        candidate_identifiers = []
        candidate_states = np.zeros((self.candidate_set_size, self.number_of_states))
        candidate_deltas = np.zeros((self.candidate_set_size, self.number_of_states))
        candidate_objectives = np.zeros((self.candidate_set_size,))
        candidate_transitions = np.ones((self.candidate_set_size,))

        annotations = {
            "type": "candidate", "v": self.v, "w": self.w,
            "transient": True, "iteration": self.iteration
        }

        for c in range(self.candidate_set_size):
            candidate_annotations = copy.copy(annotations)
            candidate_annotations.update({ "candidate": c })

            candidate_identifiers.append(evaluator.submit(candidate_parameters[c], {
                #"iterations": transition_iterations,
                "restart": self.initial_identifier
            }, candidate_annotations))

        evaluator.wait()

        for c in range(self.candidate_set_size):
            candidate_objectives[c], candidate_states[c] = evaluator.get(candidate_identifiers[c])
            candidate_deltas[c] = candidate_states[c] - self.initial_state

        # Advance candidates
        local_adaptation_transient_performance = []
        local_adaptation_equilibrium_gap = []
        local_adaptation_uniformity_gap = []

        while np.max(candidate_transitions) < self.number_of_transitions:
            # Approximate selection problem
            selection_problem = ApproximateSelectionProblem(self.v, self.w, candidate_deltas, candidate_objectives)
            alpha = selection_problem.solve()
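            # alpha is a probability distribution over the candidates, trading off objective value,
            # equilibrium gap and uniformity gap under the current weights v and w.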

            transient_performance = selection_problem.get_transient_performance(alpha)
            equilibrium_gap = selection_problem.get_equilibrium_gap(alpha)
            uniformity_gap = selection_problem.get_uniformity_gap(alpha)

            local_adaptation_transient_performance.append(transient_performance)
            local_adaptation_equilibrium_gap.append(equilibrium_gap)
            local_adaptation_uniformity_gap.append(uniformity_gap)

            logger.info(
                "Transient performance: %f, Equilibirum gap: %f, Uniformity_gap: %f",
                transient_performance, equilibrium_gap, uniformity_gap)

            cumulative_alpha = np.cumsum(alpha)
            c = np.sum(self.random.random_sample() > cumulative_alpha) # TODO: Not deterministic!
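            # Inverse-transform sampling: choose the next candidate to advance according to alpha.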

            logger.info("Transitioning candidate %d", c)
            candidate_transitions[c] += 1
            transient = candidate_transitions[c] < self.number_of_transitions

            annotations.update({
                "type": "transition",
                "candidate": c, "transient_performance": transient_performance,
                "equilibrium_gap": equilibrium_gap, "uniformity_gap": uniformity_gap,
                "transient": transient
            })

            # Advance selected candidate
            identifier = evaluator.submit(candidate_parameters[c], {
                #"iterations": transition_iterations,
                "restart": candidate_identifiers[c]
            }, annotations)

            new_objective, new_state = evaluator.get(identifier)
            evaluator.clean(candidate_identifiers[c])

            candidate_deltas[c] = new_state - candidate_states[c]
            candidate_states[c], candidate_objectives[c] = new_state, new_objective
            candidate_identifiers[c] = identifier

        index = np.argmax(candidate_transitions)
        logger.info("Solved selection problem with candidate %d", index)

        for c in range(self.candidate_set_size):
            if c != index:
                evaluator.clean(candidate_identifiers[c])

        evaluator.clean(self.initial_identifier)
        self.initial_identifier = candidate_identifiers[index]
        self.initial_state = candidate_states[index]
        self.initial_parameters = candidate_parameters[index]

        self.adaptation_selection_performance.append(candidate_objectives[index])
        self.adaptation_transient_performance.append(np.array(local_adaptation_transient_performance))
        self.adaptation_equilibrium_gap.append(np.array(local_adaptation_equilibrium_gap))
        self.adaptation_uniformity_gap.append(np.array(local_adaptation_uniformity_gap))

        adaptation_problem = AdaptationProblem(self.adaptation_weight, self.adaptation_selection_performance, self.adaptation_transient_performance, self.adaptation_equilibrium_gap, self.adaptation_uniformity_gap)
        self.v, self.w = adaptation_problem.solve()

        logger.info("Solved Adaptation Problem. v = %f, w = %f", self.v, self.w)
Example #24
    def advance(self, evaluator: Evaluator):
        self.iteration += 1
        logger.info("Starting FDSA iteration %d." % self.iteration)

        # Calculate objective
        if self.compute_objective:
            annotations = {"type": "objective"}
            objective_identifier = evaluator.submit(self.parameters,
                                                    annotations=annotations)

        # Update lengths
        gradient_length = self.gradient_factor / (
            self.iteration + self.gradient_offset)**self.gradient_exponent
        perturbation_length = self.perturbation_factor / self.iteration**self.perturbation_exponent
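        # Decreasing step-size and perturbation sequences, analogous to the SPSA gains.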

        annotations = {
            "gradient_length": gradient_length,
            "perturbation_length": perturbation_length,
            "type": "gradient"
        }

        # I) Calculate gradients
        gradient = np.zeros((len(self.parameters), ))
        gradient_information = []

        # Schedule all necessary runs
        for d in range(len(self.parameters)):
            annotations = deep_merge.merge(annotations, {"dimension": d})

            positive_parameters = np.copy(self.parameters)
            positive_parameters[d] += perturbation_length
            annotations = deep_merge.merge(annotations,
                                           {"type": "positive_gradient"})
            positive_identifier = evaluator.submit(positive_parameters,
                                                   annotations=annotations)

            negative_parameters = np.copy(self.parameters)
            negative_parameters[d] -= perturbation_length
            annotations = deep_merge.merge(annotations,
                                           {"type": "negative_gradient"})
            negative_identifier = evaluator.submit(negative_parameters,
                                                   annotations=annotations)

            gradient_information.append(
                (positive_parameters, positive_identifier, negative_parameters,
                 negative_identifier))

        # Wait for gradient run results
        evaluator.wait()

        if self.compute_objective:
            evaluator.clean(objective_identifier)

        for d, item in enumerate(gradient_information):
            positive_parameters, positive_identifier, negative_parameters, negative_identifier = item

            positive_objective, positive_state = evaluator.get(
                positive_identifier)
            evaluator.clean(positive_identifier)

            negative_objective, negative_state = evaluator.get(
                negative_identifier)
            evaluator.clean(negative_identifier)

            gradient[d] = (positive_objective -
                           negative_objective) / (2.0 * perturbation_length)
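            # Central finite-difference estimate of the d-th gradient component.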

        # II) Update state
        self.parameters -= gradient_length * gradient
Example #25
def test_rosenbrock_evaluation():
    simulator = RosenbrockSimulator()

    evaluator = Evaluator(problem = RosenbrockProblem(3), simulator = simulator)
    identifier = evaluator.submit([1, 1, 1])
    assert evaluator.get(identifier)[0] == 0.0

    evaluator = Evaluator(problem = RosenbrockProblem(5), simulator = simulator)
    identifier = evaluator.submit([-1, 1, 1, 1, 1])
    assert evaluator.get(identifier)[0] == 4.0

    evaluator = Evaluator(problem = RosenbrockProblem(4), simulator = simulator)

    identifier1 = evaluator.submit([-1, 1, 1, 1])
    assert evaluator.get(identifier1)[0] == 4.0

    identifier2 = evaluator.submit([-1, 1, 2, 1])
    assert evaluator.get(identifier2)[0] != 4.0
Example #26
    def advance(self, evaluator: Evaluator):
        if self.iteration == 0:
            self.mean = np.copy(self.initial_values).reshape((self.N, 1))

        self.iteration += 1
        logger.info("Starting CMA-ES iteration %d." % self.iteration)

        annotations = {
            "mean": self.mean,
            "covariance": self.C,
            "pc": self.pc,
            "ps": self.ps,
            "sigma": self.sigma
        }

        self.counteval += self.L

        candidate_parameters = self.sigma * np.dot(
            (self.random.normal(size=(self.N, self.L)) *
             self.D[:, np.newaxis]).T, self.B) + self.mean.T
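        # Draw self.L candidates from the current search distribution N(mean, sigma^2 C).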

        candidate_identifiers = [
            evaluator.submit(parameters, annotations=annotations)
            for parameters in candidate_parameters
        ]

        # Wait for samples
        evaluator.wait()

        # Obtain fitness
        candidate_objectives = np.array([
            evaluator.get(identifier)[0]  # We minimize!
            for identifier in candidate_identifiers
        ])

        # Cleanup
        for identifier in candidate_identifiers:
            evaluator.clean(identifier)

        sorter = np.argsort(candidate_objectives)

        candidate_objectives = candidate_objectives[sorter]
        candidate_parameters = candidate_parameters[sorter, :]

        # Update mean
        previous_mean = self.mean
        self.mean = np.sum(candidate_parameters[:self.mu] *
                           self.weights[:, np.newaxis],
                           axis=0).reshape((self.N, 1))
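        # Recombination: the new mean is the weighted average of the mu best candidates.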

        # Update evolution paths
        psa = (1.0 - self.cs) * self.ps
        psb = np.sqrt(self.cs * (2.0 - self.cs) * self.mueff) * np.dot(
            self.invsqrtC, self.mean - previous_mean) / self.sigma
        self.ps = psa + psb

        hsig = la.norm(
            self.ps) / np.sqrt(1.0 -
                               (1.0 - self.cs)**(2.0 * self.counteval / self.L)
                               ) / self.chiN < 1.4 + 2.0 / (self.N + 1.0)
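        # hsig stalls the covariance path update when the step-size path is unusually long.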
        pca = (1.0 - self.cc) * self.pc
        pcb = hsig * np.sqrt(self.cc * (2.0 - self.cc) * self.mueff) * (
            self.mean - previous_mean) / self.sigma
        self.pc = pca + pcb

        # Adapt covariance matrix
        artmp = (1.0 / self.sigma) * (candidate_parameters[:self.mu].T -
                                      previous_mean)

        Ca = (1.0 - self.c1 - self.cmu) * self.C
        Cb = self.c1 * (np.dot(self.pc, self.pc.T) + (not hsig) * self.cc *
                        (2.0 - self.cc) * self.C)
        Cc = self.cmu * np.dot(artmp, np.dot(np.diag(self.weights), artmp.T))
        self.C = Ca + Cb + Cc

        # Adapt step size
        self.sigma = self.sigma * np.exp(
            (self.cs / self.damps) * (la.norm(self.ps) / self.chiN - 1.0))

        if self.counteval - self.eigeneval > self.L / (
                self.c1 + self.cmu) / self.N / 10.0:
            self.eigeneval = self.counteval
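            # The eigendecomposition of C is refreshed only occasionally to limit the cubic cost.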

            self.C = np.triu(self.C) + np.triu(self.C, 1).T
            d, self.B = la.eig(self.C)

            self.D = np.sqrt(d)
            Dm = np.diag(1.0 / np.sqrt(d))

            self.invsqrtC = np.dot(self.B.T, np.dot(Dm, self.B))

        # D holds the square roots of the eigenvalues of C, so this ratio corresponds to a condition number of 1e14 for C.
        if np.max(self.D) > 1e7 * np.min(self.D):
            logger.warning("Condition exceeds 1e14")
Example #27
output_path = "optimization_output.p"

# First, build the project in toy_example with
#  mvn package

simulator = MATSimSimulator(
    working_directory="/home/shoerl/bo/test2",  # Change to an existing empty directory
    class_path="toy_example/target/optimization_toy_example-1.0.0.jar",
    main_class="ch.ethz.matsim.optimization_toy_example.RunToyExample",
    iterations=200,
    java="/home/shoerl/.java/jdk8u222-b10/bin/java")

problem = ToyExampleProblem(0.5)

evaluator = Evaluator(problem=problem, simulator=simulator, parallel=4)

algorithm = CMAES(evaluator=evaluator, initial_step_size=0.1, seed=0)

#algorithm = BatchBayesianOptimization(
#    evaluator = evaluator,
#    batch_size = 4
#)

#algorithm = RandomWalk(
#    evaluator = evaluator
#)

tracker = PickleTracker(output_path)

Loop().run(evaluator=evaluator, algorithm=algorithm, tracker=tracker)
Example #28
simulator = MATSimSimulator(
    working_directory="work",
    class_path="resources/ile_de_france-1.2.0.jar",
    main_class="org.eqasim.ile_de_france.RunSimulation")

analyzer = ParisAnalyzer(threshold=0.05,
                         number_of_bounds=40,
                         minimum_distance=100.0,
                         maximum_distance=40 * 1e3,
                         reference_path=reference_path)

problem = ParisProblem(analyzer,
                       threads=threads_per_simulation,
                       iterations=iterations,
                       config_path=config_path)

evaluator = Evaluator(problem=problem,
                      simulator=simulator,
                      parallel=parallel_simulations)

algorithm = SPSA(evaluator=evaluator,
                 perturbation_factor=0.1,
                 gradient_factor=0.5,
                 perturbation_exponent=0.101,
                 gradient_exponent=0.602,
                 gradient_offset=0)
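# The exponents 0.602 and 0.101 are the values commonly recommended for SPSA gain sequences.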

tracker = PickleTracker(output_path)

Loop().run(evaluator=evaluator, algorithm=algorithm, tracker=tracker)