Example #1
0
def test_MultiRun2():
    """
    Generator of multi-run optimizer tests over random configurations.

    Fresh random configurations are produced until a fixed time budget
    (measured in minutes) has been exhausted.
    """

    # Time budget for the whole test-generation loop.
    max_minutes = 10

    # Timer used to enforce the time budget.
    timer = Timer()

    # Draw fresh random configurations until the budget runs out.
    while timer.less_than(minutes=max_minutes):
        # Random optimizer class.
        opt_class = random.choice([PSO, MOL, DE, LUS, PS])

        # Random search-space dimensionality.
        n_dim = np.random.randint(1, 1000)

        # Random interval between progress print-outs.
        show_every = np.random.randint(0, 250)

        # Random budget of fitness evaluations.
        eval_budget = np.random.randint(1, 2000)

        # Random number of optimization runs.
        run_count = np.random.randint(1, 10)

        # Random length of the fitness trace.
        trace_length = np.random.randint(0, 1000)

        # Random benchmark problem, instantiated with the chosen dimensionality.
        chosen_class = random.choice(Problem.all_benchmark_problems)
        chosen_problem = chosen_class(dim=n_dim)

        # Random choice between parallel and serial fitness evaluation.
        use_parallel = random.choice([True, False])

        # Hand this configuration to the actual test-function.
        yield (_do_test_MultiRun, opt_class, chosen_problem, n_dim,
               eval_budget, show_every, trace_length, use_parallel, run_count)
# NOTE(review): this is a byte-identical duplicate of the test_MultiRun2
# defined earlier in this file; at module level the later definition shadows
# the earlier one. Presumably a copy/paste artifact -- confirm and remove one.
def test_MultiRun2():
    """
    Test-generator for multi-runs of the optimizers using many different configurations.

    This keeps generating random test-configurations for a desired number of minutes.
    """

    # Keep testing configurations for this many minutes.
    max_minutes = 10

    # Start a timer.
    timer = Timer()

    # For the desired number of minutes we select random configurations to test.
    while timer.less_than(minutes=max_minutes):
        # Select an optimizer at random.
        optimizer = random.choice([PSO, MOL, DE, LUS, PS])

        # Search-space dimensionality.
        dim = np.random.randint(1, 1000)

        # Display intervals (0 disables / shows every iteration -- depends on
        # the optimizer's convention; confirm against _do_test_MultiRun).
        display_interval = np.random.randint(0, 250)

        # Max fitness evaluations.
        max_evaluations = np.random.randint(1, 2000)

        # Number of optimization runs.
        num_runs = np.random.randint(1, 10)

        # Fitness-trace-length.
        trace_len = np.random.randint(0, 1000)

        # Take a benchmark problem at random.
        problem_class = random.choice(Problem.all_benchmark_problems)
        problem = problem_class(dim=dim)

        # Either parallel or not.
        parallel = random.choice([True, False])

        # Run the test using this configuration.
        yield _do_test_MultiRun, optimizer, problem, dim, max_evaluations, display_interval, trace_len, parallel, num_runs
Example #3
0
        # Initialization ranges for the three control parameters; the third
        # parameter is initialized in a narrow band around 5.2 - 0.15.
        lower_init=[20 , 0.1 , (5.2-0.152) ]
        upper_init=[90 , 0.2 , (5.2-0.148) ]

        # Wrap the objective in a Problem with 3 control parameters.
        # NOTE(review): lower_bound, upper_bound, func and index are defined
        # outside this excerpt -- verify against the enclosing function.
        problem = Problem(name="CNOT_OPT_"+str(index), dim=3, fitness_min=0.0,
                                        lower_bound=lower_bound, 
                                        upper_bound=upper_bound,
                                        lower_init=lower_init, 
                                        upper_init=upper_init,
                                        func=func)
        
        print('start')
        # Optimizer class and its control parameters
        # (presumably population size and DE-style rates -- confirm
        # against the SuSSADE documentation).
        optimizer = SuSSADE
        parameters = [20, 0.3, 0.9 , 0.9 ]

        # Start a timer.
        timer = Timer()

        # Perform a single optimization run using the optimizer
        # where the fitness is evaluated in parallel.
        # NOTE(review): g comes from an enclosing scope -- confirm.
        result = optimizer(parallel=True, problem=problem,
                            max_evaluations=500,
                            display_interval=1,
                            trace_len=500,
                            StdTol = 0.0001,
                            directoryname  = 'resultSuSSADE_'+str(g))

        # Stop the timer.
        timer.stop()

        print()  # Newline.
        print("Time-Usage: {0}".format(timer))
    def fitness(self, x, limit=np.inf):
        """
        Calculate the meta-fitness measure: how well the optimizer
        performs with the given control parameters x.
        (Merged with the original Chinese docstring: "the best solution
        obtained via the optimizer under the initial parameters x".)

        :param x:
            Control parameters for the optimization method.

        :param limit:
            Abort the calculation of the meta-fitness when it
            becomes greater than this limit.

        :return:
            The meta-fitness measures how well the optimizer
            performed on the list of problems and using the given
            control parameters.
        """
        # NOTE: the default was np.Infinity, an alias removed in NumPy 2.0;
        # np.inf is the same value and works on all NumPy versions.

        # Start a timer so we can later print the time-usage.
        timer = Timer()

        # Convenience variables.
        optimizer = self.optimizer
        max_evaluations = self.max_evaluations

        # Initialize the meta-fitness to zero.
        # The meta-fitness is just the (adjusted) sum of the
        # fitness obtained on multiple optimization runs.
        fitness_sum = 0.0

        # For each problem do the following.
        # Note that we iterate over self.problem_ranks which
        # is sorted so that we first try and optimize the problems
        # that are most likely to cause fitness_sum to exceed the
        # limit so the calculation can be aborted. This is called
        # Pre-Emptive Fitness Evaluation and greatly saves run-time.
        for problem_rank in self.problem_ranks:
            # Convenience variables.
            problem = problem_rank.problem
            weight = problem_rank.weight

            # Initialize the fitness sum for this problem.
            fitness_sum_inner = 0.0

            # Perform a number of optimization runs on the problem.
            for i in range(self.num_runs):
                # Perform one optimization run on the given problem
                # using the given control parameters.
                result = optimizer(problem=problem,
                                   max_evaluations=max_evaluations,
                                   parameters=x)

                # Keep track of the best-found solution for this problem.
                problem_rank.update_best(best=result.best,
                                         best_fitness=result.best_fitness)

                # Adjust the fitness so it is non-negative.
                fitness_adjusted = result.best_fitness - problem.fitness_min

                # Print warning if adjusted fitness is negative. Due to tiny rounding
                # errors this might occur without being an issue. But if the adjusted
                # fitness is negative and large, then problem.fitness_min must be corrected
                # in order for Pre-Emptive Fitness Evaluation to work properly.
                # It is better to print a warning than to use an assert which would
                # stop the execution.
                if fitness_adjusted < 0.0:
                    msg = "WARNING: MetaFitness.py, fitness_adjusted is negative {0:.4e} on problem {1}"
                    print(msg.format(fitness_adjusted, problem.name))

                # Accumulate the fitness sum for the inner-loop.
                fitness_sum_inner += weight * fitness_adjusted

                # Accumulate the overall fitness sum.
                fitness_sum += weight * fitness_adjusted

                # If the fitness sum exceeds the limit then break from the inner for-loop.
                if fitness_sum > limit:
                    break

            # Update the problem's ranking with the fitness-sum.
            # This is the key used in sorting below.
            problem_rank.fitness_sum = fitness_sum_inner

            # If the fitness sum exceeds the limit then break from the outer for-loop.
            if fitness_sum > limit:
                break

        # Sort the problems using the fitness_sum as the key in descending order.
        # This increases the probability that the for-loops above can be
        # aborted pre-emptively the next time the meta-fitness is calculated.
        self.problem_ranks = sorted(self.problem_ranks,
                                    key=lambda rank: rank.fitness_sum,
                                    reverse=True)

        # Stop the timer.
        timer.stop()

        # Print various results so we can follow the progress.
        print("- Parameters tried: {0}".format(x))
        msg = "- Meta-Fitness: {0:.4e}, Improvement: {1}"
        improvement = fitness_sum < limit
        print(msg.format(fitness_sum, improvement))
        print("- Time-Usage: {0}".format(timer))

        return fitness_sum
Example #5
0
    def fitness(self, x, limit=np.inf):
        """
        Calculate the meta-fitness measure.

        :param x:
            Control parameters for the optimization method.

        :param limit:
            Abort the calculation of the meta-fitness when it
            becomes greater than this limit.

        :return:
            The meta-fitness measures how well the optimizer
            performed on the list of problems and using the given
            control parameters.
        """
        # NOTE: the default was np.Infinity, an alias removed in NumPy 2.0;
        # np.inf is the same value and works on all NumPy versions.

        # Start a timer so we can later print the time-usage.
        timer = Timer()

        # Convenience variables.
        optimizer = self.optimizer
        max_evaluations = self.max_evaluations

        # Initialize the meta-fitness to zero.
        # The meta-fitness is just the (adjusted) sum of the
        # fitness obtained on multiple optimization runs.
        fitness_sum = 0.0

        # For each problem do the following.
        # Note that we iterate over self.problem_ranks which
        # is sorted so that we first try and optimize the problems
        # that are most likely to cause fitness_sum to exceed the
        # limit so the calculation can be aborted. This is called
        # Pre-Emptive Fitness Evaluation and greatly saves run-time.
        for problem_rank in self.problem_ranks:
            # Convenience variables.
            problem = problem_rank.problem
            weight = problem_rank.weight

            # Initialize the fitness sum for this problem.
            fitness_sum_inner = 0.0

            # Perform a number of optimization runs on the problem.
            for i in range(self.num_runs):
                # Perform one optimization run on the given problem
                # using the given control parameters.
                result = optimizer(problem=problem,
                                   max_evaluations=max_evaluations,
                                   parameters=x)

                # Keep track of the best-found solution for this problem.
                problem_rank.update_best(best=result.best,
                                         best_fitness=result.best_fitness)

                # Adjust the fitness so it is non-negative.
                fitness_adjusted = result.best_fitness - problem.fitness_min

                # Print warning if adjusted fitness is negative. Due to tiny rounding
                # errors this might occur without being an issue. But if the adjusted
                # fitness is negative and large, then problem.fitness_min must be corrected
                # in order for Pre-Emptive Fitness Evaluation to work properly.
                # It is better to print a warning than to use an assert which would
                # stop the execution.
                if fitness_adjusted < 0.0:
                    msg = "WARNING: MetaFitness.py, fitness_adjusted is negative {0:.4e} on problem {1}"
                    print(msg.format(fitness_adjusted, problem.name))

                # Accumulate the fitness sum for the inner-loop.
                fitness_sum_inner += weight * fitness_adjusted

                # Accumulate the overall fitness sum.
                fitness_sum += weight * fitness_adjusted

                # If the fitness sum exceeds the limit then break from the inner for-loop.
                if fitness_sum > limit:
                    break

            # Update the problem's ranking with the fitness-sum.
            # This is the key used in sorting below.
            problem_rank.fitness_sum = fitness_sum_inner

            # If the fitness sum exceeds the limit then break from the outer for-loop.
            if fitness_sum > limit:
                break

        # Sort the problems using the fitness_sum as the key in descending order.
        # This increases the probability that the for-loops above can be
        # aborted pre-emptively the next time the meta-fitness is calculated.
        self.problem_ranks = sorted(self.problem_ranks,
                                    key=lambda rank: rank.fitness_sum,
                                    reverse=True)

        # Stop the timer.
        timer.stop()

        # Print various results so we can follow the progress.
        print("- Parameters tried: {0}".format(x))
        msg = "- Meta-Fitness: {0:.4e}, Improvement: {1}"
        improvement = fitness_sum < limit
        print(msg.format(fitness_sum, improvement))
        print("- Time-Usage: {0}".format(timer))

        return fitness_sum
    # Report the configuration of this meta-optimization run.
    # NOTE(review): dim, max_evaluations, problems, weights, num_runs,
    # meta_num_runs, meta_max_evaluations, log_capacity, parallel and
    # optimizer are defined outside this excerpt -- verify in context.
    print("Dimensionality: {0}".format(dim))
    print("Iterations per run: {0}".format(max_evaluations))

    print("Problems:")
    for problem in problems:
        print(" - {0}".format(problem.name))

    print()  # Newline.
    print("Warning: This may be very slow!")
    print()  # Newline.

    ########################################################################
    # Perform meta-optimization.

    # Start a timer.
    timer = Timer()

    # Perform the meta-optimization.
    # NOTE(review): the work appears to happen inside the MetaOptimize
    # constructor, since it is the only call between timer start and stop --
    # confirm against the MetaOptimize definition.
    meta = MetaOptimize(optimizer=optimizer, problems=problems, weights=weights,
                        num_runs=num_runs, max_evaluations=max_evaluations,
                        meta_num_runs=meta_num_runs, meta_max_evaluations=meta_max_evaluations,
                        log_capacity=log_capacity, parallel=parallel)

    # Stop the timer.
    timer.stop()

    ########################################################################
    # Print results.

    # Print the time-usage.
    print("-------------------------------------------")