Beispiel #1
0
def _make_prep_configs(pso_names, swarm_sizes, benchmark_names,
                       dimensionalities, nums_iterations):
    print t.now(), 'determining prep configurations...'
    print t.now(), '(processing', len(pso_names), 'pso names,', len(
        swarm_sizes), 'swarm sizes,', len(
            benchmark_names), 'benchmark names, and', len(dimensionalities)

    global _prep_configs

    for pso_name in pso_names:
        for swarm_size in swarm_sizes:
            for benchmark_name in benchmark_names:
                for dimensionality in dimensionalities:
                    benchmark = benchmarks.get(benchmark_name)
                    for dimensionality in dimensionalities:
                        if not benchmark.is_dimensionality_valid(
                                dimensionality):
                            continue
                        for num_iterations in nums_iterations:
                            for experiment_num in range(0, 30):
                                prep_config = (pso_name, swarm_size,
                                               benchmark_name, dimensionality,
                                               num_iterations, experiment_num)
                                if not prep_config in _prep_configs:
                                    _prep_configs.append(prep_config)
Beispiel #2
0
def _make_configs(pso_names, swarm_sizes, benchmark_names, dimensionalities,
                  nums_iterations):
    print t.now(), 'determining configurations...'
    print t.now(), '(processing', len(pso_names), 'pso names,', len(
        swarm_sizes), 'swarm sizes,', len(
            benchmark_names), 'benchmark names,', len(
                dimensionalities), 'dimensionalities, and', len(
                    nums_iterations), 'numbers of iterations)'

    global _configs

    for pso_1_i in range(0, len(pso_names) - 1):
        pso_1_name = pso_names[pso_1_i]
        for pso_2_i in range(pso_1_i + 1, len(pso_names)):
            pso_2_name = pso_names[pso_2_i]
            if pso_1_name == pso_2_name:
                continue
            for swarm_size in swarm_sizes:
                for benchmark_name in benchmark_names:
                    benchmark = benchmarks.get(benchmark_name)
                    for dimensionality in dimensionalities:
                        if not benchmark.is_dimensionality_valid(
                                dimensionality):
                            continue
                        for num_iterations in nums_iterations:
                            config = (pso_1_name, pso_2_name, swarm_size,
                                      benchmark_name, dimensionality,
                                      num_iterations)
                            if not config in _configs:
                                _configs.append(config)
    print t.now(), 'done.'
Beispiel #3
0
def _num_valid_configurations():
    """Return the total number of valid experiment configurations.

    A configuration is valid when its benchmark supports the requested
    dimensionality; the total is the count of valid benchmark/dimensionality
    pairs multiplied by the sizes of the remaining configuration axes.

    Reads the module-level pso_names, swarm_sizes, benchmark_names,
    dimensionalities, num_iterations and experiment_nums collections.
    """
    # Count valid benchmark/dimensionality combinations.  The benchmark
    # lookup is hoisted out of the inner loop: it only depends on
    # benchmark_name, not on the dimensionality.
    num_valid_benchmark_dimensionality_configs = 0
    for benchmark_name in benchmark_names:
        benchmark = benchmarks.get(benchmark_name)
        for dimensionality in dimensionalities:
            if benchmark.is_dimensionality_valid(dimensionality):
                num_valid_benchmark_dimensionality_configs += 1
    # Scale by the remaining (always-valid) configuration axes.
    return len(pso_names) * len(
        swarm_sizes) * num_valid_benchmark_dimensionality_configs * len(
            num_iterations) * len(experiment_nums)
Beispiel #4
0
def _calculate(pso_name, pso_population_size, benchmark_name, benchmark_dimensions, verbose=False, num_iterations=None):
    # Set up the PSO for a single diversity experiment
    pso = psos.get(pso_name)
    benchmark = benchmarks.get(benchmark_name)
    pso.function = benchmark.function
    pso.lower_bound = benchmark.min(0)
    pso.upper_bound = benchmark.max(0)
    pso.num_dimensions = benchmark_dimensions
    pso.init_pso_defaults()
    pso.init_swarm(size=pso_population_size)

    # Initialize the x and y values for diversity measurements taken
    diversity_ys = []

    # For each iteration of the PSO algorithm, take a diversity measurement.
    if verbose:
        prev_perc = -1
        # perc_increment = 1
        prev_time = time.time()
        time_increment = 1

    if num_iterations is None:
        num_iterations = _max_iterations
    for i in range(0, num_iterations):
        if verbose:
            perc = (i*100)/num_iterations
            if perc > prev_perc and time.time() > (prev_time + time_increment):
                prev_perc = perc
                prev_time = time.time()
                print t.now(), '     -', perc, 'percent complete...'

        xs = pso.positions
        diversity_measurement = diversity.avg_distance_around_swarm_centre(xs)

        assert not np.isnan(diversity_measurement), 'Diversity result is NaN!'

        diversity_ys.append(diversity_measurement)
        pso.iterate()

    if verbose:
        print t.now(), '     - 100 percent complete.'

    # Return the diversity measurements
    return diversity_ys
Beispiel #5
0
def _get_configuration(index):
    """Return the index-th valid configuration tuple, or None if index is
    out of range.

    Configurations are enumerated in a fixed nested-loop order over the
    module-level pso_names, swarm_sizes, benchmark_names, dimensionalities,
    num_iterations and experiment_nums collections; combinations whose
    dimensionality is invalid for the benchmark are not counted.
    """
    i = 0
    for pso_name in pso_names:
        for swarm_size in swarm_sizes:
            for benchmark_name in benchmark_names:
                # Hoisted: the benchmark lookup and the validity check were
                # recomputed inside the two innermost loops although they
                # only depend on benchmark_name and dimensionality.  The
                # enumeration order and count are unchanged.
                benchmark = benchmarks.get(benchmark_name)
                for dimensionality in dimensionalities:
                    if not benchmark.is_dimensionality_valid(dimensionality):
                        continue
                    for iterations in num_iterations:
                        for experiment_num in experiment_nums:
                            if i == index:
                                return (pso_name, swarm_size,
                                        benchmark_name, dimensionality,
                                        iterations, experiment_num)
                            i += 1
    # index exceeded the number of valid configurations (was an implicit
    # None in the original).
    return None
Beispiel #6
0
# Which dimensionalities to use the benchmark functions with.
# dimensionalities = range(1, 10)
# dimensionalities = [5, 25, 50, 100, 500, 1000]
dimensionalities = [5]

# The number of independent experiments per config
experiments = range(0, 30)

# Build one DiversityConfiguration per valid combination of PSO name,
# swarm size, benchmark, dimensionality and experiment number.
configurations = []
for pso_name in pso_names:
    for swarm_size in swarm_sizes:
        for benchmark_name in benchmark_names:
            # Hoisted: the benchmark lookup and the dimensionality check
            # only depend on the benchmark/dimensionality pair, so they no
            # longer run once per experiment.
            benchmark = benchmarks.get(benchmark_name)
            for dimensionality in dimensionalities:
                if not benchmark.is_dimensionality_valid(dimensionality):
                    continue
                for experiment in experiments:
                    configuration = DiversityConfiguration(
                        pso_name, swarm_size, benchmark_name, dimensionality, experiment)
                    configurations.append(configuration)


def _process_configurations(configurations, verbose=False):
    for (i, configuration) in enumerate(configurations):
        print "result", colored('{}'.format(
            i), 'cyan', attrs=['bold']), "of", colored('{}'.format(len(configurations)), 'cyan'),
        print '(', configuration.pso_name, configuration.swarm_size, configuration.benchmark_name, configuration.dimensionality, configuration.experiment, ")"
        diversities.get(
            configuration.pso_name,
            configuration.swarm_size,
            configuration.benchmark_name,
Beispiel #7
0
        self.server = subprocess.Popen(['python', 'server.py'])

    def __exit__(self, type, value, traceback):
        """Context-manager exit hook: shut down the spawned proxy server.

        Idempotent -- self.server is cleared after termination, so a second
        exit is a no-op.  Returns None, so with-body exceptions propagate.
        """
        self.log("Terminating proxy server.")
        server = self.server
        if server:
            server.terminate()
            self.server = None

if __name__ == '__main__':
    utils.log_banner("EXECUTE")
    log = utils.make_log('EXECUTE')

    # Run everything inside the proxy-server context manager so the server
    # is terminated even if a benchmark run fails.
    with AutoSpawnServer(log):
        log("Running each benchmark for each config...")

        # Resolve benchmark names (from command-line options) to benchmark
        # objects; note this rebinds the module-level `benchmarks` name.
        benchmarks = [benchmarks.get(name) for name in options.benchmarks]
        for benchmark in benchmarks:
            try:
                log("now trying to run benchmark %s..." % benchmark.suite)
            except:
                # Best-effort logging only -- presumably some benchmarks
                # lack a `suite` attribute (TODO confirm); consider
                # narrowing this bare except to AttributeError.
                pass

            for engine_path in engines:
                # Each engine path carries an info file describing how to
                # execute that engine.
                info = engineInfo.read_info_file(engine_path)
                executor = executors.make_executor(info)

                for config_name in options.configs:
                    config = configs.getConfig(config_name, info)
                    # Skip configs that opt out for this engine.
                    if config.omit():
                        continue
    def __exit__(self, type, value, traceback):
        """Terminate the spawned proxy server on context exit.

        Idempotent: self.server is cleared after termination so a repeated
        call is a no-op.  Implicitly returns None, so exceptions raised in
        the with-body are not suppressed.
        """
        self.log("Terminating proxy server.")
        if self.server:
            self.server.terminate()
            self.server = None


if __name__ == '__main__':
    utils.log_banner("EXECUTE")
    log = utils.make_log('EXECUTE')

    # The proxy server is auto-spawned for the duration of the runs and
    # terminated on exit, even when a benchmark run raises.
    with AutoSpawnServer(log):
        log("Running each benchmark for each config...")

        # Resolve benchmark names (from command-line options) to benchmark
        # objects; note this rebinds the module-level `benchmarks` name.
        benchmarks = [benchmarks.get(name) for name in options.benchmarks]
        for benchmark in benchmarks:
            try:
                log("now trying to run benchmark %s..." % benchmark.suite)
            except:
                # Best-effort logging only -- presumably some benchmarks
                # lack a `suite` attribute (TODO confirm); consider
                # narrowing this bare except to AttributeError.
                pass

            for engine_path in engines:
                # Each engine path carries an info file describing how to
                # execute that engine.
                info = engineInfo.read_info_file(engine_path)
                executor = executors.make_executor(info)

                for config_name in options.configs:
                    config = configs.getConfig(config_name, info)
                    # Skip configs that opt out for this engine.
                    if config.omit():
                        continue