# assumed imports for this snippet: pc is taken to be the standard pickle
# module, and the solver/task import paths mirror the later examples
import pickle as pc

from bbob.evaluation import parallel_evaluate
from bbob.tracks.ampgo import Sphere_7_ri
from bbob.wrappers.spearmint_minimize import spearmint_minimize

res_name = 'r.bin'  # hypothetical cache file name


def run():
    r = parallel_evaluate(
        solvers=[
            spearmint_minimize,
        ],
        task_subset=Sphere_7_ri,  # a single task; set to None to evaluate on all tasks
        n_reps=1,
        eval_kwargs={'n_calls': 6},
        joblib_kwargs={
            'n_jobs': 1,
            'verbose': 1000
        })
    # it is a good idea to cache results
    pc.dump(r, open(res_name, 'wb'))
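
Loading the cached results back is symmetric; a minimal sketch, assuming,
as above, that pc is the pickle module and res_name names the cache file:

# reload previously cached evaluation results
with open(res_name, 'rb') as f:
    r = pc.load(f)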
Example #2
    # note: this method is an excerpt from a larger class; pandas (pd),
    # AlgoWrapper, and csv_path are assumed to be available in the
    # surrounding module.
    def evaluate(self, fnc, params=None, mode="train"):
        # pick the task partition for the given mode (e.g. "train")
        partition = self.partitions[mode]

        # wrap the solver with the given hyperparameters, if any
        if params is not None:
            wrapper = AlgoWrapper(fnc, params)
        else:
            wrapper = fnc

        r = parallel_evaluate(
            solvers=[wrapper],
            task_subset=partition,  # set to None to evaluate on all tasks
            n_reps=128,  # number of repetitions
            eval_kwargs={'n_calls': 64},
            joblib_kwargs={
                'n_jobs': -1,
                'verbose': 10
            })

        p = calculate_metrics(r)  # returns a pandas DataFrame

        # load the DataFrame with existing results
        df = pd.read_csv(csv_path)

        # get the names of the functions that were actually used;
        # these functions are assumed to be present in the loaded
        # csv as well.
        names = [f.__name__ for f in partition]
        df = df.set_index('Unnamed: 0')  # the unnamed first column holds task names

        # select only the rows for tasks in the partition
        df = df.loc[names]

        # insert the newly computed results
        df[fnc.__name__] = p[fnc.__name__]

        # drop index for proper compatibility with get_average_ranking function
        df = df.reset_index()

        rankings = get_average_ranking(df)
        thisranking = rankings[fnc.__name__]

        # a lower ranking means a more performant algorithm, so the
        # objective is the ranking, normalized by the number of ranked solvers
        obj = thisranking / len(rankings)

        print('rankings:', rankings)
        print('objective:', obj)

        return obj
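
Since evaluate returns a scalar to be minimized, it can drive a
hyperparameter search over params. A minimal sketch, assuming a
hypothetical benchmark instance of the class above, skopt's gp_minimize,
and a purely illustrative one-dimensional search space:

from skopt import gp_minimize
from skopt.space import Real

from ga import ga_minimize  # the solver whose parameters are tuned


def objective(params):
    # score ga_minimize with the candidate parameters on the train partition
    return benchmark.evaluate(ga_minimize, params=params, mode="train")


result = gp_minimize(
    objective,
    dimensions=[Real(0.0, 1.0, name='mutation_rate')],  # hypothetical space
    n_calls=32)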
Example #3
# assumed imports: pc is taken to be the standard pickle module, and the
# solvers come from skopt, as in the other examples on this page
import pickle as pc

from bbob.evaluation import parallel_evaluate
from skopt import gp_minimize, forest_minimize, dummy_minimize


def run():
    r = parallel_evaluate(
        solvers=[
            gp_minimize,
            forest_minimize,
            dummy_minimize,
        ],
        task_subset=None,  # automatically selects all tasks
        n_reps=128,
        eval_kwargs={'n_calls': 64},
        joblib_kwargs={
            'n_jobs': -1,
            'verbose': 10
        })
    # it is a good idea to cache results
    pc.dump(r, open('r.bin', 'wb'))
"""
Example of running the benchmarks locally
on skopt and other software.
"""

from bbob.evaluation import parallel_evaluate, plot_results, calculate_metrics
from skopt import forest_minimize
from bbob.wrappers.gpyopt_minimize import gpyopt_minimize
from bbob.wrappers.hyperopt_minimize import hyperopt_minimize

from bbob.tracks.ampgo import Hartmann3_3_ri, Ackley_3_1_r

r = parallel_evaluate(
    solvers=[forest_minimize, gpyopt_minimize, hyperopt_minimize],
    task_subset=[Hartmann3_3_ri,
                 Ackley_3_1_r],  # set to None to evaluate on all tasks
    n_reps=2,  # number of repetitions
    eval_kwargs={'n_calls': 10},
    joblib_kwargs={
        'n_jobs': -1,
        'verbose': 10
    })

p = calculate_metrics(r)  # returns a pandas DataFrame
p.to_csv('data.csv')
plot_results(r)
Example #5
"""This script can be used to compare different 
algorithms between each other."""

# helper functions for evaluation of genetic algo
from bbob.evaluation import parallel_evaluate, plot_results, calculate_metrics, get_average_ranking
from bbob.tracks import ampgo

from ga import ga_minimize, rs_minimize
from skopt import dummy_minimize

r = parallel_evaluate(
    solvers=[dummy_minimize, rs_minimize],
    task_subset=ampgo,  # set to None to evaluate on all tasks
    n_reps=8,  # number of repetitions
    eval_kwargs={'n_calls': 64},
    joblib_kwargs={
        'n_jobs': 1,
        'verbose': 10
    })

p = calculate_metrics(r)  # returns a pandas DataFrame
p.to_csv('data.csv')
print(get_average_ranking('data.csv'))
#plot_results(r)
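
Note that ga_minimize is imported above but never evaluated; a hypothetical
extension of the same call that adds the genetic algorithm to the comparison:

r = parallel_evaluate(
    solvers=[dummy_minimize, rs_minimize, ga_minimize],
    task_subset=ampgo,
    n_reps=8,
    eval_kwargs={'n_calls': 64},
    joblib_kwargs={
        'n_jobs': 1,
        'verbose': 10
    })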
Example #6
import sys

from skopt import gp_minimize
from bbob.evaluation import parallel_evaluate, calculate_metrics
from bbob.tracks import ampgo
from bbob.wrappers.gpyopt_minimize import gpyopt_minimize
from bbob.wrappers.hyperopt_minimize import hyperopt_minimize

methods = [gp_minimize, gpyopt_minimize, hyperopt_minimize]

# include the solvers that only run under a particular Python major version
if sys.version_info[0] == 3:
    from bbob.wrappers.smac_minimize import smac_minimize
    methods.append(smac_minimize)

if sys.version_info[0] == 2:
    from bbob.wrappers.spearmint_minimize import spearmint_minimize
    methods.append(spearmint_minimize)

r = parallel_evaluate(solvers=methods,
                      task_subset=[ampgo.Ackley_3_1_r],
                      n_reps=2,
                      joblib_kwargs={
                          'verbose': 10,
                          'n_jobs': 1
                      },
                      eval_kwargs={'n_calls': 10})

#plot_results(r)
m = calculate_metrics(r)
m.to_csv('data.csv')
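
To compare the solvers from the saved metrics, the csv can be reloaded with
pandas; a minimal sketch, assuming the layout from Example #2 (task names in
the unnamed first column, one column per solver) and that smaller metric
values are better:

import pandas as pd

df = pd.read_csv('data.csv')
df = df.set_index('Unnamed: 0')  # task names, as in Example #2

# rank the solvers per task (1 = best) and average across tasks;
# pass ascending=False to rank() if larger values are better
avg_rank = df.rank(axis=1).mean(axis=0)
print(avg_rank.sort_values())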