Example 1
def learn_on_noise(optimizers, dims, points, var, steps, cv, show_plot=True):
    """
    Evaluates the optimizers on one specific variance.

    Parameters
    ----------
    optimizers : list of optimizers
        The optimizers to compare.
    dims : int
        The number of dimensions on which to run.
    points : int
        The number of points per dimension for the noise generation.
    var : float
        The variance of the Gaussian with which the noise is smoothed.
    steps : int
        The number of steps for the optimizers to run.
    cv : int
        The number of cross-validation folds, for comparison purposes.
    show_plot : bool, optional
        Whether to show the plot in the end. Default is True.
    """
    LAss = ValidationLabAssistant(cv=cv)
    exp_names = []
    noise_gen = gen_noise(dims, points, random_state=42)
    for opt in optimizers:
        exp_names.append(str(opt) + "_" + str(var))
        learn_one_var(opt, LAss, dims, points, var, steps * cv, noise_gen,
                      exp_names[-1])
    for e in exp_names:
        print(str(e) + ": " +
              str(sorted([x.result for x in LAss.get_best_candidates(e)])))

    if show_plot:
        LAss.plot_validation(exp_names, plot_min=0, plot_max=1)
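
learn_on_noise (and evaluate_performance in Example 4) relies on a gen_noise helper that is not shown in this listing. A minimal sketch of what it might look like, assuming the noise is a uniform random grid smoothed with a Gaussian filter of the given variance; the helper name and call shape are taken from the call sites, while the smoothing choice is an assumption based on the docstrings:

import numpy as np
from scipy.ndimage import gaussian_filter


def gen_noise(dims, points, random_state=None):
    """Hypothetical sketch: build a noise source with `points` samples per
    dimension; returns a closure mapping a variance to a smoothed landscape."""
    rng = np.random.RandomState(random_state)
    raw = rng.uniform(size=(points,) * dims)

    def smoothed(var):
        # gaussian_filter expects a standard deviation, hence the sqrt.
        landscape = gaussian_filter(raw, sigma=np.sqrt(var))
        # Rescale to [0, 1] so results stay comparable across variances.
        landscape -= landscape.min()
        return landscape / landscape.max()

    return smoothed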
Example 2
def demo_branin(steps=50, random_steps=10, cv=5, disable_auto_plot=False):
    """Compare random search and Bayesian optimization on Branin."""
    logging.basicConfig(level=logging.DEBUG)

    # Produce the same random state across runs.
    random_state_rs = check_random_state(42)

    param_defs = {
        "x": MinMaxNumericParamDef(-5, 10),
        "y": MinMaxNumericParamDef(0, 15)
    }

    LAss = ValidationLabAssistant(cv=cv, disable_auto_plot=disable_auto_plot)
    LAss.init_experiment("RandomSearch",
                         "RandomSearch",
                         param_defs,
                         minimization=True,
                         optimizer_arguments={"random_state": random_state_rs})

    optimizers = ["RandomSearch", "BayOpt_EI"]
    optimizer_arguments = [{"random_state": random_state_rs},
                           {"initial_random_runs": random_steps}]

    # Evaluate random search for random_steps steps; these serve as the
    # initialization for the Bayesian optimizer.
    for _ in range(random_steps * cv):
        single_branin_evaluation_step(LAss, 'RandomSearch')

    # Now clone the experiment for each remaining optimizer.
    for j in range(1, len(optimizers)):
        LAss.clone_experiments_by_name(
            exp_name=optimizers[0],
            new_exp_name=optimizers[j],
            optimizer="BayOpt",
            optimizer_arguments=optimizer_arguments[j])
    logger.info("Random Initialization Phase Finished.")
    logger.info("Competitive Evaluation Phase starts now.")

    # From there on, advance all models step by step.
    for i in range(random_steps * cv, steps * cv):
        for optimizer in optimizers:
            single_branin_evaluation_step(LAss, optimizer)

    # Plot the results comparatively:
    # first a very detailed plot containing all evaluated points...
    LAss.plot_result_per_step(optimizers)

    # ...then a plot showing only the evolution of the best result.
    LAss.plot_validation(optimizers)
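
The single_branin_evaluation_step helper is not shown in this listing. The demo optimizes the Branin-Hoo function over x in [-5, 10] and y in [0, 15], matching the param_defs above; below is a minimal sketch of the objective and of what such a step might look like, assuming apsis's get_next_candidate/update loop (the exact update signature may differ between apsis versions):

import math


def branin(x, y):
    """Branin-Hoo function; global minimum of about 0.3979 at three points."""
    b = 5.1 / (4.0 * math.pi ** 2)
    c = 5.0 / math.pi
    t = 1.0 / (8.0 * math.pi)
    return ((y - b * x ** 2 + c * x - 6.0) ** 2
            + 10.0 * (1.0 - t) * math.cos(x) + 10.0)


def single_branin_evaluation_step(LAss, exp_name):
    """Hypothetical sketch: fetch a candidate, evaluate Branin, report back."""
    candidate = LAss.get_next_candidate(exp_name)
    candidate.result = branin(candidate.params["x"], candidate.params["y"])
    # Assumed signature: update(experiment_name, status, candidate).
    LAss.update(exp_name, "finished", candidate)
    return candidate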
Example 3
def demo_on_MNIST(random_steps, steps, cv=1):
    """
    Demos the learning on MNIST.

    Parameters
    ----------
    random_steps : int
        The number of initial random steps. This is shared by all optimizers.
    steps : int
        The number of total steps. Must be greater than random_steps.
    cv : int
        The number of cross-validation folds for evaluation.
    """
    X, Z, VX, VZ, TX, TZ, image_dims = load_MNIST()

    # The current parameter definitions are the more successful ones; the
    # alternatives can be compared by swapping in the commented-out lines.
    param_defs = {
        #"step_rate": MinMaxNumericParamDef(0, 1),
        "step_rate": AsymptoticNumericParamDef(0, 1),
        #"momentum": MinMaxNumericParamDef(0, 1),
        "momentum": AsymptoticNumericParamDef(1, 0),
        'decay': MinMaxNumericParamDef(0, 1),
        "c_wd": MinMaxNumericParamDef(0, 1)
    }

    LAss = ValidationLabAssistant(cv=cv)
    experiments = ["random_mnist", "bay_mnist_ei_L-BFGS-B"]  #, "bay_mnist_ei_rand"]
    LAss.init_experiment("random_mnist", "RandomSearch", param_defs,
                         minimization=True)

    global start_time
    start_time = time.time()
    # First, the random steps.
    for i in range(random_steps * cv):
        print("%s\tBeginning with random initialization. Step %i/%i" %
              (str(time.time() - start_time), i + 1, random_steps * cv))
        do_evaluation(LAss, "random_mnist", X, Z, VX, VZ)

    LAss.clone_experiments_by_name(
        exp_name=experiments[0],
        new_exp_name=experiments[1],
        optimizer="BayOpt",
        optimizer_arguments={"initial_random_runs": random_steps})

    # Learn the remaining steps.
    for i in range((steps - random_steps) * cv):
        for opt in experiments:
            print("%s\tBeginning with %s, step %i/%i" %
                  (time.time() - start_time, opt,
                   i + 1 + random_steps * cv, steps * cv))
            do_evaluation(LAss, opt, X, Z, VX, VZ)

    for opt in experiments:
        logger.info("Best %s score:  %s" %
                    (opt, [x.result for x in LAss.get_best_candidates(opt)]))
        print("Best %s score:  %s" %
              (opt, [x.result for x in LAss.get_best_candidates(opt)]))
    LAss.plot_result_per_step(experiments, title="Neural Network on MNIST.",
                              plot_min=0.0, plot_max=1.0)
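
A minimal way to run this demo, assuming logging is imported and the module-level logger, start_time, and do_evaluation are defined as in the surrounding file; the step counts are illustrative:

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Ten shared random steps, fifty total steps, a single fold.
    demo_on_MNIST(random_steps=10, steps=50, cv=1)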
Example 4
def evaluate_performance(optimizers, dims, points, min_var, max_var, step_var,
                         steps, cv):
    """
    Evaluates the performance of the optimizers over several variances.

    Parameters
    ----------
    optimizers : list of Optimizers
        The optimizers to evaluate.
    dims : int
        The dimension count over which to evaluate.
    points : int
        The number of points per dimension for the noise.
    min_var : float
        The minimum variance to evaluate.
    max_var : float
        The maximum variance to evaluate.
    step_var : float
        The distance between two variance values to evaluate.
    steps : int
        The number of steps for each of the optimizers to learn.
    cv : int
        The number of cross-validation folds, for comparison purposes.
    """
    LAss = ValidationLabAssistant(cv=cv)
    performances = {}
    variances = {}
    noise_gen = gen_noise(dims, points, random_state=42)
    for o in optimizers:
        print(o)
        performances[o], variances[o] = evaluate_one_opt(
            o, LAss, dims, points, min_var, max_var, step_var, steps * cv,
            noise_gen)
    var_space = np.arange(min_var, max_var, step_var)
    plt.xlabel("smoothing gaussian variance")
    plt.ylabel("best result after %i steps" % steps)
    plt.title("Performance of the optimization depending on the smoothness.")
    plt.ylim((0, 1))
    for o in optimizers:
        plt.errorbar(var_space,
                     performances[o],
                     label=str(o),
                     yerr=variances[o],
                     linewidth=2.0,
                     capthick=4,
                     capsize=8.0)  #, fmt='o'
    plt.legend(loc='lower right')
    # block=True must be passed by keyword on modern matplotlib.
    plt.show(block=True)

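A sketch of how this sweep might be invoked, assuming apsis's optimizer names; all values are illustrative:

# Compare random search against Bayesian optimization on 2-D noise
# landscapes smoothed with variances 0.1, 0.2, ..., 1.9.
evaluate_performance(optimizers=["RandomSearch", "BayOpt"],
                     dims=2, points=100,
                     min_var=0.1, max_var=2.0, step_var=0.1,
                     steps=20, cv=5)
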
def evaluate_on_mnist(optimizer_names,
                      optimizers,
                      param_defs,
                      optimizer_args,
                      regressor,
                      cv=10,
                      percentage=1.,
                      steps=50,
                      random_steps=10,
                      plot_at_end=True,
                      disable_auto_plot=False):
    """
    This evaluates the (pre-initialized) optimizers on a percentage of mnist.

    Parameters
    ----------
    LAss : LabAssistant
        The LabAssistant containing all of the experiments.
    optimizers : list of strings
        The optimizer names used. The first has to be the random optimizer.
    percentage : float, between 0 and 1, optional
        The percentage of MNIST on which we want to evaluate.
    """
    #TODO add args to comment.
    # We first use the sklearn function to get the MNIST dataset. If cached,
    # we use the cached variant.
    if percentage < 0 or percentage > 1:
        raise ValueError("Percentage has to be between 0 and 1, is %f" %
                         percentage)

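    # Note: fetch_mldata was removed in scikit-learn 0.22; on modern versions
    # fetch_openml('mnist_784', version=1) is the usual replacement.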
    mnist = fetch_mldata('MNIST original',
                         data_home=os.environ.get('MNIST_DATA_CACHE',
                                                  '~/.mnist-cache'))

    mnist.data, mnist.target = shuffle(mnist.data,
                                       mnist.target,
                                       random_state=0)

    mnist.data = mnist.data[:int(percentage * mnist.data.shape[0])]
    mnist.target = mnist.target[:int(percentage * mnist.target.shape[0])]

    # Train/test split.
    mnist_data_train, mnist_data_test, mnist_target_train, mnist_target_test = \
        train_test_split(mnist.data, mnist.target, test_size=0.1, random_state=42)

    LAss = ValidationLabAssistant(cv=cv, disable_auto_plot=disable_auto_plot)

    # Create the random-search experiment; it is assumed to be the first one.
    LAss.init_experiment(optimizer_names[0],
                         optimizers[0],
                         param_defs,
                         minimization=False,
                         optimizer_arguments=optimizer_args[0])

    # Evaluate random search for random_steps steps; these serve as the
    # initialization for the Bayesian optimizers.
    for i in range(random_steps * cv):
        print("random step " + str(i))
        do_evaluation(LAss, optimizer_names[0], regressor, mnist_data_train,
                      mnist_data_test, mnist_target_train, mnist_target_test)

    # Now clone the experiment for each remaining optimizer.
    for j in range(1, len(optimizers)):
        LAss.clone_experiments_by_name(exp_name=optimizer_names[0],
                                       new_exp_name=optimizer_names[j],
                                       optimizer=optimizers[j],
                                       optimizer_arguments=optimizer_args[j])

    logger.info("Random Initialization Phase Finished.")
    logger.info("Competitive Evaluation Phase starts now.")

    # From there on, advance all models step by step.
    for i in range(random_steps * cv, steps * cv):
        logger.info("Doing step %i" % i)
        for n in optimizer_names:
            print("normal step " + str(i) + " for " + str(n))
            do_evaluation(LAss, n, regressor, mnist_data_train,
                          mnist_data_test, mnist_target_train,
                          mnist_target_test)

    # Finally, report the best results.
    for n in optimizer_names:
        logger.info("Best %s score:  %s" %
                    (n, LAss.get_best_candidate(n).result))

    if plot_at_end:
        LAss.plot_result_per_step(optimizer_names)
        LAss.plot_validation(optimizer_names)
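
The MNIST demos call a do_evaluation helper that is not shown in this listing. A minimal sketch for the sklearn-based variant above, assuming apsis's get_next_candidate/update loop (the exact update signature may differ between apsis versions):

from sklearn.base import clone


def do_evaluation(LAss, exp_name, regressor, data_train, data_test,
                  target_train, target_test):
    """Hypothetical sketch: fetch a candidate, train, score, report back."""
    candidate = LAss.get_next_candidate(exp_name)
    # Configure a fresh copy of the regressor with the proposed parameters.
    model = clone(regressor).set_params(**candidate.params)
    model.fit(data_train, target_train)
    # Held-out accuracy; the experiment was created with minimization=False.
    candidate.result = model.score(data_test, target_test)
    # Assumed signature: update(experiment_name, status, candidate).
    LAss.update(exp_name, "finished", candidate)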