Example #1
# Assumed imports: the apsis module paths are taken from that project's layout;
# load_MNIST and do_evaluation are helpers defined elsewhere in the example
# module, and logger is stood in with a module-level logger.
import time
import logging

from apsis.assistants.lab_assistant import ValidationLabAssistant
from apsis.models.parameter_definition import (AsymptoticNumericParamDef,
                                               MinMaxNumericParamDef)

logger = logging.getLogger(__name__)


def demo_on_MNIST(random_steps, steps, cv=1):
    """
    Demonstrates learning on MNIST.

    Parameters
    ----------
    random_steps : int
        The number of initial random steps. This is shared by all optimizers.
    steps : int
        The number of total steps. Must be greater than random_steps.
    cv : int
        The number of cross-validations for evaluation.
    """
    X, Z, VX, VZ, TX, TZ, image_dims = load_MNIST()

    # The parameter definitions below proved more successful in practice;
    # swap in the commented-out alternatives to compare.
    param_defs = {
        #"step_rate": MinMaxNumericParamDef(0, 1),
        "step_rate": AsymptoticNumericParamDef(0, 1),
        #"momentum": MinMaxNumericParamDef(0, 1),
        "momentum": AsymptoticNumericParamDef(1, 0),
        "decay": MinMaxNumericParamDef(0, 1),
        "c_wd": MinMaxNumericParamDef(0, 1)
    }

    LAss = ValidationLabAssistant(cv=cv)
    experiments = ["random_mnist", "bay_mnist_ei_L-BFGS-B"]#, "bay_mnist_ei_rand"]
    LAss.init_experiment("random_mnist", "RandomSearch", param_defs, minimization=True)

    global start_time
    start_time = time.time()
    # First, run the shared random-initialization steps.
    for i in range(random_steps * cv):
        print("%s\tBeginning with random initialization. Step %i/%i"
              % (time.time() - start_time, i + 1, random_steps * cv))
        do_evaluation(LAss, "random_mnist", X, Z, VX, VZ)

    LAss.clone_experiments_by_name(exp_name=experiments[0],
                                   new_exp_name=experiments[1],
                                   optimizer="BayOpt",
                                   optimizer_arguments={"initial_random_runs": random_steps})

    # Then learn the remaining steps with each optimizer.
    for i in range((steps - random_steps) * cv):
        for opt in experiments:
            print("%s\tBeginning with %s, step %i/%i"
                  % (time.time() - start_time, opt,
                     i + 1 + random_steps * cv, steps * cv))
            do_evaluation(LAss, opt, X, Z, VX, VZ)

    for opt in experiments:
        logger.info("Best %s score:  %s" %(opt, [x.result for x in LAss.get_best_candidates(opt)]))
        print("Best %s score:  %s" %(opt, [x.result for x in LAss.get_best_candidates(opt)]))
    LAss.plot_result_per_step(experiments, title="Neural Network on MNIST.", plot_min=0.0, plot_max=1.0)
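
A minimal sketch of invoking the demo; the step counts and cv value here are illustrative assumptions, not from the source:

if __name__ == "__main__":
    # 10 shared random steps out of 30 total, evaluated with 5 cross-validations.
    demo_on_MNIST(random_steps=10, steps=30, cv=5)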
Example #2
# Assumed import, as in Example #1; gen_noise and learn_one_var are helpers
# defined elsewhere in the example module.
from apsis.assistants.lab_assistant import ValidationLabAssistant


def learn_on_noise(optimizers, dims, points, var, steps, cv, show_plot=True):
    """
    Evaluates the optimizers on one specific variance.

    Parameters
    ----------
    optimizers : list of optimizers
        The optimizers to compare.
    dims : int
        The number of dimensions on which to run.
    points : int
        The number of points per dimension for the noise generation.
    var : float
        The variance of the Gaussian used to smooth the noise.
    steps : int
        The number of steps for each optimizer to run.
    cv : int
        The number of cross-validations for comparison purposes.
    show_plot : bool, optional
        Whether to show the plot in the end. Default is True.
    """
    LAss = ValidationLabAssistant(cv=cv)
    exp_names = []
    noise_gen = gen_noise(dims, points, random_state=42)
    for opt in optimizers:
        exp_names.append(str(opt) + "_" + str(var))
        learn_one_var(opt, LAss, dims, points, var, steps * cv, noise_gen,
                      exp_names[-1])
    for e in exp_names:
        # sorted() assumed here; the original called an unimported sort()
        # (likely numpy's).
        print("%s: %s"
              % (e, sorted([x.result for x in LAss.get_best_candidates(e)])))

    if show_plot:
        LAss.plot_validation(exp_names, plot_min=0, plot_max=1)
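
A minimal sketch of calling this comparison; the optimizer names follow Example #1, and the dimension, point, step, and cv counts are illustrative assumptions:

# Compare random search against Bayesian optimization on smoothed 2-D noise.
learn_on_noise(["RandomSearch", "BayOpt"], dims=2, points=100,
               var=0.1, steps=20, cv=5, show_plot=True)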