Example 1
def demo_on_MNIST(random_steps, steps, cv=1):
    """
    Demos hyperparameter optimization on MNIST.

    Parameters
    ----------
    random_steps : int
        The number of initial random steps. This is shared by all optimizers.
    steps : int
        The number of total steps. Must be greater than random_steps.

    cv : int
        The number of cross-validation folds used for evaluation.
    """
    X, Z, VX, VZ, TX, TZ, image_dims = load_MNIST()

    # The active parameter definitions are the more successful ones; to
    # compare, swap in the commented-out alternatives.
    param_defs = {
        #"step_rate": MinMaxNumericParamDef(0, 1),
        "step_rate": AsymptoticNumericParamDef(0, 1),
        #"momentum": MinMaxNumericParamDef(0, 1),
        "momentum": AsymptoticNumericParamDef(1, 0),
        'decay': MinMaxNumericParamDef(0, 1),
        "c_wd": MinMaxNumericParamDef(0, 1)
    }

    LAss = ValidationLabAssistant(cv=cv)
    experiments = ["random_mnist", "bay_mnist_ei_L-BFGS-B"]#, "bay_mnist_ei_rand"]
    LAss.init_experiment("random_mnist", "RandomSearch", param_defs, minimization=True)

    global start_time
    start_time = time.time()
    # First, the random steps.
    for i in range(random_steps * cv):
        print("%s\tBeginning with random initialization. Step %i/%i" %
              (str(time.time() - start_time), i + 1, random_steps * cv))
        do_evaluation(LAss, "random_mnist", X, Z, VX, VZ)

    LAss.clone_experiments_by_name(exp_name=experiments[0],
                                   new_exp_name=experiments[1],
                                   optimizer="BayOpt",
                                   optimizer_arguments={"initial_random_runs": random_steps})

    # Learn the remaining steps.
    for i in range((steps - random_steps) * cv):
        for opt in experiments:
            print("%s\tBeginning with %s, step %i/%i" %
                  (time.time() - start_time, opt, i + 1 + random_steps * cv,
                   steps * cv))
            do_evaluation(LAss, opt, X, Z, VX, VZ)

    for opt in experiments:
        logger.info("Best %s score:  %s" %
                    (opt, [x.result for x in LAss.get_best_candidates(opt)]))
        print("Best %s score:  %s" %
              (opt, [x.result for x in LAss.get_best_candidates(opt)]))
    LAss.plot_result_per_step(experiments,
                              title="Neural Network on MNIST.",
                              plot_min=0.0, plot_max=1.0)
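# `load_MNIST` and `do_evaluation` are defined elsewhere in the demo module.
# Below is a minimal sketch of the contract the loop above assumes:
# `train_nn_once` is a hypothetical helper, and only the
# `get_next_candidate`/`update` calls are apsis API.
def do_evaluation(LAss, experiment_name, X, Z, VX, VZ):
    # Ask the experiment's optimizer for the next hyperparameter candidate.
    candidate = LAss.get_next_candidate(experiment_name)
    # Hypothetical helper: train a network with these hyperparameters on
    # (X, Z) and return the error on the validation set (VX, VZ).
    candidate.result = train_nn_once(candidate.params, X, Z, VX, VZ)
    # Report the finished candidate back so the optimizer can learn from it.
    LAss.update(experiment_name, candidate)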
Example 2
def demo_branin(steps=50, random_steps=10, cv=5, disable_auto_plot=False):
    logging.basicConfig(level=logging.DEBUG)

    # Fix the random state so runs are reproducible.
    random_state_rs = check_random_state(42)

    param_defs = {
        "x": MinMaxNumericParamDef(-5, 10),
        "y": MinMaxNumericParamDef(0, 15)
    }

    LAss = ValidationLabAssistant(cv=cv, disable_auto_plot=disable_auto_plot)
    LAss.init_experiment("RandomSearch",
                         "RandomSearch",
                         param_defs,
                         minimization=True,
                         optimizer_arguments={"random_state": random_state_rs})

    optimizers = ["RandomSearch", "BayOpt_EI"]
    optimizer_arguments = [{
        "random_state": random_state_rs
    }, {
        "initial_random_runs": random_steps
    }]

    # Evaluate random search for random_steps steps; these evaluations later
    # seed the Bayesian optimizer.
    for i in range(random_steps * cv):
        single_branin_evaluation_step(LAss, 'RandomSearch')

    # Now clone the experiment for each remaining optimizer.
    for j in range(1, len(optimizers)):
        LAss.clone_experiments_by_name(
            exp_name=optimizers[0],
            new_exp_name=optimizers[j],
            optimizer="BayOpt",
            optimizer_arguments=optimizer_arguments[j])
    logger.info("Random Initialization Phase Finished.")
    logger.info("Competitive Evaluation Phase starts now.")

    # From here on, advance every optimizer one step at a time.
    for i in range(random_steps * cv, steps * cv):
        for optimizer in optimizers:
            single_branin_evaluation_step(LAss, optimizer)

    # Plot results comparatively.
    # A very detailed plot containing all evaluated points:
    LAss.plot_result_per_step(optimizers)

    # A plot showing only the evaluation of the best result:
    LAss.plot_validation(optimizers)
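# `single_branin_evaluation_step` is defined elsewhere in the demo. A minimal
# sketch, assuming the apsis `get_next_candidate`/`update` API and the
# standard Branin test function, whose usual domain (x in [-5, 10],
# y in [0, 15]) matches the param_defs above:
import math

def branin_func(x, y):
    # Branin with the common constants a=1, b=5.1/(4*pi^2), c=5/pi, r=6,
    # s=10, t=1/(8*pi); it has three global minima of value ~0.398.
    return ((y - (5.1 / (4 * math.pi ** 2)) * x ** 2
             + (5 / math.pi) * x - 6) ** 2
            + 10 * (1 - 1 / (8 * math.pi)) * math.cos(x) + 10)

def single_branin_evaluation_step(LAss, experiment_name):
    # Fetch the next candidate, evaluate Branin on its parameters, and
    # report the result back to the lab assistant.
    to_eval = LAss.get_next_candidate(experiment_name)
    to_eval.result = branin_func(to_eval.params["x"], to_eval.params["y"])
    LAss.update(experiment_name, to_eval)
    return to_eval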
Example 3
def evaluate_on_mnist(optimizer_names,
                      optimizers,
                      param_defs,
                      optimizer_args,
                      regressor,
                      cv=10,
                      percentage=1.,
                      steps=50,
                      random_steps=10,
                      plot_at_end=True,
                      disable_auto_plot=False):
    """
    This evaluates the (pre-initialized) optimizers on a percentage of mnist.

    Parameters
    ----------
    LAss : LabAssistant
        The LabAssistant containing all of the experiments.
    optimizers : list of strings
        The optimizer names used. The first has to be the random optimizer.
    percentage : float, between 0 and 1, optional
        The percentage of MNIST on which we want to evaluate.
    """
    #TODO add args to comment.
    #We first use the sklearn function to get the MNIST dataset. If cached,
    #we use the cached variant.
    if percentage < 0 or percentage > 1:
        raise ValueError("Percentage has to be between 0 and 1, is %f" %
                         percentage)

    # NOTE: fetch_mldata was removed from modern scikit-learn;
    # fetch_openml('mnist_784') is the current replacement.
    mnist = fetch_mldata('MNIST original',
                         data_home=os.environ.get('MNIST_DATA_CACHE',
                                                  '~/.mnist-cache'))

    mnist.data, mnist.target = shuffle(mnist.data,
                                       mnist.target,
                                       random_state=0)

    mnist.data = mnist.data[:int(percentage * mnist.data.shape[0])]
    mnist.target = mnist.target[:int(percentage * mnist.target.shape[0])]

    # Train/test split.
    mnist_data_train, mnist_data_test, mnist_target_train, mnist_target_test = \
        train_test_split(mnist.data, mnist.target, test_size=0.1, random_state=42)

    LAss = ValidationLabAssistant(cv=cv, disable_auto_plot=disable_auto_plot)

    # Create the random-search experiment; it is assumed to be first in both lists.
    LAss.init_experiment(optimizer_names[0],
                         optimizers[0],
                         param_defs,
                         minimization=False,
                         optimizer_arguments=optimizer_args[0])

    # Evaluate random search for random_steps steps; these evaluations later
    # seed the Bayesian optimizers.
    for i in range(random_steps * cv):
        print("random step " + str(i))
        do_evaluation(LAss, optimizer_names[0], regressor, mnist_data_train,
                      mnist_data_test, mnist_target_train, mnist_target_test)

    # Now clone the experiment for each remaining optimizer.
    for j in range(1, len(optimizers)):
        LAss.clone_experiments_by_name(exp_name=optimizer_names[0],
                                       new_exp_name=optimizer_names[j],
                                       optimizer=optimizers[j],
                                       optimizer_arguments=optimizer_args[j])

    logger.info("Random Initialization Phase Finished.")
    logger.info("Competitive Evaluation Phase starts now.")

    # From here on, advance every optimizer one step at a time.
    for i in range(random_steps * cv, steps * cv):
        logger.info("Doing step %i" % i)
        for n in optimizer_names:
            print("normal step " + str(i) + " for " + str(n))
            do_evaluation(LAss, n, regressor, mnist_data_train,
                          mnist_data_test, mnist_target_train,
                          mnist_target_test)

    # Finally, report the best score per optimizer.
    for n in optimizer_names:
        logger.info("Best %s score:  %s" %
                    (n, LAss.get_best_candidate(n).result))

    if plot_at_end:
        LAss.plot_result_per_step(optimizer_names)
        LAss.plot_validation(optimizer_names)
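# `do_evaluation` is defined elsewhere in the demo module. A minimal sketch of
# what the loops above assume, under the assumption that candidate.params maps
# directly onto the estimator's hyperparameters; clone/set_params are standard
# scikit-learn, but the overall shape is conjectural.
from sklearn.base import clone

def do_evaluation(LAss, experiment_name, regressor,
                  X_train, X_test, y_train, y_test):
    # Get the next hyperparameter candidate from the experiment's optimizer.
    candidate = LAss.get_next_candidate(experiment_name)
    # Fit a fresh copy of the estimator with the proposed hyperparameters.
    estimator = clone(regressor)
    estimator.set_params(**candidate.params)
    estimator.fit(X_train, y_train)
    # The experiments run with minimization=False, so the raw score is the
    # quantity to maximize.
    candidate.result = estimator.score(X_test, y_test)
    LAss.update(experiment_name, candidate)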
Example 4
def demo_on_MNIST(random_steps, steps, cv=1):
    """
    Demos hyperparameter optimization on MNIST.

    Parameters
    ----------
    random_steps : int
        The number of initial random steps. This is shared by all optimizers.
    steps : int
        The number of total steps. Must be greater than random_steps.

    cv : int
        The number of cross-validation folds used for evaluation.
    """
    X, Z, VX, VZ, TX, TZ, image_dims = load_MNIST()

    # The active parameter definitions are the more successful ones; to
    # compare, swap in the commented-out alternatives.
    param_defs = {
        #"step_rate": MinMaxNumericParamDef(0, 1),
        "step_rate": AsymptoticNumericParamDef(0, 1),
        #"momentum": MinMaxNumericParamDef(0, 1),
        "momentum": AsymptoticNumericParamDef(1, 0),
        'decay': MinMaxNumericParamDef(0, 1),
        "c_wd": MinMaxNumericParamDef(0, 1)
    }

    LAss = ValidationLabAssistant(cv=cv)
    experiments = ["random_mnist",
                   "bay_mnist_ei_L-BFGS-B"]  #, "bay_mnist_ei_rand"]
    LAss.init_experiment("random_mnist",
                         "RandomSearch",
                         param_defs,
                         minimization=True)

    global start_time
    start_time = time.time()
    # First, the random steps.
    for i in range(random_steps * cv):
        print("%s\tBeginning with random initialization. Step %i/%i" %
              (str(time.time() - start_time), i + 1, random_steps * cv))
        do_evaluation(LAss, "random_mnist", X, Z, VX, VZ)

    LAss.clone_experiments_by_name(
        exp_name=experiments[0],
        new_exp_name=experiments[1],
        optimizer="BayOpt",
        optimizer_arguments={"initial_random_runs": random_steps})

    # Learn the remaining steps.
    for i in range((steps - random_steps) * cv):
        for opt in experiments:
            print("%s\tBeginning with %s, step %i/%i" %
                  (time.time() - start_time, opt, i + 1 + random_steps * cv,
                   steps * cv))
            do_evaluation(LAss, opt, X, Z, VX, VZ)

    for opt in experiments:
        logger.info("Best %s score:  %s" %
                    (opt, [x.result for x in LAss.get_best_candidates(opt)]))
        print("Best %s score:  %s" %
              (opt, [x.result for x in LAss.get_best_candidates(opt)]))
    LAss.plot_result_per_step(experiments,
                              title="Neural Network on MNIST.",
                              plot_min=0.0,
                              plot_max=1.0)
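# A hypothetical invocation of this demo; the argument values are
# illustrative only.
if __name__ == "__main__":
    # 10 shared random steps, 30 total steps, a single cross-validation fold.
    demo_on_MNIST(random_steps=10, steps=30, cv=1)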
Example 5
def evaluate_on_mnist(
    optimizer_names,
    optimizers,
    param_defs,
    optimizer_args,
    regressor,
    cv=10,
    percentage=1.0,
    steps=50,
    random_steps=10,
    plot_at_end=True,
    disable_auto_plot=False,
):
    """
    This evaluates the (pre-initialized) optimizers on a percentage of mnist.

    Parameters
    ----------
    LAss : LabAssistant
        The LabAssistant containing all of the experiments.
    optimizers : list of strings
        The optimizer names used. The first has to be the random optimizer.
    percentage : float, between 0 and 1, optional
        The percentage of MNIST on which we want to evaluate.
    """
    # TODO add args to comment.
    # We first use the sklearn function to get the MNIST dataset. If cached,
    # we use the cached variant.
    if percentage < 0 or percentage > 1:
        raise ValueError("Percentage has to be between 0 and 1, is %f" % percentage)

    mnist = fetch_mldata("MNIST original", data_home=os.environ.get("MNIST_DATA_CACHE", "~/.mnist-cache"))

    mnist.data, mnist.target = shuffle(mnist.data, mnist.target, random_state=0)

    mnist.data = mnist.data[: int(percentage * mnist.data.shape[0])]
    mnist.target = mnist.target[: int(percentage * mnist.target.shape[0])]

    # Train/test split.
    mnist_data_train, mnist_data_test, mnist_target_train, mnist_target_test = train_test_split(
        mnist.data, mnist.target, test_size=0.1, random_state=42
    )

    LAss = ValidationLabAssistant(cv=cv, disable_auto_plot=disable_auto_plot)

    # Create the random-search experiment; it is assumed to be first in both lists.
    LAss.init_experiment(
        optimizer_names[0], optimizers[0], param_defs, minimization=False, optimizer_arguments=optimizer_args[0]
    )

    # Evaluate random search for random_steps steps; these evaluations later
    # seed the Bayesian optimizers.
    for i in range(random_steps * cv):
        print("random step " + str(i))
        do_evaluation(
            LAss,
            optimizer_names[0],
            regressor,
            mnist_data_train,
            mnist_data_test,
            mnist_target_train,
            mnist_target_test,
        )

    # Now clone the experiment for each remaining optimizer.
    for j in range(1, len(optimizers)):
        LAss.clone_experiments_by_name(
            exp_name=optimizer_names[0],
            new_exp_name=optimizer_names[j],
            optimizer=optimizers[j],
            optimizer_arguments=optimizer_args[j],
        )

    logger.info("Random Initialization Phase Finished.")
    logger.info("Competitive Evaluation Phase starts now.")

    # From here on, advance every optimizer one step at a time.
    for i in range(random_steps * cv, steps * cv):
        logger.info("Doing step %i" % i)
        for n in optimizer_names:
            print("normal step " + str(i) + " for " + str(n))
            do_evaluation(LAss, n, regressor, mnist_data_train, mnist_data_test, mnist_target_train, mnist_target_test)

    # Finally, report the best score per optimizer.
    for n in optimizer_names:
        logger.info("Best %s score:  %s" % (n, LAss.get_best_candidate(n).result))

    if plot_at_end:
        LAss.plot_result_per_step(optimizer_names)
        LAss.plot_validation(optimizer_names)
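# A hypothetical invocation with made-up experiment names;
# min_weight_fraction_leaf is chosen only because it is a float-valued
# RandomForest hyperparameter in [0, 0.5], which fits MinMaxNumericParamDef.
from sklearn.ensemble import RandomForestRegressor

rf_param_defs = {
    "min_weight_fraction_leaf": MinMaxNumericParamDef(0, 0.5),
}

evaluate_on_mnist(
    optimizer_names=["RandomSearch", "BayOpt_EI"],  # first must be random
    optimizers=["RandomSearch", "BayOpt"],
    param_defs=rf_param_defs,
    optimizer_args=[{}, {"initial_random_runs": 10}],
    regressor=RandomForestRegressor(),
    cv=3,
    percentage=0.1,  # use 10% of MNIST to keep the demo fast
    steps=20,
    random_steps=10,
)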