Example #1
    def compute(self, config, budget, **kwargs):
        """
        Evaluates the configuration on the defined budget and returns the validation performance.

        Args:
            config: dictionary containing the sampled configurations by the optimizer
            budget: (float) amount of time/epochs/etc. the model can use to train
        Returns:
            dictionary with mandatory fields:
                'loss' (scalar)
                'info' (dict)
        """
        lr = config["learning_rate"]
        num_filters = config["num_filters"]
        batch_size = config["batch_size"]
        filter_size = config["filter_size"]

        epochs = budget

        # train and validate your convolutional neural networks here
        learning_curve, model, validation_loss = train_and_validate(
            self.x_train, self.y_train, self.x_valid, self.y_valid, epochs, lr,
            num_filters, batch_size, filter_size)
        # We minimize so make sure you return the validation error here
        validation_error = np.float64(validation_loss)

        return {
            'loss': validation_error,  # a mandatory field to run hyperband
            'info': {}  # can be used for any user-defined information - also mandatory
        }
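For context, a minimal driver sketch of how a worker like this is typically run with hpbandster. `MyWorker` is a stand-in name for the class defined above, which is assumed to also expose the usual static get_configspace() (as in Example #7); budgets map to epochs here, matching `epochs = budget` in compute().

import hpbandster.core.nameserver as hpns
from hpbandster.optimizers import RandomSearch

# start a local nameserver so the optimizer and the worker can find each other
NS = hpns.NameServer(run_id='example', host='127.0.0.1', port=None)
NS.start()

# MyWorker is a hypothetical name for the worker class defined above
worker = MyWorker(nameserver='127.0.0.1', run_id='example')
worker.run(background=True)

rs = RandomSearch(configspace=MyWorker.get_configspace(),
                  run_id='example', nameserver='127.0.0.1',
                  min_budget=1, max_budget=12)
res = rs.run(n_iterations=8)

rs.shutdown(shutdown_workers=True)
NS.shutdown()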
Example #2
from itertools import product

import numpy as np


def try_hyperparameters(learning_rates, kernel_sizes):
    # assumes `cnn`, the data splits and the remaining hyperparameters
    # (epochs, num_filters, batch_size) are defined at module level
    learning_curves = []
    for lr, ks in product(learning_rates, kernel_sizes):
        lc, _ = cnn.train_and_validate(x_train, y_train, x_valid, y_valid,
                                       epochs, lr, num_filters, batch_size, ks)
        learning_curves.append(lc)

    # transpose: rows index epochs, columns index (lr, kernel size) pairs
    return np.array(learning_curves).T
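A hypothetical call to make the return shape concrete, assuming each learning curve holds one validation value per epoch: with three learning rates and two kernel sizes, six networks are trained and the result has shape (epochs, 6), one column per configuration.

learning_rates = [1e-1, 1e-2, 1e-3]
kernel_sizes = [3, 5]
curves = try_hyperparameters(learning_rates, kernel_sizes)
# curves[:, i] is the learning curve of the i-th (lr, kernel size)
# pair, in itertools.product order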
Example #3
    def compute(self, config, budget, **kwargs):
        """
        Evaluates the configuration on the defined budget and returns the validation performance.

        Args:
            config: dictionary containing the sampled configurations by the optimizer
            budget: (float) amount of time/epochs/etc. the model can use to train
        Returns:
            dictionary with mandatory fields:
                'loss' (scalar)
                'info' (dict)
        """
        lr = config["lr"]
        num_filters = config["num_filters"]
        batch_size = config["batch_size"]
        filter_size = config["filter_size"]

        epochs = budget

        # train and validate the convolutional neural network
        learning_curve, model = train_and_validate(
            self.x_train,
            self.y_train,
            self.x_valid,
            self.y_valid,
            epochs,
            lr,
            num_filters,
            batch_size,
            filter_size,
            run_id="lr.{}.bs.{}.nf.{}.fs.{}".format(
                lr, batch_size, num_filters, filter_size
            ),
        )

        val_categorical_accuracy = test(self.x_valid, self.y_valid, model)[1]
        val_error = 1 - val_categorical_accuracy
        # we minimize, so make sure to return the validation error here
        return {
            "loss": val_error,  # this is the a mandatory field to run hyperband
            "info": {
                "validation accuracy": val_categorical_accuracy
            },  # can be used for any user-defined information - also mandatory
        }
Example #4
# Plot the best observed validation error over time
all_runs = res.get_all_runs()
print('All runs:')
print(all_runs)
# Let's plot the observed losses, grouped by budget.
import hpbandster.visualization as hpvis

hpvis.losses_over_time(all_runs)

import matplotlib.pyplot as plt
plt.savefig("random_search.png")

# retrain the best configuration (the incumbent) and compute the test error
# get the best hyperparameters
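# id2config and incumbent are assumed to come from the optimizer's Result
# object once the run has finished (cf. Example #6)
id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()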
learning_rate = id2config[incumbent]['config']['learning_rate']
batch_size = id2config[incumbent]['config']['batch_size']
filter_size = id2config[incumbent]['config']['filter_size']
num_filters = id2config[incumbent]['config']['num_filters']

# load data
x_train, y_train, x_valid, y_valid, x_test, y_test = mnist('../exercise1/data')
# train the best model again
learning_curve, model, _ = train_and_validate(x_train, y_train, x_valid,
                                              y_valid, 12, learning_rate,
                                              num_filters, batch_size,
                                              filter_size)

test_err = test(x_test, y_test, model)
print(test_err)
Example #5
if __name__ == "__main__":

    # get data
    train_x, train_y, val_x, val_y, test_x, test_y = mnist('../exercise1/data')

    # 2: test the effect of the learning rate
    learning_rates = [0.1, 0.01, 0.001, 0.0001]

    learning_curves_lr = []
    for lr in learning_rates:
        learning_curve, model, _ = train_and_validate(train_x,
                                                      train_y,
                                                      val_x,
                                                      val_y,
                                                      num_epochs=NUM_EPOCHS,
                                                      lr=lr,
                                                      num_filters=16,
                                                      kernel_size=3,
                                                      batch_size=64)
        learning_curves_lr.append(learning_curve)

        # compute the network's test error
        test_err = test(test_x, test_y, model)
        print('Final test error: %.4f' % test_err)
        print()

    # plot all learning curves in one figure (validation performance after
    # each epoch)
    plot_learning_curves(
        learning_curves_lr, {
Example #6
# Each optimizer returns a hpbandster.core.result.Result object.
# It holds information about the optimization run like the incumbent (=best) configuration.
# For further details about the Result object, see its documentation.
# Here we simply print out the best config and some statistics about the performed runs.
id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
incumb_conf = id2config[incumbent]['config']

print('Best found configuration:', incumb_conf)

# Plot the best observed validation error over time
all_runs = res.get_all_runs()
# Let's plot the observed losses, grouped by budget.
import hpbandster.visualization as hpvis

hpvis.losses_over_time(all_runs)

import matplotlib.pyplot as plt
plt.savefig("rs.pdf", format='pdf')

# retrain the best configuration (the incumbent) and compute the test error

x_train, y_train, x_valid, y_valid, x_test, y_test = cnn.mnist()
# retrain on the combined training and validation data for the full budget
lcurve, incumbent_model = cnn.train_and_validate(
    np.vstack((x_train, x_valid)), np.vstack((y_train, y_valid)), None, None,
    args.budget, incumb_conf['learning_rate'], incumb_conf['num_filters'],
    incumb_conf['batch_size'],
    (incumb_conf['filter_size'], incumb_conf['filter_size']))

test_error = cnn.test(x_test, y_test, incumbent_model)
Example #7
            dictionary with mandatory fields:
                'loss' (scalar)
                'info' (dict)
        """
        lr = config["learning_rate"]
        num_filters = config["num_filters"]
        batch_size = config["batch_size"]
        filter_size = config["filter_size"]

        epochs = budget

        # train and validate the convolutional neural network
        curve, _ = train_and_validate(self.x_train, self.y_train, self.x_valid,
                                      self.y_valid, epochs, lr, num_filters,
                                      batch_size, filter_size=filter_size)
        # validation error of the last epoch (we minimize, so this is what we return)
        validation_error = 1 - curve[-1][1]

        return {
            'loss': validation_error,  # a mandatory field to run hyperband
            'info': {}  # can be used for any user-defined information - also mandatory
        }

    @staticmethod
    def get_configspace():
        cs = CS.ConfigurationSpace()

        lr = CSH.UniformFloatHyperparameter('learning_rate', lower=1e-4, upper=1e-1, default_value=1e-2, log=True)
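        # the snippet is cut off here; a plausible completion, with ranges that
        # are assumptions chosen to match the hyperparameters read in compute()
        batch_size = CSH.UniformIntegerHyperparameter('batch_size', lower=16, upper=128, default_value=64, log=True)
        num_filters = CSH.UniformIntegerHyperparameter('num_filters', lower=8, upper=64, default_value=16, log=True)
        filter_size = CSH.CategoricalHyperparameter('filter_size', [3, 5])
        cs.add_hyperparameters([lr, batch_size, num_filters, filter_size])
        return cs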