Example #1
# minos imports (module paths as used in the minos examples; verify against
# your installed version). get_reuters_dataset, build_layout and the other
# helpers referenced below are defined elsewhere in the example file.
from minos.experiment.experiment import Experiment
from minos.experiment.ga import run_ga_search_experiment
from minos.experiment.training import Training
from minos.model.model import Objective, Optimizer, Metric
from minos.train.utils import CpuEnvironment


def search_model(experiment_label, steps, max_words=1000, batch_size=32):
    """ This is where we put everything together.
    We get the dataset, build the Training and Experiment objects, and run the experiment.
    The experiment's logs are generated in ~/minos/experiment_label.
    We use CpuEnvironment to run the experiment on the CPU with 2 parallel processes.
    We could use GpuEnvironment instead to run on GPUs, specifying which GPUs
    to use and how many tasks per GPU.
    """
    batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(
        batch_size, max_words)
    layout = build_layout(max_words, nb_classes)
    training = Training(Objective('categorical_crossentropy'),
                        Optimizer(optimizer='Adam'),
                        Metric('categorical_accuracy'),
                        epoch_stopping_condition(), batch_size)
    parameters = custom_experiment_parameters()
    experiment = Experiment(experiment_label,
                            layout,
                            training,
                            batch_iterator,
                            test_batch_iterator,
                            CpuEnvironment(n_jobs=2),
                            parameters=parameters)
    run_ga_search_experiment(experiment,
                             population_size=100,
                             generations=steps,
                             resume=False,
                             log_level='DEBUG')
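
Example #1 references two helpers defined elsewhere in the example file, epoch_stopping_condition() and custom_experiment_parameters(), and its docstring mentions GpuEnvironment as a drop-in replacement for CpuEnvironment. Below is a minimal sketch of what those pieces could look like, assuming minos's EpochStoppingCondition, ExperimentParameters and GpuEnvironment classes; the exact signatures are assumptions and should be checked against the installed minos version:

from minos.experiment.experiment import ExperimentParameters
from minos.experiment.training import EpochStoppingCondition
from minos.train.utils import GpuEnvironment


def epoch_stopping_condition():
    # Stop each candidate model's training after a fixed number of epochs.
    return EpochStoppingCondition(10)


def custom_experiment_parameters():
    # Start from the default search space; custom parameter ranges would be
    # registered on this object before returning it.
    return ExperimentParameters(use_default_values=True)


# Assumed signature: a device list plus a task count, analogous to
# CpuEnvironment(n_jobs=2).
environment = GpuEnvironment(['/gpu:0', '/gpu:1'], n_jobs=2)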
Example #2
# Standard-library and numpy imports required by this snippet. The minos
# helpers used below (get_reuters_dataset, create_experiment,
# create_random_blueprint, ModelTrainer, get_associated_validation_metric)
# are defined elsewhere in the example module.
import tempfile
from os.path import join

import numpy


def train_multi_gpu(max_words=1000, batch_size=32):
    batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(
        batch_size, max_words)
    experiment = create_experiment(max_words, nb_classes, batch_size)
    blueprint = create_random_blueprint(experiment)
    devices = ['/gpu:0', '/gpu:1']
    trainer = ModelTrainer(batch_iterator, test_batch_iterator)
    with tempfile.TemporaryDirectory() as tmp_dir:
        model, history, _duration = trainer.train(blueprint,
                                                  devices,
                                                  save_best_model=True,
                                                  model_filename=join(
                                                      tmp_dir, 'model'))
        metric = get_associated_validation_metric(
            blueprint.training.metric.metric)
        epoch = numpy.argmax(history.history[metric])
        score = history.history[metric][epoch]
        print('Final training score %r at epoch %d' % (score, epoch))

        test_size = 10
        y_true = numpy.argmax(test_batch_iterator.y[0][:test_size], axis=1)
        y_pred = numpy.argmax(
            model.predict(test_batch_iterator.X[0][:test_size]), axis=1)
        evaluation = numpy.mean(y_true == y_pred)
        print('Final evaluation score %f' % evaluation)
        print('Predictions (true, pred) %r' %
              list(zip(y_true.tolist(), y_pred.tolist())))
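
To run Example #2 end to end, a minimal entry point like the following would do; it assumes the two GPU devices hard-coded in the function ('/gpu:0' and '/gpu:1') are actually available:

if __name__ == '__main__':
    train_multi_gpu(max_words=1000, batch_size=32)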