}

    sherpa_algo = sherpa.algorithms.GPyOpt(max_num_trials=80)
    study = sherpa.Study(parameters=params,
                         algorithm=sherpa_algo,
                         lower_is_better=True)
    for trial in study:
        trial_id = trial.id
        savename = '{}/model/model.{}.h5'.format(algorithm, trial_id)
        pars = deepcopy(trial.parameters)

        for k, v in baseline_params.items():
            pars[k] = v
        pars['batches_per_epoch'] = set_batches_per_epoch(pars['batch_size'])
        model = init_model(pars)
        adam = init_adam(pars)
        datagen = init_datagen(pars)
        model.compile(optimizer=adam, loss='categorical_crossentropy')

        # Train but reserve the last little bit of data for
        # a final testing set.
        t_loss, v_loss = train(model,
                               pars,
                               X_train,
                               Y_train,
                               X_test[:6000],
                               Y_test[:6000],
                               patience=10,
                               savename=savename,
                               datagen=datagen)

        # Log the best validation loss as this trial's objective
        study.add_observation(trial=trial, iteration=0, objective=min(v_loss))

        # Save if it is not saved
        if not os.path.exists(savename):
            model.save(savename)
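The `params` list handed to `sherpa.Study` is cut off in the snippet above. As a purely illustrative sketch (the parameter names, ranges, and scales below are assumptions, not taken from the original code), a sherpa search space is declared as a list of parameter objects that GPyOpt then samples from, with each `trial.parameters` holding one sampled configuration:

    import sherpa

    # Hypothetical search space; every name and range here is illustrative only.
    params = [
        sherpa.Continuous(name='learning_rate', range=[1e-4, 1e-2], scale='log'),
        sherpa.Continuous(name='dropout', range=[0.0, 0.5]),
        sherpa.Discrete(name='init_filters', range=[8, 64]),
        sherpa.Choice(name='use_batchnorm', range=[True, False]),
    ]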
Example #2
        'beta2': 0.998234
    }
    baseline_params['batches_per_epoch'] = set_batches_per_epoch(baseline_params['batch_size'])

    sherpa_algo = sherpa.algorithms.GPyOpt(max_num_trials=200)
    study = sherpa.Study(parameters=params, algorithm=sherpa_algo, lower_is_better=True)
    for trial in study:
        trial_id = trial.id
        savename = '{}/model/model.{}.h5'.format(algorithm, trial_id)
        pars = deepcopy(trial.parameters)

        for k, v in baseline_params.items():
            pars[k] = v

        model = init_model(baseline_params)
        adam = init_adam(baseline_params)
        datagen = init_datagen(pars)
        model.compile(optimizer=adam, loss='categorical_crossentropy')

        # Train but reserve the last little bit of data for
        # a final testing set.
        t_loss, v_loss = train(model, baseline_params, X_train, Y_train, X_test[:6000], Y_test[:6000],
                               patience=10, savename=savename, datagen=datagen)

        # Log the best validation loss as this trial's objective
        study.add_observation(trial=trial, iteration=0, objective=min(v_loss))

        # Save if it is not saved
        if not os.path.exists(savename):
            model.save(savename)
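All three examples call `set_batches_per_epoch(batch_size)` to fill in `batches_per_epoch`, but the helper itself is never shown. A minimal sketch of what it plausibly does, assuming `X_train` is in scope as it is in the training calls above (this is an assumed implementation, not the original):

    import math

    def set_batches_per_epoch(batch_size):
        # Enough batches to cover the training set once per epoch;
        # X_train is assumed to be the same array passed to train().
        return int(math.ceil(len(X_train) / batch_size))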
Example #3
        'depth': 3,
        'dense_neurons': 512,
        'init_filters': 20,
        'use_batchnorm': True,
        'dropout': 0.4,
        'batch_size': 128,
        'max_epochs': 5,
        'learning_rate': 0.008,
        'beta1': 0.5,
        'beta2': 0.999
    }
    params['batches_per_epoch'] = set_batches_per_epoch(params['batch_size'])
    print(params)

    model = init_model(params)
    adam = init_adam(params)
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    print(model.summary())

    # Train but reserve the last little bit of data for
    # a final testing set.
    t_loss, v_loss = train(model,
                           params,
                           X_train,
                           Y_train,
                           X_test[:6000],
                           Y_test[:6000],
                           patience=10,
                           savename=savename)

    # Save if it is not saved
    if not os.path.exists(savename):
        model.save(savename)
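`init_adam(params)` is also left undefined in these snippets. Given that the tuned keys include `learning_rate`, `beta1`, and `beta2`, a reasonable sketch is the following (assuming tf.keras; the exact import and argument names in the original may differ):

    from tensorflow.keras.optimizers import Adam

    def init_adam(pars):
        # Build the Adam optimizer from the (possibly tuned) hyperparameters.
        return Adam(learning_rate=pars['learning_rate'],
                    beta_1=pars['beta1'],
                    beta_2=pars['beta2'])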