def main():
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=20,
                                          trials=Trials())
    print(best_run)
def test_advanced_callbacks():
    X_train, Y_train, X_test, Y_test = data()
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=1,
                                          trials=Trials(),
                                          verbose=False)
def test_simple():
    X_train, Y_train, X_test, Y_test = data()
    trials = Trials()
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=1,
                                          trials=trials,
                                          verbose=False)
def main():
    # TODO: parse the list arguments as well
    parser = argparse.ArgumentParser()
    parser.add_argument('-indir', required=True, type=str)
    parser.add_argument('-outdir', required=True, type=str)
    args = parser.parse_args()

    # hyperas templates take no arguments, so the CLI path is passed
    # through a module-level global
    global path2indir
    path2indir = args.indir
    # global path2outputdir
    # path2outputdir = args.outdir

    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=Trials())
    print(best_run)
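# A sketch (purely illustrative, not from the original source) of a data()
# template consuming the path2indir global set above; the file names and
# array layout are assumptions, and this relies on the global being visible
# where hyperas re-executes the extracted template:
def data():
    import os
    import numpy as np
    X_train = np.load(os.path.join(path2indir, 'X_train.npy'))
    Y_train = np.load(os.path.join(path2indir, 'Y_train.npy'))
    X_test = np.load(os.path.join(path2indir, 'X_test.npy'))
    Y_test = np.load(os.path.join(path2indir, 'Y_test.npy'))
    return X_train, Y_train, X_test, Y_test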
else:
    model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}},
                    kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}},
                    kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}},
                    kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(0.2))

model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

adam = keras.optimizers.Adam(lr=0.001)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
model.fit(X_train, Y_train,
          validation_data=(X_val, Y_val),
          batch_size={{choice([2500, 5000, 10000, 15000, 20000])}},
          nb_epoch=50, verbose=2)
score, acc = model.evaluate(X_val, Y_val, verbose=0)
print('Validation accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

X_train, Y_train, X_val, Y_val = data()
trials = Trials()
best_run, best_model, space = optim.minimize(model=model,
                                             data=data,
                                             algo=tpe.suggest,
                                             max_evals=50,
                                             trials=trials,
                                             eval_space=True,
                                             return_space=True)
print("Best model:")
print(best_run)

fout = open("report-folds-extra.txt", "a")
fout.write("Best performing model chosen hyper-parameters for Fold "
           + str(sys.argv[1]) + " sub_fold " + str(sys.argv[2]) + "\n")
fout.write(str(best_run))
fout.write("\n")
fout.write("*****************************************\n")
fout.close()
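# With eval_space=True, best_run above already holds the chosen values
# (e.g. an actual layer width) rather than hyperopt's raw index encoding,
# and return_space hands back the search space object. A short sketch,
# following the pattern hyperas documents for decoding individual trials
# with that space (reuses the trials/space variables from the snippet):
from hyperas.utils import eval_hyperopt_space

for t, trial in enumerate(trials):
    vals = trial.get('misc').get('vals')  # raw hyperopt encoding for this trial
    print("Trial %s vals: %s" % (t, eval_hyperopt_space(space, vals)))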
def main():
    n_folds = 5

    try:
        opts, args = getopt.getopt(sys.argv[1:], '',
                                   ['window_size=', 'wiki=', 'n_feature_maps=', 'epochs=',
                                    'undersample=', 'criterion=', 'optimizer=', 'model=',
                                    'genia=', 'tacc=', 'layers=', 'hyperopt=', 'model_name='])
    except getopt.GetoptError as error:
        print(error)
        sys.exit(2)

    model_type = 'nn'
    window_size = 5
    wiki = True
    n_feature_maps = 100
    epochs = 20
    undersample = False
    binary_cross_entropy = False
    criterion = 'categorical_crossentropy'
    optimizer = 'adam'
    k = 2
    use_genia = False
    using_tacc = False
    layer_sizes = []
    hyperopt = False
    model_name = 'model'

    for opt, arg in opts:
        if opt == '--window_size':
            window_size = int(arg)
        elif opt == '--wiki':
            if int(arg) == 0:
                wiki = False
        elif opt == '--epochs':
            epochs = int(arg)
        elif opt == '--layers':
            layer_sizes = arg.split(',')
        elif opt == '--n_feature_maps':
            n_feature_maps = int(arg)
        elif opt == '--undersample':
            if int(arg) == 1:
                undersample = True
        elif opt == '--criterion':
            criterion = arg
        elif opt == '--optimizer':
            optimizer = arg
        elif opt == '--model':
            model_type = arg
        elif opt == '--genia':
            if int(arg) == 1:
                use_genia = True
        elif opt == '--tacc':
            if int(arg) == 1:
                using_tacc = True
        elif opt == '--hyperopt':
            if int(arg) == 1:
                hyperopt = True
        elif opt == '--model_name':
            model_name = arg
        else:
            print("Option {} is not valid!".format(opt))

    if criterion == 'binary_crossentropy':
        binary_cross_entropy = True
        k = 1

    print('Loading word2vec model...')
    if wiki:
        print('Using wiki word2vec...')
        word2vec_model = 'wikipedia-pubmed-and-PMC-w2v.bin'
    else:
        print('Using non-wiki word2vec...')
        word2vec_model = 'PubMed-w2v.bin'
    w2v = Word2Vec.load_word2vec_format(word2vec_model, binary=True)
    print('Loaded word2vec model')

    pmids_dict, pmids, abstracts, lbls, vectorizer, groups_map, one_hot, dicts = \
        parse_summerscales.get_tokens_and_lbls(
            make_pmids_dict=True, sen=True, use_genia=use_genia, using_tacc=using_tacc)

    all_pmids = pmids_dict.keys()
    n = len(all_pmids)
    kf = KFold(n, random_state=1337, shuffle=True, n_folds=n_folds)

    accuracies = []
    recalls = []
    precisions = []
    f1_scores = []
    aucs = []

    global model

    for fold_idx, (train, test) in enumerate(kf):
        print("on fold %s" % fold_idx)
        train_pmids = [all_pmids[pmid_idx] for pmid_idx in train]
        test_pmids = [all_pmids[pmid_idx] for pmid_idx in test]
        print(train_pmids)

        print('loading data...')
        # every model type ('cnn', 'nn', 'ladder') shares the same preprocessing
        if model_type in ('cnn', 'nn', 'ladder'):
            X_train, y_train = _prep_data(train_pmids, pmids_dict, w2v, window_size,
                                          model_type, binary_ce=binary_cross_entropy)
            X_test, y_test = _prep_data(test_pmids, pmids_dict, w2v, window_size,
                                        model_type, binary_ce=binary_cross_entropy)

        if undersample:
            # Undersample the non-group tags at random... probably a bad idea...
            if binary_cross_entropy:
                idx_undersample = numpy.where(y_train == 0)[0]
                idx_positive = numpy.where(y_train == 1)[0]
            else:
                idx_undersample = numpy.where(y_train[:, 1] == 0)[0]
                idx_positive = numpy.where(y_train[:, 1] == 1)[0]
            random_negative_sample = numpy.random.choice(idx_undersample,
                                                         idx_positive.shape[0])

            if model_type == 'nn':
                X_train_positive = X_train[idx_positive, :]
                X_train_negative = X_train[random_negative_sample, :]
            else:
                X_train_positive = X_train[idx_positive, :, :, :]
                X_train_negative = X_train[random_negative_sample, :, :, :]

            if binary_cross_entropy:
                y_train_positive = y_train[idx_positive]
                y_train_negative = y_train[random_negative_sample]
            else:
                y_train_positive = y_train[idx_positive, :]
                y_train_negative = y_train[random_negative_sample, :]

            X_train = numpy.vstack((X_train_positive, X_train_negative))
            if binary_cross_entropy:
                y_train = numpy.hstack((y_train_positive, y_train_negative))
            else:
                y_train = numpy.vstack((y_train_positive, y_train_negative))

        print('loaded data...')

        if model_type == 'cnn':
            model = GroupCNN(window_size=window_size, n_feature_maps=n_feature_maps,
                             k_output=k, name=model_name)
        elif model_type == 'nn':
            model = GroupNN(window_size=window_size, k=k,
                            hyperparameter_search=hyperopt, name=model_name)

        if hyperopt:
            best_run, best_model = optim.minimize(model=_model,
                                                  data=_data,
                                                  algo=tpe.suggest,
                                                  max_evals=5,
                                                  trials=Trials())
            model.model = best_model
        else:
            model.train(X_train, y_train, epochs, optim_algo=optimizer,
                        criterion=criterion)

        words = []
        for pmid in test_pmids:
            words.extend(pmids_dict[pmid][0])

        predictions = model.predict_classes(X_test)
        predicted_words = crf.output2words(predictions, words)
        y_test_arg_max = numpy.argmax(y_test, axis=1)
        true_words = crf.output2words(y_test_arg_max, words)

        accuracy, f1_score, precision, auc, recall = model.test(X_test, y_test)
        recall, precision, f1_score = crf.eveluate(predicted_words, true_words)

        print("Accuracy: {}".format(accuracy))
        print("F1: {}".format(f1_score))
        print("Precision: {}".format(precision))
        print("AUC: {}".format(auc))
        print("Recall: {}".format(recall))

        accuracies.append(accuracy)
        f1_scores.append(f1_score)
        precisions.append(precision)
        aucs.append(auc)
        recalls.append(recall)

    mean_accuracy = numpy.mean(accuracies)
    mean_f1_score = numpy.mean(f1_scores)
    mean_precision = numpy.mean(precisions)
    mean_auc_score = numpy.mean(aucs)
    mean_recall = numpy.mean(recalls)

    mean_accuracy_string = "Mean Accuracy: {}".format(mean_accuracy)
    mean_f1_score_string = "Mean F1: {}".format(mean_f1_score)
    mean_precision_string = "Mean Precision: {}".format(mean_precision)
    mean_auc_score_string = "Mean AUC: {}".format(mean_auc_score)
    mean_recall_string = "Mean Recall: {}".format(mean_recall)

    print(mean_accuracy_string)
    print(mean_f1_score_string)
    print(mean_precision_string)
    print(mean_auc_score_string)
    print(mean_recall_string)

    # one metric per line, and close the handle when done
    with open('{}_fold_results'.format(model.model_name), 'w+') as results:
        results.write(mean_accuracy_string + '\n')
        results.write(mean_f1_score_string + '\n')
        results.write(mean_precision_string + '\n')
        results.write(mean_auc_score_string + '\n')
        results.write(mean_recall_string + '\n')
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          batch_size={{choice([64, 128])}},
          epochs=10,
          verbose=2,
          validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=50,
                                          trials=Trials(),
                                          eval_space=True)
    print(best_run)
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    cPickle.dump(ds, open(dataset + '_db.p', 'wb'))
else:
    print("Loading data from " + dataset + " data set...")
    ds = cPickle.load(open(dataset + '_db.p', 'rb'))

if feature_extract:
    extractor = FeatureExtraction()
    extractor.extract_dataset(ds.data, nb_samples=len(ds.targets), dataset=dataset)

try:
    trials = Trials()
    best_run, best_model = optim.minimize(model=create_model,
                                          data=get_data,
                                          algo=tpe.suggest,
                                          max_evals=6,
                                          trials=trials)
    U_train, X_train, Y_train, U_test, X_test, Y_test = get_data()

    best_model_idx = 1
    best_score = 0.0
    for i in range(1, (globalvars.globalVar + 1)):
        print("Evaluate models:")
        # load model
        model_path = 'weights_blstm_hyperas_' + str(i) + '.h5'
        model = load_model(model_path)
        scores = model.evaluate([U_test, X_test], Y_test)
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=128,
          epochs=10,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, verbose=0)

out = {
    'loss': -acc,
    'score': score,
    'status': STATUS_OK,
    'model_params': params,
}
# optionally store a dump of your model here so you can get it from the database later
temp_name = tempfile.gettempdir() + '/' + next(tempfile._get_candidate_names()) + '.h5'
model.save(temp_name)
with open(temp_name, 'rb') as infile:
    model_bytes = infile.read()
out['model_serial'] = model_bytes
return out

if __name__ == "__main__":
    trials = mongoexp.MongoTrials('mongo://*****:*****@mongodb.host:27017/jobs/jobs',
                                  exp_key='mnist_test')
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=trials,
                                          keep_temp=True)  # this last bit is important
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
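# A minimal sketch (an assumption, not part of the original snippet) of
# pulling the bytes stored under 'model_serial' above back out of the
# MongoTrials database and reloading the Keras model from them:
import tempfile
from keras.models import load_model

best_result = trials.best_trial['result']       # full dict returned by the objective
model_bytes = best_result['model_serial']
with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as tmp:
    tmp.write(model_bytes)                      # dump the serialized HDF5 to disk
    tmp_path = tmp.name
restored_model = load_model(tmp_path)           # same architecture and weights as saved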
from keras.optimizers import RMSprop

model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train,
          batch_size={{choice([64, 128])}},
          nb_epoch=1,
          show_accuracy=True,
          verbose=2,
          validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test accuracy:', score[1])
return {'loss': -score[1], 'status': STATUS_OK}

if __name__ == '__main__':
    best_run = optim.minimize(model=model,
                              data=data,
                              algo=tpe.suggest,
                              max_evals=10,
                              trials=Trials())
    print(best_run)
if not os.path.isdir(OUTDIR):
    raise IOError('output directory not found: ' + OUTDIR)

inputs = {
    'GPU': GPU,
    'NB_EPOCHS': NB_EPOCHS,
    'VGG_WEIGHTS': VGG_WEIGHTS,
    'INDIR': INDIR
}
with open('inputs.pickle', 'wb') as handle:
    pickle.dump(inputs, handle)

best_run, best_model_dict = optim.minimize(model=model,
                                           data=data,
                                           algo=tpe.suggest,
                                           max_evals=N_TRIALS,
                                           trials=Trials())

train_data_dir = INDIR + 'BerryPhotos/train'
train_images = []
train_labels = []
train_path_e = train_data_dir + "/early/"
train_path_g = train_data_dir + "/good/"
train_path_l = train_data_dir + "/late/"
train_paths = [train_path_e, train_path_g, train_path_l]
train_filenames_e = os.listdir(train_path_e)
train_filenames_g = os.listdir(train_path_g)
train_filenames_l = os.listdir(train_path_l)
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))

rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
          batch_size={{choice([64, 128])}},
          nb_epoch=1,
          verbose=2,
          validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    X_train, Y_train, X_test, Y_test = data()
    functions = [visualization_mnist]
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          functions=functions,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
x = Dense({{choice([64, 128, 256])}})(x)
x = Activation("relu")(x)
x = Dropout({{uniform(0.1, 0.5)}})(x)
predictions = Dense(1)(x)

model = Model(inputs=[inputs], outputs=[predictions])
model.compile(loss="mse", optimizer={{choice(["adam", "RMSprop"])}})
model.fit(X_train, y_train,
          batch_size=BATCH_SIZE,
          epochs=NUM_EPOCHS,
          verbose=2,
          validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=0)
# score is the validation MSE, which hyperopt should minimize directly
return {'loss': score, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=keras_model,
                                          data=preprocess_data,
                                          algo=tpe.suggest,
                                          max_evals=50,
                                          trials=Trials())
    X_train, X_test, y_train, y_test = preprocess_data()
    score = best_model.evaluate(X_test, y_test)
    print("\nThe score on the test set is {:.2e}".format(score))
    print(best_run)
newline = newline.replace('MODEL_ARCH', model_arch)
newline = newline.replace('PREFIX', args.prefix)
fout.write(newline)

sys.path.append(tmpdir)
mymodel = __import__(model_arch)

if args.sgmodel:
    allmodels = [basename(args.modeltop)]
    args.modeltop = dirname(args.modeltop)
else:
    allmodels = [x for x in listdir(args.modeltop) if isdir(join(args.modeltop, x))]

if args.hyper:
    # Hyper-parameter tuning
    MAX_EVAL = args.hyperiter
    best_run, best_model = optim.minimize(model=mymodel.model,
                                          data=mymodel.data,
                                          algo=tpe.suggest,
                                          max_evals=MAX_EVAL,
                                          trials=Trials())
    best_archit, best_optim = best_model
    open(architecture_file, 'w').write(best_archit)
    cPickle.dump(best_optim, open(optimizer_file, 'wb'))

if args.train:
    # Training
    from keras.models import model_from_json
    from keras.callbacks import ModelCheckpoint

    model = model_from_json(open(architecture_file).read())
    best_optim = cPickle.load(open(optimizer_file, 'rb'))
    model.compile(loss='binary_crossentropy',
                  optimizer=best_optim,
                  metrics=['accuracy'])
    checkpointer = ModelCheckpoint(filepath=weight_file, verbose=1, save_best_only=True)
    trainsample = join(topdir, data_code) + '.target*train'
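# The tuple unpack above (best_archit, best_optim = best_model) implies that
# mymodel.model() returns a (JSON architecture, optimizer) pair under the
# 'model' key of its result dict. A minimal sketch of such a template, under
# that assumption (layer sizes and names here are illustrative only):
def model(X_train, Y_train, X_test, Y_test):
    from hyperopt import STATUS_OK
    from keras.models import Sequential
    from keras.layers import Dense

    myoptimizer = {{choice(['adam', 'rmsprop', 'sgd'])}}
    net = Sequential()
    net.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer=myoptimizer)
    net.fit(X_train, Y_train, validation_data=(X_test, Y_test), verbose=0)
    loss = net.evaluate(X_test, Y_test, verbose=0)
    # hand back architecture + optimizer so the caller can rebuild and retrain
    return {'loss': loss, 'status': STATUS_OK,
            'model': (net.to_json(), myoptimizer)}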
graph.add_node(Convolution2D(CNN_filters, CNN_rows, cols, activation='sigmoid'),
               name='Conv', input='Reshape')
sh = graph.nodes['Conv'].output_shape
graph.add_node(MaxPooling2D(pool_size=(sh[-2], sh[-1])), name='MaxPool', input='Conv')
graph.add_node(Flatten(), name='Flat', input='MaxPool')
graph.add_node(Dense(Dense_size, activation='sigmoid',
                     W_regularizer=l2({{uniform(0, 1)}}),
                     activity_regularizer=activity_l2({{uniform(0, 1)}})),
               name='Dtxt', input='Flat')
graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout1', input='Dtxt')
graph.add_input(name='av_data', input_shape=[train_data['AV'].shape[-1]])
graph.add_node(Dense(Dense_size2, activation='sigmoid',
                     W_regularizer=l2({{uniform(0, 1)}}),
                     activity_regularizer=activity_l2({{uniform(0, 1)}})),
               name='Dav', input='av_data')
graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout2', input='Dav')
graph.add_node(Dense(Dense_size3, activation='sigmoid',
                     W_regularizer=l2({{uniform(0, 1)}}),
                     activity_regularizer=activity_l2({{uniform(0, 1)}})),
               name='Dense1', inputs=['Dropout2', 'Dropout1'], merge_mode='concat')
graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout3', input='Dense1')
graph.add_node(Dense(out_dim, activation='linear'), name='Dense2', input='Dropout3')
graph.add_output(name='output', input='Dense2')

graph.compile(optimizer=opt, loss={'output': 'rmse'})
graph.fit({'txt_data': train_data['features'],
           'av_data': train_data['AV'],
           'output': train_data['labels']},
          nb_epoch=500, batch_size=64)
scores = graph.evaluate({'txt_data': validation_data['features'],
                         'av_data': validation_data['AV'],
                         'output': validation_data['labels']})
print(scores)
return {'loss': scores, 'status': STATUS_OK}

if __name__ == '__main__':
    best_run = optim.minimize(keras_model,
                              algo=tpe.suggest,
                              max_evals=1000,
                              trials=Trials())
    pprint(best_run)
model = Sequential()
model.add(Dense({{choice([54, 27, 13])}}, input_dim=54, init='normal', activation='linear'))
model.add(Dense({{choice([104, 54, 27, 13])}}, init='normal', activation='linear'))
if conditional({{choice(['three', 'four'])}}) == 'four':
    model.add(Dense({{choice([27, 13, 7])}}, activation='linear'))
model.add(Dense(1, init='normal', activation='linear'))

# Compile model
model.compile(loss='mse', optimizer='rmsprop')
model.fit(X_train, y_train,
          nb_epoch=50,
          batch_size={{choice([64, 128, 256])}},
          verbose=2)
mse = model.evaluate(X_test, y_test)
print('\nTest MSE:', mse)
return {'loss': mse, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=get_data,
                                          algo=tpe.suggest,
                                          max_evals=100,
                                          trials=Trials())
    X_train, y_train, X_test, y_test = get_data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, y_test))
    y_test_est = best_model.predict(X_test)
    print("neural net hyperas MSE: %f" % mean_squared_error(y_test, y_test_est))
    print("neural net hyperas score: %f" % r2_score(y_test, y_test_est))
    print("best run is:")
    print(best_run)
# Optimizer
sgd = optimizers.SGD(lr=learning_rate, momentum=momentum, decay=0, nesterov=False)

# Create and train model
model = Model(inputs=x_input, outputs=predictions)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=x_train, y=y_train,
          validation_split=0.1,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1)
metrics = model.evaluate(x=x_test, y=y_test,
                         batch_size=batch_size,
                         verbose=0, sample_weight=None, steps=None)
accuracy = metrics[1]
return {'loss': 1 - accuracy, 'status': STATUS_OK, 'model': model}

if __name__ == "__main__":
    x_train, y_train, x_test, y_test = mnist_data()
    best_run, best_model = optim.minimize(model=create_model,
                                          data=mnist_data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print("Evaluation of best performing model:")
    print(best_model.evaluate(x_test, y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          batch_size={{choice([64, 128])}},
          nb_epoch=1,
          verbose=2,
          validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    trials = Trials()
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=trials)
    for trial in trials:
        print(trial)
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
          batch_size=128,
          epochs=epochs,
          verbose=1,
          validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))

y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(te_y, y_pred)

print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
return {'loss': -te_acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    tr_pairs, tr_y, te_pairs, te_y, input_shape = data()
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          functions=[process_data, create_base_network,
                                                     euclidean_distance, contrastive_loss,
                                                     eucl_dist_output_shape, create_pairs,
                                                     accuracy, compute_accuracy],
                                          algo=tpe.suggest,
                                          max_evals=100,
                                          trials=Trials())
    print("best model", best_model)
    print("best run", best_run)
    print("Evaluation of best performing model:")
    loss, te_acc = best_model.evaluate([te_pairs[:, 0], te_pairs[:, 1]], te_y)
    print("best prediction accuracy on test data %0.2f%%" % (100 * te_acc))
model.add(Convolution1D({{choice([64, 128])}}, {{choice([6, 8])}},
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Train...')
model.fit(X_train, y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=rand.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print(best_run)
model.add(Dense(100))
# We can also choose between complete sets of layers
model.add({{choice([Dropout(0.5), Activation('linear')])}})
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
model.fit(X_train, Y_train,
          batch_size={{choice([64, 128])}},
          nb_epoch=1,
          show_accuracy=True,
          verbose=2,
          validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
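# A minimal, self-contained sketch distilling the pattern the snippets above
# share (a data() template, a model() template returning a result dict with
# 'loss', 'status', and 'model' keys, and a call to optim.minimize); the
# dataset and layer sizes here are illustrative, not taken from any snippet:
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform


def data():
    # any function returning train/test arrays works here
    from keras.datasets import mnist
    from keras.utils import np_utils
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(-1, 784).astype('float32') / 255
    X_test = X_test.reshape(-1, 784).astype('float32') / 255
    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)
    return X_train, Y_train, X_test, Y_test


def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation

    net = Sequential()
    net.add(Dense({{choice([256, 512])}}, input_shape=(784,)))
    net.add(Activation('relu'))
    net.add(Dropout({{uniform(0, 1)}}))
    net.add(Dense(10))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer={{choice(['rmsprop', 'adam'])}},
                metrics=['accuracy'])
    net.fit(X_train, Y_train,
            batch_size={{choice([64, 128])}},
            epochs=1, verbose=2,
            validation_data=(X_test, Y_test))
    score, acc = net.evaluate(X_test, Y_test, verbose=0)
    # negate accuracy because hyperopt minimizes 'loss'; the 'model' key is
    # what optim.minimize hands back as best_model
    return {'loss': -acc, 'status': STATUS_OK, 'model': net}


if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model, data=data,
                                          algo=tpe.suggest, max_evals=5,
                                          trials=Trials())
    print(best_run)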