def main():
    """Entry point: parse CLI arguments, derive the train/test CSV
    paths from ``data_dir``, and launch paired training.

    NOTE(review): paths are built by plain concatenation, so
    ``args.data_dir`` is assumed to end with a path separator —
    TODO confirm against the argument's default.
    """
    args = argumentparser.ArgumentParser()
    train_csv = args.data_dir + "train1.csv"
    test_csv = args.data_dir + "test1.csv"
    train_pair(args, train_csv, test_csv)
def main():
    """Entry point: parse CLI arguments and run training.

    NOTE(review): another ``main`` is defined nearby in this file; if
    both live in the same module the later definition silently replaces
    the earlier one — these chunks may belong to separate scripts.
    """
    train(argumentparser.ArgumentParser())
def _write_trials_csv(trials, csvfile):
    """Dump hyperopt trial results to ``csvfile``: one row per trial
    holding its sampled hyperparameter values plus a ``val`` column
    with the negated loss (so larger ``val`` is better).

    NOTE(review): each trial's ``['misc']['vals']`` dict is extended in
    place with the ``val`` key, exactly as the original inline code did.
    Assumes ``trials`` has a ``.trials`` list of hyperopt-style dicts —
    TODO confirm against the caller.
    """
    rows = []
    for trial in trials.trials:
        params = trial['misc']['vals']
        params['val'] = -trial['result']['loss']
        rows.append(params)
    keys = rows[0].keys()
    # Fix: the original opened the file in 'wb' mode, which makes
    # csv.DictWriter raise TypeError on Python 3 (csv writes str, not
    # bytes).  Text mode with newline='' is the documented csv idiom.
    with open(csvfile, 'w', newline='') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(rows)


# `trials` and `csvfile` come from earlier in this script (outside the
# visible chunk); the dump runs at the same point it originally did.
_write_trials_csv(trials, csvfile)

if __name__ == '__main__':
    args = argumentparser.ArgumentParser()
    if args.dataset == 'pun':
        x_train, y_train, x_test, y_test, embedding_matrix, nb_classes = pun(args)
        # Hyperopt search space for the pun dataset: categorical choices
        # for architecture/optimizer knobs, uniform ranges for dropout.
        space = {
            'optimizer': hp.choice('optimizer', ['adadelta', 'rmsprop']),
            'batch_size': hp.choice('batch_size', [32, 64]),
            'filter_size': hp.choice('filter_size', [3, 4, 5]),
            'nb_filter': hp.choice('nb_filter', [75, 100]),
            'dropout1': hp.uniform('dropout1', 0.25, 0.75),
            'dropout2': hp.uniform('dropout2', 0.25, 0.75),
            'use_embeddings': True,
            'embeddings_trainable': False,
            'lstm_hs': hp.choice('lstm_hs', [32, 50, 64]),
        }