Example #1
from sklearn.preprocessing import MinMaxScaler

# Split the data into features and target
# (features and target are assumed to be pandas objects defined earlier)
x_train = features.values
y_train = target.values

# Apply min-max normalization
scaler = MinMaxScaler().fit(x_train)
x_train = scaler.transform(x_train)

# Find the best hyperparameters (grid_search is a GridSearchCV instance
# assumed to be configured earlier; see the sketch below this example)
grid_search.fit(x_train, y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)

# Retrain the model with the best parameters found by the grid search
# (model2 is a model-builder function assumed to be defined earlier)
best_params = grid_search.best_params_
model = model2(input_dim,
               loss=best_params['loss'],
               r1=best_params['r1'],
               l1=best_params['l1'],
               r2=best_params['r2'],
               l2=best_params['l2'])
model.fit(x_train,
          y_train,
          epochs=best_params['epochs'],
          batch_size=best_params['batch_size'],
          verbose=1)

# Save the trained model to disk
filename = '../models/final_ANN_model.h5'
model.save(filename)
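
The snippet assumes model2 and grid_search are defined elsewhere. Below is a minimal sketch of plausible definitions, assuming model2 builds a two-hidden-layer regressor whose layer sizes (l1, l2) and L2 penalties (r1, r2) are tuned; the grid values, optimizer, and layer roles are placeholders, not the original author's settings:

from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l2 as l2_reg
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV

def model2(input_dim, loss='mse', r1=0.0, l1=64, r2=0.0, l2=32):
    # Two hidden layers of sizes l1/l2 with L2 penalties r1/r2 (assumed roles)
    model = Sequential()
    model.add(Dense(l1, input_dim=input_dim, activation='relu',
                    kernel_regularizer=l2_reg(r1)))
    model.add(Dense(l2, activation='relu', kernel_regularizer=l2_reg(r2)))
    model.add(Dense(1))
    model.compile(loss=loss, optimizer='adam')
    return model

param_grid = {
    'loss': ['mse', 'mae'],
    'r1': [0.0, 0.01], 'l1': [32, 64],
    'r2': [0.0, 0.01], 'l2': [16, 32],
    'epochs': [50, 100], 'batch_size': [16, 32],
}
grid_search = GridSearchCV(
    KerasRegressor(build_fn=model2, input_dim=x_train.shape[1], verbose=0),
    param_grid, cv=3, n_jobs=1)

Note that epochs and batch_size in the grid are fit-time parameters; the scikit-learn wrapper routes them to model.fit while the remaining keys are forwarded to the build function.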
Example #2

# (this excerpt begins mid-way through a model.fit(...) call in the training loop)
                      callbacks=[early_stopping, tensorboard])
            fit_models.append(model)

        predictions = [
            dataload.predict_sequences_multiple(model, X_test, seq_len,
                                                predict_len)
            for model in top_models
        ]
        scores = [
            model.evaluate(X_test, y_test, verbose=0) for model in top_models
        ]

        # Save results (makedirs creates both directory levels;
        # exist_ok avoids an error on reruns)
        folder_name = 'seq_len_{}'.format(seq_len)
        os.makedirs('{}/{}'.format(results_fname, folder_name), exist_ok=True)
        results.to_csv('{0}/{1}/results.csv'.format(results_fname,
                                                    folder_name))
        top_model_plots = [(predictions[i], 'Model {}'.format(i + 1))
                           for i in range(len(predictions))]
        plot_results_multiple(top_model_plots,
                              y_test,
                              predict_len,
                              fig_path='{0}/{1}/plots.pdf'.format(
                                  results_fname, folder_name))
        for index, model in enumerate(fit_models, start=1):
            model.save('{}/{}/model-{}.h5'.format(results_fname, folder_name,
                                                  index))
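
The helpers dataload.predict_sequences_multiple and plot_results_multiple come from the project's own modules and are not shown. Here is a plausible sketch of the predictor, assuming the common recursive scheme in which each test window of shape (window_size, 1) seeds predict_len steps, with every prediction fed back into the frame; the univariate frame shape is an assumption:

import numpy as np

def predict_sequences_multiple(model, data, window_size, prediction_len):
    # For each non-overlapping window of the test data, predict
    # prediction_len steps ahead by feeding predictions back in.
    prediction_seqs = []
    for i in range(len(data) // prediction_len):
        curr_frame = data[i * prediction_len]
        predicted = []
        for _ in range(prediction_len):
            predicted.append(model.predict(curr_frame[np.newaxis, :, :])[0, 0])
            # Slide the frame: drop the oldest step, append the new prediction
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, window_size - 1,
                                   predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs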
Example #3
# (continuation of a multi-line call from the truncated grid-search branch;
# GridSearchCV below comes from sklearn.model_selection)
                      shuffle=shuffle,
                      validation_split=validation_split)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
    grid_result = grid.fit(encoded_X_train, Y_train)
    # Summarize the grid-search results
    print("Best: %f using %s" %
          (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
else:
    model = create_model(neurons=100,
                         optimizer='rmsprop',
                         init='glorot_uniform')
    # model.fit(train_data, train_label, batch_size=20, epochs=100, shuffle=True, verbose=1, validation_split=0.2)
    model.fit(encoded_X_train,
              Y_train,
              batch_size=10,
              epochs=150,
              shuffle=True,
              verbose=1,
              validation_split=0.2)
    result = model.evaluate(encoded_X_test, Y_test, batch_size=1000)

    print('loss: %5.6f   acc: %5.6f' % (result[0], result[1]))

    # Save the trained model to disk
    model.save(MODEL_FILENAME)
    # Sanity check: print predictions on the training set
    print(model.predict(encoded_X_train))
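
create_model and the estimator handed to GridSearchCV are not shown in this excerpt. A minimal sketch, assuming a single-hidden-layer binary classifier (the evaluate call above reports loss and accuracy); input_dim, the layer sizes, and the param_grid values are placeholders:

from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier

def create_model(neurons=100, optimizer='rmsprop', init='glorot_uniform',
                 input_dim=100):
    # One hidden layer; binary cross-entropy is an assumed objective
    model = Sequential()
    model.add(Dense(neurons, input_dim=input_dim, kernel_initializer=init,
                    activation='relu'))
    model.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    return model

# In the grid-search branch, `model` would be the scikit-learn wrapper:
model = KerasClassifier(build_fn=create_model,
                        input_dim=encoded_X_train.shape[1], verbose=0)
param_grid = {'neurons': [50, 100, 200], 'optimizer': ['rmsprop', 'adam']}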