def main():
    y_trues = []
    y_preds = []

    for model_type in model_types:

        # test dataset
        model_test_dataset = 'dataset_' + model_type
        # path to saved model files
        saved_model_weights_path = './trained_for_pred/' + \
            model_type + '/model/Best-weights.h5'
        saved_model_arch_path = './trained_for_pred/' + \
            model_type + '/model/scratch_model.json'
        test_data_dir = './datasets/' + model_test_dataset + '/test'

        # init DataManager class
        dataManager = DataManager(img_height, img_width)

        # load model
        print("===================== load model =========================")
        model = load_model(saved_model_arch_path, saved_model_weights_path)
        # get test data
        print("===================== load data =========================")
        test_data = dataManager.get_test_data(test_data_dir)
        # start the eval process
        print("===================== start eval =========================")
        y_true = test_data.classes
        # Confusion Matrix and Classification Report
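        # NB: get_test_data() must build the generator with shuffle=False so
        # that y_true lines up with the prediction order, and the floor
        # division below silently drops any partial final batch when
        # num_of_test_samples is not a multiple of batch_size.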
        Y_pred = model.predict_generator(test_data,
                                         num_of_test_samples // batch_size)
        y_pred = np.argmax(Y_pred, axis=1)

        y_trues.append(y_true)
        y_preds.append(y_pred)

    # init PlotData class
    plotData = PlotData()
    # Compute ROC curve and ROC area for each class
    plotData.plot_roc(y_trues, y_preds, colors, linestyles, legends,
                      save_plt_roc)
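
# The example above relies on module-level settings (model_types, img_height,
# img_width, batch_size, num_of_test_samples, colors, linestyles, legends,
# save_plt_roc) and a two-argument load_model() helper defined elsewhere in
# the project. A minimal sketch of those assumptions, with hypothetical
# values, might look like this:
import numpy as np
from keras.models import model_from_json

model_types = ['scratch', 'vgg16']   # hypothetical model identifiers
img_height, img_width = 224, 224     # hypothetical input resolution
batch_size = 32
num_of_test_samples = 800            # hypothetical test-set size
colors = ['darkorange', 'navy']      # one ROC color/style/legend per model
linestyles = ['-', '--']
legends = model_types
save_plt_roc = './trained_for_pred/stats/roc.png'

def load_model(arch_path, weights_path):
    # Rebuild the architecture from its JSON dump, then attach the weights.
    with open(arch_path) as f:
        model = model_from_json(f.read())
    model.load_weights(weights_path)
    return model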
Example #2
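
# Imports assumed from the full script (not shown in this excerpt):
#   import numpy as np
#   import pandas as pd
#   from sklearn.metrics import (confusion_matrix, classification_report,
#                                accuracy_score, mean_absolute_error,
#                                mean_squared_error)
# delete_file(), the save_* paths, cm_plot_labels, and the no-argument
# load_model() variant are project-level helpers and settings.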
def main():
    # init DataManager class
    dataManager = DataManager(img_height, img_width)
    # init PlotData class
    plotData = PlotData()
    # load model
    print("===================== load model =========================")
    model = load_model()
    # get test data
    print("===================== load data =========================")
    test_data = dataManager.get_test_data(test_data_dir)
    # start the eval process
    print("===================== start eval =========================")
    y_true = test_data.classes
    # Confusion Matrix and Classification Report
    Y_pred = model.predict_generator(test_data, num_of_test_samples // batch_size)
    y_pred = np.argmax(Y_pred, axis=1)
    # plot confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    plotData.plot_confusion_matrix(
        cm, cm_plot_labels, save_plt_cm, title='Confusion Matrix')
    plotData.plot_confusion_matrix(
        cm, cm_plot_labels, save_plt_normalized_cm, normalize=True, title='Normalized Confusion Matrix')
    # Compute ROC curve and ROC area for each class
    roc_auc = plotData.plot_roc(y_true, y_pred, save_plt_roc)
    mae = mean_absolute_error(y_true, y_pred)
    mse = mean_squared_error(y_true, y_pred)
    accuracy = accuracy_score(y_true, y_pred)

    print('mean absolute error: ' + str(mae))
    print('mean squared error: ' + str(mse))
    print('Area Under the Curve (AUC): ' + str(roc_auc))
    c_report = classification_report(
        y_true, y_pred, target_names=cm_plot_labels)
    print(c_report)
    delete_file(save_eval_report)
    with open(save_eval_report, 'a') as f:
        f.write('\n\n')
        f.write('******************************************************\n')
        f.write('**************    Evaluation Report    ***************\n')
        f.write('******************************************************\n')
        f.write('\n\n')
        f.write('- Accuracy Score: ' + str(accuracy))
        f.write('\n\n')

        f.write('- Mean Absolute Error (MAE): ' + str(mae))
        f.write('\n\n')

        f.write('- Mean Squared Error (MSE): ' + str(mse))
        f.write('\n\n')

        f.write('- Area Under the Curve (AUC): ' + str(roc_auc))
        f.write('\n\n')

        f.write('- Confusion Matrix:\n')
        f.write(str(cm))
        f.write('\n\n')

        f.write('- Normalized Confusion Matrix:\n')
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        f.write(str(cm))
        f.write('\n\n')

        f.write('- Classification report:\n')
        f.write(str(c_report))

    train_validation = ['train', 'validation']
    data = pd.read_csv(train_log_data_path)
    acc = data['acc'].values
    val_acc = data['val_acc'].values
    loss = data['loss'].values
    val_loss = data['val_loss'].values

    # plot metrics to the stats dir
    plotData.plot_2d(acc, val_acc, 'epoch', 'accuracy',
                     'Model Accuracy', train_validation, save_plt_accuracy)
    plotData.plot_2d(loss, val_loss, 'epoch', 'loss',
                     'Model Loss', train_validation, save_plt_loss)
    plotData.plot_model_bis(data, save_plt_learning)
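
# For the evaluation above to be meaningful, DataManager.get_test_data() must
# return a non-shuffled directory iterator; otherwise test_data.classes and
# the predictions come back in different orders. A minimal sketch of such a
# method (the rescale and class_mode settings are assumptions):
from keras.preprocessing.image import ImageDataGenerator

class DataManager(object):
    def __init__(self, img_height, img_width):
        self.img_height = img_height
        self.img_width = img_width

    def get_test_data(self, test_data_dir):
        # shuffle=False keeps .classes aligned with predict_generator() output
        datagen = ImageDataGenerator(rescale=1. / 255)
        return datagen.flow_from_directory(
            test_data_dir,
            target_size=(self.img_height, self.img_width),
            batch_size=batch_size,  # module-level setting, as above
            class_mode='categorical',
            shuffle=False)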
Example #3
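
# Imports assumed from the full script: pickle and tensorflow as tf; FLAGS
# (e.g. tf.app.flags.FLAGS) is assumed to define checkpoints_dir.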
# Load configuration
with open('{}/config.pkl'.format(FLAGS.checkpoints_dir), 'rb') as f:
    config = pickle.load(f)

# Load data
dm = DataManager(data_dir=config['data_dir'],
                 stopwords_file=config['stopwords_file'],
                 sequence_len=config['sequence_len'],
                 n_samples=config['n_samples'],
                 test_size=config['test_size'],
                 val_samples=config['batch_size'],
                 random_state=config['random_state'],
                 ensure_preprocessed=True)

# Import graph and evaluate the model using test data
original_text, x_test, y_test, test_seq_len = dm.get_test_data(original_text=True)
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()

    # Import graph and restore its weights
    print('Restoring graph ...')
    saver = tf.train.import_meta_graph("{}/model.ckpt.meta".format(FLAGS.checkpoints_dir))
    saver.restore(sess, ("{}/model.ckpt".format(FLAGS.checkpoints_dir)))

    # Recover input/output tensors
    input = graph.get_operation_by_name('input').outputs[0]
    target = graph.get_operation_by_name('target').outputs[0]
    seq_len = graph.get_operation_by_name('lengths').outputs[0]
    dropout_keep_prob = graph.get_operation_by_name('dropout_keep_prob').outputs[0]
    predict = graph.get_operation_by_name('final_layer/softmax/predictions').outputs[0]
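
    # With the tensors recovered, evaluation is a single run of the restored
    # graph; dropout is disabled at test time by feeding keep_prob = 1.0. A
    # minimal sketch of the inference step (assuming y_test is one-hot
    # encoded; the original script may report other metrics):
    import numpy as np  # normally imported at the top of the script

    pred_probs = sess.run(predict, feed_dict={input: x_test,
                                              seq_len: test_seq_len,
                                              dropout_keep_prob: 1.0})
    y_pred = np.argmax(pred_probs, axis=1)
    accuracy = np.mean(y_pred == np.argmax(y_test, axis=1))
    print('Test accuracy: {:.4f}'.format(accuracy))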