Example #1
    best_model.compile(optimizer=adam,
                       loss='categorical_crossentropy',
                       metrics=[
                           'accuracy',
                           tf.keras.metrics.CategoricalAccuracy(),
                           Evaluator.precision, Evaluator.recall,
                           Evaluator.fmeasure
                       ])
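    # For reference, Evaluator.precision / recall / fmeasure presumably follow
    # the classic Keras-backend metric pattern (dropped from Keras core in 2.0).
    # A minimal sketch of that pattern, assuming one-hot y_true and softmax y_pred:
    #
    #     from tensorflow.keras import backend as K
    #
    #     def precision(y_true, y_pred):
    #         true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    #         pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    #         return true_pos / (pred_pos + K.epsilon())
    #
    #     def recall(y_true, y_pred):
    #         true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    #         actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    #         return true_pos / (actual_pos + K.epsilon())
    #
    #     def fmeasure(y_true, y_pred):
    #         p = precision(y_true, y_pred)
    #         r = recall(y_true, y_pred)
    #         return 2 * p * r / (p + r + K.epsilon())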

    dt_start_predict = datetime.now()

    y_pred = best_model.predict(x_test, batch_size=64)

    dt_end_predict = datetime.now()

    # Validation curves
    Evaluator.plot_validation_curves(model_name, history)
    Evaluator.print_validation_report(history)

    # Experimental result
    Evaluator.calculate_measure(best_model, x_test, y_test)

    # Save confusion matrix
    Evaluator.plot_confusion_matrix(model_name,
                                    y_test,
                                    y_pred,
                                    title='Confusion matrix',
                                    normalize=True)
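    # Note: predict() returns softmax probabilities here, so a confusion-matrix
    # helper has to reduce both arrays to class indices first. A sketch of the
    # usual reduction (assuming sklearn; plot_confusion_matrix may differ):
    #
    #     import numpy as np
    #     from sklearn.metrics import confusion_matrix
    #
    #     cm = confusion_matrix(np.argmax(y_test, axis=1),
    #                           np.argmax(y_pred, axis=1))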

    # Print Training and predicting time
    print('Train time: ' + str(dt_end_train - dt_start_train))
    print('Predict time: ' + str(dt_end_predict - dt_start_predict))

    ''' Predict phase '''
    best_model = cnn_bilstm_att()
    best_model.load_weights('./trained_models/' + model_name + '.hdf5')
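    # load_weights restores weights only: the architecture returned by
    # cnn_bilstm_att() must match the checkpoint, and the model still has to
    # be compiled before evaluate() and the metrics below can run.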
    best_model.compile(optimizer=adam, loss='binary_crossentropy',
                       metrics=['accuracy', tf.keras.metrics.BinaryAccuracy(),
                                Evaluator.precision, Evaluator.recall, Evaluator.fmeasure])

    dt_start_predict = datetime.now()

    y_pred = best_model.predict(x_test, batch_size=64)

    dt_end_predict = datetime.now()

    # Validation curves
    Evaluator.plot_validation_curves(model_name, history, type='binary')
    Evaluator.print_validation_report(history)

    # Experimental result
    result = best_model.evaluate(x_test, y_test, batch_size=64)
    result_dic = dict(zip(best_model.metrics_names, result))
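    # evaluate() returns the loss followed by each metric in compile order, so
    # zipping with metrics_names yields a dict like:
    #     {'loss': ..., 'accuracy': ..., 'binary_accuracy': ...,
    #      'precision': ..., 'recall': ..., 'fmeasure': ...}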

    print('\nAccuracy: {}\nBinary Accuracy: {}\n'
          'Precision: {}\nRecall: {}\nF1-Score: {}\n'
          .format(result_dic['accuracy'], result_dic['binary_accuracy'],
                  result_dic['precision'], result_dic['recall'], result_dic['fmeasure']))

    Evaluator.calculate_measure_binary(best_model, x_test, y_test)

    # Save confusion matrix
    Evaluator.plot_confusion_matrix(model_name,
                                    y_test,
                                    y_pred,
                                    title='Confusion matrix',
                                    normalize=False,
                                    classes=[0, 1])
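    # With binary_crossentropy the model outputs sigmoid probabilities; the
    # binary confusion matrix typically thresholds them at 0.5, e.g. (sketch):
    #
    #     y_pred_labels = (y_pred > 0.5).astype(int)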