Example #1
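# Checkpoint callback: keep only the best checkpoint, judged by validation loss.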
model_checkpoint_callback = ModelCheckpoint(filepath=checkpoint_filepath,
                                            monitor='val_loss',
                                            save_best_only=True,
                                            mode='auto')

# Compile with categorical cross-entropy plus the custom Evaluator metrics.
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=[
                  'accuracy',
                  tf.keras.metrics.CategoricalAccuracy(), Evaluator.precision,
                  Evaluator.recall, Evaluator.fmeasure
              ])

# Train with an 11% validation split; the checkpoint callback tracks the best epoch.
history = model.fit(x_train,
                    y_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=0.11,
                    callbacks=[model_checkpoint_callback])

model.load_weights(checkpoint_filepath)  # Reload the best checkpoint (the author notes this raises an error in a Windows environment)

# Validation curves
Evaluator.plot_validation_curves(model_name, history)

# Predict on the test set.
y_pred = model.predict(x_test, batch_size=64)

# Experiment result
Evaluator.calculate_measure(model, x_test, y_test)
Evaluator.plot_confusion_matrix(model_name, y_test, y_pred)
Evaluator.plot_roc_curves(model_name, y_test, y_pred)
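Note: both examples pass Evaluator.precision, Evaluator.recall and Evaluator.fmeasure to compile() as custom metrics, but the Evaluator helpers themselves are not part of the snippet. A minimal sketch of compatible batch-wise Keras metric functions, assuming the classic clipped true-positive formulation (the actual Evaluator implementation may differ):

from tensorflow.keras import backend as K

def precision(y_true, y_pred):
    # Batch-wise precision: true positives over predicted positives.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    # Batch-wise recall: true positives over actual positives.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def fmeasure(y_true, y_pred):
    # F1 score: harmonic mean of the batch-wise precision and recall above.
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())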
Example #2
    best_model.compile(optimizer=adam,
                       loss='categorical_crossentropy',
                       metrics=[
                           'accuracy',
                           tf.keras.metrics.CategoricalAccuracy(),
                           Evaluator.precision, Evaluator.recall,
                           Evaluator.fmeasure
                       ])

    dt_start_predict = datetime.now()

    y_pred = best_model.predict(x_test, batch_size=64)

    dt_end_predict = datetime.now()

    # Validation curves
    Evaluator.plot_validation_curves(model_name, history)
    Evaluator.print_validation_report(history)

    # Experimental result
    Evaluator.calculate_measure(best_model, x_test, y_test)

    # Save confusion matrix
    Evaluator.plot_confusion_matrix(model_name,
                                    y_test,
                                    y_pred,
                                    title='Confusion matrix',
                                    normalize=True)

    # Print training and prediction times; dt_start_train / dt_end_train are set
    # around the fit() call earlier in the function (not shown in this snippet).
    print('Train time: ' + str(dt_end_train - dt_start_train))
    print('Predict time: ' + str(dt_end_predict - dt_start_predict))

    ''' Predict phase '''
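    # Rebuild the network architecture and load the weights saved during training.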
    best_model = cnn_bilstm_att()
    best_model.load_weights('./trained_models/' + model_name + '.hdf5')
    best_model.compile(optimizer=adam, loss='binary_crossentropy',
                       metrics=['accuracy', tf.keras.metrics.BinaryAccuracy(),
                                Evaluator.precision, Evaluator.recall, Evaluator.fmeasure])

    dt_start_predict = datetime.now()

    y_pred = best_model.predict(x_test, batch_size=64)

    dt_end_predict = datetime.now()

    # Validation curves
    Evaluator.plot_validation_curves(model_name, history, type='binary')
    Evaluator.print_validation_report(history)

    # Experimental result
    result = best_model.evaluate(x_test, y_test, batch_size=64)
    result_dic = dict(zip(best_model.metrics_names, result))

    print('\nAccuracy: {}\nBinary_Accuracy: {}\n'
          'Precision: {}\nRecall: {}\nF1-Score: {}\n'
          .format(result_dic['accuracy'], result_dic['binary_accuracy'],
                  result_dic['precision'], result_dic['recall'], result_dic['fmeasure']))

    Evaluator.calculate_measure_binary(best_model, x_test, y_test)

    # Save confusion matrix
    Evaluator.plot_confusion_matrix(model_name,
                                    y_test,
                                    y_pred,
                                    title='Confusion matrix',
                                    normalize=False,
                                    classes=[0, 1])
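Note: Example #2 passes the raw probability outputs of predict() straight to Evaluator.plot_confusion_matrix. If that helper expects hard 0/1 labels rather than probabilities (its implementation is not shown here), the predictions can be thresholded first; a minimal sketch assuming a 0.5 cutoff, with scikit-learn's confusion_matrix as a stand-in:

import numpy as np
from sklearn.metrics import confusion_matrix

# Hypothetical post-processing; Evaluator may already threshold internally.
y_pred_labels = (np.asarray(y_pred) > 0.5).astype('int32').ravel()
y_true_labels = np.asarray(y_test).astype('int32').ravel()
print(confusion_matrix(y_true_labels, y_pred_labels, labels=[0, 1]))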