Example #1
import numpy as np

    def evaluate(self, X_test, Y_test, Y_test_classes):
        if not self.model:
            raise Exception("Load or fit a model first")

        # Built-in Keras evaluation: overall loss and accuracy on the test set
        score, acc = self.model.evaluate(X_test, Y_test, batch_size=3)
        print("Test accuracy:", acc)

        # Decode the model's probability vectors back to class labels
        # (argmax picks the most likely class index; the fitted label
        # encoder maps that index back to its original name)
        evaluator = Evaluator()
        predictions_encoded = self.model.predict(X_test)
        predictions = self.lb.inverse_transform(
            [np.argmax(pred) for pred in predictions_encoded])
        evaluator.accuracy(Y_test_classes, predictions)
        # evaluator.classification_report(Y_test_classes, predictions)
        evaluator.confusion_matrix(Y_test_classes, predictions)
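
Both examples delegate metric reporting to an `Evaluator` helper whose definition is not shown on this page. A minimal sketch of what such a class could look like, assuming it simply wraps `sklearn.metrics` (the method names match the calls above; the implementation itself is an assumption):

# Hypothetical reconstruction -- the real Evaluator is not shown here.
# Assumes scikit-learn; each method prints the corresponding metric
# from sklearn.metrics for true vs. predicted class labels.
from sklearn import metrics

class Evaluator:
    def accuracy(self, y_true, y_pred):
        # Fraction of labels predicted exactly right
        print("Accuracy:", metrics.accuracy_score(y_true, y_pred))

    def classification_report(self, y_true, y_pred):
        # Per-class precision, recall and F1
        print(metrics.classification_report(y_true, y_pred))

    def confusion_matrix(self, y_true, y_pred):
        # Rows are true classes, columns are predicted classes
        print(metrics.confusion_matrix(y_true, y_pred))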
Example #2
import numpy as np

# Hyperparameters for the classification head
DNN_UNITS = 256
OUTPUT_CLASSES = len(get_classes(data))
DROPOUT_RATE = 0.2
NB_EPOCHS = 7

custom_model = CustomIMapModel(
    vocabulary_size=VOCAB_LENGTH,
    embedding_dimensions=EMB_DIM,
    cnn_filters=CNN_FILTERS,
    dnn_units=DNN_UNITS,
    model_output_classes=OUTPUT_CLASSES,
    dropout_rate=DROPOUT_RATE,
)

# The "sparse" loss/metric variants take integer class indices as
# targets, so the labels do not need to be one-hot encoded
custom_model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="adam",
    metrics=["sparse_categorical_accuracy"],
)

custom_model.fit(train_data, epochs=NB_EPOCHS)

# Decode predicted probabilities and the encoded test labels back to
# class names before computing the evaluation metrics
results_predicted = custom_model.predict(test_data)
evaluator = Evaluator()
predictions = lb.inverse_transform(
    [np.argmax(pred) for pred in results_predicted])
test_classes = lb.inverse_transform(y_test)
evaluator.accuracy(test_classes, predictions)
evaluator.classification_report(test_classes, predictions)
evaluator.confusion_matrix(test_classes, predictions)
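
Both examples call `lb.inverse_transform` on argmax indices and, in Example #2, on `y_test`, which suggests `lb` is a fitted integer label encoder rather than a one-hot binarizer. A minimal sketch of how it might be set up, assuming `sklearn.preprocessing.LabelEncoder` (the raw label variables here are hypothetical):

# Hypothetical setup for `lb` -- not shown in the snippets above.
# LabelEncoder maps class names to integer indices; its
# inverse_transform maps argmax indices back to the original names.
from sklearn.preprocessing import LabelEncoder

lb = LabelEncoder()
y_train = lb.fit_transform(raw_train_labels)  # hypothetical, e.g. ["ham", "spam", ...]
y_test = lb.transform(raw_test_labels)        # hypothetical raw test labels

# After prediction, argmax indices decode back to class names:
# predictions = lb.inverse_transform([np.argmax(p) for p in results_predicted])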