Example #1
0
    def evaluate(self, X_test, Y_test, Y_test_classes):
        """Evaluate the fitted model on a held-out test set.

        Reports the Keras test accuracy, then decodes the encoded
        predictions back into class labels and prints accuracy and a
        confusion matrix via the project's Evaluator.

        Args:
            X_test: test features in the format expected by ``self.model``.
            Y_test: encoded test targets (same encoding used at fit time).
            Y_test_classes: original (un-encoded) class labels, used for
                the Evaluator reports.

        Raises:
            RuntimeError: if no model has been loaded or fitted yet.
        """
        if not self.model:
            # RuntimeError is a subclass of Exception, so existing
            # `except Exception` callers keep working.
            raise RuntimeError("Load or fit new model first")

        # Keras evaluate() returns (loss, accuracy); the loss is unused here.
        _, acc = self.model.evaluate(X_test, Y_test, batch_size=3)
        print("Test accuracy:", acc)

        evaluator = Evaluator()
        predictions_encoded = self.model.predict(X_test)
        # argmax selects the most probable class index per sample;
        # inverse_transform maps each index back to its original label.
        predictions = self.lb.inverse_transform(
            [np.argmax(pred) for pred in predictions_encoded])
        evaluator.accuracy(Y_test_classes, predictions)
        evaluator.confusion_matrix(Y_test_classes, predictions)
Example #2
0
class Cross_Validation:
    """k-fold cross-validation driver for the project's KNN classifier."""

    def __init__(self):
        self.knn_algo = KnnAlgorithm()
        self.evaluator = Evaluator()

    def cross_validation(self, dataset, k, params):
        """Pick the best hyper-parameter via nested k-fold cross-validation.

        The dataset is split into ``k`` contiguous folds; for each split,
        the remaining folds are handed to :meth:`parameter_tuning` and the
        parameter with the highest inner-validation accuracy wins overall.

        Args:
            dataset: 2-D numpy array; the last column is assumed to hold
                the labels (see parameter_tuning) — TODO confirm upstream.
            k: number of outer folds (must be >= 2).
            params: iterable of candidate hyper-parameter values.

        Returns:
            The parameter value that achieved the best inner accuracy.
        """
        fold_size = len(dataset) // k  # integer division replaces m.floor
        best_param = 1
        max_a = 0
        for i in range(k):
            # NOTE(review): the held-out outer fold (folds[1]) is never
            # evaluated; only the inner tuning accuracy is compared.
            folds = np.split(
                dataset,
                [i * fold_size, i * fold_size + fold_size,
                 len(dataset)])
            training = np.concatenate((folds[0], folds[2]))
            curr_p, acc = self.parameter_tuning(training, params, k - 1)
            if max_a < acc:
                max_a = acc
                best_param = curr_p
        print(max_a)
        return best_param

    def parameter_tuning(self, training, params, k):
        """Grid-search ``params`` with k-fold validation over ``training``.

        Args:
            training: 2-D numpy array; the last column holds the labels.
            params: iterable of candidate hyper-parameter values.
            k: number of inner validation folds.

        Returns:
            Tuple ``(best_param, best_accuracy)`` over all folds/params.
        """
        fold_size = len(training) // k  # integer division replaces m.floor
        max_a = 0
        best_param = 1
        for i in range(k):
            folds = np.split(
                training,
                [i * fold_size, i * fold_size + fold_size,
                 len(training)])
            validation_data = folds[1]
            training_data = np.concatenate((folds[0], folds[2]))
            columns = int(validation_data.shape[1])

            # Split features from the label column (the last one); the
            # trailing hsplit piece is empty and is discarded.
            sections = [columns - 1, columns]
            val_data, ground_truth, _ = np.hsplit(validation_data, sections)
            for param in params:
                pred = self.knn_algo.predict_multiple(param, training_data,
                                                      val_data)
                cm = self.evaluator.get_cm(pred, ground_truth)
                accuracy = self.evaluator.accuracy(cm)
                if accuracy > max_a:
                    max_a = accuracy
                    best_param = param

        return best_param, max_a
Example #3
0
# Hyper-parameters for the custom text-classification model.
DNN_UNITS = 256
OUTPUT_CLASSES = len(get_classes(data))  # one output unit per distinct class
DROPOUT_RATE = 0.2
NB_EPOCHS = 7

# Build the model. VOCAB_LENGTH, EMB_DIM and CNN_FILTERS are presumably
# defined earlier in this script — confirm before running standalone.
custom_model = CustomIMapModel(
    vocabulary_size=VOCAB_LENGTH,
    embedding_dimensions=EMB_DIM,
    cnn_filters=CNN_FILTERS,
    dnn_units=DNN_UNITS,
    model_output_classes=OUTPUT_CLASSES,
    dropout_rate=DROPOUT_RATE,
)

# Sparse categorical loss/metric: targets are integer class indices,
# not one-hot vectors.
custom_model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="adam",
    metrics=["sparse_categorical_accuracy"],
)

custom_model.fit(train_data, epochs=NB_EPOCHS)

# Decode predictions back to label space and report metrics.
results_predicted = custom_model.predict(test_data)
evaluator = Evaluator()
# argmax picks the most probable class index per sample; the label
# binarizer `lb` maps indices back to the original class labels.
predictions = lb.inverse_transform(
    [np.argmax(pred) for pred in results_predicted])
test_classes = lb.inverse_transform(y_test)
evaluator.accuracy(test_classes, predictions)
evaluator.classification_report(test_classes, predictions)
evaluator.confusion_matrix(test_classes, predictions)