Example #1
    def print_stats(self, ground_truths, predictions, name):
        # Row-normalised confusion matrix (rows = ground truth) plus accuracy.
        cm = confusion_matrix(ground_truths, predictions, labels=self.classes)
        print("###" + name + " Results###\n")
        # print_cm(cm, self.classes)
        # print("\n\n")
        print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)
        print("\n\n")
        print("Accuracy score: ", accuracy_score(ground_truths, predictions), "\n\n")
        # print("Report")
        # print(classification_report(ground_truths, predictions))
        print("#################################################################end###\n\n\n")
    def model_eval(self, mode="sample"):
        """Evaluate on the Val split, either per feature file ("sample") or per clip ("clip")."""
        if mode not in {"sample", "clip"}:
            raise ValueError("Evaluation mode not allowed.")
        predictions = []
        ground_truths = []
        stats = []
        if mode == "sample":
            # Sample mode: one prediction per feature file in the Val split.
            files = glob.glob("/user/vlongobardi/late_feature/" +
                              self.feature_name + "/Val/*/*")
            val_gen = self.data_gen(files, 1, "eval")
            for batch in val_gen:
                ground_truth = self.lb.inverse_transform(batch[1])[0]
                pred = self.lb.inverse_transform(self.model.predict(
                    batch[0]))[0]
                predictions.append(pred)
                ground_truths.append(ground_truth)
        else:
            # Clip mode: derive file paths from the full-wav feature folder,
            # then classify each clip as a whole via clip_classification.
            files = glob.glob(
                "/user/vlongobardi/late_feature/emobase2010_full_wav/Val/*/*")
            files = [
                file.replace("emobase2010_full_wav",
                             self.feature_name).split(".")[0] for file in files
            ]
            for file in files:
                # The ground-truth label is the parent directory name.
                ground_truths.append(file.split("/")[-2])
                predictions.append(self.clip_classification(file))

        cm = confusion_matrix(ground_truths, predictions, labels=self.classes)
        stats.append(cm)
        stats.append(
            np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],
                      decimals=2))
        stats.append(accuracy_score(ground_truths, predictions))
        stats.append(classification_report(ground_truths, predictions))

        print("###Results###")
        for index, elem in enumerate(stats):
            if index < 2:
                print_cm(elem, self.classes)
            elif index == 2:
                print("Accuracy score: ", elem)
            else:
                print("Report")
                print(elem)
            print("\n\n")