Example 1
  def evaluate(self):
    # Guard with "is not None": truth-testing a non-empty numpy array raises ValueError.
    if self.X_development is not None:
      self.Y_development_predicted = self.classifier.predict(self.X_development)
    if self.X_test is not None:
      self.Y_test_predicted = self.classifier.predict(self.X_test)

    # Score the development split only; test predictions are kept for later use.
    self.accuracy, self.precision, self.recall, self.f1score = metrics(
        self.Y_development, self.Y_development_predicted, self.labels)
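All three examples delegate to a metrics(Y_true, Y_predicted, labels) helper that is not shown in the source. A minimal sketch of what such a helper could look like, assuming scikit-learn and macro-averaged scores restricted to the given labels (the helper's body and the averaging choice are assumptions, not taken from the original code):

    from sklearn.metrics import accuracy_score, precision_recall_fscore_support

    def metrics(Y_true, Y_predicted, labels):
        # Overall accuracy across all samples.
        accuracy = accuracy_score(Y_true, Y_predicted)
        # Precision/recall/F1 restricted to the requested labels, macro-averaged;
        # zero_division=0 suppresses warnings for classes with no predictions.
        precision, recall, f1score, _ = precision_recall_fscore_support(
            Y_true, Y_predicted, labels=labels, average='macro', zero_division=0)
        return accuracy, precision, recall, f1score

Passing a single-element list, as example 2 does with [label], makes the macro average collapse to that one class's scores.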
Example 2
    def classEvaluation(self, Y_test, Y_predicted, labels):
        """Print a per-class breakdown of precision, recall, and F-score."""
        print("\n~~~ Class Evaluation ~~~ \n")
        print("Class \t Precision \t Recall \t F-score")

        for label in labels:
            # Restricting metrics to a single label yields that class's scores;
            # the accuracy slot is not meaningful per class, so it is discarded.
            _, precision, recall, f1score = metrics(Y_test, Y_predicted, [label])
            print('{} \t {} \t\t {} \t\t {}'.format(label, round(precision, 3),
                                                    round(recall, 3),
                                                    round(f1score, 3)))
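Called on an instance of the surrounding class, the method prints one row per label. A hypothetical invocation (the evaluator object and the toy data are invented for illustration, and the output assumes a metrics helper like the sketch above):

    # Hypothetical usage with a tiny binary label set.
    evaluator.classEvaluation(Y_test=['pos', 'neg', 'pos'],
                              Y_predicted=['pos', 'neg', 'neg'],
                              labels=['pos', 'neg'])
    # Prints something like (tab spacing approximate):
    # Class    Precision    Recall    F-score
    # pos      1.0          0.5       0.667
    # neg      0.5          1.0       0.667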
Example 3
    def evaluate(self):
        # Requires "import numpy as np" at module level.
        self.Y_development_predicted = self.model.predict(self.X_development)

        # The model emits one probability per class; argmax picks the most
        # likely class index for each sample.
        self.Y_development_predicted = np.argmax(self.Y_development_predicted,
                                                 axis=1)
        # Map class indices back to their string labels.
        self.Y_development_predicted = [
            self.labels_dict_rev[int(i)]
            for i in list(self.Y_development_predicted)
        ]

        # The gold labels are one-hot encoded; decode them the same way.
        self.Y_development = np.argmax(self.Y_development, axis=1)
        self.Y_development = [
            self.labels_dict_rev[int(i)] for i in list(self.Y_development)
        ]

        self.accuracy, self.precision, self.recall, self.f1score = metrics(
            self.Y_development, self.Y_development_predicted, self.labels)
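The decoding step in example 3 can be exercised in isolation. A minimal sketch, assuming a labels_dict_rev mapping from class indices to label strings (the names mirror the example; the data is invented):

    import numpy as np

    # Hypothetical index-to-label mapping and one-hot gold labels.
    labels_dict_rev = {0: 'neg', 1: 'neu', 2: 'pos'}
    Y_development = np.array([[0, 0, 1],
                              [1, 0, 0],
                              [0, 1, 0]])

    # argmax along axis=1 recovers the class index of each one-hot row...
    indices = np.argmax(Y_development, axis=1)        # -> array([2, 0, 1])
    # ...and the reverse dictionary turns indices back into label strings.
    decoded = [labels_dict_rev[int(i)] for i in indices]
    print(decoded)                                    # ['pos', 'neg', 'neu']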