def dump(self):
    """Render per-class and macro/micro-averaged evaluation metrics
    as a pretty-printed table.

    Float cells are displayed as percentages with two decimals.
    """
    metric_names = ('generality', 'accuracy', 'precision', 'recall',
                    'fallout', 'f_score')
    # Header row: blank corner cell followed by the metric names.
    table = [[''] + list(metric_names)]
    # One row per class: label, then each metric evaluated for that class.
    for label in self.classes():
        table.append([label] + [getattr(self, m)(label) for m in metric_names])
    # Averaging rows: macro_<metric>() / micro_<metric>() accessors.
    for avg in ('macro', 'micro'):
        table.append([avg + ' average'] +
                     [getattr(self, '%s_%s' % (avg, m))() for m in metric_names])
    # Format every float cell as a percentage; leave labels untouched.
    for row in table:
        row[:] = ['%.2f%%' % (cell * 100) if isinstance(cell, float) else cell
                  for cell in row]
    return _pretty_print(table)
def drawConfusionMatrix(self):
    """Render the confusion matrix as a pretty-printed table.

    The table holds a header of class labels, one row of counts per
    class, and a final 'TNR/TPR' summary row.
    """
    # Header: blank corner cell followed by the class labels.
    table = [[''] + list(self._labels)]
    # One row per class: its label followed by that row of the matrix.
    # NOTE(review): assumes each self._matrix[i] is a list — confirm.
    for i, counts in enumerate(self._matrix):
        table.append([self._labels[i]] + counts)
    # Summary row.  BUG FIX: the label reads 'TNR/TPR', so emit
    # specificity (TNR) before sensitivity (TPR); the original appended
    # sensitivity first, mismatching the label order.
    # (Also dropped the pointless single-element `for prefix in [...]` loop.)
    table.append(['TNR/TPR'] + [self.specificity(), self.sensitivity()])
    return _pretty_print(table)
def dump(self):
    """Tabulate every per-class metric plus the macro and micro
    averages, percent-format the numeric cells, and pretty-print.

    NOTE(review): this duplicates an identical ``dump`` defined
    earlier; if both sit in one class, this later definition wins.
    """
    names = ["generality", "accuracy", "precision", "recall",
             "fallout", "f_score"]
    # Start with the header row.
    table = [[""] + names]
    # Per-class rows: resolve each metric accessor by name.
    for cls in self.classes():
        values = [getattr(self, name)(cls) for name in names]
        table.append([cls] + values)
    # Averaging rows use the macro_*/micro_* no-argument accessors.
    for kind in ("macro", "micro"):
        values = [getattr(self, "%s_%s" % (kind, name))() for name in names]
        table.append([kind + " average"] + values)
    # Build the display table, turning floats into percentage strings.
    formatted = []
    for row in table:
        formatted.append(["%.2f%%" % (cell * 100)
                          if isinstance(cell, float) else cell
                          for cell in row])
    return _pretty_print(formatted)