def truePositiveRate(self, classIndex: int):
    """Return the true-positive rate (recall) for ``classIndex``.

    Computed from row ``classIndex`` of the confusion matrix: the diagonal
    cell divided by the row total. ``Utils.division`` handles a zero total.
    """
    row = self.m_ConfusionMatrix[classIndex]
    correct = row[classIndex]
    total = sum(row[j] for j in range(self.m_NumClasses))
    return Utils.division(correct, total)
def precision(self, classIndex: int):
    """Return the precision for ``classIndex``.

    Computed from column ``classIndex`` of the confusion matrix: the diagonal
    cell divided by the column total. ``Utils.division`` handles a zero total.
    """
    column = [self.m_ConfusionMatrix[i][classIndex]
              for i in range(self.m_NumClasses)]
    correct = column[classIndex]
    return Utils.division(correct, sum(column))
def falsePositiveRate(self, classIndex: int):
    """Return the false-positive rate for ``classIndex``.

    Over all rows other than ``classIndex`` (instances not of this class),
    divide the mass predicted as ``classIndex`` by the total mass of those
    rows. ``Utils.division`` handles a zero total.
    """
    incorrect = 0
    total = 0
    for i in range(self.m_NumClasses):
        if i == classIndex:
            continue  # skip the true-class row; FPR only counts other classes
        row = self.m_ConfusionMatrix[i]
        incorrect += row[classIndex]
        total += sum(row[j] for j in range(self.m_NumClasses))
    return Utils.division(incorrect, total)
def matthewsCorrelationCoefficient(self, classIndex: int):
    """Return the Matthews correlation coefficient for ``classIndex``.

    MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)), with the
    zero-denominator case delegated to ``Utils.division``.
    """
    tp = self.numTruePositives(classIndex)
    tn = self.numTrueNegatives(classIndex)
    fp = self.numFalsePositives(classIndex)
    fn = self.numFalseNegatives(classIndex)
    numerator = tp * tn - fp * fn
    denominator = math.sqrt(
        (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return Utils.division(numerator, denominator)
def weightedAreaUnderPRC(self):
    """Return the area under the precision-recall curve, averaged over all
    classes and weighted by each class's instance count (confusion-matrix
    row sum). ``Utils.division`` handles an empty matrix.
    """
    counts = [sum(self.m_ConfusionMatrix[i][j]
                  for j in range(self.m_NumClasses))
              for i in range(self.m_NumClasses)]
    weighted_sum = 0
    for i, count in enumerate(counts):
        # Metric is computed even for empty classes (matching the original);
        # only classes with instances contribute to the weighted sum.
        area = self.areaUnderPRC(i)
        if count > 0:
            weighted_sum += area * count
    return Utils.division(weighted_sum, sum(counts))
def weightedMatthewsCorrelation(self):
    """Return the Matthews correlation coefficient, averaged over all
    classes and weighted by each class's instance count (confusion-matrix
    row sum). ``Utils.division`` handles an empty matrix.
    """
    counts = [sum(self.m_ConfusionMatrix[i][j]
                  for j in range(self.m_NumClasses))
              for i in range(self.m_NumClasses)]
    weighted_sum = 0
    for i, count in enumerate(counts):
        # Metric is computed even for empty classes (matching the original);
        # only classes with instances contribute to the weighted sum.
        mcc = self.matthewsCorrelationCoefficient(i)
        if count > 0:
            weighted_sum += mcc * count
    return Utils.division(weighted_sum, sum(counts))
def weightedFMeasure(self):
    """Return the F-measure, averaged over all classes and weighted by each
    class's instance count (confusion-matrix row sum). ``Utils.division``
    handles an empty matrix.
    """
    counts = [sum(self.m_ConfusionMatrix[i][j]
                  for j in range(self.m_NumClasses))
              for i in range(self.m_NumClasses)]
    weighted_sum = 0
    for i, count in enumerate(counts):
        # Metric is computed even for empty classes (matching the original);
        # only classes with instances contribute to the weighted sum.
        f = self.fMeasure(i)
        if count > 0:
            weighted_sum += f * count
    return Utils.division(weighted_sum, sum(counts))
def weightedPrecision(self):
    """Return the precision, averaged over all classes and weighted by each
    class's instance count (confusion-matrix row sum). ``Utils.division``
    handles an empty matrix.
    """
    counts = [sum(self.m_ConfusionMatrix[i][j]
                  for j in range(self.m_NumClasses))
              for i in range(self.m_NumClasses)]
    weighted_sum = 0
    for i, count in enumerate(counts):
        # Metric is computed even for empty classes (matching the original);
        # only classes with instances contribute to the weighted sum.
        p = self.precision(i)
        if count > 0:
            weighted_sum += p * count
    return Utils.division(weighted_sum, sum(counts))
def weightedTruePositiveRate(self):
    """Return the true-positive rate (recall), averaged over all classes and
    weighted by each class's instance count (confusion-matrix row sum).
    ``Utils.division`` handles an empty matrix.
    """
    counts = [sum(self.m_ConfusionMatrix[i][j]
                  for j in range(self.m_NumClasses))
              for i in range(self.m_NumClasses)]
    weighted_sum = 0
    for i, count in enumerate(counts):
        # Metric is computed even for empty classes (matching the original);
        # only classes with instances contribute to the weighted sum.
        tpr = self.truePositiveRate(i)
        if count > 0:
            weighted_sum += tpr * count
    return Utils.division(weighted_sum, sum(counts))