def N_Abcd(predicted, actual):
    predicted_txt = []
    # abcd = Abcd(db='Training', rx='Testing')
    global The

    def isDef(x):
        # use The.option.threshold for cart, rf and where!!
        return "Defective" if x > 0 else "Non-Defective"

    for data in predicted:
        predicted_txt += [isDef(data)]  # this is for defect prediction, binary classes
        # predicted_txt.append(data)    # for multiple classes, just use it
    score = sk_abcd(predicted_txt, actual)
    return score
import numpy as np
from sklearn.metrics import roc_auc_score


def _Abcd(predicted, actual):
    predicted_txt = []
    # abcd = Abcd(db='Training', rx='Testing')
    global The

    def isDef(x):
        # use The.option.threshold for cart, rf and where!!
        return "Defective" if x >= The.option.threshold else "Non-Defective"

    for data in predicted:
        # predicted_txt += [isDef(data)]  # this is for defect prediction, binary classes
        predicted_txt.append(data)        # for multiple classes, just use it
    score = sk_abcd(predicted_txt, actual)

    # if The.option.tunedobjective == 6:  # auc
    actual_binary = np.array([1 if i == "Delay" else 0 for i in actual])
    predicted_binary = np.array([1 if i == "Delay" else 0 for i in predicted])
    score[0].append(int(roc_auc_score(actual_binary, predicted_binary) * 100))
    score[1].append(int(roc_auc_score(actual_binary, predicted_binary) * 100))
    return score
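# A minimal sketch (the label values below are assumptions, not project data)
# of the AUC step in _Abcd: text labels are binarized around the "Delay" class
# and scored with sklearn's roc_auc_score, then scaled to a 0-100 integer.
import numpy as np
from sklearn.metrics import roc_auc_score

actual = ["Delay", "OnTime", "Delay", "OnTime"]    # "OnTime" is an assumed second class
predicted = ["Delay", "Delay", "Delay", "OnTime"]

actual_binary = np.array([1 if i == "Delay" else 0 for i in actual])
predicted_binary = np.array([1 if i == "Delay" else 0 for i in predicted])

auc_pct = int(roc_auc_score(actual_binary, predicted_binary) * 100)
print(auc_pct)  # 75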
def callModel(self, clf):
    predict_result = clf.predict(self.test_X)
    predict_pro = clf.predict_proba(self.test_X)
    scores = sk_abcd(predict_result, self.test_Y, predict_pro[:, 1])
    return scores[-1]
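# A minimal sketch (classifier and data are illustrative, not from the project)
# of why predict_pro[:, 1] is passed above: predict_proba returns one column per
# class, and column 1 holds the positive-class probability in a binary problem.
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

clf = LogisticRegression().fit(X, y)
proba = clf.predict_proba(X)   # shape (n_samples, 2)
positive_scores = proba[:, 1]  # probability of class 1, as passed to sk_abcd above
print(positive_scores)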
def callModel(self, clf, threshold):
    # variant that passes an explicit decision threshold instead of class probabilities
    predict_result = clf.predict(self.test_X)
    # predict_pro = clf.predict_proba(self.test_X)
    scores = sk_abcd(predict_result, self.test_Y, threshold)
    return scores[-1]
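# A hypothetical harness (the Holder class, data, split, and 0.5 cutoff are
# assumptions; sk_abcd must be importable from the project for callModel to run)
# showing how the threshold-taking callModel variant above might be driven.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

class Holder:
    # minimal stand-in for the object that owns test_X / test_Y
    def __init__(self, test_X, test_Y):
        self.test_X = test_X
        self.test_Y = test_Y

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 4))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

holder = Holder(X[40:], y[40:])
clf = RandomForestClassifier(n_estimators=20, random_state=0).fit(X[:40], y[:40])

# callModel is called as a plain function here, with holder standing in for self
print(callModel(holder, clf, threshold=0.5))  # last entry of the sk_abcd score list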