import numpy as np

def test2results(model):
    # per-case slice ranges within the TEST 2 volume
    frames = [(0, 128), (128, 256), (256, 261), (261, 268),
              (268, 275), (275, 282), (282, 289)]
    test2resarray = []
    print("\nTEST 2 RESULTS: ")
    for x, y in frames:
        test2_pred = model.predict(res2[x:y], batch_size=4)
        test2_result = np.zeros(test2_pred.shape)
        # print(test2_result.shape)  # debug
        test2_result[test2_pred > 0.5] = 1  # binarise at 0.5
        test2resarray.append(test2_result)
        a = test2_result
        b = test2_y1[x:y]
        c = test2_y2[x:y]
        d = test2_y3[x:y]
        print("")
        print(dc(a, b))
        print(dc(a, c))
        print(dc(a, d))
        print(recall(a, b))
        print(recall(a, c))
        print(recall(a, d))
        print(precision(a, b))
        print(precision(a, c))
        print(precision(a, d))
def test1results(model):
    # per-case slice ranges within the TEST 1 volume
    frames = [(0, 128), (128, 256), (256, 261), (261, 266),
              (266, 315), (315, 364), (364, 492), (492, 620)]
    test1resarray = []
    for x, y in frames:
        test1_pred = model.predict(res1[x:y], batch_size=4)
        test1_result = np.zeros(test1_pred.shape)
        # print(test1_result.shape)  # debug
        test1_result[test1_pred > 0.5] = 1  # binarise at 0.5
        test1resarray.append(test1_result)
        a = test1_result
        b = test1_y1[x:y]
        c = test1_y2[x:y]
        d = test1_y3[x:y]
        print("")
        print(dc(a, b))
        print(dc(a, c))
        print(dc(a, d))
        print(recall(a, b))
        print(recall(a, c))
        print(recall(a, d))
        print(precision(a, b))
        print(precision(a, c))
        print(precision(a, d))
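# Both test functions above score each binarised prediction against three
# reference annotations (test*_y1..test*_y3) with dc/precision/recall helpers
# that are not defined in this excerpt. Minimal NumPy sketches of those three
# helpers, assuming medpy-style semantics (the project may import them from
# medpy.metric.binary instead):
def dc(result, reference):
    # Dice coefficient: 2|A n B| / (|A| + |B|)
    result, reference = np.asarray(result).astype(bool), np.asarray(reference).astype(bool)
    denom = np.count_nonzero(result) + np.count_nonzero(reference)
    return 2.0 * np.count_nonzero(result & reference) / denom if denom else 0.0

def precision(result, reference):
    # TP / (TP + FP): fraction of predicted voxels that are correct
    result, reference = np.asarray(result).astype(bool), np.asarray(reference).astype(bool)
    predicted = np.count_nonzero(result)
    return np.count_nonzero(result & reference) / predicted if predicted else 0.0

def recall(result, reference):
    # TP / (TP + FN): fraction of reference voxels that were recovered
    result, reference = np.asarray(result).astype(bool), np.asarray(reference).astype(bool)
    actual = np.count_nonzero(reference)
    return np.count_nonzero(result & reference) / actual if actual else 0.0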
'''
def calculate_metrics(mask1, mask2):
    true_positives = metric.obj_tpr(mask1, mask2)
    false_positives = metric.obj_fpr(mask1, mask2)
    dc = metric.dc(mask1, mask2)
    hd = metric.hd(mask1, mask2)
    precision = metric.precision(mask1, mask2)
    recall = metric.recall(mask1, mask2)
    ravd = metric.ravd(mask1, mask2)
    assd = metric.assd(mask1, mask2)
    asd = metric.asd(mask1, mask2)
    return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
'''
def calculate_validation_metrics(probas_pred, image_gt, class_labels=None, num_classes=5):
    classes = np.arange(probas_pred.shape[-1])
    # determine valid classes (those that actually appear in image_gt);
    # some images may miss some classes
    classes = [c for c in classes if np.sum(image_gt == c) != 0]
    image_pred = probas_pred.argmax(-1)
    assert image_gt.shape == image_pred.shape
    accuracy = np.sum(image_gt == image_pred) / float(image_pred.size)

    class_metrics = {}
    y_true = convert_seg_flat_to_binary_label_indicator_array(
        image_gt.ravel(), num_classes).astype(int)[:, classes]
    y_pred = probas_pred.transpose(3, 0, 1, 2).reshape(num_classes, -1).transpose(1, 0)[:, classes]
    scores = roc_auc_score(y_true, y_pred, average=None)  # one AUC per class
    for i, c in enumerate(classes):
        true_positives = metric.obj_tpr(image_gt == c, image_pred == c)
        false_positives = metric.obj_fpr(image_gt == c, image_pred == c)
        dc = metric.dc(image_gt == c, image_pred == c)
        hd = metric.hd(image_gt == c, image_pred == c)
        precision = metric.precision(image_gt == c, image_pred == c)
        recall = metric.recall(image_gt == c, image_pred == c)
        ravd = metric.ravd(image_gt == c, image_pred == c)
        assd = metric.assd(image_gt == c, image_pred == c)
        asd = metric.asd(image_gt == c, image_pred == c)
        label = c
        if class_labels is not None and c in class_labels.keys():
            label = class_labels[c]
        class_metrics[label] = {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'DICE\t\t': dc,
            'Hausdorff dist': hd,
            'precision\t': precision,
            'recall\t\t': recall,
            'rel abs vol diff': ravd,
            'avg surf dist symm': assd,
            'avg surf dist\t': asd,
            'roc_auc\t\t': scores[i]
        }
    return accuracy, class_metrics
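# convert_seg_flat_to_binary_label_indicator_array is a project helper that is
# not shown in this excerpt; from its usage above it appears to one-hot encode
# a flat integer label vector into an (N, num_classes) indicator array. A
# minimal sketch under that assumption:
def convert_seg_flat_to_binary_label_indicator_array(seg_flat, num_classes):
    # (N,) integer labels -> (N, num_classes) one-hot indicator matrix
    return np.eye(num_classes)[np.asarray(seg_flat).astype(int)]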
def calculate_metrics(mask1, mask2):
    try:
        true_positives = metric.obj_tpr(mask1, mask2)
        if mask2.sum() != 0:
            false_positives = metric.obj_fpr(mask1, mask2)
        else:
            false_positives = 0
        # surface-distance metrics are undefined for empty masks and are
        # expensive to compute, so they are stubbed out with a sentinel value
        if mask1.sum() == 0 or mask2.sum() == 0:
            hd = 999
            assd = 999
            asd = 999
        else:
            hd = 999    # metric.hd(mask1, mask2)
            assd = 999  # metric.assd(mask1, mask2)
            asd = 999   # metric.asd(mask1, mask2)
        dc = metric.dc(mask1, mask2)
        precision = metric.precision(mask1, mask2)
        recall = metric.recall(mask1, mask2)
        ravd = metric.ravd(mask1, mask2)
        return true_positives, false_positives, dc, hd, precision, recall, ravd, assd, asd
    except Exception:
        # any failure yields an all-sentinel result so the caller can filter it out
        return 99999, 99999, 99999, 99999, 99999, 99999, 99999, 99999, 99999
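# Quick usage sketch for calculate_metrics (mask shapes and contents are made
# up for illustration):
pred = np.zeros((64, 64), dtype=np.uint8)
pred[20:40, 20:40] = 1
gt = np.zeros((64, 64), dtype=np.uint8)
gt[22:42, 22:42] = 1
tp, fp, dice, hd, prec, rec, ravd, assd, asd = calculate_metrics(pred, gt)
print('dice =', dice, '; precision =', prec, '; recall =', rec)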
except:
    cv2.imshow('Image',
               utils.array_to_img(seg_lesion, clip=True).astype('uint8'))

# lesion ########
seg_lesion_final = np.copy(seg_lesion_arr[k])
y_true_lesion = np.where(mask == 2, 1., 0.)  # class 2 = lesion
seg_lesion_tmp = np.where(seg_lesion_final == 2, 1., 0.)
dice_lesion[n] = metric.dc(seg_lesion_tmp, y_true_lesion)
# note: the original assigned metric.sensitivity to precision_lesion and
# vice versa; the calls are matched to their variables here
precision_lesion[n] = metric.precision(seg_lesion_tmp, y_true_lesion)
sensitivity_lesion[n] = metric.sensitivity(seg_lesion_tmp, y_true_lesion)
#############################################################################################
print('dice=', dice_lesion[n], ' ; precision=', precision_lesion[n],
      ' ; sensitivity=', sensitivity_lesion[n])
if isDbg:
    if (n % dbg_step == 0) or dice_lesion[n] < min_dice_dbg:
        # Original Image
        img = utils.dbg_orig_img(data_path, filename)
        cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # liver
        if Config.num_classes > 1:
            img = utils.apply_mask(img,
j = begin_idx
for k in range(len(val_img_paths)):
    curr_sum_ground_truth = np.sum(y_true_liver[k])  # float64
    curr_sum_prediction = np.sum(pred_liver[k])
    curr_sum_intersection = np.sum(pred_liver[k] * y_true_liver[k])
    # GLOBAL: accumulate over the whole validation set
    sum_ground_truth += curr_sum_ground_truth
    sum_prediction += curr_sum_prediction
    sum_intersection += curr_sum_intersection
    # LOCAL: per-slice scores
    dice_liver_local[j] = metric.dc(pred_liver[k], y_true_liver[k])
    # note: as above, the original swapped precision and sensitivity;
    # the calls are matched to their variables here
    precision_liver_local[j] = metric.precision(pred_liver[k], y_true_liver[k])
    sensitivity_liver_local[j] = metric.sensitivity(pred_liver[k], y_true_liver[k])
    if isDbg:
        # Original
        if (j % dbg_step == 0) or dice_liver_local[j] < min_dice_dbg:
            img = utils.dbg_orig_img(data_path, val_img_paths[k])
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            # Liver - gt
            img = utils.apply_mask(img, mask_arr[k], (0, 0, 255), mask_idx=1)
            img = utils.apply_mask(img, mask_arr[k], (0, 0, 255), mask_idx=2)
            cv2.putText(img, '+ GT', (200, 50), cv2.FONT_HERSHEY_SIMPLEX,
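# The GLOBAL accumulators above make a dataset-level Dice available once the
# loop finishes, complementing the per-slice scores. A minimal sketch, assuming
# the accumulator names from the fragment (the eps guard is an added assumption):
eps = 1e-8  # avoid division by zero when both masks are empty
global_dice = 2.0 * sum_intersection / (sum_ground_truth + sum_prediction + eps)
print('global liver dice =', global_dice)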
def wprecision(x):
    # unpack a single argument tuple so precision() can be used with map-style APIs
    return precision(*x)
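# A plausible use for such a wrapper is scoring many (result, reference) pairs
# in parallel, since Pool.map passes a single argument per call. The mask lists
# here are hypothetical:
from multiprocessing import Pool

pairs = list(zip(pred_masks, gt_masks))  # hypothetical lists of binary masks
with Pool() as pool:
    precisions = pool.map(wprecision, pairs)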
print("Best dc: "+str(bestdc)) model.load_weights('/content/drive/My Drive/Colab/results/best/final.h5') #TEST1 dcoefarr=[] val_pred = model.predict(test1_X, batch_size=4) score = model.evaluate(test1_X, test1_y) print(score) val_result = np.zeros(val_pred.shape) val_result[val_pred>0.5] = 1 dcoef = dc(val_result,test1_y) prec = precision(val_result,test1_y) rec = recall(val_result,test1_y) dcoefarr.append(dcoef) print ("Dice: ",dcoef) print("Precision: ",prec) print("recall: ",rec) #TEST2 dcoefarr=[] val_pred = model.predict(test2_X, batch_size=4) score = model.evaluate(test2_X, test2_y) print(score) val_result = np.zeros(val_pred.shape)