def compute_dice(data_name, patient):
    # Load ground-truth and predicted masks for one patient
    gt_dir = os.path.join("data", data_name, "gt_data")
    pred_dir = os.path.join("data", data_name, "segmentations")
    gt_mask = load_image(os.path.join(gt_dir, "%s.nii.gz" % patient),
                         tile_image=False)
    pred_mask = load_image(os.path.join(pred_dir, "%s.nii.gz" % patient),
                           tile_image=False)
    # Align prediction orientation with the ground truth
    pred_mask = pred_mask.transpose()
    # Compute the Dice score between the two masks
    dice = compute_mask_dice(gt_mask, pred_mask)
    return dice
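# For reference, a minimal sketch of the Dice coefficient on binary masks,
# using the standard 2*|A intersect B| / (|A| + |B|) definition. Whether the
# project's compute_mask_dice handles thresholds or empty masks the same way
# is an assumption; dice_sketch is a hypothetical helper, not the project's
# implementation, and it assumes numpy is imported as np at module level as
# elsewhere in this file.
def dice_sketch(gt_mask, pred_mask):
    # Binarize both masks before comparing
    gt = np.asarray(gt_mask) > 0
    pred = np.asarray(pred_mask) > 0
    intersection = np.logical_and(gt, pred).sum()
    denom = gt.sum() + pred.sum()
    # Convention: two empty masks count as a perfect match
    return 1.0 if denom == 0 else 2.0 * intersection / denom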
def analyze_image(img_path):
    """
    Analyze an image:
    - Extract candidate regions of interest (ROIs) with the RPN network.
    - Classify each ROI with the classification network.
    Input:
    - img_path: string path to image 'media/analysis/im_name.jpg'
    """
    # Get parameters
    net_rpn, pxl_rpn, ids_rpn = load_net(cfg.NET_DIR_RPN, cfg.NET_NAME_RPN)
    net_clf, pxl_clf, ids_clf = load_net(cfg.NET_DIR_CLF, cfg.NET_NAME_CLF)
    # Load image
    img = np.squeeze(load_image(img_path, tile_image=False, transpose=False))
    img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
    # Detect menisci
    rois, ids, scores = get_rpn_rois(img, net_rpn, pxl_rpn, ids_rpn,
                                     cfg.NMS_THRESH, cfg.NMS_THRESH_CLS,
                                     cfg.CONF_THRESH)
    # Classify each meniscus
    clf_ids = classify_rois_cds(img, rois, net_clf, pxl_clf, ids_clf)
    # Plot results if needed
    if PLOT and img is not None:
        plot_rectangle(img, rois, clf_ids)
    return rois, clf_ids
def analyze_image(im_path):
    """
    Analyze an image:
    - Extract candidate regions of interest (ROIs) with the RPN network.
    - Classify each ROI with the classification network.
    Input:
    - im_path: string path to image 'media/analysis/im_name.jpg'
    """
    # Get parameters
    CACHE_MANAGER = CacheManager()
    net_rpn, pxl_rpn, ids_rpn = CACHE_MANAGER.get_net_rpn()
    net_clf, pxl_clf, ids_clf = CACHE_MANAGER.get_net_clf()
    # Load image
    im = load_image(im_path)
    # Detect menisci
    rois, ids, scores = get_rpn_rois(im, net_rpn, pxl_rpn, ids_rpn,
                                     cfg.NMS_THRESH, cfg.NMS_THRESH_CLS,
                                     cfg.CONF_THRESH)
    # Classify each meniscus
    clf_ids = classify_rois(im, rois, net_clf, pxl_clf, ids_clf)
    # Plot results if needed
    if PLOT and im is not None:
        plot_rectangle(im, rois, clf_ids)
    return rois, clf_ids
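# Example call for either analyze_image variant above, following the path
# convention given in the docstring; the file name is illustrative only:
#     rois, clf_ids = analyze_image("media/analysis/im_name.jpg")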
def extract_seg_map(data_name, algo_version, model=None, viz_results=False):
    # Numpy data dir
    data_root_dir = os.path.join("/", "data", "radio-datasets", data_name)
    data_dir = os.path.join(data_root_dir, "dcm_data")
    im_dir = os.path.join(data_root_dir, "images")
    if not os.path.exists(im_dir):
        os.makedirs(im_dir)
    seg_dir = os.path.join(data_root_dir, "segmentations%s" % algo_version)
    if not os.path.exists(seg_dir):
        os.makedirs(seg_dir)
    viz_dir = os.path.join(data_root_dir, "results_check%s" % algo_version)
    if not os.path.exists(viz_dir):
        os.makedirs(viz_dir)
    # Load model
    if model is None:
        model = load_net_pytorch(cfg.NET_DIR_SEG)
    for user in tqdm.tqdm(np.sort(os.listdir(os.path.join(data_dir)))):
        if not os.path.isdir(os.path.join(data_dir, user)):
            continue
        for exam in np.sort(os.listdir(os.path.join(data_dir, user))):
            if not os.path.isdir(os.path.join(data_dir, user, exam)):
                continue
            for radio in np.sort(os.listdir(os.path.join(data_dir, user, exam))):
                if not os.path.isdir(os.path.join(data_dir, user, exam, radio)):
                    continue
                for filename in os.listdir(
                        os.path.join(data_dir, user, exam, radio)):
                    if filename == "VERSION" or "._" in filename:
                        continue
                    # Load image
                    # im_name = "%s_%s_%s_%s" % (user, exam, radio, filename)
                    im_name = user
                    im_path = os.path.join(im_dir, "%s.npy" % im_name)
                    if not os.path.exists(im_path):
                        data_path = os.path.join(data_dir, user, exam, radio,
                                                 filename)
                        im = load_image(data_path, tile_image=True,
                                        data_type="dcm")
                        np.save(im_path, im)
                    # Analyze image
                    seg_map, max_map, _ = get_seg_map(im_path, model)
                    # Save segmentation
                    seg_map_nii = nib.Nifti1Image(seg_map.astype(np.int16),
                                                  np.eye(4))
                    nib.save(seg_map_nii,
                             os.path.join(seg_dir, "%s.nii.gz" % im_name))
                    cv2.imwrite(os.path.join(viz_dir, "%s.jpeg" % im_name),
                                255 * seg_map)
                    # Visualize results
                    if viz_results:
                        plt.imshow(seg_map)
                        plt.show()
def annotateImage(im_path, id2name):
    global image, refPt
    refPt = []
    image = load_image(im_path)
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    # Keep looping until the 'c' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", image)
        key = cv2.waitKey(1) & 0xFF
        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            refPt = []
        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break
    # Close all open windows
    cv2.destroyAllWindows()
    # if there are two reference points, then crop the region of interest
    # from the image and display it
    im_roidb = {"name": im_path, "boxes": []}
    if len(refPt) > 1 and len(refPt) % 2 == 0:
        for i in range(int(len(refPt) / 2.)):
            # Ensure all box directions
            topleft = (min(refPt[2 * i][0], refPt[2 * i + 1][0]),
                       min(refPt[2 * i][1], refPt[2 * i + 1][1]))
            bottomright = (max(refPt[2 * i][0], refPt[2 * i + 1][0]),
                           max(refPt[2 * i][1], refPt[2 * i + 1][1]))
            refPt[2 * i] = topleft
            refPt[2 * i + 1] = bottomright
            # Box as [x_min, y_min, x_max, y_max]
            box = np.asarray([
                refPt[2 * i][0], refPt[2 * i][1],
                refPt[2 * i + 1][0], refPt[2 * i + 1][1]
            ])
            # Label box
            id_ = labelBox(im_path, box)
            id_ = id2name[str(id_)]
            roi = convert_xy_to_wh(box)
            roi_info = {'box': list(roi), 'id': id_, 'is_background': False}
            im_roidb["boxes"].append(roi_info)
    return im_roidb
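# The annotation above stores boxes in [x_min, y_min, x_max, y_max] form and
# converts them with convert_xy_to_wh. A minimal sketch of that kind of
# corner-to-size conversion is shown below; the exact conventions of the
# project's helper (e.g. inclusive vs. exclusive corners) are an assumption,
# and xyxy_to_xywh_sketch is a hypothetical name.
def xyxy_to_xywh_sketch(box):
    # box: [x_min, y_min, x_max, y_max] -> [x, y, width, height]
    x_min, y_min, x_max, y_max = box
    return np.asarray([x_min, y_min, x_max - x_min, y_max - y_min])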
def export_results():
    # Init paths
    root_dir = os.path.join("/", "home", "yann", "radioAdvisor")
    data_dir = os.path.join(root_dir, "data", "cancer-du-sein",
                            "sifem_validation")
    im_dir = os.path.join(root_dir, "data", "cancer-du-sein", "test-images")
    # Load db
    db = os.listdir(data_dir)
    # Get parameters
    net_rpn, pxl_rpn, ids_rpn = load_net(cfg.NET_DIR_RPN, cfg.NET_NAME_RPN)
    net_clf, pxl_clf, ids_clf = load_net(cfg.NET_DIR_CLF, cfg.NET_NAME_CLF)
    # Test images
    results = []
    for idx, filename in enumerate(db):
        if not filename.endswith(".nii.gz") or "._" in filename:
            continue
        print(idx, filename)
        # Load image if needed
        exam_id = filename.split(".")[0]
        data_path = os.path.join(data_dir, filename)
        img_path = os.path.join(im_dir, "%s.npy" % exam_id)
        if not os.path.exists(img_path):
            img = np.squeeze(
                load_image(data_path, tile_image=False, transpose=False))
            img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
            np.save(img_path, img)
        else:
            img = np.load(img_path)
        # Detect nodules
        rois, _, det_scores = get_rpn_rois(img, net_rpn, pxl_rpn, ids_rpn,
                                           cfg.NMS_THRESH, cfg.NMS_THRESH_CLS,
                                           cfg.CONF_THRESH)
        # Classify detected nodules
        clf_scores = np.array([[0.5, 0.5]])
        if len(rois) > 0:
            # Keep only the highest-scoring detection
            rois = rois[np.argmax(det_scores)][np.newaxis, :]
            _, clf_scores = classify_rois_cds(img, rois, net_clf, pxl_clf,
                                              ids_clf)
        # Store the prediction for this exam
        results.append({"examen": exam_id, "prediction": clf_scores[0, 1]})
    # Create csv
    create_csv(results)
def compute_metrics(data_name, algo_version=""):
    gt_dir = os.path.join("data", data_name, "gt_data")
    pred_dir = os.path.join("data", data_name, "segmentations%s" % algo_version)
    ious, dices = [], []
    for filename in os.listdir(gt_dir):
        if not filename.endswith("nii.gz"):
            continue
        if filename not in os.listdir(pred_dir):
            continue
        gt_mask = load_image(os.path.join(gt_dir, filename), tile_image=False)
        pred_mask = load_image(os.path.join(pred_dir, filename),
                               tile_image=False)
        # Align prediction orientation with the ground truth
        pred_mask = pred_mask.transpose()
        ious.append(compute_mask_iou(gt_mask, pred_mask))
        dices.append(compute_mask_dice(gt_mask, pred_mask))
    return ious, dices
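# Companion sketch for compute_mask_iou, assuming the standard
# intersection-over-union on binary masks; iou_sketch is a hypothetical
# helper, not the project's implementation.
def iou_sketch(gt_mask, pred_mask):
    gt = np.asarray(gt_mask) > 0
    pred = np.asarray(pred_mask) > 0
    union = np.logical_or(gt, pred).sum()
    # Convention: two empty masks count as a perfect match
    return 1.0 if union == 0 else np.logical_and(gt, pred).sum() / union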
def test(csv_name=None):
    # Get parameters
    CACHE_MANAGER = CacheManager()
    net_rpn, pxl_rpn, ids_rpn = CACHE_MANAGER.get_net_rpn()
    net_f_clf, pxl_f_clf, ids_f_clf = CACHE_MANAGER.get_net_f_clf()
    net_o_clf, pxl_o_clf, ids_o_clf = CACHE_MANAGER.get_net_o_clf()
    # Load test db
    data_dir = os.path.join("data", "test_data")
    data = {
        filename.split(".")[0]: []
        for filename in os.listdir(data_dir)
        if filename.endswith(".nii.gz") and "._" not in filename
    }
    if csv_name is not None:
        data = parse_csv(csv_name)
    # Loop over test
    test_names = []
    test_f_scores = np.zeros(0)
    test_l_scores, test_o_scores = np.zeros((0, 2)), np.zeros((0, 2))
    for idx, im_name in enumerate(data.keys()):
        # DEBUG
        print(idx, im_name)
        # Load image
        im_dir = "test_data" if csv_name is None else "raw_data"
        im_path = os.path.join("data", im_dir, "%s.nii.gz" % im_name)
        im = load_image(im_path)
        # Detect menisci
        rois, _, _ = get_rpn_rois(im, net_rpn, pxl_rpn, ids_rpn,
                                  cfg.NMS_THRESH, cfg.NMS_THRESH_CLS,
                                  cfg.CONF_THRESH)
        # Classify each meniscus
        if cfg.NET_DIR_F_CLF is not None:
            clf_ids, f_score, l_scores, o_scores = classify_rois(
                im, rois, net_f_clf, pxl_f_clf, ids_f_clf,
                net_o_clf, pxl_o_clf, ids_o_clf)
        else:
            clf_ids, f_score, l_scores, o_scores = classify_rois_(
                im, rois, net_o_clf, pxl_o_clf, ids_o_clf)
        # Store results
        test_names.append(im_name)
        test_f_scores = np.hstack((test_f_scores, f_score))
        test_l_scores = np.vstack((test_l_scores, l_scores))
        test_o_scores = np.vstack((test_o_scores, o_scores))
    create_csv(test_names, test_f_scores, test_l_scores, test_o_scores)
def export_results(dataset_name):
    # Init paths
    root_dir = os.path.join("/", "home", "yann", "radioAdvisor")
    data_dir = os.path.join(root_dir, "data", "sarco", "raw_data_")
    im_dir = os.path.join(root_dir, "data", "sarco", "images")
    res_dir = os.path.join(root_dir, "data", "sarco", "results", "axone")
    # Load db
    db = os.listdir(data_dir)
    # Load model
    model = load_net_pytorch(cfg.NET_DIR_SEG)
    # Test images
    exam_ids, areas = [], []
    for idx, filename in enumerate(db):
        if ".dcm" not in filename:
            continue
        if "._" in filename:
            continue
        print(idx, filename)
        # Load image if needed
        exam_id = filename.split(".")[0]
        data_path = os.path.join(data_dir, filename)
        im_path = os.path.join(im_dir, "%s.npy" % exam_id)
        if not os.path.exists(im_path):
            im = load_image(data_path, tile_image=True, data_type="dcm")
            np.save(im_path, im)
        # Segment image
        seg_map, max_map, _ = get_seg_map(im_path, model, 1, 50, 14)
        # Save results
        seg_map_nii = nib.Nifti1Image(seg_map.astype(np.int16), np.eye(4))
        nib.save(seg_map_nii, os.path.join(res_dir, "%s.nii.gz" % exam_id))
        # Compute area
        pixel_size = get_pixel_size(data_path)
        areas.append(pixel_size * np.sum(seg_map))
        exam_ids.append(exam_id)
    # Create csv
    create_csv(exam_ids, areas)
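# The exported area above is pixel_size * number of segmented pixels. Below is
# a hedged sketch of how a per-pixel area could be read from a DICOM header
# with pydicom; whether the project's get_pixel_size does exactly this is an
# assumption, and get_pixel_area_sketch is a hypothetical helper.
def get_pixel_area_sketch(dcm_path):
    import pydicom
    ds = pydicom.dcmread(dcm_path)
    # PixelSpacing is [row spacing, column spacing] in millimetres
    row_mm, col_mm = (float(v) for v in ds.PixelSpacing)
    # Area covered by one pixel, in mm^2
    return row_mm * col_mm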
def annotate_im_with_points(im_path, img_title="", segment_only=False):
    img = np.squeeze(load_image(im_path, tile_image=False, transpose=False))
    img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
    annotator = Annotator(img, segment_only=segment_only, id2name=ID2NAME,
                          annotator_name=img_title)
    annotator.run_interface()
    cv2.destroyAllWindows()
    # Retrieve boxes & labels
    roidb = [{
        "box": list(obj.get_box_coords(coord_format="xywh")),
        "points": obj.pnts,
        "id": obj.id,
        "name": obj.label
    } for obj in annotator.objects]
    db = {"name": im_path, "boxes": roidb}
    return db
def labelBox(im_path, box):
    # Display the image with the selected box drawn on it
    im = load_image(im_path)
    cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
    cv2.imshow(im_path, im)
    cv2.waitKey(1000)
    # Ask for the label of this box
    print("Selected box at: ", box)
    while True:
        try:
            label = int(input("What is the label for this box? "))
            print("Label: %s" % str(label))
            break
        except ValueError:
            print("Please enter an integer label.")
    # close all open windows
    cv2.destroyAllWindows()
    return label
def test(dataset_name, gauss_xy, bilat_xy, bilat_rgb, test_idx=None):
    # Init paths
    root_dir = os.path.join("/", "home", "yann", "radioAdvisor")
    data_dir = os.path.join(root_dir, "data", "sarco", "raw_data")
    im_dir = os.path.join(root_dir, "data", "sarco", "images")
    # Load db
    extractions_dir = os.path.join("/", "data", "train_extracts",
                                   "radio_extractions")
    db = np.load(
        os.path.join(extractions_dir, "imdb_val_%s.npy" % dataset_name))
    # Load image info
    xl_db = {}
    xl_db.update(load_seg_annotation(
        os.path.join(root_dir, "data", "sarco", "dataset_train")))
    xl_db.update(load_seg_annotation(
        os.path.join(root_dir, "data", "sarco", "dataset_val")))
    # Load model
    model = load_net_pytorch(cfg.NET_DIR_SEG)
    # Test images
    IoUs, areas = [], []
    for idx, im_roidb in enumerate(db):
        if test_idx is not None:
            im_roidb = db[test_idx]
        # Get info
        exam_id = im_roidb["name"].split("/")[-1].split(".")[0]
        info = xl_db[exam_id]
        # Load image if needed
        im_path = os.path.join(im_dir, "%s.npy" % exam_id)
        if not os.path.exists(im_path):
            data_path = os.path.join(data_dir, info["data_file"])
            im = load_image(data_path, tile_image=True, data_type="dcm")
            np.save(im_path, im)
        # Load ground truth
        annot_path = os.path.join(data_dir, info["annot_file"])
        label = load_image(annot_path, tile_image=False, data_type="nii")
        # Segment image
        seg_map, max_map, cls_prob = get_seg_map(im_path, model, gauss_xy,
                                                 bilat_xy, bilat_rgb)
        # Compute IoU
        inter_area = np.sum(seg_map * label)
        union_area = np.sum((seg_map + label) > 0)
        IoUs.append(inter_area / union_area)
        # Store areas
        areas.append([np.sum(label), np.sum(seg_map)])
    # Print stats
    print("mean iou: %s" % np.mean(IoUs))
    print("MSE areas: %s" % mean_squared_error(np.array(areas)[:, 0],
                                               np.array(areas)[:, 1]))
    return IoUs, areas
def test(test_idx=None):
    # Get parameters
    CACHE_MANAGER = CacheManager()
    net_rpn, pxl_rpn, ids_rpn = CACHE_MANAGER.get_net_rpn()
    net_f_clf, pxl_f_clf, ids_f_clf = CACHE_MANAGER.get_net_f_clf()
    net_o_clf, pxl_o_clf, ids_o_clf = CACHE_MANAGER.get_net_o_clf()
    # Load test db
    test_db = np.load(
        os.path.join("database", "clf", "imdb_test_clf_radio_v2.npy"))
    # Init test metrics
    stats = {
        'well_cl': 0, 'bad_cl': 0, 'cmpt_cl': 0,
        'well_det': 0, 'bad_det': 0, 'fp': 0, 'cmpt_det': 0,
        'all_well': 0, 'all_cmpt': 0
    }
    roc = {
        "fissure": {"scores": [], "label": []},
        "localisation": {"scores": [], "label": []},
        "orientation": {"scores": [], "label": []}
    }
    # Loop over test
    for idx, im_roidb in enumerate(test_db):
        # DEBUG
        if test_idx is not None:
            im_roidb = test_db[test_idx]
        print(idx, im_roidb["name"])
        # Load image
        im_name = im_roidb["name"].split("/")[-1].split(".")[0]
        im_path = os.path.join("data", "raw_data", "%s.nii.gz" % im_name)
        im = load_image(im_path)
        # Detect menisci
        rois, _, _ = get_rpn_rois(im, net_rpn, pxl_rpn, ids_rpn,
                                  cfg.NMS_THRESH, cfg.NMS_THRESH_CLS,
                                  cfg.CONF_THRESH)
        # Classify each meniscus
        if cfg.NET_DIR_F_CLF is not None:
            clf_ids, f_score, l_scores, o_scores = classify_rois(
                im, rois, net_f_clf, pxl_f_clf, ids_f_clf,
                net_o_clf, pxl_o_clf, ids_o_clf)
        else:
            clf_ids, f_score, l_scores, o_scores = classify_rois_(
                im, rois, net_o_clf, pxl_o_clf, ids_o_clf)
        # Evaluate results
        evaluate_results(im, im_roidb["boxes"], rois, clf_ids, stats)
        # Evaluate roc
        evaluate_roc(im, im_roidb["boxes"], f_score, l_scores, o_scores, roc)
    # Print results
    print_results(stats)
    # Get AUC score
    compute_test_score(roc)