import os

import cv2
import numpy as np

# norm_path, image_dict, overlap_us_seg, image_list, split_path and the other
# bare helpers used below are project-local utilities.


def make_overlap_us_seg(us_path, seg_paths, save_path, pass_without_seg=True):
    us_path = norm_path(us_path)
    seg_paths = [norm_path(p) for p in seg_paths]
    save_path = norm_path(save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    us_dict = image_dict(us_path)
    # Mask file names may carry a '#<suffix>'; strip it so masks key on the
    # base ultrasound file name.
    key = lambda p: os.path.splitext(os.path.split(p)[-1])[0].split('#')[0]
    seg_dict_list = [image_dict(p, key=key) for p in seg_paths]

    for name in us_dict:
        # collect the matching mask path (or None) from each segmentation set
        seg_path_list = []
        for seg_dict in seg_dict_list:
            if name in seg_dict:
                seg_path_list.append(seg_dict[name])
            else:
                seg_path_list.append(None)

        if pass_without_seg and None in seg_path_list:
            continue

        img = overlap_us_seg(us_dict[name], seg_path_list)
        save_file = os.path.join(save_path, name + '.png')
        print(save_file)
        cv2.imwrite(save_file, img)

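
# Illustrative usage sketch for make_overlap_us_seg (not part of the original
# module): the paths below are hypothetical, and norm_path / image_dict /
# overlap_us_seg are the project's own helpers.
#
#   make_overlap_us_seg(
#       us_path='data/us_images',           # raw ultrasound frames
#       seg_paths=['data/seg/kidney'],      # one folder per mask class
#       save_path='results/overlap',        # overlay PNGs written here
#       pass_without_seg=True)              # skip frames with no matching mask
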
def getNonKidneyFiles(self, path):
    # args.seg_data is the script-level segmentation directory; images without
    # a matching segmentation file are treated as non-kidney images.
    us_files = image_dict(path)
    seg_files = image_dict(args.seg_data)
    non_kidney_files = [
        us_files[file] for file in us_files if file not in seg_files
    ]
    return non_kidney_files


def getKidneyFiles(self, path):
    us_files = image_dict(path)
    seg_files = image_dict(args.seg_data)
    kidney_files = [
        us_files[file] for file in us_files if file in seg_files
    ]
    return kidney_files

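
# Illustrative sketch (not from the original source): the two methods above
# simply partition the ultrasound images by whether a segmentation file with
# the same name exists under args.seg_data. The instance name `dataset` is
# hypothetical.
#
#   kidney_files = dataset.getKidneyFiles('data/us_images')         # has a mask
#   non_kidney_files = dataset.getNonKidneyFiles('data/us_images')  # no mask
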
def load_kidney(self, dataset_dir, subset, seg_dir=None):
    """Load a subset of the kidney dataset.

    dataset_dir: root directory of the dataset
    subset: name of the sub-directory to load; an empty string loads
        dataset_dir itself
    seg_dir: directory with one mask sub-directory per class; None in
        detect mode
    """
    if subset != "":
        dataset_subset_dir = os.path.join(dataset_dir, subset)
    else:
        dataset_subset_dir = dataset_dir

    self.class_list = [
        'bg',      # class_id 1
        'kidney',  # class_id 2
    ]

    # add classes
    for id, name in enumerate(self.class_list, start=1):
        self.add_class("kidney", id, name)
        print("add class: {} {}".format(id, name))

    # seg_dir is None in detect mode
    if seg_dir:
        # map image name -> [class_id, mask_path], one mask folder per class
        mask_dict = dict()
        for id, name in enumerate(self.class_list, start=1):
            mask_class_dict = image_dict(os.path.join(seg_dir, name))
            for _name, _path in mask_class_dict.items():
                mask_dict[_name] = [id, _path]
            print("find mask id:{} name:{} count:{}".format(
                id, name, len(mask_class_dict)))

        # Add images (only those that have a mask)
        for file in image_list(dataset_subset_dir):
            _, name, _ = split_path(file)
            if name not in mask_dict.keys():
                continue
            class_id, mask_path = mask_dict[name]
            self.add_image(source="kidney",
                           image_id=name,
                           path=file,
                           mask_path=mask_path,
                           class_id=class_id)
    else:
        image_files = image_list(dataset_subset_dir)

        # Add images
        for file in image_files:
            _, name, _ = split_path(file)
            self.add_image(source="kidney",
                           image_id=name,
                           path=file,
                           mask_path=None,
                           class_id=None)

    print('count added image', subset, len(self.image_info))

def load_kidney(self, dataset_dir, subset, seg_dir=None):
    """Load a subset of the kidney dataset.

    dataset_dir: root directory of the dataset
    subset: name of the sub-directory to load; an empty string loads
        dataset_dir itself
    seg_dir: directory containing the mask images; None in detect mode
    """
    # Add classes. We have one class.
    # Naming the dataset kidney, and the class kidney.
    self.add_class("kidney", 1, "kidney")

    # Which subset? If non-empty, load the named sub-directory; otherwise
    # load dataset_dir directly.
    # assert subset in ["train", "val", ""]
    if subset != "":
        dataset_subset_dir = os.path.join(dataset_dir, subset)
    else:
        dataset_subset_dir = dataset_dir

    # seg_dir is None in detect mode
    if seg_dir:
        # read mask paths
        mask_dict = image_dict(seg_dir)

    # read image paths
    image_files = image_list(dataset_subset_dir)

    # Add images
    for file in image_files:
        _, name, _ = split_path(file)

        # detect mode?
        if seg_dir:
            # only use images that have a mask
            if name not in mask_dict.keys():
                continue
            mask_path = mask_dict[name]
        else:
            mask_path = None

        self.add_image(
            source="kidney",
            image_id=name,
            path=file,
            mask_path=mask_path)

    print('cnt img', subset, len(self.image_info))

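
# Illustrative sketch of how load_kidney would typically be used (not from the
# original source): the class name KidneyDataset and the directory names are
# hypothetical; prepare() is assumed to come from the Mask R-CNN Dataset base
# class that add_class/add_image/image_info also belong to.
#
#   dataset_train = KidneyDataset()
#   dataset_train.load_kidney('data/kidney', subset='train', seg_dir='data/seg')
#   dataset_train.prepare()
#
#   dataset_val = KidneyDataset()
#   dataset_val.load_kidney('data/kidney', subset='val', seg_dir='data/seg')
#   dataset_val.prepare()
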
def main(us_path,
         mask_path,
         result_path,
         scale=None,
         view_scale=None,
         use_top_n_greatest_width=True,
         size=None,
         horizontally=True,
         crop=False,
         denoise=True):
    '''
    scale float: fix the physical scale (cm per pixel) of the images
    view_scale (W, H): resize the kidney bounding box to this size
    use_top_n_greatest_width int: keep only the N images with the largest
        kidney mask per patient (True behaves like 1)
    size (H, W): final canvas size, centered on the kidney
    horizontally boolean: rotate so the kidney lies horizontally
    crop boolean: zero out everything outside the kidney mask
    denoise boolean: apply non-local means denoising
    '''
    us_dict = image_dict(norm_path(us_path))
    mask_dict = image_dict(norm_path(mask_path))
    result_path = norm_path(result_path, True)

    for patient, dicoms in Per_patient_fast():
        patient_datas = list()
        for dicom in dicoms:
            Name = dicom['Name'][:-4]  # remove file ext
            Diagnosis = dicom['Diagnosis']
            AccNo = dicom['AccNo']
            PhysicalUnitsXDirection = dicom['PhysicalUnitsXDirection']
            PhysicalUnitsYDirection = dicom['PhysicalUnitsYDirection']
            PhysicalDeltaY = dicom['PhysicalDeltaY']  # cm per pixel
            PhysicalDeltaX = dicom['PhysicalDeltaX']  # cm per pixel

            # checking the kidney
            if Name not in mask_dict:
                # pass the non-kidney images
                continue

            # for debug
            # if Name != '1.2.840.113663.1500.1.295077244.3.8.20101014.92756.625':
            #     continue

            # checking the physical unit (code '3' means cm) and pixel spacing;
            # skip images missing either, otherwise float() below would fail
            if (PhysicalUnitsXDirection != '3' or PhysicalUnitsYDirection != '3'
                    or not PhysicalDeltaY or not PhysicalDeltaX):
                continue
            PhysicalDeltaY = float(PhysicalDeltaY)
            PhysicalDeltaX = float(PhysicalDeltaX)

            # read image
            us_img = cv2.imread(us_dict[Name], cv2.IMREAD_GRAYSCALE)
            print(us_dict[Name])
            mask_img = cv2.imread(mask_dict[Name], cv2.IMREAD_GRAYSCALE)

            # assert scale != view_scale, "use only scale or view_scale"

            if horizontally:
                angle = calculate_angle(mask_img)
                mask_img = rotate_bound(mask_img, angle)
                us_img = rotate_bound(us_img, angle)

            if scale:
                us_img = resize_physical_unit(
                    us_img, (PhysicalDeltaX, PhysicalDeltaY), scale)
                mask_img = resize_physical_unit(
                    mask_img, (PhysicalDeltaX, PhysicalDeltaY), scale)

            if view_scale:
                bbX, bbY, bbW, bbH = find_bounding_square(mask_img)
                fx = view_scale[0] / bbW
                fy = view_scale[1] / bbH
                us_img = cv2.resize(us_img, None, fx=fx, fy=fy)
                mask_img = cv2.resize(mask_img, None, fx=fx, fy=fy)

            if size:
                # get the mask pixel coordinates with argwhere
                mask_pts = np.argwhere(mask_img == 255)
                # compute the mask centroid
                mask_cx, mask_cy = int(np.mean(mask_pts[:, 1])), int(
                    np.mean(mask_pts[:, 0]))
                # create blank canvases and paste the images centered on the kidney
                us_canvas = np.zeros(size, dtype=np.uint8)
                mask_canvas = np.zeros(size, dtype=np.uint8)
                # # for debug
                # us_canvas[:] = 100
                # mask_canvas[:] = 100
                canvas_h, canvas_w = size
                canvas_cx, canvas_cy = canvas_w // 2, canvas_h // 2
                dst_x, dst_y = canvas_cx - mask_cx, canvas_cy - mask_cy
                us_img = paste(dst=us_canvas, src=us_img, dst_x=dst_x, dst_y=dst_y)
                mask_img = paste(dst=mask_canvas, src=mask_img, dst_x=dst_x, dst_y=dst_y)

            if crop:
                mask = mask_img.astype(bool)  # np.bool is removed in NumPy >= 1.24
                us_img = us_img * mask

            if denoise:
                us_img = cv2.fastNlMeansDenoising(us_img, None, 10, 7, 21)

            data = dict()
            data['us'] = us_img
            data['mask'] = mask_img
            data['info'] = dicom
            patient_datas.append(data)

        # sort by kidney mask area (largest first)
        patient_datas = sorted(
            patient_datas,
            key=lambda x: np.count_nonzero(x['mask'] == 255),
            reverse=True)

        # use only the top-N largest images
        if use_top_n_greatest_width:
            cut_n = use_top_n_greatest_width
            patient_datas = patient_datas[:cut_n]

        accNo = patient['AccNo']
        diagnosis = patient['Diagnosis']
        train_val = 'val' if accNo in isangmi_accno_list else 'train'
        save_path = os.path.join(result_path, train_val, diagnosis, accNo)

        '''
        if args.preprocess_denoise:
            np_out = np.asarray(out)
            np_out = cv2.fastNlMeansDenoising(np_out, None, 10, 7, 21)
            out = Image.fromarray(np.uint8(np_out))
            return out
        '''

        # save images
        for order, data in enumerate(patient_datas):
            us_img = data['us']
            name = data['info']['Name'][:-4]
            save_file = os.path.join(save_path, name + '.png')
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            print(save_file)
            cv2.imwrite(save_file, us_img)

        # save the order of the images
        if patient_datas:
            save_file = os.path.join(save_path, 'order.txt')
            with open(save_file, 'wt') as f:
                for order, data in enumerate(patient_datas):
                    name = data['info']['Name'][:-4]
                    f.write(name + '\n')
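
# Illustrative invocation sketch for the preprocessing entry point (all paths
# and values below are hypothetical, not taken from the original script):
#
#   main(us_path='data/us_images',
#        mask_path='data/masks',
#        result_path='data/preprocessed',
#        scale=0.02,                  # target cm-per-pixel after rescaling
#        size=(450, 450),             # (H, W) canvas centered on the kidney
#        use_top_n_greatest_width=1,  # keep only the largest-mask image
#        horizontally=True,
#        crop=True,
#        denoise=True)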