def test_load_wsi_head():
    """Open the test WSI and print the size of each pyramid level."""
    slide_path = os.path.join(PRJ_PATH, "test/data/Slides/CropBreastSlide.tiff")
    header = pyramid.load_wsi_head(slide_path)
    print("WSI level dimension info:")
    for level in range(header.level_count):
        print("level {:2d} size: {}".format(level, header.level_dimensions[level]))
def check_slide_properties(slide_path):
    """Check that the slide's level-2 downsample factor is 16.

    Args:
        slide_path: path to a whole-slide image readable by pyramid.load_wsi_head.

    Returns:
        bool: True when level 2 is downsampled (approximately) 16x, else False.
    """
    wsi_head = pyramid.load_wsi_head(slide_path)
    flag = True
    # Level 2 of the pyramid is expected at a 16x downsample.
    if np.absolute(wsi_head.level_downsamples[2] - 16) > 0.01:
        # BUG FIX: the message previously printed 4 while the condition
        # checks for 16 — report the value that is actually verified.
        print("{} scale is not {}".format(wsi_head._filename, 16))
        flag = False
    return flag
def slide_combine_mask(slides_dir, id_list, slide_index, display_level=2):
    """Show a slide next to its whole/viable tumor masks and save the figure.

    Args:
        slides_dir: root directory holding OriginalImage/, WholeMask/, ViableMask/.
        id_list: list of slide identifiers (file stems).
        slide_index: index into id_list selecting the slide to display.
        display_level: WSI pyramid level used for display and mask resizing.
    """
    slide_path = os.path.join(slides_dir, 'OriginalImage/' + id_list[slide_index] + ".svs")
    # slides may carry an uppercase extension
    if not os.path.exists(slide_path):
        slide_path = os.path.join(slides_dir, 'OriginalImage/' + id_list[slide_index] + ".SVS")
    wsi_head = pyramid.load_wsi_head(slide_path)
    # (height, width) at the display level — masks are resized to match
    new_size = (wsi_head.level_dimensions[display_level][1],
                wsi_head.level_dimensions[display_level][0])
    slide_img = wsi_head.read_region((0, 0), display_level,
                                     wsi_head.level_dimensions[display_level])
    slide_img = np.asarray(slide_img)[:, :, :3]  # drop the alpha channel
    # load and resize whole tumor mask (order=0 keeps labels crisp)
    whole_mask_path = os.path.join(slides_dir, 'WholeMask/' + id_list[slide_index] + "_whole.tif")
    whole_mask_img = io.imread(whole_mask_path)
    resize_whole_mask = (transform.resize(whole_mask_img, new_size, order=0) * 255).astype(np.uint8)
    # load and resize viable tumor mask
    viable_mask_path = os.path.join(slides_dir, 'ViableMask/' + id_list[slide_index] + "_viable.tif")
    viable_mask_img = io.imread(viable_mask_path)
    resize_viable_mask = (transform.resize(viable_mask_img, new_size, order=0) * 255).astype(np.uint8)
    # show the slide and both masks side-by-side
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))
    ax1.imshow(slide_img)
    ax1.set_title('Slide Image')
    ax2.imshow(resize_whole_mask)
    ax2.set_title('Whole Tumor Mask')
    ax3.imshow(resize_viable_mask)
    ax3.set_title('Viable Tumor Mask')
    plt.tight_layout()
    plt.show()
    dir_path = './data/MyMasks_level_' + str(display_level)
    # BUG FIX: os.mkdir fails when './data' is missing and the
    # exists()+mkdir pair is race-prone; makedirs(exist_ok=True) handles both.
    os.makedirs(dir_path, exist_ok=True)
    save_path = os.path.join(
        dir_path, 'level_' + str(display_level) + '_' + id_list[slide_index] + ".png")
    fig.savefig(save_path)
def check_slide_mask(slides_dir, slide_filenames, slide_index, display_level=2):
    """Render one slide with its whole/viable tumor masks and save the figure
    under Visualization/Masks next to the slides directory."""
    slide_path = os.path.join(slides_dir, slide_filenames[slide_index] + ".svs")
    if not os.path.exists(slide_path):  # fall back to uppercase extension
        slide_path = os.path.join(slides_dir, slide_filenames[slide_index] + ".SVS")
    head = pyramid.load_wsi_head(slide_path)
    level_dim = head.level_dimensions[display_level]
    target_size = (level_dim[1], level_dim[0])  # (height, width) for resizing
    region = head.read_region((0, 0), display_level, level_dim)
    rgb_img = np.asarray(region)[:, :, :3]
    # whole tumor mask, nearest-neighbor resized to the display level
    whole_path = os.path.join(slides_dir, slide_filenames[slide_index] + "_whole.tif")
    whole_resized = (transform.resize(io.imread(whole_path), target_size, order=0) * 255).astype(np.uint8)
    # viable tumor mask, same treatment
    viable_path = os.path.join(slides_dir, slide_filenames[slide_index] + "_viable.tif")
    viable_resized = (transform.resize(io.imread(viable_path), target_size, order=0) * 255).astype(np.uint8)
    # lay out slide + masks in one row
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))
    ax1.imshow(rgb_img)
    ax1.set_title('Slide Image')
    ax2.imshow(whole_resized)
    ax2.set_title('Whole Tumor Mask')
    ax3.imshow(viable_resized)
    ax3.set_title('Viable Tumor Mask')
    plt.tight_layout()
    # plt.show()
    save_path = os.path.join(os.path.dirname(slides_dir), "Visualization/Masks",
                             slide_filenames[slide_index] + ".png")
    fig.savefig(save_path)
def locate_tissue(slides_dir):
    """Detect the largest tissue region on each slide and save an overlay
    image (green contour) to Visualization/TissueLoc."""
    # gather slides with either extension casing
    slide_list = filesystem.find_ext_files(slides_dir, "svs") \
        + filesystem.find_ext_files(slides_dir, "SVS")
    tissue_dir = os.path.join(os.path.dirname(slides_dir), "Visualization/TissueLoc")
    filesystem.overwrite_dir(tissue_dir)
    for num, slide_path in enumerate(slide_list):
        print("processing {}/{}".format(num+1, len(slide_list)))
        # locate tissue contours with default parameters
        cnts, d_factor = tl.locate_tissue_cnts(slide_path, max_img_size=2048,
                                               smooth_sigma=13, thresh_val=0.88,
                                               min_tissue_size=120000)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        # read the slide at the level used for contour detection
        select_level, select_factor = tl.select_slide_level(slide_path, max_size=2048)
        wsi_head = pyramid.load_wsi_head(slide_path)
        region = wsi_head.read_region((0, 0), select_level,
                                      wsi_head.level_dimensions[select_level])
        canvas = np.ascontiguousarray(np.asarray(region)[:, :, :3], dtype=np.uint8)
        # replace an invalid (self-intersecting) polygon by its convex hull
        biggest_poly = np_arr_to_poly(cv_cnt_to_np_arr(cnts[0]))
        if biggest_poly.is_valid:
            valid_cnt = cnts[0].astype(int)
        else:
            hull_arr = poly_to_np_arr(biggest_poly.convex_hull)
            valid_cnt = np_arr_to_cv_cnt(hull_arr).astype(int)
        # draw the contour and save the overlay
        cv2.drawContours(canvas, [valid_cnt], 0, (0, 255, 0), 8)
        save_name = os.path.splitext(os.path.basename(slide_path))[0] + ".png"
        io.imsave(os.path.join(tissue_dir, save_name), canvas)
def test_slide_seg(args):
    """Run the trained segmentation model on every test slide and save
    binary prediction masks.

    Args:
        args: configuration namespace (model_name, in_channels, class_num,
            model_dir, tumor_type, split, best_model, result_dir, slides_dir,
            slide_level, patch_len, stride_len, normalize, batch_size,
            save_org).

    Raises:
        AssertionError: if args.model_name is not "UNet" or "PSP".
    """
    # build the requested architecture
    if args.model_name == "UNet":
        model = UNet(n_channels=args.in_channels, n_classes=args.class_num)
    elif args.model_name == "PSP":
        model = pspnet.PSPNet(n_classes=19, input_size=(512, 512))
        model.classification = nn.Conv2d(512, args.class_num, kernel_size=1)
    else:
        raise AssertionError("Unknow modle: {}".format(args.model_name))
    model_path = os.path.join(args.model_dir, args.tumor_type, args.split, args.best_model)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_path))
    model.cuda()
    model.eval()

    since = time.time()
    result_dir = os.path.join(args.result_dir, args.tumor_type)
    filesystem.overwrite_dir(result_dir)
    slide_names = get_slide_filenames(args.slides_dir)
    if args.save_org and args.tumor_type == "viable":
        org_result_dir = os.path.join(result_dir, "Level0")
        filesystem.overwrite_dir(org_result_dir)

    for num, cur_slide in enumerate(slide_names):
        print("--{:02d}/{:02d} Slide:{}".format(num+1, len(slide_names), cur_slide))
        # load the slide at the prediction level (extension may be upper-case)
        slide_path = os.path.join(args.slides_dir, cur_slide+".svs")
        if not os.path.exists(slide_path):
            slide_path = os.path.join(args.slides_dir, cur_slide+".SVS")
        wsi_head = pyramid.load_wsi_head(slide_path)
        p_level = args.slide_level
        pred_h, pred_w = (wsi_head.level_dimensions[p_level][1],
                          wsi_head.level_dimensions[p_level][0])
        slide_img = wsi_head.read_region((0, 0), p_level,
                                         wsi_head.level_dimensions[p_level])
        slide_img = np.asarray(slide_img)[:, :, :3]

        # split the slide into overlapping patches; wmap counts overlaps
        coors_arr = wsi_stride_splitting(pred_h, pred_w, patch_len=args.patch_len,
                                         stride_len=args.stride_len)
        patch_arr, wmap = gen_patch_wmap(slide_img, coors_arr, plen=args.patch_len)
        patch_dset = PatchDataset(patch_arr, mask_arr=None, normalize=args.normalize,
                                  tumor_type=args.tumor_type)
        patch_loader = DataLoader(patch_dset, batch_size=args.batch_size, shuffle=False,
                                  num_workers=4, drop_last=False)
        ttl_samples = 0
        pred_map = np.zeros_like(wmap).astype(np.float32)
        for batch_ind, patches in enumerate(patch_loader):
            inputs = Variable(patches.cuda())
            with torch.no_grad():
                outputs = model(inputs)
            # torch.sigmoid replaces the deprecated F.sigmoid
            preds = torch.sigmoid(outputs)
            preds = torch.squeeze(preds, dim=1).data.cpu().numpy()
            # Python slices clamp at the sequence end, so the original
            # explicit last-batch special case is unnecessary.
            patch_coors = coors_arr[batch_ind*args.batch_size:(batch_ind+1)*args.batch_size]
            # BUG FIX: the original reused `ind` for both the batch loop and
            # this per-patch loop; distinct names avoid the shadowing.
            for p_ind, coor in enumerate(patch_coors):
                ph, pw = coor[0], coor[1]
                pred_map[ph:ph+args.patch_len, pw:pw+args.patch_len] += preds[p_ind]
            ttl_samples += inputs.size(0)

        # average overlapping predictions, then threshold to a binary mask
        prob_pred = np.divide(pred_map, wmap)
        slide_pred = (prob_pred > 0.5).astype(np.uint8)
        pred_save_path = os.path.join(result_dir, cur_slide + "_" + args.tumor_type + ".tif")
        io.imsave(pred_save_path, slide_pred*255)

        if args.save_org and args.tumor_type == "viable":
            # also save a full-resolution (level 0) prediction
            org_w, org_h = wsi_head.level_dimensions[0]
            org_pred = transform.resize(prob_pred, (org_h, org_w))
            org_pred = (org_pred > 0.5).astype(np.uint8)
            org_save_path = os.path.join(org_result_dir, cur_slide[-3:] + ".tif")
            imsave(org_save_path, org_pred, compress=9)

    time_elapsed = time.time() - since
    print('Testing takes {:.0f}m {:.2f}s'.format(time_elapsed // 60, time_elapsed % 60))
def gen_samples(slides_dir, patch_level, patch_size, tumor_type, slide_list, dset, overlap_mode):
    """Extract patch/mask training samples from tumor slides.

    Args:
        slides_dir: directory holding the slides and their *_<tumor_type>.tif masks.
        patch_level: WSI pyramid level patches are cropped at.
        patch_size: square patch side length in pixels.
        tumor_type: "viable" or "whole" — selects the mask file suffix.
        slide_list: slide file stems to process.
        dset: dataset split name ("train"/"val"); "val" slides are also
            copied into a TestSlides directory.
        overlap_mode: "half_overlap" or "self_overlap" patch splitting.

    Raises:
        NotImplementedError: for an unknown overlap_mode.
    """
    # prepare saving directories (exist_ok avoids the exists()+makedirs race)
    patch_path = os.path.join(os.path.dirname(slides_dir), "Patches", tumor_type)
    patch_img_dir = os.path.join(patch_path, dset, "imgs")
    os.makedirs(patch_img_dir, exist_ok=True)
    patch_mask_dir = os.path.join(patch_path, dset, "masks")
    os.makedirs(patch_mask_dir, exist_ok=True)

    # processing slide one-by-one
    ttl_patch = 0
    slide_list.sort()
    for ind, ele in enumerate(slide_list):
        print("Processing {} {}/{}".format(ele, ind+1, len(slide_list)))
        cur_slide_path = os.path.join(slides_dir, ele+".svs")
        # BUG FIX: the original tested `if os.path.exists(...)` and then
        # rebuilt the same ".svs" path (a no-op). Fall back to the
        # upper-case ".SVS" extension, matching the other loaders here.
        if not os.path.exists(cur_slide_path):
            cur_slide_path = os.path.join(slides_dir, ele+".SVS")

        # locate tissue contours on a downsampled thumbnail
        cnts, d_factor = tl.locate_tissue_cnts(cur_slide_path, max_img_size=2048,
                                               smooth_sigma=13, thresh_val=0.88,
                                               min_tissue_size=120000)
        select_level, select_factor = tl.select_slide_level(cur_slide_path, max_size=2048)
        cnts = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)

        # scale the largest contour from the thumbnail level to patch_level
        wsi_head = pyramid.load_wsi_head(cur_slide_path)
        cnt_scale = select_factor / int(wsi_head.level_downsamples[patch_level])
        tissue_arr = cv_cnt_to_np_arr(cnts[0] * cnt_scale).astype(np.int32)
        # replace an invalid (self-intersecting) polygon with its convex hull
        tissue_poly = np_arr_to_poly(tissue_arr)
        if not tissue_poly.is_valid:
            tissue_arr = poly_to_np_arr(tissue_poly.convex_hull).astype(int)

        if overlap_mode == "half_overlap":
            level_w, level_h = wsi_head.level_dimensions[patch_level]
            coors_arr = contour.contour_patch_splitting_half_overlap(
                tissue_arr, level_h, level_w, patch_size, inside_ratio=0.80)
        elif overlap_mode == "self_overlap":
            coors_arr = contour.contour_patch_splitting_self_overlap(
                tissue_arr, patch_size, inside_ratio=0.80)
        else:
            raise NotImplementedError("unknown overlapping mode")

        wsi_img = wsi_head.read_region((0, 0), patch_level,
                                       wsi_head.level_dimensions[patch_level])
        wsi_img = np.asarray(wsi_img)[:, :, :3]
        mask_path = os.path.join(slides_dir, "_".join([ele, tumor_type+".tif"]))
        mask_img = io.imread(mask_path)
        # BUG FIX: the original multiplied by 255 a second time after the
        # uint8 cast, wrapping 255 -> 1 under uint8 arithmetic; scale once.
        wsi_mask = (transform.resize(mask_img, wsi_img.shape[:2], order=0) * 255).astype(np.uint8)

        if dset == "val":
            # keep a copy of validation slides/masks for standalone testing
            test_slides_dir = os.path.join(os.path.dirname(slides_dir), "TestSlides")
            # BUG FIX: joining the directory with a full path made the
            # existence checks meaningless; compare against the basename.
            if not os.path.exists(os.path.join(test_slides_dir, os.path.basename(cur_slide_path))):
                shutil.copy(cur_slide_path, test_slides_dir)
            if not os.path.exists(os.path.join(test_slides_dir, os.path.basename(mask_path))):
                shutil.copy(mask_path, test_slides_dir)

        for cur_arr in coors_arr:
            cur_h, cur_w = cur_arr[0], cur_arr[1]
            cur_patch = wsi_img[cur_h:cur_h+patch_size, cur_w:cur_w+patch_size]
            # skip partial patches at the slide border
            if cur_patch.shape[0] != patch_size or cur_patch.shape[1] != patch_size:
                continue
            cur_mask = wsi_mask[cur_h:cur_h+patch_size, cur_w:cur_w+patch_size]
            # background RGB (235, 210, 235) * [0.299, 0.587, 0.114]
            if patch.patch_bk_ratio(cur_patch, bk_thresh=0.864) > 0.88:
                continue
            # in half_overlap viable mode, skip nearly empty masks
            if overlap_mode == "half_overlap" and tumor_type == "viable":
                pixel_ratio = np.sum(cur_mask > 0) * 1.0 / cur_mask.size
                if pixel_ratio < 0.05:
                    continue
            patch_name = ele + "_" + str(uuid.uuid1())[:8]
            io.imsave(os.path.join(patch_img_dir, patch_name+".jpg"), cur_patch)
            io.imsave(os.path.join(patch_mask_dir, patch_name+".png"), cur_mask)
            ttl_patch += 1
    print("There are {} patches in total.".format(ttl_patch))