# Load samples # samples = [os.path.basename(x) for x in glob(str(args.dataset_root / '*XZ'))] # Load with specific name samples = os.listdir(args.dataset_root) samples.sort() #samples = [samples[id] for id in [106]] # Get intended samples from list # Skip the completed samples if args.completed > 0: samples = samples[args.completed:] for idx, sample in enumerate(samples): print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') # Load image stacks data_xz, files = load(str(args.dataset_root / sample), rgb=True, axis=(1, 2, 0)) data_yz = np.transpose(data_xz, (0, 2, 1, 3)) # Y-Z-X-Ch mask_xz = np.zeros(data_xz.shape)[:, :, :, 0] # Remove channel dimension mask_yz = np.zeros(data_yz.shape)[:, :, :, 0] # Loop for image slices # 1st orientation with torch.no_grad(): # Do not update gradients for slice_idx in tqdm(range(data_yz.shape[2]), desc='Running inference, YZ'): mask_yz[:, :, slice_idx] = inference(model, args, config, data_yz[:, :, slice_idx, :]) # 2nd orientation
# Initialize results results = {'Sample': [], 'Most frequent thickness value': []} # Loop for samples args.save_dir.mkdir(exist_ok=True) (args.save_dir / 'Visualizations').mkdir(exist_ok=True) samples = os.listdir(str(args.mask_path)) #samples = [os.path.basename(x) for x in glob(str(args.mask_path / '*'))] samples.sort() for sample in tqdm(samples, 'Analysing thickness'): # Sample path thickness_path = base_path / sample / subdir # Load image stacks if thickness_path.exists(): data, files = load(str(thickness_path), rgb=False, n_jobs=args.n_threads) else: continue # Visualize thickness map print_orthogonal(data, savepath=str(args.save_dir / 'Visualizations' / ('CCTh_' + sample + '.png'))) # Histogram data = data.flatten() #data = data[data != 0] # Remove zeros max_value = np.max(data) hist, bin_edges = np.histogram(data * 3.2, bins=254, range=[1, max_value]) # Exclude background plt.hist(data, bins=range(1, max_value)) plt.title(sample) plt.savefig(str(args.save_dir / 'Visualizations' / ('histogram_' + sample + '.png')))
)
parser.add_argument(
    '--mask_dir',
    type=pathlib.Path,
    #default='/media/dios/dios2/RabbitSegmentation/µCT/predictions_4fold/8C_M1_lateral_condyle_XZ/Largest')
    default='/media/dios/dios2/RabbitSegmentation/Histology/Insaf_series/Masks/Binned3/Binned2/Binned3'
)
parser.add_argument('--crop', type=str, default='value')
# NOTE(review): argparse `type=bool` is a known pitfall — bool('False') is True,
# so these flags cannot be disabled from the command line; consider
# action='store_true'/'store_false' in a follow-up
parser.add_argument('--saved', type=bool, default=True)
parser.add_argument('--plot', type=bool, default=False)
parser.add_argument('--largest', type=bool, default=False)
args = parser.parse_args()

# Load and set paths
im_files, data = load(args.dataset_root, rgb=True, uCT=False)
mask_files, mask = load(args.mask_dir, rgb=False, uCT=False)

# Expand mask to 3 channels
# mask_large = np.zeros((mask.shape[0], mask.shape[1], 3, mask.shape[2]))
# for i in range(mask_large.shape[2]):
#     mask_large[:, :, i, :] = mask

if args.crop == 'bbox':
    # Get bounding box for masks and crop data + mask
    contours = []
    removed = 0  # offset to account for samples dropped mid-iteration
    for sample in tqdm(range(len(mask)), 'Getting bounding boxes'):
        try:
            bbox, contour = bounding_box(mask[sample - removed], largest=args.largest)
parser.add_argument('--plot', type=bool, default=False)
parser.add_argument('--dtype', type=str, choices=['.bmp', '.png', '.tif'], default='.bmp')
args = parser.parse_args()

# Loop for samples
args.save_dir.mkdir(exist_ok=True)
samples = os.listdir(str(args.data_path))
#samples = [os.path.basename(x) for x in glob(str(args.mask_path / '*.png'))]
samples.sort()
for sample in tqdm(samples, 'Smoothing'):
    #try:
    # Load image (16-bit µCT stack scaled to 8-bit for display)
    _, data = load(str(args.data_path / sample), uCT=True)
    data_scaled = map_uint16_to_uint8(data, lower_bound=0, upper_bound=40000)
    print_orthogonal(data_scaled)
    img = cv2.imread(str(args.mask_path / sample), cv2.IMREAD_GRAYSCALE)
    if args.plot:
        plt.imshow(img)
        plt.title('Loaded image')
        plt.show()

    # Opening
    # NOTE(review): a morphological OPEN is performed but the kernel size comes
    # from args.k_closing and the commented plot says 'Closing' — confirm intent
    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=args.k_closing)
    img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel=kernel)
    #plt.imshow(img); plt.title('Closing'); plt.show()
def evaluation_runner(args, config, save_dir):
    """
    Calculate evaluation metrics (Dice, IoU, volumetric similarity) for
    predicted masks against the target masks, and save results to Excel.

    :param args: Namespace with ``data_location`` (pathlib.Path) and
                 ``n_threads``. The derived paths (``image_path``,
                 ``mask_path``, ``pred_path``, ``save_dir``) and
                 ``n_labels`` are stored back on ``args`` as a side effect.
    :param config: Experiment configuration; ``config['training']['experiment']``
                   selects 3D stack evaluation ('3D') vs per-image 2D.
    :param save_dir: A snapshot directory (anything with a ``.name``
                     attribute) or a list of them; one metrics workbook is
                     written per snapshot.
    :return: None. Writes ``metrics_<snapshot>.xlsx`` and visualization PNGs
             under ``<data_location>/evaluation``.
    """
    start_eval = time()

    # Evaluation arguments (derived paths stored back on args)
    args.image_path = args.data_location / 'images'
    args.mask_path = args.data_location / 'masks'
    args.pred_path = args.data_location / 'predictions'
    args.save_dir = args.data_location / 'evaluation'
    args.save_dir.mkdir(exist_ok=True)
    args.n_labels = 2

    # Snapshots to be evaluated: accept a single snapshot or a list
    if not isinstance(save_dir, list):
        save_dir = [save_dir]

    # Iterate through snapshots
    for snap in save_dir:
        # Initialize results
        results = {'Sample': [], 'Dice': [], 'IoU': [], 'Similarity': []}
        vis_dir = args.save_dir / ('visualizations_' + snap.name)
        vis_dir.mkdir(exist_ok=True)

        # Loop for samples
        samples = os.listdir(str(args.mask_path))
        samples.sort()
        for idx, sample in enumerate(samples):
            print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}')
            # The try/except sits INSIDE the loop so that one failing sample
            # is skipped and the remaining samples are still evaluated
            # (previously the whole loop was wrapped, so a single failure
            # silently dropped every later sample of the snapshot).
            try:
                # Load image stacks
                if config['training']['experiment'] == '3D':
                    mask, files_mask = load(str(args.mask_path / sample), axis=(0, 2, 1),
                                            rgb=False, n_jobs=args.n_threads)
                    pred, files_pred = load(str(args.pred_path / snap.name / sample), axis=(0, 2, 1),
                                            rgb=False, n_jobs=args.n_threads)
                    data, files_data = load(str(args.image_path / sample), axis=(0, 2, 1),
                                            rgb=False, n_jobs=args.n_threads)

                    # Crop in case of inconsistency between stack shapes
                    crop = min(pred.shape, mask.shape)
                    mask = mask[:crop[0], :crop[1], :crop[2]]
                    pred = pred[:crop[0], :crop[1], :crop[2]]
                else:
                    # 2D: one image per sample
                    data = cv2.imread(str(args.image_path / sample))
                    mask = cv2.imread(str(args.mask_path / sample), cv2.IMREAD_GRAYSCALE)
                    pred = cv2.imread(str(args.pred_path / snap.name / sample), cv2.IMREAD_GRAYSCALE)
                    if pred is None:
                        # Prediction may have been saved with a .bmp extension
                        sample = sample[:-4] + '.bmp'
                        pred = cv2.imread(str(args.pred_path / snap.name / sample),
                                          cv2.IMREAD_GRAYSCALE)
                    elif mask is None:
                        mask = cv2.imread(str(args.mask_path / sample), cv2.IMREAD_GRAYSCALE)

                    # Crop in case of inconsistency
                    crop = min(pred.shape, mask.shape)
                    mask = mask[:crop[0], :crop[1]]
                    pred = pred[:crop[0], :crop[1]]

                # Evaluate metrics.
                # np.bool was removed in NumPy >= 1.24; the builtin bool is the
                # documented replacement and behaves identically here.
                conf_matrix = calculate_conf(pred.astype(bool), mask.astype(bool), args.n_labels)
                dice = calculate_dice(conf_matrix)[1]
                iou = calculate_iou(conf_matrix)[1]
                sim = calculate_volumetric_similarity(conf_matrix)[1]
                print(f'Sample {sample}: dice = {dice}, IoU = {iou}, similarity = {sim}')

                # Save visualizations of input / reference / predicted mask (3D only)
                if config['training']['experiment'] == '3D':
                    print_orthogonal(data, invert=False, res=3.2, cbar=True,
                                     savepath=str(vis_dir / (sample + '_input.png')),
                                     scale_factor=1500)
                    print_orthogonal(data, mask=mask, invert=False, res=3.2, cbar=True,
                                     savepath=str(vis_dir / (sample + '_reference.png')),
                                     scale_factor=1500)
                    print_orthogonal(data, mask=pred, invert=False, res=3.2, cbar=True,
                                     savepath=str(vis_dir / (sample + '_prediction.png')),
                                     scale_factor=1500)

                # Update results
                results['Sample'].append(sample)
                results['Dice'].append(dice)
                results['IoU'].append(iou)
                results['Similarity'].append(sim)
            except AttributeError:
                print(f'Sample {sample} failing. Skipping to next one.')
                continue

        # Add average values (guard against a run where every sample failed,
        # which would make np.average warn and produce NaN)
        if results['Dice']:
            results['Sample'].append('Average values')
            results['Dice'].append(np.average(results['Dice']))
            results['IoU'].append(np.average(results['IoU']))
            results['Similarity'].append(np.average(results['Similarity']))

        # Write to excel. The context manager saves and closes the workbook;
        # ExcelWriter.save() was deprecated and removed in pandas 2.0.
        with pd.ExcelWriter(str(args.save_dir / ('metrics_' + str(snap.name))) + '.xlsx') as writer:
            df1 = pd.DataFrame(results)
            df1.to_excel(writer, sheet_name='Metrics')

    print(
        f'Metrics evaluated in {(time() - start_eval) // 60} minutes, {(time() - start_eval) % 60} seconds.'
    )
        # (continuation of the results dict opened above this chunk)
        'Mean thickness': [],
        'Median thickness': [],
        'Thickness STD': [],
        'Maximum thickness': []
}
# Timestamp used to tag this analysis run
t = strftime(f'%Y_%m_%d_%H_%M')

# Loop for samples
for sample in samples:
    time_sample = time()
    print(f'Processing sample {sample}')

    # Load prediction
    pred, files = load(str(args.masks / sample), axis=(
        1,
        2,
        0,
    ))

    # Downscale
    #pred = (ndi.zoom(pred, 0.25) > 126).astype(np.bool)

    if args.plot:
        print_orthogonal(pred, savepath=str(args.th_maps / 'visualization' / (sample + '_pred.png')))

    # Median filter to remove speckle noise before thickness analysis
    pred = ndi.median_filter(pred, size=args.median)
    if args.plot:
        print_orthogonal(pred,
# Initialize results results = {'Sample': [], 'Dice': [], 'IoU': [], 'Similarity': []} # Loop for samples (args.save_dir / ('visualizations_' + snap)).mkdir(exist_ok=True) samples = os.listdir(str(args.mask_path)) samples.sort() for idx, sample in enumerate(samples): sleep(0.5) print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') # Load image stacks if 'subdir_mask' in locals(): mask, files_mask = load(str(args.mask_path / sample / subdir_mask), rgb=False, n_jobs=args.n_threads) else: mask, files_mask = load(str(args.mask_path / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads) if 'subdir' in locals(): pred, files_pred = load(str(args.prediction_path / snap / sample / subdir), rgb=False, n_jobs=args.n_threads) else: pred, files_pred = load(str(args.prediction_path / snap / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads) data, files_data = load(str(args.image_path / sample), axis=(0, 2, 1), rgb=False, n_jobs=args.n_threads) # Crop in case of inconsistency crop = min(pred.shape, mask.shape) mask = mask[:crop[0], :crop[1], :crop[2]] pred = pred[:crop[0], :crop[1], :crop[2]] # Evaluate metrics conf_matrix = calculate_conf(pred.astype(np.bool), mask.astype(np.bool), args.n_labels)
    default='/media/dios/dios2/RabbitSegmentation/µCT/predictions_best_fold/')
# NOTE(review): argparse type=bool cannot be turned off from the CLI
# (bool('False') is True) — consider action='store_true' in a follow-up
parser.add_argument('--crop', type=bool, default=False)
parser.add_argument('--saved', type=bool, default=True)
parser.add_argument('--plot', type=bool, default=True)
parser.add_argument('--largest', type=bool, default=False)
args = parser.parse_args()

im_paths = os.listdir(args.dataset_root)
mask_paths = os.listdir(args.mask_dir)
for dataset in range(len(im_paths)):
    # Load and set paths (image dir and mask dir are paired by sorted index)
    i_path = args.dataset_root / im_paths[dataset]
    m_path = args.mask_dir / mask_paths[dataset] / 'Largest'
    im_files, data = load(str(i_path), rgb=True, uCT=True)
    mask_files, mask = load(str(m_path), rgb=False, uCT=True)

    # Get bounding box for masks and crop data + mask
    contours = []
    removed = 0  # offset for samples dropped mid-iteration
    for sample in tqdm(range(len(mask)), 'Getting bounding boxes'):
        try:
            bbox, contour = bounding_box(mask[sample - removed], largest=args.largest)
        except ValueError:  # Empty mask
            # NOTE(review): the read above indexes mask[sample - removed] but the
            # pops use the unadjusted index — verify these should not also be
            # `sample - removed`
            data.pop(sample)
            mask.pop(sample)
            removed += 1
            continue

        # Add contour to list
# Initialize results results = {'Sample': [], 'Average thickness': []} # Loop for samples args.save_dir.mkdir(exist_ok=True) #samples = os.listdir(str(args.mask_path)) samples = [os.path.basename(x) for x in glob(str(args.mask_path / '*'))] samples.sort() for sample in tqdm(samples, 'Analysing thickness'): # New sample #sample_name = sample.rsplit('_', 1)[0] thickness_list = [] # Load image stacks if 'subdir' in locals(): mask, files_mask = load(str(args.mask_path / sample / subdir), rgb=False, n_jobs=args.n_threads) else: mask, files_mask = load(str(args.mask_path / sample), rgb=False, n_jobs=args.n_threads) for slice in range(mask.shape[0]): img = mask[slice, :, :] if np.max(img) == 0: continue # Threshold >= 125 img = cv2.threshold(img, 125, 255, cv2.THRESH_BINARY)[1] # Calculate thickness thickness_list.append(np.sum(img.flatten() / 255) / img.shape[1])