# Assumed module-level imports for the snippets below (not shown in the original extract):
import multiprocessing as mp
import os
import pickle
import signal
import subprocess
import time
from functools import partial
from multiprocessing import Pool
from numbers import Number
from shutil import copyfile

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

from medseg import utils


def recommend_slices_parallel(prediction_path, uncertainty_path, gt_path, save_path,
                              find_best_slices_func, num_slices, slice_gap, default_size):
    prediction_filenames = utils.load_filenames(prediction_path)
    uncertainty_filenames = utils.load_filenames(uncertainty_path)
    gt_filenames = utils.load_filenames(gt_path)
    pool = mp.Pool(processes=8)
    start_time = time.time()
    results = pool.map(
        partial(recommend_slices_single_case,
                prediction_filenames=prediction_filenames,
                uncertainty_filenames=uncertainty_filenames,
                gt_filenames=gt_filenames,
                save_path=save_path,
                find_best_slices_func=find_best_slices_func,
                num_slices=num_slices,
                slice_gap=slice_gap,
                default_size=default_size),
        range(len(uncertainty_filenames)))
    pool.close()
    pool.join()
    print("Recommend slices elapsed time: ", time.time() - start_time)
    # Each result is a pair (recommended_slices, gt_slices) for one case.
    results = np.asarray(results)
    total_recommended_slices = np.sum(results[:, 0])
    total_gt_slices = np.sum(results[:, 1])
    total_ratio = total_recommended_slices / total_gt_slices
    print("total recommended slices: {}, total gt slices: {}, total ratio: {}".format(
        total_recommended_slices, total_gt_slices, total_ratio))
    return total_ratio

def check_all_predictions_exist():
    # Compare the number of written prediction files against the number of
    # expected predictions (uses the module-level path globals).
    filenames = utils.load_filenames(refined_prediction_save_path)
    nr_predictions = len(utils.load_filenames(prediction_path))
    counter = 0
    for filename in filenames:
        if ".nii.gz" in filename:
            counter += 1
    return counter == nr_predictions

def add_to_images_or_masks(image_path, guiding_mask_path, save_path, is_mask=False):
    # Append the guiding mask as an additional channel to each image.
    image_filenames = utils.load_filenames(image_path)
    guiding_mask_filenames = utils.load_filenames(guiding_mask_path)
    for i in tqdm(range(len(image_filenames))):
        image, affine, spacing, header = utils.load_nifty(image_filenames[i])
        guiding_mask, _, _, _ = utils.load_nifty(guiding_mask_filenames[i])
        image = np.stack([image, guiding_mask], axis=-1)
        utils.save_nifty(save_path + os.path.basename(image_filenames[i]),
                         image, affine, spacing, header, is_mask=is_mask)

def recommend_slices(prediction_path, uncertainty_path, gt_path, save_path,
                     find_best_slices_func, num_slices, slice_gap, default_size):
    prediction_filenames = utils.load_filenames(prediction_path)
    uncertainty_filenames = utils.load_filenames(uncertainty_path)
    gt_filenames = utils.load_filenames(gt_path)
    total_recommended_slices = 0
    total_gt_slices = 0
    for i in tqdm(range(len(uncertainty_filenames))):
        uncertainty, affine, spacing, header = utils.load_nifty(uncertainty_filenames[i])
        prediction, _, _, _ = utils.load_nifty(prediction_filenames[i])
        gt, _, _, _ = utils.load_nifty(gt_filenames[i])
        adapted_slice_gap = adapt_slice_gap(uncertainty, slice_gap, default_size)
        # indices_dim_0: Sagittal
        # indices_dim_1: Coronal
        # indices_dim_2: Axial
        indices_dim_0, indices_dim_1, indices_dim_2 = find_best_slices_func(
            prediction, uncertainty, num_slices, adapted_slice_gap)
        recommended_slices = len(indices_dim_0) + len(indices_dim_1) + len(indices_dim_2)
        gt_slices = comp_gt_slices(gt)
        total_recommended_slices += recommended_slices
        total_gt_slices += gt_slices
        print("name: {} recommended slices: {}, gt slices: {}, ratio: {}".format(
            os.path.basename(uncertainty_filenames[i]), recommended_slices,
            gt_slices, recommended_slices / gt_slices))
        # print("indices_dim_0: {}, indices_dim_1: {}, indices_dim_2: {}".format(indices_dim_0, indices_dim_1, indices_dim_2))
        filtered_mask = filter_mask(gt, indices_dim_0, indices_dim_1, indices_dim_2)
        utils.save_nifty(save_path + os.path.basename(uncertainty_filenames[i])[:-7] + "_0001.nii.gz",
                         filtered_mask, affine, spacing, header, is_mask=True)
    total_ratio = total_recommended_slices / total_gt_slices
    print("total recommended slices: {}, total gt slices: {}, total ratio: {}".format(
        total_recommended_slices, total_gt_slices, total_ratio))
    return total_ratio

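# The find_best_slices_func callables passed to recommend_slices() are not part
# of this extract. Below is a minimal, hypothetical sketch of one, assuming the
# interface used above: it takes the prediction, the uncertainty map, the
# number of slices per axis, and the adapted slice gap, and returns three lists
# of slice indices (sagittal, coronal, axial). The greedy per-axis selection is
# an illustration, not the project's actual strategy.
def find_best_slices_by_uncertainty_sketch(prediction, uncertainty, num_slices, slice_gap):
    all_indices = []
    for dim in range(3):
        # Score each slice along this axis by its summed uncertainty.
        other_axes = tuple(a for a in range(3) if a != dim)
        scores = uncertainty.sum(axis=other_axes)
        selected = []
        # Greedily pick the highest-scoring slices while enforcing a minimum
        # gap between any two selected slices.
        for idx in np.argsort(scores)[::-1]:
            if len(selected) == num_slices:
                break
            if all(abs(int(idx) - s) >= slice_gap for s in selected):
                selected.append(int(idx))
        all_indices.append(selected)
    return all_indices[0], all_indices[1], all_indices[2]
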
def rename(case_path):
    # Normalize the raw file names of a single case directory. The "label"
    # check must run before the generic ".nii.gz" check, otherwise the label
    # file would be renamed to image.nii.gz.
    filenames = utils.load_filenames(case_path + "/", extensions=None)
    for filename in filenames:
        name = os.path.basename(filename)
        if "label" in name and ".nii.gz" in name:
            os.rename(filename, case_path + "/mask.nii.gz")
        elif ".txt" in name:
            os.rename(filename, case_path + "/label_table.txt")
        elif ".nii.gz" in name:
            os.rename(filename, case_path + "/image.nii.gz")

def remove_label(load_path, save_path, labels_to_remove):
    save_path = utils.fix_path(save_path)
    load_path = utils.fix_path(load_path)
    filenames = utils.load_filenames(load_path)
    for filename in tqdm(filenames):
        basename = os.path.basename(filename)
        mask, affine, spacing, header = utils.load_nifty(filename)
        # Zero out every label that should be removed, then round so the mask
        # stays integer-valued.
        for label in labels_to_remove:
            mask[mask == label] = 0
        mask = np.rint(mask).astype(int)
        utils.save_nifty(save_path + basename, mask, affine, spacing, header)

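# Example call (paths and label values are placeholders):
# remove_label("/data/labelsTr/", "/data/labelsTr_filtered/", labels_to_remove=[3, 4])
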
def select_rois(img_dir, uncertainty_mask_dir, save_dir, window_size_percentage=0.02,
                window_per_border=3, max_rois=5, min_z_distance_percentage=0.1, max_iou=0.1):
    imgs_filenames = utils.load_filenames(img_dir)
    uncertainty_masks_filenames = utils.load_filenames(uncertainty_mask_dir)
    uncertainty_masks = [utils.load_nifty(uncertainty_mask_filename)[0]
                         for uncertainty_mask_filename in uncertainty_masks_filenames]
    uncertainty_masks = [utils.normalize(uncertainty_mask) for uncertainty_mask in uncertainty_masks]
    uncertainty_masks_size_mean = comp_uncertainty_masks_mean(uncertainty_masks)
    window_shapes = comp_window_shapes(uncertainty_masks_size_mean,
                                       window_size_percentage, window_per_border)
    for i in tqdm(range(len(imgs_filenames))):
        img, affine, spacing, header = utils.load_nifty(imgs_filenames[i])
        if len(img.shape) == 4:  # TODO: Remove modality in the case of the prostate dataset, remove in final version
            img = img[..., 0]
        img_reoriented = utils.reorient(img, affine)  # TODO: Reorient is hardcoded
        uncertainty_mask_reoriented = utils.reorient(uncertainty_masks[i], affine)
        rois = []  # Each entry is [roi_sum, x, y, z, width, length]
        for window_shape in tqdm(window_shapes):
            window_shape_rois = comp_rois_single_window_shape(uncertainty_mask_reoriented, window_shape)
            rois.extend(window_shape_rois)
        rois = np.asarray(rois)
        rois = filter_rois(rois, max_rois, uncertainty_mask_reoriented.shape,
                           min_z_distance_percentage, max_iou)
        rois = extract_rois(img_reoriented, uncertainty_mask_reoriented, rois)
        save_rois(save_dir, os.path.basename(uncertainty_masks_filenames[i][:-7]) + "/",
                  rois, img, uncertainty_masks[i], affine, spacing, header)

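# filter_rois() is not shown in this extract; its max_iou parameter suggests
# that overlapping candidate windows are suppressed. A hypothetical sketch of
# the pairwise IoU test for two ROIs in the [roi_sum, x, y, z, width, length]
# format used above (an assumption, not the original implementation):
def roi_iou_sketch(roi_a, roi_b):
    _, ax, ay, _, aw, al = roi_a
    _, bx, by, _, bw, bl = roi_b
    # Intersection of the two axis-aligned windows in the x/y plane.
    ix = max(0.0, min(ax + aw, bx + bw) - max(ax, bx))
    iy = max(0.0, min(ay + al, by + bl) - max(ay, by))
    intersection = ix * iy
    union = aw * al + bw * bl - intersection
    return intersection / union if union > 0 else 0.0
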
def comp_guiding_mask(load_path, save_path, slice_gap, default_size, slice_depth=3):
    filenames = utils.load_filenames(load_path)
    for filename in tqdm(filenames):
        mask, affine, spacing, header = utils.load_nifty(filename)
        adapted_slice_gap = adapt_slice_gap(mask, slice_gap, default_size)
        mask_slices = comp_slices_mask(mask, adapted_slice_gap, slice_depth=slice_depth)
        utils.save_nifty(save_path + os.path.basename(filename),
                         mask_slices, affine, spacing, header, is_mask=True)

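# adapt_slice_gap() is used throughout this extract but not shown. A plausible
# sketch, assuming it scales the configured gap with the volume's extent
# relative to a reference size (an assumption, not the original code):
def adapt_slice_gap_sketch(mask, slice_gap, default_size):
    # Smaller volumes get a proportionally smaller minimum gap.
    return int(slice_gap * (min(mask.shape) / default_size))
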
def copy_masks_for_inference(load_dir, save_dir):
    # Split the masks into four equal parts so that four inference processes
    # can run in parallel (see inference()); the last part takes any remainder.
    filenames = utils.load_filenames(load_dir)
    quarter = len(filenames) // 4
    for part in range(4):
        start = part * quarter
        end = (part + 1) * quarter if part < 3 else None
        part_save_dir = save_dir[:-1] + "_temp{}/".format(part)
        for filename in filenames[start:end]:
            copyfile(filename, part_save_dir + os.path.basename(filename))

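# The *_temp0..3 directories must exist before copying, since copyfile does not
# create them; e.g. (a usage sketch):
# for part in range(4):
#     os.makedirs(save_dir[:-1] + "_temp{}/".format(part), exist_ok=True)
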
def comp_uncertainties(load_dir, save_dir, uncertainty_estimator, type="part"):
    load_dir = utils.fix_path(load_dir)
    save_dir = utils.fix_path(save_dir)
    filenames = utils.load_filenames(load_dir)
    cases, nr_labels, nr_parts = group_data(filenames)
    print("nr_cases: ", len(cases))
    print("nr_labels: ", nr_labels)
    print("nr_parts: ", nr_parts)
    for case in tqdm(cases):
        for label in range(nr_labels + 1):
            # Stack all ensemble parts for this case and label, then reduce
            # them to a single uncertainty map.
            predictions = []
            for part in range(nr_parts + 1):
                name = load_dir + str(case).zfill(4) + "_" + str(label) + "_" + type + "_" + str(part) + ".nii.gz"
                prediction, affine, spacing, header = utils.load_nifty(name)
                predictions.append(prediction.astype(np.float16))
            predictions = np.stack(predictions)
            uncertainty = uncertainty_estimator(predictions)
            name = save_dir + str(case).zfill(4) + "_" + str(label) + ".nii.gz"
            utils.save_nifty(name, uncertainty, affine, spacing, header)

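# The uncertainty_estimator argument is any callable that reduces the stacked
# predictions (shape: [n_parts, *volume_shape]) to a single uncertainty volume.
# A common choice is the per-voxel variance across ensemble members; this
# sketch is an illustration, not necessarily the estimator used in this project.
def variance_uncertainty_sketch(predictions):
    # Cast up from float16 to avoid precision loss when squaring.
    return np.var(predictions.astype(np.float32), axis=0)
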
def evaluate(prediction_dir, ground_truth_dir, uncertainty_dir, labels):
    prediction_filenames = utils.load_filenames(prediction_dir)
    ground_truth_filenames = [os.path.join(ground_truth_dir, os.path.basename(prediction_filename))
                              for prediction_filename in prediction_filenames]
    # For every prediction, collect one uncertainty file per label.
    uncertainty_filenames = []
    for prediction_filename in prediction_filenames:
        basename = os.path.basename(prediction_filename)
        uncertainty_label_filenames = []
        for label in labels:
            filename = os.path.join(uncertainty_dir, '{}_{}.nii.gz'.format(basename[:-7], label))
            uncertainty_label_filenames.append(filename)
        uncertainty_filenames.append(uncertainty_label_filenames)
    uncertainty_filenames = np.asarray(uncertainty_filenames)
    prediction_filenames, ground_truth_filenames, uncertainty_filenames = remove_missing_cases(
        prediction_filenames, ground_truth_filenames, uncertainty_filenames)
    results = []
    for i, label in enumerate(tqdm(labels)):
        results.append(evaluate_label(prediction_filenames, ground_truth_filenames,
                                      uncertainty_filenames[:, i], label))
    for i in range(len(labels)):
        label = results[i]["label"]
        thresholds = results[i]["thresholds"]
        threshold_scores = results[i]["threshold_scores"]
        for j in range(len(thresholds)):
            print("Label: {}, Threshold: {}, Dice Score: {}, Uncertainty Dice Score 1: {}, "
                  "Uncertainty Dice Score 2: {}, Uncertainty Miss Coverage Ratio: {}, "
                  "Uncertainty GT Ratio: {}".format(
                      label, thresholds[j], round(threshold_scores[j][0], 3),
                      round(threshold_scores[j][1], 3), round(threshold_scores[j][2], 3),
                      round(threshold_scores[j][3], 3), round(threshold_scores[j][4], 3)))
        print("---------------------------------------")

def inference(available_devices, gt_path):
    # task is a module-level variable naming the nnU-Net task (not shown in this extract).
    input_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/" + task + "/imagesTs_temp"
    # output_path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task072_allGuided_ggo/Task072_allGuided_ggo_predictionsTs"
    start_time = time.time()
    filenames = utils.load_filenames(refined_prediction_save_path, extensions=None)
    print("load_filenames: ", time.time() - start_time)
    # Clear any leftover predictions from a previous run.
    start_time = time.time()
    for filename in filenames:
        os.remove(filename)
    parts_to_process = [0, 1, 2, 3]
    waiting = []
    finished = []
    wait_time = 5
    start_inference_time = time.time()
    print("remove: ", time.time() - start_time)
    print("Starting inference...")
    # Dispatch each part to a free device; when no device is free, poll the
    # running processes until one finishes and releases its device.
    while parts_to_process:
        if available_devices:
            device = available_devices[0]
            available_devices = available_devices[1:]
            part = parts_to_process[0]
            parts_to_process = parts_to_process[1:]
            print("Processing part {} on device {}...".format(part, device))
            command = ('nnUNet_predict -i ' + str(input_path) + str(part) +
                       ' -o ' + str(refined_prediction_save_path) +
                       ' -tr nnUNetTrainerV2Guided3 -t ' + task +
                       ' -m 3d_fullres -f 0 -d ' + str(device) +
                       ' -chk model_best --disable_tta --num_threads_preprocessing 1 --num_threads_nifti_save 1')
            p = subprocess.Popen(command, shell=True, stdout=subprocess.DEVNULL, preexec_fn=os.setsid)
            waiting.append([part, device, p, time.time()])
        else:
            for w in waiting:
                if w[2].poll() is not None:
                    print("Finished part {} on device {} after {}s.".format(w[0], w[1], time.time() - w[3]))
                    available_devices.append(w[1])
                    finished.append(w[0])
                    waiting.remove(w)
                    break
            time.sleep(wait_time)
    print("All parts are being processed.")
    # Wait until every expected prediction file has been written; waiting and
    # finished are not updated in this loop, so the filesystem check via the
    # module-level check_all_predictions_exist() is what eventually ends it.
    while waiting and len(finished) < 4 and not check_all_predictions_exist():
        time.sleep(wait_time)
    print("All predictions finished.")
    time.sleep(30)
    print("Cleaning up threads")
    # [os.killpg(os.getpgid(p.pid), signal.SIGTERM) for p in finished]
    for w in waiting:
        os.killpg(os.getpgid(w[2].pid), signal.SIGTERM)
    os.remove(refined_prediction_save_path + "/plans.pkl")
    print("Total inference time {}s.".format(time.time() - start_inference_time))
    print("All parts finished processing.")
    mean_dice_score, median_dice_score = evaluate(gt_path, refined_prediction_save_path, (0, 1))
    return mean_dice_score, median_dice_score

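# Hypothetical invocation, assuming four GPUs (device ids and the ground-truth
# path are placeholders):
# mean_dice, median_dice = inference(available_devices=[0, 1, 2, 3],
#                                    gt_path="/data/" + task + "/labelsTs/")
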
def rename_guiding_masks(data_path):
    # Assign sequential case ids; the "_0001" suffix marks the guiding-mask
    # input channel.
    filenames = utils.load_filenames(data_path)
    for i, filename in enumerate(filenames):
        basename = str(i + 1).zfill(4) + "_0001.nii.gz"
        os.rename(filename, data_path + basename)

# Variant of evaluate() with an explicit threshold sweep.
def evaluate(data_dir, prediction_dir, ground_truth_dir, uncertainty_dir, labels,
             end=None, step=None, parallel=False):
    if end is not None:
        thresholds = np.arange(0.0, end, step)
    else:
        thresholds = None
    print("Thresholds: ", thresholds)
    prediction_filenames = utils.load_filenames(prediction_dir)
    ground_truth_filenames = [os.path.join(ground_truth_dir, os.path.basename(prediction_filename))
                              for prediction_filename in prediction_filenames]
    uncertainty_filenames = []
    for prediction_filename in prediction_filenames:
        basename = os.path.basename(prediction_filename)
        uncertainty_label_filenames = []
        for label in labels:
            filename = os.path.join(uncertainty_dir, '{}_{}.nii.gz'.format(basename[:-7], label))
            uncertainty_label_filenames.append(filename)
        uncertainty_filenames.append(uncertainty_label_filenames)
    uncertainty_filenames = np.asarray(uncertainty_filenames)
    prediction_filenames, ground_truth_filenames, uncertainty_filenames = remove_missing_cases(
        prediction_filenames, ground_truth_filenames, uncertainty_filenames)
    results = []
    start_time = time.time()
    for i, label in enumerate(tqdm(labels)):
        predictions, ground_truths, uncertainties = load_data(
            prediction_filenames, ground_truth_filenames, uncertainty_filenames[:, i])
        predictions, ground_truths = binarize_data_by_label(predictions, ground_truths, label)
        if thresholds is None:
            thresholds = find_best_threshold(predictions, ground_truths, uncertainties)
        if isinstance(thresholds, Number):
            thresholds = [thresholds]
        if not parallel:
            for threshold in thresholds:
                result = evaluate_threshold(predictions, ground_truths, uncertainties, threshold)
                # result["label"] = label
                # result["threshold"] = threshold
                results.append(result)
        else:
            with Pool(processes=4) as pool:  # multiprocessing.cpu_count() kills memory
                # Bind the data positionally so that pool.map passes each
                # threshold as the last argument of evaluate_threshold; binding
                # them as keywords would clash with the mapped positional argument.
                results = pool.map(partial(evaluate_threshold, predictions, ground_truths, uncertainties),
                                   thresholds)
            # Note: this reassignment discards the results of any previously
            # processed label; the parallel path effectively supports a single label.
            results = [{"label": label,
                        "threshold": thresholds[i],
                        "dice_score": results[i][0],
                        "uncertainty_sum": results[i][1]} for i in range(len(results))]  # TODO: Old
    for key in results[0].keys():
        plt.plot(thresholds, [result[key] for result in results], label=key)
    plt.legend(loc="upper left")
    plt.xlim(0, end)
    plt.ylim(0, 2)
    plt.savefig(data_dir + os.path.basename(uncertainty_dir[:-1]) + "_end" + str(end) + "_step" + str(step) + '.png')
    for result in results:
        print(result)
    with open(data_dir + os.path.basename(uncertainty_dir[:-1]) + "_end" + str(end) + "_step" + str(step) + ".pkl", 'wb') as handle:
        pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Elapsed time (evaluate): ", time.time() - start_time)

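# Hypothetical call sweeping uncertainty thresholds from 0.0 to 1.0 in steps
# of 0.1 (all paths and the label set are placeholders):
# evaluate(data_dir="/data/eval/", prediction_dir="/data/predictions/",
#          ground_truth_dir="/data/labelsTs/", uncertainty_dir="/data/uncertainties/",
#          labels=[1], end=1.0, step=0.1, parallel=False)
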
# Standalone script: renumber the guiding masks, starting at case index 110.
from medseg import utils
import os

path = "/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task072_allGuided_ggo/guiding_masks/"
index = 110

# First pass: move every file to a temporary name so the second pass cannot
# overwrite a file that already carries one of the target names.
filenames = utils.load_filenames(path)
for filename in filenames:
    os.rename(filename, filename[:-7] + "_tmp.nii.gz")

# Second pass: assign sequential, zero-padded case ids.
filenames = utils.load_filenames(path)
for filename in filenames:
    os.rename(filename, path + str(index).zfill(4) + "_0001.nii.gz")  # _0000
    index += 1

def round_masks(load_path, save_path):
    filenames = utils.load_filenames(load_path)
    for filename in tqdm(filenames):
        round_mask(filename, save_path)

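# round_mask() (singular) is referenced above but not part of this extract. A
# minimal sketch consistent with the rounding done in remove_label() (an
# assumption about the actual helper):
def round_mask_sketch(filename, save_path):
    mask, affine, spacing, header = utils.load_nifty(filename)
    # Round to the nearest label value and save as an integer mask.
    mask = np.rint(mask).astype(int)
    utils.save_nifty(save_path + os.path.basename(filename), mask, affine, spacing, header, is_mask=True)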