Example #1
    def __init__(
        self,
        output_dir: str = "./",
        output_postfix: str = "trans",
        output_ext: str = ".nii.gz",
        resample: bool = True,
        mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        scale: Optional[int] = None,
        dtype: DtypeLike = np.float64,
        output_dtype: DtypeLike = np.float32,
        save_batch: bool = False,
        squeeze_end_dims: bool = True,
        data_root_dir: str = "",
        print_log: bool = True,
    ) -> None:
        self.saver: Union[NiftiSaver, PNGSaver]
        if output_ext in (".nii.gz", ".nii"):
            self.saver = NiftiSaver(
                output_dir=output_dir,
                output_postfix=output_postfix,
                output_ext=output_ext,
                resample=resample,
                mode=GridSampleMode(mode),
                padding_mode=padding_mode,
                dtype=dtype,
                output_dtype=output_dtype,
                squeeze_end_dims=squeeze_end_dims,
                data_root_dir=data_root_dir,
                print_log=print_log,
            )
        elif output_ext == ".png":
            self.saver = PNGSaver(
                output_dir=output_dir,
                output_postfix=output_postfix,
                output_ext=output_ext,
                resample=resample,
                mode=InterpolateMode(mode),
                scale=scale,
                data_root_dir=data_root_dir,
                print_log=print_log,
            )
        else:
            raise ValueError(f"unsupported output extension: {output_ext}.")

        self.save_batch = save_batch
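
# --- Usage sketch (not from the source) -----------------------------------
# The __init__ above picks a saver from the output extension. A standalone
# illustration of that dispatch, assuming a MONAI version (~0.5.x) that
# still ships NiftiSaver / PNGSaver:
from monai.data import NiftiSaver, PNGSaver

def make_saver(output_dir: str, output_ext: str = ".nii.gz"):
    """Pick a saver by extension, mirroring the dispatch in __init__ above."""
    if output_ext in (".nii.gz", ".nii"):
        return NiftiSaver(output_dir=output_dir, output_ext=output_ext)
    if output_ext == ".png":
        return PNGSaver(output_dir=output_dir, output_ext=output_ext)
    raise ValueError(f"unsupported output extension: {output_ext}.")

saver = make_saver("./out", ".nii.gz")  # -> NiftiSaver instance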
Example #2
#plt.show()
#fig2.savefig('Pancreas_Plot.png')
"""## Check best model output with the input image and label"""
"""## Make the inferences"""

out_dir = "//home//imoreira//Data//Kidneys_Best_Model"
#out_dir = "C:\\Users\\isasi\\Downloads\\Pancreas_Best_Model"
model.load_state_dict(
    torch.load(os.path.join(out_dir, "best_metric_model.pth")))
model.eval()

with torch.no_grad():
    #saver = NiftiSaver(output_dir='C:\\Users\\isasi\\Downloads\\Pancreas_Segs_Out')
    saver = NiftiSaver(output_dir='/home/imoreira/Kidneys_Segs_Out',
                       output_postfix="seg_kidneys",
                       output_ext=".nii.gz",
                       mode="nearest",
                       padding_mode="zeros")
    for i, train_inf_data in enumerate(train_inf_loader):
        #for test_data in test_loader:
        train_inf_images = train_inf_data["image"].to(device)
        roi_size = (96, 96, 96)
        sw_batch_size = 4

        val_outputs = sliding_window_inference(train_inf_images,
                                               roi_size,
                                               sw_batch_size,
                                               model,
                                               overlap=0.8)

        # val_outputs = torch.squeeze(val_outputs, dim=1)
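
        # Continuation sketch (the source is cut off here). The sibling
        # examples on this page collapse the channel dim with argmax before
        # saving; "image_meta_dict" is the usual MONAI key for dict-based
        # loaders and is an assumption here.
        val_outputs = val_outputs.argmax(dim=1, keepdim=True)
        saver.save_batch(val_outputs, train_inf_data["image_meta_dict"])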
Example #3
fig2.savefig('Lungs_Plot.png')  # save before show: some backends clear the figure on show
plt.show()
"""## Check best model output with the input image and label"""
"""## Make the inferences"""

out_dir = "//home//imoreira//Data//Output"
#out_dir = "C:\\Users\\isasi\\Downloads\\Output"
model.load_state_dict(
    torch.load(os.path.join(out_dir, "best_metric_model.pth")))
model.eval()

with torch.no_grad():
    #saver = NiftiSaver(output_dir='C:\\Users\\isasi\\Downloads\\Segmentations')
    saver = NiftiSaver(output_dir='/home/imoreira/Segmentations',
                       output_postfix="seg_lungs",
                       output_ext=".nii.gz",
                       mode="nearest",
                       padding_mode="zeros")

    for i, test_data in enumerate(test_loader):
        test_images = test_data["image"].to(device)
        roi_size = (160, 160, 160)
        sw_batch_size = 4
        val_outputs_1 = sliding_window_inference(test_images, roi_size,
                                                 sw_batch_size, model)

        val_outputs_2 = sliding_window_inference(test_images, roi_size,
                                                 sw_batch_size, model)

        val_outputs_1 = val_outputs_1.argmax(dim=1, keepdim=True)
        val_outputs_2 = val_outputs_2.argmax(dim=1, keepdim=True)
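
        # Note: the two sliding_window_inference calls above are identical, so
        # val_outputs_1 == val_outputs_2. If two passes were meant as a simple
        # ensemble, one plausible variant (an assumption, not in the source)
        # averages logits from two overlap settings before the argmax:
        #
        # logits_a = sliding_window_inference(test_images, roi_size,
        #                                     sw_batch_size, model, overlap=0.25)
        # logits_b = sliding_window_inference(test_images, roi_size,
        #                                     sw_batch_size, model, overlap=0.8)
        # val_outputs = ((logits_a + logits_b) / 2).argmax(dim=1, keepdim=True)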
Example #4
# plt.show()
# fig2.savefig('Training_Plot.png')
"""## Check best model output with the input image and label"""
"""## Make the inferences"""
out_dir = "/home/imoreira/Data/MO_Best_Model"
#out_dir = "C:\\Users\\isasi\\Downloads\\Bladder_Best_Model"
model.load_state_dict(
    torch.load(os.path.join(out_dir, "best_metric_model.pth")))
model.eval()
with torch.no_grad():
    #saver = NiftiSaver(output_dir='C:\\Users\\isasi\\Downloads\\Bladder_Segs_Out')
    saver = NiftiSaver(
        output_dir='/home/imoreira/Segs_Out/',
        #output_dir='C:\\Users\\isasi\\Downloads\\Segs_Out',
        output_postfix="seg",
        output_ext=".nii.gz",
        mode="nearest",
        padding_mode="zeros")
    for i, test_data in enumerate(test_loader):
        #for test_data in test_loader:
        test_images = test_data["image"].to(device)
        roi_size = (96, 96, 96)
        sw_batch_size = 4

        val_outputs = sliding_window_inference(test_images,
                                               roi_size,
                                               sw_batch_size,
                                               model,
                                               overlap=0.8)
        val_outputs = val_outputs.argmax(dim=1, keepdim=True)
Example #5
    strides=(2, 2, 2, 2),
    num_res_units=2,
    norm=Norm.BATCH,
).to(device)
# Note: sigmoid=True with to_onehot_y scores each class channel independently;
# softmax=True is the more common setting for mutually exclusive classes.
loss_function = DiceLoss(to_onehot_y=True, softmax=False, sigmoid=True)
optimizer = torch.optim.Adam(model.parameters(), 1e-4)
"""## Makes the Inferences """

model.load_state_dict(
    torch.load(os.path.join(out_dir, "best_metric_model.pth")))
model.eval()
with torch.no_grad():
    #saver = NiftiSaver(output_dir='C:\\Users\\isasi\\Downloads\\Bladder_Segs_Out')
    saver = NiftiSaver(output_dir='/home/imoreira/Segs_Adam',
                       output_postfix="seg",
                       output_ext=".nii.gz",
                       mode="nearest",
                       padding_mode="zeros")
    for test_data in test_loader:
        test_images = test_data["image"].to(device)
        roi_size = (160, 160, 160)
        sw_batch_size = 4
        val_outputs = sliding_window_inference(
            test_images,
            roi_size,
            sw_batch_size,
            model,  # overlap=0.8 (commented out; MONAI's default overlap is 0.25)
        )
        val_outputs = val_outputs.argmax(dim=1, keepdim=True)
        val_outputs = val_outputs.cpu().numpy()      # clone() is redundant; .cpu() already copies off the GPU
        val_outputs = val_outputs.astype(np.int64)   # np.int was removed in NumPy 1.24; use an explicit dtype
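
        # Continuation sketch (the source stops here): write each segmentation
        # with the saver configured above; "image_meta_dict" is the usual key
        # for MONAI dict-based loaders and is assumed.
        saver.save_batch(val_outputs, test_data["image_meta_dict"])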