# --- Lung segmentation post-processing ---
# Runs inside the test loop: val_outputs_1 is assumed to have been produced
# by an earlier sliding_window_inference call on the same batch.
val_outputs_2 = sliding_window_inference(test_images, roi_size, sw_batch_size, model)
val_outputs_1 = val_outputs_1.argmax(dim=1, keepdim=True)
val_outputs_2 = val_outputs_2.argmax(dim=1, keepdim=True)

# largest() is a user-defined helper that keeps the largest connected
# component of a binary mask (a sketch is given below).
first_lung = largest(val_outputs_1)
second_lung = largest(val_outputs_2 - first_lung)

# Keep the second component only if its volume is at least 10% of the first's.
# scipy.ndimage expects NumPy arrays, so move the tensors to CPU first.
g = ndimage.sum(first_lung.cpu().numpy()) * 0.10
if ndimage.sum(second_lung.cpu().numpy()) >= g:
    both_lungs = first_lung + second_lung
else:
    both_lungs = first_lung
both_lungs = both_lungs.cpu().numpy().astype(bool)  # np.bool was removed in NumPy >= 1.24

saver.save_batch(both_lungs, test_data["image_meta_dict"])
print("FINISH!!")
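# `largest` is not defined anywhere in the snippet. Below is a minimal sketch
# of one plausible implementation, assuming it keeps the largest connected
# component of a binary mask and returns a tensor of the same shape; the body
# is an assumption, not the original author's code.
import numpy as np
import torch
from scipy import ndimage

def largest(mask: torch.Tensor) -> torch.Tensor:
    """Keep only the largest connected component of a binary mask tensor."""
    arr = mask.cpu().numpy().astype(bool)
    labeled, num = ndimage.label(arr)
    if num == 0:
        return torch.zeros_like(mask)
    # Component volumes, ignoring the background label 0.
    sizes = ndimage.sum(arr, labeled, range(1, num + 1))
    biggest = int(np.argmax(sizes)) + 1
    return torch.from_numpy((labeled == biggest).astype(np.uint8)).to(mask.device)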
# --- Kidney inference on the training set ---
# model.eval() is assumed to have been called before this block.
with torch.no_grad():
    saver = NiftiSaver(output_dir='//home//imoreira//Kidneys_Segs_Out',
                       output_postfix="seg_kidneys",
                       output_ext=".nii.gz",
                       mode="nearest",
                       padding_mode="zeros")
    for i, train_inf_data in enumerate(train_inf_loader):
        train_inf_images = train_inf_data["image"].to(device)
        roi_size = (96, 96, 96)
        sw_batch_size = 4
        val_outputs = sliding_window_inference(train_inf_images, roi_size,
                                               sw_batch_size, model, overlap=0.8)
        # Collapse the channel dimension to a label map, then save a boolean mask.
        val_outputs = val_outputs.argmax(dim=1, keepdim=True)
        val_outputs = val_outputs.cpu().numpy().astype(bool)  # np.bool was removed in NumPy >= 1.24
        saver.save_batch(val_outputs, train_inf_data["image_meta_dict"])
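# With overlap=0.8 adjacent windows overlap heavily and MONAI blends the
# overlapping predictions. Passing mode="gaussian" (a real
# sliding_window_inference parameter) weights each window with a Gaussian
# instead of the default constant blending, which can reduce stitching
# artifacts at window borders; a sketch of the same call with it enabled:
val_outputs = sliding_window_inference(
    train_inf_images, roi_size, sw_batch_size, model,
    overlap=0.8,
    mode="gaussian",  # blend window predictions with a Gaussian weight map
)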
# --- Multi-organ inference on the test set ---
# (also assumed to run under torch.no_grad(), as in the block above)
saver = NiftiSaver(output_dir='//home//imoreira//Segs_Out//',
                   output_postfix="seg",
                   output_ext=".nii.gz",
                   mode="nearest",
                   padding_mode="zeros")
for i, test_data in enumerate(test_loader):
    test_images = test_data["image"].to(device)
    roi_size = (96, 96, 96)
    sw_batch_size = 4
    val_outputs = sliding_window_inference(test_images, roi_size,
                                           sw_batch_size, model, overlap=0.8)
    # Collapse the channel dimension to an integer label map before saving
    # (integer, not boolean, since this output is multi-organ).
    val_outputs = val_outputs.argmax(dim=1, keepdim=True)
    val_outputs = val_outputs.cpu().numpy().astype(int)  # np.int was removed in NumPy >= 1.24
    saver.save_batch(val_outputs, test_data["image_meta_dict"])
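# In recent MONAI releases NiftiSaver is deprecated in favour of the
# SaveImage transform. A rough, hedged sketch of an equivalent saver is
# below; it assumes MONAI >= 0.9 and decollates the batched metadata with
# monai.data.decollate_batch. Treat it as a sketch, not a drop-in
# replacement for the code above.
from monai.data import decollate_batch
from monai.transforms import SaveImage

save_image = SaveImage(output_dir='//home//imoreira//Segs_Out//',
                       output_postfix="seg",
                       output_ext=".nii.gz",
                       resample=False)
# Save each item of the batch with its own metadata dictionary.
for out, meta in zip(val_outputs, decollate_batch(test_data["image_meta_dict"])):
    save_image(out, meta)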