def test_cval(self):
    """Constant padding value (cval) must reach the window predictor and blend correctly."""
    device = "cuda" if torch.cuda.is_available() else "cpu:0"
    img = torch.ones((1, 1, 3, 3)).to(device=device)
    roi_shape = (5, 5)
    sw_batch_size = 10

    def add_window_sum(data):
        # window output = input + total of the (padded) window
        return data + data.sum()

    out = sliding_window_inference(
        img,
        roi_shape,
        sw_batch_size,
        add_window_sum,
        overlap=0.5,
        padding_mode="constant",
        cval=-1,
        mode="constant",
        sigma_scale=1.0,
    )
    # 3x3 ones padded to 5x5 with -1: window sum is 9 - 16 = -7, so each pixel becomes -6
    expected = np.ones((1, 1, 3, 3)) * -6.0
    np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4)

    out = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="constant", cval=-1)(img, add_window_sum)
    np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4)
def test_sliding_window_default(self, image_shape, roi_shape, sw_batch_size, overlap, mode, device):
    """Sliding-window result equals applying ``+1`` directly to the whole image."""
    total = np.prod(image_shape)
    if mode == "constant":
        img = torch.arange(total, dtype=torch.float).reshape(*image_shape)
    else:
        img = torch.ones(*image_shape, dtype=torch.float)
    # fall back to CPU when the parameterised device is unavailable
    if device.type == "cuda" and not torch.cuda.is_available():
        device = torch.device("cpu:0")

    def plus_one(data):
        return data + 1

    if mode == "constant":
        expected = np.arange(total, dtype=np.float32).reshape(*image_shape) + 1.0
    else:
        expected = np.ones(image_shape, dtype=np.float32) + 1.0

    out = sliding_window_inference(img.to(device), roi_shape, sw_batch_size, plus_one, overlap, mode=mode)
    np.testing.assert_string_equal(device.type, out.device.type)
    np.testing.assert_allclose(out.cpu().numpy(), expected)

    out = SlidingWindowInferer(roi_shape, sw_batch_size, overlap, mode)(img.to(device), plus_one)
    np.testing.assert_string_equal(device.type, out.device.type)
    np.testing.assert_allclose(out.cpu().numpy(), expected)
def test_args_kwargs(self):
    """Extra positional/keyword arguments are forwarded to the window predictor."""
    device = "cuda" if torch.cuda.is_available() else "cpu:0"
    img = torch.ones((1, 1, 3, 3)).to(device=device)
    t1 = torch.ones(1).to(device=device)
    t2 = torch.ones(1).to(device=device)
    roi_shape = (5, 5)
    sw_batch_size = 10

    def add_both(data, test1, test2):
        return data + test1 + test2

    # all-positional form: overlap, mode, sigma_scale, padding_mode, cval, sw_device, device
    out = sliding_window_inference(
        img,
        roi_shape,
        sw_batch_size,
        add_both,
        0.5,
        "constant",
        1.0,
        "constant",
        0.0,
        device,
        device,
        t1,
        test2=t2,
    )
    expected = np.ones((1, 1, 3, 3)) + 2.0
    np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4)

    out = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="constant", cval=-1)(img, add_both, t1, test2=t2)
    np.testing.assert_allclose(out.cpu().numpy(), expected, rtol=1e-4)
def test_naive_predictor(self, input_param, input_shape):
    """Check predict_with_inferer and ensure_dict_value_to_list_ with naive test networks."""
    net = NaiveNetwork(**input_param)
    net2 = NaiveNetwork2(**input_param)
    inferer = SlidingWindowInferer(roi_size=16, overlap=0.25, cache_roi_weight_map=True)
    network_output_keys = ["cls", "box_reg"]
    input_data = torch.randn(input_shape)
    # wrapped prediction yields a single entry per requested key
    result = predict_with_inferer(input_data, net, network_output_keys, inferer=inferer)
    self.assertTrue(len(result["cls"]) == 1)
    # direct network call: one output per batch element -- presumably input_shape[0] == 1 here; TODO confirm fixture
    result = net(input_data)
    self.assertTrue(len(result["cls"]) == input_data.shape[0])
    ensure_dict_value_to_list_(result)
    self.assertTrue(len(result["cls"]) == 1)
    # NaiveNetwork2 appears to emit two-element lists per key -- TODO confirm against its definition
    result = predict_with_inferer(input_data, net2, network_output_keys, inferer=inferer)
    self.assertTrue(len(result["cls"]) == 2)
    result = net2(input_data)
    self.assertTrue(len(result["cls"]) == 2)
    ensure_dict_value_to_list_(result)
    self.assertTrue(len(result["cls"]) == 2)
def inference(args):
    """Run test-set inference with a DynUNet sliding-window inferrer (optionally multi-GPU / TTA).

    Writes predictions under ``./runs_<task>_fold<fold>_<expr>/<task_name>/``.
    Relies on module-level ``task_name`` and ``patch_size`` lookup tables.
    """
    # load hyper parameters
    task_id = args.task_id
    checkpoint = args.checkpoint
    val_output_dir = "./runs_{}_fold{}_{}/".format(args.task_id, args.fold, args.expr_name)
    sw_batch_size = args.sw_batch_size
    infer_output_dir = os.path.join(val_output_dir, task_name[task_id])
    window_mode = args.window_mode
    eval_overlap = args.eval_overlap
    amp = args.amp
    tta_val = args.tta_val
    multi_gpu_flag = args.multi_gpu
    local_rank = args.local_rank
    if not os.path.exists(infer_output_dir):
        os.makedirs(infer_output_dir)

    if multi_gpu_flag:
        # one process per GPU; NCCL backend with env:// rendezvous
        dist.init_process_group(backend="nccl", init_method="env://")
        device = torch.device(f"cuda:{local_rank}")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cuda")

    properties, test_loader = get_data(args, mode="test")
    net = get_network(properties, task_id, val_output_dir, checkpoint)
    net = net.to(device)

    if multi_gpu_flag:
        net = DistributedDataParallel(module=net, device_ids=[device], find_unused_parameters=True)
    net.eval()

    inferrer = DynUNetInferrer(
        device=device,
        val_data_loader=test_loader,
        network=net,
        output_dir=infer_output_dir,
        n_classes=len(properties["labels"]),
        inferer=SlidingWindowInferer(
            roi_size=patch_size[task_id],
            sw_batch_size=sw_batch_size,
            overlap=eval_overlap,
            mode=window_mode,
        ),
        amp=amp,
        tta_val=tta_val,
    )

    inferrer.run()
def ensemble_evaluate(post_transforms, models):
    """Evaluate an ensemble of models with sliding-window inference and mean Dice.

    NOTE: reads ``device``, ``test_loader`` and ``opt`` from the enclosing scope.
    """
    evaluator = EnsembleEvaluator(
        device=device,
        val_data_loader=test_loader,
        # one prediction key per ensemble member
        pred_keys=opt.pred_keys,
        networks=models,
        inferer=SlidingWindowInferer(roi_size=opt.patch_size, sw_batch_size=opt.batch_size, overlap=0.5),
        post_transform=post_transforms,
        key_val_metric={
            "test_mean_dice": MeanDice(
                include_background=True,
                output_transform=lambda x: (x["pred"], x["label"]),
            )
        },
    )
    evaluator.run()
def inferer(self, data=None) -> Inferer:
    """Select an inferer for the current request.

    Returns a ``SlidingWindowInferer`` when any trailing (spatial) dimension of
    the input is larger than the corresponding ROI dimension; otherwise a
    ``SimpleInferer``.

    Args:
        data: optional request dict that may carry the input image under
            ``self.input_key`` plus ``roi_size``, ``sw_batch_size``,
            ``sw_overlap`` and ``device`` overrides.
    """
    input_shape = data[self.input_key].shape if data else None
    roi_size = data.get("roi_size", self.roi_size) if data else self.roi_size
    sw_batch_size = data.get("sw_batch_size", 1) if data else 1
    sw_overlap = data.get("sw_overlap", 0.25) if data else 0.25
    # guard against data=None: the old unconditional data.get(...) raised AttributeError
    device = data.get("device") if data else None

    sliding = False
    if input_shape and roi_size:
        # compare spatial dims aligned at the end; the previous ``[-i]`` loop started
        # at ``-0 == 0`` (batch/channel dim) and never inspected the last spatial axis
        for i in range(1, len(roi_size) + 1):
            if input_shape[-i] > roi_size[-i]:
                sliding = True

    if sliding:
        return SlidingWindowInferer(
            roi_size=roi_size,
            overlap=sw_overlap,
            sw_batch_size=sw_batch_size,
            sw_device=device,
            device=device,
        )
    return SimpleInferer()
def train(args):
    """Train a DynUNet for one task/fold with periodic sliding-window validation.

    Side effects: initialises torch.distributed when ``--multi_gpu`` is set,
    saves checkpoints under ``./runs_<task>_fold<fold>_<expr>/`` and logs to a
    per-task file. Relies on module-level ``data_loader_params`` and
    ``patch_size`` lookup tables.
    """
    # load hyper parameters
    task_id = args.task_id
    fold = args.fold
    val_output_dir = "./runs_{}_fold{}_{}/".format(task_id, fold, args.expr_name)
    log_filename = "nnunet_task{}_fold{}.log".format(task_id, fold)
    log_filename = os.path.join(val_output_dir, log_filename)
    interval = args.interval
    learning_rate = args.learning_rate
    max_epochs = args.max_epochs
    multi_gpu_flag = args.multi_gpu
    amp_flag = args.amp
    lr_decay_flag = args.lr_decay
    sw_batch_size = args.sw_batch_size
    tta_val = args.tta_val
    batch_dice = args.batch_dice
    window_mode = args.window_mode
    eval_overlap = args.eval_overlap
    local_rank = args.local_rank
    determinism_flag = args.determinism_flag
    determinism_seed = args.determinism_seed
    if determinism_flag:
        set_determinism(seed=determinism_seed)
        # only rank 0 prints to avoid duplicated output in DDP runs
        if local_rank == 0:
            print("Using deterministic training.")

    # transforms
    train_batch_size = data_loader_params[task_id]["batch_size"]
    if multi_gpu_flag:
        dist.init_process_group(backend="nccl", init_method="env://")
        device = torch.device(f"cuda:{local_rank}")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cuda")

    properties, val_loader = get_data(args, mode="validation")
    _, train_loader = get_data(args, batch_size=train_batch_size, mode="train")

    # produce the network
    checkpoint = args.checkpoint
    net = get_network(properties, task_id, val_output_dir, checkpoint)
    net = net.to(device)

    if multi_gpu_flag:
        net = DistributedDataParallel(module=net, device_ids=[device], find_unused_parameters=True)

    # nnU-Net style SGD with Nesterov momentum
    optimizer = torch.optim.SGD(
        net.parameters(),
        lr=learning_rate,
        momentum=0.99,
        weight_decay=3e-5,
        nesterov=True,
    )

    # polynomial learning-rate decay over max_epochs
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda epoch: (1 - epoch / max_epochs)**0.9)

    # produce evaluator
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointSaver(save_dir=val_output_dir, save_dict={"net": net}, save_key_metric=True),
    ]

    evaluator = DynUNetEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        n_classes=len(properties["labels"]),
        inferer=SlidingWindowInferer(
            roi_size=patch_size[task_id],
            sw_batch_size=sw_batch_size,
            overlap=eval_overlap,
            mode=window_mode,
        ),
        post_transform=None,
        key_val_metric={
            "val_mean_dice": MeanDice(
                include_background=False,
                output_transform=lambda x: (x["pred"], x["label"]),
            )
        },
        val_handlers=val_handlers,
        amp=amp_flag,
        tta_val=tta_val,
    )

    # produce trainer
    loss = DiceCELoss(to_onehot_y=True, softmax=True, batch=batch_dice)
    train_handlers = []
    if lr_decay_flag:
        train_handlers += [
            LrScheduleHandler(lr_scheduler=scheduler, print_lr=True)
        ]

    train_handlers += [
        ValidationHandler(validator=evaluator, interval=interval, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
    ]

    trainer = DynUNetTrainer(
        device=device,
        max_epochs=max_epochs,
        train_data_loader=train_loader,
        network=net,
        optimizer=optimizer,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=None,
        key_train_metric=None,
        train_handlers=train_handlers,
        amp=amp_flag,
    )

    # run
    logger = logging.getLogger()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    # Setup file handler
    fhandler = logging.FileHandler(log_filename)
    fhandler.setLevel(logging.INFO)
    fhandler.setFormatter(formatter)

    # Configure stream handler for the cells
    chandler = logging.StreamHandler()
    chandler.setLevel(logging.INFO)
    chandler.setFormatter(formatter)

    # Add both handlers only on rank 0 to avoid duplicated log lines
    if local_rank == 0:
        logger.addHandler(fhandler)
        logger.addHandler(chandler)
        logger.setLevel(logging.INFO)

    trainer.run()
def validation(args):
    """Validate a trained DynUNet and print mean Dice (per foreground label when multi-class).

    Relies on the module-level ``patch_size`` lookup table and a checkpoint under
    ``./runs_<task>_fold<fold>_<expr>/``.
    """
    # load hyper parameters
    task_id = args.task_id
    sw_batch_size = args.sw_batch_size
    tta_val = args.tta_val
    window_mode = args.window_mode
    eval_overlap = args.eval_overlap
    multi_gpu_flag = args.multi_gpu
    local_rank = args.local_rank
    amp = args.amp

    # produce the network
    checkpoint = args.checkpoint
    val_output_dir = "./runs_{}_fold{}_{}/".format(task_id, args.fold, args.expr_name)

    if multi_gpu_flag:
        dist.init_process_group(backend="nccl", init_method="env://")
        device = torch.device(f"cuda:{local_rank}")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cuda")

    properties, val_loader = get_data(args, mode="validation")
    net = get_network(properties, task_id, val_output_dir, checkpoint)
    net = net.to(device)

    if multi_gpu_flag:
        net = DistributedDataParallel(module=net, device_ids=[device], find_unused_parameters=True)

    n_classes = len(properties["labels"])
    net.eval()

    evaluator = DynUNetEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        n_classes=n_classes,
        inferer=SlidingWindowInferer(
            roi_size=patch_size[task_id],
            sw_batch_size=sw_batch_size,
            overlap=eval_overlap,
            mode=window_mode,
        ),
        post_transform=None,
        key_val_metric={
            "val_mean_dice": MeanDice(
                include_background=False,
                output_transform=lambda x: (x["pred"], x["label"]),
            )
        },
        additional_metrics=None,
        amp=amp,
        tta_val=tta_val,
    )

    evaluator.run()
    # only rank 0 reports, to avoid duplicated output in DDP runs
    if local_rank == 0:
        print(evaluator.state.metrics)
        results = evaluator.state.metric_details["val_mean_dice"]
        if n_classes > 2:
            # label 0 is background; report each foreground class separately
            for i in range(n_classes - 1):
                print("mean dice for label {} is {}".format(
                    i + 1, results[:, i].mean()))
def run_training_test(root_dir, device="cuda:0", amp=False):
    """Train a 3D UNet on Nifti pairs under ``root_dir`` via ignite workflows.

    Returns the evaluator's best key-metric (mean Dice) after 5 epochs.
    """
    images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    # first 20 pairs for training, last 20 for validation
    train_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images[:20], segs[:20])]
    val_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys=["image", "label"]),
        RandCropByPosNegLabeld(keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4),
        RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
        ToTensord(keys=["image", "label"]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys=["image", "label"]),
        ToTensord(keys=["image", "label"]),
    ])

    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)

    # binarise predictions then keep only the largest connected component
    val_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        TensorBoardStatsHandler(log_dir=root_dir, output_transform=lambda x: None),
        TensorBoardImageHandler(log_dir=root_dir, batch_transform=lambda x: (x["image"], x["label"]), output_transform=lambda x: x["pred"]),
        CheckpointSaver(save_dir=root_dir, save_dict={"net": net}, save_key_metric=True),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice":
            MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={
            "val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))
        },
        val_handlers=val_handlers,
        amp=True if amp else False,
    )

    train_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        # run validation every 2 epochs
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(log_dir=root_dir, tag_name="train_loss", output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir=root_dir, save_dict={
            "net": net,
            "opt": opt
        }, save_interval=2, epoch_level=True),
    ]

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        key_train_metric={
            "train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))
        },
        train_handlers=train_handlers,
        amp=True if amp else False,
    )
    trainer.run()

    return evaluator.state.best_metric
def run_inference_test(root_dir, model_file, device="cuda:0", amp=False):
    """Load ``model_file`` into a 3D UNet and evaluate it on Nifti pairs under ``root_dir``.

    Saves segmentations to ``root_dir`` and returns the evaluator's best mean Dice.
    """
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys=["image", "label"]),
        ToTensord(keys=["image", "label"]),
    ])

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    # binarise predictions then keep only the largest connected component
    val_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
        SegmentationSaver(
            output_dir=root_dir,
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice":
            MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={
            "val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))
        },
        val_handlers=val_handlers,
        amp=True if amp else False,
    )
    evaluator.run()

    return evaluator.state.best_metric
def run_training_test(root_dir, device="cuda:0", amp=False, num_workers=4):
    """Train a 3D UNet via modern ignite workflows (from_engine / postprocessing API).

    Also exercises custom iteration-event handlers; returns the best mean Dice.
    """
    images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    # first 20 pairs for training, last 20 for validation
    train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            RandCropByPosNegLabeld(
                keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=num_workers)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=num_workers)

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        spatial_dims=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
    summary_writer = SummaryWriter(log_dir=root_dir)

    val_postprocessing = Compose(
        [
            ToTensord(keys=["pred", "label"]),
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold=0.5),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )

    class _TestEvalIterEvents:
        # no-op handler: verifies that FORWARD_COMPLETED fires during evaluation
        def attach(self, engine):
            engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed)

        def _forward_completed(self, engine):
            pass

    val_handlers = [
        StatsHandler(iteration_log=False),
        TensorBoardStatsHandler(summary_writer=summary_writer, iteration_log=False),
        TensorBoardImageHandler(
            log_dir=root_dir, batch_transform=from_engine(["image", "label"]), output_transform=from_engine("pred")
        ),
        CheckpointSaver(save_dir=root_dir, save_dict={"net": net}, save_key_metric=True),
        _TestEvalIterEvents(),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        postprocessing=val_postprocessing,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=from_engine(["pred", "label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
        metric_cmp_fn=lambda cur, prev: cur >= prev,  # if greater or equal, treat as new best metric
        val_handlers=val_handlers,
        amp=bool(amp),
        to_kwargs={"memory_format": torch.preserve_format},
        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32},
    )

    train_postprocessing = Compose(
        [
            ToTensord(keys=["pred", "label"]),
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold=0.5),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )

    class _TestTrainIterEvents:
        # no-op handlers: verify that all training iteration events fire
        def attach(self, engine):
            engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed)
            engine.add_event_handler(IterationEvents.LOSS_COMPLETED, self._loss_completed)
            engine.add_event_handler(IterationEvents.BACKWARD_COMPLETED, self._backward_completed)
            engine.add_event_handler(IterationEvents.MODEL_COMPLETED, self._model_completed)

        def _forward_completed(self, engine):
            pass

        def _loss_completed(self, engine):
            pass

        def _backward_completed(self, engine):
            pass

        def _model_completed(self, engine):
            pass

    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=from_engine("loss", first=True)),
        TensorBoardStatsHandler(
            summary_writer=summary_writer, tag_name="train_loss", output_transform=from_engine("loss", first=True)
        ),
        CheckpointSaver(save_dir=root_dir, save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
        _TestTrainIterEvents(),
    ]

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        postprocessing=train_postprocessing,
        key_train_metric={"train_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
        train_handlers=train_handlers,
        amp=bool(amp),
        optim_set_to_none=True,
        to_kwargs={"memory_format": torch.preserve_format},
        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32},
    )
    trainer.run()

    return evaluator.state.best_metric
def val_inferer(self, context: Context):
    """Build the sliding-window inferer used for validation, windowed at the app's spatial size."""
    window_inferer = SlidingWindowInferer(sw_batch_size=8, roi_size=self.spatial_size)
    return window_inferer
def test_sigma(self):
    """Compare constant vs gaussian importance blending using a stateful predictor."""
    device = "cuda" if torch.cuda.is_available() else "cpu:0"
    inputs = torch.ones((1, 1, 7, 7)).to(device=device)
    roi_shape = (3, 3)
    sw_batch_size = 10

    class _Pred:
        # stateful predictor: each call returns a larger offset, so overlapping
        # windows receive different values and the blending weights become visible
        add = 1

        def compute(self, data):
            self.add += 1
            return data + self.add

    result = sliding_window_inference(
        inputs,
        roi_shape,
        sw_batch_size,
        _Pred().compute,
        overlap=0.5,
        padding_mode="constant",
        cval=-1,
        mode="constant",
        sigma_scale=1.0,
    )
    # with constant blending, overlapping windows are averaged with equal weight
    expected = np.array([[[
        [3.0000, 3.0000, 3.0000, 3.0000, 3.0000, 3.0000, 3.0000],
        [3.0000, 3.0000, 3.0000, 3.0000, 3.0000, 3.0000, 3.0000],
        [3.3333, 3.3333, 3.3333, 3.3333, 3.3333, 3.3333, 3.3333],
        [3.6667, 3.6667, 3.6667, 3.6667, 3.6667, 3.6667, 3.6667],
        [4.3333, 4.3333, 4.3333, 4.3333, 4.3333, 4.3333, 4.3333],
        [4.5000, 4.5000, 4.5000, 4.5000, 4.5000, 4.5000, 4.5000],
        [5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000, 5.0000],
    ]]])
    np.testing.assert_allclose(result.cpu().numpy(), expected, rtol=1e-4)

    result = sliding_window_inference(
        inputs,
        roi_shape,
        sw_batch_size,
        _Pred().compute,
        overlap=0.5,
        padding_mode="constant",
        cval=-1,
        mode="gaussian",
        sigma_scale=1.0,
    )
    # gaussian blending weights window centres more heavily
    expected = np.array([[[
        [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
        [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
        [
            3.3271625, 3.3271623, 3.3271623, 3.3271623, 3.3271623, 3.3271623,
            3.3271625
        ],
        [
            3.6728377, 3.6728377, 3.6728377, 3.6728377, 3.6728377, 3.6728377,
            3.6728377
        ],
        [
            4.3271623, 4.3271623, 4.3271627, 4.3271627, 4.3271627, 4.3271623,
            4.3271623
        ],
        [
            4.513757, 4.513757, 4.513757, 4.513757, 4.513757, 4.513757,
            4.513757
        ],
        [4.9999995, 5.0, 5.0, 5.0, 5.0, 5.0, 4.9999995],
    ]]])
    np.testing.assert_allclose(result.cpu().numpy(), expected, rtol=1e-4)

    result = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="gaussian", sigma_scale=1.0)(inputs, _Pred().compute)
    np.testing.assert_allclose(result.cpu().numpy(), expected, rtol=1e-4)

    # sigma_scale may also be supplied per spatial dimension
    result = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="gaussian", sigma_scale=[1.0, 1.0])(inputs, _Pred().compute)
    np.testing.assert_allclose(result.cpu().numpy(), expected, rtol=1e-4)
def evaluate(args):
    """Distributed evaluation entry point: one process per GPU (NCCL backend).

    Rank 0 generates synthetic data if missing and also logs/saves segmentations.
    """
    if args.local_rank == 0 and not os.path.exists(args.dir):
        # create 16 random image, mask pairs for evaluation
        print(f"generating synthetic data to {args.dir} (this may take a while)")
        os.makedirs(args.dir)
        # set random seed to generate same random data for every node
        np.random.seed(seed=0)
        for i in range(16):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(args.dir, f"seg{i:d}.nii.gz"))

    # initialize the distributed evaluation process, every GPU runs in a process
    dist.init_process_group(backend="nccl", init_method="env://")

    images = sorted(glob(os.path.join(args.dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(args.dir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a evaluation data loader
    val_ds = Dataset(data=val_files, transform=val_transforms)
    # create a evaluation data sampler
    val_sampler = DistributedSampler(val_ds, shuffle=False)
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=2, pin_memory=True, sampler=val_sampler)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device(f"cuda:{args.local_rank}")
    torch.cuda.set_device(device)
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    # wrap the model with DistributedDataParallel module
    net = DistributedDataParallel(net, device_ids=[device])

    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        CheckpointLoader(
            load_path="./runs/checkpoint_epoch=4.pt",
            load_dict={"net": net},
            # config mapping to expected GPU device
            map_location={"cuda:0": f"cuda:{args.local_rank}"},
        ),
    ]
    # only rank 0 logs statistics and saves output segmentations
    if dist.get_rank() == 0:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        val_handlers.extend(
            [
                StatsHandler(output_transform=lambda x: None),
                SegmentationSaver(
                    output_dir="./runs/",
                    batch_transform=lambda batch: batch["image_meta_dict"],
                    output_transform=lambda output: output["pred"],
                ),
            ]
        )

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(
                include_background=True,
                output_transform=lambda x: (x["pred"], x["label"]),
                device=device,
            )
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]), device=device)},
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=True if monai.config.get_torch_version_tuple() >= (1, 6) else False,
    )
    evaluator.run()
    dist.destroy_process_group()
def run_inference_test(root_dir, model_file, device="cuda:0", amp=False, num_workers=4):
    """Load ``model_file`` into a 3D UNet and evaluate it, saving outputs two ways.

    Segmentations are written both by the ``SaveImaged`` postprocessing transform
    (suffix ``seg_transform``) and by the ``SegmentationSaver`` handler (suffix
    ``seg_handler``). Returns the evaluator's best mean Dice.
    """
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=num_workers)

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        spatial_dims=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    val_postprocessing = Compose(
        [
            ToTensord(keys=["pred", "label"]),
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold=0.5),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
            # test the case that `pred` in `engine.state.output`, while `image_meta_dict` in `engine.state.batch`
            SaveImaged(
                keys="pred", meta_keys=PostFix.meta("image"), output_dir=root_dir, output_postfix="seg_transform"
            ),
        ]
    )
    val_handlers = [
        StatsHandler(iteration_log=False),
        CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
        SegmentationSaver(
            output_dir=root_dir,
            output_postfix="seg_handler",
            batch_transform=from_engine(PostFix.meta("image")),
            output_transform=from_engine("pred"),
        ),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        postprocessing=val_postprocessing,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=from_engine(["pred", "label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
        val_handlers=val_handlers,
        amp=bool(amp),
    )
    evaluator.run()

    return evaluator.state.best_metric
def main(tempdir):
    """End-to-end demo: generate synthetic Nifti data, then train a 3D UNet with
    ignite workflows (sliding-window validation every 2 epochs, TensorBoard logging,
    checkpointing under ``./runs/``)."""
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    ################################ DATASET ################################
    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    # first 20 pairs for training, last 20 for validation
    train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            RandCropByPosNegLabeld(
                keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    ################################ DATASET ################################

    ################################ NETWORK ################################
    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    ################################ NETWORK ################################

    ################################ LOSS ################################
    loss = monai.losses.DiceLoss(sigmoid=True)
    ################################ LOSS ################################

    ################################ OPT ################################
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    ################################ OPT ################################

    ################################ LR ################################
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
    ################################ LR ################################

    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        TensorBoardStatsHandler(log_dir="./runs/", output_transform=lambda x: None),
        TensorBoardImageHandler(
            log_dir="./runs/",
            batch_transform=lambda x: (x["image"], x["label"]),
            output_transform=lambda x: x["pred"],
        ),
        CheckpointSaver(save_dir="./runs/", save_dict={"net": net}, save_key_metric=True),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
    )

    train_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(log_dir="./runs/", tag_name="train_loss", output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir="./runs/", save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
    ]

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        key_train_metric={"train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        train_handlers=train_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP training
        amp=True if monai.utils.get_torch_version_tuple() >= (1, 6) else False,
    )
    trainer.run()
def train(index):
    """Build a DynUNet sized from the configured patch size / spacing and train
    it with a MONAI ``SupervisedTrainer``.

    Args:
        index: index into the module-level ``train_loaders`` / ``val_loaders``
            (presumably one cross-validation fold — TODO confirm with caller).

    Returns:
        the trained network.

    Note:
        Relies on module-level ``opt`` (config namespace), ``device``,
        ``train_loaders`` and ``val_loaders`` defined elsewhere in this file.
    """
    # ---------- Build the nn-Unet network ------------
    # choose the voxel-spacing source: fall back to opt.spacing when no
    # explicit resampling resolution was requested
    if opt.resolution is None:
        sizes, spacings = opt.patch_size, opt.spacing
    else:
        sizes, spacings = opt.patch_size, opt.resolution
    strides, kernels = [], []
    # derive per-level kernel sizes and strides (nn-Unet style): keep halving
    # the axes whose spacing is within 2x of the finest spacing and whose
    # remaining size is >= 8, until no axis can be downsampled further
    while True:
        spacing_ratio = [sp / min(spacings) for sp in spacings]
        stride = [
            2 if ratio <= 2 and size >= 8 else 1
            for (ratio, size) in zip(spacing_ratio, sizes)
        ]
        kernel = [3 if ratio <= 2 else 1 for ratio in spacing_ratio]
        if all(s == 1 for s in stride):
            break
        sizes = [i / j for i, j in zip(sizes, stride)]
        spacings = [i * j for i, j in zip(spacings, stride)]
        kernels.append(kernel)
        strides.append(stride)
    # first level never downsamples; deepest level uses full 3x3x3 kernels
    strides.insert(0, len(spacings) * [1])
    kernels.append(len(spacings) * [3])
    net = monai.networks.nets.DynUNet(
        spatial_dims=3,
        in_channels=opt.in_channels,
        out_channels=opt.out_channels,
        kernel_size=kernels,
        strides=strides,
        upsample_kernel_size=strides[1:],
        res_block=True,
        # act=act_type,
        # norm=Norm.BATCH,
    ).to(device)

    # print a layer-by-layer summary by running one random patch-sized batch
    from torch.autograd import Variable
    from torchsummaryX import summary
    data = Variable(
        torch.randn(int(opt.batch_size), int(opt.in_channels),
                    int(opt.patch_size[0]), int(opt.patch_size[1]),
                    int(opt.patch_size[2]))).cuda()
    out = net(data)
    summary(net, data)
    print("out size: {}".format(out.size()))

    # if opt.preload is not None:
    #     net.load_state_dict(torch.load(opt.preload))
    # ---------- ------------------------ ------------

    optim = torch.optim.Adam(net.parameters(), lr=opt.lr)
    # polynomial learning-rate decay down to 0 over opt.epochs
    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
        optim, lr_lambda=lambda epoch: (1 - epoch / opt.epochs)**0.9)
    loss_function = monai.losses.DiceCELoss(sigmoid=True)

    # validation post-processing: sigmoid -> threshold
    val_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        # KeepLargestConnectedComponentd(keys="pred", applied_labels=[1])
    ])
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        # save a checkpoint whenever the key validation metric improves
        CheckpointSaver(save_dir="./runs/", save_dict={"net": net}, save_key_metric=True),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loaders[index],
        network=net,
        inferer=SlidingWindowInferer(roi_size=opt.patch_size, sw_batch_size=opt.batch_size, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(
                include_background=True,
                output_transform=lambda x: (x["pred"], x["label"]),
            )
        },
        val_handlers=val_handlers)

    train_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        # KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    train_handlers = [
        # run the evaluator every 5 epochs
        ValidationHandler(validator=evaluator, interval=5, epoch_level=True),
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir="./runs/",
                        save_dict={
                            "net": net,
                            "opt": optim
                        },
                        save_final=True,
                        epoch_level=True),
    ]
    trainer = SupervisedTrainer(
        device=device,
        max_epochs=opt.epochs,
        train_data_loader=train_loaders[index],
        network=net,
        optimizer=optim,
        loss_function=loss_function,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        amp=False,
        train_handlers=train_handlers,
    )
    trainer.run()
    return net
def test_multioutput(self): device = "cuda" if torch.cuda.is_available() else "cpu:0" inputs = torch.ones((1, 6, 20, 20)).to(device=device) roi_shape = (8, 8) sw_batch_size = 10 def compute(data): return data + 1, data[:, ::3, ::2, ::2] + 2, data[:, ::2, ::4, :: 4] + 3 def compute_dict(data): return { 1: data + 1, 2: data[:, ::3, ::2, ::2] + 2, 3: data[:, ::2, ::4, ::4] + 3 } result = sliding_window_inference( inputs, roi_shape, sw_batch_size, compute, 0.5, "constant", 1.0, "constant", 0.0, device, device, has_tqdm, None, ) result_dict = sliding_window_inference( inputs, roi_shape, sw_batch_size, compute_dict, 0.5, "constant", 1.0, "constant", 0.0, device, device, has_tqdm, None, ) expected = (np.ones((1, 6, 20, 20)) + 1, np.ones( (1, 2, 10, 10)) + 2, np.ones((1, 3, 5, 5)) + 3) expected_dict = { 1: np.ones((1, 6, 20, 20)) + 1, 2: np.ones((1, 2, 10, 10)) + 2, 3: np.ones((1, 3, 5, 5)) + 3 } for rr, ee in zip(result, expected): np.testing.assert_allclose(rr.cpu().numpy(), ee, rtol=1e-4) for rr, _ in zip(result_dict, expected_dict): np.testing.assert_allclose(result_dict[rr].cpu().numpy(), expected_dict[rr], rtol=1e-4) result = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="constant", cval=-1, progress=has_tqdm)(inputs, compute) for rr, ee in zip(result, expected): np.testing.assert_allclose(rr.cpu().numpy(), ee, rtol=1e-4) result_dict = SlidingWindowInferer(roi_shape, sw_batch_size, overlap=0.5, mode="constant", cval=-1, progress=has_tqdm)(inputs, compute_dict) for rr, _ in zip(result_dict, expected_dict): np.testing.assert_allclose(result_dict[rr].cpu().numpy(), expected_dict[rr], rtol=1e-4)
def inferer(self, data=None) -> Inferer: return SlidingWindowInferer(roi_size=(160, 160, 160))
def val_inferer(self, context: Context): return SlidingWindowInferer(roi_size=(160, 160, 160), sw_batch_size=1, overlap=0.25)
def main():
    """Train a 3D UNet on synthetic NIfTI data with MONAI workflow engines.

    Generates 40 random image/segmentation pairs in a temp directory, trains
    for 5 epochs with periodic validation, then removes the temp directory.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    tempdir = tempfile.mkdtemp()
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    # first 20 pairs for training, last 20 for validation
    train_files = [{
        Keys.IMAGE: img,
        Keys.LABEL: seg
    } for img, seg in zip(images[:20], segs[:20])]
    val_files = [{
        Keys.IMAGE: img,
        Keys.LABEL: seg
    } for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose([
        LoadNiftid(keys=[Keys.IMAGE, Keys.LABEL]),
        AsChannelFirstd(keys=[Keys.IMAGE, Keys.LABEL], channel_dim=-1),
        ScaleIntensityd(keys=[Keys.IMAGE, Keys.LABEL]),
        RandCropByPosNegLabeld(keys=[Keys.IMAGE, Keys.LABEL],
                               label_key=Keys.LABEL,
                               size=[96, 96, 96],
                               pos=1,
                               neg=1,
                               num_samples=4),
        RandRotate90d(keys=[Keys.IMAGE, Keys.LABEL], prob=0.5, spatial_axes=[0, 2]),
        ToTensord(keys=[Keys.IMAGE, Keys.LABEL]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=[Keys.IMAGE, Keys.LABEL]),
        AsChannelFirstd(keys=[Keys.IMAGE, Keys.LABEL], channel_dim=-1),
        ScaleIntensityd(keys=[Keys.IMAGE, Keys.LABEL]),
        ToTensord(keys=[Keys.IMAGE, Keys.LABEL]),
    ])

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, collate_fn=list_data_collate)
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda:0")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    # NOTE(review): `do_sigmoid` / `add_sigmoid` / `size=` above are older MONAI
    # argument names — confirm they match the MONAI version this script pins.
    loss = monai.losses.DiceLoss(do_sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)

    val_handlers = [StatsHandler(output_transform=lambda x: None)]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        val_handlers=val_handlers,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True,
                                      add_sigmoid=True,
                                      output_transform=lambda x: (x[Keys.PRED], x[Keys.LABEL]))
        },
        additional_metrics=None,
    )

    train_handlers = [
        # run the evaluator every 2 epochs
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x[Keys.INFO][Keys.LOSS]),
    ]
    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        train_handlers=train_handlers,
        amp=False,
        key_train_metric=None,
    )
    trainer.run()
    shutil.rmtree(tempdir)
def configure(self):
    """Build the full training/validation pipeline and store the resulting
    engines on ``self.train_engine`` / ``self.eval_engine``.

    Reads configuration from instance attributes (``data_list_file_path``,
    ``ckpt_dir``, ``learning_rate``, ``max_epochs``, ``val_interval``,
    ``amp``, ``multi_gpu``, ``local_rank``); ``self.set_device()`` is assumed
    to populate ``self.device``.
    """
    self.set_device()
    network = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=2,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
        norm=Norm.BATCH,
    ).to(self.device)
    # wrap for distributed training when running multi-GPU
    if self.multi_gpu:
        network = DistributedDataParallel(
            module=network,
            device_ids=[self.device],
            find_unused_parameters=False,
        )

    train_transforms = Compose([
        LoadImaged(keys=("image", "label")),
        EnsureChannelFirstd(keys=("image", "label")),
        # resample to 1mm isotropic spacing; nearest-neighbour for the label
        Spacingd(keys=("image", "label"), pixdim=[1.0, 1.0, 1.0], mode=["bilinear", "nearest"]),
        # clip the HU-like range [-57, 164] and rescale to [0, 1]
        ScaleIntensityRanged(
            keys="image",
            a_min=-57,
            a_max=164,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        CropForegroundd(keys=("image", "label"), source_key="image"),
        # 4 random 96^3 crops per volume, balanced between pos/neg centers
        RandCropByPosNegLabeld(
            keys=("image", "label"),
            label_key="label",
            spatial_size=(96, 96, 96),
            pos=1,
            neg=1,
            num_samples=4,
            image_key="image",
            image_threshold=0,
        ),
        RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
        ToTensord(keys=("image", "label")),
    ])
    train_datalist = load_decathlon_datalist(self.data_list_file_path, True, "training")
    if self.multi_gpu:
        # shard the training list so each rank gets a distinct, equal partition
        train_datalist = partition_dataset(
            data=train_datalist,
            shuffle=True,
            num_partitions=dist.get_world_size(),
            even_divisible=True,
        )[dist.get_rank()]
    train_ds = CacheDataset(
        data=train_datalist,
        transform=train_transforms,
        cache_num=32,
        cache_rate=1.0,
        num_workers=4,
    )
    train_data_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=4,
    )
    val_transforms = Compose([
        LoadImaged(keys=("image", "label")),
        EnsureChannelFirstd(keys=("image", "label")),
        ScaleIntensityRanged(
            keys="image",
            a_min=-57,
            a_max=164,
            b_min=0.0,
            b_max=1.0,
            clip=True,
        ),
        CropForegroundd(keys=("image", "label"), source_key="image"),
        ToTensord(keys=("image", "label")),
    ])
    val_datalist = load_decathlon_datalist(self.data_list_file_path, True, "validation")
    # positional args: cache_num=9, cache_rate=0.0, num_workers=4
    val_ds = CacheDataset(val_datalist, val_transforms, 9, 0.0, 4)
    val_data_loader = DataLoader(
        val_ds,
        batch_size=1,
        shuffle=False,
        num_workers=4,
    )
    # softmax -> argmax on pred, one-hot both pred and label for Dice
    post_transform = Compose([
        Activationsd(keys="pred", softmax=True),
        AsDiscreted(
            keys=["pred", "label"],
            argmax=[True, False],
            to_onehot=True,
            n_classes=2,
        ),
    ])
    # metric
    key_val_metric = {
        "val_mean_dice": MeanDice(
            include_background=False,
            output_transform=lambda x: (x["pred"], x["label"]),
            device=self.device,
        )
    }
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        # save a checkpoint whenever the key validation metric improves
        CheckpointSaver(
            save_dir=self.ckpt_dir,
            save_dict={"model": network},
            save_key_metric=True,
        ),
        TensorBoardStatsHandler(log_dir=self.ckpt_dir, output_transform=lambda x: None),
    ]
    self.eval_engine = SupervisedEvaluator(
        device=self.device,
        val_data_loader=val_data_loader,
        network=network,
        inferer=SlidingWindowInferer(
            roi_size=[160, 160, 160],
            sw_batch_size=4,
            overlap=0.5,
        ),
        post_transform=post_transform,
        key_val_metric=key_val_metric,
        val_handlers=val_handlers,
        amp=self.amp,
    )
    optimizer = torch.optim.Adam(network.parameters(), self.learning_rate)
    loss_function = DiceLoss(to_onehot_y=True, softmax=True)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        ValidationHandler(validator=self.eval_engine, interval=self.val_interval, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(
            log_dir=self.ckpt_dir,
            tag_name="train_loss",
            output_transform=lambda x: x["loss"],
        ),
    ]
    self.train_engine = SupervisedTrainer(
        device=self.device,
        max_epochs=self.max_epochs,
        train_data_loader=train_data_loader,
        network=network,
        optimizer=optimizer,
        loss_function=loss_function,
        inferer=SimpleInferer(),
        post_transform=post_transform,
        key_train_metric=None,
        train_handlers=train_handlers,
        amp=self.amp,
    )
    # silence per-iteration logging on non-zero ranks in multi-GPU runs
    if self.local_rank > 0:
        self.train_engine.logger.setLevel(logging.WARNING)
        self.eval_engine.logger.setLevel(logging.WARNING)
def main(tempdir):
    """Evaluate a previously trained UNet on synthetic NIfTI data.

    Generates 5 random image/segmentation pairs under ``tempdir``, loads the
    best checkpoint from ``./runs/``, runs sliding-window evaluation and saves
    predicted segmentations back to ``./runs/``.

    Args:
        tempdir: directory used to write the synthetic validation data.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create 5 random image, mask pairs in the given directory
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # model file path (first key-metric checkpoint found under ./runs/)
    model_file = glob("./runs/net_key_metric*")[0]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    # post-processing: sigmoid -> threshold -> keep largest component
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=model_file, load_dict={"net": net}),
        # write predicted segmentations next to the checkpoints
        SegmentationSaver(
            output_dir="./runs/",
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=True if monai.config.get_torch_version_tuple() >= (1, 6) else False,
    )
    evaluator.run()
def main():
    """Evaluate a trained UNet on synthetic NIfTI data in a temp directory.

    Generates 5 random image/segmentation pairs, loads a hard-coded checkpoint
    from ``./runs/``, runs sliding-window evaluation with Dice/accuracy metrics
    and saves predictions, then removes the temp directory.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 5 random image, mask pairs
    tempdir = tempfile.mkdtemp()
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    val_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys=["image", "label"]),
        ToTensord(keys=["image", "label"]),
    ])

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda:0")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    # post-processing chain builds keys "pred" -> "pred_act" -> "pred_act_dis"
    val_post_transforms = Compose([
        Activationsd(keys="pred", output_postfix="act", sigmoid=True),
        AsDiscreted(keys="pred_act", output_postfix="dis", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred_act_dis", applied_values=[1], output_postfix=None),
    ])
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        # NOTE(review): checkpoint path is hard-coded to a specific metric
        # value — confirm this file exists before running.
        CheckpointLoader(load_path="./runs/net_key_metric=0.9101.pth", load_dict={"net": net}),
        SegmentationSaver(
            output_dir="./runs/",
            batch_transform=lambda x: {
                "filename_or_obj": x["image.filename_or_obj"],
                "affine": x["image.affine"],
                "original_affine": x["image.original_affine"],
                "spatial_shape": x["image.spatial_shape"],
            },
            output_transform=lambda x: x["pred_act_dis"],
        ),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice":
            MeanDice(include_background=True,
                     output_transform=lambda x: (x["pred_act_dis"], x["label"]))
        },
        additional_metrics={
            "val_acc":
            Accuracy(
                output_transform=lambda x: (x["pred_act_dis"], x["label"]))
        },
        val_handlers=val_handlers,
    )
    evaluator.run()
    shutil.rmtree(tempdir)