def test_saved_content(self):
    with tempfile.TemporaryDirectory() as tempdir:
        rank = dist.get_rank()

        # set up engine
        def _train_func(engine, batch):
            return torch.zeros(8 + rank * 2)

        engine = Engine(_train_func)

        # set up testing handler
        saver = ClassificationSaver(output_dir=tempdir, filename="predictions.csv", save_rank=1)
        saver.attach(engine)

        # rank 0 has 8 images, rank 1 has 10 images
        data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8 * rank, (8 + rank) * (rank + 1))]}]
        engine.run(data, max_epochs=1)

        filepath = os.path.join(tempdir, "predictions.csv")
        if rank == 1:
            self.assertTrue(os.path.exists(filepath))
            with open(filepath, "r") as f:
                reader = csv.reader(f)
                i = 0
                for row in reader:
                    self.assertEqual(row[0], "testfile" + str(i))
                    self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0)
                    i += 1
                self.assertEqual(i, 18)
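# The test above assumes an already-initialized process group with world_size=2.
# A minimal launcher sketch for exercising it on two local ranks; the gloo
# backend, the address/port, and the _run_test helper are illustrative
# assumptions, not part of the original test:
import torch.distributed as dist
import torch.multiprocessing as mp


def _run_test(rank):
    dist.init_process_group(backend="gloo", init_method="tcp://127.0.0.1:12345", rank=rank, world_size=2)
    # ... run the unittest case on this rank ...
    dist.destroy_process_group()


if __name__ == "__main__":
    mp.spawn(_run_test, nprocs=2)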
def test_saved_content(self):
    default_dir = os.path.join(".", "tempdir")
    shutil.rmtree(default_dir, ignore_errors=True)

    # set up engine
    def _train_func(engine, batch):
        return torch.zeros(8)

    engine = Engine(_train_func)

    # set up testing handler
    saver = ClassificationSaver(output_dir=default_dir, filename="predictions.csv")
    saver.attach(engine)

    data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8)]}]
    engine.run(data, max_epochs=1)
    filepath = os.path.join(default_dir, "predictions.csv")
    self.assertTrue(os.path.exists(filepath))
    with open(filepath, "r") as f:
        reader = csv.reader(f)
        i = 0
        for row in reader:
            self.assertEqual(row[0], "testfile" + str(i))
            self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0)
            i += 1
        self.assertEqual(i, 8)
    shutil.rmtree(default_dir)
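# Per the assertions above, ClassificationSaver writes one CSV row per input
# filename with the prediction values appended, e.g. "testfile0,0.0".
# A quick stand-alone way to inspect such a file (a sketch; the path is an
# assumption, and note the test above deletes its own output dir on exit):
import csv

with open("./tempdir/predictions.csv") as f:
    for row in csv.reader(f):
        print(row)  # e.g. ['testfile0', '0.0']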
def test_saved_content(self):
    with tempfile.TemporaryDirectory() as tempdir:
        # set up engine
        def _train_func(engine, batch):
            return torch.zeros(8)

        engine = Engine(_train_func)

        # set up testing handlers
        saver = CSVSaver(output_dir=tempdir, filename="predictions2.csv")
        ClassificationSaver(output_dir=tempdir, filename="predictions1.csv").attach(engine)
        ClassificationSaver(saver=saver).attach(engine)

        data = [{"filename_or_obj": ["testfile" + str(i) for i in range(8)]}]
        engine.run(data, max_epochs=1)

        def _test_file(filename):
            filepath = os.path.join(tempdir, filename)
            self.assertTrue(os.path.exists(filepath))
            with open(filepath, "r") as f:
                reader = csv.reader(f)
                i = 0
                for row in reader:
                    self.assertEqual(row[0], "testfile" + str(i))
                    self.assertEqual(np.array(row[1:]).astype(np.float32), 0.0)
                    i += 1
                self.assertEqual(i, 8)

        _test_file("predictions1.csv")
        _test_file("predictions2.csv")
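# The second handler above delegates its writes to a shared CSVSaver instance.
# CSVSaver can also be driven directly, outside an Ignite engine; a minimal
# sketch (the output dir, filenames, and tensor values are assumptions):
import torch
from monai.data import CSVSaver

saver = CSVSaver(output_dir="./out", filename="preds.csv")
saver.save_batch(torch.zeros(2), meta_data={"filename_or_obj": ["a.nii.gz", "b.nii.gz"]})
saver.finalize()  # flush the cached rows to ./out/preds.csv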
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # define transforms for image
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create DenseNet121
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2)
    device = torch.device("cuda:0")

    def prepare_batch(batch, device=None, non_blocking=False):
        return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: Accuracy()}

    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the dict data format, extract the image meta data from the batch dict
    prediction_saver = ClassificationSaver(
        output_dir="tempdir",
        name="evaluator",
        batch_transform=lambda batch: {"filename_or_obj": batch["img.filename_or_obj"]},
        output_transform=lambda output: output[0].argmax(1),
    )
    prediction_saver.attach(evaluator)

    # the model was trained by the "densenet_training_dict" example
    CheckpointLoader(load_path="./runs/net_checkpoint_20.pth", load_dict={"net": net}).attach(evaluator)

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    state = evaluator.run(val_loader)
    print(state)
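# These examples are normally run as stand-alone scripts; the usual entry-point
# guard would follow main() (a sketch; note the script hard-codes "cuda:0" and
# so assumes a CUDA device is available):
if __name__ == "__main__":
    main()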
evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

# add stats event handler to print validation stats via evaluator
val_stats_handler = StatsHandler(
    name="evaluator",
    output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
)
val_stats_handler.attach(evaluator)

# for the array data format, assume the 3rd item of batch data is the meta_data
prediction_saver = ClassificationSaver(
    output_dir="tempdir",
    batch_transform=lambda batch: batch[2],
    output_transform=lambda output: output[0].argmax(1),
)
prediction_saver.attach(evaluator)

# the model was trained by the "densenet_training_array" example
CheckpointLoader(load_path="./runs/net_checkpoint_40.pth", load_dict={"net": net}).attach(evaluator)

# create a validation data loader
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI607-Guys-1097-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI175-HH-1570-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI385-HH-2078-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI344-Guys-0905-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI409-Guys-0960-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI584-Guys-1129-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI253-HH-1694-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI092-HH-1436-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI574-IOP-1156-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI585-Guys-1130-T1.nii.gz"]),
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # define transforms for image
    val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])

    # define image dataset
    val_ds = ImageDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)

    # create DenseNet121
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: Accuracy()}

    def prepare_batch(batch, device=None, non_blocking=False):
        return _prepare_batch((batch[0], batch[1]), device, non_blocking)

    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the array data format, assume the 3rd item of batch data is the meta_data
    prediction_saver = ClassificationSaver(
        output_dir="tempdir",
        batch_transform=lambda batch: batch[2],
        output_transform=lambda output: output[0].argmax(1),
    )
    prediction_saver.attach(evaluator)

    # the model was trained by the "densenet_training_array" example
    CheckpointLoader(load_path="./runs_array/net_checkpoint_20.pt", load_dict={"net": net}).attach(evaluator)

    # create a validation data loader
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    state = evaluator.run(val_loader)
    print(state)
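# If the IXI files are not available locally, the pipeline above can be
# smoke-tested on synthetic volumes instead; a minimal sketch (the file names,
# shape, and dtype are illustrative assumptions):
import nibabel as nib
import numpy as np

synthetic_images = []
for i in range(10):
    name = f"im{i}.nii.gz"
    nib.save(nib.Nifti1Image(np.random.rand(64, 64, 64).astype(np.float32), np.eye(4)), name)
    synthetic_images.append(name)
# then pass synthetic_images as image_files to ImageDataset in place of the IXI paths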
metric_name = "Accuracy"
# add evaluation metric to the evaluator engine
val_metrics = {metric_name: Accuracy()}

# Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
# user can add output_transform to return other values
evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

# add stats event handler to print validation stats via evaluator
val_stats_handler = StatsHandler(
    name="evaluator",
    output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
)
val_stats_handler.attach(evaluator)

# for the dict data format, extract the image meta data from the batch dict
prediction_saver = ClassificationSaver(
    output_dir="tempdir",
    name="evaluator",
    batch_transform=lambda batch: {"filename_or_obj": batch["img.filename_or_obj"]},
    output_transform=lambda output: output[0].argmax(1),
)
prediction_saver.attach(evaluator)

# the model was trained by the "densenet_training_dict" example
CheckpointLoader(load_path="./runs/net_checkpoint_40.pth", load_dict={"net": net}).attach(evaluator)

# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

state = evaluator.run(val_loader)