Example No. 1
    def pre_transforms(self, data=None):
        t = [
            LoadImaged(keys="image", reader="ITKReader"),
            EnsureChannelFirstd(keys="image"),
            Orientationd(keys="image", axcodes="RAS"),
            ScaleIntensityRanged(keys="image", a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),
        ]
        if self.type == InferType.DEEPEDIT:
            t.extend(
                [
                    AddGuidanceFromPointsCustomd(ref_image="image", guidance="guidance", label_names=self.labels),
                    Resized(keys="image", spatial_size=self.spatial_size, mode="area"),
                    ResizeGuidanceMultipleLabelCustomd(guidance="guidance", ref_image="image"),
                    AddGuidanceSignalCustomd(
                        keys="image", guidance="guidance", number_intensity_ch=self.number_intensity_ch
                    ),
                ]
            )
        else:
            t.extend(
                [
                    Resized(keys="image", spatial_size=self.spatial_size, mode="area"),
                    DiscardAddGuidanced(
                        keys="image", label_names=self.labels, number_intensity_ch=self.number_intensity_ch
                    ),
                ]
            )

        t.append(EnsureTyped(keys="image", device=data.get("device") if data else None))
        return t
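
The method above returns a plain list of transforms for the caller to wrap in Compose. A minimal sketch of running it, where infer_task stands for a hypothetical instance of the class that defines pre_transforms:

    from monai.transforms import Compose

    # infer_task is a hypothetical instance of the inference-task class above
    pipeline = Compose(infer_task.pre_transforms(data={"device": "cpu"}))
    sample = pipeline({"image": "/path/to/volume.nii.gz"})  # illustrative path
    print(sample["image"].shape)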
Example No. 2
    def test_invalid_inputs(self):
        with self.assertRaises(NotImplementedError):
            resize = Resized(keys="img", spatial_size=(128, 128, 3), interp_order="order")
            resize({"img": self.imt[0]})

        with self.assertRaises(ValueError):
            resize = Resized(keys="img", spatial_size=(128,), interp_order="order")
            resize({"img": self.imt[0]})
Example No. 3
 def test_longest_shape(self, input_param, expected_shape):
     input_data = {
         "img": np.random.randint(0, 2, size=[3, 4, 7, 10]),
         "label": np.random.randint(0, 2, size=[3, 4, 7, 10]),
     }
     input_param["size_mode"] = "longest"
     rescaler = Resized(**input_param)
     result = rescaler(input_data)
     for k in rescaler.keys:
         np.testing.assert_allclose(result[k].shape[1:], expected_shape)
     set_track_meta(False)
     result = Resized(**input_param)(input_data)
     self.assertNotIsInstance(result["img"], MetaTensor)
     np.testing.assert_allclose(result["img"].shape[1:], expected_shape)
     set_track_meta(True)
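
With size_mode="longest", spatial_size is a single int and every spatial dimension is scaled by spatial_size / max(spatial_shape). A minimal sketch, assuming a recent MONAI release:

    import numpy as np
    from monai.transforms import Resized

    data = {"img": np.zeros((3, 4, 7, 10), dtype=np.float32)}
    out = Resized(keys="img", spatial_size=20, size_mode="longest")(data)
    print(out["img"].shape)  # scale = 20 / 10 = 2.0, so (3, 8, 14, 20)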
Example No. 4
 def pre_transforms(self):
     t = [
         LoadImaged(keys="image", reader="nibabelreader"),
         AddChanneld(keys="image"),
         # Spacing might not be needed as resize transform is used later.
         # Spacingd(keys="image", pixdim=self.spacing),
         RandAffined(
             keys="image",
             prob=1,
             rotate_range=(np.pi / 4, np.pi / 4, np.pi / 4),
             padding_mode="zeros",
             as_tensor_output=False,
         ),
         RandFlipd(keys="image", prob=0.5, spatial_axis=0),
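         # NB: RandRotated takes range_x/y/z in radians, so (-5, 5) spans almost
         # a full turn in each direction; np.pi / 36 would give a 5-degree range.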
         RandRotated(keys="image",
                     range_x=(-5, 5),
                     range_y=(-5, 5),
                     range_z=(-5, 5)),
         Resized(keys="image", spatial_size=self.spatial_size),
     ]
     # If using TTA for deepedit
     if self.deepedit:
         t.append(DiscardAddGuidanced(keys="image"))
     t.append(ToTensord(keys="image"))
     return Compose(t)
Example No. 5
 def train_pre_transforms(self, context: Context):
     # Dataset preparation
     t: List[Any] = [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         SpatialCropForegroundd(keys=("image", "label"),
                                source_key="label",
                                spatial_size=self.roi_size),
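         # "area" interpolation for the image, "nearest" for the label so the
         # resize does not introduce fractional class values.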
         Resized(keys=("image", "label"),
                 spatial_size=self.model_size,
                 mode=("area", "nearest")),
         NormalizeIntensityd(keys="image", subtrahend=208.0,
                             divisor=388.0),  # type: ignore
     ]
     if self.dimension == 3:
         t.append(FindAllValidSlicesd(label="label", sids="sids"))
     t.extend([
         AddInitialSeedPointd(label="label",
                              guidance="guidance",
                              sids="sids"),
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         SelectItemsd(keys=("image", "label", "guidance")),
     ])
     return t
Example No. 6
 def val_pre_transforms(self, context: Context):
     return [
         LoadImaged(keys=("image", "label"), reader="ITKReader"),
         NormalizeLabelsInDatasetd(keys="label", label_names=self._labels),
         EnsureChannelFirstd(keys=("image", "label")),
         Orientationd(keys=["image", "label"], axcodes="RAS"),
         # This transform may not work well for MR images
         ScaleIntensityRanged(keys="image",
                              a_min=-175,
                              a_max=250,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         Resized(keys=("image", "label"),
                 spatial_size=self.spatial_size,
                 mode=("area", "nearest")),
         # Transforms for click simulation
         FindAllValidSlicesMissingLabelsd(keys="label", sids="sids"),
         AddInitialSeedPointMissingLabelsd(keys="label",
                                           guidance="guidance",
                                           sids="sids"),
         AddGuidanceSignalCustomd(
             keys="image",
             guidance="guidance",
             number_intensity_ch=self.number_intensity_ch),
         #
         ToTensord(keys=("image", "label")),
         SelectItemsd(keys=("image", "label", "guidance", "label_names")),
     ]
Example No. 7
 def pre_transforms(self, data=None) -> Sequence[Callable]:
     t = [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image",
                  pixdim=[1.0] * self.dimension,
                  mode="bilinear"),
         AddGuidanceFromPointsd(ref_image="image",
                                guidance="guidance",
                                dimensions=self.dimension),
     ]
     if self.dimension == 2:
         t.append(Fetch2DSliced(keys="image", guidance="guidance"))
     t.extend([
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image",
                              guidance="guidance",
                              spatial_size=self.spatial_size),
         Resized(keys="image", spatial_size=self.model_size, mode="area"),
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         NormalizeIntensityd(keys="image", subtrahend=208,
                             divisor=388),  # type: ignore
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys="image",
                     device=data.get("device") if data else None),
     ])
     return t
Example No. 8
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI607-Guys-1097-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI175-HH-1570-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI385-HH-2078-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI344-Guys-0905-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI409-Guys-0960-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI584-Guys-1129-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI253-HH-1694-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI092-HH-1436-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI574-IOP-1156-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI585-Guys-1130-T1.nii.gz"]),
    ]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # Define transforms for image
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121
    device = torch.device("cuda:0")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    model.load_state_dict(torch.load("best_metric_model.pth"))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
            val_outputs = model(val_images).argmax(dim=1)
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
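
LoadNiftid and AddChanneld above are legacy names; current MONAI spells them LoadImaged and EnsureChannelFirstd. A sketch of the same validation pipeline with the modern names, assuming a recent release:

    from monai.transforms import (
        Compose, LoadImaged, EnsureChannelFirstd, ScaleIntensityd, Resized, ToTensord,
    )

    val_transforms = Compose(
        [
            LoadImaged(keys=["img"]),
            EnsureChannelFirstd(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )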
Example No. 9
 def test_longest_shape(self, input_param, expected_shape):
     input_data = {
         "img": np.random.randint(0, 2, size=[3, 4, 7, 10]),
         "label": np.random.randint(0, 2, size=[3, 4, 7, 10]),
     }
     input_param["size_mode"] = "longest"
     rescaler = Resized(**input_param)
     result = rescaler(input_data)
     for k in rescaler.keys:
         np.testing.assert_allclose(result[k].shape[1:], expected_shape)
Example No. 10
 def pre_transforms(self, data):
     return [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image", pixdim=[1.0, 1.0, 1.0], mode="bilinear"),
         AddGuidanceFromPointsd(ref_image="image", guidance="guidance", dimensions=3),
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image", guidance="guidance", spatial_size=self.spatial_size),
         Resized(keys="image", spatial_size=self.model_size, mode="area"),
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         NormalizeIntensityd(keys="image", subtrahend=208, divisor=388),
         AddGuidanceSignald(image="image", guidance="guidance"),
     ]
Example No. 11
 def test_correct_results(self, spatial_size, interp_order):
     resize = Resized("img", spatial_size, interp_order)
     _order = 0
     if interp_order.endswith("linear"):
         _order = 1
     expected = list()
     for channel in self.imt[0]:
         expected.append(
             skimage.transform.resize(
                 channel, spatial_size, order=_order, clip=False, preserve_range=False, anti_aliasing=False,
             )
         )
     expected = np.stack(expected).astype(np.float32)
     out = resize({"img": self.imt[0]})["img"]
     np.testing.assert_allclose(out, expected, atol=0.9)
Example No. 12
    def test_inverse_compose(self):
        transform = Compose(
            [
                Resized(keys="img", spatial_size=[100, 100, 100]),
                OneOf(
                    [
                        RandScaleIntensityd(keys="img", factors=0.5, prob=1.0),
                        RandShiftIntensityd(keys="img", offsets=0.5, prob=1.0),
                    ]
                ),
            ]
        )
        transform.set_random_state(seed=0)
        result = transform({"img": np.ones((1, 101, 102, 103))})

        result = transform.inverse(result)
        # invert to the original spatial shape
        self.assertTupleEqual(result["img"].shape, (1, 101, 102, 103))
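
The same round trip outside a unit test; a sketch that assumes MetaTensor tracking is enabled (the default), which is what lets Compose.inverse recover the original shape:

    import numpy as np
    from monai.transforms import Compose, Resized

    xform = Compose([Resized(keys="img", spatial_size=[100, 100, 100])])
    d = xform({"img": np.ones((1, 101, 102, 103), dtype=np.float32)})
    restored = xform.inverse(d)
    print(restored["img"].shape)  # (1, 101, 102, 103)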
Example No. 13
 def pre_transforms(self, data=None):
     return [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image", pixdim=[1.0, 1.0], mode="bilinear"),
         AddGuidanceFromPointsd(ref_image="image",
                                guidance="guidance",
                                dimensions=2),
         Fetch2DSliced(keys="image", guidance="guidance"),
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image",
                              guidance="guidance",
                              spatial_size=[256, 256]),
         Resized(keys="image", spatial_size=[256, 256], mode="area"),
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         NormalizeIntensityd(keys="image", subtrahend=208,
                             divisor=388),  # type: ignore
         AddGuidanceSignald(image="image", guidance="guidance"),
     ]
Example No. 14
 def test_correct_results(self, spatial_size, order, mode, cval, clip,
                          preserve_range, anti_aliasing):
     resize = Resized("img", spatial_size, order, mode, cval, clip,
                      preserve_range, anti_aliasing)
     expected = list()
     for channel in self.imt[0]:
         expected.append(
             skimage.transform.resize(
                 channel,
                 spatial_size,
                 order=order,
                 mode=mode,
                 cval=cval,
                 clip=clip,
                 preserve_range=preserve_range,
                 anti_aliasing=anti_aliasing,
             ))
     expected = np.stack(expected).astype(np.float32)
     self.assertTrue(
         np.allclose(resize({"img": self.imt[0]})["img"], expected))
Example No. 15
def get_pre_transforms(roi_size, model_size, dimensions):
    t = [
        LoadImaged(keys=('image', 'label')),
        AddChanneld(keys=('image', 'label')),
        SpatialCropForegroundd(keys=('image', 'label'),
                               source_key='label',
                               spatial_size=roi_size),
        Resized(keys=('image', 'label'),
                spatial_size=model_size,
                mode=('area', 'nearest')),
        NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0)
    ]
    if dimensions == 3:
        t.append(FindAllValidSlicesd(label='label', sids='sids'))
    t.extend([
        AddInitialSeedPointd(label='label', guidance='guidance', sids='sids'),
        AddGuidanceSignald(image='image', guidance='guidance'),
        ToTensord(keys=('image', 'label'))
    ])
    return Compose(t)
Example No. 16
def get_pre_transforms(roi_size, model_size, dimensions):
    t = [
        LoadImaged(keys=("image", "label")),
        AddChanneld(keys=("image", "label")),
        SpatialCropForegroundd(keys=("image", "label"),
                               source_key="label",
                               spatial_size=roi_size),
        Resized(keys=("image", "label"),
                spatial_size=model_size,
                mode=("area", "nearest")),
        NormalizeIntensityd(keys="image", subtrahend=208.0, divisor=388.0),
    ]
    if dimensions == 3:
        t.append(FindAllValidSlicesd(label="label", sids="sids"))
    t.extend([
        AddInitialSeedPointd(label="label", guidance="guidance", sids="sids"),
        AddGuidanceSignald(image="image", guidance="guidance"),
        EnsureTyped(keys=("image", "label")),
    ])
    return Compose(t)
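
Examples No. 15 and No. 16 differ only in their final step (ToTensord vs. EnsureTyped). A sketch of calling either helper, with illustrative sizes and paths:

    pre = get_pre_transforms(roi_size=(128, 128, 64),
                             model_size=(128, 128, 64),
                             dimensions=3)
    sample = pre({"image": "/path/to/img.nii.gz", "label": "/path/to/seg.nii.gz"})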
Example No. 17
 def test_correct_results(self, output_spatial_shape, order, mode, cval,
                          clip, preserve_range, anti_aliasing,
                          anti_aliasing_sigma):
     resize = Resized('img', output_spatial_shape, order, mode, cval, clip,
                      preserve_range, anti_aliasing, anti_aliasing_sigma)
     expected = list()
     for channel in self.imt[0]:
         expected.append(
             skimage.transform.resize(
                 channel,
                 output_spatial_shape,
                 order=order,
                 mode=mode,
                 cval=cval,
                 clip=clip,
                 preserve_range=preserve_range,
                 anti_aliasing=anti_aliasing,
                 anti_aliasing_sigma=anti_aliasing_sigma))
     expected = np.stack(expected).astype(np.float32)
     self.assertTrue(
         np.allclose(resize({'img': self.imt[0]})['img'], expected))
Example No. 18
    def test_correct_results(self, spatial_size, mode):
        resize = Resized("img", spatial_size, mode=mode)
        _order = 0
        if mode.endswith("linear"):
            _order = 1
        if spatial_size == (32, -1):
            spatial_size = (32, 64)
        expected = [
            skimage.transform.resize(channel,
                                     spatial_size,
                                     order=_order,
                                     clip=False,
                                     preserve_range=False,
                                     anti_aliasing=False)
            for channel in self.imt[0]
        ]

        expected = np.stack(expected).astype(np.float32)
        for p in TEST_NDARRAYS:
            out = resize({"img": p(self.imt[0])})["img"]
            assert_allclose(out, expected, type_test=False, atol=0.9)
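
The (32, -1) case works because a -1 entry in spatial_size tells Resize to keep that dimension as-is. A minimal sketch, assuming a recent MONAI release:

    import numpy as np
    from monai.transforms import Resized

    img = np.zeros((1, 42, 64), dtype=np.float32)
    out = Resized(keys="img", spatial_size=(32, -1), mode="area")({"img": img})
    print(out["img"].shape)  # (1, 32, 64)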
Example No. 19
 def test_invalid_inputs(self, _, order, raises):
     with self.assertRaises(raises):
         resize = Resized(keys='img',
                          output_spatial_shape=(128, 128, 3),
                          order=order)
         resize({'img': self.imt[0]})
Example No. 20
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array(
        [0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    train_files = [{
        "img": img,
        "label": label
    } for img, label in zip(images[:10], labels[:10])]
    val_files = [{
        "img": img,
        "label": label
    } for img, label in zip(images[-10:], labels[-10:])]

    # define transforms for image
    train_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
        ToTensord(keys=["img"]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        ToTensord(keys=["img"]),
    ])

    # define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds,
                              batch_size=2,
                              num_workers=4,
                              pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # create DenseNet121, CrossEntropyLoss and Adam optimizer
    net = monai.networks.nets.densenet.densenet121(
        spatial_dims=3,
        in_channels=1,
        out_channels=2,
    )
    loss = torch.nn.CrossEntropyLoss()
    lr = 1e-5
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device("cuda:0")

    # Ignite trainer expects batch=(img, label) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    def prepare_batch(batch, device=None, non_blocking=False):
        return _prepare_batch((batch["img"], batch["label"]), device,
                              non_blocking)

    trainer = create_supervised_trainer(net,
                                        opt,
                                        loss,
                                        device,
                                        False,
                                        prepare_batch=prepare_batch)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint("./runs/",
                                         "net",
                                         n_saved=10,
                                         require_empty=False)
    trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED,
                              handler=checkpoint_handler,
                              to_save={
                                  "net": net,
                                  "opt": opt
                              })

    # StatsHandler prints loss at every iteration and metrics at every epoch;
    # we don't set metrics for the trainer here, so it just prints loss. Users can customize the print
    # functions and use output_transform to convert engine.state.output if it's not a loss value.
    train_stats_handler = StatsHandler(name="trainer")
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    # set parameters for validation
    validation_every_n_epochs = 1

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {
        metric_name: Accuracy(),
        "AUC": ROCAUC(to_onehot_y=True, add_softmax=True)
    }
    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net,
                                            val_metrics,
                                            device,
                                            True,
                                            prepare_batch=prepare_batch)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no loss to print, so disable per-iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,  # fetch global epoch number from trainer
    )
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no loss to plot, so disable per-iteration output
        global_epoch_transform=lambda x: trainer.state.epoch,  # fetch global epoch number from trainer
    )
    val_tensorboard_stats_handler.attach(evaluator)

    # add early stopping handler to evaluator
    early_stopper = EarlyStopping(
        patience=4,
        score_function=stopping_fn_from_metric(metric_name),
        trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED,
                                handler=early_stopper)

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds,
                            batch_size=2,
                            num_workers=4,
                            pin_memory=torch.cuda.is_available())

    @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs))
    def run_validation(engine):
        evaluator.run(val_loader)

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds,
                              batch_size=2,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=torch.cuda.is_available())

    train_epochs = 30
    state = trainer.run(train_loader, train_epochs)
Example No. 21
TESTS.append((
    "Rotate90d 3d",
    "3D",
    0,
    Rotate90d(KEYS, k=2, spatial_axes=(1, 2)),
))

TESTS.append((
    "RandRotate90d 3d",
    "3D",
    0,
    RandRotate90d(KEYS, prob=1, spatial_axes=(1, 2)),
))

TESTS.append(
    ("Spacingd 3d", "3D", 3e-2, Spacingd(KEYS, [0.5, 0.7, 0.9],
                                         diagonal=False)))

TESTS.append(("Resized 2d", "2D", 2e-1, Resized(KEYS, [50, 47])))

TESTS.append(("Resized 3d", "3D", 5e-2, Resized(KEYS, [201, 150, 78])))

TESTS.append((
    "Zoomd 1d",
    "1D odd",
    0,
    Zoomd(KEYS, zoom=2, keep_size=False),
))

TESTS.append((
    "Zoomd 2d",
    "2D",
    2e-1,
    Zoomd(KEYS, zoom=0.9),
))
Example No. 22
labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
train_files = [{
    'img': img,
    'label': label
} for img, label in zip(images[:10], labels[:10])]
val_files = [{
    'img': img,
    'label': label
} for img, label in zip(images[-10:], labels[-10:])]

# Define transforms for image
train_transforms = Compose([
    LoadNiftid(keys=['img']),
    AddChanneld(keys=['img']),
    ScaleIntensityd(keys=['img']),
    Resized(keys=['img'], spatial_size=(96, 96, 96)),
    RandRotate90d(keys=['img'], prob=0.8, spatial_axes=[0, 2]),
    ToTensord(keys=['img'])
])
val_transforms = Compose([
    LoadNiftid(keys=['img']),
    AddChanneld(keys=['img']),
    ScaleIntensityd(keys=['img']),
    Resized(keys=['img'], spatial_size=(96, 96, 96)),
    ToTensord(keys=['img'])
])

# Define dataset, data loader
check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
check_loader = DataLoader(check_ds,
                          batch_size=2,
                          num_workers=4,
                          pin_memory=torch.cuda.is_available())
Example No. 23
    def setup(self, X_train, X_test, y_train, y_test, learning_rate, optimizer,
              loss_function):

        # Extract images and labels from the participant files in directory

        self.train_files = [{
            "img": img,
            "label": label
        } for img, label in zip(X_train, y_train)]
        self.val_files = [{
            "img": img,
            "label": label
        } for img, label in zip(X_test, y_test)]

        # Target (_,_,_) Resampling Shape

        if self.pytorch_version == 1:  # AlexNet Requires this Size
            s_size = 227
        else:
            s_size = 96

        # Define transforms for image

        self.train_transforms = Compose([
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(s_size, s_size, s_size)),
            RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
            ToTensord(keys=["img"]),
        ])
        self.val_transforms = Compose([
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(s_size, s_size, s_size)),
            ToTensord(keys=["img"]),
        ])

        # Dataset and Dataloader for PyTorch

        check_ds = monai.data.Dataset(data=self.train_files,
                                      transform=self.train_transforms)
        check_loader = DataLoader(check_ds,
                                  batch_size=2,
                                  num_workers=4,
                                  pin_memory=torch.cuda.is_available())
        check_data = monai.utils.misc.first(check_loader)
        print(check_data["img"].shape, check_data["label"])

        # Training DataLoader

        self.train_ds = monai.data.Dataset(data=self.train_files,
                                           transform=self.train_transforms)
        self.train_loader = DataLoader(self.train_ds,
                                       batch_size=2,
                                       shuffle=True,
                                       num_workers=4,
                                       pin_memory=torch.cuda.is_available())

        # Testing DataLoader

        self.val_ds = monai.data.Dataset(data=self.val_files,
                                         transform=self.val_transforms)
        self.val_loader = DataLoader(self.val_ds,
                                     batch_size=2,
                                     num_workers=4,
                                     pin_memory=torch.cuda.is_available())

        # Using CUDA if Available

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        print("DEVICE: ", "cuda" if torch.cuda.is_available() else "cpu")

        # Model Selection based on Input Parameters
        # Output shape dependent on regression/classification
        if self.pytorch_version == 1:
            self.model = AlexNet3D()
            if torch.cuda.is_available():
                self.model.cuda()
        elif self.pytorch_version == 2:
            self.model = resnet.resnet_101(pretrained=self.pretrained_resnet,
                                           progress=True)
            if torch.cuda.is_available():
                self.model.cuda()
        elif self.pytorch_version == 121:
            self.model = monai.networks.nets.densenet.densenet121(
                spatial_dims=3, in_channels=1,
                out_channels=self.output_shape).to(self.device)
        elif self.pytorch_version == 169:
            self.model = monai.networks.nets.densenet.densenet169(
                spatial_dims=3, in_channels=1,
                out_channels=self.output_shape).to(self.device)
        elif self.pytorch_version == 201:
            self.model = monai.networks.nets.densenet.densenet201(
                spatial_dims=3, in_channels=1,
                out_channels=self.output_shape).to(self.device)
        elif self.pytorch_version == 264:
            self.model = monai.networks.nets.densenet.densenet264(
                spatial_dims=3, in_channels=1,
                out_channels=self.output_shape).to(self.device)

        # Grid Search Parameters: Optimizer, Loss Function and Learning Rate

        if optimizer == "SGD":
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             learning_rate)
        elif optimizer == "Adam":
            self.optimizer = torch.optim.Adam(self.model.parameters(),
                                              learning_rate)

        if self.task == "regression":
            self.loss_function = torch.nn.MSELoss()
        else:
            if loss_function == "CrossEntropyLoss":
                self.loss_function = torch.nn.CrossEntropyLoss()
            elif loss_function == "MSELoss":
                self.loss_function = torch.nn.MSELoss()
            elif loss_function == "NLLLoss":
                self.loss_function = torch.nn.NLLLoss()
        return [X_train, X_test, y_train, y_test]
Example No. 24
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # define transforms for image
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create DenseNet121
    net = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,)
    device = torch.device("cuda:0")

    def prepare_batch(batch, device=None, non_blocking=False):
        return _prepare_batch((batch["img"], batch["label"]), device, non_blocking)

    metric_name = "Accuracy"
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: Accuracy()}
    # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True, prepare_batch=prepare_batch)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name="evaluator",
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
    )
    val_stats_handler.attach(evaluator)

    # for the array data format, assume the 3rd item of batch data is the meta_data
    prediction_saver = ClassificationSaver(
        output_dir="tempdir",
        name="evaluator",
        batch_transform=lambda batch: {"filename_or_obj": batch["img.filename_or_obj"]},
        output_transform=lambda output: output[0].argmax(1),
    )
    prediction_saver.attach(evaluator)

    # the model was trained by "densenet_training_dict" example
    CheckpointLoader(load_path="./runs/net_checkpoint_20.pth", load_dict={"net": net}).attach(evaluator)

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    state = evaluator.run(val_loader)
    print(state)
Example No. 25
 def test_invalid_inputs(self, _, order, raises):
     with self.assertRaises(raises):
         resize = Resized(keys="img",
                          spatial_size=(128, 128, 3),
                          order=order)
         resize({"img": self.imt[0]})
Example No. 26
        if i == 0:
            continue  # skipping background
        b_label = labels == i
        bb = BoundingRect()(b_label[None])
        area = (bb[0, 1] - bb[0, 0]) * (bb[0, 3] - bb[0, 2]) * (bb[0, 5] -
                                                                bb[0, 4])
        if area <= 500:
            continue
        print(bb, area)
        s = [bb[0, 0] - 16, bb[0, 2] - 16, bb[0, 4] - 16]
        e = [bb[0, 1] + 16, bb[0, 3] + 16, bb[0, 5] + 16]

        # generate lesion patches based on the bounding boxes
        data_out = AddChanneld(keys)(data)
        data_out = SpatialCropd(keys, roi_start=s, roi_end=e)(data_out)
        resize = Resized(keys, patch_size, mode=("trilinear", "nearest"))
        data_out = resize(data_out)

        patch_out = (
            f"{folder}/patch/lesion_{s[0]}_{s[1]}_{s[2]}_{e[0]}_{e[1]}_{e[2]}_{name_id}"
        )
        label_out = (
            f"{folder}/patch/labels_{s[0]}_{s[1]}_{s[2]}_{e[0]}_{e[1]}_{e[2]}_{name_id}"
        )
        write_nifti(data_out["image"][0], file_name=patch_out)
        write_nifti(data_out["label"][0], file_name=label_out)

        # generate random negative samples
        rand_data_out = AddChanneld(keys)(data)
        rand_data_out["inv_label"] = (rand_data_out["label"] == 0
                                      )  # non-lesion sampling map
Example No. 27
File: test.py Project: ckbr0/RIS
def main(train_output):
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    print_config()

    # Setup directories
    dirs = setup_directories()

    # Setup torch device
    device, using_gpu = create_device("cuda")

    # Load and randomize images

    # HACKATHON image and segmentation data
    hackathon_dir = os.path.join(dirs["data"], 'HACKATHON')
    map_fn = lambda x: (x[0], int(x[1]))
    with open(os.path.join(hackathon_dir, "train.txt"), 'r') as fp:
        train_info_hackathon = [
            map_fn(entry.strip().split(',')) for entry in fp.readlines()
        ]
    image_dir = os.path.join(hackathon_dir, 'images', 'train')
    seg_dir = os.path.join(hackathon_dir, 'segmentations', 'train')
    _train_data_hackathon = get_data_from_info(image_dir,
                                               seg_dir,
                                               train_info_hackathon,
                                               dual_output=False)
    large_image_splitter(_train_data_hackathon, dirs["cache"])

    balance_training_data(_train_data_hackathon, seed=72)

    # PSUF data
    """psuf_dir = os.path.join(dirs["data"], 'psuf')
    with open(os.path.join(psuf_dir, "train.txt"), 'r') as fp:
        train_info = [entry.strip().split(',') for entry in fp.readlines()]
    image_dir = os.path.join(psuf_dir, 'images')
    train_data_psuf = get_data_from_info(image_dir, None, train_info)"""
    # Split data into train, validate and test
    train_split, test_data_hackathon = train_test_split(_train_data_hackathon,
                                                        test_size=0.2,
                                                        shuffle=True,
                                                        random_state=42)
    #train_data_hackathon, valid_data_hackathon = train_test_split(train_split, test_size=0.2, shuffle=True, random_state=43)
    # Setup transforms

    # Crop foreground
    crop_foreground = CropForegroundd(
        keys=["image"],
        source_key="image",
        margin=(5, 5, 0),
        #select_fn = lambda x: x != 0
    )
    # Crop Z
    crop_z = RelativeCropZd(keys=["image"], relative_z_roi=(0.07, 0.12))
    # Window width and level (window center)
    WW, WL = 1500, -600
    ct_window = CTWindowd(keys=["image"], width=WW, level=WL)
    spatial_pad = SpatialPadd(keys=["image"], spatial_size=(-1, -1, 30))
    resize = Resized(keys=["image"],
                     spatial_size=(int(512 * 0.50), int(512 * 0.50), -1),
                     mode="trilinear")

    # Create transforms
    common_transform = Compose([
        LoadImaged(keys=["image"]),
        ct_window,
        CTSegmentation(keys=["image"]),
        AddChanneld(keys=["image"]),
        resize,
        crop_foreground,
        crop_z,
        spatial_pad,
    ])
    hackathon_train_transform = Compose([
        common_transform,
        ToTensord(keys=["image"]),
    ]).flatten()
    psuf_transforms = Compose([
        LoadImaged(keys=["image"]),
        AddChanneld(keys=["image"]),
        ToTensord(keys=["image"]),
    ])

    # Setup data
    #set_determinism(seed=100)
    test_dataset = PersistentDataset(data=test_data_hackathon[:],
                                     transform=hackathon_train_transform,
                                     cache_dir=dirs["persistent"])
    test_loader = DataLoader(test_dataset,
                             batch_size=2,
                             shuffle=True,
                             pin_memory=using_gpu,
                             num_workers=1,
                             collate_fn=PadListDataCollate(
                                 Method.SYMMETRIC, NumpyPadMode.CONSTANT))

    # Setup network, loss function, optimizer and scheduler
    network = nets.DenseNet121(spatial_dims=3, in_channels=1,
                               out_channels=1).to(device)

    # Setup validator and trainer
    valid_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
    ])

    # Setup tester
    tester = Tester(device=device,
                    test_data_loader=test_loader,
                    load_dir=train_output,
                    out_dir=dirs["out"],
                    network=network,
                    post_transform=valid_post_transforms,
                    non_blocking=using_gpu,
                    amp=using_gpu)

    # Run tester
    tester.run()
Example No. 28
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        "/workspace/data/medical/ixi/IXI-T1/IXI314-IOP-0889-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI249-Guys-1072-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI609-HH-2600-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI173-HH-1590-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI020-Guys-0700-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI342-Guys-0909-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI134-Guys-0780-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI577-HH-2661-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI066-Guys-0731-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI130-HH-1528-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
        "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array(
        [0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
    train_files = [{
        "img": img,
        "label": label
    } for img, label in zip(images[:10], labels[:10])]
    val_files = [{
        "img": img,
        "label": label
    } for img, label in zip(images[-10:], labels[-10:])]

    # Define transforms for image
    train_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        RandRotate90d(keys=["img"], prob=0.8, spatial_axes=[0, 2]),
        ToTensord(keys=["img"]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=["img"]),
        AddChanneld(keys=["img"]),
        ScaleIntensityd(keys=["img"]),
        Resized(keys=["img"], spatial_size=(96, 96, 96)),
        ToTensord(keys=["img"]),
    ])

    # Define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds,
                              batch_size=2,
                              num_workers=4,
                              pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds,
                              batch_size=2,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=torch.cuda.is_available())

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds,
                            batch_size=2,
                            num_workers=4,
                            pin_memory=torch.cuda.is_available())

    # Create DenseNet121, CrossEntropyLoss and Adam optimizer
    device = torch.device("cuda:0")
    model = monai.networks.nets.densenet.densenet121(
        spatial_dims=3,
        in_channels=1,
        out_channels=2,
    ).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)

    # start a typical PyTorch training
    val_interval = 2
    best_metric = -1
    best_metric_epoch = -1
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(
                device), batch_data["label"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(),
                              epoch_len * epoch + step)
        epoch_loss /= step
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(
                        device), val_data["label"].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)

                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                auc_metric = compute_roc_auc(y_pred,
                                             y,
                                             to_onehot_y=True,
                                             add_softmax=True)
                if acc_metric > best_metric:
                    best_metric = acc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current accuracy: {:.4f} current AUC: {:.4f} best accuracy: {:.4f} at epoch {}"
                    .format(epoch + 1, acc_metric, auc_metric, best_metric,
                            best_metric_epoch))
                writer.add_scalar("val_accuracy", acc_metric, epoch + 1)
    print(
        f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}"
    )
    writer.close()
Example No. 29
for acc in [True, False]:
    TESTS.append(("Orientationd 3d", "3D", 0, True,
                  Orientationd(KEYS, "RAS", as_closest_canonical=acc)))

TESTS.append(("Rotate90d 2d", "2D", 0, True, Rotate90d(KEYS)))

TESTS.append(
    ("Rotate90d 3d", "3D", 0, True, Rotate90d(KEYS, k=2, spatial_axes=(1, 2))))

TESTS.append(("RandRotate90d 3d", "3D", 0, True,
              RandRotate90d(KEYS, prob=1, spatial_axes=(1, 2))))

TESTS.append(("Spacingd 3d", "3D", 3e-2, True,
              Spacingd(KEYS, [0.5, 0.7, 0.9], diagonal=False)))

TESTS.append(("Resized 2d", "2D", 2e-1, True, Resized(KEYS, [50, 47])))

TESTS.append(("Resized 3d", "3D", 5e-2, True, Resized(KEYS, [201, 150, 78])))

TESTS.append(("Resized longest 2d", "2D", 2e-1, True,
              Resized(KEYS, 47, "longest", "area")))

TESTS.append(("Resized longest 3d", "3D", 5e-2, True,
              Resized(KEYS, 201, "longest", "trilinear", True)))
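
# In the two "longest" cases above, the positional arguments after KEYS are
# spatial_size, size_mode, mode, and (for the 3d case) align_corners=True.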

TESTS.append(("Lambdad 2d", "2D", 5e-2, False,
              Lambdad(KEYS,
                      func=lambda x: x + 5,
                      inv_func=lambda x: x - 5,
                      overwrite=True)))
Example No. 30
 def test_invalid_inputs(self):
     with self.assertRaises(TypeError):
         resize = Resized(keys="img",
                          spatial_size=(128, 128, 3),
                          order="order")
         resize({"img": self.imt[0]})