def test_use_case(self):
        with tempfile.TemporaryDirectory() as tempdir:
            img_ = nib.Nifti1Image(np.random.randint(0, 2, size=(20, 20, 20)),
                                   np.eye(4))
            seg_ = nib.Nifti1Image(np.random.randint(0, 2, size=(20, 20, 20)),
                                   np.eye(4))
            img_name = os.path.join(tempdir, "img.nii.gz")
            seg_name = os.path.join(tempdir, "seg.nii.gz")
            nib.save(img_, img_name)
            nib.save(seg_, seg_name)
            img_list, seg_list = [img_name], [seg_name]

            img_xform = _TestCompose([
                EnsureChannelFirst(),
                Spacing(pixdim=(1.5, 1.5, 3.0)),
                RandAdjustContrast()
            ])
            seg_xform = _TestCompose([
                EnsureChannelFirst(),
                Spacing(pixdim=(1.5, 1.5, 3.0), mode="nearest")
            ])
            img_dataset = ImageDataset(
                image_files=img_list,
                seg_files=seg_list,
                transform=img_xform,
                seg_transform=seg_xform,
                image_only=False,
                transform_with_metadata=True,
            )
            self.assertTupleEqual(img_dataset[0][0].shape, (1, 14, 14, 7))
            self.assertTupleEqual(img_dataset[0][1].shape, (1, 14, 14, 7))
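# The test above relies on a small _TestCompose helper that is not shown in this snippet.
# A minimal sketch of what such a helper might look like (an assumption, written against a
# pre-MetaTensor MONAI API where Spacing returns (img, old_affine, new_affine) and
# EnsureChannelFirst accepts the loader's metadata dict):
from monai.transforms import Compose


class _TestCompose(Compose):
    def __call__(self, data, meta):
        data = self.transforms[0](data, meta)  # EnsureChannelFirst reads the original channel dim from meta
        data, _, meta["affine"] = self.transforms[1](data, meta["affine"])  # Spacing resamples and updates the affine
        for t in self.transforms[2:]:  # remaining array-only transforms, e.g. RandAdjustContrast
            data = t(data)
        return data, meta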
Example 2
def test_correct_results(self, gamma):
    adjuster = RandAdjustContrast(prob=1.0, gamma=gamma)
    result = adjuster(self.imt)
    epsilon = 1e-7
    img_min = self.imt.min()
    img_range = self.imt.max() - img_min
    expected = (np.power(
        (self.imt - img_min) / float(img_range + epsilon),
        adjuster.gamma_value) * img_range + img_min)
    np.testing.assert_allclose(expected, result, rtol=1e-05)

def test_correct_results(self, gamma):
    adjuster = RandAdjustContrast(prob=1.0, gamma=gamma)
    for p in TEST_NDARRAYS:
        result = adjuster(p(self.imt))
        epsilon = 1e-7
        img_min = self.imt.min()
        img_range = self.imt.max() - img_min
        expected = (np.power(
            (self.imt - img_min) / float(img_range + epsilon),
            adjuster.gamma_value) * img_range + img_min)
        assert_allclose(expected, result, rtol=1e-05, type_test=False)
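# For reference, the expected value computed in the two tests above is plain intensity
# gamma correction: normalise to [0, 1], raise to gamma, then rescale back to the original
# intensity range. A standalone version (the helper name is illustrative, not MONAI API):
import numpy as np


def gamma_adjust(img, gamma, epsilon=1e-7):
    img_min = img.min()
    img_range = img.max() - img_min
    return np.power((img - img_min) / float(img_range + epsilon), gamma) * img_range + img_min


# gamma > 1 darkens mid-range intensities, gamma < 1 brightens them
darker = gamma_adjust(np.random.rand(1, 8, 8), gamma=2.0)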
    def test_transform_randomized(self, input):
        # Compose deterministic and randomized transforms
        transforms = Compose([
            Range("flip")(Flip()),
            Rotate90(),
            Range()(RandAdjustContrast(prob=0.0)),
            Range("random flip")(RandFlip(prob=1.0)),
            ToTensor(),
        ])
        # Apply transforms
        output = transforms(input)

        # Decorate with NVTX Range
        transforms1 = Range()(transforms)
        transforms2 = Range("Transforms2")(transforms)
        transforms3 = Range(name="Transforms3", methods="__call__")(transforms)

        # Apply transforms with Range
        output1 = transforms1(input)
        output2 = transforms2(input)
        output3 = transforms3(input)

        # Check if the outputs are equal
        self.assertIsInstance(output, torch.Tensor)
        self.assertIsInstance(output1, torch.Tensor)
        self.assertIsInstance(output2, torch.Tensor)
        self.assertIsInstance(output3, torch.Tensor)
        np.testing.assert_equal(output.numpy(), output1.numpy())
        np.testing.assert_equal(output.numpy(), output2.numpy())
        np.testing.assert_equal(output.numpy(), output3.numpy())

        # Check if the first randomized is RandAdjustContrast
        for tran in transforms.transforms:
            if isinstance(tran, Randomizable):
                self.assertIsInstance(tran, RandAdjustContrast)
                break
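# The final loop above can be lifted into a tiny standalone helper (the helper name is
# illustrative, and the Randomizable import path may differ slightly between MONAI versions):
from monai.transforms import Compose, Flip, RandFlip, Randomizable


def first_randomizable(compose):
    """Return the first randomizable transform inside a Compose, or None."""
    for t in compose.transforms:
        if isinstance(t, Randomizable):
            return t
    return None


assert isinstance(first_randomizable(Compose([Flip(), RandFlip(prob=1.0)])), RandFlip)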
Example 5
TEST_CASE_1 = [
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandGaussianNoise(prob=1.0)
    ]),
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandGaussianNoise(prob=1.0)
    ]),
    (0, 1),
    (1, 128, 128, 128),
]

TEST_CASE_2 = [
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandAdjustContrast(prob=1.0)
    ]),
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandAdjustContrast(prob=1.0)
    ]),
    (0, 1),
    (1, 128, 128, 128),
]


class TestCompose(Compose):
    def __call__(self, input_):
        img, metadata = self.transforms[0](input_)
        img = self.transforms[1](img)
        img, _, _ = self.transforms[2](img, metadata["affine"])
        return self.transforms[3](img), metadata
Example 6
import numpy as np
from parameterized import parameterized
from torch.utils.data import DataLoader

from monai.data import ArrayDataset
from monai.transforms import AddChannel, Compose, LoadNifti, RandAdjustContrast, RandGaussianNoise, Spacing

TEST_CASE_1 = [
    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
    (0, 1),
    (1, 128, 128, 128),
]

TEST_CASE_2 = [
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
    (0, 1),
    (1, 128, 128, 128),
]


class TestCompose(Compose):
    def __call__(self, input_):
        img, metadata = self.transforms[0](input_)
        img = self.transforms[1](img)
        img, _, _ = self.transforms[2](img, metadata["affine"])
        return self.transforms[3](img), metadata


TEST_CASE_3 = [
Example 7
TEST_CASE_ARRAY_1 = [np.random.randn(3, 10, 10)]

TEST_CASE_DICT_0 = [{"image": np.random.randn(3, 3)}]
TEST_CASE_DICT_1 = [{"image": np.random.randn(3, 10, 10)}]

TEST_CASE_TORCH_0 = [torch.randn(3, 3)]
TEST_CASE_TORCH_1 = [torch.randn(3, 10, 10)]

TEST_CASE_WRAPPER = [np.random.randn(3, 10, 10)]

TEST_CASE_RECURSIVE_0 = [
    torch.randn(3, 3),
    Compose([
        ToNumpy(),
        Flip(),
        RandAdjustContrast(prob=0.0),
        RandFlip(prob=1.0),
        ToTensor()
    ]),
]
TEST_CASE_RECURSIVE_1 = [
    torch.randn(3, 3),
    Compose([
        ToNumpy(),
        Flip(),
        Compose([RandAdjustContrast(prob=0.0),
                 RandFlip(prob=1.0)]),
        ToTensor()
    ]),
]
TEST_CASE_RECURSIVE_2 = [
Example 8
def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu

    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    if args.rank == 0:
        configure(os.path.join('./exp', args.exp_name))

    # create model
    model_patch = moco.builder_v3.MoCo(Encoder, args.num_patch, args.moco_dim,
                                       args.moco_k_patch, args.moco_m,
                                       args.moco_t, args.mlp)

    model_graph = moco.builder_graph.MoCo(GraphNet, args.gpu, args.moco_dim,
                                          args.moco_k_graph, args.moco_m,
                                          args.moco_t, args.mlp)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model_patch.cuda(args.gpu)
            model_graph.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size_patch = int(args.batch_size_patch / ngpus_per_node)
            args.batch_size_graph = int(args.batch_size_graph / ngpus_per_node)
            args.workers_patch = int(
                (args.workers_patch + ngpus_per_node - 1) / ngpus_per_node)
            args.workers_graph = int(
                (args.workers_graph + ngpus_per_node - 1) / ngpus_per_node)
            model_patch = torch.nn.parallel.DistributedDataParallel(
                model_patch, device_ids=[args.gpu])
            model_graph = torch.nn.parallel.DistributedDataParallel(
                model_graph,
                device_ids=[args.gpu],
                find_unused_parameters=True)
        else:
            model_patch.cuda()
            model_graph.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model_patch = torch.nn.parallel.DistributedDataParallel(model_patch)
            model_graph = torch.nn.parallel.DistributedDataParallel(
                model_graph, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model_patch = model_patch.cuda(args.gpu)
        model_graph = model_graph.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer_patch = torch.optim.SGD(model_patch.parameters(),
                                      args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay)
    optimizer_graph = torch.optim.SGD(model_graph.parameters(),
                                      args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay)
    # save the initial model
    if not args.resume:
        save_checkpoint(
            {
                'epoch': 0,
                'arch': args.arch,
                'state_dict': model_patch.state_dict(),
                'optimizer': optimizer_patch.state_dict(),
            },
            is_best=False,
            filename=os.path.join(os.path.join('./exp', args.exp_name),
                                  'checkpoint_patch_init.pth.tar'))
        save_checkpoint(
            {
                'epoch': 0,
                'arch': args.arch,
                'state_dict': model_graph.state_dict(),
                'optimizer': optimizer_graph.state_dict(),
            },
            is_best=False,
            filename=os.path.join(os.path.join('./exp', args.exp_name),
                                  'checkpoint_graph_init.pth.tar'))

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint_patch = torch.load(args.resume)
                checkpoint_graph = torch.load(args.resume_graph)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint_patch = torch.load(args.resume, map_location=loc)
                checkpoint_graph = torch.load(args.resume_graph,
                                              map_location=loc)
            args.start_epoch = checkpoint_patch['epoch']
            model_patch.load_state_dict(checkpoint_patch['state_dict'])
            model_graph.load_state_dict(checkpoint_graph['state_dict'])
            optimizer_patch.load_state_dict(checkpoint_patch['optimizer'])
            optimizer_graph.load_state_dict(checkpoint_graph['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint_patch['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit()

    transform_re = Rand3DElastic(
        mode='bilinear',
        prob=1.0,
        sigma_range=(8, 12),
        magnitude_range=(0, 1024 + 240),  #[-1024, 240] -> [0, 1024+240]
        spatial_size=(32, 32, 32),
        translate_range=(12, 12, 12),
        rotate_range=(np.pi / 18, np.pi / 18, np.pi / 18),
        scale_range=(0.1, 0.1, 0.1),
        padding_mode='border',
        device=torch.device('cuda:' + str(args.gpu)))
    transform_rgn = RandGaussianNoise(prob=0.25, mean=0.0, std=50)
    transform_rac = RandAdjustContrast(prob=0.25)

    train_transforms = Compose([transform_rac, transform_rgn, transform_re])

    train_dataset_patch = COPD_dataset_patch(
        "train", args, moco.loader.TwoCropsTransform(train_transforms))
    train_dataset_graph = COPD_dataset_graph(
        "train", args, moco.loader.TwoCropsTransform(train_transforms))

    if args.distributed:
        train_sampler_patch = torch.utils.data.distributed.DistributedSampler(
            train_dataset_patch)
        train_sampler_graph = torch.utils.data.distributed.DistributedSampler(
            train_dataset_graph)
    else:
        train_sampler_patch = None
        train_sampler_graph = None

    train_loader_patch = torch.utils.data.DataLoader(
        train_dataset_patch,
        batch_size=args.batch_size_patch,
        shuffle=(train_sampler_patch is None),
        num_workers=args.workers_patch,
        pin_memory=True,
        sampler=train_sampler_patch,
        drop_last=True)
    train_loader_graph = torch.utils.data.DataLoader(
        train_dataset_graph,
        batch_size=args.batch_size_graph,
        shuffle=(train_sampler_graph is None),
        num_workers=args.workers_graph,
        pin_memory=True,
        sampler=train_sampler_graph,
        drop_last=True)

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler_patch.set_epoch(epoch)
        adjust_learning_rate(optimizer_patch, epoch, args)
        # train for one epoch
        train_patch(train_loader_patch, model_patch, criterion,
                    optimizer_patch, epoch, args)
        # save model for every epoch
        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model_patch.state_dict(),
                    'optimizer': optimizer_patch.state_dict(),
                },
                is_best=False,
                filename=os.path.join(
                    os.path.join('./exp', args.exp_name),
                    'checkpoint_patch_{:04d}.pth.tar'.format(epoch)))

        for sub_epoch in range(args.num_sub_epoch):
            if args.distributed:
                train_sampler_graph.set_epoch(args.num_sub_epoch * epoch +
                                              sub_epoch)
            adjust_learning_rate(optimizer_graph,
                                 args.num_sub_epoch * epoch + sub_epoch, args)
            train_graph(train_loader_graph, model_graph, model_patch,
                        criterion, optimizer_graph,
                        args.num_sub_epoch * epoch + sub_epoch, args)
            if not args.multiprocessing_distributed or (
                    args.multiprocessing_distributed
                    and args.rank % ngpus_per_node == 0):
                save_checkpoint(
                    {
                        'epoch': args.num_sub_epoch * epoch + sub_epoch + 1,
                        'arch': args.arch,
                        'state_dict': model_graph.state_dict(),
                        'optimizer': optimizer_graph.state_dict(),
                    },
                    is_best=False,
                    filename=os.path.join(
                        os.path.join('./exp', args.exp_name),
                        'checkpoint_graph_{:04d}.pth.tar'.format(
                            args.num_sub_epoch * epoch + sub_epoch)))
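# The snippet above also relies on a few helpers that are not shown on this page. Minimal
# sketches follow, assumed to match the reference MoCo recipe; the signatures and behaviour
# here are assumptions, not taken from this snippet:
import math
import shutil

import torch


class TwoCropsTransform:
    """Apply the same augmentation pipeline twice to one volume, producing a query/key pair."""

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, x):
        q = self.base_transform(x)
        k = self.base_transform(x)
        return [q, k]


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')


def adjust_learning_rate(optimizer, epoch, args):
    """Step or cosine learning-rate schedule, in the style of the reference MoCo code."""
    lr = args.lr
    if getattr(args, 'cos', False):
        lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    else:
        for milestone in getattr(args, 'schedule', []):
            lr *= 0.1 if epoch >= milestone else 1.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr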
Example 9

import unittest
import os
import shutil
import numpy as np
import tempfile
import nibabel as nib
from parameterized import parameterized
from monai.data import ArrayDataset
from monai.transforms import Compose, LoadNifti, AddChannel, RandAdjustContrast, Spacing

TEST_CASE_1 = [
    Compose([LoadNifti(image_only=True),
             AddChannel(),
             RandAdjustContrast()]),
    Compose([LoadNifti(image_only=True),
             AddChannel(),
             RandAdjustContrast()]),
    (0, 1),
    (1, 128, 128, 128),
]


class TestCompose(Compose):
    def __call__(self, input_):
        img, metadata = self.transforms[0](input_)
        img = self.transforms[1](img)
        img, _, _ = self.transforms[2](img, metadata["affine"])
        return self.transforms[3](img), metadata
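# A hedged usage sketch (not part of this page): how an (image_transform, seg_transform)
# pair such as TEST_CASE_1 above is typically consumed with ArrayDataset. The file names
# and the 128^3 volume size are assumptions taken from the expected (1, 128, 128, 128)
# shape in the test cases.
from torch.utils.data import DataLoader

img_transform, seg_transform = TEST_CASE_1[0], TEST_CASE_1[1]
with tempfile.TemporaryDirectory() as tempdir:
    nii = nib.Nifti1Image(np.random.rand(128, 128, 128), np.eye(4))
    img_name = os.path.join(tempdir, "img.nii.gz")
    seg_name = os.path.join(tempdir, "seg.nii.gz")
    nib.save(nii, img_name)
    nib.save(nii, seg_name)

    dataset = ArrayDataset([img_name], img_transform, [seg_name], seg_transform)
    loader = DataLoader(dataset, batch_size=1, num_workers=0)
    img_batch, seg_batch = next(iter(loader))  # each has shape (1, 1, 128, 128, 128)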