Example #1
    def get_transforms(self):
        transforms = Compose()

        for transform in self.master_config:
            transforms.add(transform)

        return transforms
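
For orientation, here is a minimal, self-contained sketch of the Compose pattern all of these examples rely on: transforms passed to the constructor run in order, add() extends the pipeline, and the composed object is applied by calling it. Only calls that appear in the examples below are used; the input array is made up.

import numpy as np
from inferno.io.transform import Compose
from inferno.io.transform.generic import Normalize, Cast

pipeline = Compose(Normalize())            # transforms run in the order given
pipeline.add(Cast('float32'))              # pipelines can be extended incrementally
out = pipeline(np.random.rand(1, 64, 64))  # apply by calling the composed object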
Example #2
File: dataset.py  Project: funkelab/lisl
class DSBTestAugmentations(Dataset):

    def __init__(self,
                 dataset,
                 scale=1.,
                 output_shape=None):
        self.root_dataset = dataset
        self.scale = scale
        self.output_shape = output_shape
        self._test_transforms = None

    @property
    def test_transforms(self):
        if self._test_transforms is None:
            self._test_transforms = Compose(QuantileNormalize(apply_to=[0]),
                                            Scale(self.scale))
            if self.output_shape is not None:
                self._test_transforms.add(CenterCrop(self.output_shape))
                
        return self._test_transforms

    def __len__(self):
        return len(self.root_dataset)

    def __getitem__(self, index):

        x, y = self.root_dataset[index]
        y = y.astype(np.double)
        x, y = self.test_transforms(x, y)
        return x, y
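
A hypothetical way to use the class above; base_ds is a stand-in for any dataset yielding (image, label) pairs and is not part of the original code.

# Hypothetical usage of DSBTestAugmentations; `base_ds` is a stand-in for any
# dataset that returns (image, label) pairs.
test_ds = DSBTestAugmentations(base_ds, scale=0.5, output_shape=(256, 256))
x, y = test_ds[0]  # quantile-normalized, rescaled, center-cropped pair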
Example #3
File: dataset.py  Project: funkelab/lisl
 def test_transforms(self):
     if self._test_transforms is None:
         self._test_transforms = Compose(QuantileNormalize(apply_to=[0]),
                                         Scale(self.scale))
         if self.output_shape is not None:
             self._test_transforms.add(CenterCrop(self.output_shape))
             
     return self._test_transforms
Example #4
File: loader.py  Project: wolny/lsfm_utils
    def get_transforms(self):
        transforms = Compose(RandomFlip3D(), RandomRotate())

        # Elastic transforms can be skipped by setting elastic_transform to false in the
        # yaml config file.
        if self.master_config.get('elastic_transform'):
            elastic_transform_config = self.master_config.get(
                'elastic_transform')
            transforms.add(
                ElasticTransform(
                    alpha=elastic_transform_config.get('alpha', 2000.),
                    sigma=elastic_transform_config.get('sigma', 50.),
                    order=elastic_transform_config.get('order', 0)))

        for_validation = self.master_config.get('for_validation', False)
        # if we compute the affinities on the gpu, or use the feeder for validation only,
        # we don't need to add the affinity transform here
        if not for_validation:
            assert self.affinity_config is not None
            # we apply the affinity target calculation only to the segmentation (1)
            transforms.add(
                affinity_config_to_transform(apply_to=[1],
                                             **self.affinity_config))

        transforms.add(InvertAffinities(apply_to=[1]))

        return transforms
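
For reference, the shape of master_config this method expects can be reconstructed from the .get() calls above. A sketch; the only values taken from the code are its documented defaults:

# Illustrative config for the method above; keys mirror the .get() calls,
# and the elastic values are the defaults hard-coded in the code.
master_config = {
    'elastic_transform': {'alpha': 2000., 'sigma': 50., 'order': 0},
    'for_validation': False,  # True skips the affinity transform
}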
Example #5
File: master.py  Project: vzinche/neurofire
 def get_transforms(self):
     transforms = Compose(
         RandomFlip3D(),
         RandomRotate(),
         ElasticTransform(alpha=2000., sigma=50.),  # Hard coded for now
         AsTorchBatch(2))
     return transforms
Example #6
def test_full_pipeline():
    import h5py
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    from inferno.io.transform import Compose
    from inferno.io.transform.generic import Normalize, Cast, AsTorchBatch
    #tiktorch = TikTorch('/export/home/jhugger/sfb1129/test_configs_tiktorch/config/')
    tiktorch = TikTorch('/home/jo/config/')

    #with h5py.File('/export/home/jhugger/sfb1129/sample_C_20160501.hdf') as f:
    with h5py.File('/home/jo/sfb1129/sample_C_20160501.hdf') as f:
        cremi_raw = f['volumes']['raw'][:, 0:512, 0:512]

    transform = Compose(Normalize(), Cast('float32'))
    inputs = [transform(cremi_raw[i:i + 1]) for i in range(1)]

    halo = tiktorch.halo
    max_shape = tiktorch.dry_run([512, 512])

    print(f'Halo: {halo}')
    print(f'max_shape: {max_shape}')

    out = tiktorch.forward(inputs)

    return 0
Example #7
    def get_additional_transforms(self, master_config):
        transforms = self.transforms if self.transforms is not None else Compose()

        master_config = {} if master_config is None else master_config

        # Replicate and downscale batch:
        if master_config.get("downscale_and_crop") is not None:
            ds_config = master_config.get("downscale_and_crop")
            apply_to = [conf.pop('apply_to') for conf in ds_config]
            transforms.add(ReplicateTensorsInBatch(apply_to))
            for indx, conf in enumerate(ds_config):
                transforms.add(
                    DownSampleAndCropTensorsInBatch(apply_to=[indx],
                                                    order=None,
                                                    **conf))

        # crop invalid affinity labels and elastic-augmentation reflection padding asymmetrically
        crop_config = master_config.get('crop_after_target', {})
        if crop_config:
            # One might need to crop after elastic transform to avoid edge artefacts of affinity
            # computation being warped into the FOV.
            transforms.add(VolumeAsymmetricCrop(**crop_config))

        transforms.add(AsTorchBatch(3, add_channel_axis_if_necessary=True))

        return transforms
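
The downscale_and_crop entry consumed above is a list of per-tensor dicts: apply_to is popped out to drive ReplicateTensorsInBatch, and whatever remains is forwarded to DownSampleAndCropTensorsInBatch. A sketch of that layout; any keys beyond apply_to are assumptions, since the code forwards them blindly.

# Assumed layout for the 'downscale_and_crop' config read above; 'apply_to'
# is consumed by the loop, remaining keys are forwarded as **conf.
master_config = {
    'downscale_and_crop': [
        {'apply_to': 0},   # replicate tensor 0, then downscale/crop replica 0
        {'apply_to': 0},   # a second, differently processed replica
    ],
    'crop_after_target': {},  # empty/missing disables VolumeAsymmetricCrop
}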
Example #8
 def get_transforms(self):
     # no NEDT inversion for ISBI since labels give neuron rather than boundary probabilities
     transforms = Compose(
         NegativeExponentialDistanceTransform(gain=self.nedt_gain, invert=False),
         Cast(self.dtype)
     )
     return transforms
Example #9
 def get_transforms(self):
     transforms = Compose(
         Normalize(),
         # after normalize since raw data comes in uint8
         AdditiveGaussianNoise(sigma=.025),
         Cast(self.dtype))
     return transforms
Example #10
    def get_additional_transforms(self, transform_config):
        transforms = self.transforms if self.transforms is not None else Compose()

        stack_scaling_factors = transform_config["stack_scaling_factors"]

        # Replicate and downscale batch:
        num_inputs = len(stack_scaling_factors)
        input_indices = list(range(num_inputs))

        transforms.add(ReplicateBatch(num_inputs))
        inv_scaling_facts = deepcopy(stack_scaling_factors)
        inv_scaling_facts.reverse()
        for in_idx, dws_fact, crop_fact in zip(input_indices,
                                               stack_scaling_factors,
                                               inv_scaling_facts):
            transforms.add(
                DownsampleAndCrop3D(apply_to=[in_idx],
                                    order=2,
                                    zoom_factor=dws_fact,
                                    crop_factor=crop_fact))

        transforms.add(AsTorchBatch(3))

        return transforms
Example #11
File: raw.py  Project: vzinche/neurofire
 def get_transforms(self, mean, std, sigma, p_augment_ws,
                    zero_mean_unit_variance):
     transforms = Compose(Cast(self.dtype))
     # add normalization (zero mean / unit variance)
     if zero_mean_unit_variance:
         transforms.add(Normalize(mean=mean, std=std))
     else:
         transforms.add(Normalize01())
     # add noise transform if specified
     if sigma is not None:
         transforms.add(AdditiveNoise(sigma=sigma))
     # add watershed super-pixel augmentation if specified
     if p_augment_ws > 0.:
         assert WatershedAugmentation is not None
         transforms.add(WatershedAugmentation(p_augment_ws, invert=True))
     return transforms
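
A hypothetical invocation of the method above, to make the flag interplay concrete (loader and all argument values are made up):

# Hypothetical call: zero-mean/unit-variance normalization plus mild noise,
# watershed augmentation disabled.
transforms = loader.get_transforms(mean=0.5, std=0.1, sigma=0.025,
                                   p_augment_ws=0., zero_mean_unit_variance=True)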
Example #12
File: dataset.py  Project: funkelab/lisl
    def get_transforms(self):
        global_transforms = Compose(RandomRotate(),
                                    RandomTranspose(),
                                    RandomFlip(),
                                    # RandomGammaCorrection(),
                                    ElasticTransform(alpha=2000., sigma=50.))

        return global_transforms
Example #13
 def get_transforms(self):
     transforms = []
     if self.label_volume:
         transforms.append(ConnectedComponents3D())
     if self.binarize:
         transforms.append(BinarizeSegmentation())
     transforms.append(Cast(self.dtype))
     return Compose(*transforms)
Example #14
 def forward(self, image: NDArray) -> RPCFuture[NDArray]:
     # todo: do transform in separate thread
     transform = Compose(*[
         get_transform(name, **kwargs)
         for name, kwargs in self.test_transforms.items()
     ])
     return self.handler.forward(data=TikTensor(
         transform(image.as_numpy()).astype(numpy.float32),
         id_=image.id)).map(lambda val: NDArray(val.as_numpy(), id_=val.id))
Example #15
 def get_transforms(self):
     all_transforms = [RandomRotate()]
     if 'elastic_transform' in self.master_config:
         all_transforms.append(ElasticTransform(**self.master_config.get('elastic_transform',
                                                                         {})))
     if self.master_config.get('crop_after_elastic_transform', False):
     all_transforms.append(
         CenterCrop(**self.master_config.get('crop_after_elastic_transform')))
     all_transforms.append(AsTorchBatch(2))
     transforms = Compose(*all_transforms)
     return transforms
Example #16
    def get_transforms(self):
        transforms = Compose(RandomFlip3D(), RandomRotate())

        # Elastic transforms can be skipped by setting elastic_transform to false in the
        # yaml config file.
        if self.master_config.get('elastic_transform'):
            elastic_transform_config = self.master_config.get(
                'elastic_transform')
            transforms.add(
                ElasticTransform(
                    alpha=elastic_transform_config.get('alpha', 2000.),
                    sigma=elastic_transform_config.get('sigma', 50.),
                    order=elastic_transform_config.get('order', 0)))

        # TODO doesn't look like we have misalignment, so should be fine
        # if we do not use random slides
        # random slide augmentation
        if self.master_config.get('random_slides', False):
            assert False, "No random slides for now"
            output_shape = self.master_config.get('shape_after_slide', None)
            max_misalign = self.master_config.get('max_misalign', None)
            transforms.add(
                RandomSlide(output_image_size=output_shape,
                            max_misalign=max_misalign))

        # if we compute the affinities on the gpu, or use the feeder for validation only,
        # we don't need to add the affinity transform here
        if self.affinity_config is not None:
            # we apply the affinity target calculation only to the segmentation (1)
            transforms.add(
                affinity_config_to_transform(apply_to=[1],
                                             **self.affinity_config))

        # Next: crop invalid affinity labels and elastic-augmentation reflection padding asymmetrically
        crop_config = self.master_config.get('crop_after_target', {})
        if crop_config:
            # One might need to crop after elastic transform to avoid edge artefacts of affinity
            # computation being warped into the FOV.
            transforms.add(VolumeAsymmetricCrop(**crop_config))

        return transforms
Example #17
File: dataset.py  Project: funkelab/lisl
    def train_transforms(self):
        if self._train_transforms is None:
            self._train_transforms = Compose(RandomRotate(),
                                             RandomTranspose(),
                                             RandomFlip(),
                                             QuantileNormalize(apply_to=[0]),
                                             Scale(self.scale),
                                             # RandomGammaCorrection(),
                                             ElasticTransform(alpha=2000., sigma=50.),
                                             RandomCrop(self.output_shape))

        return self._train_transforms
Example #18
    def get_transforms(self):
        transforms = Compose(RandomFlip3D(), RandomRotate())

        # Elastic transforms can be skipped by
        # setting elastic_transform to false in the
        # yaml config file.
        if self.master_config.get('elastic_transform'):
            elastic_transform_config = self.master_config.get(
                'elastic_transform')
            transforms.add(
                ElasticTransform(
                    alpha=elastic_transform_config.get('alpha', 2000.),
                    sigma=elastic_transform_config.get('sigma', 50.),
                    order=elastic_transform_config.get('order', 0)))

        # random slide augmentation
        if self.master_config.get('random_slides', False):
            # TODO slide probability
            output_shape = self.master_config.get('shape_after_slide', None)
            max_misalign = self.master_config.get('max_misalign', None)
            transforms.add(
                RandomSlide(output_image_size=output_shape,
                            max_misalign=max_misalign))

        # affinity transforms for affinity targets
        # we apply the affinity target calculation only to the segmentation (1)
        assert self.affinity_config is not None
        transforms.add(
            affinity_config_to_transform(apply_to=[1], **self.affinity_config))

        # TODO: add transform for directional DT

        # crop invalid affinity labels and elastic-augmentation reflection padding asymmetrically
        crop_config = self.master_config.get('crop_after_target', {})
        if crop_config:
            # One might need to crop after elastic transform to avoid edge artefacts of affinity
            # computation being warped into the FOV.
            transforms.add(VolumeAsymmetricCrop(**crop_config))

        return transforms
Example #19
 def get_transforms(self):
     # The Segmentation2Affinities adds a channel dimension. Now depending on how many
     # orders were requested, we dispatch Segmentation2Affinities or
     # Segmentation2MultiOrderAffinities.
     transforms = Compose()
     # Cast to the right dtype
     transforms.add(Cast(self.dtype))
     # Run connected components to shuffle the labels
     transforms.add(ConnectedComponents3D(label_segmentation=True))
     # Make affinity maps
     transforms.add(
         Segmentation2MultiOrderAffinities(
             dim=self.affinity_dim,
             orders=pyu.to_iterable(self.affinity_order),
             add_singleton_channel_dimension=True,
             retain_segmentation=self.retain_segmentation))
     return transforms
Example #20
    def test_model(self):
        self.setUp()
        shape = self.handler.binary_dry_run([1250, 1250])
        transform = Compose(Normalize(), Cast('float32'))

        with h5py.File(
                '/export/home/jhugger/sfb1129/sample_C_20160501.hdf') as f:
            #with h5py.File('/home/jo/sfb1129/sample_C_20160501.hdf') as f:
            cremi_raw = f['volumes']['raw'][0:1, 0:shape[0], 0:shape[1]]

        input_tensor = torch.from_numpy(transform(cremi_raw[0:1]))
        out = self.handler.forward(torch.unsqueeze(input_tensor, 0))
        import scipy
        scipy.misc.imsave('/export/home/jhugger/sfb1129/tiktorch/out.jpg',
                          out[0, 0].data.cpu().numpy())
Example #21
def test_dunet():
    import h5py
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    from inferno.io.transform import Compose
    from inferno.io.transform.generic import Normalize, Cast, AsTorchBatch
    #tiktorch = TikTorch('/export/home/jhugger/sfb1129/test_configs_tiktorch/config/')
    tiktorch = TikTorch('/home/jo/config/')

    #with h5py.File('/export/home/jhugger/sfb1129/sample_C_20160501.hdf') as f:
    with h5py.File('/home/jo/sfb1129/sample_C_20160501.hdf') as f:
        cremi_raw = f['volumes']['raw'][:, 0:1024, 0:1024]

    transform = Compose(Normalize(), Cast('float32'))
    tikin_list = [TikIn([transform(cremi_raw[i:i + 1]) for i in range(1)])]
    inputs = [transform(cremi_raw[i:i + 1]) for i in range(2)]

    out = tiktorch.forward(inputs)
    return 0
Example #22
    def get_additional_transforms(self, master_config):
        transforms = self.transforms if self.transforms is not None else Compose()

        master_config = {} if master_config is None else master_config
        # TODO: somehow merge with the trainer loader...

        # Replicate and downscale batch:
        if master_config.get("downscale_and_crop") is not None:
            ds_config = master_config.get("downscale_and_crop")
            apply_to = [conf.pop('apply_to') for conf in ds_config]
            transforms.add(ReplicateBatchGeneralized(apply_to))
            for indx, conf in enumerate(ds_config):
                transforms.add(
                    DownsampleAndCrop3D(apply_to=[indx], order=None, **conf))

        # # # affinity transforms for affinity targets
        # # # we apply the affinity target calculation only to the segmentation (1)
        # if master_config.get("affinity_config") is not None:
        #     affs_config = master_config.get("affinity_config")
        #     global_kwargs = affs_config.pop("global", {})
        #     # TODO: define computed affs not in this way, but with a variable in config...
        #     nb_affs = len(affs_config)
        #     assert nb_affs == num_inputs
        #     # all_affs_kwargs = [deepcopy(global_kwargs) for _ in range(nb_affs)]
        #     for input_index in affs_config:
        #         affs_kwargs = deepcopy(global_kwargs)
        #         affs_kwargs.update(affs_config[input_index])
        #         transforms.add(affinity_config_to_transform(apply_to=[input_index+num_inputs], **affs_kwargs))

        # crop invalid affinity labels and elastic-augmentation reflection padding asymmetrically
        crop_config = master_config.get('crop_after_target', {})
        if crop_config:
            # One might need to crop after elastic transform to avoid edge artefacts of affinity
            # computation being warped into the FOV.
            transforms.add(VolumeAsymmetricCrop(**crop_config))

        transforms.add(AsTorchBatch(3, add_channel_axis_if_necessary=True))

        # transforms.add(CheckBatchAndChannelDim(3))

        return transforms
Example #23
    def test_model(self):
        self.setUp()
        # shape = self.handler.binary_dry_run([2000, 2000])
        transform = Compose(Normalize(), Cast("float32"))

        # with h5py.File('/export/home/jhugger/sfb1129/sample_C_20160501.hdf') as f:
        with h5py.File(
                "/export/home/jhugger/sfb1129/sample_C_20160501.hdf") as f:
            cremi_raw = f["volumes"]["raw"][0:1, 0:1248, 0:1248]

        input_tensor = torch.from_numpy(transform(cremi_raw[0:1]))
        input_tensor = torch.rand(1, 572, 572)  # NB: overrides the HDF5 input with random data
        print(torch.unsqueeze(input_tensor, 0).shape)
        out = self.handler.forward(torch.unsqueeze(input_tensor, 0))
        import scipy

        scipy.misc.imsave("/export/home/jhugger/sfb1129/tiktorch/out.jpg",
                          out[0, 0].data.cpu().numpy())
        scipy.misc.imsave("/home/jo/server/tiktorch/out.jpg",
                          out[0, 0].data.cpu().numpy())
Example #24
    def inferno_build_criterion(self):
        print("Building criterion")
        loss_kwargs = self.get("trainer/criterion/kwargs", {})
        # from vaeAffs.models.losses import EncodingLoss, PatchLoss, PatchBasedLoss, StackedAffinityLoss
        loss_name = self.get("trainer/criterion/loss_name",
                             "inferno.extensions.criteria.set_similarity_measures.SorensenDiceLoss")
        loss_config = {loss_name: loss_kwargs}

        criterion = create_instance(loss_config, self.CRITERION_LOCATIONS)
        transforms = self.get("trainer/criterion/transforms")
        if transforms is not None:
            assert isinstance(transforms, list)
            transforms_instances = []
            # Build transforms:
            for transf in transforms:
                transforms_instances.append(create_instance(transf, []))
            # Wrap criterion:
            criterion = LossWrapper(criterion, transforms=Compose(*transforms_instances))

        self._trainer.build_criterion(criterion)
        self._trainer.build_validation_criterion(criterion)
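
A sketch of the config section this method reads via slash-path keys. Only the loss_name default is taken from the code; the nested layout and the transforms entry (a list of specs consumed by create_instance) are assumptions.

# Assumed 'trainer/criterion' config consumed above; only the loss_name
# default comes from the code, everything else is illustrative.
config = {
    'trainer': {'criterion': {
        'loss_name': 'inferno.extensions.criteria.set_similarity_measures.SorensenDiceLoss',
        'kwargs': {},
        'transforms': None,  # or a list of transform specs for LossWrapper
    }}
}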
Example #25
File: master.py  Project: vzinche/neurofire
 def get_transforms(self):
     transforms = Compose(RandomFlip3D(), RandomRotate())
     if 'elastic_transform' in self.master_config:
         # Elastic transforms can be skipped by setting elastic_transform to false in the
         # yaml config file.
         if self.master_config.get('elastic_transform'):
             elastic_transform_config = self.master_config.get(
                 'elastic_transform')
             transforms.add(
                 ElasticTransform(
                     alpha=elastic_transform_config.get('alpha', 2000.),
                     sigma=elastic_transform_config.get('sigma', 50.),
                     order=elastic_transform_config.get('order', 1)))
     else:
         # Preserve legacy behaviour
         transforms.add(ElasticTransform(alpha=2000., sigma=50.))
     if self.master_config.get('crop_after_elastic_transform', False):
         # One might need to crop after elastic transform to avoid edge artefacts of affinity
         # computation being warped into the FOV.
         transforms.add(
             CenterCrop(**self.master_config.get(
                 'crop_after_elastic_transform', {})))
     return transforms
Example #26
File: raw.py  Project: vzinche/neurofire
 def get_transforms(self, mean, std):
     transforms = Compose(Cast(self.dtype), Normalize(mean=mean, std=std))
     return transforms
Example #27
 def get_transforms(self):
     transforms = Compose(
         Segmentation2Membranes(dtype=self.dtype),
         NegativeExponentialDistanceTransform(gain=self.nedt_gain),
         Cast(self.dtype))
     return transforms
Example #28
 def get_transforms(self):
     if self.apply_on_image:
         transforms = Compose(ConnectedComponents2D(), Cast(self.dtype))
     else:
         transforms = Compose(ConnectedComponents3D(), Cast(self.dtype))
     return transforms
Example #29
 def get_transforms(self):
     transforms = Compose(ConnectedComponents3D(label_segmentation=True),
                          Cast(self.dtype))
     return transforms
Example #30
 def get_transforms(self):
     if self.label_volume:
         transforms = Compose(ConnectedComponents3D(), Cast(self.dtype))
     else:
         transforms = Cast(self.dtype)
     return transforms