Example #1
 def get(self, image, snr=None, **kwargs):
     image[image < 0] = 0
     peak = np.max(image)
     rescale = snr**2 / peak
     noisy_image = Image(np.random.poisson(image * rescale) / rescale)
     noisy_image.properties = image.properties
     return noisy_image
Example #2
    def get(self, image, snr, background, **kwargs):
        image[image < 0] = 0

        peak = np.abs(np.max(image) - background)

        rescale = snr**2 / peak**2
        noisy_image = Image(np.random.poisson(image * rescale) / rescale)
        noisy_image.properties = image.properties
        return noisy_image
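
The rescaling above works because a Poisson variable with mean lam has standard deviation sqrt(lam): Example #1 scales the image so its peak corresponds to snr**2 expected counts, which makes the relative noise at the peak roughly 1/snr (Example #2 normalizes against the background-subtracted peak instead). A minimal standalone sketch of the idea, with an illustrative toy image and SNR (plain NumPy, no Image wrapper):

import numpy as np

# Toy image with peak value 10 and a target SNR of 20 at the peak (illustrative values).
image = np.linspace(0, 10, 256).reshape(16, 16)
snr = 20

peak = np.max(image)
rescale = snr**2 / peak                              # peak * rescale == snr**2 expected counts
noisy = np.random.poisson(image * rescale) / rescale

# Relative noise at the peak is approximately 1/snr = 0.05.
samples = np.random.poisson(np.full(10000, peak * rescale))
print(samples.std() / samples.mean())
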


## IMGAUG IMGCORRUPTLIKE
# Currently unavailable until there's a better way to handle restricted input datatypes (uint8 only)
# Please see https://github.com/aleju/imgaug/blob/master/imgaug/augmenters/imgcorruptlike.py
# for source implementation

# import imgaug.augmenters as iaa
# import deeptrack as dt
# import inspect

# def init_method(self, **kwargs):
#     dt.ImgAug.__init__(self, **kwargs)

# augs = inspect.getmembers(iaa.blur, lambda x: inspect.isclass(x))

# for augname, aug in augs:

#     print(augname, aug.__module__)

#     globals()[augname] = type(augname, (aug, dt.ImgAug), {
#         "augmenter": aug,
#         "__init__": init_method})
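
The commented-out loop builds one wrapper class per augmenter at runtime with type(). A minimal sketch of that pattern in isolation; Base and Blur below are illustrative stand-ins for dt.ImgAug and an imgaug augmenter class, not real deeptrack or imgaug names:

class Base:                          # stand-in for dt.ImgAug
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class Blur:                          # stand-in for an imgaug augmenter class
    pass

def init_method(self, **kwargs):
    Base.__init__(self, **kwargs)

# Dynamically create a subclass combining the augmenter with the base feature
# and register it under the augmenter's name, as the loop above does.
for augname, aug in [("Blur", Blur)]:
    globals()[augname] = type(augname, (aug, Base), {
        "augmenter": aug,
        "__init__": init_method,
    })

print(Blur(sigma=2).kwargs)          # {'sigma': 2}
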
Example #4
    def get(self, image, features=None, axis=None):

        image_list = [feature.resolve(image) for feature in features]

        merged_image = Image(np.concatenate(image_list, axis=axis))
        
        image = Image(image)
        num_properties = len(image.properties)
        
        merged_properties = image.properties

        for im in image_list:
            merged_properties += im.properties[num_properties:]

        merged_image.properties = merged_properties

        return merged_image
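
The get above resolves each sub-feature, concatenates the results along axis, and appends only the properties that each resolved image added beyond those already on the input. A rough sketch of that merge logic with plain NumPy arrays and property lists (the Image wrapper and feature resolution are omitted; names are illustrative):

import numpy as np

im_a, im_b = np.ones((4, 4, 1)), np.zeros((4, 4, 1))
props_a = [{"name": "input"}, {"name": "feature_a"}]
props_b = [{"name": "input"}, {"name": "feature_b"}]
num_properties = 1                                     # properties already on the input image

merged_image = np.concatenate([im_a, im_b], axis=-1)   # shape (4, 4, 2)
merged_properties = props_a[:num_properties]
for props in (props_a, props_b):
    merged_properties += props[num_properties:]        # skip the shared input properties

print(merged_image.shape, merged_properties)
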
Example #5
    def get(self,
            image,
            angle=None,
            axes=(1, 0),
            reshape=None,
            order=None,
            mode=None,
            cval=None,
            prefilter=None,
            **kwargs):

        new_image = Image(
            ndimage.rotate(image,
                           angle,
                           axes=axes,
                           reshape=reshape,
                           order=order,
                           mode=mode,
                           cval=cval,
                           prefilter=prefilter))
        new_image.properties = image.properties

        return new_image
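
This get is a thin wrapper around scipy.ndimage.rotate. A small usage sketch with the same keyword arguments; the values shown are scipy's defaults (plus an arbitrary 45-degree angle) and are purely illustrative:

import numpy as np
from scipy import ndimage

image = np.zeros((8, 8))
image[2:6, 3:5] = 1.0

rotated = ndimage.rotate(image,
                         angle=45,         # degrees
                         axes=(1, 0),      # plane of rotation
                         reshape=True,     # grow the output to fit the rotated array
                         order=3,          # spline interpolation order
                         mode="constant",  # how to fill outside the boundaries
                         cval=0.0,
                         prefilter=True)

print(image.shape, rotated.shape)
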
Example #6
def _create_volume(list_of_scatterers,
                   pad=(0, 0, 0, 0),
                   upscaled_output_region=(None, None, None, None),
                   refractive_index_medium=1.33,
                   upscale=1,
                   **kwargs):
    # Converts a list of scatterers into a volume.

    if not isinstance(list_of_scatterers, list):
        list_of_scatterers = [list_of_scatterers]

    volume = np.zeros((1, 1, 1), dtype=complex)
    limits = None
    OR = np.zeros((4, ))
    OR[0] = np.inf if upscaled_output_region[0] is None else int(
        upscaled_output_region[0] - pad[0])
    OR[1] = -np.inf if upscaled_output_region[1] is None else int(
        upscaled_output_region[1] - pad[1])
    OR[2] = np.inf if upscaled_output_region[2] is None else int(
        upscaled_output_region[2] + pad[2])
    OR[3] = -np.inf if upscaled_output_region[3] is None else int(
        upscaled_output_region[3] + pad[3])

    for scatterer in list_of_scatterers:

        position = _get_position(scatterer, mode="corner", return_z=True)

        if scatterer.get_property("intensity", None) is not None:
            scatterer_value = scatterer.get_property("intensity")
        elif scatterer.get_property("refractive_index", None) is not None:
            scatterer_value = scatterer.get_property(
                "refractive_index") - refractive_index_medium
        else:
            scatterer_value = scatterer.get_property("value")

        scatterer = scatterer * scatterer_value

        if limits is None:
            limits = np.zeros((3, 2), dtype=np.int32)
            limits[:, 0] = np.floor(position).astype(np.int32)
            limits[:, 1] = np.floor(position).astype(np.int32) + 1

        if (position[0] + scatterer.shape[0] < OR[0] or position[0] > OR[2]
                or position[1] + scatterer.shape[1] < OR[1]
                or position[1] > OR[3]):
            continue

        padded_scatterer = Image(
            np.pad(scatterer, [(2, 2), (2, 2), (2, 2)],
                   'constant',
                   constant_values=0))
        padded_scatterer.properties = scatterer.properties
        scatterer = padded_scatterer
        position = _get_position(scatterer, mode="corner", return_z=True)
        shape = np.array(scatterer.shape)

        if position is None:
            # Emit a warning (requires `import warnings`) and skip this scatterer.
            warnings.warn(
                "Optical device received an image without a position property. It will be ignored.",
                RuntimeWarning)
            continue

        splined_scatterer = np.zeros_like(scatterer)

        x_off = position[0] - np.floor(position[0])
        y_off = position[1] - np.floor(position[1])

        kernel = np.array([[0, 0, 0],
                           [0, (1 - x_off) * (1 - y_off), (1 - x_off) * y_off],
                           [0, x_off * (1 - y_off), x_off * y_off]])

        for z in range(scatterer.shape[2]):
            splined_scatterer[:, :, z] = convolve(scatterer[:, :, z],
                                                  kernel,
                                                  mode="constant")

        scatterer = splined_scatterer
        position = np.floor(position)
        new_limits = np.zeros(limits.shape, dtype=np.int32)
        for i in range(3):
            new_limits[i, :] = (
                np.min([limits[i, 0], position[i]]),
                np.max([limits[i, 1], position[i] + shape[i]]),
            )

        if not (np.array(new_limits) == np.array(limits)).all():
            new_volume = np.zeros(np.diff(new_limits,
                                          axis=1)[:, 0].astype(np.int32),
                                  dtype=complex)
            old_region = (limits - new_limits).astype(np.int32)
            limits = limits.astype(np.int32)
            new_volume[old_region[0, 0]:old_region[0, 0] + limits[0, 1] -
                       limits[0, 0], old_region[1, 0]:old_region[1, 0] +
                       limits[1, 1] - limits[1, 0],
                       old_region[2, 0]:old_region[2, 0] + limits[2, 1] -
                       limits[2, 0]] = volume
            volume = new_volume
            limits = new_limits

        within_volume_position = position - limits[:, 0]

        # NOTE: Maybe shouldn't be additive.
        volume[int(within_volume_position[0]):int(within_volume_position[0] +
                                                  shape[0]),
               int(within_volume_position[1]):int(within_volume_position[1] +
                                                  shape[1]),
               int(within_volume_position[2]):int(within_volume_position[2] +
                                                  shape[2])] += scatterer
    return volume, limits
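
Example #6 handles sub-pixel positions by convolving every z-slice with a 3x3 kernel whose nonzero entries are the bilinear weights built from x_off and y_off. A standalone sketch of that kernel acting on a single slice, assuming convolve is scipy.ndimage.convolve (as the mode="constant" argument suggests); the offsets are illustrative:

import numpy as np
from scipy.ndimage import convolve

slice_ = np.zeros((7, 7))
slice_[3, 3] = 1.0                        # unit scatterer at integer position (3, 3)

x_off, y_off = 0.25, 0.75                 # fractional part of the target position

kernel = np.array([[0, 0, 0],
                   [0, (1 - x_off) * (1 - y_off), (1 - x_off) * y_off],
                   [0, x_off * (1 - y_off), x_off * y_off]])

shifted = convolve(slice_, kernel, mode="constant")
print(shifted[3:5, 3:5])                  # unit mass spread over a 2x2 neighbourhood
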
Example #7
    def get(self, illuminated_volume, limits, fields, **kwargs):
        ''' Convolves the image with a pupil function
        '''
        # Pad volume
        padded_volume, limits = self._pad_volume(illuminated_volume,
                                                 limits=limits,
                                                 **kwargs)

        # Extract indexes of the output region
        pad = kwargs.get("padding", (0, 0, 0, 0))
        output_region = np.array(
            kwargs.get("upscaled_output_region", (None, None, None, None)))
        output_region[0] = None if output_region[0] is None else int(
            output_region[0] - limits[0, 0] - pad[0])
        output_region[1] = None if output_region[1] is None else int(
            output_region[1] - limits[1, 0] - pad[1])
        output_region[2] = None if output_region[2] is None else int(
            output_region[2] - limits[0, 0] + pad[2])
        output_region[3] = None if output_region[3] is None else int(
            output_region[3] - limits[1, 0] + pad[3])

        padded_volume = padded_volume[output_region[0]:output_region[2],
                                      output_region[1]:output_region[3], :]
        z_limits = limits[2, :]

        output_image = Image(np.zeros((*padded_volume.shape[0:2], 1)))

        index_iterator = range(padded_volume.shape[2])
        z_iterator = np.linspace(z_limits[0],
                                 z_limits[1],
                                 num=padded_volume.shape[2],
                                 endpoint=False)

        zero_plane = np.all(padded_volume == 0, axis=(0, 1), keepdims=False)
        # z_values = z_iterator[~zero_plane]

        volume = pad_image_to_fft(padded_volume, axes=(0, 1))

        voxel_size = kwargs['voxel_size']

        pupils = (self._pupil(
            volume.shape[:2], defocus=[1], include_aberration=False, **kwargs)
                  + self._pupil(volume.shape[:2],
                                defocus=[-z_limits[1]],
                                include_aberration=True,
                                **kwargs))

        pupil_step = np.fft.fftshift(pupils[0])

        if "illumination" in kwargs:
            light_in = np.ones(volume.shape[:2], dtype=complex)
            light_in = kwargs["illumination"].resolve(light_in, **kwargs)
            light_in = np.fft.fft2(light_in)
        else:
            light_in = np.zeros(volume.shape[:2], dtype=complex)
            light_in[0, 0] = light_in.size

        K = 2 * np.pi / kwargs["wavelength"]

        field_z = [_get_position(field, return_z=True)[-1] for field in fields]
        field_offsets = [
            field.get_property("offset_z", default=0) for field in fields
        ]

        z = z_limits[1]
        for i, z in zip(index_iterator, z_iterator):
            light_in = light_in * pupil_step

            to_remove = []
            for idx, fz in enumerate(field_z):
                if fz < z:
                    propagation_matrix = self._pupil(
                        fields[idx].shape,
                        defocus=[z - fz - field_offsets[idx] / voxel_size[-1]],
                        include_aberration=False,
                        **kwargs)[0]
                    propagation_matrix = propagation_matrix * np.exp(
                        1j * voxel_size[-1] * 2 * np.pi / kwargs["wavelength"]
                        * kwargs["refractive_index_medium"] * (z - fz))
                    light_in += np.fft.fft2(
                        fields[idx][:, :,
                                    0]) * np.fft.fftshift(propagation_matrix)
                    to_remove.append(idx)

            for idx in reversed(to_remove):
                fields.pop(idx)
                field_z.pop(idx)
                field_offsets.pop(idx)

            if zero_plane[i]:
                continue

            ri_slice = volume[:, :, i]
            light = np.fft.ifft2(light_in)
            light_out = light * np.exp(1j * ri_slice * voxel_size[-1] * K)
            light_in = np.fft.fft2(light_out)

        # Add remaining fields
        for idx, fz in enumerate(field_z):
            prop_dist = z - fz - field_offsets[idx] / voxel_size[-1]
            propagation_matrix = self._pupil(fields[idx].shape,
                                             defocus=[prop_dist],
                                             include_aberration=False,
                                             **kwargs)[0]
            propagation_matrix = propagation_matrix * np.exp(
                -1j * voxel_size[-1] * 2 * np.pi / kwargs["wavelength"] *
                kwargs["refractive_index_medium"] * prop_dist)
            light_in += np.fft.fft2(
                fields[idx][:, :, 0]) * np.fft.fftshift(propagation_matrix)

        light_in_focus = light_in * np.fft.fftshift(pupils[-1])

        output_image = np.fft.ifft2(
            light_in_focus)[:padded_volume.shape[0], :padded_volume.shape[1]]
        output_image = np.expand_dims(output_image, axis=-1)
        output_image = Image(output_image[pad[0]:-pad[2], pad[1]:-pad[3]])

        if not kwargs.get("return_field", False):
            output_image = np.square(np.abs(output_image))

        output_image.properties = illuminated_volume.properties

        return output_image
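
The z-loop above is a split-step propagation: the field is advanced one voxel at a time by a precomputed pupil/propagator in Fourier space, and the refractive-index contrast of each slice is applied as a phase factor exp(1j * dn * dz * K) in real space. A stripped-down sketch of a single step; the paraxial propagator below is a simplified stand-in for what self._pupil computes, and all parameter values are illustrative:

import numpy as np

n = 64
wavelength = 0.66e-6
dz = 0.1e-6                                # voxel size along z
pixel = 0.1e-6                             # voxel size in the plane
n_medium = 1.33
K = 2 * np.pi / wavelength

field = np.ones((n, n), dtype=complex)     # plane-wave input
ri_slice = np.zeros((n, n))
ri_slice[28:36, 28:36] = 0.05              # hypothetical refractive-index contrast

# Simplified paraxial free-space propagator over one voxel, in Fourier space.
fx = np.fft.fftfreq(n, d=pixel)
FX, FY = np.meshgrid(fx, fx, indexing="ij")
pupil_step = np.exp(-1j * np.pi * wavelength / n_medium * dz * (FX**2 + FY**2))

light_in = np.fft.fft2(field) * pupil_step           # advance one step in z
light = np.fft.ifft2(light_in)                       # back to real space
light_out = light * np.exp(1j * ri_slice * dz * K)   # accumulate the sample phase
light_in = np.fft.fft2(light_out)                    # ready for the next slice

print(np.abs(light_out).mean())
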
Example #8
    def get(self, illuminated_volume, limits, **kwargs):
        ''' Convolves the image with a pupil function
        '''
        # Pad volume
        padded_volume, limits = self._pad_volume(illuminated_volume,
                                                 limits=limits,
                                                 **kwargs)

        # Extract indexes of the output region
        pad = kwargs.get("padding", (0, 0, 0, 0))
        output_region = np.array(
            kwargs.get("upscaled_output_region", (None, None, None, None)))
        output_region[0] = None if output_region[0] is None else int(
            output_region[0] - limits[0, 0] - pad[0])
        output_region[1] = None if output_region[1] is None else int(
            output_region[1] - limits[1, 0] - pad[1])
        output_region[2] = None if output_region[2] is None else int(
            output_region[2] - limits[0, 0] + pad[2])
        output_region[3] = None if output_region[3] is None else int(
            output_region[3] - limits[1, 0] + pad[3])

        padded_volume = padded_volume[output_region[0]:output_region[2],
                                      output_region[1]:output_region[3], :]
        z_limits = limits[2, :]

        output_image = Image(np.zeros((*padded_volume.shape[0:2], 1)))

        index_iterator = range(padded_volume.shape[2])

        # Get planes in volume where not all values are 0.
        z_iterator = np.linspace(z_limits[0],
                                 z_limits[1],
                                 num=padded_volume.shape[2],
                                 endpoint=False)
        zero_plane = np.all(padded_volume == 0, axis=(0, 1), keepdims=False)
        z_values = z_iterator[~zero_plane]

        # Further pad image to speed up fft
        volume = pad_image_to_fft(padded_volume, axes=(0, 1))

        pupils = self._pupil(volume.shape[:2], defocus=z_values, **kwargs)
        pupil_iterator = iter(pupils)

        # Loop through the volume and convolve each plane with the pupil function
        for i, z in zip(index_iterator, z_iterator):

            if zero_plane[i]:
                continue

            image = volume[:, :, i]
            pupil = Image(next(pupil_iterator))

            psf = np.square(np.abs(np.fft.ifft2(np.fft.fftshift(pupil))))
            optical_transfer_function = np.fft.fft2(psf)

            fourier_field = np.fft.fft2(image)
            convolved_fourier_field = fourier_field * optical_transfer_function

            field = Image(np.fft.ifft2(convolved_fourier_field))

            # Discard remaining imaginary part (should be 0 up to rounding error)
            field = np.real(field)

            output_image[:, :, 0] += field[:padded_volume.
                                           shape[0], :padded_volume.shape[1]]

        output_image = output_image[pad[0]:-pad[2], pad[1]:-pad[3]]
        try:
            output_image.properties = illuminated_volume.properties + pupil.properties
        except UnboundLocalError:
            output_image.properties = illuminated_volume.properties

        return output_image
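
Each nonzero plane above is imaged incoherently: the pupil is turned into a PSF, the PSF into an OTF, and the plane is convolved with the PSF by multiplication in Fourier space. A compact sketch of that chain with a hypothetical circular pupil standing in for self._pupil:

import numpy as np

n = 128
f = np.fft.fftshift(np.fft.fftfreq(n))
FX, FY = np.meshgrid(f, f, indexing="ij")
pupil = (FX**2 + FY**2 < 0.2**2).astype(complex)      # illustrative circular aperture

plane = np.zeros((n, n))
plane[60:68, 60:68] = 1.0                              # small bright object

psf = np.square(np.abs(np.fft.ifft2(np.fft.fftshift(pupil))))
optical_transfer_function = np.fft.fft2(psf)

fourier_field = np.fft.fft2(plane)
field = np.real(np.fft.ifft2(fourier_field * optical_transfer_function))

print(plane.max(), field.max())                        # the blurred peak is lower
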
Example #9
def _create_volume(list_of_scatterers,
                   pad=(0, 0, 0, 0),
                   output_region=(None, None, None, None),
                   refractive_index_medium=1.33,
                   **kwargs):
    # Converts a list of scatterers into a volume.

    if not isinstance(list_of_scatterers, list):
        list_of_scatterers = [list_of_scatterers]
    volume = np.zeros((1, 1, 1), dtype=complex)

    # x, y, z limits of the volume
    limits = np.array([(0, 1), (0, 1), (0, 1)])

    OR = np.zeros((4, ))
    for scatterer in list_of_scatterers:

        position = _get_position(scatterer, mode="corner", return_z=True)

        if scatterer.get_property("intensity", None) is not None:
            scatterer_value = scatterer.get_property("intensity")
        elif scatterer.get_property("refractive_index", None) is not None:
            scatterer_value = scatterer.get_property(
                "refractive_index") - refractive_index_medium
        else:
            scatterer_value = scatterer.get_property("value")

        scatterer = scatterer * scatterer_value

        if limits is None:
            limits = np.zeros((3, 2))
            limits[:, 0] = np.round(position).astype(np.int32)
            limits[:, 1] = np.round(position).astype(np.int32) + 1

        OR[0] = np.inf if output_region[0] is None else int(output_region[0] -
                                                            limits[0, 0] -
                                                            pad[0])
        OR[1] = -np.inf if output_region[1] is None else int(output_region[1] -
                                                             limits[1, 0] -
                                                             pad[1])
        OR[2] = np.inf if output_region[2] is None else int(output_region[2] -
                                                            limits[0, 0] +
                                                            pad[2])
        OR[3] = -np.inf if output_region[3] is None else int(output_region[3] -
                                                             limits[1, 0] +
                                                             pad[3])

        if (position[0] + scatterer.shape[0] < OR[0] or position[0] > OR[2]
                or position[1] + scatterer.shape[1] < OR[1]
                or position[1] > OR[3]):
            continue

        padded_scatterer = Image(
            np.pad(scatterer, [(2, 2), (2, 2), (0, 0)],
                   'constant',
                   constant_values=0))
        padded_scatterer.properties = scatterer.properties
        scatterer = padded_scatterer

        position = _get_position(scatterer, mode="corner", return_z=True)
        shape = np.array(scatterer.shape)

        if position is None:
            # Emit a warning (requires `import warnings`) and skip this scatterer.
            warnings.warn(
                "Optical device received a feature without a position property. It will be ignored.",
                RuntimeWarning)
            continue

        x_pos = position[0] + np.arange(scatterer.shape[0])
        y_pos = position[1] + np.arange(scatterer.shape[1])

        target_x_pos = np.round(x_pos)
        target_y_pos = np.round(y_pos)

        splined_scatterer = np.zeros_like(scatterer)
        for z in range(scatterer.shape[2]):

            scatterer_spline = RectBivariateSpline(x_pos, y_pos,
                                                   np.real(scatterer[:, :, z]))
            splined_scatterer[1:-1, 1:-1,
                              z] = scatterer_spline(target_x_pos[1:-1],
                                                    target_y_pos[1:-1])

            if scatterer.dtype == complex:
                scatterer_spline = RectBivariateSpline(
                    x_pos, y_pos, np.imag(scatterer[:, :, z]))
                splined_scatterer[1:-1, 1:-1, z] += 1j * \
                    scatterer_spline(target_x_pos[1:-1], target_y_pos[1:-1])

        scatterer = splined_scatterer
        position = np.round(position)
        new_limits = np.zeros(limits.shape, dtype=np.int32)
        for i in range(3):
            new_limits[i, :] = (
                np.min([limits[i, 0], position[i]]),
                np.max([limits[i, 1], position[i] + shape[i]]),
            )

        if not (np.array(new_limits) == np.array(limits)).all():
            new_volume = np.zeros(np.diff(new_limits,
                                          axis=1)[:, 0].astype(np.int32),
                                  dtype=complex)
            old_region = (limits - new_limits).astype(np.int32)
            limits = limits.astype(np.int32)
            new_volume[old_region[0, 0]:old_region[0, 0] + limits[0, 1] -
                       limits[0, 0], old_region[1, 0]:old_region[1, 0] +
                       limits[1, 1] - limits[1, 0],
                       old_region[2, 0]:old_region[2, 0] + limits[2, 1] -
                       limits[2, 0]] = volume
            volume = new_volume
            limits = new_limits

        within_volume_position = position - limits[:, 0]

        # NOTE: Maybe shouldn't be additive.
        volume[int(within_volume_position[0]):int(within_volume_position[0] +
                                                  shape[0]),
               int(within_volume_position[1]):int(within_volume_position[1] +
                                                  shape[1]),
               int(within_volume_position[2]):int(within_volume_position[2] +
                                                  shape[2])] += scatterer

    return volume, limits
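
Example #9 handles sub-pixel placement by resampling each slice onto the integer grid with scipy.interpolate.RectBivariateSpline, splining real and imaginary parts separately. A minimal sketch of that resampling step for a single real-valued slice; the position is an illustrative non-integer corner coordinate:

import numpy as np
from scipy.interpolate import RectBivariateSpline

slice_ = np.zeros((9, 9))
slice_[4, 4] = 1.0

position = np.array([10.3, 20.7])                 # hypothetical corner position
x_pos = position[0] + np.arange(slice_.shape[0])  # actual sample coordinates
y_pos = position[1] + np.arange(slice_.shape[1])

target_x_pos = np.round(x_pos)                    # nearest integer grid
target_y_pos = np.round(y_pos)

scatterer_spline = RectBivariateSpline(x_pos, y_pos, slice_)
resampled = scatterer_spline(target_x_pos[1:-1], target_y_pos[1:-1])

print(resampled.shape)                            # (7, 7), the trimmed interior
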
Example #10
    def get(self, illuminated_volume, limits, **kwargs):
        ''' Convolves the image with a pupil function
        '''

        # Pad volume
        padded_volume, limits = self._pad_volume(illuminated_volume,
                                                 limits=limits,
                                                 **kwargs)

        # Extract indexes of the output region
        pad = kwargs.get("padding", (0, 0, 0, 0))
        output_region = np.array(
            kwargs.get("output_region", (None, None, None, None)))
        output_region[0] = None if output_region[0] is None else int(
            output_region[0] - limits[0, 0] - pad[0])
        output_region[1] = None if output_region[1] is None else int(
            output_region[1] - limits[1, 0] - pad[1])
        output_region[2] = None if output_region[2] is None else int(
            output_region[2] - limits[0, 0] + pad[2])
        output_region[3] = None if output_region[3] is None else int(
            output_region[3] - limits[1, 0] + pad[3])

        padded_volume = padded_volume[output_region[0]:output_region[2],
                                      output_region[1]:output_region[3], :]
        z_limits = limits[2, :]

        output_image = Image(np.zeros((*padded_volume.shape[0:2], 1)))

        index_iterator = range(padded_volume.shape[2])
        z_iterator = np.linspace(z_limits[0],
                                 z_limits[1],
                                 num=padded_volume.shape[2],
                                 endpoint=False)

        zero_plane = np.all(padded_volume == 0, axis=(0, 1), keepdims=False)
        # z_values = z_iterator[~zero_plane]

        volume = pad_image_to_fft(padded_volume, axes=(0, 1))

        voxel_size = kwargs['voxel_size']

        pupils = (self._pupil(
            volume.shape[:2], defocus=[1], include_aberration=False, **kwargs)
                  + self._pupil(volume.shape[:2],
                                defocus=[-z_limits[1]],
                                include_aberration=True,
                                **kwargs))

        pupil_step = np.fft.fftshift(pupils[0])

        if "illumination" in kwargs:
            light_in = np.ones(volume.shape[:2])
            light_in = kwargs["illumination"].resolve(light_in, **kwargs)
            light_in = np.fft.fft2(light_in)
        else:
            light_in = np.zeros(volume.shape[:2])
            light_in[0, 0] = light_in.size

        K = 2 * np.pi / kwargs["wavelength"]

        for i, z in zip(index_iterator, z_iterator):

            light_in = light_in * pupil_step

            if zero_plane[i]:
                continue

            ri_slice = volume[:, :, i]

            light = np.fft.ifft2(light_in)

            light_out = light * np.exp(1j * ri_slice * voxel_size[-1] * K)

            light_in = np.fft.fft2(light_out)

        light_in_focus = light_in * np.fft.fftshift(pupils[-1])

        output_image = np.fft.ifft2(
            light_in_focus)[:padded_volume.shape[0], :padded_volume.shape[1]]
        output_image = np.expand_dims(output_image, axis=-1)
        output_image = Image(output_image[pad[0]:-pad[2], pad[1]:-pad[3]])

        if not kwargs.get("return_field", False):
            output_image = np.square(np.abs(output_image))

        output_image.properties = illuminated_volume.properties

        return output_image
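
Examples #8 and #10 call pad_image_to_fft before entering the FFT loop; the point is that FFTs are fastest when each axis length factors into small primes. A hedged sketch of that idea using scipy.fft.next_fast_len (the actual deeptrack helper may choose the padded sizes differently):

import numpy as np
from scipy.fft import next_fast_len

volume = np.random.rand(509, 509, 3)               # 509 is prime, a slow FFT length

fast_shape = [next_fast_len(s) for s in volume.shape[:2]]
pad_widths = [(0, fast_shape[0] - volume.shape[0]),
              (0, fast_shape[1] - volume.shape[1]),
              (0, 0)]
padded = np.pad(volume, pad_widths, mode="constant")

print(volume.shape[:2], "->", padded.shape[:2])    # e.g. (509, 509) -> (512, 512)
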