Code example #1
File: test_bal_image.py Project: pradal/timagetk
 def test_equality(self):
     shape, dtype = (2, 3, 4), np.uint16
     sp_ref = SpatialImage(np.zeros(shape, dtype))
     bal_ref = BalImage(sp_ref)
     # Same matrix
     bal_image_1 = BalImage(sp_ref)
     assert bal_ref == bal_image_1
     assert (bal_ref != bal_image_1) is False
     # Different shape
     zero = SpatialImage(np.zeros((4, 3, 2), dtype))
     bal_image_2 = BalImage(zero)
     assert bal_ref != bal_image_2
     # Different values
     one = SpatialImage(np.ones(shape, dtype))
     bal_image_3 = BalImage(one)
     assert bal_ref != bal_image_3
     # Different resolution
     zero = SpatialImage(np.zeros(shape, dtype), voxelsize=[0.5, 0.5, 0.5])
     bal_image_4 = BalImage(zero)
     assert bal_ref != bal_image_4
     # Different dtype
     zero_uint8 = SpatialImage(np.zeros(shape, dtype=np.uint8))
     bal_image_5 = BalImage(zero_uint8)
     assert bal_ref.to_spatial_image().dtype == np.uint16
     assert bal_image_5.to_spatial_image().dtype == np.uint8
     assert bal_ref != bal_image_5
Code example #2
def membrane_image_segmentation(img, gaussian_sigma=0.75, h_min=None, segmentation_gaussian_sigma=0.5, volume_threshold=None):
    """Seeded watershed segmentation of a membrane-marker intensity image."""

    voxelsize = np.array(img.voxelsize)

    if h_min is None:
        h_min = 2 if img.dtype==np.uint8 else 1000

    smooth_image = nd.gaussian_filter(img, sigma=gaussian_sigma / voxelsize).astype(img.dtype)
    smooth_img = SpatialImage(smooth_image, voxelsize=voxelsize)

    ext_img = h_transform(smooth_img, h=h_min, method='min')

    seed_img = region_labeling(ext_img, low_threshold=1, high_threshold=h_min, method='connected_components')

    seg_smooth_image = nd.gaussian_filter(img, sigma=segmentation_gaussian_sigma / voxelsize).astype(img.dtype)
    seg_smooth_img = SpatialImage(seg_smooth_image, voxelsize=voxelsize)

    seg_img = segmentation(seg_smooth_img, seed_img, control='first', method='seeded_watershed')

    if volume_threshold is not None:
        seg_volumes = dict(zip(np.arange(seg_img.max()) + 1,
                               nd.sum(np.prod(voxelsize) * np.ones_like(seg_img),
                                      seg_img,
                                      index=np.arange(seg_img.max()) + 1)))

        labels_to_remove = np.array(list(seg_volumes.keys()))[np.array(list(seg_volumes.values())) > volume_threshold]
        print("--> Removing too large labels :", labels_to_remove)
        for l in labels_to_remove:
            seg_img[seg_img == l] = 1

    return seg_img
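
A minimal usage sketch (not from the original project) for the function above; the file name is a placeholder and the definition plus its timagetk dependencies (SpatialImage, h_transform, region_labeling, segmentation) are assumed to be in scope.

# Hypothetical usage of membrane_image_segmentation (defined above).
from timagetk.components import imread

img = imread('membrane_channel.inr')          # placeholder intensity image
seg_img = membrane_image_segmentation(img,
                                       gaussian_sigma=0.75,
                                       h_min=None,               # auto: 2 for uint8, 1000 otherwise
                                       volume_threshold=10000.)  # relabel very large regions as 1
print(seg_img.shape, seg_img.dtype)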
Code example #3
    def test_get_methods(self):

        tmp_arr = np.ones((5, 5), dtype=np.uint8)
        vox = [1.0, 1.0]
        orig = [0, 0]
        img = SpatialImage(tmp_arr)
        metadata = {
            'dim': 2,
            'extent': [5.0, 5.0],
            'shape': (5, 5),
            'type': 'uint8',
            'voxelsize': [1.0, 1.0],
            'origin': [0, 0],
            'max': 1,
            'mean': 1.0,
            'min': 1
        }

        self.assertEqual(img.get_type(), tmp_arr.dtype)
        self.assertEqual(img.get_voxelsize(), vox)
        self.assertEqual(img.get_shape(), tmp_arr.shape)
        self.assertEqual(img.get_origin(), orig)
        self.assertEqual(img.get_extent(),
                         [tmp_arr.shape[0], tmp_arr.shape[1]])
        self.assertEqual(img.get_dim(), 2)
        self.assertDictEqual(img.get_metadata(), metadata)
Code example #4
def read_image(im_fname, channel_names=None, pattern='..CZXY.'):
    """
    Read CZI, LSM, TIF and INR images based on the 'im_fname' extension.

    Parameters
    ----------
    im_fname : str
        filename of the image to read.
    channel_names : list(str), optional
        list of channel names to use if im_fname is a multi-channel image
    pattern : str, optional
        CZI data ordering pattern, often '..CZXY.' or '.C.ZXY.'

    Returns
    -------
    im : SpatialImage|dict(SpatialImage)
        SpatialImage, or dictionary of SpatialImage if 'im_fname' is a multi-channel image
    """
    if im_fname.endswith(".inr") or im_fname.endswith(".inr.gz"):
        im = imread(im_fname)
    elif im_fname.endswith(".tif"):
        im = imread(im_fname)
    elif im_fname.endswith(".lsm"):
        im = read_lsm(im_fname)
        if isinstance(im, dict):
            im = {
                k: SpatialImage(ch, voxelsize=ch.voxelsize)
                for k, ch in im.items()
            }
            if channel_names is not None:
                im = replace_channel_names(im, channel_names)
        else:
            im = SpatialImage(im, voxelsize=im.voxelsize)
    elif im_fname.endswith(".czi"):
        im = read_czi(im_fname, channel_names, pattern=pattern)
        # try:
        #     im2 = read_czi(im_fname)
        #     assert isinstance(im2, dict)
        # except:
        #     del im2
        # else:
        #     im = im2
        for k, ch in im.items():
            if not isinstance(ch, SpatialImage):
                im[k] = SpatialImage(ch, voxelsize=ch.voxelsize)
    else:
        raise TypeError("Unknown reader for file '{}'".format(im_fname))
    return im
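
A short, hedged usage sketch for read_image above; the file names and channel list are placeholders.

# Hypothetical usage of read_image (defined above); file names are placeholders.
im = read_image('sample_PI.inr.gz')            # single-channel file -> SpatialImage
czi = read_image('sample.czi',
                 channel_names=['DIIV', 'PIN1', 'PI', 'TagBFP', 'CLV3'])
if isinstance(czi, dict):                      # multi-channel file -> dict of SpatialImage
    membrane_img = czi['PI']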
Code example #5
def signal_subtraction(img2seg, img2sub):
    """
    Performs SpatialImage subtraction.

    Parameters
    ----------
    img2seg : SpatialImage
        image to segment.
    img2sub : SpatialImage
        image to subtract from the image to segment.
    """
    vxs = img2seg.voxelsize
    ori = img2seg.origin
    md = img2seg.metadata

    try:
        assert np.allclose(img2seg.shape, img2sub.shape)
    except AssertionError:
        raise ValueError("Input images do not have the same shape!")
    # img2sub = morphology(img2sub, method='erosion', radius=3.)
    tmp_im = img2seg - img2sub
    tmp_im[img2seg <= img2sub] = 0
    img2seg = SpatialImage(tmp_im, voxelsize=vxs, origin=ori, metadata_dict=md)

    return img2seg
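
A minimal usage sketch for signal_subtraction above: both arguments must be SpatialImage instances with the same shape; the file names are placeholders.

# Hypothetical usage of signal_subtraction (defined above).
from timagetk.components import imread

img2seg = imread('sample_PI.inr')     # membrane channel to segment (placeholder)
img2sub = imread('sample_CLV3.inr')   # leaking signal to subtract (placeholder)
cleaned = signal_subtraction(img2seg, img2sub)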
Code example #6
def segmentation_surface_topomesh(seg_img,
                                  background=1,
                                  resampling_voxelsize=1.,
                                  maximal_length=5.,
                                  microscope_orientation=None,
                                  compute_curvature=False):

    if microscope_orientation is None:
        microscope_orientation = 1 - 2 * (np.mean(seg_img[:, :, 0]) > np.mean(
            seg_img[:, :, -1]))

    binary_img = SpatialImage(nd.gaussian_filter(
        255 * (seg_img != background),
        sigma=1. / np.array(seg_img.voxelsize)).astype(np.uint8),
                              voxelsize=seg_img.voxelsize)

    if resampling_voxelsize is not None:
        binary_img = isometric_resampling(binary_img,
                                          method=resampling_voxelsize,
                                          option='linear')
    else:
        resampling_voxelsize = np.array(binary_img.voxelsize)

    surface_topomesh = nuclei_image_surface_topomesh(
        binary_img,
        nuclei_sigma=resampling_voxelsize,
        density_voxelsize=resampling_voxelsize,
        maximal_length=maximal_length,
        intensity_threshold=64,
        padding=False,
        decimation=100)

    surface_topomesh = up_facing_surface_topomesh(
        surface_topomesh,
        normal_method='orientation',
        upwards=microscope_orientation == 1,
        down_facing_threshold=0)

    compute_topomesh_property(surface_topomesh,
                              'normal',
                              2,
                              normal_method='orientation')
    compute_topomesh_vertex_property_from_faces(surface_topomesh,
                                                'normal',
                                                neighborhood=3,
                                                adjacency_sigma=1.2)

    if compute_curvature:
        curvature_properties = [
            'mean_curvature', 'gaussian_curvature', 'principal_curvature_min',
            'principal_curvature_max'
        ]
        compute_topomesh_property(surface_topomesh, 'mean_curvature', 2)
        for property_name in curvature_properties:
            compute_topomesh_vertex_property_from_faces(surface_topomesh,
                                                        property_name,
                                                        neighborhood=3,
                                                        adjacency_sigma=1.2)

    return surface_topomesh
Code example #7
def binary_dilation(image, iterations=DEF_ITERS, connectivity=DEF_CONNECT):
    """Morpholocial dilation on binary image.

    Parameters
    ----------
    image : SpatialImage
        input image to transform
    iterations : int, optional
        number of iterations to perform with the structuring element, default is 1
    connectivity : int, optional
        use it to override the default 'sphere' structuring element; the default
        is equivalent to 'connectivity=18'

    Returns
    -------
    SpatialImage
        transformed image with its metadata
    """
    ori = image.origin
    vxs = image.voxelsize
    md = image.metadata
    struct = ndimage.generate_binary_structure(3, connectivity)
    out_img = ndimage.binary_dilation(image.get_array(),
                                      structure=struct,
                                      iterations=iterations)
    return SpatialImage(out_img, origin=ori, voxelsize=vxs, metadata=md)
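
A minimal usage sketch for binary_dilation above, assuming the module-level defaults DEF_ITERS and DEF_CONNECT exist as implied by the signature; connectivity=2 is scipy's 18-neighbourhood structuring element in 3D.

# Hypothetical usage of binary_dilation (defined above).
import numpy as np
from timagetk.components import SpatialImage

mask = np.zeros((30, 30, 10), dtype=np.uint8)
mask[10:20, 10:20, 3:7] = 1                        # small binary box
bin_img = SpatialImage(mask, voxelsize=[1., 1., 1.])
dilated = binary_dilation(bin_img, iterations=2, connectivity=2)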
Code example #8
File: create_images.py Project: gcerutti/SamMaps
def create_two_label_image(labA=2, labB=3, x=30, y=30, z=10, vxs=(1., 1., 1.)):
    """
    Create a labelled image (SpatialImage) with two labels splitting it in two
    equal parts (along the x-axis).
    The image shape is given by 'x', 'y' & 'z'.
    Use an unsigned 8-bit encoding.

    Parameters
    ----------
    labA : int, optional
        label A
    labB : int, optional
        label B
    x : int, optional
        shape along the x dimension
    y : int, optional
        shape along the y dimension
    z : int, optional
        shape along the z dimension
    vxs : tuple|list|np.array
        voxelsize of the returned SpatialImage

    Returns
    -------
    im : SpatialImage
        the labelled image
    """
    i = int(x / 2.)
    arr = np.zeros((x, y, z), dtype=np.uint8)
    arr[0:i, :, :] = labA
    arr[i:, :, :] = labB
    im = SpatialImage(arr, voxelsize=vxs, origin=(0, 0, 0))
    return im
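
A minimal usage sketch for create_two_label_image above.

# Hypothetical usage of create_two_label_image (defined above).
import numpy as np

im = create_two_label_image(labA=2, labB=3, x=30, y=30, z=10, vxs=(0.5, 0.5, 1.))
print(im.shape)        # (30, 30, 10)
print(np.unique(im))   # [2 3]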
Code example #9
File: vt_image.py Project: VirtualPlants/timagetk
    def __init__(self, sp_img):
        """
        VT_Image constructor

        Parameters
        ----------
        :param *SpatialImage* sp_img: SpatialImage instance --- image and metadata
        """
        if not isinstance(sp_img, SpatialImage):
            print('Warning : sp_img is not a SpatialImage instance')
            sp_img = SpatialImage(sp_img)

        if sp_img.get_dim()==2: # 2D management
            sp_img = sp_img.to_3D()

        self._data = sp_img
        self.vt_image = sp_img_to_vt_img(sp_img)
        libvt.VT_AllocArrayImage(pointer(self.vt_image))
Code example #10
File: vt_image.py Project: pradal/timagetk
    def __init__(self, sp_img):
        """
        VT_Image constructor

        Parameters
        ----------
        :param *SpatialImage* sp_img: SpatialImage instance --- image and metadata
        """
        if not isinstance(sp_img, SpatialImage):
            print('Warning : sp_img is not a SpatialImage instance')
            sp_img = SpatialImage(sp_img)

        if sp_img.get_dim() == 2:  # 2D management
            sp_img = sp_img.to_3D()

        self._data = sp_img
        self.vt_image = sp_img_to_vt_img(sp_img)
        libvt.VT_AllocArrayImage(pointer(self.vt_image))
Code example #11
File: vt_image.py Project: pradal/timagetk
def vt_img_to_sp_img(vt_image):
    """
    _VT_IMAGE structure to SpatialImage
    """
    dt = vt_type_to_c_type(vt_image.type)
    x, y, z, v = vt_image.dim.x, vt_image.dim.y, vt_image.dim.z, vt_image.dim.v
    size = x * y * z * v
    vx, vy, vz = vt_image.siz.x, vt_image.siz.y, vt_image.siz.z
    _ct_array = (dt * size).from_address(vt_image.buf)
    _np_array = np.ctypeslib.as_array(_ct_array)
    # -- This used to be  arr =  np.array(_np_array.reshape(z,x,y).transpose(2,1,0)).
    # but that is wrong. first the shape is x,y,z. Then the transposition
    # doesn't fix the byte ordering which for some reason must be read in
    # Fortran order --
    out_arr = np.array(_np_array.reshape(x, y, z, order="F"))
    out_sp_img = SpatialImage(out_arr, voxelsize=[vx, vy, vz])
    if 1 in out_sp_img.get_shape():  # 2D management
        out_sp_img = out_sp_img.to_2D()
    return out_sp_img
Code example #12
File: vt_image.py Project: VirtualPlants/timagetk
def vt_img_to_sp_img(vt_image):
    """
    _VT_IMAGE structure to SpatialImage
    """
    dt = vt_type_to_c_type(vt_image.type)
    x, y, z, v = vt_image.dim.x, vt_image.dim.y, vt_image.dim.z, vt_image.dim.v
    size = x * y * z * v
    vx, vy, vz = vt_image.siz.x, vt_image.siz.y, vt_image.siz.z
    _ct_array = (dt * size).from_address(vt_image.buf)
    _np_array = np.ctypeslib.as_array(_ct_array)
    # -- This used to be  arr =  np.array(_np_array.reshape(z,x,y).transpose(2,1,0)).
    # but that is wrong. first the shape is x,y,z. Then the transposition
    # doesn't fix the byte ordering which for some reason must be read in
    # Fortran order --
    out_arr = np.array(_np_array.reshape(x, y, z, order="F"))
    out_sp_img = SpatialImage(out_arr, voxelsize=[vx, vy, vz])
    if 1 in out_sp_img.get_shape(): # 2D management
        out_sp_img = out_sp_img.to_2D()
    return out_sp_img
Code example #13
def tissue_layer_images(tissue):
    assert tissue.label.get_property('layer') is not None

    cell_layer = array_dict(tissue.label.get_property('layer'))
    cell_layer[1] = 0

    seg_img = tissue.label.image

    all_layer_img = LabelledImage(SpatialImage(array_dict(cell_layer).values(
        seg_img.get_array()),
                                               voxelsize=seg_img.voxelsize),
                                  no_label_id=0)

    layer_images = {}
    for layer in [1, 2, 3, 4]:
        layer_images[layer] = LabelledImage(SpatialImage(
            (all_layer_img.get_array() > layer - 1).astype(np.uint8),
            voxelsize=seg_img.voxelsize),
                                            no_label_id=0)

    return layer_images
Code example #14
File: vt_image.py Project: pradal/timagetk
def new_vt_image(sp_img_in, dtype=None):
    """
    """
    if isinstance(sp_img_in, SpatialImage):
        if dtype is None:
            dtype = sp_img_in.dtype
        sp_img_out = SpatialImage(np.zeros((sp_img_in.shape), dtype=dtype),
                                  voxelsize=sp_img_in.get_voxelsize())
        vt_res = VT_Image(sp_img_out)
        return vt_res
    else:
        print('sp_img_in is not a SpatialImage instance')
        return
Code example #15
File: create_images.py Project: gcerutti/SamMaps
def create_two_sided_intensity_image(x=30,
                                     y=30,
                                     z=10,
                                     vxs=(1., 1., 1.),
                                     max_sig_dist=8.,
                                     signal_dist_func='linear'):
    """
    Create a two sided intensity image with a separation splitting the image in
    two equal parts (along the x-axis).
    The image shape is given by 'x', 'y' & 'z'.
    Use an unsigned 8-bit encoding.

    Parameters
    ----------
    x : int, optional
        shape along the x dimension
    y : int, optional
        shape along the y dimension
    z : int, optional
        shape along the z dimension
    vxs : tuple|list|np.array
        voxelsize of the returned SpatialImage
    max_sig_dist : float
        max distance to the membrane, in real units (i.e. depends on 'vxs'), at
        which there is no more signal (signal intensity = 0)
    signal_dist_func : str
        string matching the name of the function computing the signal decrease as
        a function of the distance to the membrane (the middle of the image here).
    """
    i = int(x / 2.)
    arr = np.zeros((x, y, z), dtype=np.uint8)
    # - Define the decreasing signal values according to membrane distance
    vox_dist = int(
        max_sig_dist /
        vxs[0])  # we split the image along the x axis, hence the x-voxelsize!
    try:
        assert vox_dist < i
    except AssertionError:
        raise ValueError(
            "Parameters 'max_sig_dist' is too big (ie. at {}, but should be < {})"
            .format(max_sig_dist, i * vxs[0]))

    decrease_sig2dist = signal2dist(vox_dist, signal_dist_func)

    # - Add these values to the intensity image:
    for d in range(vox_dist):
        arr[(i - 1) - d, :, :] = arr[i + d, :, :] = decrease_sig2dist[d]

    return SpatialImage(arr, voxelsize=vxs, origin=(0, 0, 0))
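
A minimal usage sketch for create_two_sided_intensity_image above; it assumes the signal2dist helper used by the function is importable from the same create_images.py module.

# Hypothetical usage of create_two_sided_intensity_image (defined above).
im = create_two_sided_intensity_image(x=30, y=30, z=10, vxs=(1., 1., 1.),
                                      max_sig_dist=8., signal_dist_func='linear')
print(im.shape, im.dtype)   # (30, 30, 10), uint8; brightest around the x mid-plane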
Code example #17
File: resample.py Project: pradal/timagetk
def subsample(image, factor=[2, 2, 1], option='gray'):
    """
    Subsample a *SpatialImage* (2D/3D, grayscale/label)

    Parameters
    ----------
    :param *SpatialImage* image: input *SpatialImage*

    :param list factor: subsampling factor for each dimension, e.g. [2, 2, 1]

    :param str option: option can be either 'gray' or 'label'

    Returns
    ----------
    :return: *SpatialImage* output image

    Example
    -------
    >>> output_image = subsample(input_image)
    """
    poss_opt = ['gray', 'label']
    if option not in poss_opt:
        option = 'gray'


    if isinstance(image, SpatialImage) and image.get_dim() in [2, 3]:
        if image.get_dim()==2:
            image = image.to_3D()
            factor.append(1)

        shape, extent = image.get_shape(), image.get_extent()
        new_shape = [int(np.ceil(shape[ind]/factor[ind])) for ind in range(image.get_dim())]
        new_vox = [extent[ind]/new_shape[ind] for ind in range(image.get_dim())]
        tmp_img = np.zeros((new_shape[0],new_shape[1],new_shape[2]),dtype=image.dtype)
        tmp_img = SpatialImage(tmp_img, voxelsize=new_vox)
        if option=='gray':
            param_str_2 = '-resize -interpolation linear'
        elif option=='label':
            param_str_2 = '-resize -interpolation nearest'

        out_img = apply_trsf(image, bal_transformation=None, template_img=tmp_img, param_str_2=param_str_2)
        if 1 in out_img.get_shape():
            out_img = out_img.to_2D()
        return out_img
    else:
        raise TypeError('Input image must be a SpatialImage')
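
A minimal usage sketch for subsample above, assuming apply_trsf and its VT bindings are available alongside the function; the input array is synthetic.

# Hypothetical usage of subsample (defined above).
import numpy as np
from timagetk.components import SpatialImage

arr = np.random.randint(0, 255, size=(64, 64, 16)).astype(np.uint8)
img = SpatialImage(arr, voxelsize=[0.2, 0.2, 1.0])
small = subsample(img, factor=[2, 2, 1], option='gray')   # halve x/y sampling
print(small.get_shape(), small.get_voxelsize())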
Code example #18
File: resample.py Project: pradal/timagetk
def resample_isotropic(image, voxelsize, option='gray'):
    """
    Resample into an isotropic dataset

    Parameters
    ----------
    :param *SpatialImage* image: *SpatialImage*, 3D input image

    :param float voxelsize: voxelsize value

    :param str option: option can be either 'gray' or 'label'

    Returns
    ----------
    :return: ``SpatialImage`` instance -- output image and metadata

    Example
    -------
    >>> output_image = resample_isotropic(input_image, voxelsize=0.4)
    """
    if isinstance(image, SpatialImage) and image.get_dim()==3:

        poss_opt = ['gray', 'label']
        if option not in poss_opt:
            option = 'gray'

        extent = image.get_extent()
        new_vox = [voxelsize, voxelsize, voxelsize]
        new_shape = [int(np.ceil(extent[ind]/new_vox[ind])) for ind in range(image.get_dim())]
        tmp_img = np.zeros((new_shape[0],new_shape[1],new_shape[2]),dtype=image.dtype)
        tmp_img = SpatialImage(tmp_img, voxelsize=new_vox)

        if option=='gray':
            param_str_2 = '-resize -interpolation linear'
        elif option=='label':
            param_str_2 = '-resize -interpolation nearest'

        out_img = apply_trsf(image, bal_transformation=None, template_img=tmp_img, param_str_2=param_str_2)
        if 1 in out_img.get_shape():
            out_img = out_img.to_2D()
        return out_img
    else:
        print('image must be a SpatialImage instance')
        return
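
A minimal usage sketch for resample_isotropic above, reusing the labelled sample image loaded in code example #30 via timagetk's data_path helper.

# Hypothetical usage of resample_isotropic (defined above).
from timagetk.util import data_path
from timagetk.components import imread

seg = imread(data_path('time_3_seg.inr'))   # labelled sample image used in other examples here
iso_seg = resample_isotropic(seg, voxelsize=0.4, option='label')
print(iso_seg.get_voxelsize())              # [0.4, 0.4, 0.4]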
Code example #19
def seed_image_from_points(size,
                           voxelsize,
                           positions,
                           point_radius=1.0,
                           background_label=1):
    """
    Generate a SpatialImage of a given shape with labelled spherical regions around points
    """

    seed_img = background_label * np.ones(tuple(size), np.uint16)

    size = np.array(size)
    voxelsize = np.array(voxelsize)

    for p in positions.keys():
        image_neighborhood = np.array(np.ceil(point_radius / voxelsize), int)
        neighborhood_coords = np.mgrid[
            -image_neighborhood[0]:image_neighborhood[0] + 1,
            -image_neighborhood[1]:image_neighborhood[1] + 1,
            -image_neighborhood[2]:image_neighborhood[2] + 1]
        neighborhood_coords = np.concatenate(
            np.concatenate(np.transpose(neighborhood_coords,
                                        (1, 2, 3, 0)))) + np.array(
                                            positions[p] / voxelsize, int)
        neighborhood_coords = np.minimum(
            np.maximum(neighborhood_coords, np.array([0, 0, 0])), size - 1)
        neighborhood_coords = array_unique(neighborhood_coords)

        neighborhood_distance = np.linalg.norm(
            neighborhood_coords * voxelsize - positions[p], axis=1)
        neighborhood_coords = neighborhood_coords[
            neighborhood_distance <= point_radius]
        neighborhood_coords = tuple(np.transpose(neighborhood_coords))

        seed_img[neighborhood_coords] = p

    return SpatialImage(seed_img, voxelsize=list(voxelsize))
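
A minimal usage sketch for seed_image_from_points above; it relies on the array_unique helper imported in its module, and the seed positions are expressed in real (voxelsize-scaled) units.

# Hypothetical usage of seed_image_from_points (defined above).
import numpy as np

positions = {2: np.array([5., 5., 5.]),     # seed label -> position in real units
             3: np.array([20., 20., 5.])}
seed_img = seed_image_from_points((30, 30, 10), (1., 1., 1.), positions,
                                  point_radius=2.0, background_label=1)
print(np.unique(seed_img))                  # [1 2 3]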
Code example #20
File: PIN_quantif_test.py Project: gcerutti/SamMaps
membrane_ch_name = 'PI'
path_suffix = "test_offset/"
# im_tif = "qDII-CLV3-PIN1-PI-E37-LD-SAM7-T5-P2.tif"
im_tif = "qDII-CLV3-PIN1-PI-E37-LD-SAM7-T14-P2.tif"

im_fname = image_dirname + path_suffix + im_tif

from vplants.tissue_nukem_3d.microscopy_images.read_microscopy_image import read_tiff_image
ori = [0, 0, 0]
voxelsize = (0.208, 0.208, 0.677)
signal_names = ['DIIV', 'PIN1', 'PI', 'TagBFP', 'CLV3']

img_dict = read_tiff_image(im_fname,
                           channel_names=signal_names,
                           voxelsize=voxelsize)
img_dict['PI'] = SpatialImage(img_dict['PI'], voxelsize=voxelsize, origin=ori)
img_dict['PIN1'] = SpatialImage(img_dict['PIN1'],
                                voxelsize=voxelsize,
                                origin=ori)
PIN_signal_im = img_dict['PIN1']
PI_signal_im = img_dict['PI']

###############################################################################
# -- PI signal segmentation:
###############################################################################
from os.path import exists
seg_img_fname = image_dirname + path_suffix + splitext_zip(
    im_tif)[0] + '_seg.inr'
if not exists(seg_img_fname):
    print "\n - Performing isometric resampling of the image to segment..."
    from timagetk.algorithms import isometric_resampling
Code example #21
File: test_bal_image.py Project: pradal/timagetk
 def test_build_from_empty(self):
     shape = (2, 3, 4)
     bal_image = BalImage(shape=shape)
     bal_image.c_display()
     ref = SpatialImage(np.zeros((shape), dtype=np.uint8))
     self.check_image(ref, bal_image)
Code example #22
def get_mask(img):
    return SpatialImage(img.get_array() != 0,
                        voxelsize=img.voxelsize,
                        dtype="uint8")
Code example #23
    for it in xrange(15):
        background_img = morphology(
            background_img, param_str_2='-operation erosion -iterations 10')
    # ---- Detect small regions defined as background and remove them:
    connected_background_components, n_components = nd.label(background_img)
    components_area = nd.sum(np.ones_like(connected_background_components),
                             connected_background_components,
                             index=np.arange(n_components) + 1)
    largest_component = (np.arange(n_components) +
                         1)[np.argmax(components_area)]
    background_img = (
        connected_background_components == largest_component).astype(np.uint16)
    # ---- Finally add the background and make a SpatialImage:
    seed_img[background_img == 1] = 1
    del smooth_img_bck, background_img
    seed_img = SpatialImage(seed_img, voxelsize=voxelsize)
    # world.add(seed_img,"seed_image", colormap="glasbey", alphamap="constant",voxelsize=microscope_orientation*voxelsize, bg_id=0)

    # -- Performs automatic seeded watershed using previously created seed image:
    print "\n# - Seeded watershed using seed EXPERT seed positions..."
    smooth_img = linear_filtering(img,
                                  std_dev=std_dev,
                                  method='gaussian_smoothing')
    seg_im = segmentation(smooth_img, seed_img)
    # Use largest bounding box to determine background value:
    background = get_background_value(seg_im, microscope_orientation)
    print "Detected background value:", background
    # world.add(seg_im,"seg_image", colormap="glasbey", alphamap="constant",voxelsize=microscope_orientation*voxelsize, bg_id=background)

    # -- Create a vertex_topomesh from detected cell positions:
    print "\n# - Extracting 'barycenter' & 'L1' properties from segmented image..."
Code example #24
reference_name = 'TagBFP'
channel_names = ['DIIV', 'PIN1', 'PI', 'TagBFP', 'CLV3']
signal_names = channel_names
compute_ratios = [n in ['DIIV'] for n in signal_names]
microscope_orientation = -1

image_filename = microscopy_dirname + "/RAW/" + filename
image_dict = read_czi_image(image_filename, channel_names=channel_names)

no_organ_filename = microscopy_dirname + "/TIF-No-organs/" + filename[:-4] + "-No-organs.tif"
if os.path.exists(no_organ_filename):
    no_organ_dict = read_tiff_image(no_organ_filename,
                                    channel_names=channel_names)
    voxelsize = image_dict[reference_name].voxelsize
    for channel in channel_names:
        image_dict[channel] = SpatialImage(no_organ_dict[channel],
                                           voxelsize=voxelsize)

# detection step
for rescaling in [False, True]:
    reference_img = image_dict[reference_name]
    suffix = ""
    if rescaling:
        reference_img = sl_equalize_adapthist(reference_img)
        suffix += "_AdaptHistEq"
    # world.add(reference_img,'nuclei_image',colormap='invert_grey',voxelsize=microscope_orientation*np.array(image_dict[reference_name].voxelsize))
    # world['nuclei_image']['intensity_range'] = (2000,20000)

    # if 'PI' in channel_names:
    # pi_img = image_dict['PI']
    # world.add(pi_img,'membrane_image',colormap='Reds',voxelsize=microscope_orientation*np.array(image_dict[reference_name].voxelsize))
    # world['membrane_image']['intensity_range'] = (5000,30000)
Code example #25
import numpy as np
from timagetk.components import SpatialImage
from timagetk.algorithms.trsf import compose_trsf
from timagetk.wrapping import BalTrsf
from timagetk.wrapping.bal_trsf import TRSF_TYPE_DICT
from timagetk.wrapping.bal_trsf import TRSF_UNIT_DICT
trsf_type = 'VECTORFIELD_3D'
trsf_unit = 'REAL_UNIT'

arr2 = np.zeros((30, 30, 15))
im2 = SpatialImage(arr2, origin=[0, 0, 0], voxelsize=[1., 1., 2.])

if isinstance(trsf_type, str):
    trsf_type = TRSF_TYPE_DICT[trsf_type]
if isinstance(trsf_unit, str):
    trsf_unit = TRSF_UNIT_DICT[trsf_unit]

t01 = BalTrsf(trsf_type=trsf_type, trsf_unit=trsf_unit)
t01.read(
    '/home/jonathan/Projects/TissueAnalysis/timagetk/timagetk/share/data/vf3d_id_20_20_10.trsf'
)
t12 = BalTrsf(trsf_type=trsf_type, trsf_unit=trsf_unit)
t12.read(
    '/home/jonathan/Projects/TissueAnalysis/timagetk/timagetk/share/data/vf3d_id_30_30_15.trsf'
)

t02 = compose_trsf([t01, t12], template_img=im2)
Code example #26
    def test_set_methods(self):

        tmp_arr = np.ones((5,5),dtype=np.uint8)
        img = SpatialImage(tmp_arr)
        new_vox = [0.5, 0.5]
        new_orig = [1, 1]
        new_ext = [tmp_arr.shape[0]*new_vox[0], tmp_arr.shape[1]*new_vox[1]]
        new_type = np.uint16
        new_met = img.get_metadata()
        new_met['name'] = 'img_test'

        img.set_voxelsize(new_vox)
        self.assertEqual(img.get_voxelsize(), new_vox)
        self.assertEqual(img.get_metadata()['voxelsize'], new_vox)
        self.assertEqual(img.get_extent(), new_ext)
        self.assertEqual(img.get_metadata()['extent'], new_ext)
        img = img.set_type(new_type)
        self.assertEqual(img.get_type(), 'uint16')
        self.assertEqual(img.get_metadata()['type'], 'uint16')
        img.set_origin(new_orig)
        self.assertEqual(img.get_origin(), new_orig)
        self.assertEqual(img.get_metadata()['origin'], new_orig)
        img.set_extent([10.0, 5.0])
        self.assertEqual(img.get_voxelsize(), [2.0,1.0])
        self.assertEqual(img.get_metadata()['extent'], [10.0, 5.0])
        self.assertEqual(img.get_metadata()['voxelsize'], [2.0,1.0])
        img.set_metadata(new_met)
        self.assertDictEqual(img.get_metadata(), new_met)
        img.set_pixel([2,2], 10)
        self.assertEqual(img.get_pixel([2,2]), 10)

        #--- numpy compatibility (for example transposition)
        new_arr = np.ones((10,5), dtype=np.uint8)
        img = SpatialImage(new_arr, voxelsize=[0.5,1])
        img = img.transpose()
        self.assertEqual(img.get_metadata()['shape'], (5,10))
        self.assertEqual(img.get_metadata()['voxelsize'], [1.0,0.5])
        self.assertEqual(img.get_metadata()['voxelsize'], img.get_voxelsize())
        self.assertEqual(img.get_metadata()['extent'], [5.0,5.0])
        self.assertEqual(img.get_metadata()['extent'], img.get_extent())
Code example #27
def apply_mask(img, mask):
    return SpatialImage(img.get_array() * mask,
                        origin=img.origin,
                        voxelsize=img.voxelsize,
                        metadata=img.metadata)
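
A minimal sketch combining get_mask (code example #22) and apply_mask above to zero-out background voxels of an intensity image; the input array is synthetic.

# Hypothetical usage of get_mask and apply_mask (defined above).
import numpy as np
from timagetk.components import SpatialImage

arr = np.random.randint(1, 255, size=(20, 20, 10)).astype(np.uint16)
arr[:5] = 0                                     # fake background slab
img = SpatialImage(arr, voxelsize=[0.5, 0.5, 1.0])
mask = get_mask(img)                            # uint8 image: 1 where img != 0
masked = apply_mask(img, mask.get_array())      # background voxels stay at 0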
Code example #28
File: fusion.py Project: pradal/timagetk
def fusion(list_images, iterations=None):
    """
    Multiview reconstruction (registration)

    Parameters
    ----------
    :param list list_images: list of input ``SpatialImage``

    :param int iterations: number of iterations, optional. Default: 5

    Returns
    ----------
    :return: ``SpatialImage`` instance -- image and metadata

    Example
    -------
    >>> from timagetk.util import data_path
    >>> from timagetk.components import imread
    >>> from timagetk.algorithms import fusion
    >>> vues = [0, 1, 2]
    >>> list_images = [imread(data_path('fusion_img_' + str(vue) + '.inr'))
                       for vue in vues]
    >>> fus_img = fusion(list_images)
    """
    if iterations is None:
        iterations = 5
    else:
        iterations = int(abs(iterations))
    #--- check: list of SpatialImage images
    conds_init = isinstance(list_images, list) and len(list_images) >= 2
    conds_list_img = [
        0 if isinstance(sp_img, SpatialImage) else 1 for sp_img in list_images
    ]
    #--- end check
    if conds_init and 1 not in conds_list_img:

        succ_ref_img = []
        vox_list = [sp_img.get_voxelsize() for sp_img in list_images]
        vox_list = [i for i in itertools.chain.from_iterable(vox_list)
                    ]  # voxel list
        ext_list = [sp_img.get_extent() for sp_img in list_images]
        ext_list = [i for i in itertools.chain.from_iterable(ext_list)
                    ]  # extent list

        if list_images[0].get_dim() == 3:
            min_vox, val = np.min(vox_list), int(
                np.max(ext_list) / np.min(vox_list))
            tmp_arr = np.zeros((val, val, val), dtype=list_images[0].dtype)
            template_img = SpatialImage(tmp_arr,
                                        voxelsize=[min_vox, min_vox, min_vox])

        init_ref = apply_trsf(list_images[0],
                              bal_transformation=None,
                              template_img=template_img)
        succ_ref_img.append(init_ref)

        init_trsf_list, init_img_list = [], []
        for ind, sp_img in enumerate(list_images):
            if ind > 0:
                trsf_rig, res_rig = blockmatching(
                    sp_img, init_ref, param_str_2='-trsf-type rigid -py-ll 1')

                trsf_aff, res_aff = blockmatching(
                    sp_img,
                    init_ref,
                    left_transformation=trsf_rig,
                    param_str_2='-trsf-type affine')

                tmp_trsf = compose_trsf([trsf_rig, trsf_aff])
                trsf_def, res_def = blockmatching(
                    sp_img,
                    init_ref,
                    init_result_transformation=tmp_trsf,
                    param_str_2='-trsf-type vectorfield')

                out_trsf = BalTransformation(c_bal_trsf=trsf_def)
                init_trsf_list.append(out_trsf)
                init_img_list.append(res_def)
        init_img_list.append(init_ref)
        mean_ref = mean_images(init_img_list)
        mean_trsf = mean_trsfs(init_trsf_list)
        mean_trsf_inv = inv_trsf(mean_trsf)
        mean_ref_update = apply_trsf(mean_ref,
                                     mean_trsf_inv,
                                     template_img=template_img)
        succ_ref_img.append(mean_ref_update)
        for index in range(0, iterations):
            init_trsf_list, init_img_list = [], []
            for ind, sp_img in enumerate(list_images):
                trsf_rig, res_rig = blockmatching(
                    sp_img,
                    mean_ref_update,
                    param_str_2='-trsf-type rigid -py-ll 1')
                trsf_aff, res_aff = blockmatching(
                    sp_img,
                    mean_ref_update,
                    left_transformation=trsf_rig,
                    param_str_2='-trsf-type affine')
                tmp_trsf = compose_trsf([trsf_rig, trsf_aff])
                trsf_def, res_def = blockmatching(
                    sp_img,
                    mean_ref_update,
                    init_result_transformation=tmp_trsf,
                    param_str_2='-trsf-type vectorfield')
                out_trsf = BalTransformation(c_bal_trsf=trsf_def)
                init_trsf_list.append(out_trsf)
                init_img_list.append(res_def)
            init_img_list.append(mean_ref_update)
            mean_ref = mean_images(init_img_list)
            mean_trsf = mean_trsfs(init_trsf_list)
            mean_trsf_inv = inv_trsf(mean_trsf)
            mean_ref_update = apply_trsf(mean_ref,
                                         mean_trsf_inv,
                                         template_img=template_img)
            succ_ref_img.append(mean_ref_update)
        return succ_ref_img[-1]
    else:
        print('Incorrect specification')
        return
Code example #29
# Create a seed image from the nuclei barycenters:
seed_img = seed_image_from_points(membrane_img.shape,
                                  membrane_img.voxelsize,
                                  positions,
                                  background_label=0)

# Add the "background seed":
background_threshold = 2000.
smooth_img_bck = linearfilter(membrane_img,
                              param_str_2='-x 0 -y 0 -z 0 -sigma 3.0')
background_img = (smooth_img_bck < background_threshold).astype(np.uint16)
for it in xrange(10):
    background_img = morphology(
        background_img, param_str_2='-operation erosion -iterations 10')
seed_img += background_img
seed_img = SpatialImage(seed_img, voxelsize=membrane_img.voxelsize)
#world.add(seed_img,'seed_image',colormap='glasbey',alphamap='constant',bg_id=0)
segmented_filename = image_dirname + "/" + nomenclature_names[
    filename] + "/" + nomenclature_names[
        filename] + "_corrected_nuclei_seed.inr"
imsave(segmented_filename, seed_img)

seed_img = isometric_resampling(seed_img, option='label')

std_dev = 2.0
membrane_img = isometric_resampling(membrane_img)
vxs = membrane_img.voxelsize

try:
    from equalization import z_slice_contrast_stretch
    from slice_view import slice_view
Code example #30
File: temporal_matching.py Project: pradal/timagetk
times = [3,4]
segmentation_list, feature_space_list, back_id_list = [], [], []

for ind, val in enumerate(times):
    img = imread(data_path('time_' + str(val) + '_seg.inr'))

    # subimage extraction
    shape = img.get_shape()
    indices = [0,shape[0]-1,0,shape[1]-1,0,5]
    img = img.get_region(indices=indices)

    # remove small cells
    labels = np.unique(img).tolist()
    img = labels_post_processing(img, method='labels_erosion', radius=2)
    img[img==0] = np.min(labels)
    img = SpatialImage(img, voxelsize=img.get_voxelsize())

    # save input labeled images
    res_name = 'example_track_time_' + str(val) + '_seg.inr'
    imsave(out_path+res_name, img)

    labels = np.unique(img).tolist() # list of labels
    back_id = np.min(labels) # background identifier
    back_id_list.append(back_id)
    labels.remove(back_id)

    # feature space computation
    obj_gf = GeometricalFeatures(img, label=labels)
    feature_space = obj_gf.compute_feature_space()
    segmentation_list.append(img), feature_space_list.append(feature_space)
Code example #31
File: segmentation2stl.py Project: gcerutti/SamMaps
fname = '/data/Meristems/Carlos/PIN_maps/nuclei_images/qDII-PIN1-CLV3-PI-LD_E37_171113_sam07_t14/qDII-PIN1-CLV3-PI-LD_E37_171113_sam07_t14_PI_raw_segmented.inr.gz'
im = read_image(fname)

import tissue_printer
reload(tissue_printer)
from tissue_printer import *


# - Performs vtkDiscreteMarchingCubes on labelled image (with all labels!)
# dmc = vtk_dmc(im, mesh_fineness=3.0)
# write_stl(bin_dmc, splitext_zip(fname)[0]+'.stl')


# - Performs vtkDiscreteMarchingCubes on inside/outside masked image:
back_id = 1
# -- Convert the labelled image into inside/outside mask image:
mask = np.array(im != back_id, dtype="uint8")
bin_im = SpatialImage(mask, voxelsize=im.voxelsize)
# -- Run vtkDiscreteMarchingCubes:
bin_dmc = vtk_dmc(bin_im)
# from openalea.cellcomplex.property_topomesh.utils.image_tools import image_to_vtk_polydata
# bin_dmc = image_to_vtk_polydata(bin_im, mesh_fineness=5.0)
# from openalea.cellcomplex.property_topomesh.utils.image_tools import image_to_vtk_cell_polydata
# bin_dmc = image_to_vtk_cell_polydata(bin_im, mesh_fineness=3.0)

stl_fname = splitext_zip(fname)[0]+'_binary.stl'
write_stl(bin_dmc, stl_fname)

from vplants.tissue_analysis.image2vtk import mlab_vtkSurface_viewer
mlab_vtkSurface_viewer(bin_dmc)
Code example #32
 path_suffix, img2seg_fname = get_nomenclature_channel_fname(raw_czi_fname, nom_file, ref_ch_name)
 print "\n - Loading image to segment: {}".format(img2seg_fname)
 img2seg = imread(image_dirname + path_suffix + img2seg_fname)
 vxs = np.array(img2seg.voxelsize)
 ori = np.array(img2seg.origin)
 # -- Get the file name and path of the channel to subtract from the image to segment:
 # used to clear-out the cells for better segmentation
 if clearing_ch_name:
     path_suffix, substract_img_fname = get_nomenclature_channel_fname(raw_czi_fname, nom_file, clearing_ch_name)
     print "\n - Loading image to substract: {}".format(substract_img_fname)
     substract_img = imread(image_dirname + path_suffix + substract_img_fname)
     # subtract the 'CLV3' signal from the 'PI' since it might have leaked:
     print "\n - Performing images substraction..."
     tmp_im = img2seg - substract_img
     tmp_im[img2seg <= substract_img] = 0
     img2seg = SpatialImage(tmp_im, voxelsize=vxs, origin=ori)
     del tmp_im
 # -- Display the image to segment:
 # world.add(img2seg,'{}_channel'.format(ref_ch_name), colormap='invert_grey', voxelsize=microscope_orientation*vxs)
 # world['{}_channel'.format(ref_ch_name)]['intensity_range'] = (-1, 2**16)
 # -- Adaptative histogram equalization of the image to segment:
 print "\n - Performing adaptative histogram equalization of the image to segment..."
 img2seg = z_slice_equalize_adapthist(img2seg)
 # -- Performs isometric resampling of the image to segment:
 print "\n - Performing isometric resampling of the image to segment..."
 img2seg = isometric_resampling(img2seg)
 iso_vxs = np.array(img2seg.voxelsize)
 iso_shape = img2seg.shape
 # -- Display the isometric version of the "equalized" image to segment:
 # world.add(img2seg,'{}_channel_equalized_isometric'.format(ref_ch_name), colormap='invert_grey', voxelsize=microscope_orientation*iso_vxs)
 # world['{}_channel_equalized_isometric'.format(ref_ch_name)]['intensity_range'] = (-1, 2**16)
Code example #34
size = np.array(img.shape)
voxelsize = np.array(img.voxelsize)

# Mask
#------------------------------
## mask image obtained by maximum intensity projection:
# mask_filename = image_dirname+"/"+filename+"/"+filename+"_projection_mask.inr.gz"
## 3D mask image obtained by piling a mask for each slice:
mask_filename = image_dirname + "/" + filename + "/" + filename + "_mask.inr.gz"
if exists(mask_filename):
    mask_img = imread(mask_filename)
else:
    mask_img = np.ones_like(img)

img[mask_img == 0] = 0
img = SpatialImage(img, voxelsize=voxelsize)

# world.add(mask_img,"mask",voxelsize=microscope_orientation*np.array(mask_img.voxelsize),colormap='grey',alphamap='constant',bg_id=255)
# world.add(img,"reference_image",colormap="invert_grey",voxelsize=microscope_orientation*voxelsize)

# Corrected image of detected seed = ground truth
#---------------------------------------------------
xp_topomesh_fname = image_dirname + "/" + filename + "/" + filename + "_EXPERT_seed.ply"
# xp_topomesh_fname = image_dirname+"/"+filename+"/"+filename+"_nuclei_detection_topomesh_corrected_AdaptHistEq.ply"

expert_topomesh = read_ply_property_topomesh(xp_topomesh_fname)
expert_positions = expert_topomesh.wisp_property('barycenter', 0)
# Convert coordinates into voxel units:
expert_coords = expert_positions.values() / (microscope_orientation *
                                             voxelsize)
# ???