Example #1
def test_accuracy():
    ''' Verify that our implementation returns exactly the same as scikit
    '''
    base_dir = '/home/omar/data/DATA_NeoBrainS12/'
    neo_subject = '30wCoronal/example2/'

    # Read subject files
    t2CurrentSubjectName  = base_dir + 'trainingDataNeoBrainS12/'+neo_subject+'T2_1-1.nii.gz'
    t2CurrentSubject_data = nib.load(t2CurrentSubjectName).get_data()
    affineT2CS            = nib.load(t2CurrentSubjectName).get_affine()
    zoomsT2CS             = nib.load(t2CurrentSubjectName).get_header().get_zooms()[:3]

    n_zooms = (zoomsT2CS[0],zoomsT2CS[0],zoomsT2CS[0])
    t2CurrentSubject_data,affineT2CS = reslice(t2CurrentSubject_data,affineT2CS,zoomsT2CS,n_zooms)

    S = t2CurrentSubject_data.astype(np.float64)

    max_radius = 4
    D = SequencialSphereDilation(S)
    for r in range(1, 1+max_radius):
        D.expand(S)
        expected = dilation(S, ball(r))
        actual = D.get_current_dilation()
        assert_array_equal(expected, actual)
        expected = closing(S, ball(r))
        actual = D.get_current_closing()
        assert_array_equal(expected, actual)
Example #2
def test_large_radius():
    ''' Compare execution time against scikit: single closing case
    Here, our implementation does not take advantage of smaller radius results
    so ours is slower than scikit, but it uses significantly less memory.
    '''
    base_dir = '/home/omar/data/DATA_NeoBrainS12/'
    neo_subject = '30wCoronal/example2/'

    # Read subject files
    t2CurrentSubjectName  = base_dir + 'trainingDataNeoBrainS12/'+neo_subject+'T2_1-1.nii.gz'
    t2CurrentSubject_data = nib.load(t2CurrentSubjectName).get_data()
    affineT2CS            = nib.load(t2CurrentSubjectName).get_affine()
    zoomsT2CS             = nib.load(t2CurrentSubjectName).get_header().get_zooms()[:3]
    # Step 1.4 - Resampling for isotropic voxels

    n_zooms = (zoomsT2CS[0],zoomsT2CS[0],zoomsT2CS[0])
    t2CurrentSubject_data,affineT2CS = reslice(t2CurrentSubject_data,affineT2CS,zoomsT2CS,n_zooms)

    S = t2CurrentSubject_data.astype(np.float64)

    ###########compare times#########
    # in-house
    radius = 15
    start = time.time()
    d = isotropic_dilation(S, radius)
    c = isotropic_erosion(d, radius)
    end = time.time()
    print('Elapsed (in-home): %f'%(end-start,))

    # scikit
    start = time.time()
    expected = closing(S, ball(radius))
    end = time.time()
    print('Elapsed (scikit): %f'%(end-start,))
Example #3
def sphere(file_roi, vol, mm_x=0, mm_y=0, mm_z=0, radius=4, dtype=np.uint8):
    img = nibabel.load(vol)
    header = img.get_header()
    voxels_size= header.get_zooms()
    coord_center_voxel = mm_to_voxel(vol, [[mm_x, mm_y, mm_z]])    
    shape_x, shape_y, shape_z = img.shape[0], img.shape[1], img.shape[2]
    roi = np.zeros((shape_x, shape_y, shape_z))
    
    radius = radius / voxels_size[0]
    if radius > 1 :
       radius -= 1 

    first_x = coord_center_voxel[0][0] - radius
    first_y = coord_center_voxel[0][1] - radius
    first_z = coord_center_voxel[0][2] - radius

    elem = morphology.ball(radius)
    
    #check voxel size is isotropic
    #check radius is at least one voxel
    for x in range(0, elem.shape[0]):
        for y in range(0, elem.shape[1]):
            for z in range(0, elem.shape[2]):
                x_ = int(first_x) + x
                y_ = int(first_y) + y
                z_ = int(first_z) + z
                roi[x_, y_, z_] = elem[x, y, z]
  
    roi_img = nibabel.Nifti1Image(roi, img.get_affine(),img.get_header() )
    nibabel.save(roi_img, file_roi)
Example #4
    def test_find_surface_pores(self):
        from skimage.morphology import ball
        net = op.network.CubicTemplate(template=ball(3), spacing=1)
        net.clear(mode='labels')
        assert net.labels() == ['pore.all', 'throat.all']
        topotools.find_surface_pores(network=net)
        assert net.num_pores('surface') == 66
Example #5
def get_reconstructed_vasculature(dict_nodes_radius, shape):
    """
    Return 2D or 3D array of vessels reconstructed
    Parameters
    ----------
    dict_nodes_radius : dict
        key: non-zero co-ordinate, value : radius
    shape : tuple
        reconstructed array shape

    Returns
    -------
    reconstructed_image : 2D or 3D array
        reconstructed vasculature
    """
    reconstructed_image = np.zeros(shape, dtype=bool)
    for dest, radius in dict_nodes_radius.items():
        if len(shape) == 2:
            selem = morphology.disk(int(radius)).astype(bool)
        elif len(shape) == 3:
            selem = morphology.ball(int(radius)).astype(bool)
        reconstructed_ith_image = np.zeros(shape, dtype=bool)
        reconstructed_ith_image[dest] = 1
        reconstructed_ith_image = ndimage.morphology.binary_dilation(reconstructed_ith_image, structure=selem)
        del selem
        reconstructed_image = np.logical_or(reconstructed_image, reconstructed_ith_image)
        del reconstructed_ith_image
    return reconstructed_image
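A minimal usage sketch (not part of the original example), assuming the function above and its module-level imports (`numpy as np`, `skimage.morphology`, `scipy.ndimage`) are available:

# two hypothetical vessel nodes with radii 3 and 5 inside a 40**3 volume
dict_nodes_radius = {(10, 10, 10): 3, (25, 25, 25): 5}
recon = get_reconstructed_vasculature(dict_nodes_radius, (40, 40, 40))
print(recon.dtype, int(recon.sum()))   # boolean volume and its vessel-voxel count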
Example #6
def refine_aseg(aseg, ball_size=4):
    """
    First step to reconcile ANTs' and FreeSurfer's brain masks.

    Here, the ``aseg.mgz`` mask from FreeSurfer is refined in two
    steps, using binary morphological operations:

      1. With a binary closing operation the sulci are included
         into the mask. This results in a smoother brain mask
         that does not exclude deep, wide sulci.

      2. Fill any holes (typically, there could be a hole next to
         the pineal gland and the corpora quadrigemina if the great
         cerebral vein is segmented out).


    """
    # Read aseg data
    bmask = aseg.copy()
    bmask[bmask > 0] = 1
    bmask = bmask.astype(np.uint8)

    # Morphological operations
    selem = sim.ball(ball_size)
    newmask = sim.binary_closing(bmask, selem)
    newmask = binary_fill_holes(newmask.astype(np.uint8), selem).astype(np.uint8)

    return newmask.astype(np.uint8)
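A minimal usage sketch (not part of the original example), assuming `numpy as np`, `skimage.morphology as sim`, and `scipy.ndimage.binary_fill_holes` are imported as the function expects:

import numpy as np

# fake aseg-style labels: a cube of label 3 with a small internal hole
aseg = np.zeros((40, 40, 40), dtype=np.int16)
aseg[10:30, 10:30, 10:30] = 3
aseg[19:22, 19:22, 19:22] = 0          # hole that closing/filling should remove
mask = refine_aseg(aseg, ball_size=2)
print(mask.dtype, int(mask.sum()))     # uint8 binary mask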
Example #7
def create_synth_dict(radii, box_radius):
    """
    This function creates a collection of spherical templates of different sizes.
    
    Parameters
    ----------
    radii : int 
        radii could be a 1xN vector but currently is an integer
    box_radius : float

    Returns
    -------
    ndarray
        dictionary of template vectors, of size (box_length ** 3 x length(radii)), where 
        box_length = box_radius*2 +1 and radii is an input to the function which contains a vector 
        of different sphere sizes.
    """
    
    box_length = int(box_radius * 2 + 1)     #used for array dimension
    dict = np.zeros((box_length**3, np.size(radii)), dtype='float32')
    cvox = int((box_length-1)/2 + 1)
    
    for i in range(len(radii)):
        template = np.zeros((box_length, box_length, box_length))
        template[cvox, cvox, cvox] = 1
        dict[:, i] = np.reshape(ndi.binary_dilation(template, ball((radii[i] - 1)/2)), (box_length**3))
        dict[:, i] = dict[:, i]/(LA.norm(dict[:, i]))
        
    return(dict)
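A short usage sketch (not part of the original example), assuming the snippet's imports (`numpy as np`, `scipy.ndimage as ndi`, `skimage.morphology.ball`, `numpy.linalg as LA`); odd radii keep `(radii[i] - 1)/2` integral:

radii = [3, 5, 7]                      # hypothetical sphere diameters, in voxels
D = create_synth_dict(radii, box_radius=5)
print(D.shape)                         # (11**3, 3) == (1331, 3)
print((D ** 2).sum(axis=0))            # columns are L2-normalized, so roughly [1, 1, 1]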
Example #8
    def _run_interface(self, runtime):

        in_files = self.inputs.in_files

        if self.inputs.enhance_t2:
            in_files = [_enhance_t2_contrast(f, newpath=runtime.cwd)
                        for f in in_files]

        masknii = compute_epi_mask(
            in_files,
            lower_cutoff=self.inputs.lower_cutoff,
            upper_cutoff=self.inputs.upper_cutoff,
            connected=self.inputs.connected,
            opening=self.inputs.opening,
            exclude_zeros=self.inputs.exclude_zeros,
            ensure_finite=self.inputs.ensure_finite,
            target_affine=self.inputs.target_affine,
            target_shape=self.inputs.target_shape
        )

        if self.inputs.closing:
            closed = sim.binary_closing(masknii.get_data().astype(
                np.uint8), sim.ball(1)).astype(np.uint8)
            masknii = masknii.__class__(closed, masknii.affine,
                                        masknii.header)

        if self.inputs.fill_holes:
            filled = binary_fill_holes(masknii.get_data().astype(
                np.uint8), sim.ball(6)).astype(np.uint8)
            masknii = masknii.__class__(filled, masknii.affine,
                                        masknii.header)

        if self.inputs.no_sanitize:
            in_file = self.inputs.in_files
            if isinstance(in_file, list):
                in_file = in_file[0]
            nii = nb.load(in_file)
            qform, code = nii.get_qform(coded=True)
            masknii.set_qform(qform, int(code))
            sform, code = nii.get_sform(coded=True)
            masknii.set_sform(sform, int(code))

        self._results['out_mask'] = fname_presuffix(
            self.inputs.in_files[0], suffix='_mask', newpath=runtime.cwd)
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example #9
def test_get_boundaries_of_image_3d():
    # Test if equivalent diameter of the maximum intensity project of edges of the object is same
    # as the input sphere, measure.regionprops, 3D perimeter parameter not implemented in skimage
    radius = 4
    binary = morphology.ball(radius)
    boundary = radius_skeleton.get_boundaries_of_image(binary)
    maxip = np.amax(boundary, 0)
    nose.tools.assert_almost_equal(measure.regionprops(binary)[0].equivalent_diameter,
                                   measure.regionprops(maxip)[0].equivalent_diameter, places=1)
Example #10
def mask_data(f):
    file_id = f.split('/')[-3]
    seg = irtk.imread(f,force_neurological=True) > 0

    r = 10
    x_min,y_min,z_min,x_max,y_max,z_max = seg.bbox()
    seg = seg[max(0,z_min-3*r):min(z_max+3*r+1,seg.shape[0]),
              max(0,y_min-3*r):min(y_max+3*r+1,seg.shape[1]),
              max(0,x_min-3*r):min(x_max+3*r+1,seg.shape[2])]
    ball = morphology.ball( 5 )
    seg = irtk.Image( nd.binary_dilation(seg,ball), seg.get_header() )
    ball = morphology.ball( r )
    seg = irtk.Image( nd.binary_closing(seg,ball), seg.get_header() )
    
    seg = seg.bbox(crop=True)
        
    seg_file = output_dir + '/seg_' + file_id + ".nii.gz"
    irtk.imwrite( seg_file, seg )
Example #11
def dilate(data, radius):
    """
    Dilate data using ball structuring element
    :param data: 2d or 3d array
    :param radius: radius of structuring element
    :return: data dilated
    """
    from skimage.morphology import dilation, ball
    selem = ball(radius)
    return dilation(data, selem=selem, out=None)
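Note (not part of the original example): newer scikit-image releases (0.19 and later) renamed the `selem` keyword of the morphology functions to `footprint`, so a variant of this helper for the newer API could look like the following sketch:

from skimage.morphology import dilation, ball

def dilate_ball(data, radius):
    """Dilate a 3D array with a ball structuring element (newer scikit-image API)."""
    return dilation(data, footprint=ball(radius))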
Example #12
def erode(data, radius):
    """
    Erode data using ball structuring element
    :param data: 2d or 3d array
    :param radius: radius of structuring element
    :return: data eroded
    """
    from skimage.morphology import binary_erosion, ball
    selem = ball(radius)
    return binary_erosion(data, selem=selem, out=None)
Example #13
def binary_find_boundaries(image):
    if image.dtype != np.bool:
        raise ValueError('image must have dtype = \'bool\'')
    if image.ndim == 2:
        selem = disk(1)
    elif image.ndim == 3:
        selem = ball(1)
    else:
        raise ValueError('image must be 2D or 3D')
    eroded = binary_erosion(image, selem)
    return (image & (~eroded))
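A minimal usage sketch (not part of the original example), assuming the snippet's imports (`numpy as np` and `disk`, `ball`, `binary_erosion` from `skimage.morphology`); note that the `np.bool` check above requires an older NumPy:

from skimage.morphology import ball

vol = ball(5).astype(bool)             # solid sphere as a boolean volume
shell = binary_find_boundaries(vol)    # one-voxel-thick boundary shell
print(int(shell.sum()), int(vol.sum()))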
Example #14
def erode(data, radius):
    """
    Erode data using ball structuring element
    :param data: 2d or 3d array
    :param radius: radius of structuring element
    :return: data eroded
    """
    from skimage.morphology import erosion, ball
    if len(radius) == 1:
        # define structured element as a ball
        selem = ball(radius[0])
    else:
        # define structured element as a box with input dimensions
        selem = np.ones((radius[0], radius[1], radius[2]), dtype=np.uint8)
    return erosion(data, selem=selem, out=None)
Example #15
def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4):
    """
    Grow mask including pixels that have a high likelihood.
    GM tissue parameters are sampled in image patches of ``ww`` size.

    This is inspired on mindboggle's solution to the problem:
    https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660

    """
    selem = sim.ball(bw)

    if ants_segs is None:
        ants_segs = np.zeros_like(aseg, dtype=np.uint8)

    aseg[aseg == 42] = 3  # Collapse both hemispheres
    gm = anat.copy()
    gm[aseg != 3] = 0

    refined = refine_aseg(aseg)
    newrefmask = sim.binary_dilation(refined, selem) - refined
    indices = np.argwhere(newrefmask > 0)
    for pixel in indices:
        # When ATROPOS identified the pixel as GM, set and carry on
        if ants_segs[tuple(pixel)] == 2:
            refined[tuple(pixel)] = 1
            continue

        window = gm[
            pixel[0] - ww:pixel[0] + ww,
            pixel[1] - ww:pixel[1] + ww,
            pixel[2] - ww:pixel[2] + ww
        ]
        if np.any(window > 0):
            mu = window[window > 0].mean()
            sigma = max(window[window > 0].std(), 1.e-5)
            zstat = abs(anat[tuple(pixel)] - mu) / sigma
            refined[tuple(pixel)] = int(zstat < zval)

    refined = sim.binary_opening(refined, selem)
    return refined
Example #16
def test_performance():
    ''' Compare execution time against scikit, sequential closing case
    '''
    base_dir = '/home/omar/data/DATA_NeoBrainS12/'
    neo_subject = '30wCoronal/example2/'

    # Read subject files
    t2CurrentSubjectName  = base_dir + 'trainingDataNeoBrainS12/'+neo_subject+'T2_1-1.nii.gz'
    t2CurrentSubject_data = nib.load(t2CurrentSubjectName).get_data()
    affineT2CS            = nib.load(t2CurrentSubjectName).get_affine()
    zoomsT2CS             = nib.load(t2CurrentSubjectName).get_header().get_zooms()[:3]
    # Step 1.4 - Resampling for isotropic voxels

    n_zooms = (zoomsT2CS[0],zoomsT2CS[0],zoomsT2CS[0])
    t2CurrentSubject_data,affineT2CS = reslice(t2CurrentSubject_data,affineT2CS,zoomsT2CS,n_zooms)

    S = t2CurrentSubject_data.astype(np.float64)
    S = S[:S.shape[0]//4, :S.shape[1]//4, :S.shape[2]//4]

    ###########compare times#########
    # in-house
    start = time.time()
    max_radius = 11
    D = SequencialSphereDilation(S)
    for r in range(max_radius):
        print('Computing radius %d...'%(r+1,))
        D.expand(S)
        actual = D.get_current_closing()
        del actual
    del D
    end = time.time()
    print('Elapsed (in-home): %f'%(end-start,))
    # scikit
    start = time.time()
    for r in range(max_radius):
        print('Computing radius %d...'%(1+r,))
        expected = closing(S, ball(1+r))
        del expected
    end = time.time()
    print('Elapsed (scikit): %f'%(end-start,))
Example #17
def morphop(im, operation='open', radius=5):
    """Perform a morphological operation with spherical structuring element.

    Parameters
    ----------
    im : array, shape (M, N[, P])
        2D or 3D grayscale image.
    operation : string, optional
        The operation to perform. Choices are 'opening', 'closing',
        'erosion', and 'dilation'. Imperative verbs also work, e.g.
        'dilate'.
    radius : int, optional
        The radius of the structuring element (disk or ball) used.

    Returns
    -------
    imout : array, shape (M, N[, P])
        The transformed image.

    Raises
    ------
    ValueError : if the image is not 2D or 3D.
    """
    if im.ndim == 2:
        selem = skmorph.disk(radius)
    elif im.ndim == 3:
        selem = skmorph.ball(radius)
    else:
        raise ValueError("Image input to 'morphop' should be 2D or 3D"
                         ", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = nd.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = nd.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = nd.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = nd.grey_erosion(im, footprint=selem)
    return imout
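A brief usage sketch (not part of the original example), assuming `skimage.morphology as skmorph` and `scipy.ndimage as nd` are imported as the function expects:

from skimage import morphology as skmorph

im = skmorph.ball(8).astype(float) * 100.0    # bright sphere on a dark background
opened = morphop(im, operation='open', radius=2)
closed = morphop(im, operation='close', radius=2)
print(opened.shape == im.shape, closed.shape == im.shape)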
Example #18
def segment_vessels(vessel_probability, probability_threshold, dilation_size, minimum_size):
    
    """
    This function produces a binary image with segmented vessels from a probability map (from
    ilastik or another classifier).
    
    Parameters
    ----------
    vessel_probability : ndarray
        Nr x Nc x Nz matrix which contains the probability of each voxel being a vessel.
        
    probability_threshold : float
        threshold between (0,1) to apply to probability map (only consider voxels for which
        vessel_probability(r,c,z) > probability_threshold).
        
    dilation_size : int
        Sphere Structural Element diameter size.
    
    minimum_size : int
        components smaller than this are removed from image.
    
    Returns
    -------
    ndarray
        Binary Image 
    """
    smallsize = 100 # components smaller than this size are removed. WHY Fixed Size??
    
    unfiltered_im = (vessel_probability >= probability_threshold)
    im_removed_small_objects = morphology.remove_small_objects(unfiltered_im, 
                                                               min_size = smallsize, in_place = True)
    
    dilated_im = ndi.binary_dilation(im_removed_small_objects, morphology.ball((dilation_size-1)/2))
    image_out = morphology.remove_small_objects(dilated_im, min_size = minimum_size, 
                                                in_place = True)
    return(image_out)
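A short usage sketch (not part of the original example), assuming `numpy as np`, `scipy.ndimage as ndi`, and `skimage.morphology` are imported as in the snippet; odd `dilation_size` values keep the ball radius integral:

import numpy as np
from skimage import morphology

prob = np.zeros((40, 40, 40), dtype=np.float32)                      # fake classifier output
prob[10:27, 10:27, 10:27][morphology.ball(8).astype(bool)] = 0.99    # spherical "vessel" blob
mask = segment_vessels(prob, probability_threshold=0.9,
                       dilation_size=3, minimum_size=50)
print(mask.dtype, int(mask.sum()))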
Example #19
def pixelwise_transform(mask,
                        dilation_radius=None,
                        data_format=None,
                        separate_edge_classes=False):
    """Transforms a label mask for a z stack edge, interior, and background

    Args:
        mask (numpy.array): tensor of labels
        dilation_radius (int):  width to enlarge the edge feature of
            each instance
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        separate_edge_classes (bool): Whether to separate the cell edge class
            into 2 distinct cell-cell edge and cell-background edge classes.

    Returns:
        numpy.array: An array with the same shape as ``mask``, except the
        channel axis will be a one-hot encoded semantic segmentation for
        3 main features:
        ``[cell_edge, cell_interior, background]``.
        If ``separate_edge_classes`` is ``True``, the ``cell_interior``
        feature is split into 2 features and the resulting channels are:
        ``[bg_cell_edge, cell_cell_edge, cell_interior, background]``.
    """
    if data_format is None:
        data_format = K.image_data_format()

    if data_format == 'channels_first':
        channel_axis = 0
    else:
        channel_axis = -1

    # Detect the edges and interiors
    edge = find_boundaries(mask, mode='inner').astype('int')
    interior = np.logical_and(edge == 0, mask > 0).astype('int')

    strel = ball(1) if mask.ndim > 2 else disk(1)
    if not separate_edge_classes:
        if dilation_radius:
            dil_strel = ball(dilation_radius) if mask.ndim > 2 else disk(
                dilation_radius)
            # Thicken cell edges to be more pronounced
            edge = binary_dilation(edge, selem=dil_strel)

            # Thin the augmented edges by subtracting the interior features.
            edge = (edge - interior > 0).astype('int')

        background = (1 - edge - interior > 0)
        background = background.astype('int')

        all_stacks = [edge, interior, background]

        return np.stack(all_stacks, axis=channel_axis)

    # dilate the background masks and subtract from all edges for background-edges
    background = (mask == 0).astype('int')
    dilated_background = binary_dilation(background, strel)

    background_edge = (edge - dilated_background > 0).astype('int')

    # edges that are not background-edges are interior-edges
    interior_edge = (edge - background_edge > 0).astype('int')

    if dilation_radius:
        dil_strel = ball(dilation_radius) if mask.ndim > 2 else disk(
            dilation_radius)
        # Thicken cell edges to be more pronounced
        interior_edge = binary_dilation(interior_edge, selem=dil_strel)
        background_edge = binary_dilation(background_edge, selem=dil_strel)

        # Thin the augmented edges by subtracting the interior features.
        interior_edge = (interior_edge - interior > 0).astype('int')
        background_edge = (background_edge - interior > 0).astype('int')

    background = (1 - background_edge - interior_edge - interior > 0)
    background = background.astype('int')

    all_stacks = [background_edge, interior_edge, interior, background]

    return np.stack(all_stacks, axis=channel_axis)
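A small usage sketch (not part of the original example), assuming the snippet's imports (`numpy as np`, `find_boundaries`, `binary_dilation`, `ball`, `disk`); passing `data_format` explicitly avoids the Keras backend lookup:

import numpy as np

mask = np.zeros((10, 10, 10), dtype=int)    # toy 3D label mask with two cells
mask[2:5, 2:5, 2:5] = 1
mask[6:9, 6:9, 6:9] = 2
out = pixelwise_transform(mask, dilation_radius=1,
                          data_format='channels_last',
                          separate_edge_classes=False)
print(out.shape)                            # (10, 10, 10, 3): [edge, interior, background]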
Example #20
def test_get_reconstructed_vasculature_3d_ball():
    radius = 6
    original = morphology.ball(radius)
    dict_nodes_radius = _helper_radius(original, radius)
    predicted = radius_skeleton.get_reconstructed_vasculature(dict_nodes_radius, original.shape)
    nose.tools.assert_equal(sklearn.metrics.f1_score(original.flatten(), predicted.flatten()), 1)
Example #21
if args.remove_small_objects > 0:
    seg = irtk.Image( morphology.remove_small_objects(seg.view(np.ndarray).astype('bool'),
                                                      min_size=args.remove_small_objects).astype('uint8'),
                      seg.get_header() )

print 2, seg.shape, seg_init.shape
    
if args.select:
    seg = irtk.Image( morphology.watershed(seg,seg_init,mask=seg ),
                      seg.get_header() )

print 3, seg.shape, seg_init.shape

if args.dilate > 0:
    ball = morphology.ball(args.dilate)
    seg = irtk.Image( nd.binary_dilation( seg.view(np.ndarray),
                                          structure=ball,
                                          #iterations=args.dilate,
                                          ).astype('uint8'),
                      seg.get_header() )

print 4, seg.shape, seg_init.shape

if args.hull:
    #seg = morphology.convex_hull_image(seg)
    #ZYX = np.where(seg)
    ZYX = np.transpose(np.nonzero(seg))
    print ZYX
    pts = seg.ImageToWorld( ZYX[:,::-1] )
    hull = ConvexHull(pts,qhull_options="Qx Qs QbB QJ")
Example #22
left_lung = irtk.imread("left_lung_prior.nii.gz",force_neurological=False)
right_lung = irtk.imread("right_lung_prior.nii.gz",force_neurological=False)
liver = irtk.imread("liver_prior.nii.gz",force_neurological=False)

average = irtk.imread("/vol/medic02/users/kpk09/gitlab/fetus-detector/body-detector/notebooks/tmp/new_average_heart_center.nii.gz",force_neurological=False)

seg = irtk.imread("/vol/medic02/users/kpk09/gitlab/fetus-detector/body-detector/notebooks/tmp/seg_template.nii.gz",force_neurological=False)

res = irtk.zeros(average.get_header(),dtype='int32')

heart_center = np.array(nd.center_of_mass( (seg == 5).view(np.ndarray) ),
                      dtype='float32')
heart = np.argwhere( (seg == 5).view(np.ndarray) ).astype('float32')
r_heart = np.linalg.norm(heart_center-heart,axis=1).mean()

ball = irtk.Image( morphology.ball(r_heart) )
ball.header['origin'] = np.array([0,0,0],dtype='float64')
ball2 = ball.transform(target=liver)
res[ball2>0] = 5

brain_center = np.array(nd.center_of_mass( (seg == 2).view(np.ndarray) ),
                      dtype='float32')
brain = np.argwhere( (seg == 2).view(np.ndarray) ).astype('float32')
r_brain = np.linalg.norm(brain_center-brain,axis=1).mean()

ball = irtk.Image( morphology.ball(r_brain) )
ball.header['origin'] = np.array(liver.ImageToWorld(brain_center[::-1]),dtype='float64')
ball2 = ball.transform(target=liver)
res[ball2>0] = 2

threshold = 0.5
Example #23
# fake prediction
pred = label.copy()
pred[0:200, 50:100, 111:300] = 1

S = morphology.skeletonize_3d(label.astype(np.uint8))
S = S.astype(bool)

# calculate centerline score
# number of pixels of skeleton inside pred / number of pixels in skeleton
cl_score = np.count_nonzero(np.logical_and(S, pred)) / np.count_nonzero(S)

# dilate label massive
# to generate hull

element = morphology.ball(5)  # good value seems in between 3 and 5
element = element.astype(bool)

H = ndimage.morphology.binary_dilation(label, iterations=1, structure=element)

# 1 - number of pixels of prediction outside hull / number of pixels of prediction inside hull ?
# or just total number of pixels of prediction
out_score = 1 - np.count_nonzero(np.logical_and(np.logical_not(H),
                                                pred)) / np.count_nonzero(pred)

img = nib.Nifti1Image(S.astype(np.uint8), np.eye(4))
nib.save(img, origin.replace('.nii.gz', '_sceleton.nii.gz'))

img = nib.Nifti1Image(H.astype(np.uint8), np.eye(4))
nib.save(img, origin.replace('.nii.gz', '_hull.nii.gz'))
Example #24
def preprocess_training_data( patient_id,
                              img_folder,
                              seg_folder,
                              resample,
                              offline=False,
                              online=True):
    if offline or online:
        if ( offline
             and os.path.exists( "offline_preprocessing/"+patient_id+"_img.nii.gz" )
             and os.path.exists( "offline_preprocessing/"+patient_id+"_seg.nii.gz" ) ):
                 return
        img = irtk.imread( img_folder + "/" + patient_id + ".nii.gz",
                           dtype='float32' )
        seg = irtk.imread( seg_folder +"/"+patient_id+"_seg.nii.gz",
                           dtype="uint8" )

        wall = nd.binary_dilation( seg,
                                   morphology.ball(int(12.5*0.001/seg.header['pixelSize'][0])) )
        wall = wall.astype('int')
        points = np.transpose(np.nonzero(wall))[::4]
        center,S,V = fit_ellipsoidPCA( points )
        if V[0,0] < 0:
            V *= -1
        
        points = np.transpose(np.nonzero(wall))
        projections = np.dot(points-center,V[0])

        # valves
        index = projections > (projections.max() - 40.0*0.001/seg.header['pixelSize'][0])

        #print "VALVE size:",np.sum(index), projections.max(), 40.0*0.001/seg.header['pixelSize'][0]
    
        wall[points[index,0],
             points[index,1],
             points[index,2]] = 2

        #print "VALVE1", wall.max()

        wall = irtk.Image(wall,seg.get_header())
    
        img = img.resample( pixelSize=resample, interpolation='linear' ).rescale(0,1000)
        seg = seg.transform(target=img,interpolation="nearest").astype('uint8')
        wall = wall.transform(target=img,interpolation="nearest").astype('uint8')
 
        wall[seg>0] = 0
        seg[wall==1] = 2
        seg[wall==2] = 3

        #print "VALVE2", seg.max()
    
        #irtk.imwrite("debug/"+patient_id+"_border.nii.gz",seg)
    
        seg[img==0] = 255

        if offline:
            irtk.imwrite( "offline_preprocessing/"+patient_id+"_img.nii.gz", img )
            irtk.imwrite( "offline_preprocessing/"+patient_id+"_seg.nii.gz", seg )
            return

    if not online:
        img = irtk.imread( "offline_preprocessing/"+patient_id+"_img.nii.gz" )
        seg = irtk.imread( "offline_preprocessing/"+patient_id+"_seg.nii.gz" )
        
    mask = irtk.ones( img.get_header(), dtype='uint8' )
    mask[img==0] = 0

    return { 'patient_id': patient_id,
             'img' : img,
             'seg' : seg,
             'mask' : mask }
Example #25
ax[3].imshow(glob_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu ($t=%d$)' % t_glob_otsu)

for a in ax:
    a.axis('off')

plt.tight_layout()

######################################################################
# The example compares the local threshold with the global threshold in 3D

brain = exposure.rescale_intensity(data.brain().astype(float))

radius = 5
neighborhood = ball(radius)

# t_loc_otsu is an image
t_loc_otsu = rank.otsu(brain, neighborhood)
loc_otsu = brain >= t_loc_otsu

# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(brain)
glob_otsu = brain >= t_glob_otsu

fig, axes = plt.subplots(nrows=2,
                         ncols=2,
                         figsize=(12, 12),
                         sharex=True,
                         sharey=True)
ax = axes.ravel()
Example #26
# ---------------------------
# After segmenting the lung structures from the CT scans, our task is to find the candidate regions containing nodules, since the search space is very large. The whole image also cannot be classified directly with 3D CNNs because of the computational cost, so we first need to find possible cancer regions and then classify them. Experiments showed that all the regions of interest have intensity > -400 HU, so we use this threshold to filter out the darker regions. This greatly reduces the number of candidates while preserving all the important regions with high recall. We then classify all the candidate points to reduce the false positives.

# In[13]:
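# THRES is defined elsewhere in the original notebook; based on the description
# above (regions of interest have intensity > -400 HU), it is presumably:
# THRES = -400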

segmented_ct_scan[segmented_ct_scan < THRES] = -1000

plot_ct_scan(segmented_ct_scan)

# After filtering, there is still a lot of noise from blood vessels. Unlike the original version, I just perform an opening operation to get rid of it

# In[14]:

from skimage.morphology import opening

selem = ball(3)  #was 2
binary = binary_opening(segmented_ct_scan, selem)  # was closing
segmented_ct_scan = opening(segmented_ct_scan, selem)

label_scan = label(binary)

areas = [r.area for r in regionprops(label_scan)]
#areas.sort()
print(sum(areas))
#for r in regionprops(label_scan):
#    max_x, max_y, max_z = 0, 0, 0
#    min_x, min_y, min_z = 1000, 1000, 1000
#
#    for c in r.coords:
#        max_z = max(c[0], max_z)
#        max_y = max(c[1], max_y)
Example #27
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 11:41:47 2018

@author: haswani
"""
import scipy.ndimage as nd
import numpy as np
from skimage.morphology import skeletonize, skeletonize_3d
from astropy.io import fits
from skimage.morphology import binary_opening, binary_dilation, binary_erosion, closing, dilation, opening, ball, remove_small_holes

cube = fits.getdata('ngc3627_co21_12m+7m+tp_mask.fits')

selem = ball(3)

dskel = skeletonize_3d(cube)
ddilate = dilation(dskel, selem)
dclose = closing(ddilate)
dskel2 = skeletonize_3d(dclose)

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

footprint = np.ones((3, 3, 3))

count = nd.generic_filter(dskel2.astype(np.int), np.sum, footprint=footprint)

out = np.where(count)
fig = plt.figure()
Example #28
def regionprops_3D(im):
    r"""
    Calculates various metrics for each labeled region in a 3D image.

    The ``regionprops`` method in **skimage** is very thorough for 2D images,
    but is a bit limited when it comes to 3D images, so this function aims
    to fill this gap.

    Parameters
    ----------
    im : array_like
        An image containing at least one labeled region.  If a boolean image
        is received, then the ``True`` voxels are treated as a single region
        labeled ``1``.  Regions labeled 0 are ignored in all cases.

    Returns
    -------
    props : list
        An augmented version of the list returned by skimage's ``regionprops``.
        Information, such as ``volume``, can be found for region A using the
        following syntax: ``result[A-1].volume``.

        The returned list contains all the metrics normally returned by
        **skimage.measure.regionprops** plus the following:

        'slice': Slice indices into the image that can be used to extract the
        region

        'volume': Volume of the region in number of voxels.

        'bbox_volume': Volume of the bounding box that contains the region.

        'border': The edges of the region, found as the locations where
        the distance transform is 1.

        'inscribed_sphere': An image containing the largest sphere that can
        fit entirely inside the region.

        'surface_mesh_vertices': Obtained by applying the marching cubes
        algorithm on the region, AFTER first blurring the voxel image.  This
        allows marching cubes more freedom to fit the surface contours. See
        also ``surface_mesh_simplices``

        'surface_mesh_simplices': This accompanies ``surface_mesh_vertices``
        and together they can be used to define the region as a mesh.

        'surface_area': Calculated using the mesh obtained as described above,
        using the ``porespy.metrics.mesh_surface_area`` method.

        'sphericity': Defined as the ratio of the area of a sphere with the
        same volume as the region to the actual surface area of the region.

        'skeleton': The medial axis of the region obtained using the
        ``skeletonize_3D`` method from **skimage**.

        'convex_volume': Same as convex_area, but translated to a more
        meaningful name.

    See Also
    --------
    snow_partitioning

    Notes
    -----
    This function may seem slow compared to the skimage version, but that is
    because they defer calculation of certain properties until they are
    accessed, while this one evaluates everything (including the deferred
    properties from skimage's ``regionprops``)

    Regions can be identified using a watershed algorithm, which can be a bit
    tricky to obtain desired results.  *PoreSpy* includes the SNOW algorithm,
    which may be helpful.

    """
    print('_' * 60)
    print('Calculating regionprops')

    results = regionprops(im, coordinates='xy')
    for i in tqdm(range(len(results))):
        mask = results[i].image
        mask_padded = sp.pad(mask, pad_width=1, mode='constant')
        temp = spim.distance_transform_edt(mask_padded)
        dt = extract_subsection(temp, shape=mask.shape)
        # ---------------------------------------------------------------------
        # Slice indices
        results[i].slice = results[i]._slice
        # ---------------------------------------------------------------------
        # Volume of regions in voxels
        results[i].volume = results[i].area
        # ---------------------------------------------------------------------
        # Volume of bounding box, in voxels
        results[i].bbox_volume = sp.prod(mask.shape)
        # ---------------------------------------------------------------------
        # Create an image of the border
        results[i].border = dt == 1
        # ---------------------------------------------------------------------
        # Create an image of the maximal inscribed sphere
        r = dt.max()
        inv_dt = spim.distance_transform_edt(dt < r)
        results[i].inscribed_sphere = inv_dt < r
        # ---------------------------------------------------------------------
        # Find surface area using marching cubes and analyze the mesh
        tmp = sp.pad(sp.atleast_3d(mask), pad_width=1, mode='constant')
        tmp = spim.convolve(tmp, weights=ball(1)) / 5
        verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
        results[i].surface_mesh_vertices = verts
        results[i].surface_mesh_simplices = faces
        area = mesh_surface_area(verts, faces)
        results[i].surface_area = area
        # ---------------------------------------------------------------------
        # Find sphericity
        vol = results[i].volume
        r = (3 / 4 / sp.pi * vol)**(1 / 3)
        a_equiv = 4 * sp.pi * (r)**2
        a_region = results[i].surface_area
        results[i].sphericity = a_equiv / a_region
        # ---------------------------------------------------------------------
        # Find skeleton of region
        results[i].skeleton = skeletonize_3d(mask)
        # ---------------------------------------------------------------------
        # Volume of convex image, equal to area in 2D, so just translating
        results[i].convex_volume = results[i].convex_area

    return results
Example #29
def regions_to_network(im, dt=None, voxel_size=1):
    r"""
    Analyzes an image that has been partitioned into pore regions and extracts
    the pore and throat geometry as well as network connectivity.

    Parameters
    ----------
    im : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that this image must have zeros indicating the solid phase.

    dt : ND-array
        The distance transform of the pore space.  If not given it will be
        calculated, but it can save time to provide one if available.

    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.

    Returns
    -------
    A dictionary containing all the pore and throat size data, as well as the
    network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.
    """
    print('_' * 60)
    print('Extracting pore and throat information from image')
    from skimage.morphology import disk, square, ball, cube
    if im.ndim == 2:
        cube = square
        ball = disk

#    if ~sp.any(im == 0):
#        raise Exception('The received image has no solid phase (0\'s)')

    if dt is None:
        dt = spim.distance_transform_edt(im > 0)
        dt = spim.gaussian_filter(input=dt, sigma=0.5)

    # Get 'slices' into im for each pore region
    slices = spim.find_objects(im)

    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    Np = sp.size(Ps)
    p_coords = sp.zeros((Np, im.ndim), dtype=float)
    p_volume = sp.zeros((Np, ), dtype=float)
    p_dia_local = sp.zeros((Np, ), dtype=float)
    p_dia_global = sp.zeros((Np, ), dtype=float)
    p_label = sp.zeros((Np, ), dtype=int)
    p_area_surf = sp.zeros((Np, ), dtype=int)
    t_conns = []
    t_dia_inscribed = []
    t_area = []
    t_perimeter = []
    t_coords = []
    dt_shape = sp.array(dt.shape)
    # Start extracting size information for pores and throats

    for i in tqdm(Ps):
        pore = i - 1
        if slices[pore] is None:
            continue
        s = extend_slice(slices[pore], im.shape)
        sub_im = im[s]
        sub_dt = dt[s]
        pore_im = sub_im == i
        padded_mask = sp.pad(pore_im, pad_width=1, mode='constant')
        pore_dt = spim.distance_transform_edt(padded_mask)
        s_offset = sp.array([i.start for i in s])
        p_label[pore] = i
        p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
        p_volume[pore] = sp.sum(pore_im)
        p_dia_local[pore] = 2 * sp.amax(pore_dt)
        p_dia_global[pore] = 2 * sp.amax(sub_dt)
        p_area_surf[pore] = sp.sum(pore_dt == 1)
        im_w_throats = spim.binary_dilation(input=pore_im, structure=ball(1))
        im_w_throats = im_w_throats * sub_im
        Pn = sp.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > pore:
                t_conns.append([pore, j])
                vx = sp.where(im_w_throats == (j + 1))
                t_dia_inscribed.append(2 * sp.amax(sub_dt[vx]))
                t_perimeter.append(sp.sum(sub_dt[vx] < 2))
                t_area.append(sp.size(vx[0]))
                t_inds = tuple([i + j for i, j in zip(vx, s_offset)])
                temp = sp.where(dt[t_inds] == sp.amax(dt[t_inds]))[0][0]
                if im.ndim == 2:
                    t_coords.append(tuple((t_inds[0][temp], t_inds[1][temp])))
                else:
                    t_coords.append(
                        tuple((t_inds[0][temp], t_inds[1][temp],
                               t_inds[2][temp])))
    # Clean up values
    Nt = len(t_dia_inscribed)  # Get number of throats
    if im.ndim == 2:  # If 2D, add 0's in 3rd dimension
        p_coords = sp.vstack((p_coords.T, sp.zeros((Np, )))).T
        t_coords = sp.vstack((sp.array(t_coords).T, sp.zeros((Nt, )))).T

    net = {}
    net['pore.all'] = sp.ones((Np, ), dtype=bool)
    net['throat.all'] = sp.ones((Nt, ), dtype=bool)
    net['pore.coords'] = sp.copy(p_coords) * voxel_size
    net['pore.centroid'] = sp.copy(p_coords) * voxel_size
    net['throat.centroid'] = sp.array(t_coords) * voxel_size
    net['throat.conns'] = sp.array(t_conns)
    net['pore.label'] = sp.array(p_label)
    net['pore.volume'] = sp.copy(p_volume) * (voxel_size**3)
    net['throat.volume'] = sp.zeros((Nt, ), dtype=float)
    net['pore.diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.inscribed_diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.equivalent_diameter'] = 2 * (
        (3 / 4 * net['pore.volume'] / sp.pi)**(1 / 3))
    net['pore.extended_diameter'] = sp.copy(p_dia_global) * voxel_size
    net['pore.surface_area'] = sp.copy(p_area_surf) * (voxel_size)**2
    net['throat.diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.inscribed_diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.area'] = sp.array(t_area) * (voxel_size**2)
    net['throat.perimeter'] = sp.array(t_perimeter) * voxel_size
    net['throat.equivalent_diameter'] = ((sp.array(t_area) *
                                          (voxel_size**2))**(0.5))
    P12 = net['throat.conns']
    PT1 = (sp.sqrt(
        sp.sum(((p_coords[P12[:, 0]] - t_coords) * voxel_size)**2, axis=1)))
    PT2 = (sp.sqrt(
        sp.sum(((p_coords[P12[:, 1]] - t_coords) * voxel_size)**2, axis=1)))
    net['throat.total_length'] = PT1 + PT2
    PT1 = PT1 - p_dia_local[P12[:, 0]] / 2 * voxel_size
    PT2 = PT2 - p_dia_local[P12[:, 1]] / 2 * voxel_size
    net['throat.length'] = PT1 + PT2
    dist = (p_coords[P12[:, 0]] - p_coords[P12[:, 1]]) * voxel_size
    net['throat.direct_length'] = sp.sqrt(sp.sum(dist**2, axis=1))
    # Make a dummy openpnm network to get the conduit lengths
    pn = op.network.GenericNetwork()
    pn.update(net)
    pn.add_model(propname='throat.endpoints',
                 model=op_gm.throat_endpoints.spherical_pores,
                 pore_diameter='pore.inscribed_diameter',
                 throat_diameter='throat.inscribed_diameter')
    pn.add_model(propname='throat.conduit_lengths',
                 model=op_gm.throat_length.conduit_lengths)
    pn.add_model(propname='pore.area', model=op_gm.pore_area.sphere)
    net['throat.endpoints.head'] = pn['throat.endpoints.head']
    net['throat.endpoints.tail'] = pn['throat.endpoints.tail']
    net['throat.conduit_lengths.pore1'] = pn['throat.conduit_lengths.pore1']
    net['throat.conduit_lengths.pore2'] = pn['throat.conduit_lengths.pore2']
    net['throat.conduit_lengths.throat'] = pn['throat.conduit_lengths.throat']
    net['pore.area'] = pn['pore.area']
    prj = pn.project
    prj.clear()
    wrk = op.Workspace()
    wrk.close_project(prj)

    return net
Example #30
### Arrays for finding the average and stdv:
accfracs_p_collect = np.zeros((Nsteps, Nthr, Nr))
porefracs_collect = np.zeros((Nsteps, Nthr, Nr))
diffporefrac_collect = np.zeros((Nsteps, Nthr, Nr - 1))
differenceporefrac_collect = np.zeros((Nsteps, Nthr, Nr - 1))
poredistr_collect = np.zeros((Nsteps, Nthr, Nr - 1))
ball_elements = np.zeros((Nsteps, Nthr, Nr))

# Readying the structuring elements before reading the data
structuring_elements = []
ball_elements_stored = np.zeros(Nr)

for i in range(Nr):
    radius = radii[i]
    if isball == True:
        structuring_elements.append(morphology.ball(radius))
    if iscube == True:
        structuring_elements.append(morphology.cube(radius))
    ball_elements_stored[i] = np.sum(
        np.sum(np.sum(structuring_elements[i]))
    )  # Can do this outside of the loop, but it is probably not very costly anyways.

print('!!! timestepnumbers:', timestepnumbers)
# Can loop from here:
for timeind in range(Nsteps):
    timestep = timestepnumbers[timeind]
    infilename_vmat = infilename_totalbase + '_vox_matrix_timestep' + timestep + '.npy'  # _vox_matrix_timestep
    infilename_x = infilename_totalbase + '_x_timestep' + timestep + '.npy'
    print('TIMESTEP:', timestep)

    print('infilename_vmat:', infilename_vmat)
Example #31
def threshold(roi):
    """Thresholds the ROI, with options for various techniques as well as
    post-thresholding morphological filtering.
    
    Args:
        roi: Region of interest, given as [z, y, x].
    
    Returns:
        The thresholded region.
    """
    settings = config.roi_profile
    thresh_type = settings["thresholding"]
    size = settings["thresholding_size"]
    thresholded = roi
    roi_thresh = 0

    # various thresholding model
    if thresh_type == "otsu":
        try:
            roi_thresh = filters.threshold_otsu(roi, size)
            thresholded = roi > roi_thresh
        except ValueError as e:
            # np.histogram may give an error if there are any NaNs, so the
            # workaround is to set all elements in the ROI to False
            print(e)
            thresholded = roi > np.max(roi)
    elif thresh_type == "local":
        roi_thresh = np.copy(roi)
        for i in range(roi_thresh.shape[0]):
            roi_thresh[i] = filters.threshold_local(roi_thresh[i],
                                                    size,
                                                    mode="wrap")
        thresholded = roi > roi_thresh
    elif thresh_type == "local-otsu":
        # TODO: not working yet
        selem = morphology.disk(15)
        print(np.min(roi), np.max(roi))
        roi_thresh = np.copy(roi)
        roi_thresh = libmag.normalize(roi_thresh, -1.0, 1.0)
        print(roi_thresh)
        print(np.min(roi_thresh), np.max(roi_thresh))
        for i in range(roi.shape[0]):
            roi_thresh[i] = filters.rank.otsu(roi_thresh[i], selem)
        thresholded = roi > roi_thresh
    elif thresh_type == "random_walker":
        thresholded = segmenter.segment_rw(roi, size)

    # dilation/erosion, adjusted based on overall intensity
    thresh_mean = np.mean(thresholded)
    print("thresh_mean: {}".format(thresh_mean))
    selem_dil = None
    selem_eros = None
    if thresh_mean > 0.45:
        thresholded = morphology.erosion(thresholded, morphology.cube(1))
        selem_dil = morphology.ball(1)
        selem_eros = morphology.octahedron(1)
    elif thresh_mean > 0.35:
        thresholded = morphology.erosion(thresholded, morphology.cube(2))
        selem_dil = morphology.ball(2)
        selem_eros = morphology.octahedron(1)
    elif thresh_mean > 0.3:
        selem_dil = morphology.ball(1)
        selem_eros = morphology.cube(5)
    elif thresh_mean > 0.1:
        selem_dil = morphology.ball(1)
        selem_eros = morphology.cube(4)
    elif thresh_mean > 0.05:
        selem_dil = morphology.octahedron(2)
        selem_eros = morphology.octahedron(2)
    else:
        selem_dil = morphology.octahedron(1)
        selem_eros = morphology.octahedron(2)
    if selem_dil is not None:
        thresholded = morphology.dilation(thresholded, selem_dil)
    if selem_eros is not None:
        thresholded = morphology.erosion(thresholded, selem_eros)
    return thresholded
Example #32
def regionprops_3D(im):
    r"""
    Calculates various metrics for each labeled region in a 3D image.

    The ``regionprops`` method in **skimage** is very thorough for 2D images,
    but is a bit limited when it comes to 3D images, so this function aims
    to fill this gap.

    Parameters
    ----------
    im : array_like
        An image containing at least one labeled region.  If a boolean image
        is received, then the ``True`` voxels are treated as a single region
        labeled ``1``.  Regions labeled 0 are ignored in all cases.

    Returns
    -------
    An augmented version of the list returned by skimage's ``regionprops``.
    Information, such as ``volume``, can be found for region A using the
    following syntax: ``result[A-1].volume``.

    Notes
    -----
    This function may seem slow compared to the skimage version, but that is
    because they defer calculation of certain properties until they are
    accessed, while this one evaluates everything (including the deferred
    properties from skimage's ``regionprops``)

    Regions can be identified using a watershed algorithm, which can be a bit
    tricky to obtain desired results.  *PoreSpy* includes the SNOW algorithm,
    which may be helpful.

    """
    print('_' * 60)
    print('Calculating regionprops')

    results = regionprops(im, coordinates='xy')
    for i in tqdm(range(len(results))):
        mask = results[i].image
        mask_padded = sp.pad(mask, pad_width=1, mode='constant')
        temp = spim.distance_transform_edt(mask_padded)
        dt = extract_subsection(temp, shape=mask.shape)
        # ---------------------------------------------------------------------
        # Slice indices
        results[i].slice = results[i]._slice
        # ---------------------------------------------------------------------
        # Volume of regions in voxels
        results[i].volume = results[i].area
        # ---------------------------------------------------------------------
        # Volume of bounding box, in voxels
        results[i].bbox_volume = sp.prod(mask.shape)
        # ---------------------------------------------------------------------
        # Create an image of the border
        results[i].border = dt == 1
        # ---------------------------------------------------------------------
        # Create an image of the maximal inscribed sphere
        r = dt.max()
        inv_dt = spim.distance_transform_edt(dt < r)
        results[i].inscribed_sphere = inv_dt < r
        # ---------------------------------------------------------------------
        # Find surface area using marching cubes and analyze the mesh
        tmp = sp.pad(sp.atleast_3d(mask), pad_width=1, mode='constant')
        tmp = spim.convolve(tmp, weights=ball(1)) / 5
        verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
        results[i].surface_mesh_vertices = verts
        results[i].surface_mesh_simplices = faces
        area = mesh_surface_area(verts, faces)
        results[i].surface_area = area
        # ---------------------------------------------------------------------
        # Find sphericity
        vol = results[i].volume
        r = (3 / 4 / sp.pi * vol)**(1 / 3)
        a_equiv = 4 * sp.pi * (r)**2
        a_region = results[i].surface_area
        results[i].sphericity = a_equiv / a_region
        # ---------------------------------------------------------------------
        # Find skeleton of region
        results[i].skeleton = skeletonize_3d(mask)
        # ---------------------------------------------------------------------
        # Volume of convex image, equal to area in 2D, so just translating
        results[i].convex_volume = results[i].convex_area
        # ---------------------------------------------------------------------
        # Convert region grid to a graph
        am = grid_to_graph(*mask.shape, mask=mask)
        results[i].graph = am

    return results
Example #33
def noise_type_inbetween(inputVolume):
    
    inputVolume = inputVolume.astype(np.uint8)
    
    # generate filter for convolution 
    struct = morphology.ball(7)
    l = struct.shape[0]
    shell = np.zeros(struct.shape, dtype=int)
    
    for x in np.arange(l):
        for y in np.arange(l):
            nz = np.nonzero(struct[x,y,:])[0]
            if len(nz) == 1:
                shell[x,y,nz[0]] = 1
            elif len(nz) >= 2:
                shell[x,y,nz[0]] = 1
                shell[x,y,nz[-1]] = 1
                
        
    
    A = ndimage.convolve(inputVolume, shell)
    A = np.logical_and(A>=1, A<=3)

    # generate filter for convolution
    struct = ndimage.generate_binary_structure(3, 1)
    
    struct = ndimage.iterate_structure(struct, 15).astype(int)
    
    
    struct = morphology.ball(15)
    l = struct.shape[0]
    shell = np.zeros(struct.shape, dtype=int)
    
    for x in np.arange(l):
        for y in np.arange(l):
            nz = np.nonzero(struct[x,y,:])[0]
            if len(nz) == 1:
                shell[x,y,nz[0]] = 1
            elif len(nz) >= 2:
                shell[x,y,nz[0]] = 1
                shell[x,y,nz[-1]] = 1
                

    
    B = ndimage.convolve(inputVolume, shell)
    
    B = np.logical_and(B>=1, B<=3)
    
    V = np.logical_or(A, B)
    
    
    # random zero out entries
    mask = np.random.random_sample(V.shape)
    
    mask = mask >= 0.9
    
    V = mask * V
    
    V_mid = ndimage.binary_dilation(V)
    V_out = ndimage.binary_dilation(V_mid)
    
    V = V.astype(np.uint8) + V_mid.astype(np.uint8) + V_out.astype(np.uint8)
    
    # add another mask?
    mask2 = np.random.random_sample(V.shape)
    
    mask2 = mask2 >= 0.5
    
    V = mask2* V
    
    
    return V
Example #34
        xyz = np.asarray(
            [points["x"].values, points["y"].values,
             points["z"].values]).T  #cells are counted in horizontal volumes
        # init empty vol
        cell_map = np.zeros((z_dim, y_dim, x_dim)).astype('uint16')
        #fill volume
        for x, y, z in xyz:
            try:
                cell_map[z - 1:z + 2, y,
                         x] = 5000  # z dilation of a single plane
            except Exception as e:
                # Some cells will fall outside the volume - just how clearmap works
                print(e)
        #apply x y dilation
        r = 2
        selem = ball(r)[int(r / 2)]
        cell_map = np.asarray([
            cv2.dilate(cell_map[i], selem, iterations=1)
            for i in range(cell_map.shape[0])
        ])

        done_files = set([int(z) for z in os.listdir(progress_dir)])
        all_files = set(range(vol.bounds.minpt.z, vol.bounds.maxpt.z + 1))

        to_upload = [int(z) for z in list(all_files.difference(done_files))]
        to_upload.sort()
        print(f"Have {len(to_upload)} planes to upload")
        with ProcessPoolExecutor(max_workers=16) as executor:
            executor.map(process_slice, to_upload)

    elif step == 'step2':  # downsampling
Example #35
img = irtk.imread( args.img, dtype='float32', force_neurological=True ).rescale(0,1000)

if args.thorax:
    seg[seg==4] = 4  # no liver

# crop
x_min,y_min,z_min,x_max,y_max,z_max = (seg > 0).bbox()
tmp_seg = seg[max(0,z_min-args.narrow_band-1):min(seg.shape[0],z_max+args.narrow_band+1+1),
              max(0,y_min-args.narrow_band-1):min(seg.shape[1],y_max+args.narrow_band+1+1),
              max(0,x_min-args.narrow_band-1):min(seg.shape[2],x_max+args.narrow_band+1+1)]
tmp_img = img[max(0,z_min-args.narrow_band-1):min(img.shape[0],z_max+args.narrow_band+1+1),
              max(0,y_min-args.narrow_band-1):min(img.shape[1],y_max+args.narrow_band+1+1),
              max(0,x_min-args.narrow_band-1):min(img.shape[2],x_max+args.narrow_band+1+1)]

background = (nd.binary_dilation( tmp_seg>0,
                                  structure=morphology.ball(args.narrow_band) ) == 0).astype('int32')

if args.thorax:
    tmp_seg[background>0] = 4
else:
    tmp_seg[background>0] = 5

if args.debug:
    debug_seg = tmp_seg.transform(target=img,interpolation='nearest')
    debug_seg[irtk.largest_connected_component(debug_seg==0)>0] = debug_seg.max()
    irtk.imwrite("debug_seg.nii.gz",debug_seg)
    irtk.imwrite("debug_background.nii.gz",debug_seg!=5)

tmp_img = tmp_img.rescale(-1,1)
labels = random_walker( tmp_img.view(np.ndarray),
                        tmp_seg.view(np.ndarray),
Example #36
def segment_images(inpDir, outDir, config_data):
    """ Workflow for data with shell like shapes 
    such as lamin B1 (interphase-specific)

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : path to the configuration file
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)
        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']
        structure_img_smooth = image_smoothing_gaussian_3d(
            struct_img, sigma=gaussian_smoothing_sigma)
        middle_frame_method = config_data['middle_frame_method']
        mid_z = get_middle_frame(structure_img_smooth,
                                 method=middle_frame_method)
        f2_param = config_data['f2_param']
        bw_mid_z = filament_2d_wrapper(structure_img_smooth[mid_z, :, :],
                                       f2_param)
        hole_max = config_data['hole_max']
        hole_min = config_data['hole_min']
        bw_fill_mid_z = hole_filling(bw_mid_z, hole_min, hole_max)
        seed = get_3dseed_from_mid_frame(
            np.logical_xor(bw_fill_mid_z, bw_mid_z), struct_img.shape, mid_z,
            hole_min)
        bw_filled = watershed(
            struct_img, seed.astype(int), watershed_line=True) > 0
        seg = np.logical_xor(bw_filled, dilation(bw_filled, selem=ball(1)))
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f), metadata=br.read_metadata())
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
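
For reference, a minimal sketch of a config_data mapping covering exactly the keys this workflow reads is shown below; the key names come from the code above, but every value (and the middle-frame method name) is an illustrative assumption, not a tuned parameter.

# Hypothetical configuration; keys match the lookups above, values are placeholders.
config_data = {
    "intensity_scaling_param": [0.5, 15],   # passed to intensity_normalization
    "gaussian_smoothing_sigma": 1,          # sigma for image_smoothing_gaussian_3d
    "middle_frame_method": "intensity",     # assumed method name for get_middle_frame
    "f2_param": [[1.5, 0.16]],              # [scale, cutoff] pairs for filament_2d_wrapper
    "hole_max": 1600,                       # upper bound for hole_filling
    "hole_min": 1,                          # lower bound for hole_filling
}
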
def test_get_reconstructed_vasculature_3d_ball():
    radius = 6
    original = morphology.ball(radius)
    dict_nodes_radius = _helper_radius(original, radius)
    predicted = radius_skeleton.get_reconstructed_vasculature(dict_nodes_radius, original.shape)
    nose.tools.assert_equal(metrics.f1_score(original, predicted), 1)
Ejemplo n.º 38
0
def generate_surface_marching_cubes(molecule, remove_hoh=False, scaling=1.,
                                    probe_radius=1.4):
    """Generates a molecular surface mesh using the marching_cubes
    method from scikit-image. Ignores hydrogens present in the molecule.

    Parameters
    ----------
    molecule : oddt.toolkit.Molecule object
        Molecule for which the surface will be generated

    remove_hoh : bool (default = False)
        If True, remove waters from the molecule before generating the surface.
        Requires molecule.protein to be set to True.

    scaling : float (default = 1.0)
        Expands the grid in which computation is done by a factor of scaling.
        Results in a more accurate representation of the surface, but increases
        computation time.

    probe_radius : float (default = 1.4)
        Radius of a ball used to patch up holes inside the molecule
        resulting from some interatomic distances being larger
        (usually in proteins). Effectively reduces the surface to one
        accessible by other molecules of radius smaller than probe_radius.

    Returns
    -------
    verts : numpy array
        Spatial coordinates for mesh vertices.

    faces : numpy array
        Faces are defined by referencing vertices from verts.
    """
    # Input validation
    if not isinstance(molecule, oddt.toolkit.Molecule):
        raise TypeError('molecule needs to be of type oddt.toolkit.Molecule')
    if not (isinstance(probe_radius, Number) and probe_radius >= 0):
        raise ValueError('probe_radius needs to be a positive number')

    # Removing waters and hydrogens
    atom_dict = molecule.atom_dict
    atom_dict = atom_dict[atom_dict['atomicnum'] != 1]
    if remove_hoh:
        if molecule.protein is not True:
            raise ValueError('Residue names are needed for water removal, '
                             'molecule.protein property must be set to True')
        no_hoh = atom_dict['resname'] != 'HOH'
        atom_dict = atom_dict[no_hoh]

    # Take a molecule's coordinates and atom radii and scale if necessary
    coords = atom_dict['coords'] * scaling
    radii = atom_dict['radius'] * scaling

    # More input validation
    if radii.min() < 1:
        raise ValueError('Scaling times the radius of the smallest atom must '
                         'be larger than 1')
    # Create a ball for each atom in the molecule
    ball_dict = {radius: ball(radius, dtype=bool) for radius in set(radii)}
    ball_radii = np.array([ball_dict[radius].shape[0] for radius in radii])

    # Transform the coordinates because the grid starts at (0, 0, 0)
    min_coords = np.min(coords, axis=0)
    max_rad = np.max(ball_radii, axis=0)
    adjusted = np.round(coords - min_coords + max_rad * 5).astype(np.int64)
    offset = adjusted[0] - coords[0]

    # Calculate boundaries in the grid for each ball.
    ball_coord_min = (adjusted.T - np.floor(ball_radii / 2).astype(np.int64)).T
    ball_coord_max = (ball_coord_min.T + ball_radii).T

    # Create the grid
    grid = np.zeros(shape=ball_coord_max.max(axis=0) + int(8 * scaling), dtype=bool)

    # Place balls in grid
    for radius, coord_min, coord_max in zip(radii, ball_coord_min, ball_coord_max):
        grid[coord_min[0]:coord_max[0],
             coord_min[1]:coord_max[1],
             coord_min[2]:coord_max[2]] += ball_dict[radius]
    spacing = (1 / scaling,) * 3

    # Hole-filling with morphological closing
    grid = binary_closing(grid, ball(probe_radius * 2 * scaling))

    # Marching cubes
    verts, faces = marching_cubes(grid, level=0, spacing=spacing)[:2]

    # Verts already scaled by the marching cubes function (spacing parameter)
    # Only need to scale the offset
    # Results in skimage version lower than 0.11 are offset by 1 in each direction
    if LooseVersion(skimage_version) < LooseVersion('0.11'):
        verts += 1 / scaling
    return verts - offset / scaling, faces
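
As a minimal, self-contained sketch of the same idea (stamp a boolean ball into a grid per atom, close gaps the probe cannot enter, then extract the mesh with marching cubes), assuming only numpy and scikit-image and toy coordinates rather than a real molecule:

import numpy as np
from skimage.morphology import ball, binary_closing
from skimage.measure import marching_cubes

# Toy "molecule": three atom centres and radii on a small voxel grid.
centers = [(20, 20, 20), (26, 20, 20), (23, 25, 20)]
radii = [4, 3, 3]

grid = np.zeros((48, 48, 48), dtype=bool)
for (x, y, z), r in zip(centers, radii):
    grid[x - r:x + r + 1, y - r:y + r + 1, z - r:z + r + 1] |= ball(r, dtype=bool)

# Morphological closing patches narrow gaps, mimicking the probe_radius step above.
grid = binary_closing(grid, ball(3))
verts, faces = marching_cubes(grid.astype(np.float32), level=0.5)[:2]
print(verts.shape, faces.shape)
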
Ejemplo n.º 39
0
def colocalize_blobs(roi, blobs, thresh=None):
    """Co-localize blobs from different channels based on surrounding
    intensities.
    
    Thresholds for detection are first identified in each channel by taking
    the blobs in the given channel, finding the surrounding intensities,
    and taking a low (5th) percentile. Then for each channel, the
    surrounding intensities of blobs in that channel are compared with
    the thresholds in the other channels. Blobs exceeding any given
    threshold are considered to co-localize in that channel.
    
    Args:
        roi (:obj:`np.ndarray`): Region of interest as a 3D+channel array.
        blobs (:obj:`np.ndarray`): Blobs as a 2D array in the format
            ``[[z, y, x, radius, confirmation, truth, channel...], ...]``.
        thresh (int, float, str): Threshold percentile of intensities from
            pixels surrounding each blob in the given channel. Use "min"
            to instead take the minimum average intensity of all blobs
            in the channel. Defaults to None to use "min".

    Returns:
        :obj:`np.ndarray`: 2D Numpy array of same length as ``blobs`` with
        a column for each channel, where 1 indicates that the corresponding
        blob has signal present in the given channel at the blob's
        location, and 0 indicates insufficient signal.

    """
    if blobs is None or roi is None or len(roi.shape) < 4:
        return None
    if thresh is None:
        thresh = "min"
    print("Colocalizing blobs based on image intensity across channels")
    threshs = []
    selem = morphology.ball(2)

    # find only blobs in ROI since blobs list may include blobs from immediate
    # surrounds, but ROI is not available for them
    blobs_roi, blobs_roi_mask = detector.get_blobs_in_roi(blobs, (0, 0, 0),
                                                          roi.shape[:3],
                                                          reverse=False)
    blobs_chl = detector.Blobs.get_blobs_channel(blobs_roi)
    blobs_range_chls = []

    # get labeled masks of blobs for each channel and threshold intensities
    mask_roi = np.ones(roi.shape[:3], dtype=int)
    mask_roi_chls = []
    for chl in range(roi.shape[3]):
        # label a mask with blob indices surrounding each blob
        blobs_chl_mask = np.isin(blobs_chl, chl)
        blobs_range = np.where(blobs_chl_mask)[0]
        blobs_range_chls.append(blobs_range)
        mask = np.copy(mask_roi) * -1
        mask[tuple(
            libmag.coords_for_indexing(
                blobs_roi[blobs_chl_mask, :3].astype(int)))] = blobs_range
        mask = morphology.dilation(mask, selem=selem)
        mask_roi_chls.append(mask)

        if thresh == "min":
            # set minimum average surrounding intensity of all blobs as thresh
            threshs.append(None if len(blobs_range) == 0 else np.amin(
                [np.mean(roi[mask == b, chl]) for b in blobs_range]))
        else:
            # set a percentile of intensities surrounding all blobs in channel
            # as threshold for that channel, or the whole ROI if no blobs
            mask_blobs = mask >= 0
            roi_mask = roi if np.sum(mask_blobs) < 1 else roi[mask_blobs, chl]
            threshs.append(np.percentile(roi_mask, thresh))

    channels = np.unique(
        detector.Blobs.get_blobs_channel(blobs_roi)).astype(int)
    colocs_roi = np.zeros((blobs_roi.shape[0], roi.shape[3]), dtype=np.uint8)
    for chl in channels:
        # get labeled mask of blobs in the given channel
        mask = mask_roi_chls[chl]
        blobs_range = blobs_range_chls[chl]
        for chl_other in channels:
            if threshs[chl_other] is None: continue
            for blobi in blobs_range:
                # find surrounding intensity of blob in another channel
                mask_blob = mask == blobi
                blob_avg = np.mean(roi[mask_blob, chl_other])
                if config.verbose:
                    print(blobi,
                          detector.Blobs.get_blobs_channel(blobs_roi[blobi]),
                          blobs_roi[blobi, :3], blob_avg, threshs[chl_other])
                if blob_avg >= threshs[chl_other]:
                    # intensities in another channel around blob's position
                    # is above that channel's threshold
                    colocs_roi[blobi, chl_other] = 1

    # create array for all blobs including those outside ROI
    colocs = np.zeros((blobs.shape[0], roi.shape[3]), dtype=np.uint8)
    colocs[blobs_roi_mask] = colocs_roi
    if config.verbose:
        for i, (blob, coloc) in enumerate(zip(blobs_roi, colocs)):
            print(i, detector.Blobs.get_blobs_channel(blob), blob[:3], coloc)
    return colocs
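
Stripped of the detector/config machinery, the thresholding idea above reduces to: grow a small ball around each blob, average the surrounding intensity per channel, and compare against a low percentile. A minimal sketch on synthetic data (all names and values below are illustrative):

import numpy as np
from skimage import morphology

rng = np.random.default_rng(0)
roi = rng.random((32, 32, 32, 2))               # synthetic ROI: z, y, x, channel
blobs = np.array([[10, 10, 10], [20, 22, 18]])  # blob centres (z, y, x)

selem = morphology.ball(2)
mask = np.full(roi.shape[:3], -1, dtype=int)
mask[tuple(blobs.T)] = np.arange(len(blobs))    # seed each blob with its index
mask = morphology.dilation(mask, selem)         # label a small neighbourhood per blob

for chl in range(roi.shape[3]):
    surround = [roi[mask == i, chl].mean() for i in range(len(blobs))]
    thresh = np.percentile(surround, 5)         # low percentile as the detection threshold
    print(chl, np.round(surround, 3), round(float(thresh), 3))
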
Ejemplo n.º 40
0
def main():
    start_time = datetime.now()
    args = surfcut_parser().parse_args()

    image_path = args.image_path

    print("Loading data")
    data = tifffile.imread(image_path)

    print("Converting to 8 bit")
    data = data.astype(np.uint8)

    print("Smoothing")
    filtered = np.copy(data)
    for idx, plane in enumerate(filtered):
        filtered[idx] = filters.gaussian_filter(plane, args.gauss_sigma)

    print("Thresholding")
    binary = filtered > args.threshold

    del filtered
    print("Detecting edges")
    binary = edge_detect(binary)

    if args.morphology:
        print("Eroding to depth")
        eroded_surface = binary_erosion(binary, selem=ball(args.shift))

        print("Dilating and eroding")
        dilated = binary_dilation(eroded_surface, selem=ball(args.depth))
        eroded = binary_erosion(eroded_surface, selem=ball(args.depth))
        del eroded_surface

        print("Obtaining border")
        border = dilated ^ eroded

        print("Masking data")
        masked = data * border

    else:
        print("Shifting binary object down")
        shift_mag = int(args.shift + (args.depth / 2))
        down_shift = binary[0:-shift_mag]
        padding = np.zeros((shift_mag, binary.shape[1], binary.shape[2]))
        down_shift = np.append(padding, down_shift, axis=0)

        print("Shifting binary object up")
        shift_mag = int(args.shift - (args.depth / 2))
        up_shift = binary[0:-shift_mag]
        padding = np.zeros((shift_mag, binary.shape[1], binary.shape[2]))
        up_shift = np.append(padding, up_shift, axis=0)
        del binary

        print("Generating mask")
        mask = up_shift - down_shift
        del up_shift
        del down_shift
        mask = mask > 0

        print("Masking data")
        masked = data * mask

    print("Projecting data")
    projection = np.max(masked, axis=0)

    print(f"Finished. Total time taken: {format(datetime.now() - start_time)}")

    if not args.no_viewer:
        print("Opening viewer")
        if args.morphology:
            view(data, border, projection)
        else:
            view(data, masked, projection)
Ejemplo n.º 41
0
#!/usr/bin/python3

import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import ball
from quantimpy import minkowski as mk
from quantimpy import morphology as mp
from math import prod

image0 = np.zeros([64, 64, 64], dtype=bool)
image0[8:57, 8:57, 8:57] = ball(24, dtype=bool)
res0 = np.array([2.0, 2.0, 2.0])
ext0 = [0, image0.shape[0] * res0[0], 0, image0.shape[1] * res0[1]]

image1 = np.zeros([128, 128, 128], dtype=bool)
image1[16:113, 16:113, 16:113] = ball(48, dtype=bool)

image2 = np.zeros([256, 256, 256], dtype=bool)
image2[32:225, 32:225, 32:225] = ball(96, dtype=bool)
res2 = np.array([0.5, 0.5, 0.5])
ext2 = [0, image2.shape[0] * res2[0], 0, image2.shape[1] * res2[1]]

image3 = np.zeros([128, 256, 256], dtype=bool)
image3[16:113, 32:225, 32:225] = ball(96, dtype=bool)[0::2, :]
res3 = np.array([1.0, 0.5, 0.5])
ext3 = [0, image3.shape[0] * res3[0], 0, image3.shape[1] * res3[1]]

#plt.gray()
#plt.imshow(image0[:,:,32],extent=ext0)
#plt.show()
#
Ejemplo n.º 42
0
def RSA(im, radius, volume_fraction=1, mode='extended'):
    r"""
    Generates a sphere or disk packing using Random Sequential Addition, which
    ensures that spheres do not overlap but does not guarantee they are
    tightly packed.

    Each sphere is filled with 1's, and the center is marked with a 2.  This
    allows easy boolean masking to extract only the centers, which can be
    converted to coordinates using ``scipy.where`` and used for other purposes.

    Parameters
    ----------
    im : ND-array
        The image into which the spheres should be inserted.  By accepting an
        image rather than a shape, it allows users to insert spheres into an
        already existing image.  To begin the process, start with an array of
        zero such as ``im = np.zeros([200, 200], dtype=bool)``.
    radius : int
        The radius of the disk or sphere to insert.
    volume_fraction : scalar
        The fraction of the image that should be filled with spheres.  The
        spheres are added as 1's, so each sphere addition increases the
        ``volume_fraction`` until the specified limit is reached.
    mode : string
        Controls how the edges of the image are handled.  Options are:

        'extended' - Spheres are allowed to extend beyond the edge of the image

        'contained' - Spheres are all completely within the image

        'periodic' - The portion of a sphere that extends beyond the image is
        inserted into the opposite edge of the image (Not Implemented Yet!)

    References
    ----------
    [1] Random Heterogeneous Materials, S. Torquato (2001)

    """
    # Note: The 2D vs 3D splitting of this just me being lazy...I can't be
    # bothered to figure it out programmatically right now
    # TODO: Ideally the spheres should be added periodically
    print(78*'―')
    print('RSA: Adding spheres of size ' + str(radius))
    d2 = len(im.shape) == 2
    mrad = 2*radius + 1
    if d2:
        im_strel = disk(radius)
        mask_strel = disk(mrad)
    else:
        im_strel = ball(radius)
        mask_strel = ball(mrad)
    if sp.any(im > 0):
        mask = ps.tools.fftmorphology(im > 0, im_strel > 0, mode='dilate')
        mask = mask.astype(int)
    else:
        mask = sp.zeros_like(im)
    if mode == 'contained':
        mask = _remove_edge(mask, radius)
    elif mode == 'extended':
        pass
    elif mode == 'periodic':
        raise Exception('Periodic edges are not implemented yet')
    else:
        raise Exception('Unrecognized mode: ' + mode)
    vf = im.sum()/im.size
    free_spots = sp.argwhere(mask == 0)
    i = 0
    while vf <= volume_fraction and len(free_spots) > 0:
        choice = sp.random.randint(0, len(free_spots), size=1)
        if d2:
            [x, y] = free_spots[choice].flatten()
            im = _fit_strel_to_im_2d(im, im_strel, radius, x, y)
            mask = _fit_strel_to_im_2d(mask, mask_strel, mrad, x, y)
            im[x, y] = 2
        else:
            [x, y, z] = free_spots[choice].flatten()
            im = _fit_strel_to_im_3d(im, im_strel, radius, x, y, z)
            mask = _fit_strel_to_im_3d(mask, mask_strel, mrad, x, y, z)
            im[x, y, z] = 2
        free_spots = sp.argwhere(mask == 0)
        vf = im.sum()/im.size
        i += 1
    if vf > volume_fraction:
        print('Volume Fraction', volume_fraction, 'reached')
    if len(free_spots) == 0:
        print('No more free spots', 'Volume Fraction', vf)
    return im
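
A hedged usage sketch for the function above. It assumes the module-level helpers RSA references (disk, ball, ps.tools.fftmorphology, _remove_edge, _fit_strel_to_im_2d/3d) are importable alongside it; an integer image is used here so the centre markers (value 2) survive the assignments.

import numpy as np

im = np.zeros([100, 100, 100], dtype=int)       # empty image to pack spheres into
im = RSA(im, radius=6, volume_fraction=0.2, mode='contained')

centers = np.argwhere(im == 2)                  # sphere centres were marked with 2
print(len(centers), 'spheres placed; volume fraction =', (im > 0).sum() / im.size)
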
Ejemplo n.º 43
0
def func(path, output):

    cwd = "/".join(
        os.path.realpath(__file__).replace("\\", "/").split("/")[:-1]) + "/"

    #print(cwd)
    #print(" :) ")

    name = cwd + "model.h5"
    #name = "\.model.h5"

    # get model
    get_model()

    # load model
    model = load_model(name, compile=False)

    print("preprocessing...")
    nib_volume = nib.load(path)
    new_spacing = [1., 1., 1.]
    resampled_volume = resample_to_output(nib_volume, new_spacing, order=1)
    data = resampled_volume.get_data().astype('float32')

    curr_shape = data.shape

    # resize to get (512, 512) output images
    img_size = 512
    data = zoom(data,
                [img_size / data.shape[0], img_size / data.shape[1], 1.0],
                order=1)

    # intensity normalization
    intensity_clipping_range = [-150, 250]  # HU clipping limits (Pravdaray's configs)
    data = intensity_normalization(
        volume=data, intensity_clipping_range=intensity_clipping_range)

    # fix orientation
    data = np.rot90(data, k=1, axes=(0, 1))
    data = np.flip(data, axis=0)

    print("predicting...")
    # predict on data
    pred = np.zeros_like(data).astype(np.float32)
    for i in tqdm(range(data.shape[-1]), "pred: "):
        pred[..., i] = model.predict(
            np.expand_dims(np.expand_dims(np.expand_dims(data[..., i], axis=0),
                                          axis=-1),
                           axis=0))[0, ..., 1]
    del data

    # threshold
    pred = (pred >= 0.4).astype(int)

    # fix orientation back
    pred = np.flip(pred, axis=0)
    pred = np.rot90(pred, k=-1, axes=(0, 1))

    print("resize back...")
    # resize back from 512x512
    pred = zoom(pred,
                [curr_shape[0] / img_size, curr_shape[1] / img_size, 1.0],
                order=1)
    pred = (pred >= 0.5).astype(np.float32)

    print("morphological post-processing...")
    # morphological post-processing
    # 1) first erode
    pred = binary_erosion(pred.astype(bool), ball(3)).astype(np.float32)

    # 2) keep only largest connected component
    labels = label(pred)
    regions = regionprops(labels)
    area_sizes = []
    for region in regions:
        area_sizes.append([region.label, region.area])
    area_sizes = np.array(area_sizes)
    tmp = np.zeros_like(pred)
    tmp[labels == area_sizes[np.argmax(area_sizes[:, 1]), 0]] = 1
    pred = tmp.copy()
    del tmp, labels, regions, area_sizes

    # 3) dilate
    pred = binary_dilation(pred.astype(bool), ball(3))

    # 4) remove small holes
    pred = remove_small_holes(pred.astype(bool),
                              area_threshold=0.001 *
                              np.prod(pred.shape)).astype(np.float32)

    print("saving...")
    pred = pred.astype(np.uint8)
    img = nib.Nifti1Image(pred, affine=resampled_volume.affine)
    resampled_lab = resample_from_to(img, nib_volume, order=0)
    nib.save(resampled_lab, output)
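
The "keep only largest connected component" step above can be written more compactly; a small equivalent sketch using only skimage, applicable to any binary 3D mask:

import numpy as np
from skimage.measure import label, regionprops

def keep_largest_component(pred):
    # Label connected components and zero out everything but the largest one.
    labels = label(pred.astype(bool))
    if labels.max() == 0:
        return pred
    largest = max(regionprops(labels), key=lambda r: r.area).label
    return (labels == largest).astype(pred.dtype)
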
Ejemplo n.º 44
0
def brain_masker(in_file, out_file=None, padding=5):
    """Use grayscale morphological operations to obtain a quick mask of EPI data."""
    from pathlib import Path
    import re
    import nibabel as nb
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball
    from skimage.filters import threshold_otsu
    from skimage.segmentation import random_walker

    # Load data
    img = nb.load(in_file)
    data = np.pad(img.get_fdata(dtype="float32"), padding)
    hdr = img.header.copy()

    # Cleanup background and invert intensity
    data[data < np.percentile(data[data > 0], 15)] = 0
    data[data > 0] -= data[data > 0].min()
    datainv = -data.copy()
    datainv -= datainv.min()
    datainv /= datainv.max()

    # Grayscale closing to enhance CSF layer surrounding the brain
    closed = ndimage.grey_closing(datainv, structure=ball(1))
    denoised = ndimage.median_filter(closed, footprint=ball(3))
    th = threshold_otsu(denoised)

    # Rough binary mask
    closedbin = np.zeros_like(closed)
    closedbin[closed < th] = 1
    closedbin = ndimage.binary_opening(closedbin, ball(3)).astype("uint8")

    label_im, nb_labels = ndimage.label(closedbin)
    sizes = ndimage.sum(closedbin, label_im, range(nb_labels + 1))
    mask = sizes == sizes.max()
    closedbin = mask[label_im]
    closedbin = ndimage.binary_closing(closedbin, ball(5)).astype("uint8")

    # Prepare markers
    markers = np.ones_like(closed, dtype="int8") * 2
    markers[1:-1, 1:-1, 1:-1] = 0
    closedbin_dil = ndimage.binary_dilation(closedbin, ball(5))
    markers[closedbin_dil] = 0
    closed_eroded = ndimage.binary_erosion(closedbin, structure=ball(5))
    markers[closed_eroded] = 1

    # Run random walker
    closed[closed > 0.0] -= closed[closed > 0.0].min()
    segtarget = (2 * closed / closed.max()) - 1.0
    labels = random_walker(segtarget,
                           markers,
                           spacing=img.header.get_zooms()[:3],
                           return_full_prob=True)[..., padding:-padding,
                                                  padding:-padding,
                                                  padding:-padding]

    out_mask = Path(out_file or "brain_mask.nii.gz").absolute()

    hdr.set_data_dtype("uint8")
    img.__class__((labels[0, ...] >= 0.5).astype("uint8"), img.affine,
                  hdr).to_filename(out_mask)

    out_probseg = re.sub(r"\.nii(\.gz)$", r"_probseg.nii\1",
                         str(out_mask).replace("_mask.", "."))
    hdr.set_data_dtype("float32")
    img.__class__((labels[0, ...]), img.affine, hdr).to_filename(out_probseg)

    out_brain = re.sub(r"\.nii(\.gz)$", r"_brainmasked.nii\1",
                       str(out_mask).replace("_mask.", "."))
    data = np.asanyarray(img.dataobj)
    data[labels[0, ...] < 0.5] = 0
    img.__class__(data, img.affine, img.header).to_filename(out_brain)

    return str(out_brain), str(out_probseg), str(out_mask)
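
A possible call for the helper above; the filenames are only illustrative:

brain, probseg, mask = brain_masker("mean_epi.nii.gz", out_file="mean_epi_mask.nii.gz")
print(brain, probseg, mask)
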
def test_get_radius_3d_ball():
    radius = 5
    _helper_radius(morphology.ball(radius), radius)
Ejemplo n.º 46
0
def Workflow_cetn2(
    struct_img: np.ndarray,
    rescale_ratio: float = -1,
    output_type: str = "default",
    output_path: Union[str, Path] = None,
    fn: Union[str, Path] = None,
    output_func=None,
):
    """
    classic segmentation workflow wrapper for structure CETN2

    Parameter:
    -----------
    struct_img: np.ndarray
        the 3D image to be segmented
    rescale_ratio: float
        an optional parameter to allow rescale the image before running the
        segmentation functions, default is no rescaling
    output_type: str
        select how to handle output. Currently, four types are supported:
        1. default: the result will be saved at output_path, whose filename is
            the original name without extension + "_struct_segmentation.tiff"
        2. array: the segmentation result will be simply returned as a numpy array
        3. array_with_contour: segmentation result will be returned together with
            the contour of the segmentation
        4. customize: pass in an extra output_func to do a special save. All the
            intermediate results, names of these results, the output_path, and the
            original filename (without extension) will be passed in to output_func.
    """
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param_seg = [12, 160, 300, 2000]
    intensity_norm_param_peak = [5000]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_3d_sigma = 1
    dot_3d_cutoff = 0.04
    minArea = 3
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img_for_seg = intensity_normalization(
        struct_img.copy(), scaling_param=intensity_norm_param_seg
    )
    struct_img_for_peak = intensity_normalization(
        struct_img.copy(), scaling_param=intensity_norm_param_peak
    )

    out_img_list.append(struct_img_for_seg.copy())
    out_name_list.append("im_norm")

    # rescale if needed
    if rescale_ratio > 0:
        struct_img_for_seg = zoom(
            struct_img_for_seg, (1, rescale_ratio, rescale_ratio), order=2
        )

        struct_img_for_seg = (struct_img_for_seg - struct_img_for_seg.min() + 1e-8) / (
            struct_img_for_seg.max() - struct_img_for_seg.min() + 1e-8
        )

        struct_img_for_peak = zoom(
            struct_img_for_peak, (1, rescale_ratio, rescale_ratio), order=2
        )

        struct_img_for_peak = (
            struct_img_for_peak - struct_img_for_peak.min() + 1e-8
        ) / (struct_img_for_peak.max() - struct_img_for_peak.min() + 1e-8)

        gaussian_smoothing_truncate_range = (
            gaussian_smoothing_truncate_range * rescale_ratio
        )

    # smoothing with gaussian filter
    structure_img_smooth_for_seg = image_smoothing_gaussian_slice_by_slice(
        struct_img_for_seg,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range,
    )

    out_img_list.append(structure_img_smooth_for_seg.copy())
    out_name_list.append("im_smooth")

    ###################
    # core algorithm
    ###################

    # step 1: LOG 3d
    response = dot_3d(structure_img_smooth_for_seg, log_sigma=dot_3d_sigma)
    bw = response > dot_3d_cutoff
    bw = remove_small_objects(bw > 0, min_size=minArea, connectivity=1, in_place=False)

    out_img_list.append(bw.copy())
    out_name_list.append("interm_mask")

    # step 2: 'local_maxi + watershed' for cell cutting
    local_maxi = peak_local_max(
        struct_img_for_peak, labels=label(bw), min_distance=2, indices=False
    )

    out_img_list.append(local_maxi.copy())
    out_name_list.append("interm_local_max")

    distance = distance_transform_edt(bw)
    im_watershed = watershed(
        -distance,
        label(dilation(local_maxi, selem=ball(1))),
        mask=bw,
        watershed_line=True,
    )

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(
        im_watershed, min_size=minArea, connectivity=1, in_place=False
    )

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append("bw_final")

    if output_type == "default":
        # the default final output, simply save it to the output path
        save_segmentation(seg, False, Path(output_path), fn)
    elif output_type == "customize":
        # the hook for passing in a customized output function
        # use "out_img_list" and "out_name_list" in your hook to
        # customize your output functions
        output_func(out_img_list, out_name_list, Path(output_path), fn)
    elif output_type == "array":
        return seg
    elif output_type == "array_with_contour":
        return (seg, generate_segmentation_contour(seg))
    else:
        raise NotImplementedError(f"invalid output type: {output_type}")
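
A hedged usage sketch, assuming a single-channel 3D stack (ZYX) already on disk; the filename is hypothetical and tifffile is used only as one convenient way to load it:

import numpy as np
from tifffile import imread

struct_img = imread("cetn2_stack.tiff").astype(np.float32)   # hypothetical ZYX stack
seg = Workflow_cetn2(struct_img, output_type="array")
print(seg.shape, seg.dtype, seg.max())
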
Ejemplo n.º 47
0
def vessel_seg_post_proc():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = MPI.COMM_WORLD.Get_size()
    name = MPI.Get_processor_name()
    start_time = int(time.time())
    # Get the list of all segmented sub-volume files.
    input_files = sorted(glob(outimage_file_location + '/*subvol*.h5'))
    if not input_files:
        print("*** Did not find any sub-volume segmented file in location %s ***" % outimage_file_location)
        return
    
    # Get the "Vessel" label index
    vessel_label_defined, vessel_label_idx = save_prob_map('Vessel')
    if vessel_label_defined == False:
        print("Vessel class is not labeled in the Ilastik training data file, no processing will take place")
        return
    # Shape/Dimension of the volume image is available in the last sub-volume file.
    volume_ds_shape = np.zeros((3,), dtype='uint64')
    # Last file has the dimensions for the volume.
    f = h5py.File(input_files[-1], 'r')
    volshape = f['orig_indices']
    volume_ds_shape[0] = volshape[1]
    volume_ds_shape[1] = volshape[3]
    volume_ds_shape[2] = volshape[5]
    f.close()
    # Get the list of segmented datasets
    # seg_ds_list = f.keys()
    labeld_obj = get_ilastik_labels()
    ds_name = labeld_obj[vessel_label_idx]
    # Create an hdf file to contain the post segmentation cell volume image.
    par, name = os.path.split(post_seg_volume_location)
    seg_volume_file = post_seg_volume_location + '/volume_vessel_' + name + '.h5'
    if rank == 0:
        print("Post segmentation directory is %s, number of file is %d and number of python processes is %d" % 
              (post_seg_volume_location, len(input_files), size))
        print("Volume shape is", volume_ds_shape)
    # Create directory for the post segmentation processing if it does not exist.
    if rank == 0:
        if not os.path.exists(post_seg_volume_location):
            os.mkdir(post_seg_volume_location)
            print("File directory for post segmentation did not exist, was created")
    comm.Barrier()
    
    # Need Parallel HDF for faster processing. However the below test lets processing to continue even if
    # Parallel HDF is not available.
    if size == 1:
        vol_img_file = h5py.File(seg_volume_file, 'w')
    else:
        vol_img_file = h5py.File(seg_volume_file, 'w', driver='mpio', comm=comm)
    
    if rank == 0:
        print("Dataset name to apply post processing is %s" % ds_name)
    vol_seg_dataset = vol_img_file.create_dataset(ds_name, volume_ds_shape, dtype='uint32',
                                                  chunks=(1, il_sub_vol_y, il_sub_vol_z))
    iterations = int(len(input_files) / size) + (len(input_files) % size > 0)
    for idx in range(iterations):
        if (rank + (size * idx)) >= len(input_files):
            print("\nBREAKING out, my rank is %d, number of files is %d, size is %d and idx is %d" % 
                  (rank, len(input_files), size, idx))
            break
        print("*** Working on file %s and rank is %d ***" % (input_files[rank + size * idx], rank))
        subvol_file = h5py.File(input_files[rank + size * idx], 'r')
        # Retrieve indices into the whole volume.
        orig_idx_ds = subvol_file['orig_indices']
        orig_idx = orig_idx_ds[...]
        
        # Retrieve overlap size to the right and left side of the sub-volume.
        right_overlapds = subvol_file['right_overlap']
        rightoverlap = right_overlapds[...]
        left_overlapds = subvol_file['left_overlap']
        leftoverlap = left_overlapds[...]
        
        myds = subvol_file[ds_name]
        subvoldata = myds[...]
        x_dim = subvoldata.shape[0]
        y_dim = subvoldata.shape[1]
        z_dim = subvoldata.shape[2]
        subvoldata = subvoldata > 0
        subvoldata = ndi.binary_fill_holes(subvoldata)
        subvoldata = morphology.erosion(subvoldata, morphology.ball(2))
        subvoldata = morphology.dilation(subvoldata, morphology.ball(2))
        subvoldata = ndi.binary_fill_holes(subvoldata)
        subvoldata = morphology.remove_small_objects(subvoldata, MINSZ_VESSEL, connectivity=2)
        subvoldata = morphology.label(subvoldata.astype('uint32'))
        
        vol_seg_dataset[orig_idx[0]:orig_idx[1], orig_idx[2]:orig_idx[3], orig_idx[4]:orig_idx[5]] = \
            subvoldata[leftoverlap[0] : x_dim - rightoverlap[0],
                       leftoverlap[1] : y_dim - rightoverlap[1],
                       leftoverlap[2] : z_dim - rightoverlap[2]]
        subvol_file.close()
    vol_img_file.close()
    print("Time to execute vessel_seg_post_proc() is %d seconds and rank is %d" % ((time.time() - start_time), rank))
Ejemplo n.º 48
0
def acompcor_masks(in_files, is_aseg=False, zooms=None):
    """
    Generate aCompCor masks.

    This function selects the CSF partial volume map from the input,
    and generates the WM and combined CSF+WM masks for aCompCor.

    The implementation deviates from Behzadi et al.
    Their original implementation thresholded the CSF and the WM partial-volume
    masks at 0.99 (i.e., 99% of the voxel volume is filled with a particular tissue),
    and then binary-eroded them by 2 voxels:

    > Anatomical data were segmented into gray matter, white matter,
    > and CSF partial volume maps using the FAST algorithm available
    > in the FSL software package (Smith et al., 2004). Tissue partial
    > volume maps were linearly interpolated to the resolution of the
    > functional data series using AFNI (Cox, 1996). In order to form
    > white matter ROIs, the white matter partial volume maps were
    > thresholded at a partial volume fraction of 0.99 and then eroded by
    > two voxels in each direction to further minimize partial voluming
    > with gray matter. CSF voxels were determined by first thresholding
    > the CSF partial volume maps at 0.99 and then applying a three-dimensional
    > nearest neighbor criteria to minimize multiple tissue
    > partial voluming. Since CSF regions are typically small compared
    > to white matter regions, mask erosion was not applied.

    This particular procedure is not generalizable to BOLD data with different voxel zooms
    as the mathematical morphology operations will be scaled by those.
    Also, from reading the excerpt above and the tCompCor description, I (@oesteban)
    believe that they always operated slice-wise given the large slice-thickness of
    their functional data.

    Instead, *fMRIPrep*'s implementation deviates from Behzadi's implementation on two
    aspects:

      * the masks are prepared in high-resolution, anatomical space and then
        projected into BOLD space; and,
      * instead of using binary erosion, a dilated GM map is generated -- thresholding
        the corresponding PV map at 0.05 (i.e., pixels containing at least 5% of GM tissue)
        and then subtracting that map from the CSF, WM and CSF+WM (combined) masks.
        This should be equivalent to eroding the masks, except that the erosion
        only happens at direct interfaces with GM.

    When the probseg maps come from FreeSurfer's ``recon-all`` (i.e., they are
    discrete), binary maps are *transformed* into some sort of partial volume maps
    by means of a Gaussian smoothing filter with sigma adjusted by the size of the
    BOLD data.

    """
    from pathlib import Path
    import numpy as np
    import nibabel as nb
    from scipy.ndimage import binary_dilation
    from skimage.morphology import ball

    csf_file = in_files[2]  # BIDS labeling (CSF=2; last of list)
    # Load PV maps (fast) or segments (recon-all)
    gm_vf = nb.load(in_files[0])
    wm_vf = nb.load(in_files[1])
    csf_vf = nb.load(csf_file)

    # Prepare target zooms
    imgzooms = np.array(gm_vf.header.get_zooms()[:3], dtype=float)
    if zooms is None:
        zooms = imgzooms
    zooms = np.array(zooms, dtype=float)

    if not is_aseg:
        gm_data = gm_vf.get_fdata() > 0.05
        wm_data = wm_vf.get_fdata()
        csf_data = csf_vf.get_fdata()
    else:
        csf_file = mask2vf(
            csf_file,
            zooms=zooms,
            out_file=str(Path("acompcor_csf.nii.gz").absolute()),
        )
        csf_data = nb.load(csf_file).get_fdata()
        wm_data = mask2vf(in_files[1], zooms=zooms)

        # We do not have partial volume maps (recon-all route)
        gm_data = np.asanyarray(gm_vf.dataobj, np.uint8) > 0

    # Dilate the GM mask
    gm_data = binary_dilation(gm_data, structure=ball(3))

    # Output filenames
    wm_file = str(Path("acompcor_wm.nii.gz").absolute())
    combined_file = str(Path("acompcor_wmcsf.nii.gz").absolute())

    # Prepare WM mask
    wm_data[gm_data] = 0  # Make sure voxel does not contain GM
    nb.Nifti1Image(wm_data, gm_vf.affine, gm_vf.header).to_filename(wm_file)

    # Prepare combined CSF+WM mask
    comb_data = csf_data + wm_data
    comb_data[gm_data] = 0  # Make sure voxel does not contain GM
    nb.Nifti1Image(comb_data, gm_vf.affine,
                   gm_vf.header).to_filename(combined_file)
    return [csf_file, wm_file, combined_file]
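
The key deviation described in the docstring (dilate the GM map and subtract it from the WM and combined CSF+WM masks, rather than eroding each mask) can be sketched with synthetic arrays:

import numpy as np
from scipy.ndimage import binary_dilation
from skimage.morphology import ball

rng = np.random.default_rng(0)
gm_pv = rng.random((40, 40, 40))    # stand-ins for partial-volume maps
wm_pv = rng.random((40, 40, 40))
csf_pv = rng.random((40, 40, 40))

gm_dil = binary_dilation(gm_pv > 0.05, structure=ball(3))  # dilated GM mask

wm_mask = wm_pv.copy()
wm_mask[gm_dil] = 0                 # erase anything touching (dilated) GM
comb_mask = csf_pv + wm_pv
comb_mask[gm_dil] = 0
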
Ejemplo n.º 49
0
def test_3D_template(self):
    net = op.network.CubicTemplate(template=ball(5), spacing=1)
    assert net.Np == 515
    assert net.Nt == 1302
Ejemplo n.º 50
0
def Workflow_npm1_comb(struct_img,
                       mitotic_stage,
                       rescale_ratio,
                       output_type,
                       output_path,
                       fn,
                       output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [20, 25]
    gaussian_smoothing_sigma = 0.25
    gaussian_smoothing_truncate_range = 4.0
    dot_2d_sigma = 1
    dot_2d_sigma_extra = 3
    dot_2d_cutoff = 0.035
    dot_2d_cutoff_extra = 0.01
    minArea = 2
    low_level_min_size = 700
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = np.reciprocal(struct_img)  # inverting to detect dark spot
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    # if rescale_ratio>0:
    #     struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio], method="cubic")
    #     struct_img = (struct_img - struct_img.min() + 1e-8)/(struct_img.max() - struct_img.min() + 1e-8)
    #     gaussian_smoothing_truncate_range = gaussian_smoothing_truncate_range * rescale_ratio

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_3d(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # step 1: low level thresholding
    # global_median_1 = np.percentile(structure_img_smooth,35)
    global_median_2 = np.percentile(structure_img_smooth,
                                    30)  # 70 for M6M7, 30 for rest

    # th_low_level_1 = global_median_1
    th_low_level_2 = global_median_2
    # bw_low_level = (structure_img_smooth > th_low_level_1) + (structure_img_smooth > th_low_level_2)
    bw_low_level = structure_img_smooth > th_low_level_2
    bw_low_level = remove_small_objects(bw_low_level,
                                        min_size=low_level_min_size,
                                        connectivity=1,
                                        in_place=True)
    seg = dilation(bw_low_level, selem=ball(2))

    seg = np.invert(seg)
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    ####################
    # POST-Processing using other segmentations structures
    ####################

    other_segs_path = "/allen/aics/assay-dev/computational/data/dna_cell_seg_on_production_data/NPM1/new_dna_mem_segmentation/"
    mem_segs_path = other_segs_path + fn + "_mem_segmentation.tiff"
    dna_segs_path = other_segs_path + fn + "_dna_segmentation.tiff"

    if not os.path.exists(mem_segs_path):
        mem_segs_path = other_segs_path + fn + ".ome_mem_segmentation.tiff"
        dna_segs_path = other_segs_path + fn + ".ome_dna_segmentation.tiff"

    mito_seed_path_root = "/allen/aics/assay-dev/computational/data/NPM1_segmentation_improvement/images_with_mitosis/M67/mito_seg/"
    mito_seed_path = mito_seed_path_root + fn + ".tiff"

    # Generate seed for mitotic cell
    if not os.path.exists(mito_seed_path):
        mito_seed_path = mito_seed_path_root + fn + ".ome.tiff"
    mito_seed_3d = imread(mito_seed_path)

    mito_seed_img = np.amax(mito_seed_3d, axis=0)
    mito_seed = com(mito_seed_img)

    # label the segmentations
    # mem_label, num_feat_mem = labeling(imread(mem_segs_path)) # not labeling correctly
    dna_label, num_feat_dna = labeling(imread(dna_segs_path))

    # label
    # label_mito = mem_label[int(np.floor(mem_label.shape[0]/2)),int(mito_seed[0]),int(mito_seed[1])]

    # seg = seg * ((mem_label == label_mito)*1)
    seg[mito_seed_3d == 0] = 0
    seg[dna_label > 0] = 0

    out_img_list.append(seg.copy())
    out_name_list.append('bw_fine')

    fn += "_combined"

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        output_func(out_img_list, out_name_list, output_path, fn)
    else:
        # the hook for pre-defined RnD output functions (AICS internal)
        img_list, name_list = NPM1_output(out_img_list, out_name_list,
                                          output_type, output_path, fn)
        if output_type == 'QCB':
            return img_list, name_list
Ejemplo n.º 51
0
def preprocess_training_data(patient_id,
                             img_folder,
                             seg_folder,
                             resample,
                             offline=False,
                             online=True):
    if offline or online:
        if (offline and os.path.exists("offline_preprocessing/" + patient_id +
                                       "_img.nii.gz")
                and os.path.exists("offline_preprocessing/" + patient_id +
                                   "_seg.nii.gz")):
            return
        img = irtk.imread(img_folder + "/" + patient_id + ".nii.gz",
                          dtype='float32')
        seg = irtk.imread(seg_folder + "/" + patient_id + "_seg.nii.gz",
                          dtype="uint8")

        wall = nd.binary_dilation(
            seg,
            morphology.ball(int(12.5 * 0.001 / seg.header['pixelSize'][0])))
        wall = wall.astype('int')
        points = np.transpose(np.nonzero(wall))[::4]
        center, S, V = fit_ellipsoidPCA(points)
        if V[0, 0] < 0:
            V *= -1

        points = np.transpose(np.nonzero(wall))
        projections = np.dot(points - center, V[0])

        # valves
        index = projections > (projections.max() -
                               40.0 * 0.001 / seg.header['pixelSize'][0])

        #print "VALVE size:",np.sum(index), projections.max(), 40.0*0.001/seg.header['pixelSize'][0]

        wall[points[index, 0], points[index, 1], points[index, 2]] = 2

        #print "VALVE1", wall.max()

        wall = irtk.Image(wall, seg.get_header())

        img = img.resample(pixelSize=resample,
                           interpolation='linear').rescale(0, 1000)
        seg = seg.transform(target=img,
                            interpolation="nearest").astype('uint8')
        wall = wall.transform(target=img,
                              interpolation="nearest").astype('uint8')

        wall[seg > 0] = 0
        seg[wall == 1] = 2
        seg[wall == 2] = 3

        #print "VALVE2", seg.max()

        #irtk.imwrite("debug/"+patient_id+"_border.nii.gz",seg)

        seg[img == 0] = 255

        if offline:
            irtk.imwrite("offline_preprocessing/" + patient_id + "_img.nii.gz",
                         img)
            irtk.imwrite("offline_preprocessing/" + patient_id + "_seg.nii.gz",
                         seg)
            return

    if not online:
        img = irtk.imread("offline_preprocessing/" + patient_id +
                          "_img.nii.gz")
        seg = irtk.imread("offline_preprocessing/" + patient_id +
                          "_seg.nii.gz")

    mask = irtk.ones(img.get_header(), dtype='uint8')
    mask[img == 0] = 0

    return {
        'patient_id': patient_id,
        'img': img,
        'seg': seg,
        'extra_layers': np.array([], dtype='float32'),
        'metadata': None,
        'mask': mask
    }
Ejemplo n.º 52
0
def calc_metrics(pred, target, skel, device):
    """
    calculate metrics e.g. dice, centerline score
    """

    S = skel
    S = S.to(device, dtype=torch.bool)

    #debug
    # pred = copy.deepcopy(target)
    # end debug!!!!!
    # print('minmax pred', torch.min(pred).item(), torch.max(pred).item())

    pred = pred.detach()
    pred = pred.to(torch.bool)
    target = target.to(torch.bool)

    # calculate centerline score
    # number of pixels of skeleton inside pred / number of pixels in skeleton

    nom = torch.sum(S & pred, dtype=torch.float32)
    denom = torch.sum(S, dtype=torch.float32)
    if denom == 0:
        print('Skeleton empty, cl_score=nan')
        cl_score = float('nan')
    else:
        cl_score = nom / denom
        cl_score = cl_score.to(device='cpu').item()

    # dilate target/label massive
    # to generate hull
    ball_r = 5
    element = morphology.ball(ball_r)  # good value seems in between 3 and 5
    element = torch.from_numpy(element).to(device, dtype=torch.float32)
    element = torch.unsqueeze(torch.unsqueeze(element, 0), 0)

    # dilation: use torch conv3d
    H = torch.nn.functional.conv3d(target.to(dtype=torch.float32),
                                   element,
                                   padding=ball_r)
    H = H >= 1

    # 1 - number of pixels of prediction outside hull / number of pixels of prediction inside hull ?
    # or just total number of pixels of prediction
    nom = torch.sum(~H & pred, dtype=torch.float32)
    denom = torch.sum(pred, dtype=torch.float32)
    out_score = 1 - nom / denom
    out_score = out_score.to('cpu')
    # print('out_score', nom, '/', denom)
    # print('cl_score', cl_score.item(), 'out_score', out_score.item())

    # multiply with batch size!
    # this was a bad idea! wrong!
    batch_size = pred.shape[0]
    # print('Batch size:', batch_size)
    # cl_score = batch_size * cl_score.item()
    # out_score = batch_size * out_score.item()

    tensor_sum = pred.float().sum() + target.float().sum()
    if tensor_sum == 0:
        print('Warning, tensor_sum is zero, dice will be nan')
        dice = float('nan')
    else:
        intersection = torch.sum(pred & target, dtype=torch.float32)
        dice = (2 * intersection) / tensor_sum
        # print('dice', dice)
        dice = dice.to('cpu').item()

    return cl_score, out_score.item(), dice
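
The centreline and out-of-hull scores above are simple voxel ratios; a plain numpy/scipy sketch for 3D boolean arrays, without the torch plumbing:

import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import ball

def cl_and_out_scores(pred, target, skel, ball_r=5):
    # pred, target, skel: 3D arrays convertible to boolean masks.
    pred, target, skel = (np.asarray(a, dtype=bool) for a in (pred, target, skel))
    cl_score = (skel & pred).sum() / max(skel.sum(), 1)          # skeleton coverage
    hull = ndi.binary_dilation(target, structure=ball(ball_r))   # dilated target hull
    out_score = 1.0 - (~hull & pred).sum() / max(pred.sum(), 1)  # fraction of pred inside hull
    return cl_score, out_score
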
def test_get_radius_3d_ball():
    radius = 5
    _helper_radius(morphology.ball(radius), radius)
Ejemplo n.º 54
0
def plot_3d_orientation_map(name,
                            lat_data,
                            azth_data,
                            radius_structure_elem=1,
                            output_dir=None,
                            width=512,
                            height=512,
                            camera_azth=44.5,
                            camera_elev=35.8,
                            camera_roll=0.0,
                            camera_fov=35.0,
                            camera_zoom=0.0035,
                            camera_loc=(67.0, 81.6, 45.2),
                            xlabel='',
                            ylabel='',
                            zlabel='',
                            axis_color='w',
                            background_color='k'):
    """Renders orientation data in 3D with RGB angular color-coding.

    Parameters
    ----------
    name : str
        Indicates the name of the output png file.

    lat_data : 3D array
        Indicates the 3D array containing latitude / elevation angle at every point of
        the skeleton in radians.

    azth_data : 3D array
        Indicates the 3D array containing azimuth angle at every point of the skeleton
        in radians.

    radius_structure_elem : integer
        Indicates the size of the structure element of the dilation process to
        thicken the skeleton.

    output_dir : str
        Indicates the path to the output folder where the image will be stored.

    width : int
        Indicates the width of the visualization window.

    height : int
        Indicates the height of the visualization window.

    camera_azth : float
        Indicates the azimuth angle of the camera.

    camera_elev : float
        Indicates the latitude / elevation angle of the camera.

    camera_roll : float
        Indicates the roll angle of the camera.

    camera_fov : float
        Indicates the field of view of the camera.

    camera_zoom : float
        Indicates the zoom level of the camera.

    camera_loc : tuple
        Indicates the camera location.

    xlabel : str
        Indicates the label along the x-axis.

    ylabel : str
        Indicates the label along the y-axis.

    zlabel : str
        Indicates the label along the z-axis.

    axis_color : str
        Indicates the color of axes.

    background_color : str
        Indicates the background color of the figure.
    """
    if not visvis_available:
        print(
            'The visvis package is not found. The visualization cannot be done.'
        )
        return

    rmin, rmax, cmin, cmax, zmin, zmax = _bbox_3D(azth_data)

    azth, lat = azth_data[rmin:rmax, cmin:cmax, zmin:zmax], \
                np.abs(lat_data[rmin:rmax, cmin:cmax, zmin:zmax])

    skel = azth.copy().astype(np.float32)
    skel[skel.nonzero()] = 1.

    azth = ndi.grey_dilation(azth,
                             structure=morphology.ball(radius_structure_elem))
    lat = ndi.grey_dilation(lat,
                            structure=morphology.ball(radius_structure_elem))
    skel = ndi.binary_dilation(
        skel, structure=morphology.ball(radius_structure_elem))

    Z, Y, X = skel.nonzero()
    vol_orient = np.zeros(skel.shape + (3, ), dtype=np.float32)

    print(vol_orient.size, vol_orient[skel.nonzero()].size)

    for z, y, x in zip(Z, Y, X):
        vol_orient[z, y, x] = geo2rgb(lat[z, y, x], azth[z, y, x])

    app = vv.use()

    fig = vv.figure()
    fig._currentAxes = None
    fig.relativeFontSize = 2.
    fig.position.w = width
    fig.position.h = height

    t = vv.volshow(vol_orient[:, :, :], renderStyle='iso')
    t.isoThreshold = 0.5

    a = vv.gca()
    a.camera.azimuth = camera_azth
    a.camera.elevation = camera_elev
    a.camera.roll = camera_roll
    a.camera.fov = camera_fov
    a.camera.zoom = camera_zoom
    a.camera.loc = camera_loc

    a.bgcolor = background_color
    a.axis.axisColor = axis_color
    a.axis.xLabel = xlabel
    a.axis.yLabel = ylabel
    a.axis.zLabel = zlabel

    # def mouseUp(event):
    #     print 'mouseUp!!'
    #     a = vv.gca()
    #     print a.camera.GetViewParams()
    #
    # a.eventMouseUp.Bind(mouseUp)
    # fig.eventMouseUp.Bind(mouseUp)
    #
    # a.Draw()
    # fig.DrawNow()

    if output_dir is not None:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        vv.screenshot(os.path.join(output_dir,
                                   '{}_3d_orientation.png'.format(name)),
                      sf=1,
                      bg=background_color)
    app.Run()
Ejemplo n.º 55
0
parser = argparse.ArgumentParser(
    description='' )
parser.add_argument( '--seg', type=str, required=True )
parser.add_argument( '--img', type=str, required=True )
parser.add_argument( '--output', type=str, required=True )
parser.add_argument( '--narrow_band', type=int, default=5 )
parser.add_argument( '--debug', action="store_true", default=False )

args = parser.parse_args()

seg = irtk.imread( args.seg, dtype='int32', force_neurological=True )
img = irtk.imread( args.img, dtype='float32', force_neurological=True ).rescale(0,1000)

res = irtk.zeros( seg.get_header(), dtype='uint8' )

ball = morphology.ball( args.narrow_band )

nb_labels = 5

# for i in range(1,5):
#     tmp_seg = (seg==i).astype('int32')
#     # crop
#     x_min,y_min,z_min,x_max,y_max,z_max = (tmp_seg).bbox()
#     mask = tmp_seg[max(0,z_min-2*args.narrow_band):min(seg.shape[0],z_max+2*args.narrow_band+1),
#                       max(0,y_min-2*args.narrow_band):min(seg.shape[1],y_max+2*args.narrow_band+1),
#                       max(0,x_min-2*args.narrow_band):min(seg.shape[2],x_max+2*args.narrow_band+1)]
#     tmp_img = img[max(0,z_min-2*args.narrow_band):min(img.shape[0],z_max+2*args.narrow_band+1),
#                   max(0,y_min-2*args.narrow_band):min(img.shape[1],y_max+2*args.narrow_band+1),
#                   max(0,x_min-2*args.narrow_band):min(img.shape[2],x_max+2*args.narrow_band+1)]

#     background = (nd.binary_dilation( mask, structure=ball ) == 0).astype('int32')
Ejemplo n.º 56
0
def plot_3d_diameter_map(name,
                         data,
                         unit_scale=1.0,
                         measure_quantity='vox',
                         radius_structure_elem=1,
                         output_dir=None,
                         width=512,
                         height=512,
                         camera_azth=44.5,
                         camera_elev=35.8,
                         camera_roll=0.0,
                         camera_fov=35.0,
                         camera_zoom=0.0035,
                         camera_loc=(67.0, 81.6, 45.2),
                         xlabel='',
                         ylabel='',
                         zlabel='',
                         axis_color='w',
                         background_color='k',
                         cb_x_offset=10):
    """Renders orientation data in 3D with RGB angular color-coding.

    Parameters
    ----------
    name : str
        Indicates the name of the output png file.

    data : 3D array
        Indicates the 3D array containing diameter at every point of the skeleton.

    unit_scale : float
        Indicates the scale factor of the data values.

    measure_quantity : str
        Indicates the name of measure of the values.

    radius_structure_elem : integer
        Indicates the size of the structure element of the dilation process to
        thicken the skeleton.

    output_dir : str
        Indicates the path to the output folder where the image will be stored.

    camera_azth : float
        Indicates the azimuth angle of the camera.

    width : int
        Indicates the width of the visualization window.

    height : int
        Indicates the height of the visualization window.

    camera_elev : float
        Indicates the latitude / elevation angle of the camera.

    camera_roll : float
        Indicates the roll angle of the camera.

    camera_fov : float
        Indicates the field of view of the camera.

    camera_zoom : float
        Indicates the zoom level of the camera.

    camera_loc : tuple
        Indicates the camera location.

    xlabel : str
        Indicates the label along the x-axis.

    ylabel : str
        Indicates the label along the y-axis.

    zlabel : str
        Indicates the label along the z-axis.

    axis_color : str
        Indicates the color of axes.

    background_color : str
        Indicates the background color of the figure.

    cb_x_offset : int
        Indicates the offset of the colorbar from the right window side.
    """
    if not visvis_available:
        print(
            'The visvis package was not found; the visualization cannot be performed.'
        )
        return

    rmin, rmax, cmin, cmax, zmin, zmax = _bbox_3D(data)
    dmtr = data[rmin:rmax, cmin:cmax, zmin:zmax] * unit_scale
    skel = np.zeros_like(dmtr, dtype=np.uint8)
    skel[dmtr.nonzero()] = 1

    dmtr = ndi.grey_dilation(dmtr,
                             structure=morphology.ball(radius_structure_elem))
    skel = ndi.binary_dilation(
        skel,
        structure=morphology.ball(radius_structure_elem)).astype(np.float32)
    skel[skel.nonzero()] = 1.

    dmtr = dmtr * skel

    app = vv.use()

    fig = vv.figure()
    fig._currentAxes = None
    fig.relativeFontSize = 2.
    fig.position.w = width
    fig.position.h = height

    t = vv.volshow(dmtr[:, :, :], renderStyle='iso')
    t.isoThreshold = 0.5
    t.colormap = vv.CM_JET

    a = vv.gca()
    a.camera.azimuth = camera_azth
    a.camera.elevation = camera_elev
    a.camera.roll = camera_roll
    a.camera.fov = camera_fov
    a.camera.zoom = camera_zoom
    a.camera.loc = camera_loc

    a.bgcolor = background_color
    a.axis.axisColor = axis_color
    a.axis.xLabel = xlabel
    a.axis.yLabel = ylabel
    a.axis.zLabel = zlabel

    cb = vv.colorbar()
    cb.SetLabel('Diameter, [{}]'.format(measure_quantity))
    cb._label.position.x += cb_x_offset

    if output_dir is not None:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        vv.screenshot(os.path.join(output_dir,
                                   '{}_3d_diameter.png'.format(name)),
                      sf=1,
                      bg='w')
    app.Run()
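A hedged usage sketch for plot_3d_diameter_map (not part of the original source): the synthetic array, output name and directory below are purely illustrative, and the call assumes visvis is installed and will open an interactive window.

import numpy as np

# synthetic diameter map: a diagonal "vessel" whose diameter grows from 2 to 6
diameter_map = np.zeros((64, 64, 64), dtype=np.float32)
idx = np.arange(10, 54)
diameter_map[idx, idx, idx] = np.linspace(2.0, 6.0, idx.size)

plot_3d_diameter_map('demo',
                     diameter_map,
                     unit_scale=1.0,
                     measure_quantity='um',
                     radius_structure_elem=2,
                     output_dir='./renders')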
Ejemplo n.º 57
0
def pixelwise_transform(mask,
                        dilation_radius=None,
                        data_format=None,
                        separate_edge_classes=False):
    """Transforms a label mask for a z stack edge, interior, and background

    Args:
        mask (tensor): tensor of labels
        dilation_radius (int): radius used to thicken the edge feature of
            each instance
        data_format (str): 'channels_first' or 'channels_last'
        separate_edge_classes (bool): Whether to separate the cell edge class
            into 2 distinct cell-cell edge and cell-background edge classes.

    Returns:
        numpy.array: one-hot encoded tensor of masks:
            if not separate_edge_classes: [cell_edge, cell_interior, background]
            otherwise: [bg_cell_edge, cell_cell_edge, cell_interior, background]
    """
    if data_format is None:
        data_format = K.image_data_format()

    if data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = len(mask.shape) - 1

    mask = np.squeeze(mask, axis=channel_axis)

    # Detect the edges and interiors
    new_masks = np.zeros(mask.shape)
    edges = np.zeros(mask.shape)
    strel = ball(1) if mask.ndim > 3 else disk(1)
    for cell_label in np.unique(mask):
        if cell_label != 0:
            for i in range(mask.shape[0]):
                # get the cell interior
                img = mask[i] == cell_label
                img = binary_erosion(img, strel)
                new_masks[i] += img

    interiors = np.multiply(new_masks, mask)
    edges = (mask - interiors > 0).astype('int')
    interiors = (interiors > 0).astype('int')

    if not separate_edge_classes:
        if dilation_radius:
            dil_strel = ball(dilation_radius) if mask.ndim > 3 else disk(
                dilation_radius)
            # Thicken cell edges to be more pronounced
            for i in range(edges.shape[0]):
                edges[i] = binary_dilation(edges[i], selem=dil_strel)

            # Thin the augmented edges by subtracting the interior features.
            edges = (edges - interiors > 0).astype('int')

        background = (1 - edges - interiors > 0)
        background = background.astype('int')

        all_stacks = [edges, interiors, background]

        return np.stack(all_stacks, axis=channel_axis)

    # dilate the background masks and subtract from all edges for background-edges
    dilated_background = np.zeros(mask.shape)
    for i in range(mask.shape[0]):
        background = (mask[i] == 0).astype('int')
        dilated_background[i] = binary_dilation(background, strel)

    background_edges = (edges - dilated_background > 0).astype('int')

    # edges that are not background-edges are interior-edges
    interior_edges = (edges - background_edges > 0).astype('int')

    if dilation_radius:
        dil_strel = ball(dilation_radius) if mask.ndim > 3 else disk(
            dilation_radius)
        # Thicken cell edges to be more pronounced
        for i in range(edges.shape[0]):
            interior_edges[i] = binary_dilation(interior_edges[i],
                                                selem=dil_strel)
            background_edges[i] = binary_dilation(background_edges[i],
                                                  selem=dil_strel)

        # Thin the augmented edges by subtracting the interior features.
        interior_edges = (interior_edges - interiors > 0).astype('int')
        background_edges = (background_edges - interiors > 0).astype('int')

    background = (1 - background_edges - interior_edges - interiors > 0)
    background = background.astype('int')

    all_stacks = [background_edges, interior_edges, interiors, background]

    return np.stack(all_stacks, axis=channel_axis)
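A hedged usage sketch for pixelwise_transform (not from the original source): the toy mask, its shape and the chosen options are illustrative only; the function expects a label array with a channel axis, which it squeezes away internally.

import numpy as np

# toy batch: one 32x32 frame of labels with a trailing channel axis
toy_mask = np.zeros((1, 32, 32, 1), dtype='int32')
toy_mask[0, 5:15, 5:15, 0] = 1    # cell 1
toy_mask[0, 14:24, 14:24, 0] = 2  # cell 2, touching cell 1

transformed = pixelwise_transform(toy_mask,
                                  dilation_radius=2,
                                  data_format='channels_last',
                                  separate_edge_classes=True)
# one-hot stack [bg_cell_edge, cell_cell_edge, cell_interior, background]
print(transformed.shape)  # (1, 32, 32, 4)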
Ejemplo n.º 58
0
                                   randint(255)]
                                  for i in range(len(labs_props[0]))
                                  ]).astype('uint8'))
    del labs

    chan_file = path.join(out_exp_dir,
                          "labs_{}.tif".format(chan_names[syn_type][1]))
    if not force and path.isfile(chan_file):
        labs = imread(chan_file)
    else:
        log("  " + chan_names[syn_type][1], logf)
        ran[1] = True
        imgs_1 = gaussian(imgs[:, :, :, 1], sigma=sigmas[1])
        M = max(imgs_1.flatten())

        res = entropy(imgs_1, ball(5))
        imsave(
            path.join(out_exp_dir,
                      "entropy_{}.tif".format(chan_names[syn_type][1])), res)

        labs_1 = remove_small_objects(label(imgs_1 > intens_range[-1] * M),
                                      min_size=min_syn_marker_size,
                                      in_place=True)
        labs_1 = remove_large_objects(labs_1, max_syn_marker_size)
        props = regionprops(labs_1)
        max_clusts = len(props)
        best_labs = array(labs_1)
        best_th = intens_range[-1] * M
        log(
            "   {}% ({:.2f}): {}".format(int(intens_range[-1] * 100),
                                         intens_range[-1] * M, len(props)),