def get_radius_2d(binary_image, skeleton_image, boundary_image, pix_size=None):
    """
    Returns a dictionary containing the radius at each non-zero coordinate
    on the centerline (skeleton)
    Parameters
    ----------
    binary_image : 2D array
        binary image of (m, n) shape

    skeleton_image : 2D array
        skeletonized image of binary_image of (m, n) shape

    boundary_image : 2D array
        boundaries of objects in binary_image

    pix_size : list
        list of 2 values giving the pixel (or voxel) size, i.e. the resolution in x and y

    Returns
    -------
    dict_nodes_radius : dict
        key: non-zero co-ordinate, value : radius

    Notes
    ------
    Calculates radius as the distance from a node on the skeleton (centerline)
    to the nearest non-zero co-ordinate on the boundaries of the vessel
    """
    skeleton_image_copy = copy.deepcopy(skeleton_image)
    skeleton_image_copy[skeleton_image == 0] = 255
    skeleton_image_copy[boundary_image == 1] = 0
    eucledian_radius_image = ndimage.distance_transform_bf(skeleton_image_copy, metric='taxicab', sampling=pix_size)
    list_nzi = map(tuple, np.transpose(np.nonzero(skeleton_image)))
    dict_nodes_radius = {item: eucledian_radius_image[item] for item in list_nzi}
    return dict_nodes_radius
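A minimal sketch of the same distance-to-boundary idea on a synthetic shape (illustrative only; it assumes nothing beyond numpy and scipy.ndimage, which the function above already uses):

import numpy as np
from scipy import ndimage

# Synthetic vessel cross-section: a filled rectangle.
binary = np.zeros((20, 20), dtype=np.uint8)
binary[5:15, 3:17] = 1
# Boundary = object minus its erosion.
boundary = binary - ndimage.binary_erosion(binary).astype(np.uint8)
# Distance of every pixel to the nearest boundary pixel; reading this map at a
# skeleton/centerline coordinate gives the local radius.
dist_to_boundary = ndimage.distance_transform_edt(1 - boundary)
print(dist_to_boundary[10, 10])  # local radius at an interior point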
    def dilatation_example():
        a = np.zeros((5, 5))
        a[2, 2] = 1
        a_3 = ndimage.binary_dilation(a, structure=np.ones(
            (3, 3))).astype(a.dtype)
        a_4 = ndimage.binary_dilation(a, structure=np.ones(
            (4, 4))).astype(a.dtype)
        plt.figure()
        show_images_and_hists([a * 255, a_3 * 255, a_4 * 255])
        # Also works for grey values
        im = np.zeros((64, 64))
        x, y = (63 * np.random.random(
            (2, 8))).astype(int)
        im[x, y] = np.arange(8)

        bigger_points = ndimage.grey_dilation(im,
                                              size=(5, 5),
                                              structure=np.ones((5, 5)))
        smaller_points = ndimage.grey_erosion(im,
                                              size=(5, 5),
                                              structure=np.ones((5, 5)))
        plt.figure()
        show_images_and_hists([im, bigger_points, smaller_points])

        square = np.zeros((16, 16))
        square[4:-4, 4:-4] = 1
        dist = ndimage.distance_transform_bf(square)
        dilate_dist = ndimage.grey_dilation(dist, size=(3, 3), \
            structure=np.ones((3, 3)))

        erosed_dist = ndimage.grey_erosion(dist, size=(3, 3), \
            structure=np.ones((3, 3)))

        plt.figure()
        show_images_and_hists([dist, dilate_dist, erosed_dist])
def process(arg):
  #  el = ndimage.generate_binary_structure(2,1)
  #  eli = astype(np.int)
    im = np.zeros((64,64))
    np.random.seed(2)
    x, y = (63*np.random.random((2,8))).astype(int)
    im[x,y] = np.arange(8)

    bigger_points = ndimage.grey_dilation(im, size=(5,5), structure=np.ones((5,5)))

    square = np.zeros((16,16))
    square[4:-4, 4:-4] = 1
    dist = ndimage.distance_transform_bf(square)
    dilate_dist = ndimage.grey_dilation(dist, size=(3,3), structure=np.ones((3,3)))

    plt.figure(figsize=(12.5,3))
    plt.subplot(141)
    plt.imshow(im, interpolation='nearest', cmap=plt.cm.nipy_spectral)
    plt.axis('off')
    plt.subplot(142)
    plt.imshow(bigger_points, interpolation='nearest', cmap=plt.cm.nipy_spectral)
    plt.axis('off')
    plt.subplot(143)
    plt.imshow(dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
    plt.axis('off')
    plt.subplot(144)
    plt.imshow(dilate_dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
    plt.axis('off')

    plt.subplots_adjust(wspace=0, hspace=0.02, top=0.99, bottom=0.01, left=0.01, right=0.99)
    plt.show()
Example #4
def imageseg(Cont_Image):
    """imageseg('Image Name')

    This function takes an image that has been pre-processed by an edge-finding script as its sole input, segments it, and produces a segmented image along with a pandas dataframe of individual particle positions.

    This function works by creating a binary version of an image that has been run through edge-detection software, then finding the centers of those particles with a Euclidean distance function.  This was chosen over the typical iterative-erosion watershed method because of its increased control in finding particle centers, allowing for greater detection of overlapped and small particles.

    Methodology ideas pulled from the SciKit Image example pages (https://scikit-image.org) as well as the Open CV example pages (https://opencv.org) and Adrian Rosebrock's blog (https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/)."""

    processedImage = np.array(Cont_Image, dtype=np.uint8)

    kernel = np.ones((5, 6), np.uint8)
    opening = cv2.morphologyEx(Cont_Image, cv2.MORPH_OPEN, kernel)
    canny = cv2.Canny(opening, 100, 150, apertureSize=3, L2gradient=True)

    ret, binary = cv2.threshold(Cont_Image, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    distTransform = ndimage.distance_transform_bf(binary)
    localMax = peak_local_max(distTransform,
                              indices=False,
                              min_distance=20,
                              labels=binary)
    label = ndimage.label(localMax)[0]
    segments = watershed(-distTransform, label, mask=binary)

    segment_locations = segmentparser(segments, binary)

    return segments, segment_locations, opening, canny
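A self-contained sketch of the same distance-transform/watershed pattern on two overlapping disks, assuming scikit-image is available (as the imports used by the original imply); all names below are illustrative:

import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

yy, xx = np.mgrid[0:80, 0:80]
binary = (((yy - 40) ** 2 + (xx - 30) ** 2) < 15 ** 2) | \
         (((yy - 40) ** 2 + (xx - 52) ** 2) < 15 ** 2)
dist = ndimage.distance_transform_edt(binary)
# One marker per local maximum of the distance map.
peaks = peak_local_max(dist, min_distance=10, labels=binary)
markers = np.zeros(binary.shape, dtype=int)
markers[tuple(peaks.T)] = np.arange(1, len(peaks) + 1)
labels = watershed(-dist, markers, mask=binary)
print(labels.max())  # number of separated particles despite the overlap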
Example #5
def test_distance_transform_edt01(type_):
    # euclidean distance transform (edt)
    data = np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
        ],
        type_,
    )
    out, ft = stats.distance_transform_edt_float32(data, return_indices=True)
    bf = ndimage.distance_transform_bf(data, "euclidean")
    assert_array_almost_equal(bf, out)

    dt = ft - np.indices(ft.shape[1:], dtype=ft.dtype)
    dt = dt.astype(np.float64)
    np.multiply(dt, dt, dt)
    dt = np.add.reduce(dt, axis=0)
    np.sqrt(dt, dt)

    assert_array_almost_equal(bf, dt)
Example #6
 def _make_strel(self, r):
     D = int(2*sp.ceil(r))
     if sp.mod(D, 2) == 0:
         D += 1
     strel = sp.ones((D, D, D))
     strel[D//2, D//2, D//2] = 0
     strel = spim.distance_transform_bf(strel) <= r
     return strel
Example #7
 def _make_strel(self, r):
     D = int(2 * sp.ceil(r))
     if sp.mod(D, 2) == 0:
         D += 1
     strel = sp.ones((D, D, D))
     strel[D // 2, D // 2, D // 2] = 0
     strel = spim.distance_transform_bf(strel) <= r
     return strel
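For context, a hedged sketch of what this helper produces: a boolean ball of radius r built from a brute-force distance transform (numpy used here in place of the deprecated scipy aliases):

import numpy as np
from scipy import ndimage as spim

r = 2
D = int(2 * np.ceil(r))
if D % 2 == 0:
    D += 1
strel = np.ones((D, D, D))
strel[D // 2, D // 2, D // 2] = 0          # single background voxel at the centre
strel = spim.distance_transform_bf(strel) <= r
print(strel.shape, strel.sum())            # a 5x5x5 array marking all voxels within radius 2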
Example #8
def distance_from_inclusion(geometry, p, **kwargs):
    r"""
    Generate spatially correlated pore seeds by calculating distance from random
    locations (inclusions) in the domain

    Parameters
    ----------
    p : float
        The fraction of pores in the domain that are set as 'seeds' for the
        distance calculation

    Returns
    -------
    A list of distance values (in voxels) between each pore and its nearest
    seed pore.  A list of voxel distances is returned rather than normalized
    seeds between 0:1 so that the user can manipulate the map as desired, by
    applying desired thresholds and/or scaling to get 0:1 seeds.

    Notes
    -----
    - This method uses image analysis tools, so only works on Cubic networks
    - At present the result contains edge artifacts since no inclusions are present
      beyond the image boundary

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.Cubic(shape=[50,50,50])
    >>> geom = OpenPNM.Geometry.GenericGeometry(network=pn,pores=pn.Ps,throats=pn.Ts)
    >>> model = OpenPNM.Geometry.models.pore_seed.distance_from_inclusion
    >>> geom.add_model(propname='pore.seed', model=model, p=0.001)
    >>> im = pn.asarray(geom['pore.seed'])

    Visualizing the end result can be done with:

    .. code-block:: python

        matplotlib.pyplot.imshow(im[:,25,:],interpolation='none')

    """
    import scipy.ndimage as _spim

    net = geometry._net
    # The following will only work on Cubic networks
    x = net._shape[0]
    y = net._shape[1]
    z = net._shape[2]
    img = _sp.rand(x, y, z) > p
    # Pad image by tiling
    a = _sp.tile(img, [3, 3, 3])
    b = a[x:-x, y:-y, z:-z]
    # Perform distance transform
    img = _spim.distance_transform_bf(b)
    # Convert back to pore-list
    values = img.flatten()
    values = values[geometry.map_pores(target=net, pores=geometry.Ps)]
    return values
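A 2D toy version of the same approach, with plain numpy in place of the scipy aliases used by OpenPNM (values and shapes here are arbitrary):

import numpy as np
from scipy import ndimage

p = 0.01
shape = (50, 50)
img = np.random.rand(*shape) > p              # False at randomly chosen 'inclusion' cells
dist = ndimage.distance_transform_edt(img)    # voxel distance of each cell to its nearest inclusion
values = dist.flatten()                       # one distance per cell, analogous to one per pore
print(values.min(), values.max())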
Example #9
def distance_from_inclusion(geometry, p, **kwargs):
    r"""
    Generate spatially correlated pore seeds by calculating distance from random
    locations (inclusions) in the domain

    Parameters
    ----------
    p : float
        The fraction of pores in the domain that are set as 'seeds' for the
        distance calculation

    Returns
    -------
    A list of distance values (in voxels) between each pore and its nearest
    seed pore.  A list of voxel distances is returned rather than normalized
    seeds between 0:1 so that the user can manipulate the map as desired, by
    applying desired thresholds and/or scaling to get 0:1 seeds.

    Notes
    -----
    - This method uses image analysis tools, so only works on Cubic networks
    - At present the result contains edge artifacts since no inclusions are
    present beyond the image boundary

    Examples
    --------
    >>> import OpenPNM
    >>> pn = OpenPNM.Network.Cubic(shape=[50, 50, 50])
    >>> geom = OpenPNM.Geometry.GenericGeometry(network=pn, pores=pn.Ps,
    ...                                         throats=pn.Ts)
    >>> model = OpenPNM.Geometry.models.pore_seed.distance_from_inclusion
    >>> geom.add_model(propname='pore.seed', model=model, p=0.001)
    >>> im = pn.asarray(geom['pore.seed'])

    Visualizing the end result can be done with:

    .. code-block:: python

        matplotlib.pyplot.imshow(im[:,25,:],interpolation='none')

    """
    import scipy.ndimage as _spim
    net = geometry._net
    # The following will only work on Cubic networks
    x = net._shape[0]
    y = net._shape[1]
    z = net._shape[2]
    img = _sp.rand(x, y, z) > p
    # Pad image by tiling
    a = _sp.tile(img, [3, 3, 3])
    b = a[x:-x, y:-y, z:-z]
    # Perform distance transform
    img = _spim.distance_transform_bf(b)
    # Convert back to pore-list
    values = img.flatten()
    values = values[geometry.map_pores(target=net, pores=geometry.Ps)]
    return values
 def denoising_example2():
     # Median filter: better result for straight boundaries (low curvature):
     im = np.zeros((20, 20))
     im[5:-5, 5:-5] = 1
     im = ndimage.distance_transform_bf(im)
     im_noise = im + 0.2 * np.random.randn(*im.shape)
     im_med = ndimage.median_filter(im_noise, 3)
     plt.figure()
     show_images_and_hists([im, im_noise, im_med],
                           ['original', 'noisy', 'med'])
 def add_blob(self, blob, blob_slice, thr=None):
     self.blob = np.array(blob, np.uint8)
     # self.blob_nt, self.blob_nx, self.blob_ny = blob.shape
     self.blob_slice = blob_slice
     self.blob_rect = np.array([[sl.start, sl.stop] for sl in blob_slice],
                               np.uint16)
     self.threshold = thr
     if self.seed is None:
         dt = ndi.distance_transform_bf(blob)
         self.seed = np.unravel_index(dt.argmax(), blob.shape)
     self.create_geometry()
     return self
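A small sketch of the seed-selection step above: the pixel deepest inside the blob (the argmax of its distance transform) becomes the seed.

import numpy as np
from scipy import ndimage as ndi

blob = np.zeros((9, 9), dtype=np.uint8)
blob[2:7, 1:8] = 1
dt = ndi.distance_transform_bf(blob)
seed = np.unravel_index(dt.argmax(), blob.shape)
print(seed)  # a coordinate well inside the blob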
def gray_dilation():
    """
    灰度图的形态修改
    :return:
    """
    # 灰度值图像
    im = np.zeros((64, 64))
    np.random.seed(2)
    x, y = (63 * np.random.random((2, 8))).astype(np.int)
    im[x, y] = np.arange(8)
    # print_image_pixel(im)
    # 灰度膨胀
    bigger_points = ndimage.grey_dilation(im,
                                          size=(5, 5),
                                          structure=np.ones((5, 5)))
    # print_image_pixel(bigger_points)

    square = np.zeros((16, 16))
    square[4:-4, 4:-4] = 1
    dist = ndimage.distance_transform_bf(square)
    dilate_dist = ndimage.grey_dilation(dist,
                                        size=(3, 3),
                                        structure=np.ones((3, 3)))

    images = [im, bigger_points, square, dist, dilate_dist]
    pil_image_demo.plt_images(images, 3)

    plt.figure(figsize=(12.5, 3))
    plt.subplot(141)
    plt.imshow(im, interpolation='nearest')
    plt.axis('off')
    plt.subplot(142)
    plt.imshow(bigger_points, interpolation='nearest')
    plt.axis('off')
    plt.subplot(143)
    plt.imshow(dist, interpolation='nearest')
    plt.axis('off')
    plt.subplot(144)
    plt.imshow(dilate_dist, interpolation='nearest')
    plt.axis('off')

    plt.subplots_adjust(wspace=0,
                        hspace=0.02,
                        top=0.99,
                        bottom=0.01,
                        left=0.01,
                        right=0.99)
    plt.show()
Example #13
    def measure_thickness(self, volume, obj_intensity=None):
        '''
        Measure average thickness of an object.
        '''

        # Apply threshold:
        self.treshold(volume, obj_intensity)

        # Skeletonize:
        skeleton = morphology.skeletonize(volume.data.get_data())

        # Compute distance across the wall:
        distance = ndimage.distance_transform_bf(volume.data.get_data()) * 2

        # Average distance:
        return numpy.mean(distance[skeleton])
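A hedged 2D illustration of the same idea, assuming scikit-image's morphology module as in the original: skeletonize a wall, read the distance to the background along the skeleton, and double it to estimate thickness.

import numpy as np
from scipy import ndimage
from skimage import morphology

wall = np.zeros((40, 40), dtype=bool)
wall[10:30, 10:30] = True
wall[15:25, 15:25] = False                       # hollow square with a 5-pixel wall
skeleton = morphology.skeletonize(wall)
distance = ndimage.distance_transform_bf(wall) * 2
print(np.mean(distance[skeleton]))               # an estimate of the wall thickness (a pixel or so of bias is expected)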
 def denoising_example3():
     # Other rank filters: ndimage.maximum_filter, ndimage.percentile_filter
     im = np.zeros((20, 20))
     im[5:-5, 5:-5] = 1
     im = ndimage.distance_transform_bf(im)
     im_noise = im + 0.2 * np.random.randn(*im.shape)
     im_max = ndimage.maximum_filter(im_noise, 3)
     im_p25 = ndimage.percentile_filter(im_noise, 25, 3)
     im_p50 = ndimage.percentile_filter(im_noise, 50, 3)
     im_p75 = ndimage.percentile_filter(im_noise, 75, 3)
     plt.figure()
     show_images_and_hists([im, im_noise, im_max],
                           ['original', 'noisy', 'max'])
     plt.figure()
     show_images_and_hists([im, im_noise, im_p25, im_p50, im_p75],
                           ['original', 'noisy', '25%', '50%', '75%'])
Example #15
 def makeDistanceTransform(self, method='edt'):
     """Create Distance transformed image
     Methods for the distance transform:
         - bf    Brute force
         - cdt   Checkerboard
         - edt   Euclidean (default)
     """
     if (method == 'edt'):
         self.imDistanceTransform = ndimage.distance_transform_edt(
             self._imSolidPore, sampling=self._spacing)
     elif (method == 'bf'):
         self.imDistanceTransform = ndimage.distance_transform_bf(
             self._imSolidPore, sampling=self._spacing)
     elif (method == 'cdt'):
         self.imDistanceTransform = ndimage.distance_transform_cdt(
             self._imSolidPore, sampling=self._spacing)
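For reference, the three distance-transform flavours listed in the docstring, applied to a tiny test image (sampling/spacing omitted here for brevity):

import numpy as np
from scipy import ndimage

im = np.zeros((7, 7), dtype=np.uint8)
im[2:5, 2:5] = 1
edt = ndimage.distance_transform_edt(im)   # exact Euclidean
bf = ndimage.distance_transform_bf(im)     # brute force, Euclidean by default
cdt = ndimage.distance_transform_cdt(im)   # chamfer, chessboard metric by default
print(edt[3, 3], bf[3, 3], cdt[3, 3])      # centre pixel is 2 steps from the background in all three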
Example #16
def test_distance_transform_edt4(type_):
    data = np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
        ],
        type_,
    )
    ref = ndimage.distance_transform_bf(data, "euclidean", sampling=[2, 1])
    out = stats.distance_transform_edt_float32(data, sampling=[2, 1])
    assert_array_almost_equal(ref, out)
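A quick sanity check of what the sampling argument does: per-axis spacing scales the distances, so with sampling=[2, 1] a step along axis 0 counts double.

import numpy as np
from scipy import ndimage

col = np.array([[0], [1], [1], [1], [0]])
iso = ndimage.distance_transform_bf(col, "euclidean")
aniso = ndimage.distance_transform_bf(col, "euclidean", sampling=[2, 1])
print(iso.ravel(), aniso.ravel())   # [0. 1. 2. 1. 0.] vs [0. 2. 4. 2. 0.]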
Example #17
def make_list_of_targets_and_seeds(tgt_small,
                                   tgt_coords_small,
                                   tgt_masks_small,
                                   init_lists=True,
                                   seed_value=1,
                                   targets=[],
                                   seeds=[],
                                   masks=[],
                                   coords=[],
                                   seed_method='center'):
    '''If no lists are passed, create new lists of targets and their seeds;
    if lists are passed, append the new values to them.'''
    if init_lists:
        targets = []
        seeds = []
        masks = []
        coords = []
    for i_tgt, i_coords, i_mask in zip(tgt_small, tgt_coords_small,
                                       tgt_masks_small):
        target_temp = np.zeros((np.shape(i_tgt)[0], np.shape(i_tgt)[1], 2))
        target_temp[..., 0] = i_tgt
        mask_mini_closed = binary_fill_holes(i_tgt > 0)
        target_temp[..., 1] = mask_mini_closed
        targets.append((target_temp).astype('float32'))
        if seed_method == 'center':
            #set the seed in the center of the mask
            mask_mini_dt = distance_transform_bf(mask_mini_closed)
            seed = mask_mini_dt == np.max(mask_mini_dt)
            if np.sum(seed) > 1:
                yy, xx = np.where(seed == 1)
                seed = np.zeros_like(mask_mini_dt)
                seed[yy[0], xx[0]] = seed_value
        else:
            #set the seed in the pixel with largest intensity
            yy, xx = np.where(i_tgt == np.max(i_tgt))
            seed = np.zeros_like(i_tgt)
            seed[yy, xx] = seed_value

        seeds.append(seed)
        coords.append(i_coords)
        masks.append(i_mask)
    return targets, coords, masks, seeds
Example #18
def get_radius_2d(binary_image, skeleton_image, boundary_image, pix_size=None):
    """
    Returns a dictionary containing the radius at each non-zero coordinate
    on the centerline (skeleton)
    Parameters
    ----------
    binary_image : 2D array
        binary image of (m, n) shape

    skeleton_image : 2D array
        skeletonized image of binary_image of (m, n) shape

    boundary_image : 2D array
        boundaries of objects in binary_image

    pix_size : list
        list of 2 values giving the pixel (or voxel) size, i.e. the resolution in x and y

    Returns
    -------
    dict_nodes_radius : dict
        key: non-zero co-ordinate, value : radius

    Notes
    ------
    Calculates radius as the distance from a node on the skeleton (centerline)
    to the nearest non-zero co-ordinate on the boundaries of the vessel
    """
    skeleton_image_copy = copy.deepcopy(skeleton_image)
    skeleton_image_copy[skeleton_image == 0] = 255
    skeleton_image_copy[boundary_image == 1] = 0
    eucledian_radius_image = ndimage.distance_transform_bf(skeleton_image_copy,
                                                           metric='taxicab',
                                                           sampling=pix_size)
    list_nzi = map(tuple, np.transpose(np.nonzero(skeleton_image)))
    dict_nodes_radius = {
        item: eucledian_radius_image[item]
        for item in list_nzi
    }
    return dict_nodes_radius
Example #19
def distance_from_inclusion(geometry, p, **kwargs):
    r'''
    Generate spatially correlated pore seeds by calculating distance from random
    locations (inclusions) in the domain
    
    Parameters
    ----------
    p : float
        The fraction of pores in the domain that are set as 'seeds' for the 
        distance calculation
    
    Returns
    -------
    A list of distance values (in voxels) between each pore and its nearest
    seed pore.  A list of voxel distances is returned rather than normalized 
    seeds between 0:1 so that the user can manipulate the map as desired, by 
    applying desired thresholds and/or scaling to get 0:1 seeds.
    
    Notes
    -----
    This method uses image analysis type tools, so only works on Cubic networks
    '''
    import scipy.ndimage as _spim
    net = geometry._net
    #The following will only work on Cubic networks
    x = net._shape[0]
    y = net._shape[1]
    z = net._shape[2]
    img = _sp.rand(x, y, z) > p
    #Pad image by tiling
    a = _sp.tile(img, [3, 3, 3])
    b = a[x:-x, y:-y, z:-z]
    #Perform distance transform
    img = _spim.distance_transform_bf(b)
    #Convert back to pore-list
    values = img.flatten()
    values = values[geometry['pore.map']]
    return values
Example #20
def autoMask(img, perc_radius=None, debug_bool=False):
    from scipy import ndimage
    da = np.uint16(img)
    COG = tuple(map(int, ndimage.center_of_mass(img)))
    markers = np.zeros(da.shape, dtype=np.int8)
    #set outside brain to 1
    markers.flat[0] = 1
    #set inside brain to 2
    markers.flat[np.ravel_multi_index(COG, markers.shape)] = 2
    mask = ndimage.watershed_ift(da, markers)-1
    if debug_bool:
        print(len(np.flatnonzero(mask)))
    if isinstance(perc_radius, float):
        #find distance to background
        dm = ndimage.distance_transform_bf(mask)
        #find radius of phantom (i.e. centre is most distant from background)
        radius = dm.flatten().max()
        #select only pixel outside range desired
        sel = np.flatnonzero(dm.flatten() <= (1.-perc_radius)*radius)
        #set those pixels to zero in the final mask
        mask.flat[sel] = 0
        if debug_bool:
            print(len(np.flatnonzero(mask)), radius, (1. - perc_radius)*radius)
    return np.uint8(mask)
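A hedged sketch of just the radius-trimming step used above: keep only pixels whose distance to the background exceeds a fraction of the object's maximal depth (shapes and perc_radius are arbitrary here).

import numpy as np
from scipy import ndimage

mask = np.zeros((21, 21), dtype=np.uint8)
mask[3:18, 3:18] = 1
perc_radius = 0.5
dm = ndimage.distance_transform_bf(mask)
radius = dm.max()                                  # depth of the most interior pixel
trimmed = mask.copy()
trimmed[dm <= (1.0 - perc_radius) * radius] = 0    # drop the outer ring
print(mask.sum(), trimmed.sum())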
Example #21
def change_contrast(filepath, level):

    img = Image.open(filepath)
    img.load()
    #img = misc.img(gray=True).astype(float)

    factor = (259 * (level+255)) / (255 * (259-level))
    for x in range(img.size[0]):
        for y in range(img.size[1]):
            color = img.getpixel((x, y))
            new_color = tuple(int(factor * (c-128) + 128) for c in color)
            img.putpixel((x, y), new_color)

    
    blurred_f = ndimage.gaussian_filter(img, 3)	
    filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
    alpha = 20
    sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)
 
    im = ndimage.distance_transform_bf(sharpened)
    im_noise = im + 0.2 * np.random.randn(*im.shape)
    im_med = ndimage.median_filter(im_noise, 3)

    return im_med
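For clarity, the contrast factor used above evaluated on its own; level 0 gives a factor of 1 (pixels unchanged) and positive levels stretch values away from the midpoint 128 (no clamping to 0..255 is shown here).

def contrast_factor(level):
    return (259 * (level + 255)) / (255 * (259 - level))

for level in (0, 64):
    f = contrast_factor(level)
    print(level, f, f * (200 - 128) + 128)   # how a pixel value of 200 is remapped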
Example #22
import matplotlib.pyplot as plt
import scipy.ndimage as ndi
import numpy as np

img = np.zeros((16, 16))
img[4:-4, 4:-4] = 1

img = ndi.distance_transform_bf(img)

dilation = ndi.grey_dilation(img, size=(3, 3), structure=np.ones((3, 3)))

erosion = ndi.grey_erosion(img, size=(3, 3), structure=np.ones((3, 3)))

output = [img, dilation, erosion]
titles = ['Original', 'Dilation', 'Erosion']

for i in range(3):
    print(output[i])
    plt.subplot(1, 3, i + 1)
    plt.imshow(output[i], interpolation='nearest', cmap='nipy_spectral')
    plt.title(titles[i])
    plt.axis('off')
plt.show()
Example #23
def generate_base_points(num_points, domain_size, prob=None):
    r"""
    Generates a set of base points for passing into the DelaunayVoronoiDual
    class.  The points can be distributed in spherical, cylindrical, or
    rectilinear patterns.

    Parameters
    ----------
    num_points : scalar
        The number of base points that lie within the domain.  Note that the
        actual number of points returned will be larger, with the extra points
        lying outside the domain.

    domain_size : list or array
        Controls the size and shape of the domain, as follows:

        **sphere** : If a single value is received, it's treated as the radius
        [r] of a sphere centered on [0, 0, 0].

        **cylinder** : If a two-element list is received, it's treated as the
        radius and height of a cylinder [r, z] positioned at [0, 0, 0] and
        extending in the positive z-direction.

        **rectangle** : If a three-element list is received, it's treated
        as the outer corner of a rectangle [x, y, z] whose opposite corner lies
        at [0, 0, 0].

    prob : 3D array, optional
        A 3D array that contains fractional (0-1) values indicating the
        likelihood that a point in that region should be kept.  If not specified,
        an array containing 1's in the shape of a sphere, cylinder, or cube is
        generated, depending on the given ``domain_size``, with zeros outside.
        When specifying a custom probability map it is recommended to also set
        values outside the given domain to zero.  If not, then the correct
        shape will still be returned, but with too few points in it.

    Notes
    -----
    This method places the given number of points within the specified domain,
    then reflects these points across each domain boundary.  This results in
    smooth flat faces at the boundaries once these excess pores are trimmed.

    The reflection approach tends to create larger pores near the surfaces, so
    it might be necessary to use the ``prob`` argument to specify a slightly
    higher density of points near the surfaces.

    For rough faces, it is necessary to define a larger than desired domain
    then trim to the desired size.  This will discard the reflected points
    plus some of the original points.

    Examples
    --------
    The following generates a spherical array with higher values near the core.
    It uses a distance transform to create a sphere of radius 10, then a
    second distance transform to create larger values in the center away from
    the sphere surface.  These distance values could be further skewed by
    applying a power, with values higher than 1 resulting in higher values in
    the core, and fractional values smoothing them out a bit.

    >>> import OpenPNM as op
    >>> import scipy as sp
    >>> import scipy.ndimage as spim
    >>> im = sp.ones([21, 21, 21], dtype=int)
    >>> im[10, 10, 10] = 0
    >>> im = spim.distance_transform_edt(im) <= 20  # Create sphere of 1's
    >>> prob = spim.distance_transform_edt(im)
    >>> prob = prob / sp.amax(prob)  # Normalize between 0 and 1
    >>> pts = op.Network.tools.generate_base_points(num_points=50,
    ...                                             domain_size=[2],
    ...                                             prob=prob)
    >>> net = op.Network.DelaunayVoronoiDual(points=pts, domain_size=[2])
    """
    def _try_points(num_points, prob):
        prob = _sp.array(prob)/_sp.amax(prob)  # Ensure prob is normalized
        base_pts = []
        N = 0
        while N < num_points:
            pt = _sp.random.rand(3)  # Generate a point
            # Test whether to keep it or not
            [indx, indy, indz] = _sp.floor(pt*_sp.shape(prob)).astype(int)
            if _sp.random.rand(1) <= prob[indx][indy][indz]:
                base_pts.append(pt)
                N += 1
        base_pts = _sp.array(base_pts)
        return base_pts
    if len(domain_size) == 1:  # Spherical
        domain_size = _sp.array(domain_size)
        if prob is None:
            prob = _sp.ones([41, 41, 41])
            prob[20, 20, 20] = 0
            prob = _spim.distance_transform_bf(prob) <= 20
        base_pts = _try_points(num_points, prob)
        # Convert to spherical coordinates
        [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0.5]).T  # Center at origin
        r = 2*_sp.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]
        theta = 2*_sp.arctan(Y/X)
        phi = 2*_sp.arctan(_sp.sqrt(X**2 + Y**2)/Z)
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, phi] = [r[inds], theta[inds], phi[inds]]
        # Reflect base points across perimeter
        new_r = 2*domain_size - r
        r = _sp.hstack([r, new_r])
        theta = _sp.hstack([theta, theta])
        phi = _sp.hstack([phi, phi])
        # Convert to Cartesian coordinates
        X = r*_sp.cos(theta)*_sp.sin(phi)
        Y = r*_sp.sin(theta)*_sp.sin(phi)
        Z = r*_sp.cos(phi)
        base_pts = _sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 2:  # Cylindrical
        domain_size = _sp.array(domain_size)
        if prob is None:
            prob = _sp.ones([41, 41, 41])
            prob[20, 20, :] = 0
            prob = _spim.distance_transform_bf(prob) <= 20
        base_pts = _try_points(num_points, prob)
        # Convert to cylindrical coordinates
        [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0]).T  # Center on z-axis
        r = 2*_sp.sqrt(X**2 + Y**2)*domain_size[0]
        theta = 2*_sp.arctan(Y/X)
        z = Z*domain_size[1]
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        inds = ~((z > domain_size[1]) + (z < 0))
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        # Reflect base points about faces and perimeter
        new_r = 2*domain_size[0] - r
        r = _sp.hstack([r, new_r])
        theta = _sp.hstack([theta, theta])
        z = _sp.hstack([z, z])
        r = _sp.hstack([r, r, r])
        theta = _sp.hstack([theta, theta, theta])
        z = _sp.hstack([z, -z, 2-z])
        # Convert to Cartesian coordinates
        X = r*_sp.cos(theta)
        Y = r*_sp.sin(theta)
        Z = z
        base_pts = _sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 3:  # Rectilinear
        domain_size = _sp.array(domain_size)
        Nx, Ny, Nz = domain_size
        if prob is None:
            prob = _sp.ones([10, 10, 10], dtype=float)
        base_pts = _try_points(num_points, prob)
        base_pts = base_pts*domain_size
        # Reflect base points about all 6 faces
        orig_pts = base_pts
        base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts +
                                         [2.0*Nx, 0, 0]))
        base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts +
                                         [0, 2.0*Ny, 0]))
        base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts +
                                         [0, 0, 2.0*Nz]))
        base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts))
        base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts))
        base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts))
    return base_pts
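A tiny sketch of the rectilinear reflection step used above: points are mirrored across all six faces of the box so the tessellated boundary faces come out flat (plain numpy, unit box, arbitrary points).

import numpy as np

Nx = Ny = Nz = 1.0
pts = np.random.rand(5, 3)
refl = np.vstack((
    pts,
    [-1, 1, 1] * pts + [2 * Nx, 0, 0],   # mirror across x = Nx
    [1, -1, 1] * pts + [0, 2 * Ny, 0],   # mirror across y = Ny
    [1, 1, -1] * pts + [0, 0, 2 * Nz],   # mirror across z = Nz
    [-1, 1, 1] * pts,                    # mirror across x = 0
    [1, -1, 1] * pts,                    # mirror across y = 0
    [1, 1, -1] * pts,                    # mirror across z = 0
))
print(refl.shape)                        # 7x the original number of points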
Example #24
def nonconformal_subdomain_3d(face_lst,
                              vertices,
                              N,
                              scale,
                              offset,
                              lmbda_overlap=False,
                              centroid=None):
    """
    Generate a numpy array that can be used to mask a NGSolve function based on
    partitioning a mesh's interior domain into sections that will have
    different boundary conditions. Only works in 3D.

    Args:
        face_lst (list): List of the vertices and outwards facing normals of
                         the interior domain's faces.
        vertices (list): List of paths to .msh or .stl files defining the
                         different boundary regions of the domain.
        N (list): Number of mesh elements in each direction (N+1 nodes).
        scale (list): Extent of the meshed domain in each direction ([-2,2]
                      cube -> scale=[4,4,4]).
        offset (list): Centers the meshed domain in each direction ([-2,2]
                       cube -> offset=[2,2,2]).
        lmbda_overlap (float or False): Measure of the diffuseness of the
                                       boundary between sections (sharp
                                       boundary if False).
        centroid (tuple or None): Coordinates of the point to use as the
                                  centroid of the split (centroid of domain if
                                  None).

    Returns:
        mask (numpy array): Mask of domain.
    """

    shape = (int(N[0] + 1), int(N[1] + 1), int(N[2] + 1))

    # Averaging the vertex coordinates is a naive method of finding the
    # centroid. However, given that the polygon boundary came from a .stl
    # file it can probably be assumed that the boundary points are
    # reasonably evenly spaced and the centroid calculation will not be
    # overly biased.
    if centroid is None:
        full_vertex_lst = [
            face[i:i + 3] for face in face_lst for i in [0, 3, 6, 9]
        ]
        centroid = np.mean(np.array(full_vertex_lst), axis=0)

    # Get the boundary edges and vertices of the boundary section.
    edge_lst, boundary_lst = mesh_helpers.get_mesh_boundary_3d(vertices)
    edge_lst = np.array(edge_lst)
    boundary_lst = np.array(boundary_lst)

    # Construct a polygon by connecting the boundary edges of the boundary
    # section to the conformal mesh's centroid. Any points within this
    # polygon (extending the edges to the limits of the nonconformal mesh)
    # will be included in this boundary section's mask. All other points
    # will be set to zero (assumed to belong to a different boundary
    # section).
    # Weight the coordinates of the conformal mesh's centroid equally to
    # the entire averaged coordinates of the boundary section to keep from
    # heavily biasing the polygon's centroid towards the boundary section.
    cx_poly = (np.mean(edge_lst[:, [0, 3]]) + centroid[0]) / 2
    cy_poly = (np.mean(edge_lst[:, [1, 4]]) + centroid[1]) / 2
    cz_poly = (np.mean(edge_lst[:, [2, 5]]) + centroid[2]) / 2
    centroid_poly = np.array([cx_poly, cy_poly, cz_poly])

    # A point is only inside the polygon if for every triangular face
    # comprising the polygon the vector from the point to the face's
    # midpoint is in the opposite direction to the face's outwards facing
    # surface normal.
    mask = np.ones(shape)
    for i in range(len(boundary_lst)):
        p1 = boundary_lst[i, 0:3]
        p2 = boundary_lst[i, 3:6]
        midpoint = (p1 + p2 + centroid) / 3.0

        n = np.cross(p1 - centroid, p2 - centroid)
        n *= (-1)**(np.dot(midpoint - centroid_poly, n) < 0.0)

        for j in range(shape[0]):
            for k in range(shape[1]):
                for m in range(shape[2]):
                    # Only consider points not already in mask.
                    if mask[j, k, m] != 0.0:
                        p = np.array([j, k, m]) * scale / N - offset
                        if np.dot(p - midpoint, n) > 0.0:
                            mask[j, k, m] = 0.0

    if not lmbda_overlap:
        pass
    else:
        # The different boundary sections diffuse into each other. Each
        # boundary section is weighted 0.5 at the border between the two
        # sections and diffuses following the error function's
        # distribution.
        dt_in = spimg.distance_transform_bf(mask, 'chessboard',
                                            1).astype(np.float64)
        dt_in /= lmbda_overlap
        dt_in[np.where(dt_in != 0.0)] += (0.5 - 1.0 / lmbda_overlap)
        mask_in = spec.erf(dt_in)

        dt_out = spimg.distance_transform_bf((1.0 - mask), 'chessboard',
                                             1).astype(np.float64)
        dt_out /= lmbda_overlap
        dt_out[np.where(dt_out != 0.0)] += (0.5 - 1.0 / lmbda_overlap)
        mask_out = 1.0 - spec.erf(dt_out)
        mask_out[np.where(mask_out == 1.0)] = 0.0

        mask = mask_in + mask_out

    return mask
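A minimal 1D illustration of the lmbda_overlap blending above (lmbda_overlap=4 chosen arbitrarily): the section's weight stays near 1 deep inside it, is about 0.5 at the border, and decays to 0 inside the neighbouring section, following erf.

import numpy as np
from scipy import ndimage as spimg
from scipy import special as spec

lmbda_overlap = 4.0
mask = np.zeros((1, 20))
mask[0, :10] = 1.0                      # this section occupies the left half

dt_in = spimg.distance_transform_bf(mask, 'chessboard', 1).astype(np.float64)
dt_in /= lmbda_overlap
dt_in[dt_in != 0.0] += 0.5 - 1.0 / lmbda_overlap
mask_in = spec.erf(dt_in)

dt_out = spimg.distance_transform_bf(1.0 - mask, 'chessboard', 1).astype(np.float64)
dt_out /= lmbda_overlap
dt_out[dt_out != 0.0] += 0.5 - 1.0 / lmbda_overlap
mask_out = 1.0 - spec.erf(dt_out)
mask_out[mask_out == 1.0] = 0.0

print(np.round(mask_in + mask_out, 2))  # smooth decay from ~1.0 to ~0.0 across the border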
Example #25
def nonconformal_subdomain_2d(boundary_lst,
                              vertices,
                              N,
                              scale,
                              offset,
                              lmbda_overlap=False,
                              centroid=None):
    """
    Generate a numpy array that can be used to mask a NGSolve function based on
    partitioning a mesh's interior domain into sections that will have
    different boundary conditions. Only works in 2D.

    Args:
        boundary_lst (list): List of coordinates of an interior domain's
                             boundary vertices in counterclockwise order.
        vertices (list): List of coordinates of the two vertices that denote
                         the different sections of the interior boundary.
        N (list): Number of mesh elements in each direction (N+1 nodes).
        scale (list): Extent of the meshed domain in each direction ([-2,2]
                      square -> scale=[4,4]).
        offset (list): Centers the meshed domain in each direction ([-2,2]
                       square -> offset=[2,2]).
        lmbda_overlap (float or False): Measure of the diffuseness of the
                                       boundary between sections (sharp
                                       boundary if False).
        centroid (tuple or None): Coordinates of the point to use as the
                                  centroid of the split (centroid of domain if
                                  None).

    Returns:
        mask (numpy array): Mask of domain.
    """

    shape = (int(N[0] + 1), int(N[1] + 1))

    if boundary_lst[0] != boundary_lst[-1]:
        boundary_lst.append(boundary_lst[0])

    # Averaging the vertex coordinates is a naive method of finding the
    # centroid. However, given that the polygon boundary came from a .stl
    # file it can probably be assumed that the boundary points are
    # reasonably evenly spaced and the centroid calculation will not be
    # overly biased.
    if centroid is None:
        cx, cy = np.mean(np.array(boundary_lst), axis=0)
        cx = int(round((cx + offset[0]) * N[0] / scale[0]))
        cy = int(round((cy + offset[1]) * N[1] / scale[1]))
    else:
        cx, cy = centroid
        cx = int(round((cx + offset[0]) * N[0] / scale[0]))
        cy = int(round((cy + offset[1]) * N[1] / scale[1]))

    # A region defined by three points (two vertices and the centroid)
    # contains all points which fall between the two lines defined by each
    # point and the centroid. Since the vertices are ordered
    # counterclockwise, if a third line is defined between the centroid and
    # a point of interest that point only falls between the original two
    # lines if the angle between its line and the line of the first vertex
    # is smaller than the angle between the original two lines.
    x1, y1 = vertices[0]
    x2, y2 = vertices[1]
    x1 = int(round((x1 + offset[0]) * N[0] / scale[0]))
    y1 = int(round((y1 + offset[1]) * N[1] / scale[1]))
    x2 = int(round((x2 + offset[0]) * N[0] / scale[0]))
    y2 = int(round((y2 + offset[1]) * N[1] / scale[1]))

    angle12 = mesh_helpers.angle_between((x1, y1), (cx, cy), (x2, y2))
    mask = np.zeros(shape)
    for j in range(shape[0]):
        for k in range(shape[1]):
            angle1p = mesh_helpers.angle_between((x1, y1), (cx, cy), (j, k))
            if angle1p < angle12:
                mask[j, k] = 1.0

    if not lmbda_overlap:
        pass
    else:
        # The different boundary sections diffuse into each other. Each
        # boundary section is weighted 0.5 at the border between the two
        # sections and diffuses following the error function's
        # distribution.
        dt_in = spimg.distance_transform_bf(mask, 'chessboard',
                                            1).astype(np.float64)
        dt_in /= lmbda_overlap
        dt_in[np.where(dt_in != 0.0)] += (0.5 - 1.0 / lmbda_overlap)
        mask_in = spec.erf(dt_in)

        dt_out = spimg.distance_transform_bf((1.0 - mask), 'chessboard',
                                             1).astype(np.float64)
        dt_out /= lmbda_overlap
        dt_out[np.where(dt_out != 0.0)] += (0.5 - 1.0 / lmbda_overlap)
        mask_out = 1.0 - spec.erf(dt_out)
        mask_out[np.where(mask_out == 1.0)] = 0.0

        mask = mask_in + mask_out

    return mask
Example #26
File: process.py  Project: cicwi/flexCALC
def append_tile(array, geom, tot_array, tot_geom):
    """
    Append a tile to a larger arrayset.
    Args:
        
        array: projection stack
        geom: geometry description
        tot_array: output array
        tot_geom: output geometry
        
    """

    print('Stitching a tile...')

    # Assuming all projections have equal number of angles and same pixel sizes
    total_shape = tot_array.shape[::2]
    det_shape = array.shape[::2]

    if numpy.abs(tot_geom['det_pixel'] - geom['det_pixel']) > 1e-6:
        raise Exception(
            'This array has different detector pixels! %f v.s. %f. Aborting!' %
            (geom['det_pixel'], tot_geom['det_pixel']))

    if tot_array.shape[1] != array.shape[1]:
        raise Exception(
            'This array has different number of projections from the others. %u v.s. %u. Aborting!'
            % (array.shape[1], tot_array.shape[1]))

    total_size = tot_geom.detector_size(total_shape)
    det_size = geom.detector_size(det_shape)

    # Offset from the left top corner:
    y0, x0 = tot_geom.detector_centre()
    y, x = geom.detector_centre()

    x_offset = ((x - x0) + total_size[1] / 2 - det_size[1] / 2) / geom.pixel[1]
    y_offset = ((y - y0) + total_size[0] / 2 - det_size[0] / 2) / geom.pixel[0]

    # Round em up!
    x_offset = int(numpy.round(x_offset))
    y_offset = int(numpy.round(y_offset))

    # Pad image to get the same size as the total_slice:
    pad_x = tot_array.shape[2] - array.shape[2]
    pad_y = tot_array.shape[0] - array.shape[0]

    # Collapse both arraysets and compute residual shift
    shift = _find_shift_(tot_array, array, [y_offset, x_offset])

    x_offset += shift[1]
    y_offset += shift[0]

    # Precompute weights:
    base0 = (tot_array[:, ::100, :].mean(1)) != 0

    new0 = numpy.zeros_like(base0)
    # Shift image:
    new0[:det_shape[0], :det_shape[1]] = 1.0
    new0 = interp.shift(new0, [y_offset, x_offset], order=1)
    #new0[y_offset:int(y_offset+det_shape[0]), x_offset:int(x_offset + det_shape[1])] = 1.0

    base_dist = ndimage.distance_transform_bf(base0)
    new_dist = ndimage.distance_transform_bf(new0)

    # Trim edges to avoid interpolation errors:
    base_dist -= 1
    new_dist -= 1

    base_dist *= base_dist > 0
    new_dist *= new_dist > 0
    norm = (base_dist + new_dist)
    norm[norm == 0] = numpy.inf

    time.sleep(0.5)

    # Apply offsets:
    for ii in tqdm(range(tot_array.shape[1]), unit='img'):

        # Pad to match sizes:
        new = numpy.pad(array[:, ii, :], ((0, pad_y), (0, pad_x)),
                        mode='constant')

        # Apply shift:
        if (x_offset != 0) | (y_offset != 0):

            # Shift image:
            new = interp.shift(new, [y_offset, x_offset], order=1)

        # Add two images in a smart way:
        base = tot_array[:, ii, :]

        # Create distances to edge:
        tot_array[:, ii, :] = ((base_dist * base) + (new_dist * new)) / norm
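A hedged 1D sketch of the blending rule applied in the loop above: each tile is weighted by its distance to its own edge, so the seam fades linearly from one tile to the other.

import numpy as np
from scipy import ndimage

base0 = np.array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0], dtype=float)   # coverage of the existing mosaic
new0 = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 1], dtype=float)    # coverage of the incoming tile

base_dist = ndimage.distance_transform_bf(base0) - 1
new_dist = ndimage.distance_transform_bf(new0) - 1
base_dist *= base_dist > 0
new_dist *= new_dist > 0
norm = base_dist + new_dist
norm[norm == 0] = np.inf

base = np.full(10, 100.0)   # existing intensities
new = np.full(10, 200.0)    # incoming intensities
print((base_dist * base + new_dist * new) / norm)
# -> 100 on the left, 200 on the right, a linear ramp across the overlap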
Example #27
def generate_base_points(num_points, domain_size, prob=None):
    r"""
    Generates a set of base points for passing into the DelaunayVoronoiDual
    class.  The points can be distributed in spherical, cylindrical, or
    rectilinear patterns.

    Parameters
    ----------
    num_points : scalar
        The number of base points that lie within the domain.  Note that the
        actual number of points returned will be larger, with the extra points
        lying outside the domain.

    domain_size : list or array
        Controls the size and shape of the domain, as follows:

        **sphere** : If a single value is received, it's treated as the radius
        [r] of a sphere centered on [0, 0, 0].

        **cylinder** : If a two-element list is received, it's treated as the
        radius and height of a cylinder [r, z] positioned at [0, 0, 0] and
        extending in the positive z-direction.

        **rectangle** : If a three-element list is received, it's treated
        as the outer corner of a rectangle [x, y, z] whose opposite corner lies
        at [0, 0, 0].

    prob : 3D array, optional
        A 3D array that contains fractional (0-1) values indicating the
        likelihood that a point in that region should be kept.  If not specified,
        an array containing 1's in the shape of a sphere, cylinder, or cube is
        generated, depending on the given ``domain_size``, with zeros outside.
        When specifying a custom probability map it is recommended to also set
        values outside the given domain to zero.  If not, then the correct
        shape will still be returned, but with too few points in it.

    Notes
    -----
    This method places the given number of points within the specified domain,
    then reflects these points across each domain boundary.  This results in
    smooth flat faces at the boundaries once these excess pores are trimmed.

    The reflection approach tends to create larger pores near the surfaces, so
    it might be necessary to use the ``prob`` argument to specify a slightly
    higher density of points near the surfaces.

    For rough faces, it is necessary to define a larger than desired domain
    then trim to the desired size.  This will discard the reflected points
    plus some of the original points.

    Examples
    --------
    The following generates a spherical array with higher values near the core.
    It uses a distance transform to create a sphere of radius 10, then a
    second distance transform to create larger values in the center away from
    the sphere surface.  These distance values could be further skewed by
    applying a power, with values higher than 1 resulting in higher values in
    the core, and fractional values smoothing them out a bit.

    >>> import OpenPNM as op
    >>> import scipy as sp
    >>> import scipy.ndimage as spim
    >>> im = sp.ones([21, 21, 21], dtype=int)
    >>> im[10, 10, 10] = 0
    >>> im = spim.distance_transform_edt(im) <= 20  # Create sphere of 1's
    >>> prob = spim.distance_transform_edt(im)
    >>> prob = prob / sp.amax(prob)  # Normalize between 0 and 1
    >>> pts = op.Network.tools.generate_base_points(num_points=50,
    ...                                             domain_size=[2],
    ...                                             prob=prob)
    >>> net = op.Network.DelaunayVoronoiDual(points=pts, domain_size=[2])
    """
    def _try_points(num_points, prob):
        prob = _sp.array(prob)/_sp.amax(prob)  # Ensure prob is normalized
        base_pts = []
        N = 0
        while N < num_points:
            pt = _sp.random.rand(3)  # Generate a point
            # Test whether to keep it or not
            [indx, indy, indz] = _sp.floor(pt*_sp.shape(prob)).astype(int)
            if _sp.random.rand(1) <= prob[indx][indy][indz]:
                base_pts.append(pt)
                N += 1
        base_pts = _sp.array(base_pts)
        return base_pts
    if len(domain_size) == 1:  # Spherical
        domain_size = _sp.array(domain_size)
        if prob is None:
            prob = _sp.ones([41, 41, 41])
            prob[20, 20, 20] = 0
            prob = _spim.distance_transform_bf(prob) <= 20
        base_pts = _try_points(num_points, prob)
        # Convert to spherical coordinates
        [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0.5]).T  # Center at origin
        r = 2*_sp.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]
        theta = 2*_sp.arctan(Y/X)
        phi = 2*_sp.arctan(_sp.sqrt(X**2 + Y**2)/Z)
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, phi] = [r[inds], theta[inds], phi[inds]]
        # Reflect base points across perimeter
        new_r = 2*domain_size - r
        r = _sp.hstack([r, new_r])
        theta = _sp.hstack([theta, theta])
        phi = _sp.hstack([phi, phi])
        # Convert to Cartesian coordinates
        X = r*_sp.cos(theta)*_sp.sin(phi)
        Y = r*_sp.sin(theta)*_sp.sin(phi)
        Z = r*_sp.cos(phi)
        base_pts = _sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 2:  # Cylindrical
        domain_size = _sp.array(domain_size)
        if prob is None:
            prob = _sp.ones([41, 41, 41])
            prob[20, 20, :] = 0
            prob = _spim.distance_transform_bf(prob) <= 20
        base_pts = _try_points(num_points, prob)
        # Convert to cylindrical coordinates
        [X, Y, Z] = _sp.array(base_pts - [0.5, 0.5, 0]).T  # Center on z-axis
        r = 2*_sp.sqrt(X**2 + Y**2)*domain_size[0]
        theta = 2*_sp.arctan(Y/X)
        z = Z*domain_size[1]
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        inds = ~((z > domain_size[1]) + (z < 0))
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        # Reflect base points about faces and perimeter
        new_r = 2*domain_size[0] - r
        r = _sp.hstack([r, new_r])
        theta = _sp.hstack([theta, theta])
        z = _sp.hstack([z, z])
        r = _sp.hstack([r, r, r])
        theta = _sp.hstack([theta, theta, theta])
        z = _sp.hstack([z, -z, 2-z])
        # Convert to Cartesian coordinates
        X = r*_sp.cos(theta)
        Y = r*_sp.sin(theta)
        Z = z
        base_pts = _sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 3:  # Rectilinear
        domain_size = _sp.array(domain_size)
        Nx, Ny, Nz = domain_size
        if prob is None:
            prob = _sp.ones([10, 10, 10], dtype=float)
        base_pts = _try_points(num_points, prob)
        base_pts = base_pts*domain_size
        # Reflect base points about all 6 faces
        orig_pts = base_pts
        base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts +
                                         [2.0*Nx, 0, 0]))
        base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts +
                                         [0, 2.0*Ny, 0]))
        base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts +
                                         [0, 0, 2.0*Nz]))
        base_pts = _sp.vstack((base_pts, [-1, 1, 1]*orig_pts))
        base_pts = _sp.vstack((base_pts, [1, -1, 1]*orig_pts))
        base_pts = _sp.vstack((base_pts, [1, 1, -1]*orig_pts))
    return base_pts
import numpy as np
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt

im = np.zeros((20, 20))
im[5:-5, 5:-5] = 1
im = ndimage.distance_transform_bf(im)
im_noise = im + 0.2*np.random.randn(*im.shape)

im_med = ndimage.median_filter(im_noise, 3)

plt.figure(figsize=(16, 5))

plt.subplot(141)
plt.imshow(im, interpolation='nearest')
plt.axis('off')
plt.title('Original image', fontsize=20)
plt.subplot(142)
plt.imshow(im_noise, interpolation='nearest', vmin=0, vmax=5)
plt.axis('off')
plt.title('Noisy image', fontsize=20)
plt.subplot(143)
plt.imshow(im_med, interpolation='nearest', vmin=0, vmax=5)
plt.axis('off')
plt.title('Median filter', fontsize=20)
plt.subplot(144)
plt.imshow(np.abs(im - im_med), cmap=plt.cm.hot, interpolation='nearest')
plt.axis('off')
plt.title('Error', fontsize=20)
"""

import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt

im = np.zeros((64, 64))
np.random.seed(2)
x, y = (63*np.random.random((2, 8))).astype(int)
im[x, y] = np.arange(8)

bigger_points = ndimage.grey_dilation(im, size=(5, 5), structure=np.ones((5, 5)))

square = np.zeros((16, 16))
square[4:-4, 4:-4] = 1
dist = ndimage.distance_transform_bf(square)
dilate_dist = ndimage.grey_dilation(dist, size=(3, 3), \
        structure=np.ones((3, 3)))

plt.figure(figsize=(12.5, 3))
plt.subplot(141)
plt.imshow(im, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(142)
plt.imshow(bigger_points, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(143)
plt.imshow(dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(144)
plt.imshow(dilate_dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
Example #30
# Sharpen
face = misc.face(gray=True).astype(float)
blurred_f = ndimage.gaussian_filter(face, 3)

# Increase edge weight
filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
alpha = 30
sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)

# Denoise
noisy = face + 0.4 * face.std() * np.random.random(face.shape)
gauss_denoised = ndimage.gaussian_filter(noisy, 2)
med_denoised = ndimage.median_filter(noisy, 3)
im = np.zeros((20, 20))
im[5:-5, 5:-5] = 1
im = ndimage.distance_transform_bf(im)
im_noise = im + 0.2 * np.random.randn(*im.shape)
im_med = ndimage.median_filter(im_noise, 3)

# Structuring element
el = ndimage.generate_binary_structure(2, 1)

# Erosion
a = np.zeros((7, 7), dtype=int)
ndimage.binary_erosion(a).astype(a.dtype)
ndimage.binary_erosion(a, structure=np.ones((5, 5))).astype(a.dtype)

# Dilation
ndimage.binary_dilation(a).astype(a.dtype)

# Opening
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt

im = np.zeros((64, 64))
np.random.seed(2)
x, y = (63 * np.random.random((2, 8))).astype(int)
im[x, y] = np.arange(8)

bigger_points = ndimage.grey_dilation(im,
                                      size=(5, 5),
                                      structure=np.ones((5, 5)))

square = np.zeros((16, 16))
square[4:-4, 4:-4] = 1
dist = ndimage.distance_transform_bf(square)
dilate_dist = ndimage.grey_dilation(dist, size=(3, 3), \
        structure=np.ones((3, 3)))

plt.figure(figsize=(12.5, 3))
plt.subplot(141)
plt.imshow(im, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(142)
plt.imshow(bigger_points, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(143)
plt.imshow(dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.axis('off')
plt.subplot(144)
plt.imshow(dilate_dist, interpolation='nearest', cmap=plt.cm.nipy_spectral)
Example #32
def append_tile(data, geom, tot_data, tot_geom):
    """
    Append a tile to a larger dataset.
    Args:
        
        data: projection stack
        geom: geometry description
        tot_data: output array
        tot_geom: output geometry
        
    """

    print('Stitching a tile...')

    # Assuming all projections have equal number of angles and same pixel sizes
    total_shape = tot_data.shape[::2]
    det_shape = data.shape[::2]

    total_size = flexData.detector_size(total_shape, tot_geom)
    det_size = flexData.detector_size(det_shape, geom)

    # Offset from the left top corner:
    x0 = tot_geom['det_hrz']
    y0 = tot_geom['det_vrt']

    x = geom['det_hrz']
    y = geom['det_vrt']

    x_offset = (
        (x - x0) + total_size[1] / 2 - det_size[1] / 2) / geom['det_pixel']
    y_offset = (
        (y - y0) + total_size[0] / 2 - det_size[0] / 2) / geom['det_pixel']

    # Round em up!
    x_offset = int(numpy.round(x_offset))
    y_offset = int(numpy.round(y_offset))

    # Pad image to get the same size as the total_slice:
    pad_x = tot_data.shape[2] - data.shape[2]
    pad_y = tot_data.shape[0] - data.shape[0]

    # Collapse both datasets and compute residual shift
    shift = _find_shift_(tot_data, data, [y_offset, x_offset])

    x_offset += shift[1]
    y_offset += shift[0]

    flexUtil.progress_bar(0)

    # Precompute weights:
    base0 = (tot_data[:, ::100, :].mean(1)) != 0

    new0 = numpy.zeros_like(base0)
    # Shift image:
    new0[:det_shape[0], :det_shape[1]] = 1.0
    new0 = interp.shift(new0, [y_offset, x_offset], order=1)
    #new0[y_offset:int(y_offset+det_shape[0]), x_offset:int(x_offset + det_shape[1])] = 1.0

    base_dist = ndimage.distance_transform_bf(base0)
    new_dist = ndimage.distance_transform_bf(new0)

    # Trim edges to avoid interpolation errors:
    base_dist -= 1
    new_dist -= 1

    base_dist *= base_dist > 0
    new_dist *= new_dist > 0
    norm = (base_dist + new_dist)
    norm[norm == 0] = numpy.inf

    # Apply offsets:
    for ii in range(tot_data.shape[1]):

        # Pad to match sizes:
        new = numpy.pad(data[:, ii, :], ((0, pad_y), (0, pad_x)),
                        mode='constant')

        # Apply shift:
        if (x_offset != 0) | (y_offset != 0):

            # Shift image:
            new = interp.shift(new, [y_offset, x_offset], order=1)

        # Add two images in a smart way:
        base = tot_data[:, ii, :]

        # Create distances to edge:
        tot_data[:, ii, :] = ((base_dist * base) + (new_dist * new)) / norm

        flexUtil.progress_bar((ii + 1) / tot_data.shape[1])