Example #1
def distance_transform_lin(im, axis=0, mode='both'):
    r"""
    Replaces each void voxel with the linear distance to the nearest solid
    voxel along the specified axis.

    Parameters
    ----------
    im : ND-array
        The image of the porous material with ``True`` values indicating the
        void phase (or phase of interest)

    axis : scalar
        The direction along which the distance should be measured, the default
        is 0 (i.e. along the x-direction)

    mode : string
        Controls how the distance is measured.  Options are:

        *'forward'* - Distances are measured in the increasing direction along
        the specified axis

        *'reverse'* - Distances are measured in the reverse direction.
        *'backward'* is also accepted.

        *'both'* - Distances are calculated in both directions (by recursively
        calling itself), and the minimum of the two results is returned.
    """
    if mode in ['backward', 'reverse']:
        im = sp.flip(im, axis)
        im = distance_transform_lin(im=im, axis=axis, mode='forward')
        im = sp.flip(im, axis)
        return im
    elif mode in ['both']:
        im_f = distance_transform_lin(im=im, axis=axis, mode='forward')
        im_b = distance_transform_lin(im=im, axis=axis, mode='backward')
        return sp.minimum(im_f, im_b)
    else:
        b = sp.cumsum(im > 0, axis=axis)
        c = sp.diff(b * (im == 0), axis=axis)
        d = sp.minimum.accumulate(c, axis=axis)
        if im.ndim == 1:
            e = sp.pad(d, pad_width=[1, 0], mode='constant', constant_values=0)
        elif im.ndim == 2:
            ax = [[[1, 0], [0, 0]], [[0, 0], [1, 0]]]
            e = sp.pad(d,
                       pad_width=ax[axis],
                       mode='constant',
                       constant_values=0)
        elif im.ndim == 3:
            ax = [[[1, 0], [0, 0], [0, 0]], [[0, 0], [1, 0], [0, 0]],
                  [[0, 0], [0, 0], [1, 0]]]
            e = sp.pad(d,
                       pad_width=ax[axis],
                       mode='constant',
                       constant_values=0)
        f = im * (b + e)
        return f
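
A minimal usage sketch for the function above, on a hypothetical 1D input (the function itself, together with the module-level ``sp`` alias it relies on, is assumed to be in scope):

import numpy as np

# 1D image: True = void, False = solid
im = np.array([0, 1, 1, 1, 0, 1, 1, 0], dtype=bool)

dist_fwd  = distance_transform_lin(im, axis=0, mode='forward')  # -> [0 1 2 3 0 1 2 0]
# 'both' is the element-wise minimum of the forward and backward passes
dist_both = distance_transform_lin(im, axis=0, mode='both')     # -> [0 1 2 1 0 1 1 0]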
Example #2
File: RIOlib.py  Project: grg2rsr/RIOlib
def calc_random_pattern_blocks(tCorr=100,prob=0.5,tStart=0,tDuration=1000,tTotal=1000,channel=0,name=''):
    """Calculate a sequence of random states with a fixed time length, and a 
    settable probability to be in either state.

    Parameters
    ----------
    tCorr: float
        the length of a state (in ms). Note that this can be a float, but it
        will ultimately be rounded to whole ms.
    
    prob: float
        the probability for each block to be in the 'high' state.

    tStart: int
        the time point at which the random changes start (in ms)
        
    tDuration: int
        the total length of the time section in which random changes can occur
        (in ms)
        
    tTotal: int
        the total length of the pattern (in ms)
        
    channel: int
        the channel to switch
        
    name: str
        the name of the pattern
        
    Returns
    -------
    Pattern: RIOpattern
        The generated RIOpattern instance    
    
    """
    
    nBlocks = int(tDuration / tCorr)
    Pattern = (sp.rand(nBlocks) < prob).astype('float32')
    state_vec = sp.repeat(Pattern, int(tCorr))  # tCorr rounded to whole ms
    
    # account for rounding errors
    if state_vec.shape[0] < tDuration:
        state_vec = sp.pad(state_vec,(0,tDuration - state_vec.shape[0]), mode='minimum')
    else:
        state_vec = state_vec[:tDuration]
    
    # pad to final size
    state_vec = sp.pad(state_vec, (tStart,tTotal-tStart-tDuration), mode='minimum')
    
    Pattern = RIOpattern(name=name, Pulses=States2RIOpulses(state_vec,channel), total_duration=tTotal)
    return Pattern, state_vec
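
The block construction above can be sketched with NumPy alone (hypothetical parameter values; the RIOpattern / States2RIOpulses wrapping from RIOlib is omitted):

import numpy as np

tCorr, prob = 100, 0.5                  # block length (ms) and 'high' probability
tStart, tDuration, tTotal = 0, 1000, 1000

n_blocks = int(tDuration / tCorr)
blocks = (np.random.rand(n_blocks) < prob).astype('float32')   # one state per block
state_vec = np.repeat(blocks, int(tCorr))                      # expand to 1 ms resolution
# place the random section at tStart and pad to the total pattern length
state_vec = np.pad(state_vec, (tStart, tTotal - tStart - tDuration), mode='minimum')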
Example #3
def mesh_region(region: bool, strel=None):
    r"""
    Creates a tri-mesh of the provided region using the marching cubes
    algorithm

    Parameters
    ----------
    region : ND-array
        A boolean image with ``True`` values indicating the region of interest

    strel : ND-array
        The structuring element to use when blurring the region.  The blur is
        performed using a simple convolution filter.  The point is to create a
        greyscale region to allow the marching cubes algorithm some freedom
        to conform the mesh to the surface.  As the size of ``strel`` increases
        the region will become increasingly blurred and inaccurate. The default
        is a spherical element with a radius of 1.

    Returns
    -------
    mesh : tuple
        A named-tuple containing ``faces``, ``verts``, ``norm``, and ``val``
        as returned by the ``skimage.measure.marching_cubes`` function.

    """
    im = region
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    if strel is None:
        if region.ndim == 3:
            strel = ball(1)
        if region.ndim == 2:
            strel = disk(1)
    pad_width = sp.amax(strel.shape)
    if im.ndim == 3:
        padded_mask = sp.pad(im, pad_width=pad_width, mode='constant')
        padded_mask = spim.convolve(padded_mask * 1.0,
                                    weights=strel) / sp.sum(strel)
    else:
        padded_mask = sp.reshape(im, (1, ) + im.shape)
        padded_mask = sp.pad(padded_mask, pad_width=pad_width, mode='constant')
    verts, faces, norm, val = marching_cubes_lewiner(padded_mask)
    result = namedtuple('mesh', ('verts', 'faces', 'norm', 'val'))
    result.verts = verts - pad_width
    result.faces = faces
    result.norm = norm
    result.val = val
    return result
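
A usage sketch, assuming a 3D boolean region such as one produced by porespy's blob generator (hypothetical input; the module-level dependencies used by the example, e.g. ``ball`` and ``marching_cubes_lewiner``, are assumed available):

import porespy as ps

region = ps.generators.blobs(shape=[50, 50, 50], porosity=0.5)
mesh = mesh_region(region)            # default: spherical structuring element, radius 1
print(mesh.verts.shape, mesh.faces.shape)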
Example #4
	def pad_zero(self,nonmonopole_to_zero=True):
		if self.ndim == 1: self.s = scipy.pad(self.s,pad_width=((1,0)),mode='constant',constant_values=0.)
		else:
			for idim in range(self.ndim): self.s[idim] = scipy.pad(self.s[idim],pad_width=((1,0)),mode='constant',constant_values=0.)
		self.window = scipy.pad(self.window,pad_width=((0,0),)+((1,0),)*self.ndim,mode='edge')
		if hasattr(self,'error'): self.error = scipy.pad(self.error,pad_width=((1,0),)*self.ndim,mode='edge')
		if nonmonopole_to_zero:
			if self.ndim == 1:
				for pole in self:
					if pole != self.zero: self.window[self.index(pole),0] = 0.
			else:
				for pole in self:
					for idim in range(self.ndim):
						if pole[idim] != 0: utils.fill_axis(self.window,axis=(0,1+idim),slices=(self.index(pole),0),values=0.)
Example #5
 def pad_zero(self):
     self.k = scipy.pad(self.k,
                        pad_width=((1, 0)),
                        mode='constant',
                        constant_values=0.)
     self.mu = scipy.pad(self.mu,
                         pad_width=((1, 1)),
                         mode='constant',
                         constant_values=((0, 1)))
     self.window = scipy.pad(self.window,
                             pad_width=((1, 0), (1, 1)),
                             mode='edge')
     self.error = scipy.pad(self.error,
                            pad_width=((1, 0), (1, 0)),
                            mode='edge')
Example #6
    def apply(self, data):
        #Data arrays dimensions and padding
        FT = data
        (dim_x, dim_y) = np.shape(FT)
        FT_padded = scipy.pad(array=FT, pad_width=[1, 1], mode='constant', constant_values=0)
        (dim_x_padded, dim_y_padded) = np.shape(FT_padded)

        #Ft-H*FT
        H_padded = np.zeros(FT_padded.shape)
        for i in range (1, dim_x_padded-1):
            for j in range(1, dim_y_padded-1):
                entry = FT_padded[i-1:i+2, j-1:j+2]
                #print(np.shape(entry))
                valor = entry*af
                #print(valor)
                H_padded[i-1:i+2, j-1:j+2] = valor
                #print(H_padded)
        H = np.zeros(FT.shape)
        for i in range (1, dim_x_padded-1):
            for j in range(1, dim_y_padded-1):
                H[i-1, j-1] = H_padded[i, j]
        #print(H)
        exp_FT_H_FT_P_2 = (np.exp((FT-(H*FT)+np.angle(FT))))**2
        #print(exp_FT_H_FT_P_2)
        return exp_FT_H_FT_P_2
Example #7
def create_caffe_input_file(file_ids, width):
    """Creates LMDB databases containing training and test sets derived from the ground truths of the simulated data. 
    ``width`` is the size of the windows to use."""  
    im_padding = ((width/2, width/2), (width/2, width/2), (0, 0))
    ims = [get_simulated_im(file_id)[0] for file_id in file_ids]
    ims = [(im - im.mean())/im.std() for im in ims]
    ims = [sp.pad(im, im_padding, mode='reflect') for im in ims]
    
    truth_padding =  ((width/2, width/2), (width/2, width/2))
    truths = [get_simulated_im(file_id)[1] for file_id in file_ids]
    truths = [sp.pad(truth, truth_padding, mode='reflect') for truth in truths]
    
    centers = get_centers(truths, width/2)
    training_centers, training_labels, test_centers, test_labels = make_labelled_sets(centers)

    fill_database('temporary/train_simulated.db', ims, training_centers, training_labels, width)
    fill_database('temporary/test_simulated.db', ims, test_centers, test_labels, width)
Example #8
 def __init__(self,
              shape,
              spacing=1,
              label_1='primary',
              label_2='secondary',
              **kwargs):
     super().__init__(**kwargs)
     spacing = sp.array(spacing)
     shape = sp.array(shape)
     # Deal with non-3D shape arguments
     shape = sp.pad(shape, [0, 3 - shape.size],
                    mode='constant',
                    constant_values=1)
     net = Cubic(shape=shape, spacing=1)
     net['throat.' + label_1] = True
     net['pore.' + label_1] = True
     single_dim = shape == 1
     shape[single_dim] = 2
     dual = Cubic(shape=shape - 1, spacing=1)
     faces = [['front', 'back'], ['left', 'right'], ['top', 'bottom']]
     faces = [faces[i] for i in sp.where(~single_dim)[0]]
     faces = sp.array(faces).flatten().tolist()
     dual.add_boundary_pores(faces)
     # Add secondary network name as a label
     dual['pore.' + label_2] = True
     dual['throat.' + label_2] = True
     # Shift coordinates prior to stitching
     dual['pore.coords'] += 0.5 * (~single_dim)
     topotools.stitch(net,
                      dual,
                      P_network=net.Ps,
                      P_donor=dual.Ps,
                      len_max=1)
     net['throat.interconnect'] = net['throat.stitched']
     del net['throat.stitched']
     # Clean-up labels
     net['pore.surface'] = False
     net['throat.surface'] = False
     for face in faces:
         # Remove face label from secondary network since it's internal now
         Ps = net.pores(labels=[face, label_2], mode='xnor')
         net['pore.' + face][Ps] = False
         Ps = net.pores(labels=[face + '_boundary'])
         net['pore.' + face][Ps] = True
         Ps = net.pores(face)
         net['pore.surface'][Ps] = True
         Ts = net.find_neighbor_throats(pores=Ps, mode='xnor')
         net['throat.surface'][Ts] = True
         net['throat.' + face] = net.tomask(throats=Ts)
     [net.pop(item) for item in net.labels() if 'boundary' in item]
     # Label non-surface pores and throats as internal
     net['pore.internal'] = True
     net['throat.internal'] = True
     # Transfer all dictionary items from 'net' to 'self'
     [self.update({item: net[item]}) for item in net]
     ws.close_project(net.project)
     # Finally, scale network to requested spacing
     net['pore.coords'] *= spacing
Example #9
def preprocess(path, config, scale = 3):
    img = imread(path)

    a = img

    #print("path:", path)
    #cv2.imwrite(os.path.join(os.getcwd(),config.result_dir+'/input.png'), img)
    
    
    #print("img:", img.shape, "scale:", scale)

    label_ = modcrop(img, scale)
    
    #print("label_:", label_.shape)
    
    bicbuic_img = cv2.resize(label_,None,fx = 1.0/scale ,fy = 1.0/scale, interpolation = cv2.INTER_CUBIC)# Resize by scaling factor
    
    #print("bicbuic_img:", bicbuic_img.shape)
    
    input_ = cv2.resize(bicbuic_img,None,fx=scale ,fy=scale, interpolation = cv2.INTER_CUBIC)# Resize by scaling factor

    #print("input_:", input_.shape)
    
    if config.is_train == False:
        cv2.imwrite(os.path.join(os.getcwd(),config.result_dir+'/interpolated.png'), input_)
    
    
    H = input_.shape[0]
    
    s = 33 #21 # 33
    #print((float(H)/s), (H//s))
    
    border_size = s #((float(H)/s) - (H//s))*s #* 8

    border_size = int(border_size / 2)

    #border_size = (12 / 2) # + 1 # Because we need overall padding 12 to be available <<-- Train
    
    #border_size = 16 # <<-- Test
    
    #print("border_size:", border_size)
    
    #input_ = scipy.pad(input_, ((border_size,border_size),(border_size,border_size)), mode='reflect')
    if config.is_train == False:
        #input_ = scipy.pad(input_, ((border_size,border_size + 1),(border_size,border_size + 1)), mode='reflect')
        #input_ = scipy.pad(input_, ((border_size,border_size),(border_size,border_size)), mode='reflect')
        input_ = scipy.pad(input_, ((border_size,border_size),(border_size,border_size)), mode='reflect')
    
        #cv2.imwrite(os.path.join(os.getcwd(),config.result_dir+'/bordered.png'), input_)
    
        #print("shape matches:", a.shape, input_.shape)

    #print("shape matches:", input_.shape == a.shape)
    
    #input_ = cv2.resize(input_, (input_.shape[0] + 20, input_.shape[1] + 20))
    #print("input_:", input_.shape)

    return input_, label_, a.shape
Example #10
	def pad_k(self,k,mode='constant',constant_values=0.,**kwargs):
		pad_start = scipy.sum(k<self.k[0])
		pad_end = scipy.sum(k>self.k[-1])
		for key in self.FIELDS:
			pad_width = ((0,0),)*(self[key].ndim-1) + ((pad_start,pad_end),)
			self[key] = scipy.pad(self[key],pad_width=pad_width,mode=mode,constant_values=constant_values)
		self['k'][:pad_start] = k[:pad_start]
		self['k'][-pad_end:] = k[-pad_end:]
		for key in kwargs:
			self[key][:pad_start] = kwargs[key][:pad_start]
			self[key][-pad_end:] = kwargs[key][-pad_end:]
Example #11
def create_caffe_input_file(file_ids, width):
    """Creates LMDB databases containing training and test sets derived from the ground truths of the simulated data. 
    ``width`` is the size of the windows to use."""
    im_padding = ((width / 2, width / 2), (width / 2, width / 2), (0, 0))
    ims = [get_simulated_im(file_id)[0] for file_id in file_ids]
    ims = [(im - im.mean()) / im.std() for im in ims]
    ims = [sp.pad(im, im_padding, mode='reflect') for im in ims]

    truth_padding = ((width / 2, width / 2), (width / 2, width / 2))
    truths = [get_simulated_im(file_id)[1] for file_id in file_ids]
    truths = [sp.pad(truth, truth_padding, mode='reflect') for truth in truths]

    centers = get_centers(truths, width / 2)
    training_centers, training_labels, test_centers, test_labels = make_labelled_sets(
        centers)

    fill_database('temporary/train_simulated.db', ims, training_centers,
                  training_labels, width)
    fill_database('temporary/test_simulated.db', ims, test_centers,
                  test_labels, width)
Example #12
	def gradient(self,parameters,**kwargs):
		toret = {par:0. for par in parameters}
		ixstart = 0
		for likelihood in self:
			pars = [par for par in parameters if par in likelihood.parameters]
			ixend = ixstart
			if pars:
				gradient = likelihood.gradient(pars,**kwargs)
				ixend = ixstart + gradient.values()[0].shape[-1]
				for par in pars:
					toret[par] += scipy.pad(gradient[par],pad_width=(ixstart,self.nbins-ixend),mode='constant',constant_values=0.)
			ixstart = ixend
		return toret
Example #13
def score_image(im, model, width):
    """Scores every pixel in ``im`` by applying ``model`` to windows of ``width`` onto the image"""
    padding = ((width/2, width/2), (width/2, width/2), (0, 0))
    im = sp.pad(im, padding, mode='reflect')    
    
    im = (im - im.mean())/im.std()
    window_centers = get_window_centers(im, width=width)
    window_gen = window_generator(im, window_centers, width=width)
    
    score_list = score_windows(window_gen, model, total_count=len(window_centers))
    unpadded_window_centers = window_centers - width/2    
    
    return unpadded_window_centers, score_list
Example #14
def pad_array_width(a, target_width):
    width = a.shape[1]

    right_padding = target_width - width

    if right_padding < 0:
        # if image width is larger than target_width, crop the image
        return a[:, :target_width]

    horizontal_padding = (0, right_padding)
    vertical_padding = (0, 0)
    depth_padding = (0, 0)
    return scipy.pad(
        a, pad_width=[vertical_padding, horizontal_padding, depth_padding])
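
A short sketch of the crop-or-pad behaviour on hypothetical (height, width, depth) image arrays, assuming the ``scipy`` import used by the example:

import numpy as np

a = np.zeros((32, 100, 3))
print(pad_array_width(a, 128).shape)   # (32, 128, 3) - padded on the right
print(pad_array_width(a, 64).shape)    # (32, 64, 3)  - cropped to the target width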
Example #15
 def __init__(self, shape=None, spacing=[1, 1, 1], label_1='primary',
              label_2='secondary', **kwargs):
     super().__init__(**kwargs)
     spacing = sp.array(spacing)
     shape = sp.array(shape)
     # Deal with non-3D shape arguments
     shape = sp.pad(shape, [0, 3-shape.size], mode='constant', constant_values=1)
     net = Cubic(shape=shape, spacing=[1, 1, 1])
     net['throat.'+label_1] = True
     net['pore.'+label_1] = True
     single_dim = shape == 1
     shape[single_dim] = 2
     dual = Cubic(shape=shape-1, spacing=[1, 1, 1])
     faces = [['front', 'back'], ['left', 'right'], ['top', 'bottom']]
     faces = [faces[i] for i in sp.where(~single_dim)[0]]
     faces = sp.array(faces).flatten().tolist()
     dual.add_boundaries(faces)
     # Add secondary network name as a label
     dual['pore.'+label_2] = True
     dual['throat.'+label_2] = True
     # Shift coordinates prior to stitching
     dual['pore.coords'] += 0.5*(~single_dim)
     stitch(net, dual, P_network=net.Ps, P_donor=dual.Ps, len_max=1)
     net['throat.interconnect'] = net['throat.stitched']
     del net['throat.stitched']
     net['pore.coords'] *= spacing
     # Clean-up labels
     net['pore.surface'] = False
     net['throat.surface'] = False
     for face in faces:
         # Remove face label from secondary network since it's internal now
         Ps = net.pores(labels=[face, label_2], mode='intersection')
         net['pore.'+face][Ps] = False
         Ps = net.pores(labels=[face+'_boundary'])
         net['pore.'+face][Ps] = True
         Ps = net.pores(face)
         net['pore.surface'][Ps] = True
         Ts = net.find_neighbor_throats(pores=Ps, mode='intersection')
         net['throat.surface'][Ts] = True
         net['throat.'+face] = net.tomask(throats=Ts)
     [net.pop(item) for item in net.labels() if 'boundary' in item]
     # Label non-surface pores and throats as internal
     net['pore.internal'] = ~net['pore.surface']
     Ts = net.find_neighbor_throats(pores=net['pore.internal'])
     net['throat.internal'] = False
     net['throat.internal'][Ts] = True
     # Transfer all dictionary items from 'net' to 'self'
     [self.update({item: net[item]}) for item in net]
     del self.workspace[net.name]
Example #16
def pad_faces(im, faces):
    r"""
    Pads the input image at the specified faces. The shape of the padded image
    matches the output of the ``add_boundary_regions`` function.

    Parameters
    ----------
    im : ND_array
        The image that needs to be padded

    faces : list of strings
        Labels indicating where image needs to be padded. Given a 3D image
        of shape ``[x, y, z] = [i, j, k]``, the following conventions are used
        to indicate along which axis the padding should be applied:

        * 'left' -> ``x = 0``
        * 'right' -> ``x = i``
        * 'front' -> ``y = 0``
        * 'back' -> ``y = j``
        * 'bottom' -> ``z = 0``
        * 'top' -> ``z = k``

    Returns
    -------
    An image padded at the specified face(s)

    See also
    --------
    add_boundary_regions
    """
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    f = faces
    if f is not None:
        if im.ndim == 2:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int(('front') in f) * 3 or int(('bottom') in f) * 3,
                      int(('back') in f) * 3 or int(('top') in f) * 3)]

        if im.ndim == 3:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int('front' in f) * 3, int('back' in f) * 3),
                     (int('top' in f) * 3, int('bottom' in f) * 3)]
        im = sp.pad(im, pad_width=faces, mode='edge')
    else:
        im = im
    return im
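
A short sketch of the resulting shape for a hypothetical 3D image (3 voxels of edge padding are added per requested face, matching ``add_boundary_regions``):

import numpy as np

im = np.ones((10, 20, 30), dtype=bool)
padded = pad_faces(im, faces=['left', 'top'])
print(padded.shape)    # (13, 20, 33): 3 voxels added along x for 'left', along z for 'top'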
Example #17
def score_image(im, model, width):
    """Scores every pixel in ``im`` by applying ``model`` to windows of ``width`` onto the image"""
    padding = ((width / 2, width / 2), (width / 2, width / 2), (0, 0))
    im = sp.pad(im, padding, mode='reflect')

    im = (im - im.mean()) / im.std()
    window_centers = get_window_centers(im, width=width)
    window_gen = window_generator(im, window_centers, width=width)

    score_list = score_windows(window_gen,
                               model,
                               total_count=len(window_centers))
    unpadded_window_centers = window_centers - width / 2

    return unpadded_window_centers, score_list
Example #18
 def apply(self, data):
     F1_padded = scipy.pad(array=data, pad_width=[1, 1], mode='constant', constant_values=0)
     (dim_x_padded, dim_y_padded) = np.shape(F1_padded)
     G_padded = np.zeros(F1_padded.shape)
     for i in range (1, dim_x_padded-1):
         for j in range(1, dim_y_padded-1):
             entry = F1_padded[i-1:i+2, j-1:j+2]
             valor = entry*af
             G_padded[i-1:i+2, j-1:j+2] = valor
             #print(G_padded)
     G = np.zeros(data.shape)
     for i in range (1, dim_x_padded-1):
         for j in range(1, dim_y_padded-1):
             G[i-1, j-1] = G_padded[i, j]
     #print(G)
     return G
Example #19
def find_outer_region(im, r=0):
    r"""
    Finds regions of the image that are outside of the solid matrix.

    This function uses the rolling ball method to define where the outer region
    ends and the void space begins.

    This function is particularly useful for samples that do not fill the
    entire rectangular image, such as cylindrical cores or samples with non-
    parallel faces.

    Parameters
    ----------
    im : ND-array
        Image of the porous material with 1's for void and 0's for solid

    r : scalar
        The radius of the rolling ball to use.  If not specified then a value
        is calculated as twice the maximum of the distance transform.  The image
        size is padded by this amount in all directions, so the image can
        become quite large and unwieldy if too large a value is given.

    Returns
    -------
    image : ND-array
        A boolean mask the same shape as ``im``, containing True in all voxels
        identified as *outside* the sample.

    """
    if r == 0:
        dt = spim.distance_transform_edt(input=im)
        r = int(sp.amax(dt)) * 2
    im_padded = sp.pad(array=im,
                       pad_width=r,
                       mode='constant',
                       constant_values=True)
    dt = spim.distance_transform_edt(input=im_padded)
    seeds = (dt >= r) + get_border(shape=im_padded.shape)
    # Remove seeds not connected to edges
    labels = spim.label(seeds)[0]
    mask = labels == 1  # Assume label of 1 on edges, assured by adding border
    dt = spim.distance_transform_edt(~mask)
    outer_region = dt < r
    outer_region = extract_subsection(im=outer_region, shape=im.shape)
    return outer_region
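
A usage sketch on a hypothetical circular (2D) sample that does not fill the image, assuming the helper functions used above (``get_border``, ``extract_subsection``) are importable:

import numpy as np

# Porous disc centred in a larger image; everything outside the disc is void (True)
x, y = np.indices((200, 200))
inside = (x - 100) ** 2 + (y - 100) ** 2 < 80 ** 2
im = np.ones((200, 200), dtype=bool)
im[inside] = np.random.rand(int(inside.sum())) > 0.4   # ~60 % void inside the sample

outer = find_outer_region(im, r=15)   # True in voxels identified as outside the sample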
Example #20
 def _find_blocks(self, array, trim_edges=False):
     array = sp.clip(array, a_min=0, a_max=1)
     temp = sp.pad(array, pad_width=1, mode='constant', constant_values=0)
     end_pts = sp.where(sp.ediff1d(temp) == -1)[0]  # Find 1->0 transitions
     end_pts -= 1  # To adjust for 0 padding
     seg_len = sp.cumsum(array)[end_pts]
     seg_len[1:] = seg_len[1:] - seg_len[:-1]
     start_pts = end_pts - seg_len + 1
     a = dict()
     a['start'] = start_pts
     a['end'] = end_pts
     a['length'] = seg_len
     if trim_edges:
         if (a['start'].size > 0) and (a['start'][0] == 0):
             [a.update({item: a[item][1:]}) for item in a]
         if (a['end'].size > 0) and (a['end'][-1] == sp.size(array) - 1):
             [a.update({item: a[item][:-1]}) for item in a]
     return a
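
The run-finding steps above can be exercised standalone with NumPy (hypothetical input; the same operations as the method, shown outside its class):

import numpy as np

array = np.clip(np.array([0, 1, 1, 1, 0, 0, 1, 1, 0, 1]), a_min=0, a_max=1)
temp = np.pad(array, pad_width=1, mode='constant', constant_values=0)
end_pts = np.where(np.ediff1d(temp) == -1)[0] - 1    # last index of each run of 1s
seg_len = np.cumsum(array)[end_pts]
seg_len[1:] = seg_len[1:] - seg_len[:-1]             # lengths of the individual runs
start_pts = end_pts - seg_len + 1
# start_pts -> [1 6 9], end_pts -> [3 7 9], seg_len -> [3 2 1]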
Example #21
 def _find_blocks(self, array, trim_edges=False):
     array = sp.clip(array, a_min=0, a_max=1)
     temp = sp.pad(array, pad_width=1, mode='constant', constant_values=0)
     end_pts = sp.where(sp.ediff1d(temp) == -1)[0]  # Find 1->0 transitions
     end_pts -= 1  # To adjust for 0 padding
     seg_len = sp.cumsum(array)[end_pts]
     seg_len[1:] = seg_len[1:] - seg_len[:-1]
     start_pts = end_pts - seg_len + 1
     a = dict()
     a['start'] = start_pts
     a['end'] = end_pts
     a['length'] = seg_len
     if trim_edges:
         if (a['start'].size > 0) and (a['start'][0] == 0):
             [a.update({item: a[item][1:]}) for item in a]
         if (a['end'].size > 0) and (a['end'][-1] == sp.size(array)-1):
             [a.update({item: a[item][:-1]}) for item in a]
     return a
Example #22
def simple_otsu(im, trim_solid=True):
    r"""
    Uses Otsu's method to find a threshold, then uses binary opening and
    closing to remove noise.

    Parameters
    ----------
    im : ND-image
        The greyscale image of the porous medium to be binarized.

    trim_solid : Boolean
        If True (default) then all solid voxels not connected to an image
        boundary are trimmed.

    Returns
    -------
    An ND-image the same size as ``im`` but with True and False values
    indicating the binarized or segmented phases.

    Examples
    --------

    >>> im = ps.generators.blobs([300, 300], porosity=0.5)
    >>> im = ps.generators.add_noise(im)
    >>> im = spim.gaussian_filter(im, sigma=1)
    >>> im = simple_otsu(im)

    """
    if im.ndim == 2:
        ball = disk
        cube = square
    im = sp.pad(array=im, pad_width=1, mode='constant', constant_values=1)
    val = filters.threshold_otsu(im)
    im = im >= val
    # Remove speckled noise from void and solid phases
    im = spim.binary_closing(input=im, structure=ball(1))
    im = spim.binary_opening(input=im, structure=ball(1))
    # Clean up edges
    im = spim.binary_closing(input=im, structure=cube(3))
    im = spim.binary_opening(input=im, structure=cube(3))
    im = im[tuple(slice(1, im.shape[d] - 1) for d in range(im.ndim))]
    temp = clear_border(~im)
    im = im + temp
    return im
Example #23
def pad_image_height(img, target_height):
    a = tf.keras.preprocessing.image.img_to_array(img)

    height = a.shape[0]

    padding_amount = target_height - height

    assert padding_amount >= 0

    top_padding = padding_amount // 2
    if padding_amount % 2 == 0:
        vertical_padding = (top_padding, top_padding)
    else:
        vertical_padding = (top_padding, top_padding + 1)

    horizontal_padding = (0, 0)
    depth_padding = (0, 0)
    return scipy.pad(
        a, pad_width=[vertical_padding, horizontal_padding, depth_padding])
Example #24
def template_match(AnalogSignal, Templates_sim):
    """
    performs a template match of each simulated template waveform to the
    AnalogSignal.

    Args:
        AnalogSignal (neo.core.AnalogSignal): the AnalogSignal for the template
            match
        Templates_sim (neo.core.AnalogSignal): the simulated templates

    Returns:
        neo.core.AnalogSignal: the resulting scores of the template match
    """
    # TODO OPTIMIZE - multithread template match

    N = Templates_sim.shape[1]
    wsize = Templates_sim.shape[0]

    # prep
    Scores = sp.zeros((AnalogSignal.shape[0], N)).astype('float32')
    data_ = AnalogSignal.magnitude.astype('float32')
    Templates_sim = Templates_sim.magnitude.astype('float32')
    Npad = wsize - 1

    # template match run
    for i in range(N):
        res = cv2.matchTemplate(data_, Templates_sim[:, i], method=1).flatten()
        Scores[:, i] = sp.pad(res, (0, Npad),
                              mode='constant',
                              constant_values=1)
    Scores = 1 - Scores  # remap from 0 to 1

    # to neo object
    Scores = neo.core.AnalogSignal(Scores,
                                   units=pq.dimensionless,
                                   t_start=AnalogSignal.times[0],
                                   sampling_rate=AnalogSignal.sampling_rate,
                                   kind='Scores')
    return Scores
Example #25
    def apply(self, data):
        FT = data
        F2 = np.zeros(FT.shape)
        F2_padded = scipy.pad(array=F2, pad_width=[1, 1], mode='constant', constant_values=0)
        (dim_x_padded, dim_y_padded) = np.shape(F2_padded)
        #Note that here we suppose that radius ρ equals 1
        #Note that we're getting minimum value from each FTk,ρ that is padded, but the numbers are all negative so this doesn't matter
        for i in range (1, dim_x_padded-1):
            for j in range(1, dim_y_padded-1):
                entry = FT[i-1:i+2, j-1:j+2]
                flattened = np.matrix.flatten(entry)
                valor = min(flattened)
                F2[i-1, j-1] = valor
        #print(F2)

        S2 = np.zeros(FT.shape)
        (dim_x, dim_y) = np.shape(FT)
        for i in range (0, dim_x):
            for j in range(0, dim_y):
                min_val = np.subtract(FT[i, j],F2[i, j])
                #print(min_val)
                S2[i, j] = (min_val)
        #print(S2)
        return S2
Example #26
def snow(im,
         voxel_size=1,
         boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
         marching_cubes_area=False):
    r"""
    Analyzes an image that has been partitioned into void and solid regions
    and extracts the void and solid phase geometry as well as network
    connectivity.

    Parameters
    ----------
    im : ND-array
        Binary image in the Boolean form with True’s as void phase and False’s
        as solid phase.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.
    boundary_faces : list of strings
        Boundary faces labels are provided to assign hypothetical boundary
        nodes having zero resistance to transport process. For cubical
        geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
        ‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
        assigned then all six faces will be selected as boundary nodes
        automatically which can be trimmed later on based on user requirements.
    marching_cubes_area : bool
        If ``True`` then the surface area and interfacial area between regions
        will be calculated using the marching cubes algorithm. This is a more
        accurate representation of the area in the extracted network, but is
        quite slow, so it is ``False`` by default.  The default method simply
        counts voxels, so it does not correctly account for the voxelated
        nature of the images.

    Returns
    -------
    A dictionary containing the void phase size data, as well as the network
    topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.
    """

    # -------------------------------------------------------------------------
    # SNOW void phase
    tup = snow_partitioning(im=im, return_all=True)
    im = tup.im
    dt = tup.dt
    regions = tup.regions
    peaks = tup.peaks
    b_num = sp.amax(regions)
    # -------------------------------------------------------------------------
    # Boundary Conditions
    regions = add_boundary_regions(regions=regions, faces=boundary_faces)
    # -------------------------------------------------------------------------
    # Padding distance transform to extract geometrical properties
    f = boundary_faces
    if f is not None:
        if im.ndim == 2:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int(('front') in f) * 3 or int(('bottom') in f) * 3,
                      int(('back') in f) * 3 or int(('top') in f) * 3)]

        if im.ndim == 3:
            faces = [(int('left' in f) * 3, int('right' in f) * 3),
                     (int('front' in f) * 3, int('back' in f) * 3),
                     (int('top' in f) * 3, int('bottom' in f) * 3)]
        dt = sp.pad(dt, pad_width=faces, mode='edge')
        im = sp.pad(im, pad_width=faces, mode='edge')
    else:
        dt = dt
    regions = regions * im
    regions = make_contiguous(regions)
    # -------------------------------------------------------------------------
    # Extract void and throat information from image
    net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
    # -------------------------------------------------------------------------
    # Extract marching cube surface area and interfacial area of regions
    if marching_cubes_area:
        areas = region_surface_areas(regions=regions)
        interface_area = region_interface_areas(regions=regions,
                                                areas=areas,
                                                voxel_size=voxel_size)
        net['pore.surface_area'] = areas * voxel_size**2
        net['throat.area'] = interface_area.area
    # -------------------------------------------------------------------------
    # Find void to void connections of boundary and internal voids
    boundary_labels = net['pore.label'] > b_num
    loc1 = net['throat.conns'][:, 0] < b_num
    loc2 = net['throat.conns'][:, 1] >= b_num
    pore_labels = net['pore.label'] <= b_num
    loc3 = net['throat.conns'][:, 0] < b_num
    loc4 = net['throat.conns'][:, 1] < b_num
    net['pore.boundary'] = boundary_labels
    net['throat.boundary'] = loc1 * loc2
    net['pore.internal'] = pore_labels
    net['throat.internal'] = loc3 * loc4
    # -------------------------------------------------------------------------
    # label boundary pore faces
    if f is not None:
        coords = net['pore.coords']
        condition = coords[net['pore.internal']]
        dic = {
            'left': 0,
            'right': 0,
            'front': 1,
            'back': 1,
            'top': 2,
            'bottom': 2
        }
        if all(coords[:, 2] == 0):
            dic['top'] = 1
            dic['bottom'] = 1
        for i in f:
            if i in ['left', 'front', 'bottom']:
                net['pore.{}'.format(i)] = (coords[:, dic[i]] < min(
                    condition[:, dic[i]]))
            elif i in ['right', 'back', 'top']:
                net['pore.{}'.format(i)] = (coords[:, dic[i]] > max(
                    condition[:, dic[i]]))

    class network_dict(dict):
        pass

    net = network_dict(net)
    net.im = im
    net.dt = dt
    net.regions = regions
    net.peaks = peaks
    return net
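
A usage sketch, assuming porespy and an OpenPNM version providing ``GenericNetwork`` are installed; the conversion via ``update`` follows the docstring above, and the names ``im``, ``net`` and ``pn`` are hypothetical:

import porespy as ps
import openpnm as op

im = ps.generators.blobs(shape=[100, 100, 100], porosity=0.6)
net = snow(im, voxel_size=1e-6)              # dictionary using OpenPNM conventions

pn = op.network.GenericNetwork()
pn.update(net)                               # 'pore.coords', 'throat.conns', ...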
Example #27
def make_amb(Fsorg,m_up,plen,pulse,nspec=128,winname = 'boxcar'):
    """
        Make the ambiguity function dictionary that holds the lag ambiguity and
        range ambiguity. Uses a sinc function weighted by a blackman window. Currently
        only set up for an uncoded pulse.

        Args:
            Fsorg (:obj:`float`): A scalar, the original sampling frequency in Hertz.
            m_up (:obj:`int`): The upsampled ratio between the original sampling rate and the rate of
            the ambiguity function up sampling.
            plen (:obj:`int`): The length of the pulse in samples at the original sampling frequency.
            nlags (:obj:`int`): The number of lags used.

        Returns:
            Wttdict (:obj:`dict`): A dictionary with the keys 'WttAll' which is the full ambiguity function
            for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the
            ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity
            for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array
            for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity
            function on a pulse.
    """
    nspec = int(nspec)
    nlags = len(pulse)
    # make the sinc
    nsamps = sp.floor(8.5*m_up)
    nsamps = int(nsamps-(1-sp.mod(nsamps, 2)))
    # need to incorporate summation rule
    vol = 1.
    nvec = sp.arange(-sp.floor(nsamps/2.0), sp.floor(nsamps/2.0)+1)
    pos_windows = ['boxcar', 'triang', 'blackman', 'hamming', 'hann',
                   'bartlett', 'flattop', 'parzen', 'bohman', 'blackmanharris',
                   'nuttall', 'barthann']
    curwin = scisig.get_window(winname, nsamps)
    # Apply window to the sinc function. This will act as the impulse response of the filter
    outsinc = curwin*sp.sinc(nvec/m_up)
    outsinc = outsinc/sp.sum(outsinc)
    dt = 1/(Fsorg*m_up)
    #make delay vector
    Delay_num = sp.arange(-(len(nvec)-1),m_up*(nlags+5))
    Delay = Delay_num*dt

    t_rng = sp.arange(0, 1.5*plen, dt)
    if len(t_rng) > 2e4:
        raise ValueError('The time array is way too large. plen should be in seconds.')
    numdiff = len(Delay)-len(outsinc)
    numback = int(nvec.min()/m_up-Delay_num.min())
    numfront = numdiff-numback
#    outsincpad  = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0))
    outsincpad  = sp.pad(outsinc,(numback, numfront), mode='constant',
                         constant_values=(0.0, 0.0))
    (d2d, srng)=sp.meshgrid(Delay, t_rng)
    # envelope function
    t_p = sp.arange(nlags)/Fsorg
    envfunc = sp.interp(sp.ravel(srng-d2d), t_p,pulse, left=0., right=0.).reshape(d2d.shape)
#    envfunc = sp.zeros(d2d.shape)
#    envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1
    envfunc = envfunc/sp.sqrt(envfunc.sum(axis=0).max())
    #create the ambiguity function for everything
    Wtt = sp.zeros((nlags, d2d.shape[0], d2d.shape[1]))
    cursincrep = sp.tile(outsincpad[sp.newaxis, :], (len(t_rng), 1))
    Wt0 = cursincrep*envfunc
    Wt0fft = sp.fft(Wt0, axis=1)
    for ilag in sp.arange(nlags):
        cursinc = sp.roll(outsincpad, ilag*m_up)
        cursincrep = sp.tile(cursinc[sp.newaxis, :], (len(t_rng), 1))
        Wta = cursincrep*envfunc
        #do fft based convolution, probably best method given sizes
        Wtafft = scfft.fft(Wta, axis=1)

        nmove = len(nvec)-1
        Wtt[ilag] = sp.roll(scfft.ifft(Wtafft*sp.conj(Wt0fft), axis=1).real,
                            nmove, axis=1)

    # make matrix to take
    imat = sp.eye(nspec)
    tau = sp.arange(-sp.floor(nspec/2.), sp.ceil(nspec/2.))/Fsorg
    tauint = Delay
    interpmat = spinterp.interp1d(tau, imat, bounds_error=0, axis=0)(tauint)
    lagmat = sp.dot(Wtt.sum(axis=1), interpmat)
    W0 = lagmat[0].sum()
    for ilag in range(nlags):
        lagmat[ilag] = ((vol+ilag)/(vol*W0))*lagmat[ilag]

    Wttdict = {'WttAll':Wtt, 'Wtt':Wtt.max(axis=0), 'Wrange':Wtt.sum(axis=1),
               'Wlag':Wtt.sum(axis=2), 'Delay':Delay, 'Range':v_C_0*t_rng/2.0,
               'WttMatrix':lagmat}
    return Wttdict
Example #28
def make_amb(Fsorg, m_up, plen, pulse, nspec=128, winname='boxcar'):
    """
        Make the ambiguity function dictionary that holds the lag ambiguity and
        range ambiguity. Uses a sinc function weighted by a blackman window. Currently
        only set up for an uncoded pulse.

        Args:
            Fsorg (:obj:`float`): A scalar, the original sampling frequency in Hertz.
            m_up (:obj:`int`): The upsampled ratio between the original sampling rate and the rate of
            the ambiguity function up sampling.
            plen (:obj:`int`): The length of the pulse in samples at the original sampling frequency.
            nlags (:obj:`int`): The number of lags used.

        Returns:
            Wttdict (:obj:`dict`): A dictionary with the keys 'WttAll' which is the full ambiguity function
            for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the
            ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity
            for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array
            for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity
            function on a pulse.
    """
    nspec = int(nspec)
    nlags = len(pulse)
    # make the sinc
    nsamps = sp.floor(8.5 * m_up)
    nsamps = int(nsamps - (1 - sp.mod(nsamps, 2)))
    # need to incorporate summation rule
    vol = 1.
    nvec = sp.arange(-sp.floor(nsamps / 2.0), sp.floor(nsamps / 2.0) + 1)
    pos_windows = [
        'boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett',
        'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann'
    ]
    curwin = scisig.get_window(winname, nsamps)
    # Apply window to the sinc function. This will act as the impulse response of the filter
    outsinc = curwin * sp.sinc(nvec / m_up)
    outsinc = outsinc / sp.sum(outsinc)
    dt = 1 / (Fsorg * m_up)
    #make delay vector
    Delay_num = sp.arange(-(len(nvec) - 1), m_up * (nlags + 5))
    Delay = Delay_num * dt

    t_rng = sp.arange(0, 1.5 * plen, dt)
    if len(t_rng) > 2e4:
        raise ValueError(
            'The time array is way too large. plen should be in seconds.')
    numdiff = len(Delay) - len(outsinc)
    numback = int(nvec.min() / m_up - Delay_num.min())
    numfront = numdiff - numback
    #    outsincpad  = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0))
    outsincpad = sp.pad(outsinc, (numback, numfront),
                        mode='constant',
                        constant_values=(0.0, 0.0))
    (d2d, srng) = sp.meshgrid(Delay, t_rng)
    # envelope function
    t_p = sp.arange(nlags) / Fsorg
    envfunc = sp.interp(sp.ravel(srng - d2d), t_p, pulse, left=0.,
                        right=0.).reshape(d2d.shape)
    #    envfunc = sp.zeros(d2d.shape)
    #    envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1
    envfunc = envfunc / sp.sqrt(envfunc.sum(axis=0).max())
    #create the ambiguity function for everything
    Wtt = sp.zeros((nlags, d2d.shape[0], d2d.shape[1]))
    cursincrep = sp.tile(outsincpad[sp.newaxis, :], (len(t_rng), 1))
    Wt0 = cursincrep * envfunc
    Wt0fft = sp.fft(Wt0, axis=1)
    for ilag in sp.arange(nlags):
        cursinc = sp.roll(outsincpad, ilag * m_up)
        cursincrep = sp.tile(cursinc[sp.newaxis, :], (len(t_rng), 1))
        Wta = cursincrep * envfunc
        #do fft based convolution, probably best method given sizes
        Wtafft = scfft.fft(Wta, axis=1)

        nmove = len(nvec) - 1
        Wtt[ilag] = sp.roll(scfft.ifft(Wtafft * sp.conj(Wt0fft), axis=1).real,
                            nmove,
                            axis=1)

    # make matrix to take
    imat = sp.eye(nspec)
    tau = sp.arange(-sp.floor(nspec / 2.), sp.ceil(nspec / 2.)) / Fsorg
    tauint = Delay
    interpmat = spinterp.interp1d(tau, imat, bounds_error=0, axis=0)(tauint)
    lagmat = sp.dot(Wtt.sum(axis=1), interpmat)
    W0 = lagmat[0].sum()
    for ilag in range(nlags):
        lagmat[ilag] = ((vol + ilag) / (vol * W0)) * lagmat[ilag]

    Wttdict = {
        'WttAll': Wtt,
        'Wtt': Wtt.max(axis=0),
        'Wrange': Wtt.sum(axis=1),
        'Wlag': Wtt.sum(axis=2),
        'Delay': Delay,
        'Range': v_C_0 * t_rng / 2.0,
        'WttMatrix': lagmat
    }
    return Wttdict
Example #29
def make_amb(Fsorg,m_up,plen,nlags,nspec=128,winname = 'boxcar'):
    """ Make the ambiguity function dictionary that holds the lag ambiguity and
    range ambiguity. Uses a sinc function weighted by a blackman window. Currently
    only set up for an uncoded pulse.
    Inputs:
        Fsorg: A scalar, the original sampling frequency in Hertz.
        m_up: The upsampled ratio between the original sampling rate and the rate of
        the ambiguity function up sampling.
        plen: The length of the pulse in samples at the original sampling frequency.
        nlags: The number of lags used.
    Outputs:
        Wttdict: A dictionary with the keys 'WttAll' which is the full ambiguity function
        for each lag, 'Wtt' is the max for each lag for plotting, 'Wrange' is the
        ambiguity in the range with the lag dimension summed, 'Wlag' The ambiguity
        for the lag, 'Delay' the numpy array for the lag sampling, 'Range' the array
        for the range sampling and 'WttMatrix' for a matrix that will impart the ambiguity
    function on a pulse.
    """

    # make the sinc
    nsamps = sp.floor(8.5*m_up)
    nsamps = nsamps-(1-sp.mod(nsamps,2))

    nvec = sp.arange(-sp.floor(nsamps/2.0),sp.floor(nsamps/2.0)+1)
    pos_windows = ['boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett', 'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann']
    curwin = scisig.get_window(winname,nsamps)
    outsinc = curwin*sp.sinc(nvec/m_up)
    outsinc = outsinc/sp.sum(outsinc)
    dt = 1/(Fsorg*m_up)
    Delay = sp.arange(-(len(nvec)-1),m_up*(nlags+5))*dt
    t_rng = sp.arange(0,1.5*plen,dt)
    numdiff = len(Delay)-len(outsinc)
    outsincpad  = sp.pad(outsinc,(0,numdiff),mode='constant',constant_values=(0.0,0.0))
    (srng,d2d)=sp.meshgrid(t_rng,Delay)
    # envelope function
    envfunc = sp.zeros(d2d.shape)
    envfunc[(d2d-srng+plen-Delay.min()>=0)&(d2d-srng+plen-Delay.min()<=plen)]=1
    envfunc = envfunc/sp.sqrt(envfunc.sum(axis=0).max())
    #create the ambiguity function for everything
    Wtt = sp.zeros((nlags,d2d.shape[0],d2d.shape[1]))
    cursincrep = sp.tile(outsincpad[:,sp.newaxis],(1,d2d.shape[1]))
    Wt0 = Wta = cursincrep*envfunc
    Wt0fft = sp.fft(Wt0,axis=0)
    for ilag in sp.arange(nlags):
        cursinc = sp.roll(outsincpad,ilag*m_up)
        cursincrep = sp.tile(cursinc[:,sp.newaxis],(1,d2d.shape[1]))
        Wta = cursincrep*envfunc
        #do fft based convolution, probably best method given sizes
        Wtafft = scfft.fft(Wta,axis=0)
        if ilag==0:
            nmove = len(nvec)-1
        else:
            nmove = len(nvec)
        Wtt[ilag] = sp.roll(scfft.ifft(Wtafft*sp.conj(Wt0fft),axis=0).real,nmove,axis=0)

    # make matrix to take
#    imat = sp.eye(nspec)
#    tau = sp.arange(-sp.floor(nspec/2.),sp.ceil(nspec/2.))/Fsorg
#    tauint = Delay
#    interpmat = spinterp.interp1d(tau,imat,bounds_error=0,axis=0)(tauint)
#    lagmat = sp.dot(Wtt.sum(axis=2),interpmat)

#    # triangle window
    tau = sp.arange(-sp.floor(nspec/2.),sp.ceil(nspec/2.))/Fsorg
    amb1d = plen-tau
    amb1d[amb1d<0]=0.
    amb1d[tau<0]=0.
    amb1d=amb1d/plen
    kp = sp.argwhere(amb1d>0).flatten()
    lagmat = sp.zeros((Wtt.shape[0],nspec))
    lagmat.flat[sp.ravel_multi_index((sp.arange(Wtt.shape[0]),kp),lagmat.shape)]=amb1d[kp]
    Wttdict = {'WttAll':Wtt,'Wtt':Wtt.max(axis=0),'Wrange':Wtt.sum(axis=1),'Wlag':Wtt.sum(axis=2),
               'Delay':Delay,'Range':v_C_0*t_rng/2.0,'WttMatrix':lagmat}
    return Wttdict
Example #30
def mirror(im, radius):
    return sp.pad(im, ((radius, radius), (radius, radius)), mode='reflect')
Example #31
def fftmorphology(im, strel, mode='opening'):
    r"""
    Perform morphological operations on binary images using fft approach for
    improved performance

    Parameters
    ----------
    im : nd-array
        The binary image on which to perform the morphological operation

    strel : nd-array
        The structuring element to use.  Must have the same dims as ``im``.

    mode : string
        The type of operation to perform.  Options are 'dilation', 'erosion',
        'opening' and 'closing'.

    Returns
    -------
    image : ND-array
        A copy of the image with the specified morphological operation applied
        using the FFT-based methods available in ``scipy.signal.fftconvolve``.

    Notes
    -----
    This function uses ``scipy.signal.fftconvolve`` which *can* be more than
    10x faster than the standard binary morphology operation in
    ``scipy.ndimage``.  This speed up may not always be realized, depending
    on the scipy distribution used.

    Examples
    --------
    >>> import porespy as ps
    >>> from numpy import array_equal
    >>> import scipy.ndimage as spim
    >>> from skimage.morphology import disk
    >>> im = ps.generators.blobs(shape=[100, 100], porosity=0.8)

    Check that erosion, dilation, opening, and closing are all the same as
    the ``scipy.ndimage`` functions:

    >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='erosion')
    >>> temp = spim.binary_erosion(im, structure=disk(5))
    >>> array_equal(result, temp)
    True

    >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='dilation')
    >>> temp = spim.binary_dilation(im, structure=disk(5))
    >>> array_equal(result, temp)
    True

    >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='opening')
    >>> temp = spim.binary_opening(im, structure=disk(5))
    >>> array_equal(result, temp)
    True

    >>> result = ps.filters.fftmorphology(im, strel=disk(5), mode='closing')
    >>> temp = spim.binary_closing(im, structure=disk(5))
    >>> array_equal(result, temp)
    True

    """
    def erode(im, strel):
        t = fftconvolve(im, strel, mode='same') > (strel.sum() - 0.1)
        return t

    def dilate(im, strel):
        t = fftconvolve(im, strel, mode='same') > 0.1
        return t

    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')

    # Perform erosion and dilation
    # The array must be padded with 0's so it works correctly at edges
    temp = sp.pad(array=im, pad_width=1, mode='constant', constant_values=0)
    if mode.startswith('ero'):
        temp = erode(temp, strel)
    if mode.startswith('dila'):
        temp = dilate(temp, strel)

    # Remove padding from resulting image
    if im.ndim == 2:
        result = temp[1:-1, 1:-1]
    elif im.ndim == 3:
        result = temp[1:-1, 1:-1, 1:-1]

    # Perform opening and closing
    if mode.startswith('open'):
        temp = fftmorphology(im=im, strel=strel, mode='erosion')
        result = fftmorphology(im=temp, strel=strel, mode='dilation')
    if mode.startswith('clos'):
        temp = fftmorphology(im=im, strel=strel, mode='dilation')
        result = fftmorphology(im=temp, strel=strel, mode='erosion')

    return result
Example #32
def read_tradb(path_to_tradb_file, **kwargs):
    """
    Read tradb file to memory
    :param path_to_tradb_file:          str path to .tradb file
    :return:                            numpy array with time intervals in row 0, trai i on row i
    """
    # initialize sqlite connection
    conn = sqlite3.connect(path_to_tradb_file)
    c = conn.cursor()

    # Read some data characteristics:
    c.execute("SELECT Value FROM tr_globalinfo where Key = 'TRAI'")
    number_of_hits = int(c.fetchall()[0][0])# + 1
    print('\nread_tradb: ' + str(number_of_hits) + ' hits found in ' +
              path_to_tradb_file.split('/')[-1])
    info = pd.read_sql_query("SELECT Pretrigger, Thr, SampleRate, Samples, TR_mV "
                             "FROM view_tr_data WHERE TRAI = 1", conn)
    # Harvest data characteristics to variables:
    threshold = np.around(20*np.log10(info['Thr'][0]), decimals=1)        # in decibels
    samples_per_waveform = info['Samples'][0]
    pretrigger_samples = info['Pretrigger'][0]
    sample_rate = info['SampleRate'][0]             # in Hz
    tr_mV = info['TR_mV'][0]    # Conversion factor for blob data to mV
    print('Waveforms info' + '\n--------------'
          '\nThreshold:\t\t\t' + str(threshold) + ' dB' +
          '\nSamples per waveform:\t\t' + str(samples_per_waveform) +
          '\nPretrigger samples:\t\t' + str(pretrigger_samples) +
          '\nSample rate:\t\t\t' + str(sample_rate/10.0**6) + ' MHz\n')

    trai = sorted(kwargs.get('trai', []))  # empty default -> all TRAI are loaded below
    if trai:
        if not isinstance(trai, tuple) and not isinstance(trai, list):
            trai = [trai]
        if 0 in trai:
            trai = [i for i in trai if i != 0]
    if not trai:
        trai = range(1, number_of_hits+1)

    print('read_tradb: loading ' + str(len(trai)) + ' waveform(s) to memory.')
    t_start = time.time()

    """ Retrieval method: single query to pandas, np.frombuffer conversion """      # Promising (1M waveforms in ~10s)
    ae_waveforms = pd.read_sql("SELECT Data FROM view_tr_data WHERE TRAI > 0 AND TRAI <=" + str(max(trai)),
                               conn, coerce_float=False)

    # print np.frombuffer(data['Data'][0], dtype=np.short)
    ae_waveforms = ae_waveforms['Data'].apply(lambda x: np.frombuffer(x, dtype=np.short))
    print('operation took ' + str(round(time.time()-t_start, 3)) + ' seconds\n')

    # Make room for time data in the first row (add one row of padding)
    ae_waveforms = sci.pad(ae_waveforms, (1, 0), mode='constant')
    # Make time array and save to first row
    t = sci.array(range(0, samples_per_waveform))/float(sample_rate)*10.0**6
    t = t - sci.array(info['Pretrigger'])/float(info['SampleRate'])*10.0**6
    ae_waveforms[0] = t

    # Convert all values to mV, except the time row:
    ae_waveforms[1:] = ae_waveforms[1:]*tr_mV

    c.close()
    conn.close()
    return ae_waveforms
Example #33
def snow_dual(im, voxel_size=1,
              boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
              marching_cubes_area=False):

    r"""
    Extracts a dual pore and solid network from a binary image using a modified
    version of the SNOW algorithm

    Parameters
    ----------
    im : ND-array
        Binary image in the Boolean form with True’s as void phase and False’s
        as solid phase. It can also process the inverted configuration of the
        boolean image, but then the output labelling of the phases will be
        inverted and solid-phase properties will be assigned to the void-phase
        labels, which will cause confusion when performing the simulation.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image since the scale of the image is always 1 unit length per voxel.
    boundary_faces : list of strings
        Boundary faces labels are provided to assign hypothetical boundary
        nodes having zero resistance to transport process. For cubical
        geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
        ‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
        assigned then all six faces will be selected as boundary nodes
        automatically which can be trimmed later on based on user requirements.
    marching_cubes_area : bool
        If ``True`` then the surface area and interfacial area between regions
        will be calculated using the marching cubes algorithm. This is a more
        accurate representation of the area in the extracted network, but is
        quite slow, so it is ``False`` by default.  The default method simply
        counts voxels, so it does not correctly account for the voxelated
        nature of the images.

    Returns
    -------
    A dictionary containing all the void and solid phase size data, as well as
    the network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmentation".  Phys. Rev. E 96, 023307 (2017)

    [2] Khan, ZA et al.  "Dual network extraction algorithm to investigate
    multiple transport processes in porous materials: Image-based modeling
    of pore and grain-scale processes".  Computers and Chemical Engineering.
    123(6), 64-77 (2019)

    """
    # -------------------------------------------------------------------------
    # SNOW void phase
    pore_regions = snow_partitioning(im, return_all=True)
    # SNOW solid phase
    solid_regions = snow_partitioning(~im, return_all=True)
    # -------------------------------------------------------------------------
    # Combined Distance transform of two phases.
    pore_dt = pore_regions.dt
    solid_dt = solid_regions.dt
    dt = pore_dt + solid_dt
    pore_peaks = pore_regions.peaks
    solid_peaks = solid_regions.peaks
    peaks = pore_peaks + solid_peaks
    # Calculates combined void and solid regions for dual network extraction
    pore_regions = pore_regions.regions
    solid_regions = solid_regions.regions
    pore_region = pore_regions*im
    solid_region = solid_regions*~im
    solid_num = sp.amax(pore_regions)
    solid_region = solid_region + solid_num
    solid_region = solid_region * ~im
    regions = pore_region + solid_region
    b_num = sp.amax(regions)
    # -------------------------------------------------------------------------
    # Boundary Conditions
    regions = add_boundary_regions(regions=regions, faces=boundary_faces)
    # -------------------------------------------------------------------------
    # Padding distance transform to extract geometrical properties
    f = boundary_faces
    if f is not None:
        if im.ndim == 2:
            faces = [(int('left' in f)*3, int('right' in f)*3),
                     (int('front' in f)*3 or int('bottom' in f)*3,
                      int('back' in f)*3 or int('top' in f)*3)]
        if im.ndim == 3:
            faces = [(int('left' in f)*3, int('right' in f)*3),
                     (int('front' in f)*3, int('back' in f)*3),
                     (int('top' in f)*3, int('bottom' in f)*3)]
        dt = sp.pad(dt, pad_width=faces, mode='edge')
    else:
        dt = dt
    # -------------------------------------------------------------------------
    # Extract void, solid and throat information from image
    net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # Extract marching cube surface area and interfacial area of regions
    if marching_cubes_area:
        areas = region_surface_areas(regions=regions)
        interface_area = region_interface_areas(regions=regions, areas=areas,
                                                voxel_size=voxel_size)
        net['pore.surface_area'] = areas * voxel_size**2
        net['throat.area'] = interface_area.area
    # -------------------------------------------------------------------------
    # Find void to void, void to solid and solid to solid throat conns
    loc1 = net['throat.conns'][:, 0] < solid_num
    loc2 = net['throat.conns'][:, 1] >= solid_num
    loc3 = net['throat.conns'][:, 1] < b_num
    pore_solid_labels = loc1 * loc2 * loc3

    loc4 = net['throat.conns'][:, 0] >= solid_num
    loc5 = net['throat.conns'][:, 0] < b_num
    solid_solid_labels = loc4 * loc2 * loc5 * loc3

    loc6 = net['throat.conns'][:, 1] < solid_num
    pore_pore_labels = loc1 * loc6

    loc7 = net['throat.conns'][:, 1] >= b_num
    boundary_throat_labels = loc5 * loc7

    solid_labels = ((net['pore.label'] > solid_num) * ~
                    (net['pore.label'] > b_num))
    boundary_labels = net['pore.label'] > b_num
    b_sa = sp.zeros(len(boundary_labels[boundary_labels == 1.0]))
    # -------------------------------------------------------------------------
    # Calculates void interfacial area that connects with solid and vice versa
    p_conns = net['throat.conns'][:, 0][pore_solid_labels]
    ps = net['throat.area'][pore_solid_labels]
    p_sa = sp.bincount(p_conns, ps)
    s_conns = net['throat.conns'][:, 1][pore_solid_labels]
    s_pa = sp.bincount(s_conns, ps)
    s_pa = sp.trim_zeros(s_pa)  # remove pore surface area labels
    p_solid_surf = sp.concatenate((p_sa, s_pa, b_sa))
    # -------------------------------------------------------------------------
    # Calculates interfacial area using marching cube method
    if marching_cubes_area:
        ps_c = net['throat.area'][pore_solid_labels]
        p_sa_c = sp.bincount(p_conns, ps_c)
        s_pa_c = sp.bincount(s_conns, ps_c)
        s_pa_c = sp.trim_zeros(s_pa_c)  # remove pore surface area labels
        p_solid_surf = sp.concatenate((p_sa_c, s_pa_c, b_sa))
    # -------------------------------------------------------------------------
    # Adding additional information of dual network
    net['pore.solid_void_area'] = (p_solid_surf * voxel_size**2)
    net['throat.void'] = pore_pore_labels
    net['throat.interconnect'] = pore_solid_labels
    net['throat.solid'] = solid_solid_labels
    net['throat.boundary'] = boundary_throat_labels
    net['pore.void'] = net['pore.label'] <= solid_num
    net['pore.solid'] = solid_labels
    net['pore.boundary'] = boundary_labels

    class network_dict(dict):
        pass
    net = network_dict(net)
    net.im = im
    net.dt = dt
    net.regions = regions
    net.peaks = peaks
    net.pore_dt = pore_dt
    net.pore_regions = pore_region
    net.pore_peaks = pore_peaks
    net.solid_dt = solid_dt
    net.solid_regions = solid_region
    net.solid_peaks = solid_peaks

    return net
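A minimal usage sketch, not part of the original example: it assumes this function is exposed as ``porespy.networks.snow_dual`` and that OpenPNM is installed. The returned dictionary follows OpenPNM naming, so ``update`` can load it directly.

import porespy as ps
import openpnm as op

im = ps.generators.blobs(shape=[200, 200, 200], porosity=0.6)  # synthetic test image
net = ps.networks.snow_dual(im, voxel_size=1e-6)               # dual pore/solid network
pn = op.network.GenericNetwork()
pn.update(net)  # keys use the 'pore.*' / 'throat.*' convention, so update() loads them directly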
Example #34
0
def add_walls(self):
    self.image = sp.pad(self.image,
                        pad_width=1,
                        mode='constant',
                        constant_values=0)
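A standalone illustration, not from the source, of what this method does: padding with constant zeros wraps the image in a one-voxel layer of solid. The examples here alias scipy as ``sp``; ``numpy.pad`` behaves identically.

import numpy as np

im = np.ones([4, 4], dtype=bool)                                      # all void
walled = np.pad(im, pad_width=1, mode='constant', constant_values=0)  # add solid walls
print(walled.shape)  # (6, 6); the outer ring of voxels is now False (solid)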
Example #35
0
def regions_to_network(im, dt=None, voxel_size=1):
    r"""
    Analyzes an image that has been partitioned into pore regions and extracts
    the pore and throat geometry as well as network connectivity.

    Parameters
    ----------
    im : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that this image must have zeros indicating the solid phase.

    dt : ND-array
        The distance transform of the pore space.  If not given it will be
        calculated, but it can save time to provide one if available.

    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of a
        voxel, so the volume of a voxel would be **voxel_size**-cubed.  The
        default is 1, which is useful when overlaying the PNM on the original
        image, since the scale of the image is always 1 unit length per voxel.

    Returns
    -------
    A dictionary containing all the pore and throat size data, as well as the
    network topological information.  The dictionary names use the OpenPNM
    convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
    directly to an OpenPNM network object using the ``update`` command.
    """
    print('_' * 60)
    print('Extracting pore and throat information from image')
    from skimage.morphology import disk, square, ball, cube
    if im.ndim == 2:
        cube = square
        ball = disk

#    if ~sp.any(im == 0):
#        raise Exception('The received image has no solid phase (0\'s)')

    if dt is None:
        dt = spim.distance_transform_edt(im > 0)
        dt = spim.gaussian_filter(input=dt, sigma=0.5)

    # Get 'slices' into im for each pore region
    slices = spim.find_objects(im)

    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im) + 1)
    Np = sp.size(Ps)
    p_coords = sp.zeros((Np, im.ndim), dtype=float)
    p_volume = sp.zeros((Np, ), dtype=float)
    p_dia_local = sp.zeros((Np, ), dtype=float)
    p_dia_global = sp.zeros((Np, ), dtype=float)
    p_label = sp.zeros((Np, ), dtype=int)
    p_area_surf = sp.zeros((Np, ), dtype=int)
    mc_sa = sp.zeros((Np, ), dtype=int)
    t_area_mc = []
    t_conns = []
    t_dia_inscribed = []
    t_area = []
    t_perimeter = []
    t_coords = []

    # Start extracting size information for pores and throats
    for i in tqdm(Ps):
        pore = i - 1
        #        if slices[pore] is None:
        #            continue
        s = extend_slice(slices[pore], im.shape)
        sub_im = im[s]
        sub_dt = dt[s]
        pore_im = sub_im == i
        # ---------------------------------------------------------------------
        padded_mask = sp.pad(pore_im, pad_width=1, mode='constant')
        pore_dt = spim.distance_transform_edt(padded_mask)
        if padded_mask.ndim == 3:
            filter_mask = spim.convolve(padded_mask * 1.0,
                                        weights=ball(1)) / sp.sum(ball(1))
            verts, faces, norm, val = measure.marching_cubes_lewiner(
                filter_mask)
        else:
            padded_mask1 = sp.reshape(pore_im, (1, ) + pore_im.shape)
            padded_mask1 = sp.pad(padded_mask1, pad_width=1, mode='constant')
            verts, faces, norm, val = measure.marching_cubes_lewiner(
                padded_mask1)
        mc_sa[pore] = measure.mesh_surface_area(verts, faces)
        # ---------------------------------------------------------------------
        s_offset = sp.array([i.start for i in s])
        p_label[pore] = i
        p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
        p_volume[pore] = sp.sum(pore_im)
        p_dia_local[pore] = 2 * sp.amax(pore_dt)
        p_dia_global[pore] = 2 * sp.amax(sub_dt)
        p_area_surf[pore] = sp.sum(pore_dt == 1)
        im_w_throats = spim.binary_dilation(input=pore_im, structure=ball(1))
        im_w_throats = im_w_throats * sub_im
        Pn = sp.unique(im_w_throats)[1:] - 1
        for j in Pn:
            if j > pore:
                t_conns.append([pore, j])
                vx = sp.where(im_w_throats == (j + 1))
                t_dia_inscribed.append(2 * sp.amax(sub_dt[vx]))
                t_perimeter.append(sp.sum(sub_dt[vx] < 2))
                t_area.append(sp.size(vx[0]))
                # -------------------------------------------------------------
                merged_region = im[(
                    min(slices[pore][0].start, slices[j][0].start)
                ):max(slices[pore][0].stop, slices[j][0].stop), (
                    min(slices[pore][1].start, slices[j][1].start)
                ):max(slices[pore][1].stop, slices[j][1].stop)]
                merged_region = ((merged_region == pore + 1) +
                                 (merged_region == j + 1))
                if im.ndim == 3:
                    merged_region = sp.pad(merged_region,
                                           pad_width=1,
                                           mode='constant',
                                           constant_values=0)
                    mfilter = spim.convolve(merged_region * 1.0,
                                            weights=ball(1)) / sp.sum(ball(1))
                    j_mask = im[slices[j]] == j + 1
                    j_mask = sp.pad(j_mask * 1.0,
                                    pad_width=1,
                                    mode='constant',
                                    constant_values=0)
                    jfilter = spim.convolve(j_mask, weights=ball(1)) / sp.sum(
                        ball(1))
                else:
                    merged_region = sp.reshape(merged_region,
                                               (1, ) + merged_region.shape)
                    mfilter = sp.pad(merged_region,
                                     pad_width=1,
                                     mode='constant',
                                     constant_values=0)
                    j_mask = im[slices[j]] == j + 1
                    j_mask = sp.reshape(j_mask, (1, ) + j_mask.shape)
                    jfilter = sp.pad(j_mask * 1.0,
                                     pad_width=1,
                                     mode='constant',
                                     constant_values=0)
                verts1, face1, n1, v1 = measure.marching_cubes_lewiner(mfilter)
                mc_sa_combined = measure.mesh_surface_area(verts1, face1)
                verts2, face2, n2, v2 = measure.marching_cubes_lewiner(jfilter)
                mc_sa_j = measure.mesh_surface_area(verts2, face2)
                mc_area = 0.5 * (mc_sa_j + mc_sa[pore] - mc_sa_combined)

                if mc_area < 0:
                    mc_area = 1.0
                t_area_mc.append(mc_area)
                # -------------------------------------------------------------
                t_inds = tuple([i + j for i, j in zip(vx, s_offset)])
                temp = sp.where(dt[t_inds] == sp.amax(dt[t_inds]))[0][0]
                if im.ndim == 2:
                    t_coords.append(tuple((t_inds[0][temp], t_inds[1][temp])))
                else:
                    t_coords.append(
                        tuple((t_inds[0][temp], t_inds[1][temp],
                               t_inds[2][temp])))
    # Clean up values
    Nt = len(t_dia_inscribed)  # Get number of throats
    if im.ndim == 2:  # If 2D, add 0's in 3rd dimension
        p_coords = sp.vstack((p_coords.T, sp.zeros((Np, )))).T
        t_coords = sp.vstack((sp.array(t_coords).T, sp.zeros((Nt, )))).T

    net = {}
    net['pore.all'] = sp.ones((Np, ), dtype=bool)
    net['throat.all'] = sp.ones((Nt, ), dtype=bool)
    net['pore.coords'] = sp.copy(p_coords) * voxel_size
    net['pore.centroid'] = sp.copy(p_coords) * voxel_size
    net['throat.centroid'] = sp.array(t_coords) * voxel_size
    net['throat.conns'] = sp.array(t_conns)
    net['pore.label'] = sp.array(p_label)
    net['pore.volume'] = sp.copy(p_volume) * (voxel_size**3)
    net['throat.volume'] = sp.zeros((Nt, ), dtype=float)
    net['pore.diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.inscribed_diameter'] = sp.copy(p_dia_local) * voxel_size
    net['pore.equivalent_diameter'] = 2 * (
        (3 / 4 * net['pore.volume'] / sp.pi)**(1 / 3))
    net['pore.extended_diameter'] = sp.copy(p_dia_global) * voxel_size
    net['pore.surface_area'] = sp.copy(p_area_surf) * (voxel_size)**2
    net['pore.surface_area_mc'] = sp.copy(mc_sa) * (voxel_size)**2
    net['throat.area_mc'] = sp.array(t_area_mc) * (voxel_size**2)
    net['throat.diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.inscribed_diameter'] = sp.array(t_dia_inscribed) * voxel_size
    net['throat.area'] = sp.array(t_area) * (voxel_size**2)
    net['throat.perimeter'] = sp.array(t_perimeter) * voxel_size
    net['throat.equivalent_diameter'] = ((sp.array(t_area) *
                                          (voxel_size**2))**(0.5))
    P12 = net['throat.conns']
    PT1 = (sp.sqrt(
        sp.sum(((p_coords[P12[:, 0]] - t_coords) * voxel_size)**2, axis=1)))
    PT2 = (sp.sqrt(
        sp.sum(((p_coords[P12[:, 1]] - t_coords) * voxel_size)**2, axis=1)))
    net['throat.total_length'] = PT1 + PT2
    PT1 = PT1 - p_dia_local[P12[:, 0]] / 2 * voxel_size
    PT2 = PT2 - p_dia_local[P12[:, 1]] / 2 * voxel_size
    net['throat.length'] = PT1 + PT2
    dist = (p_coords[P12[:, 0]] - p_coords[P12[:, 1]]) * voxel_size
    net['throat.direct_length'] = sp.sqrt(sp.sum(dist**2, axis=1))

    return net
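A minimal usage sketch, not part of the original example, assuming porespy exposes ``snow_partitioning`` under ``porespy.filters`` and this function under ``porespy.networks``.

import porespy as ps

im = ps.generators.blobs(shape=[100, 100, 100], porosity=0.65)
snow = ps.filters.snow_partitioning(im, return_all=True)      # watershed regions + distance transform
net = ps.networks.regions_to_network(im=snow.regions*im,      # zero out solid voxels
                                     dt=snow.dt,
                                     voxel_size=1e-6)
print(net['throat.conns'].shape)  # (Nt, 2): the two pore indices of each throat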
Example #36
0
def add_walls(self):
    self.image = sp.pad(self.image,
                        pad_width=1,
                        mode='constant',
                        constant_values=0)
Example #37
0
def read_tradb(path_to_tradb_file, **kwargs):
    """
    Read tradb file to memory
    :param path_to_tradb_file:          str path to .tradb file
    :return:                            numpy array with the time axis in row 0 and the waveform for TRAI i in row i
    """
    # initialize sqlite connection
    conn = sqlite3.connect(path_to_tradb_file)
    c = conn.cursor()

    # Read some data characteristics:
    c.execute("SELECT Value FROM tr_globalinfo where Key = 'TRAI'")
    number_of_hits = int(c.fetchall()[0][0])  # + 1
    print('\nread_tradb: ' + str(number_of_hits) + ' hits found in ' +
          path_to_tradb_file.split('/')[-1])
    info = pd.read_sql_query(
        "SELECT Pretrigger, Thr, SampleRate, Samples, TR_mV "
        "FROM view_tr_data WHERE TRAI = 1", conn)
    # Harvest data characteristics to variables:
    threshold = np.around(20 * np.log10(info['Thr'][0]),
                          decimals=1)  # in decibels
    samples_per_waveform = info['Samples'][0]
    pretrigger_samples = info['Pretrigger'][0]
    sample_rate = info['SampleRate'][0]  # in Hz
    tr_mV = info['TR_mV'][0]  # Conversion factor for blob data to mV
    print('Waveforms info' + '\n--------------'
          '\nThreshold:\t\t\t' + str(threshold) + ' dB' +
          '\nSamples per waveform:\t\t' + str(samples_per_waveform) +
          '\nPretrigger samples:\t\t' + str(pretrigger_samples) +
          '\nSample rate:\t\t\t' + str(sample_rate / 10.0**6) + ' MHz\n')

    trai = kwargs.get('trai', None)
    if trai:
        if not isinstance(trai, (tuple, list)):
            trai = [trai]
        trai = sorted(trai)
        if 0 in trai:
            trai = [i for i in trai if i != 0]
    if not trai:
        trai = range(1, number_of_hits + 1)

    print('read_tradb: loading ' + str(len(trai)) + ' waveform(s) to memory.')
    t_start = time.time()
    """ Retrieval method: single query to pandas, np.frombuffer conversion """  # Promising (1M waveforms in ~10s)
    ae_waveforms = pd.read_sql(
        "SELECT Data FROM view_tr_data WHERE TRAI > 0 AND TRAI <=" +
        str(max(trai)),
        conn,
        coerce_float=False)

    # print np.frombuffer(data['Data'][0], dtype=np.short)
    ae_waveforms = ae_waveforms['Data'].apply(
        lambda x: np.frombuffer(x, dtype=np.short))
    print('operation took ' + str(round(time.time() - t_start, 3)) +
          ' seconds\n')

    # Make room for time data in the first row (add one row of padding)
    ae_waveforms = sci.pad(ae_waveforms, (1, 0), mode='constant')
    # Make time array and save to first row
    t = sci.array(range(0,
                        samples_per_waveform)) / float(sample_rate) * 10.0**6
    t = t - sci.array(info['Pretrigger']) / float(info['SampleRate']) * 10.0**6
    ae_waveforms[0] = t

    # Convert all values to mV, except the time row:
    ae_waveforms[1:] = ae_waveforms[1:] * tr_mV

    c.close()
    conn.close()
    return ae_waveforms
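A minimal usage sketch, not part of the original example; the file name is a placeholder and the call relies on the function above together with its module-level imports (sqlite3, pandas, numpy, scipy, time).

waves = read_tradb('recording.tradb', trai=[1, 2, 3])  # hypothetical .tradb file
t = waves[0]                                           # time axis in microseconds
first_hit = waves[1]                                   # waveform for TRAI 1, in mV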
Example #38
0
def porosimetry(im, sizes=25, inlets=None, access_limited=True, mode='hybrid'):
    r"""
    Performs a porosimetry simulation on the image

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.

    sizes : array_like or scalar
        The sizes to invade.  If a list of values is provided they are used
        directly.  If a scalar is provided then that number of points spanning
        the min and max of the distance transform are used.

    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion
        enters the image.  By default all faces are considered inlets,
        akin to a mercury porosimetry experiment.  Users can also apply
        solid boundaries to their image externally before passing it in,
        allowing for complex inlets like circular openings, etc.  This argument
        is only used if ``access_limited`` is ``True``.

    access_limited : Boolean
        This flag indicates if the intrusion should only occur from the
        surfaces (``access_limited`` is True, which is the default), or
        if the invading phase should be allowed to appear in the core of
        the image.  The former simulates experimental tools like mercury
        intrusion porosimetry, while the latter is useful for comparison
        to gauge the extent of shielding effects in the sample.

    mode : string
        Controls which method is used to compute the result.  Options are:

        'hybrid' - (default) Performs a distance transform of the void space,
        thresholds to find voxels larger than ``sizes[i]``, trims the resulting
        mask if ``access_limited`` is ``True``, then dilates it using the
        efficient fft-method to obtain the non-wetting fluid configuration.

        'dt' - Same as 'hybrid', except uses a second distance transform,
        relative to the thresholded mask, to find the invading fluid
        configuration.  The choice of 'dt' or 'hybrid' depends on speed, which
        is system and installation specific.

        'mio' - Uses a single morphological image opening step to obtain the
        invading fluid configuration directly, *then* trims if
        ``access_limited`` is ``True``.  This method is not ideal and is
        included mostly for comparison purposes.  The morphological operations
        are done using fft-based implementations.

    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values indicating the sphere radius at
        which it becomes accessible from the inlets.  This image can be used
        to find invading fluid configurations as a function of applied
        capillary pressure by applying a boolean comparison:
        ``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
        invading sphere.  Of course, ``r`` can be converted to capillary
        pressure using your favorite model.

    See Also
    --------
    fftmorphology

    """
    def trim_blobs(im, inlets):
        temp = sp.zeros_like(im)
        temp[inlets] = True
        labels, N = spim.label(im + temp)
        im = im ^ (clear_border(labels=labels) > 0)
        return im

    dt = spim.distance_transform_edt(im > 0)

    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    inlets = sp.where(inlets)

    if isinstance(sizes, int):
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        sizes = sp.sort(a=sizes)[-1::-1]

    if im.ndim == 2:
        strel = ps_disk
    else:
        strel = ps_ball

    imresults = sp.zeros(sp.shape(im))
    if mode == 'mio':
        pw = int(sp.floor(dt.max()))
        impad = sp.pad(im, mode='symmetric', pad_width=pw)
        imresults = sp.zeros(sp.shape(impad))
        for r in tqdm(sizes):
            imtemp = fftmorphology(impad, strel(r), mode='opening')
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imresults[(imresults == 0) * imtemp] = r
        if im.ndim == 2:
            imresults = imresults[pw:-pw, pw:-pw]
        else:
            imresults = imresults[pw:-pw, pw:-pw, pw:-pw]
    elif mode == 'dt':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0) * imtemp] = r
    elif mode == 'hybrid':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
                imresults[(imresults == 0) * imtemp] = r
    else:
        raise Exception('Unrecognized mode ' + mode)
    return imresults
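A minimal usage sketch, not part of the original example, assuming this function is available as ``porespy.filters.porosimetry``.

import numpy as np
import porespy as ps

im = ps.generators.blobs(shape=[200, 200], porosity=0.6)
inv = ps.filters.porosimetry(im, sizes=25, access_limited=True)
r = 5                                       # sphere radius in voxels
saturation = np.sum(inv >= r) / np.sum(im)  # invading-phase saturation at radius r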
Example #39
0
def apply_chords(im, spacing=1, axis=0, trim_edges=True, label=False):
    r"""
    Adds chords to the void space in the specified direction.  The chords are
    separated by 1 voxel plus the provided spacing.

    Parameters
    ----------
    im : ND-array
        An image of the porous material with void marked as ``True``.

    spacing : int
        Separation between chords.  The default is 1 voxel.  This can be
        decreased to 0, meaning that the chords all touch each other, which
        automatically sets the ``label`` argument to ``True``.

    axis : int (default = 0)
        The axis along which the chords are drawn.

    trim_edges : bool (default = ``True``)
        Whether or not to remove chords that touch the edges of the image.
        These chords are artificially shortened, so they skew the chord length
        distribution.

    label : bool (default is ``False``)
        If ``True`` the chords in the returned image are each given a unique
        label, such that all voxels lying on the same chord have the same
        value.  This is automatically set to ``True`` if spacing is 0, but is
        ``False`` otherwise.

    Returns
    -------
    image : ND-array
        A copy of ``im`` with non-zero values indicating the chords.

    See Also
    --------
    apply_chords_3D

    """
    if spacing < 0:
        raise Exception('Spacing cannot be less than 0')
    if spacing == 0:
        label = True
    result = sp.zeros(im.shape, dtype=int)  # Will receive chords at end
    slxyz = [slice(None, None, spacing * (axis != i) + 1) for i in [0, 1, 2]]
    slices = tuple(slxyz[:im.ndim])
    s = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]  # Straight-line structuring element
    if im.ndim == 3:  # Make structuring element 3D if necessary
        s = sp.pad(sp.atleast_3d(s),
                   pad_width=((0, 0), (0, 0), (1, 1)),
                   mode='constant',
                   constant_values=0)
    im = im[slices]
    s = sp.swapaxes(s, 0, axis)
    chords = spim.label(im, structure=s)[0]
    if trim_edges:  # Label on border chords will be set to 0
        chords = clear_border(chords)
    result[slices] = chords  # Place chords into empty image created at top
    if label is False:  # Remove label if not requested
        result = result > 0
    return result
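A minimal usage sketch, not part of the original example, assuming porespy exposes this function as ``porespy.filters.apply_chords`` and provides a ``chord_length_distribution`` helper in ``porespy.metrics``.

import porespy as ps

im = ps.generators.blobs(shape=[300, 300], porosity=0.6)
crds = ps.filters.apply_chords(im, spacing=3, axis=0, trim_edges=True)  # chords along axis 0
cld = ps.metrics.chord_length_distribution(crds)  # assumed helper; returns chord-length histogram data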