Code example #1
File: utils.py Project: creare-com/pydem
def get_distance(region, src):
    """
    Compute within-region distances from the src pixels.

    Parameters
    ----------
    region : np.ndarray(shape=(m, n), dtype=bool)
        mask of the region
    src : np.ndarray(shape=(m, n), dtype=bool)
        mask of the source pixels to compute distances from.

    Returns
    -------
    d : np.ndarray(shape=(m, n), dtype=float)
        approximate within-region distance from the nearest src pixel;
        (distances outside of the region are arbitrary).
    """

    dmax = float(region.size)
    d = np.full(region.shape, dmax)
    d[src] = 0
    for n in range(region.size):
        d_orth = minimum_filter(d, footprint=_ORTH2) + 1
        d_diag = minimum_filter(d, (3, 3)) + _SQRT2
        d_adj = np.minimum(d_orth[region], d_diag[region])
        d[region] = np.minimum(d_adj, d[region])
        if (d[region] < dmax).all():
            break
    return d
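A quick way to exercise this helper (a minimal sketch: _ORTH2 as a plus-shaped footprint and _SQRT2 = sqrt(2) are assumptions about pydem's module constants, with get_distance from the snippet above in scope):

import numpy as np
from scipy.ndimage import minimum_filter

# Assumed stand-ins for pydem's private constants (not confirmed by the source):
_ORTH2 = np.array([[0, 1, 0],
                   [1, 1, 1],
                   [0, 1, 0]], dtype=bool)
_SQRT2 = np.sqrt(2)

region = np.ones((5, 5), dtype=bool)  # the whole grid is in the region
src = np.zeros((5, 5), dtype=bool)
src[2, 2] = True                      # a single source pixel at the center

d = get_distance(region, src)
print(d[2])                           # middle row: [2. 1. 0. 1. 2.]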
Code example #2
File: cerena_grid_utils.py Project: armatita/GEOMS2
def order_statistics_filter(grid,window_size=(3,3,3),statistics_type='median',rank=1):
    filtered = grid.copy()
    if statistics_type=='minimum':
        scifilt.minimum_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type=='maximum':
        scifilt.maximum_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type=='median':
        scifilt.median_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type[:-2]=='percentile' or statistics_type[:-2]=='per':
        per = int(statistics_type[-2:])  # np.int was removed from NumPy; use the builtin
        scifilt.percentile_filter(grid,per,window_size,None,filtered, mode='nearest')
    elif statistics_type=='rank':
        scifilt.rank_filter(grid,rank,window_size,None,filtered, mode='nearest')
    return filtered
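For instance (a sketch; scifilt as an alias for scipy.ndimage is an assumption based on the calls above, and 'per75' reaches the percentile branch through the string slicing):

import numpy as np
import scipy.ndimage as scifilt  # assumed alias used by the snippet

grid = np.random.rand(16, 16, 16)
med = order_statistics_filter(grid, window_size=(3, 3, 3), statistics_type='median')
p75 = order_statistics_filter(grid, window_size=(3, 3, 3), statistics_type='per75')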
Code example #3
def fit_GLC_grid( subjdata, z_limit=ZLIMIT ):
    #thesedata=subjdata
    dims = len(subjdata[0])-1
    
    bounds = [(.00001,50)] + [(-25,25) for _ in range( dims+1 ) ]
    optargs = ( subjdata, z_limit, None, True)
    
    xopt, fopt, grid, Jout = optimize.brute(func=negloglike_reduced 
                                              , ranges = bounds
                                              , args = optargs
                                              , Ns = 5
                                              , full_output=True
                                             )
    
    from scipy.ndimage.filters import minimum_filter
    from scipy.ndimage.morphology import generate_binary_structure
    
    neighborhood = generate_binary_structure( dims+2, dims+2 )
    local_mins = minimum_filter( Jout, footprint=neighborhood ) == Jout
    min_coords = np.array([ g[local_mins] for g in grid ]).T
    xoptglobal = xopt
    foptglobal = fopt
    for coords in min_coords:
        xopt, fopt, iter, im, sm = optimize.fmin(func=negloglike_reduced 
                                                  , x0 = coords
                                                  , args = optargs
                                                  , full_output=True
                                                 )
        if fopt < foptglobal:
            xoptglobal = xopt
            foptglobal = fopt
    
    return xoptglobal, foptglobal
Code example #4
def correct_for_atmo(raw_data, gas_tar, gas_ref, minimum_filter=False, plot_result=True):
    """ estimate the atmospheric constant and removes it from 
    the target gas value """
    atmo_constant = estimate_atmospheric_constant(raw_data, gas_tar,
                                                  reference=gas_ref)
    corrected_data = raw_data.copy()
    
    if minimum_filter:
        # TODO remove trend
        from scipy.ndimage.filters import minimum_filter  # note: shadows the boolean argument inside this branch
        all_max = max(raw_data[gas_tar].values) - raw_data[gas_tar].mean()
        gas_median_filt = pad.Series(minimum_filter(raw_data[gas_tar].values,
                                                  size=(100,)),
                                    index=raw_data.index)
        corrected_data[gas_tar] = raw_data[gas_tar] - gas_median_filt
        corrected_data[gas_tar] = pad.Series([min(all_max, max(i, 0)) for i in corrected_data[gas_tar].values],
                                   index=raw_data.index)

    # Replace value of target gas by the corrected value
    else:
        corrected_data[gas_tar] = pad.Series(raw_data[gas_tar] - atmo_constant,
                                     index=raw_data.index)
    if plot_result:
        plt.figure(figsize=(12,8))
        raw_data[gas_tar].plot()
        corrected_data[gas_tar].plot(style='r')
        plt.legend(['%s measured' %  gas_tar,
                    '%s corrected for atmospheric constant' %  gas_tar])
        plt.ylabel('%s ppm' %  gas_tar)
    return corrected_data
Code example #5
File: Blob.py Project: theandygross/Luc
def detect_peaks_local_max(image, radius=10, threshold=25, ax=None):
    '''
    http://stackoverflow.com/questions/9111711/get-coordinates-of-local-maxima-in-2d-array-
    above-certain-value
    '''
    import scipy.ndimage as ndimage
    import scipy.ndimage.filters as filters
    
    neighborhood_size = radius
    foldchange = 1.5
    
    data_max = filters.maximum_filter(image, neighborhood_size)
    maxima = (image == data_max)
    data_min = filters.minimum_filter(image, neighborhood_size)
    fc = ((data_max - data_min)/data_min > foldchange)  # assumes strictly positive pixel values (avoids division by zero)
    maxima[fc == 0] = 0
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0
    
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    
    y,x = np.where(labeled > 0 )
    peaks = list(map(tuple, np.array([x,y]).T))  # map() is lazy in Python 3; materialize the list
    
    if ax is not None:     
        ax.imshow(image)    
        ax.autoscale(False)
        ax.plot(x,y, 'ro')
        
    return peaks
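A minimal smoke test for this detector (a sketch; the synthetic image and parameter values are illustrative, and the function's internal imports assume a SciPy version where scipy.ndimage.filters is still importable):

import numpy as np

image = np.full((50, 50), 10.0)
image[10, 10] = 200.0   # isolated bright spots, well above threshold and foldchange
image[30, 40] = 180.0

peaks = detect_peaks_local_max(image, radius=10, threshold=25)
print(peaks)            # two peaks, as (x, y) pairs: (10, 10) and (40, 30)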
Code example #6
File: dehaze.py Project: LouisK130/oii
def dark_channel(image, structure=square(15)):
    (h, w, c) = image.shape
    dark_channels = np.zeros_like(image)
    for i in range(c):
        dark_channels[:, :, i] = minimum_filter(image[:, :, i], footprint=structure)
    dark_channel = np.min(dark_channels, axis=2)
    return dark_channel
Code example #7
File: find_peaks.py Project: rhambach/TEMimage
  def find_local_maxima(self, data, neighborhood_size):
    """ 
     find local maxima within neighborhood 
      idea from http://stackoverflow.com/questions/9111711
      (get-coordinates-of-local-maxima-in-2d-array-above-certain-value)
    """

    # find local maxima in image (width specified by neighborhood_size)
    data_max = filters.maximum_filter(data,neighborhood_size);
    maxima   = (data == data_max);
    assert np.sum(maxima) > 0;        # we should always find local maxima
  
    # remove connected pixels (plateaus)
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    maxima *= 0;
    for dx,dy in slices:
      maxima[(dx.start+dx.stop-1)//2, (dy.start+dy.stop-1)//2] = 1  # // keeps indices integral in Python 3

    # calculate difference between local maxima and lowest 
    # pixel in neighborhood (will be used in select_local_maxima)
    data_min = filters.minimum_filter(data,neighborhood_size);
    diff     = data_max - data_min;
    self._maxima = maxima;
    self._diff   = diff;

    return maxima,diff
Code example #8
File: image.py Project: ShimonaNiharika/MedIA
def local_minima(img, min_distance = 4):
    r"""
    Returns all local minima from an image.
    
    Parameters
    ----------
    img : array_like
        The image.
    min_distance : integer
        The minimal distance between minima, in voxels. If two minima are closer than this, only the lower one is returned.
    
    Returns
    -------
    indices : sequence
        List of all minima indices.
    values : sequence
        List of all minima values.
    """
    # @TODO: Write a unittest for this.
    fits = numpy.asarray(img)
    minfits = minimum_filter(fits, size=min_distance) # default mode is reflect
    minima_mask = fits == minfits
    good_indices = numpy.transpose(minima_mask.nonzero())
    good_fits = fits[minima_mask]
    order = good_fits.argsort()
    return good_indices[order], good_fits[order]
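For example (a sketch; minimum_filter is assumed to come from scipy.ndimage in the snippet's module):

import numpy
from scipy.ndimage import minimum_filter

img = numpy.array([[3, 2, 3],
                   [2, 0, 2],
                   [3, 2, 9]])
indices, values = local_minima(img, min_distance=4)
print(indices[0], values[0])   # [1 1] 0 -- the global minimum is returned first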
Code example #9
File: SOMTools.py Project: jgabriellima/SOM
def getSaddlePoints(matrix, gaussian_filter_sigma=0., low=None, high=None):
    if low is None:
        low = matrix.min()
    if high is None:
        high = matrix.max()
    matrix = expandMatrix(matrix)
    neighborhood = morphology.generate_binary_structure(len(matrix.shape),2)
    # apply the local minimum filter; all locations of minimum value
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
    matrix = filters.minimum_filter(matrix, footprint=neighborhood)
    matrix = condenseMatrix(matrix)
    outPath, clusterPathMat, grad = minPath(matrix)
    flood = numpy.asarray(outPath)
    potential = []
    for e in flood:
        i,j = e
        potential.append(matrix[i,j])
    potential = numpy.asarray(potential)
    potential = scipy.ndimage.filters.gaussian_filter(potential, gaussian_filter_sigma)
    derivative = lambda x: numpy.array(list(zip(-x, x[1:]))).sum(axis=1)    # discrete forward difference (zip() is lazy in Python 3)
    signproduct = lambda x: numpy.array(list(zip(x, x[1:]))).prod(axis=1)   # products of neighboring values
    potential_prime = derivative(potential)
    signproducts = numpy.sign(signproduct(potential_prime))
    extrema = flood[2:][numpy.where(signproducts<0)[0],:]
    bassinlimits = derivative(signproducts)
    saddlePoints = numpy.asarray(outPath[3:])[bassinlimits==-2]
    saddlePointValues = numpy.asarray([matrix[i, j] for i, j in saddlePoints])
    saddlePoints = saddlePoints[numpy.logical_and(saddlePointValues>=low, saddlePointValues<=high),:]
    return saddlePoints
Code example #10
File: joestitch.py Project: LouisK130/oii
def stitch(targets,images):
    mask = rois_mask(targets) # True where image data is
    gaps_mask = mask==False # True where infill needs to go
    # compute bounds relative to the camera field
    (x,y,w,h) = stitched_box(targets)
    uroi = img_as_float(stitch_raw(targets,images,(x,y,w,h))) # stitch with black infill

    # step 1: sparsely sample background mostly ignoring blob
    # compute gradient on both axes
    k = [[-3,-1,0,1,3],
         [-3,-1,0,1,3],
         [-3,-1,0,1,3],
         [-3,-1,0,1,3]]
    gy = convolve(uroi,k)
    gx = convolve(uroi,np.rot90(k))
    # ignore all but low-gradient areas
    bg = (abs(gy+gx) < 0.2) & mask

    # step 2: remove less contiguous areas
    filter_size = max(2,int(max(h,w)/200))
    mf = minimum_filter(bg*1,filter_size)

    # step 3: interpolate between samples
    z = inpaint(uroi*mf,mf==False)

    # step 4: subsample and re-interpolate to degrade artifacts in fill region
    random = RandomState(0)
    (h,w)=z.shape
    ng = random.rand(h,w) < 0.01
    z2 = inpaint(z*ng,ng==False)

    # step 5: final composite
    roi = (z2 * gaps_mask) + uroi
    return (roi * 255).astype(np.uint8), mask
Code example #11
File: utils.py Project: markmuetz/stormtracks
def find_extrema(array):
    '''
    Takes an array and finds its local extrema.

    Returns an array with 0s for not an extrema, 1s for maxs and -1 for mins
    and a list of the indices of all maximums and minimums

    N.B. this function is much faster than the above.
    '''
    extrema = np.zeros_like(array)
    maximums = []
    minimums = []

    local_max = maximum_filter(array, size=(3, 3)) == array
    local_min = minimum_filter(array, size=(3, 3)) == array
    extrema += local_max
    extrema -= local_min

    where_max = np.where(local_max)
    where_min = np.where(local_min)

    for max_point in zip(where_max[0], where_max[1]):
        if (max_point[0] != 0 and max_point[0] != array.shape[0] - 1 and
            max_point[1] != 0 and max_point[1] != array.shape[1] - 1):
            maximums.append(max_point)

    for min_point in zip(where_min[0], where_min[1]):
        if (min_point[0] != 0 and min_point[0] != array.shape[0] - 1 and
            min_point[1] != 0 and min_point[1] != array.shape[1] - 1):
            minimums.append(min_point)

    return extrema, maximums, minimums
Code example #12
File: max_flt_gdal.py Project: anvlason/dtm_py
def detect_local_minima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the troughs using the local maximum filter.
    Returns a boolean mask of the troughs (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define an connected neighborhood
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
    neighborhood = ndimage.morphology.generate_binary_structure(len(arr.shape),2)
    # apply the local minimum filter; all locations of minimum value 
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
    local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
    # local_min is a mask that contains the peaks we are 
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.
    # 
    # we create the mask of the background
    background = (arr==0)
    # 
    # a little technicality: we must erode the background in order to 
    # successfully subtract it from local_min, otherwise a line will 
    # appear along the background border (artifact of the local minimum filter)
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    eroded_background = ndimage.morphology.binary_erosion(
        background, structure=neighborhood, border_value=1)
    # 
    # we obtain the final mask, containing only peaks, 
    # by removing the background from the local_min mask
    detected_minima = local_min & ~eroded_background  # boolean '-' is an error on modern NumPy
    return np.where(detected_minima)       
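A toy run (a sketch; it assumes the snippet's aliases resolve as in the original module, e.g. a SciPy version where the ndimage.morphology namespace still exists):

import numpy as np
from scipy import ndimage
filters = ndimage   # minimum_filter lives directly in scipy.ndimage on recent versions

arr = np.array([[9., 9., 9., 9., 9.],
                [9., 5., 3., 5., 9.],
                [9., 4., 1., 4., 9.],
                [9., 5., 4., 5., 9.],
                [9., 9., 9., 9., 9.]])
print(detect_local_minima(arr))   # (array([2]), array([2])) -- the trough at (2, 2)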
Code example #13
File: cones_backup.py Project: rjonnal/cones
def local_normalize(gim,kernel_size):
    maxf = filters.maximum_filter(gim,kernel_size)
    minf = filters.minimum_filter(gim,kernel_size)
    num = gim-minf
    den = maxf-minf
    den[np.where(den==0)]=1.0
    return num/den
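Applied to a grayscale image, this stretches every neighborhood to the full [0, 1] range (a sketch; filters as an alias for scipy.ndimage matches the calls above):

import numpy as np
import scipy.ndimage as filters  # assumed alias used by the snippet

gim = np.random.rand(64, 64)
norm = local_normalize(gim, kernel_size=9)
print(norm.min(), norm.max())    # 0.0 1.0 -- extremes of each local window map to 0 and 1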
Code example #14
def find_objects(data, size=5, thresh=None, N=20, verbosity=1):

  # maximum step height in neighborhood of size 'size'
  data_max = filters.maximum_filter(data, size)
  data_min = filters.minimum_filter(data, size)
  diff = data_max - data_min;

  # determine threshold which gives N pixels
  if thresh is None:  thresh=np.sort(diff.flat)[-N];

  # create mask for image (maximum with high step size)
  maxima = (data == data_max)
  maxima[diff <= thresh] = False
  
  # find connected objects (pixel agglomerates)
  labeled, num_objects = ndimage.label(maxima)
  slices = ndimage.find_objects(labeled)

  # DEBUG
  if verbosity>2:
    plt.figure()
    plt.imshow(np.log(1+np.abs(data)),interpolation='nearest');
    # create overlay with yellow color (r,g,b,alpha) at each image point 
    overlay = np.zeros(data.shape+(4,));
    overlay[maxima] = (1,1,0,1); # set opacity according to maxima
    plt.imshow(overlay,interpolation='nearest');

  return slices;
Code example #15
File: test.py Project: Rhoana/TreeFusion
def build_tree(im):
    # im = boundary probability

    print "QS"
    segs = quickshift(im, sigma=2.0, return_tree=False, convert2lab=False, max_dist=10)
    orig_segs = segs.copy()
    print "done"

    # Find all neighboring regions and the values between them
    plus = np.array([
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0]]).astype(bool)
    lo = minimum_filter(segs, footprint=plus).astype(np.int32)
    hi = maximum_filter(segs, footprint=plus).astype(np.int32)

    counter = ValueCountInt64()
    counter.add_values_pair32(lo.ravel(), hi.ravel())
    loc, hic, edge_counts = counter.get_counts_pair32()
    weighter = WeightedCountInt64()
    weighter.add_values_pair32(lo.ravel(), hi.ravel(), im.astype(np.float32).ravel())
    low, hiw, edge_weights = weighter.get_weights_pair32()

    max_regions = 2 * segs.max() + 1 #  number of regions is max() + 1
    counts = np.zeros((max_regions, max_regions), dtype=np.uint64)
    weights = np.zeros((max_regions, max_regions), dtype=float)
    counts[loc, hic] = edge_counts
    weights[low, hiw] = edge_weights
    # zero diagonal
    counts[np.arange(max_regions), np.arange(max_regions)] = 0

    next_region = segs.max() + 1
    parents = {}
    # set up heap
    heap = [(weights[l, h] / counts[l, h], l, h) for l, h in zip(*np.nonzero(counts))]
    heapify(heap)

    # successively merge regions
    while heap:
        w, lo, hi = heappop(heap)
        if (lo in parents) or (hi in parents):
            continue
        print(next_region, max_regions)
        parents[lo] = next_region
        parents[hi] = next_region
        counts[next_region, :] = counts[lo, :] + counts[hi, :]
        weights[next_region, :] = weights[lo, :] + weights[hi, :]
        counts[:, next_region] = counts[next_region, :]
        weights[:, next_region] = weights[next_region, :]
        for idx in range(next_region):
            if idx in parents:
                continue
            if counts[idx, next_region] > 0 and (idx not in parents):
                heappush(heap, (weights[idx, next_region] / counts[idx, next_region], idx, next_region))
        segs[segs == lo] = next_region
        segs[segs == hi] = next_region
        next_region += 1
    print "done"
    return orig_segs, parents
Code example #16
def preprocess(image):
    npimg = getImageAsNumpy(image)
    npimg = filters.minimum_filter(npimg,3)
    npimg = invertBackground(npimg)
    npimg = erode(npimg, 2)
    npimg = dilate(npimg, 1)
    npimg = greyThreshold(npimg, 200)
    return npimg
Code example #17
def detect_local_minima(arr):
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    local_min = (filters.minimum_filter(arr, footprint=neighborhood) == arr)
    background = (arr == 0)
    eroded_background = morphology.binary_erosion(
            background, structure=neighborhood, border_value=1)
    detected_minima = local_min & ~eroded_background  # boolean '-' is an error on modern NumPy
    return np.where(detected_minima)
Code example #18
def image_reconstruct(uv, scores, find_max_min = True, grasped_position_index = None, initial_grasp_position_index = None):
    bottom_right = list(map(max, zip(*uv)))  # map() is lazy in Python 3; indexing needs a list
    top_left = list(map(min, zip(*uv)))

    # print 'bottom_right', bottom_right
    # print 'top_left', top_left

    image = np.empty(((bottom_right[1] - top_left[1])//3 + 1, (bottom_right[0] - top_left[0])//3 + 1))
    image[:] = np.nan
    for s,(u,v) in zip(scores,uv):
        u, v = ((u - top_left[0])//3, (v - top_left[1])//3)  # integer pixel indices
        image[v,u] = s
    
    if find_max_min:
        neighborhood_size = 10
        threshold = 0.0 #0025
        image = ndimage.gaussian_filter(image, sigma = 2, mode='constant')
        data_max = maximum_filter(image, neighborhood_size)
        maxima = (image == data_max)
        data_min = minimum_filter(image, neighborhood_size)
        diff = ((data_max - data_min) > threshold)
        maxima[diff == 0] = 0

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        x, y = [], []
        for dy,dx in slices:
            x_center = (dx.start + dx.stop - 1)//2   # // so the result can index arrays
            x.append(x_center)
            y_center = (dy.start + dy.stop - 1)//2
            y.append(y_center)

        # neighborhood = generate_binary_structure(2,2)
        # local_max = maximum_filter(image, footprint=neighborhood)==image
        # print local_max
        # background = (image ==0)
        # eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
        # detected_peaks = local_max - eroded_background


        values = image[y,x]
        zipped = list(zip(values,x,y))  # zip() is lazy in Python 3; sort needs a list
        zipped.sort(key = lambda t: t[0])
        unzipped = [list(t) for t in zip(*zipped[-4:])]

        # return image, unzipped[1], unzipped[2]

    if grasped_position_index is not None:
        x_grasped, y_grasped = uv[grasped_position_index]
        x_grasped, y_grasped = ((x_grasped - top_left[0])//3, (y_grasped - top_left[1])//3)
        # print x_grasped, y_grasped

        x_initial, y_initial = uv[initial_grasp_position_index] 
        x_initial, y_initial = ((x_initial - top_left[0])//3, (y_initial - top_left[1])//3)
        # print x_initial, y_initial
        return image, unzipped[1], unzipped[2], x_grasped, y_grasped, x_initial, y_initial, unzipped[0]

    return image, unzipped[1], unzipped[2], unzipped[0]
Code example #19
def extracts_minima_areas(arr):
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
    labels = measurements.label(local_min)[0]
    objects = measurements.find_objects(labels)
    areas_and_indices_and_bounding_boxes = []
    for idx, sl in enumerate(objects):
        areas_and_indices_and_bounding_boxes.append((len(arr[sl][labels[sl] == idx + 1]), idx + 1, sl)) # first area, then index, then bounding box
    return sorted(areas_and_indices_and_bounding_boxes), labels
Code example #20
def extrema(mat,mode='wrap',window=10):
    """find the indices of local extrema (min and max)
    in the input array."""
    mn = minimum_filter(mat, size=window, mode=mode)
    mx = maximum_filter(mat, size=window, mode=mode)
    # (mat == mx) true if pixel is equal to the local max
    # (mat == mn) true if pixel is equal to the local min
    # Return the indices of the maxima, minima
    return np.nonzero(mat == mn), np.nonzero(mat == mx)
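Typical use on a 2-D field (a sketch; the sinusoidal test field is illustrative):

import numpy as np
from scipy.ndimage import minimum_filter, maximum_filter

field = np.sin(np.linspace(0, 4 * np.pi, 50))[:, None] * np.ones((1, 40))
(min_rows, min_cols), (max_rows, max_cols) = extrema(field, mode='wrap', window=10)
print(np.unique(min_rows), np.unique(max_rows))   # rows holding the troughs and the crests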
Code example #21
    def _findObject(self, img):
        '''
        Create a bounding box around the object within an image
        '''
        from imgProcessor.imgSignal import signalMinimum
        # img is scaled already
        i = img > signalMinimum(img)  # img.max()/2.5
        # filter noise, single-time-effects etc. from mask:
        i = minimum_filter(i, 4)
        return boundingBox(i)
Code example #22
File: Fingerprint.py Project: Amarang/navsa
    def findPeaks(self):

        Pxx_max = filters.maximum_filter(self.Pxx, self.NEIGHBORHOOD_SIZE)
        maxima = (self.Pxx == Pxx_max)
        Pxx_min = filters.minimum_filter(self.Pxx, self.NEIGHBORHOOD_SIZE)
        diff = ((Pxx_max - Pxx_min) > self.THRESHOLD)
        maxima[diff == 0] = 0
        labeled, num_objects = ndimage.label(maxima)
        self.xy = np.array(ndimage.center_of_mass(self.Pxx, labeled, range(1, num_objects+1)))
        self.tpeaks, self.fpeaks = self.times[self.xy[:,1].astype('int')], self.freqs[self.xy[:,0].astype('int')]
Code example #23
File: blob.py Project: TheLaueLab/blob
def localMinima(data, threshold):
    from numpy import ones, nonzero, transpose

    if threshold is not None:
        peaks = data < threshold
    else:
        peaks = ones(data.shape, dtype=bool)  # dtype must be bool for the &= mask below

    peaks &= data == minimum_filter(data, size=(3,) * data.ndim)
    return transpose(nonzero(peaks))
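For example (a sketch; minimum_filter is assumed imported from scipy.ndimage as in the snippet's module):

import numpy as np
from scipy.ndimage import minimum_filter

data = np.array([[5., 4., 5.],
                 [4., 1., 4.],
                 [5., 4., 5.]])
print(localMinima(data, threshold=3.0))   # [[1 1]] -- the only sub-threshold 3x3 minimum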
Code example #24
    def find_cells(self):

        cell_object_list = []

        data = np.asarray(self.image)  # scipy.array was removed from SciPy; NumPy assumed imported as np
        data = data.reshape(self.image.height, self.image.width, self.image.channels)  # reshape returns a new array; the result was previously discarded

        data_max = filters.maximum_filter(data, MAXIMA_ALG_NEIGHBORHOOD_SIZE)
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, MAXIMA_ALG_NEIGHBORHOOD_SIZE)
        diff = ((data_max - data_min) > MAXIMA_ALG_THRESHOLD)
        maxima[diff == 0] = 0

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        x, y = [], []

        for dy, dx, crap in slices:
            x_center = (dx.start + dx.stop - 1)//2   # integer division for pixel coordinates
            x.append(x_center)
            y_center = (dy.start + dy.stop - 1)//2
            y.append(y_center)

            cell = cell_analyzer_objects.CellAnalyzerObject()
            point = cell_analyzer_objects.CellAnalyzerPoint(y_center, x_center)
            cell.add_point(point)

            #HACK HACK HACK: this is to avoid a division by zero!
            #point = cell_analyzer_objects.CellAnalyzerPoint(y_center + 1, x_center)
            #cell.add_point(point)
            #point = cell_analyzer_objects.CellAnalyzerPoint(y_center - 1, x_center)
            #cell.add_point(point)
            #point = cell_analyzer_objects.CellAnalyzerPoint(y_center, x_center + 1)
            #cell.add_point(point)
            #point = cell_analyzer_objects.CellAnalyzerPoint(y_center, x_center - 1)
            #cell.add_point(point)

            cell_object_list.append(cell)


        #plt.imshow(data)
        #plt.savefig('/tmp/data.png', bbox_inches = 'tight')

        #plt.autoscale(False)
        #plt.plot(x,y, 'ro')
        #plt.savefig('/tmp/result.png', bbox_inches = 'tight')

        #create dummy cell objects with only one pixel
        #for x_element in x:
            #print x

        return cell_object_list


        
Code example #25
    def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
               borderWidth=3):  # borderWidth=100
        """
        Args:
            img (path or array): image containing the same object as in the reference image
        Kwargs:
            maxShear (float): In order to define a good fit, reject higher shear values between
                              this and the reference image
            maxRot (float): Same for rotation
            minMatches (int): Minimum number of matching points found in both this and the reference image
        """
        try:
            fit, img, H, H_inv, nmatched = self._fitImg(img)
        except Exception as e:
            print(e)
            return

        # CHECK WHETHER FIT IS GOOD ENOUGH:
        (translation, rotation, scale, shear) = decompHomography(H)
        print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\tscale: %s\n\tshear: %s'
              % (translation, rotation, scale, shear))
        if (nmatched > minMatches
                and abs(shear) < maxShear
                and abs(rotation) < maxRot):
            print('==> img added')
            # HOMOGRAPHY:
            self.Hs.append(H)
            # INVERSE HOMOGRAPHY
            self.Hinvs.append(H_inv)
            # IMAGES WARPED TO THE BASE IMAGE
            self.fits.append(fit)
            # ADD IMAGE TO THE INITIAL flatField ARRAY:
            i = img > self.signal_ranges[-1][0]

            # remove borders (that might have erroneous light):
            i = minimum_filter(i, borderWidth)

            self._ff_mma.update(img, i)

            # create fit img mask:
            mask = fit < self.signal_ranges[-1][0]
            mask = maximum_filter(mask, borderWidth)
            # IGNORE BORDER
            r = self.remove_border_size
            if r:
                mask[:r, :] = 1
                mask[-r:, :] = 1
                mask[:, -r:] = 1
                mask[:, :r] = 1
            self._fit_masks.append(mask)

            # image added
            return fit
        return False
Code example #26
File: __init__.py Project: jrcarley/NCEPy
def extrema(mat,mode='wrap',window=10):
  # From: http://matplotlib.org/basemap/users/examples.html

  """find the indices of local extrema (min and max)
  in the input array."""
  mn = minimum_filter(mat, size=window, mode=mode)
  mx = maximum_filter(mat, size=window, mode=mode)
  # (mat == mx) true if pixel is equal to the local max
  # (mat == mn) true if pixel is equal to the local min
  # Return the indices of the maxima, minima
  return np.nonzero(mat == mn), np.nonzero(mat == mx)
Code example #27
def localMaxima(data):
    data_max = maximum_filter(data, 5)
    maxima = (data == data_max)
    data_min = minimum_filter(data, 5)
    diff = ((data_max - data_min) > 1000)
    maxima[diff == 0] = 0
    myArray = np.column_stack(np.where(maxima))  # rows are (y, x) coordinates of local maxima
    myArray[:,[0, 1]] = myArray[:,[1, 0]]        # swap columns to (x, y) order
    return myArray
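A quick check (a sketch; the single synthetic peak is illustrative):

import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter

data = np.zeros((20, 20))
data[5, 7] = 5000.0
print(localMaxima(data))   # [[7 5]] -- (x, y) coordinates of the detected maximum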
Code example #28
File: utils.py Project: creare-com/pydem
def plot_fill_flat(roi, out, region, source, drain, dL, dH):
    from matplotlib import pyplot

    plot_detail = roi.size < 500
    cmap = 'Greens'

    pyplot.figure()

    ax = pyplot.subplot(221)
    pyplot.axis('off')
    pyplot.title('unfilled')
    im = pyplot.imshow(roi, interpolation='none')
    im.set_cmap(cmap)
    if plot_detail:
        y, x = np.where(region); pyplot.plot(x, y, 'k.')
        y, x = np.where(source); pyplot.plot(x, y, lw=0, color='k', marker='$H$', ms=12)
        y, x = np.where(drain);   pyplot.plot(x, y, lw=0, color='k', marker='$L$', ms=12)
    
    pyplot.subplot(222, sharex=ax, sharey=ax)
    pyplot.axis('off')
    pyplot.title('filled')
    im = pyplot.imshow(out, interpolation='none')
    im.set_cmap(cmap)
    if plot_detail:
        for elev in np.unique(out):
            y, x = np.where(out==elev)
            pyplot.plot(x, y, lw=0, color='k', marker='$%.3f$' % elev, ms=20)

    if plot_detail:
        flat = (minimum_filter(out, (3, 3)) >= out) & region
        y, x = np.where(flat); pyplot.plot(x, y, 'r_', ms=24)
        
        pyplot.subplot(223, sharex=ax, sharey=ax)
        pyplot.axis('off')
        pyplot.title('dL')
        im = pyplot.imshow(roi, interpolation='none')
        im.set_cmap(cmap)
        for d in np.unique(dL):
            if d == region.size: continue
            y, x = np.where(dL==d)
            pyplot.plot(x, y, lw=0, color='k', marker='$%.2f$' % d, ms=24)

        pyplot.subplot(224, sharex=ax, sharey=ax)
        pyplot.axis('off')
        pyplot.title('dH')
        im = pyplot.imshow(roi, interpolation='none')
        im.set_cmap(cmap)
        for d in np.unique(dH):
            if d == region.size: continue
            y, x = np.where(dH==d)
            pyplot.plot(x, y, lw=0, color='k', marker='$%.2f$' % d, ms=24)

    pyplot.tight_layout()
Code example #29
File: lineseg.py Project: AI42/ocropy
def dplineseg1(image,imweight=4,bweight=-1,diagweight=1):
    """A dynamic programming line segmenter.  This computes cuts going from bottom
    to top.  It is only used for testing and is not recommended for actual use because
    these kinds of cuts do not work very well."""
    cimage = imweight*image - bweight*maximum(0,roll(image,-1,1)-image)
    c,s = dpcuts(cimage,alpha=diagweight)
    costs = c[-1]
    costs = filters.gaussian_filter(costs,1)
    mins = find(filters.minimum_filter(costs,8)==costs)
    tracks = dptrack(mins,s)
    # combo = 3*tracks+cimage
    return tracks
Code example #30
def DetectOccPupil(im, blurShape = (9, 9), sigmaBlur = 10, alpha = 2,
                   beta = -1, gamma = 0, minFiltSize = (21, 21), THpup = 0.05,
                    erdSize = (5, 5)):
  gaussBlur = cv2.GaussianBlur(im, blurShape, sigmaBlur)
  invIm = 2 * im - cv2.addWeighted(im, alpha, gaussBlur, beta, gamma)
  noEyeLash = filters.minimum_filter(invIm, minFiltSize)
  noEyeLashNorm = noEyeLash / 255
  noEyeLashNorm[noEyeLashNorm < THpup] = 0
  noEyeLashNorm[noEyeLashNorm > THpup] = 1
  noEyeLashNorm = noEyeLashNorm.astype(np.uint8)  # findContours needs an 8-bit single-channel image
  contours, hier = cv2.findContours(noEyeLashNorm, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # mode/method arguments are required
  contours = [cv2.convexHull(contour) for contour in contours]
  morphShape = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, erdSize)
Code example #31
def green_hotspots(im, sigma=4, padding=0, m=2):
    """RoI generator based on green hotspots.

	The input is first converted from RGB to YUV. Then the U and V channels
	are added and smoothed with a Gaussian filter. The function peak_local_max
	from scipy.spatial is used to find local minimi in the resulting array.

	Arguments
	---------
	im : (?,?,3) numpy array
		Input RGB image.
	sigma : float (>0), optional
		Gaussian filter smoothing parameter. Decreasing sigma will result in
		more hotspots. The default value is 4 is a good starting point.
	padding : int, optional
		Width of boundary layer in which no hotspots will be detected. Keep at
		roughly the box_size to prevent boxes from being clipped by block 
		boundary. Default is 0.
	m : int, optional
		Size of minimum filter. Keep at the default of 2. 

	Returns
	-------
	coords : (?,2) numpy array
		Array containing coordinates of the green hotspots.
	"""

    if padding > 0:
        im = im[padding:-padding, padding:-padding, :]
    im_yuv = skimage.color.rgb2yuv(im)
    im_filtered = im_yuv[:, :, 1] + im_yuv[:, :, 2]
    im_filtered = filters.minimum_filter(im_filtered, m)
    im_filtered = filters.gaussian_filter(im_filtered, sigma)
    mask = peak_local_max(-im_filtered, indices=False)
    mask = np.logical_and(mask, im_filtered != 0)
    coords = np.argwhere(mask == 1)
    return coords + padding
Code example #32
def dark_hotspots(im, sigma=6, padding=0, m=2):
    """RoI generator based on dark hotspots.

	Similar function to green_hotspots. Use this function to detect locations
	of dark lettuce crops. It is based on local minima of the Y band of 
	image converted to YUV.

	Arguments
	---------
	im : (?,?,3) numpy array
		Input RGB image.
	sigma : float (>0), optional
		Gaussian filter smoothing parameter. Decreasing sigma will result in
		more hotspots. The default value is 6 is a good starting point.
	padding : int, optional
		Width of boundary layer in which no hotspots will be detected. Keep at
		roughly the box_size to prevent boxes from being clipped by block 
		boundary. Default is 0.
	m : int, optional
		Size of minimum filter. Keep at the default of 2. 

	Returns
	-------
	coords : (?,2) numpy array
		Array containing coordinates of the dark hotspots.
	"""
    if padding > 0:
        im = im[padding:-padding, padding:-padding, :]
    im_yuv = skimage.color.rgb2yuv(im)
    im_filtered = im_yuv[:, :, 0]
    im_filtered = filters.minimum_filter(im_filtered, m)
    im_filtered = filters.gaussian_filter(im_filtered, sigma)
    mask = peak_local_max(-im_filtered, indices=False)
    mask = np.logical_and(mask, im_filtered != 0)
    coords = np.argwhere(mask == 1)
    return coords + padding
Code example #33
def kpp_seeds(features, objectness, k=100, window=9):
    height = features.shape[0]
    width = features.shape[1]
    seeds = np.zeros((height, width), dtype=bool)  # np.bool was removed from NumPy
    edge = compute_feature_edge(features)
    seeds_candidates = (filters.minimum_filter(edge, window) == edge)

    seeds_candidates_idx = [(i, j)
                            for i, j in zip(*seeds_candidates.nonzero())]
    seed_obj = select_seed_embedding(seeds_candidates, objectness)
    start = np.argmax(seed_obj)

    seeds[seeds_candidates_idx[start][0],
          seeds_candidates_idx[start][1]] = True
    feature_flatten = select_seed_embedding(seeds_candidates, features)
    count = 1
    while count < k:
        seeds_features = select_seed_embedding(seeds, features)
        dist_pair = euclidean_distances(seeds_features, feature_flatten)
        next_seed = np.argmax(np.min(dist_pair, axis=0))
        seeds[seeds_candidates_idx[next_seed][0],
              seeds_candidates_idx[next_seed][1]] = True
        count += 1
    return seeds
Code example #34
def calc_denpt(ourmap, neighbor_thre, ratio):
    binamap = ourmap > 0
    ourmap[binamap == 0] = 0
    data_max = filters.maximum_filter(ourmap, neighbor_thre)
    data_min = filters.minimum_filter(ourmap, neighbor_thre)
    maxima = ourmap == data_max
    diffmap = ((data_max - data_min) > 0.001)
    maxima[diffmap == 0] = 0

    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)

    x, y, sc = [], [], []
    for dy, dx in slices:
        x_center = (dx.start + dx.stop - 1) / 2 * ratio
        x.append(x_center)
        y_center = (dy.start + dy.stop - 1) / 2 * ratio
        y.append(y_center)
        sc.append(ourmap[int(y_center / ratio), int(x_center / ratio)])

    x = np.asarray(x, dtype=np.float32)
    y = np.asarray(y, dtype=np.float32)
    sc = np.asarray(sc, dtype=np.float32)
    return x, y, sc
Code example #35
def r_erosion(image, size, origin=0):
    """Erosion with rectangular structuring element using maximum_filter"""
    return filters.minimum_filter(image, size, origin=origin)
Code example #36
def rg_erosion(image, size, origin=0):
    """Grayscale erosion with maximum/minimum filters."""
    return filters.minimum_filter(image, size, origin=origin)
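Both wrappers rely on the identity that grayscale erosion with a flat rectangular structuring element is exactly a minimum filter over the same window; a quick equivalence check (a sketch):

import numpy as np
from scipy.ndimage import grey_erosion, minimum_filter

img = np.random.rand(32, 32)
assert np.allclose(minimum_filter(img, size=3), grey_erosion(img, size=(3, 3)))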
Code example #37
def MinFilter(img, wlen):
    img_conv_min = [
        minimum_filter(interpolate_replace_nans(img[i], np.ones((29, 29))),
                       size=wlen) for i in range(0, len(img))
    ]
    return img_conv_min
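The helper interpolates NaNs away before filtering, since scipy's minimum_filter does not treat NaNs specially; a usage sketch (interpolate_replace_nans coming from astropy.convolution is an assumption based on the name):

import numpy as np
from scipy.ndimage import minimum_filter
from astropy.convolution import interpolate_replace_nans  # assumed origin of the helper

frames = [np.random.rand(64, 64) for _ in range(3)]
frames[0][10:12, 10:12] = np.nan         # NaNs are filled by interpolation first
filtered = MinFilter(frames, wlen=5)
print(len(filtered), filtered[0].shape)  # 3 (64, 64)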
Code example #38
def permutation_STEM(individual,
                     STEM_parameters,
                     filter_size=0.5,
                     move_cutoff=0.5,
                     max_cutoff=0.5,
                     min_cutoff=0.5):
    """Moves surface atoms around based on the difference in the target
    and individual STEM image

    Parameters
    ----------
    STEM_parameters : dict
        Parameters for the STEM calculation. Ideally should be the same as the ones
        used for the STEM fitness/relaxation
    filter_size : float
        Filter size for choosing local maximum in the picture. Filter size is equal
        to average_bond_length * resolution * filter_size.
    move_cutoff : float
        The search radius for selecting an atom to move near a high intensity point.
        Defaults to the average bond distance
    """

    module = STEM(STEM_parameters)
    module.generate_target()
    target = module.target

    image, x_shift, y_shift = module.cross_correlate(
        module.get_image(individual))
    contrast = image - target
    max_max = np.max(contrast)
    min_min = np.min(contrast)

    ###################################
    ## Code for testing the contrast ##
    ###################################
    # import matplotlib.pyplot as plt
    # import matplotlib.cm as cm
    # fig, ax = plt.subplots()
    # fig.colorbar(ax.pcolormesh(contrast, cmap=cm.viridis, linewidths=0))
    # ax.set_xlim((0, STEM_parameters['dimensions'][0] * STEM_parameters['resolution']))
    # ax.set_ylim((0, STEM_parameters['dimensions'][1] * STEM_parameters['resolution']))
    # plt.show()
    # import sys; sys.exit()

    # Find a list of local maximum and local minimum in the image
    cutoff = get_avg_radii(individual) * 2 * 1.1
    move_cutoff *= cutoff
    resolution = module.parameters['resolution']
    size = cutoff * resolution * filter_size

    data_max = filters.maximum_filter(contrast, size=size)
    maxima = ((contrast == data_max) & (contrast > max_max * max_cutoff))
    if not maxima.any():  # len() of a 2-D mask counts rows, not matches
        return False
    max_coords = np.argwhere(maxima)
    max_xys = (max_coords[:, ::-1] -
               np.array([[x_shift, y_shift]])) / resolution
    max_intensities = np.asarray(
        [data_max[tuple(coord)] for coord in max_coords])
    max_intensities /= sum(max_intensities)

    ###################################
    ## Code for testing the max find ##
    ###################################
    # import matplotlib.pyplot as plt
    # import matplotlib.cm as cm
    # fig, ax = plt.subplots()
    # fig.colorbar(ax.pcolormesh(maxima, cmap=cm.viridis, linewidths=0))
    # ax.set_xlim((0, STEM_parameters['dimensions'][0] * STEM_parameters['resolution']))
    # ax.set_ylim((0, STEM_parameters['dimensions'][1] * STEM_parameters['resolution']))
    # print(len(max_intensities))

    # fig, ax = plt.subplots(num=2)
    # fig.colorbar(ax.pcolormesh(data_max, cmap=cm.viridis, linewidths=0))
    # ax.set_xlim((0, STEM_parameters['dimensions'][0] * STEM_parameters['resolution']))
    # ax.set_ylim((0, STEM_parameters['dimensions'][1] * STEM_parameters['resolution']))

    # plt.show()
    # print(len(max_intensities))
    # import sys; sys.exit()

    data_min = filters.minimum_filter(contrast, size=size)
    minima = ((contrast == data_min) & (contrast < min_min * min_cutoff))
    if not minima.any():  # same fix as for the maxima above
        return False

    min_coords = np.argwhere(minima)
    min_xys = (min_coords[:, ::-1] - [x_shift, y_shift]) / resolution
    min_intensities = np.asarray(
        [data_min[tuple(coord)] for coord in min_coords])
    min_intensities = np.absolute(min_intensities)
    min_intensities /= sum(min_intensities)

    ###################################
    ## Code for testing the min find ##
    ###################################
    # import matplotlib.pyplot as plt
    # import matplotlib.cm as cm
    # fig, ax = plt.subplots(num=1)
    # fig.colorbar(ax.pcolormesh(minima, cmap=cm.viridis, linewidths=0))
    # ax.set_xlim((0, STEM_parameters['dimensions'][0] * STEM_parameters['resolution']))
    # ax.set_ylim((0, STEM_parameters['dimensions'][1] * STEM_parameters['resolution']))

    # fig, ax = plt.subplots(num=2)
    # fig.colorbar(ax.pcolormesh(data_min, cmap=cm.viridis, linewidths=0))
    # ax.set_xlim((0, STEM_parameters['dimensions'][0] * STEM_parameters['resolution']))
    # ax.set_ylim((0, STEM_parameters['dimensions'][1] * STEM_parameters['resolution']))

    # plt.show()
    # print(len(min_intensities))
    # import sys; sys.exit()

    # Get atoms associated with each max and min column
    xys = individual.get_positions()[:, :2]
    max_dists = np.expand_dims(xys, 0) - np.transpose(
        np.expand_dims(max_xys, 0), (1, 0, 2))
    max_dists = np.linalg.norm(max_dists, axis=2)
    max_column_indices = [
        np.where(dists < move_cutoff)[0] for dists in max_dists
    ]

    min_dists = np.expand_dims(xys, 0) - np.transpose(
        np.expand_dims(min_xys, 0), (1, 0, 2))
    min_dists = np.linalg.norm(min_dists, axis=2)
    min_column_indices = [
        np.where(dists < move_cutoff)[0] for dists in min_dists
    ]

    if np.size(min_column_indices) == 0 or np.size(max_column_indices) == 0:
        return False

    # Eliminate columns that cannot be "improved" by a permutation
    syms = np.asarray(individual.get_chemical_symbols())
    unique_syms = np.unique(syms)
    unique_nums = [atomic_numbers[sym] for sym in unique_syms]
    max_sym = unique_syms[np.argmax(unique_nums)]
    min_sym = unique_syms[np.argmin(unique_nums)]

    for i, indices in reversed(list(enumerate(max_column_indices))):
        if all(syms[indices] == min_sym):
            max_column_indices = np.delete(max_column_indices, i, axis=0)
            max_intensities = np.delete(max_intensities, i)
            max_intensities /= np.sum(max_intensities)

    for i, indices in reversed(list(enumerate(min_column_indices))):
        if all(syms[indices] == max_sym):
            min_column_indices = np.delete(min_column_indices, i, axis=0)
            min_intensities = np.delete(min_intensities, i)
            min_intensities /= np.sum(min_intensities)

    # Pick a max column and min column based on their intensities
    if np.size(min_column_indices) == 0 or np.size(max_column_indices) == 0:
        return False

    max_column_indices = max_column_indices[np.random.choice(
        np.arange(len(max_intensities)), p=max_intensities)]
    min_column_indices = min_column_indices[np.random.choice(
        np.arange(len(min_intensities)), p=min_intensities)]

    # Pick a move between the two columns based on differences in atomic numbers
    max_column_numbers = [
        atomic_numbers[sym] for sym in syms[max_column_indices]
    ]
    min_column_numbers = [
        atomic_numbers[sym] for sym in syms[min_column_indices]
    ]

    max_column_numbers = np.expand_dims(max_column_numbers, 0)
    min_column_numbers = np.expand_dims(min_column_numbers, 0).T

    min_max_pairs = np.argwhere(max_column_numbers - min_column_numbers > 0)
    min_index, max_index = min_max_pairs[random.randint(
        0,
        len(min_max_pairs) - 1)]
    min_index, max_index = min_column_indices[min_index], max_column_indices[
        max_index]
    max_symbol = syms[max_index]
    min_symbol = syms[min_index]

    # Switch the atomic symbols
    individual[max_index].symbol = min_symbol
    individual[min_index].symbol = max_symbol

    return
Code example #39
File: censure.py Project: varnivey/scikit-image
    def detect(self, image):
        """Detect CENSURE keypoints along with the corresponding scale.

        Parameters
        ----------
        image : 2D ndarray
            Input image.

        """

        # (1) First we generate the required scales on the input grayscale
        # image using a bi-level filter and stack them up in `filter_response`.

        # (2) We then perform non-maximal suppression in a 3 x 3 x 3 window on
        # the filter_response to suppress points that are neither minima nor
        # maxima in their 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
        # `feature_mask` containing all the minima and maxima in
        # `filter_response` as True.
        # (3) Then we suppress all the points in the `feature_mask` for which
        # the corresponding point in the image at a particular scale has a
        # ratio of principal curvatures greater than `line_threshold`.
        # (4) Finally, we remove the border keypoints and return the keypoints
        # along with their corresponding scales.

        num_scales = self.max_scale - self.min_scale

        image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))

        # Generating all the scales
        filter_response = _filter_image(image, self.min_scale, self.max_scale,
                                        self.mode)

        # Suppressing points that are neither minima nor maxima in their
        # 3 x 3 x 3 neighborhood to zero
        minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
        maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

        feature_mask = minimas | maximas
        feature_mask[filter_response < self.non_max_threshold] = False

        for i in range(1, num_scales):
            # sigma = (window_size - 1) / 6.0, so the window covers > 99% of
            #                                  the kernel's distribution
            # window_size = 7 + 2 * (min_scale - 1 + i)
            # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
            _suppress_lines(feature_mask[:, :, i], image,
                            (1 + (self.min_scale + i - 1) / 3.0),
                            self.line_threshold)

        rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
        keypoints = np.column_stack([rows, cols])
        scales = scales + self.min_scale + 1

        if self.mode == 'dob':
            self.keypoints = keypoints
            self.scales = scales
            return

        cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)

        if self.mode == 'octagon':
            for i in range(self.min_scale + 1, self.max_scale):
                c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                    + OCTAGON_OUTER_SHAPE[i - 1][1]
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))
        elif self.mode == 'star':
            for i in range(self.min_scale + 1, self.max_scale):
                c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                    + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
                cumulative_mask |= (
                    _mask_border_keypoints(image.shape, keypoints, c)
                    & (scales == i))

        self.keypoints = keypoints[cumulative_mask]
        self.scales = scales[cumulative_mask]
Code example #40

beta = 2
crcc = beta * cv2.Scharr(oca.real, cv2.CV_64F, 1, 0) + (1/beta) * cv2.Scharr(oca.imag, cv2.CV_64F, 0, 1)


lam = 0.5
weighted_img = (1 - lam) * cv2.filter2D(255 - gray, cv2.CV_64F, wa)
filtered_img = lam * cv2.filter2D(gray, cv2.CV_64F, crcc)
co = filtered_img + weighted_img

# adjust this value down for darker images
threshold = 400

data_max = filters.maximum_filter(co, 11)
data_min = filters.minimum_filter(co, 11)
diff = ((data_max - data_min) > threshold)
maxima = (co == data_max)
maxima[diff == 0] = 0
max_coords = np.transpose(np.where(maxima))

abs_max = co.max()
abs_maxima = (co == abs_max)
abs_coords = np.transpose(np.where(abs_maxima))
print(abs_max)

max_psr = -1
max_psr_coords = []

for coords in max_coords:
    row = coords[0]
Code example #41
def renal_segment_v4(recon,
                     temp_res,
                     spacing,
                     root_path,
                     threshold_multiplier=1):
    # Based on Grabcut_Renal_Dev_v16
    # recon     : 4D dataset [x,y,z,t]
    # temp_res  : temporal resolution in sec
    # spacing   : spatial resolution in mm [x,y,z]

    cleanup_enabled = True

    GC_BGD = 0
    GC_FGD = 1
    GC_PR_BGD = 2
    GC_PR_FGD = 3

    nx, ny, nz, nt = recon.shape

    if issubclass(recon.dtype.type, np.integer):
        recon -= np.amin(recon)
        # Use look up table
        lut = np.arange(np.amax(recon) + 1).astype(float)
        lut = (lut / np.amax(lut) * 255).astype(np.uint8)
        recon = lut[recon]
    else:
        recon -= np.amin(recon)
        recon /= np.amax(recon)
        recon = (recon * 255).astype(np.uint8)

    ## Find medulla clusters ##
    sig = recon.reshape(-1, nt)
    mean_sig = np.mean(sig, axis=0)

    mean_sig = gaussian_filter(mean_sig, 10 /
                               temp_res)  # Filter to reduce temporal noise

    t_5p = get_first_crossing_time(mean_sig, 0.10)[0]
    t_5p = np.floor(t_5p).astype(int)
    t_start = max(t_5p, 0)

    t_30 = min(t_start + np.ceil(30 / temp_res).astype(int), nt - 1)
    t_45 = min(t_start + np.ceil(45 / temp_res).astype(int), nt - 1)
    t_60 = min(t_start + np.ceil(60 / temp_res).astype(int), nt - 1)
    t_90 = min(t_start + np.ceil(90 / temp_res).astype(int), nt - 1)
    t_120 = min(t_start + np.ceil(120 / temp_res).astype(int), nt - 1)
    t_180 = min(t_start + np.ceil(180 / temp_res).astype(int), nt - 1)
    t_240 = min(t_start + np.ceil(240 / temp_res).astype(int), nt - 1)

    # Penalize voxels with high initial intensity
    start_score = recon[:, :, :, t_start].astype(float).copy()
    start_score = gaussian_filter(start_score, 2 / spacing)
    start_score -= np.amin(start_score)
    start_score /= np.amax(start_score)
    start_score = 1 - start_score

    # Penalize voxels where motion is dominant
    y = sig[:, t_60:t_120].astype(float).T
    x = np.arange(y.shape[0])
    Z = np.polyfit(x, y, 2)
    X = np.vstack((x**2, x**1, x**0))
    y_fit = np.dot(Z.T, X)
    y_res = y.T - y_fit
    y_res_std = np.std(y_res, axis=1)
    y_res_std3d = y_res_std.reshape(nx, ny, nz)
    motion_score = y_res_std3d.copy()
    motion_score = gaussian_filter(motion_score, 2 / spacing)
    motion_score -= np.amin(motion_score)
    motion_score /= np.amax(motion_score)
    motion_score = 1 - motion_score

    # Look at time to peak
    im_90p = get_first_crossing_time(recon[:, :, :, t_start:t_60], 0.90)
    im_90p[im_90p == 0.0] = np.amax(im_90p)
    im_90p = gaussian_filter(im_90p, sigma=2 / spacing)
    im_90p -= np.amin(im_90p)
    im_90p /= np.amax(im_90p)
    im_90p = 1 - im_90p
    nb_mm = 11  # Based on cortical thickness
    nb = np.ceil(nb_mm / spacing).astype(int)
    im_90p_filt = im_90p.copy()
    im_90p_filt = maximum_filter(im_90p_filt, nb)
    im_90p_filt = minimum_filter(im_90p_filt, nb)
    med_temp_score = im_90p_filt

    # Get images at predetermined time points
    recon_bl = gaussian_filter(recon.astype(float),
                               np.hstack((0 / spacing, 10 / temp_res)))
    recon_start = recon_bl[:, :, :, t_start].astype(float)
    recon_30 = recon_bl[:, :, :, t_30].astype(float)
    recon_45 = recon_bl[:, :, :, t_45].astype(float)
    recon_60 = recon_bl[:, :, :, t_60].astype(float)
    recon_90 = recon_bl[:, :, :, t_90].astype(float)
    recon_120 = recon_bl[:, :, :, t_120].astype(float)
    recon_240 = recon_bl[:, :, :, t_240].astype(float)

    # Medulla Marker
    med_marker3_1 = (recon_60 - recon_start)
    med_marker3_1[med_marker3_1 < 0] = 0
    med_marker3_1 = gaussian_filter(med_marker3_1, 2 / spacing)
    med_marker3_1 -= np.amin(med_marker3_1)
    med_marker3_1 /= np.amax(med_marker3_1)

    med_marker3_2 = (recon_120 - recon_60)
    mask = med_marker3_2 > 0
    med_marker3_2[med_marker3_2 < 0] = 0
    med_marker3_2 = gaussian_filter(med_marker3_2, 2 / spacing)
    med_marker3_2 -= np.amin(med_marker3_2)
    med_marker3_2 /= np.amax(med_marker3_2)

    med_marker3 = np.ones((nx, ny, nz), dtype=float)
    med_marker3 = med_marker3 * med_marker3_1
    med_marker3 = med_marker3 * med_marker3_2
    med_marker3 = med_marker3 * motion_score
    med_marker3 = med_marker3 * start_score
    med_marker3 = med_marker3 * med_temp_score
    med_marker3 = med_marker3 / np.amax(med_marker3)

    med_marker3[med_marker3 < 0] = 0
    med_marker_filt3 = med_marker3 * mask

    # If available calculate CS Marker
    if (t_180 < nt - 1):
        # Look at time to peak
        cs_im_90 = get_first_crossing_time(recon[:, :, :, t_start:t_180], 0.90)
        cs_im_90 = gaussian_filter(cs_im_90, sigma=2 / spacing)
        cs_im_90 -= np.amin(cs_im_90)
        cs_im_90 /= np.amax(cs_im_90)
        cs_temp_score = cs_im_90

        med_marker4_2 = (recon_240 - recon_120)
        mask = med_marker4_2 > 0
        med_marker4_2[med_marker4_2 < 0] = 0
        med_marker4_2 = gaussian_filter(med_marker4_2, 2 / spacing)
        med_marker4_2 -= np.amin(med_marker4_2)
        med_marker4_2 /= np.amax(med_marker4_2)

        med_marker4 = np.ones((nx, ny, nz), dtype=float)
        med_marker4 = med_marker4 * med_marker4_2
        med_marker4 = med_marker4 * motion_score
        med_marker4 = med_marker4 * start_score
        med_marker4 = med_marker4 * cs_temp_score
        med_marker4 = med_marker4 / np.amax(med_marker4)

        med_marker4[med_marker4 < 0] = 0
        med_marker_filt4 = med_marker4 * mask

    # Combine medulla marker with cs marker if available
    if (t_180 < nt - 1):
        med_marker5 = med_marker4 / np.amax(
            med_marker4) + med_marker_filt3 / np.amax(med_marker_filt3)
        med_marker_filt5 = med_marker_filt4 / np.amax(
            med_marker_filt4) + med_marker_filt3 / np.amax(med_marker_filt3)
    else:
        med_marker_filt5 = med_marker_filt3

    med_marker_im3d = med_marker_filt5.copy()
    med_marker_im3d -= np.amin(med_marker_im3d)
    med_marker_im3d /= np.amax(med_marker_im3d)
    med_marker_im3d = (255 * med_marker_im3d).astype(np.uint8)

    med_cutoff_otsu = threshold_otsu(
        med_marker_im3d[med_marker_im3d > 0].ravel()) * threshold_multiplier

    # Connect nearby medulla regions
    nb_mm = 11  # Based on cortical thickness
    nb = np.ceil(nb_mm / spacing).astype(int)
    med_valid = med_marker_im3d.astype(float) > med_cutoff_otsu
    med_valid_max = maximum_filter(med_valid, nb)
    med_valid_label, num_labels = label(med_valid_max)
    med_valid_label[med_valid == 0] = 0

    # Find the largest 4 clusters
    cluster_size = np.bincount(med_valid_label.ravel())
    cluster_size[0] = 0
    sorted_clusters = np.argsort(cluster_size)[::-1]
    largest_n = 4
    med_valid_largest = np.zeros_like(med_valid_label)
    for i in range(min(largest_n, sorted_clusters.shape[0])):
        cluster_id = sorted_clusters[i]
        if cluster_size[cluster_id] > 0:
            med_valid_largest[med_valid_label == cluster_id] = i + 1

    ## GrabCut Section ##
    tic_cell = time.time()
    nb_mm = 25  # Based on cortical thickness (12.5 mm extension on all sides for max cort. thickness of 11mm)
    nb = np.ceil(nb_mm / spacing).astype(int)

    recon_min = np.amin(recon, axis=3, keepdims=True)
    recon_diff = gaussian_filter(recon - recon_min,
                                 np.hstack((0 / spacing, 0 / temp_res)))

    output3d = np.zeros((nx, ny, nz), dtype=int)
    output3d_bbox_list = []

    for cluster_id in np.unique(med_valid_largest[med_valid_largest > 0]):
        print("Cluster_id =", cluster_id)

        # Expand the mask by maximum cortical thickness to cover the whole kidney
        potential_mask = med_valid_largest == cluster_id
        potential_mask = maximum_filter(potential_mask, nb)

        # Generate the 3d convex hull
        potential_mask = convex_hull_image_3d(potential_mask)

        # Calculate the bounding box
        bbox = get_bbox(potential_mask)

        # Expand bbox to get more background
        scale = 1.5
        xyz_min, xyz_end = bbox
        xyz_len = xyz_end - xyz_min
        xyz_len_shift = np.round((scale - 1) * xyz_len).astype(int)
        new_xyz_min = xyz_min - np.floor(xyz_len_shift / 2).astype(int)
        new_xyz_min[new_xyz_min < 0] = 0
        new_xyz_len = xyz_len + xyz_len_shift
        new_xyz_max = new_xyz_min + new_xyz_len
        new_xyz_over = new_xyz_max - [nx, ny, nz]
        new_xyz_over[new_xyz_over < 0] = 0
        new_xyz_len = xyz_len + xyz_len_shift - new_xyz_over
        bbox = (new_xyz_min, new_xyz_min + new_xyz_len)

        bb_l, bb_h = bbox
        potential_mask_bbox = potential_mask[bb_l[0]:bb_h[0], bb_l[1]:bb_h[1],
                                             bb_l[2]:bb_h[2]]

        # Calculate PCA
        recon_diff_bbox = recon_diff[bb_l[0]:bb_h[0], bb_l[1]:bb_h[1],
                                     bb_l[2]:bb_h[2], :]
        nx_bb, ny_bb, nz_bb, nt_bb = recon_diff_bbox.shape
        pca = PCA(n_components=3)
        im3d_pca_bbox = pca.fit_transform(
            recon_diff_bbox.reshape(-1, recon_diff_bbox.shape[-1]).astype(
                float))
        im3d_pca_bbox = im3d_pca_bbox.reshape(nx_bb, ny_bb, nz_bb, -1)
        im3d_rgb_bbox = im3d_pca_bbox.copy()
        for ch in range(im3d_rgb_bbox.shape[3]):
            im3d_ch = im3d_rgb_bbox[:, :, :, ch]
            im3d_ch -= np.amin(im3d_ch)
            im3d_ch = im3d_ch / np.amax(im3d_ch) * 255
            im3d_rgb_bbox[:, :, :, ch] = im3d_ch
        im3d_rgb_bbox = im3d_rgb_bbox.astype(np.uint8)

        # Perform GrabCut
        potential_mask3d_bbox = potential_mask_bbox
        img_gc = np.transpose(im3d_rgb_bbox, (2, 0, 1, 3))

        mask_gc = np.zeros(im3d_rgb_bbox.shape[:3], np.uint8)
        mask_gc[potential_mask3d_bbox > 0] = GC_PR_FGD
        mask_gc = np.transpose(mask_gc, (2, 0, 1))

        # Create a directory for temporary files
        buffer_directory = os.path.join(root_path, "Buffer")
        try:
            os.makedirs(buffer_directory)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        input_filename = os.path.join(buffer_directory, "input.umt")
        mask_filename = os.path.join(buffer_directory, "mask.umt")
        output_filename = os.path.join(buffer_directory, "output.umt")

        umtWrite(input_filename, img_gc)
        umtWrite(mask_filename, mask_gc)

        program_path = os.path.join(root_path, "CppSource", "GrabCut3d",
                                    "run_grabcut3d")
        tic = time.time()
        ret_val = subprocess.call(
            [program_path, input_filename, mask_filename, output_filename])
        assert ret_val == 0, 'GrabCut function failed!'
        print('Elapsed time:', time.time() - tic)

        output3d_bbox = umtRead(output_filename)
        output3d_bbox = np.transpose(output3d_bbox, (1, 2, 0)) == GC_PR_FGD

        output3d_bbox_list.append(output3d_bbox)
        output3d_slice = output3d[bb_l[0]:bb_h[0], bb_l[1]:bb_h[1],
                                  bb_l[2]:bb_h[2]]
        output3d_slice[output3d_bbox > 0] = output3d_bbox[output3d_bbox > 0]
    print('Cell_Time =', time.time() - tic_cell)

    # Remove temporary files
    shutil.rmtree(buffer_directory)

    ## Label the connected regions ##
    kidney_labels, n_labels = label(output3d)
    cluster_size = np.bincount(kidney_labels.ravel())

    # Eliminate anything below 25% of the largest region
    cluster_size[0] = 0
    cluster_size_cutoff = np.amax(cluster_size) * 0.25
    cluster_size[cluster_size < cluster_size_cutoff] = 0
    # Reuse cluster_size as a relabeling map: surviving clusters get
    # consecutive ids, everything else maps to 0
    cluster_id = 0
    for i in range(n_labels + 1):
        if cluster_size[i] > 0:
            cluster_id += 1
            cluster_size[i] = cluster_id
    kidney_labels = cluster_size[kidney_labels]

    cluster_size = np.bincount(kidney_labels.ravel())

    ## 3D Cleanup ##
    if cleanup_enabled:
        # Fill holes in x, y, z slices for robustness
        # This will fill up the convex volume before erosion.
        output_filled = fill_holes3d(output3d)

        nb_mm = 3.2 * 2  # (2 x minimum cortical thickness)
        nb = np.floor(nb_mm / spacing).astype(
            int)  # (floor to be conservative)
        nb[nb < 3] = 3  # At least 1 voxel in each direction

        # Erode the output of the first pass
        # output3d_eroded = minimum_filter(output3d, nb)
        output3d_eroded = minimum_filter(output_filled, nb)
        output3d_opened = maximum_filter(output3d_eroded, nb + 2)

        output3d_cleaned = output3d.copy()
        output3d_cleaned[output3d_opened > 0] = 0

        output3d_final2 = output3d.copy()
        output3d_final2[output3d_opened == 0] = 0

        kidney_labels, n_labels = label(output3d_final2)
        cluster_size = np.bincount(kidney_labels.ravel())

        # Eliminate anything below 25% of the largest region
        cluster_size[0] = 0
        cluster_size_cutoff = np.amax(cluster_size) * 0.25
        cluster_size[cluster_size < cluster_size_cutoff] = 0
        cluster_id = 0
        for i in range(n_labels + 1):
            if cluster_size[i] > 0:
                cluster_id += 1
                cluster_size[i] = cluster_id
        kidney_labels = cluster_size[kidney_labels]

        # Eliminate noisy regions
        sig = recon.reshape(-1, nt)
        kidney_labels_fixed = np.zeros_like(kidney_labels)
        valid_count = 0
        valid_sig = []
        invalid_sig = []
        for cluster_id in np.unique(kidney_labels[kidney_labels > 0]):
            cluster_mask = kidney_labels == cluster_id
            sig_mask = np.mean(sig[cluster_mask.ravel(), :], axis=0)
            t_50p = get_first_crossing_time(sig_mask.reshape(1, -1), 0.50)
            if t_50p < t_start or t_50p > t_30:
                print('Noise_detected!')
                invalid_sig.append(sig_mask)
                continue
            valid_sig.append(sig_mask)
            valid_count += 1
            kidney_labels_fixed[cluster_mask > 0] = valid_count
        kidney_labels = kidney_labels_fixed

    print('Segmentation completed.')
    return kidney_labels
コード例 #42
0
ファイル: detect_sources.py プロジェクト: fred3m/astro-toyz
def detect_sources(img_data,
                   threshold,
                   aperture_type='radius',
                   size=5,
                   footprint=None,
                   bin_struct=None,
                   sigma=2,
                   saturate=None,
                   margin=None):
    """
    Erodes the background to isolate sources and selects the local maxima as approximate source positions

    Parameters
    ----------
    img_data: numpy 2D array
        image data
    
    threshold: float
        minimum pixel value above the background noise
    
    size: int, optional
        width of the area in which to search for a maximum (for each point)
    footprint: numpy 2D array (dtype=boolean), optional
        Instead of supplying a size, a footprint of a different shape can be given to use when
        searching for a maximum
            example:
                footprint=np.array([
                    [0,0,1,0,0],
                    [0,1,1,1,0],
                    [1,1,1,1,1],
                    [0,1,1,1,0],
                    [0,0,1,0,0]
                ])
            The above example would only search for a maximum in the pixels labeled by 1 in the region
            centered on a given pixel
    Note: either a size or a footprint must be specified
    
    bin_struct: 2D numpy array,optional
        Minimum structure that regions of the image are shrunk down to in order to isolate maxima
    sigma: float, optional
        This function uses a gaussian filter to smooth the image (only for detection of sources),
        which helps eliminate multiple maximum detections for the same object. 'sigma' describes the
        standard deviation of the gaussian kernel used in the filter
    saturate: float, optional
        Value at which the detector's CCDs become saturated and are no longer linear
    margin: int or list, optional
        Sources this close to the edges are cut off to prevent partial data from becoming mixed up with good detections
      
    Returns
    -------
    maxima: numpy 2D array (dtype=boolean)
        Approximate locations of the source maxima.
        To get more accurate positions each maximum should be fit to a desired profile
    """
    # Make a mask where elements above the threshold are True and below the threshold are False.
    # This essentially removes the background and leaves islands of 1's, representing possible sources
    binData = img_data >= threshold

    # The binary_opening function shrinks all of the 'islands' from the previous step into binary structures,
    # then dilates them again back to their original shape and width.
    # Shape of the created binary structure (if not specified by the user):
    #   010
    #   111
    #   010

    if bin_struct is None:
        bin_struct = ndimage.generate_binary_structure(2, 1)
    binData = ndimage.binary_opening(binData, structure=bin_struct)

    # Use our binary data to mask the image, then blur it to get rid of small local maxima that would
    # give us false-positive sources
    data = filters.gaussian_filter(binData * img_data, sigma=sigma)

    # The maximum/minimum filters select max/min value in a square with sides length 'size' centered on
    # each element. Filter out all of the objects below the threshold
    params = {'input': data}
    if aperture_type == 'width':
        params['size'] = size
    elif aperture_type == 'radius':
        params['footprint'] = get_circle_foot(size)
    elif aperture_type == 'footprint':
        params['footprint'] = footprint
    else:
        raise astrotoyz.core.AstroToyzError(
            'Invalid aperture type in detect_sources')

    # Search for the maximum and minimum points to determine the amplitude of the pixel above its neighboring pixels
    # Note: this only gives the amplitude above the background if the background is within size/2 (or the footprint)
    # of a given pixel.
    data_max = filters.maximum_filter(**params)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(**params)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0

    # Filter out the saturated objects
    if saturate is not None:
        maxima[data > saturate] = 0

    # Remove the sources near the margins that will be cut off.
    # TODO: Dump these in another file as they will still be useful in determining isolated
    # neighbors for PSF stars
    if margin is None:
        margin = int(size / 2)
    if isinstance(margin, list):
        maxima[-margin[0]:, :] = 0
        maxima[:margin[1], :] = 0
        maxima[:, -margin[2]:] = 0
        maxima[:, :margin[3]] = 0
    else:
        maxima[-margin:, :] = 0
        maxima[:margin, :] = 0
        maxima[:, -margin:] = 0
        maxima[:, :margin] = 0
    lbl, nbrLbl = ndimage.label(maxima)
    return maxima
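A minimal usage sketch of detect_sources on synthetic data (not from the original project); it assumes the snippet's own module imports (ndimage, filters) are in place, and the blob positions and threshold are illustrative:

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.zeros((64, 64))
img[20, 20] = img[45, 30] = 100.0  # two point sources
img = gaussian_filter(img, 2.0)    # turn them into blobs

found = detect_sources(img, threshold=0.2, aperture_type='width', size=5)
print(np.argwhere(found))          # approximately [[20 20] [45 30]]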
コード例 #43
0
    def find_potential_fiducials_sensitive(self, frames, frames_full, bin, bin_full, cropsize=32,target=0,fid_list=[],proc_id=0,num_procs=1,
                                           average_marker=None, threshold=1.7, mrcdata=[], aa=20):

        a, l1 = self.find_potential_fiducials(frames, frames_full, bin, bin_full, cropsize, target, fid_list, proc_id, num_procs, average_marker, sen=True)

        list_cx_cy_imnr = []
        fact = 1.*bin/bin_full
        for imnr in range(proc_id, len(frames_full), num_procs):

            ds = frames[imnr]  #downsample(wiener(full_image),n)**0.7
            ds2 = ds.copy()
            # slow per-pixel local median background subtraction
            for xx in range(0, len(ds)):
                for yy in range(0, len(ds[0])):
                    dd = median(ds[max(0, xx - 10):xx + 10, max(0, yy - 10):yy + 10])
                    ds2[xx, yy] = ds[xx, yy] - dd
            ds = ds2

            minf = minimum_filter(gaussian_filter(ds - gaussian_filter(ds, 5), .1), footprint=ones((1, 1)))

            hpf = minf - gaussian_filter(minf,4)
            hpf -= hpf.min()
            hpf /= median(hpf)
            hpf = 1-hpf

            gg = gaussian_filter(hpf, .1)
            gg -= gg.min()
            gg /= median(gg)

            rr = gaussian_filter(laplace(gaussian_filter(1 - gg / gg.max(), .1)), 1)
            rr -= rr.min()
            rr /= rr.max()
            rr += 0.0001

            tot = zeros_like(rr)
            for ii in arange(4.1,3.59,-0.05):
                rrr = rr.copy()
                tot += self.testing_done(rrr,aa=aa,ff=ii)

                ll, nn = label(tot)


                for N in range(1,nn+1):
                    cx,cy = center_of_mass(ll==N)
                    #TOT[int(round(cx)),int(round(cy))] = 1
                    add = True
                    for a,b,c in list_cx_cy_imnr:
                        if abs(cx- a)+abs(cy-b) < 4 and imnr == c:
                            add = False
                            break
                    if add:
                        #dcx,dcy = (cx+(ccx*fact)-ss)/max(fact,1),(cy+(ccy*fact)-ss)/max(fact,1)
                        list_cx_cy_imnr.append([cx, cy, imnr])

        out = l1
        for rx,ry,im in list_cx_cy_imnr:
            #print rx,ry,imnr
            points = self.refine(rx, ry, frames_full[im], im, fact, cropsize)
            #print 'points: ', points
            for cx,cy,ims in points:
                add = True
                for a,b,c in out:
                    if abs(cx- a)+abs(cy-b) < 4 and ims == c:
                        add = False
                        break
                if add:
                    out.append([cx,cy,ims])

        fid_list += out  # list_cx_cy_imnr

        return numpy.zeros_like(frames_full[0]), list_cx_cy_imnr
コード例 #44
0
        class_index[k], avg_size[k][0], avg_size[k][1], avg_size[k][2],
        avg_size[k][3])
    prediction_dict[img_id].append(prediction_sent)

    if np.isnan(data).any():
        continue

    w_k, h_k = (avg_size[k][2:4] * (256 / 1024)).astype(int)

    # Find local maxima
    neighborhood_size = 100
    threshold = .1

    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0
    for _ in range(5):
        maxima = binary_dilation(maxima)

    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    xy = np.array(
        ndimage.center_of_mass(data, labeled, range(1, num_objects + 1)))

    for pt in xy:
        if data[int(pt[0]), int(pt[1])] > np.max(data) * .9:
            upper = int(max(pt[0] - (h_k / 2), 0.))
            left = int(max(pt[1] - (w_k / 2), 0.))
コード例 #45
0
import math
from math import dist  # Python 3.8+

import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy.ndimage import filters


def HoughLines(edges, x_max, y_max):
    theta_max = 1.0 * math.pi
    theta_min = -1.0 * math.pi / 2.0
    r_min = 0.0
    r_max = math.hypot(x_max, y_max)
    r_dim = 200
    theta_dim = 300
    hough_space = np.zeros((r_dim,theta_dim))
    for edge in edges:
        for itheta in range(theta_dim):
            x,y=edge.pt
            theta = 1.0 * itheta * (theta_max - theta_min) / theta_dim + theta_min
            r = x * math.cos(theta) + y * math.sin(theta)
            ir = round(r_dim * ( 1.0 * r ) / r_max)
            if ir>=0 and ir<r_dim:
                hough_space[ir,itheta] = hough_space[ir,itheta] + 1

    neighborhood_size = 20
    threshold = 20

    data_max = filters.maximum_filter(hough_space, neighborhood_size)
    maxima = (hough_space == data_max)
    data_min = filters.minimum_filter(hough_space, neighborhood_size)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0

    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)

    temp=[]
    for dy,dx in slices:
        x_center = (dx.start + dx.stop - 1)/2
        y_center = (dy.start + dy.stop - 1)/2
        temp.append((hough_space[int(y_center)][int(x_center)],x_center,y_center))

    temp=sorted(temp,reverse=True)
    #print(len(temp),temp)
    x,y=[],[]
    r=[]
    theta=[]
    for _, i, j in temp:
        b = True
        c = True
        for x1, y1 in zip(x, y):
            if dist((x1, y1), (i, j)) < 10:  # discard closely spaced Hough peaks
                b = False
                break
            if y1 < 10 and j < 10 and (abs(x1 - i) < 10 or abs(x1 - i) in range(190, 210)):
                c = False
                break

        if b and c:
            x.append(i)
            y.append(j)
            r.append((1.0 * j * r_max ) / r_dim)
            theta.append(1.0 * i * (theta_max - theta_min) / theta_dim + theta_min)

    plt.imshow(hough_space, origin='lower')
    plt.plot(x,y, 'ro')
    cnt=1
    for i,j in zip(x,y):
        plt.text(i+1,j+1,str(cnt))
        cnt+=1
    plt.savefig('hough_space_maximas.png', bbox_inches = 'tight')
    plt.close()

    return r,theta
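A minimal usage sketch of HoughLines, assuming cv2-KeyPoint-style edge objects (anything with a .pt attribute); the SimpleNamespace stand-ins and the 100x100 grid are illustrative. Note the function also writes hough_space_maximas.png as a side effect:

from types import SimpleNamespace

# points lying on the vertical line x = 50 in a 100x100 image
edges = [SimpleNamespace(pt=(50, y)) for y in range(0, 100, 2)]
r, theta = HoughLines(edges, x_max=100, y_max=100)
print(r, theta)  # one detected line, roughly r = 50 and theta = 0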
コード例 #46
0
ファイル: image_insert_utils.py プロジェクト: trojai/trojai
def valid_locations(img: np.ndarray,
                    pattern: np.ndarray,
                    algo_config: ValidInsertLocationsConfig,
                    protect_wrap: bool = True) -> np.ndarray:
    """
    Returns a boolean mask of locations per channel at which the pattern can
    be inserted into the corresponding image channel, with overlap handling
    dictated by the algorithm configuration

    :param img: a numpy.ndarray which represents the image of shape:
           (nrows, ncols, nchans)
    :param pattern: the pattern to be inserted into the image of shape:
           (prows, pcols, nchans)
    :param algo_config: The provided configuration object specifying the algorithm to use and necessary parameters
    :param protect_wrap: if True, ensures that pattern to be inserted can fit without wrapping and raises an
                         Exception otherwise
    :return: A boolean mask of the same shape as the input image, with True
             indicating that that pixel is a valid location for placement of
             the specified pattern
    """
    num_chans = img.shape[2]

    # broadcast allow_overlap variable if necessary
    allow_overlap = algo_config.allow_overlap
    if not isinstance(allow_overlap, Sequence):
        allow_overlap = [allow_overlap] * num_chans
    elif len(allow_overlap) != num_chans:
        msg = "Length of provided allow_overlap sequence does not equal the number of channels in the image!"
        logger.error(msg)
        raise ValueError(msg)

    # broadcast min_val variable if necessary
    min_val = algo_config.min_val
    if not isinstance(min_val, Sequence):
        min_val = [min_val] * num_chans
    elif len(min_val) != num_chans:
        msg = "Length of provided min_val sequence does not equal the number of channels in the image!"
        logger.error(msg)
        raise ValueError(msg)

    # broadcast threshold_val variable if necessary
    threshold_val = algo_config.threshold_val
    if algo_config.algorithm == 'threshold':
        if not isinstance(threshold_val, Sequence):
            threshold_val = [threshold_val] * num_chans
        elif len(threshold_val) != num_chans:
            msg = "Length of provided threshold_val sequence does not equal the number of channels in the image!"
            logger.error(msg)
            raise ValueError(msg)

    if pattern.shape[2] != num_chans:
        # force user to broadcast the pattern as necessary
        msg = "The # of channels in the pattern does not match the # of channels in the image!"
        logger.error(msg)
        raise ValueError(msg)

    # TODO: look for vectorization opportunities
    output_mask = np.zeros(img.shape, dtype=bool)
    for chan_idx in range(num_chans):
        chan_img = img[:, :, chan_idx]
        chan_pattern = pattern[:, :, chan_idx]
        i_rows, i_cols = chan_img.shape
        p_rows, p_cols = chan_pattern.shape

        if allow_overlap[chan_idx]:
            output_mask[0:i_rows - p_rows + 1, 0:i_cols - p_cols + 1,
                        chan_idx] = True
        else:
            if protect_wrap:
                mask = (chan_img <= min_val[chan_idx])

                # True if image present, False if not
                img_mask = np.logical_not(mask)

                # remove boundaries from valid locations
                mask[i_rows - p_rows + 1:i_rows, :] = False
                mask[:, i_cols - p_cols + 1:i_cols] = False

                # get all edge pixels
                edge_pixels = None
                # edge pixels are not needed by the bounding_boxes branch below
                if algo_config.algorithm != 'bounding_boxes':
                    edge_pixel_coords = np.nonzero(
                        np.logical_and(
                            np.logical_xor(
                                filters.maximum_filter(img_mask,
                                                       3,
                                                       mode='constant',
                                                       cval=0.0),
                                filters.minimum_filter(img_mask,
                                                       3,
                                                       mode='constant',
                                                       cval=0.0)), img_mask))
                    edge_pixels = zip(edge_pixel_coords[0],
                                      edge_pixel_coords[1])

                if algo_config.algorithm == 'edge_tracing':
                    logger.debug(
                        "Computing valid locations according to edge_tracing algorithm"
                    )
                    edge_pixel_set = set(edge_pixels)
                    # search until all edges have been visited
                    while len(edge_pixel_set) != 0:
                        start_i, start_j = edge_pixel_set.pop()

                        # invalidate relevant pixels for start square
                        top_boundary = max(0, start_i - p_rows + 1)
                        left_boundary = max(0, start_j - p_cols + 1)
                        mask[top_boundary:start_i + 1,
                             left_boundary:start_j + 1] = False

                        curr_i, curr_j = start_i, start_j
                        move = 0, 0
                        while move is not None:
                            # what edge was last traversed
                            action_i, action_j = move
                            # current location
                            curr_i += action_i
                            curr_j += action_j

                            # truncate search when near top or left boundary
                            top_index = max(0, curr_i - p_rows + 1)
                            left_index = max(0, curr_j - p_cols + 1)

                            # update invalidation based on last move, marking a row or column invalid based on the size
                            # of action_i or action_j
                            # if action_i or action_j has absolute value greater than 0, the other must be 0,
                            # i.e diagonal moves of length greater than 1 aren't updated correctly by this
                            if action_i < 0:
                                # update top border
                                mask[top_index:top_index - action_i,
                                     left_index:curr_j + 1] = False
                            elif action_i > 0:
                                # update bottom border
                                mask[curr_i - action_i + 1:curr_i + 1,
                                     left_index:curr_j + 1] = False

                            if action_j < 0:
                                # update left border
                                mask[top_index:curr_i + 1,
                                     left_index:left_index - action_j] = False
                            elif action_j > 0:
                                # update right border
                                mask[top_index:curr_i + 1,
                                     curr_j - action_j + 1:curr_j + 1] = False

                            # obtain next pixel to inspect
                            move = _get_next_edge_from_pixel(
                                curr_i, curr_j, i_rows, i_cols, edge_pixel_set)

                elif algo_config.algorithm == 'brute_force':
                    logger.debug(
                        "Computing valid locations according to brute_force algorithm"
                    )
                    for i, j in edge_pixels:
                        top_index, left_index = max(0, i - p_rows + 1), max(
                            0, j - p_cols + 1)
                        mask[top_index:i + 1, left_index:j + 1] = False

                elif algo_config.algorithm == 'threshold':
                    logger.debug(
                        "Computing valid locations according to threshold algorithm"
                    )
                    for i, j in edge_pixels:
                        mask[max(0, i - p_rows + 1):i + 1,
                             max(0, j - p_cols + 1):j + 1] = False

                    # enumerate all possible invalid locations
                    mask_coords = np.nonzero(np.logical_not(mask))
                    possible_locations = zip(mask_coords[0], mask_coords[1])

                    # if average pixel value in location is below specified value, allow possible trigger overlap
                    for i, j in possible_locations:
                        if i <= i_rows - p_rows and j <= i_cols - p_cols and \
                                np.mean(chan_img[i:i + p_rows, j:j + p_cols]) <= threshold_val[chan_idx]:
                            mask[i][j] = True

                elif algo_config.algorithm == 'bounding_boxes':
                    logger.debug(
                        "Computing valid locations according to bounding_boxes algorithm"
                    )
                    # generate top-left and bottom-right corners of all grid squares
                    top_left_coords = np.swapaxes(np.indices((algo_config.num_boxes, algo_config.num_boxes)), 0, 2) \
                                        .reshape((algo_config.num_boxes * algo_config.num_boxes, 2))
                    bottom_right_coords = top_left_coords + 1

                    # rows give y1, x1, y2, x2 of grid boxes, y2 and x2 exclusive
                    box_coords = np.concatenate(
                        (top_left_coords, bottom_right_coords), axis=1)
                    box_coords = np.multiply(
                        box_coords, np.array([i_rows, i_cols, i_rows, i_cols]))
                    box_coords //= algo_config.num_boxes

                    # generate bounding boxes for image in each grid square
                    bounding_coords = np.apply_along_axis(
                        _get_bounding_box, 1, box_coords, img_mask)

                    # update mask, bounds -> top, left, bottom, right
                    for bounds in bounding_coords:
                        top_index = max(0, bounds[0] - p_rows + 1)
                        left_index = max(0, bounds[1] - p_cols + 1)
                        mask[top_index:bounds[2], left_index:bounds[3]] = False

                output_mask[:, :, chan_idx] = mask

            else:
                msg = "Wrapping for trigger insertion has not been implemented yet!"
                logger.error(msg)
                raise ValueError(msg)

    return output_mask
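The edge-pixel extraction above is worth seeing in isolation: XOR-ing a dilated (maximum_filter) and an eroded (minimum_filter) copy of a binary mask marks the transition band around its boundary, and AND-ing with the mask keeps only the inner edge. A minimal sketch with an illustrative square mask:

import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter

img_mask = np.zeros((8, 8), dtype=bool)
img_mask[2:6, 2:6] = True
edge = np.logical_and(
    np.logical_xor(maximum_filter(img_mask, 3, mode='constant', cval=0.0),
                   minimum_filter(img_mask, 3, mode='constant', cval=0.0)),
    img_mask)
print(edge.astype(int))  # 1s form the one-pixel inner boundary of the square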
コード例 #47
0
def complex_flux(spectrogram,
                 diff_frames=None,
                 diff_max_bins=3,
                 temporal_filter=3,
                 temporal_origin=0):
    """
    ComplexFlux.

    ComplexFlux is based on the SuperFlux, but adds an additional local group
    delay based tremolo suppression.

    Parameters
    ----------
    spectrogram : :class:`Spectrogram` instance
        :class:`Spectrogram` instance.
    diff_frames : int, optional
        Number of frames to calculate the diff to.
    diff_max_bins : int, optional
        Number of bins used for maximum filter.
    temporal_filter : int, optional
        Temporal maximum filtering of the local group delay [frames].
    temporal_origin : int, optional
        Origin of the temporal maximum filter.

    Returns
    -------
    complex_flux : numpy array
        ComplexFlux onset detection function.

    References
    ----------
    .. [1] Sebastian Böck and Gerhard Widmer,
           "Local group delay based vibrato and tremolo suppression for onset
           detection",
           Proceedings of the 14th International Society for Music Information
           Retrieval Conference (ISMIR), 2013.

    """
    # create a mask based on the local group delay information
    # take only absolute values of the local group delay and normalize them
    lgd = np.abs(spectrogram.stft.phase().lgd()) / np.pi
    # maximum filter along the temporal axis
    # TODO: use HPSS instead of simple temporal filtering
    if temporal_filter > 0:
        lgd = maximum_filter(lgd,
                             size=[temporal_filter, 1],
                             origin=temporal_origin)
    # lgd = uniform_filter(lgd, size=[1, 3])  # better for percussive onsets
    # create the weighting mask
    try:
        # if the magnitude spectrogram was filtered, use the minimum local
        # group delay value of each filterbank (expanded by one frequency
        # bin in both directions) as the mask
        mask = np.zeros_like(spectrogram)
        num_bins = lgd.shape[1]
        for b in range(mask.shape[1]):
            # determine the corner bins for the mask
            corner_bins = np.nonzero(spectrogram.filterbank[:, b])[0]
            # always expand to the next neighbour
            start_bin = corner_bins[0] - 1
            stop_bin = corner_bins[-1] + 2
            # constrain the range
            if start_bin < 0:
                start_bin = 0
            if stop_bin > num_bins:
                stop_bin = num_bins
            # set mask
            mask[:, b] = np.amin(lgd[:, start_bin:stop_bin], axis=1)
    except AttributeError:
        # if the spectrogram is not filtered, use a simple minimum filter
        # covering only the current bin and its neighbours
        mask = minimum_filter(lgd, size=[1, 3])
    # sum all positive 1st order max. filtered and weighted differences
    diff = spectrogram.diff(diff_frames=diff_frames,
                            diff_max_bins=diff_max_bins,
                            positive_diffs=True)
    return np.asarray(np.sum(diff * mask, axis=1))
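The fallback branch above is easy to isolate: for an unfiltered spectrogram the weighting mask is just the per-bin minimum of the normalized local group delay over each bin and its two frequency neighbours. A sketch with illustrative shapes (100 frames x 512 bins of fake phase data):

import numpy as np
from scipy.ndimage import minimum_filter

lgd = np.abs(np.random.uniform(-np.pi, np.pi, (100, 512))) / np.pi
mask = minimum_filter(lgd, size=[1, 3])  # minimum over each bin and its neighbours
print(mask.shape)  # (100, 512)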
コード例 #48
0
def plot_maxmin_points(lon,
                       lat,
                       data,
                       extrema,
                       nsize,
                       symbol,
                       color='k',
                       plotValue=True,
                       transform=None):
    """
    This function will find and plot relative maximum and minimum for a 2D grid. The function
    can be used to plot an H for maximum values (e.g., High pressure) and an L for minimum
    values (e.g., low pressue). It is best to used filetered data to obtain  a synoptic scale
    max/min value. The symbol text can be set to a string value and optionally the color of the
    symbol and any plotted value can be set with the parameter color.

    Parameters
    ----------
        lon : 2D array
            Plotting longitude values
        lat : 2D array
            Plotting latitude values
        data : 2D array
            Data that you wish to plot the max/min symbol placement
        extrema : str
            Either a value of max for Maximum Values or min for Minimum Values
        nsize : int
            Size of the grid box to filter the max and min values to plot a reasonable number
        symbol : str
            Text to be placed at location of max/min value
        color : str
            Name of matplotlib colorname to plot the symbol (and numerical value, if plotted)
        plot_value : Boolean (True/False)
            Whether to plot the numeric value of max/min point

    Return
    ------
        The max/min symbol will be plotted on the current axes within the bounding frame
        (e.g., clip_on=True)
    """
    from scipy.ndimage.filters import maximum_filter, minimum_filter

    if (extrema == 'max'):
        data_ext = maximum_filter(data, nsize, mode='nearest')
    elif (extrema == 'min'):
        data_ext = minimum_filter(data, nsize, mode='nearest')
    else:
        raise ValueError("Value for extrema must be either 'max' or 'min'")

    if lon.ndim == 1:
        lon, lat = np.meshgrid(lon, lat)

    mxx, mxy = np.where(data_ext == data)

    for i in range(len(mxy)):
        ax.text(lon[mxx[i], mxy[i]],
                lat[mxx[i], mxy[i]],
                symbol,
                color=color,
                size=36,
                clip_on=True,
                horizontalalignment='center',
                verticalalignment='center',
                transform=transform)
        if plotValue:
            ax.text(lon[mxx[i], mxy[i]],
                    lat[mxx[i], mxy[i]],
                    '\n' + str(int(data[mxx[i], mxy[i]])),
                    color=color,
                    size=12,
                    clip_on=True,
                    fontweight='bold',
                    horizontalalignment='center',
                    verticalalignment='top',
                    transform=transform)
        ax.plot(lon[mxx[i], mxy[i]],
                lat[mxx[i], mxy[i]],
                marker='o',
                markeredgecolor='black',
                markerfacecolor='white',
                transform=transform)
        ax.plot(lon[mxx[i], mxy[i]],
                lat[mxx[i], mxy[i]],
                marker='x',
                color='black',
                transform=transform)
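A minimal usage sketch; plot_maxmin_points draws on a module-level axes named ax (as the body above expects), so one is created first. The grid and pressure field are illustrative:

import numpy as np
import matplotlib.pyplot as plt

lon = np.linspace(-120, -60, 61)
lat = np.linspace(20, 60, 41)
pres = 1000 + 10 * np.random.randn(41, 61)  # fake MSLP field

fig, ax = plt.subplots()
ax.contour(lon, lat, pres, colors='gray')
plot_maxmin_points(lon, lat, pres, 'min', nsize=15, symbol='L',
                   color='b', transform=ax.transData)
plot_maxmin_points(lon, lat, pres, 'max', nsize=15, symbol='H',
                   color='r', transform=ax.transData)
plt.show()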
コード例 #49
0
ファイル: coordinates.py プロジェクト: lazem/odemis
def DivideInNeighborhoods(data, number_of_spots, scale, sensitivity_limit=100):
    """
    Given an image that includes N spots, divides it in N subimages with each of them
    to include one spot. Briefly, it filters the image, finds the N “brightest” spots
    and crops the region around them generating the subimages. This process is repeated
    until image division is feasible.
    data (model.DataArray): 2D array containing the intensity of each pixel
    number_of_spots (int,int): The number of CL spots
    scale (float): Distance between spots in optical grid (in pixels)
    sensitivity_limit (int): Limit of sensitivity
    returns subimages (List of DataArrays): One subimage per spot
            subimage_coordinates (List of tuples): The coordinates of the center of each
                                                subimage with respect to the overall image
    """
    # Denoise
    filtered_image = ndimage.median_filter(data, 3)

    # Bold spots
    # Third parameter must be a length in pixels somewhat larger than a typical
    # spot
    filtered_image = BandPassFilter(filtered_image, 1, 20)

    image = model.DataArray(filtered_image,
                            data.metadata)  # TODO: why a DataArray?
    avg_intensity = numpy.average(image)

    spot_factor = 10
    step = 4
    sensitivity = 4

    # After filtering based on optical scale there is no need to adjust
    # filter window size
    filter_window_size = 8

    # Increase sensitivity until expected number of spots is detected
    while sensitivity <= sensitivity_limit:
        subimage_coordinates = []
        subimages = []

        i_max, j_max = unravel_index(image.argmax(), image.shape)
        i_min, j_min = unravel_index(image.argmin(), image.shape)
        max_diff = image[i_max, j_max] - image[i_min, j_min]
        data_max = filters.maximum_filter(image, filter_window_size)
        data_min = filters.minimum_filter(image, filter_window_size)

        # Determine threshold
        threshold = max_diff / sensitivity

        # Filter the parts of the image with variance in intensity greater
        # than the threshold
        maxima = (image == data_max)
        diff = ((data_max - data_min) > threshold)
        maxima[diff == 0] = 0

        labeled, num_objects = ndimage.label(maxima)

        slices = ndimage.find_objects(labeled)

        # If too many features found, discards the ones too close from each other
        # Note: the main danger is that if the scale is wrong (bigger than the
        # real value), it will remove correct images
        if len(slices) > numpy.prod(number_of_spots):
            logging.debug(
                "Found %d features that could be spots, will be picky",
                len(slices))
            min_dist = max(4, scale / 2.1)  # px
        else:
            min_dist = max(4, scale / 8.1)  # px

        # Go through these parts and crop the subimages based on the neighborhood_size
        # value
        x_center_last, y_center_last = 0, 0
        for dy, dx in slices:
            x_center = (dx.start + dx.stop - 1) / 2
            y_center = (dy.start + dy.stop - 1) / 2

            subimage = image[int(dy.start - 2.5):int(dy.stop + 2.5),
                             int(dx.start - 2.5):int(dx.stop + 2.5)]

            if subimage.shape[0] == 0 or subimage.shape[1] == 0:
                continue

            if (subimage > spot_factor * avg_intensity).sum() < 6:
                continue

            # if spots detected too close keep the brightest one
            # FIXME: it should do it globally: find groups of images too close,
            # and pick the brightest point of each group
            tab = (x_center_last - x_center, y_center_last - y_center)
            if len(subimages) > 0 and math.hypot(tab[0], tab[1]) < min_dist:
                if numpy.sum(subimage) > numpy.sum(
                        subimages[len(subimages) - 1]):
                    subimages.pop()
                    subimage_coordinates.pop()
                    subimage_coordinates.append((x_center, y_center))
                    subimages.append(subimage)
            else:
                subimage_coordinates.append((x_center, y_center))
                subimages.append(subimage)

            x_center_last, y_center_last = x_center, y_center

        # Take care of outliers
        expected_spots = numpy.prod(number_of_spots)
        clean_subimages, clean_subimage_coordinates = FilterOutliers(
            image, subimages, subimage_coordinates, expected_spots)
        if len(clean_subimages) >= numpy.prod(number_of_spots):
            break

        sensitivity += step
    else:
        logging.warning("Giving up finding %d partitions, only found %d",
                        numpy.prod(number_of_spots), len(clean_subimages))

    return clean_subimages, clean_subimage_coordinates
コード例 #50
0
# initialize driver
driver = gdal.GetDriverByName('GTiff')


def write_image(img, filename):
    """
    Write img array to a file with the given filename
    Args:
        img (Band)
        filename (str)
    """
    x_size = img.shape[1]
    y_size = img.shape[0]
    dataset = driver.Create(filename, x_size, y_size)
    dataset.GetRasterBand(1).WriteArray(img)


# load original image
dataset = gdal.Open('img/mozambique-after-subset.tif')
band = dataset.GetRasterBand(1)
img = band.ReadAsArray().astype(np.uint8)

# position of local maxima
data_max = filters.maximum_filter(img, 5)
maxima = (img == data_max)
data_min = filters.minimum_filter(img, 5)
diff = ((data_max - data_min) > 150)
maxima[diff == 0] = 0

write_image(maxima, 'img/maxima.tif')
コード例 #51
0
nc.close()

START_TIME = 0 * 4 + 0  #37 00
END_TIME = 59 * 4 + 3  #37 12
SURFACE_WIND_LEVEL = 5  #1000 hPa
UPPER_WIND_LEVEL = 1  #250 hPa
OUTFILE = "lowpts"
storms = {}
storm_id = 0

f = open(OUTFILE, 'w')  #blank the file
f.close()

for TIME in range(START_TIME, END_TIME + 1):
    # find low centers
    data_ext = minimum_filter(pres[TIME], 50, mode='nearest')
    mxy, mxx = np.where(data_ext == pres[TIME])

    # get lat/lon of all low centers
    low_lons = lons[mxx]
    low_lats = lats[mxy]

    #get info on each low
    CROP = 8
    print(pres.shape)
    min_prs = pres[TIME, mxy, mxx]

    #prepare tc lists
    tc_lons = []
    tc_lats = []
コード例 #52
0
    def search_shortest_path_dws(self, start, goal):
        """
        start = (y, x)
        goal = (y, x)
        """

        start_goal = np.zeros((self.h, self.w), dtype=int)
        cost = np.zeros((self.h, self.w), dtype=int) + 1E10
        done = np.zeros((self.h, self.w), dtype=bool)
        barrier = np.zeros((self.h, self.w), dtype=bool)  # also folds in occupancy
        path = np.zeros((self.h, self.w), dtype=int)
        entrance = np.zeros((self.h, self.w), dtype=bool)

        # cross-shaped footprint used for the pooling filters
        g = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

        for iy in range(self.h):
            for ix in range(self.w):
                if iy == start[0] and ix == start[1]:
                    cost[iy, ix] = 0
                    done[iy, ix] = True
                    start_goal[iy, ix] = -255
                if iy == goal[0] and ix == goal[1]:
                    start_goal[iy, ix] = 255
                if self.data[iy, ix] == 1:  # barrier
                    barrier[iy, ix] = True
                if (iy, ix) in self.entrances:
                    entrance[iy, ix] = True

        barrier = barrier + self.occupancuy
        barrier[start[0], start[1]] = False
        barrier[goal[0], goal[1]] = False

        # plt.imshow(barrier, cmap="gist_yarg")
        # plt.imshow(entrance, cmap=customized_cool)
        # plt.show()

        for i in range(1, 10000000):

            # boolean mask of the cells reached on this step
            done_next = maximum_filter(done, footprint=g) * ~done  # fast
            # done_next = max_pooling(done) * ~done
            is_entrance = done_next * entrance

            # cost of the newly reached cells
            cost_next = minimum_filter(cost, footprint=g) * done_next  # fast
            # cost_next = min_pooling(cost) * done_next
            cost_next[done_next] += 1

            # for each cell where is_entrance is True,
            entrance_xy = list(zip(*np.where(is_entrance == True)))
            for ey, ex in entrance_xy:

                if done[ey - 1, ex] == True and self.room[ey - 1, ex] != True:
                    cost_next[ey, ex] = 10000000
                    done_next[ey, ex] = False
                    # print(f"This is entrance, {ey}, {ex}")

                if done[ey, ex + 1] == True and self.room[ey, ex + 1] != True:
                    cost_next[ey, ex] = 10000000
                    done_next[ey, ex] = False
                    # print(f"This is entrance, {ey}, {ex}")

                if done[ey + 1, ex] == True and self.room[ey + 1, ex] != True:
                    cost_next[ey, ex] = 10000000
                    done_next[ey, ex] = False
                    # print(f"This is entrance, {ey}, {ex}")

                if done[ey, ex - 1] == True and self.room[ey, ex - 1] != True:
                    cost_next[ey, ex] = 10000000
                    done_next[ey, ex] = False
                    # print(f"This is entrance, {ey}, {ex}")

            # update the cost
            cost[done_next] = cost_next[done_next]

            # but keep the obstacle cost at 10000000
            cost[barrier] = 10000000

            # mark the newly reached cells as done
            done[done_next] = done_next[done_next]

            # but never mark obstacles as done
            done[barrier] = False

            # stop once the goal has been reached
            if done[goal[0], goal[1]]:
                break

        # if self.debug:
        # plt.imshow(cost, cmap="jet", vmax=200, vmin=0, alpha=1)
        # barrier[goal[0], goal[1]] = 255
        # barrier[start[0], start[1]] = 255
        # plt.imshow(barrier, cmap=customized_gist_yarg)
        # plt.show()

        point_now = goal
        cost_now = cost[goal[0], goal[1]]
        route = [goal]

        while cost_now > 0:

            # came from above
            try:
                if cost[point_now[0] - 1, point_now[1]] == cost_now - 1:
                    # step
                    point_now = (point_now[0] - 1, point_now[1])
                    cost_now = cost_now - 1
                    # record
                    route.append(point_now)
            except IndexError:
                pass
            # came from below
            try:
                if cost[point_now[0] + 1, point_now[1]] == cost_now - 1:
                    point_now = (point_now[0] + 1, point_now[1])
                    cost_now = cost_now - 1
                    route.append(point_now)
            except IndexError:
                pass
            # came from the left
            try:
                if cost[point_now[0], point_now[1] - 1] == cost_now - 1:
                    point_now = (point_now[0], point_now[1] - 1)
                    cost_now = cost_now - 1
                    route.append(point_now)
            except IndexError:
                pass
            # came from the right
            try:
                if cost[point_now[0], point_now[1] + 1] == cost_now - 1:
                    point_now = (point_now[0], point_now[1] + 1)
                    cost_now = cost_now - 1
                    route.append(point_now)
            except IndexError:
                pass

        # reverse the route so it runs start -> goal
        route = route[::-1]

        for cell in route:
            ix = cell[1]
            iy = cell[0]
            path[iy, ix] = 1

        # if self.debug:
        #     plt.imshow(path, cmap=customized_cool)
        #     plt.imshow(barrier, cmap=customized_gist_yarg)
        #     plt.show()
        return route
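The min/max-filter wavefront idiom above, in isolation: maximum_filter over a cross footprint grows the visited set by one 4-connected step, and minimum_filter pulls in the cheapest neighbouring cost. A minimal sketch that computes Manhattan distances from a corner on an illustrative 5x5 grid:

import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter

g = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
done = np.zeros((5, 5), dtype=bool)
cost = np.full((5, 5), 1e9)
done[0, 0] = True
cost[0, 0] = 0

for _ in range(8):  # enough steps to flood a 5x5 grid
    frontier = maximum_filter(done, footprint=g) & ~done
    cost[frontier] = minimum_filter(cost, footprint=g)[frontier] + 1
    done |= frontier

print(cost.astype(int))  # Manhattan distance from (0, 0)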
コード例 #53
0
import re

from numpy import arange, array, empty, ones
from scipy.ndimage import filters


def xlevenshtein(a, b, context=1):
    """Calculates the Levenshtein distance between a and b
    and generates a list of differences by context."""
    n, m = len(a), len(b)
    sources = empty((m + 1, n + 1), object)
    sources[:, :] = None
    dists = 99999 * ones((m + 1, n + 1))
    dists[0, :] = arange(n + 1)
    for i in range(1, m + 1):
        previous = dists[i - 1, :]
        current = dists[i, :]
        current[0] = i
        for j in range(1, n + 1):
            if previous[j] + 1 < current[j]:
                sources[i, j] = (i - 1, j)
                dists[i, j] = previous[j] + 1
            if current[j - 1] + 1 < current[j]:
                sources[i, j] = (i, j - 1)
                dists[i, j] = current[j - 1] + 1
            delta = 1 * (a[j - 1] != b[i - 1])
            if previous[j - 1] + delta < current[j]:
                sources[i, j] = (i - 1, j - 1)
                dists[i, j] = previous[j - 1] + delta
    cost = current[n]

    # reconstruct the paths and produce two aligned strings
    l = sources[i, n]
    path = []
    while l is not None:
        path.append(l)
        i, j = l
        l = sources[i, j]
    al, bl = [], []
    path = [(n + 2, m + 2)] + path
    for k in range(len(path) - 1):
        i, j = path[k]
        i0, j0 = path[k + 1]
        u = "_"
        v = "_"
        if j != j0 and j0 < n: u = a[j0]
        if i != i0 and i0 < m: v = b[i0]
        al.append(u)
        bl.append(v)
    al = "".join(al[::-1])
    bl = "".join(bl[::-1])

    # now compute a splittable string with the differences
    assert len(al) == len(bl)
    al = " " * context + al + " " * context
    bl = " " * context + bl + " " * context
    assert "~" not in al and "~" not in bl
    same = array([al[i] == bl[i] for i in range(len(al))], 'i')
    same = filters.minimum_filter(same, 1 + 2 * context)
    als = "".join([al[i] if not same[i] else "~" for i in range(len(al))])
    bls = "".join([bl[i] if not same[i] else "~" for i in range(len(bl))])
    # print als
    # print bls
    ags = re.split(r'~+', als)
    bgs = re.split(r'~+', bls)
    confusions = [(a, b) for a, b in zip(ags, bgs) if a != "" or b != ""]
    return cost, confusions
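A quick usage sketch: the classic kitten/sitting pair has edit distance 3, and confusions pairs up the differing substrings with one character of context on each side:

cost, confusions = xlevenshtein("kitten", "sitting", context=1)
print(cost)        # 3.0
print(confusions)  # the context-padded spans where the two strings differ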
コード例 #54
0
ファイル: TestFilters.py プロジェクト: zgsxwsdxg/NumCpp
def test2D():
    modes = {'reflect' : NumCpp.Mode.REFLECT,
             'constant': NumCpp.Mode.CONSTANT,
             'nearest': NumCpp.Mode.NEAREST,
             'mirror': NumCpp.Mode.MIRROR,
             'wrap': NumCpp.Mode.WRAP}

    for mode in modes.keys():
        print(colored(f'Testing complementaryMedianFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.complementaryMedianFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()
        dataOutPy = data - filters.median_filter(data, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing convolve: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(10, 20, shape).astype(np.double)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        weights = np.random.randint(-2, 3, [kernalSize, kernalSize]).astype(np.double)
        cWeights = NumCpp.NdArray(kernalSize)
        cWeights.setArray(weights)
        dataOutC = NumCpp.convolve(cArray, kernalSize, cWeights, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.convolve(data, weights, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing gaussianFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape).astype(np.double)
        cArray.setArray(data)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        sigma = np.random.rand(1).item() * 2
        dataOutC = NumCpp.gaussianFilter(cArray, sigma, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.gaussian_filter(data, sigma, mode=mode, cval=constantValue)
        if np.array_equal(np.round(dataOutC, 2), np.round(dataOutPy, 2)):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing maximumFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.maximumFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.maximum_filter(data, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing medianFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.medianFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.median_filter(data, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing minimumFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.minimumFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.minimum_filter(data, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing percentileFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        percentile = np.random.randint(0, 101, [1,]).item()
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.percentileFilter(cArray, kernalSize, percentile, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.percentile_filter(data, percentile, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing rankFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        rank = np.random.randint(0, kernalSize**2 - 1, [1,]).item()
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.rankFilter(cArray, kernalSize, rank, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.rank_filter(data, rank, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(dataOutC, dataOutPy):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

        print(colored(f'Testing uniformFilter: mode = {mode}', 'cyan'))
        shape = np.random.randint(1000, 2000, [2,]).tolist()
        cShape = NumCpp.Shape(shape[0], shape[1])
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, shape).astype(np.double)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [1,]).item() # only actually needed for constant boundary condition
        dataOutC = NumCpp.uniformFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()
        dataOutPy = filters.uniform_filter(data, size=kernalSize, mode=mode, cval=constantValue)
        if np.array_equal(np.round(dataOutC, 8), np.round(dataOutPy, 8)):
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))
コード例 #55
0
def morphOpen(V, footprint):
    ''' computes the morphological opening of V (correlation map) with circular footprint'''
    vrem = filters.minimum_filter(V, footprint=footprint)       # erosion
    vrem = -filters.minimum_filter(-vrem, footprint=footprint)  # dilation (negated erosion)
    return vrem
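Since -minimum_filter(-x) is a maximum filter, morphOpen is an erosion followed by a dilation, i.e. an ordinary grey opening. A sketch of the equivalence for a symmetric footprint (the random array and 5x5 footprint are illustrative):

import numpy as np
from scipy.ndimage import grey_opening

V = np.random.rand(32, 32)
fp = np.ones((5, 5), dtype=bool)  # stand-in for a circular footprint
assert np.allclose(morphOpen(V, fp), grey_opening(V, footprint=fp))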
コード例 #56
0
def bsub(x, diameter=20):
    from scipy.ndimage.filters import minimum_filter
    return x - minimum_filter(x, size=diameter)
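A usage sketch of bsub on an illustrative noisy ramp: subtracting the rolling minimum removes the slowly varying baseline and keeps only local structure:

import numpy as np

x = np.linspace(0, 5, 200) + 0.1 * np.random.rand(200)
flat = bsub(x, diameter=20)
print(x.max(), flat.max())  # the ~5.0 ramp collapses to roughly 0.6 of local variation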
コード例 #57
0
    def grid_to_graph(self, scr, grid):
        import scipy.ndimage as ndimage
        import scipy.ndimage.filters as filters
        data = grid
        neighborhood_size = 7
        #threshold_max = 0.5
        threshold_diff = 0.1
        threshold_score = 0.2

        data_max = filters.maximum_filter(data, neighborhood_size)
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, neighborhood_size)
        #diff = ((data_max - data_min) > threshold_diff)
        #maxima[diff == 0] = 0
        maxima[data_max < 0.2] = 0

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        xx, yy, score = [], [], []
        for dy, dx in slices:
            x_center = (dx.start + dx.stop - 1) / 2
            y_center = (dy.start + dy.stop - 1) / 2
            s = np.average(data[dy.start:dy.stop + 1, dx.start:dx.stop + 1])
            if s > threshold_score:
                xx.append(x_center / grid.shape[1])
                yy.append(y_center / grid.shape[0])
                score.append(s)

        graph = list(zip(xx, yy, score))
        path = tsalesman(graph)
        paths_final = [path]

        scr_viz = np.copy(scr)
        h, w = scr.shape[0:2]
        #hup, wup = h/grid.shape[0], w/grid.shape[1]
        hup, wup = h, w
        for i, (x, y, s) in enumerate(zip(xx, yy, score)):
            size = 3 * int(s * 10)
            size = size if size % 2 != 0 else size - 1
            scr_viz = util.draw_point(scr_viz,
                                      y=int(y * hup),
                                      x=int(x * wup),
                                      size=size,
                                      color=[0, 255, 0])
            scr_viz = util.draw_text(scr_viz,
                                     y=int(y * hup),
                                     x=int(x * wup),
                                     text=str(i),
                                     color=[0, 255, 0])

        colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 255],
                  [0, 0, 0]]
        for path, col in zip(paths_final, colors):
            last_x = None
            last_y = None
            for (x, y, s) in path:
                if last_x is not None:
                    scr_viz = util.draw_line(scr_viz,
                                             y1=int(last_y * hup),
                                             x1=int(last_x * wup),
                                             y2=int(y * hup),
                                             x2=int(x * wup),
                                             color=col,
                                             thickness=2)
                last_x = x
                last_y = y
        misc.imshow(scr_viz)
        """
Code example #58
File: maxmin.py Project: iguit0/Image-Processing
import sys

import matplotlib.pyplot as plt
from scipy import misc
from skimage import img_as_float
from scipy.ndimage import filters


def loadImg(arg):
    return misc.imread(arg)


img_1 = loadImg(sys.argv[1])
saida_1 = sys.argv[2] + '.tif'
saida_2 = sys.argv[3] + '.tif'
mask_size = int(sys.argv[4])

# Convert the pixels to float, with values between 0 and 1
img_1 = img_as_float(img_1)

# Apply the minimum filter
img_saida_min = filters.minimum_filter(img_1,
                                       size=mask_size,
                                       mode='constant',
                                       cval=0)

# Apply the maximum filter
img_saida_max = filters.maximum_filter(img_1, size=mask_size, mode='constant')

# Save the output images after processing
misc.imsave(saida_1, img_saida_min)
misc.imsave(saida_2, img_saida_max)
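One detail worth noting: with mode='constant' both filters pad the border with cval (passed explicitly as 0 for the minimum filter; SciPy's default of 0.0 applies to the maximum filter). On a float image in [0, 1] this gives the minimum-filtered output a dark frame of width mask_size // 2, while the zero padding rarely changes the maximum-filtered result.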
Code example #59
File: ss_log.py Project: juglab/PlatyMatch
from scipy.ndimage.filters import minimum_filter


def local_minima(data):
    # True wherever a pixel equals the minimum of its 3-wide neighborhood
    # along every dimension
    peaks = data == minimum_filter(data, size=(3, ) * data.ndim)
    return peaks
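A quick illustration on a 1D array (arbitrary values):

import numpy as np

data = np.array([3.0, 1.0, 2.0, 0.5, 4.0])
print(local_minima(data))  # [False  True False  True False]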
Code example #60
import numpy as np
from scipy.ndimage import filters


def preprocess_image(raw_image,
                     threshold=0.0025,
                     sigma=1.0,
                     minf_size=1,
                     rank_size=1,
                     sobel=True):
    """
    Applies an edge filter followed by a noise reduction filter. Very good
    at locating powder rings and filtering everything else out.
    
    Parameters
    ----------
    raw_image : ndarray
        An image to find the edges of
        
    Returns
    -------
    binary_image : ndarray, np.bool
        A binary image, with "1" where there are powder rings/strong edges
    """

    # if needed, flatten the raw (4, 16, 185, 194) image into a single 2D
    # array in cheetah-like format; it is converted back before returning
    if raw_image.shape == (4, 16, 185, 194):
        non_flat_img = True
        image = np.zeros((1480, 1552), dtype=float)  # flat image
        for i in range(8):
            for j in range(4):
                x_start = 185 * i
                x_stop = 185 * (i + 1)
                y_start = 388 * j
                y_stop = 388 * (j + 1)

                two_by_one = np.hstack(
                    (raw_image[j, i * 2, :, :],
                     raw_image[j, i * 2 + 1, :, :])).astype(float)
                image[x_start:x_stop, y_start:y_stop] = two_by_one

    elif len(raw_image.shape) == 2:
        non_flat_img = False
        image = raw_image.astype(float)

    else:
        raise ValueError(
            '`raw_image` should be 2d or shape-(4,16,185,194), got'
            ': %s' % str(raw_image.shape))

    # apply rank filter & gaussian filter
    if rank_size > 2:
        image = filters.rank_filter(image, -1, size=rank_size)  # rank -1: local maximum
    if sigma > 0.1:
        image = filters.gaussian_filter(image, sigma=sigma)

    image -= image.min()
    assert image.min() == 0
    assert image.max() > 0

    # binarize at a fraction of the maximum intensity
    image = (image > (image.max() * threshold))

    # erode the binary image to suppress isolated noise pixels
    if minf_size > 2:
        image = filters.minimum_filter(image, size=minf_size)
    if sobel:
        image = np.abs(filters.sobel(image, 0)) + np.abs(
            filters.sobel(image, 1))

    if non_flat_img:
        # `read` is a module local to the original project; it restores the
        # (4, 16, 185, 194) panel layout
        image = read.enforce_raw_img_shape(image.astype(bool))
    else:
        image = image.astype(bool)

    return image
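A minimal smoke test on a synthetic frame (random data and a hypothetical threshold, with the sobel step disabled so the sketch stays on the plain 2D path):

import numpy as np

rng = np.random.default_rng(0)
frame = rng.random((256, 256))
mask = preprocess_image(frame, threshold=0.05, sigma=1.0, sobel=False)
print(mask.shape, mask.dtype)  # (256, 256) bool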