Example #1
def photonize(img, adu_per_photon, remainder_required=0.9, remainder_min=0.5):
    """
    
    """
    
    img[img < 0.0] = 0.0
    full_photons = (img // adu_per_photon).astype(int)  # np.int was removed from numpy
    remainder    = img % adu_per_photon
    
    neighbour_max = filters.maximum_filter(remainder, 
                                           footprint=np.array([[0,1,0],
                                                               [1,0,1],
                                                               [0,1,0]]))
    neighbour_max[neighbour_max < remainder_min] = 0.0 # filter small values
    
    neighbour_max_wc = filters.maximum_filter(remainder, 
                                              footprint=np.array([[0,1,0],
                                                                  [1,1,1],
                                                                  [0,1,0]]))
    local_maxima = (remainder == neighbour_max_wc)
    split_photons = ((remainder + neighbour_max) > remainder_required) *\
                    local_maxima
    
    
    photon_img = full_photons + split_photons
    
    return photon_img
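A hypothetical usage sketch (not from the original project): with adu_per_photon normalised to 1.0, the default remainder thresholds are already in photon units.

import numpy as np
from scipy.ndimage import filters

frame = np.zeros((3, 3))
frame[0, 0] = 2.3    # two full photons plus a small remainder
frame[1, 1] = 0.60   # one photon whose charge is split...
frame[1, 2] = 0.55   # ...across two neighbouring pixels
print(photonize(frame, adu_per_photon=1.0))
# [[2 0 0]
#  [0 1 0]
#  [0 0 0]] -> the split photon is assigned to the local remainder maximum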
def polylinesFromBinImage(img, minimum_cluster_size=6,
                          remove_small_obj_size=3,
                          reconnect_size=3,
                          max_n_contours=None, max_len_contour=None,
                          copy=True):
    '''
    Return a list of arrays of un-branching contours.

    img -> (boolean) array

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    array sizing:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length

    '''
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'reconnect_size needs to be odd'

    # assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
    # assume array size parameters if not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape)
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2])
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
                        dtype=np.uint16)  # if not search_kernel_size else np.float32)

    if img.dtype != bool:  # np.bool was removed from numpy
        img = img.astype(bool)
    elif copy:
        img = img.copy()

    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size,
                             connectivity=2, out=img)  # in_place was removed from skimage
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img)

    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]

    lines = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        lines.append(c[ind])
    return lines
Example #3
def compute_colseps_conv(binary, scale=1.0, minheight=10, maxcolseps=2):
    """Find column separators by convolution and thresholding.

    Args:
        binary (numpy.array): binarized page image (ink = 1)
        scale (float): estimate of the dominant character scale, in pixels
        minheight (int): minimum separator height, in multiples of scale
        maxcolseps (int): maximum number of column separators to keep

    Returns:
        Separators
    """

    h, w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < np.amax(smoothed)*0.1)
    # find column edges by filtering
    grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1))
    grad = uniform_filter(grad, (10.0*scale, 1))
    grad = (grad > 0.5*np.amax(grad))
    # combine edges and whitespace
    seps = np.minimum(thresh, maximum_filter(grad, (int(scale), int(5*scale))))
    seps = maximum_filter(seps, (int(2*scale), 1))
    # select only the biggest column separators
    seps = morph.select_regions(seps, sl.dim0, min=minheight*scale,
                                nbest=maxcolseps+1)
    return seps
Example #4
def ccslineseg(image,debug=0):
    image = 1.0*(image>0.3*amax(image))
    sigma = 10.0
    smooth = filters.gaussian_filter(image,(sigma,1.0*sigma),mode='constant')
    center = (smooth==amax(smooth,axis=0)[newaxis,:])
    center = filters.maximum_filter(center,(3,3))
    center = morph.keep_marked(image>0.5,center)
    center = filters.maximum_filter(center,(2,2))
    center,_ = morph.label(center)
    center = morph.spread_labels(center)
    center *= image
    return center
Example #5
    def find_extrema(cls, image):
        """
        Finds extrema, both minima and maxima, based on local maximum filter.
        Returns extrema in form of two rows, where the first and second are
        positions of x and y, respectively.

        Parameters
        ----------
        image : numpy 2D array
            Monochromatic image or any 2D array.

        Returns
        -------
        min_peaks : numpy array
            Minima positions.
        max_peaks : numpy array
            Maxima positions.
        """

        # define an 3x3 neighborhood
        neighborhood = generate_binary_structure(2, 2)

        # apply the local maximum filter; all pixels of maximal value
        # in their neighborhood are set to 1
        local_min = maximum_filter(-image, footprint=neighborhood) == -image
        local_max = maximum_filter(image, footprint=neighborhood) == image

        # can't distinguish between background zero and filter zero
        background = (image == 0)

        # a line would otherwise appear along the bg border (artifact of the local max filter)
        eroded_background = binary_erosion(background,
                                           structure=neighborhood,
                                           border_value=1)

        # we obtain the final mask, containing only peaks,
        # by removing the background from the local_max mask (xor operation)
        min_peaks = local_min ^ eroded_background
        max_peaks = local_max ^ eroded_background

        min_peaks[[0, -1], :] = False
        min_peaks[:, [0, -1]] = False
        max_peaks[[0, -1], :] = False
        max_peaks[:, [0, -1]] = False

        min_peaks = np.nonzero(min_peaks)
        max_peaks = np.nonzero(max_peaks)

        return min_peaks, max_peaks
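The minima-as-maxima-of-the-negated-image trick above can be shown standalone; a minimal sketch (not part of the original class), assuming only numpy and scipy.ndimage:

import numpy as np
from scipy.ndimage import maximum_filter, generate_binary_structure

a = np.array([[5., 1., 5.],
              [1., 0., 1.],
              [5., 1., 5.]])
nb = generate_binary_structure(2, 2)             # full 3x3 neighborhood
is_min = maximum_filter(-a, footprint=nb) == -a  # minima of a = maxima of -a
print(np.argwhere(is_min))                       # [[1 1]] -> the central minimum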
Example #6
    def find(self, alpha=5):
        """
        Takes an image and detects the peaks using the local maximum filter.
        Returns a boolean mask of the peaks (i.e. 1 when
        the pixel's value is the neighborhood maximum, 0 otherwise). Taken from
        http://stackoverflow.com/questions/9111711/
        get-coordinates-of-local-maxima-in-2d-array-above-certain-value
        """
        self.alpha = alpha

        image_max = maximum_filter(self.image_conv, self.win_size)
        maxima = (self.image_conv == image_max)

        self.mean = np.mean(self.image_conv)
        self.std = np.sqrt(np.mean((self.image_conv - self.mean)**2))
        self.threshold = self.alpha*self.std + self.mean

        diff = (image_max > self.threshold)
        maxima[diff == 0] = 0

        labeled, num_objects = label(maxima)
        if num_objects > 0:
            self.positions = maximum_position(self.image, labeled,
                                              range(1, num_objects + 1))
            self.positions = np.array(self.positions).astype(int)
            self.drop_overlapping()
            self.drop_border()
        else:
            self.positions = np.zeros((0, 2), dtype=int)
Example #7
def print_stats(fk, threshold):
    import numpy as np
    import scipy.ndimage.filters as filters

    tmp = []
    print('---------------------------')
    print('--- Arrival Information ---')
    print('---------------------------')
    print()
    print('normalized power (dB)   ', 'velocity (km/s)   ', 'backazimuth (deg)')
    maxxi = np.where(fk == filters.maximum_filter(fk, 5))
    this = np.empty([2, len(maxxi[0])])
    lth = np.amin(fk)*threshold
    for i in range(len(maxxi[0])):
        this[0][i] = (maxxi[0][i]-100)*0.5
        this[1][i] = (maxxi[1][i]-100)*0.5
        if fk[maxxi[0][i], maxxi[1][i]] > lth:
            baz = np.arctan2(this[0][i], this[1][i])*180.0/np.pi
            if baz < 0.0:
                baz += 360.0
            xvel = 111.19/np.sqrt(this[0][i]**2 + this[1][i]**2)
            xamp = fk[maxxi[0][i], maxxi[1][i]]
            tmp.append([xamp, xvel, baz])
    tmp.sort(reverse=True)
    for i in range(len(tmp)):
        print('%12.02f %19.02f %19.02f' % (tmp[i][0], tmp[i][1], tmp[i][2]))
Example #8
File: math2.py Project: tjlane/thor
def find_local_maxima(arr):
    """
    Find local maxima in a multidimensional array `arr`.
    
    Parameters
    ----------
    arr : np.ndarray
        The array to find maxima in
    
    Returns
    -------
    indices : tuple of np.ndarray
        The indices of local maxima in `arr`
    """
    
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    
    # neighborhood is a full hypercube of True (3x3 for 2D input, 3x3x3 for 3D)
    neighborhood = morphology.generate_binary_structure(len(arr.shape), 2)
    local_max = ( filters.maximum_filter(arr, footprint=neighborhood) == arr )
    
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    background = ( arr == 0 )
    eroded_background = morphology.binary_erosion(background,
                                                  structure=neighborhood,
                                                  border_value=1)
        
    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_max = local_max ^ eroded_background # ^ = XOR
    
    return np.where(detected_max)
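A hypothetical check of find_local_maxima (it relies on the module-level `filters` and `morphology` imports from scipy.ndimage):

import numpy as np

a = np.zeros((4, 4))
a[1, 1] = 3.0
a[2, 3] = 1.0
print(find_local_maxima(a))  # (array([1, 2]), array([1, 3])) -> the two isolated peaks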
Example #9
def find_extrema(array):
    '''
    Takes an array and finds its local extrema.

    Returns an array with 0s for not an extrema, 1s for maxs and -1 for mins
    and a list of the indices of all maximums and minimums

    N.B. this function is much faster than the above.
    '''
    extrema = np.zeros_like(array)
    maximums = []
    minimums = []

    local_max = maximum_filter(array, size=(3, 3)) == array
    local_min = minimum_filter(array, size=(3, 3)) == array
    extrema += local_max
    extrema -= local_min

    where_max = np.where(local_max)
    where_min = np.where(local_min)

    for max_point in zip(where_max[0], where_max[1]):
        if (max_point[0] != 0 and max_point[0] != array.shape[0] - 1 and
            max_point[1] != 0 and max_point[1] != array.shape[1] - 1):
            maximums.append(max_point)

    for min_point in zip(where_min[0], where_min[1]):
        if (min_point[0] != 0 and min_point[0] != array.shape[0] - 1 and
            min_point[1] != 0 and min_point[1] != array.shape[1] - 1):
            minimums.append(min_point)

    return extrema, maximums, minimums
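A hypothetical check: every extremum on the array border is discarded, so only the interior maximum survives (assumes maximum_filter/minimum_filter are imported as the function expects):

import numpy as np

a = np.array([[0., 0., 0.],
              [0., 2., 0.],
              [0., 0., 0.]])
extrema, maximums, minimums = find_extrema(a)
print(maximums, minimums)  # one interior maximum at (1, 1); all minima lie on the border and are dropped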
Example #10
def plotPeaks(arr2D, amp_min=DEFAULT_AMP_MIN):
	# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
	struct = generate_binary_structure(2, 1)
	neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

	# find local maxima using our filter shape
	local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
	background = (arr2D == 0)
	eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

	# Boolean mask of arr2D with True at peaks
	detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

	# extract peaks
	amps = arr2D[detected_peaks]
	j, i = np.where(detected_peaks)

	# filter peaks
	amps = amps.flatten()
	peaks = zip(i, j, amps)
	peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

	# get indices for frequency and time
	frequency_idx = [x[1] for x in peaks_filtered]
	time_idx = [x[0] for x in peaks_filtered]

	# scatter of the peaks
	fig, ax = plt.subplots()
	ax.imshow(arr2D)
	ax.scatter(time_idx, frequency_idx)
	ax.set_xlabel('Time')
	ax.set_ylabel('Frequency')
	ax.set_title("Spectrogram")
	plt.gca().invert_yaxis()
	plt.show()
Example #11
def detect_local_maxima(vol):
    """
    Takes a 3D volume and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define a 26-connected neighborhood
    neighborhood = morphology.generate_binary_structure(3,3) # first is dimension, next is relative connectivity

    # apply the local maximum filter; all locations of maximum value 
    # in their neighborhood are set to 1
    local_max = (filters.maximum_filter(vol, footprint=neighborhood)==vol)

    # Remove background
    local_max[vol==0] = 0

    # Find peak indices (row-major order, equivalent to looping over every voxel)
    x, y, z = np.nonzero(local_max)

    return list(x), list(y), list(z)
Example #12
    def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
        """
        Find bright peak candidates in (data).  (threshold) specifies a
        threshold value below which an object is not considered a candidate.
        If threshold is blank, a default is calculated using (sigma).
        (radius) defines a pixel radius for determining local maxima--if the
        desired objects are larger in size, specify a larger radius.

        The routine returns a list of candidate object coordinate tuples
        (x, y) in data.
        """
        if threshold is None:
            # set threshold to default if none provided
            threshold = self.get_threshold(data, sigma=sigma)
            self.logger.debug("threshold defaults to %f (sigma=%f)" % (
                threshold, sigma))

        data_max = filters.maximum_filter(data, radius)
        maxima = (data == data_max)
        diff = data_max > threshold
        maxima[diff == 0] = 0

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        peaks = []
        for dy, dx in slices:
            xc = (dx.start + dx.stop - 1)/2.0
            yc = (dy.start + dy.stop - 1)/2.0

            # This is only an approximate center; use FWHM or centroid
            # calculation to refine further
            peaks.append((xc, yc))

        return peaks
Example #13
def latin_filter(line,scale=1.0,r=1.5,debug=0):
    """Filter out noise from a text line in Latin alphabets."""
    bin = (line>0.5*amax(line))
    mask = latin_mask(bin,scale=scale,r=r,debug=debug)
    mask = morph.keep_marked(bin,mask)
    mask = filters.maximum_filter(mask,3)
    return line*mask
Example #14
def detect_peaks(image):
    """
    http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array

    Takes an image and detect the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    from scipy.ndimage.filters import maximum_filter
    from scipy.ndimage.morphology import generate_binary_structure, binary_erosion

    # define an 8-connected neighborhood
    neighborhood = generate_binary_structure(2,2)

    #apply the local maximum filter; all pixels of maximal value
    #in their neighborhood are set to 1
    local_max = maximum_filter(image, footprint=neighborhood)==image
    background = (image==0)

    #a little technicality: we must erode the background in order to
    #successfully subtract it from local_max, otherwise a line will
    #appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)
    detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy
    peaks = np.array(np.where(detected_peaks)).T

    return peaks
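A quick hypothetical sanity check of detect_peaks (the function carries its own scipy imports, so only numpy is needed):

import numpy as np

img = np.zeros((5, 5))
img[1, 1] = 2.0
img[3, 3] = 5.0
print(detect_peaks(img))
# [[1 1]
#  [3 3]] -> one (row, col) pair per isolated bright pixel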
def detect_local_maxima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """
    # define a fully connected neighborhood
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    # apply the local maximum filter; all locations of maximum value 
    # in their neighborhood are set to 1
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#maximum_filter
    local_max = (filters.maximum_filter(arr, footprint=neighborhood)==arr)
    # local_max is a mask that contains the peaks we are 
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.
    # 
    # we create the mask of the background
    background = (arr==0)
    # 
    # a little technicality: we must erode the background in order to 
    # successfully subtract it from local_max, otherwise a line will 
    # appear along the background border (artifact of the local maximum filter)
    # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
    eroded_background = morphology.binary_erosion(
        background, structure=neighborhood, border_value=1)
    # 
    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_maxima = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy
    return np.where(detected_maxima)
Example #16
def detect_peaks(image):
    """
    from: http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
    Takes an image and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. 1 when
    the pixel's value is the neighborhood maximum, 0 otherwise)
    """

    # define an 8-connected neighborhood
    neighborhood = generate_binary_structure(2, 2)

    # apply the local maximum filter; all pixels of maximal value
    # in their neighborhood are set to 1
    local_max = maximum_filter(image, footprint=neighborhood) == image
    # local_max is a mask that contains the peaks we are
    # looking for, but also the background.
    # In order to isolate the peaks we must remove the background from the mask.

    # we create the mask of the background
    background = (image == 0)

    # a little technicality: we must erode the background in order to
    # successfully subtract it from local_max, otherwise a line will
    # appear along the background border (artifact of the local maximum filter)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

    return detected_peaks
def detect_local_maxima(image):
    neighborhood = generate_binary_structure(2,2)
    local_max = maximum_filter(image, footprint=neighborhood)==image
    background = (image==0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
    detected_peaks = local_max ^ eroded_background
    return detected_peaks*256.0
Example #18
def order_statistics_filter(grid,window_size=(3,3,3),statistics_type='median',rank=1):
    filtered = grid.copy()
    if statistics_type=='minimum':
        scifilt.minimum_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type=='maximum':
        scifilt.maximum_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type=='median':
        scifilt.median_filter(grid,window_size,None,filtered, mode='nearest')
    elif statistics_type[:-2]=='percentile' or statistics_type[:-2]=='per':
        per = int(statistics_type[-2:])  # np.int was removed from numpy
        scifilt.percentile_filter(grid,per,window_size,None,filtered, mode='nearest')
    elif statistics_type=='rank':
        scifilt.rank_filter(grid,rank,window_size,None,filtered, mode='nearest')
    return filtered
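A hypothetical usage sketch, assuming scipy.ndimage.filters is imported as scifilt as the function expects:

import numpy as np
import scipy.ndimage.filters as scifilt

vol = np.random.rand(8, 8, 8)
med = order_statistics_filter(vol, statistics_type='median')  # 3x3x3 median
p90 = order_statistics_filter(vol, statistics_type='per90')   # 90th percentile
# each voxel is replaced by the chosen order statistic of its window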
Example #19
    def _superflux_diff_spec(spec, diff_frames=1, max_bins=3):
        """
        Calculate the difference spec used for SuperFlux.

        :param spec:        magnitude spectrogram
        :param diff_frames: calculate the difference to the N-th previous frame
        :param max_bins:    number of neighboring bins used for maximum
                            filtering
        :return:            difference spectrogram used for SuperFlux

        Note: If 'max_bins' is greater than 0, a maximum filter of this size
              is applied in the frequency direction. The difference of the
              k-th frequency bin of the magnitude spectrogram is then
              calculated relative to the maximum over m bins of the N-th
              previous frame (e.g. m=3: k-1, k, k+1).

              This method works only properly if the number of bands for the
              filterbank is chosen carefully. A value of 24 (i.e. quarter-tone
              resolution) usually yields good results.

        """
        # init diff matrix
        diff_spec = np.zeros_like(spec)
        if diff_frames < 1:
            raise ValueError("number of diff_frames must be >= 1")
        # widen the spectrogram in frequency dimension by `max_bins`
        max_spec = maximum_filter(spec, size=[1, max_bins])
        # calculate the diff
        diff_spec[diff_frames:] = spec[diff_frames:] - max_spec[0:-diff_frames]
        # keep only positive values
        np.maximum(diff_spec, 0, diff_spec)
        # return diff spec
        return diff_spec
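A minimal numeric sketch of the frequency-wise maximum filter (calling the helper directly on a made-up 4-frame, 3-bin magnitude spectrogram):

import numpy as np
from scipy.ndimage import maximum_filter

spec = np.array([[1., 0., 0.],
                 [0., 2., 0.],
                 [0., 0., 3.],
                 [0., 0., 0.]])
print(_superflux_diff_spec(spec, diff_frames=1, max_bins=3))
# [[0. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]] -> energy that merely shifts by one bin is largely cancelled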
Example #20
def get2DPeaks(arr2D):
    """
        Generates peaks of a spectogram.
        Args:
            arr2D: spectogram.
        Returns:
            List of pairs (time, frequency) of peaks.
    """

    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return zip(frequency_idx, time_idx)
    def adaptive_extrema_finder(self, input_signal, neighbourhood_size, threshold):
        # Determine the local maxima in a given neighbourhood using a maximum
        # filter: it finds the local max in a neighbourhood of size
        # neighbourhood_size and replaces all values in that region by the maximum
        import scipy.ndimage.filters as filters

        maximum_filter_output = filters.maximum_filter(input_signal, neighbourhood_size)

        # Find the maxima points: a boolean array of the points where the data
        # equals the local maximum
        maxima_points = (input_signal == maximum_filter_output)

        # Smooth the signal to estimate the background values; sigma is chosen
        # so the window covers > 99% of the Gaussian's spread
        # (note: this background estimate is currently unused below)
        sigma = (neighbourhood_size - 1) / 6.0
        background_filter_output = filters.gaussian_filter(input_signal, sigma)

        # Suppress maxima that do not exceed `threshold` standard deviations
        threshold_value = threshold * np.nanstd(input_signal)
        threshold_output = (input_signal > threshold_value)
        maxima_points[np.logical_not(threshold_output)] = 0

        return maxima_points
Example #22
def nonmaxsuppts(cim, radius, threshold):
    # Extract local maxima by performing a grey scale morphological
    # dilation and then finding points in the corner strength image that
    # match the dilated image and are also greater than the threshold.
    # non-maximum suppression in 5x5 regions

    h, w = cim.shape[0:2]
    cap_length = 1000

    maxH = filters.maximum_filter(cim, (radius,radius))

    cim = cim * (cim == maxH)
    cim = cim * (cim > threshold)

    # sort points by strength and find their positions
    sortIdx = np.argsort(cim.flatten())[::-1]

    # count surviving (non-zero) responses
    count = np.count_nonzero(cim)

    sortIdx = sortIdx[0:min(count, cap_length)]
    yy = sortIdx // w  # integer division; '/' yields floats on Python 3
    xx = sortIdx % w

    ret_list = list()
    for i in range(0,len(sortIdx)):
        ret_list.append((yy[i],xx[i]))
    return ret_list  # a list of (row, col) tuples
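A hypothetical call on a random corner-strength image (numpy and the scipy.ndimage `filters` alias are assumed at module level):

import numpy as np
import scipy.ndimage.filters as filters

cim = np.random.rand(64, 64)
corners = nonmaxsuppts(cim, radius=5, threshold=0.95)
# each (row, col) is a 5x5-neighbourhood maximum with strength above 0.95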
def f_output_clean(fk, d, ii, rmvd):
    import numpy as np
    import scipy.ndimage.filters as filters

    r = []

    maxxi = np.where(fk == filters.maximum_filter(fk, 5))
    this = np.empty([2, len(maxxi[0])])
    rg_l = []

    for i in range(len(maxxi[0])):
        this[0][i] = (maxxi[0][i] - 80) * 0.5
        this[1][i] = (maxxi[1][i] - 80) * 0.5
        if 10 * np.log10(fk[maxxi[0][i], maxxi[1][i]] / fk.max()) > rmvd:
            baz = np.arctan2(this[0][i], this[1][i]) * 180.0 / np.pi
            if baz < 0.0:
                baz += 360.0
            xvel = 111.19 / np.sqrt(this[0][i] ** 2 + this[1][i] ** 2)
            xamp = fk[maxxi[0][i], maxxi[1][i]]

            rg_l.append([xamp, xvel, baz])

    rg_l.sort(reverse=True)

    return d, ii, rg_l
Example #24
def local_normalize(gim,kernel_size):
    maxf = filters.maximum_filter(gim,kernel_size)
    minf = filters.minimum_filter(gim,kernel_size)
    num = gim-minf
    den = maxf-minf
    den[np.where(den==0)]=1.0
    return num/den
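A hypothetical usage of local_normalize (assumes the scipy.ndimage `filters` alias used by the function):

import numpy as np
from scipy.ndimage import filters

gim = np.arange(16, dtype=float).reshape(4, 4)
out = local_normalize(gim, 3)
# each pixel is stretched to [0, 1] relative to its 3x3 neighbourhood min/max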
def get_2D_peaks(array2D):
    # This function is based on the function 'get_2D_peaks()' available at the URL below.
    # https://github.com/worldveil/dejavu/blob/master/dejavu/fingerprint.py
    # Copyright (c) 2013 Will Drevo, use permitted under the terms of the open-source MIT License.

    # Create a filter to extract peaks from the image data.
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, 25)

    # Find local maxima using our filter shape. These are boolean arrays.
    local_maxima = maximum_filter(array2D, footprint=neighborhood) == array2D
    background = (array2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    # Boolean mask of array2D with True at peaks.
    detected_peaks = local_maxima ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

    # Extract peak amplitudes and locations.
    amps = array2D[detected_peaks]
    j, i = numpy.where(detected_peaks)

    # Filter peaks for those exceeding the minimum amplitude.
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > AMP_MIN]

    # Get frequency and time at peaks.
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return (frequency_idx, time_idx)
Example #26
	def generateData(self):
		self.frame += 1
		if self.frame == 0 or (self.interval > 0 and self.frame%self.interval==0):
		
			image = self.getInput(0).getData()
			Ixx = self.getInput(1).getData()
			Iyy = self.getInput(2).getData()
			Ixy = self.getInput(3).getData()

			image = image.astype(numpy.float32)
			rows, cols = image.shape[:2]
			
			# compute Harris weights
			hW = (Ixx * Iyy - Ixy**2) / (Ixx + Iyy + 1e-8)
		
			# exclude image boundaries
			hW[:16, :] = 0
			hW[-16:, :] = 0
			hW[:, :16] = 0
			hW[:, -16:] = 0
		
			# non-maximum suppression in 5x5 regions
			maxH = filters.maximum_filter(hW, (5,5))
			hW = hW * (hW == maxH)
		
			sortIdx = numpy.argsort(hW.flatten())[::-1]
			sortIdx = sortIdx[:self.count]
			yy = sortIdx // cols  # integer division for row indices
			xx = sortIdx % cols
			
			# concatenate positions and values
			xyv = numpy.vstack((xx, yy, hW.flatten()[sortIdx])).transpose()
				
			self.getOutput(0).setData(hW)
			self.getOutput(1).setData(xyv)
def find_objects(data, size=5, thresh=None, N=20, verbosity=1):

  # maximum step height in neighborhood of size 'size'
  data_max = filters.maximum_filter(data, size)
  data_min = filters.minimum_filter(data, size)
  diff = data_max - data_min

  # determine threshold which gives N pixels
  if thresh is None: thresh = np.sort(diff.flat)[-N]

  # create mask for image (maximum with high step size)
  maxima = (data == data_max)
  maxima[diff <= thresh] = False

  # find connected objects (pixel agglomerates)
  labeled, num_objects = ndimage.label(maxima)
  slices = ndimage.find_objects(labeled)

  # DEBUG
  if verbosity > 2:
    plt.figure()
    plt.imshow(np.log(1 + np.abs(data)), interpolation='nearest')
    # create an RGBA overlay marking the maxima
    overlay = np.zeros(data.shape + (4,))
    overlay[maxima] = (1, 1, 0, 1)  # set opacity according to maxima
    plt.imshow(overlay, interpolation='nearest')

  return slices
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D

    background = (arr2D == 0)

    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    return zip(frequency_idx, time_idx)
Example #29
  def find_local_maxima(self, data, neighborhood_size):
    """ 
     find local maxima within neighborhood 
      idea from http://stackoverflow.com/questions/9111711
      (get-coordinates-of-local-maxima-in-2d-array-above-certain-value)
    """

    # find local maxima in image (width specified by neighborhood_size)
    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima   = (data == data_max)
    assert np.sum(maxima) > 0        # we should always find local maxima
  
    # remove connected pixels (plateaus): keep one pixel per labelled region
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    maxima[:] = False   # in-place '*= 0' would try to up-cast the boolean mask
    for dx, dy in slices:
      maxima[(dx.start + dx.stop - 1) // 2, (dy.start + dy.stop - 1) // 2] = 1

    # calculate difference between local maxima and lowest
    # pixel in neighborhood (will be used in select_local_maxima)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff     = data_max - data_min
    self._maxima = maxima
    self._diff   = diff

    return maxima,diff
Example #30
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = arr2D == 0
    eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)

    detected_peaks = local_max ^ eroded_background  # '^' replaces the boolean '-' removed from numpy

    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    return zip(frequency_idx, time_idx)
Peaks_of_Np = np.zeros(numberofdata)
entry1 = r'.\Euglena\N-'
entry2 = r'.\Euglena\N+'
fnamelist1 = glob.glob(os.path.join(entry1, '*.tif'))
fnamelist2 = glob.glob(os.path.join(entry2, '*.tif'))
r = 3
index = 0
for filename in fnamelist1[:numberofdata]:
    im = Image.open(filename)
    imarray = np.array(im)
    imarray[imarray < 5] = 0
    image = imarray[:, :, 1]
    neighborhood_size = 10
    threshold = 25
    data = imarray[:, :, 1]
    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    x, y = [], []
    for dy, dx in slices:
        x_center = (dx.start + dx.stop - 1) / 2
        x.append(x_center)
        y_center = (dy.start + dy.stop - 1) / 2
        y.append(y_center)
    if len(x) > 0:
        Peaks_of_Nm[index] = len(x)
        new_imarray = imarray
Example #32
def keypoints_censure(image,
                      min_scale=1,
                      max_scale=7,
                      mode='DoB',
                      non_max_threshold=0.15,
                      line_threshold=10):
    """
    Extracts CenSurE keypoints along with the corresponding scale using
    either Difference of Boxes, Octagon or STAR bi-level filter.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    min_scale : int
        Minimum scale to extract keypoints from.
    max_scale : int
        Maximum scale to extract keypoints from. The keypoints will be
        extracted from all the scales except the first and the last i.e.
        from the scales in the range [min_scale + 1, max_scale - 1].
    mode : {'DoB', 'Octagon', 'STAR'}
        Type of bi-level filter used to get the scales of the input image.
        Possible values are 'DoB', 'Octagon' and 'STAR'. The three modes
        represent the shape of the bi-level filters i.e. box(square), octagon
        and star respectively. For instance, a bi-level octagon filter consists
        of a smaller inner octagon and a larger outer octagon with the filter
        weights being uniformly negative in both the inner octagon while
        uniformly positive in the difference region. Use STAR and Octagon for
        better features and DoB for better performance.
    non_max_threshold : float
        Threshold value used to suppress maxima and minima with a weak
        magnitude response obtained after Non-Maximal Suppression.
    line_threshold : float
        Threshold for rejecting interest points which have ratio of principal
        curvatures greater than this value.

    Returns
    -------
    keypoints : (N, 2) array
        Location of the extracted keypoints in the ``(row, col)`` format.
    scales : (N, 1) array
        The corresponding scale of the N extracted keypoints.

    References
    ----------
    .. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas
           "CenSurE: Center Surround Extremas for Realtime Feature
           Detection and Matching",
           http://link.springer.com/content/pdf/10.1007%2F978-3-540-88693-8_8.pdf

    .. [2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala
           "Comparative Assessment of Point Feature Detectors and
           Descriptors in the Context of Robot Navigation"
           http://www.jamris.org/01_2013/saveas.php?QUEST=JAMRIS_No01_2013_P_11-20.pdf

    """

    # (1) First we generate the required scales on the input grayscale image
    # using a bi-level filter and stack them up in `filter_response`.
    # (2) We then perform Non-Maximal suppression in a 3 x 3 x 3 window on the
    # filter_response to suppress points that are neither minima nor maxima in
    # their 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray `feature_mask`
    # containing all the minima and maxima in `filter_response` as True.
    # (3) Then we suppress all the points in the `feature_mask` for which the
    # corresponding point in the image at a particular scale has the ratio of
    # principal curvatures greater than `line_threshold`.
    # (4) Finally, we remove the border keypoints and return the keypoints
    # along with its corresponding scale.

    image = np.squeeze(image)
    if image.ndim != 2:
        raise ValueError("Only 2-D gray-scale images supported.")

    mode = mode.lower()
    if mode not in ('dob', 'octagon', 'star'):
        raise ValueError('Mode must be one of "DoB", "Octagon", "STAR".')

    if min_scale < 1 or max_scale < 1 or max_scale - min_scale < 2:
        raise ValueError('The scales must be >= 1 and the number of scales '
                         'should be >= 3.')

    image = img_as_float(image)
    image = np.ascontiguousarray(image)

    # Generating all the scales
    filter_response = _filter_image(image, min_scale, max_scale, mode)

    # Suppressing points that are neither minima nor maxima in their 3 x 3 x 3
    # neighbourhood to zero
    minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
    maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response

    feature_mask = minimas | maximas
    feature_mask[filter_response < non_max_threshold] = False

    for i in range(1, max_scale - min_scale):
        # sigma = (window_size - 1) / 6.0, so the window covers > 99% of the
        #                                  kernel's distribution
        # window_size = 7 + 2 * (min_scale - 1 + i)
        # Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
        _suppress_lines(feature_mask[:, :, i], image,
                        (1 + (min_scale + i - 1) / 3.0), line_threshold)

    rows, cols, scales = np.nonzero(feature_mask[..., 1:max_scale - min_scale])
    keypoints = np.column_stack([rows, cols])
    scales = scales + min_scale + 1

    if mode == 'dob':
        return keypoints, scales

    cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)  # np.bool was removed from numpy

    if mode == 'octagon':
        for i in range(min_scale + 1, max_scale):
            c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
                + OCTAGON_OUTER_SHAPE[i - 1][1]
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)
    elif mode == 'star':
        for i in range(min_scale + 1, max_scale):
            c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
                + STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
            cumulative_mask |= _mask_border_keypoints(image, keypoints, c) \
                               & (scales == i)

    return keypoints[cumulative_mask], scales[cumulative_mask]
def min_max_filt(arr):
    # Minimum and maximum filters clip off artefactual IAS protrusions due to dangling epidermis
    # FIX: Perhaps make the filter size variable? User input based?
    out = minimum_filter(maximum_filter(arr, 20), 20)
    return out
Example #34
def ensemble_detect(pca_data):
    
    sigma = 3.
    x, y = np.arange(100), np.arange(100)
    matrix = np.zeros((100,100), dtype=np.float32)

    for k in range(len(pca_data.T[0])):
        x0, y0 = pca_data.T[0][k], pca_data.T[1][k]
        gx = np.exp(-(x-x0)**2/(2*sigma**2))
        gy = np.exp(-(y-y0)**2/(2*sigma**2))
        g = np.outer(gx, gy)
        g = g / np.sum(g)  # normalize, if you want that

        matrix+= g
    
    for i in range(0, len(matrix)):
        for j in range(i+1, len(matrix)):
            matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]

    neighborhood_size = 5
    threshold = 0.05

    data = matrix

    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0

    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    x, y = [], []
    for dy,dx in slices:
        x_center = (dx.start + dx.stop - 1)/2
        x.append(x_center)
        y_center = (dy.start + dy.stop - 1)/2    
        y.append(y_center)

    plt.imshow(data)
    #plt.savefig('/tmp/data.png', bbox_inches = 'tight')

    #plt.autoscale(False)
    plt.plot(x,y, 'ro')
    #plt.savefig('/tmp/result.png', bbox_inches = 'tight')



    #plt.imshow(np.flipud(matrix), vmin=0, vmax=1., cmap='viridis')
    plt.xlim(0,100)
    plt.ylim(0,100)
    plt.show()
    
    ensemble_list = []
    for k in range(len(x)): ensemble_list.append([])
    for k in range(len(pca_data)):
        a = [pca_data[k][0], pca_data[k][1]]
        for p in range(len(x)):
            b = [x[p], y[p]]
            if distance.euclidean(a,b)<2:
                ensemble_list[p].append(k)
                break
    
    for k in range(len(ensemble_list)): print(ensemble_list[k])
    
    return ensemble_list
Example #35
def susanCorner(image,
                radius=3.4,
                fptype=bool,
                t=25,
                gfrac=None,
                cgthresh=None,
                nshades=256):
    """
    Implements SUSAN corner detector algorithm as described in "SUSAN--A
    New Approach to Low Level Image Processing", S. M. Smith and J. M.
    Brady, Technical Report TR95SMSIc (1995), available at:
    http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.24.2763 .
    Alternatively, there is also a slightly abridged version of the
    same reference, with identical title and authors, available at
    International Journal of Computer Vision 23(1): 45-78 (1997).

    image: array_like

        Array containing grayscale image data.  Only grayscale pixel
        values are supported--cannot handle 3-channel color.

    radius: scalar, optional

        Circular footprint radius, in units of pixels; passed down
        as input to masks.circFootprint().  Default is 3.4, as
        recommended in reference.

    fptype: data-type, optional

        Data type of circular footprint; passed to masks.circFootprint().
        Default is bool, since the version of the algorithm originally
        described in the reference did not define the algorithm behavior
        for any other type of footprint (we have trivially extended it
        to cover float type footprints as well, however there is not
        much observable different in the results, as it only affects the
        weighting of the pixels at the edge of the footprint, not the
        central region).

    t: scalar, optional

        Threshold value for color difference required to exclude/include
        a pixel in the USAN region described in the reference article.
        Default is 25 grayscale levels, as suggested by article authors,
        and assumes a grayscale image with a total of 256 distinct
        shades.  As described in eq. 4, this is technically a "soft"
        threshold, rather than a hard cutoff, and moreover it's also
        bidirectional; i.e., a setting of t=25 actually means +/- 25.

    gfrac: scalar, optional

        Fraction of maximum number of USAN pixels to assign to the "g"
        parameter in eq. 3 of the reference article.  Default setting
        of gfrac=None triggers lower level functions to select a
        context-appropriate default value (for corner detection
        specifically, default is gfrac=0.5) as recommended by
        reference article authors.

    cgthresh: scalar, optional

        Threshold value of USAN center of gravity, in units of pixels,
        which will lead to a shift between one underlying set of
        approximations/assumptions vs. another.  Default setting of
        cgthresh=None triggers lower level masks.usan() function to
        select a context-appropriate default value (for corner detection
        specifically, default is cgthresh=0.45*radius) as recommended
        by reference article authors.  See masks.usan() source code
        and reference article for further (extremely esoteric) details.
        For corner detection in particular (less so with edge detection)
        we recommend testing a variety of settings both above and below
        the default as this parameter seems to have a particularly
        strong effect in some cases on signal vs. noise.

    nshades: scalar, optional

        Total number of distinct integer grayscale levels available in
        image format.  Defaults to 256 (i.e., 2**8), appropriate for an
        8 bit image format such as jpeg.  For image formats with higher
        grayscale color resolution, e.g. such as 12-bit or 16-bit, set
        nshades=4096 or nshades=65536 (i.e., 2**12 or 2**16).

    Returns
    -------

    corner: ndarray

        The corner response of the image as calculated by the SUSAN corner
        detection algorithm.
        
    """

    # Get raw corner response; many corners may consist of small clusters of
    # adjacent responsive pixels
    rawcorner = masks.usan(image,
                           mode='Corner',
                           radius=radius,
                           fptype=fptype,
                           t=t,
                           gfrac=gfrac,
                           cgthresh=cgthresh,
                           nshades=nshades)

    # Find maximum corner response within circular USAN footprint (but force
    # footprint type to be bool in this case regardless of user-selected
    # input fptype, because float would make no sense in this context)
    fp = masks.circFootprint(radius=radius, dtype=bool)
    rawmax = filters.maximum_filter(rawcorner, footprint=fp)

    # True corners are those where response is both locally maximum as well
    # as non-zero
    corner = np.where(rawcorner == rawmax, rawcorner, 0)

    return corner
Example #36
def wangbrady(image,
              S=0.1,
              T1=500,
              T2=2000,
              weights=[3. / 16, 10. / 16, 3. / 16],
              mfsize=10):
    """
    Implements Wang-Brady style corner detection, based upon the
    description provided in "Real-time corner detection algorithm for
    motion estimation", Han Wang and Michael Brady, Image and Vision
    Computing 13(9): 695-703 (Nov 1995).

    Parameters
    ----------

    image: array_like

        2D array containing grayscale image data.  Only grayscale pixel
        values are supported--cannot handle 3-channel color.

    S: scalar, optional

        Image curvature parameter introduced in eq. 8 and 9 of the
        reference article.  Defaults to 0.1, as suggested by article
        authors.

    T1, T2: scalar, optional

        User-defined thresholds arising in eq. 9 of the reference
        article.  Defaults to (500, 2000), as recommended by article
        authors.

    weights: array_like

        1D array with 3 elements signifying type of derivative mask to
        use (e.g., Sobel, Prewitt, Scharr, etc.)  Defaults to
        [3./16, 10./16, 3./16], corresponding to Scharr.  Values should
        be normalized so that they sum to one; see masks.getDeriv for
        further details.

    mfsize: scalar, optional

        Size value to be passed through down to
        scipy.ndimage.filters.maximum_filter().  Effectively sets the
        maximum permitted density scale for corners, as multiple
        corner-responsive pixels which are closer together than this
        distance from one another will tend to result in only the pixel
        with the largest corner response being accepted.  See
        scipy.ndimage.filters.maximum_filter() for further details.

    Returns
    -------

    corner: ndarray

        The corner response of the image as calculated by the Wang-Brady
        corner detection algorithm.

    """

    # Substitute a very small number for zero, to avoid divide-by-zero errors
    def removeZeros(dummy, subvalue=1e-9):
        idx = (dummy == 0)
        dummy[idx] = subvalue
        return dummy

    # Because we are calculating dxdy cross derivatives, weights in this case
    # must be normalized to have correct sizing relative to other derivatives
    if sum(weights) != 1:
        raise ValueError('Sum of weights must be normalized to 1, due to ' +
                         'use of cross derivatives')

    # Calculate first and second derivatives
    dx = masks.getDeriv(image, weights=weights, axes=[0])
    dy = masks.getDeriv(image, weights=weights, axes=[1])
    dxx = masks.getDeriv(image, weights=weights, axes=[0, 0])
    dyy = masks.getDeriv(image, weights=weights, axes=[1, 1])
    dxy = masks.getDeriv(image, weights=weights, axes=[0, 1])

    # Calculate image gradient squared and tangent second derivative
    grdsqd = removeZeros(dx * dx + dy * dy)
    numerator = removeZeros(dy * dy * dxx - 2 * dx * dy * dxy + dx * dx * dyy)
    dtdt = np.divide(numerator, grdsqd)

    # Apply eq. 9 from reference article, together with requirement that
    # results should all be a local maxima, to obtain final corner response
    gamma = dtdt * dtdt - S * grdsqd
    gmax = filters.maximum_filter(gamma, size=mfsize)
    corner = np.where((gamma == gmax) & (grdsqd > T1) & (gamma > T2), gamma, 0)

    return corner
	finalcube = array(finalcube)
	save('finalcube.npy', finalcube)
	print "errors: %.2f%%" % (100*problems/total)
else:
	finalcube = load('finalcube.npy')
	
mean_cube = zeros(finalcube[0].shape)
for i in x:
	for j in y:
		values = finalcube[:,i,j]
		if np.count_nonzero(values) > 1:
			mean_cube[i,j] = values.mean() * 100.0
		else:
			mean_cube[i,j] = values.max() * 100.0

X, Y = np.meshgrid(x, y, indexing='ij')
cmap = 'jet'
s = 100
l = np.linspace(0,100,100)
ticks = range(0, 101, 10)

xlim(-radius, radius)
ylim(-radius, radius)
Z = maximum_filter(mean_cube, size=s, mode='constant')
Z = median_filter(Z, size=50, mode='constant')
title('Taxa de entrega (%) em topologias em grade')
contourf(X, Y, Z, cmap=cmap, vmin=0.0, vmax=100.0, levels=l)
colorbar(ticks=ticks)

show()
def find_localmax_method_1(TFR, fVec, tVec, eventThresholdByFrequency,
                           classLabels, medianPower, Fs):
    '''
    1st event-finding method (primary event detection method in Shin et
    al. eLife 2017): Find spectral events by first retrieving all local
    maxima in un-normalized TFR using imregionalmax, then selecting
    suprathreshold peaks within the frequency band of interest. This
    method allows for multiple, overlapping events to occur in a given
    suprathreshold region and does not guarantee the presence of
    within-band, suprathreshold activity in any given trial will render
    an event.

    spectralEvents: 12 column matrix for storing local max event metrics:
            trial index,            hit/miss,         maxima frequency,
            lowerbound frequency,     upperbound frequency,
            frequency span,         maxima timing,     event onset timing,
            event offset timing,     event duration, maxima power,
            maxima/median power
    '''
    # Number of elements in discrete frequency spectrum
    flength = TFR.shape[1]
    # Number of points in time
    tlength = TFR.shape[2]
    # Number of trials
    numTrials = TFR.shape[0]

    spectralEvents = []

    # Retrieve all local maxima in TFR using python equivalent of imregionalmax
    for ti in range(numTrials):

        # Get TFR data for this trial [frequency x time]
        thisTFR = TFR[ti, :, :]

        # Find local maxima in the TFR data
        data = thisTFR
        # Find maximum among adjacent pixels (3x3 footprint) for each pixel
        data_max = filters.maximum_filter(data, size=(3, 3))
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, size=(3, 3))
        # Rule out pixels with footprints that have flatlined
        maxima[data_max == data_min] = False
        labeled, num_objects = ndimage.label(maxima)
        xy = np.array(ndimage.center_of_mass(data, labels=labeled,
                                             index=range(1, num_objects + 1)))

        numPeaks = len(xy)

        peakF = []
        peakT = []
        peakPower = []
        for thisXY in xy:
            peakF.append(int(thisXY[0]))
            peakT.append(int(thisXY[1]))
            peakPower.append(thisTFR[peakF[-1], peakT[-1]])

        # Find local maxima lowerbound, upperbound, and full width at half max
        #    for both frequency and time
        Ffwhm = []
        Tfwhm = []
        for lmi in range(numPeaks):
            thisPeakF = peakF[lmi]
            thisPeakT = peakT[lmi]
            thisPeakPower = peakPower[lmi]

            # Indices of TFR frequencies < half max power at the time of a given local peak
            TFRFrequencies = thisTFR[:, thisPeakT]
            lowerInd, upperInd, FWHM = fwhm_lower_upper_bound1(TFRFrequencies,
                                                               thisPeakF, thisPeakPower)
            lowerEdgeFreq = fVec[lowerInd]
            upperEdgeFreq = fVec[upperInd]
            FWHMFreq = FWHM * ( fVec[1] - fVec[0] )

            # Indices of TFR times < half max power at the frequency of a given local peak
            TFRTimes = thisTFR[thisPeakF, :]
            lowerInd, upperInd, FWHM = fwhm_lower_upper_bound1(TFRTimes,
                                                               thisPeakT, thisPeakPower)
            lowerEdgeTime = tVec[lowerInd]
            upperEdgeTime = tVec[upperInd]
            FWHMTime = FWHM / Fs

            # Put peak characteristics to a dictionary
            #        trial index,            hit/miss,         maxima frequency,
            #        lowerbound frequency,     upperbound frequency,
            #        frequency span,         maxima timing,     event onset timing,
            #        event offset timing,     event duration, maxima power,
            #        maxima/median power
            peakParameters = {
                'Trial': ti,
                'Hit/Miss': classLabels[ti],
                'Peak Frequency': fVec[thisPeakF],
                'Lower Frequency Bound': lowerEdgeFreq,
                'Upper Frequency Bound': upperEdgeFreq,
                'Frequency Span': FWHMFreq,
                'Peak Time': tVec[thisPeakT],
                'Event Onset Time': lowerEdgeTime,
                'Event Offset Time': upperEdgeTime,
                'Event Duration': FWHMTime,
                'Peak Power': thisPeakPower,
                'Normalized Peak Power': thisPeakPower / medianPower[thisPeakF],
                'Outlier Event': thisPeakPower > eventThresholdByFrequency[thisPeakF]
            }

            # Build a list of dictionaries
            spectralEvents.append(peakParameters)

    return spectralEvents
Example #39
def r_opening(bin, size):
    return filters.maximum_filter(filters.minimum_filter(bin, size),
                                  size,
                                  origin=-1)
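r_opening is a grayscale opening (a minimum filter followed by a maximum filter); a hypothetical check, assuming the usual `filters` alias:

import numpy as np
from scipy.ndimage import filters

a = np.zeros((5, 5))
a[2, 2] = 1.0                 # an isolated bright speck
print(r_opening(a, 2).max())  # 0.0 -> features smaller than `size` are removed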
ADD_SMALL = False
NUM_SMALL = 0

##### find and store bright regions

REGIONS = []
REGIONS_HPC = []

for i in range(N):

    RAW = DATA["AIA171"][i].data

    MIN_DISTANCE = 1000
    MIN_VALUE = 0.6 * RAW.max()

    data_max = filters.maximum_filter(RAW, MIN_DISTANCE)
    temp = (RAW == data_max)
    data_min = filters.minimum_filter(RAW, MIN_DISTANCE)
    diff = ((data_max - data_min) > MIN_VALUE)
    temp[diff == 0] = 0

    labeled, num_objects = ndimage.label(temp)
    xy_maxima = np.array(
        ndimage.center_of_mass(RAW, labeled, range(1, num_objects + 1)))

    xy_maxima = xy_maxima.astype(int)
    REGIONS.append(xy_maxima)

    if xy_maxima.shape[0] == 0:
        REGIONS_HPC.append(np.array([]))
Example #41
# How to run:
# $ python maxmin.py <input_img> <out_img_max> <out_img_min> <mask_size>
# <mask_size> is an integer, e.g. mask_size=3 gives a 3x3 mask.
# Produces two output images.

import sys
import os
import numpy as np
from scipy.ndimage import filters
import matplotlib.pyplot as plt
from scipy import misc
from skimage import color, data, util


def loadImg(arg):
    return misc.imread(arg)


img = loadImg(sys.argv[1])
saida_min = sys.argv[2] + '_min.tif'
saida_max = sys.argv[3] + '_max.tif'
ms = int(sys.argv[4])

img_filtro_max = filters.maximum_filter(img, size=ms)
img_filtro_min = filters.minimum_filter(img, size=ms)

# imsave returns None, so there is nothing useful to assign here.
misc.imsave(saida_max, img_filtro_max)
misc.imsave(saida_min, img_filtro_min)
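Note that scipy.misc.imread and scipy.misc.imsave were removed from newer SciPy releases, so the script above only runs on an older stack. A sketch of the same pipeline on a current stack, assuming the imageio package is available:

import sys
import imageio.v2 as imageio
from scipy.ndimage import maximum_filter, minimum_filter

img = imageio.imread(sys.argv[1])
ms = int(sys.argv[4])
imageio.imwrite(sys.argv[2] + '_min.tif', minimum_filter(img, size=ms))
imageio.imwrite(sys.argv[3] + '_max.tif', maximum_filter(img, size=ms))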

Example #42
0
# Imports inferred from the calls below (the original snippet omitted them).
import numpy as np
import scipy.stats as st
from scipy.ndimage import filters as scifilt

try:                # Python 2/3 compatibility: the loops below use xrange
    xrange
except NameError:
    xrange = range

def moving_window_atribute(data,atribute='mean',window=(3,3,3),percentile=50,rank=1,clip_limits=(0,1),fisher=True,border_mode='nearest'):
    # border_mode: 'reflect', 'constant', 'nearest', 'mirror' or 'wrap'
    # atribute: minimum, maximum, median, percentile, rank, mean, variance,
    # std, clip, sum, product, peak2peak, signal2noise, skewness, kurtosis
    if atribute == 'minimum':
        atrib = data.copy()
        scifilt.minimum_filter(data,window,None,atrib, mode=border_mode)
        return atrib
    elif atribute == 'maximum':
        atrib = data.copy()
        scifilt.maximum_filter(data,window,None,atrib, mode=border_mode)
        return atrib
    elif atribute == 'median':
        atrib = data.copy()
        scifilt.median_filter(data,window,None,atrib, mode=border_mode)
        return atrib
    elif atribute == 'percentile':
        atrib = data.copy()
        scifilt.percentile_filter(data,percentile,window,None,atrib, mode=border_mode)
        return atrib
    elif atribute == 'rank':
        atrib = data.copy()
        scifilt.rank_filter(data,rank,window,None,atrib, mode=border_mode)
        return atrib
    elif atribute == 'mean':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].mean()
        return atrib
    elif atribute == 'variance':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].var()
        return atrib
    elif atribute == 'std':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].std()
        return atrib
    elif atribute == 'clip':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    m = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].flatten()
                    l0 = np.percentile(m,clip_limits[0])
                    l1 = np.percentile(m,clip_limits[1])
                    m = m.clip(l0, l1)  # ndarray.clip returns a copy; assign it back
                    atrib[i,j,k] = np.percentile(m,50)
        return atrib
    elif atribute == 'sum':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].sum()
        return atrib
    elif atribute == 'product':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].prod()
        return atrib
    elif atribute == 'peak2peak':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    atrib[i,j,k] = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].ptp()
        return atrib
    elif atribute == 'signal2noise':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    m = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].mean()
                    v = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].std()
                    atrib[i,j,k] = m/v
        return atrib
    elif atribute == 'skewness':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    m = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].flatten()
                    v = st.skew(m)
                    atrib[i,j,k] = v
        return atrib
    elif atribute == 'kurtosis':
        atrib = data.copy()
        blocks = atrib.shape
        for i in xrange(blocks[0]):
            for j in xrange(blocks[1]):
                for k in xrange(blocks[2]):
                    m = data[np.clip(i-window[0],0,blocks[0]):i+window[0]+1,np.clip(j-window[1],0,blocks[1]):j+window[1]+1,np.clip(k-window[2],0,blocks[2]):k+window[2]+1].flatten()
                    v = st.kurtosis(m,fisher=fisher)
                    atrib[i,j,k] = v
        return atrib
    else:
        return False
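For the attributes computed with explicit triple loops above ('mean', 'std', 'skewness', and so on), scipy.ndimage.generic_filter applies an arbitrary reduction over a moving window and is usually both shorter and faster than Python loops. A sketch, not the original code; note that generic_filter takes the full window size per axis, whereas the loops above use half-widths, so a loop half-width w corresponds to size 2*w + 1 here:

import numpy as np
import scipy.stats as st
from scipy.ndimage import generic_filter

def moving_window_generic(data, atribute='mean', window=(3, 3, 3)):
    # window is the full window size per axis, e.g. (3, 3, 3) for a 3x3x3 cube.
    funcs = {
        'mean': np.mean, 'variance': np.var, 'std': np.std,
        'sum': np.sum, 'product': np.prod, 'peak2peak': np.ptp,
        'skewness': st.skew, 'kurtosis': st.kurtosis,
    }
    return generic_filter(data, funcs[atribute], size=window, mode='nearest')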
Example #44
0
    def compute_colseps_conv(self, binary, scale=1.0):
        """Find column separators by convoluation and
        thresholding."""
        h, w = binary.shape

        # find vertical whitespace by thresholding
        smoothed = gaussian_filter(1.0 * binary, (scale, scale * 0.5))
        smoothed = uniform_filter(smoothed, (5.0 * scale, 1))
        thresh = (smoothed < np.amax(smoothed) * 0.1)

        # find column edges by filtering
        grad = gaussian_filter(1.0 * binary, (scale, scale * 0.5),
                               order=(0, 1))
        grad = uniform_filter(grad, (10.0 * scale, 1))
        grad = (grad > 0.25 * np.amax(grad))
        grad1 = morph.select_regions(grad,
                                     sl.dim0,
                                     min=self.parameter['csminheight'] * scale,
                                     nbest=self.parameter['maxcolseps'] + 10)

        x = (1 - thresh) * (1 - grad1)
        thresh11 = (1 - thresh) * x

        for r in range(0, len(thresh11)):
            count = 0
            for c in range(0, len(thresh11[0])):
                if (thresh11[r][c] == 1):
                    continue
                count += 1
                if (c != len(thresh11[0]) - 1 and thresh11[r][c + 1] == 1):
                    if (count <= 50):
                        for z in range(c - count, c + 1):
                            thresh11[r][z] = 1
                    count = 0

        y = 1 - (thresh11 * (1 - thresh))

        # combine edges and whitespace
        seps = np.minimum(thresh,
                          maximum_filter(grad, (int(scale), int(5 * scale))))
        seps = maximum_filter(seps, (int(2 * scale), 1))

        h, w = seps.shape
        smoothed = gaussian_filter(1.0 * seps, (scale, scale * 0.5))
        smoothed = uniform_filter(smoothed, (5.0 * scale, 1))
        seps1 = (smoothed < np.amax(smoothed) * 0.1)
        seps1 = 1 - seps1

        seps1 = (grad) * seps1

        for c in range(0, len(seps1[0])):
            count = 0
            for r in range(0, len(seps1)):
                if (seps1[r][c] == 1):
                    continue
                count += 1
                if (r != len(seps1) - 1 and seps1[r + 1][c] == 1):
                    if (count <= 400):  # by making it 300 u can improve
                        for z in range(r - count, r + 1):
                            seps1[z][c] = 1
                    count = 0

        seps1 = morph.select_regions(seps1,
                                     sl.dim0,
                                     min=self.parameter['csminheight'] * scale,
                                     nbest=self.parameter['maxcolseps'] + 10)
        seps1 = (seps1 * (1 - y)) + seps1
        for c in range(0, len(seps1[0])):
            for r in range(0, len(seps1)):
                if (seps1[r][c] != 0):
                    seps1[r][c] = 1

        for c in range(0, len(seps1[0])):
            count = 0
            for r in range(0, len(seps1)):
                if (seps1[r][c] == 1):
                    continue
                count += 1
                if (r != len(seps1) - 1 and seps1[r + 1][c] == 1):
                    if (count <= 350):
                        for z in range(r - count, r + 1):
                            seps1[z][c] = 1
                    count = 0

        return seps1
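The three run-filling loops above (count <= 50, 400, 350) close short gaps of zeros along one axis of a binary map. Roughly the same effect can be had with a one-dimensional binary closing; a sketch, not an exact drop-in (border handling and the requirement that a run be bounded by ones differ in detail):

import numpy as np
from scipy.ndimage import binary_closing

def fill_short_gaps(mask, max_gap, axis=0):
    # Close runs of zeros up to roughly max_gap pixels long along the given axis.
    length = max_gap + 1
    structure = np.ones((length, 1) if axis == 0 else (1, length), dtype=bool)
    return binary_closing(mask.astype(bool), structure=structure)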
Example #45
0
def spot_mask(I, threshold=5):
    """Generate a "spot mask" for a diffraction image with Bragg spots. 
    I: 2D nummpy array of type uint16
    return value: 2D numpy array the float with the same dimensions as I.
    Pixels the are part of a spot have the value 1.
    All other pixle have the value 0.
    threshold: peak detection threshold as multiple of sigma
    """
    from numpy import float32, shape, sum, sqrt, array
    from pylab import seed, random_sample
    from scipy.ndimage.filters import correlate, maximum_filter, median_filter

    # Subtract 10 count offset from active area of image.
    I = I.astype(float32)  # numpy.cast was removed in NumPy 2.0
    I -= 10 * (I > 0)

    # 13 July 2014; mask beam passing through beam attenuator.
    #I[490:502,490:502] = 0. # 13 July 2014
    I[489:501, 485:497] = 0.  # 25 Oct 2014

    # Add random numbers to eliminate identical values.
    seed([1])
    I += (random_sample(shape(I)) - 0.5) / 10

    # Generate kernels for image filters.
    footprint0 = [[0,1,1,1,0],\
                  [1,1,1,1,1],\
                  [1,1,1,1,1],\
                  [1,1,1,1,1],\
                  [0,1,1,1,0]]
    N0 = sum(footprint0)
    footprint0 = array(footprint0)
    weights0 = footprint0 * 1. / N0

    footprint1 = [[1,1,1],\
                  [1,1,1],\
                  [1,1,1]]
    footprint1 = array(footprint1)
    N1 = sum(footprint1)
    weights1 = footprint1 * 1. / N1

    footprint2 = [[0,1,1,1,0],\
                  [1,0,0,0,1],\
                  [1,0,0,0,1],\
                  [1,0,0,0,1],\
                  [0,1,1,1,0]]
    footprint2 = array(footprint2)
    N2 = sum(footprint2)
    weights2 = footprint2 * 1. / N2

    footprint3 = [[0,0,1,1,1,0,0],\
                  [0,1,0,0,0,1,0],\
                  [1,0,0,0,0,0,1],\
                  [1,0,0,0,0,0,1],\
                  [1,0,0,0,0,0,1],\
                  [0,1,0,0,0,1,0],\
                  [0,0,1,1,1,0,0]]
    footprint3 = array(footprint3)
    N3 = sum(footprint3)
    weights3 = footprint3 * 1. / N3

    # Find spots and generate S_mask.
    S1 = correlate(I, weights1)
    S3 = median_filter(I, footprint=footprint3)
    I_max = maximum_filter(I, footprint=footprint0)
    S_mask = (I >= I_max) & ((S1 - S3) / sqrt(S1 / N1 + S3 / N3) > threshold)
    N_spots = sum(S_mask)
    S_mask = correlate(S_mask, footprint0)

    # Zero the first and last three rows to correct for filter edge effects.
    S_mask[0:3, :] = False   # top edge
    S_mask[-3:, :] = False   # bottom edge
    return S_mask
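The detection test above is a significance test: S1 is the local mean over the 3x3 footprint, S3 the median over a surrounding ring, and a pixel is kept when it is a local maximum and (S1 - S3) exceeds threshold times the estimated standard error sqrt(S1/N1 + S3/N3), assuming Poisson-like counting noise. A minimal usage sketch on synthetic data (shapes and values are placeholders, not from the original):

import numpy as np

frame = np.full((1024, 1024), 20, dtype=np.uint16)   # flat background
frame[300:303, 400:403] = 500                        # one fake Bragg spot
mask = spot_mask(frame, threshold=5)
print(mask.sum())    # number of pixels flagged around the spot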
Example #46
0
def find_local_peaks(image,
                     min_distance,
                     threshold=0,
                     local_threshold=0,
                     exclude_border=0,
                     exclude_adjacent=False):
    """Return peaks in an image as a Points object.
    
    Peaks are the local maxima in a region of `2 * min_distance + 1`
    (i.e. peaks are separated by at least `min_distance`).
    
    A maximum filter is used for finding local maxima. This operation dilates 
    the original image. After comparison of the dilated and original image, 
    this function returns the coordinates or a mask of the peaks where the 
    dilated image equals the original image.
    
    Parameters
    ----------
    image : ndarray
        Input image.
    min_distance : int
        Minimum number of pixels separating peaks in a region of `2 *
        min_distance + 1` (i.e. peaks are separated by at least
        `min_distance`).
        To find the maximum number of peaks, use `min_distance=1`.
    threshold : float, optional
        Minimum relative intensity of peaks. By default, the threshold 
        is zero.
    local_threshold : float, optional
        Minimum local relative intensity of peaks. A minimum filter is used 
        for finding the baseline for comparing the local intensity.
        By default, the local threshold is zero.
    exclude_border : int, optional
        If nonzero, `exclude_border` excludes peaks from
        within `exclude_border`-pixels of the border of the image.
    exclude_adjacent : bool, optional
        In case of flat peaks (i.e. multiple adjacent pixels have 
        identical intensities), if true, only the mean pixel position 
        will be returned.
    
    """

    image = image.astype(np.float32)

    threshold = image.min() + threshold * (image.max() - image.min())

    max_filt = maximum_filter(image, min_distance)

    is_peak = (image == max_filt)

    is_peak[image < threshold] = False

    if local_threshold > 0:
        local_threshold = image.min() + local_threshold * (image.max() -
                                                           image.min())
        min_filt = minimum_filter(image, min_distance)
        is_peak[(max_filt - min_filt) < local_threshold] = False

    if exclude_border:
        is_peak[0:exclude_border + 1, :] = False
        is_peak[:, 0:exclude_border + 1] = False
        is_peak[-1:-exclude_border - 1:-1, :] = False
        is_peak[:, -1:-exclude_border - 1:-1] = False

    if exclude_adjacent:
        labels = label(is_peak)
        peaks = center_of_mass(np.ones_like(labels[0]), labels[0],
                               range(1, labels[1] + 1))
        peaks = np.array(peaks)
    else:
        peaks = np.array(np.where(is_peak)).T

    return peaks
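A usage sketch for find_local_peaks on a synthetic image with two Gaussian blobs (this assumes the module-level imports the example relies on: numpy as np, maximum_filter, minimum_filter, and scipy.ndimage's label and center_of_mass):

import numpy as np

yy, xx = np.mgrid[0:100, 0:100]
img = (np.exp(-((yy - 30) ** 2 + (xx - 30) ** 2) / 50.0)
       + np.exp(-((yy - 70) ** 2 + (xx - 60) ** 2) / 50.0))
peaks = find_local_peaks(img, min_distance=10, threshold=0.5)
print(peaks)   # approximately [[30 30], [70 60]] in (row, column) order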
def get_interest_points(image, feature_width, alpha=0.1, top=1500):
    """
    Implement the Harris corner detector (See Szeliski 4.1.1) to start with.
    You can create additional interest point detector functions (e.g. MSER)
    for extra credit.

    If you're finding spurious interest point detections near the boundaries,
    it is safe to simply suppress the gradients / corners near the edges of
    the image.

    Useful in this function in order to (a) suppress boundary interest
    points (where a feature wouldn't fit entirely in the image, anyway)
    or (b) scale the image filters being used. Or you can ignore it.

    By default you do not need to make scale and orientation invariant
    local features.

    The lecture slides and textbook are a bit vague on how to do the
    non-maximum suppression once you've thresholded the cornerness score.
    You are free to experiment. For example, you could compute connected
    components and take the maximum value within each component.
    Alternatively, you could run a max() operator on each sliding window. You
    could use this to ensure that every interest point is at a local maximum
    of cornerness.

    Args:
    -   image: A numpy array of shape (m,n,c),
                image may be grayscale or color (your choice)
    -   feature_width: integer representing the local feature width in pixels.

    Returns:
    -   x: A numpy array of shape (N,) containing x-coordinates of interest points
    -   y: A numpy array of shape (N,) containing y-coordinates of interest points
    -   confidences (optional): numpy nd-array of dim (N,) containing the strength
            of each interest point
    -   scales (optional): A numpy array of shape (N,) containing the scale at each
            interest point
    -   orientations (optional): A numpy array of shape (N,) containing the orientation
            at each interest point
    """
    confidences, scales, orientations = None, None, None
    #############################################################################
    # TODO: YOUR HARRIS CORNER DETECTOR CODE HERE                                                      #
    #############################################################################

    # Define Gaussian Filter and alpha factor for cornerness R function
    gaussian = cv2.getGaussianKernel(ksize=5, sigma=5)
    gaussian = gaussian.dot(gaussian.T)

    # First, calculate the derivatives Ix, Iy with the Sobel operator
    Ix = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=5)  # x
    Iy = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=5)  # y

    # Second, form the second-moment products and smooth them with the Gaussian filter
    Ix2, Ixy, Iy2 = Ix**2, Ix * Iy, Iy**2
    gIx2 = cv2.filter2D(Ix2, -1, gaussian)
    gIxy = cv2.filter2D(Ixy, -1, gaussian)
    gIy2 = cv2.filter2D(Iy2, -1, gaussian)

    # Calculate the R-function
    R = gIx2 * gIy2 - gIxy**2 - alpha * (gIx2 + gIy2)**2

    # zero-padding and non-maximum suppression
    R = R * (R > 0.001 * R.max())
    R[:feature_width // 2, :] = 0
    R[-feature_width // 2:, :] = 0
    R[:, :feature_width // 2] = 0
    R[:, -feature_width // 2:] = 0

    R = R * (R == maximum_filter(R, (11, 11)))

    # Sorted the indexes by R-value

    indx, indy = np.unravel_index(np.argsort(-R, axis=None), R.shape)
    max_len = np.sum((R > 0))  #length of non-zero elements in R
    indx, indy = indx[:max_len], indy[:max_len]

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################

    #############################################################################
    # TODO: YOUR ADAPTIVE NON-MAXIMAL SUPPRESSION CODE HERE                     #
    # While most feature detectors simply look for local maxima in              #
    # the interest function, this can lead to an uneven distribution            #
    # of feature points across the image, e.g., points will be denser           #
    # in regions of higher contrast. To mitigate this problem, Brown,           #
    # Szeliski, and Winder (2005) only detect features that are both            #
    # local maxima and whose response value is significantly (10%)              #
    # greater than that of all of its neighbors within a radius r. The          #
    # goal is to retain only those points that are a maximum in a               #
    # neighborhood of radius r pixels. One way to do so is to sort all          #
    # points by the response strength, from large to small response.            #
    # The first entry in the list is the global maximum, which is not           #
    # suppressed at any radius. Then, we can iterate through the list           #
    # and compute the distance to each interest point ahead of it in            #
    # the list (these are pixels with even greater response strength).          #
    # The minimum of distances to a keypoint's stronger neighbors               #
    # (multiplying these neighbors by >=1.1 to add robustness) is the           #
    # radius within which the current point is a local maximum. We              #
    # call this the suppression radius of this interest point, and we           #
    # save these suppression radii. Finally, we sort the suppression            #
    # radii from large to small, and return the n keypoints                     #
    # associated with the top n suppression radii, in this sorted               #
    # order. Feel free to experiment with n, we used n=1500.                    #
    #                                                                           #
    # See:                                                                      #
    # https://www.microsoft.com/en-us/research/wp-content/uploads/2005/06/cvpr05.pdf
    # or                                                                        #
    # https://www.cs.ucsb.edu/~holl/pubs/Gauglitz-2011-ICIP.pdf                 #
    #############################################################################

    def adaptive_non_maximal_supression(indx, indy, R, top=1500, max_len=3000):
        '''
            Input:
                indx, indy: coordinates of interest points
                R: corner function
            Output:
                sorted(indx, indy) by the criterion of adaptive non-maximal suppression
        '''
        indx, indy = indx[:max_len], indy[:max_len]
        X = np.c_[indx, indy]  #(max_len, 2)

        # Find out the distance_0p9 as the minimum distance from one point (x,y)
        # to the next point (x_new,y_new) with R(x_new,y_new)>0.9*R(x,y)
        distance_0p9 = []

        for i, (x, y) in enumerate(zip(indx, indy)):
            corner_0p9 = 0.9 * R[x, y]  # threshold value = 0.9*R(x,y)

            # calculate the distance from every other point (x_new,y_new) to this point (x,y) (i.e. X[i])
            distance_i = np.linalg.norm(
                X - X[i], axis=1
            )  # (max_len, 2) - (2,) = (max_len, 2) --> norm, axis = 0 --> (max_len,)

            # Filter out the points (x_new,y_new) where R(x_new,y_new) > corner_0p9
            distance_i_with_significant_R = np.sort(
                distance_i[R[indx, indy] >= corner_0p9])

            # We look for the closest point with R(x_new,y_new)> 0.9*R(x,y) but not the point (x,y)
            min_distance = distance_i_with_significant_R[1] if len(
                distance_i_with_significant_R) > 1 else float('inf')

            distance_0p9.append([x, y, min_distance])

        #Sort this distance_0p9 from the min_distance
        distance_0p9.sort(key=lambda x: x[2], reverse=True)

        # Return the (x,y) coordinates
        distance_0p9 = np.array(distance_0p9[:top])
        return distance_0p9[:, 0], distance_0p9[:, 1]

    # Reverse y, x for the image coordinates
    y, x = adaptive_non_maximal_supression(indx, indy, R, top=top)

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return x.astype(int), y.astype(int), confidences, scales, orientations
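A usage sketch for get_interest_points (assumes cv2, numpy as np, and maximum_filter are imported at module level as this example requires; the image path is a placeholder):

import cv2
import numpy as np

gray = cv2.imread('checkerboard.png', cv2.IMREAD_GRAYSCALE)
gray = gray.astype(np.float32) / 255.0
x, y, confidences, scales, orientations = get_interest_points(gray, feature_width=16)
print(len(x), 'interest points; first few:', list(zip(x[:5], y[:5])))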
Example #48
0
def mine(detector, transform, groundtruth='groundtruth', wpos=25,
        hard_negatives=True, wneg=5, easiest=0, hardest=-2,
        random_negatives=5):
    """Mine validation data for positives and hard negatives

    Args:
        detector: The detector which produces raw likelihood estimates
        transform: The feature transform to apply to the images

    Returns:
        X,Y: The likelihood produced by the detector, and the true label
    """
    sources = sorted(glob.glob(os.path.join(groundtruth, '*_source.*')))
    targets = sorted(glob.glob(os.path.join(groundtruth, '*_target.*')))
    matches = sorted(glob.glob(os.path.join(groundtruth, '*_correspondences.npy')))
    X,Y = [],[]

    for source, target, match in zip(sources, targets, matches):

        # load the images and compute the feature transform
        I1 = imread(source)
        I2 = imread(target)
        F1 = transform(I1)
        F2 = transform(I2)
        (M,N),(P,Q) = F1.shape[:2], F2.shape[:2]
        (R,S),(T,U) = I1.shape[:2], I2.shape[:2]
        W = int(np.round(max(P / wpos, Q / wpos)))  # int so it can be used in slices
        #print W

        # load and rescale the matches to feature scale space (integer indices)
        match = np.load(match)
        match = (match / ((S, R), (U, T)) * ((N, M), (Q, P))).round().astype(int)

        # compute the likelihoods
        scores  = detector(F1,F2)
        if hard_negatives:
            maximum = maximum_filter(scores, (1, 1, int(P / wneg), int(Q / wneg)))  # cast to int for Python 3
            maximum = (scores == maximum)

        # iterate over the groundtruth labels
        for (x1,y1),(x2,y2) in match:
            score = scores[y1,x1]

            # positive
            pos = score[max(0,y2-W):min(y2+W,P),max(0,x2-W):min(x2+W,Q)].max()
            X.append(pos)
            Y.append(1.0)

            # hard negatives
            if hard_negatives:
                p,q   = maximum[y1,x1].nonzero()
                hard  = np.sort(np.unique(score[p,q]))
                neg   = hard[easiest:hardest]
                label = -np.ones_like(neg)
                X.extend(neg)
                Y.extend(label)

            # random negatives
            if random_negatives:
                neg = np.random.choice(score.flat, random_negatives)
                label = -np.ones_like(neg)
                X.extend(neg)
                Y.extend(label)

    # convert the lists to arrays
    return np.array(X), np.array(Y)
Example #49
0
def extrema(mat, mode='wrap', window=75):
    mn = minimum_filter(mat, size=window, mode=mode)
    mx = maximum_filter(mat, size=window, mode=mode)
    return np.nonzero(mat == mn), np.nonzero(mat == mx)
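A usage sketch for extrema on a 1-D signal (assumes minimum_filter and maximum_filter are imported as this example requires):

import numpy as np

t = np.linspace(0, 4 * np.pi, 500)
sig = np.sin(t)
local_min, local_max = extrema(sig, mode='wrap', window=75)
print(t[local_min[0]])   # positions of windowed minima
print(t[local_max[0]])   # positions of windowed maxima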
Example #50
0
    # PluginRegistry through use of the macro REGISTER_TENSORRT_PLUGIN present
    # in the plugin implementation. Refer to plugin/clipPlugin.cpp for more details.
    if not os.path.isfile(CLIP_PLUGIN_LIBRARY):
        raise IOError("\n{}\n{}\n{}\n".format(
            "Failed to load library ({}).".format(CLIP_PLUGIN_LIBRARY),
            "Please build the Clip sample plugin.",
            "For more information, see the included README.md"))
    ctypes.CDLL(CLIP_PLUGIN_LIBRARY)

    # Load pretrained model
    if not os.path.isfile(MODEL_PATH):
        raise IOError("\n{}\n{}\n{}\n".format(
            "Failed to load model file ({}).".format(MODEL_PATH),
            "Please use 'python lenet5.py' to train and save the model.",
            "For more information, see the included README.md"))

    # Build an engine and retrieve the image mean from the model.
    with build_engine(MODEL_PATH) as engine:
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)

    trt_rok = gs.create_plugin_node(name="trt_rok",
                                    op="RegionOfKeypoints_TRT",
                                    region_shape=5)


if __name__ == "__main__":
    # main()
    import numpy as np
    from scipy.ndimage.filters import maximum_filter
    maximum_filter(np.ones([1, 10, 10]),
                   footprint=[[[0, 1, 0], [1, 1, 1], [0, 1, 0]]])
Example #51
0
# Imports inferred from the calls below (the original snippet omitted them).
import hashlib
from operator import itemgetter

from matplotlib import mlab
from numpy import asarray, vstack, where
from scipy.ndimage import (binary_erosion, generate_binary_structure,
                           iterate_structure, maximum_filter)


def digest(rate, data):
    n = data.shape[0]
    d = n / float(rate)  # duration in seconds (currently unused)

    window = 2048

    overlap = 0.5

    pair_number = 10

    min_max = 10

    neig_number = 10

    region_t = 100
    region_f = 100

    hash_keep = 24

    spectrum = mlab.specgram(data[:, 1],
                             NFFT=window,
                             Fs=rate,
                             window=mlab.window_hanning,
                             noverlap=int(window * overlap))

    #spec=asarray(spec_1,spec_2,spec_3)

    spec_data = asarray(spectrum[0])
    #print spec_data.shape

    struct = generate_binary_structure(2, 1)
    #print struct
    neighborhood = iterate_structure(struct, neig_number)
    #print neighborhood
    local_max = maximum_filter(spec_data, footprint=neighborhood) == spec_data
    #print maximum_filter(spec_data, footprint=neighborhood)
    background = (spec_data == 0)
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    #print eroded_background
    detected_peaks = local_max ^ eroded_background  # XOR: the eroded background is also True at the peaks; boolean '-' is rejected by modern NumPy

    the_peaks = spec_data[detected_peaks]

    #print detected_peaks.shape
    p_row, p_col = where(detected_peaks)

    peaks = vstack((p_row, p_col, the_peaks))

    real_peaks = peaks[:, peaks[2, :] > min_max]

    f_index = real_peaks[0, :]
    t_index = real_peaks[1, :]

    # zip() returns an iterator on Python 3, so sort via sorted() instead of list.sort()
    star = sorted(zip(f_index, t_index), key=itemgetter(1))
    star = asarray(star).T
    #print star

    star_leng = star.shape[1]
    store = list()
    for i in range(star_leng):
        for j in range(1, neig_number):
            if (i + j) < star_leng and (
                    star[1, (i + j)] - star[1, i]) < region_t and abs(
                        (star[0, (i + j)] - star[0, i])) < region_f:
                f1 = star[0, i]
                f2 = star[0, (i + j)]
                t = star[1, i]
                t_diff = star[1, (i + j)] - star[1, i]

                hass = hashlib.sha1(("%s|%s|%s" %
                                     (str(f1), str(f2), str(t_diff))).encode())

                this_hash = [hass.hexdigest()[0:hash_keep], t]
                store.append(this_hash)

    return store
arr2D,arr2D_freqs,arr2D_t = mlab.specgram(samples,
                      NFFT=4096,
                      Fs=44100,
                      window=mlab.window_hanning,
                      noverlap=int(4096 * 0.5))
deltaT = arr2D_t[1] - arr2D_t[0]
deltaF = arr2D_freqs[1] - arr2D_freqs[0]

arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0

struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, 20)
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,border_value=1)
detected_peaks = local_max ^ eroded_background
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > 10] # freq, time, amp
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]

### plots
#fig, ax = plt.subplots(num=1,clear=True)
#ax.imshow(arr2D)
#ax.scatter(time_idx, frequency_idx, s=4**2)
Example #53
0
def plot_bounding_box(labels, heatmaps, org, save_path):
    import numpy as np
    import scipy.ndimage as ndimage
    import scipy.ndimage.filters as filters
    from scipy.ndimage.morphology import binary_dilation
    # img_width, img_height = 224, 224
    img_width, img_height = 7, 7

    crop_del = 0
    rescale_factor = 32
    class_index = ['pneumonia', 'normal', 'COVID-19']
    avg_size = np.array([[502.4, 458.7, 400.0, 400.0], [411.8, 512.5, 400.0, 400.0],
                         [434.3, 366.7, 400.0, 400.0]])
    avg_size = avg_size / 4

    '''
    class_index = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
                   'Pneumothorax']
    avg_size = np.array([[411.8, 512.5, 219.0, 139.1], [348.5, 392.3, 479.8, 381.1],
                         [396.5, 415.8, 221.6, 318.0], [394.5, 389.1, 294.0, 297.4],
                         [434.3, 366.7, 168.7, 189.8], [502.4, 458.7, 71.9, 70.4],
                         [378.7, 416.7, 276.5, 304.5], [369.3, 209.4, 198.9, 246.0]])
    '''
    bbox = []
    cnt = 0
    for label, heatmap in zip(labels, heatmaps):
        label = label[0]
        print(heatmap.shape)
        data = heatmap
        # data = heatmap.reshape(-1, img_width, img_height)

        # output the average box size for this class
        prediction_sent = '%s %.1f %.1f %.1f %.1f' % (
            class_index[label], avg_size[label][0], avg_size[label][1], avg_size[label][2], avg_size[label][3])

        if np.isnan(data).any():
            continue

        # w_k, h_k = (avg_size[label][2:4] * (256 / 1024)).astype(np.int)
        w_k, h_k = (avg_size[label][2:4] * 1/32).astype(float)  # np.float was removed from NumPy

        # Find local maxima
        neighborhood_size = 100
        threshold = .1

        data_max = filters.maximum_filter(data, neighborhood_size)
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, neighborhood_size)
        diff = ((data_max - data_min) > threshold)
        maxima[diff == 0] = 0
        for _ in range(5):
            maxima = binary_dilation(maxima)

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        xy = np.array(ndimage.center_of_mass(data, labeled, range(1, num_objects + 1)))
        print(xy)

        for pt in xy:
            upper = max(pt[0] - (h_k / 2), 0.)
            left = max(pt[1] - (w_k / 2), 0.)

            right = min(left + w_k, img_width)
            lower = min(upper + h_k, img_height)

            prediction_sent = '%s %.1f %.1f %.1f %.1f' % (class_index[label], (left + crop_del) * rescale_factor,
                                                          (upper + crop_del) * rescale_factor,
                                                          (right - left) * rescale_factor,
                                                          (lower - upper) * rescale_factor)
            bbox.append([class_index[label], (left + crop_del) * rescale_factor,
                         (upper + crop_del) * rescale_factor,
                         (right - left) * rescale_factor,
                         (lower - upper) * rescale_factor])
            print(bbox[-1])

            x = mix_heatmap(heatmap, org, bbox[-1])
            x.save('{}/{}_with_bbox.jpg'.format(save_path, cnt))
            cnt += 1
Example #54
0
    def __init__(self,
                 X,
                 Y,
                 n_rays,
                 grid,
                 batch_size,
                 patch_size,
                 use_gpu=False,
                 maxfilter_cache=True,
                 maxfilter_patch_size=None,
                 augmenter=None):

        X = [x.astype(np.float32, copy=False) for x in X]
        # Y = [y.astype(np.uint16,  copy=False) for y in Y]

        # sanity checks
        assert len(X) == len(Y) and len(X) > 0
        nD = len(patch_size)
        assert nD in (2, 3)
        x_ndim = X[0].ndim
        assert x_ndim in (nD, nD + 1)
        assert all(
            y.ndim == nD and x.ndim == x_ndim and x.shape[:nD] == y.shape
            for x, y in zip(X, Y))
        if x_ndim == nD:
            self.n_channel = None
        else:
            self.n_channel = X[0].shape[-1]
            assert all(x.shape[-1] == self.n_channel for x in X)

        self.X, self.Y = X, Y
        self.batch_size = batch_size
        self.n_rays = n_rays
        self.patch_size = patch_size
        self.ss_grid = (slice(None), ) + tuple(slice(0, None, g) for g in grid)
        self.perm = np.random.permutation(len(self.X))
        self.use_gpu = bool(use_gpu)
        if augmenter is None:
            augmenter = lambda *args: args
        callable(augmenter) or _raise(
            ValueError("augmenter must be None or callable"))
        self.augmenter = augmenter

        if self.use_gpu:
            from gputools import max_filter
            self.max_filter = lambda y, patch_size: max_filter(
                y.astype(np.float32), patch_size)
        else:
            from scipy.ndimage.filters import maximum_filter
            self.max_filter = lambda y, patch_size: maximum_filter(
                y, patch_size, mode='constant')

        self.maxfilter_patch_size = maxfilter_patch_size if maxfilter_patch_size is not None else self.patch_size

        if maxfilter_cache:
            self.R = [
                self.no_background_patches((y, x))
                for x, y in zip(self.X, self.Y)
            ]
        else:
            self.R = None
Example #55
0
    def _detect(self, im):

        mat = im.asMatrix2D()

        levels = []
        while mat.shape[0] > self.min_size and mat.shape[1] > self.min_size:
            levels.append(mat)
            mat = zoom(mat, 0.5)

        gaussians = []
        k = 2.0**(1.0 / self.scales)
        for level in levels:
            gs = []
            sigma = self.sigma
            for _ in range(self.scales + 3):
                g = gaussian_filter(level, sigma)
                gs.append(g)
                sigma = k * sigma
            gaussians.append(gs)

        dogs = []
        for gs in gaussians:
            ds = []
            for i in range(len(gs) - 1):
                d = gs[i] - gs[i + 1]
                ds.append(d)
            ds = array(ds)
            dogs.append(ds)
        #dogs = array(dogs,'d')

        #points = []
        sigma = 2 * k * self.sigma  # approx 95% bounds
        extrema = []
        scale = 1
        for ds in dogs:
            # find extrema
            mins = minimum_filter(ds, (3, 3, 3))
            maxs = maximum_filter(ds, (3, 3, 3))

            # Find the extrema but not on the edges of the image
            minima = nonzero(mins[1:-1, 1:-1, 1:-1] == ds[1:-1, 1:-1, 1:-1])
            maxima = nonzero(maxs[1:-1, 1:-1, 1:-1] == ds[1:-1, 1:-1, 1:-1])

            for i in range(len(minima[0])):
                # Correct for removing the edges in the previous step
                s = minima[0][i] + 1
                x = minima[1][i] + 1
                y = minima[2][i] + 1

                # Get a 3x3x3 block around the extremum
                block = ds[s - 1:s + 2, x - 1:x + 2, y - 1:y + 2]
                params = TaylorFit(block)
                ts, tx, ty = TaylorSubpixel(params)
                td = TaylorApprox(params, ts, tx, ty)
                ts -= 1.0
                tx -= 1.0
                ty -= 1.0

                # Only select extrema with high contrast
                if abs(td) < self.min_contrast: continue

                # Check the ratio of the principal curvatures (see Lowe 2004, Sec. 4.1):
                Dxx = 2 * params[1]
                Dyy = 2 * params[2]
                Dxy = params[3]
                TrH = Dxx + Dyy
                DetH = Dxx * Dyy - Dxy * Dxy

                r = self.max_curvature_ratio
                if DetH < 0:
                    continue  # discard because curvatures have different signs
                if r * TrH > DetH * (r + 1) * (r + 1):
                    continue  # Ratio of curvatures is greater than r

                if not (-1.0 < tx and tx < 1.0 and -1.0 < ty and ty < 1.0):
                    continue

                extrema.append([
                    -td, scale * (x + tx), scale * (y + ty),
                    (k**((s + ts) - 1)) * sigma
                ])

            for i in range(len(maxima[0])):
                s = maxima[0][i] + 1
                x = maxima[1][i] + 1
                y = maxima[2][i] + 1

                # Get a 3x3x3 block around the extremum
                block = ds[s - 1:s + 2, x - 1:x + 2, y - 1:y + 2]
                params = TaylorFit(block)
                ts, tx, ty = TaylorSubpixel(params)
                td = TaylorApprox(params, ts, tx, ty)
                ts -= 1.0
                tx -= 1.0
                ty -= 1.0

                # Only select extrema with high contrast
                if abs(td) < self.min_contrast: continue

                # Check the ratio of the principal curvatures (see Lowe 2004, Sec. 4.1):
                Dxx = 2 * params[1]
                Dyy = 2 * params[2]
                Dxy = params[3]
                TrH = Dxx + Dyy
                DetH = Dxx * Dyy - Dxy * Dxy

                r = self.max_curvature_ratio
                if DetH < 0:
                    continue  # discard because curvatures have different signs
                if r * TrH > DetH * (r + 1) * (r + 1):
                    continue  # Ratio of curvatures is greater than r

                if not (-1.0 < tx and tx < 1.0 and -1.0 < ty and ty < 1.0):
                    continue

                extrema.append([
                    td, scale * (x + tx), scale * (y + ty),
                    (k**((s + ts) - 1)) * sigma
                ])

            sigma = (k**2.0) * sigma
            scale *= 2

        return extrema
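One caveat about the curvature test above: it compares r * TrH against DetH * (r + 1)**2, whereas Lowe (2004, Sec. 4.1) formulates the edge rejection on the squared trace, TrH**2 / DetH < (r + 1)**2 / r. It is not clear whether the deviation is intentional here; for reference, a sketch of the paper's version of the test:

def passes_curvature_test(Dxx, Dyy, Dxy, r=10.0):
    # Lowe (2004), Sec. 4.1: reject edge-like extrema whose ratio of
    # principal curvatures exceeds r. Returns True when the point is kept.
    tr_h = Dxx + Dyy
    det_h = Dxx * Dyy - Dxy * Dxy
    if det_h <= 0:
        return False   # curvatures have different signs: reject
    return tr_h * tr_h * r < det_h * (r + 1) ** 2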
Example #56
0
if filter == 1:
    print("Value of radius: ")
    radius = int(input())

for i, imagePath in enumerate(imagePaths):

    gray = read_image(imagePath)
    gray = img_as_float(gray)

    if filter == 1:
        blur_image = gaussian_filter(gray, sigma=radius)
    elif filter == 2:
        blur_image = median_filter(gray, size=20)
    elif filter == 3:
        blur_image = maximum_filter(gray, size=20)
    else:
        blur_image = minimum_filter(gray, size=20)

    mask = gray - blur_image  #to keep the edges

    sharp_image = gray + mask * amount
    sharp_image = np.clip(sharp_image, float(0), float(1))
    sharp_image = (sharp_image * 255).astype(np.uint8)

    output_image = sharp_image

    cv2.imshow('Input Image', gray)
    cv2.imshow('Output Image', output_image)
    cv2.imwrite('Output_image_{}_{}.jpg'.format(filter, i), output_image)
Example #57
0
def resegment(line_polygon,
              region_labels,
              region_bin,
              line_id,
              extend_margins=3,
              threshold_relative=0.8,
              threshold_absolute=50):
    """Reduce line polygon in a labelled region to the largest intersection.

    Given a Numpy array ``line_polygon`` of relative coordinates
    in a region given by a Numpy array ``region_labels`` of numbered
    segments and a Numpy array ``region_bin`` of foreground pixels,
    find the label of the largest segment that intersects the polygon.
    If the number of foreground pixels within that segment is larger
    than ``threshold_absolute`` and if the share of foreground pixels
    within the whole polygon is larger than ``threshold_relative``,
    then compute the contour of that intersection and return it
    as a new polygon. Otherwise, return None.

    If ``extend_margins`` is larger than zero, then extend ``line_polygon``
    by that many pixels horizontally and vertically beforehand.
    """
    LOG = getLogger('processor.OcropyResegment')
    # height, width = region_labels.shape
    # mask from line polygon:
    line_mask = np.zeros_like(region_labels)
    line_mask[draw.polygon(line_polygon[:, 1], line_polygon[:, 0],
                           line_mask.shape)] = 1
    line_mask[draw.polygon_perimeter(line_polygon[:, 1], line_polygon[:, 0],
                                     line_mask.shape)] = 1
    #DSAVE('line %s mask' % line_id, line_mask + 0.5 * region_bin)
    # pad line polygon (extend the mask):
    line_mask = filters.maximum_filter(line_mask, 1 + 2 * extend_margins)
    # intersect with region labels
    line_labels = region_labels * line_mask
    if not np.count_nonzero(line_labels):
        LOG.warning('Label mask is empty for line "%s"', line_id)
        return None
    # find the mask of the largest label (in the foreground):
    total_count = np.sum(region_bin * line_mask)
    line_labels_fg = region_bin * line_labels
    if not np.count_nonzero(line_labels_fg):
        LOG.warning('No foreground pixels within line mask for line "%s"',
                    line_id)
        return None
    label_counts = np.bincount(line_labels_fg.flat)
    max_label = np.argmax(label_counts[1:]) + 1
    max_count = label_counts[max_label]
    if (max_count < threshold_absolute
            and max_count / total_count < threshold_relative):
        LOG.info('Largest label (%d) is too small (%d/%d) in line "%s"',
                 max_label, max_count, total_count, line_id)
        return None
    LOG.debug(
        'Black pixels before/after resegment of line "%s" (nlabels=%d): %d/%d',
        line_id, len(label_counts.nonzero()[0]), total_count, max_count)
    line_mask = np.array(line_labels == max_label, np.uint8)
    #DSAVE('line %s mask tight' % line_id, line_mask + 0.5 * region_bin)
    # find outer contour (parts):
    contours, _ = cv2.findContours(line_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # determine largest part by area:
    contour_areas = [cv2.contourArea(contour) for contour in contours]
    max_contour = np.argmax(contour_areas)
    max_area = contour_areas[max_contour]
    total_area = cv2.contourArea(np.expand_dims(line_polygon, 1))
    if max_area / total_area < 0.5 * threshold_relative:
        # using a different, more conservative threshold here:
        # avoid being overly strict with cropping background,
        # just ensure the contours are not a split of the mask
        LOG.warning(
            'Largest label (%d) largest contour (%d) is small (%d/%d) in line "%s"',
            max_label, max_contour, max_area, total_area, line_id)
    contour = contours[max_contour]
    # simplify shape:
    # can produce invalid (self-intersecting) polygons:
    #polygon = cv2.approxPolyDP(contour, 2, False)[:, 0, ::] # already ordered x,y
    polygon = contour[:, 0, ::]  # already ordered x,y
    polygon = Polygon(polygon).simplify(2).exterior.coords[:-1]  # keep open
    if len(polygon) < 4:
        LOG.warning('found no contour of >=4 points for line "%s"', line_id)
        return None
    return polygon
Example #58
0
filter_1 = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
filter_2 = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
filter_3 = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
weight = 1

#%% [markdown]
# ### Applying the convolution with different filters on image

i_transformed = signal.convolve2d(i, filter_3, mode="same")
i_transformed[i_transformed < 0] = 0
i_transformed[i_transformed > 255] = 255

#%% [markdown]
# ### Plot the result of applying filter on image

plt.grid(False)
plt.imshow(i_transformed, cmap="gray")
plt.show()

#%% [markdown]
# ### Applying a 2x2 maximum filter to the image (max pooling without the downsampling step)

i_maxpooled = maximum_filter(i_transformed, footprint=np.ones((2, 2)))

#%% [markdown]
# ### Plot the result of max pooling

plt.grid(False)
plt.imshow(i_maxpooled)
plt.show()
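Note that maximum_filter keeps the image at full resolution, so the step above is max filtering rather than true pooling; 2x2 max pooling also downsamples by a factor of two. A sketch of actual pooling, assuming scikit-image's block_reduce is available:

import numpy as np
from skimage.measure import block_reduce

# Reduce each non-overlapping 2x2 block to its maximum, halving both dimensions.
i_maxpooled_strided = block_reduce(i_transformed, block_size=(2, 2), func=np.max)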
Example #59
0
def compute_colseps_conv(binary, scale=1.0):
    """Find column separators by convoluation and
    thresholding."""
    h, w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0 * binary, (scale, scale * 0.5))
    smoothed = uniform_filter(smoothed, (5.0 * scale, 1))
    thresh = (smoothed < amax(smoothed) * 0.1)
    ####imsave('/home/gupta/Documents/1_thresh.png', thresh)
    ##DSAVE("1thresh",thresh)
    # find column edges by filtering

    #
    grad = gaussian_filter(1.0 * binary, (scale, scale * 0.5), order=(0, 1))
    grad = uniform_filter(grad, (10.0 * scale, 1))
    #grad = abs(grad) # use this for finding both edges
    grad = (grad > 0.25 * amax(grad))
    grad1 = morph.select_regions(grad,
                                 sl.dim0,
                                 min=args.csminheight * scale,
                                 nbest=args.maxcolseps + 10)

    ####imsave('/home/gupta/Documents/2_grad.png', grad1)
    x = (1 - thresh) * (1 - grad1)
    thresh11 = (1 - thresh) * x
    ####imsave('/home/gupta/Documents/3_x.png', thresh11)

    #############################################################################################################
    for r in range(0, len(thresh11)):
        count = 0
        for c in range(0, len(thresh11[0])):
            if (thresh11[r][c] == 1):
                continue
            count += 1
            if (c != len(thresh11[0]) - 1 and thresh11[r][c + 1] == 1):
                if (count <= 50):
                    for z in range(c - count, c + 1):
                        thresh11[r][z] = 1
                count = 0

    y = 1 - (thresh11 * (1 - thresh))
    ####imsave('/home/gupta/Documents/4_uniformed.png', y)

    #############################################################################################################

    ##DSAVE("2grad",grad)
    # combine edges and whitespace
    seps = minimum(thresh, maximum_filter(grad, (int(scale), int(5 * scale))))
    seps = maximum_filter(seps, (int(2 * scale), 1))
    #
    ####imsave('/home/gupta/Documents/5_seps.png', seps)
    h, w = seps.shape
    smoothed = gaussian_filter(1.0 * seps, (scale, scale * 0.5))
    smoothed = uniform_filter(smoothed, (5.0 * scale, 1))
    seps1 = (smoothed < amax(smoothed) * 0.1)
    ####imsave('/home/gupta/Documents/6_smooth.png', seps1)
    seps1 = 1 - seps1
    #
    ####imsave('/home/gupta/Documents/7_smooth.png', seps1)
    seps1 = (grad) * seps1
    ####imsave('/home/gupta/Documents/8_multigrad.png', seps1)

    #############################################################################################################
    for c in range(0, len(seps1[0])):
        count = 0
        for r in range(0, len(seps1)):
            if (seps1[r][c] == 1):
                continue
            count += 1
            if (r != len(seps1) - 1 and seps1[r + 1][c] == 1):
                if count <= 400:  # lowering this to 300 can improve results
                    for z in range(r - count, r + 1):
                        seps1[z][c] = 1
                count = 0

    ####imsave('/home/gupta/Documents/9_uniformed.png', seps1)
    #############################################################################################################

    seps1 = morph.select_regions(seps1,
                                 sl.dim0,
                                 min=args.csminheight * scale,
                                 nbest=args.maxcolseps + 10)
    ####imsave('/home/gupta/Documents/10_seps1.png', seps1)
    #
    ####seps2=seps1*y
    ####t=seps1*(1-y)
    ####imsave('/home/gupta/Documents/t.png', t)
    ####imsave('/home/gupta/Documents/s.png', seps2)

    #
    seps1 = (seps1 * (1 - y)) + seps1
    for c in range(0, len(seps1[0])):
        for r in range(0, len(seps1)):
            if (seps1[r][c] != 0):
                seps1[r][c] = 1
    ####imsave('/home/gupta/Documents/11_testing.png', 0.7*seps1+0.3*binary)
    #f=(seps1-seps2)+seps1

    #############################################################################################################
    for c in range(0, len(seps1[0])):
        count = 0
        for r in range(0, len(seps1)):
            if (seps1[r][c] == 1):
                continue
            count += 1
            if (r != len(seps1) - 1 and seps1[r + 1][c] == 1):
                if (count <= 350):
                    for z in range(r - count, r + 1):
                        seps1[z][c] = 1
                count = 0

    ####imsave('/home/gupta/Documents/12_uniformed.png', seps1)
    #############################################################################################################

    ####imsave('/home/gupta/Documents/13_col_sep.png', seps1)
    return seps1
def calcPeakCoords(heatMap,footPrint):
    maxFilted = maximum_filter(heatMap, footprint=footPrint)
    isPeaks = (heatMap == maxFilted) * (heatMap > config.jointsThres)
    peaksCoords = np.array(np.nonzero(isPeaks)[::-1]).T

    return peaksCoords
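A usage sketch for calcPeakCoords; config.jointsThres is external to this snippet, so a hypothetical stand-in is defined here for illustration (this assumes the sketch runs in the same module, with maximum_filter imported):

import numpy as np

class config:                 # hypothetical stand-in, not the original config
    jointsThres = 0.5

heat = np.zeros((64, 64), dtype=np.float32)
heat[20, 30] = 1.0            # one synthetic joint peak
print(calcPeakCoords(heat, footPrint=np.ones((3, 3), dtype=bool)))
# -> [[30 20]]  in (x, y) order because of the [::-1]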