Example 1
def test_minmaximum_filter1d():
    # Regression gh-3898
    in_ = np.arange(10)
    out = sndi.minimum_filter1d(in_, 1)
    assert_equal(in_, out)
    out = sndi.maximum_filter1d(in_, 1)
    assert_equal(in_, out)
    # Test reflect
    out = sndi.minimum_filter1d(in_, 5, mode='reflect')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='reflect')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test constant
    out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
    out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
    # Test nearest
    out = sndi.minimum_filter1d(in_, 5, mode='nearest')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = sndi.maximum_filter1d(in_, 5, mode='nearest')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test wrap
    out = sndi.minimum_filter1d(in_, 5, mode='wrap')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
    out = sndi.maximum_filter1d(in_, 5, mode='wrap')
    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
Example 2
def move_nanmax_filter(arr, window, axis=-1):
    "Moving window maximium ignoring NaNs, implemented with a filter."
    global maximum_filter1d, convolve1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if convolve1d is None:
        try:
            from scipy.ndimage import convolve1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    arr = arr.astype(float)
    nrr = np.isnan(arr)
    arr[nrr] = -np.inf
    x0 = (window - 1) // 2
    maximum_filter1d(arr, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=arr)
    w = np.ones(window, dtype=int)
    nrr = nrr.astype(int)
    x0 = (1 - window) // 2
    convolve1d(nrr, w, axis=axis, mode='constant', cval=0, origin=x0,
               output=nrr)
    arr[nrr == window] = np.nan
    return arr
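The key trick in move_nanmax_filter is to substitute -inf for NaNs so they can never win the maximum, and afterwards restore NaN wherever a window contained no valid sample at all. Below is a self-contained sketch of that idea, using a centered window and a hypothetical helper name nanmax_filter1d; the trailing-window origin handling of the original function is left out.

import numpy as np
from scipy.ndimage import maximum_filter1d

def nanmax_filter1d(a, size):
    # Hypothetical helper illustrating the NaN-ignoring maximum filter idea.
    a = np.asarray(a, dtype=float)
    nan_mask = np.isnan(a)
    # NaNs become -inf so they can never be the window maximum.
    out = maximum_filter1d(np.where(nan_mask, -np.inf, a), size, mode='nearest')
    # Windows that held only NaNs have no valid sample; map them back to NaN.
    valid_in_window = maximum_filter1d((~nan_mask).astype(int), size, mode='nearest')
    out[valid_in_window == 0] = np.nan
    return out

print(nanmax_filter1d([1.0, np.nan, 4.0, np.nan, np.nan], size=3))
# [ 1.  4.  4.  4. nan]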
Example 3
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected,
                 sndi.minimum_filter(arr, size=5, mode=modes))
Example 4
def find_nearest_object(mmp, im_labeled, slice_map, i, labels_center_column):
    """
    mmp : mask
    im_labeled : label
    i : object to be connected
    labels_center_column : known objects
    """
    thre = 40 # threshold # of pixels (in y-direction) to detect adjacent object
    steps = [5, 10, 20, 40, 80]

    sl_y, sl_x = slice_map[i]

    # right side
    ss = im_labeled[:,sl_x.stop-3:sl_x.stop].max(axis=1)
    ss_msk = ni.maximum_filter1d(ss == i, thre)

    if sl_x.stop < 2048/2.:
        sl_x0 = sl_x.stop
        sl_x_pos = [sl_x.stop + s for s in steps]
    else:
        sl_x0 = sl_x.start
        sl_x_pos = [sl_x.start - s for s in steps]

    for pos in sl_x_pos:
        ss1 = im_labeled[:,pos]
        detected_ob = set(np.unique(ss1[ss_msk])) - set([0])
        for ob_id in detected_ob:
            if ob_id in labels_center_column:
                sl = slice_map[ob_id][1]
                if sl.start < sl_x0 < sl.stop:
                    continue
                else:
                    return ob_id
Example 5
def move_max_filter(arr, window, axis=-1):
    "Moving window maximium implemented with a filter."
    global maximum_filter1d
    if maximum_filter1d is None:
        try:
            from scipy.ndimage import maximum_filter1d
        except ImportError:
            raise ValueError("'filter' method requires SciPy.")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > arr.shape[axis]:
        raise ValueError("`window` is too long.")
    y = arr.astype(float)
    x0 = (window - 1) // 2
    maximum_filter1d(y, window, axis=axis, mode='constant', cval=np.nan,
                     origin=x0, output=y)
    return y
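The only non-obvious piece of move_max_filter is the origin argument: shifting the footprint by (window - 1) // 2 turns the default centered window into a trailing one, so each output depends only on the current sample and the window - 1 samples before it, matching the usual moving-window convention. A minimal sketch with made-up data:

import numpy as np
from scipy.ndimage import maximum_filter1d

window = 3
y = np.array([1.0, 5.0, 2.0, 0.0, 4.0, 3.0])
# origin=(window - 1) // 2 shifts the footprint so each output looks at the
# current sample and the preceding ones rather than a centered neighbourhood.
trailing_max = maximum_filter1d(y, window, mode='nearest',
                                origin=(window - 1) // 2)
centered_max = maximum_filter1d(y, window, mode='nearest')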
Example 6
def average_absolute_discrete_group_delay(mat_phase):
    dgd = discrete_group_delay(mat_phase)

    # Moving maximum of |dgd| over a trailing 5-sample window along axis 0,
    # then average over that axis.
    temp = ndimage.maximum_filter1d(numpy.abs(dgd), 5, axis=0, output=None,
                                    mode="constant", cval=0.0, origin=2)
    # Alternatives:
    # temp = bn.move_max(numpy.abs(dgd), 5, axis=0)
    # temp = maxfilt.maxfilt1d(numpy.abs(dgd), 5)  # my own implementation
    return numpy.mean(temp, axis=0)
Example 7
def local_extrema_2(arr, min_distance=20):
    """Find all local minima and maxima of a 1-D array, separated by at
    least min_distance samples."""
    max_points = arr == maximum_filter1d(arr, min_distance)
    min_points = arr == minimum_filter1d(arr, min_distance)

    min_idx = nonzero(min_points)[0]
    max_idx = nonzero(max_points)[0]
    return [arr[min_idx], min_idx, arr[max_idx], max_idx]
Example 8
def get_localmaxs(array, count=3, window=25):
    """
    array must be one-dimensional
    Individualize the locals maxs relatives to a window of the given length.
    Returns theirs coodenates.
    """
    maxs = maximum_filter1d(array, window)
    maxs[maxs > array] = array.min()
    graph(array, maxs)
    top_positions = maxs.argsort()[::-1][:count]
    return sorted(top_positions)
Example 9
def get_y_derivativemap(flat, flat_bpix, bg_std_norm,
                        max_sep_order=150, pad=50,
                        med_filter_size=(7, 7),
                        flat_mask=None):

    """
    flat
    flat_bpix : bpix'ed flat
    """

    # 1d-derivatives along y-axis : 1st attempt
    # im_deriv = ni.gaussian_filter1d(flat, 1, order=1, axis=0)

    # 1d-derivatives along y-axis : 2nd attempt. Median filter first.

    flat_deriv_bpix = ni.gaussian_filter1d(flat_bpix, 1,
                                           order=1, axis=0)

    # We also make a median-filtered one. This one will be used to make masks.
    flat_medianed = ni.median_filter(flat,
                                     size=med_filter_size)

    flat_deriv = ni.gaussian_filter1d(flat_medianed, 1,
                                      order=1, axis=0)

    # min/max filter

    flat_max = ni.maximum_filter1d(flat_deriv, size=max_sep_order, axis=0)
    flat_min = ni.minimum_filter1d(flat_deriv, size=max_sep_order, axis=0)

    # mask for aperture boundary
    if pad is None:
        sl = slice(None)
    else:
        sl = slice(pad, -pad)

    flat_deriv_masked = np.zeros_like(flat_deriv)
    flat_deriv_masked[sl,sl] = flat_deriv[sl, sl]

    if flat_mask is not None:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5) & flat_mask
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5) & flat_mask
    else:
        flat_deriv_pos_msk = (flat_deriv_masked > flat_max * 0.5)
        flat_deriv_neg_msk = (flat_deriv_masked < flat_min * 0.5)

    return dict(data=flat_deriv, #_bpix,
                pos_mask=flat_deriv_pos_msk,
                neg_mask=flat_deriv_neg_msk,
                )
Example 10
def local_maxima(array, min_distance = 1, periodic=False, edges_allowed=True):
    """Find all local maxima of the array, 
       separated by at least  min_distance.
       If uses ndimage.maximum_filter1d()
    """
    import scipy.ndimage as ndimage
    array = np.asarray(array)
    cval = 0
    if periodic:
       mode = 'wrap'
    elif edges_allowed:
       mode = 'nearest'
    else:
       mode = 'constant'
       cval = array.max()+1
    max_points = array == ndimage.maximum_filter1d(array,
                  1+2*min_distance, mode=mode, cval=cval)

    return [indices[max_points] for indices in  np.indices(array.shape)]
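A minimal usage sketch of local_maxima with made-up data, assuming the function above is in scope; min_distance=1 makes the comparison window 1 + 2*1 = 3 samples wide.

import numpy as np

y = np.array([0, 2, 1, 0, 3, 1, 0, 1, 0])
idx, = local_maxima(y, min_distance=1)
# idx == array([1, 4, 7]): positions where y equals the maximum over a
# 3-sample window (mode='nearest', since edges_allowed defaults to True).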
Example 11
def findlines(x, y, fwhm, smoothwindow = 'hanning', sigma_threshold = 3.):

    '''
    Several things happen here and I am not quite sure yet which will turn out to be useful
    - smoothing: bring out real peaks and not just noise
    - maximum_filter == array: will find the peaks
    - sigma_clipping: are the peaks large enough to be relevant?
    
    Parameters
    ----------
    x : ndarray
        x values, e.g. wavelength
    y : ndarray
        y values, e.g. flux or res_flux / error
    fwhm : float
        estimate for FWHM of lines. Used as smoothing scale
    smoothwindow : string or None
        if `smoothwindow` is one of `['flat', 'hanning', 'hamming',
        'bartlett', 'blackman']` a corresponding window function
        will be used to smooth the signal before line detection.
    
    Returns
    -------
    peaks : ndarray
        index numbers for peaks found
    '''
    fwhminpix = int(fwhm / np.diff(x).mean())
    if smoothwindow is not None:
        y = smooth(y, window_len=3 * fwhminpix, window=smoothwindow)

    maxindex = (maximum_filter1d(y, max(fwhminpix,3)) == y)
    maxindex = maxindex & (y > (y.mean() + sigma_threshold * y.std()))
    # sigma_clipping works only if there is plenty of continuum
    #clipped_y = sigma_clipping(y, threshold = sigma_threshold)
    # trust only peaks that are so large that they get clipped by sigma_clipping
    #maxindex = maxindex & (clipped_y.mask == False)

    return np.flatnonzero(maxindex)
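A stripped-down sketch of the peak test used in findlines, with made-up data and no smoothing; only the large feature passes the sigma cut.

import numpy as np
from scipy.ndimage import maximum_filter1d

y = np.array([0.0, 1.0, 0.0, 0.0, 8.0, 0.0, 1.0, 0.0])
is_local_max = maximum_filter1d(y, 3) == y
is_significant = y > y.mean() + 2.0 * y.std()
peaks = np.flatnonzero(is_local_max & is_significant)
# peaks == array([4]); the small bumps at indices 1 and 6 are local maxima
# but fail the sigma threshold.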
Example 12
def scan_blink_correction(image, axis=1):
    """
    This filter can be used to remove impulsive noise from a 2D array along a single axis.
    It applies a sequence of two filters: first a minimum filter and then a maximum filter.
    This composite non-linear filter technique is also called an opening filter.

    It will completely remove single-pixel (along the given axis) brightness spikes from the
    image but will cause the image to be more "blocky"/less smooth.
    Of course you need to ensure that the image features of interest are larger than a single
    pixel along the filtered axis.
    @param numpy.ndarray image: A 2D numpy array to be filtered (e.g. image data)
    @param int axis: The axis along which to apply the 1D filter
    @return numpy.ndarray: The filtered image. Same dimensions as input image
    """

    if not isinstance(image, np.ndarray):
        logger.error('Image must be 2D numpy array.')
        return image
    if image.ndim != 2:
        logger.error('Image must be 2D numpy array.')
        return image
    if axis != 0 and axis != 1:
        logger.error('Optional axis parameter must be either 0 or 1.')
        return image

    # Calculate median value of the image. This value is used for padding image boundaries during
    # filtering.
    median = np.median(image)
    # Apply a minimum filter along the chosen axis.
    filt_img = minimum_filter1d(image, size=2, axis=axis, mode='constant', cval=median)
    # Apply a maximum filter along the chosen axis. Flip the previous filter result to avoid
    # translation of image features.
    filt_img = maximum_filter1d(
        np.flip(filt_img, axis), size=2, axis=axis, mode='constant', cval=median)
    # Flip back the image to obtain original orientation and return result.
    return np.flip(filt_img, axis)
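A one-row illustration of the opening idea (minimum filter followed by maximum filter) with made-up data; it omits the flip trick scan_blink_correction uses to avoid the half-pixel shift of the even-sized filter.

import numpy as np
from scipy.ndimage import minimum_filter1d, maximum_filter1d

row = np.array([1.0, 1.0, 9.0, 1.0, 1.0])
opened = maximum_filter1d(minimum_filter1d(row, size=2), size=2)
# opened == array([1., 1., 1., 1., 1.]): the single-pixel spike is removed,
# while features wider than the filter size would survive.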
Example 13
def _prominent_peaks(image, min_xdistance=1, min_ydistance=1,
                     threshold=None, num_peaks=np.inf):
    """Return peaks with non-maximum suppression.

    Identifies most prominent features separated by certain distances.
    Non-maximum suppression with different sizes is applied separately
    in the first and second dimension of the image to identify peaks.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    min_xdistance : int
        Minimum distance separating features in the x dimension.
    min_ydistance : int
        Minimum distance separating features in the y dimension.
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(image)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    intensity, xcoords, ycoords : tuple of array
        Peak intensity values, x and y indices.
    """

    img = image.copy()
    rows, cols = img.shape

    if threshold is None:
        threshold = 0.5 * np.max(img)

    ycoords_size = 2 * min_ydistance + 1
    xcoords_size = 2 * min_xdistance + 1
    img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,
                                   mode='constant', cval=0)
    img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,
                                   mode='constant', cval=0)
    mask = (img == img_max)
    img *= mask
    img_t = img > threshold

    label_img = measure.label(img_t)
    props = measure.regionprops(label_img, img_max)

    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)

    img_peaks = []
    ycoords_peaks = []
    xcoords_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,
                                        -min_xdistance:min_xdistance + 1]

    for ycoords_idx, xcoords_idx in coords:
        accum = img_max[ycoords_idx, xcoords_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            ycoords_nh = ycoords_idx + ycoords_ext
            xcoords_nh = xcoords_idx + xcoords_ext

            # no reflection for distance neighbourhood
            ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)
            ycoords_nh = ycoords_nh[ycoords_in]
            xcoords_nh = xcoords_nh[ycoords_in]

            # reflect xcoords and assume xcoords are continuous,
            # e.g. for angles:
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            xcoords_low = xcoords_nh < 0
            ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]
            xcoords_nh[xcoords_low] += cols
            xcoords_high = xcoords_nh >= cols
            ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]
            xcoords_nh[xcoords_high] -= cols

            # suppress neighbourhood
            img_max[ycoords_nh, xcoords_nh] = 0

            # add current feature to peaks
            img_peaks.append(accum)
            ycoords_peaks.append(ycoords_idx)
            xcoords_peaks.append(xcoords_idx)

    img_peaks = np.array(img_peaks)
    ycoords_peaks = np.array(ycoords_peaks)
    xcoords_peaks = np.array(xcoords_peaks)

    if num_peaks < len(img_peaks):
        idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]
        img_peaks = img_peaks[idx_maxsort]
        ycoords_peaks = ycoords_peaks[idx_maxsort]
        xcoords_peaks = xcoords_peaks[idx_maxsort]

    return img_peaks, xcoords_peaks, ycoords_peaks
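The heart of _prominent_peaks is the separable non-maximum suppression; the sketch below (made-up data) shows just that step: a pixel survives only if it equals the maximum of its (2*d + 1)-sized neighbourhood along each axis.

import numpy as np
from scipy import ndimage as ndi

img = np.random.default_rng(0).random((6, 6))
d = 1  # plays the role of min_xdistance == min_ydistance == 1
img_max = ndi.maximum_filter1d(img, size=2 * d + 1, axis=0,
                               mode='constant', cval=0)
img_max = ndi.maximum_filter1d(img_max, size=2 * d + 1, axis=1,
                               mode='constant', cval=0)
peaks_mask = img == img_max  # candidate peaks, before the intensity threshold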
Example 14
def hough_peaks(hspace, angles, dists, min_distance=10, min_angle=10,
                threshold=None, num_peaks=np.inf):
    """Return peaks in hough transform.

    Identifies most prominent lines separated by a certain angle and distance in
    a hough transform. Non-maximum suppression with different sizes is applied
    separately in the first (distances) and second (angles) dimension of the
    hough space to identify peaks.

    Parameters
    ----------
    hspace : (N, M) array
        Hough space returned by the `hough` function.
    angles : (M,) array
        Angles returned by the `hough` function. Assumed to be continuous
        (`angles[-1] - angles[0] == PI`).
    dists : (N, ) array
        Distances returned by the `hough` function.
    min_distance : int
        Minimum distance separating lines (maximum filter size for first
        dimension of hough space).
    min_angle : int
        Minimum angle separating lines (maximum filter size for second
        dimension of hough space).
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.

    Returns
    -------
    hspace, angles, dists : tuple of array
        Peak values in hough space, angles and distances.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.transform import hough, hough_peaks
    >>> from skimage.draw import line
    >>> img = np.zeros((15, 15), dtype=np.bool_)
    >>> rr, cc = line(0, 0, 14, 14)
    >>> img[rr, cc] = 1
    >>> rr, cc = line(0, 14, 14, 0)
    >>> img[cc, rr] = 1
    >>> hspace, angles, dists = hough(img)
    >>> hspace, angles, dists = hough_peaks(hspace, angles, dists)
    >>> angles
    array([  0.74590887,  -0.79856126])
    >>> dists
    array([  10.74418605,  0.51162791])

    """

    hspace = hspace.copy()
    rows, cols = hspace.shape

    if threshold is None:
        threshold = 0.5 * np.max(hspace)

    distance_size = 2 * min_distance + 1
    angle_size = 2 * min_angle + 1
    hspace_max = ndimage.maximum_filter1d(hspace, size=distance_size, axis=0,
                                          mode='constant', cval=0)
    hspace_max = ndimage.maximum_filter1d(hspace_max, size=angle_size, axis=1,
                                          mode='constant', cval=0)
    mask = (hspace == hspace_max)
    hspace *= mask
    hspace_t = hspace > threshold

    label_hspace = morphology.label(hspace_t)
    props = measure.regionprops(label_hspace, ['Centroid'])
    coords = np.array([np.round(p['Centroid']) for p in props], dtype=int)

    hspace_peaks = []
    dist_peaks = []
    angle_peaks = []

    # relative coordinate grid for local neighbourhood suppression
    dist_ext, angle_ext = np.mgrid[-min_distance:min_distance + 1,
                                   -min_angle:min_angle + 1]

    for dist_idx, angle_idx in coords:
        accum = hspace[dist_idx, angle_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            dist_nh = dist_idx + dist_ext
            angle_nh = angle_idx + angle_ext

            # no reflection for distance neighbourhood
            dist_in = np.logical_and(dist_nh > 0, dist_nh < rows)
            dist_nh = dist_nh[dist_in]
            angle_nh = angle_nh[dist_in]

            # reflect angles and assume angles are continuous, e.g.
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            angle_low = angle_nh < 0
            dist_nh[angle_low] = rows - dist_nh[angle_low]
            angle_nh[angle_low] += cols
            angle_high = angle_nh >= cols
            dist_nh[angle_high] = rows - dist_nh[angle_high]
            angle_nh[angle_high] -= cols

            # suppress neighbourhood
            hspace[dist_nh, angle_nh] = 0

            # add current line to peaks
            hspace_peaks.append(accum)
            dist_peaks.append(dists[dist_idx])
            angle_peaks.append(angles[angle_idx])

    hspace_peaks = np.array(hspace_peaks)
    dist_peaks = np.array(dist_peaks)
    angle_peaks = np.array(angle_peaks)

    if num_peaks < len(hspace_peaks):
        idx_maxsort = np.argsort(hspace_peaks)[::-1][:num_peaks]
        hspace_peaks = hspace_peaks[idx_maxsort]
        dist_peaks = dist_peaks[idx_maxsort]
        angle_peaks = angle_peaks[idx_maxsort]

    return hspace_peaks, angle_peaks, dist_peaks