Example #1
def dual_gradient_energy(img):
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]
    w, h = img.shape[:2]
    ibh = filters.sobel_h(B)
    ibv = filters.sobel_v(B)
    irh = filters.sobel_h(R)
    irv = filters.sobel_v(R)
    igh = filters.sobel_h(G)
    igv = filters.sobel_v(G)
    energy = np.zeros((w, h))
    for i in range(0, w):
        for j in range(0, h):
            energy[i][j] = (irh[i][j] * irh[i][j] + igh[i][j] * igh[i][j] +
                            ibh[i][j] * ibh[i][j] + irv[i][j] * irv[i][j] +
                            igv[i][j] * igv[i][j] + ibv[i][j] * ibv[i][j])
    gray()
    imshow(energy)
    title("Energy of image")
    show()
    # Sobel zeros out the one-pixel image border, so copy the nearest interior values outward
    for i in range(0, w):
        energy[i][0] = energy[i][1]
        energy[i][h-1] = energy[i][h-2]
    for i in range(0, h):
        energy[0][i] = energy[1][i]
        energy[w-1][i] = energy[w-2][i]

    return energy
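A minimal usage sketch for the function above (the file name and imports are assumptions; the snippet itself expects np, filters, and the pylab-style plotting functions gray/imshow/title/show to already be in scope):

import numpy as np
from matplotlib.pyplot import gray, imshow, show, title
from skimage import filters, io
from skimage.util import img_as_float

img = img_as_float(io.imread("castle.jpg"))  # hypothetical input file
energy = dual_gradient_energy(img)
print(energy.shape)  # same (rows, cols) as the input image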
Example #2
def dual_gradient_energy(img):
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]
    w, h = img.shape[:2]
    ibh = filters.sobel_h(B)
    ibv = filters.sobel_v(B)
    irh = filters.sobel_h(R)
    irv = filters.sobel_v(R)
    igh = filters.sobel_h(G)
    igv = filters.sobel_v(G)
    energy = np.zeros((w, h))
    for i in range(0, w):
        for j in range(0, h):
            energy[i][j] = (irh[i][j] * irh[i][j] + igh[i][j] * igh[i][j] +
                            ibh[i][j] * ibh[i][j] + irv[i][j] * irv[i][j] +
                            igv[i][j] * igv[i][j] + ibv[i][j] * ibv[i][j])
    gray()
    imshow(energy)
    title("Energy of image")
    show()
    # Sobel zeros out the one-pixel image border, so copy the nearest interior values outward
    for i in range(0, w):
        energy[i][0] = energy[i][1]
        energy[i][h - 1] = energy[i][h - 2]
    for i in range(0, h):
        energy[0][i] = energy[1][i]
        energy[w - 1][i] = energy[w - 2][i]

    return energy
Example #3
def color_sobel_edges(I: np.ndarray) -> Tuple[np.ndarray, float]:
    '''
    Sobel vector gradient for color images.
    
    :param I: Input image.
    :return: A 2-tuple whose first entry is a 2-D array of per-pixel gradient magnitudes,
    and whose second entry contains the per-pixel gradient directions.
    '''

    chan_R = I[:, :, 0]
    chan_G = I[:, :, 1]
    chan_B = I[:, :, 2]

    horizon_R = filt.sobel_h(chan_R)
    horizon_G = filt.sobel_h(chan_G)
    horizon_B = filt.sobel_h(chan_B)

    vertical_R = filt.sobel_v(chan_R)
    vertical_G = filt.sobel_v(chan_G)
    vertical_B = filt.sobel_v(chan_B)

    g_x = np.dstack((horizon_R, horizon_G, horizon_B))
    g_y = np.dstack((vertical_R, vertical_G, vertical_B))

    g_xx = color_dot_product(g_x, g_x)
    g_yy = color_dot_product(g_y, g_y)
    g_xy = color_dot_product(g_x, g_y)

    grad_direction_x2 = np.arctan2(2 * g_xy, g_xx - g_yy)

    grad_magnitude = np.sqrt(((g_xx + g_yy) + \
                              (g_xx - g_yy) * np.cos(grad_direction_x2) + \
                              2 * g_xy * np.sin(grad_direction_x2)) / 2)

    return (grad_magnitude, grad_direction_x2 / 2)
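The color_dot_product helper is not shown in this example; a minimal sketch, assuming it is the per-pixel dot product over the channel axis that the Di Zenzo-style color gradient above requires:

import numpy as np

def color_dot_product(A, B):
    # Per-pixel dot product across the channel axis: sum over c of A[..., c] * B[..., c]
    return np.sum(A * B, axis=-1)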
def dual_gradient_energy(img):
    """
    calculates the gradient energy of each pixel of an image
    :param img: the image for which the gradient energy is to be calculated
    :return: the gradient energy of each pixel of the image.
    >>> img_test = np.array([[[ 0.77254903,  0.72941178,  0.79215688]]])
    >>> dual_gradient_energy(img_test)
    array([[ 0.]])
    """

    r = img[:, :, 0]
    g = img[:, :, 1]
    b = img[:, :, 2]

    r_h = filters.sobel_h(r)
    g_h = filters.sobel_h(g)
    b_h = filters.sobel_h(b)

    r_v = filters.sobel_v(r)
    g_v = filters.sobel_v(g)
    b_v = filters.sobel_v(b)

    sum_rg_h = np.add(np.square(r_h), np.square(g_h))
    sum_h = np.add(sum_rg_h, np.square(b_h))

    sum_rg_v = np.add(np.square(r_v), np.square(g_v))
    sum_v = np.add(sum_rg_v, np.square(b_v))

    energy = np.add(sum_h, sum_v)
    return energy
def dual_gradient_energy(img):
    h, w = img.shape[:2]
    img = img_as_float(img)
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]
    rh_gradient = filters.sobel_h(R)
    gh_gradient = filters.sobel_h(G)
    bh_gradient = filters.sobel_h(B)
    rv_gradient = filters.sobel_v(R)
    gv_gradient = filters.sobel_v(G)
    bv_gradient = filters.sobel_v(B)

    # Calculate the energy matrix
    energy = (rh_gradient*rh_gradient) + (gh_gradient*gh_gradient) +\
             (bh_gradient*bh_gradient) + (rv_gradient*rv_gradient) +\
             (gv_gradient*gv_gradient) + (bv_gradient*bv_gradient)

    # Sobel returns 0 in the first and last columns, so copy the second
    # column into the first and the second-to-last column into the last
    energy[:, 0] = energy[:, 1]
    energy[:, w - 1] = energy[:, w - 2]
    return energy
def dual_gradient_energy(img):
    """
    Dual gradient energy is the sum of the squares of the horizontal and vertical gradients.
    Use skimage.filters.sobel_h and sobel_v to calculate the gradients of each channel independently.
    The energy is the sum of the squares of the horizontal and vertical gradients over all channels.
    :param img: input image
    :return: dual gradient energy of input image
    """
    red_channel = img[:, :, 0]      # red channel of the image
    green_channel = img[:, :, 1]    # green channel of the image
    blue_channel = img[:, :, 2]     # blue channel of the image

    horizontal_gradient_red = filters.sobel_h(red_channel)      # horizontal gradient of the red channel
    vertical_gradient_red = filters.sobel_v(red_channel)        # vertical gradient of the red channel

    horizontal_gradient_green = filters.sobel_h(green_channel)  # horizontal gradient of the green channel
    vertical_gradient_green = filters.sobel_v(green_channel)    # vertical gradient of the green channel

    horizontal_gradient_blue = filters.sobel_h(blue_channel)    # horizontal gradient of the blue channel
    vertical_gradient_blue = filters.sobel_v(blue_channel)      # vertical gradient of the blue channel

    # dual gradient energy at each pixel
    energy = (horizontal_gradient_red * horizontal_gradient_red)\
            + (vertical_gradient_red * vertical_gradient_red)\
            + (horizontal_gradient_green * horizontal_gradient_green)\
            + (vertical_gradient_green * vertical_gradient_green)\
            + (horizontal_gradient_blue * horizontal_gradient_blue)\
            + (vertical_gradient_blue * vertical_gradient_blue)

    return energy
Example #7
def dual_gradient_energy(img1):
    w, h = img1.shape[:2]
    R = img1[:, :, 0]
    G = img1[:, :, 1]
    B = img1[:, :, 2]
    A = sobel_h(R) ** 2 + sobel_v(R) ** 2 + sobel_h(G) ** 2 + sobel_v(G) ** 2 + sobel_h(B) ** 2 + sobel_v(B) ** 2
    r = len(A)
    c = len(A[0])
    for i in range(len(A[0])):
        A[0][i] = A[1][i]
        A[r - 1][i] = A[r - 2][i]
    for i in range(len(A)):
        A[i][0] = A[i][1]
        A[i][c - 1] = A[i][c - 2]
    return A
Example #8
 def calculate(self, image: np.ndarray, disk_size: int=9,
               mean_threshold: int=100, min_object_size: int=750) -> float:
     # Find edges that have a strong vertical direction
     vertical_edges = sobel_v(image)
     # Separate out the areas where there is a large amount of vertically-oriented stuff
     segmentation = self._segment_edge_areas(vertical_edges, disk_size, mean_threshold, min_object_size)
     # Draw a line that follows the center of the segments at each point, which should be roughly vertical
     # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
     # each line
     skeletons = skeletonize(segmentation)
     # Use the Hough transform to get the closest lines that approximate those four lines
     hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       0.0001))
     # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
     # being completely vertical
     # These angles correspond to the angles of the four sides of the channels, which we need to
     # correct for
     angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
     if not angles:
         raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
     else:
         # Get the average angle and convert it to degrees
         offset = sum(angles) / len(angles) * 180.0 / math.pi
         if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
             log.warn("Image is heavily skewed. Check that the images are valid.")
         return offset
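The _segment_edge_areas helper is external to this snippet; a plausible sketch, assuming (purely from the parameter names) that it thresholds the local mean edge strength over a disk and removes small objects:

import numpy as np
from skimage import img_as_ubyte, morphology
from skimage.filters import rank

def segment_edge_areas(edges, disk_size=9, mean_threshold=100, min_object_size=750):
    # Local mean of the absolute edge strength over a disk-shaped neighborhood
    local_mean = rank.mean(img_as_ubyte(np.abs(edges)), morphology.disk(disk_size))
    # Keep regions with a large amount of vertically-oriented signal
    mask = local_mean > mean_threshold
    # Drop small connected components that are likely noise
    return morphology.remove_small_objects(mask, min_size=min_object_size)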
Example #9
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Follow the math equation
    R=Det(M)-k(Trace(M)^2).
    Hint:
        You may use the function scipy.ndimage.filters.convolve,
        which is already imported above

    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter,usually between [0.04,0.06]
    Returns:
        response: Harris response image of shape (H, W)
    """

    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))

    dx = filters.sobel_v(img)  # filter with kernel [[1,0,-1],[2,0,-2],[1,0,-1]]
    dy = filters.sobel_h(img)  # filter with kernel [[1,2,1],[0,0,0],[-1,-2,-1]]

    ### YOUR CODE HERE
    A = convolve(dx**2, window)
    B = convolve(dx * dy, window)
    C = convolve(dy**2, window)
    for i in range(H):
        for j in range(W):
            M = np.array([[A[i, j], B[i, j]], [B[i, j], C[i, j]]])
            response[i, j] = np.linalg.det(M) - k * np.trace(M)**2
    ### END YOUR CODE
    return response
Example #10
def calc_bkgfluctuation(filename, scale=0.003):
    from PIL import Image
    from skimage import filters

    bkg_fn = filename.split(".")[0] + "_bkg.fits"
    bkg_img = getdata(bkg_fn, 0)
    bkg_img = np.array(bkg_img).byteswap().newbyteorder()
    bkg_image = Image.fromarray(bkg_img)
    factor = scale
    width = int(bkg_image.size[0] * factor)
    height = int(bkg_image.size[1] * factor)
    bkg_image = bkg_image.resize((width, height),
                                 Image.ANTIALIAS)  # best down-sizing filter
    bkg_image = np.array(bkg_image)

    edges_x = filters.sobel_h(bkg_image)
    edges_y = filters.sobel_v(bkg_image)

    # sum of gradient over entire background (log)
    grad = np.log10(
        np.sqrt(np.sum(edges_x)**2 + np.sum(edges_y)**2) / bkg_image.size)
    # sum of residual of subtraction between the background and the median level of the background (log)
    residual = np.log10(
        np.sum((bkg_image - np.median(bkg_image))**2) / bkg_image.size)

    return grad, residual
Example #11
def my_features(img):
    """ Implement your own features

    Args:
        img - array of shape (H, W, C)

    Returns:
        features - array of (H * W, C)
    """
    from skimage.filters import sobel_h, sobel_v
    from skimage.color import rgb2gray
    img = rgb2gray(img)
    H, W = np.shape(img)
    features = np.zeros((H, W, 3))
    ### YOUR CODE HERE
    X, Y = sobel_h(img), sobel_v(img)

    features[:, :, 0] = X
    features[:, :, 1] = Y
    features[:, :, 2] = X**2 + Y**2

    for i in range(3):
        mean, std = np.mean(features[:, :, i]), np.std(features[:, :, i])
        features[:, :, i] = (features[:, :, i] - mean) / std

    features = features.reshape((H * W, 3))
    ### END YOUR CODE
    return features
Example #12
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Following the equation
    R=Det(M)-k(Trace(M)^2).
        
    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter

    Returns:
        response: Harris response image of shape (H, W)
    """

    window = np.ones((window_size, window_size))
    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    # Calculate the elements in the matrix in the formula for M
    ixx = dx**2
    ixy = dx * dy
    iyy = dy**2

    # Calculate the sum using convolution
    sxx = convolve(ixx, window)
    sxy = convolve(ixy, window)
    syy = convolve(iyy, window)

    # Calculate determinant and trace
    det = (sxx * syy) - (sxy**2)
    trace = sxx + syy

    response = det - k * (trace**2)

    return response
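A minimal usage sketch for harris_corners (the image path and the module-level imports the snippet relies on are assumptions):

import numpy as np
from scipy.ndimage import convolve
from skimage import filters, io
from skimage.color import rgb2gray

img = rgb2gray(io.imread("checkerboard.png"))  # hypothetical input
response = harris_corners(img, window_size=3, k=0.04)
# Candidate corners: pixels whose response is a large fraction of the maximum
corners = np.argwhere(response > 0.01 * response.max())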
Example #13
def calculate_rotation(image):
    # sometimes we snag corners, by cropping the left and right 10% of the image we focus only on the
    # vertical bars formed by the structure
    height, width = image.shape
    crop = int(width * 0.1)
    cropped_image = image[:, crop: width - crop]
    # Find edges that have a strong vertical direction
    vertical_edges = sobel_v(cropped_image)
    # Separate out the areas where there is a large amount of vertically-oriented stuff
    segmentation = segment_edge_areas(vertical_edges)
    # Draw a line that follows the center of the segments at each point, which should be roughly vertical
    # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
    # each line
    skeletons = skeletonize(segmentation)
    # Use the Hough transform to get the closest lines that approximate those four lines
    hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      0.0001))
    # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
    # being completely vertical
    # These angles correspond to the angles of the four sides of the channels, which we need to
    # correct for
    angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
    if not angles:
        raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
    else:
        # Get the average angle and convert it to degrees
        offset = sum(angles) / len(angles) * 180.0 / math.pi
        if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
            log.warn("Image is heavily skewed. Check that the images are valid.")
        return offset
def g(img):
    """
    Computes the vertical and horizontal Sobel gradients of the image (returned as g_v, g_h)
    """
    g_h = sobel_h(img)
    g_v = sobel_v(img)
    return g_v, g_h
Example #15
    def detect_edges(self, filename=None):
        """Edge filter an image using the Canny algorithm."""
        if filename is None:
            filename = './output/phough_transform'

        low = self.canny_threshold[0] * (self.img.max() - self.img.min())
        high = self.canny_threshold[1] * (self.img.max() - self.img.min())

        if self.canny_edges == 'horizontal':
            print('Running One-Way Horizontal Edge Detector')
            magnitude = sobel_h(self.img).clip(min=0)
        elif self.canny_edges == 'vertical':
            print('Running One-Way Vertical Edge Detector')
            magnitude = sobel_v(self.img).clip(min=0)
        else:
            print('Running One-Way Multidirectional Edge Detector')
            magnitude = sobel(self.img).clip(min=0)

        self.edges = apply_hysteresis_threshold(magnitude, low, high)

        if self.show_figures:
            io.imshow(self.edges)
            plt.show(block=False)

        if self.save_figures:
            io.imsave(filename + '.tif', util.img_as_ubyte(self.edges))
Example #16
def extract_hog(img):
    image = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    image = resize(image, (64, 64), mode='reflect')
    sobelx = sobel_v(image)
    sobely = sobel_h(image)
    modul_grad = (sobelx**2 + sobely**2)**(1 / 2)
    way_grad = abs(arctan2(sobely, sobelx))
    gistogram_places = zeros((8, 8, 9))
    for x in range(8):
        for y in range(8):
            for i in range(8):
                for j in range(8):
                    pixelx = 8 * x + i
                    pixely = 8 * y + j
                    gistogram_places[x, y, way(way_grad[
                        pixelx, pixely])] += modul_grad[pixelx, pixely]
    eps = 0.0000000001
    for x in range(7):
        for y in range(7):
            v = gistogram_places[x, y]
            v = append(v, gistogram_places[x + 1, y])
            v = append(v, gistogram_places[x, y + 1])
            v = append(v, gistogram_places[x + 1, y + 1])
            if ((x == 0) and (y == 0)):
                res = v / ((dot(v, v) + eps)**(1 / 2))
                created = 0
            else:
                res = append(res, v / ((dot(v, v) + eps)**(1 / 2)))
    return res
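The way helper used above is not part of the snippet; a plausible sketch, assuming it maps an unsigned gradient direction in [0, pi] to one of the 9 histogram bins:

from math import pi

def way(angle):
    # Map an angle in [0, pi] to a bin index in 0..8
    return min(int(angle / (pi / 9)), 8)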
Example #17
def TENV(im):
    # Tenengrad variance (Pech2000)
    gx = sobel_v(im)
    gy = sobel_h(im)
    fm = gx**2 + gy**2
    fm = fm.std()**2
    return fm
    def get_externals_pixel_feats(image: np.array, n_values: int, k_distance: int, eps=1e-4) -> np.array:
        h, w = image.shape
        grads_map = np.array([sobel_h(image), sobel_v(image)])
        grads_map = grads_map / (np.linalg.norm(grads_map, axis=0) + eps)

        def get_shifted_feats(directions):
            shifts = np.round(directions * k_distance).astype(int)
            grid = np.stack(np.meshgrid(np.arange(h), np.arange(w), indexing='ij'))

            # calculate coords of inner/outer pixels
            coords = grid + shifts
            # clip values
            coords[coords < 0] = 0
            coords[:, :, -k_distance:] = np.clip(coords[:, :, -k_distance:], 0, w - 1)
            coords[:, -k_distance:, :] = np.clip(coords[:, -k_distance:, :], 0, h - 1)
            # get required pixels
            feats = image[coords[0].reshape(-1), coords[1].reshape(-1)].reshape(h, w)
            feats = feats / (np.max(feats) + eps)
            feats = np.ceil((n_values - 1) * feats)
            feats = unfold(feats[None]).astype(int)
            return feats

        outer_feats = get_shifted_feats(grads_map)
        inner_feats = get_shifted_feats(-grads_map)
        return inner_feats, outer_feats
Example #19
def image_features(images, block_size, orientations):
    N, R, C = images.shape
    thetas = np.linspace(0, np.pi, orientations, endpoint=False)
    edge_x = np.empty_like(images)
    edge_y = np.empty_like(images)
    for idx, image in enumerate(images):
        edge_x[idx] = sobel_h(image)
        edge_y[idx] = sobel_v(image)
    Cb = C // block_size
    if (C % block_size): Cb += 1
    Rb = R // block_size
    if (R % block_size): Rb += 1
    #print("CB,RB",Cb,Rb)
    block_features = np.empty((N, Cb, Rb, orientations))
    for orientation, theta in enumerate(thetas):
        v_x = np.cos(theta)
        v_y = np.sin(theta)
        edges = edge_x * v_x + edge_y * v_y
        #print("edges",edges.shape)
        feature = np.maximum(edges, 0)
        block = (1, block_size, block_size)
        block_feature = block_reduce(feature, block, np.mean)
        #print("blocks",block_feature.shape)
        block_features[:, :, :, orientation] = block_feature
    return block_features.reshape(N, -1)
Example #20
def color_gmagnitude(img, sigma=None, norm=True, enhance=False):
    """

	"""
    if sigma is not None:
        img = gaussian(img, sigma=sigma, multichannel=True)

    dx = np.dstack([sobel_h(img[..., i]) for i in range(img.shape[-1])])
    dy = np.dstack([sobel_v(img[..., i]) for i in range(img.shape[-1])])

    Jx = np.sum(dx**2, axis=-1)
    Jy = np.sum(dy**2, axis=-1)
    Jxy = np.sum(dx * dy, axis=-1)

    D = np.sqrt(np.abs(Jx**2 - 2 * Jx * Jy + Jy**2 + 4 * Jxy**2))
    e1 = (Jx + Jy + D) / 2.  # First eigenvalue
    magnitude = np.sqrt(e1)

    if norm:
        magnitude /= magnitude.max()

    if enhance:
        magnitude = 1 - np.exp(-magnitude**2 / magnitude.mean())

    return magnitude.astype(np.float32)
Example #21
def harris_corners(img, window_size=3, k=0.04):
    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))
    # Step 1: partial derivatives
    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    # Step 2: products of the derivatives
    dxx = dx * dx
    dyy = dy * dy
    dxy = dx * dy

    # Step 3: form the second-moment matrix entries
    mxx = convolve(dxx, window)
    mxy = convolve(dxy, window)
    myy = convolve(dyy, window)  # weighted sums over the window

    # Step 4: compute the response
    for i in range(H):
        for j in range(W):
            M = np.array([[mxx[i, j], mxy[i, j]], [mxy[i, j], myy[i, j]]])
            response[i, j] = np.linalg.det(M) - k * np.trace(M)**2

    return response
Example #22
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Follow the math equation
    R=Det(M)-k(Trace(M)^2).

    Hint:
        You may use the function scipy.ndimage.filters.convolve,
        which is already imported above. If you use convolve(), remember to
        specify zero-padding to match our equations, for example:

        out_image = convolve(in_image, kernel, mode='constant', cval=0)

    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter

    Returns:
        response: Harris response image of shape (H, W)
    """

    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))

    # 1. Compute x and y derivatives (I_x, I_y) of an image
    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    ### YOUR CODE HERE
    pass
    ### END YOUR CODE

    return response
Example #23
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Follow the math equation
    R=Det(M)-k(Trace(M)^2).

    Hint:
        You may use the function scipy.ndimage.filters.convolve,
        which is already imported above.

    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter

    Returns:
        response: Harris response image of shape (H, W)
    """

    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))

    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    dx2 = convolve(dx**2, window)
    dy2 = convolve(dy**2, window)
    dxy = convolve(dx * dy, window)

    det_m = dx2 * dy2 - dxy**2
    trace_m = dx2 + dy2
    response = det_m - k * trace_m**2

    return response
Example #24
def generate_lighting_effect(img, light_pos, stroke):
    h, w, _ = img.shape

    # Gaussian blur + normalization
    n = [img[:, :, 0], img[:, :, 1], img[:, :, 2]]
    for i in range(3):
        n[i] = filters.gaussian(n[i], sigma=21)
        n[i] = (n[i] - np.min(n[i])) / (np.max(n[i]) - np.min(n[i]))

    # Compute the light-source direction at every pixel
    coords = np.zeros(img.shape)
    coords[:, :, 0] = np.arange(h * w).reshape((h, w)) % w
    coords[:, :, 1] = np.arange(h * w).reshape((h, w)) // w
    light_dir = light_pos - coords
    light_dir /= np.sqrt(np.sum(light_dir**2, axis=2, keepdims=True))

    # Generate the lighting effect
    e = []
    for i in range(3):
        dx = filters.sobel_v(n[i])
        dy = filters.sobel_h(n[i])
        normal = np.stack([-dx, -dy, np.ones((h, w)) * 0.2], axis=2)
        normal /= np.sqrt(np.sum(normal**2, axis=2, keepdims=True))
        e.append(np.sum(normal * light_dir, axis=2).clip(0, 1) * stroke)
    return np.stack(e, axis=2)
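A minimal usage sketch (the file name, light position, and stroke map are assumptions; stroke is taken to be a scalar or an (h, w) weight map so the final broadcast works):

import numpy as np
from skimage import filters, img_as_float, io

img = img_as_float(io.imread("portrait.png"))  # hypothetical float RGB input in [0, 1]
light_pos = np.array([0.0, 0.0, 500.0])  # hypothetical light position (x, y, z)
stroke = np.ones(img.shape[:2])  # placeholder per-pixel stroke weights
effect = generate_lighting_effect(img, light_pos, stroke)  # (h, w, 3) lighting map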
Example #25
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Follow the math equation
    R=Det(M)-k(Trace(M)^2).
        
    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter

    Returns:
        response: Harris response image of shape (H, W)
    """

    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))

    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    Sx2 = scipy.ndimage.filters.convolve(np.square(dx), window)
    Sxy = scipy.ndimage.filters.convolve(np.multiply(dx, dy), window)
    Sy2 = scipy.ndimage.filters.convolve(np.square(dy), window)

    response = np.multiply(Sx2,
                           Sy2) - np.square(Sxy) - k * np.square(Sx2 + Sy2)

    return response
Example #26
def get_diff_peaks(img, axis):
    # return the absolute value of the differential of the sum of image columns
    # axis is 0 for width and 1 for height images
    
    hsv = color.rgb2hsv(img)
    sat = hsv[:, :, 1]
    combo_norm = sat / np.max(sat)

    _, combo_norm = hog(combo_norm, orientations=6, pixels_per_cell=(16, 16),
                    cells_per_block=(1, 1), visualize=True)
    combo_norm = sobel_v(combo_norm)

    combo_sum = np.sum(combo_norm, axis=axis)
    combo_diff = np.abs(np.diff(combo_sum))

    # filter
    x_plot = combo_diff

    # remove values below the standard deviation
    x_plot[x_plot < np.std(x_plot)] = 0

    # pad out to full length after differentiation
    x_plot = np.append(x_plot, 0)

    # median filter
    smoothed = medfilt(x_plot, kernel_size=3)

    return smoothed
Example #27
def TENG(im):
    # Tenengrad (Krotkov86)
    gx = sobel_v(im)
    gy = sobel_h(im)
    fm = gx**2 + gy**2
    fm = fm.mean()
    return fm
Example #28
 def __call__(self, img_small):
     m = morphology.square(self.square_size)
     img_th = morphology.black_tophat(img_small, m)
     img_sob = abs(filters.sobel_v(img_th))
     img_closed = morphology.closing(img_sob, m)
     threshold = filters.threshold_otsu(img_closed)
     return img_closed > threshold
Example #29
def harris_corners(img, window_size=3, k=0.04):
    """
    Compute Harris corner response map. Follow the math equation
    R=Det(M)-k(Trace(M)^2).
    Hint:
        You may use the function scipy.ndimage.filters.convolve,
        which is already imported above.
    Args:
        img: Grayscale image of shape (H, W)
        window_size: size of the window function
        k: sensitivity parameter
    Returns:
        response: Harris response image of shape (H, W)
    """

    H, W = img.shape
    window = np.ones((window_size, window_size))

    response = np.zeros((H, W))

    dx = filters.sobel_v(img)
    dy = filters.sobel_h(img)

    ### YOUR CODE HERE
    dx_square = convolve(dx**2, window, mode='constant')
    dy_square = convolve(dy**2, window, mode='constant')
    dxdy = convolve(dx * dy, window, mode='constant')
    det_M = dx_square * dy_square - dxdy**2
    trace_M = dx_square + dy_square
    response = det_M - k * (trace_M**2)
    ### END YOUR CODE

    return response
Example #30
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten block of histograms into a 1D feature vector
        Here, we treat the entire patch of histograms as our block
    4. Normalize flattened block
        Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi).astype(int) % 180

    # Group entries of G and theta into cells of shape pixels_per_cell, (M, N)
    #   G_cells.shape = theta_cells.shape = (H//M, W//N)
    #   G_cells[0, 0].shape = theta_cells[0, 0].shape = (M, N)
    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    # For each cell, keep track of the gradient histogram of size n_bins
    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    for i in range(rows):
        for j in range(cols):
            G_patch = G_cells[i, j]
            theta_patch = theta_cells[i, j]

            for m in range(pixels_per_cell[0]):
                for n in range(pixels_per_cell[1]):
                    bin_idx = int(theta_patch[m, n] / degrees_per_bin)
                    # print(theta_patch[m, n], bin_idx)
                    cells[i, j, bin_idx] += G_patch[m, n]

    # Flatten the full grid of cell histograms into one block and L2-normalize it
    block = cells.flatten()
    block = block / np.linalg.norm(block)

    return block
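A minimal usage sketch for hog_descriptor (the random patch is a placeholder; the snippet assumes filters and view_as_blocks are already imported at module level):

import numpy as np
from skimage import filters
from skimage.util import view_as_blocks

patch = np.random.rand(16, 16)  # any (H, W) patch with H and W multiples of 8
block = hog_descriptor(patch, pixels_per_cell=(8, 8))
print(block.shape)  # (2 * 2 * 9,) = (36,)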
def g(img):
    """
    Computes the vertical and horizontal Sobel gradients of the image (returned as g_v, g_h)
    """
    g_h = sobel_h(img)
    g_v = sobel_v(img)
    return g_v, g_h
Example #32
def harris_corner(img, alpha=0.04, thre=2, sigma=1, supression_window_size=3):
    # This function takes an image plus optional parameters alpha, threshold, and sigma,
    # and outputs a binary mask with corners labeled 1 and non-corners labeled 0.
    # It first computes the Harris response, then applies thresholding, and finishes
    # with non-maxima suppression over a selected window size.
    # The returned mask has the same size as the input image.
    window = gen_2_D_Gaussian(5, sigma)
    harris_response = np.zeros(img.shape)
    # finding the derivative of the image
    image_dx = sobel_h(img)
    image_dy = sobel_v(img)
    # compute the second-moment matrix and the corner response
    for x in range(2, img.shape[1] - 2):
        for y in range(2, img.shape[0] - 2):
            M_xy = get_second_moment(image_dx, image_dy, window, y, x)
            s = np.linalg.eigvals(M_xy)
            harris_response[y, x] = s[0] * s[1] - alpha * (s[0] + s[1])**2

    # plt.imshow(harris_response, cmap="gray")
    # plt.show()
    # perform thresholding
    mask = np.where(harris_response >= thre, 1, 0)
    # non-maxima supression
    mask = non_maxima_supression(harris_response, mask, supression_window_size)
    return mask
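The gen_2_D_Gaussian, get_second_moment, and non_maxima_supression helpers are external to this snippet; a plausible sketch of the first one, assuming it builds a normalized size x size Gaussian window:

import numpy as np

def gen_2_D_Gaussian(size, sigma):
    # size x size Gaussian window, normalized to sum to 1
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()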
Example #33
    def get_vert_lines(self, sz_filt):

        img = self.CleanedImage

        img -= np.min(img)
        img /= np.max(img)

        # Smooth image
        if sz_filt > 0:
            img = filters.median(img, morphology.disk(sz_filt))

        # Use vertical Sobel filter (since we know the lines are always the same orientation)
        sb = filters.sobel_v(img)

        # Normalize data between 0 and 1
        sb -= np.min(sb)
        sb /= np.max(sb)

        # Find Yen threshold
        thr = filters.threshold_yen(sb)

        thr_img = sb < thr
        if np.sum(thr_img) > np.sum(~thr_img):
            thr_img = ~thr_img

        thr_img = morphology.binary_dilation(thr_img, morphology.square(4))
        thr_img = morphology.binary_erosion(thr_img, morphology.square(4))

        return thr_img
Example #34
def define_threshold(imagem):

    sobel_vertical = sobel_v(imagem)
    sobel_horizontal = sobel_h(imagem)

    h, c = imagem.shape
    return np.sum(np.abs(sobel_vertical + sobel_horizontal)) / (h * c)
Example #35
 def get_edges(self, img):
     img = np.asarray(img)
     if self.edge_type == "canny":
         edges = canny(img, sigma=self.sigma).astype(float)
     else:
         edges = np.abs(sobel_h(img)) + np.abs(sobel_v(img))
     return Image.fromarray(edges)
Example #36
def getFeature(img, Merkmal, nbins): #returns the given feature for a picture
    if Merkmal == 'mean':
        return np.mean(img, axis=(0,1))
    if Merkmal == 'std':
        return np.std(img, axis=(0,1))
    if Merkmal == 'histogram1D':
        rHist = np.histogram(img[:,:,0], bins = nbins, range=(0,1))[0] 
        gHist = np.histogram(img[:,:,1], bins = nbins, range=(0,1))[0]
        bHist = np.histogram(img[:,:,2], bins = nbins, range=(0,1))[0]
        return np.hstack((rHist, gHist, bHist))  # hstack joins the individual histograms into one array by stacking them horizontally
    if Merkmal == 'histogram3D':
        imgReshaped = img.reshape((img.shape[0]*img.shape[1],3))  # reshape so that each pixel occupies one row
        return np.histogramdd(imgReshaped, bins = [nbins,nbins,nbins], range=((0,1),(0,1),(0,1)))[0].flatten()
    if Merkmal == 'histogramG' :
        return np.histogram(img, bins = nbins)[0]
    if Merkmal == 'edge_count' :
        g_img = color.rgb2gray(img)
        f_img = gaussian_filter(g_img, 2)  # apply a Gaussian blur to the image with sigma = 2
        sobel_h = filters.sobel_h(f_img)  # find the horizontal edges
        sobel_v = filters.sobel_v(f_img)  # find the vertical edges
        intensity = np.linalg.norm(np.stack((sobel_h, sobel_v)), axis=0)  # combine h & v into the absolute edge magnitude
        sums = np.array([0])
        for zeile in intensity:
            sums = np.append(sums, zeile.sum())
        return sums 
Example #37
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten block of histograms into a 1D feature vector
        Here, we treat the entire patch of histograms as our block
    4. Normalize flattened block
        Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    # Group entries of G and theta into cells of shape pixels_per_cell, (M, N)
    #   G_cells.shape = theta_cells.shape = (H//M, W//N)
    #   G_cells[0, 0].shape = theta_cells[0, 0].shape = (M, N)
    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    # For each cell, keep track of the gradient histogram of size n_bins
    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    # YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            cell_hist = np.histogram(theta_cells[i, j].flatten(),
                                     bins=n_bins,
                                     range=(0, 180),
                                     weights=G_cells[i, j].flatten())[0]
            cells[i, j, :] = cell_hist
    # normalize the HoG
    cells = (cells - np.mean(cells)) / np.std(cells, ddof=1)
    block = cells.flatten()
    # YOUR CODE HERE

    return block
Example #38
def L1difference(img_true, img_pred):
    [h, w] = img_true.shape
    true_gx = sobel_h(img_true) / 4.0
    true_gy = sobel_v(img_true) / 4.0
    pred_gx = sobel_h(img_pred) / 4.0
    pred_gy = sobel_v(img_pred) / 4.0
    dx = np.abs(true_gx - pred_gx)
    dy = np.abs(true_gy - pred_gy)
    prediction_error = np.sum(dx + dy)
    prediction_error = 128 * 128 * prediction_error / (h * w)
    eps = 0.0001
    if prediction_error > eps:
        prediction_error = 10 * np.log(
            (255 * 255) / prediction_error) / np.log(10)
    else:
        prediction_error = 10 * np.log((255 * 255) / eps) / np.log(10)
    return prediction_error
Example #39
def test_vsobel_vertical():
    """Vertical Sobel on an edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.sobel_v(image)
    # Fudge the eroded points
    j[np.abs(i) == 5] = 10000
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Example #40
def get_gradients(img):
    img /= np.max(img)

    horiz = sobel_h(img)
    vert = sobel_v(img)

    magnitude = np.sqrt(horiz**2 + vert**2) + 1e-5
    vert /= magnitude
    horiz /= magnitude
    return (vert, horiz)
Example #41
def dual_gradient_energy(img):
    """
    Calculates the energy at each pixel in the image and stores it in a new matrix.
    :param img: given image
    :return: W x H array of floats holding the energy of each pixel in the image
    """
    red = img[:, :, 0]
    green = img[:, :, 1]
    blue = img[:, :, 2]
    gradient_horizontal_red = filters.sobel_h(red)
    gradient_vertical_red = filters.sobel_v(red)
    gradient_horizontal_green = filters.sobel_h(green)
    gradient_vertical_green = filters.sobel_v(green)
    gradient_horizontal_blue = filters.sobel_h(blue)
    gradient_vertical_blue = filters.sobel_v(blue)
    horizontal_energy = numpy.square(gradient_horizontal_red) + numpy.square(gradient_horizontal_green) + numpy.square(gradient_horizontal_blue)
    vertical_energy = numpy.square(gradient_vertical_red) + numpy.square(gradient_vertical_green) + numpy.square(gradient_vertical_blue)
    energy_matrix = horizontal_energy + vertical_energy
    return energy_matrix
Example #42
def dual_gradient_energy(img):
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]

    hor_R = filters.sobel_h(R)
    hor_G = filters.sobel_h(G)
    hor_B = filters.sobel_h(B)

    ver_R = filters.sobel_v(R)
    ver_G = filters.sobel_v(G)
    ver_B = filters.sobel_v(B)

    sq_x = np.add(np.square(hor_R), np.add(np.square(hor_G), np.square(hor_B)))
    sq_y = np.add(np.square(ver_R), np.add(np.square(ver_G), np.square(ver_B)))

    energy = np.add(sq_x, sq_y)

    return energy
Example #43
def sobel_triple(frame):
    """
    Compute horizontal/vertical Sobel intensities and map them to the red/blue channels. The green
    channel gets the un-directed Sobel filter. Very pleasing effect.
    """
    output = np.zeros(frame.shape, dtype=np.uint8)
    frame = grayscale(frame)
    output[:, :, 0] = normalize(np.abs(filters.sobel_h(frame)))
    output[:, :, 1] = normalize(filters.sobel(frame))
    output[:, :, 2] = normalize(np.abs(filters.sobel_v(frame)))
    return output
def stack_origi_sobel(df):
    """stack original image with """
    df_preproc = pd.DataFrame(df['Image'])
    df_preproc['sobelh'] = df_preproc['Image'].apply(lambda im: sobel_h(im.reshape(96, 96)).reshape(-1))
    df_preproc['sobelv'] = df_preproc['Image'].apply(lambda im: sobel_v(im.reshape(96, 96)).reshape(-1))
    col = 'Image'
    X = np.vstack(df_preproc[col].values).reshape(-1, 1, 96, 96)
    col = 'sobelh'
    tempx1 = np.vstack(df_preproc[col].values).reshape(-1, 1, 96, 96)
    col = 'sobelv'
    tempx2 = np.vstack(df_preproc[col].values).reshape(-1, 1, 96, 96)
    X = np.concatenate((X, tempx1, tempx2), axis=1).astype(np.float32)
    return X
Example #45
def sobel_hv(frame):
    """
    Compute horizontal/vertical Sobel intensities and map them to the red/blue channels. The green
    channel is left zero.
    """
    output = np.zeros(frame.shape, dtype=np.uint8)
    frame = grayscale(frame)
    # dilation doesn't really improve much
    # morphology.dilation(normalize(np.abs(filters.sobel_h(frame))), out=output[:, :, 0])
    # morphology.dilation(normalize(np.abs(filters.sobel_v(frame))), out=output[:, :, 2])
    output[:, :, 0] = normalize(np.abs(filters.sobel_h(frame)))
    output[:, :, 2] = normalize(np.abs(filters.sobel_v(frame)))
    return output
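The grayscale and normalize helpers used by sobel_hv and sobel_triple above are not shown; a plausible sketch, assuming grayscale collapses the frame to luminance and normalize stretches values into the uint8 range:

import numpy as np
from skimage.color import rgb2gray

def grayscale(frame):
    # Collapse an RGB frame to a single luminance channel in [0, 1]
    return rgb2gray(frame)

def normalize(channel):
    # Stretch values to 0..255 and convert to uint8 for display
    channel = channel - channel.min()
    peak = channel.max()
    if peak > 0:
        channel = channel / peak
    return (255 * channel).astype(np.uint8)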
Example #46
 def stack_origi_sobel(self):
     """stack original image with """
     df_preproc = pd.DataFrame(self.df['Image'])
     df_preproc['sobelh'] = df_preproc['Image'].apply(lambda im: sobel_h(im.reshape(96, 96)).reshape(-1))
     df_preproc['sobelv'] = df_preproc['Image'].apply(lambda im: sobel_v(im.reshape(96, 96)).reshape(-1))
     col = 'Image'
     self.X = np.vstack(df_preproc[col].values).reshape(-1,1,96,96)
     self.y = self.df[self.df.columns[:-1]].values
     col = 'sobelh'
     tempx1 = np.vstack(df_preproc[col].values).reshape(-1, 1, 96, 96)
     col = 'sobelv'
     tempx2 = np.vstack(df_preproc[col].values).reshape(-1, 1, 96, 96)
     self.X = np.concatenate((self.X,tempx1,tempx2), axis=1)
Example #47
def dual_gradient_energy(img):
    """
    Calculating the energy gradient.
    :return: 3D image matrix; it is kept 3D to enable plotting.
    """
    R_sobel_h = sobel_h(img[:, :, 0])
    R_sobel_v = sobel_v(img[:, :, 0])
    G_sobel_h = sobel_h(img[:, :, 1])
    G_sobel_v = sobel_v(img[:, :, 1])
    B_sobel_h = sobel_h(img[:, :, 2])
    B_sobel_v = sobel_v(img[:, :, 2])
    a = img[:, 0, 0].size
    b = img[0, :, 0].size
    energy = numpy.zeros((a, b, 3))
    sob = numpy.zeros((a, b, 3))
    for i in range(0, img[:, 0, 0].size):
        for j in range(0, img[0, :, 0].size):
            energy[i, j, 0] = R_sobel_h[i, j]**2 + R_sobel_v[i, j]**2
            energy[i, j, 1] = G_sobel_h[i, j]**2 + G_sobel_v[i, j]**2
            energy[i, j, 2] = B_sobel_h[i, j]**2 + B_sobel_v[i, j]**2
            sob[i, j, :] = energy[i, j, 0] + energy[i, j, 1] + energy[i, j, 2]
    return sob
Example #48
def get_spatial_information_score(image: Image) -> float:
    """Use Sobel filters to find the edge energy of an image.

    .. math::
        SI_r = \sqrt{S_v^2 + S_h^2}

        SI_{mean} = \frac{1}{P}\sum{SI_r,}

    Where :math:`SI_r` is the spatial energy for each pixel and :math:`P` the
    number of pixels.

    .. seealso:: http://vintage.winklerbros.net/Publications/qomex2013si.pdf
    """
    img = np.asarray(image)
    num_pixels = img.shape[0] * img.shape[1]
    energy = np.sum(np.sqrt(sobel_v(img) ** 2 + sobel_h(img) ** 2))
    return energy / num_pixels
Example #49
def eoh(image):
    data = { 0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0 }
    canny_image = canny(image, low_threshold=40)
    row, col = canny_image.shape

    sobel_v_image = sobel_v(image)
    sobel_h_image = sobel_h(image)

    for r in range(row):
        for c in range(col):
            if not canny_image[r][c]:
                continue
            interval = which_interval(sobel_v_image[r][c], sobel_h_image[r][c])
            if interval in data:
                data[interval] += 1

    return data
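The which_interval helper is not part of the snippet; a plausible sketch, assuming it buckets the edge orientation at a pixel into one of the 9 intervals keyed in data:

import math

def which_interval(gv, gh):
    # Bucket the unsigned edge orientation into one of 9 equal intervals over [0, pi)
    angle = math.atan2(gh, gv) % math.pi
    return min(int(angle / (math.pi / 9)), 8)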
Example #50
def sobel_rgb(I,features):
    """Like skimage.sobel_{h,v} but works on RGB images. Returns a
    NxMx6 image with channels being hr, hg, hb, vr, vg, vb.

    """
    #denoised = gaussian_filter(image, 2)
    I = np.atleast_3d(I)
    if features=='sobelHandv':
        return np.dstack(
            [sobel_h(I[..., c]) for c in range(I.shape[-1])]+
            [sobel_v(I[..., c]) for c in range(I.shape[-1])]
        )
    else:
        return np.dstack(
            [sobel(I[..., c]) for c in range(I.shape[-1])]# +
            #[sobel_v(I[..., c]) for c in range(I.shape[-1])]
        )
def dual_gradient_energy(img):
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]
    return sobel_h(R)**2 + sobel_v(R)**2 + sobel_h(G)**2 + \
        sobel_v(G)**2 + sobel_h(B)**2 + sobel_v(B)**2
Example #52
def test_vsobel_zeros():
    """Vertical sobel on an array of all zeros."""
    result = filters.sobel_v(np.zeros((10, 10)), np.ones((10, 10), bool))
    assert_allclose(result, 0)
Example #53
def test_vsobel_mask():
    """Vertical Sobel on a masked array should be zero."""
    np.random.seed(0)
    result = filters.sobel_v(np.random.uniform(size=(10, 10)),
                             np.zeros((10, 10), bool))
    assert_allclose(result, 0)
Example #54
def test_vsobel_horizontal():
    """vertical Sobel on a horizontal edge should be zero."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = filters.sobel_v(image)
    assert_allclose(result, 0)
Example #55
def edges2fractures(ts, Fs=48000, edge_type='Sobel', smoothing=None):
    """
        edges2fractures predicts peaks locations based on edges
        in the signal spectrogram

        Inputs
        ------

        Fs: scalar
            the frequency of the signal
        edge_type: string
            the type of the edge detector - 'Sobel' or 'Canny'
        smoothing: scalar or None
            if None, no smoothing is applied, otherwise
            smoothed with Gaussian filter with sigma=smoothing

        Returns
        -------
        array containing the times of fractures
    """

    from skimage import filters
    from skimage import feature

    # TODO Possibly allow to change the window
    NFFT = 2048       # the length of the windowing segments
    # convert the frequency to integer
    Fs = int(Fs)
    # calculate the spectrogram
    plt.figure(figsize = (10,3))
    Pxx, freq, bins, im = plt.specgram(ts, NFFT=NFFT, Fs=Fs, noverlap=900, mode='psd', cmap='gray', aspect = 'auto')

    # applying smoothing
    if smoothing is not None:
        Pxx = filters.gaussian(Pxx, smoothing)


    # extracting edges
    if edge_type=='Sobel':
        edges = filters.sobel_v(Pxx)
        # convert to binary edges
        # TODO fix arbitrary threshold!!!
        edges = (np.abs(edges)>20)
    elif edge_type=='Canny':
        edges = feature.canny(Pxx)
    else:
        raise ValueError('Invalid Edge Detector Type')

    #plt.figure(figsize = (10,3))
    #plt.imshow(edges, cmap='gray')
    # determining fracture locations
    colSums = np.sum(edges, axis=0)


    # truncate at some number of standard deviations (here 3)
    std = np.std(colSums)
    md = np.median(colSums)
    frac_idx, = np.where(colSums > md+3*std)
    if len(frac_idx) == 0:
        return []

    # plotting the results
    plt.figure(figsize = (10,3))
    plt.plot(np.arange(len(ts))/Fs,ts)
    # plt.plot(colSums)
    fig = [plt.axvline(bins[_x], linewidth=1, color='g') for _x in frac_idx]

    return bins[frac_idx]
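A minimal usage sketch with a synthetic signal (the burst injection and parameter choices are placeholders; the function also assumes numpy as np and matplotlib.pyplot as plt are imported at module level):

import numpy as np
import matplotlib.pyplot as plt

Fs = 48000
t = np.arange(Fs) / Fs  # one second of signal
ts = np.sin(2 * np.pi * 440 * t)  # 440 Hz tone
ts[Fs // 2:Fs // 2 + 200] += 5.0 * np.random.randn(200)  # simulated fracture burst
fracture_times = edges2fractures(ts, Fs=Fs, edge_type='Canny', smoothing=1.0)
print(fracture_times)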