Example #1
    def __calmetrics(self, pred, target, mse_factor, accthrs, bumpinessclip=0.05, ignore_zero=True):
        metrics = np.zeros((1, 7+len(accthrs)), dtype=float)

        if target.sum() == 0:
            return metrics

        pred_ = np.copy(pred)
        if ignore_zero:
            pred_[target==0.0] = 0.0
            numPixels = (target>0.0).sum() # number of valid pixels
        else:
            numPixels = target.size

        # MSE (scaled by mse_factor)
        metrics[0,0] = np.square(pred_-target).sum() / numPixels * mse_factor

        # RMS
        metrics[0,1] = np.sqrt(metrics[0,0])

        # log RMS
        logrms = (np.ma.log(pred_)-np.ma.log(target))
        metrics[0,2] = np.sqrt(np.square(logrms).sum() / numPixels)

        # absolute relative
        metrics[0,3] = np.ma.divide(np.abs(pred_-target), target).sum() / numPixels

        #square relative
        metrics[0,4] = np.ma.divide(np.square(pred_-target), target).sum() / numPixels

        # accuracies
        acc = np.ma.maximum(np.ma.divide(pred_,target), np.ma.divide(target, pred_))
        for i, thr in enumerate(accthrs):
            metrics[0, 5+i] = (acc < thr).sum() / numPixels * 100.

        # badpix (index 8 assumes three accuracy thresholds above)
        metrics[0, 8] = (np.abs(pred_-target) > 0.07).sum() / numPixels * 100.

        # bumpiness -- Frobenius norm of the Hessian matrix
        diff = np.asarray(pred-target, dtype='float64') # PRED or PRED_
        chn = diff.shape[2] if len(diff.shape) > 2 else 1
        bumpiness = np.zeros_like(pred_).astype('float')
        for c in range(0,chn):
            if chn > 1:
                diff_ = diff[:, :, c]
            else:
                diff_ = diff
            dx = skf.scharr_v(diff_)
            dy = skf.scharr_h(diff_)
            dxx = skf.scharr_v(dx)
            dxy = skf.scharr_h(dx)
            dyy = skf.scharr_h(dy)
            dyx = skf.scharr_v(dy)
            hessiannorm = np.sqrt(np.square(dxx) + np.square(dxy) + np.square(dyy) + np.square(dyx))
            bumpiness += np.clip(hessiannorm, 0, bumpinessclip)
        bumpiness = bumpiness[target>0].sum() if ignore_zero else bumpiness.sum()
        metrics[0, 9] = bumpiness / chn / numPixels * 100.

        return metrics
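For reference, the loop over accthrs above implements the usual depth-estimation accuracy metrics (percentage of valid pixels with max(pred/target, target/pred) below a threshold). A minimal standalone check on made-up arrays, with the common 1.25, 1.25**2, 1.25**3 thresholds assumed rather than taken from this class:

import numpy as np

# Hypothetical prediction and ground truth; zeros in the target mark invalid pixels.
pred = np.array([[1.0, 2.0], [3.0, 4.0]])
target = np.array([[1.1, 1.9], [3.5, 0.0]])
valid = target > 0
ratio = np.maximum(pred[valid] / target[valid], target[valid] / pred[valid])
for thr in (1.25, 1.25 ** 2, 1.25 ** 3):
    # percentage of valid pixels whose ratio to the target stays below thr
    print(thr, (ratio < thr).mean() * 100.0)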
Example #2
 def get_bumpiness(self, gt, algo_result):
     # Frobenius norm of the Hesse matrix
     diff = np.asarray(algo_result - gt, dtype='float64')
     dx = skf.scharr_v(diff)
     dy = skf.scharr_h(diff)
     dxx = skf.scharr_v(dx)
     dxy = skf.scharr_h(dx)
     dyy = skf.scharr_h(dy)
     dyx = skf.scharr_v(dy)
     bumpiness = np.sqrt(
         np.square(dxx) + np.square(dxy) + np.square(dyy) + np.square(dyx))
     bumpiness = np.clip(bumpiness, 0, self.clip)
     return bumpiness
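Both snippets above assume skf is an alias for skimage.filters and that self.clip holds the clipping value (0.05 in Example #1). A self-contained sketch under those assumptions, followed by a quick call on synthetic data:

import numpy as np
import skimage.filters as skf

def bumpiness_map(gt, algo_result, clip=0.05):
    # Frobenius norm of the Hessian of the error map, clipped per pixel
    diff = np.asarray(algo_result - gt, dtype='float64')
    dx, dy = skf.scharr_v(diff), skf.scharr_h(diff)
    dxx, dxy = skf.scharr_v(dx), skf.scharr_h(dx)
    dyx, dyy = skf.scharr_v(dy), skf.scharr_h(dy)
    return np.clip(np.sqrt(dxx**2 + dxy**2 + dyy**2 + dyx**2), 0, clip)

gt = np.zeros((64, 64))
noisy = gt + 0.01 * np.random.default_rng(0).standard_normal(gt.shape)
print(bumpiness_map(gt, noisy).mean())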
Example #3
 def __init_mats(self):
     enlarge_x, enlarge_y = self.base_op.visited_mat.shape
     margin = self.base_op.margin
     self.isophote_x = filters.scharr_h(self.origin_sample)
     self.isophote_y = filters.scharr_v(self.origin_sample)
     self.image = np.zeros((enlarge_x, enlarge_y))
     self.image[margin:enlarge_x - margin,
                margin:enlarge_y - margin] = self.base_op.sample
     # specified for boundary and gradient get
     self.boundary_mat = np.ones((enlarge_x, enlarge_y))
     self.boundary_mat[margin:enlarge_x - margin, margin:enlarge_y -
                       margin] = self.base_op.visited_mat[margin:enlarge_x -
                                                          margin,
                                                          margin:enlarge_y -
                                                          margin]
     # get sample block list
     sample_block_list = []
     # get corresponding coordinate
     coordinate_list = []
     for (x, y), v in np.ndenumerate(self.image):
         if v == 0:
             continue
         tmp = self.image[(x - margin):(x + 1 + margin),
                          (y - margin):(y + 1 + margin)]
         if tmp[tmp == 0].shape[0] == 0:
             sample_block_list.append(tmp)
             coordinate_list.append((x - margin, y - margin))
         self.base_op.visited_mat[x, y] = 1
     sigma = self.base_op.window_size / 6.4
     self.gauss_mask = self.base_op.gaussian2D(
         (self.base_op.window_size, self.base_op.window_size), sigma)
     return [sample_block_list, coordinate_list]
Example #4
    def precomputation(self, eye_image):
        # compute gradients across x and y
        # (scharr gives better results than sobel)
        x_gradient = scharr_v(eye_image)
        y_gradient = scharr_h(eye_image)
        edgemap = np.sqrt(x_gradient**2 + y_gradient**2) + 0.00001

        # remove gradients under a specific threshold
        # (as suggested on http://thume.ca/projects/2012/11/04/simple-accurate-eye-center-tracking-in-opencv/)
        mean_gradient = np.mean(edgemap)
        #width, height = np.shape(eye_image)
        std_gradient = np.std(edgemap)
        threshold = 0.3 * std_gradient + mean_gradient
        under_threshold_indices = edgemap < threshold
        x_gradient[under_threshold_indices] = 0
        y_gradient[under_threshold_indices] = 0

        # normalize the gradients
        maxedge = np.amax(edgemap)
        x_gradient /= maxedge
        y_gradient /= maxedge
        self.debug_edgemap = np.sqrt(x_gradient**2 + y_gradient**2)

        # compute the inverse-colored image
        inverse = gaussian(1.0 - eye_image)
        return x_gradient, y_gradient, inverse
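The thresholding step above (mean plus 0.3 times the standard deviation of the gradient magnitude) can be tried on its own. A small sketch on skimage's sample camera image, independent of this class:

import numpy as np
from skimage import data
from skimage.filters import scharr_h, scharr_v

img = data.camera() / 255.0
gx, gy = scharr_v(img), scharr_h(img)
edgemap = np.sqrt(gx ** 2 + gy ** 2) + 1e-5
threshold = 0.3 * np.std(edgemap) + np.mean(edgemap)
gx[edgemap < threshold] = 0
gy[edgemap < threshold] = 0
print(float((edgemap >= threshold).mean()))  # fraction of gradients kept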
Example #5
def energy_image(im):
    """Computes the energy of an image.
    Parameters
    ----------
    im : np.array (uint8)
        The original image of size M*N*3
    Output
    ------
    energy : np.array (double)
        The energy of the image
    """
    # Check image
    check_image(im)
    # Convert to grayscale
    gray_im = color.rgb2gray(im)
    gray_im = gray_im.astype(np.double)
    # Gaussian blur to suppress fine detail before computing gradients
    gray_im = ndimage.gaussian_filter(gray_im, sigma=1)
    # Compute gradients with the Scharr operator
    im_y = filters.scharr_v(gray_im)
    im_x = filters.scharr_h(gray_im)
    # Energy is the L1 norm of the gradient; an L2 norm is a common alternative
    return np.absolute(im_x) + np.absolute(im_y)
Example #6
File: CPT.py Project: xw310/CS534
 def generating_mat(self):
     enlarge_x, enlarge_y = self.visited.shape
     margin = self.margin
     self.x_grad = filters.scharr_h(self.OriginSample)
     self.y_grad = filters.scharr_v(self.OriginSample)
     self.img = np.zeros((enlarge_x, enlarge_y))
     self.img[margin:enlarge_x - margin,
              margin:enlarge_y - margin] = self.sample
     # specified for boundary and gradient get
     self.MatBound = np.ones((enlarge_x, enlarge_y))
     self.MatBound[margin:enlarge_x - margin, margin:enlarge_y -
                   margin] = self.visited[margin:enlarge_x - margin,
                                          margin:enlarge_y - margin]
     # get sample block list
     SampleImg_list_of_block = []
     # get corresponding coordinate
     list_of_coordinate = []
     for (x, y), v in np.ndenumerate(self.img):
         if v == 0:
             continue
         tmp = self.img[(x - margin):(x + 1 + margin),
                        (y - margin):(y + 1 + margin)]
         if tmp[tmp == 0].shape[0] == 0:
             SampleImg_list_of_block.append(tmp)
             list_of_coordinate.append((x - margin, y - margin))
         self.visited[x, y] = 1
     sigma = self.window_size / 6.4
     self.GaussMask = gaussian((self.window_size, self.window_size), sigma)
     return [SampleImg_list_of_block, list_of_coordinate]
Example #7
def test_scharr_v_vertical():
    """Vertical Scharr on an edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.scharr_v(image)
    # Check that the result matches the transform direction
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Example #8
def test_vscharr_vertical():
    """Vertical Scharr on an edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.scharr_v(image)
    # Fudge the eroded points
    j[np.abs(i) == 5] = 10000
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Example #9
def test_scharr_v_vertical():
    """Vertical Scharr on an edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.scharr_v(image)
    # Fudge the eroded points
    j[np.abs(i) == 5] = 10000
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Example #10
def compute_scharr(img, mask=None):
    '''
    Compute the gradient direction and magnitude using the Scharr filter.
    Takes: segmented image array
    Returns: arrays of gradient angles and gradient magnitudes
    '''

    s_h = scharr_h(img, mask=mask)
    s_v = scharr_v(img, mask=mask)
    return np.arctan2(s_v, s_h), np.sqrt(s_v**2 + s_h**2)
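A possible usage sketch for compute_scharr as defined above; skimage's sample camera image stands in for a segmented image, and the imports cover the names the function uses internally:

import numpy as np  # used inside compute_scharr
from skimage import data
from skimage.filters import scharr_h, scharr_v  # also used inside compute_scharr

angles, magnitudes = compute_scharr(data.camera() / 255.0)
print(angles.shape, float(magnitudes.max()))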
Example #11
    def detect_edges(self,
                     operator="sobel_vertical",
                     kernel_size=3,
                     optional_mask=None,
                     **kwargs):
        """
        :param optional_mask: See skimage.filters.scharr_v for details.
        :param kernel_size: int size to use for edge detection kernels
        :param operator: One of sobel_vertical, sobel_horizontal,prewitt_horizontal,prewitt_vertical or laplace.
        :return: Detected edges
        """

        available_operators = [
            "sobel_horizontal", "sobel_vertical", "prewitt_horizontal",
            "prewitt_vertical", "laplace", "roberts_cross_neg",
            "roberts_horizontal", "scharr_vertical", "scharr_horizontal",
            "canny", "roberts"
        ]

        if operator not in available_operators:
            raise ValueError(f"Edge detection with {operator} not supported.")

        kernels = {
            'sobel_horizontal':
            lambda x: cv2.Sobel(x, cv2.CV_64F, 1, 0, ksize=kernel_size),
            'sobel_vertical':
            lambda x: cv2.Sobel(x, cv2.CV_64F, 0, 1, ksize=kernel_size),
            'roberts':
            lambda x: filters.roberts(x, optional_mask),
            'roberts_cross_neg':
            lambda x: ndimage.convolve(x, np.array([[0, -1], [1, 0]])),
            'roberts_cross_pos':
            lambda x: filters.roberts_pos_diag(x, optional_mask),
            'laplace':
            lambda x: cv2.Laplacian(x, cv2.CV_64F, ksize=kernel_size),
            'prewitt_horizontal':
            lambda x: filters.prewitt_h(x, optional_mask),
            'prewitt_vertical':
            lambda x: filters.prewitt_v(x, optional_mask),
            'scharr_horizontal':
            lambda x: filters.scharr_h(x, optional_mask),
            'scharr_vertical':
            lambda x: filters.scharr_v(x, optional_mask)
        }

        print(f"Detecting edges with the {operator} operator")
        # denoise and gray

        if self.color_mode == "gray":
            denoised = self.smooth(**kwargs)
        else:
            denoised = gray_images(self.smooth(**kwargs))

        return list(map(kernels[operator], denoised))
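A stripped-down, hypothetical version of the same dispatch idea outside the class, keeping only the two Scharr entries and skipping the denoising step:

import numpy as np
from skimage import data, filters

kernels = {
    'scharr_horizontal': lambda x: filters.scharr_h(x),
    'scharr_vertical': lambda x: filters.scharr_v(x),
}
gray = data.camera() / 255.0
edges = kernels['scharr_vertical'](gray)
print(float(np.abs(edges).max()))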
Example #12
def my_structure_tensor(img, smooth_mask):
    """
    Structure tensor is defined as
    [ Axx   Axy ]
    [ Axy   Axx ]
    computes derivative with Scharr filter and smooths with given
    smooth_mask.
    """
    imx = scharr_h(img)
    imy = scharr_v(img)

    Axx = convolve(imx * imx, smooth_mask)
    Axy = convolve(imx * imy, smooth_mask)
    Ayy = convolve(imy * imy, smooth_mask)

    return Axx, Axy, Ayy
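A usage sketch for my_structure_tensor, assuming convolve in the snippet refers to scipy.ndimage.convolve; the Harris-style corner response at the end is only an illustration, not part of the original:

import numpy as np
from scipy.ndimage import convolve  # assumed source of the convolve used above
from skimage import data
from skimage.filters import scharr_h, scharr_v

img = data.camera() / 255.0
smooth_mask = np.ones((5, 5)) / 25.0  # simple box window for smoothing
Axx, Axy, Ayy = my_structure_tensor(img, smooth_mask)
response = (Axx * Ayy - Axy ** 2) - 0.05 * (Axx + Ayy) ** 2
print(float(response.max()))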
Example #13
def accum_pdf_hook(self, input, output):
    global FEATURE_MAPS
#        
    pred = output.cpu().detach().numpy()
    prmin = np.min(pred)
    prmax = np.max(pred)
    prange = prmax - prmin
    
    nexamp, nfm, ny, nx = pred.shape
    pred = np.transpose(pred, axes=(0,2,3,1))
#    pred = (pred- prmin/prange*128
    pred = 2*(pred- prmin)/prange - 1
    
    dx = np.zeros_like(pred)
    dy = np.zeros_like(pred)
    
    for i in range(nexamp):
        for j in range(nfm):
            dx[i,:,:,j] = scharr_h(pred[i,:,:,j])
            dy[i,:,:,j] = scharr_v(pred[i,:,:,j])
    
#    i16 = np.int16(pred)
#    dx = np.int16((np.roll(i16,-1,axis=2) - np.roll(i16,1,axis=2))/2.0)
#    dy = np.int16((np.roll(i16,-1,axis=1) - np.roll(i16,1,axis=1))/2.0)


#   The gradient shapes are (nexamp, ny, nx, nfm)
    magnify = 10
    dx = dx[:,1:-1,1:-1,:]*magnify
    dy = dy[:,1:-1,1:-1,:]*magnify
#    Oops now it's (nexamp, ny-2,nx-2,nfm)
    print(np.min(dx), np.max(dx))
    print(np.min(dy), np.max(dy))
    
    if isinstance(FEATURE_MAPS, type(None)):
        FEATURE_MAPS = np.zeros((nexamp,nfm,509,509),dtype=np.uint16)
        print('created zeros for FEATURE_MAPS...')

    edges = np.arange(-255,255)
    for i in range(nexamp):
        for j in range(nfm):
            FEATURE_MAPS[i,j,:,:], xedges, yedges = \
                        np.histogram2d(dx[i,:,:,j].flatten(),\
                                       dy[i,:,:,j].flatten(),\
                                       bins=(edges, edges))
    
    return
Example #14
 def transform_image(self, image, selem, sigma, type_of_function):
     # Smoothing filters
     if type_of_function == 'Média':
         return self.get_image_as_float(
             mean_filter(self.get_image_as_ubyte(image), selem))
     elif type_of_function == 'Mediana':
         return self.get_image_as_float(
             median_filter(self.get_image_as_ubyte(image), selem))
     elif type_of_function == 'Gaussiano':
         return gaussian_filter(image, sigma)
     elif type_of_function == 'Equalização de histograma':
         return equalize_hist(image)
     # Edge-enhancement filters
     elif type_of_function == 'Prewitt horizontal':
         return prewitt_h(image)
     elif type_of_function == 'Prewitt vertical':
         return prewitt_v(image)
     elif type_of_function == 'Prewitt':
         return prewitt(image)
     elif type_of_function == 'Sobel horizontal':
         return sobel_h(image)
     elif type_of_function == 'Sobel vertical':
         return sobel_v(image)
     elif type_of_function == 'Sobel':
         return sobel(image)
     elif type_of_function == 'Scharr horizontal':
         return scharr_h(image)
     elif type_of_function == 'Scharr vertical':
         return scharr_v(image)
     elif type_of_function == 'Scharr':
         return scharr(image)
     # Morphological operators
     elif type_of_function == 'Erosão':
         return erosion(image, selem)
     elif type_of_function == 'Dilatação':
         return dilation(image, selem)
     elif type_of_function == 'Abertura':
         return opening(image, selem)
     elif type_of_function == 'Fechamento':
         return closing(image, selem)
     raise ValueError(
         f'Invalid type of function! {type_of_function} is not valid.')
Example #15
 def __get_priority(self, unfilled_list):
     min_priority = 0.0
     min_pixel = (self.base_op.margin, self.base_op.margin)
     margin = self.base_op.margin
     self.origin_sample.shape[0]
     gradient_x = filters.scharr_h(
         self.boundary_mat[margin:self.origin_sample.shape[0] + margin,
                           margin:self.origin_sample.shape[1] + margin])
     gradient_y = filters.scharr_v(
         self.boundary_mat[margin:self.origin_sample.shape[0] + margin,
                           margin:self.origin_sample.shape[1] + margin])
     for pixel in unfilled_list:
         if pixel[0] < margin or pixel[
                 0] >= self.origin_sample.shape[0] + margin or pixel[
                     1] < margin or pixel[1] < margin or pixel[
                         1] >= self.origin_sample.shape[1] + margin:
             continue
         temp = (pixel[0] - margin, pixel[0] + 1 + margin,
                 pixel[1] - margin, pixel[1] + 1 + margin)
         confidence = self.base_op.visited_mat[temp[0]:temp[1],
                                               temp[2]:temp[3]].sum()
         iso_dx = self.isophote_x[pixel[0] - margin, pixel[1] - margin]
         iso_dy = self.isophote_y[pixel[0] - margin, pixel[1] - margin]
         norm = math.sqrt(iso_dx * iso_dx + iso_dy * iso_dy)
         if norm != 0:
             iso_dx /= norm
             iso_dy /= norm
         dx = gradient_y[pixel[0] - margin, pixel[1] - margin]
         dy = gradient_x[pixel[0] - margin, pixel[1] - margin]
         norm = math.sqrt(dx * dx + dy * dy)
         if norm != 0:
             dx /= norm
             dy /= norm
         data_1 = math.fabs(-dx * iso_dx + dy * iso_dy)
         data_2 = math.fabs(dx * iso_dx + -dy * iso_dy)
         priority = max(data_2, data_1) * confidence
         if priority >= min_priority:
             min_pixel = pixel
             min_priority = priority
     return min_pixel
Example #16
File: CPT.py Project: xw310/CS534
 def get_order(self, remaining_list):
     MinPrior = 0.0
     MinPixel = (self.margin, self.margin)
     margin = self.margin
     self.OriginSample.shape[0]
     grad_x = filters.scharr_h(
         self.MatBound[margin:self.OriginSample.shape[0] + margin,
                       margin:self.OriginSample.shape[1] + margin])
     grad_y = filters.scharr_v(
         self.MatBound[margin:self.OriginSample.shape[0] + margin,
                       margin:self.OriginSample.shape[1] + margin])
     for pixel in remaining_list:
         if pixel[0] < margin or pixel[
                 0] >= self.OriginSample.shape[0] + margin or pixel[
                     1] < margin or pixel[1] < margin or pixel[
                         1] >= self.OriginSample.shape[1] + margin:
             continue
         temp = (pixel[0] - margin, pixel[0] + 1 + margin,
                 pixel[1] - margin, pixel[1] + 1 + margin)
         conf = self.visited[temp[0]:temp[1], temp[2]:temp[3]].sum()
         sch_dx = self.x_grad[pixel[0] - margin, pixel[1] - margin]
         sch_dy = self.y_grad[pixel[0] - margin, pixel[1] - margin]
         norm = math.sqrt(sch_dx * sch_dx + sch_dy * sch_dy)
         if norm != 0:
             sch_dx /= norm
             sch_dy /= norm
         dx = grad_y[pixel[0] - margin, pixel[1] - margin]
         dy = grad_x[pixel[0] - margin, pixel[1] - margin]
         norm = math.sqrt(dx * dx + dy * dy)
         if norm != 0:
             dx /= norm
             dy /= norm
         v1 = math.fabs(-dx * sch_dx + dy * sch_dy)
         v2 = math.fabs(dx * sch_dx + -dy * sch_dy)
         priority = max(v2, v1) * conf
         if priority >= MinPrior:
             MinPixel = pixel
             MinPrior = priority
     return MinPixel
Example #17
def compute_edgelets(gray_img, use_length):
    scharr_h = filters.scharr_h(gray_img)
    scharr_v = filters.scharr_v(gray_img)
    img_grad_mags = np.sqrt(np.square(scharr_h) + np.square(scharr_v))
    edges = feature.canny(gray_img, sigma=2)
    lines = transform.probabilistic_hough_line(edges,
                                               line_length=3,
                                               line_gap=2)
    locations, directions, strengths = [], [], []

    for p0, p1 in lines:
        line_points = get_bresenham_line(p0, p1)
        p0, p1 = np.hstack((np.array(p0), 1)), np.hstack((np.array(p1), 1))
        strengths.append(
            calculate_line_strength(line_points, img_grad_mags, use_length))
        directions.append(np.cross(p0, p1))
        locations.append((p0, p1))

    locations, directions, strengths = sort_by_strength(
        locations, directions, strengths)

    return (locations, directions, strengths)
Example #18
 def processInput(img):
     fno = img.frame_no
     img = 255 / (mx - md) * (img - md)
     edges = np.sqrt(scharr_h(img)**2 + scharr_v(img)**2)
     return pims.Frame(np.uint8(edges), frame_no=fno + 1)
Example #19
def edgeDir(img, mask=None):
    hori = filters.scharr_h(img, mask=mask)
    vert = filters.scharr_v(img, mask=mask)
    return np.arctan2(hori, vert)
Example #20
def test_scharr_v_mask():
    """Vertical Scharr on a masked array should be zero."""
    result = filters.scharr_v(np.random.uniform(size=(10, 10)),
                              np.zeros((10, 10), dtype=bool))
    assert_allclose(result, 0)
Example #21
def test_scharr_v_horizontal():
    """vertical Scharr on a horizontal edge should be zero."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = filters.scharr_v(image)
    assert_allclose(result, 0)
Example #22
ax[2].set_ylim((image.shape[0], 0))
ax[2].set_axis_off()
ax[2].set_title('Detected lines')

plt.tight_layout()
plt.show()

# Line finding using the Probabilistic Hough Transform
#image = data.camera()
image = skio.imread(r'D:\image\chemin-de-fer.jpg')
image = skimage.color.rgb2gray(image)
#I=sm.black_tophat(image,sm.square(40))
#plt.figure(2)
#plt.imshow(I,cmap= 'gray')
#image = data.checkerboard()
edges = fl.scharr_v(image)
for i in range(edges.shape[0]):
    for j in range(edges.shape[1]):
        if (np.abs(edges[i, j]) < 0.54):
            edges[i, j] = 0
        else:
            edges[i, j] = 1
#edges = fl.roberts(image)
#plt.figure("image top hat ")
#plt.title("image top hat ")
#plt.imshow(edges,cmap = 'gray')
#plt.axis('off')
lines = probabilistic_hough_line(edges,
                                 threshold=80,
                                 line_length=120,
                                 line_gap=12)
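The per-pixel loop above can be collapsed into a single vectorized threshold; an equivalent one-liner with the same 0.54 cutoff, using the np and fl aliases already imported in that script:

edges = (np.abs(fl.scharr_v(image)) >= 0.54).astype(float)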
Example #23
def test_scharr_v_zeros():
    """Vertical Scharr on an array of all zeros."""
    result = filters.scharr_v(np.zeros((10, 10)),
                              np.ones((10, 10), dtype=bool))
    assert_allclose(result, 0)
Example #24
def ScharrV(image):
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return fil.scharr_v(image)
Example #25
def find_pupil(img, rectangle=None, resize_width=30, equalize_img=False,
               normalize_gradient=True, convolve=False, gamma=0.5):
    """ Find a pupil center in a grayscale image. Formulates the problem as
    an optimzation problem over the eye region, where the theoretical optimum
    occurs at the center of the iris.

    Args:
    -----
        img: a 2D numpy array (i.e., a grayscale image)
        rectangle: if passed, look inside a region of the image for a pupil
        resize_width: int, how wide the search space should be for the 
            eye region. Note that this scales as O(resize_width ^ 2)
        equalize_img: boolean, do we want to equalize the histogram in the 
            eye region to make the gradients more defined  
        normalize_gradient: y/n to gradient normalization
        convolve: whether or not to take a 2D mean convolution over the 
            functional space before selecting the optimum (helps with noise)

    Returns:
    --------
        (pupil_loc_w, pupil_loc_h) w.r.t. the original image size and not 
        relative to the crop

    Raises:
    -------
        Exceptions
    """

    # keep track of all the dims
    dimensions = {}
    if rectangle is not None:
        assert isinstance(
            rectangle, dlib.rectangle), 'rectangle must be a dlib rectangle'
        logger.debug('will perform pupil search in constrained region')
        orig = grab_rectangular_region(img, rectangle)
        # stick the cropped eye region in img
        img, orig = orig, img
        dimensions['orig'] = orig.shape
        dimensions['rect'] = rectangle

        logger.debug('ingested image of size: {}'.format(orig.shape))
        logger.debug('searching in rectangle defined by {}'.format(str(rectangle)))

    if resize_width is not None:
        logger.debug('resizing search region to have width = {}'.format(resize_width))
        dimensions['pre_resize'] = img.shape
        img = resize(img, resize_width)

    if equalize_img:
        logger.debug('normalizing intensity histogram')

        img = exposure.equalize_hist(img)

    img = exposure.adjust_gamma(img, gamma)

    h, w = img.shape
    dimensions['img'] = img.shape

    # compute grads
    grad_x = scharr_v(img)
    grad_y = scharr_h(img)

    # get gradient vector for each pixel
    # list() is required on Python 3, where zip() returns an iterator
    grad = np.array(list(zip(grad_y.ravel(), grad_x.ravel()))).astype('float')

    if normalize_gradient:
        logger.debug('performing gradient normalization')

        # get gradient magnitude for each pixel
        grad_magnitude = np.linalg.norm(grad, axis=-1)
        mask = np.isnan(grad_magnitude)

        grad_mean = np.mean(grad_magnitude[~mask])
        grad_std = np.std(grad_magnitude[~mask])

        # set the masked magnitudes to one so the division below is safe
        grad_magnitude[mask] = 1
        # prune out noisy gradients
        grad_magnitude[grad_magnitude < (0.3 * grad_std + grad_mean)] = 1

        grad[grad_magnitude < 2] = 0

        # make the gradient vectors unit length
        # grad = grad / grad_magnitude[:, :, np.newaxis]
        grad = grad / grad_magnitude[:, np.newaxis]

    grad_coords = np.array([a for a in np.ndindex(h, w)]).astype('float')

    direction = (grad_coords[:, :, np.newaxis] - grad_coords.T)
    direction = direction / np.linalg.norm(direction, axis=1)[:, np.newaxis, :]

    acc_grad = (direction * grad[:, :, np.newaxis]).sum(axis=1)
    acc_grad[np.isnan(acc_grad) | (acc_grad < 0)] = 0
    acc_grad = acc_grad.sum(axis=0)

    if convolve:
        logger.debug('convolving final energy space')

        acc_grad = signal.convolve2d(
            acc_grad.reshape((h, w)), mean_kernel(), mode='same')

    # reverse so this is (w x h)
    pupil_loc_w, pupil_loc_h = grad_coords[
        acc_grad.argmax()].astype('int')[::-1]

    if resize_width is not None:

        h_old, w_old = dimensions['pre_resize']
        # get the upsample factor
        h_factor, w_factor = h_old / float(h), w_old / float(w)

        logger.debug('upsampling found pupil '
                     'with ({}, {}) factor'.format(h_factor, w_factor))

        # get the coords in the original system
        pupil_loc_w = int(pupil_loc_w * w_factor)
        pupil_loc_h = int(pupil_loc_h * h_factor)

    if rectangle is not None:
        logger.debug('converting to precropped coordinate system')

        pupil_loc_w += rectangle.left()
        pupil_loc_h += rectangle.top()

    logger.debug('pupil detection successful')

    return pupil_loc_w, pupil_loc_h
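A hedged usage sketch for find_pupil: the module-level helpers it relies on (resize, grab_rectangular_region, mean_kernel, logger) are assumed to be importable alongside it, and skimage's camera image merely stands in for a real grayscale eye crop:

from skimage import data

eye_img = data.camera().astype(float) / 255.0
px, py = find_pupil(eye_img, rectangle=None, resize_width=30,
                    equalize_img=True, normalize_gradient=True)
print(px, py)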
Example #26
image_y = 2 * y * np.cos(x**2 + y**2)


def angle(dx, dy):
    """Calculate the angles between horizontal and vertical operators."""
    return np.mod(np.arctan2(dy, dx), np.pi)


true_angle = angle(image_x, image_y)

angle_farid = angle(filters.farid_h(image_rotinv),
                    filters.farid_v(image_rotinv))
angle_sobel = angle(filters.sobel_h(image_rotinv),
                    filters.sobel_v(image_rotinv))
angle_scharr = angle(filters.scharr_h(image_rotinv),
                     filters.scharr_v(image_rotinv))
angle_prewitt = angle(filters.prewitt_h(image_rotinv),
                      filters.prewitt_v(image_rotinv))


def diff_angle(angle_1, angle_2):
    """Calculate the differences between two angles."""
    return np.minimum(np.pi - np.abs(angle_1 - angle_2),
                      np.abs(angle_1 - angle_2))


diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)
Example #27
def Hough_edge_center(data_dcm_directory,
                      Image_file,
                      Image,
                      Is_a_file,
                      sig,
                      pui,
                      thr,
                      resol_r=0.25,
                      resol_x=0.25,
                      resol_y=0.25):

    if Is_a_file:
        # Read the pixel data from the DICOM file
        Lec = sitk.ReadImage(os.path.join(data_dcm_directory, Image_file))

        # Convert to a numpy array
        Image = sitk.GetArrayFromImage(Lec[:, :, 0])
        space_y = Lec.GetSpacing()[0]
        space_x = Lec.GetSpacing()[1]

    # Convert the image from unsigned 16-bit to float
    Image = Image.astype(float)
    # Smooth the image with a Gaussian filter
    IG8 = filters.gaussian(Image, sigma=sig)

    # Increase the contrast of the filtered image
    IG8 = IG8 / (0.7 * np.max(IG8))
    IG8 = IG8**pui

    # Compute the gradient image with a Scharr filter
    Scharr = filters.scharr(IG8)
    # Also keep the horizontal and vertical gradient information: scharr_v responds to
    # vertical edges, hence horizontal gradients, and vice versa for scharr_h
    Gx = filters.scharr_v(IG8)
    Gy = filters.scharr_h(IG8)

    # Diagnostic figures
    # plt.figure()
    # plt.subplot(131)
    # plt.imshow(Scharr)
    # plt.subplot(132)
    # plt.imshow(Schx)
    # plt.subplot(133)
    # plt.imshow(Schy)

    # Threshold for the gradient image
    B = thr * np.max(Scharr)
    Thresh = Scharr * (Scharr > B)

    # To save computation time and memory, crop the working area: with a circular shape
    # the information is localized, so there is no point in processing the whole image.
    # Find the minimum and maximum indices along both image axes, then work with a
    # reduced Hough space filled from the cropped image.

    y_min = np.min(np.nonzero(Thresh)[0])
    y_max = np.max(np.nonzero(Thresh)[0])
    x_min = np.min(np.nonzero(Thresh)[1])
    x_max = np.max(np.nonzero(Thresh)[1])

    Dx = x_max - x_min
    Dy = y_max - y_min

    Dxt = int(round(0.3 * (Dx)))  # margins in x and y beyond xmin and xmax
    Dyt = int(round(0.3 * (Dy)))

    # # Diagnostic figure: thresholded gradient image
    # plt.figure()
    # plt.title("Threshold image of " + str(Image_file))
    # plt.imshow(Thresh[y_min-Dyt:y_max+Dyt+1,x_min-Dxt:x_max+Dxt+1])

    # # Diagnostic figure: input image after preprocessing
    # plt.figure()
    # plt.title("Filtered image of " + str(Image_file))
    # plt.imshow(IG8[y_min-Dyt:y_max+Dyt+1,x_min-Dxt:x_max+Dxt+1])

    D = max(Dx, Dy)
    Dt = max(Dxt, Dyt)

    # Control values, printed if needed.
    # print(y_min,y_max,x_min,x_max,Dx,Dy,Dxt,Dyt,Nrt)

    # Build the Hough space

    Hough_space = np.zeros((int(Dt / resol_r), int(
        (2 * D) / resol_y) + 1, int((2 * D) / resol_x) + 1),
                           dtype=float)  # Hough accumulator

    beta_x = (1 / 2) * (1 - resol_x)
    beta_y = (1 / 2) * (1 - resol_y)

    for yi in range(Dy + 1):  # xi and yi (image coords) are only nonzero
        for xi in range(Dx + 1):  # where the threshold image is True
            T = Thresh[yi + y_min, xi + x_min]
            if (T != 0):

                for rh in range(0, int((Dt / resol_r))):

                    # theta is the angle between the gradient direction and the horizontal; geometrically tan(theta) = Gy/Gx
                    theta = np.arctan2(Gy[yi + y_min, xi + x_min],
                                       Gx[yi + y_min, xi + x_min])
                    # theta is also signed, thanks to scharr_v and scharr_h, so only a single
                    # point needs to be considered, inside the gradient image and on the axis
                    # defined by the gradient orientation
                    xh = ((xi + D / 2)) / resol_x + (
                        (rh * resol_r +
                         (D / 2) - Dt) * np.cos(theta)) / resol_x + beta_x
                    yh = ((yi + D / 2)) / resol_y + (
                        (rh * resol_r +
                         (D / 2) - Dt) * np.sin(theta)) / resol_y + beta_y
                    # Finally, since this method is noise-sensitive, add a 3x3 kernel centered on the point rather than a single point.
                    Hough_space[int(rh),
                                int(round(yh)) - 1:int(round(yh)) + 2,
                                int(round(xh)) - 1:int(round(xh)) + 2] += 0.001

    # Increase the contrast of the Hough space
    # Hough_space=Hough_space/np.percentile(Hough_space, 97)
    Hough_space_10 = Hough_space**5

    # Maximum of the Hough space: the center of mass is only meaningful near it
    Max_Hough = np.max(Hough_space_10)
    # Discard all values too far from the maximum
    Hough_trunc = np.where(Hough_space_10 > 0.01 * Max_Hough, Hough_space_10,
                           0)
    # Center of mass of the Hough space
    cdm_10 = ndimage.measurements.center_of_mass(Hough_trunc)

    # Diagnostic figures
    # plt.figure()
    # plt.title("Hough space near the maximum of " + str(Image_file))
    # plt.subplot(121)
    # plt.imshow(Hough_space_10[int(np.ceil(cdm_10[0])),:,:])
    # plt.subplot(122)
    # plt.imshow(Hough_space_10[int(np.floor(cdm_10[0])),:,:])

    r_est_10 = cdm_10[0] * resol_r + D / 2 - 2 * Dt
    y_est_10 = (cdm_10[1]) * resol_y + y_min - (D / 2) - beta_y
    x_est_10 = (cdm_10[2]) * resol_x + x_min - (D / 2) - beta_x

    if not Is_a_file:
        space_y = 0.336
        space_x = 0.336

    print("x_est = ", round(x_est_10, 3), ", y_est = ", round(y_est_10,
                                                              3), ", r_est = ",
          round(r_est_10, 3))  # pixel coordinates in the image

    return (x_est_10, y_est_10, space_x, space_y)
Example #28
x, y = np.mgrid[-10:10:255j, -10:10:255j]
img = np.sin(x**2 + y**2)

imgx = 2 * x * np.cos(x**2 + y**2)
imgy = 2 * y * np.cos(x**2 + y**2)


def angle(dx, dy):
    return np.mod(np.arctan2(dy, dx), np.pi)


true_angle = angle(imgx, imgy)

angle_farid = angle(farid_h(img), farid_v(img))
angle_sobel = angle(sobel_h(img), sobel_v(img))
angle_scharr = angle(scharr_h(img), scharr_v(img))
angle_prewitt = angle(prewitt_h(img), prewitt_v(img))


def diff_angle(angle_1, angle_2):
    return np.minimum(np.pi - np.abs(angle_1 - angle_2),
                      np.abs(angle_1 - angle_2))


diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)

fig, axes = plt.subplots(nrows=3,
                         ncols=2,
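The excerpt above stops partway through the plotting call; a minimal way to summarize the comparison numerically, using the diff_* arrays already computed, might be:

# Mean absolute angular error (radians) for each derivative operator.
for name, diff in [('farid', diff_farid), ('sobel', diff_sobel),
                   ('scharr', diff_scharr), ('prewitt', diff_prewitt)]:
    print(name, float(diff.mean()))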
Example #29
def test_vscharr_horizontal():
    """vertical Scharr on a horizontal edge should be zero."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = filters.scharr_v(image)
    assert_allclose(result, 0)
Example #30
def get_features(image, xs, ys, feature_width):
    """
    Returns feature descriptors for a given set of interest points.

    To start with, you might want to simply use normalized patches as your
    local feature. This is very simple to code and works OK. However, to get
    full credit you will need to implement the more effective SIFT-like descriptor
    (See Szeliski 4.1.2 or the original publications at
    http://www.cs.ubc.ca/~lowe/keypoints/)

    Your implementation does not need to exactly match the SIFT reference.
    Here are the key properties your (baseline) descriptor should have:
    (1) a 4x4 grid of cells, each feature_width / 4 pixels square.
    (2) each cell should have a histogram of the local distribution of
        gradients in 8 orientations. Appending these histograms together will
        give you 4x4 x 8 = 128 dimensions.
    (3) Each feature should be normalized to unit length

    You do not need to perform the interpolation in which each gradient
    measurement contributes to multiple orientation bins in multiple cells
    As described in Szeliski, a single gradient measurement creates a
    weighted contribution to the 4 nearest cells and the 2 nearest
    orientation bins within each cell, for 8 total contributions. This type
    of interpolation probably will help, though.

    You do not have to explicitly compute the gradient orientation at each
    pixel (although you are free to do so). You can instead filter with
    oriented filters (e.g. a filter that responds to edges with a specific
    orientation). All of your SIFT-like feature can be constructed entirely
    from filtering fairly quickly in this way.

    You do not need to do the normalize -> threshold -> normalize again
    operation as detailed in Szeliski and the SIFT paper. It can help, though.

    Another simple trick which can help is to raise each element of the final
    feature vector to some power that is less than one.

    Useful functions: A working solution does not require the use of all of these
    functions, but depending on your implementation, you may find some useful. Please
    reference the documentation for each function/library and feel free to come to hours
    or post on Piazza with any questions

        - skimage.filters (library)


    :params:
    :image: a grayscale or color image (your choice depending on your implementation)
    :x: np array of x coordinates of interest points
    :y: np array of y coordinates of interest points
    :feature_width: in pixels, is the local feature width. You can assume
                    that feature_width will be a multiple of 4 (i.e. every cell of your
                    local SIFT-like feature will have an integer width and height).
    If you want to detect and describe features at multiple scales or
    particular orientations you can add input arguments.

    :returns:
    :features: np array of computed features. It should be of size
            [len(x) * feature dimensionality] (for standard SIFT feature
            dimensionality is 128)

    """
    # Convert to integers for indexing
    xs = np.round(xs).astype(int)
    ys = np.round(ys).astype(int)

    # Define helper functions for readability and to avoid copy-pasting
    def get_window(y, x):
        """
         Helper to get indices of the feature_width square
        """
        rows = (x - (feature_width / 2 - 1), x + feature_width / 2)
        if rows[0] < 0:
            rows = (0, rows[1] - rows[0])
        if rows[1] >= image.shape[0]:
            rows = (rows[0] + (image.shape[0] - 1 - rows[1]),
                    image.shape[0] - 1)
        cols = (y - (feature_width / 2 - 1), y + feature_width / 2)
        if cols[0] < 0:
            cols = (0, cols[1] - cols[0])
        if cols[1] >= image.shape[1]:
            cols = (cols[0] - (cols[1] + 1 - image.shape[1]),
                    image.shape[1] - 1)
        return int(rows[0]), int(rows[1]) + 1, int(cols[0]), int(cols[1]) + 1

    def get_current_window(i, j, matrix):
        """
        Helper to get sub square of size feature_width/4 
        From the square matrix of size feature_width
        """
        return matrix[int(i * feature_width / 4):int((i + 1) * feature_width /
                                                     4),
                      int(j * feature_width / 4):int((j + 1) * feature_width /
                                                     4)]

    def rotate_by_dominant_angle(angles, grads):
        hist, bin_edges = np.histogram(angles,
                                       bins=36,
                                       range=(0, 2 * np.pi),
                                       weights=grads)
        angles -= bin_edges[np.argmax(hist)]
        angles[angles < 0] += 2 * np.pi

    # Initialize features tensor, with an easily indexable shape
    features = np.zeros((len(xs), 4, 4, 8))
    # Get gradients and angles by filters (approximation)
    sigma = 0.8
    filtered_image = gaussian(image, sigma)
    dx = scharr_v(filtered_image)
    dy = scharr_h(filtered_image)
    gradient = np.sqrt(np.square(dx) + np.square(dy))
    angles = np.arctan2(dy, dx)
    angles[angles < 0] += 2 * np.pi

    for n, (x, y) in enumerate(zip(xs, ys)):
        # Feature square
        i1, i2, j1, j2 = get_window(x, y)
        grad_window = gradient[i1:i2, j1:j2]
        angle_window = angles[i1:i2, j1:j2]
        # Loop over sub feature squares
        for i in range(int(feature_width / 4)):
            for j in range(int(feature_width / 4)):
                # Enhancement: a Gaussian fall-off function window
                current_grad = get_current_window(i, j, grad_window).flatten()
                current_angle = get_current_window(i, j,
                                                   angle_window).flatten()
                features[n, i, j] = np.histogram(current_angle,
                                                 bins=8,
                                                 range=(0, 2 * np.pi),
                                                 weights=current_grad)[0]

    features = features.reshape((
        len(xs),
        -1,
    ))
    dividend = np.linalg.norm(features, axis=1).reshape(-1, 1)
    # Rare cases where the gradients are all zeros in the window
    # Results in np.nan from division by zero.
    dividend[dividend == 0] = 1
    features = features / dividend
    thresh = 0.25
    features[features >= thresh] = thresh
    features = features**0.8
    # features = features / features.sum(axis = 1).reshape(-1, 1)
    return features
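A possible usage sketch for get_features; it assumes the snippet's module already imports gaussian, scharr_h and scharr_v from skimage.filters, and Harris corners are used here only to produce some interest points:

import numpy as np  # get_features uses numpy internally
from skimage import data, color
from skimage.feature import corner_harris, corner_peaks

img = color.rgb2gray(data.astronaut())
peaks = corner_peaks(corner_harris(img), min_distance=8)
ys, xs = peaks[:, 0].astype(float), peaks[:, 1].astype(float)
descriptors = get_features(img, xs, ys, feature_width=16)
print(descriptors.shape)  # (number of points, 128)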
Example #31
def test_vscharr_mask():
    """Vertical Scharr on a masked array should be zero."""
    np.random.seed(0)
    result = filters.scharr_v(np.random.uniform(size=(10, 10)),
                              np.zeros((10, 10), bool))
    assert_allclose(result, 0)
Example #32
def test_vscharr_zeros():
    """Vertical Scharr on an array of all zeros."""
    result = filters.scharr_v(np.zeros((10, 10)), np.ones((10, 10), bool))
    assert_allclose(result, 0)