Code example #1
    def test_skeletonize_num_neighbours(self):
        # an empty image
        image = np.zeros((300, 300))

        # foreground object 1
        image[10:-10, 10:100] = 1
        image[-100:-10, 10:-10] = 1
        image[10:-10, -100:-10] = 1

        # foreground object 2
        rs, cs = draw.bresenham(250, 150, 10, 280)
        for i in range(10):
            image[rs + i, cs] = 1
        rs, cs = draw.bresenham(10, 150, 250, 280)
        for i in range(20):
            image[rs + i, cs] = 1

        # foreground object 3
        ir, ic = np.indices(image.shape)
        circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2
        circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2
        image[circle1] = 1
        image[circle2] = 0
        result = skeletonize(image)

        # there should never be a 2x2 block of foreground pixels in a skeleton
        mask = np.array([[1,  1],
                         [1,  1]], np.uint8)
        blocks = correlate(result, mask, mode='constant')
        assert not np.any(blocks == 4)
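Note: the assertion above is easy to exercise in isolation. Below is a minimal sketch (assuming plain NumPy plus scipy.ndimage.correlate, outside any test class) of why correlating with a 2x2 mask of ones detects thick spots: a response of 4 means every pixel under the mask was foreground, which a valid skeleton must never produce.

import numpy as np
from scipy.ndimage import correlate

def has_2x2_block(skeleton):
    # count foreground pixels under a 2x2 window at every position
    mask = np.ones((2, 2), np.uint8)
    blocks = correlate(skeleton.astype(np.uint8), mask, mode='constant')
    return bool(np.any(blocks == 4))

assert not has_2x2_block(np.eye(5, dtype=np.uint8))  # 1 px diagonal passes
patch = np.zeros((5, 5), np.uint8)
patch[1:3, 1:3] = 1
assert has_2x2_block(patch)  # a filled 2x2 patch is caught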
Code example #2
File: test_draw.py Project: flaviotruzzi/Edgy
def test_diag():
    img = np.zeros((5, 5))

    rr, cc = bresenham(0, 0, 4, 4)
    img[rr, cc] = 1

    img_ = np.eye(5)

    assert_array_equal(img, img_)
Code example #3
File: test_draw.py Project: flaviotruzzi/Edgy
def test_bresenham_vertical():
    img = np.zeros((10, 10))

    rr, cc = bresenham(0, 0, 9, 0)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[:, 0] = 1

    assert_array_equal(img, img_)
Code example #4
File: test_draw.py Project: flaviotruzzi/Edgy
def test_reverse():
    img = np.zeros((10, 10))

    rr, cc = bresenham(0, 9, 0, 0)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)
Code example #5
File: utils.py Project: NelleV/ROVAR
def merge_bounding_boxes(image, scores, positions, threshold=0.5, dist=5):
    """
    Merges bounding boxes
    """
    image = image.copy()
    num = 0
    calc = scores > threshold
    correct_positions = positions[calc]
    correct_scores = scores[calc]
    for position, score in zip(correct_positions, correct_scores):
        num += 1
        x0 = position[0]
        y0 = position[1]
        image[bresenham(x0, y0, x0 + 24, y0)] = 0
        image[bresenham(x0, y0, x0, y0 + 24)] = 0
        image[bresenham(x0 + 24, y0, x0 + 24, y0 + 24)] = 0
        image[bresenham(x0, y0 + 24, x0 + 24, y0 + 24)] = 0
    print("detected %d faces" % num)
    return image, correct_positions, correct_scores
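In current scikit-image releases, where draw.bresenham no longer exists, the four line calls per box can likely be collapsed into a single perimeter call. A minimal sketch, assuming skimage.draw.rectangle_perimeter from a recent release (that helper and its clip argument are not part of the original project):

import numpy as np
from skimage.draw import rectangle_perimeter

def draw_box(image, x0, y0, size=24):
    # zero out the perimeter of a size x size box anchored at (x0, y0)
    rr, cc = rectangle_perimeter((x0, y0), extent=(size, size),
                                 shape=image.shape, clip=True)
    image[rr, cc] = 0
    return image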
Code example #6
File: test_draw.py Project: flaviotruzzi/Edgy
def test_bresenham_horizontal():
    img = np.zeros((10, 10))

    rr, cc = bresenham(0, 0, 0, 9)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)
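The line-drawing tests above target the old skimage.draw.bresenham name; modern scikit-image exposes the same routine as skimage.draw.line, with the identical (r0, c0, r1, c1) convention and inclusive endpoints. The horizontal test restated against the current API (a sketch assuming a recent scikit-image):

import numpy as np
from numpy.testing import assert_array_equal
from skimage.draw import line  # successor of draw.bresenham

def test_line_horizontal():
    img = np.zeros((10, 10))
    rr, cc = line(0, 0, 0, 9)  # row 0, columns 0..9 inclusive
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[0, :] = 1
    assert_array_equal(img, img_)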
Code example #7
File: utils.py Project: NelleV/ROVAR
def show_positive_boxes(image, labels, positions):
    """
    Returns an image with the positive bounding boxes drawn

    params
    -------
        image
        labels

    returns
    -------
        image
    """
    image = image.copy()
    for i, label in enumerate(labels):
        if label:
            x0 = positions[i, 0]
            y0 = positions[i, 1]
            image[bresenham(x0, y0, x0 + 24, y0)] = 0
            image[bresenham(x0, y0, x0, y0 + 24)] = 0
            image[bresenham(x0 + 24, y0, x0 + 24, y0 + 24)] = 0
            image[bresenham(x0, y0 + 24, x0 + 24, y0 + 24)] = 0
    return image
Code example #8
    def _generate(self):
        backprojection = np.zeros((self.width, self.width))
        sinogram = np.zeros((self.num_steps, self.num_detectors))

        filter = self._generate_filter(self.num_detectors)
        angular_step = np.pi / self.num_steps

        for step in range(self.num_steps):
            # compute the emitter's zero angle
            alpha = step * angular_step

            step_lines = []
            sinogram_row = np.zeros((self.num_detectors), dtype=np.int32)
            for detector in range(self.num_detectors):
                # compute the position of the next detector
                detector_angle = alpha + np.pi - self.theta / 2 + detector * (
                    self.theta / (self.num_detectors - 1))
                detector_x, detector_y = self._get_coords(detector_angle)
                # compute the position of the corresponding emitter
                emitter_angle = alpha + self.theta / 2 - detector * (
                    self.theta / (self.num_detectors - 1))
                emitter_x, emitter_y = self._get_coords(emitter_angle)
                # generate the line
                rr, cc = bresenham(emitter_y, emitter_x, detector_y,
                                   detector_x)
                # sum the pixel values and store the line for reconstruction
                sinogram_row[detector] = np.array(self.image[rr, cc]).sum()
                step_lines.append((rr, cc))

            sinogram[step] = sinogram_row

            # filter the sinogram row
            if self.filter:
                sinogram_row = np.array(np.convolve(sinogram_row, filter,
                                                    'same'),
                                        dtype=np.int32)

            for detector in range(self.num_detectors):
                rr, cc = step_lines[detector]
                backprojection[rr, cc] += sinogram_row[detector]

            if self.gaussian:
                self.backprojections.append(
                    self._normalize_image(
                        cv2.GaussianBlur(backprojection, (3, 3), 3)))
            else:
                self.backprojections.append(
                    self._normalize_image(backprojection))

        self.sinogram = cv2.normalize(sinogram, None, 0, 255, cv2.NORM_MINMAX)
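The generator relies on a _get_coords helper that is not shown. Presumably it maps an angle to integer pixel coordinates on the circle along which the emitter and detectors travel; a hypothetical stand-in (an assumption about the original class, not its actual code):

import numpy as np

def get_coords(angle, width):
    # hypothetical: a point on a circle inscribed in a width x width image
    radius = width // 2 - 1
    x = int(radius * np.cos(angle)) + width // 2
    y = int(radius * np.sin(angle)) + width // 2
    return x, y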
Code example #9
File: tp1.py Project: NelleV/ROVAR
def show_matched_desc(image1, image2, matched_desc):
    image = np.zeros((image1.shape[0],
                      image1.shape[1] + 10 + image2.shape[1]))
    image[:, :image1.shape[1]] = image1
    image[:, 10 + image1.shape[1]:] = image2
    placed_desc = matched_desc
    placed_desc[:, -1] = matched_desc[:, -1] + 10 + image1.shape[1]
    from skimage.draw import bresenham
    for el in placed_desc:
        try:
            draw_point(image, el[0], el[1])
            draw_point(image, el[2], el[3])
            image[bresenham(el[0], el[1], el[2], el[3])] = 0
        except IndexError:
            pass
    return image
Code example #10
def bresenham_line(start, end):
    """Find the cells that should be selected to form a straight line

    Use scikit-image implementation of the Bresenham line algorithm

    Args:
        start: (x, y) INDEX coordinates of the starting point
        end: (2, n) array of (x, y) INDEX coordinates of the ending points

    Returns:
        List of (x, y) INDEX coordinates that form the straight lines
    """
    path = list()
    for target in end.T:  # TODO: delete for loop
        tmp = bresenham(start[0], start[1], target[0], target[1])
        path += (list(zip(tmp[0], tmp[1]))[1:-1])  # start + end points removed
    return path
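A quick usage sketch: end is a (2, n) array whose columns are target points, and each segment comes back with the shared start and end cells stripped. Since the old import is gone from scikit-image, skimage.draw.line stands in for bresenham here:

import numpy as np
from skimage.draw import line as bresenham  # modern name for the old routine

start = (0, 0)
end = np.array([(4, 0), (4, 9)]).T  # shape (2, n): columns are targets
print(bresenham_line(start, end))
# prints the interior cells of each segment, endpoints removed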
Code example #11
def hog2image(hogArray,
              imageSize=[32, 32],
              orientations=9,
              pixels_per_cell=(8, 8),
              cells_per_block=(3, 3)):
    from numpy import cos, sin  # scipy's numpy aliases were removed
    from skimage import draw

    sy, sx = imageSize
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1

    hogArray = hogArray.reshape([n_blocksy, n_blocksx, by, bx, orientations])

    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = hogArray[y, x, :]
            orientation_histogram[y:y + by, x:x + bx, :] = block

    radius = min(cx, cy) // 2 - 1
    hog_image = np.zeros((sy, sx), dtype=float)
    for x in range(n_cellsx):
        for y in range(n_cellsy):
            for o in range(orientations):
                centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                dx = int(radius * cos(float(o) / orientations * np.pi))
                dy = int(radius * sin(float(o) / orientations * np.pi))
                # rr, cc = draw.bresenham(centre[0] - dy, centre[1] - dx,
                #                         centre[0] + dy, centre[1] + dx)
                rr, cc = draw.bresenham(centre[0] - dx, centre[1] - dy,
                                        centre[0] + dx, centre[1] + dy)
                hog_image[rr, cc] += orientation_histogram[y, x, o]

    return hog_image
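A usage sketch: hog2image is meant to invert the flattened descriptor from skimage.feature.hog back into the familiar star-pattern visualisation (note the function still calls the old draw.bresenham internally; substitute skimage.draw.line on recent releases). With the defaults above, a 32x32 image yields 2x2 blocks of 3x3 cells with 9 bins each, i.e. 324 values:

import numpy as np
from skimage.feature import hog

img = np.random.rand(32, 32)
fd = hog(img, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3))
viz = hog2image(fd, imageSize=[32, 32])
print(fd.shape, viz.shape)  # (324,) (32, 32)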
Code example #12
File: draw.py Project: NelleV/SORBB
def show_matched_desc(image1, image2, matched_desc):
    """
    Draw lines between matched descriptors of two images

    params
    ------
        image1: ndarray

        image2: ndarray
    """
    # Working on grayscale images
    if len(image1.shape) == 3:
        image1 = image1.mean(axis=2)
    if len(image2.shape) == 3:
        image2 = image2.mean(axis=2)

    # FIXME doesn't work with images of different size
    h = max(image1.shape[0], image2.shape[0])
    image = np.zeros((h,
                      image1.shape[1] + 10 + image2.shape[1]))

    image[:image1.shape[0], :image1.shape[1]] = image1
    image[:image2.shape[0], 10 + image1.shape[1]:] = image2
    placed_desc = matched_desc
    placed_desc[:, -1] = matched_desc[:, -1] + 10 + image1.shape[1]
    for el in placed_desc:
        try:
            draw_point(image, el[0], el[1])
            draw_point(image, el[2], el[3])
            image[bresenham(el[0], el[1], el[2], el[3])] = 0
        except IndexError:
            pass

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ax.imshow(image, cmap=cm.gray)
    plt.show()
Code example #13
File: plot_skeleton.py Project: nitishb/scikit-image
"""
from skimage.morphology import skeletonize
from skimage import draw
import numpy as np
import matplotlib.pyplot as plt

# an empty image
image = np.zeros((400, 400))

# foreground object 1
image[10:-10, 10:100] = 1
image[-100:-10, 10:-10] = 1
image[10:-10, -100:-10] = 1

# foreground object 2
rs, cs = draw.bresenham(250, 150, 10, 280)
for i in range(10): image[rs+i, cs] = 1
rs, cs = draw.bresenham(10, 150, 250, 280)
for i in range(20): image[rs+i, cs] = 1

# foreground object 3
ir, ic = np.indices(image.shape)
circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2
circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2
image[circle1] = 1
image[circle2] = 0

# perform skeletonization
skeleton = skeletonize(image)

# display results
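The excerpt stops at the display step; the gallery example presumably continues along these lines (a sketch, not the verbatim original):

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax2.imshow(skeleton, cmap=plt.cm.gray)
ax2.set_title('skeleton')
ax2.axis('off')
plt.show()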
Code example #14
def hof(flow, orientations=9, pixels_per_cell=(8, 8),
        cells_per_block=(3, 3), visualise=False, normalise=False, motion_threshold=1.):

    """Extract Histogram of Optical Flow (HOF) for a given image.

    Key difference between this and HOG is that flow is MxNx2 instead of MxN


    Compute a Histogram of Optical Flow (HOF) by

        1. (optional) global image normalisation
        2. computing the dense optical flow
        3. computing flow histograms
        4. normalising across blocks
        5. flattening into a feature vector

    Parameters
    ----------
    flow : (M, N, 2) ndarray
        Input image (x and y flow images).
    orientations : int
        Number of orientation bins.
    pixels_per_cell : 2 tuple (int, int)
        Size (in pixels) of a cell.
    cells_per_block  : 2 tuple (int,int)
        Number of cells in each block.
    visualise : bool, optional
        Also return an image of the hof.
    normalise : bool, optional
        Apply power law compression to normalise the image before
        processing.
    motion_threshold : float
        Threshold below which flow is counted as no motion.

    Returns
    -------
    newarr : ndarray
        hof for the image as a 1D (flattened) array.
    hof_image : ndarray (if visualise=True)
        A visualisation of the hof image.

    References
    ----------
    * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

    * Dalal, N and Triggs, B, Histograms of Oriented Gradients for
      Human Detection, IEEE Computer Society Conference on Computer
      Vision and Pattern Recognition 2005 San Diego, CA, USA

    """
    flow = np.atleast_2d(flow)

    """ 
    -1-
    The first stage applies an optional global image normalisation
    equalisation that is designed to reduce the influence of illumination
    effects. In practice we use gamma (power law) compression, either
    computing the square root or the log of each colour channel.
    Image texture strength is typically proportional to the local surface
    illumination so this compression helps to reduce the effects of local
    shadowing and illumination variations.
    """

    if flow.ndim < 3:
        raise ValueError("Requires dense flow in both directions")

    if normalise:
        flow = sqrt(flow)

    """ 
    -2-
    The second stage computes first order image gradients. These capture
    contour, silhouette and some texture information, while providing
    further resistance to illumination variations. The locally dominant
    colour channel is used, which provides colour invariance to a large
    extent. Variant methods may also include second order image derivatives,
    which act as primitive bar detectors - a useful feature for capturing,
    e.g. bar like structures in bicycles and limbs in humans.
    """

    if flow.dtype.kind == 'u':
        # convert uint image to float
        # to avoid problems with subtracting unsigned numbers in np.diff()
        flow = flow.astype('float')

    # use the flow components directly as the "gradient" images
    gx = flow[:, :, 1]
    gy = flow[:, :, 0]

    """ 
    -3-
    The third stage aims to produce an encoding that is sensitive to
    local image content while remaining resistant to small changes in
    pose or appearance. The adopted method pools gradient orientation
    information locally in the same way as the SIFT [Lowe 2004]
    feature. The image window is divided into small spatial regions,
    called "cells". For each cell we accumulate a local 1-D histogram
    of gradient or edge orientations over all the pixels in the
    cell. This combined cell-level 1-D histogram forms the basic
    "orientation histogram" representation. Each orientation histogram
    divides the gradient angle range into a fixed number of
    predetermined bins. The gradient magnitudes of the pixels in the
    cell are used to vote into the orientation histogram.
    """

    magnitude = sqrt(gx**2 + gy**2)
    orientation = arctan2(gy, gx) * (180 / pi) % 180

    sy, sx = flow.shape[:2]
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    # integer division: slice indices must be ints under Python 3
    subsample = np.index_exp[cy // 2:cy * n_cellsy:cy, cx // 2:cx * n_cellsx:cx]
    # the last histogram bin is reserved for "no motion" (filled below)
    for i in range(orientations - 1):
        #create new integral image for this orientation
        # isolate orientations in this range

        temp_ori = np.where(orientation < 180 / orientations * (i + 1),
                            orientation, -1)
        temp_ori = np.where(orientation >= 180 / orientations * i,
                            temp_ori, -1)
        # select magnitudes for those orientations
        cond2 = (temp_ori > -1) * (magnitude > motion_threshold)
        temp_mag = np.where(cond2, magnitude, 0)

        temp_filt = uniform_filter(temp_mag, size=(cy, cx))
        orientation_histogram[:, :, i] = temp_filt[subsample]

    ''' Calculate the no-motion bin '''
    temp_mag = np.where(magnitude <= motion_threshold, magnitude, 0)

    temp_filt = uniform_filter(temp_mag, size=(cy, cx))
    orientation_histogram[:, :, -1] = temp_filt[subsample]

    # now for each cell, compute the histogram
    hof_image = None

    if visualise:
        from skimage import draw

        radius = min(cx, cy) // 2 - 1
        hof_image = np.zeros((sy, sx), dtype=float)
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o in range(orientations-1):
                    centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                    dx = int(radius * cos(float(o) / orientations * np.pi))
                    dy = int(radius * sin(float(o) / orientations * np.pi))
                    rr, cc = draw.bresenham(centre[0] - dy, centre[1] - dx,
                                            centre[0] + dy, centre[1] + dx)
                    hof_image[rr, cc] += orientation_histogram[y, x, o]

    """
    The fourth stage computes normalisation, which takes local groups of
    cells and contrast normalises their overall responses before passing
    to next stage. Normalisation introduces better invariance to illumination,
    shadowing, and edge contrast. It is performed by accumulating a measure
    of local histogram "energy" over local groups of cells that we call
    "blocks". The result is used to normalise each cell in the block.
    Typically each individual cell is shared between several blocks, but
    its normalisations are block dependent and thus different. The cell
    thus appears several times in the final output vector with different
    normalisations. This may seem redundant but it improves the performance.
    We refer to the normalised block descriptors as Histogram of Oriented
    Gradient (hog) descriptors.
    """

    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksy, n_blocksx,
                                  by, bx, orientations))

    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[y:y+by, x:x+bx, :]
            eps = 1e-5
            normalised_blocks[y, x, :] = block / sqrt(block.sum()**2 + eps)

    """
    The final step collects the hof descriptors from all blocks of a dense
    overlapping grid of blocks covering the detection window into a combined
    feature vector for use in the window classifier.
    """

    if visualise:
        return normalised_blocks.ravel(), hof_image
    else:
        return normalised_blocks.ravel()
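A usage sketch: the dense flow this function consumes can come from OpenCV's Farneback estimator, which returns exactly the (M, N, 2) array expected here. The frames below are synthetic stand-ins; the positional arguments follow the documented order of cv2.calcOpticalFlowFarneback:

import cv2
import numpy as np

frame1 = np.random.randint(0, 255, (64, 64), dtype=np.uint8)
frame2 = np.roll(frame1, 2, axis=1)  # shifted copy, so flow is ~2 px in x
# args after the frames: flow, pyr_scale, levels, winsize, iterations,
# poly_n, poly_sigma, flags
flow = cv2.calcOpticalFlowFarneback(frame1, frame2, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
features = hof(flow, motion_threshold=1.)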
Code example #15
File: _hog.py Project: jaspreet11111111/scikit-image
def hog(image,
        orientations=9,
        pixels_per_cell=(8, 8),
        cells_per_block=(3, 3),
        visualise=False,
        normalise=False):
    """Extract Histogram of Oriented Gradients (HOG) for a given image.

    Compute a Histogram of Oriented Gradients (HOG) by

        1. (optional) global image normalisation
        2. computing the gradient image in x and y
        3. computing gradient histograms
        4. normalising across blocks
        5. flattening into a feature vector

    Parameters
    ----------
    image : (M, N) ndarray
        Input image (greyscale).
    orientations : int
        Number of orientation bins.
    pixels_per_cell : 2 tuple (int, int)
        Size (in pixels) of a cell.
    cells_per_block  : 2 tuple (int,int)
        Number of cells in each block.
    visualise : bool, optional
        Also return an image of the HOG.
    normalise : bool, optional
        Apply power law compression to normalise the image before
        processing.

    Returns
    -------
    newarr : ndarray
        HOG for the image as a 1D (flattened) array.
    hog_image : ndarray (if visualise=True)
        A visualisation of the HOG image.

    References
    ----------
    * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

    * Dalal, N and Triggs, B, Histograms of Oriented Gradients for
      Human Detection, IEEE Computer Society Conference on Computer
      Vision and Pattern Recognition 2005 San Diego, CA, USA

    """
    image = np.atleast_2d(image)
    """
    The first stage applies an optional global image normalisation
    equalisation that is designed to reduce the influence of illumination
    effects. In practice we use gamma (power law) compression, either
    computing the square root or the log of each colour channel.
    Image texture strength is typically proportional to the local surface
    illumination so this compression helps to reduce the effects of local
    shadowing and illumination variations.
    """

    if image.ndim > 2:
        raise ValueError("Currently only supports grey-level images")

    if normalise:
        image = sqrt(image)
    """
    The second stage computes first order image gradients. These capture
    contour, silhouette and some texture information, while providing
    further resistance to illumination variations. The locally dominant
    colour channel is used, which provides colour invariance to a large
    extent. Variant methods may also include second order image derivatives,
    which act as primitive bar detectors - a useful feature for capturing,
    e.g. bar like structures in bicycles and limbs in humans.
    """

    if image.dtype.kind == 'u':
        # convert uint image to float
        # to avoid problems with subtracting unsigned numbers in np.diff()
        image = image.astype('float')

    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)
    gy[:-1, :] = np.diff(image, n=1, axis=0)
    """
    The third stage aims to produce an encoding that is sensitive to
    local image content while remaining resistant to small changes in
    pose or appearance. The adopted method pools gradient orientation
    information locally in the same way as the SIFT [Lowe 2004]
    feature. The image window is divided into small spatial regions,
    called "cells". For each cell we accumulate a local 1-D histogram
    of gradient or edge orientations over all the pixels in the
    cell. This combined cell-level 1-D histogram forms the basic
    "orientation histogram" representation. Each orientation histogram
    divides the gradient angle range into a fixed number of
    predetermined bins. The gradient magnitudes of the pixels in the
    cell are used to vote into the orientation histogram.
    """

    magnitude = sqrt(gx**2 + gy**2)
    orientation = arctan2(gy, gx) * (180 / pi) % 180

    sy, sx = image.shape
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    # integer division: slice indices must be ints under Python 3
    subsample = np.index_exp[cy // 2:cy * n_cellsy:cy, cx // 2:cx * n_cellsx:cx]
    for i in range(orientations):
        #create new integral image for this orientation
        # isolate orientations in this range

        temp_ori = np.where(orientation < 180 / orientations * (i + 1),
                            orientation, -1)
        temp_ori = np.where(orientation >= 180 / orientations * i, temp_ori,
                            -1)
        # select magnitudes for those orientations
        cond2 = temp_ori > -1
        temp_mag = np.where(cond2, magnitude, 0)

        temp_filt = uniform_filter(temp_mag, size=(cy, cx))
        orientation_histogram[:, :, i] = temp_filt[subsample]

    # now for each cell, compute the histogram
    hog_image = None

    if visualise:
        from skimage import draw

        radius = min(cx, cy) // 2 - 1
        hog_image = np.zeros((sy, sx), dtype=float)
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o in range(orientations):
                    centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                    dx = radius * cos(float(o) / orientations * np.pi)
                    dy = radius * sin(float(o) / orientations * np.pi)
                    rr, cc = draw.bresenham(int(centre[0] - dx),
                                            int(centre[1] - dy),
                                            int(centre[0] + dx),
                                            int(centre[1] + dy))
                    hog_image[rr, cc] += orientation_histogram[y, x, o]
    """
    The fourth stage computes normalisation, which takes local groups of
    cells and contrast normalises their overall responses before passing
    to next stage. Normalisation introduces better invariance to illumination,
    shadowing, and edge contrast. It is performed by accumulating a measure
    of local histogram "energy" over local groups of cells that we call
    "blocks". The result is used to normalise each cell in the block.
    Typically each individual cell is shared between several blocks, but
    its normalisations are block dependent and thus different. The cell
    thus appears several times in the final output vector with different
    normalisations. This may seem redundant but it improves the performance.
    We refer to the normalised block descriptors as Histogram of Oriented
    Gradient (HOG) descriptors.
    """

    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksy, n_blocksx, by, bx, orientations))

    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[y:y + by, x:x + bx, :]
            eps = 1e-5
            normalised_blocks[y, x, :] = block / sqrt(block.sum()**2 + eps)
    """
    The final step collects the HOG descriptors from all blocks of a dense
    overlapping grid of blocks covering the detection window into a combined
    feature vector for use in the window classifier.
    """

    if visualise:
        return normalised_blocks.ravel(), hog_image
    else:
        return normalised_blocks.ravel()
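For comparison, the maintained descendant of this function ships as skimage.feature.hog; a quick call mirroring the parameters above, assuming a recent scikit-image (where the keyword is spelled visualize):

import numpy as np
from skimage import data
from skimage.feature import hog

image = data.camera()  # built-in 512x512 greyscale test image
fd, hog_image = hog(image, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(3, 3), visualize=True)
print(fd.shape)  # one 9-bin histogram per cell in each overlapping 3x3 block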
Code example #16
File: hog.py Project: Teva/scikits.image
def hog(image, orientations=9, pixels_per_cell=(8, 8),
        cells_per_block=(3, 3), visualise=False, normalise=False):
    """Extract Histogram of Oriented Gradients (HOG) for a given image.

    Compute a Histogram of Oriented Gradients (HOG) by

        1. (optional) global image normalisation
        2. computing the gradient image in x and y
        3. computing gradient histograms
        4. normalising across blocks
        5. flattening into a feature vector

    Parameters
    ----------
    image : (M, N) ndarray
        Input image (greyscale).
    orientations : int
        Number of orientation bins.
    pixels_per_cell : 2 tuple (int, int)
        Size (in pixels) of a cell.
    cells_per_block  : 2 tuple (int,int)
        Number of cells in each block.
    visualise : bool, optional
        Also return an image of the HOG.
    normalise : bool, optional
        Apply power law compression to normalise the image before
        processing.

    Returns
    -------
    newarr : ndarray
        HOG for the image as a 1D (flattened) array.
    hog_image : ndarray (if visualise=True)
        A visualisation of the HOG image.

    References
    ----------
    * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

    * Dalal, N and Triggs, B, Histograms of Oriented Gradients for
      Human Detection, IEEE Computer Society Conference on Computer
      Vision and Pattern Recognition 2005 San Diego, CA, USA

    """
    image = np.atleast_2d(image)

    """
    The first stage applies an optional global image normalisation
    equalisation that is designed to reduce the influence of illumination
    effects. In practice we use gamma (power law) compression, either
    computing the square root or the log of each colour channel.
    Image texture strength is typically proportional to the local surface
    illumination so this compression helps to reduce the effects of local
    shadowing and illumination variations.
    """

    if image.ndim > 2:
        raise ValueError("Currently only supports grey-level images")

    if normalise:
        image = sqrt(image)

    """
    The second stage computes first order image gradients. These capture
    contour, silhouette and some texture information, while providing
    further resistance to illumination variations. The locally dominant
    colour channel is used, which provides colour invariance to a large
    extent. Variant methods may also include second order image derivatives,
    which act as primitive bar detectors - a useful feature for capturing,
    e.g. bar like structures in bicycles and limbs in humans.
    """

    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)
    gy[:-1, :] = np.diff(image, n=1, axis=0)

    """
    The third stage aims to produce an encoding that is sensitive to
    local image content while remaining resistant to small changes in
    pose or appearance. The adopted method pools gradient orientation
    information locally in the same way as the SIFT [Lowe 2004]
    feature. The image window is divided into small spatial regions,
    called "cells". For each cell we accumulate a local 1-D histogram
    of gradient or edge orientations over all the pixels in the
    cell. This combined cell-level 1-D histogram forms the basic
    "orientation histogram" representation. Each orientation histogram
    divides the gradient angle range into a fixed number of
    predetermined bins. The gradient magnitudes of the pixels in the
    cell are used to vote into the orientation histogram.
    """

    magnitude = sqrt(gx ** 2 + gy ** 2)
    orientation = arctan2(gy, (gx + 1e-15)) * (180 / pi) + 90

    sx, sy = image.shape
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
    for i in range(orientations):
        #create new integral image for this orientation
        # isolate orientations in this range

        temp_ori = np.where(orientation < 180 / orientations * (i + 1),
                            orientation, 0)
        temp_ori = np.where(orientation >= 180 / orientations * i,
                            temp_ori, 0)
        # select magnitudes for those orientations
        cond2 = temp_ori > 0
        temp_mag = np.where(cond2, magnitude, 0)

        orientation_histogram[:, :, i] = uniform_filter(
            temp_mag, size=(cx, cy))[cx // 2::cx, cy // 2::cy].T


    # now for each cell, compute the histogram

    radius = min(cx, cy) // 2 - 1
    hog_image = None

    if visualise:
        from skimage import draw

        hog_image = np.zeros((sy, sx), dtype=float)
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o in range(orientations):
                    centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                    dx = int(radius * cos(float(o) / orientations * np.pi))
                    dy = int(radius * sin(float(o) / orientations * np.pi))
                    # bresenham expects integer coordinates
                    rr, cc = draw.bresenham(centre[0] - dx, centre[1] - dy,
                                            centre[0] + dx, centre[1] + dy)
                    hog_image[rr, cc] += orientation_histogram[x, y, o]

    """
    The fourth stage computes normalisation, which takes local groups of
    cells and contrast normalises their overall responses before passing
    to next stage. Normalisation introduces better invariance to illumination,
    shadowing, and edge contrast. It is performed by accumulating a measure
    of local histogram "energy" over local groups of cells that we call
    "blocks". The result is used to normalise each cell in the block.
    Typically each individual cell is shared between several blocks, but
    its normalisations are block dependent and thus different. The cell
    thus appears several times in the final output vector with different
    normalisations. This may seem redundant but it improves the performance.
    We refer to the normalised block descriptors as Histogram of Oriented
    Gradient (HOG) descriptors.
    """

    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksx, n_blocksy,
                                  bx, by, orientations))

    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[x:x + bx, y:y + by, :]
            eps = 1e-5
            normalised_blocks[x, y, :] = block / sqrt(block.sum() ** 2 + eps)

    """
    The final step collects the HOG descriptors from all blocks of a dense
    overlapping grid of blocks covering the detection window into a combined
    feature vector for use in the window classifier.
    """

    if visualise:
        return normalised_blocks.ravel(), hog_image
    else:
        return normalised_blocks.ravel()
Code example #17
def hog(image, orientations=9, pixels_per_cell=(8, 8),
        cells_per_block=(3, 3), visualise=False, normalise=False):

    image = np.atleast_2d(image)
    if image.ndim > 2:
        raise ValueError("Currently only supports grey-level images")

    if normalise:
        image = sqrt(image)

    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)
    gy[:-1, :] = np.diff(image, n=1, axis=0)

    magnitude = sqrt(gx ** 2 + gy ** 2)
    orientation = arctan2(gy, (gx + 1e-15)) * (180 / pi) + 90

    sy, sx = image.shape
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    for i in range(orientations):

        temp_ori = np.where(orientation < 180 / orientations * (i + 1),
                            orientation, 0)
        temp_ori = np.where(orientation >= 180 / orientations * i,
                            temp_ori, 0)
        # select magnitudes for those orientations
        cond2 = temp_ori > 0
        temp_mag = np.where(cond2, magnitude, 0)

        orientation_histogram[:, :, i] = uniform_filter(
            temp_mag, size=(cy, cx))[cy // 2::cy, cx // 2::cx]

    radius = min(cx, cy) // 2 - 1
    hog_image = None

    if visualise:
        from skimage import draw

        hog_image = np.zeros((sy, sx), dtype=float)
        
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o in range(orientations):
                    centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                    dx = int(radius * cos(float(o) / orientations * np.pi))
                    dy = int(radius * sin(float(o) / orientations * np.pi))
                    # bresenham expects integer coordinates
                    rr, cc = draw.bresenham(centre[0] - dx, centre[1] - dy,
                                            centre[0] + dx, centre[1] + dy)
                    hog_image[rr, cc] += orientation_histogram[y, x, o]


    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksy, n_blocksx, by, bx, orientations))

    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[y:y + by, x:x + bx, :]
            eps = 1e-5
            normalised_blocks[y, x, :] = block / sqrt(block.sum() ** 2 + eps)

    if visualise:
        return normalised_blocks.ravel(), hog_image
    else:
        return normalised_blocks.ravel()
Code example #18
File: hog.py Project: baibhavr/Pedestrian-Detection
def hog(image,
        orientations=9,
        pixels_per_cell=(8, 8),
        cells_per_block=(2, 2),
        visualise=False,
        normalise=True):

    image = np.atleast_2d(image)

    if image.ndim > 2:
        raise ValueError("Only supports greyscale images")

    if normalise:
        image = sqrt(image)

    if image.dtype.kind == 'u':
        image = image.astype('float')

    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)
    gy[:-1, :] = np.diff(image, n=1, axis=0)

    magnitude = sqrt(gx**2 + gy**2)
    orientation = arctan2(gy, gx) * (180 / pi) % 180

    sy, sx = image.shape
    cx, cy = pixels_per_cell
    bx, by = cells_per_block

    n_cellsx = int(np.floor(sx // cx))  # number of cells in x
    n_cellsy = int(np.floor(sy // cy))  # number of cells in y

    orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
    # integer division: slice indices must be ints under Python 3
    subsample = np.index_exp[cy // 2:cy * n_cellsy:cy, cx // 2:cx * n_cellsx:cx]
    for i in range(orientations):
        temp_ori = np.where(orientation < 180 / orientations * (i + 1),
                            orientation, -1)
        temp_ori = np.where(orientation >= 180 / orientations * i, temp_ori,
                            -1)
        cond2 = temp_ori > -1
        temp_mag = np.where(cond2, magnitude, 0)

        temp_filt = uniform_filter(temp_mag, size=(cy, cx))
        orientation_histogram[:, :, i] = temp_filt[subsample]

    # now for each cell, compute the histogram
    hog_image = None

    if visualise:
        from skimage import draw

        radius = min(cx, cy) // 2 - 1
        hog_image = np.zeros((sy, sx), dtype=float)
        for x in range(n_cellsx):
            for y in range(n_cellsy):
                for o in range(orientations):
                    centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
                    dx = int(radius * cos(float(o) / orientations * np.pi))
                    dy = int(radius * sin(float(o) / orientations * np.pi))
                    # bresenham expects integer coordinates
                    rr, cc = draw.bresenham(centre[0] - dy, centre[1] - dx,
                                            centre[0] + dy, centre[1] + dx)
                    hog_image[rr, cc] += orientation_histogram[y, x, o]

    n_blocksx = (n_cellsx - bx) + 1
    n_blocksy = (n_cellsy - by) + 1
    normalised_blocks = np.zeros((n_blocksy, n_blocksx, by, bx, orientations))

    for x in range(n_blocksx):
        for y in range(n_blocksy):
            block = orientation_histogram[y:y + by, x:x + bx, :]
            eps = 1e-5
            normalised_blocks[y, x, :] = block / sqrt(block.sum()**2 + eps)


    if visualise:
        return normalised_blocks.ravel(), hog_image
    else:
        return normalised_blocks.ravel()
Code example #19
File: pipeline.py Project: hbradlow/em_hole_finder
def draw_line(p1, p2):
    cc, rr = bresenham(x=p1[0], y=p1[1], x2=p2[0], y2=p2[1])
    new_data[rr, cc] = 0
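Since skimage.draw.bresenham was long ago renamed skimage.draw.line, a self-contained fallback is sometimes handy. A sketch of the classic all-octant integer Bresenham walk, returning (rr, cc) index arrays in the same spirit as the snippets above:

import numpy as np

def bresenham_px(r0, c0, r1, c1):
    # classic error-accumulating walk; endpoints inclusive, all octants
    dr = -abs(r1 - r0)
    dc = abs(c1 - c0)
    sr = 1 if r0 < r1 else -1
    sc = 1 if c0 < c1 else -1
    err = dc + dr
    rr, cc = [r0], [c0]
    r, c = r0, c0
    while (r, c) != (r1, c1):
        e2 = 2 * err
        if e2 >= dr:  # step along the column axis
            err += dr
            c += sc
        if e2 <= dc:  # step along the row axis
            err += dc
            r += sr
        rr.append(r)
        cc.append(c)
    return np.array(rr), np.array(cc)

rr, cc = bresenham_px(0, 0, 4, 4)  # the 5-pixel diagonal from the tests above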