Example #1
def test_rotated_lena():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.lena().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im))
    results_rotated = peak_local_max(corner_moravec(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im))
    results_rotated = peak_local_max(corner_harris(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
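
The snippets above come from the scikit-image test suite and omit their imports. Below is a minimal, self-contained sketch of the same rotation check, assuming a recent scikit-image release (it uses data.astronaut() because data.lena() is no longer shipped).

import numpy as np
from skimage import data, img_as_float
from skimage.feature import corner_harris, corner_shi_tomasi, peak_local_max

im = img_as_float(data.astronaut().mean(axis=2))   # grayscale float image
im_rotated = im.T                                  # transposed copy used as the "rotation"

for detector in (corner_harris, corner_shi_tomasi):
    results = peak_local_max(detector(im), min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(detector(im_rotated), min_distance=10, threshold_rel=0)
    # Row coordinates in one image should match column coordinates in the other.
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()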
Example #2
def test_rotated_img():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.astronaut().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_moravec(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_harris(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Example #3
def test_rotated_lena():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.lena().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im))
    results_rotated = peak_local_max(corner_moravec(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im))
    results_rotated = peak_local_max(corner_harris(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Example #4
def test_rotated_img():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.astronaut().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_moravec(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_harris(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Example #5
def test_rotated_img():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.astronaut().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = np.nonzero(corner_moravec(im))
    results_rotated = np.nonzero(corner_moravec(im_rotated))
    assert (np.sort(results[0]) == np.sort(results_rotated[1])).all()
    assert (np.sort(results[1]) == np.sort(results_rotated[0])).all()

    # Harris
    results = np.nonzero(corner_harris(im))
    results_rotated = np.nonzero(corner_harris(im_rotated))
    assert (np.sort(results[0]) == np.sort(results_rotated[1])).all()
    assert (np.sort(results[1]) == np.sort(results_rotated[0])).all()

    # Shi-Tomasi
    results = np.nonzero(corner_shi_tomasi(im))
    results_rotated = np.nonzero(corner_shi_tomasi(im_rotated))
    assert (np.sort(results[0]) == np.sort(results_rotated[1])).all()
    assert (np.sort(results[1]) == np.sort(results_rotated[0])).all()
Example #6
    def __call__(self, im1, im2, maxiter=10,
                 weigh_by_shitomasi=False,
                 smooth_vfield=2):

        mesh = self.mesh
        xgrid, ygrid = np.unique(mesh[:,1]), np.unique(mesh[:,0])

        gshape = (len(xgrid), len(ygrid))
        p = np.zeros((2,)+gshape)
        imx = im1.copy()
        for niter in range(maxiter):
            vx = lk_opflow(imx,im2, mesh, wsize=self.wsize)

            if weigh_by_shitomasi:
                st_resp = skfeature.corner_shi_tomasi(im1)
                st_resp = lib.clip_and_rescale(st_resp, 5000)
                weights = np.array([st_resp[tuple(l)] for l in mesh])
                vx = vx*weights[:,None]

            vfields = vx.T.reshape((2,)+gshape)

            if smooth_vfield:
                vfields = list(map(partial(atrous.smooth, level=smooth_vfield), vfields))

            p += vfields
            imx = self.warp_image(im1, p)
        return imx, p
Example #7
def getFeatures(img, bbox, use_shi=False):
    n_object = np.shape(bbox)[0]
    N = 0
    temp = np.empty((n_object, ),
                    dtype=np.ndarray)  # temporary storage of x,y coordinates
    for i in range(n_object):
        (xmin, ymin, boxw, boxh) = cv2.boundingRect(bbox[i, :, :].astype(int))
        roi = img[ymin:ymin + boxh, xmin:xmin + boxw]
        # cv2.imshow('roi',roi)
        if use_shi:
            corner_response = corner_shi_tomasi(roi)
        else:
            corner_response = corner_harris(roi)
        coordinates = peak_local_max(corner_response,
                                     num_peaks=20,
                                     exclude_border=2)
        coordinates[:, 1] += xmin
        coordinates[:, 0] += ymin
        temp[i] = coordinates
        if coordinates.shape[0] > N:
            N = coordinates.shape[0]
    x = np.full((N, n_object), -1)
    y = np.full((N, n_object), -1)
    for i in range(n_object):
        n_feature = temp[i].shape[0]
        x[0:n_feature, i] = temp[i][:, 1]
        y[0:n_feature, i] = temp[i][:, 0]
    return x, y
Example #8
def test_noisy_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.
    np.random.seed(seed=1234)
    im = im + np.random.uniform(size=im.shape) * .2

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10,
                             threshold_rel=0)
    # undefined number of interest points
    assert results.any()

    # Harris
    results = peak_local_max(corner_harris(im, method='k'),
                             min_distance=10,
                             threshold_rel=0)
    assert len(results) == 1
    results = peak_local_max(corner_harris(im, method='eps'),
                             min_distance=10,
                             threshold_rel=0)
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im, sigma=1.5),
                             min_distance=10,
                             threshold_rel=0)
    assert len(results) == 1
Example #9
def getFeatures(img, bbox):
  #TODO: Your code here
  img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  img_gray = np.array(img_gray)
  x = []
  y = []
  for box in bbox:
    #box = [(y,x),(y,x+w),(y+h,x),(y+h,x+w)]
    tempx = int((box[2][0]-box[0][0])*0.1)
    tempy = int((box[1][1]-box[0][1])*0.1)
    box_img = img_gray[box[0][0]+tempx:box[2][0]-tempx, 
                        box[0][1]+tempy:box[1][1]-tempy]
    xys = corner_peaks(corner_shi_tomasi(box_img, sigma=0.5))
    # plt.figure()
    # plt.imshow(box_img, cmap='gray')
    # plt.axis('off')
    # plt.show()
    x.append(box[0,0]+xys[0:len(xys),0]+tempx)
    y.append(box[0,1]+xys[0:len(xys),1]+tempy)
  x = np.array(x)
  y = np.array(y)
  # print x
  # print y
  # imgwbox = drawBox(img, bbox)
  # plt.figure()
  # plt.imshow(imgwbox)
  # for i in range(len(x)):  
  #   plt.plot(y[i], x[i], 'w+')
  # plt.axis('off')
  # plt.show()
  return x, y
Example #10
def shi_tomasi_skimage(image, min_distance, num_peaks, **kwargs):
    coords_subpix = np.zeros_like(image)
    cornerness_matrix = sf.corner_peaks(sf.corner_shi_tomasi(image), min_distance=min_distance, num_peaks=num_peaks)
    coords_subpix = sf.corner_subpix(image, cornerness_matrix, window_size = 13, alpha=0.8)
    draw_points(image, cornerness_matrix, coords_subpix)
    print("detected points: ",cornerness_matrix.shape[0])
    return cornerness_matrix, coords_subpix
Example #11
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10,
                             threshold_rel=0)
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im, method='k'),
                             min_distance=10,
                             threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    results = peak_local_max(corner_harris(im, method='eps'),
                             min_distance=10,
                             threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10,
                             threshold_rel=0)
    # interest at corner
    assert len(results) == 1
Example #12
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im, method='k'),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    results = peak_local_max(corner_harris(im, method='eps'),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1
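
For reference, a minimal sketch showing how the two Harris variants exercised in these tests are selected; both should report the single interior corner of the synthetic square.

import numpy as np
from skimage.feature import corner_harris, peak_local_max

im = np.zeros((50, 50), dtype=float)
im[:25, :25] = 1.0

for method in ('k', 'eps'):                     # the two response formulas corner_harris supports
    response = corner_harris(im, method=method)
    peaks = peak_local_max(response, min_distance=10, threshold_rel=0)
    print(method, peaks)                        # one peak near the inner corner in both cases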
Example #13
def getFeatures(img, bbox):
    #we only care about pixels that are within bounding boxes
    #the below for loop creates a new image called boxed_img that includes only the pixels in bounding boxes
    r, c = img.shape
    boxed_img = np.zeros(img.shape, np.uint8)
    [numFaces, numCorners, coords] = bbox.shape
    xOutput = np.zeros((250, numFaces), dtype=np.int_)
    yOutput = np.zeros((250, numFaces), dtype=np.int_)
    count = 0
    for arr in bbox:
        x1 = arr[0, 0]
        y1 = arr[0, 1]
        x2 = arr[3, 0]
        y2 = arr[3, 1]
        boxed_img[y1:y2 + 1, x1:x2 + 1] = img[y1:y2 + 1, x1:x2 + 1]

        #now we do corner detection
        features_array = feature.corner_shi_tomasi(boxed_img, sigma=1)

        # suppress everything except the top 250 points
        features_sorted = np.sort(features_array, axis=None)
        thresh = features_sorted[-250]
        features_array[features_array < thresh] = 0
        features_array[features_array > 0] = 1
        features_array = features_array.astype(bool)

        x, y = np.meshgrid(range(c), range(r))
        x = x[features_array]
        y = y[features_array]

        if x.size > 250:
            x = x[0:250]
            y = y[0:250]
        #we pad the array with 0's so that we always have 250 points of interest no matter what
        elif x.size < 250:
            x_pad = np.zeros([250], dtype=int)   # builtin int; np.int is no longer available
            y_pad = np.zeros([250], dtype=int)
            x_pad[0:x.size] = x
            y_pad[0:y.size] = y
            x = x_pad
            y = y_pad
        xOutput[:, count] = x
        yOutput[:, count] = y
        count += 1
    '''
    #automatic thresholding
    minX = bbox[0][0][0]
    maxX = bbox[0][1][0]
    minY = bbox[0][0][1]
    maxY = bbox[0][2][1]
    thresholded_features = thresholdInBBox(features_array, minX, maxX, minY, maxY)
    x, y, rmax = anms(thresholded_features, 100)
    '''
    x = xOutput
    y = yOutput

    return x, y
Example #14
def corners_shi_tomasi(img, sigma=1., min_distance=1, threshold_rel=0.1):
    img = img2gray(img)
    corners = corner_shi_tomasi(img, sigma)
    corners = corner_peaks(corners,
                           min_distance=min_distance,
                           threshold_abs=None,
                           threshold_rel=threshold_rel,
                           exclude_border=True,
                           indices=True)
    return corners
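
A usage sketch for an equivalent call without the project-specific img2gray helper; the indices keyword of corner_peaks has been deprecated in newer scikit-image releases, so it is omitted here.

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_peaks, corner_shi_tomasi

gray = rgb2gray(data.astronaut())
corners = corner_peaks(corner_shi_tomasi(gray, sigma=1.0),
                       min_distance=1, threshold_rel=0.1)
print(corners[:5])   # (row, col) coordinates of the strongest corners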
Example #15
def corner_feature(dataset):

    for image in dataset:

        im = color.rgb2gray(image[0])

        corners_map = feature.corner_shi_tomasi(im)

        image[0] = np.asarray(corners_map).reshape(-1)

    return dataset
Example #16
def test_squared_dot():
    im = np.zeros((50, 50))
    im[4:8, 4:8] = 1
    im = img_as_float(im)

    # Moravec fails

    # Harris
    results = peak_local_max(corner_harris(im))
    assert (results == np.array([[6, 6]])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    assert (results == np.array([[6, 6]])).all()
Example #17
def test_squared_dot():
    im = np.zeros((50, 50))
    im[4:8, 4:8] = 1
    im = img_as_float(im)

    # Moravec fails

    # Harris
    results = peak_local_max(corner_harris(im))
    assert (results == np.array([[6, 6]])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    assert (results == np.array([[6, 6]])).all()
Example #18
def conrecs():
    #im = feature.corner_shi_tomasi(im).corner_harris(im)
    keypoints1 = feature.corner_peaks(feature.corner_shi_tomasi(im),
                                      min_distance=1)
    print(keypoints1)

    extractor = feature.BRIEF()

    extractor.extract(im, keypoints1)
    keys = keypoints1[extractor.mask]

    fig, ax = plt.subplots(figsize=(18, 13))
    ax.imshow(im, cmap=plt.cm.gray)

    for pair in keys:
        plt.scatter(pair[0], pair[1])
Example #19
    def shi_tomasi_skimage(self, image, **kwargs):
        coords_subpix = np.zeros_like(image)
        cornerness_matrix = sf.corner_peaks(sf.corner_shi_tomasi(image),
                                            min_distance=1)
        coords_subpix = sf.corner_subpix(image,
                                         cornerness_matrix,
                                         window_size=13,
                                         alpha=kwargs["alpha"])
        display.draw_points(image,
                            cornerness_matrix,
                            '_',
                            self.path[2:-1],
                            method_name=kwargs['method'],
                            name=self.name,
                            sp=coords_subpix)
        print("detected points: ", cornerness_matrix.shape[0])
        return cornerness_matrix, coords_subpix
Example #20
def test_noisy_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.
    im = im + np.random.uniform(size=im.shape) * .2

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # undefined number of interest points
    assert results.any()

    # Harris
    results = peak_local_max(corner_harris(im, sigma=1.5))
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im, sigma=1.5))
    assert len(results) == 1
Example #21
    def get_corner_distances(self):
        a = corner_shi_tomasi(color.rgb2gray(self.img))
#        val = filters.threshold_otsu(self.img)
#        mask = self.img < val 
#        a = peak_local_max(mask)
        print(a.shape)
        print(a)
        d1 = self.get_coord_dist(a[0], a[1])
        d2 = self.get_coord_dist(a[1], a[2])
        d3 = self.get_coord_dist(a[2], a[3])
        d4 = self.get_coord_dist(a[3], a[0])
        print('corner distances')
        print(d1)
        print(d2)
        print(d3)
        print(d4)
        print('std dev: ' + str(np.std([d1, d2, d3, d4])))
Example #22
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im))
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    # interest at corner
    assert len(results) == 1
Example #23
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im))
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    # interest at corner
    assert len(results) == 1
Example #24
    def classify(self, image, model, args):
        ''' uniformly sampled points '''

        Worig, Horig = image.size()
        num_points = int(args.get('points', 100))
        border = float(args.get('border', 5))
        border = int(round(border * np.mean([Worig, Horig]) / 100.0))

        # get gray scale image for salient point detection
        pix = image.pixels(
            operations=
            'slice=,,1,1&resize=%s,%s,BC,MX&depth=8,d,u&remap=gray&format=tiff'
            % (self.side, self.side))
        W, H = pix.shape[0:2]

        # compute scaling factor
        sx, sy = (1.0, 1.0)
        if Worig != W or Horig != H:
            sx = float(W) / Worig
            sy = float(H) / Horig
            log.debug(
                'Classify: Original image is larger, use scaling factors: %s,%s',
                sx, sy)

        # scale params to resized image
        border = int(round(border * sx))
        pts, num_points_x, num_points_y, sw, sh = distribute_points(
            num_points, W, H, border, equal=False, return_all=True)

        # detect salient points
        pts = corner_peaks(corner_shi_tomasi(pix),
                           min_distance=int(sw * 0.3),
                           exclude_border=border,
                           indices=True,
                           num_peaks=num_points)

        # re-scale points
        points = [(p[0] / sx, p[1] / sy) for p in pts]

        return classify_points(image, model, args, points, 'Salient points')
Example #25
def get_features(img, bboxs):
    i_vec = np.empty((0, 0))
    j_vec = np.empty((0, 0))
    for idx, bbox in enumerate(bboxs):
        roi_start = (int(np.min(bbox[:, 0])), int(np.min(bbox[:, 1])))
        roi_w = int(np.max(bbox[:, 0]) - np.min(bbox[:, 0]))
        roi_h = int(np.max(bbox[:, 1]) - np.min(bbox[:, 1]))
        roi = img[roi_start[1]:roi_start[1] + roi_h,
                  roi_start[0]:roi_start[0] + roi_w]
        fps = corner_peaks(corner_shi_tomasi(roi),
                           min_distance=1,
                           num_peaks=50)
        i = np.reshape(fps[:, 0] + roi_start[1], (-1, 1))
        j = np.reshape(fps[:, 1] + roi_start[0], (-1, 1))
        if (i_vec.size == 0):
            i_vec = np.resize(i_vec, (i.shape[0], i_vec.shape[1]))
            j_vec = np.resize(j_vec, (j.shape[0], j_vec.shape[1]))
            i_vec = np.append(i_vec, i, axis=1)
            j_vec = np.append(j_vec, j, axis=1)
        elif (i_vec.shape[0] < i.shape[0]):
            i_vec = np.pad(i_vec.T, ((0, 0), (0, i.shape[0] - i_vec.shape[0])),
                           'constant',
                           constant_values=(-1)).T
            j_vec = np.pad(j_vec.T, ((0, 0), (0, j.shape[0] - j_vec.shape[0])),
                           'constant',
                           constant_values=(-1)).T
            i_vec = np.append(i_vec, i, axis=1)
            j_vec = np.append(j_vec, j, axis=1)
        else:
            i = np.pad(i.T, ((0, 0), (0, i_vec.shape[0] - i.shape[0])),
                       'constant',
                       constant_values=(-1)).T
            j = np.pad(j.T, ((0, 0), (0, j_vec.shape[0] - j.shape[0])),
                       'constant',
                       constant_values=(-1)).T
            i_vec = np.append(i_vec, i, axis=1)
            j_vec = np.append(j_vec, j, axis=1)
    return i_vec, j_vec
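
An illustrative call for the helper above, assuming get_features and its skimage imports are in scope, a grayscale image, and each box given as a (4, 2) array of (x, y) corners; columns grow with each box and missing rows are padded with -1.

import numpy as np
from skimage import data
from skimage.color import rgb2gray

img = rgb2gray(data.astronaut())
bboxs = np.array([[[100, 100], [260, 100], [100, 220], [260, 220]]], dtype=float)
i_vec, j_vec = get_features(img, bboxs)   # row and column coordinates per box
print(i_vec.shape, j_vec.shape)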
Example #26
def getFeatures(img, bbox):
	import numpy as np
	from skimage.feature import corner_shi_tomasi
	from helpers import anms
	import matplotlib.pyplot as plt

	# Initialize our outputs
	x = np.zeros(bbox.shape[0], dtype=object)
	y = np.zeros(bbox.shape[0], dtype=object)

	for i in range(bbox.shape[0]):
		# Save our offsets from the bbox array, not necessary but improves readability
		xmin = np.amin(bbox[i, :, 0])
		xmax = np.amax(bbox[i, :, 0])
		ymin = np.amin(bbox[i, :, 1])
		ymax = np.amax(bbox[i, :, 1])

		# Get the corner strength array from the bounding box area with padding
		p = 10
		subimg = img[ymin - p: ymax + p, xmin - p: xmax + p]
		# print(subimg.shape)

		# For debugging: show what's inside the bounding box
		# plt.imshow(subimg[p:-p, p:-p])
		# plt.show()

		# Feature points gotten from image bounding box
		h, w = subimg.shape
		max_pts = int(h * w * 0.005)
		
		# Get corner strength matrix
		cimg = corner_shi_tomasi(subimg)[p: -p, p: -p]

		# Suppress non-maxima	
		x[i], y[i] = anms(cimg, max_pts, xmin, ymin)

	return x, y
Example #27
    def _detect_polar_body_patch(self, patch):
        # Find edges in patch
        patchKT = feature.corner_shi_tomasi(patch, sigma=self.eSigma)
        patchKT = (patchKT - patchKT.min()) / (patchKT.max() - patchKT.min())
        patchTH = patchKT.copy()
        # threshold edges
        th = self.eThres
        patchTH[patchKT <= th] = 0
        patchTH[patchKT > th] = 255
        patchTH[:, 0:6] = 0
        patchTH[:, 235:] = 0

        # extract coordinates of keypoints
        keyps = np.where(patchTH == 255)
        keyps = np.asarray([[y, x] for (y, x) in zip(keyps[0], keyps[1])])

        # if any keypoints are found, find clusters
        if keyps.size > 0:

            db = DBSCAN(eps=self.dbEps, min_samples=self.dbSamples).fit(keyps)
            core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
            core_samples_mask[db.core_sample_indices_] = True
            labels = db.labels_

            if np.asarray((labels == -1)).all():
                pb = False
                coord = (-1, -1)
            elif np.asarray((labels == 0)).any():
                # find the geometric center of the cluster
                class_member_mask = (labels == 0)
                xy = keyps[class_member_mask]
                ym = int((xy[:, 0].max() + xy[:, 0].min()) / 2)
                xm = int((xy[:, 1].max() + xy[:, 1].min()) / 2)
                pb = True
                coord = (ym, xm)
            return pb, coord
Example #28
def test_noisy_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.
    np.random.seed(seed=1234)
    im = im + np.random.uniform(size=im.shape) * .2

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    # undefined number of interest points
    assert results.any()

    # Harris
    results = peak_local_max(corner_harris(im, method='k'),
                             min_distance=10, threshold_rel=0)
    assert len(results) == 1
    results = peak_local_max(corner_harris(im, method='eps'),
                             min_distance=10, threshold_rel=0)
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im, sigma=1.5),
                             min_distance=10, threshold_rel=0)
    assert len(results) == 1
Example #29
def corner_detection(img, min_distance=20):
    maxima = corner_peaks(corner_shi_tomasi(img), min_distance=min_distance)
    maxima = [[x[1], x[0], 0] for x in maxima]
    return maxima
Example #30
    else:
        if detectedByRansac.size >=30:
            inliersArray = np.concatenate((inliersArray,detectedByRansac))
            print('inliersArray: ', inliersArray)
    #update the data with outliers and remove inliers
    
    
    data = np.column_stack([data[outliers, 0],data[outliers, 1]])
    print("inliers: ", inliers)
    print("wihtout: ", data)
    dataSize = data.size
    fig, ax = plt.subplots()
    ####
    #### test
    ax.plot(data[:, 0], data[:, 1], '.r', alpha=0.6,
            label='Outlier data')
    ax.plot(inliersArray[:, 0], inliersArray[:, 1], '.b', alpha=0.6,
            label='Inlier data')
    ax.legend(loc='upper left')
    '''plt.show()
    plt.pause(0.0001)'''  

print("hi corner; ",corner_peaks(corner_shi_tomasi(inliersArray), min_distance=1))
fig, ax = plt.subplots()
ax.plot(data[:, 0], data[:, 1], '.r', alpha=0.6,
        label='Outlier data')
ax.plot(inliersArray[:, 0], inliersArray[:, 1], '.b', alpha=0.6,
        label='Inlier data')

ax.legend(loc='upper left')
plt.show()
Example #31
def estimate_corners(imgr):
    response = feature.corner_shi_tomasi(imgr)
    corners = feature.corner_peaks(response, min_distance=5)
    return corners
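
A short usage sketch, assuming estimate_corners and `from skimage import feature` are in scope; it overlays the detected (row, col) peaks on the input image.

import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2gray

imgr = rgb2gray(data.astronaut())
corners = estimate_corners(imgr)
plt.imshow(imgr, cmap='gray')
plt.plot(corners[:, 1], corners[:, 0], 'r+')   # corner_peaks returns (row, col)
plt.show()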
Example #32
import numpy as np
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
                             plot_matches, BRIEF, corner_shi_tomasi)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

img1 = rgb2gray(data.astronaut())
tformMatrix = np.array([[1, 0.1, 10], [0, 1, 10], [0, 0, 1]])
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
#print(tform)
img2 = tf.warp(img1, tform)
#img2 = rgb2gray(data.camera())

keypoints1 = corner_peaks(corner_shi_tomasi(img1), min_distance=5)
keypoints2 = corner_peaks(corner_shi_tomasi(img2), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

matches12 = match_descriptors(descriptors1,
                              descriptors2,
                              cross_check=True,
Example #33
def corner_detector(img):
  return feature.corner_shi_tomasi(img)
Example #34
def _corner_shi_tomasi_feature(im):
    return feature.corner_shi_tomasi(im)
Example #35
	def run(self, ips, snap, img, para = None):
		cimg = feature.corner_shi_tomasi(img, sigma=para['sigma'])
		pts = feature.corner_peaks(cimg, min_distance=1)
		self.ips.roi = PointRoi([tuple(i[::-1]) for i in pts])
Example #36
# loop over images
frame = frame_start  # 4
while frame <= frame_end:  # end: 6

    frame_number = str(frame).zfill(6)  #000004 -> 000006
    print("Working on images ", frame_number, " up to ", frame_end)

    # load the image into a NUMPY array using matplotlib's imread function
    left_img_file = root_pathname + image_folder + sequence_number + left_camera + frame_number + '.png'
    l_image = plt.imread(left_img_file)
    right_img_file = root_pathname + image_folder + sequence_number + right_camera + frame_number + '.png'
    r_image = plt.imread(right_img_file)  # imread(filename in str format)

    # find corner features in each camera
    l_keypoints = corner_peaks(corner_shi_tomasi(l_image),
                               min_distance=min_peak_dist)
    r_keypoints = corner_peaks(corner_shi_tomasi(r_image),
                               min_distance=min_peak_dist)

    # for each corner found, extract the BRIEF descriptor
    extractor = BRIEF(sigma=1.0)
    extractor.extract(l_image, l_keypoints)
    l_descriptors = extractor.descriptors

    # not all keypoints get descriptors. Remove the ones that didn't:
    mask = extractor.mask
    l_keypoints = l_keypoints[mask]

    extractor.extract(r_image, r_keypoints)
    r_descriptors = extractor.descriptors
Example #37
def get_features1(img):
    coords = corner_peaks(corner_shi_tomasi(img),
                          min_distance=10,
                          num_peaks=500)
Example #38
        lambda y: extraction.color_features(y, mean=True, std=True), 8, x),
    hed)
print("rgbcie features")
cie_features = util.loading_map(
    lambda x: extraction.split_image_features(
        extraction.calculateColorFeatures, 8, x), cie)
print("hog features")
hog = util.loading_map(
    lambda x: calcHOG(x,
                      orient=6,
                      nr_of_cells_per_image=6,
                      nr_of_cells_per_block=2,
                      normalise=True), grayscaled)
print("corner features")
corners = util.loading_map(
    lambda x: extraction.pixel_features(feature.corner_shi_tomasi(x, sigma=6),
                                        8), grayscaled)
print('\a')
#print("daisy features")
#daisy = util.loading_map(lambda x: feature.daisy(x, step = 32, radius = 30, rings = 2, histograms = 7, orientations = 7).flatten(), grayscaled)

hybrid_hsv_luv = numpy.concatenate((hsv_features, luv_features), 1)
hybrid_hog_luv = numpy.concatenate((hog, luv_features), 1)
hybrid_hog_hsv = numpy.concatenate((hog, hsv_features), 1)
hybrid_hog_hsv_luv = numpy.concatenate((hog, hsv_features, luv_features), 1)
hybrid_bright_hog_hsv_luv = numpy.concatenate(
    (brightness, hog, hsv_features, luv_features), 1)
hybrid_bright_hed_hog_luv = numpy.concatenate(
    (brightness, hed_features, hog, luv_features), 1)
hybrid_bright_hog_luv = numpy.concatenate((brightness, hog, luv_features), 1)
Example #39
    def detect_and_extract(self, img, visualize=False):
        cy, cx, aveDist = self.coordOO
        y, x, h, w = self.coordROI
        roi = img[y:y + h, x:x + w]

        alpha = np.linspace(0, 18, num=10) * 5 / 180 * np.pi
        circy = (np.cos(alpha) * (aveDist + 5)).astype(np.uint8)
        circx = (np.sin(alpha) * (aveDist + 5)).astype(np.uint8)

        coord = np.vstack([
            np.hstack([
                cy - np.flipud(circy[0:5]), cy - circy[1:],
                cy - np.flipud(circy[0:-1] * (-1)), cy - circy[1:5] * (-1)
            ]),
            np.hstack([
                np.flipud(circx[0:5]) + cx, circx[1:] * (-1) + cx,
                np.flipud(circx[0:-1] * (-1)) + cx, circx[1:5] + cx
            ])
        ])
        coord[coord < 20] = 20
        pbpos = np.zeros([2, 1], dtype=np.uint64)
        mask = np.zeros((40, 40), dtype=bool)
        mask[0:6, 0:6] = True
        mask[-5:, 0:6] = True
        mask[0:6, -5:] = True
        mask[-5:, -5:] = True

        if visualize:
            roiTemp = roi.copy()
            for y, x in zip(coord[0, :], coord[1, :]):
                for i in range(-1, 2):
                    roiTemp[draw.polygon_perimeter([
                        y - 20 + i, y - 20 + i, y + 20 - i, y + 20 - i,
                        y - 20 - i
                    ], [
                        x - 20 + i, x + 20 - i, x + 20 - i, x - 20 + i,
                        x - 20 + i
                    ],
                                                   shape=roiTemp.shape)] = 1

        for i in range(0, coord.shape[1]):
            patch = roi[coord[0, i] - 20:coord[0, i] + 20,
                        coord[1, i] - 20:coord[1, i] + 20].copy()
            patchKT = feature.corner_shi_tomasi(patch, sigma=2)
            patchKT = (patchKT - patchKT.min()) / (patchKT.max() -
                                                   patchKT.min())
            patchTH = patchKT.copy()
            th = 0.1
            patchTH[patchKT <= th] = 0
            patchTH[patchKT > th] = 1
            patchTH[mask] = 0

            keyps = np.where(patchTH == 1)
            keyps = np.asarray([[y, x] for (y, x) in zip(keyps[0], keyps[1])])

            if keyps.size > 0:

                db = DBSCAN(eps=self.dbEps,
                            min_samples=self.dbSamples).fit(keyps)
                labels = db.labels_
                if np.asarray((labels == 0)).any():
                    class_member_mask = (labels == 0)
                    xy = keyps[class_member_mask]
                    pbpos = np.hstack([
                        pbpos,
                        (np.vstack([
                            coord[0, i] + xy[:, 0] - 20,
                            coord[1, i] + xy[:, 1] - 20
                        ]))
                    ])

        pbpos = np.transpose(pbpos).astype(np.uint64)
        pbCoord = pd.DataFrame(data=pbpos, columns=['y', 'x'])  # a list (not a set) keeps the column order deterministic
        pbCoord.drop_duplicates(['x', 'y'], inplace=True)

        db = DBSCAN(eps=25, min_samples=700).fit(pbCoord.values)
        labels = db.labels_

        if np.asarray((labels == -1)).all():
            pb = False
            pbPos = (-1, -1)
            if visualize:
                roiPB = roi.copy()
        elif np.asarray((labels == 0)).any():
            # find the geometric center of the cluster
            pb = True
            class_member_mask = (labels == 0)
            xy = pbCoord.values[class_member_mask]
            pby = int((xy[:, 0].max() + xy[:, 0].min()) / 2)
            pbx = int((xy[:, 1].max() + xy[:, 1].min()) / 2)
            pbPos = (pby, pbx)
            if visualize:
                roiPB = roi.copy()
                roiPB[xy[:, 0], xy[:, 1]] = 1

        inPosition = False

        if visualize:
            return pb, pbPos, inPosition, roiTemp, roiPB, pbCoord
        else:
            return pb, pbPos, inPosition
Example #40
	def run(self, ips, snap, img, para = None):
		cimg = feature.corner_shi_tomasi(img, sigma=para['sigma'])
		pts = feature.corner_peaks(cimg, min_distance=1)
		ips.roi = ROI([Points(pts[:,::-1])])