Example No. 1
def MatchPics(I1, I2):

    if I1.ndim == 3:
        I1 = rgb2gray(I1)
    if I2.ndim == 3:
        I2 = rgb2gray(I2)

    points1 = corner_peaks(corner_fast(I1, n=12, threshold=0.15), min_distance=1)
    points2 = corner_peaks(corner_fast(I2, n=12, threshold=0.15), min_distance=1)

    extractor = BRIEF()

    extractor.extract(I1, points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors

    extractor.extract(I2, points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, metric='hamming', cross_check=True)

    # these points are y, x (row, col)
    locs1 = points1[matches[:, 0]]
    locs2 = points2[matches[:, 1]]
    # change to x, y (col, row)
    xy1 = np.array([locs1[:, 1], locs1[:, 0]]).transpose()
    xy2 = np.array([locs2[:, 1], locs2[:, 0]]).transpose()

    fig, ax = plt.subplots()
    plot_matches(ax, I1, I2, points1, points2, matches, keypoints_color='r',
                 only_matches=True)  # optionally: matches_color='y'

    return [xy1, xy2]
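A note on Example No. 1: MatchPics relies on names it never imports. The driver below is a minimal sketch of the required imports plus a sample call; the rotated astronaut image is only an illustrative input, not part of the original code.

import matplotlib.pyplot as plt
import numpy as np
from skimage.color import rgb2gray
from skimage.data import astronaut
from skimage.feature import (BRIEF, corner_fast, corner_peaks,
                             match_descriptors, plot_matches)
from skimage.transform import rotate

# Match the astronaut test image against a 15-degree rotated copy of itself.
I1 = astronaut()
I2 = rotate(astronaut(), 15)
xy1, xy2 = MatchPics(I1, I2)
plt.show()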
Example No. 2
 def correct_drift(self, ref, threshold=0.005):
     """Align images to correct for image drift.
     Detects features common to both images and tracks how they move.
     
     Parameters
     ----------
     ref: KerrArray or ndarray
         reference image with zero drift
     threshold: float
         threshold for detecting imperfections in images 
         (see skimage.feature.corner_fast for details)
     
     Returns
     -------
     shift: array
         shift vector relative to ref, in (row, col) order
     transim: KerrArray
         copy of self translated to account for drift"""
     refed = ref.clone
     refed = filters.gaussian(refed, sigma=1)
     refed = feature.corner_fast(refed, threshold=threshold)
     imed = self.clone
     imed = filters.gaussian(imed, sigma=1)
     imco = feature.corner_fast(imed, threshold=threshold)
     shift, err, phase = feature.register_translation(refed, imco, upsample_factor=50)
     #tform = SimilarityTransform(translation=(-shift[1],-shift[0]))
     #imed = transform.warp(im, tform) #back to original image
     self=self.translate(translation=(-shift[1],-shift[0]))
     return [shift,self]   
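Note that feature.register_translation was deprecated and later removed from scikit-image. A sketch of the equivalent call under the current API, assuming refed and imco as computed above:

from skimage.registration import phase_cross_correlation

# Returns the (row, col) shift, the translation-invariant error,
# and the global phase difference.
shift, err, phase = phase_cross_correlation(refed, imco, upsample_factor=50)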
Example No. 3
def do_prediction(path, network_type, network_model):
    entry = []
    if network_type == "hog":
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread loads BGR
        image = cv2.resize(image, (48, 48), interpolation=cv2.INTER_AREA)
        final_image = hog(image,
                          orientations=8,
                          pixels_per_cell=(16, 16),
                          cells_per_block=(1, 1))
        entry.append(final_image)
        result = network_model.predict(entry)
        return str("Network using HOG - " + path + " - Result: " +
                   cfg.Emotion(int(result.item(0))).name)
    elif network_type == "fast":
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread loads BGR
        image = cv2.resize(image, (48, 48), interpolation=cv2.INTER_AREA)
        final_image = corner_fast(image)
        entry.append(final_image)
        entry1 = np.array(entry).reshape((len(entry), -1))
        result = network_model.predict(entry1)
        return str("Network using FAST - " + path + " - Result: " +
                   cfg.Emotion(int(result.item(0))).name)
    elif network_type == "x":
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if len(image.shape) < 3:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        image = cv2.resize(image, (71, 71), interpolation=cv2.INTER_AREA)
        entry.append(image)
        result = network_model.predict(np.asarray(entry))
        return str("Network using Xception - " + path + " - Result: " +
                   cfg.Emotion(np.argmax(result, axis=None, out=None)).name)
Example No. 4
def test_corner_orientations_lena():
    img = rgb2gray(data.lena())
    corners = corner_peaks(corner_fast(img, 11, 0.35))
    expected = np.array([-1.9195897 , -3.03159624, -1.05991162, -2.89573739,
                         -2.61607644, 2.98660159])
    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
Example No. 5
def test_corner_fast_lena():
    img = rgb2gray(data.lena())
    expected = np.array([[67, 157], [204, 261], [247, 146], [269, 111],
                         [318, 158], [386, 73], [413, 70], [435, 180],
                         [455, 177], [461, 160]])
    actual = corner_peaks(corner_fast(img, 12, 0.3))
    assert_array_equal(actual, expected)
Example No. 7
    def cut_image(self, prec: float = 0.01):
        x = corner_peaks(corner_fast(self.image, 9), min_distance=1)
        # clamp the margins to the image bounds so the slice cannot go negative
        min1 = max(0, min(x, key=lambda q: q[0])[0] - int(prec * self.image.shape[0]))
        max1 = max(x, key=lambda q: q[0])[0] + int(prec * self.image.shape[0])
        min2 = max(0, min(x, key=lambda q: q[1])[1] - int(prec * self.image.shape[1]))
        max2 = max(x, key=lambda q: q[1])[1] + int(prec * self.image.shape[1])

        self.image = self.image[min1:max1, min2:max2]
Example No. 8
def test_corner_orientations_square():
    square = np.zeros((12, 12))
    square[3:9, 3:9] = 1
    corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    actual_orientations = corner_orientations(square, corners, octagon(3, 2))
    actual_orientations_degrees = np.rad2deg(actual_orientations)
    expected_orientations_degree = np.array([45.0, 135.0, -45.0, -135.0])
    assert_array_equal(actual_orientations_degrees, expected_orientations_degree)
Example No. 9
def find_radius(img):
    labels, num_features = label(img)
    location = center_of_mass(img, labels, 1)
    print("Object {} center of mass at {}".format(1, location))
    center_1_x, center_1_y = location[1], location[0]
    edges = canny(img, sigma=8.8)

    corner_response = corner_fast(edges, threshold=0.5)
    corner_pos = corner_peaks(corner_response)
Example No. 10
def test_corner_orientations_square():
    square = np.zeros((12, 12))
    square[3:9, 3:9] = 1
    corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    actual_orientations = corner_orientations(square, corners, octagon(3, 2))
    actual_orientations_degrees = np.rad2deg(actual_orientations)
    expected_orientations_degree = np.array([  45.,  135.,  -45., -135.])
    assert_array_equal(actual_orientations_degrees,
                       expected_orientations_degree)
Example No. 11
def briefLite(im):
    # YOUR CODE HERE
    #X = np.load('testPattern_compareX.npy')
    #Y = np.load('testPattern_compareY.npy')
    pattern = sio.loadmat('testPattern.mat')
    X = pattern.get('compareX')[0, :]
    Y = pattern.get('compareY')[0, :]
    #keypoints1 = corner_peaks(corner_harris(im, method = "eps"), min_distance=1)
    keypoints1 = corner_peaks(corner_fast(im), min_distance=1)
    locs, desc = computeBrief(im, keypoints1, X, Y, patchWidth=9)
    return locs, desc
Example No. 12
    def detect(self, image):
        import numpy as np
        from skimage.feature import corner_fast, corner_peaks

        keypoints = corner_peaks(corner_fast(image),
                                 min_distance=self.min_dist,
                                 threshold_rel=self.thresh)
        # shuffle in case we want to limit the number of keypoints, since FAST
        # peaks carry no response to sort by; np.random.shuffle is used because
        # random.shuffle can silently corrupt rows of a 2-D ndarray
        np.random.shuffle(keypoints)
        keypoints = [convert_to_cv_keypoint(x, y) for (x, y) in keypoints]
        return keypoints
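The convert_to_cv_keypoint helper is not part of this snippet. A plausible stand-in, assuming OpenCV keypoints are the target (hypothetical, not the original helper):

import cv2

def convert_to_cv_keypoint(x, y, size=7.0):
    # cv2.KeyPoint takes float (x, y) coordinates and a diameter in pixels.
    return cv2.KeyPoint(float(x), float(y), float(size))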
Example No. 13
def test_corner_orientations_square(dtype):
    square = np.zeros((12, 12), dtype=dtype)
    square[3:9, 3:9] = 1
    corners = corner_peaks(corner_fast(square, 9),
                           min_distance=1,
                           threshold_rel=0)
    actual_orientations = corner_orientations(square, corners, octagon(3, 2))
    assert actual_orientations.dtype == _supported_float_type(dtype)
    actual_orientations_degrees = np.rad2deg(actual_orientations)
    expected_orientations_degree = np.array([45, 135, -45, -135])
    assert_array_equal(actual_orientations_degrees,
                       expected_orientations_degree)
Example No. 14
def detect_brisk(image,
                 mask,
                 layer_id,
                 smooth=True,
                 show=False,
                 show_now=True,
                 save_fig=False):
    ns = [6, 9, 12]
    # ts = [0.02, 0.05, 0.08, 0.15]
    ts = [0.02, 0.07, 0.15]
    params = list(itertools.product(ns, ts))
    resps = []
    corners = []
    for n, t in params:
        resp = skifea.corner_fast(image, n=n, threshold=t)
        resp *= mask
        resps.append(resp)
        corners.append(skifea.corner_peaks(resp, min_distance=1))

    if show or save_fig:
        r = 2
        fig = plt.figure(figsize=(24, 14))
        n_rows = len(ns)
        n_cols = len(ts)
        for id, c_resp in enumerate(corners):
            plt.subplot(n_rows, n_cols, id + 1)
            plt.imshow(image, 'gray', interpolation='nearest')
            plt.title('layer=%i, ns=%i, ts=%.2f' %
                      (layer_id + 1, params[id][0], params[id][1]))
            for y, x in c_resp:
                circ = plt.Circle((x, y),
                                  r,
                                  color='r',
                                  linewidth=2,
                                  fill=False)
                plt.gca().add_patch(circ)
        if save_fig:
            fig_dir = '/home/tomas/Dropbox/Work/Dizertace/figures/keypoints/brisk/'
            os.makedirs(fig_dir, exist_ok=True)  # create any missing parent dirs
            fig.savefig(os.path.join(fig_dir,
                                     'brisk_layer_%i.png' % (layer_id + 1)),
                        dpi=100,
                        bbox_inches='tight',
                        pad_inches=0)
        if show_now:
            plt.show()

    return corners, resps
Example No. 15
def test_corner_fast_astronaut():
    img = rgb2gray(data.astronaut())
    expected = np.array([[101, 198], [140, 205], [141, 242], [177, 156],
                         [188, 113], [197, 148], [213, 117], [223, 375],
                         [232, 266], [245, 137], [249, 171], [300, 244],
                         [305, 57], [325, 245], [339, 242], [346, 279],
                         [353, 172], [358, 307], [362, 252], [362, 328],
                         [363, 192], [364, 147], [369, 159], [374, 171],
                         [379, 183], [387, 195], [390, 149], [401, 197],
                         [403, 162], [413, 181], [444, 310], [464, 251],
                         [476, 250], [489, 155], [492, 139], [494, 169],
                         [496, 266]])
    actual = corner_peaks(corner_fast(img, 12, 0.3))
    assert_array_equal(actual, expected)
Example No. 16
def extract_fast_features(input_path, output_path):
    step = 1
    for folder in cfg.folders:
        fast_features = []
        labels_list = []
        for emotion in cfg.Emotion:
            for filename in os.listdir(input_path + '\\' + folder + '\\' + str(emotion.name) + '\\'):
                image = imread(input_path + '\\' + folder + '\\' + emotion.name + '\\' + filename)
                feature = corner_fast(image)
                fast_features.append(feature)
                labels_list.append(emotion.value)
                progress("4 out of 5 - Extracting FAST features", step)
                step = step + 1
        os.makedirs(output_path + "\\" + folder, exist_ok=True)
        np.save(output_path + "\\" + folder + '\\features.npy', fast_features)
        np.save(output_path + "\\" + folder + '\\labels.npy', labels_list)
Example No. 17
def test_corner_orientations_astronaut():
    img = rgb2gray(data.astronaut())
    corners = corner_peaks(corner_fast(img, 11, 0.35))
    expected = np.array([
        -1.75220190e+00, 2.01197383e+00, -2.01162417e+00, -1.88247204e-01,
        1.19134149e+00, -6.61151410e-01, -2.99143370e+00, 2.17103132e+00,
        -7.52950306e-04, 1.25854853e+00, 2.43573659e+00, -1.69230287e+00,
        -9.88548213e-01, 1.47154532e+00, -1.65449964e+00, 1.09650167e+00,
        1.07812134e+00, -1.68885773e+00, -1.64397304e+00, 3.09780364e+00,
        -3.49561988e-01, -1.46554357e+00, -2.81524886e+00, 8.12701702e-01,
        2.47305654e+00, -1.63869275e+00, 5.46905279e-02, -4.40598471e-01,
        3.14918803e-01, -1.76069982e+00, 3.05330950e+00, 2.39291733e+00,
        -1.22091334e-01, -3.09279990e-01, 1.45931342e+00
    ])

    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
Example No. 18
 def fast_skimage(self, image, **kwargs):
     coords_subpix = np.zeros_like(image)
     cornerness_matrix = sf.corner_peaks(
         sf.corner_fast(image, 16,
                        0.8), min_distance=1)  # no_of_detected_points*2
     coords_subpix = sf.corner_subpix(image,
                                      cornerness_matrix,
                                      window_size=13,
                                      alpha=kwargs["alpha"])
     display.draw_points(image,
                         cornerness_matrix,
                         '_',
                         self.path[2:-1],
                         method_name=kwargs['method'],
                         name=self.name,
                         sp=coords_subpix)
     return cornerness_matrix, coords_subpix
Example No. 19
def clasificareFast():
    if Data.path is None:
        print("Choose a file")
    else:
        entry = []
        filename = 'fast_model.sav'
        loaded_model = pickle.load(
            open('C:/Users/Raul/Documents/HOG' + "\\" + filename, 'rb'))
        image = cv2.imread(Data.path, cv2.IMREAD_UNCHANGED)
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread loads BGR
        image = cv2.resize(image, (18, 36), interpolation=cv2.INTER_AREA)
        final_image = corner_fast(image)
        entry.append(final_image)
        entry1 = np.array(entry).reshape((len(entry), -1))
        result = loaded_model.predict(entry1)
        print("Network using FAST - " + Data.path + " - Result: " +
              Ped(int(result.item(0))).name)
Example No. 20
    def _detect_octave(self, octave_image):
        # Extract keypoints for current octave
        fast_response = corner_fast(octave_image, self.fast_n,
                                    self.fast_threshold)
        keypoints = corner_peaks(fast_response, min_distance=1)

        if len(keypoints) == 0:
            return (np.zeros(
                (0, 2), dtype=np.double), np.zeros(
                    (0, ), dtype=np.double), np.zeros((0, ), dtype=np.double))

        mask = _mask_border_keypoints(octave_image.shape,
                                      keypoints,
                                      distance=16)
        keypoints = keypoints[mask]

        orientations = corner_orientations(octave_image, keypoints, OFAST_MASK)

        harris_response = corner_harris(octave_image,
                                        method='k',
                                        k=self.harris_k)
        responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

        return keypoints, orientations, responses
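_detect_octave is an internal step of scikit-image's ORB detector; application code reaches the same FAST-then-Harris pipeline through the public class. A minimal usage sketch:

from skimage.color import rgb2gray
from skimage.data import astronaut
from skimage.feature import ORB

orb = ORB(n_keypoints=200, fast_n=9, fast_threshold=0.08, harris_k=0.04)
orb.detect_and_extract(rgb2gray(astronaut()))
keypoints = orb.keypoints      # (row, col) keypoint coordinates
descriptors = orb.descriptors  # boolean array of 256-bit binary descriptors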
Example No. 21
def fast_skimage(image, min_distance, num_peaks, **kwargs):
    coords_subpix = np.zeros_like(image)
    cornerness_matrix = sf.corner_peaks(sf.corner_fast(image, 16, 0.8), min_distance=min_distance, num_peaks=num_peaks) # no_of_detected_points*2
    coords_subpix = sf.corner_subpix(image, cornerness_matrix, window_size=13, alpha=0.8)
    draw_points(image, cornerness_matrix)
    return cornerness_matrix, coords_subpix
Example No. 22
import numpy as np
from PIL import Image
from skimage.feature import corner_peaks
from skimage.feature import corner_subpix, corner_fast
from matplotlib import pyplot as plt

# The image is opened and converted to grayscale.
img = Image.open('../Figures/corner_detector.png').convert('L')
# img is converted to an ndarray.
img1 = np.asarray(img)

corner_response = corner_fast(img1)
cpv = corner_peaks(corner_response, min_distance=50)
corners_subpix_val = corner_subpix(img1, cpv, window_size=13)
fig, ax = plt.subplots()
ax.imshow(img1, interpolation='nearest', cmap=plt.cm.gray)
x = corners_subpix_val[:, 1]
y = corners_subpix_val[:, 0]
ax.plot(x, y, 'ob', markersize=10)
ax.axis('off')
plt.savefig('../Figures/corner_fast_detector_output.png', dpi=300)
plt.show()
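One caveat on the script above: corner_subpix returns NaN rows for corners it cannot classify, which ax.plot silently skips. If the refined coordinates feed later computation, it is safer to drop those rows first, for example:

valid = ~np.isnan(corners_subpix_val).any(axis=1)
refined_corners = corners_subpix_val[valid]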
Example No. 23
def display_features(orig_img,
                     dsae_feats=None,
                     dsae_duration=None,
                     other_feats=[
                         'corner_harris', 'corner_fast', 'CENSURE_STAR',
                         'CENSURE_Octagon', 'CENSURE_DoB', 'ORB', 'daisy'
                     ]):
    """
    display the original image, optionally displaying the identified features over it

    Input:
    - orig_img
        original image
    - dsae_feats
        2d array of spatial feature coordinates of shape (nb_feats,2)
        Note, the dimension length 2 has its first element representing the i coordinate and
        its second element representing the j coordinate
    - dsae_duration
        float: the amount of time it took to preprocess the image and produce the predicted encoding (features)
    - other_feats
        list of strings: a list of the other types of feature detectors to visually compare with ours
    Returns:
    """
    nimgs = len(other_feats) + 1
    nrows = 2
    ncols = nimgs // 2
    if nimgs % 2 != 0:
        ncols += 1
    if ncols == 1:
        ncols += 1
    assert (nrows * ncols >= nimgs)
    print('nrows:%d\tncols:%d' % (nrows, ncols))
    fig, ax = plt.subplots(figsize=(20, 10), nrows=nrows, ncols=ncols)
    row, col = 0, 0
    gray_img = rgb2gray(orig_img)
    dsae_feats_orig = dsae_feats.copy() if dsae_feats is not None else None
    if orig_img.shape[0] == 3:
        H = orig_img.shape[1]
        W = orig_img.shape[2]
    else:
        H = orig_img.shape[0]
        W = orig_img.shape[1]

    # dsae_feats_orig[:,0] *= H
    # dsae_feats_orig[:,1] *= W

    # plt.imshow(orig_img)
    ax[row, col].imshow(orig_img)
    if dsae_feats is not None:
        extractor = 'DSAE_VGG (ours)'
        # plt.scatter(x=dsae_feats_orig[:,1], y=dsae_feats_orig[:,0], c='r', s=15)
        ax[row, col].scatter(x=dsae_feats_orig[:, 1],
                             y=dsae_feats_orig[:, 0],
                             c='r',
                             s=15)
        ax[row, col].text(0,
                          0,
                          extractor,
                          color='r',
                          fontsize=25,
                          fontweight='bold')
        print('dsae_duration:')
        print(dsae_duration)
        # sys.exit(1)
        if dsae_duration:
            ax[row, col].text(0,
                              15,
                              '%s ms' % dsae_duration,
                              color='b',
                              fontsize=15)
            print('%s: %f ms' % (extractor, dsae_duration))
    # plt.show()
    row, col = inc_row_col(row, col, nrows, ncols)

    if 'ORB' in other_feats:
        extractor = 'ORB'
        descriptor_extractor = ORB(n_keypoints=256)
        start = time.time()
        descriptor_extractor.detect_and_extract(gray_img)
        orb_feats = descriptor_extractor.keypoints
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        # descriptors1 = descriptor_extractor.descriptors
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=orb_feats[:, 1], y=orb_feats[:, 0], c='b', s=15)
        ax[row, col].text(0,
                          0,
                          extractor,
                          color='b',
                          fontsize=25,
                          fontweight='bold')
        print('H', H)
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='b',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'blob_dog' in other_feats:
        extractor = 'blob_dog'
        start = time.time()
        blob_dog_feats = blob_dog(gray_img)
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=blob_dog_feats[:, 1],
                             y=blob_dog_feats[:, 0],
                             c='g',
                             s=15)
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='g',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'corner_fast' in other_feats:
        extractor = 'corner_fast'
        start = time.time()
        corner_fast_feats = corner_peaks(corner_fast(gray_img))
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=corner_fast_feats[:, 1],
                             y=corner_fast_feats[:, 0],
                             c='m',
                             s=15)
        ax[row, col].text(0,
                          0,
                          'corner_fast',
                          color='m',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='m',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'corner_harris' in other_feats:
        extractor = 'corner_harris'
        start = time.time()
        corner_harris_feats = corner_peaks(corner_harris(gray_img))
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=corner_harris_feats[:, 1],
                             y=corner_harris_feats[:, 0],
                             c='m',
                             s=15)
        ax[row, col].text(0,
                          0,
                          'corner_harris',
                          color='m',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='m',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'CENSURE_STAR' in other_feats:
        extractor = 'CENSURE_STAR'
        censure = CENSURE(mode='STAR')
        start = time.time()
        censure.detect(gray_img)
        censure_star_keypoints = censure.keypoints
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=censure_star_keypoints[:, 1],
                             y=censure_star_keypoints[:, 0],
                             c='k',
                             s=15)
        ax[row, col].text(0,
                          0,
                          'CENSURE_STAR',
                          color='k',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='k',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'CENSURE_Octagon' in other_feats:
        extractor = 'CENSURE_Octagon'
        censure = CENSURE(mode='Octagon')
        start = time.time()
        censure.detect(gray_img)
        censure_oct_keypoints = censure.keypoints
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=censure_oct_keypoints[:, 1],
                             y=censure_oct_keypoints[:, 0],
                             c='k',
                             s=15)
        ax[row, col].text(0,
                          0,
                          'CENSURE_Octagon',
                          color='k',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='k',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'CENSURE_DoB' in other_feats:
        extractor = 'CENSURE_DoB'
        censure = CENSURE(mode='DoB')
        start = time.time()
        censure.detect(gray_img)
        censure_dob_keypoints = censure.keypoints
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].scatter(x=censure_dob_keypoints[:, 1],
                             y=censure_dob_keypoints[:, 0],
                             c='k',
                             s=15)
        ax[row, col].text(0,
                          0,
                          'CENSURE_DoB',
                          color='k',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='k',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    if 'daisy' in other_feats:  # SIFT-like feature descriptor
        extractor = 'daisy'
        start = time.time()
        # descs, descs_img = daisy(gray_img, visualize=True)

        descs, descs_img = daisy(gray_img,
                                 step=50,
                                 radius=25,
                                 rings=2,
                                 histograms=6,
                                 orientations=8,
                                 visualize=True)
        end = time.time()
        duration = (end - start) * 1000.  # milliseconds now
        ax[row, col].imshow(orig_img)
        ax[row, col].imshow(descs_img)
        ax[row, col].text(0,
                          0,
                          'Daisy',
                          color='w',
                          fontsize=25,
                          fontweight='bold')
        ax[row, col].text(0,
                          H + 80,
                          '%s ms' % duration,
                          color='w',
                          fontsize=15)
        print('%s: %f ms' % (extractor, duration))
        row, col = inc_row_col(row, col, nrows, ncols)
    plt.show()
Example No. 24
import numpy as np
from skimage import io
from skimage.feature import (match_descriptors, corner_peaks, corner_fast,
                             plot_matches, BRIEF)
import matplotlib.pyplot as plt

img1 = io.imread("./data/training/image_2/000000_10.png", as_gray=True)
img2 = io.imread("./data/training/image_2/000000_11.png", as_gray=True)

pc1 = corner_peaks(corner_fast(img1), min_distance=5)
pc2 = corner_peaks(corner_fast(img2), min_distance=5)

plt.figure(figsize=(6,6))
plt.imshow(img1, cmap='gray')
plt.plot(pc1[:, 1], pc1[:, 0], '+r', markersize=3)
#ax.axis((0, 350, 350, 0))
plt.show()

extractor = BRIEF(descriptor_size=128, patch_size=49, mode='normal')

extractor.extract(img1, pc1)
pc1 = pc1[extractor.mask]  # keep only the keypoints BRIEF actually described
desc1 = extractor.descriptors
extractor.extract(img2, pc2)
pc2 = pc2[extractor.mask]
desc2 = extractor.descriptors
matches = match_descriptors(desc1, desc2, metric="hamming", cross_check=True)

fig = plt.figure(figsize=(18, 10))
ax0 = plt.subplot()
plot_matches(ax0,img1, img2, pc1, pc2, matches)
ax0.axis('off')
ax0.set_title("Image1 vs. Image2")
Example No. 25
def template_matching(query_image,
                      reference_image_border,
                      window_size,
                      patch_min_area=0.1,
                      patch_max_area=0.8,
                      plot=False):
    import dask

    keypoints_q = []
    keypoints_r = []
    # find interest points in query image (e.g. corners or white pixels)
    corners, corners_subpix = detect_corners(query_image)
    logging.debug("number of corners detected: %d" % len(corners))

    # height,width = query_image.shape
    # # reduce image size for performance with fixed aspect ratio. approx- same size as query, to make tempalte amtching work
    # reference_image = cv2.resize(reference_image, (width-window_size*2, height-window_size*2))
    # # reference_image = cv2.resize(reference_image, query_image.shape[::-1])

    # # make border of window size around reference image, to catch edge cases
    # reference_image_border = cv2.copyMakeBorder(reference_image,
    #                                             window_size, window_size, window_size, window_size,
    #                                             cv2.BORDER_CONSTANT, None, 0)
    # # reference_image_border = reference_image

    if plot:
        from matplotlib import pyplot as plt
        plt.subplot("121")
        plt.imshow(query_image)
        plt.scatter([x[1] for x in corners], [y[0] for y in corners],
                    c="r",
                    marker="x")
        plt.subplot("122")
        plt.imshow(reference_image_border)
        from skimage.feature import corner_harris, corner_fast, corner_subpix, corner_peaks
        ref_corners = corner_peaks(corner_fast(reference_image_border),
                                   min_distance=5)
        plt.scatter([x[1] for x in ref_corners], [y[0] for y in ref_corners],
                    c="r",
                    marker="x")
        # y =query_image.shape[0]
        # plt.plot([30,470,470,30,30], [y-30,y-30,30,30,y-30], "g", linewidth=1)
        plt.show()

    # match all sample points
    lazy_r = []
    for sample_point in corners:
        # sample interest point
        y, x = sample_point
        # extract template from query image around sampled point
        template = query_image[y - window_size:y + window_size,
                               x - window_size:x + window_size]
        # skip patches that are not very descriptive
        num_pixels_high = cv2.countNonZero(template)
        pixel_high_percent = num_pixels_high / (2 * window_size) ** 2  # template spans (2*window_size)^2 pixels

        if pixel_high_percent < patch_min_area or pixel_high_percent > patch_max_area:
            # don't consider ambiguous patches
            continue

        keypoints_q.append([x, y])

        # optional: reduce search space by only looking at/around interest points in reference image

        # find query template in reference image
        x = dask.delayed(match_template)(reference_image_border, template)
        lazy_r.append(x)

    results = dask.compute(*lazy_r)
    for x in results:
        match_x, match_y = x
        keypoints_r.append([match_x + window_size, match_y + window_size])
        # print("R,M:",(x,y),(match_x,match_y))
        # plot_template()

    # todo: optional: filter matches by score / lowe's test ratio

    # ransac those template matches!
    keypoints_q = np.array(keypoints_q)
    keypoints_r = np.array(keypoints_r)

    return keypoints_q, keypoints_r
Example No. 26
def detect_corners(image):
    from skimage.feature import corner_harris, corner_fast, corner_subpix, corner_peaks

    coords = corner_peaks(corner_fast(image), min_distance=10, threshold_rel=0)
    coords_subpix = None  #corner_subpix(image, coords, window_size=13)
    return coords, coords_subpix
Example No. 27
import numpy as np
from matplotlib import pyplot as plt

from skimage.feature import canny
from skimage.feature import corner_fast, corner_peaks, corner_orientations
from scipy import misc  # misc.imread requires an older SciPy (or use imageio.imread)
from scipy.ndimage import label
from scipy.ndimage import center_of_mass
from scipy.spatial import distance

img1 = misc.imread("155c012t4.tif", mode='L')
labels1, num_features1 = label(img1)
location1 = (center_of_mass(img1, labels1, 1))
print("Object {} center of mass at {}".format(1, location1))
center_1_x, center_1_y = location1[1], location1[0]
edges = canny(img1, sigma=8.8)

corner_response = corner_fast(edges, threshold=0.5)
corner_pos = corner_peaks(corner_response)

fig, axes = plt.subplots(ncols=2, figsize=(8, 4.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 2, 1)
ax[1] = plt.subplot(1, 2, 2)

ax[0].imshow(img1)
ax[0].plot(center_1_x, center_1_y, 'r^', markersize=20)
radius_array = np.zeros(len(corner_pos))

for k in range(0, len(corner_pos)):
    y, x = corner_pos[k]
    ax[0].plot(x, y, 'ro', markersize=15)
    print("x={}, y={}".format(x, y))
Example No. 28
def process(**kwargs):

    # a larger blur means less detail to extract points from
    # a smaller detail number means more extracted points
    # a smaller size number means smaller triangles
    # setting trialpha to less than 1 means some of the source image will show

    # set default arguments
    file = kwargs.pop("file", "")
    blur = kwargs.pop("blur", 0)
    detail = kwargs.pop("detail", 1)
    size = kwargs.pop("size", 1)
    trialpha = kwargs.pop("trialpha", 1)
    random = kwargs.pop("random", False)
    pltdelaunay = kwargs.pop("pltdelaunay", False)
    pltvoronoi = kwargs.pop("pltvoronoi", False)

    # open the source image
    img = Image.open(file)
    img = ImageOps.expand(img, border=20, fill="white")
    # img = ImageOps.expand(img,border=5,fill='black')
    w, h = img.size

    # uncomment to sharpen image (more points)
    #  img = img.filter(ImageFilter.UnsharpMask(radius=2, percent=150, threshold=3))
    # img.show()
    # exit()

    source = img_as_ubyte(img)
    img1 = rgb2gray(source)

    fig, ax = plt.subplots()
    plt.gray()

    # POINT EXTRACTION TWEAKS

    # blur image (fewer points)
    if blur > 0:
        img1 = gaussian_filter(img1, sigma=blur)  # img1 is single-channel after rgb2gray

    # extract or generate points
    if random:
        corners = sample_poisson_uniform(w, h, size, 30)
        pts = np.zeros((len(corners), 2))
        for i in range(len(corners)):
            x, y = corners[i]
            pts[i] = [int(x), int(y)]
    else:
        corners = corner_peaks(corner_fast(img1, detail), min_distance=size)
        pts = np.zeros((len(corners), 2))
        pts[:, 0] = corners[:, 1]
        pts[:, 1] = corners[:, 0]

    # COLOR SELECTION TWEAKS -------------------------------

    # tint image
    # img = tint_image(img,"#FF0000")

    # posterize image
    # img = ImageOps.posterize(img,6)

    # blur image
    # img = img.filter(ImageFilter.GaussianBlur(radius=10))

    # COLOR SELECTION TWEAKS -------------------------------

    pix = img.load()
    patches = []

    if pltdelaunay:
        triangles = Delaunay(pts)

        for i in triangles.vertices:
            triangle = pts[i]
            a = triangle[0]
            b = triangle[1]
            c = triangle[2]
            triangle_center_x = (a[0] + b[0] + c[0]) * 0.33333
            triangle_center_y = (a[1] + b[1] + c[1]) * 0.33333
            colors = pix[triangle_center_x, triangle_center_y]
            # handle greyscale (or convert to RGB first)
            if isinstance(colors, int):
                R = colors / 255.0
                G = colors / 255.0
                B = colors / 255.0
            else:
                R = colors[0] / 255.0
                G = colors[1] / 255.0
                B = colors[2] / 255.0
            color = [R, G, B]
            # ax.scatter(triangle_center_x, triangle_center_y, s=1, color='r', alpha=1)
            patches.append(
                plt.Polygon(triangle,
                            fill=True,
                            color=color,
                            alpha=trialpha,
                            ec="none",
                            aa=True))

    if pltvoronoi:
        vor = Voronoi(pts)

        lines = [
            shapely.geometry.LineString(vor.vertices[line])
            for line in vor.ridge_vertices if -1 not in line
        ]

        for idx, p in enumerate(shapely.ops.polygonize(lines)):
            pt = p.representative_point()
            if pt.x > 0 and pt.y > 0 and pt.x < w and pt.y < h:
                colors = pix[pt.x, pt.y]
                # handle greyscale (or convert to RGB first)
                if isinstance(colors, int):
                    R = colors / 255.0
                    G = colors / 255.0
                    B = colors / 255.0
                else:
                    R = colors[0] / 255.0
                    G = colors[1] / 255.0
                    B = colors[2] / 255.0
                color = [R, G, B]
                # ax.scatter(pt.x, pt.y, s=1, color='g', alpha=1)
                patches.append(
                    plt.Polygon(
                        p.exterior,
                        fill=True,
                        color=color,
                        alpha=trialpha,
                        ec="none",
                        aa=True,
                    ))

    ax.imshow(img)
    ax.axis("off")
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)

    p = PatchCollection(patches, match_original=True)
    ax.add_collection(p)

    return fig
Example No. 29
import scipy.io as sio
import skimage.color
import skimage.io
import skimage.feature
from skimage.feature import corner_fast, corner_peaks

# Q2.1
compareX, compareY = makeTestPattern(9, 256)
sio.savemat('testPattern.mat', {'compareX': compareX, 'compareY': compareY})

# Q2.2
img = skimage.io.imread('../data/chickenbroth_01.jpg')
im = skimage.color.rgb2gray(img)

# YOUR CODE: Run a keypoint detector, with nonmaximum supression
# locs holds those locations n x 2
keypoints1 = corner_peaks(corner_fast(im), min_distance=1)

locs, desc = computeBrief(im, keypoints1, compareX, compareY)

# Q2.3
locs, desc = briefLite(im)

# Q2.4
testMatch()

# Q2.5
briefRotTest()

# EC 1
#briefRotTest(briefRotLite)
Example No. 30
#        hmf=feature.hessian_matrix(image, sigma=1, mode='constant',
#                                   cval=0, order=None)  # six image-sized matrices, three channels
        hmd=feature.hessian_matrix_det(imgrey, sigma=1)  # image-sized matrix
#        hme=feature.hessian_matrix_eigvals(hmf, Hxy=None, Hyy=None)
        si=feature.shape_index(imgrey, sigma=1, mode='constant', cval=0)  # image-sized matrix
#        ckr=feature.corner_kitchen_rosenfeld(image, mode='constant', cval=0)  # image-sized matrix, three channels
#        ch=feature.corner_harris(imgrey, method='k', k=0.05, eps=1e-06, sigma=1)  # image-sized matrix
#        cht=feature.corner_shi_tomasi(imgrey, sigma=1)  # image-sized matrix
#        cfs=feature.corner_foerstner(imgrey, sigma=1)  # two image-sized matrices
#        csb=feature.corner_subpix(image, ch, window_size=11, alpha=0.99)
        cps=feature.corner_peaks(imgrey, min_distance=1, threshold_abs=None,
                                 threshold_rel=0.1, exclude_border=True, indices=True,
                                 footprint=None, labels=None)  # a set of coordinates
#        cmr=feature.corner_moravec(imgrey, window_size=1)  # image-sized matrix
#        cft=feature.corner_fast(imgrey, n=12, threshold=0.15)  # image-sized matrix
        corners = feature.corner_peaks(feature.corner_fast(imgrey, 9), min_distance=1)  # a set of coordinates
        corts=feature.corner_orientations(imgrey, corners, octagon(3, 2))  # 1-D array, variable length
#        mtem=feature.match_template(image, template, pad_input=False,
#                                    mode='constant', constant_values=0)
#        bldg=feature.blob_dog(imgrey, min_sigma=1, max_sigma=50,
#                              sigma_ratio=1.6, threshold=2.0, overlap=0.5)  # unclear
#        bldoh=feature.blob_doh(imgrey, min_sigma=1, max_sigma=30, num_sigma=10,
#                               threshold=0.01, overlap=0.5, log_scale=False)  # unclear
#        bllog=feature.blob_log(imgrey, min_sigma=1, max_sigma=50, num_sigma=10,
#                               threshold=0.2, overlap=0.5, log_scale=False)  # unclear
        zong.append([imname,
                     greycghg[0,0],greycghg[0,1],greycghg[0,2],greycghg[0,3],greycghg[0,4],
                     greycgcl[0,0],greycgcl[0,1],greycgcl[0,2],greycgcl[0,3],greycgcl[0,4],
                     greycgeg[0,0],greycgeg[0,1],greycgeg[0,2],greycgeg[0,3],greycgeg[0,4],
                     greycgasm[0,0],greycgasm[0,1],greycgasm[0,2],greycgasm[0,3],greycgasm[0,4],
                     greycgctt[0,0],greycgctt[0,1],greycgctt[0,2],greycgctt[0,3],greycgctt[0,4],
Example No. 31
def test_corner_fast_image_unsupported_error():
    img = np.zeros((20, 20, 3))
    with testing.raises(ValueError):
        corner_fast(img)
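The test above pins down that corner_fast rejects multichannel input. A color image must be reduced to a single 2-D channel first, for example:

import numpy as np
from skimage.color import rgb2gray
from skimage.feature import corner_fast

img = np.zeros((20, 20, 3))
response = corner_fast(rgb2gray(img))  # rgb2gray yields a 2-D float image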
Example No. 32
    # print(ad_image.shape)
    # plt.show()
    image_origin = io.imread(
        "chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0003_1_292.png",
        as_gray=True)

    image = io.imread("skeleton1.png", as_gray=True)
    image = np.where(image > 0, 1, 0)
    image = adjunction_image(image)

    pointx, pointy, anglex, angley, point_image = get_fork_point(image)

    p1 = plt.subplot(231)
    p1.imshow(image_origin, cmap='gray')

    point_image = corner_peaks(corner_fast(image_origin, 9), min_distance=1)
    p2 = plt.subplot(232)
    p2.plot(point_image[:, 1], point_image[:, 0], "r.")
    p2.imshow(image_origin)

    p3 = plt.subplot(233)
    p3.imshow(image, cmap="gray")
    p3.plot(pointx, pointy, "r.")
    p3.plot(anglex, angley, "b.")
    print(image.shape)

    lb_image = measure.label(image, connectivity=2)  # 8-connectivity
    regions = measure.regionprops(lb_image)
    p4 = plt.subplot(234)
    p4.imshow(np.where(lb_image > 0, 1, 0), cmap="gray")