Example #1
def main(args):
    # Load the image
    img_left_clr   = read_image(args.image)
   
    # Apply an affine transform to synthesize a second view for feature matching
    tform = AffineTransform(scale=(0.8, 0.8), rotation=0.2, translation=(20, -10))
    img_right_clr = warp(img_left_clr, tform.inverse, output_shape=img_left_clr.shape[:2])
    img_right_clr = np.uint8(img_right_clr * 255) 

    # Convert both images to grayscale (both share the left image's channel
    # order, since the right image is just a warped copy of it)
    img_left = cv2.cvtColor(img_left_clr, cv2.COLOR_BGR2GRAY)
    img_right = cv2.cvtColor(img_right_clr, cv2.COLOR_BGR2GRAY)

    print("Getting the features from the Harris Detector")
    ftL = harris(img_left, sigma=3, threshold=0.01)
    draw_corners(ftL, img_left_clr, 'corners_left')
    ftR = harris(img_right, sigma=3, threshold=0.01)
    draw_corners(ftR, img_right_clr, 'corners_right')
    
    print("  -- Number of features (left): ", len(ftL))
    print("  -- Number of features (right): ", len(ftR))
    print("Finding the best matches between images")
    with Timer(verbose=True) as t:
        max_matches = min(len(ftL), len(ftR))
        ptsL,ptsR = get_matches(ftL, ftR, [], [], img_left, img_right, max_matches, win_size=args.win_size)
        
        print(" -- Number of matches = ", len(ptsL))
        assert len(ptsL) == len(ptsR)

    print("Performing RANSAC")
    matches = ransac(ptsL,ptsR, img_left, img_right, args.max_iters, args.epsilon)
    print(" -- Number of pruned matches = ", len(matches))
    
    draw_matches(matches, img_left_clr, img_right_clr)
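
The examples on this page call project-specific harris(...) helpers whose source is not shown. For orientation, the following is a minimal sketch of a Harris corner detector matching the call in Example #1 (harris(img, sigma=3, threshold=0.01)); the gradient filters, the k constant, the non-maximum-suppression window, and the (row, col) return format are assumptions rather than the code these projects actually use.

import numpy as np
from scipy import ndimage

def harris_sketch(gray, sigma=3, threshold=0.01, k=0.05):
    # Illustrative only -- each project above ships its own harris().
    gray = gray.astype(np.float64)
    # image gradients
    Ix = ndimage.sobel(gray, axis=1)
    Iy = ndimage.sobel(gray, axis=0)
    # structure-tensor entries, smoothed with a Gaussian window
    Sxx = ndimage.gaussian_filter(Ix * Ix, sigma)
    Syy = ndimage.gaussian_filter(Iy * Iy, sigma)
    Sxy = ndimage.gaussian_filter(Ix * Iy, sigma)
    # Harris response R = det(M) - k * trace(M)^2
    R = (Sxx * Syy - Sxy ** 2) - k * (Sxx + Syy) ** 2
    # threshold relative to the strongest response, then keep local maxima
    mask = (R > threshold * R.max()) & (R == ndimage.maximum_filter(R, size=9))
    return np.argwhere(mask)  # (row, col) corner coordinates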
Example #2
def get_matches(im1, im2):
	im1_corners, im2_corners = harris(im1, save=True, saveloc="lpoints.png"), harris(im2, save=True, saveloc="rpoints.png")

	im1_kps = harris_keypoints(im1, im1_corners)
	im2_kps = harris_keypoints(im2, im2_corners)

	return match(im1_kps, im2_kps)
Example #3
    def _run_detector(self, event=None):
        try:
            threshold = float(self._threshold.get())
            sigma = float(self._sigma.get())
            wwidth = int(self._wwidth.get())
            wheight = int(self._wheight.get())
        except (TypeError, ValueError):
            raise

        if self._img_show.img is None:
            return

        invert_img = self._invert.get()

        images = harris(self._img_show.img,
                        threshold,
                        sigma,
                        wwidth,
                        wheight,
                        invert=invert_img)
        mask, new_img = images[0], images[1]

        if new_img is None:
            return
        new_img_tk = ImageTk.PhotoImage(new_img)
        # build a grayscale preview of the corner mask (0/255 pixel values)
        mimg = Image.new('L', (len(mask), len(mask[0])))
        mimg.putdata([
            255 * mask[i][j] for i in xrange(len(mask))
            for j in xrange(len(mask[0]))
        ])
        mimg = ImageTk.PhotoImage(mimg)

        window = Tkinter.Toplevel()
        window.image = new_img
        label = Tkinter.Label(window, text='%s, %s' % (threshold, sigma))
        mask_img = Tkinter.Label(window, image=mimg)
        mask_img.img = mimg
        img_frame = Tkinter.Frame(window)
        img = scrolledcanvas(window, img_frame, highlightthickness=0)
        img.new_img = new_img_tk
        canvas_addimage(img, new_img_tk, new_img_tk.width(),
                        new_img_tk.height())
        label.pack()
        mask_img.pack()
        img.pack(fill='both', expand=True)
        img_frame.pack(fill='both', expand=True)

        for image in images[2:]:
            win = Tkinter.Toplevel()
            win.image = image
            iimg = ImageTk.PhotoImage(win.image)
            iframe = Tkinter.Frame(win)
            ilbl = scrolledcanvas(win, iframe, highlightthickness=0)
            canvas_addimage(ilbl, iimg, *image.size)
            ilbl.img = iimg
            ilbl.pack(fill='both', expand=True)
            iframe.pack(fill='both', expand=True)
Example #4
    def get_Srcextremes(self, ims, sa):
        """
        Find local extrema in the pattern image.
        """

        # instantiate the functional classes
        hs = harris.harris()
        hess = hessian.hessian()
        cont = contrast.contrast()

        coordinates = []
        temp = {}

        # image heights and widths at each of the four levels
        # (coordinates are mapped back to the base image by the factor 2**m below)
        H = [len(ims[i][0]) for i in range(4)]
        W = [len(ims[i][0][0]) for i in range(4)]

        localArea = [0, 1, 2]

        # get the unstable and low-contrast pixels
        hs_points = hs.corner(sa)
        hess_points = hess.Srcedgedetect(sa)
        low_contrast = cont.lowcontrast(sa)

        # collect the pixels which are not suitable for matching
        bad_points = list(set(hs_points) | set(hess_points) | set(low_contrast))
        bad = dict.fromkeys(bad_points, 0)


        for m in range(4):
            for n in range(1,3):
                for i in range(16,H[m]-16):
                    for j in range(16,W[m]-16):
                        if (i*2**m, j*2**m) not in bad:

                            # compare the pixel with its 26 neighbours across adjacent scales
                            currentPixel = ims[m][n][i][j]    
                            localArea[0] = ims[m][n-1][i-1:i+2,j-1:j+2]
                            localArea[1] = ims[m][n][i-1:i+2,j-1:j+2]
                            localArea[2] = ims[m][n+1][i-1:i+2,j-1:j+2]

                            Area = numpy.array(localArea)

                            maxLocal = Area.max()
                            minLocal = Area.min()

                            if (currentPixel == maxLocal) or (currentPixel == minLocal):
                                if (i*2**m, j*2**m) not in temp:
                                    coordinates.append([int(i*2**m),int(j*2**m)])
                                    temp[(i*2**m,j*2**m)] = [i*2**m,j*2**m]
        return coordinates
Example #5
def demo(imgfile0,imgfile1,rth0,rth1,mth):
    # read the two images and convert them to grayscale
    img0 = io.imread(imgfile0)
    img1 = io.imread(imgfile1)

    print("orig0 shape: "+str(img0.shape))
    print("orig1 shape: "+str(img1.shape))

    gry0 = rgb2gray(img0)
    gry1 = rgb2gray(img1)

    print("gray0 shape: "+str(gry0.shape))
    print("gray1 shape: "+str(gry1.shape))
    
    # get list of locations of interest points
    feat0 = harris(gry0,gk(3,3,1),rth0)
    feat1 = harris(gry1,gk(3,3,1),rth1)

    print("number of features0: "+str(len(feat0)))
    print("number of features1: "+str(len(feat1)))
    
    # make sift descriptors
    des0 = sift(gry0,feat0)
    des1 = sift(gry1,feat1)
    
    showFeatures(img0,feat0,des0)
    showFeatures(img1,feat1,des1)
    
    M = matchSIFT(des0,des1,40,mth)
    
    img2 = drawMatches(img0,feat0,img1,feat1,M)
    
    io.imshow(img2, vmin=0, vmax=255, cmap="gray")
    io.show()
Example #6
    counter = counter + 1

#plt.figure(1)
#plt.imshow(Ig)
#plt.figure(2)
#plt.imshow(Jg_interpolated)
#plt.show()

T_field = orientation_tensor(I, 17, 2.0, 17, 2.0)

#plt.imshow(T_field[:,:,1,1], cmap="gray")
#print(T_field[:,:,1,1].shape)
#print(I.shape)
#plt.show()

harris_response = harris(T_field, 0.05)

#harris_thresholded1 = harris_response < 500
#harris_thresholded2 = harris_response > -500
#harris_thresholded = harris_thresholded1 * harris_thresholded2

harris_thresholded = (harris_response < -15000) | (harris_response > 15000)
#harris_thresholded = harris_thresholded * harris_response

# non-maximum suppression: keep thresholded responses that are also the maximum
# of the response within their local window (the window size is a tunable choice)
max_img = ndimage.maximum_filter(harris_response, size=5)
corner_mask = harris_thresholded & (harris_response == max_img)
[row, col] = np.nonzero(corner_mask)
print(row)
print(col)

plt.imshow(corner_mask, cmap="gray")
plt.show()
Example #7
harris_kappa = 0.08
num_keypoints = 200
nonmaximum_supression_radius = 8
descriptor_radius = 9
match_lambda = 4

img = cv.imread('../data/000000.png')
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
""" Part 1 - Calculate Corner Response Functions """

# Shi-Tomasi
shi_tomasi_scores = shi_tomasi(img_gray, corner_patch_size)

# Harris
harris_scores = harris(img_gray, corner_patch_size, harris_kappa)

plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(img)

plt.subplot(2, 2, 2)
plt.imshow(img)

plt.subplot(2, 2, 3)
ax = plt.gca()
ax.imshow(shi_tomasi_scores)
plt.title('Shi-Tomasi Scores')

plt.subplot(2, 2, 4)
Example #8
File: main.py  Project: xfv/Project2
for i in range(data_set):
    print(i)
    img_1 = img[i]
    img_2 = img[i+1]
    img_1_cy = img_cy[i]
    img_2_cy = img_cy[i+1]
    
    ### get gray scale
    img_1_gray = cv2.cvtColor(img_1_cy, cv2.COLOR_BGR2GRAY)
    img_2_gray = cv2.cvtColor(img_2_cy, cv2.COLOR_BGR2GRAY)
    
    ### harris
    ### points are (x, y), not (row, col)
    print('harris')
    points_1 = harris.harris(img_1_gray)
    points_2 = harris.harris(img_2_gray)

    ### draw dots
    img_1_harris = harris.drawDots(img_1_cy, points_1)
    img_2_harris = harris.drawDots(img_2_cy, points_2)
    
    ### get feature
    print('getting features...')
    feature_1 = matching.descriptor(img_1_gray, points_1)
    feature_2 = matching.descriptor(img_2_gray, points_2)
    
    print('matching...')
    pairs = matching.find_pair(points_1, feature_1, points_2, feature_2)
    print('Got', len(pairs[0]), 'pairs')
    start = 0