Example #1
import cv2

from cam import MyCam
from fmatch import draw_match

MIN_MATCH_COUNT = 10

# Initiate ORB detector
orb = cv2.ORB()  # use cv2.ORB_create() on OpenCV 3+
cam = MyCam()
cam.size = (640, 480)
img1 = cv2.imread('box.png', 0)

cv2.imshow('source', img1)
while True:

    img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
        cv2.imwrite('campic.png', img1)
    elif k == 27:
        break

    # find the keypoints and descriptors with ORB
    if k is not None:
        cv2.destroyWindow('preview')
        kp1, des1 = orb.detectAndCompute(img1, None)

    kp2, des2 = orb.detectAndCompute(img2, None)

    # If nothing matches, continue

Example #2

    from random import randint
    win_size = (600, 800, 3)

    cam = MyCam()
    sf = ScreenFinder()
    
    black = np.full(win_size, (0, 0, 0), np.uint8)
    
    while True:
        k = waitKey(5)
        if k == 27:
            break
        if k == ord('r'):
            sf.reset()
        
        cam_img = cam.read()
        on = True
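        # Alternately flash the window white and black and feed each (displayed, captured)
        # image pair to ScreenFinder until it locates the screen in the camera view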
        while sf.screens is None:
            
            on = not on
            x = on * 255
            screen_img = np.full(win_size, (x, x, x), np.uint8)
            imshow('main', screen_img)
            imshow('screen finder', cam_img)
            # wait briefly so the camera captures the fully displayed frame
            k = waitKey(20)
            cam_img = cam.read()
            sf.put_calibrate_images(screen_img, cam_img)
            
            # delay again so the displayed image cannot change while the camera is taking the picture
            k = waitKey(20)
Example #3
import cv2

from cam import MyCam
from fmatch import draw_match


MIN_MATCH_COUNT = 10

# Initiate SIFT detector
sift = cv2.SIFT()
cam = MyCam()
cam.size = (640, 480)
img1 = cv2.imread('box.png', 0)

cv2.imshow('source', img1)
while True:
    
    img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
        cv2.imwrite('campic.png', img1)
    elif k == 27:
        break
    
    
    # find the keypoints and descriptors with SIFT
    if k is not None:
        cv2.destroyWindow('preview') 
        kp1, des1 = sift.detectAndCompute(img1,None)
        
    kp2, des2 = sift.detectAndCompute(img2,None)
Example #4
        # sort the points counter-clockwise so we can eliminate similar solutions
        cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
        if cross_product * orientation > 0:
            tmp = np.flipud(tmp)

        # rearrange the points so that the returned point array always starts with
        # the point closest to the origin
        tmp = tmp.reshape(-1, 2)
        distance_from_origin = map(lambda pt: pt[0]**2 + pt[1]**2, tmp)
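        # pick the index of the vertex closest to the origin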
        val, idx = min(
            (val, idx) for (idx, val) in enumerate(distance_from_origin))

        if idx > 0:
            up, down = np.vsplit(tmp, [idx])
            tmp = np.vstack((down, up))

        polygons.append(tmp)

    return polygons


if __name__ == "__main__":
    cam = MyCam()
    while True:
        img = cam.read()
        polygons = find_polygons(img, 4)
        for ctr in polygons:
            draw_oriented_polylines(img, ctr, 0, (0, 0, 255), 2, (255, 0, 0))
        cv2.imshow('find quadrangles', img)
        k = cv2.waitKey(5)
        if k == 27:
            break
Example #5
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()
        

        
if __name__ == '__main__':

    winName = "cam test"
    cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
    # Read three images first:

    cam = MyCam()
    md = MotionDetector(N=2, shape=cam.read().shape)

    while True:
        
        md.feed_image(cam.read())
        cv2.imshow(winName, md.diff)
        
        key = cv2.waitKey(10)
        if key == 27 or key == 32:
            cam.release()
            cv2.destroyWindow(winName)
            break

    print "Goodbye"
Example #6
        f0, f1, f2 = self.three_frames()
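        # Three-frame differencing: combine |f1 - f0| and |f2 - f1| so that only
        # motion present in both consecutive differences is kept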
        d1 = cv2.absdiff(f1, f0)
        d2 = cv2.absdiff(f2, f1)
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()


winName = "cam test"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
# Read three images first:

cam = MyCam()
md = MotionDetector(N=2, shape=cam.read().shape)

while True:

    md.feed_image(cam.read())
    cv2.imshow(winName, md.diff)

    key = cv2.waitKey(10)
    if key == 27 or key == 32:
        cam.release()
        cv2.destroyWindow(winName)
        break

print "Goodbye"
Example #7
        
        # now sort the points counter-clockwise so we can eliminate similar solutions
        cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
        if cross_product * orientation > 0:
            tmp = np.flipud(tmp)
        
        # rearrange the points so that the returned point array always starts with
        # the point closest to the origin
        tmp = tmp.reshape(-1,2)
        distance_from_origin = map(lambda pt: pt[0] ** 2 + pt[1] ** 2, tmp)
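        # pick the index of the vertex closest to the origin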
        val, idx = min((val, idx) for (idx, val) in enumerate(distance_from_origin))
        
        if idx > 0:
            up, down = np.vsplit(tmp, [idx])
            tmp = np.vstack((down, up))
        
        polygons.append(tmp)
    
    return polygons
    
if __name__ == "__main__":
    cam = MyCam()
    while True:
        img = cam.read()
        polygons = find_polygons(img, 4)
        for ctr in polygons:
            draw_oriented_polylines(img, ctr, 0, (0,0,255), 2, (255,0,0))
        cv2.imshow('find quadrangles',img)
        k = cv2.waitKey(5)
        if k == 27:
            break
Example #8
def test_feature_matching_realtime(detector=cv2.ORB()):
    """
    Press 's' to take a picture or 'l' to load one and start real-time matching.
    """
    from cam import MyCam

    MIN_MATCH_COUNT = 10

    cam = MyCam()
    cam.size = (640, 480)
    img1 = cv2.imread('box.png', 0)

    cv2.imshow('source', img1)
    while True:
        
        img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
        k = cv2.waitKey(5)
        if k == ord('s'):
            img1 = img2.copy()
            cv2.imwrite('campic.png', img1)
        elif k == 27:
            break
        
        
        
        # find the keypoints and descriptors with ORB
        if k is not None:
            cv2.destroyWindow('preview')
            kp1, des1 = detector.detectAndCompute(img1, None)

        kp2, des2 = detector.detectAndCompute(img2, None)
        
        
        # If nothing was detected in the camera frame, show it without matches and continue
        if des2 is None:
            img3 = draw_match(img1, kp1, img2, kp2, [])
            cv2.imshow('matches', img3)
            continue
        
        des1 = des1.astype(np.uint8, copy=False)    # Fix the data type
        des2 = des2.astype(np.uint8, copy=False)
        
        
        # Now match the descriptors
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
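        # NORM_HAMMING is the right distance for ORB's binary descriptors (use NORM_L2 for SIFT/SURF)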
        # matches = bf.match(des1,des2)
        
        matches = bf.knnMatch(des1,des2, k=2)
        
        # m = matches[0][0]
        # p1, p2 = np.float32(kp1[m.queryIdx].pt), np.float32(kp2[m.trainIdx].pt)
        # print m.distance, p1, p2
        
        # Apply ratio test
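        # keep a match only if its distance is clearly smaller than the second-best candidate's (Lowe's test)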
        good = []
        try:
            for m,n in matches:
                if m.distance < 0.7*n.distance:
                    good.append(m)
        except ValueError:
            good = []
        
        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

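            # estimate the homography with RANSAC; 5.0 is the reprojection error threshold in pixels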
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            
            h,w = img1.shape
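            # project the reference image's corners into the camera frame to outline the detected object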
            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts,M)
            
            cv2.polylines(img2,[np.int32(dst)], True, (0,0,255) ,3)

        else:
            # print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
            matchesMask = None
            good = []
        
        img3 = draw_match(img1,kp1,img2,kp2,good, matchesMask=matchesMask)
        
        
        cv2.imshow('matches', img3)
        
    print 'press any key to continue'