Example no. 1
"""
Press 's' to take a picture or 'l' to load one and start real-time
"""

import cv2
import numpy as np
from cam import MyCam

from fmatch import draw_match


MIN_MATCH_COUNT = 10

# Initiate SIFT detector
sift = cv2.SIFT()
cam = MyCam()
cam.size = (640, 480)
img1 = cv2.imread('box.png', 0)

cv2.imshow('source', img1)
while True:
    
    img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
        cv2.imwrite('campic.png', img1)
    elif k == 27:
        break
    
    
Example no. 2
"""
Press 's' to take a picture or 'l' to load one and start real-time
"""

import cv2
import numpy as np
from cam import MyCam

from fmatch import draw_match

MIN_MATCH_COUNT = 10

# Initiate ORB detector
orb = cv2.ORB()
cam = MyCam()
cam.size = (640, 480)
img1 = cv2.imread('box.png', 0)

cv2.imshow('source', img1)
while True:

    img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
        cv2.imwrite('campic.png', img1)
    elif k == 27:
        break

    # re-detect keypoints and descriptors on the source image when a key was pressed
    if k != -1:
Example no. 3
        cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
        if cross_product * orientation > 0:
            tmp = np.flipud(tmp)

        # rearrange the points so that the returned point array always starts
        # with the point that is closest to the origin
        tmp = tmp.reshape(-1, 2)
        distance_from_origin = map(lambda pt: pt[0]**2 + pt[1]**2, tmp)
        val, idx = min(
            (val, idx) for (idx, val) in enumerate(distance_from_origin))

        if idx > 0:
            up, down = np.vsplit(tmp, [idx])
            tmp = np.vstack((down, up))

        polygons.append(tmp)

    return polygons


if __name__ == "__main__":
    cam = MyCam()
    while True:
        img = cam.read()
        polygons = find_polygons(img, 4)
        for ctr in polygons:
            draw_oriented_polylines(img, ctr, 0, (0, 0, 255), 2, (255, 0, 0))
        cv2.imshow('find quadrangles', img)
        k = cv2.waitKey(5)
        if k == 27:
            break
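
The beginning of find_polygons is cut off in this snippet. One common way to produce the candidate quadrilaterals that the code above normalizes (a sketch only, not necessarily what this project does) is edge detection followed by polygon approximation of the contours:

import cv2
import numpy as np


def find_polygon_candidates(img, n_sides):
    # Hypothetical front half of find_polygons: Canny edges, contours,
    # and polygon approximation; keep only contours with n_sides corners.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    candidates = []
    for ctr in contours:
        approx = cv2.approxPolyDP(ctr, 0.02 * cv2.arcLength(ctr, True), True)
        if len(approx) == n_sides and cv2.contourArea(approx) > 1000:
            candidates.append(approx)
    return candidates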
Example no. 4
        
        
        
    def reset(self):
        self.screens = None
            
    def find(self):
        pass


if __name__ == '__main__':
    
    from random import randint
    win_size = (600, 800, 3)

    cam = MyCam()
    sf = ScreenFinder()
    
    black = np.zeros(win_size, np.uint8)
    
    while True:
        k = cv2.waitKey(5)
        if k == 27:
            break
        if k == ord('r'):
            sf.reset()
        
        cam_img = cam.read()
        on = True
        while sf.screens is None:
            
Example no. 5
import cv2
from cam import MyCam



def feature_matching(img):
    # placeholder: just pass the frame through unchanged
    return img

if __name__ == "__main__":
    cam = MyCam()
    cam.cam_loop(feature_matching)
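
The feature_matching callback above is only a pass-through stub. A minimal sketch of what it could grow into, assuming MyCam.cam_loop calls the function on every BGR frame and displays whatever it returns (as the other examples suggest):

import cv2
from cam import MyCam

orb = cv2.ORB()  # OpenCV 2.x API, as in the other examples


def feature_matching(img):
    # detect ORB keypoints in the frame and return a copy with them drawn,
    # so cam_loop has something visible to show
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    keypoints = orb.detect(gray, None)
    return cv2.drawKeypoints(img, keypoints, None, (0, 255, 0))


if __name__ == "__main__":
    cam = MyCam()
    cam.cam_loop(feature_matching)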
Example no. 6
import cv2
import numpy as np

from cam import MyCam


def buildpyr(img_in):

    h = img_in.shape[0]
    w = img_in.shape[1]
    d = img_in.shape[2]

    img_out = np.zeros((h + h // 2, w, d), np.uint8)
    img_pyr = img_in
    x, y = 0, 0
    dx, dy = w, h

    for i in range(10):
        # place image at x, y
        img_out = mix_image(img_out, img_pyr, (x, y))

        if i % 2 == 0:
            y = y + dy
        else:
            x = x + dx

        dx, dy = dx // 2, dy // 2

        img_pyr = cv2.pyrDown(img_pyr)

    return img_out


if __name__ == "__main__":
    cam = MyCam()
    cam.cam_loop(buildpyr)
    # cv2.waitKey(0)
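
mix_image is used by buildpyr but is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it pastes src into dst with its top-left corner at pos = (x, y) and clips whatever does not fit:

import numpy as np


def mix_image(dst, src, pos):
    # hypothetical helper: copy src into dst at top-left corner pos = (x, y),
    # clipping at the borders of dst
    x, y = int(pos[0]), int(pos[1])
    h = min(src.shape[0], dst.shape[0] - y)
    w = min(src.shape[1], dst.shape[1] - x)
    if h > 0 and w > 0:
        dst[y:y + h, x:x + w] = src[:h, :w]
    return dst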
Example no. 7
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()
        

        
if __name__ == '__main__':

    winName = "cam test"
    cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
    # Read three images first:

    cam = MyCam()
    md = MotionDetector(N=2, shape=cam.read().shape)

    while True:
        
        md.feed_image(cam.read())
        cv2.imshow(winName, md.diff)
        
        key = cv2.waitKey(10)
        if key == 27 or key == 32:
            cam.release()
            cv2.destroyWindow(winName)
            break

    print "Goodbye"
Example no. 8
        if self.screen2cam_matrix is None: return cam_img
        img = cam_img.copy()
        draw_oriented_polylines(img, self._screen_corners, True, (0, 0, 255),
                                3)

        return img

    def find_top_view(self, cam_img):
        shape = (self._screen_img.shape[1], self._screen_img.shape[0])
        img = cv2.warpPerspective(cam_img, self.cam2screen_matrix, shape)
        return img


if __name__ == '__main__':
    sf = ScreenFinder()
    cam = MyCam()
    cam.size = (640, 480)

    img = cv2.imread('seabunny_lying.png', 0)
    cv2.imshow('source', img)

    sf.set_screen_img(img)
    if img.shape[0] * img.shape[1] > cam.size[0] * cam.size[1]:
        img = cv2.resize(img, cam.size)

    while True:
        cam_img = cam.read()

        while not sf.screen_is_found:
            cam_img = cam.read()
            sf.find_screen_img(cam_img, debug=True)
Example no. 9
"""

import cv2
import numpy as np
from cam import MyCam

from fmatch import draw_match


print __doc__

MIN_MATCH_COUNT = 10

# Initiate SIFT detector
sift = cv2.SIFT()
cam = MyCam()
cam.size = (640, 480)  # (160, 120)
img1 = cv2.imread('box.png', 0)
cv2.imshow('source', img1)

if img1.shape[0] * img1.shape[1] > cam.size[0] * cam.size[1]:
    img1 = cv2.resize(img1, cam.size)

kp1, des1 = sift.detectAndCompute(img1,None)
    
while True:
    
    img2 = cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
Example no. 10
"""
Press 's' to take a picture or 'l' to load one and start real-time
"""

import cv2
import numpy as np
from cam import MyCam

from fmatch import draw_match

MIN_MATCH_COUNT = 10

# Initiate SIFT detector
sift = cv2.SIFT()
cam = MyCam()
cam.size = (640, 480)  #(160, 120)
img1 = cv2.imread('box.png', 0)
cv2.imshow('source', img1)

if img1.shape[0] * img1.shape[1] > cam.size[0] * cam.size[1]:
    img1 = cv2.resize(img1, cam.size)

kp1, des1 = sift.detectAndCompute(img1, None)

while True:

    img2 = cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY)
    k = cv2.waitKey(5)
    if k == ord('s'):
        img1 = img2.copy()
        kp1, des1 = sift.detectAndCompute(img1, None)
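
    # NOTE: the original file is cut off here. A sketch of how the loop might
    # continue, mirroring the ratio-test matching of the ORB example further
    # below, but with NORM_L2 because SIFT descriptors are floating point:
    if k == 27:
        break

    kp2, des2 = sift.detectAndCompute(img2, None)
    if des2 is None:
        continue

    bf = cv2.BFMatcher(cv2.NORM_L2)
    matches = bf.knnMatch(des1, des2, k=2)
    good = [m[0] for m in matches
            if len(m) == 2 and m[0].distance < 0.7 * m[1].distance]

    img3 = draw_match(img1, kp1, img2, kp2, good)
    cv2.imshow('matches', img3)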
Example no. 11
        f0, f1, f2 = self.three_frames()
        d1 = cv2.absdiff(f1, f0)
        d2 = cv2.absdiff(f2, f1)
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()


winName = "cam test"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
# Read three images first:

cam = MyCam()
md = MotionDetector(N=2, shape=cam.read().shape)

while True:

    md.feed_image(cam.read())
    cv2.imshow(winName, md.diff)

    key = cv2.waitKey(10)
    if key == 27 or key == 32:
        cam.release()
        cv2.destroyWindow(winName)
        break

print "Goodbye"
Example no. 12
        
        # now sort the points counter-clockwise so we can eliminate similar solutions
        cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
        if cross_product * orientation > 0:
            tmp = np.flipud(tmp)
        
        # rearrange the points so that the returned point array always starts
        # with the point that is closest to the origin
        tmp = tmp.reshape(-1,2)
        distance_from_origin = map(lambda pt: pt[0] ** 2 + pt[1] ** 2, tmp)
        val, idx = min((val, idx) for (idx, val) in enumerate(distance_from_origin))
        
        if idx > 0:
            up, down = np.vsplit(tmp, [idx])
            tmp = np.vstack((down, up))
        
        polygons.append(tmp)
    
    return polygons
    
if __name__ == "__main__":
    cam = MyCam()
    while True:
        img = cam.read()
        polygons = find_polygons(img, 4)
        for ctr in polygons:
            draw_oriented_polylines(img, ctr, 0, (0,0,255), 2, (255,0,0))
        cv2.imshow('find quadrangles',img)
        k = cv2.waitKey(5)
        if k == 27:
            break
Example no. 13
def buildpyr(img_in):

    h = img_in.shape[0]
    w = img_in.shape[1]
    d = img_in.shape[2]
    
    img_out = np.zeros((h + h // 2, w, d), np.uint8)
    img_pyr = img_in
    x, y = 0, 0
    dx, dy = w, h
    
    
    for i in range(10):
        # place image at x, y
        img_out = mix_image(img_out, img_pyr, (x, y))
        
        if i % 2 == 0:
            y = y + dy
        else:
            x = x + dx
        
        dx, dy = dx // 2, dy // 2
        
        img_pyr = cv2.pyrDown(img_pyr)
    
    
    return img_out
    

if __name__ == "__main__":
    cam = MyCam()
    cam.cam_loop(buildpyr)
    # cv2.waitKey(0)
Example no. 14
        if self.screen2cam_matrix is None: return cam_img
        img = cam_img.copy()
        draw_oriented_polylines(img, self._screen_corners, True, (0,0,255), 3)

        return img
        
    
    def find_top_view(self, cam_img):
        shape = (self._screen_img.shape[1], self._screen_img.shape[0])
        img = cv2.warpPerspective(cam_img, self.cam2screen_matrix, shape)
        return img


if __name__ == '__main__':
    sf = ScreenFinder()
    cam = MyCam()
    cam.size = (640, 480)
    
    img = cv2.imread('seabunny_lying.png', 0)
    cv2.imshow('source', img)

    sf.set_screen_img(img)
    if img.shape[0] * img.shape[1] > cam.size[0] * cam.size[1]:
        img = cv2.resize(img, cam.size)

    while True:
        cam_img = cam.read()
        
        while not sf.screen_is_found:
            cam_img = cam.read()
            sf.find_screen_img(cam_img, debug=True)
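
        # NOTE: the original loop is cut off above. A plausible continuation
        # (a sketch, not the original code), assuming the goal is to show the
        # rectified view once the screen has been located:
        top_view = sf.find_top_view(cam_img)
        cv2.imshow('top view', top_view)

        k = cv2.waitKey(5)
        if k == 27:
            break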
Example no. 15
import cv2
import numpy as np

from fmatch import draw_match


def test_feature_matching_realtime(detector=cv2.ORB()):
    """
    Press 's' to take a picture or 'l' to load one and start real-time
    """
    from cam import MyCam

    MIN_MATCH_COUNT = 10

    cam = MyCam()
    cam.size = (640, 480)
    img1 = cv2.imread('box.png', 0)

    cv2.imshow('source', img1)
    kp1, des1 = detector.detectAndCompute(img1, None)

    while True:
        
        img2 = cv2.flip(cv2.cvtColor(cam.read(), cv2.COLOR_BGR2GRAY), 1)
        k = cv2.waitKey(5)
        if k == ord('s'):
            img1 = img2.copy()
            cv2.imwrite('campic.png', img1)
        elif k == 27:
            break
        
        
        
        # re-detect keypoints and descriptors on the source image when a key was pressed
        if k != -1:
            cv2.destroyWindow('preview')
            kp1, des1 = detector.detectAndCompute(img1, None)

        kp2, des2 = detector.detectAndCompute(img2, None)
        
        
        # if no descriptors were found in the frame, show an empty match image and retry
        if des2 is None:
            img3 = draw_match(img1, kp1, img2, kp2, [])
            cv2.imshow('matches', img3)
            continue
        
        des1 = des1.astype(np.uint8, copy=False)    # Fix the data type
        des2 = des2.astype(np.uint8, copy=False)
        
        
        # Now match the descriptors
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # matches = bf.match(des1,des2)
        
        matches = bf.knnMatch(des1,des2, k=2)
        
        # m = matches[0][0]
        # p1, p2 = np.float32(kp1[m.queryIdx].pt), np.float32(kp2[m.trainIdx].pt)
        # print m.distance, p1, p2
        
        # Apply ratio test
        good = []
        try:
            for m,n in matches:
                if m.distance < 0.7*n.distance:
                    good.append(m)
        except ValueError:
            good = []
        
        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            
            h,w = img1.shape
            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts,M)
            
            cv2.polylines(img2,[np.int32(dst)], True, (0,0,255) ,3)

        else:
            # print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
            matchesMask = None
            good = []
        
        img3 = draw_match(img1,kp1,img2,kp2,good, matchesMask=matchesMask)
        
        
        cv2.imshow('matches', img3)
        
    print 'press any key to continue'
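    # (sketch) actually wait for the promised key press; the original file is
    # cut off at this point
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    # hypothetical entry point for the function above, using its default ORB detector
    test_feature_matching_realtime()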