Example 1
import cv2
import numpy as np

from cam import OpenCV_Cam
# ScreenFinder is the project's camera-to-screen perspective helper;
# its import is not shown in this excerpt.


class LaserScanner(object):
    def __init__(self):
        self.showCamView = True
        self.showTopView = False
        self.screenIsFound = False
        self.captureLaser = False
        self.cam = OpenCV_Cam()
        self.screenFinder = ScreenFinder()

    def findScreen(self):
        self.screenFinder.clear_found()
        while not self.screenIsFound:
            img = self.cam.read()
            self.screenFinder.find_screen_img(img)
            # keep looping until the ScreenFinder reports success (or ESC is pressed)
            self.screenIsFound = self.screenFinder.screen_is_found
            cv2.imshow('camera image', img)
            k = cv2.waitKey(5)
            if k == 27:
                break
        
        cv2.destroyWindow('camera image')

    def setScreenImage(self, bgImage): 
        self.screenFinder.set_screen_img(bgImage)
        self.background = bgImage

    def capture(self):
        if self.captureLaser:
            img = self.cam.read()
            Xcam = self.getLaserLocation(img)
            print Xcam
            #x, y = tuple(self.screenFinder.reverse_transform(Xcam).reshape(-1))
            return Xcam
        

    def update(self):
        self.img = self.cam.read()
        print self.capture()

    def show(self):
        cv2.imshow('Burn this image', self.background)
        if self.showTopView:
            top_view = self.screenFinder.screen_top_view(self.img)
            cv2.imshow('Top view', top_view)
        if self.showCamView:
            cv2.imshow('Cam view', self.img)
        

    @staticmethod
    def getLaserLocation(image):
        red_part = image[:,:,2]
        ly, lx = np.unravel_index(red_part.argmax(), red_part.shape)
        return np.array([lx, ly])
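
This excerpt does not show how LaserScanner is driven. A minimal, hypothetical main block (assuming the imports above and a background image such as wood.png) might look like this:

if __name__ == '__main__':
    scanner = LaserScanner()

    # register the projected background, then locate it in the camera view
    background = cv2.imread('wood.png')
    scanner.setScreenImage(background)
    scanner.findScreen()

    scanner.captureLaser = True
    while True:
        scanner.update()    # grab a frame and report the laser location
        scanner.show()      # display the background and any enabled views
        if cv2.waitKey(5) == 27:    # ESC quits
            break
    cv2.destroyAllWindows()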
Example 2
    def __init__(self):
        self.showCamView = True
        self.showTopView = False
        self.screenIsFound = False
        self.captureLaser = False
        self.cam = OpenCV_Cam()
        self.screenFinder = ScreenFinder()
Example 3
        return img

    def reverse_transform(self, cam_pts):
        # map points from camera-image coordinates into screen coordinates
        pts = np.float32(cam_pts).reshape(-1, 1, 2)
        return cv2.perspectiveTransform(pts, self.cam2screen_matrix)

    def screen_top_view(self, cam_img):
        # warp the camera image into the screen's own coordinate frame
        shape = (self._screen_img.shape[1], self._screen_img.shape[0])
        img = cv2.warpPerspective(cam_img, self.cam2screen_matrix, shape)
        return img
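
This excerpt does not show how cam2screen_matrix is constructed. A minimal sketch of the usual construction from four corner correspondences (the corner coordinates below are invented for illustration) would be:

import cv2
import numpy as np

# four screen corners as detected in the camera image (hypothetical values)
cam_corners = np.float32([[102, 80], [541, 95], [530, 402], [110, 390]])
# the same corners in screen-image coordinates, e.g. for an 800x600 screen image
screen_corners = np.float32([[0, 0], [800, 0], [800, 600], [0, 600]])

# 3x3 homography mapping camera coordinates onto screen coordinates, usable
# with cv2.perspectiveTransform and cv2.warpPerspective as in the methods above
cam2screen_matrix = cv2.getPerspectiveTransform(cam_corners, screen_corners)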


if __name__ == '__main__':
    sf = ScreenFinder()
    cam = OpenCV_Cam()
    cam.size = (640, 480)

    color_img = cv2.imread('wood.png')
    img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
    cv2.imshow('source', color_img)

    sf.set_screen_img(img)
    if img.shape[0] * img.shape[1] > cam.size[0] * cam.size[1]:
        img = cv2.resize(img, cam.size)

    while True:
        cam_img = cam.read()

        if not sf.screen_is_found:
            sf.find_screen_loop(cam, True)
Example 4
        # three-frame differencing: a pixel counts as motion only if it changed
        # both between f0 and f1 and between f1 and f2
        f0, f1, f2 = self.three_frames()
        d1 = cv2.absdiff(f1, f0)
        d2 = cv2.absdiff(f2, f1)
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        # store the new frame in the ring buffer (2*N slots) and refresh the diff
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()


winName = "cam test"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)

cam = OpenCV_Cam()
md = MotionDetector(N=2, shape=cam.read().shape)

while True:

    md.feed_image(cam.read())
    cv2.imshow(winName, md.diff)

    key = cv2.waitKey(10)
    if key == 27 or key == 32:
        cam.release()
        cv2.destroyWindow(winName)
        break

print "Goodbye"
Example 5
        
        # put the points in a consistent (counter-clockwise) winding so
        # equivalent orderings collapse to a single solution
        cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
        if cross_product * orientation > 0:
            tmp = np.flipud(tmp)
        
        # rotate the point array so that it always starts with the point
        # closest to the origin
        tmp = tmp.reshape(-1,2)
        distance_from_origin = map(lambda pt: pt[0] ** 2 + pt[1] ** 2, tmp)
        val, idx = min((val, idx) for (idx, val) in enumerate(distance_from_origin))
        
        if idx > 0:
            up, down = np.vsplit(tmp, [idx])
            tmp = np.vstack((down, up))
        
        polygons.append(tmp)
    
    return polygons
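
As a standalone check of the ordering logic above, the same steps applied to a hypothetical quadrilateral (chosen so that both the flip and the rotation actually trigger) produce the canonical ordering:

import numpy as np

orientation = 1  # plays the role of the orientation value used in find_polygons above
tmp = np.float32([[5, 4], [1, 4], [1, 1], [5, 1]]).reshape(-1, 1, 2)

# flip the winding if the cross product has the wrong sign
cross_product = np.cross(tmp[1] - tmp[0], tmp[2] - tmp[1])[0]
if cross_product * orientation > 0:
    tmp = np.flipud(tmp)

# rotate the array so it starts with the point closest to the origin
tmp = tmp.reshape(-1, 2)
distance_from_origin = [pt[0] ** 2 + pt[1] ** 2 for pt in tmp]
val, idx = min((val, idx) for (idx, val) in enumerate(distance_from_origin))
if idx > 0:
    up, down = np.vsplit(tmp, [idx])
    tmp = np.vstack((down, up))

print tmp  # first row is now (1, 1), the corner nearest the origin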
    
if __name__ == "__main__":
    cam = OpenCV_Cam()
    while True:
        img = cam.read()
        polygons = find_polygons(img, 4)
        for ctr in polygons:
            draw_oriented_polylines(img, ctr, 0, (0,0,255), 2, (255,0,0))
        cv2.imshow('find quadrangles',img)
        k = cv2.waitKey(5)
        if k == 27:
            break
Example 6
import cv2
import numpy as np
from cam import OpenCV_Cam


if __name__ == '__main__':

    cam = OpenCV_Cam()
    w, h = cam.size
    cam.set('EXPOSURE', 0)
    dst = np.full((h, w, 3), 0, dtype=np.uint8)
    
    # Blend the exposures with equal weight: dst keeps its full value while each
    # new frame is added with weight 1/N, so dst ends up as the mean of the N frames.
    exp_list = range(-2, -9, -1)
    ratio = 1.0 / len(exp_list)
    for e in exp_list:
        cam.set('EXPOSURE', e)
        # the camera may return None until a frame at the new exposure is ready
        img = None
        while img is None:
            img = cam.read()
        dst = cv2.addWeighted(dst, 1.0, img, ratio, 0)
        
        cv2.imshow(str(e), img)
        cv2.imshow('dst'+str(e), dst)
        cv2.waitKey(10)
    
    cv2.imshow('mix', dst)
    
    k = cv2.waitKey(0)
Example 7
    shift[:,:,0] = offset

    hsv_img = hsv_img + shift



    lower = np.array(lower, dtype = "uint8")
    upper = np.array(upper, dtype = "uint8")
 
    mask = cv2.inRange(hsv_img, lower, upper)
    return mask
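
The top of get_mask is not included in this excerpt, so how the hue offset and the negative lower bound interact is not visible here. A self-contained, hypothetical helper illustrating the underlying idea (red straddles hue 0 in OpenCV's 0-179 hue range, so shifting the hue channel makes a range like [-5, 15] contiguous before cv2.inRange) could look like this; the function name, offset handling, and defaults are assumptions, not the original code:

import cv2
import numpy as np

def hue_shift_mask(bgr_img, lower, upper, offset=5, blur=5):
    # hypothetical helper, not the original get_mask: shift the hue channel
    # (wrapping at 180) so a red range such as [-5, 15] becomes [0, 20]
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    hsv = cv2.blur(hsv, (blur, blur))

    hue = hsv[:, :, 0].astype(np.int16)
    hsv[:, :, 0] = ((hue + offset) % 180).astype(np.uint8)

    lower = np.array([lower[0] + offset, lower[1], lower[2]], dtype=np.uint8)
    upper = np.array([upper[0] + offset, upper[1], upper[2]], dtype=np.uint8)
    return cv2.inRange(hsv, lower, upper)

# usage, e.g.: red_mask = hue_shift_mask(frame, [-5, 150, 0], [15, 255, 255])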


if __name__ == '__main__':

    cam = OpenCV_Cam()

    red_bound = ([-5, 150, 0], [15, 255, 255])
    green_bound = ([80, 100, 0], [105, 255, 255])

    while True:
        
        image = cam.read()
        red_mask   = get_mask(image, *red_bound)
        green_mask = get_mask(image, *green_bound)

        masks = cv2.bitwise_or(red_mask, green_mask)

        cv2.imshow('masks', np.hstack([red_mask, green_mask]))

        output = cv2.bitwise_and(image, image, mask = masks)
Example 8
    return np.array([lx, ly])

def find_threshold(cam):
    img = cam.read()
    hx, hy = find_laser_loc(img, 0)
    # cast to int so adding the +10 margin cannot wrap around at 255 (uint8 overflow)
    threshold = min(int(img[hy, hx, 2]) + 10, 255)
    print "The red threshold is automatically determined to be", threshold
    return threshold

background = cv2.imread('wood.png')
cv2.imshow('Burn this page!', background)

sf = ScreenFinder()
sf.set_screen_img(background)

cam = OpenCV_Cam()
img = cam.read()
sf.find_screen_img(img)
sf.find_screen_loop(cam, False)

bs = background.shape
canvas = np.full((bs[0], bs[1], 4), 0, dtype=np.uint8)

# prepare threshold
thresh = find_threshold(cam)

show_top_view, show_cam_view = False, False
while True:
    img = cam.read()
    
    if show_cam_view:
Example 9
    return frame

    
def find_milkbox(contours):
    pentagons = []
    for ctr in contours:
        # pair each vertex with its predecessor (roll by one point along axis 0)
        vecs = zip(ctr, np.roll(ctr, 1, axis=0))
        normalized_vecs = []
        for vec in vecs:
            length = LA.norm(vec[0] - vec[1])
            norm_vec = (vec[0] - vec[1]) / length
            normalized_vecs.append(norm_vec)

        # dot product of adjacent unit edge vectors ~ cos(corner angle);
        # a value near zero means a near-right angle
        paired_vecs = zip(normalized_vecs, np.roll(normalized_vecs, 2))
        count = 0
        for pair in paired_vecs:
            angle = np.inner(pair[0], pair[1])
            if angle < 0.05:
                count = count + 1
        # a milk-box silhouette should show exactly two near-right angles
        if count != 2:
            continue

        pentagons.append(ctr)
        
    return pentagons
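
contour_proc is not part of this excerpt. A hypothetical frame callback for cam.cam_loop, wired to find_milkbox above (it assumes cv2 is imported as in the rest of the file; the Canny thresholds, approxPolyDP epsilon, and area cutoff are invented), could look like this:

def contour_proc(frame):
    # hypothetical callback: find contours, keep the milk-box-shaped ones, draw them
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    # [-2] keeps this working whether findContours returns 2 or 3 values
    contours = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]

    # simplify each contour so find_milkbox sees roughly one vertex per corner
    simplified = [cv2.approxPolyDP(ctr, 5, True) for ctr in contours
                  if cv2.contourArea(ctr) > 400]

    for ctr in find_milkbox(simplified):
        cv2.drawContours(frame, [ctr], -1, (0, 0, 255), 2)
    return frame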

    
if __name__ == "__main__":
    cam = OpenCV_Cam()
    cam.cam_loop(contour_proc)
Example 10
from cam import OpenCV_Cam
import cv2
import os.path
import time

cam = OpenCV_Cam(0)
cam.size = (1920, 1080)

KEY_ESC = 27
KEY_SPACE = ord(' ')
PAGE_DOWN = 2228224  # lets a presenter remote, which sends Page Down, drive the stop-motion capture
prevFrame = None
i = 0

# Make a directory in the current working directory, named with the date and time
timestr = time.strftime("%Y%m%d-%H%M%S")
cwd = os.getcwd()
dirName = os.path.join(cwd, timestr)
os.makedirs(dirName)

fname = os.path.join(cwd, "frame_.png")
if os.path.isfile(fname):
    prevFrame = cv2.imread(fname)

# Make an .avi file from the collected frames
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter(os.path.join(dirName, 'output_.avi'),
                        fourcc,
                        3.0,
                        cam.size,
                        isColor=True)
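
The capture loop itself is not part of this excerpt. A hypothetical continuation consistent with the constants above (space or Page Down stores a frame, ESC stops; the window name, key handling, and numbered file name are assumptions):

while True:
    frame = cam.read()
    cv2.imshow('stop motion', frame)

    key = cv2.waitKey(10)
    if key == KEY_ESC:
        break
    if key == KEY_SPACE or key == PAGE_DOWN:
        # hypothetical naming: store the captured frame as a numbered PNG in the
        # session directory and append it to the video
        cv2.imwrite(os.path.join(dirName, "frame_%04d.png" % i), frame)
        video.write(frame)
        i += 1

video.release()
cam.release()
cv2.destroyAllWindows()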
Example 11
def find_threshold(cam):
    img = cam.read()
    hx, hy = find_laser_loc(img, 0)
    # cast to int so adding the +10 margin cannot wrap around at 255 (uint8 overflow)
    threshold = min(int(img[hy, hx, 2]) + 10, 255)
    print "The red threshold is automatically determined to be", threshold
    return threshold


FILENAME = 'wood_800.png'
background = cv2.imread(FILENAME)
cv2.imshow('Burn this page!', background)

sf = ScreenFinder()
sf.set_screen_img(background)

cam = OpenCV_Cam()
cam.size = 640, 480
img = cam.read()
sf.find_screen_img(img)
sf.find_screen_loop(cam, False)

bs = background.shape
canvas = np.full((bs[0], bs[1], 4), 0, dtype=np.uint8)

# prepare threshold
thresh = find_threshold(cam)

show_top_view, show_cam_view = False, False
while True:
    img = cam.read()
Example 12
import cv2
import numpy as np
from cam import OpenCV_Cam

if __name__ == '__main__':

    cam = OpenCV_Cam()
    w, h = cam.size
    cam.set('EXPOSURE', 0)
    dst = np.full((h, w, 3), 0, dtype=np.uint8)

    exp_list = range(-2, -9, -1)
    ratio = 1.0 / len(exp_list)
    for e in exp_list:
        cam.set('EXPOSURE', e)
        img = None
        while img is None:
            img = cam.read()
        dst = cv2.addWeighted(dst, 1.0, img, ratio, 0)

        cv2.imshow(str(e), img)
        cv2.imshow('dst' + str(e), dst)
        cv2.waitKey(10)

    cv2.imshow('mix', dst)

    k = cv2.waitKey(0)
Example 13
        
    def reverse_transform(self, cam_pts):
        pts = np.float32(cam_pts).reshape(-1,1,2)
        return cv2.perspectiveTransform(pts, self.cam2screen_matrix)
    
    
    def screen_top_view(self, cam_img):
        shape = (self._screen_img.shape[1], self._screen_img.shape[0])
        print self.cam2screen_matrix
        img = cv2.warpPerspective(cam_img, self.cam2screen_matrix, shape)
        return img


if __name__ == '__main__':
    sf = ScreenFinder()
    cam = OpenCV_Cam()
    cam.size = (640, 480)
    
    color_img = cv2.imread('wood.png')
    img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
    cv2.imshow('source', color_img)

    sf.set_screen_img(img)
    if img.shape[0] * img.shape[1] > cam.size[0] * cam.size[1]:
        img = cv2.resize(img, cam.size)

    while True:
        cam_img = cam.read()
        
        if not sf.screen_is_found:
            sf.find_screen_loop(cam, True)
Example 14
import cv2
from cam import OpenCV_Cam

if __name__ == '__main__':
    cam = OpenCV_Cam()
    cam.size = (640, 480)
    fourcc = cv2.cv.CV_FOURCC(*'XVID')
    video = cv2.VideoWriter('output.avi', fourcc, 30.0, (640, 480))

    while True:
        img = cam.read()
        cv2.imshow('img', img)
        video.write(img)
        # frames are grabbed roughly once per second; played back at 30 fps this gives a time-lapse
        k = cv2.waitKey(1000)
        if k == 27:
            break

    cv2.destroyAllWindows()
    cam.release()
    video.release()
Example 15
    hsv_img = cv2.blur(hsv_img, (blur, blur))
    shift = np.full_like(img, 0, np.uint8)
    shift[:, :, 0] = offset

    hsv_img = hsv_img + shift

    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")

    mask = cv2.inRange(hsv_img, lower, upper)
    return mask


if __name__ == '__main__':

    cam = OpenCV_Cam()

    red_bound = ([-5, 150, 0], [15, 255, 255])
    green_bound = ([80, 100, 0], [105, 255, 255])

    while True:

        image = cam.read()
        red_mask = get_mask(image, *red_bound)
        green_mask = get_mask(image, *green_bound)

        masks = cv2.bitwise_or(red_mask, green_mask)

        cv2.imshow('masks', np.hstack([red_mask, green_mask]))

        output = cv2.bitwise_and(image, image, mask=masks)
Example 16
from cam import OpenCV_Cam
import cv2
import os.path
import time

cam = OpenCV_Cam(0)
cam.size = (1920, 1080)

KEY_ESC = 27
KEY_SPACE = ord(' ')
PAGE_DOWN = 2228224  # lets a presenter remote, which sends Page Down, drive the stop-motion capture
prevFrame = None
i = 0 


# Make a directory in the current working directory, named with the date and time
timestr = time.strftime("%Y%m%d-%H%M%S")
cwd = os.getcwd()
dirName = os.path.join(cwd, timestr)
os.makedirs(dirName)


fname = os.path.join(cwd, "frame_.png")
if os.path.isfile(fname):
    prevFrame = cv2.imread(fname)


# Make an .avi file from the collected frames
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter(os.path.join(dirName, 'output_.avi'), fourcc, 3.0, cam.size, isColor=True)
Example 17
    return np.array([lx, ly])

def find_threshold(cam):
    img = cam.read()
    hx, hy = find_laser_loc(img, 0)
    # cast to int so adding the +10 margin cannot wrap around at 255 (uint8 overflow)
    threshold = min(int(img[hy, hx, 2]) + 10, 255)
    print "The red threshold is automatically determined to be", threshold
    return threshold

background = cv2.imread('wood.png')
cv2.imshow('Burn this page!', background)

sf = ScreenFinder()
sf.set_screen_img(background)

cam = OpenCV_Cam()
cam.size = 640, 480
img = cam.read()
sf.find_screen_img(img)
sf.find_screen_loop(cam, False)

bs = background.shape
canvas = np.full((bs[0], bs[1], 4), 0, dtype=np.uint8)

# prepare threshold
thresh = find_threshold(cam)

show_top_view, show_cam_view = False, False
while True:
    img = cam.read()
    
Example 18
def find_pentagons(contours):
    pentagons = []
    for ctr in contours:
        area = cv2.contourArea(ctr)

        # area test
        if area < 400: continue

        simp_ctr = cv2.approxPolyDP(ctr, 3, True)
        simp_area = cv2.contourArea(simp_ctr)
        if simp_area < 400: continue
        if len(simp_ctr) != 5: continue

        # edge lengths between consecutive vertices of the simplified contour
        edges = [
            LA.norm(a - b) for a, b in zip(simp_ctr, np.roll(simp_ctr, 2))
        ]

        # squared relative deviation of each edge from the first edge
        ratios = [(1 - e / edges[0])**2 for e in edges]

        # reject the contour if any edge deviates too much from the first one
        passr = [r for r in ratios if r > 0.2]
        if len(passr) > 0: continue

        pentagons.append(simp_ctr)

    return pentagons


if __name__ == "__main__":
    cam = OpenCV_Cam()
    cam.cam_loop(contour_proc)
Example 19
        d1 = cv2.absdiff(f1, f0)
        d2 = cv2.absdiff(f2, f1)
        self.diff = cv2.bitwise_and(d1, d2)

    def feed_image(self, image):
        self._index = (self._index + 1) % (2 * self._N)
        self._frame[self._index] = image
        self.diff_img()
        
    

winName = "cam test"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)

cam = OpenCV_Cam()
md = MotionDetector(N=2, shape=cam.read().shape)

while True:
    
    md.feed_image(cam.read())
    cv2.imshow(winName, md.diff)
    
    key = cv2.waitKey(10)
    if key == 27 or key == 32:
        cam.release()
        cv2.destroyWindow(winName)
        break

print "Goodbye"