Example #1
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"
    
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow ('Camera', 10, 10)
    device = 0 #use first device found
    capture = highgui.cvCreateCameraCapture (device)
    frame = highgui.cvQueryFrame (capture)
    frame_size = cv.cvGetSize (frame)
    fps = 30
        
    while 1:
        
        frame = highgui.cvQueryFrame (capture)
        
        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage ('Camera', frame)

        # handle events
        k = highgui.cvWaitKey (5)

        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            quit()
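For comparison, here is a rough sketch of the same capture-and-display loop against the modern cv2 API (not part of the original example; it assumes OpenCV is installed as cv2 and a webcam is available at index 0):

import cv2

cv2.namedWindow('Camera', cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Camera', 10, 10)

cap = cv2.VideoCapture(0)  # use first capture device found
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # detectFace(frame) would be called here in the original program
    cv2.imshow('Camera', frame)
    if cv2.waitKey(5) & 0xFF == 27:  # ESC key exits
        break

cap.release()
cv2.destroyAllWindows()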
Example #2
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 10, 10)
    device = 0  #use first device found
    capture = highgui.cvCreateCameraCapture(device)
    frame = highgui.cvQueryFrame(capture)
    frame_size = cv.cvGetSize(frame)
    fps = 30

    while 1:

        frame = highgui.cvQueryFrame(capture)

        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage('Camera', frame)

        # handle events
        k = highgui.cvWaitKey(5)

        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            quit()
Example #3
    def __init__(self,
                 name,
                 size=2,
                 draw_center=True,
                 draw_grid=True,
                 meters_radius=4.0):
        """
			 name = name of window
			 meter_radus = 4.0
			 size = multiple of 400x200 to use for screen
			 meter_radius = how many per metrer 
		"""
        self.draw_center = draw_center
        self.draw_grid = draw_grid
        self.w = int(round(size * 400.0))
        self.h = int(round(size * 200.0))

        self.meters_disp = 4.0  #Range in meters of area around robot to display
        self.laser_win = name
        self.buffer = cv.cvCreateImage(cv.cvSize(self.w, 2 * self.h),
                                       cv.IPL_DEPTH_8U, 3)
        #print "RobotDisp: window width", self.buffer.width
        #print "RobotDisp: window height", self.buffer.height
        self.pixels_per_meter = self.h / self.meters_disp
        hg.cvNamedWindow(name, hg.CV_WINDOW_AUTOSIZE)
        hg.cvMoveWindow(name, 0, 50)

        self.font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, as_int(1),
                                  as_int(1), 0, 1, cv.CV_AA)
Example #4
    def _make_windows(self):
        windows = ['video', 'right', 'thresholded', 'motion', 'intensity', 'patch', 'big_patch']
        for n in windows:
            hg.cvNamedWindow(n, 1)
        hg.cvMoveWindow("video",       0,   0)
        hg.cvMoveWindow("right",       800, 0)
        hg.cvMoveWindow("thresholded", 800, 0)
        hg.cvMoveWindow("intensity",   0,   600)
        hg.cvMoveWindow("motion",      800, 600)
Example #5
    def _make_windows(self):
        windows = [
            'video', 'right', 'thresholded', 'motion', 'intensity', 'patch',
            'big_patch'
        ]
        for n in windows:
            hg.cvNamedWindow(n, 1)
        hg.cvMoveWindow("video", 0, 0)
        hg.cvMoveWindow("right", 800, 0)
        hg.cvMoveWindow("thresholded", 800, 0)
        hg.cvMoveWindow("intensity", 0, 600)
        hg.cvMoveWindow("motion", 800, 600)
Example #6
def display_images(image_list, max_x=1200, max_y=1000, save_images=False):
    """
	Display a list of OpenCV images tiled across the screen
	with maximum width of max_x and maximum height of max_y

	save_images - will save the images(with timestamp)
	"""

    curtime = time.localtime()
    date_name = time.strftime('%Y_%m_%d_%I%M%S', curtime)

    loc_x, loc_y = 0, 0
    wins = []
    for i, im in enumerate(image_list):
        if save_images:
            if im.nChannels == 1 and im.depth == cv.IPL_DEPTH_32F:
                clr = cv.cvCreateImage(cv.cvSize(im.width, im.height),
                                       cv.IPL_DEPTH_8U, 1)
                cv.cvConvertScale(im, clr, 255.0)
                im = clr
            highgui.cvSaveImage('image%d_' % i + date_name + '.png', im)

        window_name = 'image %d' % i
        wins.append((window_name, im))
        highgui.cvNamedWindow(window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvMoveWindow(window_name, loc_x, loc_y)
        loc_x = loc_x + im.width
        if loc_x > max_x:
            loc_x = 0
            loc_y = loc_y + im.height
            if loc_y > max_y:
                loc_y = 0
    while True:
        for name, im in wins:
            highgui.cvShowImage(name, im)
        keypress = highgui.cvWaitKey(10)
        if keypress == '\x1b':
            break
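A minimal usage sketch for display_images, assuming the legacy opencv bindings are importable and the function's module-level imports (cv, highgui, time) are in place; the file names here are hypothetical:

from opencv import highgui

# Tile two images on screen (file names are hypothetical); press ESC in any
# window to return from display_images().
images = [highgui.cvLoadImage('left.png'), highgui.cvLoadImage('right.png')]
display_images(images, max_x=1200, max_y=1000, save_images=False)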
Example #7
def display_images(image_list, max_x = 1200, max_y = 1000, save_images=False):
	"""
	Display a list of OpenCV images tiled across the screen
	with maximum width of max_x and maximum height of max_y

	save_images - will save the images (with timestamp)
	"""

	curtime=time.localtime()
	date_name = time.strftime('%Y_%m_%d_%I%M%S', curtime)

	loc_x, loc_y = 0, 0
	wins = []
	for i, im in enumerate(image_list):
		if save_images:
			if im.nChannels == 1 and im.depth == cv.IPL_DEPTH_32F:
				clr = cv.cvCreateImage(cv.cvSize(im.width, im.height), cv.IPL_DEPTH_8U, 1)
				cv.cvConvertScale(im, clr, 255.0)
				im = clr
			highgui.cvSaveImage('image%d_'%i+date_name+'.png', im)

		window_name = 'image %d' % i
		wins.append((window_name, im)) 
		highgui.cvNamedWindow(window_name, highgui.CV_WINDOW_AUTOSIZE)
		highgui.cvMoveWindow(window_name, loc_x, loc_y)
		loc_x = loc_x + im.width
		if loc_x > max_x:
			loc_x = 0
			loc_y = loc_y + im.height
			if loc_y > max_y:
				loc_y = 0
	while True:
		for name, im in wins:
			highgui.cvShowImage(name, im)
		keypress = highgui.cvWaitKey(10)
		if keypress == '\x1b':
			break
Example #8
    def __init__(self, name, size=2, draw_center=True, draw_grid=True, meters_radius=4.0):
		"""
			 name = name of window
			 meter_radus = 4.0
			 size = multiple of 400x200 to use for screen
			 meter_radius = how many per metrer 
		"""
		self.draw_center = draw_center
		self.draw_grid   = draw_grid
		self.w = int(round(size * 400.0))
		self.h = int(round(size * 200.0))


		self.meters_disp = 4.0  #Range in meters of area around robot to display
		self.laser_win = name
		self.buffer = cv.cvCreateImage(cv.cvSize(self.w, 2*self.h), cv.IPL_DEPTH_8U, 3)
		#print "RobotDisp: window width", self.buffer.width
		#print "RobotDisp: window height", self.buffer.height
		self.pixels_per_meter = self.h / self.meters_disp
		hg.cvNamedWindow(name, hg.CV_WINDOW_AUTOSIZE)
		hg.cvMoveWindow(name, 0, 50)

		self.font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 
				as_int(1), as_int(1), 0, 1, cv.CV_AA)
Example #9
#! /usr/bin/env python

import opencv
from opencv import highgui

cap = highgui.cvCreateFileCapture("../c/tree.avi")
img = highgui.cvQueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, " )"

highgui.cvNamedWindow("win", highgui.CV_WINDOW_AUTOSIZE)
highgui.cvShowImage("win", img)
highgui.cvMoveWindow("win", 200, 200)
highgui.cvWaitKey(0)

Example #10
if __name__ == '__main__':

    # a small welcome
    print "OpenCV Python capture video"

    # first, create the necessary window
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow('Color Segmentation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow('Canny', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow('Camera', 10, 10)

    try:
        # try to get the device number from the command line
        device = int(sys.argv[1])

        # got it ! so remove it from the arguments
        del sys.argv[1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len(sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture(device)
        highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH,
Example #11
#############################################################################
# so, here is the main part of the program

if __name__ == '__main__':

    print "OpenCV Python wrapper test"
    print "OpenCV version: %s (%d, %d, %d)" % (
        cv.CV_VERSION, cv.CV_MAJOR_VERSION, cv.CV_MINOR_VERSION,
        cv.CV_SUBMINOR_VERSION)

    # first, create the necessary windows
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Histogram', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow('Camera', 10, 40)
    highgui.cvMoveWindow('Histogram', 10, 270)

    global mouse_origin
    global mouse_selection
    global mouse_select_object
    mouse_select_object = False
    global track_object
    track_object = 0

    global track_comp
    global track_box

    track_comp = cv.CvConnectedComp()
    track_box = cv.CvBox2D()
Example #12
def create_and_position_window(name, xpos, ypos):
    """Creates a named widow placing it on the screen at (xpos, ypos)."""
    highgui.cvNamedWindow(name, highgui.CV_WINDOW_AUTOSIZE)  # Create window
    highgui.cvResizeWindow(name, cam_width, cam_height)  # Resize it
    highgui.cvMoveWindow(name, xpos, ypos)  # move to (xpos,ypos) on the screen
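A short usage sketch for the helper above; cam_width and cam_height are module-level globals the function relies on (illustrative values), and the window names are also illustrative:

# Module-level globals assumed by create_and_position_window() (illustrative values).
cam_width, cam_height = 320, 240

# Lay three equally sized windows side by side across the top of the screen.
for col, name in enumerate(['raw', 'threshold', 'edges']):
    create_and_position_window(name, col * cam_width, 0)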
Example #13
import sys, os
from opencv import cv
from opencv import highgui

print "to use: python houghcircles.py imagefile.jpg minRadius maxRadius"

# first, create the necessary window
highgui.cvStartWindowThread()

highgui.cvNamedWindow('GrayScale', highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow('Canny', highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow('Image Display Window', highgui.CV_WINDOW_AUTOSIZE)

# move the new window to a better place

highgui.cvMoveWindow ('GrayScale', 100, 10)
highgui.cvMoveWindow ('Canny', 200, 10)
highgui.cvMoveWindow ('Image Display Window', 10, 10)

#load image
image = highgui.cvLoadImage(sys.argv[1])

#create image arrays
grayimage = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
cannyedges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)


#convert to grayscale
cv.cvCvtColor(image, grayimage, cv.CV_BGR2GRAY)
#Canny
#Canny(image, edges, threshold1, threshold2, aperture_size=3) = None
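The snippet stops just before the edge-detection step it documents. A hedged continuation under the same legacy bindings (not part of the original; thresholds 50/150 are illustrative, and the script presumably goes on to run the Hough circle transform) might look like:

# Run Canny edge detection on the grayscale image and show the results
# in the windows created above.
cv.cvCanny(grayimage, cannyedges, 50, 150, 3)

highgui.cvShowImage('GrayScale', grayimage)
highgui.cvShowImage('Canny', cannyedges)
highgui.cvShowImage('Image Display Window', image)
highgui.cvWaitKey(0)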
Example #14
def main(args):
	global capture
	global hmax, hmin
	highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Saturation', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvMoveWindow('Camera', 0, 10)
	highgui.cvMoveWindow('Hue', 0, 350)
	highgui.cvMoveWindow('Saturation', 360, 10)
	highgui.cvMoveWindow('Value', 360, 350)
	highgui.cvMoveWindow('Laser', 700, 40)

	highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
	highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin);
	highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax);
	highgui.cvCreateTrackbar("smin Trackbar","Saturation",smin,255, change_smin);
	highgui.cvCreateTrackbar("smax Trackbar","Saturation",smax,255, change_smax);
	highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
	highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);

	print "grabbing camera"
	capture = highgui.cvCreateCameraCapture(0)
	print "found camera"
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, iwidth)
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, iheight)

	frame = highgui.cvQueryFrame(capture)
	frameSize = cv.cvGetSize(frame)

	hsv = cv.cvCreateImage(frameSize,8,3)
	mask = cv.cvCreateImage(frameSize,8,1)
	hue = cv.cvCreateImage(frameSize,8,1)
	saturation = cv.cvCreateImage(frameSize,8,1)
	value = cv.cvCreateImage(frameSize,8,1)
	laser = cv.cvCreateImage(frameSize,8,1)
	
	while 1:
		frame = highgui.cvQueryFrame(capture)

		cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)	
		#cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
		cv.cvSplit(hsv,hue,saturation,value,None)
	

		#print hmin, hmax
		cv.cvInRangeS(hue,cv.cvScalar(hmin),cv.cvScalar(hmax),hue)
		cv.cvInRangeS(saturation,cv.cvScalar(smin),cv.cvScalar(smax),saturation)
		cv.cvInRangeS(value,cv.cvScalar(vmin),cv.cvScalar(vmax),value)
		
		#cv.cvInRangeS(hue,cv.cvScalar(0),cv.cvScalar(180),hue)

		cv.cvAnd(hue, value, laser)
		#cv.cvAnd(laser, value, laser)
		
		# stupid filter
		#removeErrantPoints(laser)

		cenX,cenY =  averageWhitePoints(laser)

		px = iwidth/2 - cenX
		dis = 57.18832855 / ( px - 5.702350176) + .05753797721  

		print cenX,px,dis 
		draw_target(frame,cenX,cenY)
		#draw_target(frame,200,1)
		
		highgui.cvShowImage('Hue',hue)
		highgui.cvShowImage('Camera',frame)
		highgui.cvShowImage('Saturation',saturation)
		highgui.cvShowImage('Value',value)
		highgui.cvShowImage('Laser',laser)

		highgui.cvWaitKey(10)
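For reference, the trackbar-driven HSV thresholding above maps roughly onto the modern cv2 API as in the sketch below (a sketch only, not a drop-in replacement; the capture/display loop is as in the earlier cv2 example, and the ranges are illustrative):

import cv2

hmin, hmax = 0, 180  # illustrative starting range

def change_hmin(v):
    global hmin
    hmin = v

def change_hmax(v):
    global hmax
    hmax = v

cv2.namedWindow('Hue', cv2.WINDOW_AUTOSIZE)
cv2.moveWindow('Hue', 0, 350)
cv2.createTrackbar('hmin', 'Hue', hmin, 180, change_hmin)
cv2.createTrackbar('hmax', 'Hue', hmax, 180, change_hmax)

def threshold_hue(frame):
    # Mirrors cvCvtColor + cvSplit + cvInRangeS above: keep only pixels whose
    # hue falls inside [hmin, hmax], leaving S and V unconstrained.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, (hmin, 0, 0), (hmax, 255, 255))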
Example #15
def create_and_position_window(name, xpos, ypos):
    ''' a function to create a named window (from name),
        and place it on the screen at (xpos, ypos) '''
    highgui.cvNamedWindow(name, highgui.CV_WINDOW_AUTOSIZE) # create the window
    highgui.cvResizeWindow(name, cam_width, cam_height) # resize it
    highgui.cvMoveWindow(name, xpos, ypos) # move it to (xpos,ypos) on the screen
Example #16
def main(args):
	global capture
	global hmax, hmin
	global stats, startTime

	highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Red Hue', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Green Hue', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Red Laser', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Green Laser', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvMoveWindow('Camera', 0, 10)
	highgui.cvMoveWindow('Value', 10, 420)
	highgui.cvMoveWindow('Red Laser', 360, 10)
	highgui.cvMoveWindow('Green Laser', 360, 360)
	highgui.cvMoveWindow('Red Hue',700, 10 )
	highgui.cvMoveWindow('Green Hue',700, 420) 

	highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
	highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
	highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);
	highgui.cvCreateTrackbar("red hmin Trackbar","Red Hue",red_hmin,180, change_red_hmin);
	highgui.cvCreateTrackbar("red hmax Trackbar","Red Hue",red_hmax,180, change_red_hmax);
	highgui.cvCreateTrackbar("green hmin Trackbar","Green Hue",green_hmin,180, change_green_hmin);
	highgui.cvCreateTrackbar("green hmax Trackbar","Green Hue",green_hmax,180, change_green_hmax);

	print "grabbing camera"
	capture = highgui.cvCreateCameraCapture(0)
	print "found camera"
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, iwidth)
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, iheight)

	frame = highgui.cvQueryFrame(capture)
	frameSize = cv.cvGetSize(frame)

	hsv = cv.cvCreateImage(frameSize,8,3)
	mask = cv.cvCreateImage(frameSize,8,1)
	red_hue = cv.cvCreateImage(frameSize,8,1)
	green_hue = cv.cvCreateImage(frameSize,8,1)
	saturation = cv.cvCreateImage(frameSize,8,1)
	value = cv.cvCreateImage(frameSize,8,1)
	red_laser = cv.cvCreateImage(frameSize,8,1)
	green_laser = cv.cvCreateImage(frameSize,8,1)
	turret = FuzzyController(frameSize.width,frameSize.height,True)	
	
	while 1:
		frame = highgui.cvQueryFrame(capture)

		cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)	
		cv.cvSplit(hsv,red_hue,saturation,value,None)
		cv.cvSplit(hsv,green_hue,saturation,value,None)
	
		cv.cvInRangeS(red_hue, cv.cvScalar(red_hmin), cv.cvScalar(red_hmax), red_hue)
		cv.cvInRangeS(green_hue, cv.cvScalar(green_hmin), cv.cvScalar(green_hmax), green_hue)
		cv.cvInRangeS(value, cv.cvScalar(vmin), cv.cvScalar(vmax), value)

		cv.cvAnd(red_hue, value, red_laser)
		cv.cvAnd(green_hue, value, green_laser)

		green_cenX,green_cenY =  averageWhitePoints(green_laser)
		draw_target(frame, green_cenX, green_cenY, "GREEN")
		red_cenX, red_cenY = averageWhitePoints(red_laser)
		draw_target(frame, red_cenX, red_cenY, "RED")
		
		if(green_cenX >= 0 and green_cenY >= 0):# and move_count <= 0):
			turret.update(green_cenX,green_cenY)
		
		highgui.cvShowImage('Camera',frame)
		highgui.cvShowImage('Red Hue', red_hue)
		highgui.cvShowImage('Green Hue', green_hue)
		highgui.cvShowImage('Value',value)
		highgui.cvShowImage('Red Laser',red_laser)
		highgui.cvShowImage('Green Laser',green_laser)

		if stats:
			printRunningStats((green_cenX, green_cenY), (red_cenX, red_cenY))

		k = highgui.cvWaitKey(10)
		if k == '\x1b' or k == 'q':
			sys.exit()
		if k == 'p':
			if stats:
				printTotalStats()
				stats = False
			else:
				startTime = time()
				stats = True
Example #17
import sys, os
from opencv import cv
from opencv import highgui

print "to use: python houghcircles.py imagefile.jpg minRadius maxRadius"

# first, create the necessary window
highgui.cvStartWindowThread()

highgui.cvNamedWindow("GrayScale", highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow("Canny", highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow("Image Display Window", highgui.CV_WINDOW_AUTOSIZE)

# move the new window to a better place

highgui.cvMoveWindow("GrayScale", 100, 10)
highgui.cvMoveWindow("Canny", 200, 10)
highgui.cvMoveWindow("Image Display Window", 10, 10)

# load image
image = highgui.cvLoadImage(sys.argv[1])

# create image arrays
grayimage = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
cannyedges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)


# convert to grayscale
cv.cvCvtColor(image, grayimage, cv.CV_BGR2GRAY)
# Canny
# Canny(image, edges, threshold1, threshold2, aperture_size=3) = None
Example #18
def main(args):
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Satuation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255,
                             change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Satuation", smin, 255,
                             change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Satuation", smax, 255,
                             change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                 240)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    satuation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv, hue, satuation, value, None)

        cv.cvInRangeS(hue, hmin, hmax, hue)
        cv.cvInRangeS(satuation, smin, smax, satuation)
        cv.cvInRangeS(value, vmin, vmax, value)
        #cv.cvInRangeS(hue,0,180,hue)

        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)

        cenX, cenY = averageWhitePoints(laser)
        #print cenX,cenY
        draw_target(frame, cenX, cenY)
        #draw_target(frame,200,1)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Satuation', satuation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)

        k = highgui.cvWaitKey(10)
        if k == " ":
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            sys.exit()
Example #19
def main(args):
        global capture
        global hmax, hmin
        highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvMoveWindow('Camera', 0, 10)
        highgui.cvMoveWindow('Hue', 0, 350)
        highgui.cvMoveWindow('Satuation', 360, 10)
        highgui.cvMoveWindow('Value', 360, 350)
        highgui.cvMoveWindow('Laser', 700, 40)

        highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
        highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin);
        highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax);
        highgui.cvCreateTrackbar("smin Trackbar","Satuation",smin,255, change_smin);
        highgui.cvCreateTrackbar("smax Trackbar","Satuation",smax,255, change_smax);
        highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
        highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);

        print "grabbing camera"
        capture = highgui.cvCreateCameraCapture(0)
        print "found camera"
        highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
        highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

        frame = highgui.cvQueryFrame(capture)
        frameSize = cv.cvGetSize(frame)

        hsv = cv.cvCreateImage(frameSize,8,3)
        mask = cv.cvCreateImage(frameSize,8,1)
        hue = cv.cvCreateImage(frameSize,8,1)
        satuation = cv.cvCreateImage(frameSize,8,1)
        value = cv.cvCreateImage(frameSize,8,1)
        laser = cv.cvCreateImage(frameSize,8,1)
        
        while 1:
                frame = highgui.cvQueryFrame(capture)

                cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)        
                #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
                cv.cvSplit(hsv,hue,satuation,value,None)
        
                cv.cvInRangeS(hue,hmin,hmax,hue)
                cv.cvInRangeS(satuation,smin,smax,satuation)
                cv.cvInRangeS(value,vmin,vmax,value)
                #cv.cvInRangeS(hue,0,180,hue)

                cv.cvAnd(hue, value, laser)
                #cv.cvAnd(laser, value, laser)
                
                cenX,cenY =  averageWhitePoints(laser)
                #print cenX,cenY
                draw_target(frame,cenX,cenY)
                #draw_target(frame,200,1)
                
                highgui.cvShowImage('Camera',frame)
                highgui.cvShowImage('Hue',hue)
                highgui.cvShowImage('Satuation',satuation)
                highgui.cvShowImage('Value',value)
                highgui.cvShowImage('Laser',laser)

                k = highgui.cvWaitKey(10)
                if k == " ":
                  highgui.cvDestroyAllWindows()
                  highgui.cvReleaseCapture (capture)
                  sys.exit()
Example #20
def main(args):
	global capture
	global hmax, hmin
	highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvMoveWindow('Camera', 0, 10)
	highgui.cvMoveWindow('Hue', 0, 350)
	highgui.cvMoveWindow('Satuation', 360, 10)
	highgui.cvMoveWindow('Value', 360, 350)
	highgui.cvMoveWindow('Laser', 700, 40)

	highgui.cvCreateTrackbar("Brightness Trackbar","Camera",0,255, change_brightness);
	highgui.cvCreateTrackbar("hmin Trackbar","Hue",hmin,180, change_hmin);
	highgui.cvCreateTrackbar("hmax Trackbar","Hue",hmax,180, change_hmax);
	highgui.cvCreateTrackbar("smin Trackbar","Satuation",smin,255, change_smin);
	highgui.cvCreateTrackbar("smax Trackbar","Satuation",smax,255, change_smax);
	highgui.cvCreateTrackbar("vmin Trackbar","Value",vmin,255, change_vmin);
	highgui.cvCreateTrackbar("vmax Trackbar","Value",vmax,255, change_vmax);

	print "grabbing camera"
	capture = highgui.cvCreateCameraCapture(0)
	print "found camera"
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
	highgui.cvSetCaptureProperty(capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

	frame = highgui.cvQueryFrame(capture)
	frameSize = cv.cvGetSize(frame)

	hsv = cv.cvCreateImage(frameSize,8,3)
	mask = cv.cvCreateImage(frameSize,8,1)
	hue = cv.cvCreateImage(frameSize,8,1)
	satuation = cv.cvCreateImage(frameSize,8,1)
	value = cv.cvCreateImage(frameSize,8,1)
	laser = cv.cvCreateImage(frameSize,8,1)
	turret = FuzzyController(frameSize.width,frameSize.height,True)	
	move_count = 0
	while 1:
		frame = highgui.cvQueryFrame(capture)

		cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)	
		#cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
		cv.cvSplit(hsv,hue,satuation,value,None)
	
		cv.cvInRangeS(hue,cv.cvScalar(hmin),cv.cvScalar(hmax),hue)
		cv.cvInRangeS(satuation,cv.cvScalar(smin),cv.cvScalar(smax),satuation)
		cv.cvInRangeS(value,cv.cvScalar(vmin),cv.cvScalar(vmax),value)
		#cv.cvInRangeS(hue,0,180,hue)

		cv.cvAnd(hue, value, laser)
		#cv.cvAnd(laser, value, laser)
		
		cenX,cenY =  averageWhitePoints(laser)
		#print cenX,cenY
		draw_target(frame,cenX,cenY)
	
		if(cenX != 0 and cenY != 0):# and move_count <= 0):
			turret.update(cenX,cenY,False)
		"""
			turret.reset()
			move_count = 3	
			if(cenX < 100):
				turret.left(20)
			elif(cenX > 200):
				turret.right(20)
		
			if(cenY < 80):
				turret.up(40)
			elif(cenY > 170):
				print "DOWN please.."
				turret.down(40)
			print cenY
		"""
		#move_count -= 1
		#draw_target(frame,200,1)
		
		highgui.cvShowImage('Camera',frame)
		highgui.cvShowImage('Hue',hue)
		highgui.cvShowImage('Satuation',satuation)
		highgui.cvShowImage('Value',value)
		highgui.cvShowImage('Laser',laser)

		k = highgui.cvWaitKey(10)
		if k == 'q':
			sys.exit()
Example #21
if __name__ == '__main__':

    # a small welcome
    print "OpenCV Python capture video"

    # first, create the necessary window
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow ('Color Segmentation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvStartWindowThread()
    highgui.cvNamedWindow ('Canny', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow ('Camera', 10, 10)
    
    try:
        # try to get the device number from the command line
        device = int (sys.argv [1])

        # got it ! so remove it from the arguments
        del sys.argv [1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len (sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture (device)
        highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
Example #22
from opencv import highgui

if __name__ == '__main__':
	print "HOWDY, welcome to the webcam proggy"

	# first, create the necessary windows
	highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('HUE', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('SATURATION', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('VALUE', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('RED', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('GREEN', highgui.CV_WINDOW_AUTOSIZE)
	highgui.cvNamedWindow('BLUE', highgui.CV_WINDOW_AUTOSIZE)

	# move the new window to a better place
	highgui.cvMoveWindow('Camera', 0, 40)
	highgui.cvMoveWindow('HUE', 0, 400)
	highgui.cvMoveWindow('SATURATION', 330, 40)
	highgui.cvMoveWindow('VALUE', 330, 400)
	highgui.cvMoveWindow('RED', 660, 40)
	highgui.cvMoveWindow('GREEN', 660, 400)
	highgui.cvMoveWindow('BLUE', 990, 40)

	capture = highgui.cvCreateCameraCapture(0)

	# set the wanted image size from the camera
	highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
	highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

	# check that capture device is OK
	if not capture:
Example #23
def run(exposure, video=None, display=False, debug=False):
    if display:
        hg.cvNamedWindow("video", 1)
        hg.cvMoveWindow("video",   0,   0)

    if debug:
        hg.cvNamedWindow('right',       1)
        hg.cvMoveWindow("right", 800,   0)
        hg.cvNamedWindow("thresholded", 1)
        hg.cvNamedWindow('motion',      1)
        hg.cvNamedWindow('intensity',   1)

        hg.cvMoveWindow("thresholded", 800, 0)
        hg.cvMoveWindow("intensity",   0,   600)
        hg.cvMoveWindow("motion",      800, 600)

    if video is None:
        #video    = cam.VidereStereo(0, gain=96, exposure=exposure)
        video    = cam.StereoFile('measuring_tape_red_left.avi','measuring_tape_red_right.avi')

    frames = video.next()
    detector       = LaserPointerDetector(frames[0], LaserPointerDetector.SUN_EXPOSURE, 
                                            use_color=False, use_learning=True)
    detector_right = LaserPointerDetector(frames[1], LaserPointerDetector.SUN_EXPOSURE, 
                                            use_color=False, use_learning=True, classifier=detector.classifier)
    stereo_cam     = cam.KNOWN_CAMERAS['videre_stereo2']

    for i in xrange(10):
        frames = video.next()
        detector.detect(frames[0])
        detector_right.detect(frames[1])

    lt = cv.cvCreateImage(cv.cvSize(640,480), 8, 3)
    rt = cv.cvCreateImage(cv.cvSize(640,480), 8, 3)
    for l, r in video:
        start_time = time.time()
        #l = stereo_cam.camera_left.undistort_img(l)
        #r = stereo_cam.camera_right.undistort_img(r)
        cv.cvCopy(l, lt)
        cv.cvCopy(r, rt)
        l = lt
        r = rt
        undistort_time = time.time()

        _, _, right_cam_detection, stats = detector_right.detect(r)
        if debug:
            draw_blobs(r, stats)
            draw_detection(r, right_cam_detection)
            hg.cvShowImage('right', r)

        image, combined, left_cam_detection, stats = detector.detect(l)
        detect_time = time.time()

        if debug: 
            motion, intensity = detector.get_motion_intensity_images()
            show_processed(l, [combined, motion, intensity], left_cam_detection, stats, detector)
        elif display:
            #draw_blobs(l, stats)
            draw_detection(l, left_cam_detection)
            hg.cvShowImage('video', l)
            hg.cvWaitKey(10)

        if right_cam_detection != None and left_cam_detection != None:
            x  = np.matrix(left_cam_detection['centroid']).T
            xp = np.matrix(right_cam_detection['centroid']).T
            result = stereo_cam.triangulate_3d(x, xp)
            print '3D point located at', result['point'].T, 
            print 'distance %.2f error %.3f' % (np.linalg.norm(result['point']),  result['error'])
        triangulation_time = time.time()

        diff = time.time() - start_time
        print 'Main: Running at %.2f fps, took %.4f s' % (1.0 / diff, diff)
Example #24
    ############################ GUI Config #####################################
    if GUI == 1:

        # first, create the necessary windows
        highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Originale', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('Binarisation', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('1-without background',
                              highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('2-amplifie', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('3-lisser-Smooth', highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvNamedWindow('4-lisser-And', highgui.CV_WINDOW_AUTOSIZE)

        # move the new window to a better place
        highgui.cvMoveWindow('Camera', 0, 0)
        highgui.cvMoveWindow('Binarisation', 0, 280)
        highgui.cvMoveWindow('1-without background', 320, 0)
        highgui.cvMoveWindow('2-amplifie', 320, 280)
        highgui.cvMoveWindow('3-lisser-Smooth', 640, 0)
        highgui.cvMoveWindow('4-lisser-And', 640, 280)

        # trackbars for adjusting the tuning variables
        highgui.cvCreateTrackbar("nombre division", "Camera", get_nb_div(), 6,
                                 set_nb_div)
        highgui.cvCreateTrackbar("seuil binarisation", "Binarisation",
                                 get_seuil(), 255, set_seuil)
        highgui.cvCreateTrackbar("gain", "2-amplifie", get_gain(), 100,
                                 set_gain)
        #highgui.cvCreateTrackbar ("param lissage", "3-lisser", 3, 3, set_param_liss)
        #highgui.cvCreateTrackbar ("param 2 lissage", "3-lisser", 1, 10, set_param2_liss)
Example #25
def learn_run(exposure = LaserPointerDetector.SUN_EXPOSURE, num_examples_to_collect=200, display_during_run = True):
    hg.cvNamedWindow("video",       1)
    hg.cvNamedWindow("thresholded", 1)
    hg.cvNamedWindow('motion',      1)
    hg.cvNamedWindow('intensity',   1)

    hg.cvMoveWindow("video",       0,   0)
    hg.cvMoveWindow("thresholded", 800, 0)

    hg.cvMoveWindow("intensity",   0,   600)
    hg.cvMoveWindow("motion",      800, 600)

    video     = cam.VidereStereo(0, gain=96, exposure=exposure)
    frames    = video.next()
    detector  = LaserPointerDetector(frames[0], exposure=exposure, 
                                    dataset=PatchClassifier.DEFAULT_DATASET_FILE,
                                    use_color=False, use_learning=False)
    detector2 = LaserPointerDetector(frames[1], exposure=exposure, 
                                    dataset=PatchClassifier.DEFAULT_DATASET_FILE,
                                    use_color=False, use_learning=False)

    def append_examples_to_file(dataset, file = PatchClassifier.DEFAULT_DATASET_FILE):
        try:
            loaded_set = load_pickle(file)
            dataset.append(loaded_set)
        except IOError:
            pass
        dump_pickle(dataset, file)
        print 'Saved examples!'

    #Gather positive examples from laser detector
    if confirmation_prompt('gather positive examples?'):
        print 'Lets gather some positive examples... we need', num_examples_to_collect, 'examples'
        positive_examples_for_classifier = []
        count_down(0)
        for i in xrange(10):
            frames = video.next()
            detector.detect(frames[0])
            detector2.detect(frames[1])

        for img in video:
            image                 = None
            combined              = None
            motion                = None
            intensity             = None
            laser_blob            = None
            intensity_motion_blob = None

            for raw_image, detect in zip(img, [detector, detector2]):
                before = time.time()
                image, combined, laser_blob, intensity_motion_blob = detect.detect(raw_image)
                diff = time.time() - before
                #print 'took %.2f seconds to run or %.2f fps' % (diff, 1.0/diff)
                if laser_blob != None:
                    instance = blob_to_input_instance(image, laser_blob)
                    if instance is not None:
                        positive_examples_for_classifier.append(instance)
                        print 'got', len(positive_examples_for_classifier), 'instances'
                motion, intensity = detect.get_motion_intensity_images()

            show_processed(image, [combined, motion, intensity], laser_blob, intensity_motion_blob, detector2)
            if len(positive_examples_for_classifier) > num_examples_to_collect:
                break
        positive_instances_dataset = matrix_to_dataset(ut.list_mat_to_mat(positive_examples_for_classifier, axis=1))
        append_examples_to_file(positive_instances_dataset)

    if confirmation_prompt('gather negative examples?'):
        #Gather negative examples from laser detector
        print 'lets gather some negative examples... we need', num_examples_to_collect,' examples'
        negative_examples_for_classifier = []
        count_down(10)
        for i in xrange(10):
            frames = video.next()
            detector.detect(frames[0])
            detector2.detect(frames[1])

        for img in video:
            image                 = None
            combined              = None
            motion                = None
            intensity             = None
            laser_blob            = None
            intensity_motion_blob = None
            for raw_image, detect in zip(img, [detector, detector2]):
                image, combined, laser_blob, intensity_motion_blob = detect.detect(raw_image)
                if laser_blob != None:
                    instance = blob_to_input_instance(image, laser_blob)
                    if instance is not None:
                        negative_examples_for_classifier.append(instance)
                        print 'got', len(negative_examples_for_classifier), 'instances'
                motion, intensity = detect.get_motion_intensity_images()

            show_processed(image, [combined, motion, intensity], laser_blob, intensity_motion_blob, detector2)
            if len(negative_examples_for_classifier) > (num_examples_to_collect*2):
                break
        negative_instances_dataset = matrix_to_dataset(ut.list_mat_to_mat(negative_examples_for_classifier, axis=1))
        append_examples_to_file(negative_instances_dataset)

    if confirmation_prompt('run classifier?'):
        run(exposure, video = video, display=display_during_run)
Example #26
    # a small welcome
    print "OpenCV Python wrapper test"
    print "OpenCV version: %s (%d, %d, %d)" % (
        cv.CV_VERSION,
        cv.CV_MAJOR_VERSION,
        cv.CV_MINOR_VERSION,
        cv.CV_SUBMINOR_VERSION,
    )

    # first, create the necessary windows
    highgui.cvNamedWindow("Camera", highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow("Histogram", highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow("Camera", 10, 40)
    highgui.cvMoveWindow("Histogram", 10, 270)

    try:
        # try to get the device number from the command line
        device = int(sys.argv[1])

        # got it ! so remove it from the arguments
        del sys.argv[1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len(sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture(device)
Example #27
if __name__ == '__main__':

    # a small welcome
    print "OpenCV Python wrapper test"
    print "OpenCV version: %s (%d, %d, %d)" % (cv.CV_VERSION,
                                               cv.CV_MAJOR_VERSION,
                                               cv.CV_MINOR_VERSION,
                                               cv.CV_SUBMINOR_VERSION)

    # first, create the necessary windows
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow ('Histogram', highgui.CV_WINDOW_AUTOSIZE)

    # move the new window to a better place
    highgui.cvMoveWindow ('Camera', 10, 40)
    highgui.cvMoveWindow ('Histogram', 10, 270)

    try:
        # try to get the device number from the command line
        device = int (sys.argv [1])

        # got it ! so remove it from the arguments
        del sys.argv [1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0

    if len (sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture (device)
Example #28

def cb_val(v):
    global val_cutoff
    val_cutoff = v


# windows
highgui.cvNamedWindow("Input")
highgui.cvNamedWindow("Histogram - Hue")
highgui.cvCreateTrackbar("Threshold", "Histogram - Hue", hue_cutoff, int(sample_pixels), cb_hue)
highgui.cvNamedWindow("Histogram - Value")
highgui.cvCreateTrackbar("Threshold", "Histogram - Value", val_cutoff, int(sample_pixels), cb_val)
highgui.cvNamedWindow("Obstacles")

highgui.cvMoveWindow("Input", 0, 0)
highgui.cvMoveWindow("Histogram - Hue", 0, size.height + 75)
highgui.cvMoveWindow("Histogram - Value", int(h_bins * scalewidth) + 25, size.height + 75)
highgui.cvMoveWindow("Obstacles", size.width + 25, 0)


def hsv2rgb(hue):
    # convert the hue value to the corresponding rgb value

    sector_data = [[0, 2, 1], [1, 2, 0], [1, 0, 2], [2, 0, 1], [2, 1, 0], [0, 1, 2]]
    hue *= 0.1 / 3
    sector = cv.cvFloor(hue)
    p = cv.cvRound(255 * (hue - sector))
    if sector & 1:
        p ^= 255