Ejemplo n.º 1
0
def video_file(file):
    """Yield successive frames from the video at *file*.

    Stops when cvQueryFrame returns None (end of stream or read error).
    """
    cap = hg.cvCreateFileCapture(file)
    f = hg.cvQueryFrame(cap)

    # `is not None` is the idiomatic end-of-stream test (identity, not
    # equality, against the None sentinel).
    while f is not None:
        yield f
        f = hg.cvQueryFrame(cap)
Ejemplo n.º 2
0
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 10, 10)
    device = 0  #use first device found
    capture = highgui.cvCreateCameraCapture(device)
    frame = highgui.cvQueryFrame(capture)
    frame_size = cv.cvGetSize(frame)
    fps = 30

    while 1:

        frame = highgui.cvQueryFrame(capture)

        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage('Camera', frame)

        # handle events
        k = highgui.cvWaitKey(5)

        if k % 0x100 == 27:
            # user has press the ESC key, so exit
            quit()
Ejemplo n.º 3
0
 def contFrames(self, capture):
     """Return the number of frames readable from *capture*.

     The original implementation counted the terminating None read as a
     frame (reporting 1 even for an empty capture); this counts only
     successful reads.  Also replaces the Python-2-only `<>` operator.
     """
     total_frames = 0
     # Each successful cvQueryFrame is exactly one frame; stop at EOF.
     while cvQueryFrame(capture) is not None:
         total_frames += 1
     return total_frames
Ejemplo n.º 4
0
    def process(self, videofile, progress):
        """Extract a per-frame 256-bin grayscale histogram from *videofile*.

        progress: callable(ratio[, label]) -> truthy to continue; a falsy
        return aborts the extraction early.

        Returns (hists, fps): hists has shape (nbframes, 256), fps is the
        stream's reported frame rate.

        Raises Exception when the video file cannot be opened.
        """
        progress(0, _("Extracting histogram"))
        video = hg.cvCreateFileCapture(str(videofile).encode(sys.getfilesystemencoding()))
        if not video:
            raise Exception("Could not open video file")
        histo = cv.cvCreateHist([256], cv.CV_HIST_ARRAY, [[0, 256]], 1)
        frame = hg.cvQueryFrame(video)
        # Single-channel scratch image reused for every frame.
        frame_gray = cv.cvCreateImage(cv.cvGetSize(frame), frame.depth, 1)
        hists = []
        nbframes = 0

        fps = hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_FPS)
        while frame:
            if not progress(hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_POS_AVI_RATIO)):
                break
            hg.cvConvertImage(frame, frame_gray)
            cv.cvCalcHist(frame_gray, histo, 0, None)
            # range(256), not range(255): the histogram has 256 bins and the
            # original silently dropped the last one.
            h = [cv.cvGetReal1D(histo.bins, i) for i in range(256)]
            h = numpy.array(h, dtype='int32')
            hists.append(h)
            frame = hg.cvQueryFrame(video)
            nbframes += 1

        hists = numpy.array(hists)
        return hists.reshape(nbframes, -1), fps
Ejemplo n.º 5
0
 def contFrames(self, capture):
     """Count the frames available from *capture*.

     Fixes two defects in the original: the terminating None read was
     counted as a frame (so an empty capture reported 1 and every stream
     was over-counted by one), and the `<>` operator is not valid Python 3.
     """
     total_frames = 0
     while cvQueryFrame(capture) is not None:
         total_frames += 1
     return total_frames
Ejemplo n.º 6
0
    def apply_filter(self, image, _config):
        """Paste a live webcam thumbnail, framed with a border, onto *image*."""
        # Throw one frame away so the capture buffer serves a current image.
        highgui.cvQueryFrame(self.camera)
        raw_frame = highgui.cvQueryFrame(self.camera)
        webcam = opencv.adaptors.Ipl2PIL(raw_frame)

        # Scale down to the configured percentage of the captured size.
        target_w = int((webcam.size[0] / 100.0) * _config['width'])
        target_h = int((webcam.size[1] / 100.0) * _config['height'])
        webcam.thumbnail((target_w, target_h))

        ## Border
        from ImageDraw import Draw
        pen = Draw(webcam)
        colour = _config['border_colour']
        w, h = webcam.size

        # top edge
        pen.line(((0, 0), (w, 0)), fill=colour)
        # left edge
        pen.line(((0, 0), (0, h)), fill=colour)
        # right edge
        pen.line(((w - 1, 0), (w - 1, h)), fill=colour)
        # bottom edge
        pen.line(((0, h - 1), (w, h - 1)), fill=colour)

        image.paste(webcam, (10, 10))
        return image
Ejemplo n.º 7
0
    def __init__(self,
                 processFunction=None,
                 title="Video Capture Player",
                 show=True,
                 **argd):
        """Set up the display window (when *show*), open camera 0, and grab
        an initial snapshot frame.

        processFunction: optional callable applied to frames elsewhere.
        title: window title.
        show: create the named window when True.

        Raises SystemExit when the camera device cannot be opened.
        """
        self.__dict__.update(**argd)
        super(VideoCapturePlayer, self).__init__(**argd)
        t_begin = time.time()
        self.processFunction = processFunction
        self.title = title
        self.show = show

        if self.show is True:
            self.display = hg.cvNamedWindow(self.title)

        try:
            self.camera = hg.cvCreateCameraCapture(0)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate instead of being swallowed.
            print("Couldn't open camera device, is it connected?")
            hg.cvDestroyWindow(title)
            raise SystemExit

        # Take a frame to get props and use in testing
        self.snapshot = cv.cvCloneMat(hg.cvQueryFrame(self.camera))
        # check that we got an image, otherwise try again.
        for i in xrange(100):
            if self.snapshot is not None: break
            self.snapshot = hg.cvQueryFrame(self.camera)
Ejemplo n.º 8
0
def get_image(camera):
    testimage = highgui.cvQueryFrame(camera)	
    time.sleep(1)
    im = highgui.cvQueryFrame(camera)
    dir(im)
    print "l'image est prise"
    return opencv.adaptors.Ipl2PIL(im)
Ejemplo n.º 9
0
	def apply_filter(self, image, _config):
		"""Paste a bordered live-webcam thumbnail onto *image* and return it.

		_config keys used: 'width' and 'height' (percent of captured size)
		and 'border_colour'.
		"""
		# Discard one frame so the capture buffer serves a current image.
		highgui.cvQueryFrame(self.camera)
		im = highgui.cvQueryFrame(self.camera)		
		pilwebcam_image = opencv.adaptors.Ipl2PIL(im)

		# Shrink in place to the configured percentage of the frame size.
		pilwebcam_image.thumbnail(
			(
			int((pilwebcam_image.size[0] / 100.0) * _config['width']),
			int((pilwebcam_image.size[1] / 100.0) * _config['height'])		
			)
		)

		## Border
		from ImageDraw import Draw
		drawing = Draw(pilwebcam_image)

		# top line
		drawing.line(((0,0), (pilwebcam_image.size[0], 0)), fill = _config['border_colour'])
		# left line
		drawing.line(((0,0), (0, pilwebcam_image.size[1])), fill = _config['border_colour'])
		# right line
		drawing.line(((pilwebcam_image.size[0]-1,0), (pilwebcam_image.size[0]-1, pilwebcam_image.size[1])), fill = _config['border_colour'])
		# bottom line
		drawing.line(((0, pilwebcam_image.size[1]-1), (pilwebcam_image.size[0], pilwebcam_image.size[1]-1)), fill = _config['border_colour'])

		image.paste(pilwebcam_image, (10,10))
		return image
Ejemplo n.º 10
0
def video_file(file):
    """Generator over the frames of the video at *file*; stops when the
    capture yields no more frames."""
    cap = hg.cvCreateFileCapture(file)
    f = hg.cvQueryFrame(cap)

    # Identity comparison with None is the idiomatic sentinel test
    # (replaces `!= None`).
    while f is not None:
        yield f
        f = hg.cvQueryFrame(cap)
Ejemplo n.º 11
0
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"
    
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow ('Camera', 10, 10)
    device = 0 #use first device found
    capture = highgui.cvCreateCameraCapture (device)
    frame = highgui.cvQueryFrame (capture)
    frame_size = cv.cvGetSize (frame)
    fps = 30
        
    while 1:
        
        frame = highgui.cvQueryFrame (capture)
        
        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage ('Camera', frame)

        # handle events
        k = highgui.cvWaitKey (5)

        if k % 0x100 == 27:
            # user has press the ESC key, so exit
            quit()
Ejemplo n.º 12
0
    def next(self):
        """Return the next (left, right) frame pair.

        Raises StopIteration as soon as either stream is exhausted.
        """
        tup = (hg.cvQueryFrame(self.left), hg.cvQueryFrame(self.right))

        # `is None` (identity) replaces `== None`; the pair is done when
        # either side has no more frames.
        if tup[0] is None or tup[1] is None:
            raise StopIteration()
        return tup
Ejemplo n.º 13
0
    def next(self):
        """Yield the next synchronized (left, right) frame pair, raising
        StopIteration when either capture runs out of frames."""
        tup = (hg.cvQueryFrame(self.left), hg.cvQueryFrame(self.right))

        # Identity test against None instead of the original `== None`.
        if tup[0] is None or tup[1] is None:
            raise StopIteration()
        return tup
Ejemplo n.º 14
0
    def _get_image(self, params):
        """Capture a frame, convert it to PIL, store the grayscale version
        on self._image and return it.  *params* is accepted but unused."""
        ### HACK to work around opencv problem: flush stale buffered frames
        for _ in range(5):
            highgui.cvQueryFrame(self._camera)
        ###

        raw = highgui.cvQueryFrame(self._camera)
        pil_image = opencv.adaptors.Ipl2PIL(raw)
        self._image = ImageOps.grayscale(pil_image)
        return self._image
Ejemplo n.º 15
0
    def _get_image(self,params):
        """Grab a frame from self._camera, cache its grayscale PIL version
        on self._image and return it.

        params is accepted but not used by this method.
        """
        #while 1:

        ### HACK to work around opencv problem
        # Query and discard several frames so the driver's internal buffer
        # does not hand back a stale image.
        for i in range(5):
            highgui.cvQueryFrame(self._camera)
        ###

        im = highgui.cvQueryFrame(self._camera)
        #im = opencv.cvGetMat(im)
        im = opencv.adaptors.Ipl2PIL(im)
            # Figure out what is happening!
        #im = copy.copy(im)
        self._image = ImageOps.grayscale(im)
        return self._image
Ejemplo n.º 16
0
    def onCapture(self, event):
        """Snap a photo, save it under Photos/, and show it in the panel."""
        # First read flushes a possibly stale frame; second is the photo.
        self.buffer = highgui.cvQueryFrame(self.camera)
        self.im = highgui.cvQueryFrame(self.camera)
        self.im = opencv.cvGetMat(self.im)
        self.im = opencv.adaptors.Ipl2PIL(self.im)

        photo_name = 'img' + str(self.image_counter) + '.jpg'
        photo_path = 'Photos/' + photo_name
        self.im.save(photo_path)

        loaded = wx.Image(photo_path)
        # (308, 267) is the size of the picture after getting the
        # size of the panel
        scaled = loaded.Rescale(308, 267)
        bitmap = scaled.ConvertToBitmap()
        wx.StaticBitmap(self.Photoshot_Panel, -1, bitmap)
        self.image_counter += 1
def main():
    """Guardian control loop: track a target on camera and steer the robot
    toward it until any key is pressed."""
    # Initialization
    highgui.cvNamedWindow("Guardian", 1)
    signal.signal(signal.SIGINT, handler)

    # Stage
    #robot = playerc.playerc_client(None, "localhost", 6665)
    # Corobot
    robot = playerc.playerc_client(None, "corobot-w.wifi.wpi.edu", 6665)
    robot.connect()

    p2dproxy = playerc.playerc_position2d(robot, 0)
    p2dproxy.subscribe(playerc.PLAYERC_OPEN_MODE)
    p2dproxy.get_geom()
    robot.read()

    # Grab a frame, let detection update the global `speed`, command motion.
    while True:
        image = highgui.cvQueryFrame(camera)
        detectObject(image)
        p2dproxy.set_cmd_vel(speed[0], 0, speed[1], 0)

        draw_gui(image)
        highgui.cvShowImage("Guardian", image)

        # Any keypress ends the loop.
        if highgui.cvWaitKey(20) != -1:
            break

    highgui.cvDestroyWindow("Guardian")
    p2dproxy.set_cmd_vel(0, 0, 0, 0)
Ejemplo n.º 18
0
	def get_frame(self, face_rec = False):
		"""Grab one frame from the capture device.

		face_rec: when True, run every Haar cascade in self.haarfiles over
		the frame, outline each detection in blue, and report whether
		anything matched.

		Returns (image, face_matches).
		"""
		image = highgui.cvQueryFrame(self.device)
		face_matches = False
		
		if face_rec:
			
			# NOTE(review): assumes 640x480 frames -- confirm against the
			# capture configuration.
			grayscale = cv.cvCreateImage(cv.cvSize(640, 480), 8, 1)
			cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
			storage = cv.cvCreateMemStorage(0)
			cv.cvClearMemStorage(storage)
			# Equalize to make detection less sensitive to lighting.
			cv.cvEqualizeHist(grayscale, grayscale)
			
			for cascade in self.haarfiles:
				matches = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
										  cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(100,100))
			  
				if matches:
					face_matches = True
					for i in matches:
						# Blue rectangle around each detected region.
						cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
							cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
							cv.CV_RGB(0,0,255), 2, 5, 0)
			
			image = cv.cvGetMat(image)
			
		return (image, face_matches)
def get_image(camera):
    """Return the current frame of *camera* converted to a PIL image."""
    frame = highgui.cvQueryFrame(camera)
    # On some platforms (Ubuntu 8.04+) an explicit conversion is needed
    # first: frame = opencv.cvGetMat(frame)
    return opencv.adaptors.Ipl2PIL(frame)
Ejemplo n.º 20
0
    def init_camera(self):
        """Open capture device self._index, request the preferred
        resolution, probe one frame, then start streaming unless stopped.

        Side effects: sets self._device, self._resolution and self.fps.
        """
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        # Set preferred resolution
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                              self.resolution[0])
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                              self.resolution[1])

        # and get frame to check if it's ok
        frame = hg.cvQueryFrame(self._device)
        # Just set the resolution to the frame we just got, but don't use
        # self.resolution for that as that would cause an infinite recursion
        # with self.init_camera (but slowly as we'd have to always get a
        # frame).
        self._resolution = (int(frame.width), int(frame.height))

        # get fps
        self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)
        if self.fps <= 0:
            # NOTE(review): this stores a frame *period* (1/30 s) in a field
            # named fps when the driver reports no rate -- confirm that
            # downstream consumers expect a period here.
            self.fps = 1 / 30.

        if not self.stopped:
            self.start()
Ejemplo n.º 21
0
 def onIdle(self, event):
     """
     Event to grab and display a frame from the camera. (internal use).

     NOTE(review): when self.cap is None this "releases" the None capture,
     reopens the camera, then raises CameraError -- the event.Skip() after
     the raise is unreachable.  Also `== None` should be `is None`.
     """
     if self.cap == None:  #Should be cvCameraCapture instance.
         #unbind the idle instance, change to click.
         highgui.cvReleaseCapture(self.cap)  #release the old instance and
         self.cap = highgui.cvCreateCameraCapture(
             self.camera)  #try new one.
         self.displayError(self.errorBitmap, (128, 128))
         raise CameraError('Unable to open camera, retrying....')
         event.Skip()
     try:
         img = highgui.cvQueryFrame(self.cap)
     except cv2.error as e:
         raise CameraError('Error when querying for frame: {0}'.format(e))
     self._error = 0  #worked successfully
     img = opencv.cvGetMat(img)
     # Convert to RGB in place for display.
     cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
     if conf.as_bool(conf.config['webcam']['cropBars']):
         #Draw cropping region
         cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0),
                        2)
     self.displayImage(img)
     event.RequestMore()
Ejemplo n.º 22
0
def getData():
	"""Detect a red laser dot in the current camera frame.

	Returns the result of returnEllipses(contours) when at least one
	contour survives the color filtering, else None (also None when no
	frame could be read).  Uses module-level scratch images and storage.
	"""
	frame = highgui.cvQueryFrame(capture)
	if frame is None:
		return None

	# Split into per-channel planes.
	cv.cvSplit(frame, b_img, g_img, r_img, None)
	# Keep pixels that are strongly red and weakly green/blue.
	cv.cvInRangeS(r_img, 150, 255, r_img)
	cv.cvInRangeS(g_img, 0, 100, g_img)
	cv.cvInRangeS(b_img, 0, 100, b_img)

	# laser_img = red AND not-green AND not-blue.
	cv.cvAnd(r_img, g_img, laser_img)
	cv.cvAnd(laser_img, b_img, laser_img)
	# Erode then dilate (morphological opening) to drop speckle noise.
	cv.cvErode(laser_img,laser_img) #,0,2)
	cv.cvDilate(laser_img,laser_img)
		
	c_count, contours = cv.cvFindContours (laser_img, 
											storage,
											cv.sizeof_CvContour,
											cv.CV_RETR_LIST,
											cv.CV_CHAIN_APPROX_NONE,
											cv.cvPoint (0,0))
	if c_count:
		return returnEllipses(contours)
	else:
		return None
def main():
  """Guardian robot control loop.

  Opens a display window, connects to the Player server on the Corobot,
  then repeatedly grabs a camera frame, runs detection (which updates the
  module-level `speed`), commands the robot and refreshes the GUI until
  any key is pressed.
  """
  # Initialization
  highgui.cvNamedWindow("Guardian", 1)

  signal.signal(signal.SIGINT, handler)
  # Stage
  #robot = playerc.playerc_client(None, "localhost", 6665)
  # Corobot
  robot = playerc.playerc_client(None, "corobot-w.wifi.wpi.edu", 6665)

  robot.connect()

  p2dproxy = playerc.playerc_position2d(robot, 0)
  p2dproxy.subscribe(playerc.PLAYERC_OPEN_MODE)
  p2dproxy.get_geom()

  robot.read()

  while True:
    # `camera` is a module-level capture handle.
    image = highgui.cvQueryFrame(camera)
    detectObject(image)

    p2dproxy.set_cmd_vel(speed[0], 0, speed[1], 0)

    draw_gui(image)
    highgui.cvShowImage("Guardian", image)

    # Any keypress (cvWaitKey != -1) ends the loop.
    if highgui.cvWaitKey(20) != -1:
      break

  highgui.cvDestroyWindow("Guardian")
  # Stop the robot before returning.
  p2dproxy.set_cmd_vel(0, 0, 0, 0)
Ejemplo n.º 24
0
 def save(self):
     print "Save"
     img = highgui.cvQueryFrame(self.cap)
     img = opencv.cvGetMat(img)
     #No BGR => RGB conversion needed for PIL output.
     pil = opencv.adaptors.Ipl2PIL(img)  #convert to a PIL
     pil.save('out.jpg')
Ejemplo n.º 25
0
def get_image():
    img = highgui.cvQueryFrame(camera)
    # Add the line below if you need it (Ubuntu 8.04+)
    #im = opencv.cvGetMat(im)
    #convert Ipl image to PIL image
    print img
    return opencv.adaptors.Ipl2PIL(img)
Ejemplo n.º 26
0
 def get_image(self,surface=None,*argv,**argd):
     """
     Since we are using opencv there are some differences here.
     This function will return a cvmat by default (the quickest route from
     camera to your code).  If self.imageType == "pygame" the frame is
     blitted into *surface* (or, failing that, returned as a freshly
     converted pygame surface).
     """
     if self.imageType == "pygame":
         try:
             surfarray.blit_array(surface,conversionUtils.cv2SurfArray(hg.cvQueryFrame(self.capture)))
             return surface
         except Exception:
             # Narrowed from a bare `except:` (which also caught
             # KeyboardInterrupt/SystemExit).  The blit fails e.g. when
             # *surface* is None or mis-sized; fall back to a new surface.
             return surfarray.make_surface(conversionUtils.cv2SurfArray(hg.cvQueryFrame(self.capture))).convert()
     return hg.cvQueryFrame(self.capture)
Ejemplo n.º 27
0
    def init_camera(self):
        """Open the capture device at self._index, apply the preferred
        resolution, read one probe frame, and start unless stopped.

        Sets self._device, self._resolution and self.fps.
        """
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        # Set preferred resolution
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                              self.resolution[0])
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                              self.resolution[1])

        # and get frame to check if it's ok
        frame = hg.cvQueryFrame(self._device)
        # Just set the resolution to the frame we just got, but don't use
        # self.resolution for that as that would cause an infinite recursion
        # with self.init_camera (but slowly as we'd have to always get a
        # frame).
        self._resolution = (int(frame.width), int(frame.height))

        # get fps from the driver
        self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)
        if self.fps <= 0:
            # NOTE(review): stores a frame period (1/30 s) under the name
            # fps when no rate is reported -- verify consumers expect that.
            self.fps = 1 / 30.

        if not self.stopped:
            self.start()
Ejemplo n.º 28
0
    def init_camera(self):
        """Open the capture device for whichever OpenCV major version is
        installed, apply the preferred resolution, probe one frame, derive
        self._resolution and self.fps (stored as a frame *period*), and
        start streaming unless stopped.
        """
        # consts have changed locations between versions 2 and 3
        # NOTE(review): if opencvMajorVersion is outside {1, 2, 3, 4} the
        # PROPERTY_* names stay unbound and the (2, 3, 4) branch would
        # raise NameError -- confirm callers guarantee a supported version.
        if self.opencvMajorVersion in (3, 4):
            PROPERTY_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.CAP_PROP_FPS
        elif self.opencvMajorVersion == 2:
            PROPERTY_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.cv.CV_CAP_PROP_FPS
        elif self.opencvMajorVersion == 1:
            PROPERTY_WIDTH = cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv.CV_CAP_PROP_FPS

        Logger.debug('Using opencv ver.' + str(self.opencvMajorVersion))

        if self.opencvMajorVersion == 1:
            # create the device
            self._device = hg.cvCreateCameraCapture(self._index)
            # Set preferred resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])
            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            # Just set the resolution to the frame we just got, but don't use
            # self.resolution for that as that would cause an infinite
            # recursion with self.init_camera (but slowly as we'd have to
            # always get a frame).
            self._resolution = (int(frame.width), int(frame.height))
            # get fps
            self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)

        elif self.opencvMajorVersion in (2, 3, 4):
            # create the device
            self._device = cv2.VideoCapture(self._index)
            # Set preferred resolution
            self._device.set(PROPERTY_WIDTH,
                             self.resolution[0])
            self._device.set(PROPERTY_HEIGHT,
                             self.resolution[1])
            # and get frame to check if it's ok
            # NOTE(review): `ret` is not checked; if the read fails, `frame`
            # is None and frame.shape below raises -- confirm acceptable.
            ret, frame = self._device.read()

            # source:
            # http://stackoverflow.com/questions/32468371/video-capture-propid-parameters-in-opencv # noqa
            self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
            # get fps
            self.fps = self._device.get(PROPERTY_FPS)

        # Normalize: self.fps ends up holding the inter-frame delay (period).
        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps

        if not self.stopped:
            self.start()
Ejemplo n.º 29
0
    def init_camera(self):
        """Version-aware camera setup: resolve property constants for the
        installed OpenCV major version, open the device, request the
        preferred resolution, probe a frame, and record resolution and
        frame period (in self.fps) before starting.
        """
        # consts have changed locations between versions 2 and 3
        # NOTE(review): an unrecognized major version leaves PROPERTY_*
        # unbound -- the (2, 3, 4) branch would then fail with NameError.
        if self.opencvMajorVersion in (3, 4):
            PROPERTY_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.CAP_PROP_FPS
        elif self.opencvMajorVersion == 2:
            PROPERTY_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.cv.CV_CAP_PROP_FPS
        elif self.opencvMajorVersion == 1:
            PROPERTY_WIDTH = cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv.CV_CAP_PROP_FPS

        Logger.debug('Using opencv ver.' + str(self.opencvMajorVersion))

        if self.opencvMajorVersion == 1:
            # create the device
            self._device = hg.cvCreateCameraCapture(self._index)
            # Set preferred resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])
            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            # Just set the resolution to the frame we just got, but don't use
            # self.resolution for that as that would cause an infinite
            # recursion with self.init_camera (but slowly as we'd have to
            # always get a frame).
            self._resolution = (int(frame.width), int(frame.height))
            # get fps
            self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)

        elif self.opencvMajorVersion in (2, 3, 4):
            # create the device
            self._device = cv2.VideoCapture(self._index)
            # Set preferred resolution
            self._device.set(PROPERTY_WIDTH,
                             self.resolution[0])
            self._device.set(PROPERTY_HEIGHT,
                             self.resolution[1])
            # and get frame to check if it's ok
            # NOTE(review): `ret` is never checked; a failed read makes
            # frame None and frame.shape below raises.
            ret, frame = self._device.read()

            # source:
            # http://stackoverflow.com/questions/32468371/video-capture-propid-parameters-in-opencv # noqa
            self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
            # get fps
            self.fps = self._device.get(PROPERTY_FPS)

        # After this, self.fps is the delay between frames, not a rate.
        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps

        if not self.stopped:
            self.start()
Ejemplo n.º 30
0
def getImg():
	"""Capture a frame, Gaussian-blur it, and return it as a 2-D uint8
	numpy array indexed [row, column]."""
	ImgOCV = highgui.cvQueryFrame(camera)
	ImgPIL = adaptors.Ipl2PIL(ImgOCV)
	ImgPIL = ImgPIL.filter(GAUSSIAN)
	ImgRAW = ImgPIL.convert("L").tostring()
	ImgArray = numpy.frombuffer(ImgRAW, dtype=numpy.uint8)
	# PIL's .size is (width, height) but the raw buffer is row-major, so
	# the array shape must be (height, width); the original reshape
	# transposed the image.
	ImgArray = ImgArray.reshape(ImgPIL.size[::-1])
	return ImgArray
Ejemplo n.º 31
0
    def onNextFrame(self, evt):
        """Fetch the next camera frame, convert it BGR->RGB in place, cache
        it as a wx bitmap and schedule a repaint."""
        img = gui.cvQueryFrame(self.capture)
        if img:
            cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
            self.bmp = wx.BitmapFromBuffer(img.width, img.height,
                                           img.imageData)
            self.Refresh()
        evt.Skip()
Ejemplo n.º 32
0
 def get_image():
     """Return a PIL image from the module-level camera; exit the program
     when the camera cannot be read."""
     try:
         im = highgui.cvQueryFrame(camera)
         return opencv.adaptors.Ipl2PIL(im)
     except Exception:
         print "Error: camera disconnected"
         # sys.exit() raises SystemExit itself; the original's
         # `raise SystemExit` after it was unreachable and has been removed.
         sys.exit()
Ejemplo n.º 33
0
def get_image(camera):
  """
  Grabs a frame from the camera
  and returns it as a PIL image
  """
  frame = highgui.cvQueryFrame(camera)
  frame = opencv.cvGetMat(frame)
  #convert Ipl image to PIL image
  return opencv.adaptors.Ipl2PIL(frame)
Ejemplo n.º 34
0
 def get_image(self):
     """Capture a frame, save it atomically into self.imgdir, and advance
     the image index.

     The frame is first written to the work file and then renamed to its
     final indexed name so readers never observe a partial image.
     """
     im = highgui.cvQueryFrame(self.camera)
     image = opencv.adaptors.Ipl2PIL(im)
     workimage = os.path.join(self.imgdir, self.workimage)
     imagename = os.path.join(self.imgdir, self.imagetemplate % self.index)
     # Reuse the precomputed path (the original re-joined the same path
     # inline in the save call).
     image.save(workimage)
     os.rename(workimage, imagename)
     self.index += 1
     self.write_index()
Ejemplo n.º 35
0
def snapshot():
    """Open camera 1, take a single picture, release the device, and
    return the picture as a PIL image."""
    device = highgui.cvCreateCameraCapture(1)
    frame = highgui.cvQueryFrame(device)
    picture = opencv.adaptors.Ipl2PIL(frame)

    # Free the capture handle before returning.
    highgui.cvReleaseCapture(device)
    return picture
Ejemplo n.º 36
0
    def onNextFrame(self, evt):
        """Timer/idle handler: fetch the next frame, convert it BGR->RGB in
        place, cache it as a wx bitmap and request a repaint."""
        frame = gui.cvQueryFrame(self.capture)
        if frame:
            cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
            self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                           frame.imageData)
            self.Refresh()
        # Let other handlers see the event too.
        evt.Skip()
Ejemplo n.º 37
0
    def display_picture(self):
        """Continuously preview the webcam (~10 fps) on the Tk canvas.

        Rewritten from unbounded tail-recursion -- which eventually
        overflows the Python call stack -- into an explicit loop; the
        displayed behavior is otherwise unchanged.
        """
        while True:
            imagecapture = highgui.cvQueryFrame(self.camera)
            self.pilimage = opencv.adaptors.Ipl2PIL(imagecapture)
            miniimage = self.pilimage.resize((320, 240))
            # Keep a reference on self so Tk doesn't garbage-collect the
            # PhotoImage while it is displayed.
            self.image = ImageTk.PhotoImage(miniimage, master=self.frame)
            self.imagecanvas.create_image(0, 0, image=self.image, anchor="nw")
            self.imagecanvas.update()
            time.sleep(0.1)
Ejemplo n.º 38
0
 def snapshot(arg):
     """Wait briefly for the camera to settle, capture one frame, and save
     it to test.png."""
     time.sleep(1.0)
     camera = highgui.cvCreateCameraCapture(0)
     im = highgui.cvQueryFrame(camera)
     # Needed on some platforms (Ubuntu 8.04+) before PIL conversion.
     im = opencv.cvGetMat(im)
     #convert Ipl image to PIL image
     im = opencv.adaptors.Ipl2PIL(im)
     im.save('test.png', "PNG")
     # Release the capture so repeated snapshots don't leak device handles
     # (matches the sibling snapshot() helper in this file).
     highgui.cvReleaseCapture(camera)
Ejemplo n.º 39
0
def main(argv):
    # Frames per second
    fps = 20
    tux_pos = 5
    tux_pos_min = 0.0
    tux_pos_max = 9.0

    try:
        opts, args = getopt.getopt(argv, "fps", ["framerate=",])
    except getopt.GetoptError:
            sys.exit(2)

    for opt, arg in opts:
            if opt in ("-fps", "--framerate"):
                fps = arg

    camera = highgui.cvCreateCameraCapture(0)

    while True:
        highgui.cvNamedWindow('Camera', 1)
        im = highgui.cvQueryFrame(camera)
        if im is None:
            break
        # mirror
        opencv.cv.cvFlip(im, None, 1)

#        positions = face.detect(im, 'haarcascade_data/haarcascade_profileface.xml')
        positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')
#        if not positions:
#            positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')

        # display webcam image
        highgui.cvShowImage('Camera', im)

        # Division of the screen to count as "walking" motion to trigger tux
        image_size = opencv.cvGetSize(im)
        motion_block = image_size.width / 9

        if positions:
            mp = None
            for position in positions:
                if not mp or mp['width'] > position['width']:
                    mp = position
            pos = (mp['x'] + (mp['width'] / 2)) / motion_block
            print "tux pos: %f" % tux_pos
            print "pos: %f" % pos

            if pos != tux_pos:
                if tux_pos > pos:
                    move_tux_right(tux_pos - pos)
                elif tux_pos < pos:
                    move_tux_left(pos - tux_pos)
                tux_pos = pos

        if highgui.cvWaitKey(fps) >= 0:
            highgui.cvDestroyWindow('Camera')
            sys.exit(0)
Ejemplo n.º 40
0
Archivo: chroma.py Proyecto: bmiro/vpc
def getFilter(frameWidht, frameHeight):    
    cvNamedWindow("Filtred")
    
    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)
    
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1: 
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            exit
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)
        
        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)
        
        cvSetZero(mask)        
        cvAnd(ThHFrame, ThSFrame, mask)
        
        cvSetZero(filtredFrame)
        
        cvCopy(frame, filtredFrame, mask)
        
        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()
            
    cvDestroyWindow("Filtred")    
Ejemplo n.º 41
0
 def _update(self, dt):
     """Timer callback: pull one frame from the capture device and upload
     it to the GPU as BGR.

     Failures are logged and the frame is skipped so one camera glitch
     does not kill the update loop.
     """
     if self.stopped:
         return
     try:
         frame = hg.cvQueryFrame(self._device)
         self._format = 'bgr'
         self._buffer = frame.imageData
         self._copy_to_gpu()
     except:
         # Deliberate best-effort handling: log and continue.
         Logger.exception('OpenCV: Couldn\'t get image from Camera')
Ejemplo n.º 42
0
    def onShot(self, event):
        """Freeze the preview, save the current frame to foo.png, and
        switch the UI into retry mode."""
        snapshot = gui.cvQueryFrame(self.capture)
        self.playTimer.Stop()
        gui.cvSaveImage("foo.png", snapshot)

        self.hasPicture = True
        # Swap the shot button for the retry button and re-layout.
        self.shotbutton.Hide()
        self.retrybutton.Show()
        self.Layout()
        event.Skip()
Ejemplo n.º 43
0
 def CaptureVGA(self):
     """Grab a frame from the module-level camera, append it to the video
     file being recorded, and display it in the Qt label."""
     im = highgui.cvQueryFrame(camera)
     highgui.cvWriteFrame(self.videowriter, im)  # record: write the frame to the video file
     # convert Ipl image to PIL image
     im = opencv.adaptors.Ipl2PIL(im)
     im = im.convert('RGB').tostring('jpeg', 'RGB')  # re-encode as JPEG bytes
     self.image.loadFromData(QByteArray(im))  # a format Qt supports, load directly
     # im.save('3.jpg')  # OpenCV returns Ipl format, which Qt cannot show directly;
     # pic.load('3.jpg')  # a save/reload round-trip was the old workaround.
     self.piclabel.setPixmap(QPixmap.fromImage(self.image))  # display frame by frame
Ejemplo n.º 44
0
 def update(self):
     """Pull one frame from the device and hand it to the GPU as
     GL_BGR_EXT.

     Errors are logged and swallowed so a single bad frame does not stop
     the update loop.
     """
     if self.stopped:
         return
     try:
         frame = hg.cvQueryFrame(self._device)
         self._format = GL_BGR_EXT
         self._buffer = frame.imageData
         self._copy_to_gpu()
     except:
         # Deliberate best-effort handling: log and continue.
         pymt.pymt_logger.exception('OpenCV: Couldn\'t get image from Camera')
Ejemplo n.º 45
0
    def onShot(self, event):
        """Capture the current frame to foo.png, stop the preview timer,
        and flip the buttons into retry mode."""
        frame = gui.cvQueryFrame(self.capture)
        self.playTimer.Stop()
        gui.cvSaveImage("foo.png", frame)

        self.hasPicture = True
        # Swap shot button for retry button and refresh the layout.
        self.shotbutton.Hide()
        self.retrybutton.Show()
        self.Layout()
        event.Skip()
Ejemplo n.º 46
0
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordenates for top-left
                                    and bottom-right corner
    """
    # The OpenCV API marks this route obsolete, but cvLoad's output can't
    # be cast to a HaarClassifierCascade, so we read the image through the
    # file-capture API; cvLoadHaarClassifierCascade's size arg is ignored.
    capture = cvCreateFileCapture(image)
    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    src = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U,
                        frame.nChannels)
    cvCopy(frame, src)

    # temporary work images: full-size grayscale and a downscaled copy
    gray = cvCreateImage((src.width, src.height), COPY_DEPTH, COPY_CHANNELS)
    small_w = cvRound(src.width / IMAGE_SCALE)
    small_h = cvRound(src.height / IMAGE_SCALE)
    small = cvCreateImage((small_w, small_h), COPY_DEPTH, COPY_CHANNELS)

    # grayscale -> shrink -> equalize, then reset the detection storage
    cvCvtColor(src, gray, CV_BGR2GRAY)
    cvResize(gray, small, CV_INTER_LINEAR)
    cvEqualizeHist(small, small)
    cvClearMemStorage(STORAGE)

    found = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if not cascade:
            continue
        detections = cvHaarDetectObjects(small, cascade, STORAGE,
                                         HAAR_SCALE, MIN_NEIGHBORS,
                                         HAAR_FLAGS, MIN_SIZE) or []
        for rect in detections:
            # detection ran on the downscaled image; scale each box back
            # to original coordinates as two corner points
            left = int(rect.x * IMAGE_SCALE)
            top = int(rect.y * IMAGE_SCALE)
            right = int((rect.x + rect.width) * IMAGE_SCALE)
            bottom = int((rect.y + rect.height) * IMAGE_SCALE)
            found.append(((left, top), (right, bottom)))
    return found
Ejemplo n.º 47
0
 def onRetry(self, event):
     """Discard the captured still and resume the live preview."""
     frame = gui.cvQueryFrame(self.capture)
     # OpenCV delivers BGR; wx bitmaps expect RGB, so convert in place
     cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
     self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                    frame.imageData)
     self.startTimer()
     self.shotbutton.Show()
     self.retrybutton.Hide()
     self.hasPicture = False
     # re-run sizer layout after toggling button visibility
     self.Layout()
     event.Skip()
Ejemplo n.º 48
0
def seek_onChange(pos, capture, windowName):
    '''Callback for the seek trackbar'''
    print 'Seeking to frame: %d' % pos

    # Set the pointer to frame pos and grab the frame
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_POS_FRAMES,
                                 pos * 3600 - 1)
    frame = highgui.cvQueryFrame(capture)

    # Display the frame on the window
    highgui.cvShowImage(windowName, frame)
Ejemplo n.º 49
0
 def update(self):
     """Grab one frame from the OpenCV capture device and upload it to the GPU.

     Does nothing once the camera has been stopped.
     """
     if self.stopped:
         return
     try:
         frame = hg.cvQueryFrame(self._device)
         self._format = GL_BGR_EXT
         self._buffer = frame.imageData
         self._copy_to_gpu()
     except Exception:
         # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
         # still propagate; a failed camera read is logged, not fatal.
         pymt.pymt_logger.exception(
             'OpenCV: Couldn\'t get image from Camera')
Ejemplo n.º 50
0
 def get_image(self, surface=None, *argv, **argd):
     """
     Since we are using opencv there are some differences here.
     This function will return a cvmat by default (the quickest route from camera to your code)
     Or if pygame was specified a 3dpixel array
       - this can be blitted directly to a pygame surface
       - or can be converted to a surface array and returned to your pygame code.

     """
     if self.imageType == "pygame":
         try:
             surfarray.blit_array(
                 surface,
                 conversionUtils.cv2SurfArray(hg.cvQueryFrame(
                     self.capture)))
             return surface
         except Exception:
             # Blit failed (e.g. surface is None or sized wrong); fall back
             # to building a fresh converted surface from a new frame.
             # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
             # still propagate.
             return surfarray.make_surface(
                 conversionUtils.cv2SurfArray(hg.cvQueryFrame(
                     self.capture))).convert()
     return hg.cvQueryFrame(self.capture)
Ejemplo n.º 51
0
def get_image(new_size=None, mode="L"):
    ''' inspired by http://www.jperla.com/blog/2007/09/26/capturing-frames-from-a-webcam-on-linux

    Grab one frame from the module-level `camera`, convert it to the given
    PIL mode, optionally resize it, and return it as a numpy array.
    '''
    raw = highgui.cvQueryFrame(camera)
    # Add the line below if you need it (Ubuntu 8.04+)
    # raw = opencv.cvGetMat(raw)
    # Ipl image -> PIL image in the requested mode
    pil_img = opencv.adaptors.Ipl2PIL(raw).convert(mode)

    if new_size:
        pil_img = pil_img.resize(new_size)
    return array(pil_img)
Ejemplo n.º 52
0
def capture_webcam():
    """
    Grab a frame from the first detected webcam using OpenCV.
    """
    import opencv
    from opencv import highgui

    cam = highgui.cvCreateCameraCapture(0)
    frame = highgui.cvQueryFrame(cam)
    # convert the Ipl frame to PIL before releasing the device
    pil_image = opencv.adaptors.Ipl2PIL(frame)
    highgui.cvReleaseCapture(cam)
    return pil_image
Ejemplo n.º 53
0
    def __init__(self, src="", time=None, flipped=False, thresh=128, thmode=0):
        """Wrap an OpenCV capture and pull one frame from it.

        src:     an opened OpenCV capture object (despite the "" default it
                 is passed straight to cvSetCaptureProperty/cvQueryFrame) —
                 TODO confirm expected type against callers
        time:    optional frame index to seek to before grabbing
        flipped: when True, mirror the grabbed frame in place
        thresh / thmode: threshold value and mode, stored for later use
        """
        self.src = src
        self.time = time
        self.bthresh = thresh
        self.bthreshmode = thmode
        if self.time:
            # seek to the requested frame before grabbing
            hg.cvSetCaptureProperty(self.src, hg.CV_CAP_PROP_POS_FRAMES, self.time)
        self.iplimage = hg.cvQueryFrame(self.src)
        if flipped:
            # flip mode 1 = flip around the vertical axis (horizontal mirror)
            opencv.cvFlip(self.iplimage, None, 1)
        self.width = self.iplimage.width
        self.height = self.iplimage.height
Ejemplo n.º 54
0
    def run(self):
        """
        Consume images from the webcam at 25fps.
        If visualize is True, show the result in the screen.
        """
        if self.visualize:
            highgui.cvNamedWindow('DucksboardFace')

        while self.running:
            self.image = highgui.cvQueryFrame(self.camera)
            if self.visualize:
                highgui.cvShowImage('DucksboardFace', self.image)
            # wait ~40 ms between grabs (also pumps the HighGUI event loop);
            # Python 2 integer division keeps this an int
            highgui.cvWaitKey(1000 / 25)
Ejemplo n.º 55
0
    def save(self, record=-1):
        """
        Captures, crops, and saves a webcam frame.  Pass an explicit record number
        otherwise writes to next in sequence.  Returns zero-padded photo reference ID.

        NOTE(review): the crop step is currently commented out, so the full
        frame is saved; return value comes from self.store.savePIL.
        """
        img = highgui.cvQueryFrame(self.cap)
        # wrap the IplImage in a CvMat so the PIL adaptor accepts it
        img = opencv.cvGetMat(img)
        #No BGR => RGB conversion needed for PIL output.
        pil = opencv.adaptors.Ipl2PIL(img)  #convert to a PIL
        #~ pil = pil.crop((80, 0, 560, 480))
        #~ pil.show()

        return self.store.savePIL(pil, record)
Ejemplo n.º 56
0
    def init_camera(self):
        """Open the OpenCV capture device, try to apply the requested
        resolution (falling back to the driver default when unsupported),
        create the GL texture, and start streaming unless stopped."""
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        try:
            # try first to set resolution
            # (fixed: the original called the non-existent `cv.hg(...)`,
            # which always raised and silently forced the fallback path)
            hg.cvSetCaptureProperty(self._device, hg.CV_CAP_PROP_FRAME_WIDTH,
                                    self.resolution[0])
            hg.cvSetCaptureProperty(self._device, hg.CV_CAP_PROP_FRAME_HEIGHT,
                                    self.resolution[1])

            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            if not int(frame.width) == self.resolution[0]:
                raise Exception('OpenCV: Resolution not supported')

        except Exception:
            # error while setting resolution
            # fallback on default one
            w = int(hg.cvGetCaptureProperty(self._device,
                    hg.CV_CAP_PROP_FRAME_WIDTH))
            h = int(hg.cvGetCaptureProperty(self._device,
                    hg.CV_CAP_PROP_FRAME_HEIGHT))
            # grab a frame so the device settles on its default mode
            frame = hg.cvQueryFrame(self._device)
            Logger.warning(
                'OpenCV: Camera resolution %s impossible! Defaulting to %s.' %
                (self.resolution, (w, h)))

            # set resolution to default one
            self._resolution = (w, h)

        # create texture !
        self._texture = Texture.create(*self._resolution)
        self._texture.flip_vertical()
        self.dispatch('on_load')

        if not self.stopped:
            self.start()
Ejemplo n.º 57
0
    def get_image(self):
        """Return an image in PIL (Python Imaging Library) format.

        Returns a (pil_image, mode) tuple; the image is converted to
        8-bit grayscale, so mode is always 'L'.
        """
        im = None
        # cvQueryFrame can return None while the camera warms up;
        # spin until a real frame arrives
        while not im:
            im = highgui.cvQueryFrame(self.camera)

        arr = numpy.asarray(cv.adaptors.Ipl2PIL(im)).copy()
        # zero channel index 2 before the grayscale conversion — blue in
        # RGB ordering; NOTE(review): presumably intentional, confirm why
        arr[:, :, 2] = 0

        if self.blur_factor:
            # optional smoothing controlled by the instance's blur_factor
            arr = ndimage.gaussian_filter(arr, self.blur_factor)

        pil = Image.fromarray(arr).convert('L')
        return pil, pil.mode