Example #1
def picture():
    sock = cStringIO.StringIO()
    camera = highgui.cvCreateCameraCapture(0)
    def get_image():  # Create the camera frame grabber
        im = highgui.cvQueryFrame(camera)
        im = opencv.cvGetMat(im)
        return opencv.adaptors.Ipl2PIL(im)

    fps = 30.0  # Frames per second
    pygame.init()
    window = pygame.display.set_mode((640, 480))  # Window size
    pygame.display.set_caption("Twitter")  # Title
    screen = pygame.display.get_surface()  # Show the camera

    while True:
        events = pygame.event.get()
        im = get_image()
        pg_img = pygame.image.frombuffer(im.tostring(), im.size, im.mode)
        screen.blit(pg_img, (0, 0))
        pygame.display.flip()  # Flip the image
        pygame.time.delay(int(1000 * 1.0 / fps))  # Update frames
        for event in events:
            if event.type == KEYDOWN:
                if event.key == K_SPACE:  # Take a photo with the space bar
                    pygame.image.save(pg_img, filename)
                    img = filename
                    xml = upload_from_computer(img)
                    process(xml)  # Send to imgur
                    sys.exit(0)  # Couldn't find a way to close only the camera window; this exits the whole program
                if event.key == K_ESCAPE:  # Close the camera when ESC is pressed
                    sys.exit(0)
Example #2
def setup_camera_capture(device_num=0):
    """Perform camera setup for the device number (default device = 0).
    Returns a reference to the camera Capture.

    """
    try:
        device = int(device_num)
    except (IndexError, ValueError):
        # assume we want the 1st device
        device = 0
    print 'Using Camera device %d' % device

    # Try to start capturing frames
    capture = highgui.cvCreateCameraCapture(device)

    # set the wanted image size from the camera
    highgui.cvSetCaptureProperty(
        capture,
        highgui.CV_CAP_PROP_FRAME_WIDTH,
        cam_width
    )
    highgui.cvSetCaptureProperty(
        capture,
        highgui.CV_CAP_PROP_FRAME_HEIGHT,
        cam_height
    )

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit(1)
    return capture
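The opencv.highgui module used throughout these examples belongs to the legacy OpenCV 1.x SWIG bindings, which no longer ship with current releases. As a rough sketch only, the same setup against the modern cv2 API (assuming OpenCV 3 or later; the cam_width/cam_height parameters stand in for the module-level globals used above) might look like:

import sys
import cv2

def setup_camera_capture_cv2(device_num=0, cam_width=640, cam_height=480):
    """Open the given camera device and request a frame size (cv2 sketch)."""
    try:
        device = int(device_num)
    except (IndexError, ValueError):
        # assume we want the 1st device
        device = 0
    print('Using Camera device %d' % device)

    # try to start capturing frames
    capture = cv2.VideoCapture(device)
    if not capture.isOpened():
        print('Error opening capture device')
        sys.exit(1)

    # request the wanted image size from the camera
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, cam_width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_height)
    return capture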
Example #3
File: webcam.py Project: rechner/Taxidi
 def onIdle(self, event):
     """
     Event to grab and display a frame from the camera. (internal use).
     """
     if self.cap == None:  #Should be cvCameraCapture instance.
         #unbind the idle instance, change to click.
         highgui.cvReleaseCapture(self.cap)  #release the old instance and
         self.cap = highgui.cvCreateCameraCapture(
             self.camera)  #try new one.
         self.displayError(self.errorBitmap, (128, 128))
         raise CameraError('Unable to open camera, retrying....')
         event.Skip()
     try:
         img = highgui.cvQueryFrame(self.cap)
     except cv2.error as e:
         raise CameraError('Error when querying for frame: {0}'.format(e))
     self._error = 0  #worked successfully
     img = opencv.cvGetMat(img)
     cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
     if conf.as_bool(conf.config['webcam']['cropBars']):
         #Draw cropping region
         cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0),
                        2)
     self.displayImage(img)
     event.RequestMore()
Example #4
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 10, 10)
    device = 0  #use first device found
    capture = highgui.cvCreateCameraCapture(device)
    frame = highgui.cvQueryFrame(capture)
    frame_size = cv.cvGetSize(frame)
    fps = 30

    while 1:

        frame = highgui.cvQueryFrame(capture)

        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage('Camera', frame)

        # handle events
        k = highgui.cvWaitKey(5)

        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            quit()
Example #5
    def __init__(self, *args, **kwds):
        # begin wxGlade: MyPanel.__init__
        kwds["style"] = wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.messenger = wxMessenger(RXBOX_JID, RXBOX_PWD, self, -1,)
        self.phone = Linphone(self)
        self.Videoconf_Label = wx.StaticText(self, -1, "Video Conference", style=wx.ALIGN_CENTRE)
        self.Videoconf_Panel = wx.Panel(self, -1)
        self.Photoshot_Label = wx.StaticText(self, -1, "Photo Snapshot", style=wx.ALIGN_CENTRE)
        self.Photoshot_Panel = wx.Panel(self, -1)
        # added for photosnapshot
        self.Capture_Button = wx.Button(self, -1, "CAPTURE!")
        self.camera = highgui.cvCreateCameraCapture(1)
        self.image_counter = 0
        # added for photosnapshot
        
        self.static_line_5 = wx.StaticLine(self, -1)
        self.IM_Label = wx.StaticText(self, -1, "Instant Messaging", style=wx.ALIGN_CENTRE)
        self.Remarks_Label = wx.StaticText(self, -1, "Remarks", style=wx.ALIGN_CENTRE)
        self.Remarks_Text = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER|wx.TE_MULTILINE)

        self.__set_properties()
        self.__do_layout()

        # added for photosnapshot
        self.Bind(wx.EVT_BUTTON, self.onCapture, self.Capture_Button)
        # end wxGlade
	
        os.environ['SDL_VIDEODRIVER'] = 'x11'
        os.environ['SDL_VIDEO_YUV_HWACCEL'] = '0'
        os.environ['SDL_WINDOWID']=str(self.Videoconf_Panel.GetHandle())
Example #6
File: __init__.py Project: aoloe/shoebot
 def __init__(self, cam=0, width=None, height=None):
     self.path = cam
     self.video = hg.cvCreateCameraCapture(self.path)
     if width:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH, width)
     if height:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT, height)
Example #7
 def __init__(self, cam=0, width=None, height=None):
     self.path = cam
     self.video = hg.cvCreateCameraCapture(self.path)
     if width:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH, width)
     if height:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT, height)
Example #8
def setup_camera_capture(device_num=0):
    ''' perform camera setup for the device number (default device = 0)
        return a reference to the camera Capture
    '''
    try:
        # try to get the device number from the command line
        device = int(device_num)
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = 0
    print 'Using Camera device %d'%device

    # no argument on the command line, try to use the camera
    capture = highgui.cvCreateCameraCapture (device)

    # set the wanted image size from the camera
    highgui.cvSetCaptureProperty (capture,highgui.CV_CAP_PROP_FRAME_WIDTH, cam_width)
    highgui.cvSetCaptureProperty (capture,highgui.CV_CAP_PROP_FRAME_HEIGHT, cam_height)

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit (1)
    
    return capture    
Example #9
def main():

    print "FaceIn! an OpenCV Python Face Recognition Program"
    
    highgui.cvNamedWindow ('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow ('Camera', 10, 10)
    device = 0 #use first device found
    capture = highgui.cvCreateCameraCapture (device)
    frame = highgui.cvQueryFrame (capture)
    frame_size = cv.cvGetSize (frame)
    fps = 30
        
    while 1:
        
        frame = highgui.cvQueryFrame (capture)
        
        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage ('Camera', frame)

        # handle events
        k = highgui.cvWaitKey (5)

        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            quit()
Example #10
    def __init__(self,
                 processFunction=None,
                 title="Video Capture Player",
                 show=True,
                 **argd):
        self.__dict__.update(**argd)
        super(VideoCapturePlayer, self).__init__(**argd)
        t_begin = time.time()
        self.processFunction = processFunction
        self.title = title
        self.show = show

        if self.show is True:
            self.display = hg.cvNamedWindow(self.title)

        try:
            self.camera = hg.cvCreateCameraCapture(0)
        except:
            print("Couldn't open camera device, is it connected?")
            hg.cvDestroyWindow(title)
            raise SystemExit

        # Take a frame to get props and use in testing
        self.snapshot = cv.cvCloneMat(hg.cvQueryFrame(self.camera))
        # check that we got an image, otherwise try again.
        for i in xrange(100):
            if self.snapshot is not None: break
            self.snapshot = hg.cvQueryFrame(self.camera)
Example #11
def init_cam(width=320, height=240):
    capture = highgui.cvCreateCameraCapture(0)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH,
                                 int(width))
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                 int(height))
    return capture
Example #12
    def init_camera(self):
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        # Set preferred resolution
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                              self.resolution[0])
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                              self.resolution[1])

        # and get frame to check if it's ok
        frame = hg.cvQueryFrame(self._device)
        # Just set the resolution to the frame we just got, but don't use
        # self.resolution for that as that would cause an infinite recursion
        # with self.init_camera (but slowly as we'd have to always get a
        # frame).
        self._resolution = (int(frame.width), int(frame.height))

        #get fps
        self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)
        if self.fps <= 0:
            self.fps = 1 / 30.

        if not self.stopped:
            self.start()
Example #13
    def init_camera(self):
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        # Set preferred resolution
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                              self.resolution[0])
        cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                              self.resolution[1])

        # and get frame to check if it's ok
        frame = hg.cvQueryFrame(self._device)
        # Just set the resolution to the frame we just got, but don't use
        # self.resolution for that as that would cause an infinite recursion
        # with self.init_camera (but slowly as we'd have to always get a
        # frame).
        self._resolution = (int(frame.width), int(frame.height))

        # get fps
        self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)
        if self.fps <= 0:
            self.fps = 1 / 30.

        if not self.stopped:
            self.start()
Example #14
    def init_camera(self):
        # consts have changed locations between versions 2 and 3
        if self.opencvMajorVersion in (3, 4):
            PROPERTY_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.CAP_PROP_FPS
        elif self.opencvMajorVersion == 2:
            PROPERTY_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.cv.CV_CAP_PROP_FPS
        elif self.opencvMajorVersion == 1:
            PROPERTY_WIDTH = cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv.CV_CAP_PROP_FPS

        Logger.debug('Using opencv ver.' + str(self.opencvMajorVersion))

        if self.opencvMajorVersion == 1:
            # create the device
            self._device = hg.cvCreateCameraCapture(self._index)
            # Set preferred resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])
            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            # Just set the resolution to the frame we just got, but don't use
            # self.resolution for that as that would cause an infinite
            # recursion with self.init_camera (but slowly as we'd have to
            # always get a frame).
            self._resolution = (int(frame.width), int(frame.height))
            # get fps
            self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)

        elif self.opencvMajorVersion in (2, 3, 4):
            # create the device
            self._device = cv2.VideoCapture(self._index)
            # Set preferred resolution
            self._device.set(PROPERTY_WIDTH,
                             self.resolution[0])
            self._device.set(PROPERTY_HEIGHT,
                             self.resolution[1])
            # and get frame to check if it's ok
            ret, frame = self._device.read()

            # source:
            # http://stackoverflow.com/questions/32468371/video-capture-propid-parameters-in-opencv # noqa
            self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
            # get fps
            self.fps = self._device.get(PROPERTY_FPS)

        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps

        if not self.stopped:
            self.start()
Example #15
    def init_camera(self):
        # consts have changed locations between versions 2 and 3
        if self.opencvMajorVersion in (3, 4):
            PROPERTY_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.CAP_PROP_FPS
        elif self.opencvMajorVersion == 2:
            PROPERTY_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.cv.CV_CAP_PROP_FPS
        elif self.opencvMajorVersion == 1:
            PROPERTY_WIDTH = cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv.CV_CAP_PROP_FPS

        Logger.debug('Using opencv ver.' + str(self.opencvMajorVersion))

        if self.opencvMajorVersion == 1:
            # create the device
            self._device = hg.cvCreateCameraCapture(self._index)
            # Set preferred resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])
            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            # Just set the resolution to the frame we just got, but don't use
            # self.resolution for that as that would cause an infinite
            # recursion with self.init_camera (but slowly as we'd have to
            # always get a frame).
            self._resolution = (int(frame.width), int(frame.height))
            # get fps
            self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)

        elif self.opencvMajorVersion in (2, 3, 4):
            # create the device
            self._device = cv2.VideoCapture(self._index)
            # Set preferred resolution
            self._device.set(PROPERTY_WIDTH,
                             self.resolution[0])
            self._device.set(PROPERTY_HEIGHT,
                             self.resolution[1])
            # and get frame to check if it's ok
            ret, frame = self._device.read()

            # source:
            # http://stackoverflow.com/questions/32468371/video-capture-propid-parameters-in-opencv # noqa
            self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
            # get fps
            self.fps = self._device.get(PROPERTY_FPS)

        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps

        if not self.stopped:
            self.start()
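Neither variant above shows how self.opencvMajorVersion is obtained; a plausible helper for deriving it (an illustrative assumption, not code from these snippets) is:

def get_opencv_major_version():
    # Use the major component of cv2.__version__ when cv2 is importable,
    # and fall back to 1 when only the legacy SWIG bindings are available.
    try:
        import cv2
        return int(cv2.__version__.split('.')[0])
    except ImportError:
        return 1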
Example #16
	def init_camera(self,cam_num=0):
		self.capture=highgui.cvCreateCameraCapture(cam_num)

		if not self.capture:
			raise IOError('Unable to open camera %d'%cam_num)

		highgui.cvSetCaptureProperty(self.capture,highgui.CV_CAP_PROP_FRAME_WIDTH,self.image_dims[0])
		highgui.cvSetCaptureProperty(self.capture,highgui.CV_CAP_PROP_FRAME_HEIGHT,self.image_dims[1])
Example #17
File: webcam.py Project: rechner/Taxidi
 def open(self, camera=-1):
     """
     Open a capture device after __init__ has been called.  Call close() first
     before opening a new device.  Takes camera index as an option.
     """
     self.cap = highgui.cvCreateCameraCapture(camera)
     self.Bind(wx.EVT_IDLE, self.onIdle)
     pass
Example #18
File: webcam.py Project: rechner/Taxidi
 def open(self, camera=-1):
     """
     Open a capture device after __init__ has been called.  Call close() first
     before opening a new device.  Takes camera index as an option.
     """
     self.cap = highgui.cvCreateCameraCapture(camera)
     self.Bind(wx.EVT_IDLE, self.onIdle)
     pass
Example #19
	def __init__(self, *args):
		apply(QWidget.__init__,(self, ) + args)
		self.cascade_name						= 'haarcascades/haarcascade_frontalface_alt.xml'
		self.cascade							= cv.cvLoadHaarClassifierCascade(self.cascade_name, cv.cvSize(20,20))
		self.cap								= highgui.cvCreateCameraCapture(0)
		self.q_buffer							= QImage()
		self.q_buffer.create(self.width(),self.height(),8)
		self.timer								= self.startTimer(1)
Example #20
File: detector.py Project: alien9/cam
  def __init__(self):
    def ahead(nu):
        print "KEY PRESSED "
        k = getkey()
        if k=='\x1b' or k=='q':
            print "CAI FORA"
            os.kill(pro, signal.SIGKILL)
            sys.exit(0)
            raise SystemExit
    thread.start_new_thread ( ahead, (1,) )
    def get_image():
        try:
            im = highgui.cvQueryFrame(camera)
            return opencv.adaptors.Ipl2PIL(im)
        except Exception: 
            print "Error: camera disconnected"
            sys.exit()
            raise SystemExit

    def difere(a,b):
        d=0
        x=0
        while x<W:
            y=0
            while y<H:
                p=a.getpixel((x,y))
                q=b.getpixel((x,y))
                d+=abs(p[0]-q[0]+p[1]-q[1]+p[2]-q[2])
                y+=THRESHOLD
            x+=THRESHOLD
        return d
    camera = highgui.cvCreateCameraCapture(-1)

    im=get_image()
    W=im.size[0]
    H=im.size[1]
    ig=copy.copy(im)

    c=0
    while True: 

        c+=1
        im = get_image()
        if im==None or ig==None:
            sys.exit()

            
        h=difere(im,ig)
        print '*** %08d ***' % h
        if h>PEAK and c>50:
            print "toca video"
            #g.emit("eof",1)
            #.loadfile("../stood.mov")
            call("mplayer ../stood.mov -fs", shell=True)
            c=0
            time.sleep(5) 
        ig=copy.copy(im)
    exit()
Example #21
 def snapshot(arg):
   time.sleep(1.0)
   camera = highgui.cvCreateCameraCapture(0)
   im = highgui.cvQueryFrame(camera)
   # Add the line below if you need it (Ubuntu 8.04+)
   im = opencv.cvGetMat(im)
   #convert Ipl image to PIL image
   im = opencv.adaptors.Ipl2PIL(im)
   im.save('test.png',"PNG")
Example #22
def main(argv):
    # Frames per second
    fps = 20
    tux_pos = 5
    tux_pos_min = 0.0
    tux_pos_max = 9.0

    try:
        opts, args = getopt.getopt(argv, "fps", ["framerate=",])
    except getopt.GetoptError:
            sys.exit(2)

    for opt, arg in opts:
            if opt in ("-fps", "--framerate"):
                fps = arg

    camera = highgui.cvCreateCameraCapture(0)

    while True:
        highgui.cvNamedWindow('Camera', 1)
        im = highgui.cvQueryFrame(camera)
        if im is None:
            break
        # mirror
        opencv.cv.cvFlip(im, None, 1)

#        positions = face.detect(im, 'haarcascade_data/haarcascade_profileface.xml')
        positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')
#        if not positions:
#            positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')

        # display webcam image
        highgui.cvShowImage('Camera', im)

        # Division of the screen to count as "walking" motion to trigger tux
        image_size = opencv.cvGetSize(im)
        motion_block = image_size.width / 9

        if positions:
            mp = None
            for position in positions:
                if not mp or mp['width'] > position['width']:
                    mp = position
            pos = (mp['x'] + (mp['width'] / 2)) / motion_block
            print "tux pos: %f" % tux_pos
            print "pos: %f" % pos

            if pos != tux_pos:
                if tux_pos > pos:
                    move_tux_right(tux_pos - pos)
                elif tux_pos < pos:
                    move_tux_left(pos - tux_pos)
                tux_pos = pos

        if highgui.cvWaitKey(fps) >= 0:
            highgui.cvDestroyWindow('Camera')
            sys.exit(0)
Example #23
File: webcam.py Project: rechner/Taxidi
 def __init__(self, parent, id, camera=-1):
     wx.Panel.__init__(self, parent, id, style=wx.NO_BORDER)
     self.camera = camera
     self.cap = highgui.cvCreateCameraCapture(camera)
     wximg = wx.Image('resources/icons/camera-error-128.png')
     self.errorBitmap = wximg.ConvertToBitmap()
     self._error = 0
     self.store = Storage()
     self.Bind(wx.EVT_IDLE, self.onIdle)
Example #24
 def __init__(self, *args):
     apply(QWidget.__init__, (self, ) + args)
     self.cascade_name = 'haarcascades/haarcascade_frontalface_alt.xml'
     self.cascade = cv.cvLoadHaarClassifierCascade(self.cascade_name,
                                                   cv.cvSize(20, 20))
     self.cap = highgui.cvCreateCameraCapture(0)
     self.q_buffer = QImage()
     self.q_buffer.create(self.width(), self.height(), 8)
     self.timer = self.startTimer(1)
Example #25
File: webcam.py Project: rechner/Taxidi
 def __init__(self, parent, id, camera=-1):
     wx.Panel.__init__(self, parent, id, style=wx.NO_BORDER)
     self.camera = camera
     self.cap = highgui.cvCreateCameraCapture(camera)
     wximg = wx.Image('resources/icons/camera-error-128.png')
     self.errorBitmap = wximg.ConvertToBitmap()
     self._error = 0
     self.store = Storage()
     self.Bind(wx.EVT_IDLE, self.onIdle)
Example #26
def snapshot():
    cam = highgui.cvCreateCameraCapture(1)
    img = highgui.cvQueryFrame(cam)

    pilImage = opencv.adaptors.Ipl2PIL(img)

    highgui.cvReleaseCapture(cam)
    
    return pilImage
Example #27
    def __init__(self, devnum=0, showVideoWindow=0):
        self.camera = highgui.cvCreateCameraCapture(devnum)
        
        #self.normalfont = ImageFont.load_default()
        #self.boldfont = ImageFont.load_default()
        #This is going to be linux specific! We might have a problem here
        self.normalfont = ImageFont.truetype('/usr/share/fonts/truetype/custom/tahoma.ttf', 12)
        self.boldfont = ImageFont.truetype('/usr/share/fonts/truetype/custom/tahomabd.ttf', 12)

        self.font = None
Example #28
    def start_capture(self, device):

        # video_dimensions = [176, 144]
        video_dimensions = [320, 240]

        if not self.capture:

            self.capture = highgui.cvCreateCameraCapture(device)

            highgui.cvSetCaptureProperty(self.capture, highgui.CV_CAP_PROP_FRAME_WIDTH, video_dimensions[0])
            highgui.cvSetCaptureProperty(self.capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, video_dimensions[1])
Example #29
File: pylapse.py Project: oogg06/pylapse
    def __init__(self, file):
        self.widgets = gtk.glade.XML(file)
        self.build_references_to_controls()

        self.camera = highgui.cvCreateCameraCapture(0)
        self.widgets.signal_autoconnect(self)

        self.buttonSelectFolder = self.__getitem__("buttonFolder")

        self.video = self.__getitem__("video")
        self.main_window.show()
Example #30
File: camera.py Project: Foued70/pycam
    def __init__(self, device, size, mode, imageType='opencv'):
        self.imageType = imageType
        self.size = self.width, self.height = size
        self.device = device
  
        # todo: would be nice if this didn't make a whole lot of noise about firewire...
        self.capture = hg.cvCreateCameraCapture(self.device)

        # set the wanted image size from the camera
        hg.cvSetCaptureProperty (self.capture, hg.CV_CAP_PROP_FRAME_WIDTH, self.width)
        hg.cvSetCaptureProperty (self.capture, hg.CV_CAP_PROP_FRAME_HEIGHT, self.height)
Example #31
File: camera.py Project: Foued70/pycam
def list_cameras():
    #return [0]  # Just use this line if errors occur
    cams = []
    for i in range(3):
        try:
            capture = hg.cvCreateCameraCapture( i )  # Must be a better way of doing this...
            if capture is not None:
                cams.append(i)
        except Exception, e:
            pass
        finally:
Example #32
File: webcam.py Project: CNCBASHER/Cura
	def __init__(self):
		if cv != None:
			self._cam = highgui.cvCreateCameraCapture(-1)
		elif win32vidcap != None:
			self._cam = win32vidcap.new_Dev(0, False)
			#self._cam.displaycapturefilterproperties()
			#self._cam.displaycapturepinproperties()
		else:
			raise exception("No camera implementation available")
		
		self._doTimelaps = False
		self._bitmap = None
Example #33
File: webcam.py Project: CNCBASHER/Cura
    def __init__(self):
        if cv != None:
            self._cam = highgui.cvCreateCameraCapture(-1)
        elif win32vidcap != None:
            self._cam = win32vidcap.new_Dev(0, False)
            #self._cam.displaycapturefilterproperties()
            #self._cam.displaycapturepinproperties()
        else:
            raise exception("No camera implementation available")

        self._doTimelaps = False
        self._bitmap = None
Example #34
File: webcam.py Project: martinxyz/Cura
	def __init__(self):
		self._cam = None
		if cv != None:
			self._cam = highgui.cvCreateCameraCapture(-1)
		elif win32vidcap != None:
			try:
				self._cam = win32vidcap.new_Dev(0, False)
			except:
				pass
		
		self._doTimelaps = False
		self._bitmap = None
Example #35
def list_cameras():
    #return [0]  # Just use this line if errors occur
    cams = []
    for i in range(3):
        try:
            capture = hg.cvCreateCameraCapture(
                i)  # Must be a better way of doing this...
            if capture is not None:
                cams.append(i)
        except Exception, e:
            pass
        finally:
Example #36
def capture_webcam():
    """
    Grab a frame from the first detected webcam using OpenCV.
    """
    import opencv
    from opencv import highgui

    camera = highgui.cvCreateCameraCapture(0)
    cv_img = highgui.cvQueryFrame(camera)
    img = opencv.adaptors.Ipl2PIL(cv_img)
    highgui.cvReleaseCapture(camera)
    return img
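Since capture_webcam() returns a PIL image, a typical (hypothetical) call site just saves or displays the frame:

frame = capture_webcam()
frame.save('webcam_snapshot.png', 'PNG')  # standard PIL Image.save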
Example #37
def server():
	camera = highgui.cvCreateCameraCapture(0)
	p = pyaudio.PyAudio()
	FORMAT = pyaudio.paInt16
	CHANNELS = 2
	RATE = 8000
	AUD_PARTS=5
	chunk = RATE/AUD_PARTS
	stream = p.open(format = FORMAT, channels = CHANNELS, rate = RATE,input = True, frames_per_buffer = chunk)
	
	# We shall first create the socket for connecting to the multicast address.
	addrinfo = socket.getaddrinfo(group, None)[0] 
	s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
	ttl_bin = struct.pack('@i', MYTTL)
	s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
	i=0
	#print len(image)
	for i in range(0,5):
			image=get_image(camera)
			print i
	fno=0 #frame number
	while True:
		time1=time.time()
		image=get_image(camera)
		#image=Image.open('arbaz.jpg')
		stri=StringIO()
		image.save(stri,"jpeg",quality=QUALITY)
		data=stri.getvalue()
		size_piece = len(data)/PARTS
		total=''
		dataaud=stream.read(chunk)
		size_piece_aud = len(dataaud)/AUD_PARTS
		for z in range(0,PARTS-1):
			data1=data[z*size_piece:(z+1)*size_piece]				
#data=newim.read()
			s.sendto('vid,'+str(fno) + ',' + str(z)  + ',' +data1 + '\0', (addrinfo[4][0], MYPORT))
			if z%8==0:
				data2=dataaud[z/8*size_piece_aud:(z+8)/8*size_piece_aud]				
				s.sendto('aud,'+str(fno) + ',' + str(z/8)  + ',' +data2 + '\0', (addrinfo[4][0], MYPORT))
			i+=1
		#	total=total+data1
			#time.sleep(0.001)
		z=z+1
		data1=data[z*size_piece:]		
		data2=dataaud[z*size_piece_aud:]		
		s.sendto('vid,'+str(fno) + ',' + str(z)  + ',' +data1 + '\0', (addrinfo[4][0], MYPORT))
		s.sendto('aud,'+str(fno) + ',' + str(z/8)  + ',' +data2 + '\0', (addrinfo[4][0], MYPORT))
		#total=total+data1
		#f=open('senders.jpg','w')
		#f.write(total)
		time2=time.time()
		time.sleep(0.2-time2+time1)
		fno=fno+1
Example #38
    def __init__(self, cam=0, width=None, height=None):
        self.path = cam
        self.video = hg.cvCreateCameraCapture(self.path)
        if width:
            hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH, width)
        if height:
            hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT, height)

        def release_camera(camera=self.video):
            print(camera)  # this for debugging, do not leave it!!!
            print(dir(camera))  # same as above
            opencv.cvReleaseCapture(camera)
        _ctx.drawing_closed = release_camera
Example #39
    def __init__(self, device, size, mode, imageType='opencv'):
        self.imageType = imageType
        self.size = self.width, self.height = size
        self.device = device

        # todo: would be nice if this didn't make a whole lot of noise about firewire...
        self.capture = hg.cvCreateCameraCapture(self.device)

        # set the wanted image size from the camera
        hg.cvSetCaptureProperty(self.capture, hg.CV_CAP_PROP_FRAME_WIDTH,
                                self.width)
        hg.cvSetCaptureProperty(self.capture, hg.CV_CAP_PROP_FRAME_HEIGHT,
                                self.height)
Example #40
File: __init__.py Project: aoloe/shoebot
 def __init__(self, cam=0, width=None, height=None):
     self.path = cam
     self.video = hg.cvCreateCameraCapture(self.path)
     if width:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH, width)
     if height:
         hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT, height)
           
     def release_camera(camera=self.video):
         print camera # this for debugging, do not leave it!!!     
         print dir(camera) #same as above 
         opencv.cvReleaseCapture(camera)
     _ctx.drawing_closed = release_camera                
Example #41
File: webcam.py Project: Ademan/Cura
	def __init__(self):
		self._cam = None
		self._overlayImage = toolbarUtil.getBitmapImage("cura-overlay.png")
		self._overlayUltimaker = toolbarUtil.getBitmapImage("ultimaker-overlay.png")
		if cv != None:
			self._cam = highgui.cvCreateCameraCapture(-1)
		elif win32vidcap != None:
			try:
				self._cam = win32vidcap.new_Dev(0, False)
			except:
				pass
		
		self._doTimelaps = False
		self._bitmap = None
Example #42
def main(): # ctrl+c to end
    global h,s,v,h2,v2,s2,d,e
    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)
    
    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break
            
        thresh = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image,image,cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h,s,v,0), (h2,s2,v2,0), thresh)
        result = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,3)
        cv.cvSetZero(result)
        
        cv.cvOr(image,image,result,thresh)
        for i in range(1,e):
            cv.cvErode(result,result)
        for i in range(1,d):
            cv.cvDilate(result,result)
            
        # floodfill objects back in, allowing threshold differences outwards
        
        t2 = time.clock()
        if t2 > tdraw+0.3:
            t = t2-t1
            tdraw=t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0,25), font, (255,255,255))
        t1 = t2
        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
Example #43
File: webcam.py Project: allmighty/Cura
	def __init__(self):
		self._cam = None
		self._overlayImage = wx.Bitmap(getPathForImage('cura-overlay.png'))
		self._overlayUltimaker = wx.Bitmap(getPathForImage('ultimaker-overlay.png'))
		if cv != None:
			self._cam = highgui.cvCreateCameraCapture(-1)
		elif win32vidcap != None:
			try:
				self._cam = win32vidcap.new_Dev(0, False)
			except:
				pass

		self._doTimelaps = False
		self._bitmap = None
Example #44
def main(argv):
    parser = argparse.ArgumentParser(description="Sends images from camera to server")
    parser.add_argument("id", help="Identifier for this capturer. Limited to 8 chars")
    parser.add_argument("host", default="localhost", nargs="?", help="Server to send images to")
    parser.add_argument("-p", "--port", default=8888, type=int, help="Server port to send images")
    parser.add_argument("-i", "--interval", default=3.0, type=float, help="Number of seconds between image captures")
    args = parser.parse_args()

    # Setting stuff
    conn_info = (args.host, args.port)
    img_path = os.path.join("/tmp", "temp.jpg")

    # Make ID exactly 8 characters
    id = args.id[0:8]
    id += " " * (8 - len(id))

    print "Capturer", id, "sending to", args.host + ":" + str(args.port)

    # Open camera and start taking pictures
    camera = highgui.cvCreateCameraCapture(0)
    while True:
        # Get image and save as JPG
        img = get_image(camera)
        img.save(img_path, "JPEG")

        # Send to server
        try:
            print ("Sending image")
            sock = socket.create_connection(conn_info, timeout=1)

            sock.send(id)
            with open(img_path, "rb") as f:
                while True:
                    data = f.read(1024)
                    if data:
                        sock.send(data)
                    else:
                        break

            sock.close()
        except socket.error as e:
            # Try again later, assume that the server is just down
            # but that the user gave us the right address
            print (e)

        # Don't overload ourselves here
        time.sleep(args.interval)

    return 0
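The client above writes an 8-character identifier followed by the raw JPEG bytes and then closes the socket. A minimal matching receiver, sketched here purely for illustration (it is not part of the original project), could look like:

import socket

def receive_one_image(port=8888, out_path='received.jpg'):
    # Accept a single connection, read the fixed-width 8-character id,
    # then stream the remaining bytes (the JPEG) to disk until the sender closes.
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('', port))
    srv.listen(1)
    conn, addr = srv.accept()
    capturer_id = conn.recv(8)
    with open(out_path, 'wb') as f:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            f.write(data)
    conn.close()
    srv.close()
    return capturer_id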
Example #45
    def start_capture(self, device):

        #		video_dimensions = [176, 144]
        video_dimensions = [320, 240]

        if not self.capture:

            self.capture = highgui.cvCreateCameraCapture(device)

            highgui.cvSetCaptureProperty(self.capture,
                                         highgui.CV_CAP_PROP_FRAME_WIDTH,
                                         video_dimensions[0])
            highgui.cvSetCaptureProperty(self.capture,
                                         highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                         video_dimensions[1])
Example #46
    def __init__(self):
        self._cam = None
        self._overlayImage = wx.Bitmap(getPathForImage('cura-overlay.png'))
        self._overlayUltimaker = wx.Bitmap(
            getPathForImage('ultimaker-overlay.png'))
        if cv != None:
            self._cam = highgui.cvCreateCameraCapture(-1)
        elif win32vidcap != None:
            try:
                self._cam = win32vidcap.new_Dev(0, False)
            except:
                pass

        self._doTimelapse = False
        self._bitmap = None
Example #47
    def __init__(self):
        self._cam = None
        self._overlayImage = toolbarUtil.getBitmapImage("cura-overlay.png")
        self._overlayUltimaker = toolbarUtil.getBitmapImage(
            "ultimaker-overlay.png")
        if cv != None:
            self._cam = highgui.cvCreateCameraCapture(-1)
        elif win32vidcap != None:
            try:
                self._cam = win32vidcap.new_Dev(0, False)
            except:
                pass

        self._doTimelaps = False
        self._bitmap = None
Example #48
	def __init__(self, dev, squeue, camlog):
		threading.Thread.__init__(self)
		
		self.dev = dev
		self.squeue = squeue
		self.camlog = camlog
		
		self.frame = 0
		self.reportAfter = 20 # frames
		self.quit = False
		self.success = 0
		self.lastPacket = ""
		
		self.reader = highgui.cvCreateCameraCapture(Conf.get("camnum", 0))
		self.scanner = zbar.ImageScanner()
		self.scanner.parse_config('enable')
Example #49
	def _openCam(self):
		if self._cameraList is not None and self._camId >= len(self._cameraList):
			return False
		if self._cam is not None:
			if self._activeId != self._camId:
				del self._cam
				self._cam = None
			else:
				return True

		self._activeId = self._camId
		if cv is not None:
			self._cam = highgui.cvCreateCameraCapture(self._camId)
		elif win32vidcap is not None:
			try:
				self._cam = win32vidcap.new_Dev(self._camId, False)
			except:
				pass
		return self._cam is not None
Example #50
    def __init__(self, parent):

        wx.Frame.__init__(
            self,
            parent,
            -1,
        )

        sizer = wx.BoxSizer(wx.VERTICAL)

        self.capture = gui.cvCreateCameraCapture(0)
        frame = gui.cvQueryFrame(self.capture)
        cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)

        self.SetSize((frame.width + 300, frame.height + 100))

        self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                       frame.imageData)
        self.displayPanel = wx.StaticBitmap(self, -1, bitmap=self.bmp)
        sizer.Add(self.displayPanel, 0, wx.ALL, 10)

        self.shotbutton = wx.Button(self, -1, "Shot")
        sizer.Add(self.shotbutton, -1, wx.GROW)

        self.retrybutton = wx.Button(self, -1, "Retry")
        sizer.Add(self.retrybutton, -1, wx.GROW)
        self.retrybutton.Hide()

        #events
        self.Bind(wx.EVT_BUTTON, self.onShot, self.shotbutton)
        self.Bind(wx.EVT_BUTTON, self.onRetry, self.retrybutton)
        self.Bind(wx.EVT_PAINT, self.onPaint)
        self.Bind(wx.EVT_CLOSE, self.onClose)

        self.playTimer = wx.Timer(self, self.TIMER_PLAY_ID)
        wx.EVT_TIMER(self, self.TIMER_PLAY_ID, self.onNextFrame)

        self.fps = 8
        self.SetSizer(sizer)
        sizer.Layout()
        self.startTimer()
Example #51
def render_flipped_camera():
    camera = highgui.cvCreateCameraCapture(0)

    fps = 30.0
    pygame.init()
    pygame.display.set_mode((640, 480))
    pygame.display.set_caption("WebCam Demo")
    screen = pygame.display.get_surface()

    while True:
        events = pygame.event.get()

        for event in events:
            if event.type == QUIT or event.type == KEYDOWN:
                sys.exit(0)

        im = get_image(camera)
        pg_img = pygame.image.frombuffer(im.tostring(), im.size, im.mode)
        screen.blit(pg_img, (0, 0))
        pygame.display.flip()
        pygame.time.delay(int(1000 * 1.0/fps))
Example #52
    def init_camera(self):
        # create the device
        self._device = hg.cvCreateCameraCapture(self._index)

        try:
            # try first to set resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])

            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            if not int(frame.width) == self.resolution[0]:
                raise Exception('OpenCV: Resolution not supported')

        except:
            # error while setting resolution
            # fallback on default one
            w = int(hg.cvGetCaptureProperty(self._device,
                    hg.CV_CAP_PROP_FRAME_WIDTH))
            h = int(hg.cvGetCaptureProperty(self._device,
                    hg.CV_CAP_PROP_FRAME_HEIGHT))
            frame = hg.cvQueryFrame(self._device)
            Logger.warning(
                'OpenCV: Camera resolution %s impossible! Defaulting to %s.' %
                (self.resolution, (w, h)))

            # set resolution to default one
            self._resolution = (w, h)

        # create texture !
        self._texture = Texture.create(*self._resolution)
        self._texture.flip_vertical()
        self.dispatch('on_load')

        if not self.stopped:
            self.start()
Example #53
#! /usr/bin/env python
# coding=utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import opencv
from PIL import Image
# this is important for capturing/displaying images
from opencv import highgui
from opencv import cv
import time

MPEG1VIDEO = 0x314D4950
camera = highgui.cvCreateCameraCapture(0)  # find the camera; any index from 0 to 99 usually works


class MainWindow(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self)
        self.resize(550, 550)
        self.setWindowTitle('video control')
        self.status = 0  # 0 is init status; 1 is play video; 2 is capture video
        self.image = QImage()

        # set the save path, format, and other parameters for the recorded video
        self.videowriter = highgui.cvCreateVideoWriter(
            "test.mpg", highgui.CV_FOURCC('m', 'p', 'g', '1'), 25,
            cv.cvSize(200, 200), 1)
        # path of the video to be played
        self.playcapture = highgui.cvCreateFileCapture("test.avi")
Example #54
#open sockets between remote and station
#capture frame on remote
#send frame from remote to station

#import the necessary things for Socket
import sys, socket
# import the necessary things for OpenCV
import cv
#this is important for capturing/displaying images
from opencv import highgui
import pygame
import Image
from pygame.locals import *

#capture a frame
camera = highgui.cvCreateCameraCapture(0)

def get_image():
    im = highgui.cvQueryFrame(camera)
    # Add the line below if you need it (Ubuntu 8.04+)
    #im = opencv.cvGetMat(im)
    #convert Ipl image to PIL image
    return opencv.adaptors.Ipl2PIL(im)

fps = 30.0
pygame.init()
window = pygame.display.set_mode((640,480))
pygame.display.set_caption("WebCam Demo")
screen = pygame.display.get_surface()

while True:
Example #55
 def __init__(self, visualize=False):
     threading.Thread.__init__(self)
     self.camera = highgui.cvCreateCameraCapture(-1)
     self.image = None
     self.visualize = visualize
     self.running = True
Example #56
    import sys

    try:
        # try to get the device number from the command line
        device = int(sys.argv[1])

        # got it ! so remove it from the arguments
        del sys.argv[1]
    except (IndexError, ValueError):
        # no device number on the command line, assume we want the 1st device
        device = highgui.CV_CAP_ANY

    if len(sys.argv) == 1:
        # no argument on the command line, try to use the camera
        capture = highgui.cvCreateCameraCapture(device)

        # set the wanted image size from the camera
        highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH,
                                     320)
        highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                     240)

    # capture the 1st frame to get some propertie on it
    frame = highgui.cvQueryFrame(capture)

    # get some properties of the frame
    frame_size = cv.cvGetSize(frame)

    # create some images useful later
    my_grayscale = cv.cvCreateImage(frame_size, 8, 1)
Example #57
def main(args):
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Satuation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255,
                             change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Satuation", smin, 255,
                             change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Satuation", smax, 255,
                             change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                 240)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    satuation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv, hue, satuation, value, None)

        cv.cvInRangeS(hue, hmin, hmax, hue)
        cv.cvInRangeS(satuation, smin, smax, satuation)
        cv.cvInRangeS(value, vmin, vmax, value)
        #cv.cvInRangeS(hue,0,180,hue)

        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)

        cenX, cenY = averageWhitePoints(laser)
        #print cenX,cenY
        draw_target(frame, cenX, cenY)
        #draw_target(frame,200,1)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Satuation', satuation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)

        k = highgui.cvWaitKey(10)
        if k == " ":
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            sys.exit()
 def __init__(self, device=0, size=(640, 480), mode="RGB"):
     """
     """
     self.camera = highgui.cvCreateCameraCapture(device)
     if not self.camera:
         raise ValueError("Could not open camera.  Sorry.")
Example #59
# -*- coding:utf8 -*-
import opencv
from opencv import highgui as hg

capture = hg.cvCreateCameraCapture(0)
hg.cvNamedWindow("Snapshot")

frames = []
for i in range(10):
    frame = hg.cvQueryFrame(capture)
    frames.append(opencv.cvClone(frame))
    hg.cvShowImage("Snapshot", frame)
hg.cvWaitKey(1000)

hg.cvNamedWindow("hello")
for i in range(10):
    hg.cvShowImage("hello", frames[i])
    hg.cvWaitKey(1000)
"""
import copy
dst=copy.copy(frames[1])
opencv.cvSub(frames[2], frames[1], dst)
hg.cvShowImage("Snapshot", dst)
from IPython.Shell import IPShellEmbed
IPShellEmbed()()
hg.cvWaitKey(10000)
"""