Exemple #1
0
    def webcamshot(self, filename):
        # type: (str) -> Union[str, None]
        """Take a webcam snapshot and return the saved image path (in TMP).

        Returns None when the snapshot could not be taken.
        """

        if not self.configuration["camshot"]:
            self.log.info("Skipping webcamshot.")
            return None

        base = os.path.join(gettempdir(), filename)
        self.log.info("Taking webcamshot")

        if self.is_windows():
            filepath = "{}_webcam.jpg".format(base)
            try:
                # Fall back to the second device when the first is absent.
                cam = Device(devnum=0) or Device(devnum=1)  # type: ignore
            except Exception as ex:  # pylint: disable=broad-except
                self.log.error("vidcap.Error: %s", ex)
                return None

            try:
                # Picture resolution could be tuned here, e.g.:
                # cam.setResolution(768, 576)
                cam.getImage()
                time.sleep(2)
                cam.saveSnapshot(filepath)
            except ValueError as ex:
                self.log.error(ex)
                return None
        else:
            filetype = self.configuration["camshot_filetype"]
            filepath = "{}_webcam.{}".format(base, filetype)
            cmd = self.configuration["camshot"].replace("<filepath>", filepath)
            self.runprocess(cmd, useshell=True)
            if os.path.isfile(filepath) and filetype == "ppm":
                # Convert the raw PPM capture to JPEG via ImageMagick.
                jpeg_path = "{}_webcam.jpg".format(base)
                self.runprocess(["/usr/bin/convert", filepath, jpeg_path])
                os.unlink(filepath)
                filepath = jpeg_path

        if not os.path.isfile(filepath):
            return None

        self.log.debug(filepath)
        return filepath
Exemple #2
0
class Webcam(object):
    """Grabs JPEG frames from the first VideoCapture device on a daemon
    thread and keeps the most recent frame available via get_image()."""

    def __init__(self, app, interval=0.5, log_interval=0.5):
        self.app = app

        self.interval = interval          # seconds between captures
        self.log_interval = log_interval  # seconds between frames archived to disk
        self.last_log = 0.0
        self.jpeg_data = None             # latest frame as JPEG bytes, None before first tick

        self.cam = None
        try:
            from VideoCapture import Device
            # NOTE: must initialize this from the main thread, it seems
            self.cam = Device()
            # Prime the device; the very first frame is often unusable.
            self.cam.getImage()
        except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt
            import traceback
            traceback.print_exc()
            log.warn('webcam failed to initialize')

    def get_image(self):
        """Return the most recently captured frame (JPEG bytes) or None."""
        return self.jpeg_data

    def tick(self):
        """Capture one frame, optionally archive it to disk, then sleep."""
        buf = StringIO()  # renamed from `file`, which shadowed the builtin
        image = self.cam.getImage(1)
        image.save(buf, 'jpeg')
        self.jpeg_data = buf.getvalue()

        now = time.time()
        if self.log_interval and self.last_log + self.log_interval < now:
            self.last_log = now
            fname = 'logs/webcam/cam.%s.jpg' % \
                time.strftime('%Y-%m-%d.%H.%M.%S')
            # Context manager guarantees the handle is closed (the original
            # leaked the file object returned by open()).
            with open(fname, 'wb') as fobj:
                fobj.write(self.jpeg_data)

        time.sleep(self.interval)

    def start(self):
        """Spawn the capture loop on a daemon thread; no-op without a camera."""
        if not self.cam:
            return

        def loop():
            while True:
                self.tick()

        thread = threading.Thread(target=loop)
        thread.daemon = True
        thread.start()
Exemple #3
0
class Webcam(object):
    """Continuously captures webcam frames and exposes the latest JPEG."""

    def __init__(self, app, interval=0.5, log_interval=0.5):
        self.app = app
        self.interval = interval
        self.log_interval = log_interval
        self.last_log = 0.0
        self.jpeg_data = None
        self.cam = None
        try:
            from VideoCapture import Device
            # NOTE: must initialize this from the main thread, it seems
            self.cam = Device()
            # Prime the camera with a throwaway frame.
            self.cam.getImage()
        except:
            import traceback
            traceback.print_exc()
            log.warn('webcam failed to initialize')

    def get_image(self):
        """Latest captured frame as JPEG bytes (None before the first tick)."""
        return self.jpeg_data

    def tick(self):
        """Grab one frame, store it, periodically archive it, then sleep."""
        out = StringIO()
        self.cam.getImage(1).save(out, 'jpeg')
        self.jpeg_data = out.getvalue()

        now = time.time()
        if self.log_interval and self.last_log + self.log_interval < now:
            self.last_log = now
            stamp = time.strftime('%Y-%m-%d.%H.%M.%S')
            fname = 'logs/webcam/cam.%s.jpg' % stamp
            open(fname, 'wb').write(self.jpeg_data)

        time.sleep(self.interval)

    def start(self):
        """Run tick() forever on a daemon thread (no-op without a camera)."""
        if not self.cam:
            return

        def loop():
            while 1:
                self.tick()

        worker = threading.Thread(target=loop)
        worker.daemon = True
        worker.start()
Exemple #4
0
def getImage():
    """Acquire a frame (webcam or test file) and adaptively threshold it."""
    use_camera = testImg == False
    if use_camera:
        # Live capture from the default webcam.
        img = array(Device().getImage())
    else:
        # Grayscale test image, rotated to the expected orientation.
        img = rotateImage(cv2.imread('16.jpg', 0), 270)

    # Binarize with a Gaussian adaptive threshold (block size 11, C = 3).
    img = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 3)

    if debugImage == True:
        pyplot.imshow(img)
        pyplot.show()

    return img
Exemple #5
0
def take_picture():
    """Capture a webcam image, save it under SNAPSHOT_DIR, optionally mail it."""
    # Start camera device
    cam = Device()

    # Some machines take longer to initialize the camera, so black images
    # can occur if the module is loaded too quickly.  Keep taking images
    # until the frame is no longer completely black.
    while True:
        img = cam.getImage()
        if not is_black(img):
            break

    # Count existing snapshots to build a unique file name.
    # BUG FIX: the pattern was '.jpg$' where the unescaped '.' matched any
    # character (e.g. 'ajpg'); escape it so only real .jpg files count.
    image_count = len([fi for fi in os.listdir(SNAPSHOT_DIR)
                       if re.search(r'\.jpg$', fi)])
    if not image_count:
        location = '\\'.join([SNAPSHOT_DIR, 'lolock']) + '.jpg'
    else:
        location = '\\'.join([SNAPSHOT_DIR, 'lolock.' + str(image_count + 1)]) + '.jpg'

    # Save image to disk
    img.save(location)

    # Unload device
    del cam

    # Send email if enabled
    if SEND_MAIL:
        send_email(location)
class VCCamera():
    """Region-of-interest capture from a VideoCapture device."""

    ## __init__
    #
    # @param camera_num (Optional) The camera number, defaults to 0.
    # @param xmin (Optional) The x position of the start of the ROI, defaults to 0.
    # @param xmax (Optional) The x position of the end of the ROI, defaults to 150.
    # @param ymin (Optional) The y position of the start of the ROI, defaults to 0.
    # @param ymax (Optional) The y position of the end of the ROI, defaults to 300.
    #
    def __init__(self, camera_num = 0, xmin = 0, xmax = 150, ymin = 0, ymax = 300):
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax
        self.cam = Device(devnum = camera_num)

    ## capture
    #
    # @return The current camera image, cropped to the ROI and averaged over
    #         the color channels, as a numpy array.
    #
    def capture(self):
        # Reading the raw buffer is much faster than getImage() +
        # numpy.array(image.getdata()); the slower variant previously lived
        # behind a dead `if 0:` branch and has been removed.
        buf = self.cam.getBuffer()
        x_size = buf[1]
        y_size = buf[2]
        # frombuffer replaces the deprecated numpy.fromstring (removed for
        # binary input in modern NumPy).  The read-only view is fine here
        # because the slice below is never written to.
        data = numpy.frombuffer(buf[0], numpy.uint8).reshape(y_size, x_size, 3)
        data = data[self.xmin:self.xmax, self.ymin:self.ymax]
        data = numpy.average(data, 2)
        return data
Exemple #7
0
def take_picture():
    """Capture a webcam image, save it under SNAPSHOT_DIR, optionally mail it."""
    # Start camera device
    cam = Device()

    # Some machines take longer to initialize the camera and so
    # black images can occur if the module is loaded too quickly.
    # We will keep taking images until the image is not completely
    # black.
    while True:
        img = cam.getImage()
        if not is_black(img):
            break

    # Get a quick image count in the directory.
    # BUG FIX: the pattern was '.jpg$' where the unescaped '.' matched any
    # character (e.g. 'ajpg'); escape it so only real .jpg files count.
    image_count = len(
        [fi for fi in os.listdir(SNAPSHOT_DIR) if re.search(r'\.jpg$', fi)])
    if not image_count:
        location = '\\'.join([SNAPSHOT_DIR, 'lolock']) + '.jpg'
    else:
        location = '\\'.join([SNAPSHOT_DIR, 'lolock.' + str(image_count + 1)
                              ]) + '.jpg'

    # Save image to disk
    img.save(location)

    # Unload device
    del cam

    # Send email if enabled
    if SEND_MAIL:
        send_email(location)
Exemple #8
0
def getImage():
    """Acquire a frame (webcam or test file) and adaptively threshold it."""
    if testImg == False:
        # Live capture from the default webcam.
        img = array(Device().getImage())
    else:
        # Grayscale test image, rotated to the expected orientation.
        img = rotateImage(cv2.imread('16.jpg', 0), 270)

    # Binarize with a Gaussian adaptive threshold (block size 11, C = 3).
    img = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 3)

    if debugImage == True:
        pyplot.imshow(img)
        pyplot.show()

    return img
Exemple #9
0
class Camera(object):
    """Displays frames from a VideoCapture device on a pygame surface."""

    def __init__(self, root, devnum=0, siz=(640, 480), position=(0, 0),
                 anchor='lefttop', layer=0, visible=True):
        self.cam = Device(devnum)
        self.root = root
        self.siz = siz
        self.position = position
        self.anchor = anchor
        self.layer = layer
        self.visible = visible

        # Enhancement factors (currently not applied anywhere; kept for parity
        # with the commented-out ImageEnhance calls below).
        self.brightness = 1.0
        self.contrast = 1.0

        self.reset()

    def reset(self):
        """Recompute the blit position from size/position/anchor."""
        self.blitp = blit_pos1(self.siz, self.position, self.anchor)

    def show(self):
        """Grab a frame and blit it onto the root surface when visible."""
        if not self.visible:
            return
        frame = self.cam.getImage()
        # frame = ImageEnhance.Brightness(frame).enhance(self.brightness)
        # frame = ImageEnhance.Contrast(frame).enhance(self.contrast)
        surface = pygame.image.fromstring(frame.tostring(), (640, 480), "RGB")
        if self.siz[0] != 640 or self.siz[1] != 480:
            surface = pygame.transform.scale(surface, self.siz)
        self.root.blit(surface, self.blitp)
Exemple #10
0
class PyCamera:
    """Thin wrapper around a VideoCapture Device exposing frames in several
    formats (raw bytes, numpy pixels, pygame surface)."""

    def __init__(self, device_num=0):
        self.cam = Device(device_num)
        # (width, height) as reported by the raw capture buffer.
        self.resolution = self.cam.getBuffer()[1:3]

    @property
    def get(self):
        '''
        return: {'buffer':**,'pixels':**,'pg_surface':**,'resolution':**}
        '''
        # getImage() is preferred over getBuffer(): it noticeably improves
        # image quality and automatically handles flipping etc.
        # buffer,width,height = self.cam.getBuffer()

        image = self.cam.getImage()
        buffer = image.tostring()
        sur = pygame.image.frombuffer(buffer, self.resolution, 'RGB')
        # np.frombuffer replaces the deprecated np.fromstring.  NOTE: the
        # returned array is a read-only view of `buffer`.
        pixels = np.frombuffer(buffer, dtype=np.uint8)
        return {
            'buffer': buffer,
            'pixels': pixels,
            'pg_surface': sur,
            'resolution': self.resolution
        }

    def save_to_disk(self, filename):
        """Save a snapshot straight to *filename* via the device driver."""
        self.cam.saveSnapshot(filename)
Exemple #11
0
def get_image_VC():
    """Grab a single 800x400 frame from the default VideoCapture device."""
    from VideoCapture import Device

    camera = Device()
    camera.setResolution(800, 400)
    return camera.getImage()
Exemple #12
0
    def webcamshot(self, filename):
        ''' Takes a snapshot with the webcam and returns the path to the
            saved image (in TMP). None if could not take the snapshot.
        '''

        if not self.configuration['camshot']:
            self.log.info('Skipping webcamshot.')
            return None

        base = os.path.join(gettempdir(), filename)
        self.log.info('Taking webcamshot')

        if self.os_name == 'Windows':
            filepath = '{}_webcam.jpg'.format(base)
            try:
                # Fall back to the second device when the first is absent.
                cam = Device(devnum=0) or Device(devnum=1)
            except Exception as ex:
                self.log.error('vidcap.Error: %s', ex)
                return None
            try:
                # Resolution could be tuned here, e.g.:
                # cam.setResolution(768, 576)
                cam.getImage()
                time.sleep(1)
                cam.saveSnapshot(filepath)
            except ValueError as ex:
                self.log.error(ex)
                return None
        else:
            filetype = self.configuration['camshot_filetype']
            filepath = '{}_webcam.{}'.format(base, filetype)
            cmd = self.configuration['camshot'].replace('<filepath>', filepath)
            self.runprocess(cmd, useshell=True)
            if os.path.isfile(filepath) and filetype == 'ppm':
                # Convert the raw PPM capture to JPEG via ImageMagick.
                jpeg_path = '{}_webcam.jpg'.format(base)
                self.runprocess(['/usr/bin/convert', filepath, jpeg_path])
                os.unlink(filepath)
                filepath = jpeg_path

        if not os.path.isfile(filepath):
            return None
        return filepath
Exemple #13
0
    def webcamshot(self, filename):
        ''' Takes a snapshot with the webcam and returns the path to the
            saved image (in TMP). None if could not take the snapshot.
        '''

        # Feature disabled in configuration: nothing to do.
        if not self.configuration['camshot']:
            self.log.info('Skipping webcamshot.')
            return None

        temp = gettempdir()
        self.log.info('Taking webcamshot')
        if self.os_name == 'Windows':
            # Windows: capture directly through the VideoCapture Device API.
            filepath = '{}_webcam.jpg'.format(os.path.join(temp, filename))
            try:
                cam = Device(devnum=0)
                # Fall back to the second device when the first is missing.
                if not cam:
                    cam = Device(devnum=1)
            except Exception as ex:
                self.log.error('vidcap.Error: %s', ex)
                return None
            try:
                # Here you can modify the picture resolution
                # cam.setResolution(768, 576)
                # First frame warms the sensor; the saved shot follows.
                cam.getImage()
                time.sleep(1)
                cam.saveSnapshot(filepath)
            except ValueError as ex:
                self.log.error(ex)
                return None
        else:
            # Non-Windows: delegate to the external command from the config,
            # substituting the output path for the <filepath> placeholder.
            filepath = '{}_webcam.{}'.format(
                os.path.join(temp, filename),
                self.configuration['camshot_filetype'])
            cmd = self.configuration['camshot'].replace('<filepath>', filepath)
            self.runprocess(cmd, useshell=True)
            if os.path.isfile(filepath):
                if self.configuration['camshot_filetype'] == 'ppm':
                    # Convert the raw PPM capture to JPEG via ImageMagick.
                    full_path_ = os.path.join(temp, filename)
                    new_path_ = '{}_webcam.jpg'.format(full_path_)
                    self.runprocess(['/usr/bin/convert', filepath, new_path_])
                    os.unlink(filepath)
                    filepath = new_path_
        # The capture may have silently failed; only return existing files.
        if not os.path.isfile(filepath):
            return None
        return filepath
Exemple #14
0
def webcamshot(filename):
    '''
        Takes a snapshot with the webcam and returns the path to the
        saved image (in TMP). None if could not take the snapshot.
    '''

    if not CONFIG['camshot']:
        LOG.info('Skipping webcamshot.')
        return None

    temp = tempfile.gettempdir()
    LOG.info('Taking webcamshot')

    if OS == 'Windows':
        filepath = '{0}{1}{2}_webcam.jpg'.format(temp, SEP, filename)
        try:
            # Fall back to the second device when the first is absent.
            cam = Device(devnum=0) or Device(devnum=1)
        except Exception as ex:
            LOG.error('vidcap.Error: %s', ex)
            return None
        try:
            # Resolution could be adjusted here:
            #cam.setResolution(768, 576)
            cam.getImage()
            time.sleep(1)
            cam.saveSnapshot(filepath)
        except ValueError as ex:
            LOG.error(ex)
            return None
    else:
        filetype = CONFIG['camshot_filetype']
        filepath = '{0}{1}{2}_webcam.{3}'.format(temp, SEP, filename, filetype)
        runprocess(CONFIG['camshot'].replace('<filepath>', filepath),
                   useshell=True)
        if os.path.isfile(filepath) and filetype == 'ppm':
            # Convert the PPM capture to JPEG with ImageMagick.
            new_filepath = '{0}{1}{2}_webcam.jpg'.format(temp, SEP, filename)
            runprocess(['/usr/bin/convert', filepath, new_filepath])
            os.unlink(filepath)
            filepath = new_filepath

    if not os.path.isfile(filepath):
        return None
    return filepath
class ImageCapture():
    '''
    Image capture class wrapping a VideoCapture device.
    '''
    # NOTE(review): Python 2 code (uses a `print` statement in release()).
    # Raised when no capture device is available.
    NoDeviceError = vidcap.error
    def __init__(self, devnum=0):
        '''
        Initialize the capture device.
        @param devnum: device number
        '''
        self.cam = Device(devnum)
        # TODO: guard against driver hangs
#         self.cam.displayCaptureFilterProperties()
#         self.cam.displayCapturePinProperties()
        # Grab one frame just to learn the frame dimensions.
        cvimg = np.array(self.cam.getImage().convert('RGB'))
        self.size = (cvimg.shape[1],cvimg.shape[0])

    def get_frame_size(self):
        '''
        Return the frame size.
        @return: (width, height) of a captured frame
        '''
        return self.size

    def get_frame(self):
        '''
        Return the current frame.
        @return: current frame as a numpy array in BGR channel order
        '''
        im_pil = self.cam.getImage().convert('RGB')
        cvimg = np.array(im_pil)
        return cvimg[:,:,::-1] # OpenCV's default channel order is BGR

    def release(self):
        '''
        Release the camera.
        '''
        print 'Release Camera'
        del self.cam
Exemple #16
0
def webcamshot():
	''' Takes a snapshot with the webcam and returns the path to the 
	    saved image (in TMP). None if could not take the snapshot. 
	'''
	# Feature disabled in configuration: nothing to do.
	if not CONFIG['Commands']['camshot']:
		LOG.info('Skipping webcamshot.')
		return None

	LOG.info('Taking webcamshot')
	# Any failure (device, subprocess, file IO) is logged and maps to None.
	try:
		if OS == 'Windows':
			# Windows: capture through the VideoCapture Device API.
			filepath = '%s%c%s_webcam.jpg' % (TMP, SEP, FILENAME)
			from VideoCapture import Device
			cam = Device(devnum=0)
			# Fall back to the second device when the first is missing.
			if not cam:
				cam = Device(devnum=1)
				if not cam:
					LOG.error('Error while taking webcamshot: no device available.')
					return None
			#cam.setResolution(768, 576) # Here you can modify the picture resolution
			# First frame warms the sensor; the saved shot follows.
			cam.getImage()
			time.sleep(1)
			cam.saveSnapshot(filepath)
		else:
			# Non-Windows: run the external command from the config with the
			# output path substituted for the <filepath> placeholder.
			filepath = '%s%c%s_webcam.%s' % (TMP, SEP, FILENAME, CONFIG['Commands']['camshot_filetype'])
			cmd = CONFIG['Commands']['camshot'].replace('<filepath>', filepath)
			runprocess(cmd, useshell=True)
			if os.path.isfile(filepath):
				if CONFIG['Commands']['camshot_filetype'] == 'ppm':
					# Convert the PPM capture to JPEG via ImageMagick.
					new_filepath = '%s%c%s_webcam.jpg' % (TMP, SEP, FILENAME)
					runprocess(['/usr/bin/convert', filepath, new_filepath])
					os.unlink(filepath)
					filepath = new_filepath
	except Exception as ex:
		LOG.error(ex)
		return None
	# The capture may have silently failed; only return existing files.
	if not os.path.isfile(filepath):
		return None
	return filepath
Exemple #17
0
class Capture(object):
    """Provides access to video devices."""

    def __init__(self, index = 0):
        """Opens a video device for capturing.
        
        index - The number of the device to open.
        Throws an exception if the device can't be opened or if the given index
        is out of range.
        """
        
        object.__init__(self)
        # BUG FIX: `index` used to be ignored (Device() always opened device
        # 0); pass it through so other devices can actually be opened.
        self.dev = Device(index)


    def grabFrame(self):
        """Returns a snapshot from the device as PIL.Image.Image object."""
        
        return self.dev.getImage()


    def grabRawFrame(self):
        """Returns a snapshot from this device as raw pixel data.
        
        This function returns a 4-tuple consisting of the raw pixel data as string,
        the width and height of the snapshot and it's orientation, which is either
        1 (top-to-bottom) or -1 (bottom-to-top).
        """
        
        # BUG FIX: getBuffer was referenced without calling it, so this
        # method raised TypeError (bound method + tuple) instead of
        # returning the documented 4-tuple.
        return self.dev.getBuffer() + (-1,)


    @staticmethod
    def enumerateDevices():
        """Lists all available video devices.
        
        Returns a tuple of 2-tuples, which contain the integral index
        and the display name (if available) of the video device.
        """
        
        devices = ()
        i = 0
        while True:
            try:
                d = Device(i)
                devices += ((i, d.getDisplayName()),)
                i += 1
            except Exception:
                # Device creation failing marks the end of the enumeration.
                break
        return devices
Exemple #18
0
class UsbCamera(object):
    """Pygame widget-style wrapper showing frames from a USB camera.

    NOTE(review): Python 2 code (`print` statement, time.clock()).
    """
    # Default display/configuration parameters, overridable via kwargs.
    size = (640, 480)
    position = (0, 0)
    anchor = 'center'
    device_num = 0
    layer = 0
    visible = False
    update = True
    Fps = 20

    # Attribute names accepted by update_parm() from keyword arguments.
    parmkeys = [
        'size', 'position', 'anchor', 'device_num', 'layer', 'visible', 'Fps',
        'update'
    ]
    sur = None

    def __init__(self, root, **argw):
        self.root = root
        self.update_parm(**argw)
        self.initcamera()
        # NOTE(review): under Python 2 this is integer division, so for
        # Fps > 1 the frame period becomes 0 — confirm a float was intended.
        self.pt = 1 / self.Fps
        self.clk = time.clock()
        self.reset(**argw)
        print '============================'

    def initcamera(self):
        # Open the VideoCapture device selected by device_num.
        self.cam = Device(self.device_num)

    def capture(self):
        # Convert the PIL frame to a pygame surface (fixed 640x480 source).
        return pygame.image.fromstring(self.cam.getImage().tostring(),
                                       (640, 480), 'RGB').convert()

    def update_parm(self, **argw):
        # Copy each keyword argument onto the matching instance attribute.
        for item in argw:
            exec('self.%s = argw[item]' % (item))

    def reset(self, **argw):
        # Re-apply parameters and recompute the blit position.
        self.update_parm(**argw)
        self.blitp = blit_pos1(self.size, self.position, self.anchor)

    def show(self):
        # Blit the latest frame, re-capturing at most once per frame period.
        if self.visible:
            if self.update and time.clock() - self.clk > self.pt:
                self.sur = self.capture()
                if self.size[0] != 640 or self.size[1] != 480:
                    self.sur = pygame.transform.scale(self.sur, self.size)
                self.clk = time.clock()
            if self.sur != None:
                self.root.blit(self.sur, self.blitp)
Exemple #19
0
def do_both():
    # Records a combined screen-grab + webcam frame once per waitTime until
    # the user presses enter (detected via input_thread appending to L).
    # NOTE(review): Python 2 code (`print` statements).
    L = []
    i = 0
    cam = Device()

    # Background thread appends to L when the user presses enter.
    thread.start_new_thread(input_thread, (L,))
    photo = cam.getImage(3, 1, "tl")
    photoWidth, photoHeight = photo.size
    # Canvas wide enough for the screen grab plus the webcam frame.
    combinedImage = Image.new("RGB", (width + photoWidth, height), color=0)
    startTime = time.time()
    print "Press enter to stop."
    while 1:
        time.sleep(waitTime)
        i = i + 1
        istr = str(i)

        # Screen grab on the left, webcam frame on the right.
        combinedImage.paste(ImageGrab.grab(), (0, 0))
        photo = cam.getImage(3, 1, "tl")
        combinedImage.paste(photo, (width, 0))
        draw = ImageDraw.Draw(combinedImage)
        font = ImageFont.truetype("arialbd.ttf", 40)
        # Black box below the webcam frame for the timestamp overlay.
        draw.rectangle([(width, photoHeight), (width + photoWidth, photoHeight + height - photoHeight)], fill="black",
                       outline="red")
        draw.text((width + 10, photoHeight + 10), datetime.now().strftime("%A, %d. %B %Y %I:%M%p"), (255, 255, 255),
                  font=font)
        # Elapsed time rendered as H:MM:SS.
        elapsed = time.time() - startTime
        m, s = divmod(elapsed, 60)
        h, m = divmod(m, 60)
        formattedElapsed = "%d:%02d:%02d" % (h, m, s)
        draw.text((width + 10, photoHeight + 60), formattedElapsed, (255, 255, 255), font=font)
        combinedImage.save("imgs/" + istr + ".png")
        # combinedImage.save("imgs/" + timestamp() + ".png")
        print "saved" + istr
        if L:
            # User pressed enter: restart main and leave the loop.
            main()
            break
Exemple #20
0
class Capture(object):
    """Provides access to video devices."""
    def __init__(self, index=0):
        """Opens a video device for capturing.
        
        index - The number of the device to open.
        Throws an exception if the device can't be opened or if the given index
        is out of range.
        """

        object.__init__(self)
        # BUG FIX: `index` used to be ignored (Device() always opened device
        # 0); pass it through so other devices can actually be opened.
        self.dev = Device(index)

    def grabFrame(self):
        """Returns a snapshot from the device as PIL.Image.Image object."""

        return self.dev.getImage()

    def grabRawFrame(self):
        """Returns a snapshot from this device as raw pixel data.
        
        This function returns a 4-tuple consisting of the raw pixel data as string,
        the width and height of the snapshot and it's orientation, which is either
        1 (top-to-bottom) or -1 (bottom-to-top).
        """

        # BUG FIX: getBuffer was referenced without calling it, so this
        # method raised TypeError (bound method + tuple) instead of
        # returning the documented 4-tuple.
        return self.dev.getBuffer() + (-1, )

    @staticmethod
    def enumerateDevices():
        """Lists all available video devices.
        
        Returns a tuple of 2-tuples, which contain the integral index
        and the display name (if available) of the video device.
        """

        devices = ()
        i = 0
        while True:
            try:
                d = Device(i)
                devices += ((i, d.getDisplayName()), )
                i += 1
            except Exception:
                # Device creation failing marks the end of the enumeration.
                break
        return devices
Exemple #21
0
def get_webcamimg(correcttime):
    filename = r"F:/learn/watchcomputer/webcam" + correcttime + ".jpg"
    cam = Device()
    
    res = (640,480)
    cam = Device()
    cam.setResolution(res[0],res[1])
    
    brightness = 1.0
    contrast = 1.0
    
    camshot = ImageEnhance.Brightness(cam.getImage()).enhance(brightness)
    camshot = ImageEnhance.Contrast(camshot).enhance(contrast)
    time.sleep(10)
    cam.saveSnapshot(filename,timestamp=3, boldfont=1, quality=80)
    print "webcam img saved ok!!!!!!"
    return filename
class Server():
    # Captures webcam frames, displays them locally via pygame and streams
    # each frame to a receiver through an I2Isender at ~50 ticks/second.
    def __init__(self):
        pygame.init()
        # Device 0, displayed in a 640x480 24-bit window.
        self.cam = Device(0)
        self.scr = pygame.display.set_mode((640, 480), 0, 24)
        self.sender = I2Isender()

    def start(self):
        # Blocking capture/display/send loop, throttled to 50 FPS.
        tk = pygame.time.Clock()
        while True:
            # Drop pending events so the window stays responsive.
            pygame.event.clear()
            buf = self.cam.getImage().tostring()
            sur = pygame.image.fromstring(buf, (640, 480), "RGB")
            self.scr.blit(sur.copy(), (0, 0))
            # Stream the frame to the local receiver on port 9000.
            self.sender.send(sur, ('127.0.0.1', 9000))
            pygame.display.flip()
            tk.tick(50)
Exemple #23
0
def camera():
    # Interactive webcam viewer: shows the live feed in a pygame window,
    # 's' saves a timestamped snapshot, 'q' quits.
    res = (640,480)
    pygame.init()
    cam = Device()
    cam.setResolution(res[0],res[1])
    screen = pygame.display.set_mode((640,480))
    pygame.display.set_caption('Webcam')
    pygame.font.init()
    font = pygame.font.SysFont("Courier",11)
     
    def disp(phrase,loc):
        # Draw text with a 1px dark drop shadow for readability.
        s = font.render(phrase, True, (200,200,200))
        sh = font.render(phrase, True, (50,50,50))
        screen.blit(sh, (loc[0]+1,loc[1]+1))
        screen.blit(s, loc)
     
    brightness = 1.0
    contrast = 1.0
    shots = 0
     
    while 1:
        # Capture a frame and apply the (currently fixed) enhancements.
        camshot = ImageEnhance.Brightness(cam.getImage()).enhance(brightness)
        camshot = ImageEnhance.Contrast(camshot).enhance(contrast)
        for event in pygame.event.get():
            if event.type == pygame.QUIT: sys.exit()
        keyinput = pygame.key.get_pressed()
        #if keyinput[K_1]: brightness -= .1
        #if keyinput[K_2]: brightness += .1
        #if keyinput[K_3]: contrast -= .1
        #if keyinput[K_4]: contrast += .1
        if keyinput[K_q]: sys.exit()
        #if keyinput[K_w]: cam.displayCaptureFilterProperties()
        if keyinput[K_s]:
            # Snapshot named by the current epoch time.
            filename = str(time.time()) + ".jpg"
            cam.saveSnapshot(filename, quality=80, timestamp=0)
            shots += 1
        # Blit the PIL frame into the pygame window.
        camshot = pygame.image.frombuffer(camshot.tostring(), res, "RGB")
        screen.blit(camshot, (0,0))
        #disp("S:" + str(shots), (10,4))
        #disp("B:" + str(brightness), (10,16))
        #disp("C:" + str(contrast), (10,28))
        disp("press s to save the shot",(10,40))
        disp("press q to exit the camera",(10,52))
        pygame.display.flip()
Exemple #24
0
class CameraManager(threading.Thread):
	# Streams JPEG webcam frames over TCP (port 23457) to the first client
	# that connects, as length-prefixed packets, at a fixed frame rate.
	# NOTE(review): Python 2 code (`print` statement).
	def __init__(self, size, fps):
		threading.Thread.__init__(self)
		self.size = size
		self.camera = Device(0)
		self.should_stop = threading.Event()
		# Frame period in milliseconds, consumed by wait() in run().
		self.freq = int(1.0 / float(fps) * 1000.0)
		self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.client = None
		#self.camera.displayCapturePinProperties()
		
	def stop(self):
		# Signal run() to exit after its current frame.
		self.should_stop.set()
		
	def run(self):
		self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		self.sock.bind(("", 23457))
		self.sock.listen(5)
		
		# Only the first accepted client is served.
		while 1:
			client, addr = self.sock.accept()
			log("Camera connection")
			self.client = client
			break
			
		fpsm = FPSMeter()
		while 1:
			if self.should_stop.isSet():
				return
			print fpsm.tick()
			wait(self.freq)
			# Length-prefixed JPEG packet: 4-byte big-endian size + data.
			data = self.get_image()
			data_len = len(data)
			self.client.send(pack("!I%ds" % data_len, data_len, data))
			
		
	def get_image(self):
		# Capture one frame, resize it and encode it as a JPEG byte string.
		io = StringIO.StringIO()
		image = self.camera.getImage().resize(self.size)
		image.save(io, "JPEG", quality = 75, optimize = True)
		data = io.getvalue()
		io.close()
		return data
Exemple #25
0
def collect(bits=50):
    """Sample webcam pixels on a coarse grid and return a list of parity bits.

    Each sampled pixel contributes 0 when its mean RGB value is even, else 1.
    The webcam noise makes the output usable as a cheap entropy source.
    """
    camera = Device()

    img = camera.getImage()
    pix = img.load()
    width, height = img.size[0], img.size[1]

    # Grid side length chosen so that roughly `bits` samples are taken.
    side = int(round(math.sqrt(bits)))
    w_step = int(round(width / side))
    h_step = int(round(height / side))

    samples = []
    for col in range(0, width, w_step):
        for row in range(0, height, h_step):
            pixel = pix[col, row]
            mean = (pixel[0] + pixel[1] + pixel[2]) / 3
            samples.append(1 if mean % 2 else 0)
    return samples[:bits]
Exemple #26
0
class MTVideoCaptureCamera(MTCameraBase):
    # Camera backend feeding VideoCapture frames into an OpenGL texture.
    # NOTE(review): Python 2 code (`except Exception, e` syntax).
    def init_camera(self):
        # Lazily open the capture device on first use.
        if not self.capture_device:
            self.capture_device = Device()
            #self.capture_device.setResolution(self.resolution[0], self.resolution[1])

        self.frame_texture  = Texture.create(*self.resolution)
        # Flip the texture vertically via the tex coords.
        self.frame_texture.tex_coords = (1,1,0,  0,1,0,  0,0,0, 1,0,0)

    def capture_frame(self):
        # Grab one frame and push its raw RGB bytes to the GPU texture.
        try:
            self.frame  = self.capture_device.getImage()

            #pymt_logger.info("Format:" + )
            self.format = GL_RGB

            self.buffer = self.frame.tostring();
            self.copy_buffer_to_gpu()

        except Exception, e:
            pymt_logger.exception("Couldn't get Image from Camera!"+ str(e))     
Exemple #27
0
class ScannerModel:
    """Webcam-backed scanner.

    scan() loops over camera frames, feeding each one to a callback, until a
    code is decoded or stop() is called; stop() ends the loop cooperatively.
    """

    def __init__(self):
        self._graph = None
        self._camera_callback = None
        self._camera = None
        self._stop_scan = False

    async def scan(self,
                   camera_callback,
                   crop_x=None,
                   crop_y=None) -> Optional[str]:
        """Capture frames until a code is decoded; return it (or None)."""
        self._camera_callback = camera_callback
        self._camera = Device(devnum=0, showVideoWindow=0)
        self._stop_scan = False

        decoded = None
        try:
            while not self._stop_scan:
                frame = crop(self._camera.getImage(), crop_x, crop_y)
                self._camera_callback(frame)
                decoded = decode(frame)
                if decoded:
                    break
                # Yield to the event loop between frames.
                await asyncio.sleep(0.1)
        finally:
            # Release the capture device even on cancellation or errors.
            del self._camera

        return decoded

    def stop(self):
        """Request the scan loop to terminate."""
        self._stop_scan = True
Exemple #28
0
class VCCamera():
    """Region-of-interest capture from a VideoCapture device."""

    ## __init__
    #
    # @param camera_num (Optional) The camera number, defaults to 0.
    # @param xmin (Optional) The x position of the start of the ROI, defaults to 0.
    # @param xmax (Optional) The x position of the end of the ROI, defaults to 150.
    # @param ymin (Optional) The y position of the start of the ROI, defaults to 0.
    # @param ymax (Optional) The y position of the end of the ROI, defaults to 300.
    #
    def __init__(self, camera_num=0, xmin=0, xmax=150, ymin=0, ymax=300):
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax
        self.cam = Device(devnum=camera_num)

    ## capture
    #
    # @return The current camera image, cropped to the ROI and averaged over
    #         the color channels, as a numpy array.
    #
    def capture(self):
        # Reading the raw buffer is much faster than getImage() +
        # numpy.array(image.getdata()); the slower variant previously lived
        # behind a dead `if 0:` branch and has been removed.
        buf = self.cam.getBuffer()
        x_size = buf[1]
        y_size = buf[2]
        # frombuffer replaces the deprecated numpy.fromstring (removed for
        # binary input in modern NumPy).  The read-only view is fine here
        # because the slice below is never written to.
        data = numpy.frombuffer(buf[0],
                                numpy.uint8).reshape(y_size, x_size, 3)
        data = data[self.xmin:self.xmax, self.ymin:self.ymax]
        data = numpy.average(data, 2)
        return data
Exemple #29
0
def main():
    """Motion-detection demo (Python 2): compare each webcam frame against
    an initial reference image and save a diff snapshot when enough pixels
    changed.

    NOTE(review): relies on module-level helpers not visible in this
    excerpt (adjustCamera, compare_images, totalChange, ANY_2RGB, Device).
    """

    cam = Device()

    print "Getting initial image, and adjusting camera..."

    # Let the camera warm up / auto-adjust before taking the reference.
    initialImage = adjustCamera( cam )
    initialImage.save('initialImage.jpg')

    print "initial image saved and webcam adjusted..."


    recordedImages = [initialImage]
    end            = False

    while True:
        time.sleep(.2)

        newImage = cam.getImage()
        # Pixels differing from the reference by more than 70 are marked.
        compared = compare_images( initialImage, newImage, 70, ANY_2RGB )

        amtPxChanged = totalChange( compared )
        # More than 20000 changed pixels counts as motion.
        if amtPxChanged > 20000:
            print "Recording..."
            print amtPxChanged

            # time.sleep(.2)

            recordedImages.append( newImage )
            compared.save('recording.jpg')#' + str(imgNum) + '.jpg')
            # ``end`` is set but never acted on -- the break below is
            # commented out, so this loop runs until the process is killed.
            end = True
##        elif end:
##            break
        else:
            print "not recording..."
            compared.save('recording.jpg')
Exemple #30
0
class MotionDetector(): 
    def __init__(self):
        self._cam              = Device()
        self._detectedImages   = []
        self._currentImage     = ""#Image.open( "VSAS logo.jpg" )
        self._recording        = False
        self._defaultTimeLimit = 5 * 60 #make sure to change back to 5 min
        self._stopRecording    = False
        self._timeStamp        = ""
        self._date             = ""

    def __call__(self):
        self.detect()

    def getCurrentImage(self):
        return self._currentImage

    def isRecording(self):
        return self._recording

    def totalChange( self, greenScaleImage ):
        count = 0
        for pixel in greenScaleImage.getdata():
            if pixel != (0, 255, 0, 0):
                count += 1
        return count

    def adjustCamera( self, cam ):
        for x in xrange(10):
            time.sleep(.50)
            initialImage = cam.getImage()
            time.sleep(.50)
        return initialImage

    def convertImagesToCv( self, images ):
        newImages = []
        for index in xrange(len(images)):
            pilImage = images[index].save( "pilConversion.jpg" )
            openCvImage = cv.LoadImage( "pilConversion.jpg" )
            newImages.append(openCvImage)
        os.remove("pilConversion.jpg")
        return newImages

    def overThreshold( self, greenImage, thresholdByPx ):
        amtChanged = self.totalChange( greenImage )
        return amtChanged > thresholdByPx

    def getMostCurrentImage( self ):
        return self._cam.getImage()

    def recordOutToVideo(self):
        cvImages = self.convertImagesToCv( self._detectedImages )
        frame = cvImages[0]
        ts = time.time()
        self._timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H:%M:%S')+".avi"
        self._timeStamp = self._timeStamp.replace(":", "-")
        self._timeStamp = "" + self._timeStamp
        video_out = cv.CreateVideoWriter( self._timeStamp, #make it add to subfolder..
                                          cv.CV_FOURCC('I','4','2','0'), 
                                          2.0, 
                                          cv.GetSize(frame), 
                                          1 )
        for item in cvImages:
            frame = item
            cv.WriteFrame( video_out, frame )

    def overTimeLimit(self, initialTime):
        (time.time() - initialTime) > self._defaultTimeLimit

    def end(self):
        self._stopRecording = True

    def aboveTimeLimit(self, startTime):
        if startTime == 0: 
            return False
        # print time.time() - startTime
        return (time.time() - startTime) > self._defaultTimeLimit

    def detect( self ):
        initialImage = self.adjustCamera( self._cam )
        # initialImage.save('initialImage.jpg')
        intialMotion = self.getMostCurrentImage()
        startTime = 0

        while True:
            # print str(time.time() - startTime) + " => " + str(self._defaultTimeLimit)
            self._currentImage = self._cam.getImage()
            compared           = compare_images( initialImage, 
                                                 self._currentImage, 
                                                 70, 
                                                 ANY_2RGB )
            # print self.aboveTimeLimit(startTime)
            if self._stopRecording:
                del self._detectedImages
                break
            elif (not self._recording and len(self._detectedImages) > 0) or self.aboveTimeLimit(startTime): 
            #this could look better
                print "\n\nEND RECORDING!\n\n"
                self.recordOutToVideo()
                initialMotionName = "initImage-" + str(self._timeStamp[7:-4]) + ".jpg"
                initialMotion.save( initialMotionName )
                self._detectedImages = []

                command = 'ffmpeg\\bin\\ffmpeg -i ' + str(self._timeStamp) + ' -acodec libmp3lame -ab 192 ' + str(self._timeStamp)[:-4] + '.mov'
                # command = 'ffmpeg -i ' + str(self._timeStamp) + ' -acodec libmp3lame -ab 192 ' + str(self._timeStamp)[:-4] + '.mov'
                print command
                p = subprocess.Popen(command.split())
                p.wait()
                os.remove(self._timeStamp)

                #video is now .mov
                self._timeStamp = self._timeStamp[:-4] + ".mov"

                #dropbox uploading done here
                dropboxPic = DropboxUploader()
                dropboxVid = DropboxUploader()

                dropboxPic.authenticate()
                dropboxVid.authenticate()

                picLocation = initialMotionName
                dropboxPic.uploadFile( picLocation )
                dropboxVid.uploadFile( self._timeStamp )

                picURL = dropboxPic.getDBLink( picLocation )
                vidURL = dropboxVid.getVSASLink( self._timeStamp )

                #email done here

                self._date = datetime.datetime.now().strftime("%Y-%m-%d")
                emailFile = open("emailTester.txt")
                while True:
                    line = emailFile.readline()
                    line = line.replace("\n","")
                    if not line:
                        break
                    emailSender = SendEmail()                
                    emailSender.setSubject("VSAS Motion Detected!")
                    emailSender.setAlertLevel("RED")
                    emailSender.setDbPhotoLink( picURL )
                    emailSender.setDbVidLink( vidURL )
                    emailSender.setRecipient( line )
                    emailSender.setDate(self._date)
                    emailSender.sendEmail()
                emailFile.close()

                recordingsFile = open("testPreviousRecordings.txt", "a")
                recordingsFile.write(self._date+","+vidURL+"\n")
                recordingsFile.close()

                os.remove(self._timeStamp)
                os.remove(initialMotionName)

                #clear memory
                startTime = 0
                self._recording = False 

            elif self.overThreshold( compared, 20000 ): #recording
                if len(self._detectedImages) == 1:
                    initialMotion = self.getMostCurrentImage()
                print "recording..."
                if startTime == 0:
                    startTime = time.time()
                self._recording = True
                self._detectedImages.append( self._currentImage )
            else: #not recording
                print "not recording..."
                self._recording = False
Exemple #31
0
    pixdiff = 0
    for i in range(pixel_count):
        if abs(sum(img1[i]) - sum(img2[i])) > pix_threshold:
            pixdiff += 1
            diffperc = pixdiff / (pixel_count/100)
            if diffperc > img_threshold:
                # motion detected
                return True


# get cam device
cam = Device()

# interval or framerate
interval = 1

prevImg = None
counter = 0

# Poll the webcam forever; save a numbered frame whenever diff_image()
# reports motion, then wait longer (3s) before the next poll.
while True:
    currImg = cam.getImage()
    # NOTE(review): on the first pass prevImg is None -- assumes
    # diff_image() tolerates a None first argument; verify.
    if diff_image(prevImg, currImg):
        print "motion detected!"
        currImg.save('image_%s.png' % (counter))
        interval = 3
        counter += 1
    else:
        interval = 1

    prevImg = currImg
    time.sleep(interval)
Exemple #32
0
 
 ## Define the working directory and create a "screenshots" directory
 if path=="":
     path=os.getcwd()
     print "Using path= ",path
 try:os.mkdir(path+"/screenshots")
 except: pass
 # Create an empty timecode file
 f=open(path+"/timecode.csv","w")
 f.close()
 print "Beginning VideoLog..."
 
 ## Open video input
 try:
     cam = Device(videoInput)
     oldImage=cam.getImage()
 except:
     print "Couldn't open videoport: no signal present"
     sys.exit(0)
 
 print "Comparing images... ",
 
 ## Enter main loop
 while 1:
     time.sleep(tempo)
     print "*",
     newImage=cam.getImage()
     compareOldNew=compareImages(oldImage,newImage,imgLimit,pixLimit,step=1)
     #print "compareOldNew", compareOldNew,type(compareOldNew)
     
     if (compareOldNew==True) or diaId==0:
# 0 = transparent, 255 = opaque
photo_transparency = 200

# initialization
pygame.init()
cam = Device()
cam.setResolution(camera_width,camera_height)
screen = pygame.display.set_mode(screen_resolution)
pygame.display.set_caption('Kiosk in a Cabinet')
pygame.mouse.set_visible(0)

while 1:

    # grab two images and clip button region to do motion detection
    img1 = cam.getImage()
    detect1 = img1.crop((detector_left,detector_upper,detector_right,detector_lower))
    img2 = cam.getImage()
    detect2 = img2.crop((detector_left,detector_upper,detector_right,detector_lower))

    # event handler
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    keyinput = pygame.key.get_pressed()
    if keyinput[K_s]: screen_shot()

    # compose images on a temporary surface s
    s = pygame.Surface(screen.get_size())
    s = s.convert()
    
    # render webcam image
Exemple #34
0
    screen.blit(s, loc)


if __name__ == '__main__':
    res = (640, 480)

    cam = Device()
    cam.setResolution(res[0], res[1])

    brightness = 1.0
    contrast = 1.0
    shots = 0

    receiveThread = UdpReceiver()
    receiveThread.setDaemon(True)  # daemon: child thread exits together with the main thread
    receiveThread.start()

    # Send small webcam frames over UDP while is_sending is set by the
    # receiver thread; otherwise idle.
    while 1:
        if is_sending:
            # NOTE(review): the enhanced 'camshot' is computed but the
            # frame actually sent is a fresh, unenhanced getImage() below.
            camshot = ImageEnhance.Brightness(
                cam.getImage()).enhance(brightness)
            camshot = ImageEnhance.Contrast(camshot).enhance(contrast)
            clock = pygame.time.Clock()
            img = cam.getImage().resize((160, 120))
            data = img.tostring()
            ser_socket.sendto(data, cli_address)
            time.sleep(0.05)
        else:
            time.sleep(1)
    # NOTE(review): unreachable -- the loop above never breaks.
    receiveThread.stop()
    ser_socket.close()
import pygame
import pygame.camera
from pygame.locals import *
# Simply show the webcam video on a screen

pygame.init()
pygame.camera.init()

# Open the first camera pygame can see.
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
cam.start()

import sys
if sys.platform == "win32":
    # On Windows, probe the frame size through VideoCapture instead of
    # grabbing a frame from the pygame camera.
    from VideoCapture import Device
    tmpcam = Device()
    size = tmpcam.getImage().size
    del tmpcam
else:
    snap = cam.get_image()
    size= (snap.get_width(),snap.get_height())

display = pygame.display.set_mode(size)

# Reusable surface that frames are captured into.
snapshot = pygame.surface.Surface(size, 0, display)

def get_and_flip():
	"""Grab a frame into the shared snapshot surface and show it on screen."""
	global snapshot, display
	snapshot = cam.get_image(snapshot)
	display.blit(snapshot, (0,0))
	pygame.display.flip()
Exemple #36
0
from socket import *
from VideoCapture import Device
import time
import sys

cam = Device()

# NOTE(review): this rebinding shadows the 'socket' name brought in by
# the star-import above -- works here, but fragile.
socket = socket(AF_INET, SOCK_STREAM)
if len(sys.argv) == 1:
    socket.connect(("localhost", 7777))
else:
    socket.connect((sys.argv[1], 7777))

# Tell the server the webcam resolution first.
res = cam.getImage().size
socket.send(str(res[0]))  #Send webcam resolution
socket.send(str(res[1]))

time.sleep(1)

# Stream raw frames until interrupted with Ctrl-C.
while True:
    try:
        img = cam.getImage()
        imgstr = img.tostring()
        time.sleep(
            0.01
        )  #otherwise send to fast and the server receive images in pieces and fail
        socket.send(imgstr)
    except KeyboardInterrupt:
        socket.send("quit")
        socket.close()
        break
Exemple #37
0
from PyQt4 import QtCore, QtGui
from VideoCapture import Device
from PIL import Image, ImageQt
import sys
import time

# Show a single webcam frame in a Qt label for 10 seconds.
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
layout = QtGui.QGridLayout(window)

button = QtGui.QLabel()


cam = Device(0)

# PIL image -> Qt image -> pixmap shown in the label.
pilImage = cam.getImage()
qimg = ImageQt.ImageQt(pilImage)
qPixmap = QtGui.QPixmap.fromImage(qimg)
button.setPixmap(qPixmap)
time.sleep(10)
# Release the camera before entering the event loop.
del(cam)


layout.addWidget(button)

window.show()
app.exec_()
Exemple #38
0
                is_sending = True
                ser_socket.sendto('startRcv', cli_address)
            if message == 'quitCam':
                is_sending = False
                print 'quit camera',

    def stop(self):
        """Ask the receive loop to exit at its next check of thread_stop."""
        self.thread_stop = True


# Create the receiver thread.
receiveThread = UdpReceiver()
receiveThread.setDaemon(True)  # daemon: child thread exits together with the main thread
receiveThread.start()

# Initialise the webcam.
cam = Device()
cam.setResolution(320, 240)

# Main-thread loop: send video frames while a client is listening.
while 1:
    if is_sending:
        img = cam.getImage().resize((160, 120))
        data = img.tostring()
        ser_socket.sendto(data, cli_address)
        time.sleep(0.05)
    else:
        time.sleep(1)

# NOTE(review): unreachable -- the loop above never breaks.
receiveThread.stop()
ser_socket.close()
class gui():
    """Tkinter video-chat client (Python 2): streams webcam frames and
    microphone audio to a peer over UDP and plays back what it receives.

    NOTE(review): relies on module-level imports not visible in this
    excerpt (tk, tkMessageBox, socket, pyaudio, threading, ImageTk,
    Image, sleep, debug).
    """

    PORT=9801
    dataSize=65535  # max UDP datagram read per recvfrom

    #Change values for Image
    width=640
    height=480

    #Change values for audio quality
    chunk = 4096
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100


    #------------------Used by program while running. DO NOT edit.
    # The *DataAvail flags are hand-rolled mailboxes guarding the matching
    # *Data attributes shared between the worker threads below.
    sndPicDataAvail=False
    sndAudDataAvail=False
    recvPicDataAvail=False
    recvAudDataAvail=False
    prog=False
    stop=False
    exitProg=False
    #------------------------------------------------------------

    def __init__(self, root):
        """Build the UI, open camera/audio devices and start the four
        daemon worker threads (send, receive, self-view, call listener)."""
        self.root=root
        self.root.wm_title("Video Chatting: Coded by Rogue")
        self.root.protocol('WM_DELETE_WINDOW', self.safeExit)

        self.sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        self.cam=Device()
        self.photo=ImageTk.PhotoImage(self.cam.getImage())

        # One full-duplex audio stream for both mic capture and playback.
        p = pyaudio.PyAudio()
        self.audStream = p.open(format = self.FORMAT,
                        channels = self.CHANNELS,
                        rate = self.RATE,
                        input = True,
                        output = True,
                        frames_per_buffer = self.chunk)
        self.design()
        t1=threading.Thread(target=self.sndData)
        t2=threading.Thread(target=self.recvData)
        t3=threading.Thread(target=self.showMe)
        t4=threading.Thread(target=self.callrecv)
        t1.daemon=True
        t2.daemon=True
        t3.daemon=True
        t4.daemon=True
        t1.start()
        t2.start()
        t3.start()
        t4.start()

    def design(self):
        """Lay out the widgets: address entry, Call button, video canvas."""
        self.frame=tk.Frame(self.root)
        self.label=tk.Label(self.frame, text="Enter the ip address")
        self.label.pack()
        self.input=tk.Entry(self.frame)
        self.input.pack()
        self.start=tk.Button(self.frame, text="Call", command=self.call)
        self.start.pack()
        self.frame.pack(fill=tk.X)
        self.mainCanvas=tk.Canvas(self.root, height=self.height, width=self.width, relief=tk.RAISED, bd=5, bg="white")
        self.mainCanvas.pack(fill=tk.BOTH)

    def startThreads(self):
        """Start the per-call media threads (video/audio, send/receive)."""
        threading.Thread(target=self.videosnd).start()
        threading.Thread(target=self.videorecv).start()
        threading.Thread(target=self.audiosnd).start()
        threading.Thread(target=self.audiorecv).start()

    def call(self):
        """Call-button handler: start a call to the entered IP, or end the
        current one when a call is already in progress."""
        if self.prog:
            self.start.configure(text="Call")
            self.prog=False
        else:
            ip=self.input.get()
            try:
                tmp=socket.getaddrinfo(ip,80)
                print tmp
            except socket.gaierror:
                tkMessageBox.showerror('Error','Error in connecting to {}!!!'.format(ip))
                return

            try:
                # First datagram to ourselves unblocks callrecv's recvfrom
                # so it stops competing for the handshake reply.
                self.sock.sendto("this is to stop the callrecv",self.selfaddr)
                self.sock.sendto("Can i connect???",(ip,self.PORT))
                data,addr=self.sock.recvfrom(self.dataSize)
                print data
                if data=="OK":
                    self.addr=(ip, self.PORT)
                    self.start.configure(text="End Call")
                    self.prog=True
                    self.startThreads()
                elif data=="NO":
                    tkMessageBox.showerror('Error','Connection refused!!!'.format(ip))
                else:
                    tkMessageBox.showerror('Error','Error in connecting!!!'.format(ip))
            except:
                tkMessageBox.showerror('Error','Error in Connecting!!!'.format(ip))
            self.stop=False

    def askques(self,addr):
        """Ask the user whether to accept an incoming call (unused path)."""
        return tkMessageBox.askyesno('Calling...','trying to connect. Accept')

    def callrecv(self):
        """Listener thread: bind the UDP socket and auto-accept incoming
        call requests while no call is in progress."""
        self.selfaddr=(socket.gethostbyname(socket.gethostname()), self.PORT)
        self.sock.bind(self.selfaddr)
        debug("callrecv started at {}".format(socket.gethostbyname(socket.gethostname())))
##        debug('{}'.format(tkMessageBox.askyesno('Calling...','{} is trying to connect. Accept ?')))
        while not self.exitProg:
            if not self.prog and not self.stop:
                data, addr=self.sock.recvfrom(self.dataSize)
##                connect=self.askques('123')
##                print data, addr, addr[0]
##                debug('data recv {}'.format(data))
                # A datagram from ourselves is the signal sent by call()
                # to take this thread out of the handshake.
                if addr==self.selfaddr:
                    self.stop=True
                else:
##                    connect=tkMessageBox.askyesno('Calling...','{} is trying to connect. Accept ?'.format(addr[0]))
                    connect=True
                    if connect:
                        debug('sending ok')
                        self.sock.sendto("OK",addr)
                        self.start.configure(text="End Call")
                        self.addr=addr
                        self.prog=True
                        self.startThreads()
                    else:
                        self.sock.sendto("NO",addr)
            sleep(2)

    def sndData(self):
        """Sender thread: push pending audio/picture payloads to the peer.

        Best-effort by design: failures are logged via debug() and the
        loop keeps going."""
        #This is gonna run till the end. If you want to encrypt it do it here. Am not gonna do it
        while not self.exitProg:
            while self.prog:
                try:
                    if self.sndAudDataAvail:
                        self.sock.sendto(self.sndAudData, self.addr)
                        self.sndAudDataAvail=False
                except:
                    debug('error in sndData audio. current val: {},{},{}'.format(self.prog, self.sndAudDataAvail, self.sndPicDataAvail))

                try:
                    if self.sndPicDataAvail:
                        self.sock.sendto(self.sndPicData, self.addr)
                        self.sndPicDataAvail=False
                except:
                    debug('error in sndData pic. current val: {},{},{}'.format(self.prog, self.sndAudDataAvail, self.sndPicDataAvail))
            sleep(1)

    def recvData(self):
        """Receiver thread: demultiplex incoming datagrams by their first
        byte ('p' = picture, 'a' = audio) into the mailbox attributes."""
        #This is gonna run till the end. If you want to decrypt it do it here. Am not gonna do it
        while not self.exitProg:
            while self.prog:
                try:
                    data,addr=self.sock.recvfrom(self.dataSize)
                    if addr==self.addr:
                        typ=data[0]
                        if typ=='p':
                            self.recvPicData=data[1:]
                            self.recvPicDataAvail=True
                        if typ=='a':
                            self.recvAudData=data[1:]
                            self.recvAudDataAvail=True
                except:
                    debug('error in recvData. current val: {},{},{}'.format(self.prog, self.sndAudDataAvail, self.sndPicDataAvail))
            sleep(1)

    #w1 , h1 is the current size and w2, h2 is the resize output.
    def getPILImage(self, buf, w1, h1, w2, h2, recv=False):
        """Decode a raw frame buffer to a resized PIL image.

        Local (recv=False) frames come from the camera in bottom-up BGR
        order and are mirrored; received frames are plain raw RGB."""
        if not recv:
            img = Image.fromstring('RGB', (w1,h1), buf, 'raw', 'BGR', 0, -1)
            img=img.resize((w2,h2))
            img=img.transpose(Image.FLIP_LEFT_RIGHT)
        else:
            img= Image.fromstring('RGB',(w1,h1), buf, 'raw')
            img=img.resize((w2,h2))
        return img

    def getData(self, typ):
        """Pop a received payload ('pic' or 'aud') from its mailbox, or
        None when nothing new has arrived."""
        if typ=="pic" and self.recvPicDataAvail:
            val=self.recvPicData
            self.recvPicDataAvail=False
            return val
        if typ=="aud" and self.recvAudDataAvail:
            val=self.recvAudData
            self.recvAudDataAvail=False
            return val

    def setSendData(self, data, typ):
        """Queue an outgoing payload, tagged with its type byte."""
        if typ=="pic":
            #Write data only if previous data has been sent i.e sndPicDataAvail is false
            if not self.sndPicDataAvail:
                self.sndPicData='p'+data
                self.sndPicDataAvail=True
        if typ=="aud":
            if not self.sndAudDataAvail:
                self.sndAudData='a'+data
                self.sndAudDataAvail=True


    def videosnd(self):
        """Per-call thread: queue 160x120 webcam frames ~10x per second."""
        while self.prog:
            try:
                pic,w,h=self.cam.getBuffer()
                pic=self.getPILImage(pic,w,h,160,120)
                self.setSendData(pic.tostring(), "pic")
                sleep(.1)
            except:
                debug("Error in videosnd")

    def videorecv(self):
        """Per-call thread: draw received frames on the main canvas."""
        while self.prog:
            try:
                while not self.recvPicDataAvail:
                    sleep(.05)
                pic=self.getData("pic")
                img=self.getPILImage(pic,160,120,self.width,self.height,True)
                photo=ImageTk.PhotoImage(img)
##                self.mainCanvas.delete(tk.ALL)
                self.mainCanvas.create_image(self.width/2,self.height/2,image=photo)
                sleep(.05)
            except:
                debug("Error in videorecv")

    def audiosnd(self):
        """Per-call thread: queue microphone chunks for sending."""
        while self.prog:
            try:
                data=self.audStream.read(self.chunk)
                self.setSendData(data,"aud")
            except:
                debug("Error in audiosnd")

    def audiorecv(self):
        """Per-call thread: play received audio chunks."""
        while self.prog:
            try:
                while not self.recvAudDataAvail:
                    sleep(.05)
                data=self.getData("aud")
                self.audStream.write(data)
            except:
                debug("Error in audiorecv")
##              stream.write(data)

    def showMe(self):
        """Background thread: quarter-size self-view in the canvas corner."""
        while not self.exitProg:
            try:
                pic,w,h=self.cam.getBuffer()
                img=self.getPILImage(pic,w,h,self.width/4,self.height/4)
                photo=ImageTk.PhotoImage(img)
                self.mainCanvas.create_image(self.width/8,self.height-self.height/8,image=photo)
                sleep(.05)
            except:
                debug("Error in showMe")

    def safeExit(self):
        """Window-close handler: flag the threads down, then tear down."""
        self.exitProg=True
        sleep(2)
        self.sock.close()
        self.root.destroy()
Exemple #40
0
import socket  
import Image  
from VideoCapture import Device  
# Stream 160x120 webcam frames to a local UDP receiver forever.
cam = Device()  
cam.setResolution(320,240)  
clisocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  
while 1:  
    im = cam.getImage()  
    im = im.resize((160,120))  
    da = im.tostring()  
    clisocket.sendto(da, ("127.0.0.1", 1234))  
# BUG FIX: the original closed an undefined name ``s``; the socket in
# scope is ``clisocket``.  (Unreachable after the infinite loop, but a
# NameError waiting to happen if the loop ever gains a break.)
clisocket.close() 
import sys
sys.path.insert(0,'/VideoCapture')

from VideoCapture import Device
import numpy as np

from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg

# Open the webcam and grab one frame up front.
cam = Device()
im = cam.getImage()


app = QtGui.QApplication([])

## Create window with ImageView widget
win = QtGui.QMainWindow()
win.resize(480,640)
imv = pg.RawImageWidget()
win.setCentralWidget(imv)
win.show()
win.setWindowTitle('pyqtgraph example: ImageView')
def update():
    """Grab a webcam frame and display one colour channel in the widget."""
    global imv, cam
    im = cam.getImage()
    pix = np.asarray(im)
    # Keep only channel index 2 -- presumably blue in PIL's RGB order;
    # TODO confirm.
    r = pix[:,:,2]

    imv.setImage(r)
# NOTE(review): snippet is truncated here -- the timer is created but its
# timeout connection / start are not visible.
timer = QtCore.QTimer()
Exemple #42
0
def get_image():
    """Open the default capture device (device 0) and return one frame."""
    camera = Device(0)
    return camera.getImage(1)
Exemple #43
0
 Move Sequence block
 """
 # version CCD_LN7_2.py uses absolute coordinates of the microscope stage in um as a reference in seq_coord
 if Move_seq_marker==1 and command_exec==1:  # move only when the TTL signal marks command marker, and the 'm' key was pressed to mark the Move_seq_marker
     if len(seq_coord)>0:
         print 'Moving sequence initiated...'
         # seq_coord already has the sequence of the absolute coordinates to move to (in the microscope stage coordinate system)
         x,y,z=seq_coord.pop(0)            
         MyUnit.move_to_um(x,y,z) 
         mouse_seq.pop(0)
     Move_seq_marker=1#originally 0 w/ UL
     
         
         
 #camera 
 camshot = ImageEnhance.Brightness(cam.getImage()).enhance(brightness)
 camshot = ImageEnhance.Contrast(camshot).enhance(contrast)
 for event in pygame.event.get():
     
     if event.type == QUIT:
         pygame.quit()
         sys.exit()
             
     keyinput = pygame.key.get_pressed() # returns the status of every key pressed - tuple
     if keyinput[K_b]: brightness -= .1
     if keyinput[K_n]: brightness += .1
     if keyinput[K_c]: contrast -= .1
     if keyinput[K_v]: contrast += .1
     if keyinput[K_q]: cam.displayCapturePinProperties()
     if keyinput[K_e]: cam.displayCaptureFilterProperties()
     if keyinput[K_t]:
Exemple #44
0
class MotionDetectorTest(): 
    """Trial version of the motion detector (Python 2): writes motion
    frames straight to a single AVI instead of buffering and publishing.

    NOTE(review): depends on module-level names not visible in this
    excerpt (Device, cv, Image, compare_images, ANY_2RGB, time, os).
    """

    def __init__(self): #not using video recorder as of now
        self._cam            = Device()
        self._detectedImages = []
        self._currentImage   = Image.open( "VSAS logo.jpg" )
        self._recording      = False
        self._videoRecorder  = self.newRecorder()
        self._recorded       = False

    def __call__(self):
        self.detect()

    def newRecorder(self):
        """Create a fresh 640x480, 2 fps AVI writer."""
        videoRecorder = cv.CreateVideoWriter( "testing-vsas.avi", 
                                               cv.CV_FOURCC('I','4','2','0'), 
                                               2.0, 
                                               (640,480), 
                                               1)
        return videoRecorder

    def getCurrentImage(self):
        return self._currentImage

    def isRecording(self):
        return self._recording

    def totalChange( self, greenScaleImage ):
        """Count pixels of a compare_images() result that differ from the
        'unchanged' marker colour (0, 255, 0, 0)."""
        count = 0
        for pixel in greenScaleImage.getdata():
            if pixel != (0, 255, 0, 0):
                count += 1
        return count

    def adjustCamera( self, cam ):
        """Grab frames for ~15 seconds so the webcam can auto-adjust;
        return the last frame as the reference image."""
        for x in xrange(10):
            time.sleep(.75)
            initialImage = cam.getImage()
            initialImage.save('image.jpg')
            time.sleep(.75)
        return initialImage

    # def convertImagesToCv( self, images ):
    #     newImages = []
    #     for index in xrange(len(images)):
    #         pilImage = images[index].save( "pilchange.jpg" )
    #         openCvImage = cv.LoadImage( "pilchange.jpg" )
    #         newImages.append(openCvImage)
    #     return newImages

    def overThreshold( self, greenImage, thresholdByPx ):
        """True when more than thresholdByPx pixels changed."""
        amtChanged = self.totalChange( greenImage )
        return amtChanged > thresholdByPx

    def convertPilToCV( self, pilImage ):
        """Convert a PIL image to an OpenCV image via a temporary JPEG."""
        pilImage.save( "convert.jpg" )
        cvImage = cv.LoadImage( "convert.jpg")
        return cvImage

    def captureImages( self, fps ):
        # Placeholder -- not implemented yet.
        #nothing here
        print

    def getMostCurrentImage( self ):
        return self._cam.getImage()

    def writeToVideo(self, pilImage):
        """Append one frame to the AVI and mark that something was written."""
        cvImage = self.convertPilToCV( pilImage )
        cv.WriteFrame( self._videoRecorder, cvImage )
        self._recorded = True
    # def recordImage(self, pilImage):
    #     cvImage = self._videoRecorder.convertPil( pilImage )
    #     self._videoRecorder.writeToVideo( cvImage )

    # def recordOutToVideo(self):
    #     cvImages = self.convertImagesToCv( self._detectedImages )
    #     frame = cvImages[0]
    #     cv.SaveImage("comparison.jpg", frame)
    #     video_out = cv.CreateVideoWriter( "testing-vsas.avi", 
    #                                       cv.CV_FOURCC('I','4','2','0'), 
    #                                       2.0, 
    #                                       cv.GetSize(frame), 
    #                                       1)
    # #     # cv.CV_FOURCC('I','4','2','0')
    #     for item in cvImages:
    #         frame = item
    #         cv.WriteFrame( video_out, frame )


    def detect( self ):
        """Main loop: write frames to the AVI while motion is detected;
        start a fresh recorder once a recording has finished."""
        #"Getting initial image, and adjusting camera..."
        initialImage = self.adjustCamera( self._cam )
        initialImage.save('initialImage.jpg')
        #"initial image saved and webcam adjusted..."

        while True:
            time.sleep(.01) #to slow it down...might not be needed

            self._currentImage = self._cam.getImage()
            compared           = compare_images( initialImage, 
                                                 self._currentImage, 
                                                 70, 
                                                 ANY_2RGB )
            if not self._recording and self._recorded:#self._videoRecorder.videoWritten(): #len(self._detectedImages) > 0:#self._videoRecorder.videoWritten(): #had just stopped recording
                # self._detectedImages[len(self._detectedImages)/2].save("detected.jpg")
                # self.recordOutToVideo()
                # self._detectedImages = []
                # print "!!!finishedRecording..."
                self._videoRecorder = self.newRecorder()
                self._recorded = False
                #clear memory
            elif self.overThreshold( compared, 20000 ): #recording
                print "recording..."
                self._recording = True
                # self._detectedImages.append( self._currentImage )
                # print "!!!recorded image to video..."
                self.writeToVideo( self._currentImage )
                # compared.save('recording.jpg')#' + str(imgNum) + '.jpg')
            else: #not recording
                print "not recording..."
                self._recording = False
@author: aval
'''

import Image
from VideoCapture import Device

from PreImage import PreImage
from SegmImage import SegmImage
from FindFaces import FindFaces

if __name__ == '__main__':

    # Grab one frame and shrink it before filtering.
    cam = Device()
    print "\t==== Camera initial ok ===="
    input_buf = cam.getImage()
    print "\t==== Getting image ok ===="
    image = input_buf.resize((100, 80))
    print "\t==== Resizing image ok ===="

    # Run the same pre-processing pipeline in two colour spaces.
    filterHSV = PreImage(image)
    filterYCbCr = PreImage(image)

    filterHSV.convert('', 'HSV')
    filterYCbCr.convert('', 'YCbCr')

    filterHSV.grey_scale('', 'HSV')
    filterYCbCr.grey_scale('', 'YCbCr')

    # Segment each grey-scale output into 5 classes.
    filterHSV2 = SegmImage(filterHSV.output_HSV_grey_scale, ClaNum=5)
    filterYCbCr2 = SegmImage(filterYCbCr.output_YCbCr_grey_scale, ClaNum=5)
Exemple #46
0
		#print('Image saved.\nUploading via SFTP.')
		ctr_array += 1
		thread_ctr += 1
		time.sleep(array_delay)

if __name__ == "__main__":
	# Three pre-authenticated psftp sessions kept open for uploads;
	# 'action' (defined elsewhere) presumably writes through them.
	f1 , g1 = os.popen4('psftp cam')  # 'cam' is the name of my saved
	f1.write('cd captures\n')           #PuTTY session, and 'captures' is
	f2 , g2 = os.popen4('psftp cam')    #the name of the folder where the
	f2.write('cd captures\n')           #photos are saved on the remote comp
	f3 , g3 = os.popen4('psftp cam')
	f3.write('cd captures\n')
	
	array = 5    #number of shots to take when motion is detected
		
	# Grab two frames half a second apart; any reported difference
	# counts as motion.
	while 1:		
		shot_a = cam.getImage()
		time.sleep(.5)
		shot_b = cam.getImage()
		
		motion = diff_image(shot_a, shot_b)
		
		if motion:
			action(array)
	# NOTE(review): unreachable -- the loop above never breaks.
	f1.close()
	g1.close()
	f2.close()
	g2.close()
	f3.close()
	g3.close()
	
Exemple #47
0
#test taking pictures

from PIL import Image
from VideoCapture import Device

cam = Device(
)  #devnum=0 means you are using the device set in 0 position probably your webcam
blackimg = cam.getImage(
)  #this return a PIL image but I don't know why the first is always black
#blackimg.show()#try to check if you want
# BUG FIX: the original bound this frame to the name ``Image``, shadowing
# the PIL ``Image`` class imported above.
img = cam.getImage()  #this is a real image PIL image
#img.show()

cam.saveSnapshot('image.jpg')
Exemple #48
0
class MotionDetector():
    """Webcam motion detector.

    Grabs frames from the default capture device, compares each against a
    baseline frame, and buffers frames while motion is detected.  When motion
    stops (or the time limit is hit) the buffered frames are written to an
    AVI, transcoded to .mov with ffmpeg, uploaded to Dropbox, and an alert
    email is sent to every address listed in emailTester.txt.
    """

    def __init__(self):
        self._cam = Device()
        self._detectedImages = []  # frames buffered while motion is active
        self._currentImage = ""  #Image.open( "VSAS logo.jpg" )
        self._recording = False
        self._defaultTimeLimit = 5 * 60  #make sure to change back to 5 min
        self._stopRecording = False  # set via end() to stop detect()
        self._timeStamp = ""  # current output video filename
        self._date = ""

    def __call__(self):
        self.detect()

    def getCurrentImage(self):
        """Return the most recent frame grabbed by detect()."""
        return self._currentImage

    def isRecording(self):
        return self._recording

    def totalChange(self, greenScaleImage):
        """Count pixels that differ from pure green (0, 255, 0, 0) in a
        difference image produced by compare_images()."""
        count = 0
        for pixel in greenScaleImage.getdata():
            if pixel != (0, 255, 0, 0):
                count += 1
        return count

    def adjustCamera(self, cam):
        """Give the camera ~10s to auto-adjust by grabbing and discarding
        frames; return the last frame grabbed as the baseline."""
        for _ in range(10):
            time.sleep(.50)
            initialImage = cam.getImage()
            time.sleep(.50)
        return initialImage

    def convertImagesToCv(self, images):
        """Convert PIL images to OpenCV images via a temporary JPEG file."""
        newImages = []
        for image in images:
            # save() returns None; the original bound it to an unused local.
            image.save("pilConversion.jpg")
            newImages.append(cv.LoadImage("pilConversion.jpg"))
        os.remove("pilConversion.jpg")
        return newImages

    def overThreshold(self, greenImage, thresholdByPx):
        """True when more than thresholdByPx pixels changed."""
        return self.totalChange(greenImage) > thresholdByPx

    def getMostCurrentImage(self):
        return self._cam.getImage()

    def recordOutToVideo(self):
        """Write the buffered frames to a timestamped AVI at 2 fps."""
        cvImages = self.convertImagesToCv(self._detectedImages)
        frame = cvImages[0]
        ts = time.time()
        self._timeStamp = datetime.datetime.fromtimestamp(ts).strftime(
            '%Y-%m-%d-%H:%M:%S') + ".avi"
        # Windows filenames cannot contain ':'.
        self._timeStamp = self._timeStamp.replace(":", "-")
        self._timeStamp = "" + self._timeStamp
        video_out = cv.CreateVideoWriter(
            self._timeStamp,  #make it add to subfolder..
            cv.CV_FOURCC('I', '4', '2', '0'),
            2.0,
            cv.GetSize(frame),
            1)
        for item in cvImages:
            cv.WriteFrame(video_out, item)

    def overTimeLimit(self, initialTime):
        """True when more than _defaultTimeLimit seconds have elapsed since
        initialTime.

        BUGFIX: the original evaluated the comparison without returning it,
        so this method always returned None (falsy).
        """
        return (time.time() - initialTime) > self._defaultTimeLimit

    def end(self):
        """Ask detect() to stop at the start of its next iteration."""
        self._stopRecording = True

    def aboveTimeLimit(self, startTime):
        """Like overTimeLimit(), but treats startTime == 0 as 'not started'."""
        if startTime == 0:
            return False
        return (time.time() - startTime) > self._defaultTimeLimit

    def detect(self):
        """Main loop: watch for motion and drive the record/upload/alert
        pipeline.  Runs until end() is called."""
        initialImage = self.adjustCamera(self._cam)
        # BUGFIX: was misspelled `intialMotion`, so the `initialMotion.save()`
        # call below raised NameError whenever recording ended before a
        # motion frame had been re-captured.
        initialMotion = self.getMostCurrentImage()
        startTime = 0

        while True:
            self._currentImage = self._cam.getImage()
            compared = compare_images(initialImage, self._currentImage, 70,
                                      ANY_2RGB)
            if self._stopRecording:
                del self._detectedImages
                break
            elif (not self._recording and len(self._detectedImages) > 0
                  ) or self.aboveTimeLimit(startTime):
                # Motion just stopped (or ran past the limit): flush buffered
                # frames to video and kick off the upload/alert pipeline.
                print("\n\nEND RECORDING!\n\n")
                self.recordOutToVideo()
                initialMotionName = "initImage-" + str(
                    self._timeStamp[7:-4]) + ".jpg"
                initialMotion.save(initialMotionName)
                self._detectedImages = []

                # Transcode the AVI to MOV with a bundled ffmpeg.
                command = 'ffmpeg\\bin\\ffmpeg -i ' + str(
                    self._timeStamp) + ' -acodec libmp3lame -ab 192 ' + str(
                        self._timeStamp)[:-4] + '.mov'
                print(command)
                p = subprocess.Popen(command.split())
                p.wait()
                os.remove(self._timeStamp)

                #video is now .mov
                self._timeStamp = self._timeStamp[:-4] + ".mov"

                #dropbox uploading done here
                dropboxPic = DropboxUploader()
                dropboxVid = DropboxUploader()

                dropboxPic.authenticate()
                dropboxVid.authenticate()

                picLocation = initialMotionName
                dropboxPic.uploadFile(picLocation)
                dropboxVid.uploadFile(self._timeStamp)

                picURL = dropboxPic.getDBLink(picLocation)
                vidURL = dropboxVid.getVSASLink(self._timeStamp)

                # Email every recipient listed in the file (one address per
                # line; a blank line terminates the list).
                self._date = datetime.datetime.now().strftime("%Y-%m-%d")
                emailFile = open("emailTester.txt")
                while True:
                    line = emailFile.readline()
                    line = line.replace("\n", "")
                    if not line:
                        break
                    emailSender = SendEmail()
                    emailSender.setSubject("VSAS Motion Detected!")
                    emailSender.setAlertLevel("RED")
                    emailSender.setDbPhotoLink(picURL)
                    emailSender.setDbVidLink(vidURL)
                    emailSender.setRecipient(line)
                    emailSender.setDate(self._date)
                    emailSender.sendEmail()
                emailFile.close()

                # Log the recording for later review.
                recordingsFile = open("testPreviousRecordings.txt", "a")
                recordingsFile.write(self._date + "," + vidURL + "\n")
                recordingsFile.close()

                os.remove(self._timeStamp)
                os.remove(initialMotionName)

                # Reset state for the next motion event.
                startTime = 0
                self._recording = False

            elif self.overThreshold(compared, 20000):  # motion detected
                if len(self._detectedImages) == 1:
                    initialMotion = self.getMostCurrentImage()
                print("recording...")
                if startTime == 0:
                    startTime = time.time()
                self._recording = True
                self._detectedImages.append(self._currentImage)
            else:  # no motion
                print("not recording...")
                self._recording = False
Exemple #49
0
class Camera:
    """Wrapper around a VideoCapture device that records AVI files with
    OpenCV: one directory per day, one file per start time."""

    def __init__(self):
        self.winName = "Dad CCTV"
        self.recording = False
        self.fps = 24
        # Ask the user which capture device to open (0 is usually the
        # built-in webcam).
        device = int(raw_input("Choose System Device: "))
        self.cam = Device(devnum=device)

    def initFile(self):
        """Derive output directory/filename from the current date/time,
        snapshot a reference frame and open the video writer."""
        self.newpath = time.strftime("%d.%m.%y" + "\\")
        self.fileName = time.strftime("%H.%M.%S")
        self.getBaseFrame()
        self.startWriter()

    def record(self):
        """Record until Ctrl-C, then release the writer."""
        # BUGFIX: create the output directory *before* initFile() opens a
        # VideoWriter inside it (the original created it afterwards).
        self.newpath = time.strftime("%d.%m.%y" + "\\")
        print(self.newpath)
        if not os.path.exists(self.newpath):
            os.makedirs(self.newpath)
        self.initFile()
        self.recording = True
        print("Recording... Press Ctr-C to stop")
        try:
            self.write()
        except KeyboardInterrupt:
            print("Stopped Recording File Saved As: " + self.fileName)
            self.cleanUp()

    def getCurrentTime(self):
        """Return the current time as 'HH:MM:SS'.

        BUGFIX: the original assigned to a local named `time` (shadowing the
        module, raising UnboundLocalError) and returned nothing.
        """
        return time.strftime("%H:%M:%S")

    def getBaseFrame(self):
        # Reference snapshot used by startWriter() to size the video frames.
        self.cam.saveSnapshot("1.jpg")

    def getFrame(self):
        """Current frame as a numpy array (for cv2.VideoWriter)."""
        return numpy.array(self.cam.getImage())

    def getPILFrame(self):
        """Current frame as a PIL image."""
        return self.cam.getImage()

    def startWriter(self):
        """Open the MJPG AVI writer sized to the reference snapshot."""
        img = cv2.imread("1.jpg")
        height, width, layers = img.shape
        fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        self.videoWriter = cv2.VideoWriter(
            self.newpath + self.fileName + ".avi", fourcc, self.fps,
            (width, height))

    def record5Seconds(self):
        """Record for five seconds, then release the writer."""
        # BUGFIX: the original set a local `recording` instead of the
        # instance attribute used by isRecording().
        self.recording = True
        self.initFile()
        now = time.time()
        future = now + 5
        while time.time() < future:
            self.write()
        self.cleanUp()

    def write(self):
        self.videoWriter.write(self.getFrame())

    def cleanUp(self):
        # BUGFIX: was a local `recording = False`; update the attribute.
        self.recording = False
        self.videoWriter.release()

    def isRecording(self):
        return self.recording
Exemple #50
0
        # NOTE(review): fragment — the enclosing function and the earlier
        # allocations (imgRED/imgGRE/imgBLU, imgConCAP, wsize, brightness,
        # contrast, frame_count, cam) are outside this excerpt.
        # Single-channel difference images for the remaining colour planes.
        imgGREDiff = cv.cv.CreateImage(wsize, cv.IPL_DEPTH_8U, 1)
        imgBLUDiff = cv.cv.CreateImage(wsize, cv.IPL_DEPTH_8U, 1)

        # Template-match and reference slots, initialised empty.
        imgTM1 = 0
        imgTM2 = 0
        imgTM3 = 0
        imgTM4 = 0

        imgREF = 0
        imgPEN = 0

    # Set this to true for now
    calibrated = True

    # Get the image from the webcam
    imgRAW = cam.getImage()  # Get raw image from the webcam
    imgCapture = ImageEnhance.Brightness(imgRAW).enhance(brightness)  # Brightness
    imgCapture = ImageEnhance.Contrast(imgCapture).enhance(contrast)  # Contrast
    imgCapture = imgCapture.transpose(Image.FLIP_LEFT_RIGHT).resize(wsize)  # Flip and resize the image

    # Convert the image to a OpenCV compatible image (RGB)
    cv.cv.SetData(imgConCAP, imgCapture.tostring())

    # Split up the different channels
    # NOTE(review): the destination order RED, BLU, GRE does not match RGB
    # channel order — confirm against the rest of the original example.
    cv.cv.Split(imgConCAP, imgRED[1], imgBLU[1], imgGRE[1], None)

    # Get the difference between the old and the new frames
    if frame_count < 1:
        # First frame: seed the "old" slot with a copy of the new frame.
        cv.cv.Copy(imgRED[1], imgRED[0])
        cv.cv.Copy(imgGRE[1], imgGRE[0])
        cv.cv.Copy(imgBLU[1], imgBLU[0])
Exemple #51
0
import pygame.camera
import pygame.image
import sys
import cv2
import time
from VideoCapture import Device

# Camera-view demo: snapshot once, then stream greyscale frames into a
# pygame window at double size.
cam = Device()
cam.saveSnapshot('image.jpg')
cam.setResolution(160,120)

# grab first frame
colorimg = cam.getImage()
img = colorimg.convert('L')  # greyscale (luminance)
WIDTH = img.size[0]
HEIGHT = img.size[1]
screen = pygame.display.set_mode( ( WIDTH*2, HEIGHT*2 ) )
pygame.display.set_caption("pyGame Camera View")
# Running-average and difference images, both seeded with the first frame.
average = img
diff = img


while True :
    time.sleep(0.05)
    for e in pygame.event.get() :
        if e.type == pygame.QUIT :
            sys.exit()
    # Convert back to RGB so the raw bytes can be handed to pygame.
    rgbimg = img.convert("RGB")
    mode = rgbimg.mode
    size = rgbimg.size
    data = rgbimg.tostring()
    # NOTE(review): the example is truncated here; the original presumably
    # builds a pygame surface from (data, size, mode) and blits it — confirm.
Exemple #52
0
# NOTE(review): fragment — `do`, `cam`, `cci`, `CSIZE`, `COLORTHRESHOLD`,
# `motion`, `ratio` and `tracklist` are defined earlier in the original
# example, and the loop body is truncated below.
while do:
    for event in pygame.event.get():
        # Quit on window close or ESC, releasing the camera first.
        if event.type == pygame.QUIT:
           do = 0
           del cam
           sys.exit()
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
           do = 0
           del cam
           sys.exit()
    #Get the images
    ###############
    #oci and nci are used for motion detection, they don't show up on screen
    #if you want to show something use ni
    oci = cci #make the current compressed image the old one
    ni = cam.getImage() #get new image for compression
    ni = ni.transpose(Image.FLIP_LEFT_RIGHT) #Flip image for mirror movement, this way topleft == (0,0)
    cci = ni.resize(CSIZE,Image.BILINEAR) #compress the new image and make it the current one

    #Get motion from images
    #######################
    motionArray = motion.getMotionArray(oci,cci,COLORTHRESHOLD) #compare the images and get the array of pixels with difference
    motionPoint = motion.getMotionPoint(motionArray) #calculate the avarage point
    if motionPoint[0] > 0 and motionPoint[1] > 0: #chek X and Y values
        #if a real avarage point of motion has been returned
        wp = motionPoint
        px = wp[0]*ratio #multiply by compression ratio to get the position
        py = wp[1]*ratio #of the coordinate on the uncompressed image
        wp = (px,py)

        if len(tracklist) < 240: #we keep track of some frames' avarage points
from VideoCapture import Device

import cv2
import numpy

# Live webcam preview: grab PIL frames from VideoCapture and display them
# with OpenCV until ESC is pressed.
cam = Device()
cam.setResolution(640, 480)

cv2.namedWindow("preview")

while True:
    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        break

    # cvtColor swaps the channel order of the PIL-sourced frame before it is
    # handed to imshow (which interprets frames as BGR).
    frame = numpy.asarray(cam.getImage())
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # display frame
    cv2.imshow("preview", frame)
# NOTE(review): fragment — `camera`, `blueLower`, `blueUpper`, `np`, `imutils`
# and the solenoid-control code that uses `trap`/`start`/`end` live earlier or
# later in the original example; the loop body is truncated below.
trap = (538,224)

#set traps being used (solenoid number)
start = '1'
end = '3'

#allow warm up
time.sleep(1)

# keep looping
count = 1
currentCount = 0
switch = False
while True:
        # grab the current frame
        pil_img = camera.getImage()
        # Wrap the PIL pixel bytes in an IplImage header, then view it as a
        # numpy array for OpenCV processing.
        cv_img = cv2.cv.CreateImageHeader(pil_img.size,cv2.cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(cv_img, pil_img.tobytes(), pil_img.size[0]*3)
        frame = np.asarray(cv_img[:,:])

        # resize the frame, blur it, and convert it to the HSV
        # color space
        frame = imutils.resize(frame, width=600)
        # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # construct a mask for the colour range given by blueLower/blueUpper,
        # then perform a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, blueLower, blueUpper)
        mask = cv2.erode(mask, None, iterations=2)
    # NOTE(review): fragment — `parser` (an option parser), `Image` and `np`
    # are created/imported earlier in the original example; the tail of the
    # noise-handling branch is truncated below.
    parser.add_option("--pc", action="store", type="float", default=1.0) # [1] describes this as the "penalty coefficient" and it is used to avoid reinitialization according to [4]
    #thresh = 0.5
    parser.add_option("--thresh", action="store", type="float", default=0.5) # threshold used for determining which areas are inside and outside the contour
    parser.add_option("-p", "--hide-progress-image", action="store_true", default=False)
    parser.add_option("-w", "--use-webcam", action="store_true", default=False)
    parser.add_option("--noise", action="store", type="string", default="none")
    parser.add_option("-r", "--reverse-inequalities", action="store_true", default=False)
    parser.add_option("--use-heaviside", action="store_true", default=False)
    
    
    (options, args) = parser.parse_args()
    
    # Image source: a live webcam frame, or the file named on the command line.
    if options.use_webcam:
        from VideoCapture import Device
        cam = Device()
        imgin = cam.getImage()
    else:
        imgin = Image.open(args[0])
	
    imgin = imgin.convert("L") # convert to greyscale (luminance)
    
    img = np.asarray(imgin)
    img = img.astype(np.float32) # convert to a floating point
    
    # Optional additive noise: "mean" scales the noise width to 10% of the
    # mean intensity; any other value is parsed as an explicit width.
    if options.noise != "none":
        if options.noise == "mean":
            noise_width = img.mean() * 0.1
            print noise_width
        else:
            noise_width = float(options.noise)
            
# 0 = transparent, 255 = opaque
photo_transparency = 200

# initialization
# NOTE(review): fragment — `camera_width`, `camera_height`,
# `screen_resolution`, the `detector_*` bounds, `screen_shot` and `K_s` are
# defined earlier in the original example; the compositing code is truncated
# below.
pygame.init()
cam = Device()
cam.setResolution(camera_width, camera_height)
screen = pygame.display.set_mode(screen_resolution)
pygame.display.set_caption('Kiosk in a Cabinet')
pygame.mouse.set_visible(0)

while 1:

    # grab two images and clip button region to do motion detection
    img1 = cam.getImage()
    detect1 = img1.crop(
        (detector_left, detector_upper, detector_right, detector_lower))
    img2 = cam.getImage()
    detect2 = img2.crop(
        (detector_left, detector_upper, detector_right, detector_lower))

    # event handler
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    keyinput = pygame.key.get_pressed()
    if keyinput[K_s]: screen_shot()  # save a screenshot on 's'

    # compose images on a temporary surface s
    s = pygame.Surface(screen.get_size())
    s = s.convert()
Exemple #57
0
        ctr_array += 1
        thread_ctr += 1
        time.sleep(array_delay)


if __name__ == "__main__":
    # NOTE(review): `cam`, `diff_image` and `action` are defined earlier in
    # the original example (outside this excerpt); as shown here the loop
    # would raise NameError on `cam`.
    # Open three interactive PSFTP sessions up front so captured photos can
    # be uploaded while new shots are being taken.
    f1, g1 = os.popen4('psftp cam')  # 'cam' is the name of my saved
    f1.write('cd captures\n')  #PuTTY session, and 'captures' is
    f2, g2 = os.popen4('psftp cam')  #the name of the folder where the
    f2.write('cd captures\n')  #photos are saved on the remote comp
    f3, g3 = os.popen4('psftp cam')
    f3.write('cd captures\n')

    array = 5  #number of shots to take when motion is detected

    while 1:
        # Two frames half a second apart; any difference counts as motion.
        shot_a = cam.getImage()
        time.sleep(.5)
        shot_b = cam.getImage()

        motion = diff_image(shot_a, shot_b)

        if motion:
            action(array)
    # NOTE(review): the loop above never breaks, so these close() calls are
    # unreachable as written.
    f1.close()
    g1.close()
    f2.close()
    g2.close()
    f3.close()
    g3.close()