Example no. 1
def main():
    parser = argparse.ArgumentParser(description="Time to D-D-D-Duel.")
    parser.add_argument("saveDir", help="Directory to save photos to.")
    parser.add_argument("--prefix", default="ygh-photo",
                        help="File prefix for each numbered photo.")
    parser.add_argument("--psm", type=int, default=6,
                        help="psm argument for tesseract tool.")
    args = parser.parse_args()

    prefix = args.prefix
    save_dir = args.saveDir
    psm = args.psm

    if not os.path.exists(save_dir):
        os.mkdir("./%s" % save_dir)

    # setup camera
    try:
        pc.init()
        print("Cameras -> ")
        print(pc.list_cameras())
        webcam = pc.Camera(pc.list_cameras()[0])
        webcam.start()
    except Exception as e:
        print("Error encountered when setting up webcam, check it's not already in use.")
        print(e)
        raise SystemExit

    i = webcam.get_image()
    pi.save(i, "./photo.png")
    # let user select when to take each photo, number them consecutively.
    count = 0
    while True:
        input()
        img = webcam.get_image()
        file_path = "%s/%s%d.png" % (save_dir, prefix, count)
        pi.save(img, file_path)
        print("---> Processing image %s" % file_path)
        try:
            processed_fp = "%s/processed-%s%d.png" % (save_dir, prefix, count)
            preprocess_image(file_path, processed_fp)
            # Define config parameters.
            # '-l eng'  for using the English language
            # '--oem 1' for using LSTM OCR Engine
            # '--psm 6' (the default here) assumes a single uniform block of text
            config = ("-l eng --oem 1 --psm %d" % psm)
            text = pytesseract.image_to_string(
                Image.open(file_path), config=config)
            print("-----text found-------")
            print(text)
            print("----------------------")
        except UnicodeEncodeError:
            print("[!] had an issue encoding to Unicode.")
        count += 1

    pc.quit()  # note: unreachable as written, since the capture loop above never exits
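Note: the snippet above is shown without its imports. A plausible preamble, assuming "pc" and "pi" are aliases for pygame.camera and pygame.image and that preprocess_image() is a project-local helper, might look like this:

import argparse
import os
import pygame.camera as pc   # assumed alias
import pygame.image as pi    # assumed alias
import pytesseract
from PIL import Image
# preprocess_image(src_path, dst_path) is project-specific and not shown here.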
Example no. 2
 def setup(self):
     cameras = camera.list_cameras()
     if CAMERA_INDEX >= len(cameras):
         raise Exception('Error: no camera')
     self._cam = camera.Camera(cameras[CAMERA_INDEX],
                               (CAMERA_WIDTH, CAMERA_HEIGHT))
     self._cam.start()
Example no. 3
        def __init__(self):
                os = system()
                
                if os == 'Windows':
                        self.usuario = environ['USERNAME']
                else:
                        self.usuario = environ['USER']
 
                camera.init()
                misWebcams = camera.list_cameras()
 
                if len(misWebcams) == 0:
                        # the original also called exit() here, which was unreachable after raise
                        raise Exception('No hay webcam disponible.')
 
                elif len(misWebcams) == 1:
                        self.miWebcam = misWebcams[0]
 
                else:
                        for i in range(len(misWebcams)):
                                try:
                                        self.miWebcam = misWebcams[i]
                                        break
                                except:
                                        continue
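Note: a minimal set of imports the snippet above appears to assume (hedged guesses from the names used):

from platform import system   # system() -> 'Windows', 'Linux', ...
from os import environ
from pygame import camera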
Example no. 4
def getImage():
    cam.init()
    k = cam.Camera(cam.list_cameras()[0])
    k.start()
    img = k.get_image()
    sejv(img, "tabla.jpg")
    k.stop()
Example no. 5
	def __init__(self,camera=0,resolution=(640,480)):
		""" Init the camera with a camera and a certain resolution """
		PyCamera.init()
		try:
			self.cam = PyCamera.Camera(PyCamera.list_cameras()[camera],resolution)
			self.resolution = resolution
		except Exception:
			print 'Problem initializing the camera!'
Example no. 6
 def __init__(self, num=0, size=(640, 640)):
     super().__init__(num, size)
     camera.init()
     cam = None
     while cam is None:
         cam = camera.Camera(camera.list_cameras()[num], size)
     # cam.set_resolution(*size)
     self.cam = cam
Example no. 7
    def __init__(self,
                 processFunction=None,
                 display=None,
                 photos=None,
                 version=None,
                 **argd):

        logging.debug("Initializing Video Capture Class")

        #set display size in pixels = width,height
        displaySize = 752, 600
        size = 640, 480

        processRuns = 0

        self.__dict__.update(argd)
        self.__dict__.update(locals())

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            pygame.display.init()
            pygame.display.set_caption(u"Open Allure " + version)
            self.display = pygame.display.set_mode(self.displaySize, 0)

        # bring in photos
        self.photoSmile = pygame.image.load(photos[0]).convert()
        self.photoTalk = pygame.image.load(photos[1]).convert()
        self.photoListen = pygame.image.load(photos[2]).convert()

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        if sys.platform == 'darwin':
            self.cameraList = ['0']  # camera.list_cameras()
        else:
            self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError("Sorry, no cameras detected.")

        logging.info("Opening device %s, with video size (%s,%s)" %
                     (self.cameraList[0], self.size[0], self.size[1]))

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera(self.cameraList[0], self.size, "RGB")
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)

        # place holders for thumbnails
        self.snapshotThumbnail = None
        self.processedShotThumbnail = None
Example no. 8
    def __init__(self, display):
        pgcam.init()
        self.clist = pgcam.list_cameras()
        print(self.clist)
        if not self.clist:
            raise ValueError("sorry no camera detected")

        self.camera = pgcam.Camera(self.clist[0], (640, 480))
        self.screen = pg.surface.Surface((640, 480), 0, display)
        self.camera.start()
        print("camera set")
Example no. 9
 def __init__(self, device, width, height, color='RGB'):
     '''Initialize device
     '''
     camera.init()
     if not device:
         device = camera.list_cameras()[0]
     self._cam = camera.Camera(device, (width, height), color)
     self._cam.start()
     self.running = True
     self.img = None
     Thread.__init__(self)
Example no. 10
    def __init__(self, processFunction=None,
                       display        =None,
                       photos         =None,
                       version        =None,**argd):

        logging.debug( "Initializing Video Capture Class" )

        #set display size in pixels = width,height
        displaySize = 752,600
        size = 640,480

        processRuns = 0

        self.__dict__.update( argd )
        self.__dict__.update( locals() )

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            pygame.display.init()
            pygame.display.set_caption(u"Open Allure " + version)
            self.display = pygame.display.set_mode( self.displaySize, 0 )

        # bring in photos
        self.photoSmile  = pygame.image.load( photos[0] ).convert()
        self.photoTalk   = pygame.image.load( photos[1] ).convert()
        self.photoListen = pygame.image.load( photos[2] ).convert()

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        if sys.platform == 'darwin':
            self.cameraList = ['0'] # camera.list_cameras()
        else:
            self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError( "Sorry, no cameras detected." )

        logging.info( "Opening device %s, with video size (%s,%s)" % ( self.cameraList[0], self.size[0], self.size[1] ) )

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera( self.cameraList[0], self.size, "RGB" )
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface( self.size, 0, self.display )

        # place holders for thumbnails
        self.snapshotThumbnail = None
        self.processedShotThumbnail = None
Example no. 11
 def __init__(self, display):
     pgcam.init()
     self.clist = pgcam.list_cameras()
     print(self.clist)
     if not self.clist:
         raise ValueError("sorry no camera detected")
     self.width = 960
     self.height = 720
     self.camera = pgcam.Camera(self.clist[0], (self.width, self.height))
     self.screen = pg.surface.Surface((self.width, self.height), 0, display)
     self.camera.start()
     print("camera set")
Example no. 12
def captureImgAndSend( toAddr ):
    # capture photos
    print "capturing photos"
    pycam.init()
    cam1 = pycam.Camera(pycam.list_cameras()[0])  
    cam2 = pycam.Camera(pycam.list_cameras()[1])  
    cam1.start()
    #cam1.set_controls(False, False, 100)
    img1 = cam1.get_image()
    pyimg.save(img1, "img1.jpg")
    cam1.stop()
    cam2.start()
    img2 = cam2.get_image()
    pyimg.save(img2, "img2.jpg")
    cam2.stop()

    # send to receiver
    print "sending photos"
    img1_data = open("img1.jpg", 'rb').read()
    msg1 = MIMEMultipart()
    msg1["From"] = EMAIL
    msg1["To"] = toAddr
    image1 = MIMEImage(img1_data, name=os.path.basename("img1.jpg"))
    msg1.attach(image1)
    img2_data = open("img2.jpg", 'rb').read()
    msg2 = MIMEMultipart()
    msg2["From"] = EMAIL
    msg2["To"] = toAddr
    image2 = MIMEImage(img2_data, name=os.path.basename("img2.jpg"))
    msg2.attach(image2)
    s = smtplib.SMTP_SSL("smtp.gmail.com")
    s.login(EMAIL, PASSWORD)
    s.sendmail(EMAIL, [toAddr], msg1.as_string())
    s.sendmail(EMAIL, [toAddr], msg2.as_string())
    s.quit()

    # delete the img file
    os.remove("img1.jpg")
    os.remove("img2.jpg")
Example no. 13
    def __init__(self):
        self.size = (640, 480)
        # create a display surface. standard pygame stuff
        self.display = pg.display.set_mode(self.size, 0)

        # this is the same as what we saw before
        self.clist = pgcam.list_cameras()
        if not self.clist:
            raise ValueError("Sorry, no cameras detected.")
        self.cam = pgcam.Camera(self.clist[0], self.size)
        self.cam.start()

        # create a surface to capture to.  for performance purposes
        # bit depth is the same as that of the display surface.
        self.snapshot = pg.surface.Surface(self.size, 0, self.display)
Example no. 14
 def __init__(self, cfg):
     super().__init__(inputs=[], outputs=[
         'cam/img',
     ], threaded=True)
     self.img_w = cfg['img_w']
     self.img_h = cfg['img_h']
     self.image_format = cfg['image_format']
     pygame.init()
     camera.init()
     cameras = camera.list_cameras()
     print("Using camera %s ..." % cameras[cfg['cam_source']])
     self.webcam = camera.Camera(cameras[cfg['cam_source']],
                                 cfg['cam_resolution'])
     self.processed_frame = None
     self.on = True
Example no. 15
def take_picture(given_name='test'):
    camera.init()

    list_of_cameras = camera.list_cameras()
    print("Found {} cameras!".format(len(list_of_cameras)))

    if len(list_of_cameras):
        my_camera = camera.Camera(list_of_cameras[0])
        print("Successfully connected to the camera!")

        my_camera.start()
        surface = my_camera.get_image()
        print(surface)
        pyimage.save(surface, '{}.bmp'.format(given_name))
        my_camera.stop()
Example no. 16
def main():
    camera.init()
    print('scanning cameras...')
    while 'inf':
        for camera_path in camera.list_cameras():
            # camera_path : /dev/video0
            cmr = camera.Camera(camera_path)
            try:
                cmr.start()
                cmr.stop()
            except:
                print('** Alert ** : (camera {} is already in use)'.format(camera_path))
                Thread(target=alert).start()
                sleep(1)
        sleep(1)
    camera.quit()
Example no. 17
    def open(self):
        # TODO: make async
        from pygame import camera

        camera.init()
        cameras = camera.list_cameras()
        dc = camera.Camera(cameras[self._device_index], self.default_resolution, 'RGB')
        dc.start()

        time.sleep(1)  # give time for webcam to init.

        # 'prime' the capture context...
        # some webcams might not init fully until a capture
        # is done.  so we do a capture here to force device to be ready
        # and query the maximum supported size
        self._temp_surface = dc.get_image()
        self._device_context = dc
Example no. 18
        def __init__(self, input_signal, output_channel, device=0, max_freq=10, size=(WIDTH, HEIGHT), grey=True):
            """
            Constructor for a VideoSnapshot source.
    
            @param input_signal: A channel that will pass a message when an output
            is desired.
    
            @param output_channel: The channel that will be passed a tagged image signal.
    
            @param device: The camera device to connect to - (0 is default)
    
            @param max_freq: We won't bother polling faster than this max frequency.
    
            @param size: A tuple containing the width and height to use for the camera
            device.
    
            @param grey: A boolean indicating if the image should be averaged to one channel.
    
            Example usage:
    
                >>> msg = Event(tag = 1, value = go)
                >>> in_channel, out_channel = Channel(), Channel()
                >>> vid_src = VideoSnapshot(in_channel, out_channel)
                >>> in_channel.put(msg)
                >>> in_channel.put(LastEvent())  # Tells the component we are finished
                >>> vid_src.start()     # Start the thread, it will process its input channel
                >>> vid_src.join()
                >>> img1 = out_channel.get()
                >>> assert out_channel.get().last == True
            """
            super(VideoSnapshot, self).__init__(input_signal, output_channel)
            self.MAX_FREQUENCY = max_freq
            self.device = device
            self.size = size
            self.grey = grey
            self.snapshot = None # This is where we will save our pygame surface image
            logging.debug("Initialising Video Capture")
            camera.init()

            # gets a list of available cameras.
            self.clist = camera.list_cameras()
            if not self.clist:
                raise IOError("Sorry, no cameras detected.")

            logging.info("Opening device %s, with video size (%s,%s)" % (self.clist[0], self.size[0], self.size[1]))

            self.camera = camera.Camera(self.clist[0], self.size, "RGB")
Example no. 19
def main():
    print("Loading calibration matrix....")

    with np.load('calib_camera.npz') as fp:
        mtx, dist = [fp[i] for i in ('mtx', 'dist')]

    pygame.init()
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption('Calibration')

    ubuntu = pygame.font.match_font('Ubuntu')
    font = pygame.font.Font(ubuntu, 20)
    font.set_bold(True)

    camera.init()
    c = camera.Camera(camera.list_cameras()[0], size)
    c.start()

    finish = False
    clock = pygame.time.Clock()

    while not finish:
        surf = c.get_image()
        img = pygame.surfarray.pixels3d(surf)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (6, 6), None)
        img_gray = np.dstack([gray, gray, gray])
        if ret:
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            _, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)
            # print(rvecs.shape)
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
            img_gray = draw(img_gray, corners2, imgpts)

        gray_surf = pygame.surfarray.make_surface(img_gray)
        screen.blit(gray_surf, (0, 0))
        clock.tick(FPS)

        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                finish = True

    c.stop()
Example no. 20
    def open(self):
        # TODO: make async
        from pygame import camera

        camera.init()
        cameras = camera.list_cameras()
        dc = camera.Camera(cameras[self._device_index],
                           self.default_resolution, 'RGB')
        dc.start()

        time.sleep(1)  # give time for webcam to init.

        # 'prime' the capture context...
        # some webcams might not init fully until a capture
        # is done.  so we do a capture here to force device to be ready
        # and query the maximum supported size
        self._temp_surface = dc.get_image()
        self._device_context = dc
Example no. 21
 def __init__(self, display):
     self.display = display
     camera.init()
     self.clist = camera.list_cameras()
     if not self.clist:
         self.is_enabled = False
         # raise ValueError("Nessuna WebCam trovata! :[")
     else:
         self.device = camera.Camera(self.clist[0], CAMERA_SIZE)
         try:
             self.device.start()
             self.is_enabled = True
             self.surface = py.surface.Surface(self.device.get_size(), 0,
                                               display)
         except:
             self.is_enabled = False
             print("Errore nell'avvio della Webcam >:[")
             self.surface = None
Example no. 22
    def __init__(self, context, report_to: Queue,
                 screen_config: gui.ScreenConfig):
        self.tasks = Queue()
        self.report_to = report_to
        self.context = context
        self.screen_config = screen_config
        self.camera = camera.Camera(camera.list_cameras()[0], (640, 480))

        if screen_config.FULLSCREEN:
            self.screen = pygame.display.set_mode(
                (screen_config.WIDTH, screen_config.HEIGHT), pygame.FULLSCREEN)
        else:
            self.screen = pygame.display.set_mode(
                (screen_config.WIDTH, screen_config.HEIGHT), )

        self.current_menu = self.MAIN_MENU = gui.main_menu.MainMenu(
            context=self)
        self.TOP_UP_MENU = gui.top_up_menu.TopUpMenu(context=self)
        self.BUY_TICKET_MENU = gui.buy_ticket_menu.BuyTicketMenu(context=self)
        self.USE_RETURN_TICKET_MENU = gui.use_return_ticket_menu.UseReturnTicketMenu(
            context=self)

        super(FrontendController, self).__init__(target=self.handler)
Example no. 23
    def 啟動視訊(它, 攝影機編號=0):

        #
        # gets a list of available cameras.
        #
        它.攝影機群 = pgCam.list_cameras()

        print('攝影機群= ', 它.攝影機群)

        if 它.攝影機群 == []:
            raise ValueError("歹勢,無 攝影機。Sorry, no cameras detected. ")

        try:
            攝影機id = 它.攝影機群[攝影機編號]
        except IndexError:
            攝影機id = 它.攝影機群[0]

        #
        # creates the camera of the specified 幕寬高 and in RGB, or HSV colorspace
        #
        它.攝影機 = pgCam.Camera(攝影機id, 它.幕寬高, 'HSV')  #"RGB")

        #
        # starts the camera
        #
        # This line is similar in spirit to how we capture audio; it is presumably a multi-threaded approach.
        #
        #
        它.攝影機.start()

        #
        # create a surface (攝影畫面) to capture to.
        # for performance purposes,
        # you want the bit depth to be the same
        # as that of the display surface.
        #
        它.攝影畫面 = pg.surface.Surface(它.幕寬高, 0, 它.幕)
Example no. 24
    def initVideo(self, cameraIndex=0):

        #
        # gets a list of available cameras.
        #
        self.cameras = pgCam.list_cameras()

        print('cameras= ', self.cameras)

        if self.cameras == []:
            raise ValueError("歹勢,無 攝影機。Sorry, no cameras detected. ")

        try:
            cameraId = self.cameras[cameraIndex]
        except IndexError:
            cameraId = self.cameras[0]

        #
        # creates the camera of the specified screenSize and in RGB, or HSV colorspace
        #
        self.camera = pgCam.Camera(cameraId, self.screenSize, 'HSV')  #"RGB")

        #
        # starts the camera
        #
        # This line is similar in spirit to how we capture audio; it is presumably a multi-threaded approach.
        #
        self.camera.start()

        #
        # create a surface (videoShot) to capture to.
        # for performance purposes,
        # you want the bit depth to be the same
        # as that of the display surface.
        #
        self.videoShot = pg.surface.Surface(self.screenSize, 0, self.screen)
Example no. 25
    def 啟動視訊(它, 攝影機編號=0):

        #
        # gets a list of available cameras.
        #
        它.攝影機群 = pgCam.list_cameras()

        print("攝影機群= ", 它.攝影機群)

        if 它.攝影機群 == []:
            raise ValueError("歹勢,無 攝影機。Sorry, no cameras detected. ")

        try:
            攝影機id = 它.攝影機群[攝影機編號]
        except IndexError:
            攝影機id = 它.攝影機群[0]

        #
        # creates the camera of the specified 幕寬高 and in RGB, or HSV colorspace
        #
        它.攝影機 = pgCam.Camera(攝影機id, 它.幕寬高, "HSV")  # "RGB")

        #
        # starts the camera
        #
        # This line is similar in spirit to how we capture audio; it is presumably a multi-threaded approach.
        #
        #
        它.攝影機.start()

        #
        # create a surface (攝影畫面) to capture to.
        # for performance purposes,
        # you want the bit depth to be the same
        # as that of the display surface.
        #
        它.攝影畫面 = pg.surface.Surface(它.幕寬高, 0, 它.幕)
Example no. 26
def main():
    pygame.init()
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption('Extracción de colores')

    camera.init()
    c = camera.Camera(camera.list_cameras()[0], size)
    c.start()

    finish = False
    clock = pygame.time.Clock()

    while not finish:
        surf = c.get_image()
        img = pygame.surfarray.pixels3d(surf)
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        surf = pygame.surfarray.make_surface(hsv)
        screen.blit(surf, (0, 0))
        pygame.display.update()
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                finish = True
    c.stop()
Example no. 27
#!/usr/bin/python

import pygame
from pygame import camera
import time

img_dir = '/home/pi/imgs'
interval = 5*60 #seconds
limit = 100

camera.init()
camera.list_cameras()
cam = camera.Camera("/dev/video0", (640, 480))

pic_count = 0
while pic_count < limit:
    image_name = "{0}/outside_{1}.jpg".format(img_dir, pic_count)
    cam.start()
    img = cam.get_image()
    pygame.image.save(img, image_name)
    cam.stop()
    pic_count += 1
    time.sleep(interval)
Example no. 28
def get_qr_content(with_gui=False, manual_detect=False):

    detected = False
    camera.init()
    if not len(camera.list_cameras()):
        print("No camera detected!")
        sys.exit(-1)
    cam = camera.Camera(camera.list_cameras()[config.camera_number - 1])
    size = cam.get_size()
    width, height = size

    if not manual_detect:
        sys.stdout.write("QR detection started, wait several seconds...")
        sys.stdout.flush()
        cam.start()

        if with_gui:
            screen = pygame.display.set_mode(cam.get_size())
            pygame.display.set_caption("Check QR recognize")
    else:
        with_gui = True
        print("QR detection through GUI, press any key when green line flash")

    data = 0
    while not detected:
        try:

            if manual_detect:
                qr = QR()
                qr.decode_webcam()
                data = qr.data

            else:
                img = cam.get_image()

                # we can use file buffer for recognition
                # pygame.image.save(img, "file.jpg")
                # pil_string_image = Image.open("file.jpg").convert('L').tostring()

                pygame_img = pygame.image.tostring(img, 'RGB', False)
                pil_string_image = Image.fromstring(
                    'RGB', size, pygame_img).convert('L').tostring()

                if with_gui:
                    screen.blit(img, (0, 0))
                    pygame.display.flip()  # display update

                zbar_image = zbar.Image(width, height, 'Y800',
                                        pil_string_image)

                scanner = zbar.ImageScanner()
                scanner.parse_config('enable')
                data = scanner.scan(zbar_image)

                sys.stdout.write('.')
                sys.stdout.flush()

                for qr in zbar_image:
                    if data:
                        "Additional QR recognized!"
                    data = qr.data

            if data:
                print("\nRecognized: `{}`".format(data))
                detected = True

        except Exception as e:
            print("Error! " + str(e))
            pass
        finally:
            time.sleep(config.qr_scan_waiting)

    if not manual_detect:
        pygame.display.quit()
        cam.stop()

    return 0 if data == "NULL" else data
Example no. 29
import pygame
import pygame.camera as camera
import time
import pygame.image as im
from PIL import Image
from itertools import izip
import os

camera.init()

cam = camera.Camera(camera.list_cameras()[0],(640,480))
cam.start()
size = cam.get_size()

def check_images(i1,i2):
	i1 = im.tostring(i1,"RGB")
	i1 = Image.frombytes("RGB",(600,480),i1)
	

	i2 = im.tostring(i2,"RGB")
	i2 = Image.frombytes("RGB",(600,480),i2)
	
	pairs = izip(i1.getdata(), i2.getdata())
	if len(i1.getbands()) == 1:
		dif = sum(abs(p1 - p2) for p1,p2 in pairs)
	else:
		dif = sum(abs(c1 - c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))

	ncomponents = i1.size[0] * i1.size[1] * 3

	return (dif / 255.0 * 100) / ncomponents
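A hypothetical way to use check_images() as a crude motion detector; the 0.5 s poll interval, 5% threshold, and filename pattern below are assumptions, not part of the original project:

prev = cam.get_image()
while True:
	time.sleep(0.5)
	cur = cam.get_image()
	if check_images(prev, cur) > 5.0:  # more than ~5% average per-channel difference
		im.save(cur, "motion_%d.jpg" % int(time.time()))
	prev = cur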
Example no. 30
logging.basicConfig(level=logging.INFO)


def sign_in_to_twitter():
    CONSUMER_KEY = '<Paste Consumer key (API Key) here>'
    CONSUMER_SECRET = '<Paste Consumer Secret (API Secret) here>'
    ACCESS_TOKEN = '<Paste Access Token here>'
    ACCESS_SECRET = '<Paste Access Token Secret here>'

    return Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)

if __name__ == '__main__':
    # Initialise camera
    camera.init()
    camlist = camera.list_cameras()
    cam = camera.Camera(camlist[0])
    cam.start()

    # Get an image from the camera
    capture = cam.get_image()
    logging.info("Image captured")
    cam.stop()

    # Upload to Twitter
    now = datetime.datetime.now()
    time_string = now.strftime("%Y-%m-%d %T")
    image.save(capture, "/tmp/image.jpg")
    photo = open("/tmp/image.jpg", "rb")
    twitter = sign_in_to_twitter()
    twitter.update_status_with_media(status=time_string, media=photo)
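Note: imports the snippet above appears to assume (the camera/image aliases are guesses; Twython comes from the twython package):

import datetime
import logging
from pygame import camera, image
from twython import Twython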
Example no. 31
	renderText(surface)
	image.save(surface, '/var/www/webcam.jpg')

def renderText(image):
	dateFont = font.SysFont('arial', 20, bold=True, italic=False)
	addressFont = font.SysFont('arial', 14, bold=False, italic=True)

	date = datetime.datetime.now().strftime('%b %d, %Y %I:%M %p')
	srfDate = dateFont.render(date, True, (255,255,255))
	srfDateShadow = dateFont.render(date, True, (0,0,0))

	address = 'Boulevard East, Weehawken, NJ'
	srfAddress = addressFont.render(address, True, (255,255,255))
	srfAddressShadow = addressFont.render(address, True, (0,0,0))

	image.blit(srfDateShadow, (11, 11))
	image.blit(srfDate, (10,10))
	image.blit(srfAddressShadow, (11, 36))
	image.blit(srfAddress, (10, 35))

print 'Starting WebCam service.'
pygame.init()
camera.init()
cameraNames = camera.list_cameras()
print 'Camera list: {}'.format(cameraNames)

cam = camera.Camera(cameraNames[1]) # todo: make index configurable / pass in name
cam.start()

timer = FakeTimer(capture)
timer.start()
Example no. 32
    def close(self):
        '''Stop webcam and thread
        '''
        self.running = False


class Webcam(object):
    '''Wrapper over the thread.
    '''
    def __init__(self, *args, **kwargs):
        self.thread = WebcamThread(*args, **kwargs)
        self.thread.start()

    def capture(self, *args, **kwargs):
        self.thread.capture(*args, **kwargs)

    def close(self):
        self.thread.close()
        self.thread.join()


if __name__ == "__main__":
    camera.init()
    cams = camera.list_cameras()
    if cams:
        print "Detected webcams:"
        for c in cams:
            print " {}".format(c)
    else:
        print "No webcams detected."
Example no. 33
    def __init__(self, processFunction=None, display=None, show=True, **argd):
        import logging
        import pygame
        import utils

        logging.debug("Initializing Video Capture Class")
        logging.debug("Pygame Version: %s" % pygame.__version__)

        processRuns = 0

        #set display size in pixels = width,height
        size = 640, 480

        utils.initFromArgs(self)

        #print self.__dict__.items()

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            if self.show is True:
                # create a display surface. standard pygame stuff
                self.display = pygame.display.set_mode(self.size, 0)
            else:
                pygame.display.init()
                self.display = pygame.surface.Surface(self.size)

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError("Sorry, no cameras detected.")

        logging.info(" Opening device %s, with video size (%s,%s)" %
                     (self.cameraList[0], self.size[0], self.size[1]))

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera(self.cameraList[0], self.size, "RGB")
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)

        # Explore namespace now:

        #print dir()
        """
          ['argd', 'camera', 'display', 'logging', 'processFunction', 'processRuns', 'pygame', 'self', 'show', 'size', 'utils']
        """

        #print dir(self)
        """
          ['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__',
          '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__',
          'argd', 'camera', 'cameraList', 'clock', 'display', 'get_and_flip', 'logging', 'main', 'processClock', 'processFunction',
          'processRuns', 'pygame', 'show', 'size', 'snapshot', 'utils']
        """

        #print self.__dict__.items()
        """
Example no. 34
File: webcam2.py Project: stnbu/rpi
from pygame import camera
from pygame import image
camera.init()
cam = camera.Camera(camera.list_cameras()[0])
cam.start()
img = cam.get_image()
image.save(img, "/tmp/photo.bmp")
camera.quit()
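The snippet above never stops the camera before quitting. A slightly more defensive variant; the empty-list check and the /tmp path are assumptions, not part of the original project:

from pygame import camera
from pygame import image
camera.init()
cams = camera.list_cameras()
if not cams:
    raise SystemExit("No camera detected.")
cam = camera.Camera(cams[0])
cam.start()
img = cam.get_image()
image.save(img, "/tmp/photo.bmp")
cam.stop()
camera.quit()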
Example no. 35
 def __init__(self):
     cameraNames = camera.list_cameras()
     self.cam = camera.Camera(cameraNames[0])
     self.cam.start()
     print cameraNames, '\n'
Example no. 36
photo_counter = 0


#Called if there was a 'loud noise'
def snap(cam):
    #print 'Snap!'
    #cam.get_image()
    #cam.get_image()
    cam.get_image()
    cam.get_image()
    img = cam.get_image()

    file_string = '/var/www/html/toilet.jpeg'
    pyimg.save(img, file_string)


try:

    pycam.init()
    cam = pycam.Camera(pycam.list_cameras()[0])
    cam.start()
    sleep(2)
    snap(cam)

except Exception as e:
    print str(e)
finally:
    cam.stop()
    pycam.quit()
Example no. 37
def renderText(image):
    dateFont = font.SysFont('arial', 20, bold=True, italic=False)
    addressFont = font.SysFont('arial', 14, bold=False, italic=True)

    date = datetime.datetime.now().strftime('%b %d, %Y %I:%M %p')
    srfDate = dateFont.render(date, True, (255, 255, 255))
    srfDateShadow = dateFont.render(date, True, (0, 0, 0))

    address = 'Boulevard East, Weehawken, NJ'
    srfAddress = addressFont.render(address, True, (255, 255, 255))
    srfAddressShadow = addressFont.render(address, True, (0, 0, 0))

    image.blit(srfDateShadow, (11, 11))
    image.blit(srfDate, (10, 10))
    image.blit(srfAddressShadow, (11, 36))
    image.blit(srfAddress, (10, 35))


print 'Starting WebCam service.'
pygame.init()
camera.init()
cameraNames = camera.list_cameras()
print 'Camera list: {}'.format(cameraNames)

cam = camera.Camera(
    cameraNames[1])  # todo: make index configurable / pass in name
cam.start()

timer = FakeTimer(capture)
timer.start()
Example no. 38
def main():
    pygame.init()
    camera.init()
    pygame.surfarray.use_arraytype("numpy")

    cams = camera.list_cameras()
    cam = camera.Camera(cams[0], (360, 296))
    cam = camera.Camera(cams[0], (640, 480))
    cam.start()
    fps = 25.0
    window = pygame.display.set_mode((640, 480), 0, 8)
    pygame.display.set_caption("Video")
    screen = pygame.display.get_surface()
    screen.set_palette([(i, i, i) for i in range(256)])

    print("Starting main loop")

    pea_list = [
        ("Spectrum", get_spectrum, get_equalized),
        ("Automask", apply_mask, get_normalized),
        ("Propagation", propagate, get_normalized),
        ("Reconstruction", reconstruct, get_complex_view),
        ]

    set_array = False
    set_equalize = False
    set_normalize = True
    set_pea = False
    pea_level = 1
    distance = 5
    comp_view = "phase"

    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.KEYDOWN:
                if (event.key == pygame.K_q):
                    return

                # IMAGE PROCESSING
                elif (event.key == pygame.K_a):
                    set_array = not set_array
                    print("Converting to array: %s" % set_array)
                elif (event.key == pygame.K_n):
                    set_normalize = not set_normalize
                    print("Normalize: %s" % set_normalize)
                elif (event.key == pygame.K_e):
                    set_equalize = not set_equalize
                    print("Equalize: %s" % set_equalize)

                # PEA
                elif (event.key == pygame.K_p):
                    set_pea = not set_pea
                    print("PEA processing set: %s" % set_pea)
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_PAGEUP):
                    pea_level -= 1
                    pea_level = max(pea_level, 1)
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_PAGEDOWN):
                    pea_level += 1
                    pea_level = min(pea_level, len(pea_list))
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_TAB):
                    comp_view = "phase" if comp_view != "phase" else "mod"
                    print("PEA complex viewer set to: %s" % comp_view)

                # FOCUS DISTANCE
                elif (event.key == pygame.K_DOWN):
                    distance += 5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_UP):
                    distance -= 5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_LEFT):
                    distance -= .5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_RIGHT):
                    distance += .5
                    print("Distance: %.1f" % distance)

                # FULLSCREEN
                elif (event.key == pygame.K_f):
                    pygame.display.toggle_fullscreen()

                # CAPTURE
                elif (event.key == pygame.K_c):
                    filename = save_raw(cam)
                    print("Raw image saved to: %s" % filename)

        image = cam.get_image()

        if set_array:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
#                array = array[:,:,0] # red
#                array = array[:,:,0] # green
#                array = array[:,:,0] # blue

            if set_equalize:
                array = equalize(array).astype(int)
            elif set_normalize:
                array = normalize(array)

            pygame.surfarray.blit_array(screen, array)

        elif set_pea:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
#                array = array[:,:,0] # red
#                array = array[:,:,0] # green
#                array = array[:,:,0] # blue

            pea_algs = pea_list[:pea_level]
            pea_rep = pea_algs[-1][-1]

            for alg in pea_algs:
                try:
                    array = alg[1](array, distance=distance)
                except:
                    print("W: skipped framme's: %s" % alg[0])

            array = pea_rep(array, comp_view=comp_view).astype(int)

            pygame.surfarray.blit_array(screen, array)

        else:
            screen.blit(image, (0,0))

        pygame.display.flip()
        pygame.time.delay(int(1000./fps))
Example no. 39
 def setup(self):
     cameras = camera.list_cameras()
     if CAMERA_INDEX >= len(cameras):
         raise Exception('Error: no camera')
     self._cam = camera.Camera(cameras[CAMERA_INDEX], (CAMERA_WIDTH, CAMERA_HEIGHT))
     self._cam.start()
Example no. 40
def scan():
    camera.init()
    cam = camera.Camera(camera.list_cameras()[0], (1600,1200,))
    cam.start()
    
    ok = False
    while not ok:
        movie = raw_input('Name of the movie: ')
        startby = raw_input('Start movie by picture [default 0]: ')
        startat = raw_input('Start the scanner at "8:20" [default now]: ')
        
        try:
            if startat == '' or startat == 'now':
                startat = None
            else:
                h, m = startat.split(':')
                startat = datetime.datetime.now()
                startat = startat.replace(hour=int(h), minute=int(m))
                if(datetime.datetime.now() > startat):
                    startat = startat + datetime.timedelta(days=1)
        except:
            print 'Wrong time input'
            continue

        if startby:
            startby = int(startby)
        else:
            startby = 0
        f = '%s/%s' % (PATH, movie,)
        if (os.path.exists(f) and startby == 0) or (not os.path.exists(f) and startby != 0):
            print 'Folder already exists'
            continue
        os.system(CMD_MKDIR % f)
        ok = True
    while startat is not None and datetime.datetime.now() < startat:
        clear()
        print 'Scanner will start in %s:%s:%s ' % cal_time((startat - datetime.datetime.now()).total_seconds())
       time.sleep(0.5)



    params = dict(scanning=True, 
                  filename='',
                  stop=False,
                  stopwatch=datetime.timedelta(),
                  counter=1,
                  exit = False,
                  watchdog=True,
                  watchdog_diff=0)
    counter = startby
    stopwatch = datetime.datetime.now()
    threading.Thread(target=scan_display, args = ((params,))).start()
    threading.Thread(target=scan_stop, args = ((params,))).start()
    threading.Thread(target=scan_watchdog, args = ((params,))).start()
    #while GPIO.input(END_SENSOR) == GPIO.HIGH and counter <= MAX_IMAGES:
    while counter <= MAX_IMAGES and not params['exit']:
        while params['stop']:
            time.sleep(0.01)
        counter += 1
        params['counter'] = counter
        params['stopwatch'] = datetime.datetime.now() - stopwatch
        stopwatch = datetime.datetime.now()
        params['filename'] = '%s/%s/%s.raw' % (PATH, movie, ('0'*6 + str(counter))[-6:],)
        #subprocess.call(CMD_CAM % params['filename'], shell=True)
        f = open(params['filename'], 'w')
        # wait until the cameras frame is ready
        for i in range(FRAMES + SKIPS):
            while not cam.query_image():
                time.sleep(0.005)
            if i < SKIPS:
                cam.get_raw()
            else:
                f.write(cam.get_raw())
        f.close()
        subprocess.call(CMD_LN % (os.path.basename(params['filename']), '%s/%s' % (PATH, movie,),), shell=True)
        step()
    params['scanning'] = False
    cam.stop()
    open('%s/%s/ready' % (PATH, movie,), 'w').close()
Example no. 41
def main():
    pygame.init()
    camera.init()
    pygame.surfarray.use_arraytype("numpy")

    cams = camera.list_cameras()
    cam = camera.Camera(cams[0], (360, 296))
    cam = camera.Camera(cams[0], (640, 480))
    cam.start()
    fps = 25.0
    window = pygame.display.set_mode((640, 480), 0, 8)
    pygame.display.set_caption("Video")
    screen = pygame.display.get_surface()
    screen.set_palette([(i, i, i) for i in range(256)])

    print("Starting main loop")

    pea_list = [
        ("Spectrum", get_spectrum, get_equalized),
        ("Automask", apply_mask, get_normalized),
        ("Propagation", propagate, get_normalized),
        ("Reconstruction", reconstruct, get_complex_view),
    ]

    set_array = False
    set_equalize = False
    set_normalize = True
    set_pea = False
    pea_level = 1
    distance = 5
    comp_view = "phase"

    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    return

                # IMAGE PROCESSING
                elif event.key == pygame.K_a:
                    set_array = not set_array
                    print("Converting to array: %s" % set_array)
                elif event.key == pygame.K_n:
                    set_normalize = not set_normalize
                    print("Normalize: %s" % set_normalize)
                elif event.key == pygame.K_e:
                    set_equalize = not set_equalize
                    print("Equalize: %s" % set_equalize)

                # PEA
                elif event.key == pygame.K_p:
                    set_pea = not set_pea
                    print("PEA processing set: %s" % set_pea)
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_PAGEUP:
                    pea_level -= 1
                    pea_level = max(pea_level, 1)
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_PAGEDOWN:
                    pea_level += 1
                    pea_level = min(pea_level, len(pea_list))
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_TAB:
                    comp_view = "phase" if comp_view != "phase" else "mod"
                    print("PEA complex viewer set to: %s" % comp_view)

                # FOCUS DISTANCE
                elif event.key == pygame.K_DOWN:
                    distance += 5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_UP:
                    distance -= 5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_LEFT:
                    distance -= 0.5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_RIGHT:
                    distance += 0.5
                    print("Distance: %.1f" % distance)

                # FULLSCREEN
                elif event.key == pygame.K_f:
                    pygame.display.toggle_fullscreen()

                # CAPTURE
                elif event.key == pygame.K_c:
                    filename = save_raw(cam)
                    print("Raw image saved to: %s" % filename)

        image = cam.get_image()

        if set_array:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
            #                array = array[:,:,0] # red
            #                array = array[:,:,0] # green
            #                array = array[:,:,0] # blue

            if set_equalize:
                array = equalize(array).astype(int)
            elif set_normalize:
                array = normalize(array)

            pygame.surfarray.blit_array(screen, array)

        elif set_pea:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
            #                array = array[:,:,0] # red
            #                array = array[:,:,0] # green
            #                array = array[:,:,0] # blue

            pea_algs = pea_list[:pea_level]
            pea_rep = pea_algs[-1][-1]

            for alg in pea_algs:
                try:
                    array = alg[1](array, distance=distance)
                except:
                    print("W: skipped framme's: %s" % alg[0])

            array = pea_rep(array, comp_view=comp_view).astype(int)

            pygame.surfarray.blit_array(screen, array)

        else:
            screen.blit(image, (0, 0))

        pygame.display.flip()
        pygame.time.delay(int(1000.0 / fps))
Example no. 42
def readyCamera():
    cam.init()
    kamera = cam.Camera(cam.list_cameras()[1])
    return kamera
Example no. 43
    def run(self):
        self.size = (640, 480)
        self.depth = 24
        self.thumbscale = 4
        pygame.init()
        pygame.camera.init()
        self.fuente = pygame.font.Font(None, 60)
        self.camlist = camera.list_cameras()
        self.camera = camera.Camera(self.camlist[0], self.size, "RGB")
        self.camera.set_controls(True, False)
        self.camera.start()
        self.clock = pygame.time.Clock()
        self.final = None
        self.imlist = []
        self.offset = 20
        self.max_cant = 9
        self.display = pygame.display.get_surface()
        self.display.fill((82, 186, 74))

        self.converted = pygame.surface.Surface(self.size, 0, self.display)
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
        self.tiny = pygame.surface.Surface((self.size[0]/self.thumbscale,self.size[1]/self.thumbscale),0,self.display)
        pygame.display.flip()

        going = True
        while going:
            #GTK events
            while gtk.events_pending():
                gtk.main_iteration()

            events = pygame.event.get()
            for e in events:
                if e.type == pygame.USEREVENT:
                    if hasattr(e,"action"):
                        if e.action == 'save_button':
                            self.show_text("Saving")
                            if self.final:
                                self.parent.save_image(self.final)
                            else:
                                if not(self.imlist == []):
                                    self.final = stitcher.build_panorama(self, self.imlist, self.auto_stich)
                                    self.parent.save_image(self.final)
                            pygame.display.flip()
                        elif e.action == 'new_button':
                            self.imlist = []
                            self.final = None
                            self.display.fill((82, 186, 74))
                            self.offset = 20
                            pygame.display.flip()
                        elif e.action == 'capture_button':
                            self.add_capture()
                        elif e.action == 'stitch_button':
                            self.show_text("Processing")
                            if not(self.imlist == []):
                                self.final = stitcher.build_panorama(self, self.imlist, self.auto_stich)
                            pygame.display.flip()
                elif e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
                    going = False
                elif e.type == KEYDOWN and e.key == K_SPACE:
                    self.add_capture()


            self.get_and_flip()
            self.clock.tick()

        if self.camera:
            self.camera.stop()
Example no. 44
import pygame
import pygame.camera as pycamera
import time
from pygame.locals import *

size = (640, 480)
display = pygame.display.set_mode(size, 0)
snapshot = pygame.surface.Surface(size, 0, display)

pycamera.init()
cameras = pycamera.list_cameras()
if len(cameras) <= 0:
  print "No cameras detected."
  pygame.quit()
  raise SystemExit
if len(cameras) == 1:
  print "Found 1 camera:"
else:
  print "Found " + str(len(cameras)) + " cameras:"
for camera in cameras:
  print camera
cam = pycamera.Camera(cameras[0], size)
cam.start()

running = True
while running:
  events = pygame.event.get()
  for e in events:
    if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
      running = False
  img = cam.get_image(snapshot)
  display.blit(img, (0,0))
Example no. 45
def get_camera_list():
    return camera.list_cameras()
Example no. 46
def run_inference():
    """Runs inference on a webcam stream.
  Returns:
    Nothing
  """
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(
            os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb'),
            'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

    # Creates node ID --> English string lookup.
    node_lookup = NodeLookup()

    pygame.init()
    pycam.init()
    target_camera = pycam.list_cameras()[camera_index]
    cam = pycam.Camera(target_camera, camera_resolution)
    cam.start()

    screen = pygame.display.set_mode(display_resolution)
    pygame.display.set_caption("Wall-E")
    myfont = pygame.font.SysFont("Comic Sans MS, Bold", 15)
    image = cam.get_image()
    pygame.display.get_surface().blit(
        pygame.transform.scale(image, display_resolution), (0, 0))

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across
        #   1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #   float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #   encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')

        running = True
        while running:
            time.sleep(0.1)
            cam.get_image(image)
            # Rotate image because resulting array has image rotated.
            img_arr = pygame.surfarray.pixels3d(
                pygame.transform.rotate(image, 90))
            # image_data = pygame.image.tostring(image, "RGBA", True)
            # pil_img = Image.frombuffer("RGBA", (1280, 720), image_data)
            # img_arr = np.array(pil_img)[:, :, 0:3]

            # Blit image onto screen.
            screen.blit(pygame.transform.scale(image, display_resolution),
                        (0, 0))

            predictions = sess.run(softmax_tensor, {'DecodeJpeg:0': img_arr})
            predictions = np.squeeze(predictions)

            top_k = predictions.argsort()[-num_top_predictions:][::-1]

            for i in xrange(num_top_predictions):
                node_id = top_k[i]
                human_string = node_lookup.id_to_string(node_id)
                score = predictions[node_id]
                result = '%.5f: %s' % (score, human_string)
                if score < 0.09:
                    break
                label = myfont.render(result, True,
                                      (255 - 20 * i, 255, 20 * i))
                screen.blit(label, (10, 10 + 20 * i))

            # Refresh display
            pygame.display.flip()

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False

    cam.stop()
    pygame.quit()
Example no. 47
def init_cam():
    pygame.init()
    camera.init()
    webcam = camera.Camera(camera.list_cameras()[0])
    return webcam
Example no. 48
    def __init__(self, processFunction=None,
                       display        =None,
                       show           =True, **argd):
        import logging
        import pygame

        logging.debug(" Initializing Video Capture Class")

        self.processRuns = 0

        #set display size in pixels = width,height
        self.size = 640,480

        self.processFunction = processFunction
        self.display = display
        self.show = show

        #print self.__dict__.items()

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            if self.show is True:
                # create a display surface. standard pygame stuff
                self.display = pygame.display.set_mode( self.size, 0 )
            else:
                pygame.display.init()
                self.display = pygame.surface.Surface(self.size)

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError("Sorry, no cameras detected.")

        logging.info(" Opening device %s, with video size (%s,%s)" % (self.cameraList[0],self.size[0],self.size[1]))

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera(self.cameraList[0], self.size, "RGB")
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)

        # Explore namespace now:

        #print dir()
        """
          ['argd', 'camera', 'display', 'logging', 'processFunction', 'processRuns', 'pygame', 'self', 'show', 'size', 'utils']
        """

        #print dir(self)
        """
          ['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__',
          '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__',
          'argd', 'camera', 'cameraList', 'clock', 'display', 'get_and_flip', 'logging', 'main', 'processClock', 'processFunction',
          'processRuns', 'pygame', 'show', 'size', 'snapshot', 'utils']
        """

        #print self.__dict__.items()
        """
Example no. 49
def printVideoDevices():
    '''Prints a list of available video devices'''
    camera.init()
    print(camera.list_cameras())