Code Example #1
    def __init__(self, logger, queue, parent_queue):
        threading.Thread.__init__(self)
        self.logger = logger
        self.queue = queue
        self.parent_queue = parent_queue
        self.logger.debug("Video created")

        self.text = "Welcome to Smash Putt"

        # Setup screen
        pygame.init()
        pygame.mouse.set_visible(False)
        self.width = 1024
        self.height = 768
        self.screen = pygame.display.set_mode((self.width, self.height))
        font_size = 60
        font_width = font_size * 0.7
        self.font = pygame.font.SysFont("Droid Sans Mono", font_size, bold=1)
        self.line_length = self.width / font_width

        camera.init()
        camera_size = (640, 480)
        self.c = camera.Camera('/dev/video0', camera_size)
        self.c.start()
        self.surface = pygame.Surface(camera_size)
        self.bigSurface = None
        self.pause = False

        self.foregroundColor = pygame.Color(255, 0, 0)
        self.black = pygame.Color(0, 0, 0)
        self.shadowShade = 0
Code Example #2
	def __init__(self, logger, queue, parent_queue):
		threading.Thread.__init__(self)
		self.logger = logger
		self.queue = queue
		self.parent_queue = parent_queue
		self.logger.debug("Video created")

		self.text = "Welcome to Smash Putt"

		# Setup screen
		pygame.init()
		pygame.mouse.set_visible(False)
		self.width = 1024
		self.height = 768
		self.screen = pygame.display.set_mode((self.width, self.height))
		font_size = 60
		font_width = font_size * 0.7
		self.font = pygame.font.SysFont("Droid Sans Mono", font_size, bold=1)
		self.line_length = self.width/font_width


		camera.init()
		camera_size = (640,480)
		self.c = camera.Camera('/dev/video0', camera_size)
		self.c.start()
		self.surface = pygame.Surface(camera_size)
		self.bigSurface = None
		self.pause = False

		self.foregroundColor = pygame.Color(255, 0, 0)
		self.black = pygame.Color(0, 0, 0)
		self.shadowShade = 0
Code Example #3
File: webcamCapture.py Project: RyzenElstra/PyRAT
        def __init__(self):
                os = system()
                
                if os == 'Windows':
                        self.usuario = environ['USERNAME']
                else:
                        self.usuario = environ['USER']
 
                camera.init()
                misWebcams = camera.list_cameras()
 
                if len(misWebcams) == 0:
                        raise Exception('No webcam available.')
 
                elif len(misWebcams) == 1:
                        self.miWebcam = misWebcams[0]
 
                else:
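                        # Fall back to the first usable camera in the list; the plain
                        # assignment below cannot raise, so this effectively selects
                        # misWebcams[0].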
                        for i in range(len(misWebcams)):
                                try:
                                        self.miWebcam = misWebcams[i]
                                        break
                                except:
                                        continue
Code Example #4
def init():
    global _init_done
    if _init_done:
        return
    pygame.init()
    camera.init()
    _init_done = True
Code Example #5
def getImage():
    cam.init()
    k = cam.Camera(cam.list_cameras()[0])
    k.start()
    img = k.get_image()
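    # sejv() is presumably the project's own save helper (likely a wrapper around pygame.image.save)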
    sejv(img, "tabla.jpg")
    k.stop()
Code Example #6
 def __init__(self, num=0, size=(640, 640)):
     super().__init__(num, size)
     camera.init()
     cam = None
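     # Keep trying until a Camera object is obtained; in practice camera.Camera()
     # either succeeds or raises, so this loop normally runs only once.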
     while cam is None:
         cam = camera.Camera(camera.list_cameras()[num], size)
     # cam.set_resolution(*size)
     self.cam = cam
Code Example #7
	def __init__(self,camera=0,resolution=(640,480)):
		""" Init the camera with a camera and a certain resolution """
		PyCamera.init()
		try:
			self.cam = PyCamera.Camera(PyCamera.list_cameras()[camera],resolution)
			self.resolution = resolution
		except Exception:
			print 'Problem initializing the camera!'
Code Example #8
File: kalliope_gif.py Project: slapker/kalliope-gif
 def __init__(self, save_directory,nb_photos,isReverse,duration,timer):
     camera.init()
     self.cam = camera.Camera("/dev/video0",(320,200))
     self.images_cam=[]
     self.save_directory=save_directory
     self.nb_photos=nb_photos
     self.isReverse=isReverse
     self.duration=duration
     self.timer=timer
Code Example #9
File: video.py Project: mak1e/Open-Allure-DS
    def __init__(self,
                 processFunction=None,
                 display=None,
                 photos=None,
                 version=None,
                 **argd):

        logging.debug("Initializing Video Capture Class")

        #set display size in pixels = width,height
        displaySize = 752, 600
        size = 640, 480

        processRuns = 0

        self.__dict__.update(argd)
        self.__dict__.update(locals())

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            pygame.display.init()
            pygame.display.set_caption(u"Open Allure " + version)
            self.display = pygame.display.set_mode(self.displaySize, 0)

        # bring in photos
        self.photoSmile = pygame.image.load(photos[0]).convert()
        self.photoTalk = pygame.image.load(photos[1]).convert()
        self.photoListen = pygame.image.load(photos[2]).convert()

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        if sys.platform == 'darwin':
            self.cameraList = ['0']  # camera.list_cameras()
        else:
            self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError("Sorry, no cameras detected.")

        logging.info("Opening device %s, with video size (%s,%s)" %
                     (self.cameraList[0], self.size[0], self.size[1]))

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera(self.cameraList[0], self.size, "RGB")
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)

        # place holders for thumbnails
        self.snapshotThumbnail = None
        self.processedShotThumbnail = None
Code Example #10
def main():
    parser = argparse.ArgumentParser(description="Time to D-D-D-Duel.")
    parser.add_argument("saveDir", help="Directory to save photos to.")
    parser.add_argument("--prefix", default="ygh-photo",
                        help="File prefix for each numbered photo.")
    parser.add_argument("--psm", type=int, default=6,
                        help="psm argument for tesseract tool.")
    args = parser.parse_args()

    prefix = args.prefix
    save_dir = args.saveDir
    psm = args.psm

    if not os.path.exists(save_dir):
        os.mkdir("./%s" % save_dir)

    # setup camera
    try:
        pc.init()
        print("Cameras -> ")
        print(pc.list_cameras())
        webcam = pc.Camera(pc.list_cameras()[0])
        webcam.start()
    except Exception as e:
        print("Error encountered when setting up webcam, check it's not already in use.")
        print(e)
        raise SystemExit

    i = webcam.get_image()
    pi.save(i, "./photo.png")
    # let user select when to take each photo, number them consecutively.
    count = 0
    while True:
        input()
        img = webcam.get_image()
        file_path = "%s/%s%d.png" % (save_dir, prefix, count)
        pi.save(img, file_path)
        print("---> Processing image %s" % file_path)
        try:
            processed_fp = "%s/processed-%s%d.png" % (save_dir, prefix, count)
            preprocess_image(file_path, processed_fp)
            # Define config parameters.
            # '-l eng'  for using the English language
            # '--oem 1' for using LSTM OCR Engine
            # psm 6 = words as a text line?
            config = ("-l eng --oem 1 --psm %d" % psm)
            text = pytesseract.image_to_string(
                Image.open(file_path), config=config)
            print("-----text found-------")
            print(text)
            print("----------------------")
        except UnicodeEncodeError:
            print("[!] had an issue encoding to Unicode.")
        count += 1

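    # Note: the loop above never breaks, so this cleanup call is never reached.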
    pc.quit()
Code Example #11
File: ryApp.py Project: renyuanL/realTimeSpectrogram
    def __init__(它):

        pg.init()
        pgCam.init()

        它.幕 = pg.display.set_mode(它.幕寬高, 0)
        pg.display.set_caption("ryApp.py, using RyAudio, on PyCon APAC 2014, by Renyuan Lyu")

        它.啟動視訊()

        它.啟動音訊()
Code Example #12
    def __init__(self, display):
        pgcam.init()
        self.clist = pgcam.list_cameras()
        print(self.clist)
        if not self.clist:
            raise ValueError("sorry no camera detected")

        self.camera = pgcam.Camera(self.clist[0], (640, 480))
        self.screen = pg.surface.Surface((640, 480), 0, display)
        self.camera.start()
        print("camera set")
Code Example #13
File: video.py Project: mak1e/Open-Allure-DS
    def __init__(self, processFunction=None,
                       display        =None,
                       photos         =None,
                       version        =None,**argd):

        logging.debug( "Initializing Video Capture Class" )

        #set display size in pixels = width,height
        displaySize = 752,600
        size = 640,480

        processRuns = 0

        self.__dict__.update( argd )
        self.__dict__.update( locals() )

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            pygame.display.init()
            pygame.display.set_caption(u"Open Allure " + version)
            self.display = pygame.display.set_mode( self.displaySize, 0 )

        # bring in photos
        self.photoSmile  = pygame.image.load( photos[0] ).convert()
        self.photoTalk   = pygame.image.load( photos[1] ).convert()
        self.photoListen = pygame.image.load( photos[2] ).convert()

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        if sys.platform == 'darwin':
            self.cameraList = ['0'] # camera.list_cameras()
        else:
            self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError( "Sorry, no cameras detected." )

        logging.info( "Opening device %s, with video size (%s,%s)" % ( self.cameraList[0], self.size[0], self.size[1] ) )

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera( self.cameraList[0], self.size, "RGB" )
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface( self.size, 0, self.display )

        # place holders for thumbnails
        self.snapshotThumbnail = None
        self.processedShotThumbnail = None
Code Example #14
 def __init__(self, device, width, height, color='RGB'):
     '''Initialize device
     '''
     camera.init()
     if not device:
         device = camera.list_cameras()[0]
     self._cam = camera.Camera(device, (width, height), color)
     self._cam.start()
     self.running = True
     self.img = None
     Thread.__init__(self)
Code Example #15
 def __init__(self, display):
     pgcam.init()
     self.clist = pgcam.list_cameras()
     print(self.clist)
     if not self.clist:
         raise ValueError("sorry no camera detected")
     self.width = 960
     self.height = 720
     self.camera = pgcam.Camera(self.clist[0], (self.width, self.height))
     self.screen = pg.surface.Surface((self.width, self.height), 0, display)
     self.camera.start()
     print("camera set")
Code Example #16
    def __init__(它):

        pg.init()
        pgCam.init()

        它.幕 = pg.display.set_mode(它.幕寬高, 0)
        pg.display.set_caption(
            'ryApp.py, using RyAudio, on PyCon APAC 2014, by Renyuan Lyu')

        它.啟動視訊()

        它.啟動音訊()
Code Example #17
    def __init__(self):

        pg.init()
        pgCam.init()

        self.screen = pg.display.set_mode(self.screenSize, 0)
        pg.display.set_caption(
            'ryApp.py, using RyAudio, on PyCon APAC 2014, by Renyuan Lyu')

        self.initVideo()

        self.initAudio()
Code Example #18
 def __init__(self, is_laptop_cam, device, crop_start, crop_size):
     self.crop_start = crop_start
     self.crop_size = crop_size
     self.device = device
     self.is_laptop_cam = is_laptop_cam
     if is_laptop_cam:
         # use pygame
         try:
             pc.init()
             self.camera = pc.Camera(device)
             self.camera.start()
         except Exception as e:
             raise ValueError("Unable to setup pygame %s" % e)
Code Example #19
 def __init__(self, cfg):
     super().__init__(inputs=[], outputs=[
         'cam/img',
     ], threaded=True)
     self.img_w = cfg['img_w']
     self.img_h = cfg['img_h']
     self.image_format = cfg['image_format']
     pygame.init()
     camera.init()
     cameras = camera.list_cameras()
     print("Using camera %s ..." % cameras[cfg['cam_source']])
     self.webcam = camera.Camera(cameras[cfg['cam_source']],
                                 cfg['cam_resolution'])
     self.processed_frame = None
     self.on = True
Code Example #20
File: displaycam.py Project: mharig/displayCam
def initCam(_camera='/dev/video0', _res=(640, 480)):
    '''Takes the name of the desired camera device and a resolution (w, h). Returns the started camera object.'''

    camera.init()
    cam = camera.Camera(_camera, _res)

    if cam is None:
        raise Exception('Cannot connect to camera. Maybe in use by other program?')

    try:
        cam.start()
    except:
        raise Exception('Cannot connect to camera. Maybe in use by other program?')

    return cam
Code Example #21
File: camera.py Project: notmahi/marvin
def take_picture(given_name='test'):
    camera.init()

    list_of_cameras = camera.list_cameras()
    print("Found {} cameras!".format(len(list_of_cameras)))

    if len(list_of_cameras):
        my_camera = camera.Camera(list_of_cameras[0])
        print("Successfully connected to the camera!")

        my_camera.start()
        surface = my_camera.get_image()
        print(surface)
        pyimage.save(surface, '{}.bmp'.format(given_name))
        my_camera.stop()
Code Example #22
def main():
    camera.init()
    print('scanning cameras...')
    while True:
        for camera_path in camera.list_cameras():
            # camera_path : /dev/video0
            cmr = camera.Camera(camera_path)
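            # If start() raises, the device is assumed to be held open by another
            # process, which triggers the alert below.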
            try:
                cmr.start()
                cmr.stop()
            except:
                print('** Alert ** : (camera {} is opened)'.format(camera_path))
                Thread(target=alert).start()
                sleep(1)
        sleep(1)
    camera.quit()
Code Example #23
def camstream(camera):
    camera.init()
    DEVICE = 0
    SIZE = (640, 480)
    FILENAME = 'capture.png'
    display = pygame.display.set_mode(SIZE, 0)
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    while capture:
        # photo = camera.get_image(screen)
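        # With get_image() commented out above, the preview surface stays blank;
        # a frame is only grabbed when the 's' key is pressed below.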
        display.blit(screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == QUIT:
                capture = False
            elif event.type == KEYDOWN and event.key == K_s:
                photo = camera.get_image(screen)
                pygame.image.save(
                    photo,
                    "E://Projects//Pygame experiment//images_without_bg//img.jpg"
                )
                img = plt.imread(
                    "E://Projects//Pygame experiment//images_without_bg//img.jpg"
                )

                detector = FER()

                print("Started processing..")
                try:
                    print(detector.top_emotion(img))
                    print("Ended Processing result :)")
                except:
                    print("Couldn't generate emotion analysis :(")
                # plt.imshow(img)

                # image = pygame.image.save(screen, FILENAME)
                # pygame.image.save(photo, "E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # img = plt.imread("E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # detector = FER(mtcnn=True)
                # print(detector.detect_emotions("E://Projects//Pygame experiment//images_without_bg//img.jpg"))
                # plt.imshow(img)

    camera.stop()
    # pygame.quit()
    return
Code Example #24
File: pygame_camera.py Project: FrancoisPerez/tailor
    def open(self):
        # TODO: make async
        from pygame import camera

        camera.init()
        cameras = camera.list_cameras()
        dc = camera.Camera(cameras[self._device_index], self.default_resolution, 'RGB')
        dc.start()

        time.sleep(1)  # give time for webcam to init.

        # 'prime' the capture context...
        # some webcams might not init fully until a capture
        # is done.  so we do a capture here to force device to be ready
        # and query the maximum supported size
        self._temp_surface = dc.get_image()
        self._device_context = dc
Code Example #25
File: displaycam.py Project: mharig/displayCam
def initCam(_camera='/dev/video0', _res=(640, 480)):
    '''Takes the name of the desired camera device and a resolution (w, h). Returns the started camera object.'''

    camera.init()
    cam = camera.Camera(_camera, _res)

    if cam is None:
        raise Exception(
            'Cannot connect to camera. Maybe in use by other program?')

    try:
        cam.start()
    except:
        raise Exception(
            'Cannot connect to camera. Maybe in use by other program?')

    return cam
Code Example #26
        def __init__(self, input_signal, output_channel, device=0, max_freq=10, size=(WIDTH, HEIGHT), grey=True):
            """
            Constructor for a VideoSnapshot source.
    
            @param input_signal: A channel that will pass a message when an output
            is desired.
    
            @param output_channel: The channel that will be passed a tagged image signal.
    
            @param device: The camera device to connect to - (0 is default)
    
            @param max_freq: We won't bother polling faster than this max frequency.
    
            @param size: A tuple containing the width and height to use for the camera
            device.
    
            @param grey: A boolean indicating if the image should be averaged to one channel.

            Example usage:
    
                >>> msg = Event(tag = 1, value = go)
                >>> in_channel, out_channel = Channel(), Channel()
                >>> vid_src = VideoSnapshot(in_channel, out_channel)
                >>> in_channel.put(msg)
                >>> in_channel.put(LastEvent())  # Tells the component we are finished
                >>> vid_src.start()     # Start the thread, it will process its input channel
                >>> vid_src.join()
                >>> img1 = out_channel.get()
                >>> assert out_channel.get().last == True
            """
            super(VideoSnapshot, self).__init__(input_signal, output_channel)
            self.MAX_FREQUENCY = max_freq
            self.device = device
            self.size = size
            self.grey = grey
            self.snapshot = None # This is where we will save our pygame surface image
            logging.debug("Initialising Video Capture")
            camera.init()

            # gets a list of available cameras.
            self.clist = camera.list_cameras()
            if not self.clist:
                raise IOError("Sorry, no cameras detected.")

            logging.info("Opening device %s, with video size (%s,%s)" % (self.clist[0], self.size[0], self.size[1]))

            self.camera = camera.Camera(self.clist[0], self.size, "RGB")
Code Example #27
def main():
    print("Loading calibration matrix....")

    with np.load('calib_camera.npz') as fp:
        mtx, dist = [fp[i] for i in ('mtx', 'dist')]

    pygame.init()
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption('Calibration')

    ubuntu = pygame.font.match_font('Ubuntu')
    font = pygame.font.Font(ubuntu, 20)
    font.set_bold(True)

    camera.init()
    c = camera.Camera(camera.list_cameras()[0], size)
    c.start()

    finish = False
    clock = pygame.time.Clock()

    while not finish:
        surf = c.get_image()
        img = pygame.surfarray.pixels3d(surf)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (6, 6), None)
        img_gray = np.dstack([gray, gray, gray])
        if ret:
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            _, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx, dist)
            # print(rvecs.shape)
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
            img_gray = draw(img_gray, corners2, imgpts)

        gray_surf = pygame.surfarray.make_surface(img_gray)
        screen.blit(gray_surf, (0, 0))
        clock.tick(FPS)

        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                finish = True

    c.stop()
Code Example #28
    def open(self):
        # TODO: make async
        from pygame import camera

        camera.init()
        cameras = camera.list_cameras()
        dc = camera.Camera(cameras[self._device_index],
                           self.default_resolution, 'RGB')
        dc.start()

        time.sleep(1)  # give time for webcam to init.

        # 'prime' the capture context...
        # some webcams might not init fully until a capture
        # is done.  so we do a capture here to force device to be ready
        # and query the maximum supported size
        self._temp_surface = dc.get_image()
        self._device_context = dc
Code Example #29
File: ZumaCam.py Project: nonzod/zuma
 def __init__(self, display):
     self.display = display
     camera.init()
     self.clist = camera.list_cameras()
     if not self.clist:
         self.is_enabled = False
         # raise ValueError("No webcam found! :[")
     else:
         self.device = camera.Camera(self.clist[0], CAMERA_SIZE)
         try:
             self.device.start()
             self.is_enabled = True
             self.surface = py.surface.Surface(self.device.get_size(), 0,
                                               display)
         except:
             self.is_enabled = False
             print("Errore nell'avvio della Webcam >:[")
             self.surface = None
Code Example #30
File: __init__.py Project: aikikode/uspeak
    def __init__(self):
        camera.init()
        self.size = (640, 480, )
        # create a display surface. standard pygame stuff
        self.display = pygame.display.set_mode(self.size, 0)

        # this is the same as what we saw before
        self.clist = pygame.camera.list_cameras()
        if not self.clist:
            raise ValueError("Sorry, no cameras detected.")
        self.cam = pygame.camera.Camera(self.clist[0], self.size)
        self.cam.start()

        # create a surface to capture to.  for performance purposes
        # bit depth is the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
        self.thresholded = pygame.surface.Surface(self.size, 0, self.display)
        self.previous_pixels = None
Code Example #31
File: video.py Project: Tinkerers/BirdBox
	def __init__(self, logger, queue, parent_queue):
		threading.Thread.__init__(self)
		self.logger = logger
		self.logger.debug("Creating video...")
		self.queue = queue
		self.parent_queue = parent_queue
		self.is_slide = False
		self.slide = None

		self.text = "Welcome!"

		# Setup screen
		pygame.init()
		self.clock = time.Clock()
		pygame.mouse.set_visible(False)
		self.width = settings.SCREEN_WIDTH
		self.height = settings.SCREEN_HEIGHT
		flags = 0 #pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.FULLSCREEN
		self.screen = pygame.display.set_mode((self.width, self.height), flags)
		font_size = settings.FONT_SIZE
		self.font = pygame.font.SysFont(settings.FONT, font_size, bold=1)


		if settings.CAMERA:
			camera.init()
			camera_size = (640,480)
			self.c = camera.Camera('/dev/video0', camera_size)
			self.c.start()
			self.surface = pygame.Surface(camera_size)
		self.bigSurface = None
		self.alert = False

		self.foregroundColor = pygame.Color(settings.FONT_COLOR)
		self.backgroundColor = pygame.Color(settings.BACKGROUND_COLOR)
		self.black = pygame.Color(0, 0, 0, 100)
		self.shadowShade = 0

		self.background_image = None
		if settings.BACKGROUND_IMAGE:
			self.background_image = pygame.image.load(settings.BACKGROUND_IMAGE)
		self.logger.debug("Video created")
Code Example #32
    def __init__(self, logger, queue, parent_queue):
        threading.Thread.__init__(self)
        self.logger = logger
        self.logger.debug("Creating video...")
        self.queue = queue
        self.parent_queue = parent_queue
        self.is_slide = False
        self.slide = None

        self.text = "Welcome!"

        # Setup screen
        pygame.init()
        self.clock = time.Clock()
        pygame.mouse.set_visible(False)
        self.width = settings.SCREEN_WIDTH
        self.height = settings.SCREEN_HEIGHT
        flags = 0  #pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.FULLSCREEN
        self.screen = pygame.display.set_mode((self.width, self.height), flags)
        font_size = settings.FONT_SIZE
        self.font = pygame.font.SysFont(settings.FONT, font_size, bold=1)

        if settings.CAMERA:
            camera.init()
            camera_size = (640, 480)
            self.c = camera.Camera('/dev/video0', camera_size)
            self.c.start()
            self.surface = pygame.Surface(camera_size)
        self.bigSurface = None
        self.alert = False

        self.foregroundColor = pygame.Color(settings.FONT_COLOR)
        self.backgroundColor = pygame.Color(settings.BACKGROUND_COLOR)
        self.black = pygame.Color(0, 0, 0, 100)
        self.shadowShade = 0

        self.background_image = None
        if settings.BACKGROUND_IMAGE:
            self.background_image = pygame.image.load(
                settings.BACKGROUND_IMAGE)
        self.logger.debug("Video created")
Code Example #33
File: __init__.py Project: swipswaps/uspeak
    def __init__(self):
        camera.init()
        self.size = (
            640,
            480,
        )
        # create a display surface. standard pygame stuff
        self.display = pygame.display.set_mode(self.size, 0)

        # this is the same as what we saw before
        self.clist = pygame.camera.list_cameras()
        if not self.clist:
            raise ValueError("Sorry, no cameras detected.")
        self.cam = pygame.camera.Camera(self.clist[0], self.size)
        self.cam.start()

        # create a surface to capture to.  for performance purposes
        # bit depth is the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)
        self.thresholded = pygame.surface.Surface(self.size, 0, self.display)
        self.previous_pixels = None
Code Example #34
File: home_bot.py Project: sammoser/home-bot
def captureImgAndSend( toAddr ):
    # capture photos
    print "capturing photos"
    pycam.init()
    cam1 = pycam.Camera(pycam.list_cameras()[0])  
    cam2 = pycam.Camera(pycam.list_cameras()[1])  
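    # assumes at least two webcams are attached; indices 0 and 1 come from list_cameras()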
    cam1.start()
    #cam1.set_controls(False, False, 100)
    img1 = cam1.get_image()
    pyimg.save(img1, "img1.jpg")
    cam1.stop()
    cam2.start()
    img2 = cam2.get_image()
    pyimg.save(img2, "img2.jpg")
    cam2.stop()

    # send to receiver
    print "sending photos"
    img1_data = open("img1.jpg", 'rb').read()
    msg1 = MIMEMultipart()
    msg1["From"] = EMAIL
    msg1["To"] = toAddr
    image1 = MIMEImage(img1_data, name=os.path.basename("img1.jpg"))
    msg1.attach(image1)
    img2_data = open("img2.jpg", 'rb').read()
    msg2 = MIMEMultipart()
    msg2["From"] = EMAIL
    msg2["To"] = toAddr
    image2 = MIMEImage(img2_data, name=os.path.basename("img2.jpg"))
    msg2.attach(image2)
    s = smtplib.SMTP_SSL("smtp.gmail.com")
    s.login(EMAIL, PASSWORD)
    s.sendmail(EMAIL, [toAddr], msg1.as_string())
    s.sendmail(EMAIL, [toAddr], msg2.as_string())
    s.quit()

    # delete the img file
    os.remove("img1.jpg")
    os.remove("img2.jpg")
Code Example #35
File: vision.py Project: traker/robot
	def __init__( self, config, board ):
		'''
		@param board: pyfirmata object
		@type board: pyfirmata
		@param config: configparser object containing the robot's configuration
		@type config: configparser.RawConfigParser
		'''
		self.timexe = 0
		self.cam_width = config.getint( 'Camera', 'width' ) # camera width
		self.cam_height = config.getint( 'Camera', 'height' ) # camera height
		self.device = config.get( 'Camera', 'device' ) # path to the webcam device
		self.size = ( self.cam_width, self.cam_height ) # tuple (width, height)
		self.bitimage = cv.CreateImage( self.size, 8, 1 ) # black-and-white image
		self.matriximg = None
		self.laplaceim = cv.CreateImage( self.size, cv.IPL_DEPTH_8U, 1 )
		self.image_actuel = cv.CreateImage( self.size, cv.IPL_DEPTH_8U, 1 )
		self.image_brut = cv.CreateImageHeader( self.size, 8, 3 ) # captured image
		self.snapshot = surface.Surface( self.size ) # image buffer
		self.vmin = config.getint( 'Camera', 'tresholdmin' ) # minimum threshold value
		self.vmax = config.getint( 'Camera', 'tresholdmax' ) # maximum threshold value
		camera.init()
		self.cam = camera.Camera( self.device, self.size, "RGB" )
		self.cam.start()
		print "chargement webcam"
		while not self.cam.query_image():
			print "chargement webcam"
			time.sleep( 0.5 )
		self.snapshot = self.cam.get_image( self.snapshot )
		# lrf configuration
		self.laser_pin = config.getint( 'Lrf', 'pin_laser' )
		self.laser = board.get_pin( 'd:' + str( self.laser_pin ) + ':o' )
		self.laser_pos = False
		self.listplage = ( ( 144, 128 ), ( 128, 96 ), ( 96, 48 ), ( 64, 48 ), ( 48, 24 ), ( 24, 0 ) )
		self.plage_rech = ( 0, 0 ) #nby, nbx
		self.point = []
		self.BETA = config.getfloat( 'Lrf', 'beta' )
		self.L = config.getfloat( 'Lrf', 'lfocus' )
Code Example #36
def main():
    pygame.init()
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption('Extracción de colores')

    camera.init()
    c = camera.Camera(camera.list_cameras()[0], size)
    c.start()

    finish = False
    clock = pygame.time.Clock()

    while not finish:
        surf = c.get_image()
        img = pygame.surfarray.pixels3d(surf)
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        surf = pygame.surfarray.make_surface(hsv)
        screen.blit(surf, (0, 0))
        pygame.display.update()
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                finish = True
    c.stop()
Code Example #37
File: camera.py Project: pointtonull/golsoft
def main():
    pygame.init()
    camera.init()
    pygame.surfarray.use_arraytype("numpy")

    cams = camera.list_cameras()
    cam = camera.Camera(cams[0], (640, 480))
    cam.start()
    fps = 25.0
    window = pygame.display.set_mode((640, 480), 0, 8)
    pygame.display.set_caption("Video")
    screen = pygame.display.get_surface()
    screen.set_palette([(i, i, i) for i in range(256)])

    print("Starting main loop")

    pea_list = [
        ("Spectrum", get_spectrum, get_equalized),
        ("Automask", apply_mask, get_normalized),
        ("Propagation", propagate, get_normalized),
        ("Reconstruction", reconstruct, get_complex_view),
    ]

    set_array = False
    set_equalize = False
    set_normalize = True
    set_pea = False
    pea_level = 1
    distance = 5
    comp_view = "phase"

    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    return

                # IMAGE PROCESSING
                elif event.key == pygame.K_a:
                    set_array = not set_array
                    print("Converting to array: %s" % set_array)
                elif event.key == pygame.K_n:
                    set_normalize = not set_normalize
                    print("Normalize: %s" % set_normalize)
                elif event.key == pygame.K_e:
                    set_equalize = not set_equalize
                    print("Equalize: %s" % set_equalize)

                # PEA
                elif event.key == pygame.K_p:
                    set_pea = not set_pea
                    print("PEA processing set: %s" % set_pea)
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_PAGEUP:
                    pea_level -= 1
                    pea_level = max(pea_level, 1)
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_PAGEDOWN:
                    pea_level += 1
                    pea_level = min(pea_level, len(pea_list))
                    print("Setted pea to level %d, %s." % (pea_level, pea_list[pea_level - 1][0]))
                elif event.key == pygame.K_TAB:
                    comp_view = "phase" if comp_view != "phase" else "mod"
                    print("PEA complex viewer set to: %s" % comp_view)

                # FOCUS DISTANCE
                elif event.key == pygame.K_DOWN:
                    distance += 5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_UP:
                    distance -= 5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_LEFT:
                    distance -= 0.5
                    print("Distance: %.1f" % distance)
                elif event.key == pygame.K_RIGHT:
                    distance += 0.5
                    print("Distance: %.1f" % distance)

                # FULLSCREEN
                elif event.key == pygame.K_f:
                    pygame.display.toggle_fullscreen()

                # CAPTURE
                elif event.key == pygame.K_c:
                    filename = save_raw(cam)
                    print("Raw image saved to: %s" % filename)

        image = cam.get_image()

        if set_array:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
            #                array = array[:,:,0] # red
            #                array = array[:,:,1] # green
            #                array = array[:,:,2] # blue

            if set_equalize:
                array = equalize(array).astype(int)
            elif set_normalize:
                array = normalize(array)

            pygame.surfarray.blit_array(screen, array)

        elif set_pea:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
            #                array = array[:,:,0] # red
            #                array = array[:,:,1] # green
            #                array = array[:,:,2] # blue

            pea_algs = pea_list[:pea_level]
            pea_rep = pea_algs[-1][-1]

            for alg in pea_algs:
                try:
                    array = alg[1](array, distance=distance)
                except:
                    print("W: skipped framme's: %s" % alg[0])

            array = pea_rep(array, comp_view=comp_view).astype(int)

            pygame.surfarray.blit_array(screen, array)

        else:
            screen.blit(image, (0, 0))

        pygame.display.flip()
        pygame.time.delay(int(1000.0 / fps))
Code Example #38
import pygame
import pygame.camera as pycamera
import time
from pygame.locals import *

size = (640, 480)
display = pygame.display.set_mode(size, 0)
snapshot = pygame.surface.Surface(size, 0, display)

pycamera.init()
cameras = pycamera.list_cameras()
if len(cameras) <= 0:
  print "No cameras detected."
  pygame.quit()
  raise SystemExit
if len(cameras) == 1:
  print "Found 1 camera:"
else:
  print "Found "+len(cameras)+" cameras:"
for camera in cameras:
  print camera
cam = pycamera.Camera(cameras[0], size)
cam.start()

running = True
while running:
  events = pygame.event.get()
  for e in events:
    if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
      running = False
  img = cam.get_image(snapshot)
  display.blit(img, (0,0))
  pygame.display.flip()

cam.stop()
pygame.quit()
Code Example #39
def draw_from_points(cv_image, points):
    """Takes the cv_image and points and draws a rectangle based on the points.
    Returns a cv_image."""
    for (x, y, w, h), n in points:
        cv.Rectangle(cv_image, (x, y), (x + w, y + h), 255)
    return cv_image


if __name__ == '__main__':

    # Set game screen
    screen = pygame.display.set_mode(SCREEN)

    pygame.init()  # Initialize pygame
    camera.init()  # Initialize camera

    # Load camera source then start
    cam = camera.Camera('/dev/video1', SCREEN)
    cam.start()

    while 1:  # Ze loop

        time.sleep(1 / 120)  # brief pause between frames

        image = cam.get_image()  # Get current webcam image

        cv_image = pygame_to_cvimage(
            image)  # Create cv image from pygame image

        # Detect faces then draw points on image
Code Example #40
def init_cam():
    pygame.init()
    camera.init()
    webcam = camera.Camera(camera.list_cameras()[0])
    return webcam
Code Example #41
File: cammodule.py Project: hanabishi/pythoncam
def setup_pygame_camera():
    pygame.init()
    camera.init()
Code Example #42
def readyCamera():
    cam.init()
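    # list_cameras()[1] assumes a second camera is attached; use [0] for the first device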
    kamera = cam.Camera(cam.list_cameras()[1])
    return kamera
Code Example #43
def draw_from_points(cv_image, points):
    """Takes the cv_image and points and draws a rectangle based on the points.
    Returns a cv_image."""
    for (x, y, w, h), n in points:
        cv.Rectangle(cv_image, (x, y), (x + w, y + h), 255)
    return cv_image


if __name__ == '__main__':

    # Set game screen
    screen = pygame.display.set_mode(SCREEN)

    pygame.init()  # Initialize pygame
    camera.init()  # Initialize camera

    # Load camera source then start
    cam = camera.Camera('/dev/video1', SCREEN)
    cam.start()

    while 1:  # Ze loop

        time.sleep(1 / 120)  # brief pause between frames

        image = cam.get_image()  # Get current webcam image

        cv_image = pygame_to_cvimage(image)  # Create cv image from pygame image

        # Detect faces then draw points on image
        # FIXME: Current bottleneck. Image has to be Grayscale to make it faster.
Code Example #44
File: snapshot_test.py Project: mikegpl/rpi-chicken
import pygame
import pygame.camera as pycam
from pygame.locals import *

PATH = "/dev/video0"
RES = (1024, 768)
FNAME = "xD.png"


pygame.init()
pycam.init()

cam = pycam.Camera(PATH, RES)
cam.start()
image = cam.get_image()
pygame.image.save(image, FNAME)
Code Example #45
File: displaycam.py Project: mharig/displayCam
def printVideoDevices():
    '''Prints a list of available video devices'''
    camera.init()
    print(camera.list_cameras())
Code Example #46
def main():
    pygame.init()
    camera.init()
    pygame.surfarray.use_arraytype("numpy")

    cams = camera.list_cameras()
    cam = camera.Camera(cams[0], (640, 480))
    cam.start()
    fps = 25.0
    window = pygame.display.set_mode((640, 480), 0, 8)
    pygame.display.set_caption("Video")
    screen = pygame.display.get_surface()
    screen.set_palette([(i, i, i) for i in range(256)])

    print("Starting main loop")

    pea_list = [
        ("Spectrum", get_spectrum, get_equalized),
        ("Automask", apply_mask, get_normalized),
        ("Propagation", propagate, get_normalized),
        ("Reconstruction", reconstruct, get_complex_view),
        ]

    set_array = False
    set_equalize = False
    set_normalize = True
    set_pea = False
    pea_level = 1
    distance = 5
    comp_view = "phase"

    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.KEYDOWN:
                if (event.key == pygame.K_q):
                    return

                # IMAGE PROCESSING
                elif (event.key == pygame.K_a):
                    set_array = not set_array
                    print("Converting to array: %s" % set_array)
                elif (event.key == pygame.K_n):
                    set_normalize = not set_normalize
                    print("Normalize: %s" % set_normalize)
                elif (event.key == pygame.K_e):
                    set_equalize = not set_equalize
                    print("Equalize: %s" % set_equalize)

                # PEA
                elif (event.key == pygame.K_p):
                    set_pea = not set_pea
                    print("PEA processing set: %s" % set_pea)
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_PAGEUP):
                    pea_level -= 1
                    pea_level = max(pea_level, 1)
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_PAGEDOWN):
                    pea_level += 1
                    pea_level = min(pea_level, len(pea_list))
                    print("Setted pea to level %d, %s." % (
                        pea_level, pea_list[pea_level - 1][0]))
                elif (event.key == pygame.K_TAB):
                    comp_view = "phase" if comp_view != "phase" else "mod"
                    print("PEA complex viewer set to: %s" % comp_view)

                # FOCUS DISTANCE
                elif (event.key == pygame.K_DOWN):
                    distance += 5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_UP):
                    distance -= 5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_LEFT):
                    distance -= .5
                    print("Distance: %.1f" % distance)
                elif (event.key == pygame.K_RIGHT):
                    distance += .5
                    print("Distance: %.1f" % distance)

                # FULLSCREEN
                elif (event.key == pygame.K_f):
                    pygame.display.toggle_fullscreen()

                # CAPTURE
                elif (event.key == pygame.K_c):
                    filename = save_raw(cam)
                    print("Raw image saved to: %s" % filename)

        image = cam.get_image()

        if set_array:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
#                array = array[:,:,0] # red
#                array = array[:,:,1] # green
#                array = array[:,:,2] # blue

            if set_equalize:
                array = equalize(array).astype(int)
            elif set_normalize:
                array = normalize(array)

            pygame.surfarray.blit_array(screen, array)

        elif set_pea:
            array = pygame.surfarray.array2d(image)

            if array.ndim > 2:
                array = round(array.mean(-1))
#                array = array[:,:,0] # red
#                array = array[:,:,1] # green
#                array = array[:,:,2] # blue

            pea_algs = pea_list[:pea_level]
            pea_rep = pea_algs[-1][-1]

            for alg in pea_algs:
                try:
                    array = alg[1](array, distance=distance)
                except:
                    print("W: skipped framme's: %s" % alg[0])

            array = pea_rep(array, comp_view=comp_view).astype(int)

            pygame.surfarray.blit_array(screen, array)

        else:
            screen.blit(image, (0,0))

        pygame.display.flip()
        pygame.time.delay(int(1000./fps))
Code Example #47
    def close(self):
        '''Stop webcam and thread
        '''
        self.running = False


class Webcam(object):
    '''Wrapper over the thread.
    '''
    def __init__(self, *args, **kwargs):
        self.thread = WebcamThread(*args, **kwargs)
        self.thread.start()

    def capture(self, *args, **kwargs):
        self.thread.capture(*args, **kwargs)

    def close(self):
        self.thread.close()
        self.thread.join()


if __name__ == "__main__":
    camera.init()
    cams = camera.list_cameras()
    if cams:
        print "Detected webcams:"
        for c in cams:
            print " {}".format(c)
    else:
        print "No webcams detected."
Code Example #48
File: scan.py Project: codeix/superPi8
def scan():
    camera.init()
    cam = camera.Camera(camera.list_cameras()[0], (1600,1200,))
    cam.start()
    
    ok = False
    while not ok:
        movie = raw_input('Name of the movie: ')
        startby = raw_input('Start movie by picture [default 0]: ')
        startat = raw_input('Start the scanner at "8:20" [default now]: ')
        
        try:
            if startat == '' or startat == 'now':
                startat = None
            else:
                h, m = startat.split(':')
                startat = datetime.datetime.now()
                startat = startat.replace(hour=int(h), minute=int(m))
                if(datetime.datetime.now() > startat):
                    startat = startat + datetime.timedelta(days=1)
        except:
            print 'Wrong time input'
            continue

        if startby:
            startby = int(startby)
        else:
            startby = 0
        f = '%s/%s' % (PATH, movie,)
        if (os.path.exists(f) and startby == 0) or (not os.path.exists(f) and startby != 0):
            print 'Folder already exist'
            continue
        os.system(CMD_MKDIR % f)
        ok = True
    while startat is not None and datetime.datetime.now() < startat:
        clear()
        print 'Scanner will start in %s:%s:%s ' % cal_time((startat - datetime.datetime.now()).total_seconds())
        time.sleep(0.5)



    params = dict(scanning=True, 
                  filename='',
                  stop=False,
                  stopwatch=datetime.timedelta(),
                  counter=1,
                  exit = False,
                  watchdog=True,
                  watchdog_diff=0)
    counter = startby
    stopwatch = datetime.datetime.now()
    threading.Thread(target=scan_display, args = ((params,))).start()
    threading.Thread(target=scan_stop, args = ((params,))).start()
    threading.Thread(target=scan_watchdog, args = ((params,))).start()
    #while GPIO.input(END_SENSOR) == GPIO.HIGH and counter <= MAX_IMAGES:
    while counter <= MAX_IMAGES and not params['exit']:
        while params['stop']:
            time.sleep(0.01)
        counter += 1
        params['counter'] = counter
        params['stopwatch'] = datetime.datetime.now() - stopwatch
        stopwatch = datetime.datetime.now()
        params['filename'] = '%s/%s/%s.raw' % (PATH, movie, ('0'*6 + str(counter))[-6:],)
        #subprocess.call(CMD_CAM % params['filename'], shell=True)
        f = open(params['filename'], 'w')
        # wait until the cameras frame is ready
        for i in range(FRAMES + SKIPS):
            while not cam.query_image():
                time.sleep(0.005)
            if i < SKIPS:
                cam.get_raw()
            else:
                f.write(cam.get_raw())
        f.close()
        subprocess.call(CMD_LN % (os.path.basename(params['filename']), '%s/%s' % (PATH, movie,),), shell=True)
        step()
    params['scanning'] = False
    cam.stop()
    open('%s/%s/ready' % (PATH, movie,), 'w').close()
Code Example #49
import pygame.camera as pycam

# initialize module
pycam.init()

# Get all available cameras
cameras = pycam.list_cameras()
print("Available cameras:")
for c in cameras:
    print(c)

# Use the first one on the list
print("Using camera", cameras[0])

# Initialize camera, capturing 640x480 images
cam = pycam.Camera(cameras[0], (640, 480))
cam.start()

# Grab a single frame, both as a pygame Surface and as raw bytes
img = cam.get_image()  # pygame Surface
raw = cam.get_raw()    # raw frame data as a byte string

cam.stop()

# Convert the raw bytes to a PIL image
# (assumes the camera delivers 24-bit RGB data at the requested 640x480 size)
import PIL.Image as Image
pil_img = Image.frombytes("RGB", (640, 480), raw)
Code Example #50
File: projet_robot.py Project: traker/robot
nbx = 220
# This should be the size of the image coming from the camera.
cam_width = 320
cam_height = 240
camsize = ( cam_width, cam_height )
# HSV color space Threshold values for a RED laser pointer. If the dot from the
# laser pointer doesn't fall within these values, it will be ignored.
ang_actu_rotx = 90
ang_actu_roty = 90

# value
vmin = 220
vmax = 250

# initialize the camera
camera.init()
cam = camera.Camera( "/dev/video0", ( cam_width, cam_height ), "RGB" )
snapshot = pygame.surface.Surface( camsize )
cam.start()
print "chargement webcam"
while not cam.query_image():
	print "chargement webcam"
	time.sleep( 0.5 )
snapshot = cam.get_image( snapshot )

# set up the arduino
board = pyfirmata.ArduinoMega( "/dev/ttyACM0" )
print "Setting up Arduino..."
time.sleep( 0.5 )
it = pyfirmata.util.Iterator( board )
it.start()
Code Example #51
File: video.py Project: mak1e/Open-Allure-DS
    def __init__(self, processFunction=None,
                       display        =None,
                       show           =True, **argd):
        import logging
        import pygame

        logging.debug(" Initializing Video Capture Class")

        self.processRuns = 0

        #set display size in pixels = width,height
        self.size = 640,480

        self.processFunction = processFunction
        self.display = display
        self.show = show

        #print self.__dict__.items()

        #super(VideoCapturePlayer, self).__init__(**argd)

        if self.display is None:
            if self.show is True:
                # create a display surface. standard pygame stuff
                self.display = pygame.display.set_mode( self.size, 0 )
            else:
                pygame.display.init()
                self.display = pygame.surface.Surface(self.size)

        import pygame.camera as camera
        camera.init()

        # get a list of available cameras.
        self.cameraList = camera.list_cameras()
        if not self.cameraList:
            raise ValueError("Sorry, no cameras detected.")

        logging.info(" Opening device %s, with video size (%s,%s)" % (self.cameraList[0],self.size[0],self.size[1]))

        # create and start the camera of the specified size in RGB colorspace
        self.camera = camera.Camera(self.cameraList[0], self.size, "RGB")
        self.camera.start()

        self.processClock = self.clock = pygame.time.Clock()

        # create a surface to capture to.  for performance purposes, you want the
        # bit depth to be the same as that of the display surface.
        self.snapshot = pygame.surface.Surface(self.size, 0, self.display)

        # Explore namespace now:

        #print dir()
        """
          ['argd', 'camera', 'display', 'logging', 'processFunction', 'processRuns', 'pygame', 'self', 'show', 'size', 'utils']
        """

        #print dir(self)
        """
          ['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__',
          '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__',
          'argd', 'camera', 'cameraList', 'clock', 'display', 'get_and_flip', 'logging', 'main', 'processClock', 'processFunction',
          'processRuns', 'pygame', 'show', 'size', 'snapshot', 'utils']
        """

        #print self.__dict__.items()
        """
Code Example #52
File: main.py Project: morgatron/pgBeams
#from morgTools.morgTools import gauss
import sys
from scipy import optimize as opt
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
#from beamprofile import doFit
import beamprofile as bp
from DummyCam import DummyCam



if 0:
    pygame.init()
    from pygame import camera as pgcamera
    pgcamera.init()
    cam = pgcamera.Camera("/dev/video2")#,(640,480))
    cam.start()

import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType

import pyqtgraph.ptime as ptime
from pyqtgraph import dockarea as da


RES=(640,480)