Beispiel #1
0
class Application:
    def __init__(self, **kwargs):
        self.verbose = kwargs.get('verbose', DEFAULT_VERBOSE)
        self.webcam = Webcam(**kwargs)
        self.gpio = GPIO(inputs = INPUT_PINS, outputs = OUTPUT_PINS)
            
        
    def main_loop(self):
        i = 0
        try:
            self.gpio.export()
            while True:
                button_state = self.gpio[SNAP_BUTTON_PIN]
                self.gpio[SNAP_LED_PIN] = button_state
                if button_state:
                    dt = datetime.datetime.now()
                    filename_prefix = dt.strftime("%Y-%m-%d_%H_%M_%S")
                    filename_suffix = "_img%03d" % i
                    self.webcam.take_photo(filename_prefix = filename_prefix,
                                           filename_suffix = filename_suffix,
                                           blocking = True,
                                           )
                    self.gpio[SNAP_LED_PIN] = button_state
                time.sleep(SLEEP_TIME)
        except KeyboardInterrupt:
            if self.verbose:
                print "user aborted capture...goodbye"
        finally:
            self.gpio.unexport()
Beispiel #2
0
 def __init__(self):
     """Set up the GUI with the webcam as the initial input source."""
     super(GUI, self).__init__()
     self.initUI()
     self.webcam = Webcam()
     #self.video = Video()
     # default capture input is the webcam
     self.input = self.webcam
     self.dirname = ""
     print("Input: webcam")
     self.statusBar.showMessage("Input: webcam", 5000)
     self.process = Process()
     self.status = False
     # placeholder frame until the first real capture arrives
     self.frame = np.zeros((10, 10, 3), np.uint8)
     #self.plot = np.zeros((10,10,3),np.uint8)
     self.bpm = 0
Beispiel #3
0
    def __init__(self):
        """Initialise two webcams, configuration, features and texture state."""

        # initialise webcams
        self.webcam_one = Webcam(0)
        self.webcam_two = Webcam(1)

        # initialise config
        self.config_provider = ConfigProvider()

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture (created later, once a GL context exists —
        # presumably; TODO confirm against the rendering code)
        self.texture_background = None
Beispiel #4
0
    def __init__(self):
        """Start the webcam capture thread and reset glyph/shape/texture state."""
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()

        # initialise glyphs
        self.glyphs = Glyphs()

        # initialise shapes
        self.cone = None
        self.sphere = None
        # self.hp = HP()
        # initialise texture
        self.texture_background = None
Beispiel #5
0
    def __init__(self):
        """Wire webcam frames through periodic sampling and face detection.

        Builds a published observable that samples a frame every 1000 ms,
        runs self.detect on it, and multicasts the result to subscribers.
        """
        # NOTE(review): `conf['debug'] or False` normalises falsy values to
        # False but passes truthy non-bools through unchanged
        self._debug = conf['debug'] or False
        self._cv_debug = conf['cv_debug'] or False
        self.webcam = Webcam()
        self.tracker = Tracker()
        self.detector = FaceClassifier()
        self.last_tick = cv.getTickCount()
        self.got_face = False

        # sample one frame per second, detect faces, and share the stream
        # (publish + auto_connect makes it hot once the first subscriber appears)
        self.sample_stream = self.webcam.as_observable() \
            .sample(1000) \
            .do_action(lambda f: self._debug and print('sampled at {}'.format(datetime.now()))) \
            .map(self.detect) \
            .publish() \
            .auto_connect()
Beispiel #6
0
    def __init__(self):
        """Open the camera, create the display window and start processing."""
        #initialize the webcam
        self.camera = Webcam()

        #initialize optical debug mode
        self.opticalDebugMode = False

        #initialize the video window
        # NOTE(review): cv2.CV_WINDOW_AUTOSIZE is the legacy OpenCV 1.x
        # constant name — confirm the installed cv2 still exposes it
        cv2.namedWindow(WIN_NAME, cv2.CV_WINDOW_AUTOSIZE)

        #start collecting frames for processing
        self.init_frames()

        #initialize Algorithm instance
        self.alg = Algorithm()
Beispiel #7
0
    def __init__(self, account_setting):
        """Build the camera capture box for the account-settings dialog."""
        gtk.VBox.__init__(self)
        self.account_setting = account_setting

        # icons drawn onto the camera box by __camera_box_expose
        self.camera_pixbuf = app_theme.get_pixbuf(
            "account/camera.png").get_pixbuf()
        self.error_pixbuf = app_theme.get_pixbuf(
            "account/error.png").get_pixbuf()
        self.success_pixbuf = app_theme.get_pixbuf(
            "account/success.png").get_pixbuf()

        self.camera_box_align = gtk.Alignment(0.5, 0, 0, 0)
        self.camera_box = gtk.VBox()
        self.camera_box.set_size_request(CAMERA_BOX_SIZE, CAMERA_BOX_SIZE)
        self.camera_box.connect("expose-event", self.__camera_box_expose)
        self.camera_box_align.add(self.camera_box)

        self.under_camera_box = gtk.VBox(spacing=10)
        self.under_camera_box_align = gtk.Alignment(0.5, 0, 0, 0)
        self.under_camera_box_align.set_padding(WIDGET_SPACING, 0, 0, 0)
        self.under_camera_box_align.add(self.under_camera_box)
        self.__init_widgets()
        # offer recording only when a camera is actually present
        if Webcam.has_device():
            self.under_camera_box.pack_start(self.start_record_button)
        else:
            self.under_camera_box.pack_start(self.no_device_warning)

        self.pack_start(self.camera_box_align, False, False)
        self.pack_start(self.under_camera_box_align, False, False)
Beispiel #8
0
    def __init__(self):
        """Install SIGINT handling, start the webcam and reset render state."""
        # sigint interrupt initialize
        signal.signal(signal.SIGINT, self.signal_handler)

        # initialize webcam
        self.webcam = Webcam()
        self.webcam.start()

        # presumably per-axis rotation angles plus a camera z offset —
        # TODO confirm against the rendering code
        self.x_axis = 0.0
        self.y_axis = 0.0
        self.z_axis = 0.0
        self.z_pos = -7.0

        self.win = 0
        self.texture_background = None
        self.texture_teapot = None
Beispiel #9
0
 def refresh(self):
     """Rebuild the camera boxes to match current device availability."""
     container_remove_all(self.camera_box)
     container_remove_all(self.under_camera_box)
     if Webcam.has_device():
         self.under_camera_box.pack_start(self.start_record_button)
     else:
         self.under_camera_box.pack_start(self.no_device_warning)
    def __camera_box_expose(self, widget, event):
        """Draw the camera box: border frame, dark background, status icon."""
        cr = widget.window.cairo_create()
        x, y, w, h = widget.allocation
        # draw frame
        with cairo_disable_antialias(cr):
            cr.rectangle(x, y, w, h)
            cr.set_line_width(1)
            cr.set_source_rgb(*color_hex_to_cairo("#a2a2a2"))
            cr.stroke()
        # draw background
        cr.rectangle(x + 5, y + 5, w - 10, h - 10)
        cr.set_source_rgb(*color_hex_to_cairo("#333333"))
        cr.fill()

        # draw camera icon: snapshot while scanning, otherwise the camera
        # glyph (plus an error badge when no device is available)
        if hasattr(self, "scanning") and self.scanning:
            draw_pixbuf(cr, self.snapshot_pixbuf,
                        x = x + (CAMERA_BOX_SIZE - self.snapshot_pixbuf.get_width()) / 2,
                        y = y + (CAMERA_BOX_SIZE - self.snapshot_pixbuf.get_height()) / 2)
        else:
            draw_pixbuf(cr, self.camera_pixbuf,
                        x = x + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_width()) / 2,
                        y = y + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_height()) / 2)
            if not Webcam.has_device():
                draw_pixbuf(cr, self.error_pixbuf,
                            x = x + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_width()) / 2 + 12,
                            y = y + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_height()) / 2 + 12)
    def __init__(self, account_setting):
        """Build the camera capture box for the account-settings dialog."""
        gtk.VBox.__init__(self)
        self.account_setting = account_setting

        # icons drawn onto the camera box by __camera_box_expose
        self.camera_pixbuf = app_theme.get_pixbuf("account/camera.png").get_pixbuf()
        self.error_pixbuf = app_theme.get_pixbuf("account/error.png").get_pixbuf()
        self.success_pixbuf = app_theme.get_pixbuf("account/success.png").get_pixbuf()

        self.camera_box_align = gtk.Alignment(0.5, 0, 0, 0)
        self.camera_box = gtk.VBox()
        self.camera_box.set_size_request(CAMERA_BOX_SIZE, CAMERA_BOX_SIZE)
        self.camera_box.connect("expose-event", self.__camera_box_expose)
        self.camera_box_align.add(self.camera_box)

        self.under_camera_box = gtk.VBox(spacing=10)
        self.under_camera_box_align = gtk.Alignment(0.5, 0, 0, 0)
        self.under_camera_box_align.set_padding(WIDGET_SPACING, 0, 0, 0)
        self.under_camera_box_align.add(self.under_camera_box)
        self.__init_widgets()
        # offer recording only when a camera is actually present
        if Webcam.has_device():
            self.under_camera_box.pack_start(self.start_record_button)
        else:
            self.under_camera_box.pack_start(self.no_device_warning)

        self.pack_start(self.camera_box_align, False, False)
        self.pack_start(self.under_camera_box_align, False, False)
 def refresh(self):
     """Rebuild the camera boxes to match current device availability."""
     container_remove_all(self.camera_box)
     container_remove_all(self.under_camera_box)
     if Webcam.has_device():
         self.under_camera_box.pack_start(self.start_record_button)
     else:
         self.under_camera_box.pack_start(self.no_device_warning)
def take_imgs(chessboard_size=(11,7), kSaveImageDeltaTime=1):
    """Capture chessboard calibration images from a webcam.

    Shows the live feed, detects a chessboard with `chessboard_size` inner
    corners in each frame, and saves a timestamped .bmp into ./calib_images
    at most once every `kSaveImageDeltaTime` seconds. Press 'q' to quit.
    """
    sys.path.append("../")
    os.makedirs("./calib_images", exist_ok=True)

    # optional camera index from the command line
    camera_num = 0
    if len(sys.argv) == 2:
        camera_num = int(sys.argv[1])
    print('opening camera: ', camera_num)

    webcam = Webcam(camera_num)
    webcam.start()

    lastSaveTime = time.time()

    while True:
        # get image from webcam
        image = webcam.get_current_frame()
        if image is not None:

            # check if pattern found
            ret, corners = cv2.findChessboardCorners(cv2.cvtColor(image,cv2.COLOR_BGR2GRAY), chessboard_size, None)

            if ret:
                print('found chessboard')
                # save image under a timestamped name so samples never collide
                filename = datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.bmp'
                image_path="./calib_images/" + filename

                # rate-limit saves so near-identical consecutive frames are skipped
                elapsedTimeSinceLastSave = time.time() - lastSaveTime
                do_save = elapsedTimeSinceLastSave > kSaveImageDeltaTime
                print(elapsedTimeSinceLastSave, kSaveImageDeltaTime)
                if do_save:
                    lastSaveTime = time.time()
                    print('saving file ', image_path)
                    cv2.imwrite(image_path, image)

                # draw the corners
                image = cv2.drawChessboardCorners(image, chessboard_size, corners, ret)

            cv2.imshow('camera', image)

        else:
            pass
            #print('empty image')

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # close the preview window when the user quits
    cv2.destroyAllWindows()
Beispiel #14
0
    def _create_cameras(self):
        """Return one Webcam per configured camera index."""
        return [Webcam(index)
                for index in range(self.config_provider.number_of_cameras)]
 def get_backlight_from_webcam(self):
     """Poll the webcam for a backlight reading and apply it when present.

     Returns the reading (possibly None) so callers can inspect it too.
     """
     backlight = Webcam.get_backlight()
     if backlight is None:
         print('=== something happens ===')
     else:
         print('=== captured backlight: %s ===' % (backlight))
         self.set_backlight(backlight)
     return backlight
Beispiel #16
0
    def get(self):
        """Render the Benasque template with the webcams of valley 6."""
        webcams = Webcam.all()
        # NOTE(review): the filter() result is not reassigned — on GAE's old
        # db.Query this mutates in place, but verify the query is actually
        # restricted to valley_id 6
        webcams.filter("valley_id = ", "6")

        template_values = {'webcams': webcams}
        path = os.path.join(os.path.dirname(__file__),'templates/benasque.html')
        self.response.out.write(template.render(path,template_values))
    def __init__(self):
        """Set up the GUI, defaulting input to the webcam and clearing old data."""
        # start from a clean slate: drop any previous session data
        if os.path.exists("data.json"):
            os.remove("data.json")

        super(GUI, self).__init__()
        self.initUI()
        self.webcam = Webcam()
        self.video = Video()
        # default capture input is the webcam
        self.input = self.webcam
        self.dirname = ""
        print("Input: webcam")
        self.statusBar.showMessage("Input: webcam", 5000)
        # the open button applies to file input only, so disable it for webcam mode
        self.btnOpen.setEnabled(False)
        self.process = Process()
        self.status = False
        # placeholder frame until the first real capture arrives
        self.frame = np.zeros((10, 10, 3), np.uint8)
        #self.plot = np.zeros((10,10,3),np.uint8)
        self.bpm = 0
Beispiel #18
0
    def __init__(self, *args, **kwargs):
        """Initialise the Studio window: webcam, image output and timer state."""
        super(Studio, self).__init__(*args, **kwargs)
        self.setupUi(self)

        # Device
        self.device_default = 0
        self.device = self.device_default

        # Webcam
        self.webcam = Webcam()

        # Image output settings and capture counters
        self.image_dir = 'outputs'
        self.image_ext = 'jpg'
        self.num_images_max_default = 10
        self.num_images_max = self.num_images_max_default
        self.num_images = 0

        self.saved_width_default = 416  # In pixel
        self.saved_height_default = 416
        self.saved_width = self.saved_width_default
        self.saved_height = self.saved_height_default

        self.flip_image = False
        self.cb_flip_image.stateChanged.connect(self.change_flip_image)

        # Filename prefix
        self.filename_prefix = 'class_memo'

        # Recording flag
        self.is_recording = False

        # Timer: drives process_image every timer_duration msec
        self.timer_is_on = False
        self.timer_duration = 500  # msec
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.process_image)

        # Plot min/max (max < min presumably means auto-scale — TODO confirm)
        self.plot_min = 0.0
        self.plot_max = -1.0

        # Initialize
        self.initialize()
Beispiel #19
0
def emotion_recognition():
    """Take a webcam photo and classify the emotion via a docker container.

    Captures a photo, feeds it to the 'app_emotion_recognition' container
    through the shared volume, and maps the returned index into EMOTIONS.
    Returns the emotion label, or None on any failure (webcam unavailable,
    no face/emotion detected, non-numeric container output).
    """
    client = docker.from_env()
    t = time.time()

    isOpen = Webcam.open()
    if not isOpen:
        print("ERROR: cannot open webcam")
        return None

    Webcam.take_photo(path_volume + "face.jpg")
    Webcam.close()

    if not watch(path_volume + "face.jpg", t):
        print("ERROR: cannot take photo")
        return None

    t = time.time()
    client.containers.run('app_emotion_recognition',
                          command='volume/face.jpg volume/emotion.txt',
                          volumes=volumes,
                          auto_remove=True)

    os.remove(path_volume + "face.jpg")

    if not watch(path_volume + "emotion.txt", t):
        print("ERROR: emotion_recognition cannot detect face or emotion")
        return None

    # read the container's result through a context manager so the handle
    # is closed even if readline() raises
    with open(path_volume + "emotion.txt", "r") as f:
        # BUG FIX: strip the trailing newline that readline() keeps —
        # otherwise isdigit() is always False and valid results are rejected
        res = f.readline().strip()

    # remove the exchange file before validating so it is cleaned up on the
    # error path too (it used to leak when the content was not a digit)
    os.remove(path_volume + "emotion.txt")

    if not res.isdigit():
        print("ERROR: emotion is not int")
        return None

    emotion_index = int(res)

    if emotion_index < 0:
        return None
    else:
        return EMOTIONS[emotion_index]
Beispiel #20
0
    def __init__(self):
        """Initialise config, robots, webcam, markers, features and texture."""
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers (no detection results cached yet)
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None
Beispiel #21
0
def face_recognizer(face_path=None, face_bib="faces.json"):
    """Identify the person in a photo via the face_recognizer container.

    If `face_path` is falsy, a photo is first captured from the webcam into
    the shared volume (and deleted afterwards). The container matches the
    photo against `face_bib` and writes the name to face_reco.txt.
    Returns the recognised name, or None on failure.
    """
    # remember whether we took the photo ourselves, so we only delete
    # files we created (never a caller-supplied photo)
    captured = not face_path

    if not face_path:
        face_path = "face.jpg"
        t = time.time()

        isOpen = Webcam.open()
        if not isOpen:
            print("ERROR: cannot open webcam")
            return None

        Webcam.take_photo(path_volume + face_path)
        Webcam.close()

        if not watch(path_volume + face_path, t):
            print("ERROR: cannot take photo")
            return None

    client = docker.from_env()

    t = time.time()
    # BUG FIX: the original re-assigned face_path = "face.jpg" here,
    # silently discarding a caller-supplied path and then trying to
    # remove a file that was never created

    client.containers.run('app_face_recognizer',
                          command='volume/' + face_path + ' volume/' +
                          face_bib + ' volume/face_reco.txt volume',
                          volumes=volumes,
                          auto_remove=True)

    if captured:
        os.remove(path_volume + face_path)

    if not watch(path_volume + "face_reco.txt", t):
        print("ERROR: face_recognizer cannot detect any face")
        return None

    # context manager closes the handle even if readline() raises
    with open(path_volume + "face_reco.txt", "r") as f:
        name = f.readline()

    os.remove(path_volume + "face_reco.txt")

    return name
Beispiel #22
0
    def __init__(self):
        """Initialise config, robot, webcam, marker and feature helpers."""
        self.config_provider = ConfigProvider()

        self.irobot = Robot()
        self.webcam = Webcam()

        # no marker detection results cached yet
        self.marker = Marker()
        self.markers_cache = None

        self.features = Features(self.config_provider)

        self.texture_background = None
Beispiel #23
0
    def __init__(self):
        """Start the webcam thread and prepare matching against sample.jpg."""
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
        self.find = fp()
        self.find.set_img('sample.jpg')

        # frame height/width taken from the webcam's frame shape
        self.hei, self.wid = self.webcam.get_frame_shape()[:2]
        # initialise cube
        # self.d_obj = None
        self.img = None
        # initialise texture
        self.texture_background = None
        self.K = None
        # marker keypoints/descriptors — presumably filled by set_keypoint()
        # below; TODO confirm
        self.mark_kp = None
        self.mark_des = None
        self.set_keypoint()
        self.new_kp = None

        self.mat_kp = None
        self.mat_des = None
        self.H = None
def real_time_lrp(conf):
    """Display feature relevance scores from webcam frames in real time.

    Args:
        conf: Dictionary consisting of configuration parameters.
    """
    should_record = conf["playback"]["record_video"]

    camera = Webcam()
    propagator = RelevancePropagation(conf)

    # only allocate a recorder when playback recording is enabled
    video_writer = VideoRecorder(conf) if should_record else None

    while True:
        tic = time.time()

        image = camera.get_frame()
        relevance_map = post_processing(image, propagator.run(image), conf)
        cv2.imshow("LRP", relevance_map)

        if video_writer is not None:
            video_writer.record(relevance_map)

        # report the end-to-end frame rate
        toc = time.time()
        print("{:.1f} FPS".format(1.0 / (toc - tic)))

        # ESC quits the loop
        if cv2.waitKey(1) % 256 == 27:
            print("Escape pressed.")
            break

    if video_writer is not None:
        video_writer.release()

    camera.turn_off()
    cv2.destroyAllWindows()
    def __init__(self):
        """Start the webcam capture thread and reset glyph/shape/texture state."""
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()

        # initialise glyphs
        self.glyphs = Glyphs()

        # initialise shapes
        self.cone = None
        self.sphere = None

        # initialise texture
        self.texture_background = None
Beispiel #26
0
    def __init__(self):
        """Initialise config, robots, webcam, markers, features and texture."""
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers (no detection results cached yet)
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None
Beispiel #27
0
    def __start_record_clicked(self, widget):
        """Swap in a live webcam preview and schedule four snapshots."""
        container_remove_all(self.under_camera_box)
        self.under_camera_box.pack_start(self.keep_few_minutes)

        self.webcam_align = gtk.Alignment(0, 0.5, 0, 0)
        self.webcam_align.set_padding(5, 5, 5, 5)
        # lazily create and size the shared recording widget on first use
        if not hasattr(self.account_setting, "record_webcam"):
            self.account_setting.record_webcam = Webcam()
            self.account_setting.record_webcam.set_size_request(
                WEBCAM_SIZE, min(WEBCAM_SIZE, 240))
            self.account_setting.record_webcam.create_video_pipeline(320, 240)
        self.webcam_align.add(self.account_setting.record_webcam)
        container_remove_all(self.camera_box)
        self.camera_box.add(self.webcam_align)
        self.account_setting.record_webcam.play()
        # take four snapshots at 0.5 s intervals starting 2 s after play...
        gobject.timeout_add(2000, self.__do_save_photo, 0)
        gobject.timeout_add(2500, self.__do_save_photo, 1)
        gobject.timeout_add(3000, self.__do_save_photo, 2)
        gobject.timeout_add(3500, self.__do_save_photo, 3)

        # ...then run the follow-up action once all snapshots are taken
        gobject.timeout_add(4000, self.__do_action)
Beispiel #28
0
    def __camera_box_expose(self, widget, event):
        """Draw the camera box: border frame, dark background, status icon."""
        cr = widget.window.cairo_create()
        x, y, w, h = widget.allocation
        # draw frame
        with cairo_disable_antialias(cr):
            cr.rectangle(x, y, w, h)
            cr.set_line_width(1)
            cr.set_source_rgb(*color_hex_to_cairo("#a2a2a2"))
            cr.stroke()
        # draw background
        cr.rectangle(x + 5, y + 5, w - 10, h - 10)
        cr.set_source_rgb(*color_hex_to_cairo("#333333"))
        cr.fill()

        # draw camera icon: snapshot while scanning, otherwise the camera
        # glyph (plus an error badge when no device is available)
        if hasattr(self, "scanning") and self.scanning:
            draw_pixbuf(
                cr,
                self.snapshot_pixbuf,
                x=x + (CAMERA_BOX_SIZE - self.snapshot_pixbuf.get_width()) / 2,
                y=y +
                (CAMERA_BOX_SIZE - self.snapshot_pixbuf.get_height()) / 2)
        else:
            draw_pixbuf(
                cr,
                self.camera_pixbuf,
                x=x + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_width()) / 2,
                y=y + (CAMERA_BOX_SIZE - self.camera_pixbuf.get_height()) / 2)
            if not Webcam.has_device():
                draw_pixbuf(
                    cr,
                    self.error_pixbuf,
                    x=x +
                    (CAMERA_BOX_SIZE - self.camera_pixbuf.get_width()) / 2 +
                    12,
                    y=y +
                    (CAMERA_BOX_SIZE - self.camera_pixbuf.get_height()) / 2 +
                    12)
Beispiel #29
0
    def __init__(self):
        """Initialise config, robots, webcam, glyphs, optional browser, texture."""
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise glyphs (no detection results cached yet)
        self.glyphs = Glyphs()
        self.glyphs_cache = None

        # initialise browser — only created when enabled in config
        self.browser = None

        if self.config_provider.browser:
            self.browser = Browser()

        # initialise texture
        self.texture_background = None
Beispiel #30
0
	def __init__(self):
		"""Start the webcam thread and set up game state and model placeholders."""
		# initialise webcam and start thread
		self.webcam = Webcam()
		self.webcam.start()
		
		# initialise boxes — presumably sliding-window filters over detected
		# positions (winSize = 10); TODO confirm causalBox semantics
		self.hBox = causalBox(winSize = 10)
		self.vBox = causalBox(winSize = 10)
		
		plyerName = ["John","Doe","Tommy","Emmanuel"]
		self.game = GameCtrler(plyerName)
		
		# initialise shapes
		self.dragon = None
		self.fly = None
		self.ele = None
		self.boat = None
		self.horse = None
		self.house = None
		self.juk = None

		# textures
		self.texture_background = None
Beispiel #31
0
class OpenGLGlyphs:
	"""AR viewer (Python 2): renders OBJ game pieces over webcam frames.

	Detects glyphs in each webcam frame and draws the matching OBJ model
	at the glyph's pose using OpenGL/GLUT, with the camera image as a
	textured background quad.
	"""

	# constants: multiplied element-wise into the OpenCV pose matrix below —
	# presumably to convert between OpenCV and OpenGL axis conventions
	INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
								[-1.0,-1.0,-1.0,-1.0],
								[-1.0,-1.0,-1.0,-1.0],
								[ 1.0, 1.0, 1.0, 1.0]])
	

	
	def __init__(self):
		"""Start the webcam thread and set up game state and model placeholders."""
		# initialise webcam and start thread
		self.webcam = Webcam()
		self.webcam.start()
		
		#initialise
		self.hBox = causalBox(winSize = 10)
		self.vBox = causalBox(winSize = 10)
		
		plyerName = ["John","Doe","Tommy","Emmanuel"]
		self.game = GameCtrler(plyerName)
		
		# initialise shapes: OBJ models, loaded in _init_gl
		self.dragon = None
		self.fly = None
		self.ele = None
		self.boat = None
		self.horse = None
		self.house = None
		self.juk = None

		# textures
		self.texture_background = None

	def _init_gl(self, Width, Height):
		"""Configure GL state, load the seven OBJ models and create textures."""
		# initialPosition = (0,0,0) 
		glClearColor(0.0, 0.0, 0.0, 0.0)
		glClearDepth(1.0)
		glDepthFunc(GL_LESS)
		glEnable(GL_DEPTH_TEST)
		glShadeModel(GL_SMOOTH)
		
		# Projection matrix: field of view derived from the window height
		# and an assumed focal length of 1375 px — TODO confirm calibration
		glMatrixMode(GL_PROJECTION)
		glLoadIdentity()
		fovy = 2*np.arctan(Height/1375.0)*180.0/np.pi
		gluPerspective(fovy, float(Width)/float(Height), 0.1, 1375.1)
		glViewport(0,0,Width,Height)
		glMatrixMode(GL_MODELVIEW)

		# assign shapes
		print "loading model 1/7"
		self.dragon = OBJ('Drgn9-6.obj')
		print "loading model 2/7"
		self.fly = OBJ('plane.obj')
		print "loading model 3/7"
		self.ele = OBJ('minion.obj')
		print "loading model 4/7"
		self.boat = OBJ('VikingShip.mtl.obj')
		print "loading model 5/7"
		self.horse = OBJ('Wooden_Toy_Truck.obj')
		print "loading model 6/7"
		self.house = OBJ('house_001.obj')
		print "loading model 7/7"
		self.juk = OBJ('Barrel_variation.obj')
		print "loading model done"

		# enable textures
		glEnable(GL_TEXTURE_2D)
		self.texture_background = glGenTextures(1)

	def _draw_scene(self):
		"""GLUT display callback: draw the webcam background, then glyph models."""
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
		glLoadIdentity()
		
		# get image from webcam
		image = self.webcam.get_current_frame()

		self._draw_background(image)
		
		# handle glyphs
		image = self._handle_glyphs(image)
		
		glutSwapBuffers()
		
	def _handle_glyphs(self, image):
		"""Detect glyphs in `image` and draw the model matching each glyph name."""
		
		# attempt to detect glyphs
		glyphs = []

		try:
			glyphs = detect_glyph(image, self.hBox, self.vBox, self.game)
		except Exception as ex: 
			print(ex)

		if not glyphs: 
			return
			
		for glyph in glyphs:
		
			rvecs, tvecs, glyph_name = glyph
			# build view matrix from the Rodrigues rotation and translation,
			# sign-flipped element-wise via INVERSE_MATRIX and transposed
			# into OpenGL column-major order
			rmtx = cv2.Rodrigues(rvecs)[0]
			view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
									[rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
									[rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
									[0.0       ,0.0       ,0.0       ,1.0    ]])
			view_matrix =  view_matrix * self.INVERSE_MATRIX 
			view_matrix = np.transpose(view_matrix)
			
			# load view matrix and draw the display list for this piece
			glPushMatrix()
			glLoadIdentity()
			glLoadMatrixd(view_matrix)
			if glyph_name == "B juk":
				glCallList(self.juk.gl_list)
			elif glyph_name == "R juk":
				glCallList(self.juk.gl_list)
			elif glyph_name == "B Phao":
				glCallList(self.house.gl_list)
			elif glyph_name == "R Phao":
				glCallList(self.house.gl_list)
			elif glyph_name == "B Horse":
				glCallList(self.horse.gl_list)
			elif glyph_name == "R Horse":
				glCallList(self.horse.gl_list)
			elif glyph_name == "B Boat":
				glCallList(self.boat.gl_list)
			elif glyph_name == "R Boat":
				glCallList(self.boat.gl_list)
			elif glyph_name == "B Ele":
				glCallList(self.ele.gl_list)
			elif glyph_name == "R Ele":
				glCallList(self.ele.gl_list)
			elif glyph_name == "B Fly":
				glCallList(self.fly.gl_list)
			elif glyph_name	== "R Fly":
				glCallList(self.fly.gl_list)
			elif glyph_name == "B T":
				glCallList(self.dragon.gl_list)
			elif glyph_name == "R T":
				glCallList(self.dragon.gl_list)				
			else:
				glCallList(self.dragon.gl_list)				
			glPopMatrix()

	def _draw_background(self, image):
		"""Upload the webcam frame as a texture and draw it on a far quad."""
		# convert image to OpenGL texture format (flipped vertically, as
		# OpenGL's texture origin is bottom-left)
		bg_image = cv2.flip(image, 0)
		bg_image = Image.fromarray(bg_image)     
		ix = bg_image.size[0]
		iy = bg_image.size[1]
		bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
		
		# create background texture
		glBindTexture(GL_TEXTURE_2D, self.texture_background)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
		glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

		# draw background quad behind everything (z = -1375)
		glBindTexture(GL_TEXTURE_2D, self.texture_background)
		glPushMatrix()
		glLoadIdentity()
		glTranslatef(-100,100,-1375)
		glBegin(GL_QUADS)
		i, j = 1520/2, 820/2
		glTexCoord2f(0.0, 1.0); glVertex3f(-i, -j, 0.0)
		glTexCoord2f(1.0, 1.0); glVertex3f( i, -j, 0.0)
		glTexCoord2f(1.0, 0.0); glVertex3f( i,  j, 0.0)
		glTexCoord2f(0.0, 0.0); glVertex3f(-i,  j, 0.0)
		glEnd()
		glPopMatrix()

	def main(self):
		"""Create the GLUT window and enter the render loop (never returns)."""
		width = 1520
		heigh = 820
		# setup and run OpenGL
		glutInit()
		glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
		glutInitWindowSize(760, 410)
		glutInitWindowPosition(100, 100)
		self.window_id = glutCreateWindow("OpenGL Glyphs")
		glutDisplayFunc(self._draw_scene)
		glutIdleFunc(self._draw_scene)
		self._init_gl(width, heigh)
		glutMainLoop()
from webcam import Webcam
import cv2
from datetime import datetime

# termination criteria for cv2.cornerSubPix (used in main below):
# stop after 100 iterations or when the correction drops below 0.001
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)

# module-level capture thread shared by main()
webcam = Webcam(1)
webcam.start()


def main():
    """Continuously show webcam frames and highlight detected chessboards.

    Python 2 code (print statement, cv2.cv constants).
    """
    while True:

        #get the image
        image = webcam.get_current_frame()
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        cv2.imshow('grid', image)

        # look for a 9x6 inner-corner chessboard pattern
        ret, corners = cv2.findChessboardCorners(
            image_gray, (9, 6), flags=cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH)

        if ret == True:
            print "found a chessboard"
            cv2.drawChessboardCorners(image, (9, 6), corners, ret)
            cv2.imshow('grid', image)

            # refine corner locations to sub-pixel accuracy using the
            # module-level termination criteria
            corners2 = cv2.cornerSubPix(image_gray, corners, (11, 11),
                                        (-1, -1), criteria)
Beispiel #33
0
from flask import Flask, jsonify, request, redirect, send_file
from flask_cors import CORS, cross_origin
from webcam import Webcam
from io import BytesIO
from PIL import Image
import base64
import re

app = Flask(__name__)
# single shared webcam instance used by all routes
webcam = Webcam()

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}


@app.route("/")
@cross_origin()
def hello():
    """Health-check root endpoint."""
    return "Hello World!"


@app.route('/status')
@cross_origin()
def status():
    """Report whether the shared webcam is currently running."""
    return 'Running' if webcam.running else 'Not Running'


@app.route('/start')
@cross_origin()
Beispiel #34
0
from webcam import Webcam
import cv2
from datetime import datetime

# start the capture thread and sample frames every 3 seconds
webcam = Webcam()
webcam.start()

while True:

    # get image from webcam
    image = webcam.get_current_frame()

    # display image
    cv2.imshow('grid', image)
    cv2.waitKey(3000)

    # save image to file, if pattern found (7x6 inner chessboard corners)
    ret, corners = cv2.findChessboardCorners(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (7, 6), None)

    if ret == True:
        # timestamped filename so samples never collide
        filename = datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.jpg'
        cv2.imwrite("pose/sample_images/" + filename, image)
Beispiel #35
0
import cv2
from glyphfunctions import *
from glyphdatabase import *
from webcam import Webcam

# Start the shared camera before entering the detection loop.
webcam = Webcam()
webcam.start()

# Glyph-detection tuning constants.
QUADRILATERAL_POINTS = 4   # vertex count expected for a candidate glyph
BLACK_THRESHOLD = 100      # max grayscale value treated as "black"
WHITE_THRESHOLD = 155      # min grayscale value treated as "white"

while True:

    # Stage 1: Read an image from our webcam
    image = webcam.get_current_frame()

    # Stage 2: Detect edges in image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(gray, 100, 200)

    # Stage 3: Find contours
    # NOTE(review): two-value unpacking matches OpenCV 2.x/4.x;
    # OpenCV 3.x returns three values here — confirm the installed version.
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edges',edges)
    # contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    #
    # for contour in contours:
    #
    #     # Stage 4: Shape check
    #     perimeter = cv2.arcLength(contour, True)
Beispiel #36
0
        elif options.mode == 'epfl':
            from sprites import epfl
            if iter < 2:
                m.set_buffer_size(192+12)
                m.list_to_buffer(epfl[0])
                m.list_to_buffer(epfl[1], row='bottom')
            elif iter % 2 == 0:
                m.scroll_buffer('left'); m.scroll_buffer('left')
        elif options.mode == 'webcam':
            if options.mirror:
                flip = 1
            else:
                flip = None
            from webcam import Webcam
            from hashtoarr import *
            if iter < 2:
                cam = Webcam(0)
                m.set_buffer_size(192)
            list = cam.get_array_from_cam(flip=flip, resize=6)
            arr = lines_to_buffers(list)
            m.list_to_buffer(arr[0])
            m.list_to_buffer(arr[1], row='bottom')

        if m.sim:
            ui.update()

    if m.sim:
        ui.terminate()


    def __init__(self):
        """Build the avatar-editing widget: an image edit area plus a
        webcam capture area, with zoom/photo buttons on a popup panel.

        NOTE(review): relies on class constants (AREA_WIDTH, AREA_HEIGHT,
        MODE_EDIT, POS_IN_DRAG, POS_IN_MOVE, POS_OUT) and several callback
        methods defined elsewhere in this class.
        """
        super(IconEditArea, self).__init__()

        # --- container widgets -------------------------------------------
        self.edit_area = gtk.EventBox()
        self.camera_area_vbox = gtk.VBox(False)
        self.camera_area = Webcam()
        self.camera_area_up = gtk.EventBox()
        self.camera_area_down = gtk.EventBox()
        self.camera_area_init_flag = False
        self.button_hbox = gtk.HBox(False)
        self.button_hbox_height = 40
        self.__widget_y = 92

        # --- fixed sizes -------------------------------------------------
        self.edit_area.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        #self.camera_area.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.camera_area_vbox.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.camera_area.set_size_request(self.AREA_WIDTH, 225)
        #self.camera_area_up.set_size_request(self.AREA_WIDTH, 37)
        #self.camera_area_down.set_size_request(self.AREA_WIDTH, 37)
        self.button_hbox.set_size_request(self.AREA_WIDTH, self.button_hbox_height)

        # --- control buttons (same pixbuf for normal/hover/press) --------
        self.button_zoom_in = ImageButton(
            app_theme.get_pixbuf("account/zoom_in.png"),
            app_theme.get_pixbuf("account/zoom_in.png"),
            app_theme.get_pixbuf("account/zoom_in.png"),
            _("zoom in"))
        self.button_zoom_out = ImageButton(
            app_theme.get_pixbuf("account/zoom_out.png"),
            app_theme.get_pixbuf("account/zoom_out.png"),
            app_theme.get_pixbuf("account/zoom_out.png"),
            _("zoom out"))
        self.button_camera = ImageButton(
            app_theme.get_pixbuf("account/camera.png"),
            app_theme.get_pixbuf("account/camera.png"),
            app_theme.get_pixbuf("account/camera.png"),
            _("Take a photo"))
        self.button_camera_again = ImageButton(
            app_theme.get_pixbuf("account/camera_again.png"),
            app_theme.get_pixbuf("account/camera_again.png"),
            app_theme.get_pixbuf("account/camera_again.png"),
            _("Try again"))

        # Center each button inside its alignment wrapper.
        self.button_zoom_in_align = tools.make_align(self.button_zoom_in, xalign=0.5, yalign=0.5)
        self.button_zoom_out_align = tools.make_align(self.button_zoom_out, xalign=0.5, yalign=0.5)
        self.button_camera_align = tools.make_align(self.button_camera, xalign=0.5, yalign=0.5)
        self.button_camera_again_align = tools.make_align(self.button_camera_again, xalign=0.5, yalign=0.5)

        self.button_zoom_in.connect("clicked", self.on_zoom_in_clicked_cb)
        self.button_zoom_out.connect("clicked", self.on_zoom_out_clicked_cb)
        self.button_camera.connect("clicked", self.on_camera_clicked_cb)
        self.button_camera_again.connect("clicked", self.on_camera_again_clicked_cb)

        # --- layout ------------------------------------------------------
        self.box = gtk.VBox(False)
        self.box.pack_start(self.edit_area, False, False)
        #self.box.pack_start(self.button_hbox, False, False)
        #self.box.pack_start(tools.make_align(yalign=0.0, yscale=1.0))
        self.set_size(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.connect("expose-event", self.draw_frame_border)
        self.put(self.box, 0, 0)
        #self.put(self.button_hbox, 0, self.AREA_HEIGHT-self.button_hbox_height)

        # --- edit-area event wiring --------------------------------------
        # Invisible event box on top of the image so mouse events reach us.
        self.edit_area.set_can_focus(True)
        self.edit_area.set_visible_window(False)
        self.edit_area.add_events(gtk.gdk.ALL_EVENTS_MASK)
        self.edit_area.connect("button-press-event", self.__on_button_press_cb)
        self.edit_area.connect("button-release-event", self.__on_button_release_cb)
        self.edit_area.connect("motion-notify-event", self.__on_motion_notify_cb)
        #self.edit_area.connect("leave-notify-event", self.__on_leave_notify_cb)
        self.edit_area.connect("expose-event", self.__expose_edit)

        # --- camera-area event wiring ------------------------------------
        self.camera_area_down.add_events(gtk.gdk.POINTER_MOTION_MASK)
        #self.camera_area.connect("motion-notify-event", self.__on_camera_motion_notify_cb)
        self.camera_area_down.connect("motion-notify-event", self.__on_camera_motion_notify_cb)
        self.camera_area_up.connect("expose-event", self.__on_camera_expose_cb)
        self.camera_area_down.connect("expose-event", self.__on_camera_expose_cb)
        self.camera_area_vbox.pack_start(self.camera_area_up)
        self.camera_area_vbox.pack_start(self.camera_area, False, False)
        self.camera_area_vbox.pack_start(self.camera_area_down)

        # --- popup button panel ------------------------------------------
        #panel_size = self.button_camera.get_size_request()
        #self.panel = Panel(panel_size[0], panel_size[1], gtk.WINDOW_POPUP)
        self.panel = Panel(self.AREA_WIDTH, self.button_hbox_height, gtk.WINDOW_POPUP)
        self.panel_layout = gtk.Fixed()
        #self.panel_layout.put(self.button_camera_align, (self.AREA_WIDTH-panel_size[0])/2, 0)
        self.panel_layout.put(self.button_hbox, 0, 0)
        self.panel.add(self.panel_layout)
        self.panel.hide_panel()
        self.panel.connect("expose-event", self.__draw_panel_background)
        # Redraw the panel whenever its allocation changes.
        self.panel.connect("size-allocate", lambda w,e: w.queue_draw())

        #self.panel.connect("enter-notify-event", self.__on_camera_enter_notify_cb)
        self.panel.connect("leave-notify-event", self.__on_camera_leave_notify_cb)
        self.camera_focus_flag = True

        # --- editing state -----------------------------------------------
        self.__refresh_time_id = None
        self.__button_time_id = None
        self.current_mode = self.MODE_EDIT
        self.origin_pixbuf = None
        self.origin_pixbuf_width = 0
        self.origin_pixbuf_height = 0
        self.cache_pixbuf = CachePixbuf()
        self.border_color = "#000000"

        # cursor shapes per interaction zone
        self.cursor = {
            self.POS_IN_DRAG : gtk.gdk.Cursor(gtk.gdk.BOTTOM_RIGHT_CORNER),
            self.POS_IN_MOVE : gtk.gdk.Cursor(gtk.gdk.FLEUR),
            self.POS_OUT : None}
        self.cursor_current = None

        self.press_point_coord = (0, 0)
        self.position = self.POS_OUT
        self.drag_flag = False
        self.move_flag = False
        #
        self.__show_button_flag = True
        self.__button_moving_flag = False
        #self.__refresh_flag = False

        # the pixbuf shown area
        self.pixbuf_offset_x = 0
        self.pixbuf_offset_y = 0
        self.pixbuf_offset_cmp_x = 0
        self.pixbuf_offset_cmp_y = 0
        self.pixbuf_x = 0
        self.pixbuf_y = 0
        self.pixbuf_w = self.AREA_WIDTH
        self.pixbuf_h = self.AREA_HEIGHT
        # the select box area
        self.edit_coord_x = 0
        self.edit_coord_y = 0
        self.edit_coord_w = self.AREA_WIDTH
        self.edit_coord_h = self.AREA_HEIGHT
        self.edit_coord_backup_x = 0
        self.edit_coord_backup_y = 0
        self.edit_coord_backup_w = self.AREA_WIDTH
        self.edit_coord_backup_h = self.AREA_HEIGHT

        self.drag_point_x = 0
        self.drag_point_y = 0
        self.__update_drag_point_coord()
Beispiel #38
0
class ArucoFootball:
    """Augmented-reality football tactics board.

    Renders the webcam feed as an OpenGL background and projects player
    models onto detected ArUco markers; a Qt window manages the player
    roster and a Wiimote provides input.
    """

    # Sign-flip mask applied element-wise to the OpenCV pose matrix to
    # convert it into OpenGL's coordinate convention.
    INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0],
                               [-1.0, -1.0, -1.0, -1.0],
                               [-1.0, -1.0, -1.0, -1.0],
                               [1.0, 1.0, 1.0, 1.0]])

    def __init__(self, btAddr):
        """Set up camera, OpenGL, Qt GUI and the Wiimote threads.

        btAddr -- Bluetooth address of the Wiimote to connect to.

        NOTE(review): blocks in self.app.exec_() until the Qt app quits.
        """
        # init needed values
        self.btAddr = btAddr
        self.player = None
        self.texture_background = None
        self.set_ids = []
        self.set_players = []
        self.players = []
        self.calc_values()
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
        # connect wiimote and create model
        wiiModel = WiiModel.WiiModel(self.btAddr)
        # init openGl, Qt and Wiimote
        self.initOpenGL()
        self.initGUI()
        # run wiimote-connection-loop
        thread = threading.Thread(target=wiiModel.wiimoteLoop, args=(self.mainWindow, self.cursor))
        thread.start()
        # run opengl and camera in a thread
        thread = threading.Thread(target=glutMainLoop, args=())
        thread.start()
        # run Qt
        self.app.exec_()

    def initGUI(self):
        """Create the Qt main window and the two player list widgets."""
        self.app = QApplication(sys.argv)
        self.mainWindow = MainWindow(self.players)
        self.mainWindow.show()
        self.set_player_widget = self.mainWindow.listWidgetB
        self.unset_player_widget = self.mainWindow.listWidgetA

        self.unset_player_widget.itemChanged.connect(self.removeID)
        self.mainWindow.setFocus()
        self.mainWindow.setWindowTitle("Tactic-Window")
        self.mainWindow.resize(600, 800)
        self.cursor = QCursor()

    def initOpenGL(self):
        """Create the GLUT window, projection and player models."""
        # setup OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(1280, 960)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("Footballfield")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)

        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # assign shapes
        player_model = "models/football-player-new.obj"
        self.player1 = OBJ(player_model, 1)
        self.player2 = OBJ(player_model, 2)
        self.player3 = OBJ(player_model, 3)
        # self.player4 = OBJ(player_model, 4)

        # add Players to list
        self.players.append(Player("Dani", "1", "player_images/dani-img.jpg", self.player1))
        self.players.append(Player("Maxi", "2", "player_images/maxi-img.jpg", self.player2))
        self.players.append(Player("Jonas", "3", "player_images/jonas-img.jpg", self.player3))
        # self.players.append(Player("Michi", "4", "player_images/michi-img.jpg", self.player4))

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    # if player removed from list -> remove set id
    def removeID(self):
        """Release the marker id of any player moved off the field list."""
        for index in range(self.unset_player_widget.count()):
            item = self.unset_player_widget.item(index)
            if item:
                data = item.data(Qt.UserRole)
                if data:
                    for player in self.players:
                        if player.number == data[0] and player.marker_num is not None:
                            self.set_ids.remove(player.marker_num)
                            player.marker_num = None

    # add players to set_players for all players on "Field"
    def setChangedListItems(self):
        """Rebuild self.set_players from the 'on field' list widget."""
        items = []
        for index in range(self.set_player_widget.count()):
            item = self.set_player_widget.item(index)
            if item:
                data = item.data(Qt.UserRole)
                for player in self.players:
                    if player.number == data[0]:
                        items.append(player)

        self.set_players = items

    def _draw_scene(self):
        """GLUT display callback: draw webcam background, then markers."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        # NOTE(review): raw "BGRX" bytes are uploaded below as GL_RGBA —
        # red/blue channels may be swapped; confirm this is intended.
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -10.0)
        self._draw_background()
        glPopMatrix()

        # handle glyphs (returns None when no markers are detected)
        image = self._handle_aruco(image)

        glutSwapBuffers()

    def calc_values(self):
        """Load camera calibration values from the calib_images samples."""
        path = 'calib_images/*.jpg'
        self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = Tracker.calculate_camera_values(path)

    def _handle_aruco(self, image):
        """Detect ArUco markers and render the assigned player models.

        Returns None when no markers are found (early exit), otherwise
        falls through with no explicit return value.
        """
        img = image
        corners, ids, _ = Tracker.preprocess(img)
        # NOTE(review): `ids is not None` is already a scalar bool, so
        # np.all() here is a no-op wrapper — verify intent.
        if np.all(ids is not None):
            # check for OpenCV output in different versions
            params = aruco.estimatePoseSingleMarkers(corners, 1, self.mtx, self.dist)
            if len(params) == 2:
                rvec, tvec = params
            else:
                rvec, tvec, _ = params
        else:
            return

        # set all players to list from "Field"
        self.setChangedListItems()
        for i in range(len(ids)):

            rvecs, tvecs, glyph_name = rvec[i], tvec[i], ids[i][0]
            # build view matrix from the Rodrigues rotation + translation
            rmtx = cv2.Rodrigues(rvecs)[0]
            view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0]],
                                    [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[0][1]],
                                    [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[0][2]],
                                    [0.0, 0.0, 0.0, 1.0]])

            # element-wise sign flip into OpenGL convention, then
            # transpose to column-major order for glLoadMatrixd
            view_matrix = view_matrix * self.INVERSE_MATRIX
            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            # check if ID is set or not and set it
            if ids[i] not in self.set_ids:
                for player in self.set_players:
                    if player.marker_num is None:
                        player.marker_num = ids[i]
                        self.set_ids.append(ids[i])
                        break

            # if ID is set project model
            for player in self.set_players:
                if player.marker_num == ids[i]:
                    glCallList(player.model.gl_list)

            glPopMatrix()

    def _draw_background(self):
        """Draw the full-screen textured quad holding the camera frame."""
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0,  3.0, 0.0)
        glEnd()
class EspejoVirtual:
    """Virtual mirror: overlays a glasses model on the webcam feed.

    The camera frame is drawn as an orthographic full-window quad and the
    OBJ glasses model is positioned from the eye coordinates detected by
    the Webcam wrapper.
    """

    def __init__(self):
        # camera wrapper providing frames and detected eye coordinates
        self.cam = Webcam()
        self.textura_cam = 0          # GL texture id for the camera frame
        self.ancho = 640              # window width (px)
        self.alto = 480               # window height (px)
        self.obj = None               # glasses model, loaded in iniciar_opengl
        self.x = -5
        self.y = 0.1

    def iniciar_opengl(self):
        """Initialise GL state and load the glasses OBJ model."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glMatrixMode(GL_MODELVIEW)
        self.textura_cam = glGenTextures(1)
        self.obj = OBJ('data/gafas4.obj', True)

    def iniciar_opencv(self):
        """Start the camera capture."""
        self.cam.iniciar()

    def pintar_escena(self):
        """GLUT display callback: draw the video frame, then the glasses."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glViewport(0, 0, self.ancho, self.alto)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # orthographic projection in window pixels
        glOrtho(0.0, self.ancho, 0.0, self.alto, 0.1, 100.0)
        glEnable(GL_DEPTH_TEST)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

        self.pintar_video(self.cam.obtener_imagen())
        # `True or ...` forces the overlay every frame, bypassing the
        # eye-detection check (presumably for debugging).
        if True or self.cam.ojos_detectados:
            self.pintar_objeto()

        glutSwapBuffers()

    def pintar_video(self, imagen):
        """Upload the camera frame as a texture and draw it full-window."""
        pillow_img = Image.fromarray(imagen)  # flip the image
        ancho = pillow_img.size[0]
        alto = pillow_img.size[1]

        # the -1 stride flips the image vertically during conversion
        bytes_img = pillow_img.tobytes('raw', 'BGRX', 0, -1)

        glEnable(GL_TEXTURE_2D)
        glColor3fv((1, 1, 1))

        glBindTexture(GL_TEXTURE_2D, self.textura_cam)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ancho, alto, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, bytes_img)

        glBindTexture(GL_TEXTURE_2D, self.textura_cam)

        # push the quad to the far end of the ortho depth range
        glTranslatef(0.0, 0.0, -99)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(0, self.alto, 0.0)
        #glVertex3f(-4.0, -3.0, 0.0)

        glTexCoord2f(1.0, 1.0)
        glVertex3f(self.ancho, self.alto, 0.0)
        #glVertex3f( 4.0, -3.0, 0.0)

        glTexCoord2f(1.0, 0.0)
        glVertex3f(self.ancho, 0, 0.0)
        #glVertex3f( 4.0,  3.0, 0.0)

        glTexCoord2f(0.0, 0.0)
        glVertex3f(0, 0, 0.0)
        #glVertex3f(-4.0,  3.0, 0.0)

        glEnd()

    def draw_rect(self, x, y, width, height):
        """Draw an untextured rectangle at (x, y) with the given size."""
        glDisable(GL_TEXTURE_2D)
        glTranslatef(0, 0, 1)

        glBegin(GL_QUADS)
        glVertex2f(x, y)
        glVertex2f(x + width, y)
        glVertex2f(x + width, y + height)
        glVertex2f(x, y + height)
        glEnd()

    def pintar_objeto(self):
        """Position and draw the glasses model over the detected eyes."""
        glDisable(GL_TEXTURE_2D)
        glLoadIdentity()
        # move to the glasses anchor point (y is flipped from image coords)
        glTranslatef(self.cam.coordenadas_gafas[0],
                     415 - self.cam.coordenadas_gafas[1], 0)

        glColor3fv((1, 1, 1))

        ##glRectf(100, 100, 0, 0.5)
        #glRotate(90, 0, 1, 0)
        # orient the model upright, then apply the detected eye tilt
        glRotate(180, 1, 0, 0)
        glRotate(180, 0, 0, 1)

        glRotate(self.cam.angulo_ojos, 0, 0, 1)

        glCallList(self.obj.gl_list)

    def comenzar(self):
        """Create the GLUT window, initialise GL/camera and run the loop."""
        glutInit()

        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(self.ancho, self.alto)
        glutCreateWindow('Espejo virtual')
        glutDisplayFunc(self.pintar_escena)
        glutIdleFunc(self.pintar_escena)

        self.iniciar_opengl()
        self.iniciar_opencv()

        glutMainLoop()
Beispiel #40
0
def main():
    """Smoke-test the balloon payload hardware: 3G dongle, GPS daemon,
    two USB cameras and a barometer.

    NOTE(review): Python 2 code (print statements, xrange); most dongle
    calls are commented out, so only detection is exercised.
    """
    print("Initializing Equipment")

    print("3g... ")
    data = CellDongle()
    print("Status: " + data.detected())
    print("Attempting connection...")
    #print(data.connect())
    print("Testing status...")
   # print(data.status())
    print("Disconnecting...")
    #print(data.disconnect())

    print("GPS... ")
    print("Starting daemon...") ## actually happens in gpsClass.py
    gpsp = GpsPoller("localhost","99", "/dev/ttyUSBgps")
    gpsp.start()
    print("LifeCam...")
   # print '/dev/v4l/by-id/usb-Microsoft_Microsoft_LifeCam_NX-6000-video-index0'.encode('utf-8')
    cam1 = Webcam('/dev/v4l/by-id/usb-Microsoft_Microsoft*_LifeCam_NX-6000-video-index0',"1600x1200")
    #print(cam1.status())
    print("iSight..")
    cam2 = Webcam("/dev/v4l/by-id/usb-Micron_Built-in_iSight-video-index0","352x288")
    print("Barometer... ")
    bar1 = Barometer()

    # one test shot from each camera
    cam1.take_pic()
    cam2.take_pic()

    print("camera2 status: " + cam2.status())
    #print(gps1.getLat())

    # poll the GPS a few times; restart the daemon on failure
    for i in xrange(3):
        try:
            print gpsp.getDMS()
            #print gpsd.utc, gpsd.fix.time
        # print("speed: {0:0.2f} m/s".format(gps1.getSpeed()))
        # print("alt: {0:0.2f} m").format(gps1.getAlt())
        # print("climb: {0:0.2f} m/s".format(gps1.getClimb()))
        # print("time: {0} ".format(gps1.getTime()))
        except NameError:
            print("GPS missing connection")
            gpsp.update_daemon()

        except:
            e = sys.exc_info()[0]
            print("GPS Error:")
            print e
            gpsp.update_daemon()

    # read temperature / pressure / altitude from the barometer
    try:
        print("temp *C " + bar1.getTempC())
        print("temp *F " + bar1.getTempF())
        print("pressure alt: {0:0.2f} m".format(bar1.getAlt()))
        print("pressure: {0:0.2f} pa".format(bar1.getPressure()))
  #  print("sea level pressure " + bar1.getSeaLevelPressure())
    except:
        e = sys.exc_info()[0]
        print("Barometer Error: ")
        print e

    # stop the GPS polling thread before exiting
    gpsp.running = False
    gpsp.join()
class VidMag():
    """Eulerian video magnification on a live webcam feed.

    Maintains a rolling buffer of frames and amplifies either subtle
    color changes (Gaussian pyramid + ideal temporal band-pass) or
    subtle motion (Laplacian pyramid + Butterworth band-pass).
    """

    def __init__(self):
        self.webcam = Webcam()
        self.buffer_size = 40          # frames kept in the rolling buffer
        self.fps = 0                   # estimated from frame timestamps
        self.times = []                # capture timestamps (s since t0)
        self.t0 = time.time()
        self.data_buffer = []          # rolling list of resized frames
        #self.vidmag_frames = []
        self.frame_out = np.zeros((10, 10, 3), np.uint8)
        self.webcam.start()
        print("init")

    #--------------COLOR MAGNIFICATIONN---------------------#
    def build_gaussian_pyramid(self, src, level=3):
        """Return [src, down1, ..., downN] via repeated pyrDown."""
        s = src.copy()
        pyramid = [s]
        for i in range(level):
            s = cv2.pyrDown(s)
            pyramid.append(s)
        return pyramid

    def gaussian_video(self, video_tensor, levels=3):
        """Stack the coarsest Gaussian level of every frame into a tensor."""
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_gaussian_pyramid(frame, level=levels)
            gaussian_frame = pyr[-1]
            if i == 0:
                # allocate output once the downsampled shape is known
                vid_data = np.zeros(
                    (video_tensor.shape[0], gaussian_frame.shape[0],
                     gaussian_frame.shape[1], 3))
            vid_data[i] = gaussian_frame
        return vid_data

    def temporal_ideal_filter(self, tensor, low, high, fps, axis=0):
        """Ideal band-pass filter along the time axis via FFT masking.

        Zeroes all frequency bins outside [low, high] Hz (both the
        positive- and negative-frequency halves) and returns |ifft|.
        """
        fft = fftpack.fft(tensor, axis=axis)
        frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)
        bound_low = (np.abs(frequencies - low)).argmin()
        bound_high = (np.abs(frequencies - high)).argmin()
        fft[:bound_low] = 0
        fft[bound_high:-bound_high] = 0
        fft[-bound_low:] = 0
        iff = fftpack.ifft(fft, axis=axis)
        return np.abs(iff)

    def amplify_video(self, gaussian_vid, amplification=70):
        """Scale the filtered signal by the amplification factor."""
        return gaussian_vid * amplification

    def reconstract_video(self, amp_video, origin_video, levels=3):
        """Upsample the amplified signal and add it back onto the original."""
        final_video = np.zeros(origin_video.shape)
        for i in range(0, amp_video.shape[0]):
            img = amp_video[i]
            for x in range(levels):
                img = cv2.pyrUp(img)
            img = img + origin_video[i]
            final_video[i] = img
        return final_video

    def magnify_color(self,
                      data_buffer,
                      fps,
                      low=0.4,
                      high=2,
                      levels=3,
                      amplification=30):
        """Full color-magnification pipeline over a frame tensor.

        low/high are the band-pass limits in Hz (0.4-2 Hz covers typical
        heart rates); returns the reconstructed video tensor.
        """
        gau_video = self.gaussian_video(data_buffer, levels=levels)
        filtered_tensor = self.temporal_ideal_filter(gau_video, low, high, fps)
        amplified_video = self.amplify_video(filtered_tensor,
                                             amplification=amplification)
        final_video = self.reconstract_video(amplified_video,
                                             data_buffer,
                                             levels=levels)
        #print("c")
        return final_video

    #-------------------------------------------------------------#

    #-------------------MOTION MAGNIFICATIONN---------------------#
    #build laplacian pyramid for video
    def laplacian_video(self, video_tensor, levels=3):
        """Return one tensor per pyramid level across all frames."""
        tensor_list = []
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_laplacian_pyramid(frame, levels=levels)
            if i == 0:
                # allocate one output tensor per level on the first frame
                for k in range(levels):
                    tensor_list.append(
                        np.zeros((video_tensor.shape[0], pyr[k].shape[0],
                                  pyr[k].shape[1], 3)))
            for n in range(levels):
                tensor_list[n][i] = pyr[n]
        return tensor_list

    #Build Laplacian Pyramid
    def build_laplacian_pyramid(self, src, levels=3):
        """Laplacian pyramid, coarsest difference first."""
        gaussianPyramid = self.build_gaussian_pyramid(src, levels)
        pyramid = []
        for i in range(levels, 0, -1):
            GE = cv2.pyrUp(gaussianPyramid[i])
            L = cv2.subtract(gaussianPyramid[i - 1], GE)
            pyramid.append(L)
        return pyramid

    #reconstract video from laplacian pyramid
    def reconstract_from_tensorlist(self, filter_tensor_list, levels=3):
        """Collapse the per-level tensors back into full-resolution frames."""
        final = np.zeros(filter_tensor_list[-1].shape)
        for i in range(filter_tensor_list[0].shape[0]):
            up = filter_tensor_list[0][i]
            for n in range(levels - 1):
                up = cv2.pyrUp(up) + filter_tensor_list[n + 1][i]
            final[i] = up
        return final

    #butterworth bandpass filter
    def butter_bandpass_filter(self, data, lowcut, highcut, fs, order=5):
        """Apply an order-5 Butterworth band-pass along the time axis."""
        omega = 0.5 * fs  # Nyquist frequency
        low = lowcut / omega
        high = highcut / omega
        b, a = signal.butter(order, [low, high], btype='band')
        y = signal.lfilter(b, a, data, axis=0)
        return y

    def magnify_motion(self,
                       video_tensor,
                       fps,
                       low=0.4,
                       high=1.5,
                       levels=3,
                       amplification=30):
        """Full motion-magnification pipeline over a frame tensor."""
        lap_video_list = self.laplacian_video(video_tensor, levels=levels)
        filter_tensor_list = []
        for i in range(levels):
            filter_tensor = self.butter_bandpass_filter(
                lap_video_list[i], low, high, fps)
            filter_tensor *= amplification
            filter_tensor_list.append(filter_tensor)
        recon = self.reconstract_from_tensorlist(filter_tensor_list)
        final = video_tensor + recon
        return final

    #-------------------------------------------------------------#

    def buffer_to_tensor(self, buffer):
        """Copy the frame list into a float tensor of shape (N, 192, 256, 3).

        NOTE(review): assumes every buffered frame is exactly 192x256x3
        (mainLoop resizes to width 256) — confirm aspect ratio holds.
        """
        tensor = np.zeros((len(buffer), 192, 256, 3), dtype="float")
        i = 0
        for i in range(len(buffer)):
            tensor[i] = buffer[i]
        return tensor

    def run_color(self):
        """Run color magnification once the buffer is full; update frame_out."""
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(self.data_buffer)

        # keep only the newest buffer_size frames/timestamps
        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            # fps estimated from the span of buffered timestamps
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_color(data_buffer=tensor, fps=self.fps)
            #print(final_vid[0].shape)
            #self.vidmag_frames.append(final_vid[-1])
            #print(self.fps)
            self.frame_out = final_vid[-1]

    def run_motion(self):
        """Run motion magnification once the buffer is full; update frame_out."""
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(L)

        # keep only the newest buffer_size frames/timestamps
        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_motion(video_tensor=tensor, fps=self.fps)
            #print(self.fps)
            #self.vidmag_frames.append(final_vid[-1])
            self.frame_out = final_vid[-1]

    def key_handler(self):
        """
        A plotting or camera frame window must have focus for keypresses to be
        detected.
        """
        self.pressed = waitKey(1) & 255  # wait for keypress for 10 ms
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def mainLoop(self):
        """One iteration: grab a frame, magnify color, show both windows."""
        frame = self.webcam.get_frame()
        f1 = imutils.resize(frame, width=256)
        #crop_frame = frame[100:228,200:328]
        self.data_buffer.append(f1)
        self.run_color()
        #print(frame)

        #if len(self.vidmag_frames) > 0:
        #print(self.vidmag_frames[0])
        cv2.putText(frame, "FPS " + str(float("{:.2f}".format(self.fps))),
                    (20, 420), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

        #frame[100:228,200:328] = cv2.convertScaleAbs(self.vidmag_frames[-1])
        cv2.imshow("Original", frame)
        #f2 = imutils.resize(cv2.convertScaleAbs(self.vidmag_frames[-1]), width = 640)
        f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width=640)

        cv2.imshow("Color amplification", f2)

        self.key_handler()  #if not the GUI cant show anything
Beispiel #42
0
 def __init__(self, **kwargs):
     """Set up verbosity, the webcam wrapper and the GPIO pins.

     Keyword args: 'verbose' is consumed here (default DEFAULT_VERBOSE);
     all kwargs are also forwarded verbatim to Webcam.
     """
     self.verbose = kwargs.get('verbose', DEFAULT_VERBOSE)
     self.webcam = Webcam(**kwargs)
     # module-level pin maps define which GPIO lines are in/out
     self.gpio = GPIO(inputs = INPUT_PINS, outputs = OUTPUT_PINS)
Beispiel #43
0
class MUcamera:
    """Webcam brightness monitor.

    Grabs an image, registers a periodic intensity callback, and plots
    raw and smoothed average intensity; also reports daytime status and
    the most common pixel color when stopped.
    """

    def __init__(self):
        self.w = Webcam()

        # NOTE(review): stores start()'s return value, not the method —
        # confirm Webcam.start() is meant to be called here.
        self.start = self.w.start()
        self.im = self.w.grab_image()
        # call average_intensity once per second (presumably; confirm
        # register_callback's interval semantics in webcam.py)
        self.w.register_callback(self.average_intensity, 1)
        #        self.grabimage = self.w.grab_image()
        self.avg_intensity = []   # per-image mean pixel intensity
        self.images = []
        self.filt_list = []       # moving-average-smoothed intensities
        self.f, self.ax = plt.subplots(1, 1)
        self.day = []

    def average_intensity(self):
        """Append the mean pixel intensity of the current image and
        return the running list."""
        pix_val = list(self.im.getdata())
        pixel_intensity = []
        for x in pix_val:
            # average the channel values of each pixel
            avg = sum(x) / len(x)
            pixel_intensity.append(avg)
        self.avg_pixel = np.average(pixel_intensity)
        self.avg_intensity.append(self.avg_pixel)
        return self.avg_intensity
#        avg  = np.mean(np.mean(image,axis=1))
#        self.avg_list.append(avg)

    def average_intensity_filtered(self):
        """Return a width-3 moving average of the intensity history.

        NOTE(review): appends to self.filt_list on every call without
        clearing it first, so repeated calls duplicate entries — confirm
        whether this accumulation is intended.
        """
        width = 3
        #        i=0

        if len(self.avg_intensity) >= 5:
            for x in range(len(self.avg_intensity) - 2):
                self.filt_list.append(
                    (self.avg_intensity[x] + self.avg_intensity[x + 1] +
                     self.avg_intensity[x + 2]) / width)
            return self.filt_list
        else:
            return self.filt_list
#        while i+width <= len(self.filt_list):
#            y = self.filt_list[i:i+width]
#            total_sum=sum(y)/width
#            self.filt_list.append(total_sum)
#            i+=1

#    def stop(self):
#        self.w.stop()
#        self.average_intensity_mean_plot()
#        self.average_intensity_filtered_plot()
#
#    def average_intensity_mean_plot(self):
#        self.ax.plot(self.avg_intensity, 'C1')
#        self.ax.set_xlabel('Image Number')
#        self.ax.set_ylabel('Intensity')
#        self.ax.set_title('Image Intensity')
##
#    def average_intensity_filtered_plot(self):
#        self.average_intensity_filtered()
#        self.ax.plot(self.filt_list, 'C2')
#

    def daytime(self):
        """Print "True" when mean intensity >= 95 (treated as daytime)."""
        self.average = np.mean(np.mean(self.im, axis=1))
        if self.average >= 95:
            #            self.i.append(self.i)
            return print("True")
        else:
            return print("False")
##

    def most_common_color(self):
        """Print the most frequent pixel color and its frequency share."""
        w, h = self.im.size
        pixels = self.im.getcolors(w * h)
        print(len(pixels))
        most_frequent_pixel = pixels[0]
        for count, color in pixels:
            if count > most_frequent_pixel[0]:
                most_frequent_pixel = (count, color)

    #        compare("Most Common", image, most_frequent_pixel[1])
    #    print(self.most_frequent_pixel)
        return print(most_frequent_pixel[0] / len(pixels), most_frequent_pixel)

    def stop(self):
        """Stop the camera and emit the summary reports and plots."""
        self.w.stop()
        self.daytime()
        self.most_common_color()
        self.average_intensity_mean_plot()
        self.average_intensity_filtered_plot()

    def average_intensity_mean_plot(self):
        """Plot the raw intensity history on the shared axes."""
        self.average_intensity()
        self.ax.plot(self.avg_intensity, 'C1')
        self.ax.set_xlabel('Image Number')
        self.ax.set_ylabel('Intensity')
        self.ax.set_title('Image Intensity')
#

    def average_intensity_filtered_plot(self):
        """Plot the smoothed intensity history on the shared axes."""
        self.average_intensity_filtered()
        self.ax.plot(self.filt_list, 'C2')
Beispiel #44
0
class OpenGLGlyphs:
    """Augmented-reality demo: renders the webcam feed as an OpenGL
    background texture and, when the reference image ('sample.jpg') is
    detected in the frame, draws a lit teapot with the camera pose
    recovered from the homography."""

    def __init__(self):
        # Initialise webcam and start its capture thread.
        self.webcam = Webcam()
        self.webcam.start()
        # Feature extractor primed with the reference (marker) image.
        self.find = fp()
        self.find.set_img('sample.jpg')

        self.hei, self.wid = self.webcam.get_frame_shape()[:2]
        # initialise cube
        # self.d_obj = None
        self.img = None
        # initialise texture
        self.texture_background = None
        self.K = None          # camera intrinsics, filled in by _init_gl
        self.mark_kp = None    # reference-image keypoints
        self.mark_des = None   # reference-image descriptors
        self.set_keypoint()
        self.new_kp = None

        self.mat_kp = None     # inlier keypoints matched in the last frame
        self.mat_des = None    # inlier descriptors matched in the last frame
        self.H = None          # last accepted homography (marker -> frame)

        # self.Rt=None

    def _init_gl(self, Width, Height):
        """Configure OpenGL state and build a projection matrix from the
        calibrated camera intrinsics for a Width x Height viewport."""
        glClearColor(0.0, 0.0, 0.0, 0.0)  # clear colour (with alpha)
        glClearDepth(1.0)                 # depth-buffer clear value
        # GL_LESS: an incoming fragment wins the depth test when it is
        # closer than what is already stored.
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)           # hidden-surface removal
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

        self.K = my_calibration((Height, Width))
        fx = self.K[0, 0]
        fy = self.K[1, 1]
        # Vertical field of view and aspect ratio implied by the intrinsics.
        fovy = 2 * arctan(0.5 * Height / fy) * 180 / pi
        aspect = float(Width * fy) / (Height * fx)
        # define the near and far clipping planes
        near = 0.1
        far = 100.0
        # set perspective
        gluPerspective(fovy, aspect, near, far)

        glMatrixMode(GL_MODELVIEW)
        # self.d_obj=[OBJ('Rocket.obj')]
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
        # gluPerspective(33.7, 1.3, 0.1, 100.0)

    def set_keypoint(self):
        """Compute and store the reference image's keypoints/descriptors."""
        self.find.start()
        self.mark_kp, self.mark_des = self.find.get_point()

    def _draw_scene(self):
        """Per-frame display callback: draw the camera frame as the
        background, then the teapot when a marker pose was recovered."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        Rt = self._my_cal(image)
        """
        if Rt!=None:
            box=ones((self.hei,self.wid),uint8)
            H_box=cv2.warpPerspective(box,self.H,(self.wid, self.hei))
            image=image*H_box[:,:,newaxis]
            image=cv2.drawKeypoints(image,self.mat_kp,flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        """
        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)

        ix = bg_image.size[0]
        iy = bg_image.size[1]
        # NOTE(review): the buffer is packed as BGRX but uploaded as
        # GL_RGBA below, so red/blue channels appear swapped — confirm.
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.wid, self.hei, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()

        # glTranslatef(0.0,0.0,0.0)
        gluLookAt(0.0, 0.0, 12.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)

        self._draw_background()
        glPopMatrix()

        # A pose was recovered -> render the lit teapot on the marker.
        if Rt is not None:
            self._set_modelview_from_camera(Rt)
            glEnable(GL_LIGHTING)
            glEnable(GL_LIGHT0)
            glEnable(GL_DEPTH_TEST)
            glEnable(GL_NORMALIZE)
            glClear(GL_DEPTH_BUFFER_BIT)
            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.5, 0.5, 0.0, 1.0])
            glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.9, 0.9, 0.0, 1.0])
            glMaterialfv(GL_FRONT, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])
            glMaterialfv(GL_FRONT, GL_SHININESS, 0.25 * 128.0)
            glutSolidTeapot(0.1)

        glutSwapBuffers()

    def _set_modelview_from_camera(self, Rt):
        """Load the camera pose [R|t] into the OpenGL model-view matrix
        (column-major, as glLoadMatrixf expects)."""
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # Rotate 90 degrees about the x-axis so the object's z-axis
        # points up (swap y and z).
        Rx = array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])

        # set rotation to best approximation
        R = Rt[:, :3]

        # change sign of x-axis
        R[0, :] = -R[0, :]
        # set translation
        t = Rt[:, 3]
        t[0] = -t[0]

        # setup 4*4 model view matrix
        M = eye(4)
        M[:3, :3] = dot(R, Rx)
        M[:3, 3] = t
        # BUG FIX: the original also executed ``M[3, :3] = t``, which
        # overwrites the homogeneous bottom row [0, 0, 0, 1] and yields
        # an invalid model-view matrix; it has been removed.

        # transpose and flatten to get column order
        M = M.T

        m = M.flatten()
        # replace model view with the new matrix
        glLoadMatrixf(m)

    def _my_cal(self, image):
        """Return the marker pose [R|t] for *image*, or None when the
        marker is not found."""
        find_H = fp()
        find_H.set_cv_img(image)
        find_H.start()
        kp, des = find_H.get_point()

        self.H = self.match_images(self.mark_kp, self.mark_des, kp, des)
        if self.H is not None:
            # Reference camera looking at the marker plane from z = -1.
            cam1 = camera.Camera(hstack((self.K, dot(self.K, array([[0], [0], [-1]])))))
            # Rt1=dot(linalg.inv(self.K),cam1.P)
            cam2 = camera.Camera(dot(self.H, cam1.P))

            # Re-orthogonalise the rotation: the third column is the
            # cross product of the first two.
            A = dot(linalg.inv(self.K), cam2.P[:, :3])
            A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
            cam2.P[:, :3] = dot(self.K, A)
            Rt = dot(linalg.inv(self.K), cam2.P)

            return Rt
        else:
            return None

    def match_images(self, kp1, des1, kp2, des2):
        """Match marker descriptors against the frame's and return the
        RANSAC homography, or None with fewer than ~50 good matches."""
        matcher = cv2.BFMatcher()
        match_des = matcher.knnMatch(des1, des2, k=2)
        matA, matB = [], []
        matC = []

        # Lowe's ratio test keeps only distinctive matches.
        for m in match_des:
            if m[0].distance < 0.8 * m[1].distance:
                matA.append(kp1[m[0].queryIdx])
                matB.append(kp2[m[0].trainIdx])
                matC.append(des1[m[0].queryIdx])

        if len(matA) > 50:
            ptsA = float32([m.pt for m in matA])
            ptsB = float32([n.pt for n in matB])
            H1, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 5.0)
            H1 = self.homo_check(H1)
            # Keep only the RANSAC inliers for later drawing/debugging.
            self.mat_kp = array([matB[i] for i in range(status.shape[0]) if status[i] == 1])
            self.mat_des = array([matC[i] for i in range(status.shape[0]) if status[i] == 1])

            return H1
        else:
            return None

    def homo_check(self, H1):
        """Damp jitter: keep the previous homography unless the new one
        differs from it by more than 1.0 in norm."""
        if self.H is None:
            return H1
        if cv2.norm(H1, self.H) > 1.0:
            return H1
        return self.H

    def _draw_background(self):
        """Draw a textured quad that fills the camera background."""
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()
        # NOTE(review): deleting texture name 1 every frame looks wrong —
        # the generated id is self.texture_background; confirm intent.
        glDeleteTextures(1)

    def keyboard(self, *args):
        """GLUT special-key handler: the Up arrow quits the program."""
        # BUG FIX: compare with ``==`` instead of ``is`` — identity
        # comparison against an int constant is unreliable in CPython.
        if args[0] == GLUT_KEY_UP:
            glutDestroyWindow(self.window_id)
            self.webcam.finish()
            sys.exit()

    def main(self):
        """Create the GLUT window, register callbacks, run the loop."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(self.wid, self.hei)
        glutInitWindowPosition(400, 400)
        self.window_id = glutCreateWindow(b"OpenGL Glyphs")
        self._init_gl(self.wid, self.hei)
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        glutSpecialFunc(self.keyboard)
        glutMainLoop()
class BacklightManagerForGNOME():
    """Thin wrapper around the first acpilight backlight controller,
    caching the last brightness reading in ``self.backlight``."""

    def __init__(self):
        controllers = acpilight.get_controllers()
        first_device = next(iter(controllers.values()))
        self.ctrl = acpilight.Controller(first_device)
        self.backlight = 0
        self.update_backlight()

    def get_backlight(self):
        """Refresh the cached brightness from hardware and return it."""
        self.update_backlight()
        return self.backlight

    def update_backlight(self):
        """Re-read the controller's brightness into the cache."""
        self.backlight = self.ctrl.brightness()

    def set_backlight(self, value):
        """Write *value* to the controller, then refresh the cache."""
        self.ctrl.set_brightness(value)
        self.update_backlight()


if __name__ == '__main__':
    # Smoke test: read the level, set it to 50, and read it back.
    bm = BacklightManagerForGNOME()
    print(bm.get_backlight())
    bm.set_backlight(50)
    print(bm.get_backlight())

    # Mirror the webcam's own backlight reading back to the controller.
    from webcam import Webcam
    wc = Webcam()
    b = wc.get_backlight()
    print(b)
    bm.set_backlight(b)
class IconEditArea(gtk.Layout):
    __gsignals__ = {
        "pixbuf-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,))}

    AREA_WIDTH = 300
    AREA_HEIGHT = 300
    DRAG_WIDTH = 10
    MIN_SIZE = 150

    POS_OUT = 0
    POS_IN_MOVE = 1
    POS_IN_DRAG = 2

    MODE_CAMERA = 0
    MODE_CAMERA_EDIT = 1
    MODE_EDIT = 2

    def __init__(self):
        """Build the icon-editing widget: an edit area with a movable,
        resizable square selection box, a live camera view, and a
        floating button panel (zoom in/out, take photo, try again).
        """
        super(IconEditArea, self).__init__()

        # Child widgets: the static edit canvas and the camera column.
        self.edit_area = gtk.EventBox()
        self.camera_area_vbox = gtk.VBox(False)
        self.camera_area = Webcam()
        self.camera_area_up = gtk.EventBox()
        self.camera_area_down = gtk.EventBox()
        self.camera_area_init_flag = False
        self.button_hbox = gtk.HBox(False)
        self.button_hbox_height = 40
        self.__widget_y = 92

        self.edit_area.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        #self.camera_area.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.camera_area_vbox.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.camera_area.set_size_request(self.AREA_WIDTH, 225)
        #self.camera_area_up.set_size_request(self.AREA_WIDTH, 37)
        #self.camera_area_down.set_size_request(self.AREA_WIDTH, 37)
        self.button_hbox.set_size_request(self.AREA_WIDTH, self.button_hbox_height)

        # Toolbar buttons (all three pixbuf states use the same image).
        self.button_zoom_in = ImageButton(
            app_theme.get_pixbuf("account/zoom_in.png"),
            app_theme.get_pixbuf("account/zoom_in.png"),
            app_theme.get_pixbuf("account/zoom_in.png"),
            _("zoom in"))
        self.button_zoom_out = ImageButton(
            app_theme.get_pixbuf("account/zoom_out.png"),
            app_theme.get_pixbuf("account/zoom_out.png"),
            app_theme.get_pixbuf("account/zoom_out.png"),
            _("zoom out"))
        self.button_camera = ImageButton(
            app_theme.get_pixbuf("account/camera.png"),
            app_theme.get_pixbuf("account/camera.png"),
            app_theme.get_pixbuf("account/camera.png"),
            _("Take a photo"))
        self.button_camera_again = ImageButton(
            app_theme.get_pixbuf("account/camera_again.png"),
            app_theme.get_pixbuf("account/camera_again.png"),
            app_theme.get_pixbuf("account/camera_again.png"),
            _("Try again"))

        self.button_zoom_in_align = tools.make_align(self.button_zoom_in, xalign=0.5, yalign=0.5)
        self.button_zoom_out_align = tools.make_align(self.button_zoom_out, xalign=0.5, yalign=0.5)
        self.button_camera_align = tools.make_align(self.button_camera, xalign=0.5, yalign=0.5)
        self.button_camera_again_align = tools.make_align(self.button_camera_again, xalign=0.5, yalign=0.5)

        self.button_zoom_in.connect("clicked", self.on_zoom_in_clicked_cb)
        self.button_zoom_out.connect("clicked", self.on_zoom_out_clicked_cb)
        self.button_camera.connect("clicked", self.on_camera_clicked_cb)
        self.button_camera_again.connect("clicked", self.on_camera_again_clicked_cb)

        self.box = gtk.VBox(False)
        self.box.pack_start(self.edit_area, False, False)
        #self.box.pack_start(self.button_hbox, False, False)
        #self.box.pack_start(tools.make_align(yalign=0.0, yscale=1.0))
        self.set_size(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.set_size_request(self.AREA_WIDTH, self.AREA_HEIGHT)
        self.connect("expose-event", self.draw_frame_border)
        self.put(self.box, 0, 0)
        #self.put(self.button_hbox, 0, self.AREA_HEIGHT-self.button_hbox_height)

        # Edit area receives all mouse events for move/drag of the box.
        self.edit_area.set_can_focus(True)
        self.edit_area.set_visible_window(False)
        self.edit_area.add_events(gtk.gdk.ALL_EVENTS_MASK)
        self.edit_area.connect("button-press-event", self.__on_button_press_cb)
        self.edit_area.connect("button-release-event", self.__on_button_release_cb)
        self.edit_area.connect("motion-notify-event", self.__on_motion_notify_cb)
        #self.edit_area.connect("leave-notify-event", self.__on_leave_notify_cb)
        self.edit_area.connect("expose-event", self.__expose_edit)

        # Black spacers above/below the 4:3 camera view.
        self.camera_area_down.add_events(gtk.gdk.POINTER_MOTION_MASK)
        #self.camera_area.connect("motion-notify-event", self.__on_camera_motion_notify_cb)
        self.camera_area_down.connect("motion-notify-event", self.__on_camera_motion_notify_cb)
        self.camera_area_up.connect("expose-event", self.__on_camera_expose_cb)
        self.camera_area_down.connect("expose-event", self.__on_camera_expose_cb)
        self.camera_area_vbox.pack_start(self.camera_area_up)
        self.camera_area_vbox.pack_start(self.camera_area, False, False)
        self.camera_area_vbox.pack_start(self.camera_area_down)

        # Floating popup window holding the button row.
        #panel_size = self.button_camera.get_size_request()
        #self.panel = Panel(panel_size[0], panel_size[1], gtk.WINDOW_POPUP)
        self.panel = Panel(self.AREA_WIDTH, self.button_hbox_height, gtk.WINDOW_POPUP)
        self.panel_layout = gtk.Fixed()
        #self.panel_layout.put(self.button_camera_align, (self.AREA_WIDTH-panel_size[0])/2, 0)
        self.panel_layout.put(self.button_hbox, 0, 0)
        self.panel.add(self.panel_layout)
        self.panel.hide_panel()
        self.panel.connect("expose-event", self.__draw_panel_background)
        self.panel.connect("size-allocate", lambda w,e: w.queue_draw())

        #self.panel.connect("enter-notify-event", self.__on_camera_enter_notify_cb)
        self.panel.connect("leave-notify-event", self.__on_camera_leave_notify_cb)
        self.camera_focus_flag = True

        # Editing state: mode, source pixbuf, scaled cache.
        self.__refresh_time_id = None
        self.__button_time_id = None
        self.current_mode = self.MODE_EDIT
        self.origin_pixbuf = None
        self.origin_pixbuf_width = 0
        self.origin_pixbuf_height = 0
        self.cache_pixbuf = CachePixbuf()
        self.border_color = "#000000"

        # cursor
        self.cursor = {
            self.POS_IN_DRAG : gtk.gdk.Cursor(gtk.gdk.BOTTOM_RIGHT_CORNER),
            self.POS_IN_MOVE : gtk.gdk.Cursor(gtk.gdk.FLEUR),
            self.POS_OUT : None}
        self.cursor_current = None

        # Gesture state for moving/resizing the selection box.
        self.press_point_coord = (0, 0)
        self.position = self.POS_OUT
        self.drag_flag = False
        self.move_flag = False
        #
        self.__show_button_flag = True
        self.__button_moving_flag = False
        #self.__refresh_flag = False

        # the pixbuf shown area
        self.pixbuf_offset_x = 0
        self.pixbuf_offset_y = 0
        self.pixbuf_offset_cmp_x = 0
        self.pixbuf_offset_cmp_y = 0
        self.pixbuf_x = 0
        self.pixbuf_y = 0
        self.pixbuf_w = self.AREA_WIDTH
        self.pixbuf_h = self.AREA_HEIGHT
        # the select box area
        self.edit_coord_x = 0
        self.edit_coord_y = 0
        self.edit_coord_w = self.AREA_WIDTH
        self.edit_coord_h = self.AREA_HEIGHT
        self.edit_coord_backup_x = 0
        self.edit_coord_backup_y = 0
        self.edit_coord_backup_w = self.AREA_WIDTH
        self.edit_coord_backup_h = self.AREA_HEIGHT

        self.drag_point_x = 0
        self.drag_point_y = 0
        self.__update_drag_point_coord()

    def draw_frame_border(self, widget, event):
        """Stroke a 1px border just outside the widget's allocation."""
        cr = widget.window.cairo_create()
        with cairo_disable_antialias(cr):
            cr.set_line_width(1)
            alloc_x, alloc_y, alloc_w, alloc_h = widget.allocation
            cr.set_source_rgb(*color_hex_to_cairo(TREEVIEW_BORDER_COLOR))
            cr.rectangle(alloc_x - 1, alloc_y - 1, alloc_w + 2, alloc_h + 2)
            cr.stroke()

    def on_camera_again_clicked_cb(self, button):
        """Return to live-camera mode so another photo can be taken."""
        self.set_camera_mode()

    def on_camera_clicked_cb(self, button):
        """Snapshot the current camera frame and switch to edit mode."""
        self.current_mode = self.MODE_CAMERA_EDIT
        # Copy the camera widget's drawable into a fresh pixbuf.
        drawable = self.camera_area.window
        colormap = drawable.get_colormap()
        size = drawable.get_size()
        snapshot = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8, *size)
        snapshot = snapshot.get_from_drawable(drawable, colormap, 0, 0, 0, 0, *size)
        self.__edit_picture(snapshot)
        # Rebuild the button row: zoom in/out plus "try again".
        container_remove_all(self.button_hbox)
        for align in (self.button_zoom_in_align,
                      self.button_zoom_out_align,
                      self.button_camera_again_align):
            self.button_hbox.pack_start(align)
        self.button_hbox.show_all()

    def on_zoom_in_clicked_cb(self, button):
        """Scale the pixbuf up by 10%, clamped to its original size,
        keeping it centred and the scroll offsets within bounds."""
        # Already at (or beyond) the original resolution: nothing to do.
        if self.pixbuf_w >= self.origin_pixbuf_width or self.pixbuf_h >= self.origin_pixbuf_height:
            print "has max size"
            button.set_sensitive(False)
            return
        width = int(self.pixbuf_w * 1.1)
        height = int(self.pixbuf_h * 1.1)
        # Never exceed the original resolution.
        if width >= self.origin_pixbuf_width:
            width = self.origin_pixbuf_width
        if height >= self.origin_pixbuf_height:
            height = self.origin_pixbuf_height
        self.cache_pixbuf.scale(self.origin_pixbuf, width, height)
        # count show area
        self.pixbuf_w = width
        self.pixbuf_h = height
        self.pixbuf_x = (self.AREA_WIDTH - width) / 2
        self.pixbuf_y = (self.AREA_HEIGHT - height) / 2
        # Pixbuf larger than the area: pin to the edge and scroll instead.
        if self.pixbuf_x < 0:
            self.pixbuf_offset_x -= self.pixbuf_x
            if self.pixbuf_offset_x + self.AREA_WIDTH > self.pixbuf_w:
                self.pixbuf_offset_x = self.pixbuf_w - self.AREA_WIDTH
            self.pixbuf_x = 0
        if self.pixbuf_y < 0:
            self.pixbuf_offset_y -= self.pixbuf_y
            if self.pixbuf_offset_y + self.AREA_HEIGHT > self.pixbuf_h:
                self.pixbuf_offset_y = self.pixbuf_h - self.AREA_HEIGHT
            self.pixbuf_y = 0
        self.__update_drag_point_coord()
        self.emit_changed()
        # Disable zoom-in at max size; re-enable zoom-out when the pixbuf
        # is again larger than the selection box.
        if self.pixbuf_w >= self.origin_pixbuf_width or self.pixbuf_h >= self.origin_pixbuf_height:
            button.set_sensitive(False)
        if not (self.pixbuf_w <= self.edit_coord_w or self.pixbuf_h <= self.edit_coord_h) \
                and not self.button_zoom_out.get_sensitive():
            self.button_zoom_out.set_sensitive(True)

    def on_zoom_out_clicked_cb(self, button):
        """Scale the pixbuf down by 10%, never smaller than the current
        selection box, re-clamping offsets and the box afterwards."""
        if self.edit_coord_w < self.MIN_SIZE or self.edit_coord_h < self.MIN_SIZE:
            self.edit_coord_w = self.edit_coord_h = self.MIN_SIZE
        # Pixbuf already no larger than the selection: cannot shrink more.
        if self.pixbuf_w <= self.edit_coord_w or self.pixbuf_h <= self.edit_coord_h:
            print "has min size"
            button.set_sensitive(False)
            return
        width = int(self.pixbuf_w * 0.9)
        height = int(self.pixbuf_h * 0.9)
        # Keep aspect ratio while not shrinking below the selection box.
        if height >= width and width <= self.edit_coord_w:
            height = int(float(height) / width * self.edit_coord_w)
            width = int(self.edit_coord_w)
        elif height < self.edit_coord_h:
            width = int(float(width) / height * self.edit_coord_h)
            height = int(self.edit_coord_h)
        self.cache_pixbuf.scale(self.origin_pixbuf, width, height)
        # count show area
        self.pixbuf_w = width
        self.pixbuf_h = height
        self.pixbuf_x = (self.AREA_WIDTH - width) / 2
        self.pixbuf_y = (self.AREA_HEIGHT - height) / 2
        # count pixbuf offset
        if self.pixbuf_x < 0:
            self.pixbuf_offset_x -= self.pixbuf_x
            if self.pixbuf_offset_x + self.AREA_WIDTH > self.pixbuf_w:
                self.pixbuf_offset_x = self.pixbuf_w - self.AREA_WIDTH
            self.pixbuf_x = 0
        if self.pixbuf_y < 0:
            self.pixbuf_offset_y -= self.pixbuf_y
            if self.pixbuf_offset_y + self.AREA_HEIGHT > self.pixbuf_h:
                self.pixbuf_offset_y = self.pixbuf_h - self.AREA_HEIGHT
            self.pixbuf_y = 0
        # Whole pixbuf now fits: no scrolling needed.
        if self.pixbuf_x + self.pixbuf_w < self.AREA_WIDTH:
            self.pixbuf_offset_x = 0
        if self.pixbuf_y + self.pixbuf_h < self.AREA_HEIGHT:
            self.pixbuf_offset_y = 0
        # count edit area
        # Keep the selection box inside the (possibly smaller) pixbuf.
        if self.edit_coord_x < self.pixbuf_x:
            self.edit_coord_x = self.pixbuf_x
        if self.edit_coord_y < self.pixbuf_y:
            self.edit_coord_y = self.pixbuf_y
        right_pos = min(self.pixbuf_x+self.pixbuf_w, self.AREA_WIDTH)
        bottom_pos = min(self.pixbuf_y+self.pixbuf_h, self.AREA_HEIGHT)
        if self.edit_coord_x + self.edit_coord_w > right_pos:
            self.edit_coord_x = right_pos - self.edit_coord_w
        if self.edit_coord_y + self.edit_coord_h > bottom_pos:
            self.edit_coord_y = bottom_pos - self.edit_coord_h
        self.__update_drag_point_coord()
        self.emit_changed()
        # Disable zoom-out at min size; re-enable zoom-in when below max.
        if self.pixbuf_w <= self.edit_coord_w or self.pixbuf_h <= self.edit_coord_h:
            button.set_sensitive(False)
        if not (self.pixbuf_w >= self.origin_pixbuf_width or self.pixbuf_h >= self.origin_pixbuf_height) \
                and not self.button_zoom_in.get_sensitive():
            self.button_zoom_in.set_sensitive(True)

    def __expose_edit(self, widget, event):
        """Expose handler: paint the scaled pixbuf, then mask and frame."""
        pixbuf = self.cache_pixbuf.get_cache()
        if not pixbuf:
            return
        cr = widget.window.cairo_create()
        draw_x = self.pixbuf_x - self.pixbuf_offset_x
        draw_y = self.pixbuf_y - self.pixbuf_offset_y
        cr.set_source_pixbuf(pixbuf, draw_x, draw_y)
        cr.paint()
        self.__draw_mask(cr, widget.allocation)
        self.__draw_frame(cr, widget.allocation)
        return True

    def __draw_frame(self, cr, allocation):
        """Draw the dashed selection rectangle and its resize handle."""
        with cairo_disable_antialias(cr):
            cr.set_line_width(1)
            cr.save()
            # Clamp the selection rect so the stroke stays visible
            # inside the edit area.
            x, y = self.edit_coord_x, self.edit_coord_y
            w, h = self.edit_coord_w, self.edit_coord_h
            if x == 0:
                x = 1
            if y == 0:
                y = 1
            if x + w > self.AREA_WIDTH:
                w = self.AREA_WIDTH - x
            if y + h > self.AREA_HEIGHT:
                h = self.AREA_HEIGHT - y
            # Two complementary dash patterns give an alternating
            # dark/white "marching ants" outline.
            cr.set_dash((9, 3))
            cr.set_source_rgb(*color_hex_to_cairo(self.border_color))
            cr.rectangle(x, y, w, h)
            cr.stroke()
            cr.set_dash((3, 9))
            cr.set_source_rgb(1, 1, 1)
            cr.rectangle(x, y, w, h)
            cr.stroke()
            cr.restore()
            # Bottom-right resize handle.
            cr.set_source_rgb(*color_hex_to_cairo(self.border_color))
            cr.rectangle(self.drag_point_x, self.drag_point_y, self.DRAG_WIDTH, self.DRAG_WIDTH)
            cr.stroke()

    def __draw_mask(self, cr, allocation):
        """Dim everything outside the selection box with 50% black."""
        _, _, w, h = allocation
        ex, ey = self.edit_coord_x, self.edit_coord_y
        ew, eh = self.edit_coord_w, self.edit_coord_h
        # (x, y, width, height) of the strips around the selection box,
        # in the original draw order: left, top, bottom, right.
        strips = []
        if ex > 0:
            strips.append((0, 0, ex, h))
        if ey > 0:
            strips.append((ex, 0, w - ex, ey))
        if ey + eh < h:
            strips.append((ex, ey + eh, w - ex, h - ey - eh))
        if ex + ew < w:
            strips.append((ex + ew, ey, w - ex - ew, eh))
        for sx, sy, sw, sh in strips:
            cr.set_source_rgba(0, 0, 0, 0.5)
            cr.rectangle(sx, sy, sw, sh)
            cr.fill()

    def set_pixbuf(self, pixbuf):
        """Load *pixbuf* into the editor: scale it to fit the area while
        preserving aspect ratio, centre it, reset the selection box to a
        centred MIN_SIZE square, and emit "pixbuf-changed".

        Passing None clears the cache and disables both zoom buttons.
        """
        if not pixbuf:
            self.cache_pixbuf.cache_pixbuf = None
            self.edit_area.queue_draw()
            self.emit_changed()
            self.button_zoom_in.set_sensitive(False)
            self.button_zoom_out.set_sensitive(False)
            return
        self.origin_pixbuf = pixbuf
        self.origin_pixbuf_width = w = pixbuf.get_width()
        self.origin_pixbuf_height = h = pixbuf.get_height()
        # Fit the longer dimension to the area; never let the scaled
        # size fall below MIN_SIZE.
        if h >= w:
            w = int(float(w) / h * self.AREA_HEIGHT)
            h = self.AREA_HEIGHT
            if w < self.MIN_SIZE:
                h = int(float(h) / w * self.MIN_SIZE)
                w = self.MIN_SIZE
            self.edit_coord_w = self.edit_coord_h = w
        else:
            h = int(float(h) / w * self.AREA_WIDTH)
            w = self.AREA_WIDTH
            if h < self.MIN_SIZE:
                w = int(float(w) / h * self.MIN_SIZE)
                h = self.MIN_SIZE
            self.edit_coord_w = self.edit_coord_h = h
        # count the offset coord
        self.pixbuf_offset_x = 0
        self.pixbuf_offset_y = 0
        self.pixbuf_x = 0
        self.pixbuf_y = 0
        self.pixbuf_w = w
        self.pixbuf_h = h
        # Centre a pixbuf that is smaller than the area.
        if w < self.AREA_WIDTH:
            self.pixbuf_x = (self.AREA_WIDTH - w) / 2
        if h < self.AREA_HEIGHT :
            self.pixbuf_y = (self.AREA_HEIGHT - h) / 2
        # update select box coord
        self.edit_coord_x = self.pixbuf_x
        self.edit_coord_y = self.pixbuf_y
        self.cache_pixbuf.scale(pixbuf, w, h)
        ######
        # Selection always starts as a centred MIN_SIZE square,
        # overriding the sizes computed above.
        self.edit_coord_w = self.edit_coord_h = self.MIN_SIZE
        self.edit_coord_x = self.edit_coord_y = (self.AREA_WIDTH - self.MIN_SIZE) / 2
        ######
        self.drag_point_x = self.edit_coord_x + self.edit_coord_w - self.DRAG_WIDTH
        self.drag_point_y = self.edit_coord_y + self.edit_coord_h - self.DRAG_WIDTH
        self.edit_area.queue_draw()
        self.emit_changed()
        self.__update_button_sensitive()

    def emit_changed(self):
        """Emit "pixbuf-changed" with the currently selected sub-pixbuf
        (or None when there is no cached pixbuf)."""
        selection = self.cache_pixbuf.get_cache()
        if selection:
            # Translate the selection box into pixbuf coordinates.
            src_x = int(self.edit_coord_x - self.pixbuf_x + self.pixbuf_offset_x)
            src_y = int(self.edit_coord_y - self.pixbuf_y + self.pixbuf_offset_y)
            selection = selection.subpixbuf(src_x, src_y,
                                            int(self.edit_coord_w),
                                            int(self.edit_coord_h))
        self.emit("pixbuf-changed", selection)

    def get_pixbuf(self):
        """Return the cached (scaled) pixbuf, or None when unset."""
        return self.cache_pixbuf.get_cache()

    def set_cursor(self):
        """Update the mouse cursor to match the current hover position."""
        window = self.edit_area.window
        if not window:
            return
        wanted = self.cursor[self.position]
        # Only touch the X cursor when it actually changes.
        if self.cursor_current != wanted:
            self.cursor_current = wanted
            window.set_cursor(wanted)

    def __on_button_press_cb(self, widget, event):
        """Begin a drag/move gesture and snapshot the selection box."""
        self.__update_position(event.x, event.y)
        if self.position == self.POS_IN_DRAG:
            self.drag_flag = True
        elif self.position == self.POS_IN_MOVE:
            self.move_flag = True
        self.press_point_coord = (event.x, event.y)
        # Remember the selection geometry so motion deltas are relative
        # to the press point, not to the last motion event.
        (self.edit_coord_backup_x,
         self.edit_coord_backup_y,
         self.edit_coord_backup_w,
         self.edit_coord_backup_h) = (self.edit_coord_x,
                                      self.edit_coord_y,
                                      self.edit_coord_w,
                                      self.edit_coord_h)

    def __on_button_release_cb(self, widget, event):
        """End any ongoing drag or move gesture."""
        self.drag_flag = self.move_flag = False

    def __on_motion_notify_cb(self, widget, event):
        """Handle pointer motion over the edit area.

        Depending on state this either slides the button panel in (hover
        near the bottom edge), moves the selection box (scrolling the
        pixbuf when the box hits an edge), or resizes the box via the
        bottom-right drag handle.
        """
        # if application window has not grab focus, return function
        if not self.camera_focus_flag:
            return
        x, y, w, h = widget.allocation
        # Hovering the bottom strip with no gesture active: reveal the
        # sliding button panel and stop here.
        if not self.drag_flag and not self.move_flag and \
                not self.panel.get_visible() and \
                y+self.AREA_HEIGHT-self.button_hbox_height<event.y<y+self.AREA_HEIGHT:
            self.slide_button_show(event)
            return
        pixbuf = self.cache_pixbuf.get_cache()
        if not pixbuf:
            return
        # No gesture in progress: just update the hover cursor.
        if not self.drag_flag and not self.move_flag:
            self.__update_position(event.x, event.y)
            self.set_cursor()
        right_pos = min(self.pixbuf_x+self.pixbuf_w, self.AREA_WIDTH)
        bottom_pos = min(self.pixbuf_y+self.pixbuf_h, self.AREA_HEIGHT)
        if self.move_flag:
            # Move the selection box relative to the press point.
            self.edit_coord_x = self.edit_coord_backup_x + event.x - self.press_point_coord[0]
            self.edit_coord_y = self.edit_coord_backup_y + event.y - self.press_point_coord[1]

            # check left
            if self.edit_coord_x < self.pixbuf_x:
                # move the pixbuf into canvas
                if self.pixbuf_w > self.AREA_WIDTH:
                    if self.pixbuf_offset_x > 0:
                        self.pixbuf_offset_x -= self.pixbuf_x - self.edit_coord_x
                    if self.pixbuf_offset_x < 0:
                        self.pixbuf_offset_x = 0
                self.edit_coord_x = self.pixbuf_x
            # check top
            if self.edit_coord_y < self.pixbuf_y:
                # move the pixbuf into canvas
                if self.pixbuf_h > self.AREA_HEIGHT:
                    if self.pixbuf_offset_y > 0:
                        self.pixbuf_offset_y -= self.pixbuf_y - self.edit_coord_y
                    if self.pixbuf_offset_y < 0:
                        self.pixbuf_offset_y = 0
                self.edit_coord_y = self.pixbuf_y
            # check right
            if self.edit_coord_x + self.edit_coord_w > right_pos:
                # move the pixbuf into canvas
                if self.pixbuf_w > self.AREA_WIDTH:
                    if self.pixbuf_offset_x + self.AREA_WIDTH < self.pixbuf_w:
                        self.pixbuf_offset_x += (self.edit_coord_x + self.edit_coord_w) - self.AREA_WIDTH
                    if self.pixbuf_offset_x + self.AREA_WIDTH > self.pixbuf_w:
                        self.pixbuf_offset_x = self.pixbuf_w - self.AREA_WIDTH
                self.edit_coord_x = right_pos - self.edit_coord_w
            # check bottom
            if self.edit_coord_y + self.edit_coord_h > bottom_pos:
                # move the pixbuf into canvas
                if self.pixbuf_h > self.AREA_HEIGHT:
                    if self.pixbuf_offset_y + self.AREA_HEIGHT < self.pixbuf_h:
                        self.pixbuf_offset_y += (self.edit_coord_y + self.edit_coord_h) - self.AREA_HEIGHT
                    if self.pixbuf_offset_y + self.AREA_HEIGHT > self.pixbuf_h:
                        self.pixbuf_offset_y = self.pixbuf_h - self.AREA_HEIGHT
                self.edit_coord_y = bottom_pos - self.edit_coord_h
        elif self.drag_flag:
            # Resize: the box stays square, following the larger of the
            # horizontal/vertical deltas from the press point.
            drag_offset = max(event.x - self.press_point_coord[0],
                              event.y - self.press_point_coord[1])
            self.edit_coord_h = self.edit_coord_w = self.edit_coord_backup_w + drag_offset
            if self.edit_coord_h < self.MIN_SIZE or self.edit_coord_w < self.MIN_SIZE:
                self.edit_coord_h = self.edit_coord_w = self.MIN_SIZE

            if self.edit_coord_x + self.edit_coord_w > right_pos:
                self.edit_coord_h = self.edit_coord_w = right_pos - self.edit_coord_x
            if self.edit_coord_y + self.edit_coord_h > bottom_pos:
                self.edit_coord_h = self.edit_coord_w = bottom_pos - self.edit_coord_y
            # check zoom_out button sensitive
            # if edit_area's size more than pixbuf size, then disable zoom_out button
            # else enable zoom_out button
            if not (self.pixbuf_w <= self.edit_coord_w or self.pixbuf_h <= self.edit_coord_h):
                if not self.button_zoom_out.get_sensitive():
                    self.button_zoom_out.set_sensitive(True)
            else:
                if self.button_zoom_out.get_sensitive():
                    self.button_zoom_out.set_sensitive(False)
        self.__update_drag_point_coord()

    def __on_camera_motion_notify_cb(self, widget, event):
        # Pointer moved over the camera area: when the widget has camera
        # focus and the slide-in panel is hidden, reposition the panel under
        # the camera view and start the show animation.
        if not self.camera_focus_flag:
            return
        x, y, w, h = widget.allocation
        #if not self.panel.get_visible() and \
                #y+self.AREA_HEIGHT-self.button_hbox_height<event.y<y+self.AREA_HEIGHT:
        if not self.panel.get_visible():
            # cancel any pending refresh/slide timers before scheduling a new one
            if self.__refresh_time_id:
                gtk.timeout_remove(self.__refresh_time_id)
            if self.__button_time_id:
                gtk.timeout_remove(self.__button_time_id)
            self.__button_time_id = gobject.timeout_add(30, self.__slide_camera_button_show)
            #self.panel_layout.move(self.button_hbox, 0, self.button_hbox_height)
            # root-window coordinates of the widget's top-left corner,
            # shifted so the panel sits at the bottom edge of the camera area
            x = event.x_root - event.x
            y = event.y_root - event.y - widget.allocation.y + self.AREA_HEIGHT - self.button_hbox_height
            self.set_win_pos(event.x_root - event.x - self.allocation.x,
                             event.y_root - event.y - widget.allocation.y - self.allocation.y)
            self.panel.move(int(x), int(y) + self.button_hbox_height)
            self.panel.show_panel()

    def __draw_panel_background(self, widget, event):
        # Expose handler for the slide-in panel: paint an opaque black base,
        # then a 50%-alpha black wash, and let children draw on top.
        cr = widget.window.cairo_create()
        x, y, w, h = widget.allocation
        cr.set_source_rgb(0, 0, 0)
        cr.set_operator(OPERATOR_SOURCE)
        cr.paint()

        cr.set_source_rgba(0, 0, 0, 0.5)
        cr.rectangle(x, y, w, h)
        # NOTE(review): cairo paint() ignores the rectangle path built above
        # and covers the whole surface; if the wash was meant to cover only
        # the allocation, fill() was likely intended — confirm before changing.
        cr.paint()
        propagate_expose(widget, event)
        return True

    def __on_camera_expose_cb(self, widget, event):
        # Expose handler: clear the camera widget to solid black and stop
        # the event from propagating further.
        ctx = widget.window.cairo_create()
        alloc = widget.allocation
        ctx.set_source_rgb(0, 0, 0)
        ctx.rectangle(0, 0, alloc.width, alloc.height)
        ctx.fill()
        return True

    def __on_camera_enter_notify_cb(self, widget, event):
        # Intentionally a no-op: entering the camera area needs no reaction;
        # the handler exists only to pair with the leave-notify callback.
        pass

    def __on_camera_leave_notify_cb(self, widget, event):
        # Slide the camera button bar away when the pointer leaves the area.
        x, y, w, h = widget.allocation
        # Ignore spurious leave events: (0, 0) root coordinates, or the
        # pointer actually still being inside the widget's allocation.
        if (event.y_root == event.x_root == 0.0) or (x < event.x < x+w and y < event.y < y+h):
            return
        if self.__button_time_id:
            gtk.timeout_remove(self.__button_time_id)
        # animate the hide in 30 ms steps
        self.__button_time_id = gobject.timeout_add(30, self.__slide_camera_button_hide)

    def __update_drag_point_coord(self):
        # Recompute the resize-handle position (bottom-right corner of the
        # edit rectangle). If the handle moved or the pixbuf was panned,
        # record the new state and notify listeners; always queue a redraw.
        handle_x = self.edit_coord_x + self.edit_coord_w - self.DRAG_WIDTH
        handle_y = self.edit_coord_y + self.edit_coord_h - self.DRAG_WIDTH
        handle_moved = (self.drag_point_x, self.drag_point_y) != (handle_x, handle_y)
        pixbuf_panned = (self.pixbuf_offset_cmp_x != self.pixbuf_offset_x or
                         self.pixbuf_offset_cmp_y != self.pixbuf_offset_y)
        if handle_moved or pixbuf_panned:
            self.drag_point_x = handle_x
            self.drag_point_y = handle_y
            self.pixbuf_offset_cmp_x = self.pixbuf_offset_x
            self.pixbuf_offset_cmp_y = self.pixbuf_offset_y
            self.emit_changed()
        self.edit_area.queue_draw()

    def __update_position(self, x, y):
        # Classify the point (x, y): on the resize handle, inside the edit
        # rectangle, or outside both. The handle test wins when they overlap.
        on_handle = (self.drag_point_x <= x <= self.drag_point_x + self.DRAG_WIDTH
                     and self.drag_point_y <= y <= self.drag_point_y + self.DRAG_WIDTH)
        in_rect = (self.edit_coord_x <= x <= self.edit_coord_x + self.edit_coord_w
                   and self.edit_coord_y <= y <= self.edit_coord_y + self.edit_coord_h)
        if on_handle:
            self.position = self.POS_IN_DRAG
        elif in_rect:
            self.position = self.POS_IN_MOVE
        else:
            self.position = self.POS_OUT

    def set_camera_mode(self):
        # Switch the widget into live-camera mode: show the camera view and
        # refresh the camera button row.
        self.current_mode = self.MODE_CAMERA
        self.__camera_picture()
        self.__refresh_camera_button()

    def __camera_picture(self):
        """Swap the still-image edit area for the live camera view.

        Rebuilds the button box with the camera controls and either creates
        the video pipeline (first use) or restarts the existing one.
        """
        self.set_pixbuf(None)
        # replace the edit area with the camera widget at the top of the box
        if self.edit_area in self.box.get_children():
            self.box.remove(self.edit_area)
        if not self.camera_area_vbox in self.box.get_children():
            self.box.pack_start(self.camera_area_vbox, False, False)
            self.box.reorder_child(self.camera_area_vbox, 0)
        container_remove_all(self.button_hbox)
        self.button_hbox.pack_start(self.button_camera_align)
        self.button_hbox.show_all()
        self.show_all()
        try:
            if not self.camera_area.video_player:
                self.camera_area.create_video_pipeline()
            else:
                self.camera_start()
        # `except Exception as e` replaces the Python-2-only `except Exception, e`
        # (a SyntaxError on Python 3); camera startup stays best-effort.
        except Exception as e:
            print(e)
class GUI(QMainWindow, QThread):
    """Main window of the camera-based heart-rate monitor.

    Frames come from either a webcam or a video file, are run through the
    ``Process`` pipeline, and the results (annotated frame, face ROI,
    signal/FFT plots, BPM estimates) are shown in the window. Per-frame
    stats are appended to ``data.json``.
    """

    def __init__(self):
        # start each session with a fresh stats file
        if os.path.exists("data.json"):
            os.remove("data.json")

        super(GUI, self).__init__()
        self.initUI()
        self.webcam = Webcam()
        self.video = Video()
        self.input = self.webcam  # active frame source; toggled in selectInput()
        self.dirname = ""  # chosen video path; empty until openFileDialog()
        print("Input: webcam")
        self.statusBar.showMessage("Input: webcam", 5000)
        self.btnOpen.setEnabled(False)  # "Open" only applies to video input
        self.process = Process()
        self.status = False  # True while the capture loop is running
        self.frame = np.zeros((10, 10, 3), np.uint8)
        self.bpm = 0

    def initUI(self):
        """Create and lay out all widgets; called once from __init__."""
        #set font
        font = QFont()
        font.setPointSize(16)

        #widgets
        self.btnStart = QPushButton("Start", self)
        self.btnStart.move(440, 520)
        self.btnStart.setFixedWidth(200)
        self.btnStart.setFixedHeight(50)
        self.btnStart.setFont(font)
        self.btnStart.clicked.connect(self.run)

        self.btnOpen = QPushButton("Open", self)
        self.btnOpen.move(230, 520)
        self.btnOpen.setFixedWidth(200)
        self.btnOpen.setFixedHeight(50)
        self.btnOpen.setFont(font)
        self.btnOpen.clicked.connect(self.openFileDialog)

        self.cbbInput = QComboBox(self)
        self.cbbInput.addItem("Webcam")
        self.cbbInput.addItem("Video")
        self.cbbInput.setCurrentIndex(0)
        self.cbbInput.setFixedWidth(200)
        self.cbbInput.setFixedHeight(50)
        self.cbbInput.move(20, 520)
        self.cbbInput.setFont(font)
        self.cbbInput.activated.connect(self.selectInput)
        #-------------------

        self.lblDisplay = QLabel(self)  #label to show frame from camera
        self.lblDisplay.setGeometry(10, 10, 640, 480)
        self.lblDisplay.setStyleSheet("background-color: #000000")

        self.lblROI = QLabel(self)  #label to show face with ROIs
        self.lblROI.setGeometry(660, 10, 200, 200)
        self.lblROI.setStyleSheet("background-color: #000000")

        self.lblHR = QLabel(self)  #label to show HR change over time
        self.lblHR.setGeometry(900, 20, 300, 40)
        self.lblHR.setFont(font)
        self.lblHR.setText("Frequency: ")

        self.lblHR2 = QLabel(self)  #label to show stable HR
        self.lblHR2.setGeometry(900, 70, 300, 40)
        self.lblHR2.setFont(font)
        self.lblHR2.setText("Heart rate: ")

        #dynamic plots: raw signal and its FFT
        self.signal_Plt = pg.PlotWidget(self)

        self.signal_Plt.move(660, 220)
        self.signal_Plt.resize(480, 192)
        self.signal_Plt.setLabel('bottom', "Signal")

        self.fft_Plt = pg.PlotWidget(self)

        self.fft_Plt.move(660, 425)
        self.fft_Plt.resize(480, 192)
        self.fft_Plt.setLabel('bottom', "FFT")

        # refresh the plots every 200 ms
        self.timer = pg.QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(200)

        self.statusBar = QStatusBar()
        self.statusBar.setFont(font)
        self.setStatusBar(self.statusBar)

        #event close
        self.c = Communicate()
        self.c.closeApp.connect(self.close)

        #config main window
        self.setGeometry(100, 100, 1160, 640)
        self.setWindowTitle("Heart rate monitor")
        self.show()

    def update(self):
        """Timer slot: redraw the signal and FFT plots.

        NOTE(review): this shadows QWidget.update(); it is wired to the
        200 ms QTimer in initUI(), which appears intentional — confirm.
        """
        self.signal_Plt.clear()
        self.signal_Plt.plot(self.process.samples[20:], pen='g')

        self.fft_Plt.clear()
        self.fft_Plt.plot(np.column_stack(
            (self.process.freqs, self.process.fft)),
                          pen='g')

    def center(self):
        """Center the window on the available desktop geometry."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def closeEvent(self, event):
        """Confirm quit; on yes, stop the frame source and close cv2 windows."""
        reply = QMessageBox.question(self, "Message",
                                     "Are you sure want to quit",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            event.accept()
            self.input.stop()
            cv2.destroyAllWindows()
        else:
            event.ignore()

    def selectInput(self):
        """Combo-box slot: switch between webcam and video-file input."""
        self.reset()
        if self.cbbInput.currentIndex() == 0:
            self.input = self.webcam
            print("Input: webcam")
            self.btnOpen.setEnabled(False)
            self.statusBar.showMessage("Input: webcam", 5000)
        elif self.cbbInput.currentIndex() == 1:
            self.input = self.video
            print("Input: video")
            self.btnOpen.setEnabled(True)
            self.statusBar.showMessage("Input: video", 5000)

    def mousePressEvent(self, event):
        # any click on the window background triggers the quit dialog
        self.c.closeApp.emit()

    def key_handler(self):
        """
        Poll cv2 key events and exit the program on ESC.
        The cv2 window must be focused for keypresses to be detected.
        """
        # qualify waitKey via cv2 for consistency with the other cv2 calls;
        # waits up to 1 ms for a keypress
        self.pressed = cv2.waitKey(1) & 255
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def openFileDialog(self):
        """Let the user pick a video file; remember its path in dirname."""
        # NOTE(review): under PyQt5 getOpenFileName returns (path, filter);
        # the string concatenation below assumes the PyQt4-style single
        # return value — confirm which binding this project uses.
        self.dirname = QFileDialog.getOpenFileName(
            self, 'OpenFile', r"C:\Users\uidh2238\Desktop\test videos")
        self.statusBar.showMessage("File name: " + self.dirname, 5000)

    def reset(self):
        """Clear the processing state and blank the main display."""
        self.process.reset()
        self.lblDisplay.clear()
        self.lblDisplay.setStyleSheet("background-color: #000000")

    @QtCore.pyqtSlot()
    def main_loop(self):
        """Process one frame: run the pipeline, refresh the GUI, log stats."""
        frame = self.input.get_frame()

        self.process.frame_in = frame
        self.process.run()

        cv2.imshow("Processed", frame)

        self.frame = self.process.frame_out  #get the frame to show in GUI
        self.f_fr = self.process.frame_ROI  #get the face to show in GUI
        self.bpm = self.process.bpm  #get the bpm change over the time

        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
        cv2.putText(self.frame,
                    "FPS " + str(float("{:.2f}".format(self.process.fps))),
                    (20, 460), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 255), 2)
        img = QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
                     self.frame.strides[0], QImage.Format_RGB888)
        self.lblDisplay.setPixmap(QPixmap.fromImage(img))

        self.f_fr = cv2.cvtColor(self.f_fr, cv2.COLOR_RGB2BGR)
        # identity transpose; .copy() makes the buffer contiguous for QImage
        self.f_fr = np.transpose(self.f_fr, (0, 1, 2)).copy()
        f_img = QImage(self.f_fr, self.f_fr.shape[1], self.f_fr.shape[0],
                       self.f_fr.strides[0], QImage.Format_RGB888)
        self.lblROI.setPixmap(QPixmap.fromImage(f_img))

        self.lblHR.setText("Freq: " + str(float("{:.2f}".format(self.bpm))))

        if len(self.process.bpms) > 50:
            if (
                    max(self.process.bpms - np.mean(self.process.bpms)) < 5
            ):  #show HR if it is stable -the change is not over 5 bpm- for 3s
                self.lblHR2.setText(
                    "Heart rate: " +
                    str(float("{:.2f}".format(np.mean(self.process.bpms)))) +
                    " bpm")

        entry = [
            "FPS: " + str(float("{:.2f}".format(self.process.fps))),
            "Freq: " + str(float("{:.2f}".format(self.bpm))), "Heart rate: " +
            str(float("{:.2f}".format(np.mean(self.process.bpms))))
        ]
        # NOTE(review): appending "<array>," per frame does not produce a
        # valid JSON document as a whole; kept as-is since downstream readers
        # may depend on this format. (Removed a redundant close() — the
        # `with` block already closes the file.)
        with open('data.json', 'a') as outfile:
            outfile.write(json.dumps(entry))
            outfile.write(",")

        self.key_handler()  #if not the GUI cant show anything

    def run(self, input=None):
        """Start/stop slot for the Start button.

        ``input`` is accepted only for signal compatibility (the clicked
        signal passes its ``checked`` flag, and it shadows the builtin);
        the actual frame source is always ``self.input``. The default of
        None additionally allows direct ``run()`` calls.
        """
        self.reset()
        input = self.input
        self.input.dirname = self.dirname
        if self.input.dirname == "" and self.input == self.video:
            print("choose a video first")
            self.statusBar.showMessage("choose a video first", 5000)
            return
        if not self.status:
            self.status = True
            input.start()
            self.btnStart.setText("Stop")
            self.cbbInput.setEnabled(False)
            self.btnOpen.setEnabled(False)
            self.lblHR2.clear()
            # NOTE(review): blocking loop on the GUI thread; it works because
            # main_loop pumps cv2 events each iteration, but a QTimer would
            # be the cleaner design.
            while self.status:
                self.main_loop()
        else:
            self.status = False
            input.stop()
            self.btnStart.setText("Start")
            self.cbbInput.setEnabled(True)
Beispiel #48
0
class SaltwashAR:
    """AR app: finds markers in the webcam feed, draws the feed as an
    OpenGL background, and animates robots positioned by marker pose."""

    # constants
    # Sign-flip mask applied element-wise to the OpenCV pose matrix to
    # convert it into OpenGL's camera convention (y and z axes inverted).
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        """One-time OpenGL setup; also loads robot frames and starts the
        webcam capture thread."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        """Per-frame GLUT callback: draw the webcam background, then the
        markers/robots, then let the features react to the frame."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background (each handler gets its own copy of the frame)
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())

        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        """Upload the camera frame as a texture and draw it on a far quad."""

        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format (flipped vertically, raw BGRX bytes)
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background quad pushed back along -z
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_markers(self, image):
        """Detect markers and animate the matching robot at each pose."""

        # attempt to detect markers
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache: reuse the previous detection for one frame
        # to smooth over momentary dropouts
        if markers:
            self.markers_cache = markers
        elif self.markers_cache:
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:

            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix from the Rodrigues rotation + translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

        # element-wise product (ndarray *), not a matrix product: flips the
        # signs per INVERSE_MATRIX to match OpenGL's axis convention
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        """Entry point: set up the GLUT window and enter the render loop."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        self.window_id = glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Beispiel #49
0
    "%.0f" % frame_width_meters, "%.0f" % DISTANCE_METERS))

# -- other values used in program
# (meanings below inferred from names and typical usage — confirm against
# the rest of the script)
base_image = None          # reference frame for motion detection (set later)
abs_chg = 0                # absolute pixel change between frames
mph = 0                    # last computed speed
secs = 0.0                 # elapsed time for the current track
ix, iy = -1, -1            # initial corner of the user-drawn monitor area
fx, fy = -1, -1            # final corner of the user-drawn monitor area
drawing = False            # True while the mouse is dragging the rectangle
setup_complete = False     # True once the monitored area has been defined
tracking = False           # True while a vehicle is being tracked
text_on_image = 'No cars'
prompt = ''

# frame source: pre-recorded calibration clip, played back at FPS
webcam = Webcam("calibrated.avi", FPS)

# create an image window and place it in the upper left corner of the screen
cv2.namedWindow("Speed Camera")
cv2.moveWindow("Speed Camera", 10, 40)

# call the draw_rectangle routines when the mouse is used
cv2.setMouseCallback('Speed Camera', draw_rectangle)

# grab a reference image to use for drawing the monitored area's boundry
image = webcam.get_image()
org_image = image.copy()

if SAVE_CSV:
    # NOTE(review): extension ".cvs" looks like a typo for ".csv" — confirm
    # before changing; downstream tooling may already expect this name.
    csvfileout = "carspeed_{}.cvs".format(
        datetime.datetime.now().strftime("%Y%m%d_%H%M"))
Beispiel #50
0
#2D image points. If you change the image, you need to change vector
# Facial-landmark pixel coordinates measured on the reference image —
# presumably fed to cv2.solvePnP for head-pose estimation; confirm.
image_points = np.array(
    [
        (359, 391),  # Nose tip
        (399, 561),  # Chin
        (337, 297),  # Left eye left corner
        (513, 301),  # Right eye right corner
        (345, 465),  # Left Mouth corner
        (453, 469)  # Right mouth corner
    ],
    dtype="double")

# 2d points in image plane.

# frame source (capture thread deliberately not started here)
webcam = Webcam()
#webcam.start()

def draw(img, corners, imgpts):
    """Draw a 3-D axis triad on *img*.

    corners: 2-D points; corners[0] is the origin of the axes.
    imgpts:  projected axis endpoints, one per row (x, y, z order).
    Returns the image with the three axis lines drawn (BGR: x=blue,
    y=green, z=red).
    """
    # cv2.line requires integer pixel coordinates; .ravel() yields floats
    # from projectPoints, and modern OpenCV raises on float tuples, so
    # cast explicitly.
    corner = tuple(int(v) for v in corners[0].ravel())
    img = cv2.line(img, corner, tuple(int(v) for v in imgpts[0].ravel()), (255, 0, 0), 5)
    img = cv2.line(img, corner, tuple(int(v) for v in imgpts[1].ravel()), (0, 255, 0), 5)
    img = cv2.line(img, corner, tuple(int(v) for v in imgpts[2].ravel()), (0, 0, 255), 5)
    return img


while True:

    # get image from webcam
    webcam.update_frame()
 def is_has_camera(self):
     # Report whether a physical camera device is available
     # (delegates to Webcam.has_device()).
     return Webcam.has_device()
Beispiel #52
0
class SaltwashAR:
    """AR app: finds glyphs in the webcam feed, draws the feed as an
    OpenGL background, animates robots at the glyph poses, and
    (optionally) drives a browser that speaks for the visible robot."""

    # constants
    # Sign-flip mask applied element-wise to the OpenCV pose matrix to
    # convert it into OpenGL's camera convention (y and z axes inverted).
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise glyphs
        self.glyphs = Glyphs()
        self.glyphs_cache = None

        # initialise browser (optional, per config)
        self.browser = None

        if self.config_provider.browser:
            self.browser = Browser()

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        """One-time OpenGL setup; also loads robot frames and starts the
        webcam (and optional browser) threads."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start threads
        self.webcam.start()

        if self.browser:
            self.browser.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        """Per-frame GLUT callback: background, glyphs/robots, browser."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.is_detected = False
        self.sporty_robot.is_detected = False

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background
        self._handle_background(image)

        # handle glyphs
        self._handle_glyphs(image)

        # handle browser
        self._handle_browser()

        glutSwapBuffers()

    def _handle_background(self, image):
        """Upload the camera frame as a texture and draw it on a far quad."""

        # convert image to OpenGL texture format (flipped vertically, raw BGRX bytes)
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        # tobytes() replaces Image.tostring(), which was removed in Pillow
        # 3.0 (the other AR classes in this file already use tobytes()).
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background quad pushed back along -z
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_glyphs(self, image):
        """Detect glyphs and animate the matching robot at each pose."""

        # attempt to detect glyphs
        glyphs = []

        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex:
            print(ex)

        # manage glyphs cache: reuse the previous detection for one frame
        # to smooth over momentary dropouts
        if glyphs:
            self.glyphs_cache = glyphs
        elif self.glyphs_cache:
            glyphs = self.glyphs_cache
            self.glyphs_cache = None
        else:
            return

        for glyph in glyphs:

            rvecs, tvecs, _, glyph_name = glyph

            # build view matrix from the Rodrigues rotation + translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # element-wise product (ndarray *), not a matrix product: flips
            # signs per INVERSE_MATRIX to match OpenGL's axis convention
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if glyph_name == ROCKY_ROBOT:
                self.rocky_robot.is_detected = True

                if self.browser and self.browser.is_speaking:
                    self.rocky_robot.next_frame(True)
                else:
                    self.rocky_robot.next_frame(False)

            elif glyph_name == SPORTY_ROBOT:
                self.sporty_robot.is_detected = True

                if self.browser and self.browser.is_speaking:
                    self.sporty_robot.next_frame(True)
                else:
                    self.sporty_robot.next_frame(False)

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def _handle_browser(self):
        """Point the browser at the content for the detected robot, or halt."""

        # check browser instantiated
        if not self.browser: return

        # handle browser
        if self.rocky_robot.is_detected:
            self.browser.load(ROCK)
        elif self.sporty_robot.is_detected:
            self.browser.load(SPORT)
        else:
            self.browser.halt()

    def main(self):
        """Entry point: set up the GLUT window and enter the render loop."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("SaltwashAR")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
class OpenGLGlyphs:
    """AR demo: draws the webcam feed as an OpenGL background and renders
    an OBJ shape (cone or sphere) at each detected glyph's pose."""

    # constants
    # Sign-flip mask applied element-wise to the OpenCV pose matrix to
    # convert it into OpenGL's camera convention (y and z axes inverted).
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()

        # initialise glyphs
        self.glyphs = Glyphs()

        # initialise shapes (loaded from OBJ files in _init_gl)
        self.cone = None
        self.sphere = None

        # initialise texture
        self.texture_background = None

    def _init_gl(self, Width, Height):
        # One-time OpenGL setup. Width/Height parameters are currently
        # unused; the projection is hard-coded below.
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # assign shapes
        self.cone = OBJ('cone.obj')
        self.sphere = OBJ('sphere.obj')

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        # Per-frame GLUT callback: draw the webcam background, then the
        # glyph-anchored shapes, then swap buffers.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format (flipped vertically, raw BGRX bytes)
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        self._draw_background()
        glPopMatrix()

        # handle glyphs
        # NOTE(review): _handle_glyphs has no return value, so this
        # assignment sets image to None; harmless since image is unused
        # afterwards, but the assignment is misleading.
        image = self._handle_glyphs(image)

        glutSwapBuffers()

    def _handle_glyphs(self, image):
        # Detect glyphs and draw the matching OBJ shape at each pose.

        # attempt to detect glyphs
        glyphs = []

        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex:
            print(ex)

        if not glyphs:
            return

        for glyph in glyphs:

            rvecs, tvecs, glyph_name = glyph

            # build view matrix from the Rodrigues rotation + translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # element-wise product (ndarray *), not a matrix product: flips
            # signs per INVERSE_MATRIX to match OpenGL's axis convention
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if glyph_name == SHAPE_CONE:
                glCallList(self.cone.gl_list)
            elif glyph_name == SHAPE_SPHERE:
                glCallList(self.sphere.gl_list)

            glPopMatrix()

    def _draw_background(self):
        # draw background quad (texture coordinates flip the image upright)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )

    def main(self):
        # Entry point: set up the GLUT window and enter the render loop.
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("OpenGL Glyphs")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()