Beispiel #1
0
def emotion_recognition():
    """Take a webcam photo, run the emotion-recognition container on it,
    and return the detected emotion.

    Returns:
        The entry of ``EMOTIONS`` at the index written by the container,
        or ``None`` on any failure (webcam unavailable, photo not taken,
        container produced no/invalid output).
    """
    client = docker.from_env()
    t = time.time()

    if not Webcam.open():
        print("ERROR: cannot open webcam")
        return None

    Webcam.take_photo(path_volume + "face.jpg")
    Webcam.close()

    # watch() waits for the file to appear/update after timestamp t.
    if not watch(path_volume + "face.jpg", t):
        print("ERROR: cannot take photo")
        return None

    t = time.time()
    client.containers.run('app_emotion_recognition',
                          command='volume/face.jpg volume/emotion.txt',
                          volumes=volumes,
                          auto_remove=True)

    os.remove(path_volume + "face.jpg")

    if not watch(path_volume + "emotion.txt", t):
        print("ERROR: emotion_recognition cannot detect face or emotion")
        return None

    # BUG FIX: the original called isdigit() on the raw readline() result,
    # so a perfectly valid "3\n" was rejected because of the trailing
    # newline — strip it first.  A context manager guarantees the handle
    # is closed.
    with open(path_volume + "emotion.txt", "r") as f:
        res = f.readline().strip()

    # BUG FIX: remove the result file BEFORE the validity check; the
    # original leaked emotion.txt on the "not int" path, and a stale file
    # would satisfy the next call's watch() prematurely.
    os.remove(path_volume + "emotion.txt")

    if not res.isdigit():
        print("ERROR: emotion is not int")
        return None

    emotion_index = int(res)

    # isdigit() already excludes a leading '-', but keep the original guard.
    if emotion_index < 0:
        return None
    return EMOTIONS[emotion_index]
Beispiel #2
0
def face_recognizer(face_path=None, face_bib="faces.json"):
    """Identify the person in an image via the face-recognizer container.

    Args:
        face_path: Image filename inside the shared volume.  When ``None``
            (or empty), a fresh photo is taken with the webcam and stored
            as ``"face.jpg"``.
        face_bib: Filename of the face database (JSON) inside the volume.

    Returns:
        The recognized name — the first line of the container's output
        file, trailing newline included — or ``None`` on any failure.

    Side effects:
        The image at ``path_volume + face_path`` is deleted after the
        container runs, including a caller-supplied file.
    """
    if not face_path:
        face_path = "face.jpg"
        t = time.time()

        if not Webcam.open():
            print("ERROR: cannot open webcam")
            return None

        Webcam.take_photo(path_volume + face_path)
        Webcam.close()

        # watch() waits for the file to appear/update after timestamp t.
        if not watch(path_volume + face_path, t):
            print("ERROR: cannot take photo")
            return None

    client = docker.from_env()

    t = time.time()
    # BUG FIX: the original unconditionally reset face_path to "face.jpg"
    # at this point, silently ignoring any caller-supplied image and making
    # the parameter useless.  The supplied path is now actually used.

    client.containers.run('app_face_recognizer',
                          command='volume/' + face_path + ' volume/' +
                          face_bib + ' volume/face_reco.txt volume',
                          volumes=volumes,
                          auto_remove=True)
    os.remove(path_volume + face_path)

    if not watch(path_volume + "face_reco.txt", t):
        print("ERROR: face_recognizer cannot detect any face")
        return None

    # Context manager guarantees the handle is closed before the remove.
    with open(path_volume + "face_reco.txt", "r") as f:
        name = f.readline()

    os.remove(path_volume + "face_reco.txt")

    return name
Beispiel #3
0
class AR_Project:
    """Augmented-reality demo window.

    Streams webcam frames into an OpenGL background texture and renders a
    continuously rotating solid teapot in front of it using GLUT.
    """

    def __init__(self):
        # sigint interrupt initialize
        signal.signal(signal.SIGINT, self.signal_handler)

        # initialize webcam (frame capture runs on its own thread)
        self.webcam = Webcam()
        self.webcam.start()

        # rotation angles (degrees) and the teapot's z position (world units)
        self.x_axis = 0.0
        self.y_axis = 0.0
        self.z_axis = 0.0
        self.z_pos = -7.0

        self.win = 0                     # GLUT window handle, set in main()
        self.texture_background = None   # GL texture id, allocated in _init_gl()
        self.texture_teapot = None       # never used below — presumably reserved

    def signal_handler(self, signal, frame):
        """Ctrl+C handler: release the webcam and exit the process."""
        print('\nYou pressed Ctrl+C!')
        self.webcam.close()
        sys.exit()

    def _get_background(self):
        """Upload the current webcam frame into the background texture."""
        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format
        image = cv2.flip(image, 0)   # flip vertically (image origin vs GL origin)
        image = cv2.flip(image, 1)   # mirror horizontally (selfie view)
        gl_image = Image.fromarray(image)
        ix = gl_image.size[0]
        iy = gl_image.size[1]
        gl_image = gl_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        # NOTE(review): internalformat 3 (RGB) combined with GL_RGBA source
        # data from "BGRX" raw bytes looks mismatched — confirm the channel
        # order actually renders with correct colors.
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, gl_image)

    def _draw_background(self):
        """Draw a textured quad that fills the view behind the scene."""
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 4.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 4.0)
        glEnd()

    def _init_gl(self, Width, Height):
        """One-time GL state setup: projection, texture, lighting, blending."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # 45° FOV; the "-.2" aspect-ratio tweak looks empirical — TODO confirm intent
        gluPerspective(45.0, float(Width)/float(Height)-.2, 0.1, 500.0)
        glMatrixMode(GL_MODELVIEW)

        # enable texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

        # initialize lighting
        #glLightfv(GL_LIGHT0, GL_AMBIENT, (0.5, 0.5, 0.5, 1.0))
        #glLightfv(GL_LIGHT0, GL_DIFFUSE, (1.0, 0.8, 0.0, 1.0))
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)

        # initialize blending
        glColor4f(0.2, 0.2, 0.2, 0.5)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE)
        glEnable(GL_BLEND)

        #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        #glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
        #glEnable(GL_TEXTURE_2D)

    def _draw_scene(self):
        """Per-frame GLUT callback: draw the webcam background, then the teapot."""
        # handle any hand gesture
        self._get_background()

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glLoadIdentity();

        # draw background (pushed back so the teapot sits in front of it)
        #glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-11.2)
        self._draw_background()
        glPopMatrix()

        # position teapot
        glTranslatef(0.0,0.0,self.z_pos);
        glRotatef(self.x_axis,1.0,0.0,0.0)
        glRotatef(self.y_axis,0.0,1.0,0.0)
        glRotatef(self.z_axis,0.0,0.0,1.0)

        # draw teapot
        glutSolidTeapot(1.2)

        # rotate teapot: 2 degrees per frame around the x and z axes
        self.x_axis = self.x_axis - 2
        self.z_axis = self.z_axis - 2

        glutSwapBuffers()

    def main(self):
        """Create the GLUT window and enter the render loop (does not return)."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.win = glutCreateWindow("COS 429 AR Project")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Beispiel #4
0
        Initialize line-drawing and contour state.
    '''
    # overlay toggles, flipped by the 'l' and 'k' keys below
    drawline = False
    drawContours = False
    # initialise webcam and start thread
    webcam = Webcam()
    webcam.start()
    # main loop: grab a frame and process pygame events at ~30 FPS
    while True:
        clock.tick(30)
        frame = webcam.get_current_frame()

        for e in pygame.event.get():
            if e.type == QUIT or pygame.key.get_pressed()[pygame.K_q]:
                # quit: tear down pygame, OpenCV windows and the webcam
                pygame.quit()
                cv2.destroyAllWindows()
                webcam.close()
                sys.exit()
                break  # NOTE(review): unreachable — sys.exit() raises SystemExit above
            elif e.type == KEYDOWN and e.key == K_ESCAPE:
                sys.exit()
            elif pygame.key.get_pressed()[pygame.K_l]:
                drawline = not drawline  # toggle line drawing
            elif pygame.key.get_pressed()[pygame.K_k]:
                drawContours = not drawContours  # toggle contour drawing
            elif pygame.key.get_pressed()[pygame.K_c]:
                glEnable(GL_LIGHTING)  # turn lighting on
            elif pygame.key.get_pressed()[pygame.K_d]:
                glDisable(GL_LIGHTING)  # turn lighting off
            elif e.type == MOUSEBUTTONDOWN:
                # mouse wheel: zoom in/out (zpos_o presumably defined before this view — verify)
                if e.button == 4: zpos_o = max(1, zpos_o - 1)
                elif e.button == 5: zpos_o += 1