def retrieve_face_emotion_att(clientId):

    global global_vars
    global_var = (item for item in global_vars if item["clientId"] == str(clientId)).next()
    data = global_var['binary_data']

    chrome_server2client(clientId, 'Veuillez patienter pendant quelques secondes...')
    time.sleep(0.5)
    chrome_server2client(clientId, 'START')

    # Face API
    faceResult = face_api.faceDetect(None, None, data)

    # Emotion API
    emoResult = emotion_api.recognizeEmotion(None, None, data)

    # Results
    print 'Found {} '.format(len(faceResult)) + ('faces' if len(faceResult)!=1 else 'face')

    nb_faces = len(faceResult)
    tb_face_rect = [{} for ind in range(nb_faces)]
    tb_age       = ['' for ind in range(nb_faces)]
    tb_gender    = ['' for ind in range(nb_faces)]
    tb_glasses   = ['' for ind in range(nb_faces)]
    tb_emo       = ['' for ind in range(len(emoResult))]

    if (len(faceResult)>0 and len(emoResult)>0):
        ind = 0
        for currFace in faceResult:
            faceRectangle       = currFace['faceRectangle']
            faceAttributes      = currFace['faceAttributes']

            tb_face_rect[ind]   = faceRectangle
            tb_age[ind]         = str(faceAttributes['age'])
            tb_gender[ind]      = faceAttributes['gender']
            tb_glasses[ind]     = faceAttributes['glasses']
            ind += 1

        ind = 0
        for currFace in emoResult:
            tb_emo[ind] = max(currFace['scores'].iteritems(), key=operator.itemgetter(1))[0]
            ind += 1

        faceWidth  = np.zeros(shape=(nb_faces))
        faceHeight = np.zeros(shape=(nb_faces))
        for ind in range(nb_faces):
            faceWidth[ind]  = tb_face_rect[ind]['width']
            faceHeight[ind] = tb_face_rect[ind]['height']
        ind_max = np.argmax(faceWidth*faceHeight.T)

        global_var['age']     = tb_age[ind_max]
        global_var['gender']  = tb_gender[ind_max]
        global_var['emo']     = tb_emo[ind_max]

        chrome_server2client(clientId, 'DONE')
        time.sleep(0.5)

        return tb_age, tb_gender, tb_glasses, tb_emo
    else:
        return 'N/A','N/A','N/A','N/A'
def retrieve_face_emotion_att(clientId):

    global global_vars
    global_var = (item for item in global_vars if item["clientId"] == str(clientId)).next()
    data = global_var['binary_data']

    # Face API
    faceResult = face_api.faceDetect(None, None, data)

    # Emotion API
    emoResult = emotion_api.recognizeEmotion(None, None, data)

    # Results
    print 'Found {} '.format(len(faceResult)) + ('faces' if len(faceResult)!=1 else 'face')
    nb_faces = len(faceResult)
    tb_face_rect = [{} for ind in range(nb_faces)]
    tb_age       = ['' for ind in range(nb_faces)]
    tb_gender    = ['' for ind in range(nb_faces)]
    tb_glasses   = ['' for ind in range(nb_faces)]
    tb_emo       = ['' for ind in range(len(emoResult))]

    if (len(faceResult)>0 and len(emoResult)>0):
        ind = 0
        for currFace in faceResult:
            faceRectangle       = currFace['faceRectangle']
            faceAttributes      = currFace['faceAttributes']

            tb_face_rect[ind]   = faceRectangle
            tb_age[ind]         = str(faceAttributes['age'])
            tb_gender[ind]      = faceAttributes['gender']
            tb_glasses[ind]     = faceAttributes['glasses']
            ind += 1

        ind = 0
        for currFace in emoResult:
            tb_emo[ind] = max(currFace['scores'].iteritems(), key=operator.itemgetter(1))[0]
            ind += 1

        faceWidth  = np.zeros(shape=(nb_faces))
        faceHeight = np.zeros(shape=(nb_faces))
        for ind in range(nb_faces):
            faceWidth[ind]  = tb_face_rect[ind]['width']
            faceHeight[ind] = tb_face_rect[ind]['height']
        ind_max = np.argmax(faceWidth*faceHeight.T)

        global_var['age']     = tb_age[ind_max]
        global_var['gender']  = tb_gender[ind_max]
        global_var['emo']     = tb_emo[ind_max]

#        global_var['age']     = tb_age[0] # TODO: replace the first face by the biggest face (Done)
#        global_var['gender']  = tb_gender[0]
#        global_var['emo']     = tb_emo[0]

        return tb_age, tb_gender, tb_glasses, tb_emo
    else:
        return 'N/A','N/A','N/A','N/A'
def recognize_from_video():
    """Stream video in a background thread and periodically identify faces.

    Launches `streaming_video` on its own thread, then—for at most 20
    seconds, or until the module-level `key` equals 27 (ESC)—runs face
    detection + recognition on the captured frame every 4 seconds.
    """
    recog_interval = 4        # seconds between two identification attempts
    frame_file = 'video.jpg'  # frame image written by the streaming thread

    last_recog = time.time()  # reset after every identification attempt
    started_at = time.time()  # overall session clock

    video_thread = Thread(target=streaming_video, name='thread_video')
    video_thread.start()

    while (time.time() - started_at < 20) and (key != 27):
        if time.time() - last_recog > recog_interval:
            detected = face_api.faceDetect(None, frame_file, None)
            faceRecongize(detected)

            last_recog = time.time()  # reset timer
def retrieve_face_emotion_att(filename):

    # Face API
    faceResult = face_api.faceDetect(None, filename, None)

    # Emotion API
    emoResult = emotion_api.recognizeEmotion(None, filename, None)

    # Results
    print 'Found {} '.format(len(faceResult)) + ('faces' if len(faceResult)!=1 else 'face')
    nb_faces     = len(faceResult)
    tb_face_rect = [{} for ind in range(nb_faces)]
    tb_age       = ['' for ind in range(nb_faces)]
    tb_gender    = ['' for ind in range(nb_faces)]
    tb_glasses   = ['' for ind in range(nb_faces)]
    tb_emo       = ['' for ind in range(len(emoResult))]

    if (len(faceResult)>0 and len(emoResult)>0):
        ind = 0
        for currFace in faceResult:
            faceRectangle       = currFace['faceRectangle']
            faceAttributes      = currFace['faceAttributes']

            tb_face_rect[ind]   = faceRectangle
            tb_age[ind]         = str(faceAttributes['age'])
            tb_gender[ind]      = faceAttributes['gender']
            tb_glasses[ind]     = faceAttributes['glasses']
            ind += 1

        ind = 0
        for currFace in emoResult:
            tb_emo[ind] = max(currFace['scores'].iteritems(), key=operator.itemgetter(1))[0]
            ind += 1

        print 'Face index:', '\t', 'Age', '\t', 'Gender', '\t', 'Glasses', '\t', 'Emotion'
        for ind in range(nb_faces):
            print 'Face '+str(ind)+': ', '\t', tb_age[ind], '\t', tb_gender[ind], '\t', tb_glasses[ind], '\t', tb_emo[ind]
    return tb_age, tb_gender, tb_glasses, tb_emo    
def run_program(clientId):
    """Main per-client session loop (runs for at most 120 seconds).

    Asks the user's permission to stream video, then repeatedly identifies
    the face in the client's current frame against the trained person group
    and routes the user: confirm identity, re-identify, enroll by taking
    photos, or enter an id manually. Ends by calling quit_program.

    NOTE(review): reads module-level globals `wait_time`, `groupId`,
    `maxNbOfCandidates` and `xls_filename` that are not defined in this
    function — confirm they are set before this is called.
    """
    global global_vars
    # State record for this client (raises StopIteration for an unknown id).
    global_var = (item for item in global_vars if item["clientId"] == str(clientId)).next()

    # Ask the user's authorisation to begin streaming video.
    optin0 = allow_streaming_video(clientId)

    if (optin0 == 1):
        global_var['key'] = 0

        start_time   = time.time() # For recognition timer (will reset after each 3 secs)
        time_origine = time.time() # For display (unchanged)

        """
        Permanent loop
        """
        # One-shot flags: i → 'START' sent to the browser, j → 'DONE' sent.
        i = 0
        j = 0
        while (time.time() - time_origine < 120):
            # Latest frame bytes pushed by the streaming side.
            data = global_var['binary_data']

            """
            Decision part
            """
            if not (global_var['flag_quit']): #TODO: new
                elapsed_time = time.time() - start_time
                if ((elapsed_time > wait_time) and global_var['flag_enable_recog']): # Identify after each 3 seconds
                    faceDetectResult    = face_api.faceDetect(None, None, data)
                    # print faceDetectResult
                    if (len(faceDetectResult)>=1):
                        # Only the first detected face is identified.
                        new_faceId          = faceDetectResult[0]['faceId']
                        resultIdentify      = face_api.faceIdentify(groupId, [new_faceId], maxNbOfCandidates)

                        if (len(resultIdentify[0]['candidates'])>=1): # If the number of times recognized is big enough
                            global_var['flag_recog']  = 1 # Known Person
                            global_var['flag_ask']    = 0
                            recognizedPersonId  = resultIdentify[0]['candidates'][0]['personId']
                            conf                = resultIdentify[0]['candidates'][0]['confidence']
                            recognizedPerson    = face_api.getPerson(groupId, recognizedPersonId)
                            # NOTE(review): eval() on service-returned text is a
                            # code-injection risk — prefer json.loads on the raw
                            # response. Confirm the getPerson payload format
                            # before changing.
                            recognizedPerson    = recognizedPerson.replace('null','None')
                            recognizedPerson    = eval(recognizedPerson)

                            global_var['nom']   = recognizedPerson['name']
                            global_var['text']  = 'Reconnu : ' + global_var['nom'] + ' (confidence={})'.format(conf)

                            print global_var['text']

                            # Ask the user to confirm the identification,
                            # unless we are already in a re-identify pass.
                            if (not global_var['flag_reidentify']):
                                global_var['text2'] = "Appuyez [Y] si c'est bien vous"
                                global_var['text3'] = "Appuyez [N] si ce n'est pas vous"

                                res_verify_recog = verify_recog(clientId, global_var['nom'])
                                if (res_verify_recog==1):
                                    global_var['key'] = ord('y')
                                elif (res_verify_recog==0):
                                    global_var['key'] = ord('n')

                        else: # If the number of times recognized anyone from database is too low
                            global_var['flag_recog'] = 0 # Unknown Person
                            global_var['nom']   = '@' # '@' is for unknown person
                            global_var['text']  = 'Personne inconnue'
                            global_var['text2'] = ''
                            global_var['text3'] = ''

                            if (not global_var['flag_reidentify']):
                                global_var['flag_ask'] = 1
                                simple_message(clientId, u'Désolé, je ne vous reconnaît pas')
                                time.sleep(0.25)
                    else:
                        # No face at all in the frame.
                        global_var['flag_recog'] = -1
                        global_var['text']  = 'Aucune personne'
                        global_var['text2'] = ''
                        global_var['text3'] = ''

                    start_time = time.time()  # reset timer

                """
                Redirecting user based on recognition result and user's status (already took photos or not) in database
                """
                count_time = time.time() - time_origine
                if (count_time <= wait_time):
                    # Warm-up window: announce start once, allow early quit.
                    global_var['text3'] = 'Initialisation (pret dans ' + str(wait_time-count_time)[0:4] + ' secondes)...'
                    if i==0:
                        chrome_server2client(clientId, 'START')
                        i=1
                    if (global_var['flag_quit']):
                        break
                else:
                    """
                    Start Redirecting after the first 1.5 seconds
                    """
                    if j==0:
                        chrome_server2client(clientId, 'DONE')
                        j=1
                    if (global_var['flag_quit']):
                        break
                    if (global_var['flag_recog']==1):
                        if (global_var['key']==ord('y') or global_var['key']==ord('Y')): # User chooses Y to go to Formation page
                            global_var['flag_wrong_recog']  = 0
                            get_face_emotion_api_results(clientId)
                            go_to_formation(clientId, xls_filename, global_var['nom'])

                            global_var['key'] = 0

                        if (global_var['key']==ord('n') or global_var['key']==ord('N')): # User confirms that the recognition result is wrong by choosing N
                            global_var['flag_wrong_recog'] = 1
                            global_var['flag_ask'] = 1
                            global_var['key'] = 0


                    if ((global_var['flag_recog']==1 and global_var['flag_wrong_recog']==1) or (global_var['flag_recog']==0)): # Not recognized or not correctly recognized
                        if (global_var['flag_ask']==1):# and (not flag_quit)):
                            resp_deja_photos = deja_photos(clientId) # Ask user if he has already had a database of face photos
                            print 'resp_deja_photos = ', resp_deja_photos
                            # if (resp_deja_photos==-1):
                            #     global_var['flag_ask'] = 0

                            if (resp_deja_photos==1): # User has a database of photos
                                global_var['flag_enable_recog'] = 0 # Disable recognition in order not to recognize while re-identifying
                                global_var['flag_ask'] = 0

                                name0 = global_var['nom']   # Save the recognition result, which is wrong, in order to compare later
                                nb_time_max = 2             # Number of times to retry recognize

                                # Re-identification runs on its own thread so the
                                # main loop keeps servicing the client.
                                thread_reidentification = Thread(target = re_identification, args = (clientId, nb_time_max, name0), name = 'thread_reidentification_'+clientId)
                                thread_reidentification.start()

                            elif (resp_deja_photos == 0): # User doesnt have a database of photos

                                global_var['flag_enable_recog'] = 0 # Disable recognition in order not to recognize while taking photos
                                resp_allow_take_photos = allow_take_photos(clientId)

                                if (resp_allow_take_photos==1): # User allows to take photos
                                    global_var['flag_take_photo'] = 1  # Enable photo taking

                                else: # User doesnt want to take photos
                                    global_var['flag_take_photo'] = 0
                                    res = allow_go_to_formation_by_id(clientId)
                                    if (res==1): # User agrees to go to Formation in providing his id manually
                                        name = ask_name(clientId, 1)
                                        go_to_formation(clientId, xls_filename, name)

                                    else: # Quit if user refuses to provide manually his id (after all other functionalities)
                                        break

                                resp_allow_take_photos = 0
                            resp_deja_photos = 0
                        global_var['flag_ask'] = 0

                    if (global_var['flag_take_photo']==1):# and (not flag_quit)):
                        step_time  = 1 # Interval of time (in second) between two times of taking photo

                        # Photo capture also runs on its own thread.
                        thread_take_photo = Thread(target = take_photos, args = (clientId, step_time, 1), name = 'thread_take_photo_'+clientId)
                        thread_take_photo.start()

                        global_var['flag_take_photo'] = 0

                    """
                    Call Face API and Emotion API, and display
                    """
                    if (global_var['key']==ord('i') or global_var['key']==ord('I')):
                        retrieve_face_emotion_att(clientId)
                        global_var['key'] = 0
        """
        End of While-loop
        """
    """
    Exit the program
    """
    quit_program(clientId)
    # NOTE(review): the three assignments below run after quit_program and
    # only bind locals that are never read here, yet the module-level code
    # at the bottom of the file reads flag_video/imgTest as globals — these
    # lines look mis-indented leftover script code; confirm and fix.
    imgPath = "face_database_for_oxford/" # path to database of faces
    imgTest = 'test.png'
    flag_video = False

# Parameters
maxNbOfCandidates = 1 # Maximum number of candidates for the identification

# Training Phase
groupId     = "group_all"    # person-group id used by the Face API calls
groupName   = "employeurs"   # display name of the group

list_nom = []
list_personId = []
nbr = 0

# Build the person group from the face database, then train it.
create_group_add_person(groupId, groupName)

result = train_person_group(groupId)


# NOTE(review): in this file flag_video and imgTest are only assigned
# inside run_program (as locals) — this module-level check assumes they
# are defined somewhere earlier; confirm before running.
if (not flag_video):
    faceDetectResult    = face_api.faceDetect(None, imgTest, None)
    faceRecongize(faceDetectResult)
    
else:
    text, x0, y0, w0, h0 = '', 0, 0, 0, 0
    key = 0  # global ESC flag read by recognize_from_video()
    recognize_from_video()