def faceRecongize(faceDetectResult):
  
    # Shared display state: overlay text plus the detected face rectangle (left, top, width, height)
    global text, x0, y0, w0, h0
    if (len(faceDetectResult)>=1):
        faceRectangle  = faceDetectResult[0]['faceRectangle']    
        x0, y0, w0, h0 = faceRectangle['left'], faceRectangle['top'], faceRectangle['width'], faceRectangle['height']
        new_faceId     = faceDetectResult[0]['faceId']
        resultIdentify = face_api.faceIdentify(groupId, [new_faceId], maxNbOfCandidates)

        if (len(resultIdentify[0]['candidates'])>=1):

            recognizedPersonId   = resultIdentify[0]['candidates'][0]['personId']
            recognizedConfidence = resultIdentify[0]['candidates'][0]['confidence']
            # getPerson() returns the person record as a JSON string; map JSON 'null' to Python None so eval() can parse it
            recognizedPerson     = face_api.getPerson(groupId, recognizedPersonId)
            recognizedPerson     = recognizedPerson.replace('null','None')
            recognizedPerson     = eval(recognizedPerson)
            name_predict         = recognizedPerson['name']

            text = "Recognized: %s (confidence=%.2f)" % (name_predict, recognizedConfidence)
            print text
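
# --- Usage sketch (illustrative, not part of the original flow) ---
# A minimal way to drive faceRecongize() on a single frame, assuming the raw
# image bytes come from a JPEG file; the helper name and 'frame.jpg' are
# placeholders. It reuses the same face_api.faceDetect(None, None, data)
# call that run_program() makes below.
def demo_face_recognition(image_path='frame.jpg'):
    with open(image_path, 'rb') as f:
        data = f.read()                                    # raw image bytes, as expected by faceDetect
    faceDetectResult = face_api.faceDetect(None, None, data)
    faceRecongize(faceDetectResult)                        # identifies the first detected face and sets the overlay text
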
def run_program(clientId):

    global global_vars
    global_var = (item for item in global_vars if item["clientId"] == str(clientId)).next()
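    # Per-client state dictionary (keys as used below):
    #   'binary_data'       : latest video frame (raw image bytes) streamed by the client
    #   'key'               : simulated keypress: 0, or ord('y') / ord('n') / ord('i')
    #   'flag_quit'         : set when the client asks to stop
    #   'flag_enable_recog' : recognition is paused (0) while taking photos or re-identifying
    #   'flag_recog'        : 1 = known person, 0 = unknown person, -1 = no face detected
    #   'flag_ask'          : ask the user whether he already has photos in the database
    #   'flag_reidentify'   : a re-identification thread is running
    #   'flag_wrong_recog'  : the user rejected the recognition result
    #   'flag_take_photo'   : triggers the photo-taking thread
    #   'nom', 'text', 'text2', 'text3' : recognized name and messages shown to the client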

    # Authorization to begin streaming video
    optin0 = allow_streaming_video(clientId)

    if (optin0 == 1):
        global_var['key'] = 0

        start_time   = time.time() # Recognition timer (reset after every identification attempt)
        time_origine = time.time() # Session start time (never reset); used for the initialisation countdown and the 120 s loop limit

        """
        Main loop (runs for at most 120 seconds)
        """
        i = 0
        j = 0
        while (time.time() - time_origine < 120):
            data = global_var['binary_data']

            """
            Decision part
            """
            if not (global_var['flag_quit']): #TODO: new
                elapsed_time = time.time() - start_time
                if ((elapsed_time > wait_time) and global_var['flag_enable_recog']): # Identify once every wait_time seconds
                    faceDetectResult    = face_api.faceDetect(None, None, data)
                    # print faceDetectResult
                    if (len(faceDetectResult)>=1):
                        new_faceId          = faceDetectResult[0]['faceId']
                        resultIdentify      = face_api.faceIdentify(groupId, [new_faceId], maxNbOfCandidates)

                        if (len(resultIdentify[0]['candidates'])>=1): # At least one candidate matched this face
                            global_var['flag_recog']  = 1 # Known Person
                            global_var['flag_ask']    = 0
                            recognizedPersonId  = resultIdentify[0]['candidates'][0]['personId']
                            conf                = resultIdentify[0]['candidates'][0]['confidence']
                            # getPerson() returns the person record as a JSON string
                            recognizedPerson    = face_api.getPerson(groupId, recognizedPersonId)
                            recognizedPerson    = recognizedPerson.replace('null','None')
                            recognizedPerson    = eval(recognizedPerson)

                            global_var['nom']   = recognizedPerson['name']
                            global_var['text']  = 'Reconnu : ' + global_var['nom'] + ' (confidence={})'.format(conf)

                            print global_var['text']

                            if (not global_var['flag_reidentify']):
                                global_var['text2'] = "Appuyez [Y] si c'est bien vous"
                                global_var['text3'] = "Appuyez [N] si ce n'est pas vous"

                                res_verify_recog = verify_recog(clientId, global_var['nom'])
                                if (res_verify_recog==1):
                                    global_var['key'] = ord('y')
                                elif (res_verify_recog==0):
                                    global_var['key'] = ord('n')

                        else: # No candidate in the database matched this face
                            global_var['flag_recog'] = 0 # Unknown Person
                            global_var['nom']   = '@' # '@' is for unknown person
                            global_var['text']  = 'Personne inconnue'
                            global_var['text2'] = ''
                            global_var['text3'] = ''

                            if (not global_var['flag_reidentify']):
                                global_var['flag_ask'] = 1
                                simple_message(clientId, u'Désolé, je ne vous reconnais pas')
                                time.sleep(0.25)
                    else:
                        global_var['flag_recog'] = -1
                        global_var['text']  = 'Aucune personne'
                        global_var['text2'] = ''
                        global_var['text3'] = ''

                    start_time = time.time()  # reset timer

                """
                Redirect the user according to the recognition result and whether he already has photos in the database
                """
                count_time = time.time() - time_origine
                if (count_time <= wait_time):
                    global_var['text3'] = 'Initialisation (prêt dans ' + str(wait_time-count_time)[0:4] + ' secondes)...'
                    if i==0:
                        chrome_server2client(clientId, 'START')
                        i=1
                    if (global_var['flag_quit']):
                        break
                else:
                    """
                    Start redirecting once the initialisation delay (wait_time) has elapsed
                    """
                    if j==0:
                        chrome_server2client(clientId, 'DONE')
                        j=1
                    if (global_var['flag_quit']):
                        break
                    if (global_var['flag_recog']==1):
                        if (global_var['key']==ord('y') or global_var['key']==ord('Y')): # User chooses Y to go to Formation page
                            global_var['flag_wrong_recog']  = 0
                            get_face_emotion_api_results(clientId)
                            go_to_formation(clientId, xls_filename, global_var['nom'])

                            global_var['key'] = 0

                        if (global_var['key']==ord('n') or global_var['key']==ord('N')): # User confirms that the recognition result is wrong by choosing N
                            global_var['flag_wrong_recog'] = 1
                            global_var['flag_ask'] = 1
                            global_var['key'] = 0


                    if ((global_var['flag_recog']==1 and global_var['flag_wrong_recog']==1) or (global_var['flag_recog']==0)): # Not recognized or not correctly recognized
                        if (global_var['flag_ask']==1):# and (not flag_quit)):
                            resp_deja_photos = deja_photos(clientId) # Ask the user whether he already has a database of face photos
                            print 'resp_deja_photos = ', resp_deja_photos
                            # if (resp_deja_photos==-1):
                            #     global_var['flag_ask'] = 0

                            if (resp_deja_photos==1): # User has a database of photos
                                global_var['flag_enable_recog'] = 0 # Disable recognition while re-identification is in progress
                                global_var['flag_ask'] = 0

                                name0 = global_var['nom']   # Save the rejected recognition result for later comparison
                                nb_time_max = 2             # Maximum number of re-identification attempts

                                thread_reidentification = Thread(target = re_identification, args = (clientId, nb_time_max, name0), name = 'thread_reidentification_'+clientId)
                                thread_reidentification.start()

                            elif (resp_deja_photos == 0): # User doesn't have a database of photos

                                global_var['flag_enable_recog'] = 0 # Disable recognition while photos are being taken
                                resp_allow_take_photos = allow_take_photos(clientId)

                                if (resp_allow_take_photos==1): # User agrees to have his photos taken
                                    global_var['flag_take_photo'] = 1  # Enable photo taking

                                else: # User doesn't want photos taken
                                    global_var['flag_take_photo'] = 0
                                    res = allow_go_to_formation_by_id(clientId)
                                    if (res==1): # User agrees to go to the Formation page by providing his ID manually
                                        name = ask_name(clientId, 1)
                                        go_to_formation(clientId, xls_filename, name)

                                    else: # Quit if the user also refuses to provide his ID manually
                                        break

                                resp_allow_take_photos = 0
                            resp_deja_photos = 0
                        global_var['flag_ask'] = 0

                    if (global_var['flag_take_photo']==1):# and (not flag_quit)):
                        step_time  = 1 # Interval in seconds between two photo captures

                        thread_take_photo = Thread(target = take_photos, args = (clientId, step_time, 1), name = 'thread_take_photo_'+clientId)
                        thread_take_photo.start()

                        global_var['flag_take_photo'] = 0

                    """
                    Call Face API and Emotion API, and display
                    """
                    if (global_var['key']==ord('i') or global_var['key']==ord('I')):
                        retrieve_face_emotion_att(clientId)
                        global_var['key'] = 0
        """
        End of While-loop
        """
    """
    Exit the program
    """
    quit_program(clientId)
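
# --- Launch sketch (illustrative assumption, not part of the original file) ---
# run_program() blocks for up to 120 seconds per client, so one plausible way to
# serve several clients is one thread per clientId, mirroring how this file
# already uses Thread for re_identification and take_photos. The helper name
# start_client_session is an assumption; it also assumes the matching
# global_vars entry ('binary_data', the flags, etc.) has been registered
# elsewhere before the session starts.
def start_client_session(clientId):
    t = Thread(target=run_program, args=(clientId,),
               name='thread_run_program_' + str(clientId))
    t.start()
    return t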