Example #1
File: utils.py Project: crs4/ACTIVE
def __recognize_instance(auth_params, func_params):
    """
    This script has been defined in order to recognize
    face instances that are saved into the database
    according to existent models
    """
    try:
        token = auth_params.get('token', '1234')
        instance_id = func_params["id"]
        instance_path = os.path.join(
            get_media_root(), func_params["features"])

        # Get available models
        model_type = 'video'
        models = tsm.get_models(model_type, token)

        # Create dictionary with models
        models_list = []
        for model in models:
            model_id = model['id']
            model_file = os.path.join(
                get_media_root(), model['model_file'])
            model_dict = {c.MODEL_ID_KEY: model_id,
                          c.MODEL_FILE_KEY: model_file,
                          }
            models_list.append(model_dict)

        fm = FaceModels(models_list)

        # Recognize given instance
        face = cv2.imread(instance_path, cv2.IMREAD_GRAYSCALE)
        (model_id, conf) = fm.recognize_face(face)

        # update the instance reference if recognized
        if model_id != c.UNDEFINED_LABEL:
            edit_instance(instance_id, model_id=model_id, token=token)
            return 'Instance ' + str(instance_id) + ' associated to model ' + str(model_id)
        return 'Instance ' + str(instance_id) + ' not recognized by any model'

        # TODO change the dynamic tags to automatically associate the person?

    except Exception as e:
        print e
        return 'Error on instance recognition'
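
A minimal usage sketch, called from within utils.py, assuming the two dictionaries follow the shape read above; the token value and the 'features' path are hypothetical placeholders:

auth_params = {'token': 'abc123'}                     # hypothetical auth token
func_params = {'id': 42,                              # id of the instance in the database
               'features': 'instances/42_face.png'}   # path relative to the media root

print __recognize_instance(auth_params, func_params)
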
Example #2
File: utils.py Project: crs4/ACTIVE
def __recognize_instance(auth_params, func_params):
    """
    Funzione utilizzata per applicare i modelli esistenti alle nuove istanze di
    tipo audio che vengono salvate nel database.
    Il modello con il punteggio di riconoscimento piu' alto viene assegnato
    all'istanza considerata (l'id della persona associata al modello)
    """
    try:
        # extract all needed parameters
        token         = auth_params.get('token')
        instance_id   = func_params["id"]
        instance_path = os.path.join(get_media_root(), func_params["features"])
        models = get_models('audio', token=token)

        # copy the models into a temporary folder

        # start the identification of the instance
        model_id = identification() # needs the local settings file (rebuild it?)

        # extract model parameters
        # (assumes the id returned by identification() matches one of the models)
        model = next((m for m in models if m['id'] == model_id), None)
        if model is not None:
            model_path = os.path.join(get_media_root(), model['model_file'])
            entity_id  = model['entity']
            print 'Comparing model ' + str(model_id) + ' with instance ' + str(instance_id)

        # update the instance reference if recognized
        if model_id is not None:
            edit_instance(instance_id, model_id=model_id, token=token)
            return 'Instance ' + str(instance_id) + ' associated to model ' + str(model_id)
        return 'Instance ' + str(instance_id) + ' not recognized by any model'

        # TODO change the dynamic tags to automatically associate the person

    except Exception as e:
        print e
        return 'Error on instance recognition'
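
The body above is clearly work in progress: the per-model comparison loop that the docstring describes is not implemented. A minimal sketch of that loop, assuming a hypothetical compare(model_path, instance_path) helper that returns a recognition score (higher is better):

def __pick_best_model(models, instance_path):
    """
    Return the id of the model with the highest recognition score
    for the given instance, or None if no model matches.
    """
    best_id, best_score = None, float('-inf')
    for model in models:
        model_path = os.path.join(get_media_root(), model['model_file'])
        score = compare(model_path, instance_path)  # hypothetical scoring helper
        if score > best_score:
            best_id, best_score = model['id'], score
    return best_id
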
Example #3
File: utils.py Project: crs4/ACTIVE
def __generate_instances(auth_params, func_params):
    """
    @param auth_params: Input parameters of the function
                        that generate this function call
    @param func_params: Output parameters of the function
                        that generate this function call
    """
    
    file_path = os.path.join(get_media_root(), func_params['file'])
    item_id = func_params['id']

    # remove existing tags (and dynamic tags) for the item
    tags = get_tags_by_item(item_id, auth_params['token'])
    for tag in tags:
        if tag['type'] == 'face':
            remove_tag(tag['id'], auth_params['token'])


    # extract faces from video and save metadata on filesystem

    # Get available models
    model_type = 'video'
    models = tsm.get_models(model_type, auth_params['token'])

    # Create dictionary with models
    models_list = []
    for model in models:
        model_id = model['id']
        model_file = os.path.join(
            get_media_root(), model['model_file'])
        entity_id = model['entity']
        person = get_person(entity_id, auth_params['token'])
        name = person['first_name']
        surname = person['last_name']
        tag = surname + c.TAG_SEP + name

        model_dict = {c.MODEL_ID_KEY: model_id,
                      c.MODEL_FILE_KEY: model_file,
                      c.TAG_KEY: tag
                      }
        models_list.append(model_dict)

    fe = VideoFaceExtractor(file_path, str(item_id), models_list)

    fe.analyze_video()

    set_status(item_id, "FACE_RECOG", auth_params['token'])

    people = fe.get_people()
    
    uniform_tag_ids_arr = []
    # retrieve dynamic tags and save them on ACTIVE core
    for person_dict in people:

        #print "Tag assegnato al cluster", person_dict['assigned_tag']

        #~ # update the image for the person
        #~ image_path = os.path.join(get_media_root(),'items',
                                  #~ str(item_id), 'Face extraction',
                                  #~ 'Face recognition', 'Key frames',
                                  #~ person_dict[c.KEYFRAME_NAME_KEY])
        #~ set_image(person_id, image_path, 'image/png')

        # check if the person has been recognized
        model_id = person_dict[c.ASSIGNED_LABEL_KEY]
        trusted = False
        instance_id = None
        if model_id == c.UNDEFINED_LABEL:
            print "Creata una nuova persona"
            person = create_person(
                "Unknown", str(func_params['id']) + '_' +
                str(person_dict['person_counter']),
                auth_params['token'])
            person_id = person['id']
            # Create a model for the unknown instance
            model = tsm.create_model(
                person_id, 'video',
                person['first_name'] + ' ' + person['last_name'],
                token=auth_params['token'])
            instance = tsm.create_instance(
                model_type, False, model_id=model['id'],
                token=auth_params['token'])
        else:
            # Create model instance
            instance = tsm.create_instance(
                model_type, trusted, model_id=model_id,
                token=auth_params['token'])
            model = tsm.get_model(model_id)
            person_id = model['entity']

        # update the image for the person
        image_path = os.path.join(fe.rec_path,
                                  c.FACE_RECOGNITION_KEY_FRAMES_DIR,
                                  person_dict[c.KEYFRAME_NAME_KEY])
        set_image(person_id, image_path, 'image/png', auth_params['token'])
        tsm.set_instance_thumbnail(
            instance['id'], image_path, token=auth_params['token'])

        # Get aligned face and set it as instance feature
        print person_dict.keys()
        aligned_face_path = os.path.join(fe.align_path, person_dict[c.MEDOID_ALIGNED_FACE_KEY])
        tsm.set_instance_feature(instance['id'], aligned_face_path, token=auth_params['token'])

        # TODO DELETE?
        # else:
        #     # Find id person by name and surname
        #     tag_parts = person_id.split(c.TAG_SEP)
        #     surname = tag_parts[0]
        #     name = tag_parts[1]
        #     person = create_person(name, surname, auth_params['token'])

        #person_id = person['id']

        #~ if person['image'] == "unknown_user.png":
            #~ set_image(person_id, image_path, 'image/png')


        # create a tag for user name
        #createTagKeyword(item_id, person['first_name'], person['last_name'])

        # create a tag (occurrence of a person in a digital item)
        tag = create_tag(item_id, person_id, "face", auth_params['token'])
        #create audio+video tag
        #uniform_tag = create_tag(item_id, person_id, "face+speaker", auth_params['token'])
        #uniform_tag_ids_arr.append(uniform_tag['id'])

        for segment in person_dict[c.SEGMENTS_KEY]:
            start = segment[c.SEGMENT_START_KEY]
            duration = segment[c.SEGMENT_DURATION_KEY]
            bbox_x, bbox_y, width, height = segment[c.FRAMES_KEY][0][c.DETECTION_BBOX_KEY]

            create_dtag(tag['id'], int(start), int(duration), bbox_x, bbox_y,
                        width, height, auth_params['token'])
    
    """    
    item_status = get_status(item_id, token)
    if "SPEAKER_RECOG" in item_status['status']:       
        #create dtags for audio+video tag
        for u_tag_id in uniform_tag_ids_arr:
            create_uniform_dtags(item_id, u_tag_id, auth_params['token'])
    """
    create_uniform_dtags(item_id, auth_params['token'])
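
For reference, a sketch of the entries that fe.get_people() is assumed to return, reconstructed only from the keys the loop above reads; every literal value is a hypothetical placeholder and the key constants come from the same c module used above:

person_dict = {
    c.ASSIGNED_LABEL_KEY: 7,                     # matched model id, or c.UNDEFINED_LABEL
    'person_counter': 0,                         # progressive counter for unknown people
    c.KEYFRAME_NAME_KEY: 'keyframe_0.png',       # key frame image file name
    c.MEDOID_ALIGNED_FACE_KEY: 'aligned_0.png',  # aligned face image file name
    c.SEGMENTS_KEY: [
        {c.SEGMENT_START_KEY: 0,                 # segment start, as expected by create_dtag
         c.SEGMENT_DURATION_KEY: 1500,           # segment duration, same unit as start
         c.FRAMES_KEY: [
             {c.DETECTION_BBOX_KEY: (10, 20, 64, 64)}  # bbox_x, bbox_y, width, height
         ]},
    ],
}
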