def extract_metadata(auth_params, func_params):
    """
    Extract EXIF metadata from a digital item and save it on the ACTIVE core.

    :param auth_params: Authentication parameters; must contain 'token'.
    :param func_params: Function parameters; must contain 'file' (path
                        relative to the media root) and 'id' (item id).
    :raises Exception: If saving the metadata or updating the processing
                       status on the ACTIVE core fails.
    """
    file_path = os.path.join(get_media_root(), func_params['file'])
    item_info = get_exif_metadata(file_path)
    token = auth_params['token']
    item_id = func_params['id']
    if not set_metadata(item_id, item_info, token):
        raise Exception('Error on metadata update')
    # Mark the item as analyzed only after the metadata has been stored.
    if not set_status(item_id, 'ANALYZED', token):
        raise Exception('Error on processing status update')
    # Parenthesized single-argument print works identically on Python 2 and 3.
    print('Extracted and saved metadata for digital item %s' % item_id)
dtag=create_dtag(tag["id"], st*10, dur*10, token=token) print "dtag ",dtag uniform_tag = create_tag(id_item, id_persona, "face+speaker", token) print 'uniform tag', uniform_tag uniform_tag_ids_arr.append(uniform_tag['id']) # update the instance with the model id print 'instance, model', inst, model #edit_instance(inst['id'], model_id=model['id'], token=token) print 'ascallo' except Exception, e: print e set_status(id_item,"SPEAKER_RECOG", token) """ item_status = get_status(id_item, token) if "FACE_RECOG" in item_status['status']: for u_tag_id in uniform_tag_ids_arr: create_uniform_dtags(id_item, uniform_tag_id, token) """ create_uniform_dtags(id_item, token) print "***** PLUGIN SPEAKER RECOGNITION: POST DI ESEMPIO ---> STOP" """ r_json=create_dtag(id_persona,"0","60000", token) set_status(id_item,"SPEAKER_RECOG", token) """ def split4diarization(orig,start,duration, dest):
def __get_face_models(token):
    """
    Return the list of available video face models for the recognizer.

    Each entry is a dict with the model id, the absolute path of the model
    file and a "<surname><sep><name>" tag for the associated person.

    :param token: Authentication token used for the ACTIVE core requests.
    """
    models_list = []
    for model in tsm.get_models('video', token):
        person = get_person(model['entity'], token)
        tag = person['last_name'] + c.TAG_SEP + person['first_name']
        models_list.append({
            c.MODEL_ID_KEY: model['id'],
            c.MODEL_FILE_KEY: os.path.join(get_media_root(),
                                           model['model_file']),
            c.TAG_KEY: tag,
        })
    return models_list


def __generate_instances(auth_params, func_params):
    """
    Run face recognition on a video item and save the results on ACTIVE core.

    Removes the item's existing face tags, analyzes the video against the
    available face models, then — for every detected person — creates (or
    reuses) a model instance, updates images/thumbnails/features, and creates
    a "face" tag with one dynamic tag per segment the person appears in.

    :param auth_params: Input parameters of the generating call; must
                        contain 'token'.
    :param func_params: Output parameters of the generating call; must
                        contain 'file' and 'id'.
    """
    token = auth_params['token']
    file_path = os.path.join(get_media_root(), func_params['file'])
    item_id = func_params['id']

    # Remove existing face tags (and their dynamic tags) for the item.
    for tag in get_tags_by_item(item_id, token):
        if tag['type'] == 'face':
            remove_tag(tag['id'], token)

    model_type = 'video'
    models_list = __get_face_models(token)

    # Extract faces from the video and save metadata on the filesystem.
    fe = VideoFaceExtractor(file_path, str(item_id), models_list)
    fe.analyze_video()
    set_status(item_id, "FACE_RECOG", token)

    # Save every recognized (or unknown) person on the ACTIVE core.
    for person_dict in fe.get_people():
        model_id = person_dict[c.ASSIGNED_LABEL_KEY]
        if model_id == c.UNDEFINED_LABEL:
            # Cluster was not recognized: create a brand new person, a new
            # video model for it and an (untrusted) instance of that model.
            person = create_person(
                "Unknown",
                str(func_params['id']) + '_' +
                str(person_dict['person_counter']),
                token)
            person_id = person['id']
            model = tsm.create_model(
                person_id, 'video',
                person['first_name'] + ' ' + person['last_name'],
                token=token)
            instance = tsm.create_instance(
                model_type, False, model_id=model['id'], token=token)
        else:
            # Known model: attach a new untrusted instance to it and look up
            # the person the model belongs to.
            instance = tsm.create_instance(
                model_type, False, model_id=model_id, token=token)
            model = tsm.get_model(model_id)
            person_id = model['entity']

        # Update the person image and the instance thumbnail with the
        # recognition key frame.
        image_path = os.path.join(fe.rec_path,
                                  c.FACE_RECOGNITION_KEY_FRAMES_DIR,
                                  person_dict[c.KEYFRAME_NAME_KEY])
        set_image(person_id, image_path, 'image/png', token)
        tsm.set_instance_thumbnail(instance['id'], image_path, token=token)

        # Use the medoid aligned face as the instance feature.
        aligned_face_path = os.path.join(
            fe.align_path, person_dict[c.MEDOID_ALIGNED_FACE_KEY])
        tsm.set_instance_feature(instance['id'], aligned_face_path,
                                 token=token)

        # Create a tag (occurrence of a person in a digital item) and one
        # dynamic tag per video segment in which the person appears, using
        # the bounding box of the segment's first frame.
        tag = create_tag(item_id, person_id, "face", token)
        for segment in person_dict[c.SEGMENTS_KEY]:
            bbox_x, bbox_y, width, height = \
                segment[c.FRAMES_KEY][0][c.DETECTION_BBOX_KEY]
            create_dtag(tag['id'],
                        int(segment[c.SEGMENT_START_KEY]),
                        int(segment[c.SEGMENT_DURATION_KEY]),
                        bbox_x, bbox_y, width, height, token)

    # Merge face and speaker information into uniform dynamic tags.
    create_uniform_dtags(item_id, token)