def job(callback_code, **kwargs):
    """Log the identification-run job settings and publish them for the callback."""
    IdentificationRunProcess._job_logger_info(
        IDENTIFICATION_RUN_PROCESS_CLASS_NAME, **kwargs)
    result_key = REDIS_DO_NOT_STORE_RESULT_KEY % callback_code
    AlgorithmsDataStore.instance().store_job_result(
        record_key=result_key,
        record_dict=kwargs,
        callback_code=callback_code)
def job(callback_code, **kwargs):
    """
    Job function that prepares data for updating the identification hash.

    :param callback_code: callback function identifier
    :param kwargs: settings dictionary: {
            'algoID': algorithm identifier string,
            'userID': user identifier string,
            'general_data': {
                'ai_code': AI code string,
                'data_path': image data path,
                'probe_id': probe identifier string,
                'try_type': try type string
            },
            'temp_data_path': temporary data path,
            'ended': matching ending status,
            'clusters_list': list of optional keys,
            <optional key>: descriptor list,
            ...
            <optional key>: descriptor list
        }
    """
    InitIdentificationUpdateProcess._job_logger_info(
        INIT_IDENTIFICATION_UPDATE_PROCESS_CLASS_NAME, **kwargs)
    prepared_record = InitIdentificationUpdateProcess.process(**kwargs)
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=prepared_record,
        callback_code=callback_code)
def job(callback_code, **kwargs):
    """Run training preparation; publish the prepared settings when available."""
    TrainingStartProcess._job_logger_info(
        TRAINING_START_PROCESS_CLASS_NAME, **kwargs)
    prepared = pre_training_helper(callback_code=callback_code, **kwargs)
    if prepared is None:
        # Preparation failed; the helper already reported the outcome.
        return
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=prepared,
        callback_code=callback_code)
def job(callback_code, **kwargs):
    """Run identification preparation; publish the prepared settings when available."""
    IdentificationStartProcess._job_logger_info(
        IDENTIFICATION_START_PROCESS_CLASS_NAME, **kwargs)
    prepared = pre_identification_helper(callback_code=callback_code, **kwargs)
    if prepared is None:
        # Preparation was interrupted or failed; nothing to publish.
        return
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=prepared,
        callback_code=callback_code)
def handler(self, result):
    """Consume a lookup result, collect the candidate user ids, and launch the identification run."""
    self._handler_logger_info(result)
    if result is None:
        return
    store = AlgorithmsDataStore.instance()
    ident_data = store.get_data(self._data_redis_key)
    # The staged data is one-shot; drop it once consumed.
    store.delete_data(self._data_redis_key)
    users_list = []
    for record in result['records']:
        users_list.append(str(record['user_id']))
    ident_data.update({'users': users_list})
    self._ident_run_process.run(self._worker, **ident_data)
def job(callback_code, **kwargs):
    """
    Job function that picks the best-scoring identification candidate.

    Scans kwargs['candidates_score'] (mapping candidate key -> vote count)
    for the key with the highest strictly positive count; when no candidate
    scored above zero, an error message string is stored instead.

    :param callback_code: callback function identifier
    :param kwargs: settings dictionary containing a 'candidates_score' mapping
    """
    FinalIdentificationProcess._job_logger_info(
        FINAL_IDENTIFICATION_PROCESS_CLASS_NAME, **kwargs)
    best_key = None
    best_count = 0
    # Fixed: .items() instead of the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3. Semantics are unchanged.
    for key, count in kwargs['candidates_score'].items():
        if count > best_count:
            best_count = count
            best_key = key
    record = {
        'result': best_key if best_key is not None
        else "FaceIdentificationError: any userID not found."
    }
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=record,
        callback_code=callback_code)
def pre_training_helper(images, probe_id, settings, callback_code, try_type,
                        ai_code):
    """
    Prepare image data and settings for a training run.

    Saves the incoming images to a fresh temporary directory under APP_ROOT,
    notifies the AI service (best-effort) that training has started, and
    augments ``settings`` in place with the data paths and general metadata.

    :param images: image byte arrays to persist for training
    :param probe_id: probe identifier string
    :param settings: mutable settings dictionary (updated in place)
    :param callback_code: callback function identifier
    :param try_type: try type string
    :param ai_code: AI code string
    :return: the updated ``settings`` dictionary, or None when saving the
        images failed (``final_helper`` is invoked to report the failure)
    """
    logger.info('Running training for user - %s, with given parameters - %s' %
                (settings.get('userID'), settings))
    ai_response_type = dict()
    try:
        # Best-effort notification; a failure here must not abort training.
        logger.info('Telling AI that we are starting training with code - %s' %
                    ai_code)
        ai_response_type.update({
            'status': TRAINING_STARTED_STATUS,
            'message': TRAINING_STARTED_MESSAGE
        })
        ai_response_sender(ai_code, ai_response_type)
    except Exception as e:
        logger.error('Failed to build rest request to AI - %s' % str(e))
        logger.exception(e)
    # Pre-set the response type to "success"; the failure path below
    # (final_helper) overrides it as needed.
    # NOTE(review): indentation reconstructed — confirm this update is
    # outside the except block above.
    ai_response_type.update({
        'status': TRAINING_SUCCESS_STATUS,
        'message': TRAINING_SUCCESS_MESSAGE
    })
    result = False
    error = None
    if AlgorithmsDataStore.instance().exists(key=REDIS_UPDATE_TRAINING_KEY %
                                             probe_id):
        # A previous attempt left an update marker; reuse its database and
        # clear the marker.
        settings.update({'database': get_algo_db(probe_id=probe_id)})
        AlgorithmsDataStore.instance().delete_data(
            key=REDIS_UPDATE_TRAINING_KEY % probe_id)
    temp_image_path = tempfile.mkdtemp(dir=APP_ROOT)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(APP_ROOT, image_paths)
        settings.update({'data': image_paths})
        settings.update({
            'general_data': {
                'data_path': temp_image_path,
                'ai_code': ai_code,
                'try_type': try_type,
                'probe_id': probe_id
            }
        })
        return settings
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt — consider narrowing to Exception.
        final_helper(temp_image_path, probe_id, error, callback_code, result,
                     ai_response_type, try_type, ai_code)
        return None
def job(callback_code, **kwargs):
    """Stage identification input data in redis and publish its lookup key."""
    IdentificationPrepareProcess._job_logger_info(
        IDENTIFICATION_PREPARE_PROCESS_CLASS_NAME, **kwargs)
    loaded = load_temp_data(kwargs['data_file'], remove=False)
    logger.debug(loaded)
    # Key is unique per job (uuid4) and scoped to the provider.
    redis_key = IDENTIFICATION_DATA_TRAINING_KEY % (
        str(uuid.uuid4()), loaded['providerID'])
    store = AlgorithmsDataStore.instance()
    store.store_data(redis_key, **kwargs)
    store.store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict={
            'providerID': loaded['providerID'],
            'data_redis_key': redis_key
        },
        callback_code=callback_code)
def pre_identification_helper(images, probe_id, settings, hash_config_path,
                              callback_code):
    """
    Prepare image data and settings for an identification/verification run.

    Saves the incoming images to a fresh temporary directory under APP_ROOT
    and augments ``settings`` in place with the data paths and general
    metadata. Aborts early when an error key for this callback already exists.

    :param images: image byte arrays to persist
    :param probe_id: probe identifier string
    :param settings: mutable settings dictionary (updated in place)
    :param hash_config_path: identification hash settings files path
    :param callback_code: callback function identifier
    :return: the updated ``settings`` dictionary, or None when the job was
        interrupted or preparation failed
    """
    logger.info(
        'Running verification for user - %s, with given parameters - %s' %
        (settings.get('userID'), settings))
    if AlgorithmsDataStore.instance().exists(key=REDIS_JOB_RESULTS_ERROR %
                                             callback_code):
        logger.info(
            'Job interrupted because of job_results_error key existence.')
        return None
    temp_image_path = tempfile.mkdtemp(dir=APP_ROOT)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(APP_ROOT, image_paths)
        settings.update({'data': image_paths})
        settings.update({
            'general_data': {
                'data_path': temp_image_path,
                'hash_config_path': hash_config_path,
                'probe_id': probe_id
            }
        })
        return settings
    except Exception as e:
        # Fixed: previously a bare ``except:`` that silently swallowed every
        # exception (including KeyboardInterrupt/SystemExit) with no trace.
        # Narrow the catch and log the cause; the None return is preserved.
        logger.exception(e)
        return None
def job(callback_code, **kwargs):
    """Process one hash-loading chunk; when it is the last one, gather and publish the combined result."""
    LoadIdentificationHashProcess._job_logger_info(
        LOAD_IDENTIFICATION_HASH_PROCESS_CLASS_NAME, **kwargs)
    store = AlgorithmsDataStore.instance()
    partial_record = LoadIdentificationHashProcess.process(**kwargs)
    store.append_value_to_list(
        key=REDIS_PARTIAL_RESULTS_KEY % callback_code, value=partial_record)
    remaining = store.decrement_int_value(
        REDIS_RESULTS_COUNTER_KEY % callback_code)
    if remaining > 0:
        # More chunks still outstanding; nothing else to do yet.
        return
    gathered_results = store.get_stored_list(
        REDIS_PARTIAL_RESULTS_KEY % callback_code)
    if remaining < 0:
        # Counter went negative: more jobs reported than were scheduled.
        result = create_error_message(INTERNAL_TRAINING_ERROR, "jobs_counter",
                                      "Number of jobs is incorrect.")
    else:
        result = create_result_message({'results': gathered_results},
                                       'estimation')
    store.delete_data(key=REDIS_RESULTS_COUNTER_KEY % callback_code)
    store.delete_data(key=REDIS_PARTIAL_RESULTS_KEY % callback_code)
    store.store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=result,
        callback_code=callback_code)
def pre_training_helper(images, probe_id, settings, callback_code, try_type,
                        ai_code):
    """
    Prepare image data and settings for a training run, telling the AI
    service over REST that training has started.

    :param images: image byte arrays to persist for training
    :param probe_id: probe identifier string
    :param settings: mutable settings dictionary (updated in place)
    :param callback_code: callback function identifier
    :param try_type: try type string
    :param ai_code: AI code string
    :return: the updated ``settings`` dictionary, or None when saving the
        images failed (``final_helper`` is invoked to report the failure)
    """
    worker_logger.info('Running training for user - %s, with given parameters - %s' % (settings.get('userID'), settings))
    ai_response_type = dict()
    try:
        # Best-effort REST notification; failures are logged, not fatal.
        worker_logger.info('Telling AI that we are starting training with code - %s' % ai_code)
        ai_response_type.update({'status': TRAINING_STARTED_STATUS, 'message': TRAINING_STARTED_MESSAGE})
        # Response type travels inside the URL, base64-encoded JSON.
        response_type = base64.b64encode(json.dumps(ai_response_type))
        register_biometrics_url = biomio_settings.ai_rest_url % (REST_REGISTER_BIOMETRICS % (ai_code, response_type))
        response = requests.post(register_biometrics_url)
        try:
            response.raise_for_status()
            worker_logger.info('AI should now know that training started with code - %s and response type - %s' % (ai_code, response_type))
        except HTTPError as e:
            worker_logger.exception(e)
            worker_logger.exception('Failed to tell AI that training started, reason - %s' % response.reason)
    except Exception as e:
        worker_logger.error('Failed to build rest request to AI - %s' % str(e))
        worker_logger.exception(e)
    # Pre-set the response type to "success"; final_helper overrides it on
    # failure paths.
    # NOTE(review): indentation reconstructed — confirm this update is
    # outside the except block above.
    ai_response_type.update({'status': TRAINING_SUCCESS_STATUS, 'message': TRAINING_SUCCESS_MESSAGE})
    result = False
    error = None
    if AlgorithmsDataStore.instance().exists(key=REDIS_UPDATE_TRAINING_KEY % probe_id):
        # A previous attempt left an update marker; reuse its database and
        # clear the marker.
        settings.update({'database': get_algo_db(probe_id=probe_id)})
        AlgorithmsDataStore.instance().delete_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id)
    temp_image_path = tempfile.mkdtemp(dir=APP_ROOT)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(APP_ROOT, image_paths)
        settings.update({'data': image_paths})
        settings.update({'general_data': {'data_path': temp_image_path, 'ai_code': ai_code, 'try_type': try_type, 'probe_id': probe_id}})
        return settings
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt — consider narrowing to Exception.
        final_helper(temp_image_path, probe_id, error, callback_code, result,
                     ai_response_type, try_type, ai_code)
        return None
def training(self, callback=None, **kwargs):
    """
    Method for training identification hashes and verification database
    using input images.

    Wires the full training process pipeline (start -> main -> training ->
    detection/rotation -> cluster matching -> transfer -> final/ident update),
    clears stale per-user redis state, and kicks off the start process.

    :param callback: callback function object
    :param kwargs: settings dictionary: {
            'images': image byte arrays list,
            'ai_code': AI code string,
            'probe_id': probe identifier string,
            'try_type': try type string,
            'settings': {
                'userID': user identifier string,
                'algoID': algorithm identifier string
            }
        }
    """
    self._result_count = 0
    self._store_results = []
    mode = kwargs.get("mode", TRAINING_FULL)
    self._callback = callback
    worker = WorkerInterface.instance()
    # Instantiate every stage of the pipeline.
    training_start_process = TrainingStartProcess(self._interface_training_callback)
    main_process = MainTrainingProcess(TEMP_DATA_PATH, worker)
    training_process = TrainingProcess(TEMP_DATA_PATH, worker)
    data_detect_process = DataDetectionProcess(TEMP_DATA_PATH, worker)
    rotation_detect_process = RotationDetectionProcess(TEMP_DATA_PATH, worker)
    rotation_result_process = RotationResultProcess(TEMP_DATA_PATH, worker)
    cluster_matching_process = ClusterMatchingProcess(worker)
    transfer_data_process = TransferDataProcess(worker)
    init_ident_process = InitIdentificationUpdateProcess(worker)
    update_data_process = UpdateDataStructureProcess(self._interface_training_callback)
    # Chain the stages together.
    training_start_process.set_main_training_process(main_process)
    main_process.set_data_training_process(training_process)
    training_process.set_data_detection_process(data_detect_process)
    training_process.set_data_rotation_process(rotation_detect_process)
    rotation_detect_process.set_rotation_result_process(rotation_result_process)
    rotation_result_process.set_data_detection_process(data_detect_process)
    data_detect_process.set_cluster_matching_process(cluster_matching_process)
    data_detect_process.set_final_training_process(transfer_data_process)
    cluster_matching_process.set_final_training_process(transfer_data_process)
    if mode == TRAINING_FULL:
        # Full training additionally produces a final-training result.
        self._result_count += 1
        final_training_process = FinalTrainingProcess(self._interface_training_callback)
        transfer_data_process.add_transfer_process(final_training_process)
    # NOTE(review): indentation reconstructed — confirm the identification
    # hash update wiring below is unconditional rather than part of the
    # TRAINING_FULL branch above.
    self._result_count += 1
    transfer_data_process.add_transfer_process(init_ident_process)
    init_ident_process.set_update_data_hash_process(update_data_process)
    # Clear stale per-cluster job actions and per-user results from any
    # previous run before starting.
    for idx in range(0, 6, 1):
        AlgorithmsDataStore.instance().delete_data(REDIS_CLUSTER_JOB_ACTION % idx)
    AlgorithmsDataStore.instance().delete_data(REDIS_TEMPLATE_RESULT % kwargs['settings']['userID'])
    AlgorithmsDataStore.instance().delete_data(REDIS_GENERAL_DATA % kwargs['settings']['userID'])
    training_start_process.run(worker, **kwargs)
def job(callback_code, **kwargs):
    """
    Job function that prepares the data required for training.

    :param callback_code: callback function identifier
    :param kwargs: settings dictionary: {
            'images': image byte arrays list,
            'ai_code': AI code string,
            'try_type': try type string,
            'probe_id': probe identifier string,
            'settings': {
                'providerID': provider identifier string,
                'algoID': algorithm identifier string,
                'userID': user identifier string
            }
        }
    """
    TrainingStartProcess._job_logger_info(
        TRAINING_START_PROCESS_CLASS_NAME, **kwargs)
    prepared = pre_training_helper(callback_code=callback_code, **kwargs)
    if prepared is None:
        # Preparation failed; the helper already reported the outcome.
        return
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=prepared,
        callback_code=callback_code)
def _interface_training_callback(self, result):
    """Collect one training stage result; once all have arrived, clean up redis state and dispatch the combined result."""
    self._result_count -= 1
    self._store_results.append(result)
    if self._result_count != 0:
        return
    store = AlgorithmsDataStore.instance()
    # Drop per-cluster job actions and per-user intermediate data.
    for idx in range(6):
        store.delete_data(REDIS_CLUSTER_JOB_ACTION % idx)
    store.delete_data(REDIS_TEMPLATE_RESULT % result['userID'])
    store.delete_data(REDIS_GENERAL_DATA % result['userID'])
    # Overall success requires both collected stage results to succeed.
    combined = self._store_results[0]
    combined['result'] = (self._store_results[0]['result'] and
                          self._store_results[1]['result'])
    WorkerInterface.instance().run_job(
        result_send_job, callback=self._callback, **combined)
def store_verification_results(result, callback_code, probe_id):
    """
    Persist a verification outcome and maintain the per-probe retry counter.

    On error, decrements the retry counter; once it reaches zero the error is
    replaced with a ``max_retries`` flag. On success the counter is removed.
    In either case the intermediate redis keys are cleared and the result is
    stored under the probe-result key for the waiting caller.

    :param result: verification result dictionary (mutated on max retries)
    :param callback_code: callback function identifier
    :param probe_id: probe identifier string
    """
    if 'error' in result:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(key=REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
            worker_logger.debug('Max number of verification attempts reached...')
            # Out of retries: report "max_retries" instead of the raw error.
            del result['error']
            result.update({'max_retries': True})
    else:
        AlgorithmsDataStore.instance().delete_data(key=REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
    # NOTE(review): indentation reconstructed — the cleanup and result
    # publication below are assumed to run on both paths; confirm.
    AlgorithmsDataStore.instance().delete_data(key=REDIS_RESULTS_COUNTER_KEY % callback_code)
    AlgorithmsDataStore.instance().delete_data(key=REDIS_PARTIAL_RESULTS_KEY % callback_code)
    AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
def final_helper(temp_image_path, probe_id, error, callback_code, result,
                 ai_response_type, try_type, ai_code):
    """
    Finalize a training attempt: remove the temporary image directory,
    maintain the retry counter, and publish the outcome.

    :param temp_image_path: temporary image directory to remove
    :param probe_id: probe identifier string
    :param error: error message, or None on success
    :param callback_code: callback function identifier
    :param result: result status to report
    :param ai_response_type: AI response type dictionary (mutated on failure)
    :param try_type: try type string
    :param ai_code: AI code string
    """
    shutil.rmtree(temp_image_path)
    if error is not None:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            worker_logger.debug('Maximum training attempts reached...')
            result = False
            ai_response_type.update(dict(
                status=TRAINING_MAX_RETRIES_STATUS,
                message=TRAINING_MAX_RETRIES_MESSAGE
            ))
            # _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
        else:
            # Retries remain: record the error so the next attempt can
            # detect and resume from it (see pre_training_helper).
            AlgorithmsDataStore.instance().store_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
        result = dict(result=False, error=error)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        worker_logger.info('Job was finished with internal algorithm error %s ' % error)
    else:
        AlgorithmsDataStore.instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
def result_identification_job(callback_code, **kwargs):
    """Publish the final identification result under the probe-result key."""
    probe_result_key = REDIS_PROBE_RESULT_KEY % callback_code
    AlgorithmsDataStore.instance().store_data(key=probe_result_key,
                                              result=kwargs['result'])
def job(callback_code, **kwargs):
    """
    Job function for update identification database tables.

    :param callback_code: callback function identifier
    :param kwargs: settings dictionary: {
            'uuid': user identifier string,
            'database': database identifier,
            'hash_settings': {
                'database_type': database type,
                'hash_config_path': identification hash settings files path,
                'settings': default identification hash settings dictionary
            },
            'cluster_id': cluster identifier,
            'data_settings': {
                'temp_image_path': temporary data path,
                'userID': user identifier string,
                'algoID': algorithm identifier string,
                'probe_id': probe identifier string,
                'ai_code': AI code string,
                'save': list of saved keys,
                'try_type': try type string
            },
            'template': descriptor list
        }
    """
    UpdateDataStructureProcess._job_logger_info(
        UPDATE_DATA_STRUCTURE_PROCESS_CLASS_NAME, **kwargs)
    redis_store = kwargs['database']
    # Copy so the shared default settings dict is never mutated.
    settings = kwargs['hash_settings']['settings'].copy()
    settings_path = os.path.join(
        kwargs['hash_settings']['hash_config_path'],
        HASH_SETTINGS_FILE % kwargs['cluster_id'])
    if os.path.exists(settings_path):
        # Reuse the per-cluster hash config saved by a previous run.
        settings = load_json(settings_path)
    else:
        # First run for this cluster: derive a cluster-unique projection name.
        settings['projection_name'] += str(kwargs['cluster_id'])
    database_store = get_data_structure(
        kwargs['hash_settings']['database_type'])(settings)
    if not os.path.exists(settings_path):
        # Persist the freshly generated config for subsequent runs.
        if not os.path.exists(kwargs['hash_settings']['hash_config_path']):
            os.mkdir(kwargs['hash_settings']['hash_config_path'])
        save_json(settings_path, database_store.get_config())
    buckets = database_store.hash_vectors(kwargs['template'], kwargs['uuid'])
    record = {
        'data': buckets,
        'store': redis_store,
        'uuid': kwargs['uuid'],
        'data_settings': kwargs['data_settings']
    }
    logger.debug(buckets)
    AlgorithmsHashRedisStackStore.instance(redis_store).store_vectors(
        buckets, record['uuid'], kwargs['cluster_id'], None)
    result = {'result': True}
    AlgorithmsDataStore.instance().store_job_result(
        record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
        record_dict=result,
        callback_code=callback_code)
def apply(self, data):
    """
    Finalize a training attempt: remove the temporary image directory,
    maintain the retry counter, and publish the outcome to redis and the
    AI service.

    :param data: dictionary = {
            'temp_image_path': path to the temporary image storage,
            'probe_id': probe identifier,
            'error': error message,
            'callback_code': code of callback function,
            'result': status of result,
            'ai_response_type': type of AI response,
            'try_type': type of try,
            'ai_code': AI response code
        }
    :return: None
    """
    temp_image_path = data['temp_image_path']
    error = data['error']
    probe_id = data['probe_id']
    ai_response_type = data['ai_response_type']
    callback_code = data['callback_code']
    result = data['result']
    try_type = data['try_type']
    ai_code = data['ai_code']
    shutil.rmtree(temp_image_path)
    if error is not None:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(
                key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            logger.debug('Maximum training attempts reached...')
            result = False
            ai_response_type.update({
                'status': TRAINING_MAX_RETRIES_STATUS,
                'message': TRAINING_MAX_RETRIES_MESSAGE
            })
        else:
            # Retries remain: record the error so the next attempt can
            # detect and resume from it.
            AlgorithmsDataStore.instance().store_data(
                key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
        result = dict(result=False, error=error)
        AlgorithmsDataStore.instance().store_data(
            key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        logger.info('Job was finished with internal algorithm error %s ' % error)
    else:
        AlgorithmsDataStore.instance().delete_data(
            key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        AlgorithmsDataStore.instance().store_data(
            key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        # Successful runs report back to the AI service.
        tell_ai_training_results(result, ai_response_type, try_type, ai_code)
    return None
def ind_final_helper(temp_image_path, probe_id, error, callback_code, result,
                     ai_response_type, try_type, ai_code):
    """
    Finalize an identification training attempt: remove the temporary image
    directory, maintain the retry counter, and publish the outcome.

    :param temp_image_path: temporary image directory to remove
    :param probe_id: probe identifier string
    :param error: error message, or None on success
    :param callback_code: callback function identifier
    :param result: result status to report
    :param ai_response_type: AI response type dictionary (mutated on failure)
    :param try_type: try type string
    :param ai_code: AI code string
    """
    shutil.rmtree(temp_image_path)
    res_dict = {
        'result': result,
        'ai_response_type': ai_response_type,
        'try_type': try_type,
        'ai_code': ai_code
    }
    if error is not None:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(
                key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            logger.debug('Maximum training attempts reached...')
            res_dict['result'] = False
            ai_response_type.update(
                dict(status=TRAINING_MAX_RETRIES_STATUS,
                     message=TRAINING_MAX_RETRIES_MESSAGE))
            # _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
        else:
            # Retries remain: record the error so the next attempt can
            # detect and resume from it.
            AlgorithmsDataStore.instance().store_data(
                key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
        res_dict['result'] = False
        res_dict['error'] = error
        # Fixed: this call previously used ``key=``/``result=`` keyword names
        # and omitted ``callback_code``, unlike every other store_job_result
        # call site in this module (``record_key``/``record_dict``/
        # ``callback_code``), which would raise TypeError at runtime.
        AlgorithmsDataStore.instance().store_job_result(
            record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
            record_dict=res_dict,
            callback_code=callback_code)
        logger.info('Job was finished with internal algorithm error %s ' % error)
    else:
        AlgorithmsDataStore.instance().delete_data(
            key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        AlgorithmsDataStore.instance().store_job_result(
            record_key=REDIS_DO_NOT_STORE_RESULT_KEY % callback_code,
            record_dict=res_dict,
            callback_code=callback_code)
def result_send_job(callback_code, **kwargs):
    """Store the training result under the probe-result key and forward it to the AI service."""
    final_result = kwargs['result']
    AlgorithmsDataStore.instance().store_data(
        key=REDIS_PROBE_RESULT_KEY % callback_code, result=final_result)
    tell_ai_training_results(final_result, kwargs['ai_response_type'],
                             kwargs['try_type'], kwargs['ai_code'])
def apply(self, data):
    """
    Prepare image data and settings for a training run, notifying the AI
    service (best-effort) that training has started. On failure, routes the
    error data to the final training stage.

    :param data: dictionary = {
            'images': training image set,
            'probe_id': prove identifier,
            'settings': general settings dictionary,
            'callback_code': code of callback function,
            'try_type': type of try request,
            'ai_code': AI response code,
            'temp_data_dir': Path to the temporary storage
        }
    :return: updated settings dictionary, or the final stage's result on
        failure
    """
    images = data['images']
    settings = data['settings']
    ai_code = data['ai_code']
    probe_id = data['probe_id']
    try_type = data['try_type']
    temp_data_dir = data['temp_data_dir']
    logger.info(
        'Running training for user - %s, with given parameters - %s' %
        (settings.get('userID'), settings))
    ai_response_type = dict()
    try:
        # Best-effort notification; a failure here must not abort training.
        logger.info(
            'Telling AI that we are starting training with code - %s' %
            ai_code)
        ai_response_type.update({
            'status': TRAINING_STARTED_STATUS,
            'message': TRAINING_STARTED_MESSAGE
        })
        ai_response_sender(ai_code, ai_response_type)
    except Exception as e:
        # TODO: Write Error handler
        logger.error('Failed to build rest request to AI - %s' % str(e))
        logger.exception(e)
    # Pre-set the response type to "success"; the failure path below
    # overrides it as needed.
    # NOTE(review): indentation reconstructed — confirm this update is
    # outside the except block above.
    ai_response_type.update({
        'status': TRAINING_SUCCESS_STATUS,
        'message': TRAINING_SUCCESS_MESSAGE
    })
    result = False
    if AlgorithmsDataStore.instance().exists(
            key=REDIS_UPDATE_TRAINING_KEY % probe_id):
        # A previous attempt left an update marker; reuse its database and
        # clear the marker.
        settings.update({'database': get_algo_db(probe_id=probe_id)})
        AlgorithmsDataStore.instance().delete_data(
            key=REDIS_UPDATE_TRAINING_KEY % probe_id)
    temp_image_path = tempfile.mkdtemp(dir=temp_data_dir)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(temp_data_dir, image_paths,
                                "train_{}".format(settings.get('userID')))
        settings.update({'data': image_paths})
        settings.update({
            'general_data': {
                'data_path': temp_image_path,
                'ai_code': ai_code,
                'try_type': try_type,
                'probe_id': probe_id
            }
        })
        return settings
    except Exception as error:
        # Strip the bulky/unpicklable entries, then hand everything the
        # final stage needs to report the failure.
        end_data = data.copy()
        del end_data['images']
        del end_data['settings']
        end_data.update({
            'temp_image_path': temp_image_path,
            'error': error,
            'result': result,
            'ai_response_type': ai_response_type
        })
        return self._stages.get(FINAL_TRAINING_STAGE).apply(end_data)