def run_job(self, job_to_run, callback=None, **kwargs):
    """
    Adds a job to the worker's queue so it will be picked up by the worker.
    :param job_to_run: Job that will be placed inside the queue.
    :param callback: Optional callback function that will be executed after the job is finished.
    :param kwargs: Arguments to run the job with.
    """
    worker_logger.info('Running job - %s' % job_to_run)
    if callback is not None:
        callback_code = sha1(urandom(32)).hexdigest()
        self._subscribed_callbacks.update({callback_code: callback})
        kwargs.update({'callback_code': callback_code})
    self._queue.enqueue(job_to_run, **kwargs)
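# Hedged usage sketch (not from the original module): how a caller might hand a
# job to the worker via run_job(). The `worker` instance and the request fields
# are illustrative assumptions; run_job() generates the 'callback_code' kwarg
# itself and registers the callback under it when one is supplied.
def _example_enqueue_verification(worker, image, probe_id, settings):
    def on_verified(result):
        # Illustrative callback; the worker keeps it in _subscribed_callbacks
        # keyed by the generated callback_code.
        worker_logger.info('Verification callback fired with result - %s' % result)

    worker.run_job(verification_job, callback=on_verified,
                   image=image, probe_id=probe_id, settings=settings)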
def _tell_ai_training_results(result, ai_response_type, try_type, ai_code):
    if isinstance(result, bool) and result:
        ai_response_type.update(get_ai_training_response(try_type))
    try:
        worker_logger.info('Telling AI that training is finished with code - %s and result - %s'
                           % (ai_code, result))
        response_type = base64.b64encode(json.dumps(ai_response_type))
        register_biometrics_url = biomio_settings.ai_rest_url % (REST_REGISTER_BIOMETRICS % (ai_code, response_type))
        response = requests.post(register_biometrics_url)
        try:
            response.raise_for_status()
            worker_logger.info('AI should now know that training is finished with code - %s and response type - %s'
                               % (ai_code, response_type))
        except HTTPError as e:
            worker_logger.exception(e)
            worker_logger.exception('Failed to tell AI that training is finished, reason - %s' % response.reason)
    except Exception as e:
        worker_logger.error('Failed to build rest request to AI - %s' % str(e))
        worker_logger.exception(e)
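# Minimal sketch (not part of the original module) of the status payload
# encoding used in the REST notifications above and in the training helpers
# below: the response type is a small dict that is JSON-serialized and then
# base64-encoded before being substituted into the AI REST URL.
def _example_encode_ai_status(status, message):
    return base64.b64encode(json.dumps({'status': status, 'message': message}))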
def final_helper(temp_image_path, probe_id, error, callback_code, result, ai_response_type, try_type, ai_code):
    shutil.rmtree(temp_image_path)
    if error is not None:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            worker_logger.debug('Maximum training attempts reached...')
            result = False
            ai_response_type.update(dict(
                status=TRAINING_MAX_RETRIES_STATUS,
                message=TRAINING_MAX_RETRIES_MESSAGE
            ))
            # _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
        else:
            AlgorithmsDataStore.instance().store_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
            result = dict(result=False, error=error)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        worker_logger.info('Job was finished with internal algorithm error %s ' % error)
    else:
        AlgorithmsDataStore.instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
    _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
def pre_training_helper(images, probe_id, settings, callback_code, try_type, ai_code):
    worker_logger.info('Running training for user - %s, with given parameters - %s'
                       % (settings.get('userID'), settings))
    ai_response_type = dict()
    try:
        worker_logger.info('Telling AI that we are starting training with code - %s' % ai_code)
        ai_response_type.update({'status': TRAINING_STARTED_STATUS, 'message': TRAINING_STARTED_MESSAGE})
        response_type = base64.b64encode(json.dumps(ai_response_type))
        register_biometrics_url = biomio_settings.ai_rest_url % (REST_REGISTER_BIOMETRICS % (ai_code, response_type))
        response = requests.post(register_biometrics_url)
        try:
            response.raise_for_status()
            worker_logger.info('AI should now know that training started with code - %s and response type - %s'
                               % (ai_code, response_type))
        except HTTPError as e:
            worker_logger.exception(e)
            worker_logger.exception('Failed to tell AI that training started, reason - %s' % response.reason)
    except Exception as e:
        worker_logger.error('Failed to build rest request to AI - %s' % str(e))
        worker_logger.exception(e)
    ai_response_type.update({'status': TRAINING_SUCCESS_STATUS, 'message': TRAINING_SUCCESS_MESSAGE})
    result = False
    error = None
    if AlgorithmsDataStore.instance().exists(key=REDIS_UPDATE_TRAINING_KEY % probe_id):
        settings.update({'database': get_algo_db(probe_id=probe_id)})
        AlgorithmsDataStore.instance().delete_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id)
    temp_image_path = tempfile.mkdtemp(dir=APP_ROOT)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(APP_ROOT, image_paths)
        settings.update({'data': image_paths})
        settings.update({'general_data': {'data_path': temp_image_path, 'ai_code': ai_code,
                                          'try_type': try_type, 'probe_id': probe_id}})
        return settings
    except Exception:
        final_helper(temp_image_path, probe_id, error, callback_code, result, ai_response_type, try_type, ai_code)
        return None
def verification_job(image, probe_id, settings, callback_code):
    """
    Runs verification for a user with the given image.
    :param image: image to run verification for
    :param probe_id: app_id
    :param settings: settings with values for algoId and userID
    :param callback_code: code of the callback which should be executed after the job is finished.
    """
    worker_logger.info('Running verification for user - %s, with given parameters - %s'
                       % (settings.get('userID'), settings))
    if RedisStorage.persistence_instance().exists(key=REDIS_JOB_RESULTS_ERROR % callback_code):
        worker_logger.info('Job interrupted because of job_results_error key existence.')
        return
    result = False
    database = get_algo_db(probe_id=probe_id)
    settings.update({'database': database})
    settings.update({'action': 'verification'})
    temp_image_path = tempfile.mkdtemp(dir=ALGO_ROOT)
    error = None
    try:
        temp_image = save_image(image, temp_image_path)
        settings.update({'data': temp_image})
        # Store photos for test purposes
        store_test_photo_helper(ALGO_ROOT, [temp_image])
        algo_result = AlgorithmsInterface.verification(**settings)
        if algo_result.get('status', '') == "result":
            # record = dictionary:
            #   key         value
            #   'status'    "result"
            #   'result'    bool value: True if verification succeeded, otherwise False
            #   'userID'    Unique user identifier
            #
            # Needs to be saved to Redis.
            result = algo_result.get('result', False)
        elif algo_result.get('status', '') == "data_request":
            # record = dictionary:
            #   key         value
            #   'status'    "data_request"
            #   'userID'    Unique user identifier
            #   'algoID'    Unique algorithm identifier
            #
            # Needs to be saved to Redis as a data request (this dictionary can be used as-is).
            pass
        elif algo_result.get('status', '') == "error":
            worker_logger.exception('Error during verification - %s, %s, %s'
                                    % (algo_result.get('status'), algo_result.get('type'),
                                       algo_result.get('details')))
            if 'Internal algorithm' in algo_result.get('type', ''):
                error = algo_result.get('details', {}).get('message', '')
            # record = dictionary:
            #   key         value
            #   'status'    "error"
            #   'type'      Type of error
            #   'userID'    Unique user identifier
            #   'algoID'    Unique algorithm identifier
            #   'details'   Error details dictionary
            #
            # The algorithm can return three types of errors:
            #   "Algorithm settings are empty"
            #       in this case the fields 'userID', 'algoID', 'details' are empty
            #   "Invalid algorithm settings"
            #       in this case the 'details' dictionary has the following structure:
            #           key         value
            #           'params'    Parameters key ('data')
            #           'message'   Error message (for example "File <path> doesn't exist")
            #   "Internal algorithm error"
            #
            # Needs to be saved to Redis.
    except Exception as e:
        worker_logger.exception(e)
    finally:
        if error is not None or RedisStorage.persistence_instance().exists(
                key=REDIS_JOB_RESULTS_ERROR % callback_code):
            if not RedisStorage.persistence_instance().exists(key=REDIS_JOB_RESULTS_ERROR % callback_code):
                result = dict(verified=False, error=error)
                RedisStorage.persistence_instance().store_data(key=REDIS_JOB_RESULTS_ERROR % callback_code,
                                                               ex=300, result=result)
                store_verification_results(result=result, callback_code=callback_code, probe_id=probe_id)
            if error is not None:
                worker_logger.info('Job was finished with internal algorithm error %s ' % error)
            else:
                worker_logger.info('Job was not stored because of job_results_error key existence.')
        else:
            RedisStorage.persistence_instance().append_value_to_list(
                key=REDIS_PARTIAL_RESULTS_KEY % callback_code, value=result)
            results_counter = RedisStorage.persistence_instance().decrement_int_value(
                REDIS_RESULTS_COUNTER_KEY % callback_code)
            if results_counter <= 0:
                gathered_results = RedisStorage.persistence_instance().get_stored_list(
                    REDIS_PARTIAL_RESULTS_KEY % callback_code)
                worker_logger.debug('All gathered results for verification job - %s' % gathered_results)
                if results_counter < 0:
                    worker_logger.exception('Results count is less than 0, check worker jobs consistency!')
                    result = dict(verified=False)
                else:
                    true_count = float(gathered_results.count('True'))
                    result = dict(verified=((true_count / len(gathered_results)) * 100) >= 50)
                store_verification_results(result=result, callback_code=callback_code, probe_id=probe_id)
        shutil.rmtree(temp_image_path)
        worker_logger.info('Verification finished for user - %s, with result - %s'
                           % (settings.get('userID'), result))
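# Sketch, pulled out for clarity (not part of the original module), of the
# aggregation rule used in the finally block above. It assumes, as the code
# does, that partial results come back from Redis as strings, so the literal
# 'True' entries are counted: the probe is considered verified when at least
# half of the gathered partial results are positive.
def _example_majority_vote(gathered_results):
    true_count = float(gathered_results.count('True'))
    return (true_count / len(gathered_results)) * 100 >= 50

# e.g. _example_majority_vote(['True', 'True', 'False']) -> True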
def training_job(images, probe_id, settings, callback_code, try_type, ai_code):
    """
    Runs education (training) for the given user with the given array of images.
    :param images: array of images to run training on.
    :param probe_id: current app_id
    :param settings: dictionary which contains information about algoId and userID
    :param callback_code: code of the callback that should be executed after the job is finished
    :param try_type: type of the training attempt, passed through to the AI status notification
    :param ai_code: code used to notify the AI REST service about training progress
    """
    worker_logger.info('Running training for user - %s, with given parameters - %s'
                       % (settings.get('userID'), settings))
    ai_response_type = dict()
    try:
        worker_logger.info('Telling AI that we are starting training with code - %s' % ai_code)
        ai_response_type.update({'status': TRAINING_STARTED_STATUS, 'message': TRAINING_STARTED_MESSAGE})
        response_type = base64.b64encode(json.dumps(ai_response_type))
        register_biometrics_url = biomio_settings.ai_rest_url % (REST_REGISTER_BIOMETRICS % (ai_code, response_type))
        response = requests.post(register_biometrics_url)
        try:
            response.raise_for_status()
            worker_logger.info('AI should now know that training started with code - %s and response type - %s'
                               % (ai_code, response_type))
        except HTTPError as e:
            worker_logger.exception(e)
            worker_logger.exception('Failed to tell AI that training started, reason - %s' % response.reason)
    except Exception as e:
        worker_logger.error('Failed to build rest request to AI - %s' % str(e))
        worker_logger.exception(e)
    ai_response_type.update({'status': TRAINING_SUCCESS_STATUS, 'message': TRAINING_SUCCESS_MESSAGE})
    result = False
    error = None
    settings.update({'action': 'education'})
    if RedisStorage.persistence_instance().exists(key=REDIS_UPDATE_TRAINING_KEY % probe_id):
        settings.update({'database': get_algo_db(probe_id=probe_id)})
        RedisStorage.persistence_instance().delete_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id)
    temp_image_path = tempfile.mkdtemp(dir=ALGO_ROOT)
    try:
        image_paths = save_images(images, temp_image_path)
        # Store photos for test purposes
        store_test_photo_helper(ALGO_ROOT, image_paths)
        settings.update({'data': image_paths})
        algo_result = AlgorithmsInterface.verification(**settings)
        if isinstance(algo_result, dict) and algo_result.get('status', '') == "update":
            # record = dictionary:
            #   key          value
            #   'status'     "update"
            #   'userID'     Unique user identifier
            #   'algoID'     Unique algorithm identifier
            #   'database'   BLOB data of the user with userID, for verification algorithm algoID
            #
            # Update the record in the algorithms database, or create a record for user userID and
            # algorithm algoID if it doesn't exist.
            database = algo_result.get('database', None)
            if database is not None:
                store_training_db(database, probe_id)
                result = True
                ai_response_type.update(dict(status=TRAINING_SUCCESS_STATUS, message=TRAINING_SUCCESS_MESSAGE))
        elif isinstance(algo_result, list):
            for algo_result_item in algo_result:
                if algo_result_item.get('status', '') == "error":
                    worker_logger.exception('Error during education - %s, %s, %s'
                                            % (algo_result_item.get('status'), algo_result_item.get('type'),
                                               algo_result_item.get('details')))
                    if 'Internal Training Error' in algo_result_item.get('type', ''):
                        error = algo_result_item.get('details', {}).get('message', '')
                        ai_response_type.update(dict(status=TRAINING_RETRY_STATUS, message=TRAINING_RETRY_MESSAGE))
                    else:
                        ai_response_type.update({'status': 'error'})
                elif algo_result_item.get('status', '') == 'update':
                    database = algo_result_item.get('database', None)
                    if database is not None:
                        store_training_db(database, probe_id)
                        result = True
                        ai_response_type.update(dict(status=TRAINING_SUCCESS_STATUS,
                                                     message=TRAINING_SUCCESS_MESSAGE))
            # record = dictionary:
            #   key         value
            #   'status'    "error"
            #   'type'      Type of error
            #   'userID'    Unique user identifier
            #   'algoID'    Unique algorithm identifier
            #   'details'   Error details dictionary
            #
            # The algorithm can return three types of errors:
            #   "Algorithm settings are empty"
            #       in this case the fields 'userID', 'algoID', 'details' are empty
            #   "Invalid algorithm settings"
            #       in this case the 'details' dictionary has the following structure:
            #           key         value
            #           'params'    Parameters key ('data')
            #           'message'   Error message (for example "File <path> doesn't exist")
            #   "Internal algorithm error"
            #
            # Needs to be saved to Redis.
        elif algo_result.get('status', '') == "error":
            worker_logger.exception('Error during education - %s, %s, %s'
                                    % (algo_result.get('status'), algo_result.get('type'),
                                       algo_result.get('details')))
            if 'Internal Training Error' in algo_result.get('type', ''):
                error = algo_result.get('details', {}).get('message', '')
                ai_response_type.update(dict(status=TRAINING_RETRY_STATUS, message=TRAINING_RETRY_MESSAGE))
            else:
                ai_response_type.update({'status': 'error'})
                ai_response_type.update(dict(status=TRAINING_FAILED_STATUS, message=TRAINING_FAILED_MESSAGE))
            # (Same error record structure as documented in the list-handling branch above.)
    except Exception as e:
        worker_logger.exception(e)
    finally:
        shutil.rmtree(temp_image_path)
        if error is not None:
            retries_count = RedisStorage.persistence_instance().decrement_int_value(
                REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            if retries_count == 0:
                RedisStorage.persistence_instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
                worker_logger.debug('Maximum training attempts reached...')
                result = False
                ai_response_type.update(dict(status=TRAINING_MAX_RETRIES_STATUS,
                                             message=TRAINING_MAX_RETRIES_MESSAGE))
                # _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
            else:
                RedisStorage.persistence_instance().store_data(key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
                result = dict(result=False, error=error)
            RedisStorage.persistence_instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
            worker_logger.info('Job was finished with internal algorithm error %s ' % error)
        else:
            RedisStorage.persistence_instance().delete_data(key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            RedisStorage.persistence_instance().store_data(key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        _tell_ai_training_results(result, ai_response_type, try_type.split('_')[0], ai_code)
        worker_logger.info('Training finished for user - %s, with result - %s'
                           % (settings.get('userID'), result))
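# Illustrative helper (not part of the original module) restating the retry
# rule from training_job()'s finally block: the Redis-backed counter holds the
# remaining training attempts, each failed run decrements it, a post-decrement
# value of exactly 0 means the maximum number of attempts has been used up,
# and any other value schedules an update-training retry.
def _example_classify_failed_attempt(retries_count_after_decrement):
    if retries_count_after_decrement == 0:
        return 'max_retries_reached'
    return 'retry_scheduled'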
def unsubscribe(self, channel_key):
    worker_logger.info('Unsubscribing from channel - %s' % channel_key)
    if channel_key in self._subscribers:
        del self._subscribers[channel_key]
def subscribe(self, channel_key, callback):
    worker_logger.info('Subscribing channel - %s, with the callback - %s'
                       % (channel_key, callback.__name__))
    self._subscribers.update({channel_key: callback})
def __del__(self):
    worker_logger.info('Redis Subscriber was successfully destroyed')
def __init__(self):
    worker_logger.info('Initialized Redis Subscriber instance')
    self._client = Client(host=REDIS_HOST, port=int(REDIS_PORT))
    self._subscribers = {}
    self._listen()
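# Hedged usage sketch for the subscriber above (the channel name and handler
# are illustrative assumptions; _listen() is started from __init__, so callers
# only register and remove handlers):
def _example_use_subscriber(redis_subscriber):
    def on_channel_message(message):
        worker_logger.info('Got message from subscribed channel - %s' % message)

    redis_subscriber.subscribe('example_channel', on_channel_message)
    # ... later, when updates for this channel are no longer needed:
    redis_subscriber.unsubscribe('example_channel')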