def test_insight_template(self, data):
    # Kick off a "get insight" job for the supplied template, then poll the
    # same endpoint until the insight service returns populated data or
    # roughly 60 seconds have elapsed.
    #
    # data: assumed to carry 'template_key' (request type) and
    # 'template_value' (request payload) — TODO confirm against callers.
    # Returns a status dict; on success it includes the fetched insight data.
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        # construct insight configure message
        req_data = dict()
        payload = data['template_value']
        payload['request_type'] = data['template_key']
        req_data['entity_id'] = DEFAULT_ENTITY_ID
        req_data['solution_id'] = self.solution_id
        req_data['data'] = payload
        response = post_job(INSIGHT_CONFIG['get_insight'], req_data)
        if 'job_id' in response:
            job_id = response["job_id"]
        if not is_request_timeout(response):
            if is_message_published(response):
                # NOTE(review): processed_request is never set True — the
                # polling loop only exits via the returns below.
                processed_request = False
                # Switch the request payload to fetch the concrete insight by id.
                req_data['data'] = {'insight_id': self.get_insight_id_from_response(response),
                                    'request_type': 'default'}
                initiated_dt = datetime.now()
                while not processed_request:
                    response = post_job(INSIGHT_CONFIG['get_insight'], req_data)
                    if 'job_id' in response:
                        job_id = response["job_id"]
                    if not is_request_timeout(response) and is_message_published(response):
                        # NOTE: shadows the 'data' parameter from here on.
                        data = self.get_insight_from_response(response)
                        curr_date = datetime.now()
                        # Stop when insights arrived, or give up (still reported
                        # as success with whatever data we have) after ~60s.
                        if ('insights' in data.keys() and len(data['insights']) > 0) or \
                                (curr_date - initiated_dt).total_seconds() > 60:
                            return {'status': 'success', 'msg': 'Get insight response', 'data': data,
                                    'job_id': job_id}
                    else:
                        # Poll attempt timed out or was not published.
                        return {'status': 'failure', 'msg': 'Request timed out', 'job_id': job_id}
            else:
                return {'status': 'failure', 'msg': 'Failed in configure insight template', 'error': '',
                        'job_id': job_id}
        # Initial request timed out.
        return {'status': 'failure', 'msg': 'Request timed out', 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        traceback.print_exc()
        if job_id:
            return {'status': 'failure', 'msg': 'Error in configure insight template',
                    'error': traceback.format_exc(), 'job_id': job_id}
        else:
            return {'status': 'failure', 'msg': 'Error in configure insight template',
                    'error': traceback.format_exc()}
    finally:
        context.end_span()
def update_email_details(solution_id, payload):
    """Push an email-source configuration job and sync the result locally.

    Posts the configuration to the job gateway, mirrors the accepted config
    into the source collection, then (re)creates the backing email template.
    Returns a status dict carrying the gateway job id when one was issued.
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        source_cfg = dict(source_type="email", service_name="source",
                          solution_id=solution_id, configuration=payload)
        job_payload = dict(solution_id=solution_id, data=source_cfg)
        resp = post_job(CONFIGURE_EMAIL, job_payload)
        job_id = resp.get('job_id')
        if is_request_timeout(resp):
            return {'status': 'failure', 'msg': 'Request timeout', "error": resp, 'job_id': job_id}
        ok, result = get_response(resp)
        if not ok:
            return {'status': 'failure', 'msg': 'Error in updating emails', 'error': result,
                    'job_id': job_id}
        # Persist the accepted configuration locally before touching templates.
        MongoDbConn.update(SOURCE_COLLECTION,
                           where_clause=dict(solution_id=solution_id, source_type="email"),
                           query=job_payload["data"])
        template_status = create_email_template(solution_id, payload)
        if template_status["status"] != "success":
            return template_status
        return {"status": "success", "msg": "Updated email details.", 'job_id': job_id}
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "msg": "Failed to update details."}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def create_email_template(solution_id, payload):
    """Create (or update) the backing "email" template for a solution.

    If no email template exists yet, a new one is created via the template
    service and a default section is attached; otherwise the existing
    template's elements are updated from *payload*.

    :param solution_id: solution the template belongs to.
    :param payload: email configuration used to populate template elements.
    :returns: status dict — ``{'status': 'success'}`` on the happy path,
        otherwise a failure dict with ``msg``/``error`` (and the gateway
        ``job_id`` when one was issued).
    """
    job_id = None
    temp_result = {"status": "failure"}
    template = MongoDbConn.find_one(TEMPLATE_COLLECTION,
                                    {"solution_id": solution_id, "template_name": "email",
                                     "is_deleted": False})
    if template is None:
        template_data = format_template_data(solution_id)
        response = post_job(TEMPLATE_CONFIG["SAVE"], template_data)
        if 'job_id' in response:
            job_id = response["job_id"]
        # BUG FIX: the original did `return temp_result.update({...})`, which
        # always returns None (dict.update mutates in place). Update first,
        # then return the dict itself.
        if is_request_timeout(response):
            temp_result.update({'msg': 'Request timed out', 'error': response, 'job_id': job_id})
            return temp_result
        status, result = get_response(response)
        if not status:
            temp_result.update({'msg': 'Failed to create template', 'error': result, 'job_id': job_id})
            return temp_result
        template_id = get_nested_value(response, "result.result.metadata.template_id")
        if not template_id:
            # BUG FIX: missing template_id previously fell through and
            # returned None; report it explicitly.
            temp_result.update({'msg': 'Failed to create template', 'error': response,
                                'job_id': job_id})
            return temp_result
        section_result = create_new_section(template_id, solution_id, DEFAULT_SECTION)
        if section_result["status"] != "success":
            temp_result.update({'msg': 'Failed to create sections', 'error': section_result,
                                'job_id': job_id})
            return temp_result
        # BUG FIX: the successful create path previously returned None.
        return {'status': 'success', 'job_id': job_id}
    template_id = template["template_id"]
    element_result = update_elements(template_id, solution_id, payload)
    if element_result["status"] == "success":
        return {'status': 'success'}
    temp_result.update({'msg': 'Failed to create elements', 'error': element_result})
    return temp_result
def entity_delete(payload, solution_id, config, validated=False):
    """Delete one or more domain objects together with their sub-entities.

    When *validated* is False, *payload* is a dict naming a single entity
    ('entity_name') and the deletion is validated first; when True, *payload*
    is taken as a pre-validated list of entity names.  Returns a status dict
    carrying the last gateway job id observed.
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        if validated:
            targets = payload
        else:
            domain_object = payload["entity_name"]
            if not validate_entity_delete(solution_id, domain_object):
                return invalid_edit_msg()
            targets = [domain_object]
        for entity in targets:
            # Expand the target list with all sub-entities of this entity.
            expanded = deepcopy(targets)
            get_all_sub_entities(entity, solution_id, expanded)
            request_body = {
                "solution_id": solution_id,
                "data": {"filter_obj": expanded}
            }
            resp = post_job(config["DELETE"], request_body, timeout=100)
            if 'job_id' in resp:
                job_id = resp['job_id']
            if is_request_timeout(resp):
                return {'status': 'failure', 'msg': 'Request timeout', "error": resp,
                        'job_id': job_id}
            ok, result = get_response(resp)
            if not ok:
                return {'status': 'failure', 'msg': 'Failed to remove', 'error': result,
                        'job_id': job_id}
            # Mirror the remote deletion in the local entity collection.
            for name in expanded:
                MongoDbConn.remove(ENTITY_COLLECTION,
                                   {"entity_name": name, "solution_id": solution_id})
        return {'status': 'success', 'msg': 'Successfully removed', 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "msg": str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def tables_save(config, data, solution_id):
    """Persist the tables configuration through the job gateway.

    Returns a simple status/msg dict; no job id is tracked for this call.
    """
    resp = post_job(config['SAVE'], {"solution_id": solution_id, "data": data})
    if is_request_timeout(resp):
        return {'status': 'failure', 'msg': 'Request timeout'}
    if is_message_published(resp):
        return {'status': 'success', 'msg': 'Tables config saved'}
    return {'status': 'failure', 'msg': 'Failed to save config'}
def process_action_retrain(solution_id, payload, model, result_id):
    """Retrain *model* from the data carried in *payload*.

    Dumps the training data to a local JSON file, uploads it to S3, then
    posts a retrain job.  On success, the model document identified by
    *result_id* is updated with the new model reference and enabled.
    """
    version = len(model['model_ref']) + 1
    file_name = model['name'] + '_' + str(version) + '.json'
    with open(file_name, 'w') as fh:
        json.dump(payload['data'], fh)
    s3_resp = post_s3(str(file_name), ROOT + '/' + str(file_name),
                      AMAZON_AWS_BUCKET, AMAZON_AWS_KEY_PATH)
    if s3_resp['status'] != 'success':
        return {
            'status': 'failure',
            'msg': 'Error in uploading retrained model to s3'
        }
    os.remove(file_name)  # local copy no longer needed once uploaded
    if model['type'] == 'action_classifier':
        uri = TRAINING_SET_ACTION_CLASSIFIER_URI
    else:
        uri = TRAINING_SET_TRAIN_MODEL_URI
    response = post_train_model_job(solution_id, model, s3_resp['key'], uri)
    if is_request_timeout(response):
        return {
            'status': 'failure',
            'msg': 'Timeout Error while processing retrained model'
        }
    if not is_message_published(response):
        return {
            'status': 'failure',
            'msg': 'Error in service while publishing retrained model'
        }
    model['model_ref'].append({
        'bucket_name': AMAZON_AWS_BUCKET,
        'key_name': AMAZON_AWS_KEY_PATH + file_name
    })
    MongoDbConn.update(TRAINING_SET_MODELS_COLLECTION,
                       where_clause={"_id": ObjectId(result_id)},
                       query={
                           'is_enabled': True,
                           'updated_ts': datetime.now(),
                           'model_ref': model['model_ref']
                       })
    return {'status': 'success', 'msg': 'Retrain model completed'}
def get_pipelines(request):
    """
    Fetch all pipeline services for the solution bound to the session.

    :param request: Http request (only GET is accepted)
    :return: dictionary response including an HTTP-style status_code
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="ERROR")
    context.start_span(component=__name__)
    try:
        result = {"status": "failure"}
        solution_id = get_solution_from_session(request)
        if request.method != 'GET':
            return {'status': 'failure', 'status_code': STATUS_CODES['BAD_REQUEST'],
                    'msg': 'GET request will be accepted.'}
        body = {'solution_id': solution_id, 'data': {}}
        resp = post(API_GATEWAY_POST_JOB_URI + PIPELINE['GET_PIPELINE'], body)
        if 'job_id' in resp:
            result["job_id"] = resp["job_id"]
        if is_request_timeout(resp):
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            return result
        ok, msg = get_response(resp)
        if ok:
            services = get_nested_value(resp, "result.metadata.pipelines")
            result["status"] = "success"
            result['status_code'] = STATUS_CODES['OK']
            result["data"] = services
            result['total_services'] = len(services)
        else:
            result["error"] = msg
            result['status_code'] = STATUS_CODES['NOT_FOUND']
            result["msg"] = "Error in retrieving the services information"
        return result
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {'status': 'failure', 'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
                'msg': 'Failed to fetch pipeline services group.'}
    finally:
        context.end_span()
def initialize_learning_service(self):
    """Bootstrap the learning microservice with its default configuration.

    Loads the packaged default config and posts it to the learning service's
    initialize-defaults endpoint.

    :returns: True when the job was accepted and published, else False.
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        default_config = get_file_contents("learning_default_config.json")
        # construct the learning-service configuration message
        req_data = dict()
        req_data['configuration'] = default_config
        req_data['service_name'] = 'learning-microservice'
        req = {"solution_id": self.solution_id, "data": req_data, "metadata": {}}
        response = post_job(LEARNING_CONFIG['initialize_defaults_api'], req)
        if not is_request_timeout(response):
            if is_message_published(response):
                return True
    # TODO raise specific exception
    except Exception as e:
        traceback.print_exc()
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
    finally:
        # BUG FIX: end_span previously ran only on the failure path (the
        # success `return True` skipped it), leaking the tracing span.
        context.end_span()
    return False
def process_action_test(solution_id, payload, model):
    """Submit *payload* against *model* for a test run via the job gateway.

    Note: the model name/type are written into payload['data'] in place.
    Returns the raw gateway response on success, or a failure dict.
    """
    test_data = payload['data']
    test_data['model_name'] = model['name']
    test_data['model_type'] = model['type']
    request_body = {'solution_id': solution_id, 'data': test_data}
    response = post_job(TRAINING_SET_TRAIN_MODEL_TEST_URI, request_body)
    if is_request_timeout(response):
        return {
            'status': 'failure',
            'msg': 'Timeout Error while processing model to test'
        }
    if is_message_published(response):
        return response
    return {
        'status': 'failure',
        'msg': 'Error in service while publishing model to test'
    }
def entity_download(solution_id, file_type, config):
    """Request a downloadable export of domain objects.

    Posts a GET job and, on success, extracts the generated file path from
    the response using the lookup path configured under config["DATA"].
    Returns a status dict carrying the gateway job id when available.
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        resp = post_job(config["GET"],
                        {"solution_id": solution_id, "data": {"type": file_type}})
        if 'job_id' in resp:
            job_id = resp['job_id']
        if is_request_timeout(resp):
            return {'status': 'failure', 'msg': 'Request timeout', "error": resp,
                    'job_id': job_id}
        ok, result = get_response(resp)
        if not ok:
            return {'status': 'failure', 'msg': 'Failed to download domain objects',
                    'error': result, 'job_id': job_id}
        file_path = get_nested_value(resp, config["DATA"])
        return {'status': 'success', 'file_path': file_path, 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "msg": str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def configure_insight_template(self, data):
    """Register the given realtime-request templates with the insight service.

    :param data: mapping of template keys to template definitions, installed
        under the service's defaults.realtime_requests configuration.
    :returns: status dict carrying the gateway job id when available.
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        config_msg = {
            'configuration': {"defaults": {"realtime_requests": data}},
            'service_name': 'insight-microservice',
        }
        request_body = {"solution_id": self.solution_id, "data": config_msg}
        response = post_job(INSIGHT_CONFIG['configure_template_api'], request_body)
        if 'job_id' in response:
            job_id = response["job_id"]
        if is_request_timeout(response):
            return {'status': 'success', 'msg': 'Configured insight template',
                    'job_id': job_id} if False else \
                {'status': 'failure', 'msg': 'Request timed out', 'job_id': job_id}
        if is_message_published(response):
            return {'status': 'success', 'msg': 'Configured insight template',
                    'job_id': job_id}
        return {'status': 'failure', 'msg': 'Failed in configure insight template',
                'error': '', 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        traceback.print_exc()
        failure = {'status': 'failure', 'msg': 'Error in configure insight template',
                   'error': traceback.format_exc()}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def solution_trigger(solution_id):
    """Kick off initialization of a newly created solution.

    Posts the new-solution trigger job and reports whether the message was
    accepted by the gateway.

    :param solution_id: id of the solution to initialize.
    :returns: status dict; includes the gateway job id whenever one was issued.
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    job_id = None
    try:
        payload = {
            "solution_id": solution_id,
            "entity_id": DEFAULT_ENTITY_ID,
            "data": {}
        }
        response = post_job(NEW_SOLN_TRIGGER, payload)
        if 'job_id' in response:
            job_id = response["job_id"]
        if is_request_timeout(response):
            return {'status': 'failure', 'msg': 'Request timeout', 'job_id': job_id}
        if is_message_published(response):
            return {'status': 'success', 'msg': 'Solution initialized', 'job_id': job_id}
        return {'status': 'failure', 'msg': 'Failed to initialise', 'job_id': job_id}
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        # BUG FIX: corrected the 'occerd' typo in the user-facing message and,
        # consistent with sibling helpers, include the job id when known.
        result = {'status': 'failure', 'msg': 'exception occurred : ' + str(e)}
        if job_id:
            result['job_id'] = job_id
        return result
    finally:
        context.end_span()
def update_elements(template_id, solution_id, payload):
    """Create or update the "email_body" variable field on a template.

    Reuses the existing element_id when an element is already present
    (update), otherwise a new element is created via the template service.

    :returns: status dict with msg and, when issued, the gateway job_id.
    """
    result = {"status": "failure"}
    existing = MongoDbConn.find_one(ELEMENTS_COLLECTION,
                                    {"solution_id": solution_id,
                                     "template_id": template_id,
                                     "is_deleted": False})
    field = dict(template_id=template_id, section_id="default", name="email_body",
                 type="variable_field", field_type="variable_field",
                 map_to=payload["email_body"], is_doc_var=False, has_label=False,
                 value_coordinates=dict(x1=0, x2=0, y1=0, y2=0), page_no=0)
    if existing is not None:
        field.update({"element_id": existing["element_id"]})
    field["token"] = "element"
    response = post_job(TEMPLATE_CONFIG["SAVE"],
                        {"solution_id": solution_id, "data": field})
    if 'job_id' in response:
        result["job_id"] = response["job_id"]
    if is_request_timeout(response):
        result["msg"] = "Request timed out"
        return result
    ok, msg = get_response(response)
    if ok:
        result["status"] = "success"
        result["msg"] = "Fields created/updated successfully"
    else:
        result["msg"] = msg
    return result
def entity_save(solution_id, entity_definitions, config):
    """Create/update domain objects for a solution.

    Handles both bulk import (entity_definitions["saveType"] == "import") and
    interactive edits, including validated rename/removal of existing domain
    objects (which are deleted after the new definitions are saved).

    :returns: status dict; includes the gateway job id when one was issued.
    """
    job_id = None
    delete_domain_object = []
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        if "saveType" in entity_definitions.keys() and entity_definitions["saveType"] == "import":
            entity_definitions.pop("saveType")
            return process_uploaded_entities(solution_id, entity_definitions, config)
        # Renames/removals must be validated before anything is saved.
        if "old_domain_name" in entity_definitions or "entity_removed" in entity_definitions:
            valid, delete_domain_object = process_deletion(
                entity_definitions, delete_domain_object, solution_id)
            if not valid:
                return invalid_edit_msg()
        updated_entities = convert_heirarchial_to_flat(entity_definitions["entity_cfg"])
        valid = validate_entity_updates(solution_id, updated_entities)
        if not valid:
            return invalid_edit_msg()
        entity_definitions["entity_cfg"] = updated_entities
        response = post_job(config['SAVE'], {
            "solution_id": solution_id,
            "data": entity_definitions
        })
        if 'job_id' in response:
            job_id = response['job_id']
        if not is_request_timeout(response):
            status, result = get_response(response)
            # Calculating failed-entities status and error message
            failed_entity_status, err_msg = getFailedEntityStatus(response)
            # Integrating failed-entities status as well as the error message
            if status and failed_entity_status:
                if delete_domain_object:
                    result = entity_delete(delete_domain_object, solution_id, config,
                                           validated=True)
                    if result["status"] != "success":
                        return {
                            "status": "failure",
                            "msg": "New domain object saved successfully but "
                                   "failed to delete the old domain object",
                            'job_id': job_id
                        }
                return {
                    'status': 'success',
                    'msg': "Domain Objects created/updated successfully",
                    'job_id': job_id
                }
            else:
                # BUG FIX: the original message string literal was broken
                # across a line; reconstructed as a single clean sentence.
                return {
                    'status': 'failure',
                    'msg': "Error while saving Domain Objects. " + err_msg,
                    'error': result,
                    'job_id': job_id
                }
        else:
            return {
                'status': 'failure',
                'msg': 'Request timeout',
                "error": response,
                'job_id': job_id
            }
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        # BUG FIX: "occured" -> "occurred" in the user-facing message.
        failure = {'status': 'failure', 'msg': "Internal Error occurred", "error": str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def test_is_request_timeout():
    """is_request_timeout flags gateway 'Timeout' responses and nothing else."""
    timed_out = {'msg': 'Timeout'}
    completed = {'status': 'success'}
    assert is_request_timeout(timed_out) is True
    assert is_request_timeout(completed) is False
def process_action_default(solution_id, payload, request):
    """Create a new model from an existing training set and record it.

    Looks up the training set, posts a train-model job for it, and on
    success inserts a (disabled, unpublished) model document into the
    models collection.  Returns a status dict.
    """
    training_set_id = payload['training_set_id']
    training_set = MongoDbConn.find_one(TRAINING_SET_COLLECTION,
                                        {'_id': ObjectId(training_set_id)})
    if training_set is None:
        return {
            'status': 'failure',
            'msg': 'Selected training set is not available'
        }
    # Pick the right training endpoint for the model type.
    if payload['type'] == 'action_classifier':
        uri = TRAINING_SET_ACTION_CLASSIFIER_URI
    else:
        uri = TRAINING_SET_TRAIN_MODEL_URI
    # Prefer the recorded s3 key; otherwise derive it from the file name.
    file_name = AMAZON_AWS_KEY_PATH + training_set['file_name']
    if 's3_key' in training_set.keys():
        file_name = training_set['s3_key']
    response = post_train_model_job(solution_id, payload, file_name, uri)
    if is_request_timeout(response):
        return {
            'status': 'failure',
            'msg': 'Service is not running or taking more time to process',
            'error': response
        }
    if not is_message_published(response):
        status = response['result']['status']
        return {
            'status': 'failure',
            'msg': 'Error from service while creating model',
            'error': status['message']
        }
    record = dict()
    record['name'] = payload['name']
    record['model_id'] = response['result']['result']['metadata']['model_id']
    record['description'] = payload.get('description', '')
    record['type'] = payload['type']
    record['solution_id'] = get_solution_from_session(request)
    record['model_ref'] = [{
        'bucket_name': AMAZON_AWS_BUCKET,
        'key_name': AMAZON_AWS_KEY_PATH + training_set['file_name']
    }]
    record['is_published'] = False
    record['service'] = payload['service']
    record['created_ts'] = datetime.now()
    record['updated_ts'] = datetime.now()
    record['is_enabled'] = False
    record['is_deleted'] = False
    MongoDbConn.insert(TRAINING_SET_MODELS_COLLECTION, query=record)
    return {
        'status': 'success',
        'msg': payload['name'] + ' model created'
    }