def get_ner_labels(self, solution_id):
    """Fetch the list of NER labels for a solution from the NER label service.

    :param solution_id: solution identifier used to scope the request
    :return: dict with status / msg / status_code, plus data and job_id
             when the job was accepted
    """
    try:
        outcome = {"status": "failure"}
        job_response = post_job(NER_LABEL_ENDPOINT['GET'],
                                {"solution_id": solution_id, "data": {}})
        if 'job_id' in job_response:
            outcome["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            outcome["status_code"] = STATUS_CODES['REQUEST_TIMEOUT']
            outcome["msg"] = "Request timed out"
            return outcome
        ok, detail = get_response(job_response)
        if ok:
            labels = get_nested_value(
                job_response, "result.result.metadata.ner_label_response")
            outcome["status"] = "success"
            outcome["data"] = labels
            outcome["msg"] = "NER labels fetched successfully"
            outcome['status_code'] = STATUS_CODES['OK']
        else:
            outcome["error"] = detail
            outcome['status_code'] = STATUS_CODES['NOT_FOUND']
            outcome["msg"] = "Error in retrieving the list of NER labels."
        return outcome
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {'status': 'failure',
                'msg': 'Internal error occurred while fetching '
                       'the NER LABELS list.',
                'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
                'error': str(e)}
def create_rules(payload, solution_id, config):
    """Validate a rule payload and create it through the configured endpoint.

    :param payload: rule definition to create
    :param solution_id: solution identifier
    :param config: dict with the endpoint ('EP') and result path ('DATA')
    :return: status dict; on service-level failure the raw failure payload
             is returned unchanged
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        if not is_rule_valid(payload):
            return {"status": "failure", "msg": "Invalid rule format.",
                    'job_id': job_id}
        response = post_job(config['EP'],
                            {"solution_id": solution_id, "data": payload})
        job_id = response.get('job_id')
        if is_request_timeout(response):
            return {"status": "failure", "msg": "Request timed out.",
                    'job_id': job_id}
        succeeded, outcome = get_response(response)
        if not succeeded:
            # service-level failure payload is handed back unchanged
            return outcome
        rule_id = get_nested_value(response, config["DATA"])
        return {"status": "success", "data": str(rule_id),
                "msg": "Successfully created rule.", 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "msg": str(e), "data": ""}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def get_rules(solution_id, config):
    """Retrieve every rule for a solution (empty filter) from the rule service.

    :param solution_id: solution identifier
    :param config: dict with the endpoint ('EP') and result path ('DATA')
    :return: status dict; on service-level failure the raw failure payload
             is returned unchanged
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        response = post_job(
            config["EP"],
            {"solution_id": solution_id, "data": {"filter_obj": {}}})
        job_id = response.get('job_id')
        if is_request_timeout(response):
            return {"status": "failure", "msg": "request timed out",
                    'job_id': job_id}
        succeeded, outcome = get_response(response)
        if not succeeded:
            return outcome
        rules = get_nested_value(response, config["DATA"])
        return {"status": "success", "data": rules,
                "msg": "Successfully retrieved rules", 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "data": [], "msg": str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def create_ontology(self, solution_id, payload):
    """Persist a new ontology for the solution via the ontology service.

    :param solution_id: solution identifier
    :param payload: ontology definition to save
    :return: dict with status / msg / status_code, plus data and job_id
             when the job was accepted
    """
    try:
        outcome = {"status": "failure"}
        job_response = post_job(ONTOLOGY_ENDPOINT['SAVE'],
                                {'solution_id': solution_id, 'data': payload})
        if 'job_id' in job_response:
            outcome["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            outcome["msg"] = "Request timed out"
            outcome['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            return outcome
        ok, detail = get_response(job_response)
        if ok:
            ontology = get_nested_value(
                job_response, "result.result.metadata.ontology")
            outcome["status"] = "success"
            outcome["data"] = ontology
            # message is taken from the service's own response payload
            outcome["msg"] = ontology["msg"]
            outcome['status_code'] = STATUS_CODES['OK']
        else:
            outcome["error"] = detail
            outcome["msg"] = "Error in creating the ontology."
            outcome['status_code'] = STATUS_CODES['NOT_FOUND']
        return outcome
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while creating '
                   'the ontology.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def create_new_session(solution_id, payload):
    """Create a new learning session for a solution.

    :param solution_id: solution identifier
    :param payload: request payload, formatted via ``format_save_model``;
        may carry optional ``resource_ids``
    :return: dict with status/msg plus data and job_id when available
    """
    result = {"status": "failure"}
    data_post = format_save_model(payload)
    if 'resource_ids' in payload:
        # NOTE: downstream service expects the key spelled "resources_ids"
        data_post.update({'resources_ids': payload['resource_ids']})
    data = {"solution_id": solution_id, "data": data_post, "metadata": {}}
    response = post_job(LEARNING_CONFIG['get_session'], data)
    if 'job_id' in response:
        result["job_id"] = response["job_id"]
    if not is_request_timeout(response):
        status, msg = get_response(response)
        if status:
            result["status"] = "success"
            result["data"] = get_nested_value(response,
                                              "result.result.metadata")
        else:
            # BUGFIX: the original condition tested the bare string
            # 'error_message' (always truthy) instead of membership in
            # msg['message'], so the first branch could raise KeyError.
            if ('message' in msg and 'error_message' in msg['message']
                    and "traceback" in msg['message']):
                result["error"] = msg['message']['traceback']
                result["msg"] = msg['message']['error_message']
            else:
                result["error"] = 'Some error occurred while processing the result'
                result["msg"] = "Error occurred while processing request"
    else:
        result["msg"] = "Request timed out"
    return result
def create_service(payload, solution_id):
    """Create a service definition through the insight service.

    :param payload: service definition payload
    :param solution_id: solution identifier
    :return: dict with status/msg/job_id (plus error details on failure)
    """
    job_id = None
    response = post_job(INSIGHT_CONFIG["create_service"], {
        "data": payload,
        "solution_id": solution_id
    })
    if 'job_id' in response:
        job_id = response["job_id"]
    if not is_request_timeout(response):
        status, result = get_response(response)
        if status:
            return {
                "status": "success",
                "msg": "Service created successfully",
                'job_id': job_id
            }
        else:
            # BUGFIX: failure message previously read "Failed to get service
            # keys" -- a copy-paste from get_service_key.
            return {
                "status": "failure",
                "msg": "Failed to create service",
                "data": {},
                'error': result,
                'job_id': job_id
            }
    else:
        return {
            "status": "failure",
            "msg": "Request Timeout",
            "data": {},
            'error': response,
            'job_id': job_id
        }
def get_model_components(solution_id, payload=None):
    """Fetch the ensemble strategy and per-model scores for a solution.

    :param solution_id: solution identifier
    :param payload: optional model filter; defaults to active models only
    :return: status dict with 'ensemble' and 'models' under 'data' on success
    """
    # Default filter: only active models.
    model_filter = {"is_active": True} if payload is None else payload
    request_body = {
        "solution_id": solution_id,
        "data": {"model": model_filter},
        "metadata": {}
    }
    outcome = {"status": "failure"}
    job_response = post_job(LEARNING_CONFIG["components"], request_body)
    if 'job_id' in job_response:
        outcome["job_id"] = job_response["job_id"]
    if is_request_timeout(job_response):
        outcome["msg"] = "Request timed out"
        return outcome
    ok, detail = get_response(job_response)
    if ok:
        metadata = get_nested_value(job_response, "result.result.metadata")
        outcome["status"] = "success"
        outcome["data"] = {
            "ensemble": metadata["ensemble_strategy"],
            "models": select_required_scores(metadata["models"])
        }
    else:
        outcome["error"] = detail
        outcome["msg"] = "Error in retrieving the model components"
    return outcome
def create_email_template(solution_id, payload):
    """Create the "email" template (with default sections) if it does not
    exist, otherwise update its elements.

    BUGFIX: the original returned ``temp_result.update(...)`` on every
    failure path -- ``dict.update`` returns None -- and the creation success
    path had no return at all, so callers indexing ``result["status"]``
    crashed with TypeError. Every path now returns a dict.

    :param solution_id: solution identifier
    :param payload: element payload used when the template already exists
    :return: status dict
    """
    job_id = None
    temp_result = {"status": "failure"}
    template = MongoDbConn.find_one(TEMPLATE_COLLECTION,
                                    {"solution_id": solution_id,
                                     "template_name": "email",
                                     "is_deleted": False})
    if template is None:
        # No email template yet: create one, then attach default sections.
        template_data = format_template_data(solution_id)
        response = post_job(TEMPLATE_CONFIG["SAVE"], template_data)
        if 'job_id' in response:
            job_id = response["job_id"]
        if is_request_timeout(response):
            temp_result.update({'msg': 'Request timed out',
                                'error': response, 'job_id': job_id})
            return temp_result
        status, result = get_response(response)
        if not status:
            temp_result.update({'msg': 'Failed to create template',
                                'error': result, 'job_id': job_id})
            return temp_result
        template_id = get_nested_value(response,
                                       "result.result.metadata.template_id")
        if not template_id:
            temp_result.update({'msg': 'Failed to create template',
                                'error': result, 'job_id': job_id})
            return temp_result
        section_result = create_new_section(template_id, solution_id,
                                            DEFAULT_SECTION)
        if section_result["status"] != "success":
            temp_result.update({'msg': 'Failed to create sections',
                                'error': section_result, 'job_id': job_id})
            return temp_result
        return {'status': 'success', 'job_id': job_id}
    # Template already exists: just refresh its elements.
    template_id = template["template_id"]
    element_result = update_elements(template_id, solution_id, payload)
    if element_result["status"] == "success":
        return {'status': 'success'}
    temp_result.update({'msg': 'Failed to create elements',
                        'error': element_result})
    return temp_result
def update_email_details(solution_id,payload):
    """Push a new email source configuration to the service, mirror it in
    Mongo, then create/refresh the email template.

    :param solution_id: solution identifier
    :param payload: email configuration payload
    :return: status dict (job_id included when the job was accepted)
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        data = dict(solution_id = solution_id,
                    data=dict(source_type="email",service_name="source",solution_id=solution_id,
                              configuration=payload))
        response = post_job(CONFIGURE_EMAIL,data)
        if 'job_id' in response:
            job_id = response["job_id"]
        if not is_request_timeout(response):
            status, result = get_response(response)
            if status:
                # Keep the local source record in sync with the service.
                MongoDbConn.update(SOURCE_COLLECTION,
                                   where_clause=dict(solution_id=solution_id,source_type="email"),
                                   query=data["data"])
                temp_result = create_email_template(solution_id,payload)
                # NOTE(review): guard against create_email_template returning
                # None before indexing -- verify its return contract.
                if temp_result["status"] == "success":
                    return {"status": "success", "msg": "Updated email details.",
                            'job_id': job_id}
                else:
                    return temp_result
            else:
                return {'status': 'failure', 'msg': 'Error in updating emails',
                        'error': result, 'job_id':job_id}
        else:
            return {'status': 'failure', 'msg': 'Request timeout',
                    "error": response, 'job_id':job_id}
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        if job_id:
            return {"status": "failure", "msg": "Failed to update details.",
                    'job_id':job_id}
        else:
            return {"status": "failure", "msg": "Failed to update details."}
    finally:
        context.end_span()
def get_service_key(solution_id):
    """Fetch the service keys registered for a solution.

    :param solution_id: solution identifier
    :return: status dict with the keys under 'data' on success
    """
    response = post_job(INSIGHT_CONFIG["service_keys"], {
        "data": {},
        "solution_id": solution_id
    })
    job_id = response.get('job_id')
    if is_request_timeout(response):
        return {"status": "failure", "msg": "Request Timeout", "data": {},
                'error': response, 'job_id': job_id}
    ok, detail = get_response(response)
    if not ok:
        return {"status": "failure", "msg": "Failed to get service keys",
                "data": {}, 'error': detail, 'job_id': job_id}
    service_keys = get_nested_value(response,
                                    "result.result.metadata.service_keys")
    return {"status": "success", "data": service_keys, 'job_id': job_id}
def feedback(payload, solution_id):
    """Submit user feedback after validating it against feedback_schema.

    :param payload: feedback document
    :param solution_id: solution identifier
    :return: status dict (job_id included when the job was accepted)
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        if not is_draft_valid(feedback_schema, payload):
            return {"status": "failure", "msg": "Invalid Json",
                    'job_id': job_id}
        response = post_job(FEEDBACK_ENDPOINT,
                            {"solution_id": solution_id,
                             "entity_id": DEFAULT_ENTITY_ID,
                             "data": payload})
        job_id = response.get('job_id')
        if is_request_timeout(response):
            return {"status": "failure", "msg": "request timeout",
                    "error": response, 'job_id': job_id}
        ok, detail = get_response(response)
        if ok:
            return {"status": "success", "msg": "feedback submitted",
                    'job_id': job_id}
        return {"status": "failure", "msg": "failed to submit feedback",
                "error": detail, 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {"status": "failure", "msg": "failed to submit feedback",
                   "error": str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def get_all_datasets(solution_id, payload=None):
    """List all datasets of a solution, optionally paginated by 'filter_obj'.

    :param solution_id: solution identifier
    :param payload: optional dict; its 'filter_obj' drives pagination
    :return: status dict with 'data' and 'total_datasets' on success
    """
    page_filter = None
    if payload and 'filter_obj' in payload:
        page_filter = payload['filter_obj']
    outcome = {"status": "failure"}
    job_response = post_job(LEARNING_CONFIG["datasets"],
                            {"solution_id": solution_id, "data": {},
                             "metadata": {}})
    if 'job_id' in job_response:
        outcome["job_id"] = job_response["job_id"]
    if is_request_timeout(job_response):
        outcome["msg"] = "Request timed out"
        return outcome
    ok, detail = get_response(job_response)
    if not ok:
        outcome["error"] = detail
        outcome["msg"] = "Error getting dataset lists"
        return outcome
    outcome["status"] = "success"
    datasets = get_nested_value(job_response,
                                "result.result.metadata.datasets")
    if page_filter:
        # paginate on the 'ts' field
        outcome["data"], outcome['total_datasets'] = implement_pagination(
            datasets, page_filter, 'ts')
    else:
        outcome["data"] = datasets
        outcome['total_datasets'] = len(datasets)
    return outcome
def entity_delete(payload, solution_id, config, validated=False):
    """Delete one or more entities (plus their sub-entities) from the service
    and mirror the removal in the local entity collection.

    :param payload: dict with 'entity_name' when validated is False,
        otherwise a list of entity names
    :param solution_id: solution identifier
    :param config: dict providing the 'DELETE' endpoint
    :param validated: skip the single-entity validation step when True
    :return: status dict
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        if not validated:
            domain_object = payload["entity_name"]
            valid = validate_entity_delete(solution_id, domain_object)
            if not valid:
                return invalid_edit_msg()
            entities_list = [domain_object]
        else:
            entities_list = payload
        for entities in entities_list:
            # NOTE(review): each iteration deep-copies the FULL entities_list
            # and posts a delete for all of it plus the current entity's
            # sub-entities -- confirm this repetition is intended.
            complete_list = deepcopy(entities_list)
            get_all_sub_entities(entities, solution_id, complete_list)
            data = {
                "solution_id": solution_id,
                "data": {
                    "filter_obj": complete_list
                }
            }
            # longer timeout: cascading deletes can be slow
            response = post_job(config["DELETE"], data, timeout=100)
            if 'job_id' in response:
                job_id = response['job_id']
            if not is_request_timeout(response):
                status, result = get_response(response)
                if status:
                    # Remove the local mirror records for everything deleted.
                    for ent in complete_list:
                        query = {"entity_name": ent, "solution_id": solution_id}
                        MongoDbConn.remove(ENTITY_COLLECTION, query)
                else:
                    return {
                        'status': 'failure',
                        'msg': 'Failed to remove',
                        'error': result,
                        'job_id': job_id
                    }
            else:
                return {
                    'status': 'failure',
                    'msg': 'Request timeout',
                    "error": response,
                    'job_id': job_id
                }
        return {
            'status': 'success',
            'msg': 'Successfully removed',
            'job_id': job_id
        }
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        if job_id:
            return {"status": "failure", "msg": str(e), 'job_id': job_id}
        else:
            return {"status": "failure", "msg": str(e)}
    finally:
        context.end_span()
def get_previous_run_details(request):
    """
    Return details of previous runs for a given model/version, newest first.

    :param request: Http request (POST with model_id and version_id)
    :return: result Json
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="ERROR")
    context.start_span(component=__name__)
    try:
        result = {"status": "failure"}
        if request.method != "POST":
            result["msg"] = 'POST api is expected'
            return result
        try:
            payload = json.loads(request.body.decode())
        except Exception:
            # BUGFIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; fall back to form data.
            payload = request.POST
        solution_id = get_solution_from_session(request)
        data = {
            "solution_id": solution_id,
            "data": {
                'model': {
                    'model_id': payload['model_id'],
                    'version_id': payload['version_id']
                }
            }
        }
        runs_result = post_job(LEARNING_CONFIG['get_prev_run'], data)
        if 'job_id' in runs_result:
            result["job_id"] = runs_result["job_id"]
        if not is_request_timeout(runs_result):
            status, msg = get_response(runs_result)
            if status:
                result["status"] = "success"
                runs_result = get_nested_value(
                    runs_result, "result.result.metadata.run_data")
                # newest first
                runs_result.sort(key=lambda f: f['update_ts'], reverse=True)
                result['data'] = runs_result
                result['total_runs'] = len(runs_result)
            else:
                if 'message' in msg and 'error_message' in msg['message']:
                    result["error"] = msg['message']['error_message']
                else:
                    result["error"] = 'Error'
                result[
                    "msg"] = "Error in retrieving the previous runs information"
        else:
            result["msg"] = "Request timed out"
        return result
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        # BUGFIX: message previously said "Binaries list not available."
        # (copy-paste from get_binaries).
        return {
            "status": "failure",
            "msg": "Previous run details not available.",
            "error": str(e)
        }
    finally:
        context.end_span()
def get_custom_functions_list(self, solution_id, payload):
    """
    This function will call the function service API to get the all custom
    functions and return the dict response with all required fields
    :param solution_id: Session solution id
    :param payload: request payload (optional 'searched_text' and 'filter_obj')
    :return: dict response
    """
    try:
        result = {"status": "failure"}
        searched_text, filter_obj = None, None
        res_data = {"solution_id": solution_id, "data": {}}
        if 'searched_text' in payload:
            searched_text = payload['searched_text']
        if 'filter_obj' in payload:
            filter_obj = payload['filter_obj']
        func_result = post_job(CUSTOM_FUNCTIONS_ENDPOINT['GET'], res_data)
        if 'job_id' in func_result:
            result["job_id"] = func_result["job_id"]
        if not is_request_timeout(func_result):
            status, msg = get_response(func_result)
            if status:
                result["status"] = "success"
                func_result = get_nested_value(
                    func_result,
                    "result.result.metadata.list_functions_response")
                # Text search (if any) is applied before pagination.
                if searched_text and searched_text.strip() != '':
                    func_result = self.get_filtered_result(
                        func_result, searched_text)
                if filter_obj and len(func_result) > 0:
                    # Paginate on 'updated_ts'; total is the pre-page count.
                    func_result, total_functions = implement_pagination(
                        func_result, filter_obj, 'updated_ts')
                    # NOTE(review): change_version is only applied on the
                    # paginated branch -- confirm that is intended.
                    func_result = self.change_version(func_result)
                else:
                    total_functions = len(func_result)
                result["data"] = func_result
                result['total_functions'] = total_functions
                result['status_code'] = STATUS_CODES['OK']
            else:
                result["error"] = msg
                result['status_code'] = STATUS_CODES['NOT_FOUND']
                result["msg"] = msg[
                    'message'] if "message" in msg else "Error in retrieving the list of custom function."
        else:
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            result["msg"] = "Request timed out"
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while fetching '
                   'the custom functions list.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def get_binaries(request):
    """
    List uploaded binaries for the session's solution (POST only), with
    optional pagination via 'filter_obj'.

    :param request: API request
    :return: json response
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="ERROR")
    context.start_span(component=__name__)
    try:
        result = {"status": "failure"}
        if request.method != "POST":
            result["msg"] = 'POST api is expected'
            return result
        try:
            payload = json.loads(request.body.decode())
        except Exception:
            # BUGFIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; fall back to form data.
            payload = request.POST
        filter_obj = payload['filter_obj'] if 'filter_obj' in payload else None
        solution_id = get_solution_from_session(request)
        data = {"solution_id": solution_id, "data": {}, 'metadata': {}}
        bin_result = post_job(LEARNING_CONFIG['get_binaries'], data)
        if 'job_id' in bin_result:
            result["job_id"] = bin_result["job_id"]
        if not is_request_timeout(bin_result):
            status, msg = get_response(bin_result)
            if status:
                result["status"] = "success"
                binaries = get_nested_value(
                    bin_result, "result.result.metadata.binaries")
                if filter_obj:
                    # paginate on 'created_ts'
                    result["data"], total_binaries = implement_pagination(
                        binaries, filter_obj, 'created_ts')
                else:
                    result['data'] = binaries
                    total_binaries = len(binaries)
                result['total_binaries'] = total_binaries
            else:
                result["error"] = msg
                result[
                    "msg"] = "Error in retrieving the binaries information"
        else:
            result["msg"] = "Request timed out"
        return result
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            "status": "failure",
            "msg": "Binaries list not available.",
            "error": str(e)
        }
    finally:
        context.end_span()
def enable_version_custom_function(self, solution_id, payload):
    """
    This function will call the function service API to enable the required
    custom function and disables the other versions of the custom function
    and return the response as dictionary
    :param solution_id: Session solution id
    :param payload: Http request payload (function_name, is_active,
        function_version)
    :return: Dictionary response
    """
    try:
        result = {"status": "failure"}
        res_data = {'solution_id': solution_id, 'data': {}}
        res_data["data"].update({
            'function_name': payload['function_name'],
            'is_active': payload['is_active'],
            'function_version': payload['function_version']
        })
        func_result = post_job(CUSTOM_FUNCTIONS_ENDPOINT['ENABLE_VERSION'],
                               res_data)
        if 'job_id' in func_result:
            result["job_id"] = func_result["job_id"]
        if not is_request_timeout(func_result):
            status, msg = get_response(func_result)
            if status:
                result["status"] = "success"
                func_result = get_nested_value(
                    func_result,
                    "result.result.metadata.enable_version_response")
                result["data"] = func_result
                # The raw service response is also used as the user-facing
                # message (e.g. "version enabled for the custom function.").
                result[
                    "msg"] = func_result  # "version enabled for the custom function."
                result['status_code'] = STATUS_CODES['OK']
            else:
                result["error"] = msg
                result["msg"] = msg[
                    'message'] if "message" in msg else "Error in enabling the custom function."
                result['status_code'] = STATUS_CODES['NOT_FOUND']
        else:
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while enabling '
                   'the custom function.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def upload_binary(uploaded_file, solution_id, payload):
    """
    Register a binary file with the learning service, either from a direct
    upload or from a file already present on the SFTP mount.
    :param uploaded_file: File to be uploaded (falsy when the file should be
        resolved from SFTP instead)
    :param solution_id: Session solution id
    :param payload: request payload (file_name, description, files)
    :return: response
    """
    result = {"status": "failure"}
    if uploaded_file:
        # Direct upload: persist the file under the mount path first.
        save_result = save_to_folder(solution_id, uploaded_file, MOUNT_PATH,
                                     "binaries", "uploads", flag=True)
    else:
        # No upload: resolve the path of the first listed SFTP file.
        save_result = dict(status="success", data={})
        sftp_data = get_mountpath_fromsftp(solution_id, payload["files"][0])
        save_result["data"]["file_path"] = sftp_data["file_path"]
    if save_result["status"] == "success":
        file_data = save_result["data"]
        dataset = [{
            "name": payload["file_name"],
            "description": payload["description"],
            "value": file_data["file_path"]
        }]
        data = {
            "solution_id": solution_id,
            "data": {
                "binaries": dataset
            },
            "metadata": {}
        }
        upload_result = post_job(LEARNING_CONFIG["upload_binary"], data)
        if 'job_id' in upload_result:
            result["job_id"] = upload_result["job_id"]
        if not is_request_timeout(upload_result):
            status, msg = get_response(upload_result)
            if status:
                result["status"] = "success"
                result["msg"] = "File uploaded successfully"
            else:
                if 'message' in msg and 'error_message' in msg['message']:
                    result["error"] = msg['message']['error_message']
                else:
                    result[
                        "error"] = 'Some error occurred while uploading the binary file'
                result["msg"] = "Error while uploading file"
        else:
            result["msg"] = "Request timed out"
    else:
        result["msg"] = "Internal error occurred in saving file"
    return result
def get_services(solution_id):
    """Retrieve the catalog services list for a solution.

    :param solution_id: solution identifier
    :return: status dict with the services metadata under 'data' on success
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        response = post_job(GET_CATALOG_SERVICE,
                            {"solution_id": solution_id,
                             "entity_id": DEFAULT_ENTITY_ID,
                             "data": {}})
        job_id = response.get('job_id')
        if is_request_timeout(response):
            return {"status": "failure", "msg": "Request Timeout",
                    "data": {}, 'error': response, 'job_id': job_id}
        ok, detail = get_response(response)
        if not ok:
            return {'status': "failure", "msg": "Failed to retrieve services",
                    "data": [], "error": detail, 'job_id': job_id}
        services = get_nested_value(response, "result.result.metadata")
        # check_service_version(resp)  # check service version change
        return {"status": 'success',
                "msg": "Successfully retrieved services list",
                "data": services, 'job_id': job_id}
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={'tb': traceback.format_exc()})
        failure = {'status': 'failure', 'msg': 'Request failed ' + str(e)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def template_train_trigger(request):
    """Trigger template training for the documents named in the request body.

    :param request: Http request whose JSON body carries 'template_id' and a
        non-empty 'documents' list
    :return: status dict (job_id included when the job was accepted)
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        solution_id = get_solution_from_session(request)
        payload = json.loads(request.body.decode())
        # BUGFIX: plain ``assert`` statements are stripped under ``python -O``,
        # silently disabling this validation; raise AssertionError explicitly
        # so the handler below keeps working in optimized mode too.
        if "template_id" not in payload:
            raise AssertionError("template_id missing in payload")
        if "documents" not in payload or len(payload["documents"]) == 0:
            raise AssertionError("documents missing or empty in payload")
        data = {"template_id": payload["template_id"],
                "documents": payload["documents"]}
        # payload to post
        payload = {"solution_id": solution_id, "data": data}
        response = post_job(TEMPLATE_TRAIN_TRIGGER_ENDPOINT, payload)
        if 'job_id' in response:
            job_id = response["job_id"]
        if not is_request_timeout(response):
            status, result = get_response(response)
            if status:
                return {"status": "success",
                        "msg": "triggered training successfully",
                        'job_id': job_id}
            return {"status": "failure", "msg": "failed to trigger training",
                    'error': result, 'job_id': job_id}
        return {"status": "failure", "msg": "request timeout",
                'error': response, 'job_id': job_id}
    # TODO raise specific exception
    except AssertionError as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        tb = traceback.format_exc()
        failure = {"status": "failure", "msg": "Assertion failed, " + str(e),
                   "traceback": str(tb)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        tb = traceback.format_exc()
        failure = {"status": "failure", "msg": "unknown error, " + str(e),
                   "traceback": str(tb)}
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def open_custom_function(self, solution_id, payload):
    """
    This function will call the function service API to return the Jupyter
    notebook path to open a particular custom function for editing purpose.
    :param solution_id: Session Solution Id
    :param payload: Http request Payload (function_name, function_version,
        is_fork)
    :return: Jupyter Notebook path for a particular custom function
    """
    try:
        outcome = {"status": "failure"}
        request_body = {
            'solution_id': solution_id,
            "data": {
                'function_name': payload['function_name'],
                'function_version': payload['function_version'],
                'is_fork': payload["is_fork"]
            }
        }
        job_response = post_job(CUSTOM_FUNCTIONS_ENDPOINT['OPEN'],
                                request_body)
        if 'job_id' in job_response:
            outcome["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            outcome["msg"] = "Request timed out"
            outcome['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            return outcome
        ok, detail = get_response(job_response)
        if ok:
            notebook = get_nested_value(
                job_response, "result.result.metadata.open_function_response")
            outcome["status"] = "success"
            outcome["data"] = notebook
            outcome['status_code'] = STATUS_CODES['OK']
        else:
            outcome["error"] = detail
            if "message" in detail:
                outcome["msg"] = detail['message']
            else:
                outcome["msg"] = "Error in opening the custom function."
            outcome['status_code'] = STATUS_CODES['NOT_FOUND']
        return outcome
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while opening '
                   'the custom function.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def delete_custom_function(self, solution_id, payload=None):
    """
    This method will call the function service API to delete the particular
    custom function version and return the response as dictionary
    :param solution_id: Session solution id
    :param payload: optional dict with the function_name and
        function_version of the version to delete
    :return: response as dictionary
    """
    try:
        result = {"status": "failure"}
        res_data = {'solution_id': solution_id, "data": {}}
        if payload:
            res_data["data"].update({
                'function_name': payload["function_name"],
                'function_version': payload['function_version']
            })
        func_result = post_job(CUSTOM_FUNCTIONS_ENDPOINT['DELETE'], res_data)
        if 'job_id' in func_result:
            result["job_id"] = func_result["job_id"]
        if not is_request_timeout(func_result):
            status, msg = get_response(func_result)
            if status:
                result["status"] = "success"
                func_result = get_nested_value(
                    func_result,
                    "result.result.metadata.delete_function_response")
                result["data"] = func_result
                result["msg"] = "deleted the custom function version."
                result['status_code'] = STATUS_CODES['OK']
            else:
                result["error"] = msg
                result["msg"] = "Error in deleting the custom function."
                result['status_code'] = STATUS_CODES['NOT_FOUND']
        else:
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while deleting '
                   'the custom function.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def get_evaluation_details(solution_id, payload):
    """Fetch model evaluation records for a model, newest first.

    :param solution_id: Solution Id
    :param payload: request Payload (must contain model_id)
    :return: Json
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="ERROR")
    context.start_span(component=__name__)
    try:
        outcome = {"status": "failure"}
        request_body = {
            "solution_id": solution_id,
            "data": {'model': {'model_id': payload['model_id']}}
        }
        job_response = post_job(LEARNING_CONFIG['get_evaluation'],
                                request_body)
        if 'job_id' in job_response:
            outcome["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            outcome["msg"] = "Request timed out"
            return outcome
        ok, detail = get_response(job_response)
        if not ok:
            if 'message' in detail and 'error_message' in detail['message']:
                outcome["error"] = detail['message']['error_message']
            else:
                outcome["error"] = 'Error'
            # NOTE: message text mirrors get_binaries (looks copy-pasted)
            outcome["msg"] = "Error in retrieving the binaries information"
            return outcome
        outcome["status"] = "success"
        records = get_nested_value(job_response,
                                   "result.result.metadata.evaluation_data")
        # Parse ISO timestamps so the sort below is chronological.
        for record in records:
            record['update_ts'] = datetime.strptime(
                record['update_ts'], '%Y-%m-%dT%H:%M:%S.%f')
        records.sort(key=lambda rec: rec['update_ts'], reverse=True)
        outcome['data'] = records
        return outcome
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            "status": "failure",
            "msg": "Binaries list not available.",
            "error": str(e)
        }
    finally:
        context.end_span()
def get_custom_functions_detail(self, solution_id, function_name=None):
    """
    This function will call the function service API to get the custom
    functions detail and return the dict response with all required fields
    :param solution_id: Session solution id
    :param function_name: Specific function name (all functions when None)
    :return: dict response
    """
    try:
        result = {"status": "failure"}
        res_data = {"solution_id": solution_id, "data": {}}
        if function_name:
            res_data["data"].update({'function_name': function_name})
        func_result = post_job(CUSTOM_FUNCTIONS_ENDPOINT['DETAILS'],
                               res_data)
        if 'job_id' in func_result:
            result["job_id"] = func_result["job_id"]
        if not is_request_timeout(func_result):
            status, msg = get_response(func_result)
            if status:
                result["status"] = "success"
                func_result = get_nested_value(
                    func_result,
                    "result.result.metadata.list_functions_response")
                # newest first (sorted in place on 'updated_ts')
                func_result.sort(key=lambda f: f['updated_ts'], reverse=True)
                total_functions = len(func_result)
                result["data"] = func_result
                result['total_functions'] = total_functions
                result['status_code'] = STATUS_CODES['OK']
            else:
                result["error"] = msg
                result["msg"] = msg[
                    'message'] if "message" in msg else "Error in retrieving the details of custom function."
                result['status_code'] = STATUS_CODES['NOT_FOUND']
        else:
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while fetching '
                   'the custom functions detail.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def train_test_model(solution_id, payload, request):
    """Submit a train / test / run job for a model.

    :param solution_id: solution identifier
    :param payload: request payload carrying the model/dataset fields
    :param request: job kind; 'run' is routed to the insight service, any
        other value is used as a LEARNING_CONFIG endpoint key
    :return: dict with status/msg plus data and job_id when available
    """
    result = {"status": "failure"}
    dataset_list = ["dataset_id"]
    if request == "train":
        model_list = ["model_type"]
    else:
        model_list = ["model_id", "version_id"]
    if "train" in request:
        # training additionally needs name/description and target columns
        model_list.extend(["name", "description"])
        dataset_list.append({"target_columns": "column"})
    model = create_dict(payload, model_list)
    model.update({"parameters": {}})
    if request == 'train':
        model.update({'is_integrated': False})
    dataset = create_dict(payload, dataset_list)
    data = {
        "solution_id": solution_id,
        "data": {
            "model": model,
            "dataset": dataset
        },
        "metadata": {}
    }
    if "resource_ids" in payload:
        # NOTE: downstream service expects the key spelled "resources_ids"
        data["data"]["resources_ids"] = payload['resource_ids']
    if request == 'run':
        data['data'].update({"request_type": "run_model"})
        train_result = post_job(INSIGHT_CONFIG["get_insight"], data)
    else:
        train_result = post_job(LEARNING_CONFIG[request], data)
    if 'job_id' in train_result:
        result["job_id"] = train_result["job_id"]
    if not is_request_timeout(train_result):
        status, msg = get_response(train_result)
        if status:
            result["status"] = "success"
            result["data"] = get_nested_value(train_result,
                                              "result.result.metadata")
        else:
            # BUGFIX: original tested the bare string 'error_message'
            # (always truthy) instead of membership in msg['message'],
            # so the first branch could raise KeyError.
            if ('message' in msg and 'error_message' in msg['message']
                    and "traceback" in msg['message']):
                result["error"] = msg['message']['traceback']
                result["msg"] = msg['message']['error_message']
            else:
                result["error"] = 'Some error occurred while processing the result'
                result["msg"] = "Error occurred while processing request"
    else:
        result["msg"] = "Request timed out"
    return result
def retrain_model(solution_id):
    """
    Submit a retrain request for the given solution.

    :param solution_id: Session solution id
    :return: dict with status, msg, and optional error / job_id fields
    """
    job_id = None
    context = tracer.get_context(request_id=str(uuid4()), log_level="INFO")
    context.start_span(component=__name__)
    try:
        response = post_job(RETRAIN_ENDPOINT,
                            {"solution_id": solution_id, "data": {}})
        if 'job_id' in response:
            job_id = response["job_id"]
        if is_request_timeout(response):
            return {
                "status": "failure",
                "msg": "request timeout",
                "error": response,
                'job_id': job_id
            }
        ok, outcome = get_response(response)
        if ok:
            return {
                "status": "success",
                "msg": "request for retrain submitted",
                'job_id': job_id
            }
        return {
            "status": "failure",
            "msg": "failed to request retrain",
            "error": outcome,
            'job_id': job_id
        }
    # TODO raise specific exception
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        failure = {
            "status": "failure",
            "msg": "failed to request retrain",
            "error": str(e)
        }
        # Only attach the job id when one was actually assigned.
        if job_id:
            failure['job_id'] = job_id
        return failure
    finally:
        context.end_span()
def get_ontology_versions(self, solution_id, id):
    """
    Fetch the version list for an ontology from the ontology service.

    :param solution_id: Session solution id
    :param id: Ontology id whose versions are requested
    :return: dict response with status, data and status_code fields
    """
    try:
        result = {"status": "failure"}
        job_response = post_job(
            ONTOLOGY_ENDPOINT["GET_DETAILS"],
            {"solution_id": solution_id, "data": {"id": id}})
        if 'job_id' in job_response:
            result["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            result["msg"] = "Request timed out"
            return result
        ok, err_info = get_response(job_response)
        if ok:
            versions = get_nested_value(
                job_response, "result.result.metadata.ontology_versions")
            result["status"] = "success"
            result["data"] = versions
            result["msg"] = "ontology versions fetched successfully"
            result['status_code'] = STATUS_CODES['OK']
        else:
            result["error"] = err_info
            result['status_code'] = STATUS_CODES['NOT_FOUND']
            result[
                "msg"] = "Error in retrieving the list of ontology versions"
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while fetching '
                   'the Ontology versions_list.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def test_custom_function(self, solution_id, payload):
    """
    Execute a custom function once via the function service and return
    the outcome as a dictionary.

    :param solution_id: Session solution id
    :param payload: Http request payload; an exec_id is generated and a
        missing "params" key is filled with {} before submission
    :return: response as dictionary
    """
    try:
        result = {"status": "failure"}
        payload["exec_id"] = str(uuid4())
        payload.setdefault("params", {})
        request_body = {'solution_id': solution_id, "data": {}}
        request_body["data"].update(payload)
        job_response = post_job(CUSTOM_FUNCTIONS_ENDPOINT['TEST'],
                                request_body)
        if 'job_id' in job_response:
            result["job_id"] = job_response["job_id"]
        if is_request_timeout(job_response):
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            return result
        ok, err_info = get_response(job_response)
        if ok:
            result["status"] = "success"
            result["data"] = get_nested_value(
                job_response, "result.result.metadata.execution_response")
            result['status_code'] = STATUS_CODES['OK']
        else:
            result["error"] = err_info
            result["msg"] = err_info[
                'message'] if "message" in err_info else "Error in testing the custom function."
            result['status_code'] = STATUS_CODES['NOT_FOUND']
        return result
    except Exception as e:
        self.context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {
            'status': 'failure',
            'msg': 'Internal error occurred while testing '
                   'the custom function.',
            'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
            'error': str(e)
        }
def upload_dataset(uploaded_file, solution_id, payload):
    """
    Persist an uploaded (or SFTP-referenced) dataset file and register it
    with the learning service.

    :param uploaded_file: In-memory upload, or a falsy value when the file
        is referenced via SFTP in payload["files"][0]
    :param solution_id: Session solution id
    :param payload: dict with file_name, description, format (and files
        when no direct upload is supplied)
    :return: dict with status, msg and optional job_id / error fields
    """
    result = {"status": "failure"}
    if uploaded_file:
        save_result = save_to_folder(solution_id, uploaded_file, MOUNT_PATH,
                                     "datasets", "uploads", flag=True)
    else:
        # No direct upload: resolve the first SFTP file reference instead.
        save_result = dict(status="success", data={})
        sftp_data = get_mountpath_fromsftp(solution_id, payload["files"][0])
        save_result["data"]["file_path"] = sftp_data["file_path"]
    if save_result["status"] != "success":
        result["msg"] = "Internal error occurred in saving file"
        return result
    dataset = {
        "name": payload["file_name"],
        "description": payload["description"],
        "data_format": payload["format"],
        "value": save_result["data"]["file_path"]
    }
    data = {
        "solution_id": solution_id,
        "data": {
            "dataset": dataset
        },
        "metadata": {}
    }
    upload_result = post_job(LEARNING_CONFIG["upload"], data)
    if 'job_id' in upload_result:
        result["job_id"] = upload_result["job_id"]
    if is_request_timeout(upload_result):
        result["msg"] = "Request timed out"
        return result
    ok, err_info = get_response(upload_result)
    if ok:
        result["status"] = "success"
        result["msg"] = "File uploaded successfully"
    else:
        result["error"] = err_info
        result["msg"] = "Error while uploading file"
    return result
def get_pipelines(request):
    """
    Fetch all pipeline services for the session's solution and return
    the dictionary as response.

    :param request: Http request (only GET is accepted)
    :return: dictionary as response
    """
    context = tracer.get_context(request_id=str(uuid4()), log_level="ERROR")
    context.start_span(component=__name__)
    try:
        result = {"status": "failure"}
        solution_id = get_solution_from_session(request)
        if request.method != 'GET':
            return {'status': 'failure',
                    'status_code': STATUS_CODES['BAD_REQUEST'],
                    'msg': 'GET request will be accepted.'}
        data_dict = {'solution_id': solution_id, 'data': {}}
        # NOTE: this endpoint is called directly via post() on the gateway
        # URI rather than through the post_job helper used elsewhere.
        response = post(API_GATEWAY_POST_JOB_URI + PIPELINE['GET_PIPELINE'],
                        data_dict)
        if 'job_id' in response:
            result["job_id"] = response["job_id"]
        if is_request_timeout(response):
            result["msg"] = "Request timed out"
            result['status_code'] = STATUS_CODES['REQUEST_TIMEOUT']
            return result
        ok, err_info = get_response(response)
        if ok:
            services = get_nested_value(response, "result.metadata.pipelines")
            result["status"] = "success"
            result['status_code'] = STATUS_CODES['OK']
            result["data"] = services
            result['total_services'] = len(services)
        else:
            result["error"] = err_info
            result['status_code'] = STATUS_CODES['NOT_FOUND']
            result["msg"] = "Error in retrieving the services information"
        return result
    except Exception as e:
        context.log(message=str(e), obj={"tb": traceback.format_exc()})
        return {'status': 'failure',
                'status_code': STATUS_CODES['INTERNAL_SERVER_ERROR'],
                'msg': 'Failed to fetch pipeline services group.'}
    finally:
        context.end_span()