Example #1
 def mark_inactive(self, req_criteria):
     succeeded, failed, job_ids, message = [], [], [], None
     if 'jobIDs' in req_criteria:
         job_ids = req_criteria["jobIDs"]
     else:
         return {
             "status": "FAILED",
             "message": "No job ids found",
             "succeeded": [],
             "failed": []
         }
     if job_ids:
         try:
             log_info("Marking jobs inactive......", None)
             job_details = self.get_job_details_bulk(req_criteria, True)
             if job_details:
                 if len(job_details) < len(job_ids):
                     failed = job_ids
                     message = "This user doesn't have access to either all or few of these jobs"
                 else:
                     for job in job_details:
                         job["active"] = False
                         self.update_job_details(job, False)
                         succeeded.append(str(job["jobID"]))
                         log_info("Job marked as inactive by the user", job)
             else:
                 failed = job_ids
                 message = "No jobs were found for these jobIDs"
             if failed:
                 return {
                     "status": "FAILED",
                     "message": message,
                     "succeeded": succeeded,
                     "failed": failed
                 }
             if len(succeeded) == len(job_ids):
                 message = "All jobs have been successfully marked inactive."
                 return {
                     "status": "SUCCESS",
                     "message": message,
                     "succeeded": succeeded,
                     "failed": failed
                 }
         except Exception as e:
             log_exception(
                 "Exception while marking jobs as inactive: " + str(e),
                 None, None)
             return {
                 "status": "FAILED",
                 "message": "Exception while marking inactive",
                 "succeeded": [],
                 "failed": job_ids
             }
     else:
         return {
             "status": "FAILED",
             "message": "Empty job IDs List",
             "succeeded": [],
             "failed": []
         }
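For illustration only, a call to mark_inactive might look like the sketch below; wfm_service, the user ID, and the job IDs are invented names, and the commented response mirrors the success dictionary returned above.

 req_criteria = {"userIDs": ["u-101"], "jobIDs": ["JOB-1", "JOB-2"]}
 result = wfm_service.mark_inactive(req_criteria)  # wfm_service: assumed service instance
 # Expected shape on the happy path:
 # {"status": "SUCCESS",
 #  "message": "All jobs have been successfully marked inactive.",
 #  "succeeded": ["JOB-1", "JOB-2"],
 #  "failed": []}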
Example #2
 def get_job_details_bulk(self, req_criteria, skip_pagination):
     try:
         criteria = {"metadata.userID": {"$in": req_criteria["userIDs"]}}
         if 'jobIDs' in req_criteria.keys():
             if req_criteria["jobIDs"]:
                 # Keep only non-empty job IDs, then add them to the filter.
                 jobIDs = [jobID for jobID in req_criteria["jobIDs"] if jobID]
                 if jobIDs:
                     criteria["jobID"] = {"$in": jobIDs}
         if 'workflowCodes' in req_criteria.keys():
             if req_criteria["workflowCodes"]:
                 # Keep only non-empty workflow codes, then add them to the filter.
                 wCodes = [
                     wCode for wCode in req_criteria["workflowCodes"] if wCode
                 ]
                 if wCodes:
                     criteria["workflowCode"] = {"$in": wCodes}
         if 'statuses' in req_criteria.keys():
             if req_criteria["statuses"]:
                 # Keep only non-empty statuses, then add them to the filter.
                 statuses = [
                     status for status in req_criteria["statuses"] if status
                 ]
                 if statuses:
                     criteria["status"] = {"$in": statuses}
         exclude = {'_id': False}
         if 'taskDetails' not in req_criteria.keys():
             exclude["taskDetails"] = False
         else:
             if req_criteria["taskDetails"] is False:
                 exclude["taskDetails"] = False
         if 'error' in req_criteria.keys():
             if req_criteria["error"] is False:
                 exclude["error"] = False
         if not skip_pagination:
             offset = req_criteria.get("offset", 0)
             limit = req_criteria.get("limit", int(page_default_limit))
             criteria["active"] = {"$ne": False}
             jobs = wfmrepo.search_job(criteria, exclude, offset, limit)
             total_jobs = wfmrepo.search_job(criteria, exclude, None, None)
             return {"count": len(total_jobs), "jobs": jobs}
         else:
             return wfmrepo.search_job(criteria, exclude, None, None)
     except Exception as e:
         log_exception("Exception while searching jobs: " + str(e), None, e)
         return None
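As a minimal sketch (the user ID, workflow code, and status values below are invented), this is roughly how a request is translated into the Mongo-style criteria and projection passed to wfmrepo.search_job:

 req_criteria = {
     "userIDs": ["u-101"],
     "workflowCodes": ["WF_A_TRANSLATION", ""],  # empty entries are dropped
     "statuses": ["COMPLETED"]
 }
 # With skip_pagination=False, the search arguments would be approximately:
 # criteria = {"metadata.userID": {"$in": ["u-101"]},
 #             "workflowCode": {"$in": ["WF_A_TRANSLATION"]},
 #             "status": {"$in": ["COMPLETED"]},
 #             "active": {"$ne": False}}
 # exclude  = {"_id": False, "taskDetails": False}
 # call     = wfmrepo.search_job(criteria, exclude, 0, page_default_limit)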
Example #3
 def process_sync(self, wf_input):
     try:
         ctx = wf_input
         order_of_execution = wfmutils.get_order_of_exc(
             wf_input["workflowCode"])
         tool_output = None
         previous_tool = None
         for tool_order in order_of_execution.keys():
             step_details = order_of_execution[tool_order]
             tool_details = step_details["tool"][0]
             log_info(
                 tool_details["name"] + log_msg_start + " jobID: " +
                 ctx["jobID"], ctx)
             if not tool_output:
                 tool_input = wfmutils.get_tool_input_sync(
                     tool_details["name"], None, None, wf_input)
             else:
                 tool_input = wfmutils.get_tool_input_sync(
                     tool_details["name"], previous_tool, tool_output, None)
             response = wfmutils.call_api(
                 tool_details["api-details"][0]["uri"], tool_input,
                 wf_input["metadata"]["userID"])
             error = self.validate_tool_response(response, tool_details,
                                                 wf_input)
             if error:
                 return error
             tool_output = response
             previous_tool = tool_details["name"]
             ctx["metadata"]["module"] = module_wfm_name
             tool_output["metadata"] = ctx["metadata"]
             log_info(
                 tool_details["name"] + log_msg_end + " jobID: " +
                 ctx["jobID"], ctx)
         client_output = self.get_wf_details_sync(None, tool_output, True,
                                                  None)
         self.update_job_details(client_output, False)
         log_info("Job COMPLETED, jobID: " + str(wf_input["jobID"]), ctx)
         return client_output
     except Exception as e:
         log_exception(
             "Exception while processing SYNC workflow: " + str(e),
             wf_input, e)
         error = post_error(
             "SYNC_WFLOW_ERROR",
             "Exception while processing the sync workflow: " + str(e), e)
         client_output = self.get_wf_details_sync(wf_input, None, True,
                                                  error)
         self.update_job_details(client_output, False)
         log_info("Job FAILED, jobID: " + str(wf_input["jobID"]), wf_input)
         return client_output
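The loop above assumes wfmutils.get_order_of_exc returns a dict keyed by step order; a rough sketch of that assumed shape, with invented values and only the keys these examples actually read:

 order_of_execution = {
     0: {"tool": [{
         "name": "TOKENISER",                                  # invented tool name
         "api-details": [{"uri": "http://tokeniser/api/v1"}],  # read by process_sync
         "kafka-input": [{"topic": "tokeniser-input"}]         # read by initiate_wf / manage_wf
     }]},
     1: {"tool": [{}]}  # next step, same structure
 }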
Example #4
 def update_errors(self, error):
     try:
         job_id = error["jobID"]
         job_details = wfmutils.get_job_details(job_id)
         job_details = job_details[0]
         if job_details["status"] == "FAILED" or job_details[
                 "status"] == "COMPLETED" or job_details[
                     "status"] == "INTERRUPTED":
             return None
         job_details["status"] = "FAILED"
         job_details["endTime"] = eval(
             str(time.time()).replace('.', '')[0:13])
         job_details["error"] = error
         self.update_job_details(job_details, False)
         log_info("Job FAILED: " + error["jobID"], error)
     except Exception as e:
         log_exception("Failed to update tool error: " + str(e), error, e)
Example #5
 def get_next_step_details(self, task_output):
     wf_code = task_output["workflowCode"]
     step_completed = task_output["stepOrder"]
     order_of_execution = wfmutils.get_order_of_exc(wf_code)
     try:
         next_step_details = order_of_execution[step_completed + 1]
         next_tool = next_step_details["tool"][0]
         next_task_input = wfmutils.get_tool_input_async(
             next_tool["name"], task_output["tool"], task_output, None)
         return next_task_input, next_tool
     except KeyError as e:
         log_exception("No next step found: " + str(e), task_output, e)
         return None
     except Exception as e:
         log_exception("Exception while fetching next step" + str(e),
                       task_output, e)
         return "EXC"
Example #6
 def initiate_wf(self, wf_input):
     try:
         order_of_execution = wfmutils.get_order_of_exc(
             wf_input["workflowCode"])
         first_step_details = order_of_execution[0]
         first_tool = first_step_details["tool"][0]
         input_topic = first_tool["kafka-input"][0]["topic"]
         first_tool_input = wfmutils.get_tool_input_async(
             first_tool["name"], None, None, wf_input)
         if first_tool_input is None:
             error = validator.get_error(
                 "INCOMPATIBLE_TOOL_SEQUENCE",
                 "The workflow contains incompatible steps.")
             client_output = self.get_wf_details_async(
                 wf_input, None, True, error)
             self.update_job_details(client_output, False)
             log_error("The workflow contains incompatible steps.",
                       wf_input, None)
             return None
         producer.push_to_queue(first_tool_input, input_topic)
         client_output = self.get_wf_details_async(wf_input, None, False,
                                                   None)
         self.update_job_details(client_output, False)
         wf_input["metadata"][
             "module"] = module_wfm_name  # FOR LOGGING ONLY.
         log_info(
             "Workflow: " + wf_input["workflowCode"] +
             " initiated for the job: " + wf_input["jobID"], wf_input)
         log_info(
             first_tool["name"] + log_msg_start + " jobID: " +
             wf_input["jobID"], wf_input)
     except Exception as e:
         log_exception(
             "Exception while initiating ASYNC workflow: " + str(e),
             wf_input, e)
         post_error_wf("WFLOW_INITIATE_ERROR",
                       "Exception while initiating workflow: " + str(e),
                       wf_input, e)
Example #7
0
 def manage_wf(self, task_output):
     try:
         job_id = task_output["jobID"]
         job_details = wfmutils.get_job_details(job_id)
         if not job_details:
             log_error(
                 "This job is not found in the system, jobID: " + job_id,
                 task_output, None)
             return None
         log_info(
             task_output["tool"] + log_msg_end + " jobID: " +
             task_output["jobID"], task_output)
         job_details = job_details[0]
         if job_details["status"] == "FAILED" or job_details[
                 "status"] == "COMPLETED" or job_details[
                     "status"] == "INTERRUPTED":
             log_error(
                 "The job is already completed/failed/interrupted, jobID: "
                 + job_id, task_output, None)
             return None
         if task_output["status"] != "FAILED":
             next_step_details = self.get_next_step_details(task_output)
             if next_step_details is not None:
                 if next_step_details == "EXC":
                     log_error("Job FAILED: " + task_output["jobID"],
                               task_output, None)
                     post_error_wf(
                         "NEXT_STEP_EXCEPTION",
                         "There was an error while fetching the next step for this wf",
                         task_output, None)
                     return None
                 client_output = self.get_wf_details_async(
                     None, task_output, False, None)
                 self.update_job_details(client_output, False)
                 next_step_input = next_step_details[0]
                 if next_step_input is None:
                     log_error(
                         "The workflow contains incompatible steps in sequence. Please check the wf config.",
                         task_output, None)
                     post_error_wf(
                         "INCOMPATIBLE_TOOL_SEQUENCE",
                         "The wf contains incompatible steps in sequence. Please check the wf config.",
                         task_output, None)
                     return None
                 next_tool = next_step_details[1]
                 step_completed = task_output["stepOrder"]
                 next_step_input["stepOrder"] = step_completed + 1
                 producer.push_to_queue(
                     next_step_input, next_tool["kafka-input"][0]["topic"])
                 log_info(
                     next_tool["name"] + log_msg_start + " jobID: " +
                     task_output["jobID"], task_output)
             else:
                 client_output = self.get_wf_details_async(
                     None, task_output, True, None)
                 self.update_job_details(client_output, False)
                 log_info("Job COMPLETED: " + task_output["jobID"],
                          task_output)
         else:  # Safety else block, in case module fails to push data to error topic
             log_error("Job FAILED: " + task_output["jobID"], task_output,
                       None)
             client_output = self.get_wf_details_async(
                 None, task_output, False, task_output["error"])
             self.update_job_details(client_output, False)
         #self.push_to_notifier(task_output)
     except Exception as e:
         log_exception(
             "Exception while managing the ASYNC workflow: " + str(e),
             task_output, e)
         post_error_wf("WFLOW_MANAGE_ERROR",
                       "Exception while managing workflow: " + str(e),
                       task_output, e)