def run_master_spec(spec_model, study):
    """Execute a BPMN specification against the given study without persisting
    anything to the database.

    Intended for the master specification, whose results should never be
    stored. Returns the data dict of the workflow's last task once the
    engine has run every automated step to completion.
    """
    timer = firsttime()
    files = SpecFileService().get_spec_files(spec_model.id, include_libraries=True)
    timer = sincetime('load Files', timer)
    workflow_spec = WorkflowProcessor.get_spec(files, spec_model.id)
    timer = sincetime('get spec', timer)
    try:
        workflow = BpmnWorkflow(workflow_spec, script_engine=WorkflowProcessor._script_engine)
        # Seed the engine's data with the study id; flag that this is NOT a
        # validation run so scripts behave as they would in production.
        workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
        workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
        timer = sincetime('get_workflow', timer)
        workflow.do_engine_steps()
        timer = sincetime('run steps', timer)
    except WorkflowException as we:
        # Surface engine failures as API errors tied to the offending task spec.
        raise ApiError.from_task_spec("error_running_master_spec", str(we), we.sender)
    if not workflow.is_completed():
        # The master spec must be fully automatic; a human task left it incomplete.
        raise ApiError("master_spec_not_automatic",
                       "The master spec should only contain fully automated tasks, it failed to complete.")
    return workflow.last_task.data
def hard_reset(self):
    """Rebuild this workflow from the latest specification files.

    Useful after a workflow specification changes: a brand-new engine
    workflow is constructed from the current spec, the existing workflow's
    data is carried over, and the engine steps are re-run before the new
    workflow replaces the old one on this processor.
    """
    self.spec_data_files = FileService.get_spec_data_files(
        workflow_spec_id=self.workflow_spec_id)
    spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
    workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
    # Carry the accumulated task data forward into the rebuilt workflow.
    workflow.data = self.bpmn_workflow.data
    try:
        workflow.do_engine_steps()
    except WorkflowException as we:
        raise ApiError.from_task_spec("hard_reset_engine_steps_error", str(we), we.sender)
    # Only swap in the new workflow once the engine steps ran cleanly.
    self.bpmn_workflow = workflow
def _add_all_workflow_specs_to_study(study_model: StudyModel):
    """Attach a workflow to the study for every spec it does not already have.

    Queries all non-master workflow specs whose ids are not yet associated
    with the study, creates a workflow model for each, and collects (rather
    than raises) any startup failures.

    Returns:
        A list of ApiError instances, one per spec whose workflow could not
        be created; empty when every workflow started cleanly.
    """
    existing_models = session.query(WorkflowModel).filter(WorkflowModel.study == study_model).all()
    existing_specs = [m.workflow_spec_id for m in existing_models]
    # .is_(False) is the SQLAlchemy idiom for a boolean-column comparison
    # (avoids the E712 `== False` form); generated SQL is unchanged.
    new_specs = session.query(WorkflowSpecModel). \
        filter(WorkflowSpecModel.is_master_spec.is_(False)). \
        filter(WorkflowSpecModel.id.notin_(existing_specs)). \
        all()
    errors = []
    for workflow_spec in new_specs:
        # Best-effort: one failing spec must not block the others, so
        # failures are accumulated and returned to the caller.
        try:
            StudyService._create_workflow_model(study_model, workflow_spec)
        except WorkflowTaskExecException as wtee:
            errors.append(ApiError.from_task("workflow_startup_exception", str(wtee), wtee.task))
        except WorkflowException as we:
            errors.append(ApiError.from_task_spec("workflow_startup_exception", str(we), we.sender))
    return errors
def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec, validate_only=False):
    """Load the engine workflow for a workflow model, creating it if needed.

    When the model already carries serialized workflow JSON, that state is
    deserialized against the given spec. Otherwise a fresh workflow is
    built and seeded with the study id and the validation flag. In either
    case the engine steps are run before the workflow is returned.
    """
    if workflow_model.bpmn_workflow_json:
        # Resume from previously persisted engine state.
        workflow = self._serializer.deserialize_workflow(
            workflow_model.bpmn_workflow_json, workflow_spec=spec)
    else:
        # First run: build a new workflow and seed its data dict.
        workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
        workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
        workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only
    try:
        workflow.do_engine_steps()
    except WorkflowException as we:
        raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender)
    return workflow
def create_lookup_model(workflow_model, task_spec_id, field_id):
    """Build (or rebuild) the lookup model backing an enum field.

    This is all really expensive, but should happen just once per file
    change. Determines whether the field's options come from a spreadsheet
    or from LDAP, wipes any stale lookup models for this workflow/field,
    and persists a fresh LookupFileModel that can later be used to locate
    the lookup data.

    Returns:
        The newly committed LookupFileModel.
    """
    # VERY expensive, Ludicrous for lookup / type ahead
    processor = WorkflowProcessor(workflow_model)
    spec, field = processor.find_spec_and_field(task_spec_id, field_id)

    # Clear out all existing lookup models for this workflow and field.
    stale_models = db.session.query(LookupFileModel) \
        .filter(LookupFileModel.workflow_spec_id == workflow_model.workflow_spec_id) \
        .filter(LookupFileModel.task_spec_id == task_spec_id) \
        .filter(LookupFileModel.field_id == field_id).all()
    # Delete one at a time to cause the required cascade of deletes.
    for stale in stale_models:
        db.session.delete(stale)

    if field.has_property(Task.FIELD_PROP_SPREADSHEET_NAME):
        # Use the contents of a file to populate enum field options.
        # NOTE(review): this only raises when BOTH value and label columns are
        # missing, yet the message demands all 3 properties — confirm intent.
        if not field.has_property(Task.FIELD_PROP_VALUE_COLUMN) and \
                not field.has_property(Task.FIELD_PROP_LABEL_COLUMN):
            raise ApiError.from_task_spec(
                "invalid_enum",
                "For enumerations based on an xls file, you must include 3 properties: %s, "
                "%s, and %s" % (Task.FIELD_PROP_SPREADSHEET_NAME,
                                Task.FIELD_PROP_VALUE_COLUMN,
                                Task.FIELD_PROP_LABEL_COLUMN),
                task_spec=spec)

        # Pull the spreadsheet out of the File Service and build the table.
        file_name = field.get_property(Task.FIELD_PROP_SPREADSHEET_NAME)
        value_column = field.get_property(Task.FIELD_PROP_VALUE_COLUMN)
        label_column = field.get_property(Task.FIELD_PROP_LABEL_COLUMN)
        latest_files = SpecFileService().get_spec_files(
            workflow_spec_id=workflow_model.workflow_spec_id, file_name=file_name)
        if not latest_files:
            raise ApiError(
                "invalid_enum",
                "Unable to locate the lookup data file '%s'" % file_name)
        data_file = latest_files[0]
        file_data = SpecFileService().get_spec_file_data(data_file.id).data
        lookup_model = LookupService.build_lookup_table(
            data_file.id, file_name, file_data, value_column, label_column,
            workflow_model.workflow_spec_id, task_spec_id, field_id)
    elif field.has_property(Task.FIELD_PROP_LDAP_LOOKUP):
        # Use the results of an LDAP request to populate enum field options.
        lookup_model = LookupFileModel(
            workflow_spec_id=workflow_model.workflow_spec_id,
            task_spec_id=task_spec_id,
            field_id=field_id,
            is_ldap=True)
    else:
        raise ApiError.from_task_spec(
            "unknown_lookup_option",
            "Lookup supports using spreadsheet or LDAP options, "
            "and neither of those was provided.", spec)

    db.session.add(lookup_model)
    db.session.commit()
    return lookup_model