Example #1
0
 def get_version_string(self):
     """Return the version string derived from this workflow's spec data files.

     Loads every spec data file tied to this workflow model and delegates to
     the private version-string helper.  NOTE: loading all the data models
     could become expensive; a deferred loader for the actual data may be
     worth adding later, but we are avoiding premature optimization.
     """
     # Use keyword arguments for consistency with the other
     # get_spec_data_files call sites in this file.
     file_data_models = FileService.get_spec_data_files(
         workflow_spec_id=self.workflow_model.workflow_spec_id,
         workflow_id=self.workflow_model.id)
     return WorkflowProcessor.__get_version_string_for_data_models(
         file_data_models)
Example #2
0
    def update_dependencies(self, spec_data_files):
        """Sync this workflow's dependency records with *spec_data_files*.

        Does nothing when the stored dependencies already match; otherwise
        the dependency list is rebuilt from scratch, one
        WorkflowSpecDependencyFile per data file.
        """
        current = FileService.get_spec_data_files(
            workflow_spec_id=self.workflow_model.workflow_spec_id,
            workflow_id=self.workflow_model.id)

        # Skip the write entirely if nothing changed.
        if current == spec_data_files:
            return

        # Replace the whole dependency collection in one assignment.
        self.workflow_model.dependencies = [
            WorkflowSpecDependencyFile(file_data_id=data_file.id)
            for data_file in spec_data_files
        ]
Example #3
0
 def hard_reset(self):
     """Recreate this workflow from the latest specification.

     Useful when a workflow specification changes: a brand-new BPMN workflow
     is built from the newest spec files, the existing workflow data is
     carried over, and the engine steps are re-run.

     Raises ApiError if the engine steps fail on the rebuilt workflow.
     """
     self.spec_data_files = FileService.get_spec_data_files(
         workflow_spec_id=self.workflow_spec_id)
     latest_spec = WorkflowProcessor.get_spec(self.spec_data_files,
                                              self.workflow_spec_id)
     rebuilt_workflow = BpmnWorkflow(latest_spec,
                                     script_engine=self._script_engine)
     # Carry the existing data forward into the rebuilt workflow.
     rebuilt_workflow.data = self.bpmn_workflow.data
     try:
         rebuilt_workflow.do_engine_steps()
     except WorkflowException as err:
         raise ApiError.from_task_spec("hard_reset_engine_steps_error",
                                       str(err), err.sender)
     # Only swap in the new workflow once the engine steps succeeded.
     self.bpmn_workflow = rebuilt_workflow
Example #4
0
    def process_template(self, task, study_id, workflow=None, *args, **kwargs):
        """Entry point, mostly worried about wiring it all up.

        Expects 2 or 3 script arguments:
          args[0] - name of the docx template file stored with the workflow spec.
          args[1] - document code for the generated file (as listed in the
                    reference document list).  NOTE(review): args[1] is not
                    consumed in this method; presumably the caller uses it to
                    file the result - confirm.
          args[2] - (optional) name of the file/files field(s) whose images
                    should be embedded in the template.

        Raises ApiError for a bad argument count, a task/study mismatch, or a
        template file that cannot be located.
        """
        if len(args) < 2 or len(args) > 3:
            raise ApiError(
                code="missing_argument",
                message=
                "The CompleteTemplate script requires 2 arguments.  The first argument is "
                "the name of the docx template to use.  The second "
                "argument is a code for the document, as "
                "set in the reference document %s. " %
                FileService.DOCUMENT_LIST)
        task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
        file_name = args[0]

        # Guard against a task that belongs to a different study.
        if task_study_id != study_id:
            raise ApiError(
                code="invalid_argument",
                message="The given task does not match the given study.")

        file_data_model = None
        if workflow is not None:
            # Get the workflow specification file with the given name.
            file_data_models = FileService.get_spec_data_files(
                workflow_spec_id=workflow.workflow_spec_id,
                workflow_id=workflow.id,
                name=file_name)
            if len(file_data_models) > 0:
                file_data_model = file_data_models[0]
            else:
                # Fixed typo: "Uable" -> "Unable".
                raise ApiError(
                    code="invalid_argument",
                    message="Unable to locate a file with the given name.")

        # Without a workflow there is no template file to render; fail with a
        # clear error rather than crashing on file_data_model.data below.
        if file_data_model is None:
            raise ApiError(
                code="invalid_argument",
                message="A workflow is required to locate the template file.")

        # Get images from file/files fields
        if len(args) == 3:
            image_file_data = self.get_image_file_data(args[2], task)
        else:
            image_file_data = None

        return self.make_template(BytesIO(file_data_model.data), task.data,
                                  image_file_data)
Example #5
0
    def run_master_spec(spec_model, study):
        """Run a BPMN specification for *study* entirely in memory.

        Nothing is recorded to the database; this exists for the master
        specification, which must not persist.  The master spec has to run to
        completion on its own (fully automated tasks only).

        Returns the data dict of the workflow's last task.
        Raises ApiError if the engine fails or the workflow does not complete.
        """
        data_files = FileService.get_spec_data_files(spec_model.id)
        master_spec = WorkflowProcessor.get_spec(data_files, spec_model.id)
        try:
            workflow = BpmnWorkflow(
                master_spec, script_engine=WorkflowProcessor._script_engine)
            workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
            workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            workflow.do_engine_steps()
        except WorkflowException as err:
            raise ApiError.from_task_spec("error_running_master_spec",
                                          str(err), err.sender)

        # The master spec may not stop at a user task; it must finish.
        if not workflow.is_completed():
            raise ApiError(
                "master_spec_not_automatic",
                "The master spec should only contain fully automated tasks, it failed to complete."
            )

        return workflow.last_task.data
Example #6
0
 def get_latest_version_string_for_spec(spec_id):
     """Compute the version string from the newest data files of a spec."""
     return WorkflowProcessor.__get_version_string_for_data_models(
         FileService.get_spec_data_files(spec_id))
Example #7
0
    def __init__(self,
                 workflow_model: WorkflowModel,
                 soft_reset=False,
                 hard_reset=False,
                 validate_only=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model.

        If soft_reset is set to true, it will try to use the latest version of the workflow specification
            without resetting to the beginning of the workflow.  This will work for some minor changes to the spec.
        If hard_reset is set to true, it will use the latest spec, and start the workflow over from the beginning,
            which should work in cases where a soft reset fails.
        If neither flag is set, it will use the same version of the specification that was used to originally
        create the workflow model.

        Raises ApiError ("unexpected_workflow_structure") when the serialized
        workflow cannot be deserialized against the chosen spec version.
        """
        self.workflow_model = workflow_model

        # Choose which spec file versions to load: the latest set when a soft
        # reset was requested or the workflow was never started, otherwise the
        # exact set the workflow was created with.
        if soft_reset or len(
                workflow_model.dependencies
        ) == 0:  # Dependencies of 0 means the workflow was never started.
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id)
        else:
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                workflow_id=workflow_model.id)

        spec = self.get_spec(self.spec_data_files,
                             workflow_model.workflow_spec_id)
        self.workflow_spec_id = workflow_model.workflow_spec_id
        try:
            self.bpmn_workflow = self.__get_bpmn_workflow(
                workflow_model, spec, validate_only)
            self.bpmn_workflow.script_engine = self._script_engine

            # First time this workflow is processed: record the database id
            # inside the workflow data so scripts can find their model.
            if self.WORKFLOW_ID_KEY not in self.bpmn_workflow.data:
                if not workflow_model.id:
                    session.add(workflow_model)
                    session.commit()
                    # If the model is new, and has no id, save it, write it into the workflow model
                    # and save it again.  In this way, the workflow process is always aware of the
                    # database model to which it is associated, and scripts running within the model
                    # can then load data as needed.
                self.bpmn_workflow.data[
                    WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
                workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(
                    self.bpmn_workflow)
                self.save()

        except MissingSpecError as ke:
            raise ApiError(
                code="unexpected_workflow_structure",
                message="Failed to deserialize workflow"
                " '%s' version %s, due to a mis-placed or missing task '%s'" %
                (self.workflow_spec_id, self.get_version_string(), str(ke)) +
                " This is very likely due to a soft reset where there was a structural change."
            )
        if hard_reset:
            # Now that the spec is loaded, get the data and rebuild the bpmn with the new details
            self.hard_reset()
            workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(
                self.bpmn_workflow)
            self.save()
        if soft_reset:
            # Persist the re-deserialized workflow against the latest spec.
            self.save()

        # set whether this is the latest spec file.
        if self.spec_data_files == FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id):
            self.is_latest_spec = True
        else:
            self.is_latest_spec = False
Example #8
0
    def create_lookup_model(workflow_model, field_id):
        """
        This is all really expensive, but should happen just once (per file change).

        Checks to see if the options are provided in a separate lookup table associated with the workflow, and if so,
        assures that data exists in the database, and return a model than can be used to locate that data.

        Returns:  an array of LookupData, suitable for returning to the API.

        Raises ApiError when the field's enum configuration is incomplete,
        the lookup data file cannot be found, or neither a spreadsheet nor an
        LDAP lookup option is configured on the field.
        """
        processor = WorkflowProcessor(
            workflow_model
        )  # VERY expensive, Ludicrous for lookup / type ahead
        spiff_task, field = processor.find_task_and_field_by_field_id(field_id)

        # Clear out all existing lookup models for this workflow and field.
        existing_models = db.session.query(LookupFileModel) \
            .filter(LookupFileModel.workflow_spec_id == workflow_model.workflow_spec_id) \
            .filter(LookupFileModel.field_id == field_id).all()
        for model in existing_models:  # Do it one at a time to cause the required cascade of deletes.
            db.session.delete(model)

        #  Use the contents of a file to populate enum field options
        if field.has_property(Task.PROP_OPTIONS_FILE_NAME):
            # A file-based enum needs both the value and label columns along
            # with the file name; reject a partial configuration up front.
            if not (field.has_property(Task.PROP_OPTIONS_FILE_VALUE_COLUMN) or
                    field.has_property(Task.PROP_OPTIONS_FILE_LABEL_COLUMN)):
                raise ApiError.from_task(
                    "invalid_enum",
                    "For enumerations based on an xls file, you must include 3 properties: %s, "
                    "%s, and %s" % (Task.PROP_OPTIONS_FILE_NAME,
                                    Task.PROP_OPTIONS_FILE_VALUE_COLUMN,
                                    Task.PROP_OPTIONS_FILE_LABEL_COLUMN),
                    task=spiff_task)

            # Get the file data from the File Service
            file_name = field.get_property(Task.PROP_OPTIONS_FILE_NAME)
            value_column = field.get_property(
                Task.PROP_OPTIONS_FILE_VALUE_COLUMN)
            label_column = field.get_property(
                Task.PROP_OPTIONS_FILE_LABEL_COLUMN)
            latest_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                workflow_id=workflow_model.id,
                name=file_name)
            if len(latest_files) < 1:
                raise ApiError(
                    "invalid_enum",
                    "Unable to locate the lookup data file '%s'" % file_name)
            else:
                data_model = latest_files[0]

            # Build (or rebuild) the lookup table rows from the spreadsheet.
            lookup_model = LookupService.build_lookup_table(
                data_model, value_column, label_column,
                workflow_model.workflow_spec_id, field_id)

        #  Use the results of an LDAP request to populate enum field options
        elif field.has_property(Task.PROP_LDAP_LOOKUP):
            lookup_model = LookupFileModel(
                workflow_spec_id=workflow_model.workflow_spec_id,
                field_id=field_id,
                is_ldap=True)
        else:
            raise ApiError(
                "unknown_lookup_option",
                "Lookup supports using spreadsheet or LDAP options, "
                "and neither of those was provided.")
        # Commit both the deletions above and the new lookup model together.
        db.session.add(lookup_model)
        db.session.commit()
        return lookup_model