Example #1
 def get_options_from_task_data(spiff_task, field):
     if not (field.has_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN)
             or field.has_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN)):
         raise ApiError.from_task(
             "invalid_enum",
             f"For enumerations based on task data, you must include 3 properties: "
             f"{Task.PROP_OPTIONS_DATA_NAME}, {Task.PROP_OPTIONS_DATA_VALUE_COLUMN}, "
             f"{Task.PROP_OPTIONS_DATA_LABEL_COLUMN}",
             task=spiff_task)
     prop = field.get_property(Task.PROP_OPTIONS_DATA_NAME)
     if prop not in spiff_task.data:
         raise ApiError.from_task(
             "invalid_enum",
             f"For enumerations based on task data, task data must have "
             f"a property called {prop}",
             task=spiff_task)
     # Get the enum options from the task data
     data_model = spiff_task.data[prop]
     value_column = field.get_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN)
     label_column = field.get_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN)
     items = data_model.items() if isinstance(data_model,
                                              dict) else data_model
     options = []
     for item in items:
         options.append({
             "id": item[value_column],
             "name": item[label_column],
             "data": item
         })
     return options
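The option-building loop at the end of this example is easy to see in isolation. Below is a minimal, self-contained sketch with hypothetical column names and row data (no SpiffWorkflow objects involved), showing the shape of what get_options_from_task_data returns:

    # Hypothetical row data that a task-data enum might point at.
    data_model = [
        {"id": 1, "label": "UVA", "type": "primary"},
        {"id": 2, "label": "NIH", "type": "federal"},
    ]
    value_column, label_column = "id", "label"   # assumed column property values

    options = [{"id": item[value_column], "name": item[label_column], "data": item}
               for item in data_model]
    # options == [{'id': 1, 'name': 'UVA', 'data': {...}},
    #             {'id': 2, 'name': 'NIH', 'data': {...}}]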
Example #2
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):

        # Get new status
        if 'new_status' in kwargs.keys() or len(args) > 0:
            if 'new_status' in kwargs.keys():
                new_status = kwargs['new_status']
            else:
                new_status = args[0]

            # Get ProgressStatus object for new_status
            try:
                progress_status = getattr(ProgressStatus, new_status)

            # Invalid argument
            except AttributeError as ae:
                raise ApiError.from_task(code='invalid_argument',
                                         message=f"We could not find a status matching `{new_status}`. Original message: {ae}.",
                                         task=task)

            # Set new status
            study_model = session.query(StudyModel).filter(StudyModel.id == study_id).first()
            study_model.progress_status = progress_status
            session.commit()

            return study_model.progress_status.value

        # Missing argument
        else:
            raise ApiError.from_task(code='missing_argument',
                                     message='You must include the new progress status when calling `set_study_progress_status` script. ',
                                     task=task)
Example #3
    def __update_study(self, task, study, *args):
        if len(args) < 1:
            raise ApiError.from_task("missing_argument",
                                     self.argument_error_message,
                                     task=task)

        for arg in args:
            try:
                field, value_lookup = arg.split(':')
            except ValueError:
                raise ApiError.from_task("invalid_argument",
                                         self.argument_error_message,
                                         task=task)

            value = task.workflow.script_engine.evaluate_expression(
                task, value_lookup)

            if field.lower() == "title":
                study.title = value
            elif field.lower() == "pi":
                study.primary_investigator_id = value
            else:
                raise ApiError.from_task("invalid_argument",
                                         self.argument_error_message,
                                         task=task)
Example #4
 def do_task(self, task, study_id, workflow_id, *args, **kwargs):
     check_study = self.pb.check_study(study_id)
     if check_study:
         return check_study
     else:
         raise ApiError.from_task(
             code='missing_check_study',
             message='There was a problem checking information for this study.',
             task=task)
Example #5
 def do_task_validate_only(self, task, study_id, workflow_id, *args,
                           **kwargs):
     study = StudyService.get_study(study_id)
     if study:
         return {"DETAIL": "Passed validation.", "STATUS": "No Error"}
     else:
         raise ApiError.from_task(
             code='bad_study',
             message=f'No study for study_id {study_id}',
             task=task)
Example #6
 def do_task(self, task, study_id, workflow_id, *args, **kwargs):
     irb_info = self.pb.get_irb_info(study_id)
     if irb_info:
         return irb_info
     else:
         raise ApiError.from_task(
             code='missing_irb_info',
             message=f'There was a problem retrieving IRB Info for study {study_id}.',
             task=task)
Example #7
    def __update_study(self, task, study, *args, **kwargs):
        if len(kwargs.keys()) < 1:
            raise ApiError.from_task("missing_argument", self.argument_error_message,
                                     task=task)

        for arg in kwargs.keys():
            if arg.lower() == "title":
                study.title = kwargs[arg]
            elif arg.lower() == "short_title":
                study.short_title = kwargs[arg]
            elif arg.lower() == "short_name":
                study.short_name = kwargs[arg]
            elif arg.lower() == "proposal_name":
                study.proposal_name = kwargs[arg]
            elif arg.lower() == "pi":
                study.primary_investigator_id = kwargs[arg]
            else:
                raise ApiError.from_task("invalid_argument", self.argument_error_message,
                                         task=task)
Example #8
 def process_document_deletion(doc_code, workflow_id, task):
     if DocumentService.is_allowed_document(doc_code):
         result = session.query(FileModel).filter(
             FileModel.workflow_id == workflow_id,
             FileModel.irb_doc_code == doc_code).all()
         if isinstance(result, list) and len(result) > 0 and isinstance(
                 result[0], FileModel):
             for file in result:
                 FileService.delete_file(file.id)
         else:
             raise ApiError.from_task(
                 code='no_document_found',
                 message=f'No document of type {doc_code} was found for this workflow.',
                 task=task)
     else:
         raise ApiError.from_task(
             code='invalid_document_code',
             message=f'{doc_code} is not a valid document code',
             task=task)
Example #9
def _verify_user_and_role(processor, spiff_task):
    """Assures the currently logged in user can access the given workflow and task, or
    raises an error.  """

    user = UserService.current_user(allow_admin_impersonate=True)
    allowed_users = WorkflowService.get_users_assigned_to_task(
        processor, spiff_task)
    if user.uid not in allowed_users:
        raise ApiError.from_task(
            "permission_denied",
            f"This task must be completed by '{allowed_users}', "
            f"but you are {user.uid}", spiff_task)
Example #10
    def run_predefined_script(self, task: SpiffTask, script, data):
        commands = shlex.split(script)
        path_and_command = commands[0].rsplit(".", 1)
        if len(path_and_command) == 1:
            module_name = "crc.scripts." + self.camel_to_snake(
                path_and_command[0])
            class_name = path_and_command[0]
        else:
            module_name = "crc.scripts." + path_and_command[
                0] + "." + self.camel_to_snake(path_and_command[1])
            class_name = path_and_command[1]
        try:
            mod = __import__(module_name, fromlist=[class_name])
            klass = getattr(mod, class_name)
            study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
            if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data:
                workflow_id = task.workflow.data[
                    WorkflowProcessor.WORKFLOW_ID_KEY]
            else:
                workflow_id = None

            if not isinstance(klass(), Script):
                raise ApiError.from_task(
                    "invalid_script",
                    "This is an internal error. The script '%s:%s' you called "
                    % (module_name, class_name) +
                    "does not properly implement the CRC Script class.",
                    task=task)
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                """If this is running a validation, and not a normal process, then we want to
                mimic running the script, but not make any external calls or database changes."""
                klass().do_task_validate_only(task, study_id, workflow_id,
                                              *commands[1:])
            else:
                klass().do_task(task, study_id, workflow_id, *commands[1:])
        except ModuleNotFoundError:
            raise ApiError.from_task("invalid_script",
                                     "Unable to locate Script: '%s:%s'" %
                                     (module_name, class_name),
                                     task=task)
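To make the module-resolution branch above concrete, here is a small standalone sketch. The script names are hypothetical, and camel_to_snake is a stand-in for the class's own helper (assumed to be a standard CamelCase-to-snake_case conversion):

    import re

    def camel_to_snake(name):
        # Stand-in for self.camel_to_snake (assumed behaviour).
        return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

    for script in ("StudyInfo", "tools.RequestApproval"):  # hypothetical script names
        path_and_command = script.rsplit(".", 1)
        if len(path_and_command) == 1:
            module_name = "crc.scripts." + camel_to_snake(path_and_command[0])
        else:
            module_name = ("crc.scripts." + path_and_command[0] + "." +
                           camel_to_snake(path_and_command[1]))
        print(script, "->", module_name)
    # StudyInfo             -> crc.scripts.study_info
    # tools.RequestApproval -> crc.scripts.tools.request_approval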
Example #11
    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):

        if 'new_status' in kwargs.keys() or len(args) > 0:
            if 'new_status' in kwargs.keys():
                new_status = kwargs['new_status']
            else:
                new_status = args[0]

            try:
                progress_status = getattr(ProgressStatus, new_status)

            except AttributeError as ae:
                raise ApiError.from_task(code='invalid_argument',
                                         message=f"We could not find a status matching `{new_status}`. Original message: {ae}",
                                         task=task)
            return progress_status.value

        else:
            raise ApiError.from_task(code='missing_argument',
                                     message='You must include the new status when calling `set_study_progress_status` script. '
                                             'The new status must be one of `in_progress`, `hold`, `open_for_enrollment`, or `abandoned`.',
                                     task=task)
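The getattr(ProgressStatus, new_status) pattern above simply maps the incoming string onto an enum member, and an unknown name surfaces as the AttributeError that gets wrapped in an ApiError. A minimal sketch with an assumed enum (member names taken from the error message above; the values are placeholders):

    from enum import Enum

    class ProgressStatus(Enum):
        # Assumed members, named after the statuses listed in the error message above.
        in_progress = 'in_progress'
        hold = 'hold'
        open_for_enrollment = 'open_for_enrollment'
        abandoned = 'abandoned'

    print(getattr(ProgressStatus, 'hold').value)   # 'hold'
    try:
        getattr(ProgressStatus, 'bogus')
    except AttributeError as ae:
        print(f"invalid_argument: {ae}")           # raised for unknown status names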
Example #12
    def _process_documentation(spiff_task):
        """Runs the given documentation string through the Jinja2 processor to inject data
        create loops, etc...  - If a markdown file exists with the same name as the task id,
        it will use that file instead of the documentation. """

        documentation = spiff_task.task_spec.documentation if hasattr(
            spiff_task.task_spec, "documentation") else ""

        try:
            doc_file_name = spiff_task.task_spec.name + ".md"
            data_model = FileService.get_workflow_file_data(
                spiff_task.workflow, doc_file_name)
            raw_doc = data_model.data.decode("utf-8")
        except ApiError:
            raw_doc = documentation

        if not raw_doc:
            return ""

        try:
            template = Template(raw_doc)
            return template.render(**spiff_task.data)
        except jinja2.exceptions.TemplateError as ue:
            raise ApiError.from_task(
                code="template_error",
                message="Error processing template for task %s: %s" %
                (spiff_task.task_spec.name, str(ue)),
                task=spiff_task)
        except TypeError as te:
            raise ApiError.from_task(
                code="template_error",
                message="Error processing template for task %s: %s" %
                (spiff_task.task_spec.name, str(te)),
                task=spiff_task)
        except Exception as e:
            app.logger.error(str(e), exc_info=True)
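The Jinja2 step in _process_documentation comes down to Template(raw_doc).render(**spiff_task.data). A tiny standalone render with made-up documentation and task data shows the mechanics:

    from jinja2 import Template

    # Hypothetical task documentation (markdown) and task data.
    raw_doc = "## Hello {{ name }}\n{% for s in studies %}* {{ s }}\n{% endfor %}"
    data = {"name": "Dan", "studies": ["Study A", "Study B"]}

    print(Template(raw_doc).render(**data))
    # ## Hello Dan
    # * Study A
    # * Study B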
Example #13
 def _add_all_workflow_specs_to_study(study_model:StudyModel):
     existing_models = session.query(WorkflowModel).filter(WorkflowModel.study == study_model).all()
     existing_specs = list(m.workflow_spec_id for m in existing_models)
     new_specs = session.query(WorkflowSpecModel). \
         filter(WorkflowSpecModel.is_master_spec == False). \
         filter(WorkflowSpecModel.id.notin_(existing_specs)). \
         all()
     errors = []
     for workflow_spec in new_specs:
         try:
             StudyService._create_workflow_model(study_model, workflow_spec)
         except WorkflowTaskExecException as wtee:
             errors.append(ApiError.from_task("workflow_startup_exception", str(wtee), wtee.task))
         except WorkflowException as we:
             errors.append(ApiError.from_task_spec("workflow_startup_exception", str(we), we.sender))
     return errors
Example #14
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        if 'search_workflow_id' in kwargs.keys() or len(args) > 0:
            if 'search_workflow_id' in kwargs.keys():
                search_workflow_id = kwargs['search_workflow_id']
            else:
                search_workflow_id = args[0]
            workflow_model = session.query(WorkflowModel).filter(
                WorkflowModel.id == search_workflow_id).first()
            if workflow_model:
                return workflow_model.status.value
            else:
                return f'No model found for workflow {search_workflow_id}.'

        else:
            raise ApiError.from_task(
                code='missing_argument',
                message='You must include a workflow_id when calling the `get_workflow_status` script.',
                task=task)
Example #15
    def test_spec(spec_id, required_only=False):
        """Runs a spec through it's paces to see if it results in any errors.
          Not fool-proof, but a good sanity check.  Returns the final data
          output form the last task if successful.

          required_only can be set to true, in which case this will run the
          spec, only completing the required fields, rather than everything.
          """

        workflow_model = WorkflowService.make_test_workflow(spec_id)

        try:
            processor = WorkflowProcessor(workflow_model, validate_only=True)
        except WorkflowException as we:
            WorkflowService.delete_test_data()
            raise ApiError.from_workflow_exception(
                "workflow_validation_exception", str(we), we)

        while not processor.bpmn_workflow.is_completed():
            try:
                processor.bpmn_workflow.do_engine_steps()
                tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
                for task in tasks:
                    if task.task_spec.lane is not None and task.task_spec.lane not in task.data:
                        raise ApiError.from_task(
                            "invalid_role",
                            f"This task is in a lane called '{task.task_spec.lane}', The "
                            f" current task data must have information mapping this role to "
                            f" a unique user id.", task)
                    task_api = WorkflowService.spiff_task_to_api_task(
                        task, add_docs_and_forms=True
                    )  # Assure we try to process the documentation, and raise those errors.
                    WorkflowService.populate_form_with_random_data(
                        task, task_api, required_only)
                    processor.complete_task(task)
            except WorkflowException as we:
                WorkflowService.delete_test_data()
                raise ApiError.from_workflow_exception(
                    "workflow_validation_exception", str(we), we)

        WorkflowService.delete_test_data()
        return processor.bpmn_workflow.last_task.data
Example #16
    def do_task_validate_only(self, task, study_id, workflow_id, *args,
                              **kwargs):
        if 'email_id' in kwargs or 'workflow_spec_id' in kwargs:
            subject = 'My Test Email'
            recipients = '*****@*****.**'
            content = "Hello"
            content_html = "<!DOCTYPE html><html><head></head><body><div><h2>Hello</h2></div></body></html>"
            email_model = EmailModel(subject=subject,
                                     recipients=recipients,
                                     content=content,
                                     content_html=content_html,
                                     timestamp=datetime.datetime.utcnow())
            return EmailModelSchema(many=True).dump([email_model])

        else:
            raise ApiError.from_task(
                code='missing_email_id',
                message='You must include an email_id or workflow_spec_id with the get_email_data script.',
                task=task)
Example #17
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        email_models = None
        email_data = None
        if 'email_id' in kwargs:
            email_models = session.query(EmailModel).filter(
                EmailModel.id == kwargs['email_id']).all()
        elif 'workflow_spec_id' in kwargs:
            email_models = session.query(EmailModel)\
                .filter(EmailModel.study_id == study_id)\
                .filter(EmailModel.workflow_spec_id == str(kwargs['workflow_spec_id']))\
                .all()
        else:
            raise ApiError.from_task(
                code='missing_email_id',
                message='You must include an email_id or workflow_spec_id with the get_email_data script.',
                task=task)

        if email_models:
            email_data = EmailModelSchema(many=True).dump(email_models)
        return email_data
Example #18
    def get_codes(self, task, args, kwargs):
        if 'code' in kwargs:
            if isinstance(kwargs['code'], list):
                codes = kwargs['code']
            else:
                codes = [kwargs['code']]
        else:
            codes = []
            for arg in args:
                if isinstance(arg, list):
                    codes.extend(arg)
                else:
                    codes.append(arg)

        if codes is None or len(codes) == 0:
            raise ApiError.from_task(
                "invalid_argument",
                "Please provide a valid document code to delete.  "
                "No valid arguments found.",
                task=task)
        return codes
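The flattening logic in get_codes is summarized by the small standalone sketch below (the document codes are made up; the real method also takes self and task, and raises ApiError instead of returning an empty list):

    def flatten_codes(args, kwargs):
        # Mirrors the branching in get_codes above, without the task/ApiError plumbing.
        if 'code' in kwargs:
            return kwargs['code'] if isinstance(kwargs['code'], list) else [kwargs['code']]
        codes = []
        for arg in args:
            if isinstance(arg, list):
                codes.extend(arg)
            else:
                codes.append(arg)
        return codes

    print(flatten_codes((), {'code': 'Study_Protocol_Document'}))   # ['Study_Protocol_Document']
    print(flatten_codes((['Doc_A', 'Doc_B'],), {}))                 # ['Doc_A', 'Doc_B']
    print(flatten_codes((), {}))                                    # [] -- the real method raises ApiError here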
Example #19
def _verify_user_and_role(processor, spiff_task):
    """Assures the currently logged in user can access the given workflow and task, or
    raises an error.
     Allow administrators to modify tasks, otherwise assure that the current user
     is allowed to edit or update the task. Will raise the appropriate error if user
     is not authorized. """

    if 'user' not in g:
        raise ApiError("logged_out",
                       "You are no longer logged in.",
                       status_code=401)

    if g.user.uid in app.config['ADMIN_UIDS']:
        return g.user.uid

    allowed_users = WorkflowService.get_users_assigned_to_task(
        processor, spiff_task)
    if g.user.uid not in allowed_users:
        raise ApiError.from_task(
            "permission_denied",
            f"This task must be completed by '{allowed_users}', "
            f"but you are {g.user.uid}", spiff_task)
Example #20
 def execute(self, task: SpiffTask, script, data):
     """
     Functions in two modes.
     1. If the command is preceded by #! then this is assumed to be a python script, and will
        attempt to load that python module and execute the do_task method on that script.  Scripts
        must be located in the scripts package and they must extend the script.py class.
     2. If not preceded by the #! this will attempt to execute the script directly and assumes it is
        valid Python.
     """
     # Shlex splits the whole string while respecting double quoted strings within
     if not script.startswith('#!'):
         try:
             super().execute(task, script, data)
         except SyntaxError as e:
             raise ApiError.from_task(
                 'syntax_error',
                 f'If you are running a pre-defined script, please'
                 f' precede the script with "#!", otherwise this is assumed to be'
                 f' pure python: {script}, {e.msg}',
                 task=task)
     else:
         self.run_predefined_script(
             task, script[2:], data)  # strip off the first two characters.
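A quick illustration of the two script forms the docstring describes; both script strings here are hypothetical, and only the dispatch decision is shown:

    # Hypothetical script-task bodies illustrating the two modes described above.
    for script in ("approved = review_count > 2",      # mode 2: plain Python
                   "#! SomeScript some_argument"):     # mode 1: predefined script
        if script.startswith('#!'):
            print("predefined ->", script[2:].strip())  # handed to run_predefined_script
        else:
            print("pure python ->", script)             # handed to the base script engine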
Example #21
 def do_engine_steps(self):
     try:
         self.bpmn_workflow.do_engine_steps()
     except WorkflowTaskExecException as we:
         raise ApiError.from_task("task_error", str(we), we.task)
Example #22
    def get_random_data_for_field(field, task):
        has_ldap_lookup = field.has_property(Task.PROP_LDAP_LOOKUP)
        has_file_lookup = field.has_property(Task.PROP_OPTIONS_FILE_NAME)
        has_data_lookup = field.has_property(Task.PROP_OPTIONS_DATA_NAME)
        has_lookup = has_ldap_lookup or has_file_lookup or has_data_lookup

        if field.type == "enum" and not has_lookup:
            # If it's a normal enum field with no lookup,
            # return a random option.
            if len(field.options) > 0:
                random_choice = random.choice(field.options)
                if isinstance(random_choice, dict):
                    return {'value': random_choice['id'], 'label': random_choice['name']}
                else:
                    # fixme: why is it sometimes an EnumFormFieldOption, and other times not?
                    # Assume it is an EnumFormFieldOption
                    return {
                        'value': random_choice.id,
                        'label': random_choice.name
                    }
            else:
                raise ApiError.from_task(
                    "invalid_enum", "You specified an enumeration field (%s),"
                    " with no options" % field.id, task)
        elif field.type == "autocomplete" or field.type == "enum":
            # If it has a lookup, get the lookup model from the spreadsheet or task data, then return a random option
            # from the lookup model
            lookup_model = LookupService.get_lookup_model(task, field)
            if has_ldap_lookup:  # All ldap records get the same person.
                return {
                    "label": "dhf8r",
                    "value": "Dan Funk",
                    "data": {
                        "uid": "dhf8r",
                        "display_name": "Dan Funk",
                        "given_name": "Dan",
                        "email_address": "*****@*****.**",
                        "department": "Depertment of Psychocosmographictology",
                        "affiliation": "Rousabout",
                        "sponsor_type": "Staff"
                    }
                }
            elif lookup_model:
                data = db.session.query(LookupDataModel).filter(
                    LookupDataModel.lookup_file_model == lookup_model).limit(
                        10).all()
                options = [{
                    "value": d.value,
                    "label": d.label,
                    "data": d.data
                } for d in data]
                return random.choice(options)
            else:
                raise ApiError.from_task(
                    "unknown_lookup_option",
                    "The settings for this auto complete field "
                    "are incorrect: %s " % field.id, task)
        elif field.type == "long":
            return random.randint(1, 1000)
        elif field.type == 'boolean':
            return random.choice([True, False])
        elif field.type == 'file':
            # fixme: produce something sensible for files.
            return random.randint(1, 100)
        elif field.type == 'files':
            return random.randrange(1, 100)
        else:
            return WorkflowService._random_string()
Example #23
    def create_lookup_model(workflow_model, field_id):
        """
        This is all really expensive, but should happen just once (per file change).

        Checks to see if the options are provided in a separate lookup table associated with the workflow, and if so,
        assures that data exists in the database, and returns a model that can be used to locate that data.

        Returns:  an array of LookupData, suitable for returning to the API.
        """
        processor = WorkflowProcessor(
            workflow_model
        )  # VERY expensive, Ludicrous for lookup / type ahead
        spiff_task, field = processor.find_task_and_field_by_field_id(field_id)

        # Clear out all existing lookup models for this workflow and field.
        existing_models = db.session.query(LookupFileModel) \
            .filter(LookupFileModel.workflow_spec_id == workflow_model.workflow_spec_id) \
            .filter(LookupFileModel.field_id == field_id).all()
        for model in existing_models:  # Do it one at a time to cause the required cascade of deletes.
            db.session.delete(model)

        #  Use the contents of a file to populate enum field options
        if field.has_property(Task.PROP_OPTIONS_FILE_NAME):
            if not (field.has_property(Task.PROP_OPTIONS_FILE_VALUE_COLUMN) or
                    field.has_property(Task.PROP_OPTIONS_FILE_LABEL_COLUMN)):
                raise ApiError.from_task(
                    "invalid_enum",
                    "For enumerations based on an xls file, you must include 3 properties: %s, "
                    "%s, and %s" % (Task.PROP_OPTIONS_FILE_NAME,
                                    Task.PROP_OPTIONS_FILE_VALUE_COLUMN,
                                    Task.PROP_OPTIONS_FILE_LABEL_COLUMN),
                    task=spiff_task)

            # Get the file data from the File Service
            file_name = field.get_property(Task.PROP_OPTIONS_FILE_NAME)
            value_column = field.get_property(
                Task.PROP_OPTIONS_FILE_VALUE_COLUMN)
            label_column = field.get_property(
                Task.PROP_OPTIONS_FILE_LABEL_COLUMN)
            latest_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                workflow_id=workflow_model.id,
                name=file_name)
            if len(latest_files) < 1:
                raise ApiError(
                    "invalid_enum",
                    "Unable to locate the lookup data file '%s'" % file_name)
            else:
                data_model = latest_files[0]

            lookup_model = LookupService.build_lookup_table(
                data_model, value_column, label_column,
                workflow_model.workflow_spec_id, field_id)

        #  Use the results of an LDAP request to populate enum field options
        elif field.has_property(Task.PROP_LDAP_LOOKUP):
            lookup_model = LookupFileModel(
                workflow_spec_id=workflow_model.workflow_spec_id,
                field_id=field_id,
                is_ldap=True)
        else:
            raise ApiError(
                "unknown_lookup_option",
                "Lookup supports using spreadsheet or LDAP options, "
                "and neither of those was provided.")
        db.session.add(lookup_model)
        db.session.commit()
        return lookup_model