Example #1
0
def add_study(body):
    """Create a new study (or any study-like object) from a request body.

    The body must include a 'title' and a 'primary_investigator_id';
    an ApiError is raised when either is missing.  Returns the serialized
    study, with any workflow-spec errors attached under 'errors'.
    """
    # Validate required fields up front, in a fixed order.
    for required, code, reason in (
            ('primary_investigator_id', 'missing_pi',
             "Can't create a new study without a Primary Investigator."),
            ('title', 'missing_title',
             "Can't create a new study without a title.")):
        if required not in body:
            raise ApiError(code, reason)

    new_study = StudyModel(
        user_uid=UserService.current_user().uid,
        title=body['title'],
        primary_investigator_id=body['primary_investigator_id'],
        last_updated=datetime.utcnow(),
        status=StudyStatus.in_progress)
    session.add(new_study)
    StudyService.add_study_update_event(new_study,
                                        status=StudyStatus.in_progress,
                                        event_type=StudyEventType.user,
                                        user_uid=g.user.uid)

    # Attach every workflow spec; collect (not raise) any resulting errors.
    spec_errors = StudyService._add_all_workflow_specs_to_study(new_study)
    session.commit()
    full_study = StudyService().get_study(new_study.id, do_status=True)
    result = StudySchema().dump(full_study)
    result["errors"] = ApiErrorSchema(many=True).dump(spec_errors)
    return result
    def find_spec_and_field(self, spec_name, field_id):
        """Track down a form field by name in the workflow spec(s).

        Searches the main workflow plus the workflows of any currently
        ready user tasks.

        Returns:
            A tuple of (task_spec, field).

        Raises:
            ApiError: if no task spec with the given name exists, if the
                matching spec has no form, or if its form has no such field.
        """
        workflows = [self.bpmn_workflow]
        for task in self.bpmn_workflow.get_ready_user_tasks():
            if task.workflow not in workflows:
                workflows.append(task.workflow)
        # FIX: removed the dead 'spec_found' local — it was assigned but
        # never read, since every branch below either returns or raises.
        for workflow in workflows:
            for spec in workflow.spec.task_specs.values():
                if spec.name != spec_name:
                    continue
                if not hasattr(spec, "form"):
                    raise ApiError(
                        "invalid_spec",
                        "The spec name you provided does not contain a form."
                    )

                for field in spec.form.fields:
                    if field.id == field_id:
                        return spec, field

                raise ApiError(
                    "invalid_field",
                    f"The task '{spec_name}' has no field named '{field_id}'"
                )

        raise ApiError(
            "invalid_spec",
            f"Unable to find a task in the workflow called '{spec_name}'")
Example #3
0
def delete_workflow_specification(spec_id):
    """Delete a workflow specification and every record that references it.

    Removes the spec's files, task events, and workflow instances before
    deleting the spec row itself, then commits the transaction.

    Raises:
        ApiError: if spec_id is None or no spec with that id exists.
    """
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Specification ID.')

    spec: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()

    if spec is None:
        raise ApiError(
            'unknown_spec',
            'The Workflow Specification "' + spec_id + '" is not recognized.')

    # Delete all items in the database related to the deleted workflow spec.
    files = session.query(FileModel).filter_by(workflow_spec_id=spec_id).all()
    for file in files:
        FileService.delete_file(file.id)

    # Remove task events tied to the spec before the workflows themselves.
    session.query(TaskEventModel).filter(
        TaskEventModel.workflow_spec_id == spec_id).delete()

    # Delete all events and workflow models related to this specification
    for workflow in session.query(WorkflowModel).filter_by(
            workflow_spec_id=spec_id):
        StudyService.delete_workflow(workflow)
    session.query(WorkflowSpecModel).filter_by(id=spec_id).delete()
    session.commit()
    def get_reference_file_data(self, file_name):
        """Load a reference file from disk and wrap it in a FileDataModel.

        Raises ApiError('file_not_found') when either the reference file
        record or the on-disk file is missing.
        """
        file_model = session.query(FileModel).filter(
            FileModel.name == file_name).filter(
                FileModel.is_reference == True).first()
        if file_model is None:
            raise ApiError(
                "file_not_found",
                "There is no reference file with the name '%s'" % file_name)

        file_path = self.get_reference_file_path(file_model.name)
        if not os.path.exists(file_path):
            raise ApiError(
                'file_not_found',
                f"There was no file in the location: {file_path}")

        mtime = os.path.getmtime(file_path)
        with open(file_path, 'rb') as f_open:
            contents = f_open.read()
        # Record the content hash and the file's modification time.
        return FileDataModel(
            data=contents,
            md5_hash=UUID(hashlib.md5(contents).hexdigest()),
            size=len(contents),
            date_created=datetime.datetime.fromtimestamp(mtime),
            file_model_id=file_model.id)
Example #5
0
    def build_lookup_table(data_model: FileDataModel, value_column,
                           label_column, workflow_spec_id, field_id):
        """ In some cases the lookup table can be very large.  This method will add all values to the database
         in a way that can be searched and returned via an api call - rather than sending the full set of
          options along with the form.  It will only open the file and process the options if something has
          changed.  """
        # Parse the spreadsheet straight from the stored file bytes.
        xls = ExcelFile(data_model.data)
        df = xls.parse(
            xls.sheet_names[0])  # Currently we only look at the fist sheet.
        # Normalize NaN cells to None so missing values serialize cleanly.
        df = pd.DataFrame(df).replace({np.nan: None})
        # Both the value and label columns must exist in the sheet.
        if value_column not in df:
            raise ApiError(
                "invalid_enum",
                "The file %s does not contain a column named % s" %
                (data_model.file_model.name, value_column))
        if label_column not in df:
            raise ApiError(
                "invalid_enum",
                "The file %s does not contain a column named % s" %
                (data_model.file_model.name, label_column))

        # One LookupFileModel row ties this field to its source file data.
        lookup_model = LookupFileModel(workflow_spec_id=workflow_spec_id,
                                       field_id=field_id,
                                       file_data_model_id=data_model.id,
                                       is_ldap=False)

        db.session.add(lookup_model)
        # Each spreadsheet row becomes a searchable LookupDataModel entry,
        # with the entire row preserved as an ordered dict in 'data'.
        for index, row in df.iterrows():
            lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                          value=row[value_column],
                                          label=row[label_column],
                                          data=row.to_dict(OrderedDict))
            db.session.add(lookup_data)
        db.session.commit()
        return lookup_model
Example #6
0
    def __update_study(self, task, study, *args):
        """Update fields on a study from 'field:expression' argument pairs.

        Each arg must look like 'title:<expr>' or 'pi:<expr>'; the
        expression is evaluated against the task data and the result is
        assigned to the corresponding study attribute.

        Raises:
            ApiError: if no args are given, an arg is malformed, or the
                field name is not 'title' or 'pi'.
        """
        if len(args) < 1:
            raise ApiError.from_task("missing_argument",
                                     self.argument_error_message,
                                     task=task)

        for arg in args:
            try:
                field, value_lookup = arg.split(':')
            # FIX: a bare 'except:' also swallowed KeyboardInterrupt and
            # SystemExit.  Only a malformed argument can legitimately fail
            # here: wrong number of ':' parts (ValueError on unpacking) or
            # a non-string arg (AttributeError on .split).
            except (ValueError, AttributeError):
                raise ApiError.from_task("invalid_argument",
                                         self.argument_error_message,
                                         task=task)

            value = task.workflow.script_engine.evaluate_expression(
                task, value_lookup)

            if field.lower() == "title":
                study.title = value
            elif field.lower() == "pi":
                study.primary_investigator_id = value
            else:
                raise ApiError.from_task("invalid_argument",
                                         self.argument_error_message,
                                         task=task)
Example #7
0
    def get_path(self, file_id: int):
        """Return the on-disk path for the file with the given id.

        Raises ApiError when either the file record or its workflow spec
        cannot be found.
        """
        # Assure we have a file.
        file_model = session.query(FileModel).filter(
            FileModel.id == file_id).first()
        if file_model is None:
            raise ApiError(
                code='model_not_found',
                message=f'No model found for file with file_id: {file_id}')

        # Assure we have a spec.
        spec_model = session.query(WorkflowSpecModel).filter(
            WorkflowSpecModel.id == file_model.workflow_spec_id).first()
        if spec_model is None:
            raise ApiError(
                code='spec_not_found',
                message=f'No spec found for file with file_id: '
                f'{file_model.id}, and spec_id: {file_model.workflow_spec_id}')

        # Path layout: <sync root>/<category>/<spec display name>/<file name>
        root = self.get_sync_file_root()
        category = self.get_spec_file_category_name(spec_model)
        return os.path.join(root, category, spec_model.display_name,
                            file_model.name)
    def run_master_spec(spec_model, study):
        """Executes a BPMN specification for the given study, without recording any information to the database
        Useful for running the master specification, which should not persist. """
        # 'firsttime'/'sincetime' are timing helpers used to log phase durations.
        lasttime = firsttime()
        spec_files = SpecFileService().get_spec_files(spec_model.id,
                                                      include_libraries=True)
        lasttime = sincetime('load Files', lasttime)
        spec = WorkflowProcessor.get_spec(spec_files, spec_model.id)
        lasttime = sincetime('get spec', lasttime)
        try:
            bpmn_workflow = BpmnWorkflow(
                spec, script_engine=WorkflowProcessor._script_engine)
            # Seed workflow data so scripts know which study they run against
            # and that this is not a validation run.
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
            bpmn_workflow.data[
                WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            lasttime = sincetime('get_workflow', lasttime)
            bpmn_workflow.do_engine_steps()
            lasttime = sincetime('run steps', lasttime)
        except WorkflowException as we:
            raise ApiError.from_task_spec("error_running_master_spec", str(we),
                                          we.sender)

        # The master spec must run start-to-finish with no human tasks.
        if not bpmn_workflow.is_completed():
            raise ApiError(
                "master_spec_not_automatic",
                "The master spec should only contain fully automated tasks, it failed to complete."
            )

        return bpmn_workflow.last_task.data
Example #9
0
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        """Reset the workflow identified by kwargs['reset_id'] for this study.

        Looks up the workflow spec named by reset_id, finds this study's
        workflow instance for that spec, and resets it while keeping its
        data and files.  Returns the resulting WorkflowProcessor.

        Raises:
            ApiError: when reset_id is missing, or no matching workflow
                spec or workflow model exists.
        """
        if 'reset_id' not in kwargs.keys():
            raise ApiError(code='missing_workflow_id',
                           message='Reset workflow requires a workflow id')

        reset_id = kwargs['reset_id']
        workflow_spec: WorkflowSpecModel = session.query(
            WorkflowSpecModel).filter_by(id=reset_id).first()
        if not workflow_spec:
            # FIX: the original messages used backslash line-continuations
            # inside the f-strings, which embedded long runs of indentation
            # spaces into the user-visible error text.
            raise ApiError(code='missing_workflow_spec',
                           message=f'No WorkflowSpecModel returned. '
                                   f'id: {workflow_id}')

        workflow_model: WorkflowModel = session.query(
            WorkflowModel).filter_by(workflow_spec_id=workflow_spec.id,
                                     study_id=study_id).first()
        if not workflow_model:
            raise ApiError(code='missing_workflow_model',
                           message=f'No WorkflowModel returned. '
                                   f'workflow_spec_id: {workflow_spec.id} '
                                   f'study_id: {study_id}')

        return WorkflowProcessor.reset(
            workflow_model, clear_data=False, delete_files=False)
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        """Set the study's progress status.

        The new status comes from kwargs['new_status'] or, failing that,
        the first positional argument.  Returns the stored status value.

        Raises:
            ApiError: when no status is supplied, or when the supplied name
                does not match a ProgressStatus member.
        """
        # Missing argument: neither kwarg nor positional status was given.
        if 'new_status' not in kwargs.keys() and len(args) == 0:
            raise ApiError.from_task(code='missing_argument',
                                     message='You must include the new progress status when calling `set_study_progress_status` script. ',
                                     task=task)

        new_status = kwargs['new_status'] if 'new_status' in kwargs.keys() else args[0]

        # Map the name onto a ProgressStatus member.
        try:
            progress_status = getattr(ProgressStatus, new_status)
        except AttributeError as ae:
            raise ApiError.from_task(code='invalid_argument',
                                     message=f"We could not find a status matching `{new_status}`. Original message: {ae}.",
                                     task=task)

        # Persist the new status on the study.
        study_model = session.query(StudyModel).filter(StudyModel.id == study_id).first()
        study_model.progress_status = progress_status
        session.commit()

        return study_model.progress_status.value
Example #11
0
def delete_workflow_specification(spec_id):
    """Delete a workflow specification and all records that depend on it.

    Deletes the spec's workflow models, files, and task events, removes the
    spec itself via session.delete (so SQLAlchemy cascades fire), and then
    re-packs the display order of the remaining specs in its category.

    Raises:
        ApiError: if spec_id is None or does not match a known spec.
    """
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Specification ID.')

    spec: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()

    if spec is None:
        raise ApiError(
            'unknown_spec',
            'The Workflow Specification "' + spec_id + '" is not recognized.')

    # FIX: read category_id only AFTER the None check above.  The original
    # dereferenced spec before verifying it exists, so unknown ids raised
    # AttributeError instead of the intended 'unknown_spec' ApiError.
    category_id = spec.category_id

    # Delete all workflow models related to this specification
    WorkflowService.delete_workflow_spec_workflow_models(spec_id)

    # Delete all files related to this specification
    WorkflowService.delete_workflow_spec_files(spec_id)

    # Delete all events related to this specification
    WorkflowService.delete_workflow_spec_task_events(spec_id)

    # .delete() doesn't work when we need a cascade. Must grab the record, and explicitly delete
    # (reuse the spec we already queried instead of fetching it again).
    session.delete(spec)
    session.commit()

    # Reorder the remaining specs
    WorkflowService.cleanup_workflow_spec_display_order(category_id)
Example #12
0
    def get_study_associate(study_id=None, uid=None):
        """Describe how a single uid relates to a study.

        Returns a StudyAssociated record.  The study owner gets a synthetic
        'owner' record; anyone else must have an explicit association row.

        Raises:
            ApiError: when the study doesn't exist, no uid is given, or the
                uid has no association with the study.
        """
        study = db.session.query(StudyModel).filter(
            StudyModel.id == study_id).first()
        if study is None:
            raise ApiError('study_not_found',
                           'No study found with id = %d' % study_id)
        if uid is None:
            raise ApiError('uid not specified',
                           'A valid uva uid is required for this function')

        # The owner is implicitly associated, with full access.
        if uid == study.user_uid:
            return StudyAssociated(uid=uid,
                                   role='owner',
                                   send_email=True,
                                   access=True)

        association = db.session.query(StudyAssociated).filter(
            (StudyAssociated.study_id == study_id)
            & (StudyAssociated.uid == uid)).first()
        if association is None:
            raise ApiError(
                'uid_not_associated_with_study',
                "user id %s was not associated with study number %d" %
                (uid, study_id))
        return association
Example #13
0
    def get_spec(file_data_models: List[FileDataModel], workflow_spec_id):
        """Returns a SpiffWorkflow specification for the given workflow spec,
        using the files provided.  The Workflow_spec_id is only used to generate
        better error messages."""
        parser = WorkflowProcessor.get_parser()
        process_id = None

        # Feed every BPMN/DMN file to the parser; the file flagged as
        # 'primary' determines which process id the spec is built from.
        for file_data in file_data_models:
            if file_data.file_model.type == FileType.bpmn:
                bpmn: etree.Element = etree.fromstring(file_data.data)
                if file_data.file_model.primary:
                    process_id = FileService.get_process_id(bpmn)
                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
            elif file_data.file_model.type == FileType.dmn:
                dmn: etree.Element = etree.fromstring(file_data.data)
                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
        if process_id is None:
            raise (ApiError(
                code="no_primary_bpmn_error",
                message="There is no primary BPMN model defined for workflow %s"
                % workflow_spec_id))
        try:
            spec = parser.get_spec(process_id)
        except ValidationException as ve:
            # Surface parser failures as an ApiError carrying the offending
            # file name, task id and tag for easier debugging.
            raise ApiError(
                code="workflow_validation_error",
                message="Failed to parse Workflow Specification '%s'" %
                workflow_spec_id + "Error is %s" % str(ve),
                file_name=ve.filename,
                task_id=ve.id,
                tag=ve.tag)
        return spec
Example #14
0
    def get_files(attachments, study_id):
        """Collect file metadata for the given doc code(s) across all of a
        study's workflows.

        'attachments' may be a single doc code (str) or a list of codes.
        Returns a list of dicts with 'id', 'name', and 'type'.

        Raises:
            ApiError: for an unknown doc code, or when attachments is
                neither a string nor a list.
        """
        if isinstance(attachments, str):
            codes = [attachments]
        elif isinstance(attachments, list):
            codes = attachments
        else:
            raise ApiError(
                code='bad_argument_type',
                message=
                'The attachments argument must be a string or list of strings')

        files = []
        for code in codes:
            if not DocumentService.is_allowed_document(code):
                raise ApiError(
                    code='bad_doc_code',
                    message=f'The doc_code {code} is not valid.')
            workflows = session.query(WorkflowModel).filter(
                WorkflowModel.study_id == study_id).all()
            for workflow in workflows:
                matches = session.query(FileModel).\
                    filter(FileModel.workflow_id == workflow.id).\
                    filter(FileModel.irb_doc_code == code).all()
                for file in matches:
                    files.append({
                        'id': file.id,
                        'name': file.name,
                        'type': CONTENT_TYPES[file.type.value]
                    })

        return files
Example #15
0
def add_file(workflow_id=None, task_spec_name=None, form_field_key=None):
    """Attach the uploaded request file to a workflow.

    Requires a workflow_id together with both form_field_key and
    task_spec_name.  Returns the serialized File record.

    Raises:
        ApiError: when workflow_id is absent, or either of the other two
            parameters is missing.
    """
    file = connexion.request.files['file']
    if not workflow_id:
        raise ApiError(
            "invalid_file",
            "You must supply either a workflow spec id or a workflow_id and form_field_key."
        )

    # Both of these are mandatory for a workflow-related file.
    for value, label in ((form_field_key, 'form_field_key'),
                         (task_spec_name, 'task_spec_name')):
        if value is None:
            raise ApiError(
                'invalid_workflow_file',
                'When adding a workflow related file, you must specify a '
                + label)

    file_model = FileService.add_workflow_file(
        workflow_id=workflow_id,
        irb_doc_code=form_field_key,
        task_spec_name=task_spec_name,
        name=file.filename,
        content_type=file.content_type,
        binary_data=file.stream.read())

    return FileSchema().dump(to_file_api(file_model))
Example #16
0
    def get_users_info(self, task, args):
        """Resolve each argument to an LDAP email address.

        Each arg names a task-data variable holding a user id.  Arguments
        the script engine cannot evaluate are logged and skipped.

        Returns a list of email address strings.

        Raises:
            ApiError: if no arguments are given, or a resolved email
                address is not a string.
        """
        if len(args) < 1:
            raise ApiError(
                code="missing_argument",
                message="Email script requires at least one argument.  The "
                "name of the variable in the task data that contains user"
                "id to process.  Multiple arguments are accepted.")
        emails = []
        for arg in args:
            try:
                uid = task.workflow.script_engine.evaluate_expression(
                    task, arg)
            except Exception:
                # Best-effort: log and skip arguments the engine can't parse.
                app.logger.error(f'Workflow engines could not parse {arg}',
                                 exc_info=True)
                continue
            user_info = LdapService.user_info(uid)
            email = user_info.email_address
            # FIX: validate BEFORE appending, and report the type of the bad
            # email value — the original interpolated emails.__class__.__name__,
            # which is always 'list' and never describes the offending value.
            if not isinstance(email, str):
                raise ApiError(
                    code="invalid_argument",
                    message=
                    "The Email script requires at least 1 UID argument.  The "
                    "name of the variable in the task data that contains subject and"
                    " user ids to process.  This must point to an array or a string, but "
                    "it currently points to a %s " % email.__class__.__name__)
            emails.append(email)

        return emails
Example #17
0
 def get_options_from_task_data(spiff_task, field):
     """Build an enum option list from a variable already in the task data.

     The field's properties name the task-data variable plus which item
     keys/columns supply each option's value and label.

     Returns a list of {'id', 'name', 'data'} dicts.

     Raises:
         ApiError: if the required field properties or the named task-data
             variable are missing.
     """
     # NOTE(review): the message says 3 properties are required, but this
     # check only fails when BOTH value and label columns are absent, and
     # PROP_OPTIONS_DATA_NAME itself is never checked — confirm intent.
     if not (field.has_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN)
             or field.has_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN)):
         raise ApiError.from_task(
             "invalid_enum",
             f"For enumerations based on task data, you must include 3 properties: "
             f"{Task.PROP_OPTIONS_DATA_NAME}, {Task.PROP_OPTIONS_DATA_VALUE_COLUMN}, "
             f"{Task.PROP_OPTIONS_DATA_LABEL_COLUMN}",
             task=spiff_task)
     prop = field.get_property(Task.PROP_OPTIONS_DATA_NAME)
     if prop not in spiff_task.data:
         raise ApiError.from_task(
             "invalid_enum",
             f"For enumerations based on task data, task data must have "
             f"a property called {prop}",
             task=spiff_task)
     # Get the enum options from the task data
     data_model = spiff_task.data[prop]
     value_column = field.get_property(Task.PROP_OPTIONS_DATA_VALUE_COLUMN)
     label_column = field.get_property(Task.PROP_OPTIONS_DATA_LABEL_COLUMN)
     # NOTE(review): for a dict this iterates (key, value) tuples, so
     # value_column/label_column would have to be integer indexes 0/1 for
     # item[...] below to work — confirm against callers.
     items = data_model.items() if isinstance(data_model,
                                              dict) else data_model
     options = []
     for item in items:
         options.append({
             "id": item[value_column],
             "name": item[label_column],
             "data": item
         })
     return options
    def validate_kw_args(self, **kwargs):
        """Check that the 'key', 'file_id', and 'value' kwargs are present,
        and that an 'irb_code' key carries a recognized document code.

        Returns True when valid.

        Raises:
            ApiError: when a required kwarg is missing or the irb_code
                value is not a known document.
        """
        # Each of these keyword arguments is mandatory, checked in order.
        for required in ('key', 'file_id', 'value'):
            if kwargs.get(required, None) is None:
                raise ApiError(
                    code="missing_argument",
                    message=
                    f"The 'file_data_get' script requires a keyword argument of '{required}'"
                )

        if (kwargs['key'] == 'irb_code'
                and not DocumentService.is_allowed_document(
                    kwargs.get('value'))):
            raise ApiError(
                "invalid_form_field_key",
                "When setting an irb_code, the form field id must match a known document in the "
                "irb_docunents.xslx reference file.  This code is not found in that file '%s'"
                % kwargs.get('value'))

        return True
Example #19
0
    def update_workflow_spec_file_model(workflow_spec: WorkflowSpecModel,
                                        file_model: FileModel, binary_data,
                                        content_type):
        """Refresh a spec file's metadata from newly uploaded content.

        Validates the file extension, unarchives the record, and — for BPMN
        files — extracts the primary process id and the swimlane/review
        flag.  Commits and returns the updated FileModel.

        Raises:
            ApiError: for an unrecognized extension (404) or unparseable XML.
        """
        # The extension must map onto a known FileType member.
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError(
                'unknown_extension',
                'The file you provided does not have an accepted extension:' +
                file_extension,
                status_code=404)

        file_model.type = FileType[file_extension]
        file_model.content_type = content_type
        file_model.archived = False  # Unarchive the file if it is archived.

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            try:
                bpmn: etree.Element = etree.fromstring(binary_data)
                file_model.primary_process_id = SpecFileService.get_process_id(
                    bpmn)
                file_model.is_review = FileService.has_swimlane(bpmn)
            except etree.XMLSyntaxError as xse:
                raise ApiError("invalid_xml",
                               "Failed to parse xml: " + str(xse),
                               file_name=file_model.name)

        session.add(file_model)
        session.commit()

        return file_model
    def validate_kw_args(self, **kwargs):
        """Ensure the mandatory 'key' and 'file_id' kwargs were supplied.

        Returns True when both are present; raises ApiError otherwise.
        """
        for required in ('key', 'file_id'):
            if kwargs.get(required, None) is None:
                raise ApiError(code="missing_argument",
                               message=f"The 'file_data_get' script requires a keyword argument of '{required}'")
        return True
Example #21
0
    def _update_status_of_workflow_meta(workflow_metas, status):
        """Apply the master workflow's status dict to each workflow meta.

        'status' maps workflow_spec_id -> {'status': ..., 'message': ...}.
        Invalid or missing entries are collected as ApiError warnings
        rather than raised.  Returns the list of warnings.
        """
        # Update the status on each workflow
        warnings = []
        unused_statuses = status.copy(
        )  # A list of all the statuses that are not used.
        for wfm in workflow_metas:
            unused_statuses.pop(wfm.workflow_spec_id, None)
            wfm.state_message = ''
            # do we have a status for you
            if wfm.workflow_spec_id not in status.keys():
                warnings.append(
                    ApiError(
                        "missing_status",
                        "No status information provided about workflow %s" %
                        wfm.workflow_spec_id))
                continue
            if not isinstance(status[wfm.workflow_spec_id], dict):
                warnings.append(
                    ApiError(
                        code='invalid_status',
                        message=
                        f'Status must be a dictionary with "status" and "message" keys. Name is {wfm.workflow_spec_id}. Status is {status[wfm.workflow_spec_id]}'
                    ))
                continue
            # An optional human-readable message may accompany the status.
            if 'message' in status[wfm.workflow_spec_id].keys():
                wfm.state_message = status[wfm.workflow_spec_id]['message']
            if 'status' not in status[wfm.workflow_spec_id].keys():
                warnings.append(
                    ApiError(
                        "missing_status_key",
                        "Workflow '%s' is present in master workflow, but doesn't have a status"
                        % wfm.workflow_spec_id))
                continue
            if not WorkflowState.has_value(
                    status[wfm.workflow_spec_id]['status']):
                warnings.append(
                    ApiError(
                        "invalid_state",
                        "Workflow '%s' can not be set to '%s', should be one of %s"
                        % (wfm.workflow_spec_id,
                           status[wfm.workflow_spec_id]['status'], ",".join(
                               WorkflowState.list()))))
                continue

            wfm.state = WorkflowState[status[wfm.workflow_spec_id]['status']]

        # NOTE: this loop variable shadows the 'status' parameter (which is
        # no longer needed at this point); it iterates the spec ids that
        # had no matching workflow meta.
        for status in unused_statuses:
            if isinstance(unused_statuses[status],
                          dict) and 'status' in unused_statuses[status]:
                warnings.append(
                    ApiError(
                        "unmatched_status",
                        "The master workflow provided a status for '%s' a "
                        "workflow that doesn't seem to exist." % status))

        return warnings
Example #22
0
    def build_lookup_table(file_id,
                           file_name,
                           file_data,
                           value_column,
                           label_column,
                           workflow_spec_id=None,
                           task_spec_id=None,
                           field_id=None):
        """ In some cases the lookup table can be very large.  This method will add all values to the database
         in a way that can be searched and returned via an api call - rather than sending the full set of
          options along with the form.  It will only open the file and process the options if something has
          changed.  """
        try:
            xlsx = ExcelFile(file_data, engine='openpyxl')
        # Pandas--or at least openpyxl, cannot read old xls files.
        # The error comes back as zipfile.BadZipFile because xlsx files are zipped xml files
        except BadZipFile:
            raise ApiError(
                code='excel_error',
                message=
                f"Error opening excel file {file_name}. You may have an older .xls spreadsheet. (file_model_id: {file_id} workflow_spec_id: {workflow_spec_id}, task_spec_id: {task_spec_id}, and field_id: {field_id})"
            )
        df = xlsx.parse(
            xlsx.sheet_names[0])  # Currently we only look at the fist sheet.
        # Clean the frame: infer dtypes, drop auto-generated unnamed
        # columns, drop fully-empty rows, then blank remaining missing values.
        df = df.convert_dtypes()
        df = df.loc[:, ~df.columns.str.contains(
            '^Unnamed')]  # Drop unnamed columns.
        df = pd.DataFrame(df).dropna(how='all')  # Drop null rows
        df = pd.DataFrame(df).replace({NA: ''})

        # Both the value and label columns must exist in the sheet.
        if value_column not in df:
            raise ApiError(
                "invalid_enum",
                "The file %s does not contain a column named % s" %
                (file_name, value_column))
        if label_column not in df:
            raise ApiError(
                "invalid_enum",
                "The file %s does not contain a column named % s" %
                (file_name, label_column))

        # One LookupFileModel row ties this field to its source file.
        lookup_model = LookupFileModel(workflow_spec_id=workflow_spec_id,
                                       field_id=field_id,
                                       task_spec_id=task_spec_id,
                                       file_model_id=file_id,
                                       is_ldap=False)

        db.session.add(lookup_model)
        # Each spreadsheet row becomes a searchable LookupDataModel entry,
        # with the entire row preserved as an ordered dict in 'data'.
        for index, row in df.iterrows():
            lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                          value=row[value_column],
                                          label=row[label_column],
                                          data=row.to_dict(OrderedDict))
            db.session.add(lookup_data)
        db.session.commit()
        return lookup_model
 def do_task(self, task, study_id, workflow_id, *args, **kwargs):
     """Look up the study association for the uid given as the only argument.

     Returns the association serialized with StudyAssociatedSchema.

     Raises:
         ApiError: when the uid argument is missing or not a string.
     """
     if len(args) < 1:
         raise ApiError('no_user_id_specified',
                        'A uva uid is the sole argument to this function')
     uid = args[0]
     if not isinstance(uid, str):
         raise ApiError('argument_should_be_string',
                        'A uva uid is always a string, please check type')
     record = StudyService.get_study_associate(study_id=study_id, uid=uid)
     return StudyAssociatedSchema().dump(record)
Example #24
0
 def find_field(self, task_name, field_name, workflow):
     """Return the form field named field_name on the task spec task_name.

     Raises:
         ApiError: if the task spec or the field cannot be found.
     """
     for task_spec in workflow.spec.task_specs.values():
         if task_spec.name != task_name:
             continue
         for form_field in task_spec.form.fields:
             if form_field.id == field_name:
                 return form_field
         raise ApiError("invalid_field",
                        f"The task '{task_name}' has no field named '{field_name}'")
     raise ApiError("invalid_spec",
                    f"Unable to find a task in the workflow called '{task_name}'")
def update_reference_file_info(name, body):
    """Update metadata for the reference file identified by *name*.

    Raises ApiError when the name is missing or no reference file with
    that name exists; otherwise delegates to ReferenceFileService and
    returns the updated file serialized through FileSchema.
    """
    if name is None:
        raise ApiError(code='missing_parameter',
                       message='Please provide a reference file name')
    matching_file = (session.query(FileModel)
                     .filter(FileModel.name == name)
                     .first())
    if matching_file is None:
        raise ApiError(code='no_such_file',
                       message=f"No reference file was found with name: {name}")
    updated_model = ReferenceFileService.update_reference_file_info(matching_file, body)
    return FileSchema().dump(to_file_api(updated_model))
 def validate_arg(self, arg):
     """Validate that *arg* is a list of dictionaries.

     Raises ApiError when *arg* is not a list, or when a non-empty list's
     first element is not a dict.  An empty list is accepted.

     Fixes two defects in the earlier version: ``arg[0]`` was indexed
     before checking the list was non-empty (IndexError on ``[]``), and
     the inverted condition ``not len(arg) > 0 and ...`` made the dict
     check unreachable for any non-empty list.
     """
     if not isinstance(arg, list):
         raise ApiError(
             "invalid parameter",
             "This function is expecting a list of dictionaries")
     # Only inspect the first element when there is one (mirrors the
     # original's intent of spot-checking arg[0]).
     if len(arg) > 0 and not isinstance(arg[0], dict):
         raise ApiError(
             "invalid parameter",
             "This function is expecting a list of dictionaries")
Пример #27
0
def verify_token(token=None):
    """
    Verifies the token for the user (if provided). If in production environment and token is not provided,
    gets user from the SSO headers and returns their token.

    Args:
        token: Optional[str]

    Returns:
        token_info: dict decoded from the auth token, or None when in
        production with no token and no uid header present.

    Raises:
        ApiError.  If not on production and token is not valid, returns an 'invalid_token' 403 error.
        If on production and user is not authenticated, returns a 'no_user' 403 error.
    """

    failure_error = ApiError(
        "invalid_token",
        "Unable to decode the token you provided.  Please re-authenticate",
        status_code=403)

    # Outside production, fall back to the first user in the database so
    # developers can work without a real SSO session.
    if not _is_production() and (token is None or 'user' not in g):
        g.user = UserModel.query.first()
        token = g.user.encode_auth_token()

    if token:
        try:
            token_info = UserModel.decode_auth_token(token)
            g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
        except Exception:
            # Any decode/lookup failure is reported uniformly as an
            # invalid-token error.  Was a bare `except:`, which also
            # swallowed SystemExit and KeyboardInterrupt.
            raise failure_error
        if g.user is not None:
            return token_info
        else:
            raise failure_error

    # If there's no token and we're in production, get the user from the SSO headers and return their token
    if not token and _is_production():
        uid = _get_request_uid(request)

        if uid is not None:
            db_user = UserModel.query.filter_by(uid=uid).first()

            if db_user is not None:
                g.user = db_user
                token = g.user.encode_auth_token().decode()
                token_info = UserModel.decode_auth_token(token)
                return token_info

            else:
                raise ApiError(
                    "no_user",
                    "User not found. Please login via the frontend app before accessing this feature.",
                    status_code=403)
        # NOTE(review): when no uid header is present this falls through and
        # returns None — confirm callers treat None as unauthenticated.
Пример #28
0
def update_spec_file_info(file_id, body):
    """Update metadata for the spec file with the given id.

    Raises ApiError when file_id is missing or unknown; otherwise
    delegates to SpecFileService and returns the updated file serialized
    through FileSchema.
    """
    if file_id is None:
        raise ApiError('no_such_file', 'Please provide a valid File ID.')
    file_model = session.query(FileModel).filter(
        FileModel.id == file_id).first()
    if file_model is None:
        # f-string instead of '+' concatenation: file_id is compared to
        # FileModel.id above and so may be an int, in which case string
        # concatenation raised TypeError, masking the intended message.
        raise ApiError('unknown_file_model',
                       f'The file_model "{file_id}" is not recognized.')

    new_file_model = SpecFileService().update_spec_file_info(file_model, body)
    return FileSchema().dump(to_file_api(new_file_model))
Пример #29
0
    def __get_study_status(study_model):
        """Derive the study's status by running the master workflow spec.

        Requires exactly one WorkflowSpecModel flagged ``is_master_spec``;
        raises ApiError when none or more than one is configured.
        """
        master_specs = (db.session.query(WorkflowSpecModel)
                        .filter_by(is_master_spec=True)
                        .all())
        spec_count = len(master_specs)
        if spec_count == 0:
            raise ApiError("missing_master_spec", "No specifications are currently marked as the master spec.")
        if spec_count > 1:
            raise ApiError("multiple_master_specs",
                           "There is more than one master specification, and I don't know what to do.")
        return WorkflowProcessor.run_master_spec(master_specs[0], study_model)
Пример #30
0
def render_markdown(data, template):
    """
    Provides a quick way to verify that a Jinja markdown template will work properly on a given json
    data structure.  Useful for folks that are building these markdown templates.

    Args:
        data: a JSON string to parse and feed to the template.
        template: the Jinja markdown template to render.

    Raises:
        ApiError: 'undefined_field' when the template references a field
        missing from the data; 'invalid_render' for any other failure
        (including malformed JSON input).
    """
    try:
        parsed = json.loads(data)
        return JinjaService.get_content(template, parsed)
    except UndefinedError as ue:
        # Chain the cause so the original Jinja traceback is preserved.
        raise ApiError(code="undefined_field", message=ue.message) from ue
    except Exception as e:
        raise ApiError(code="invalid_render", message=str(e)) from e