def get_document_directory(study_id, workflow_id=None):
    """
    Return a nested list of files arranged according to the category hierarchy
    defined in the doc dictionary.
    """
    file_models = FileService.get_files_for_study(study_id=study_id)
    doc_dict = DocumentService.get_dictionary()
    files = (File.from_models(model, FileService.get_file_data(model.id),
                              doc_dict) for model in file_models)
    directory = DocumentService.get_directory(doc_dict, files, workflow_id)

    return DocumentDirectorySchema(many=True).dump(directory)
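
A brief usage sketch; the study id is a placeholder, and the optional workflow_id is simply handed through to DocumentService.get_directory:

study_id = 1  # placeholder value for illustration
directory = get_document_directory(study_id)
# An optional workflow_id can also be supplied; it is passed to
# DocumentService.get_directory (exact scoping behavior not shown above).
scoped = get_document_directory(study_id, workflow_id=2)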
Example #2
    def validate_kw_args(self, **kwargs):
        if kwargs.get('key', None) is None:
            raise ApiError(
                code="missing_argument",
                message="The 'file_data_get' script requires a keyword argument of 'key'")
        if kwargs.get('file_id', None) is None:
            raise ApiError(
                code="missing_argument",
                message="The 'file_data_get' script requires a keyword argument of 'file_id'")
        if kwargs.get('value', None) is None:
            raise ApiError(
                code="missing_argument",
                message="The 'file_data_get' script requires a keyword argument of 'value'")

        if kwargs['key'] == 'irb_code' and not DocumentService.is_allowed_document(kwargs.get('value')):
            raise ApiError(
                "invalid_form_field_key",
                "When setting an irb_code, the form field id must match a known document in the "
                "irb_documents.xlsx reference file.  This code is not found in that file '%s'"
                % kwargs.get('value'))

        return True
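
A minimal call sketch, assuming the validator is invoked from another method of the same script class; 123 and 'Study_Protocol_Document' are placeholder values, not values taken from the project:

        # A missing kwarg or an unrecognized irb_code value raises ApiError;
        # otherwise the method returns True.
        self.validate_kw_args(key='irb_code',
                              file_id=123,
                              value='Study_Protocol_Document')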
Example #3
    def get_files(attachments, study_id):
        files = []
        codes = None
        if isinstance(attachments, str):
            codes = [attachments]
        elif isinstance(attachments, list):
            codes = attachments

        if codes is not None:
            for code in codes:
                if DocumentService.is_allowed_document(code):
                    workflows = session.query(WorkflowModel).filter(
                        WorkflowModel.study_id == study_id).all()
                    for workflow in workflows:
                        workflow_files = session.query(FileModel).\
                            filter(FileModel.workflow_id == workflow.id).\
                            filter(FileModel.irb_doc_code == code).all()
                        for file in workflow_files:
                            files.append({
                                'id': file.id,
                                'name': file.name,
                                'type': CONTENT_TYPES[file.type.value]
                            })
                else:
                    raise ApiError(
                        code='bad_doc_code',
                        message=f'The doc_code {code} is not valid.')
        else:
            raise ApiError(
                code='bad_argument_type',
                message='The attachments argument must be a string or list of strings')

        return files
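
A short usage sketch; the study id and document codes below are placeholders, and valid codes are whatever DocumentService.is_allowed_document accepts:

study_id = 1  # placeholder
attachments = ['Study_Protocol_Document', 'Study_App_Doc']  # placeholder codes
files = get_files(attachments, study_id=study_id)
for f in files:
    print(f['id'], f['name'], f['type'])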
Example #4
def to_file_api(file_model):
    """Converts a FileModel object to something we can return via the api"""
    if file_model.workflow_spec_id is not None:
        file_data_model = SpecFileService().get_spec_file_data(file_model.id)
    elif file_model.is_reference:
        file_data_model = ReferenceFileService().get_reference_file_data(
            file_model.name)
    else:
        file_data_model = FileService.get_file_data(file_model.id)
    return File.from_models(file_model, file_data_model,
                            DocumentService.get_dictionary())
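
One way this converter could be used when serving a single file over the API; the helper name and lookup are assumptions, although FileSchema, ApiError, and the session query pattern all appear in the other examples:

def get_file_info(file_id):
    # Hypothetical endpoint helper: fetch the FileModel by primary key,
    # then convert and serialize it for the response.
    file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
    if file_model is None:
        raise ApiError(code='file_not_found',
                       message=f'No file exists with id {file_id}.')
    return FileSchema().dump(to_file_api(file_model))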
Example #5
    def get_study(study_id, study_model: StudyModel = None, do_status=False):
        """Returns a study model that contains all the workflows organized by category.
        IMPORTANT: This is intended to be a lightweight call; it should never involve
        loading up and executing all the workflows in a study to calculate information."""
        if not study_model:
            study_model = session.query(StudyModel).filter_by(
                id=study_id).first()

        study = Study.from_model(study_model)
        study.create_user_display = LdapService.user_info(
            study.user_uid).display_name
        last_event: TaskEventModel = session.query(TaskEventModel) \
            .filter_by(study_id=study_id, action='COMPLETE') \
            .order_by(TaskEventModel.date.desc()).first()
        if last_event is None:
            study.last_activity_user = '******'
            study.last_activity_date = ""
        else:
            study.last_activity_user = LdapService.user_info(
                last_event.user_uid).display_name
            study.last_activity_date = last_event.date
        study.categories = StudyService.get_categories()
        workflow_metas = StudyService._get_workflow_metas(study_id)
        files = FileService.get_files_for_study(study.id)
        files = (File.from_models(model, FileService.get_file_data(model.id),
                                  DocumentService.get_dictionary())
                 for model in files)
        study.files = list(files)
        # Calling this line repeatedly is very very slow.  It creates the
        # master spec and runs it.  Don't execute this for Abandoned studies, as
        # we don't have the information to process them.
        if study.status != StudyStatus.abandoned:
            # this line is taking 99% of the time that is used in get_study.
            # see ticket #196
            if do_status:
                # __get_study_status() runs the master workflow to generate the status dictionary
                status = StudyService._get_study_status(study_model)
                study.warnings = StudyService._update_status_of_workflow_meta(
                    workflow_metas, status)

            # Group the workflows into their categories.
            for category in study.categories:
                category.workflows = {w for w in workflow_metas
                                      if w.category_id == category.id}

        return study
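
A brief usage sketch, assuming get_study is a static method on StudyService as the surrounding examples suggest:

study_id = 1  # placeholder

# Lightweight fetch: categories, workflows, and files are attached,
# but the master spec is not executed.
study = StudyService.get_study(study_id)

# Heavier fetch: also runs the master workflow to refresh each
# workflow's status (the slow path noted in the comments above).
study_with_status = StudyService.get_study(study_id, do_status=True)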
Example #6
    def process_document_deletion(doc_code, workflow_id, task):
        if DocumentService.is_allowed_document(doc_code):
            result = session.query(FileModel).filter(
                FileModel.workflow_id == workflow_id,
                FileModel.irb_doc_code == doc_code).all()
            if isinstance(result, list) and len(result) > 0 and isinstance(result[0], FileModel):
                for file in result:
                    FileService.delete_file(file.id)
            else:
                raise ApiError.from_task(
                    code='no_document_found',
                    message=f'No document of type {doc_code} was found for this workflow.',
                    task=task)
        else:
            raise ApiError.from_task(
                code='invalid_document_code',
                message=f'{doc_code} is not a valid document code',
                task=task)
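
A call sketch, assuming this runs inside a workflow script where workflow_id and task are supplied by the engine; the document code is a placeholder:

        # An unknown code raises ApiError.from_task with 'invalid_document_code';
        # a known code with no matching files raises 'no_document_found'.
        process_document_deletion(doc_code='Study_Protocol_Document',
                                  workflow_id=workflow_id,
                                  task=task)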
Example #7
    def get_documents_status(study_id):
        """Returns a list of documents related to the study, and any file information
        that is available.."""

        # Get PB required docs, if Protocol Builder Service is enabled.
        if ProtocolBuilderService.is_enabled() and study_id is not None:
            try:
                pb_docs = ProtocolBuilderService.get_required_docs(
                    study_id=study_id)
            except requests.exceptions.ConnectionError as ce:
                app.logger.error(
                    f'Failed to connect to the Protocol Builder - {str(ce)}',
                    exc_info=True)
                pb_docs = []
        else:
            pb_docs = []

        # Loop through all known document types, get the counts for those files,
        # and use pb_docs to mark those as required.
        doc_dictionary = DocumentService.get_dictionary()

        documents = {}
        for code, doc in doc_dictionary.items():

            doc['required'] = False
            # Guard against an empty pb_docs (PB disabled, or the request above failed).
            if ProtocolBuilderService.is_enabled() and pb_docs and doc['id'] != '':
                pb_data = next(
                    (item for item in pb_docs['AUXDOCS']
                     if int(item['SS_AUXILIARY_DOC_TYPE_ID']) == int(doc['id'])),
                    None)
                if pb_data:
                    doc['required'] = True

            doc['study_id'] = study_id
            doc['code'] = code

            # Make a display name out of categories
            name_list = []
            for cat_key in ['category1', 'category2', 'category3']:
                if doc[cat_key] not in ['', 'NULL', None]:
                    name_list.append(doc[cat_key])
            doc['display_name'] = ' / '.join(name_list)

            # For each file, get associated workflow status
            doc_files = FileService.get_files_for_study(study_id=study_id,
                                                        irb_doc_code=code)
            doc['count'] = len(doc_files)
            doc['files'] = []

            for file_model in doc_files:
                file = File.from_models(
                    file_model, FileService.get_file_data(file_model.id), [])
                file_data = FileSchema().dump(file)
                del file_data['document']
                doc['files'].append(Box(file_data))
                # update the document status to match the status of the workflow it is in.
                if 'status' not in doc or doc['status'] is None:
                    status = session.query(WorkflowModel.status).filter_by(
                        id=file.workflow_id).scalar()
                    doc['status'] = status.value

            documents[code] = doc
        return Box(documents)
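
A short sketch of consuming the result, assuming the method is exposed on StudyService; the returned Box behaves like a dictionary keyed by document code:

study_id = 1  # placeholder
documents = StudyService.get_documents_status(study_id)
for code, doc in documents.items():
    # Each entry carries the reference-file metadata plus the computed fields
    # 'required', 'display_name', 'count', and 'files' ('status' is only set
    # when at least one file exists for the code).
    print(code, doc['display_name'], doc['count'], doc['required'])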
Example #8
    def test_load_lookup_data(self):
        self.create_reference_document()
        doc_dict = DocumentService.get_dictionary()
        self.assertIsNotNone(doc_dict)