Пример #1
0
    def test_new_approval_requests_after_file_modification_create_new_requests(
            self):
        """A second approval request after a new file upload creates a second
        ApprovalModel row with a bumped version number."""
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')

        # Upload a file and request approval, twice, with two distinct
        # IRB document codes (same order as the original scenario).
        for doc_code in ("AD_CoCAppr", "UVACompl_PRCAppr"):
            FileService.add_workflow_file(workflow_id=workflow.id,
                                          name="anything.png",
                                          content_type="text",
                                          binary_data=b'5678',
                                          irb_doc_code=doc_code)
            ApprovalService.add_approval(study_id=workflow.study_id,
                                         workflow_id=workflow.id,
                                         approver_uid="dhf8r")

        self.assertEqual(2, db.session.query(ApprovalModel).count())
        models = (db.session.query(ApprovalModel)
                  .order_by(ApprovalModel.version).all())
        # Versions are sequential, starting at 1.
        for expected_version, model in enumerate(models, start=1):
            self.assertEqual(expected_version, model.version)
Пример #2
0
    def test_get_documents_has_file_details(self, mock_docs):
        """An attached workflow file shows up in the study's document status
        with a count, status and per-file details."""
        # Stub the protocol builder with the canned 'required docs' fixture.
        mock_docs.return_value = json.loads(
            self.protocol_builder_response('required_docs.json'))

        user = self.create_user_with_study_and_workflow()

        # Add a document to the study with the correct code.
        workflow = self.create_workflow('docx')
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png",
                                      content_type="text",
                                      binary_data=b'1234',
                                      irb_doc_code=irb_code)

        docs = StudyService().get_documents_status(workflow.study_id)
        self.assertIsNotNone(docs)
        entry = docs[irb_code]
        self.assertEqual("not_started", entry['status'])
        self.assertEqual(1, entry['count'])
        first_file = entry['files'][0]
        self.assertIsNotNone(first_file)
        self.assertIsNotNone(first_file['file_id'])
        self.assertEqual(workflow.id, first_file['workflow_id'])
Пример #3
0
def delete_workflow_specification(spec_id):
    """Delete a workflow specification and every record that depends on it."""
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Specification ID.')

    spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
    if spec is None:
        raise ApiError(
            'unknown_spec',
            'The Workflow Specification "' + spec_id + '" is not recognized.')

    # Remove files attached directly to the specification.
    for file_model in session.query(FileModel).filter_by(
            workflow_spec_id=spec_id).all():
        FileService.delete_file(file_model.id)

    # Drop task events recorded against the specification.
    session.query(TaskEventModel).filter(
        TaskEventModel.workflow_spec_id == spec_id).delete()

    # Delete every workflow instantiated from this specification,
    # then the specification row itself.
    for workflow in session.query(WorkflowModel).filter_by(
            workflow_spec_id=spec_id):
        StudyService.delete_workflow(workflow)
    session.query(WorkflowSpecModel).filter_by(id=spec_id).delete()
    session.commit()
Пример #4
0
    def update_workflow_spec_file_model(workflow_spec: WorkflowSpecModel,
                                        file_model: FileModel, binary_data,
                                        content_type):
        """Refresh a spec file's metadata from its new content and persist it.

        Raises ApiError for unknown extensions or unparseable BPMN XML.
        """
        # Guard clause: reject files whose extension is not a known FileType.
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError(
                'unknown_extension',
                'The file you provided does not have an accepted extension:' +
                file_extension,
                status_code=404)

        file_model.type = FileType[file_extension]
        file_model.content_type = content_type
        file_model.archived = False  # Unarchive the file if it is archived.

        # For BPMN files, pull the primary process id and swimlane flag
        # out of the XML.
        if file_model.type == FileType.bpmn:
            try:
                bpmn = etree.fromstring(binary_data)
                file_model.primary_process_id = \
                    SpecFileService.get_process_id(bpmn)
                file_model.is_review = FileService.has_swimlane(bpmn)
            except etree.XMLSyntaxError as xse:
                raise ApiError("invalid_xml",
                               "Failed to parse xml: " + str(xse),
                               file_name=file_model.name)

        session.add(file_model)
        session.commit()

        return file_model
Пример #5
0
    def get_study(study_id, study_model: StudyModel = None):
        """Returns a study model that contains all the workflows organized by category.
        IMPORTANT:  This is intended to be a lightweight call, it should never involve
        loading up and executing all the workflows in a study to calculate information."""
        if not study_model:
            study_model = session.query(StudyModel).filter_by(id=study_id).first()
        study = Study.from_model(study_model)
        study.categories = StudyService.get_categories()
        workflow_metas = StudyService.__get_workflow_metas(study_id)
        study.approvals = ApprovalService.get_approvals_for_study(study.id)
        study.files = [
            File.from_models(model, FileService.get_file_data(model.id),
                             FileService.get_doc_dictionary())
            for model in FileService.get_files_for_study(study.id)
        ]

        # Calling this line repeatedly is very very slow.  It creates the
        # master spec and runs it.  Don't execute this for Abandoned studies, as
        # we don't have the information to process them.
        if study.protocol_builder_status != ProtocolBuilderStatus.ABANDONED:
            status = StudyService.__get_study_status(study_model)
            study.warnings = StudyService.__update_status_of_workflow_meta(
                workflow_metas, status)

            # Bucket each workflow meta into its category (a set, as before).
            for category in study.categories:
                category.workflows = {
                    meta for meta in workflow_metas
                    if meta.category_id == category.id
                }

        return study
Пример #6
0
    def test_updates_to_file_cause_lookup_rebuild(self):
        """Updating a spec file should invalidate and rebuild its lookup data."""
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        file_model = session.query(FileModel).filter(
            FileModel.name == "sponsors.xls").first()
        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(28, len(lookup_data))

        # Update the workflow specification file.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data',
                                 'enum_options_with_search',
                                 'sponsors_modified.xls')
        # Context manager guarantees the handle is closed even if
        # update_file raises (the original leaked it in that case).
        with open(file_path, 'rb') as file:
            FileService.update_file(file_model, file.read(),
                                    CONTENT_TYPES['xls'])

        # restart the workflow, so it can pick up the changes.
        WorkflowProcessor(workflow, soft_reset=True)

        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(4, len(lookup_data))
Пример #7
0
 def delete_workflow(workflow):
     """Delete a workflow and its dependent rows (files, dependencies,
     task events), then the workflow row itself.

     NOTE(review): there is no session.commit() here -- the caller is
     presumably expected to commit the transaction; confirm against callers.
     """
     # Remove files attached to this workflow instance.
     for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
         FileService.delete_file(file.id)
     # Remove dependency rows tracked on the workflow relationship.
     for dep in workflow.dependencies:
         session.delete(dep)
     # Bulk-delete task events, then the workflow row.
     session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
     session.query(WorkflowModel).filter_by(id=workflow.id).delete()
def update_or_create_current_file(remote, workflow_spec_id, updatefile):
    """Create or refresh the local FileModel described by *updatefile*, then
    sync its binary content from the remote endpoint when available."""
    currentfile = file_get(workflow_spec_id, updatefile['filename'])
    if not currentfile:
        # No local record yet -- build one.  The sentinel spec id
        # 'REFERENCE_FILES' marks reference files, which carry no spec id.
        currentfile = FileModel()
        currentfile.name = updatefile['filename']
        if workflow_spec_id == 'REFERENCE_FILES':
            currentfile.workflow_spec_id = None
            currentfile.is_reference = True
        else:
            currentfile.workflow_spec_id = workflow_spec_id

    # Copy the remote metadata onto the local record, field by field.
    for attr in ('date_created', 'type', 'primary', 'content_type',
                 'primary_process_id'):
        setattr(currentfile, attr, updatefile[attr])
    session.add(currentfile)

    try:
        content = WorkflowSyncService.get_remote_file_by_hash(
            remote, updatefile['md5_hash'])
        # NOTE(review): update_file's third argument is a content type in
        # other call sites, but here it receives updatefile['type'] --
        # confirm this is intentional.
        FileService.update_file(currentfile, content, updatefile['type'])
    except ApiError:
        # The remote copy doesn't exist, so skip the content sync.
        print("Remote file " + currentfile.name +
              " does not exist, so not syncing.")
Пример #9
0
    def test_delete_irb_document(self):
        """A workflow step can delete a previously uploaded IRB document."""
        self.load_example_data()
        irb_code = 'Study_Protocol_Document'

        workflow = self.create_workflow('add_delete_irb_document')
        study_id = workflow.study_id

        workflow_api = self.get_workflow_api(workflow)
        first_task = workflow_api.next_task

        # Should not have any files yet
        files = FileService.get_files_for_study(study_id)
        self.assertEqual(0, len(files))
        self.assertEqual(False, IsFileUploaded.do_task(
            IsFileUploaded, first_task, study_id, workflow.id, irb_code))

        # Add a file
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      task_spec_name=first_task.name,
                                      name="filename.txt", content_type="text",
                                      binary_data=b'1234', irb_doc_code=irb_code)
        # Assert we have the file
        self.assertEqual(True, IsFileUploaded.do_task(
            IsFileUploaded, first_task, study_id, workflow.id, irb_code))

        # run the workflow, which deletes the file
        self.complete_form(workflow, first_task, {'irb_document': irb_code})
        workflow_api = self.get_workflow_api(workflow)
        second_task = workflow_api.next_task
        # make sure it is deleted (leftover debug print removed)
        self.assertEqual(False, IsFileUploaded.do_task(
            IsFileUploaded, second_task, study_id, workflow.id, irb_code))
Пример #10
0
    def load_rrt(self):
        """Seed the database with the RRT reference document list, a workflow
        category, and the two RRT workflow specifications."""
        file_path = os.path.join(app.root_path, 'static', 'reference',
                                 'rrt_documents.xlsx')
        # Context manager guarantees the handle is closed even if
        # add_reference_file raises (the original leaked it in that case).
        with open(file_path, "rb") as file:
            FileService.add_reference_file(FileService.DOCUMENT_LIST,
                                           binary_data=file.read(),
                                           content_type=CONTENT_TYPES['xls'])

        category = WorkflowSpecCategoryModel(
            id=0,
            name='research_rampup_category',
            display_name='Research Ramp-up Category',
            display_order=0)
        db.session.add(category)
        db.session.commit()

        # Master spec: a placeholder, unused for RRT.
        self.create_spec(
            id="rrt_top_level_workflow",
            name="rrt_top_level_workflow",
            display_name="Top Level Workflow",
            description="Does nothing, we don't use the master workflow here.",
            category_id=None,
            master_spec=True)

        self.create_spec(
            id="research_rampup",
            name="research_rampup",
            display_name="Research Ramp-up Toolkit",
            description="Process for creating a new research ramp-up request.",
            category_id=0,
            master_spec=False)
Пример #11
0
    def test_get_study_has_details_about_files(self):
        """The study endpoint reports category, description and download_name
        for each attached file."""
        # Set up the study and attach a file to it.
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('file_upload_form')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png",
                                      content_type="png",
                                      binary_data=b'1234',
                                      irb_doc_code=irb_code)

        api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
                                    headers=self.logged_in_headers(),
                                    content_type="application/json")
        self.assert_success(api_response)
        study = StudySchema().loads(api_response.get_data(as_text=True))
        self.assertEqual(1, len(study.files))
        file_info = study.files[0]
        expected = {
            "category": "UVA Compliance/PRC Approval",
            "description": "Cancer Center's PRC Approval Form",
            "download_name": "UVA Compliance/PRC Approval.png",
        }
        for key, value in expected.items():
            self.assertEqual(value, file_info[key])
    def reset(workflow_model, clear_data=False, delete_files=False):
        """Reset a workflow to its initial state.

        Sends a cancel notification (best effort), wipes the serialized BPMN
        state, optionally clears captured form data and deletes attached
        files, then returns a fresh WorkflowProcessor.
        """
        # Try to execute a cancel notify.  Failures are logged but never
        # block the reset, otherwise the workflow could get stuck in an
        # unresolvable state.  (Leftover debug print removed; the logger
        # call now uses lazy %-args instead of eagerly-formatted f-strings.)
        try:
            wp = WorkflowProcessor(workflow_model)
            wp.cancel_notify()  # Executes a notification to all endpoints.
        except Exception as e:
            app.logger.error(
                "Unable to send a cancel notify for workflow %s during a reset."
                " Continuing with the reset anyway so we don't get in an unresolvable"
                " state. An %s error occured with the following information: %s",
                workflow_model.id, e.__class__.__name__, str(e))
        workflow_model.bpmn_workflow_json = None
        if clear_data:
            # Clear form_data from task_events
            task_events = session.query(TaskEventModel). \
                filter(TaskEventModel.workflow_id == workflow_model.id).all()
            for task_event in task_events:
                task_event.form_data = {}
                session.add(task_event)
        if delete_files:
            files = FileModel.query.filter(
                FileModel.workflow_id == workflow_model.id).all()
            for file in files:
                FileService.delete_file(file.id)
        session.commit()
        return WorkflowProcessor(workflow_model)
Пример #13
0
 def create_reference_document(self):
     """Load the IRB documents reference spreadsheet into the file service."""
     file_path = os.path.join(app.root_path, 'static', 'reference',
                              'irb_documents.xlsx')
     # Context manager guarantees the handle is closed even if
     # add_reference_file raises (the original leaked it in that case).
     with open(file_path, "rb") as file:
         FileService.add_reference_file(FileService.DOCUMENT_LIST,
                                        binary_data=file.read(),
                                        content_type=CONTENT_TYPES['xls'])
    def test_get_zipped_files(self):
        """The get_zip_file workflow bundles the selected documents into a zip
        whose member names carry the study id and document category."""
        self.load_example_data()

        workflow = self.create_workflow('get_zip_file')
        study_id = workflow.study_id
        workflow_api = self.get_workflow_api(workflow)
        task = workflow_api.next_task

        # Add files to use in the test -- one upload per IRB doc code.
        uploads = (
            ("document_1.png", 'Study_Protocol_Document'),
            ("document_2.txt", 'Study_App_Doc'),
            ("document_3.pdf", 'AD_Consent_Model'),
        )
        models = [
            FileService.add_workflow_file(workflow_id=workflow.id,
                                          name=doc_name,
                                          content_type="text",
                                          task_spec_name=task.name,
                                          binary_data=b'1234',
                                          irb_doc_code=doc_code)
            for doc_name, doc_code in uploads
        ]

        file_ids = [{'file_id': model.id} for model in models]
        workflow_api = self.complete_form(workflow, task,
                                          {'file_ids': file_ids})
        next_task = workflow_api.next_task
        file_model_id = next_task.data['zip_file']['id']

        file_data = session.query(FileDataModel).filter(
            FileDataModel.file_model_id == file_model_id).first()

        # Inspect the zip we got back: every member has an expected name
        # and the original 4-byte payload.
        with zipfile.ZipFile(io.BytesIO(file_data.data), 'r') as zf:
            self.assertIsInstance(zf, zipfile.ZipFile)
            for name in zf.namelist():
                info = zf.getinfo(name)
                self.assertIn(os.path.basename(info.filename), [
                    f'{study_id} Protocol document_1.png',
                    f'{study_id} Application document_2.txt',
                    f'{study_id} Model document_3.pdf'
                ])
                self.assertEqual(b'1234', zf.read(name))
Пример #15
0
    def create_spec(self,
                    id,
                    name,
                    display_name="",
                    description="",
                    filepath=None,
                    master_spec=False,
                    category_id=None,
                    display_order=None,
                    from_tests=False):
        """Assumes that a directory exists in static/bpmn with the same name as the given id.
           further assumes that the [id].bpmn is the primary file for the workflow.
           returns an array of data models to be added to the database."""
        file_service = FileService()
        spec = WorkflowSpecModel(id=id,
                                 name=name,
                                 display_name=display_name,
                                 description=description,
                                 is_master_spec=master_spec,
                                 category_id=category_id,
                                 display_order=display_order)
        db.session.add(spec)
        db.session.commit()
        # Resolve the glob pattern for the spec's files when not supplied.
        if not filepath and not from_tests:
            filepath = os.path.join(app.root_path, 'static', 'bpmn', id, "*")
        if not filepath and from_tests:
            filepath = os.path.join(app.root_path, '..', 'tests', 'data', id,
                                    "*")

        for file_path in glob.glob(filepath):
            _, file_extension = os.path.splitext(file_path)
            filename = os.path.basename(file_path)

            is_status = filename.lower() == 'status.bpmn'
            is_primary = filename.lower() == id + '.bpmn'
            try:
                # 'with' replaces the old manual open/close plus the spurious
                # 'global file' (which shadowed the builtin and leaked a
                # handle name into module scope).
                with open(file_path, 'rb') as spec_file:
                    data = spec_file.read()
                content_type = CONTENT_TYPES[file_extension[1:]]
                file_service.add_workflow_spec_file(workflow_spec=spec,
                                                    name=filename,
                                                    content_type=content_type,
                                                    binary_data=data,
                                                    primary=is_primary,
                                                    is_status=is_status)
            except IsADirectoryError:
                # Ignore sub directories
                pass
        return spec
Пример #16
0
    def replace_file(self, name, file_path):
        """Replaces a stored file with the given name with the contents of the file at the given path."""
        file_service = FileService()
        # Read with a context manager; the original never closed the handle.
        with open(file_path, "rb") as file:
            data = file.read()

        file_model = db.session.query(FileModel).filter(
            FileModel.name == name).first()
        _, file_extension = os.path.splitext(file_path)
        content_type = CONTENT_TYPES[file_extension[1:]]
        file_service.update_file(file_model, data, content_type)
Пример #17
0
def get_document_directory(study_id, workflow_id=None):
    """
    return a nested list of files arranged according to the category hierarchy
    defined in the doc dictionary
    """
    doc_dict = DocumentService.get_dictionary()
    files = [
        File.from_models(model, FileService.get_file_data(model.id), doc_dict)
        for model in FileService.get_files_for_study(study_id=study_id)
    ]
    directory = DocumentService.get_directory(doc_dict, files, workflow_id)
    return DocumentDirectorySchema(many=True).dump(directory)
Пример #18
0
 def do_task(self, task, study_id, workflow_id, *args, **kwargs):
     """Render the template for this task and attach the result to the
     workflow as an IRB document file.

     args[0] is the output file name and args[1] the IRB document code.
     """
     workflow = session.query(WorkflowModel).filter(
         WorkflowModel.id == workflow_id).first()
     # Build the filled-in docx stream from the task's data.
     final_document_stream = self.process_template(task, study_id, workflow,
                                                   *args, **kwargs)
     file_name = args[0]
     irb_doc_code = args[1]
     FileService.add_workflow_file(workflow_id=workflow_id,
                                   name=file_name,
                                   content_type=CONTENT_TYPES['docx'],
                                   binary_data=final_document_stream.read(),
                                   irb_doc_code=irb_doc_code)
Пример #19
0
    def get_documents_status(study_id):
        """Returns a dict, keyed by document code, of documents related to the
        study, and any file information that is available."""

        # Get PB required docs, if Protocol Builder Service is enabled.
        if ProtocolBuilderService.is_enabled() and study_id is not None:
            try:
                pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
            except requests.exceptions.ConnectionError as ce:
                # Best effort: an unreachable Protocol Builder is logged and
                # treated as "no required docs" rather than failing the call.
                app.logger.error(f'Failed to connect to the Protocol Builder - {str(ce)}', exc_info=True)
                pb_docs = []
        else:
            pb_docs = []

        # Loop through all known document types, get the counts for those files,
        # and use pb_docs to mark those as required.
        doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])

        documents = {}
        for code, doc in doc_dictionary.items():

            # A document is "required" when the Protocol Builder response
            # lists an AUXDOCID matching this document's id.
            if ProtocolBuilderService.is_enabled():
                pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
                doc['required'] = False
                if pb_data:
                    doc['required'] = True

            doc['study_id'] = study_id
            doc['code'] = code

            # Make a display name out of categories
            name_list = []
            for cat_key in ['category1', 'category2', 'category3']:
                if doc[cat_key] not in ['', 'NULL']:
                    name_list.append(doc[cat_key])
            doc['display_name'] = ' / '.join(name_list)

            # For each file, get associated workflow status
            doc_files = FileService.get_files_for_study(study_id=study_id, irb_doc_code=code)
            doc['count'] = len(doc_files)
            doc['files'] = []
            for file in doc_files:
                doc['files'].append({'file_id': file.id,
                                     'workflow_id': file.workflow_id})

                # update the document status to match the status of the workflow it is in.
                # NOTE: only the first file's workflow determines the status.
                if 'status' not in doc or doc['status'] is None:
                    workflow: WorkflowModel = session.query(WorkflowModel).filter_by(id=file.workflow_id).first()
                    doc['status'] = workflow.status.value

            documents[code] = doc
        return documents
    def test_updates_to_file_cause_lookup_rebuild(self):
        """Updating a spec/reference/workflow file invalidates and rebuilds
        its lookup data."""
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        file_model = session.query(FileModel).filter(
            FileModel.name == "sponsors.xlsx").first()
        LookupService.lookup(workflow,
                             "Task_Enum_Lookup",
                             "sponsor",
                             "sam",
                             limit=10)
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(28, len(lookup_data))

        # Update the workflow specification file.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data',
                                 'enum_options_with_search',
                                 'sponsors_modified.xlsx')
        # Context manager replaces manual open/close, so the handle is
        # released even when one of the update calls raises.
        with open(file_path, 'rb') as file:
            if file_model.workflow_spec_id is not None:
                workflow_spec_model = session.query(WorkflowSpecModel).filter(
                    WorkflowSpecModel.id == file_model.workflow_spec_id).first()
                SpecFileService().update_spec_file_data(workflow_spec_model,
                                                        file_model.name,
                                                        file.read())
            elif file_model.is_reference:
                ReferenceFileService().update_reference_file(
                    file_model, file.read())
            else:
                FileService.update_file(file_model, file.read(),
                                        CONTENT_TYPES['xlsx'])

        # restart the workflow, so it can pick up the changes.
        processor = WorkflowProcessor.reset(workflow)
        workflow = processor.workflow_model

        LookupService.lookup(workflow,
                             "Task_Enum_Lookup",
                             "sponsor",
                             "sam",
                             limit=10)
        lookup_records = session.query(LookupFileModel).all()
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(4, len(lookup_data))
Пример #21
0
 def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
     """For validation only, pretend no results come back from pb"""
     self.check_args(args)
     # Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.)
     FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
     FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST)
     # Canned study payload mimicking the shape of a real Protocol Builder
     # response, so downstream tasks validate against realistic keys.
     data = {
         "study":{
             "info": {
                 "id": 12,
                 "title": "test",
                 "primary_investigator_id":21,
                 "user_uid": "dif84",
                 "sponsor": "sponsor",
                 "ind_number": "1234",
                 "inactive": False
             },
             "investigators":
                 {
                     "INVESTIGATORTYPE": "PI",
                     "INVESTIGATORTYPEFULL": "Primary Investigator",
                     "NETBADGEID": "dhf8r"
                 },
             "roles":
                 {
                     "INVESTIGATORTYPE": "PI",
                     "INVESTIGATORTYPEFULL": "Primary Investigator",
                     "NETBADGEID": "dhf8r"
                 },
             "details":
                 {
                     "IS_IND": 0,
                     "IS_IDE": 0,
                     "IS_MULTI_SITE": 0,
                     "IS_UVA_PI_MULTI": 0
                 },
             "approvals": {
                 "study_id": 12,
                 "workflow_id": 321,
                 "display_name": "IRB API Details",
                 "name": "irb_api_details",
                 "status": WorkflowStatus.not_started.value,
                 "workflow_spec_id": "irb_api_details",
             },
             'protocol': {
                 'id': 0,
             }
         }
     }
     self.add_data_to_task(task=task, data=data["study"])
     # Documents status is computed for real, even in validation mode.
     self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)})
Пример #22
0
 def test_do_task_with_blank_second_approver(self):
     """A blank second approver should result in a single approval request."""
     self.load_example_data()
     self.create_reference_document()
     workflow = self.create_workflow('empty_workflow')
     processor = WorkflowProcessor(workflow)
     task = processor.next_task()
     # approval2 is deliberately empty -- the script should skip it.
     task.data = {"study": {"approval1": "dhf8r", 'approval2':''}}
     FileService.add_workflow_file(workflow_id=workflow.id,
                                   irb_doc_code="UVACompl_PRCAppr",
                                   name="anything.png", content_type="text",
                                   binary_data=b'1234')
     script = RequestApproval()
     script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
     # Only one approval row should exist despite two approver args.
     self.assertEqual(1, db.session.query(ApprovalModel).count())
Пример #23
0
    def test_workflow_restart_delete_files(self):
        """Restarting with delete_files=True removes uploaded IRB documents;
        restarting without it keeps them."""
        self.load_example_data()
        irb_code = 'Study_Protocol_Document'

        workflow = self.create_workflow('add_delete_irb_document')
        study_id = workflow.study_id

        workflow_api = self.get_workflow_api(workflow)
        first_task = workflow_api.next_task

        def file_uploaded(task):
            # Small wrapper around the repeated IsFileUploaded script check.
            return IsFileUploaded.do_task(IsFileUploaded, task, study_id,
                                          workflow.id, irb_code)

        # Should not have any files yet
        files = FileService.get_files_for_study(study_id)
        self.assertEqual(0, len(files))
        self.assertEqual(False, file_uploaded(first_task))

        # Add a file
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      task_spec_name=first_task.name,
                                      name="filename.txt",
                                      content_type="text",
                                      binary_data=b'1234',
                                      irb_doc_code=irb_code)
        # Assert we have the file
        self.assertEqual(True, file_uploaded(first_task))

        # Restart keeping files -- the upload must survive.
        workflow_api = self.restart_workflow_api(workflow_api,
                                                 delete_files=False)
        first_task = workflow_api.next_task
        self.assertEqual(True, file_uploaded(first_task))

        # Restart deleting files -- the upload must be gone.
        workflow_api = self.restart_workflow_api(workflow_api,
                                                 delete_files=True)
        first_task = workflow_api.next_task
        self.assertEqual(False, file_uploaded(first_task))
Пример #24
0
def get_files(workflow_id=None, form_field_key=None, study_id=None):
    """Return serialized file records for a workflow, or study-wide.

    A workflow_id is always required.  form_field_key optionally narrows
    the results to a single IRB document code.  When a study_id is also
    supplied, the lookup spans the whole study instead of one workflow.
    """
    if workflow_id is None:
        raise ApiError(
            'missing_parameter',
            'Please specify a workflow_id with an optional form_field_key')

    if study_id is None:
        models = FileService.get_files(workflow_id=workflow_id,
                                       irb_doc_code=form_field_key)
    else:
        models = FileService.get_files_for_study(study_id=study_id,
                                                 irb_doc_code=form_field_key)

    # Serialize lazily; FileSchema consumes the generator during dump.
    return FileSchema(many=True).dump(to_file_api(m) for m in models)
# Example #25
    def get_study(study_id, study_model: StudyModel = None, do_status=False):
        """Returns a study model that contains all the workflows organized by category.
        IMPORTANT:  This is intended to be a lightweight call, it should never involve
        loading up and executing all the workflows in a study to calculate information.

        :param study_id: id of the study to load.
        :param study_model: optional pre-fetched StudyModel; skips the DB lookup.
        :param do_status: when True, runs the (slow) master workflow to compute
            per-workflow status and populate study.warnings.
        """
        # Callers that already hold the StudyModel can pass it in to avoid a query.
        if not study_model:
            study_model = session.query(StudyModel).filter_by(
                id=study_id).first()

        study = Study.from_model(study_model)
        study.create_user_display = LdapService.user_info(
            study.user_uid).display_name
        # The most recent COMPLETE task event tells us who last touched the study.
        last_event: TaskEventModel = session.query(TaskEventModel) \
            .filter_by(study_id=study_id, action='COMPLETE') \
            .order_by(TaskEventModel.date.desc()).first()
        if last_event is None:
            # NOTE(review): this literal looks redacted/garbled — confirm the
            # intended placeholder value for "no activity yet".
            study.last_activity_user = '******'
            study.last_activity_date = ""
        else:
            study.last_activity_user = LdapService.user_info(
                last_event.user_uid).display_name
            study.last_activity_date = last_event.date
        study.categories = StudyService.get_categories()
        workflow_metas = StudyService._get_workflow_metas(study_id)
        # Attach full file details (data + dictionary metadata) to the study.
        files = FileService.get_files_for_study(study.id)
        files = (File.from_models(model, FileService.get_file_data(model.id),
                                  DocumentService.get_dictionary())
                 for model in files)
        study.files = list(files)
        # Calling this line repeatedly is very very slow.  It creates the
        # master spec and runs it.  Don't execute this for Abandoned studies, as
        # we don't have the information to process them.
        if study.status != StudyStatus.abandoned:
            # this line is taking 99% of the time that is used in get_study.
            # see ticket #196
            if do_status:
                # __get_study_status() runs the master workflow to generate the status dictionary
                status = StudyService._get_study_status(study_model)
                study.warnings = StudyService._update_status_of_workflow_meta(
                    workflow_metas, status)

            # Group the workflows into their categories.
            for category in study.categories:
                category.workflows = {
                    w
                    for w in workflow_metas if w.category_id == category.id
                }

        return study
# Example #26
    def get_spec(file_data_models: List[FileDataModel], workflow_spec_id):
        """Returns a SpiffWorkflow specification for the given workflow spec,
        using the files provided.  The Workflow_spec_id is only used to generate
        better error messages.

        :param file_data_models: BPMN/DMN file data to feed to the parser; the
            file flagged as primary supplies the process id.
        :param workflow_spec_id: spec identifier, used only for error reporting.
        :raises ApiError: 'no_primary_bpmn_error' when no primary BPMN file is
            present, or 'workflow_validation_error' when parsing fails.
        """
        parser = WorkflowProcessor.get_parser()
        process_id = None

        for file_data in file_data_models:
            if file_data.file_model.type == FileType.bpmn:
                bpmn: etree.Element = etree.fromstring(file_data.data)
                # Only the primary BPMN file determines the top-level process.
                if file_data.file_model.primary:
                    process_id = FileService.get_process_id(bpmn)
                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
            elif file_data.file_model.type == FileType.dmn:
                dmn: etree.Element = etree.fromstring(file_data.data)
                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
        if process_id is None:
            raise (ApiError(
                code="no_primary_bpmn_error",
                message="There is no primary BPMN model defined for workflow %s"
                % workflow_spec_id))
        try:
            spec = parser.get_spec(process_id)
        except ValidationException as ve:
            # Bug fix: the two %-formatted fragments were concatenated with no
            # separator, yielding "...'spec_id'Error is ...". Use one format
            # string with proper punctuation instead.
            raise ApiError(
                code="workflow_validation_error",
                message="Failed to parse Workflow Specification '%s'. Error is %s"
                % (workflow_spec_id, str(ve)),
                file_name=ve.filename,
                task_id=ve.id,
                tag=ve.tag)
        return spec
# Example #27
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        """Delete the task events, uploaded files, and data-store rows that
        belong to one task spec of this workflow.

        The spec name arrives as ``task_id`` — either a keyword argument or a
        single positional argument.  (Despite the name, it is the id of the
        task *spec* used to upload the files, not a task instance id.)
        """
        if 'task_id' in kwargs:
            task_spec_name = kwargs['task_id']
        elif len(args) == 1:
            task_spec_name = args[0]
        else:
            raise ApiError(code='missing_task_id',
                           message='The delete_task_data requires task_id. This is the ID of the task used to upload the file(s)')

        # Drop the COMPLETE task events recorded for this spec in this workflow.
        events = session.query(TaskEventModel) \
            .filter(TaskEventModel.workflow_id == workflow_id) \
            .filter(TaskEventModel.study_id == study_id) \
            .filter(TaskEventModel.task_name == task_spec_name) \
            .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE)
        events.delete()

        # Remove each uploaded file, along with its data-store entries.
        doomed_files = session.query(FileModel) \
            .filter(FileModel.workflow_id == workflow_id) \
            .filter(FileModel.task_spec == task_spec_name) \
            .all()
        for doomed in doomed_files:
            FileService().delete_file(doomed.id)
            session.query(DataStoreModel) \
                .filter(DataStoreModel.file_id == doomed.id) \
                .delete()
# Example #28
def update_file_data(file_id):
    """Replace the binary content of an existing file with an uploaded payload.

    Expects a multipart upload under the ``file`` key of the current request.

    :param file_id: id of the FileModel row to update.
    :raises ApiError: 'no_such_file' when file_id matches no existing file.
    """
    # Lock the row (SELECT ... FOR UPDATE) so concurrent writers serialize.
    file_model = session.query(FileModel).filter_by(id=file_id).with_for_update().first()
    # Bug fix: verify the target exists BEFORE touching the request payload —
    # previously a bad file_id could surface a raw KeyError from connexion
    # instead of the intended 'no_such_file' error.
    if file_model is None:
        raise ApiError('no_such_file', 'The file id you provided does not exist')
    file = connexion.request.files['file']
    file_model = FileService.update_file(file_model, file.stream.read(), file.content_type)
    return FileSchema().dump(to_file_api(file_model))
    def get_image_file_data(self, fields_str, task):
        """Resolve a '[a, b, c]'-style field list into file data records.

        Each named task-data field may hold one file id or a list of them;
        numeric strings are coerced to ints.  During a validation run no
        database lookups are made, so the result stays empty.  Anything that
        is not an int (or numeric string) raises an ApiError.
        """
        file_data_models = []
        # Strip the surrounding brackets, then split the names on commas.
        field_names = [
            name.strip()
            for name in re.sub(r'[\[\]]', '', fields_str).strip().split(',')
        ]
        for name in field_names:
            if name not in task.data:
                continue
            raw = task.data[name]
            candidates = raw if isinstance(raw, list) else [raw]

            for candidate in candidates:
                if isinstance(candidate, str) and candidate.isnumeric():
                    candidate = int(candidate)

                if candidate is not None and isinstance(candidate, int):
                    if not task.workflow.data[
                            WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                        # Fetch the actual image data from the database.
                        file_model = session.query(
                            FileModel).filter_by(id=candidate).first()
                        data_model = FileService.get_file_data(
                            candidate, file_model)
                        if data_model is not None:
                            file_data_models.append(data_model)

                else:
                    raise ApiError(
                        code="not_a_file_id",
                        message=
                        "The CompleteTemplate script requires 2-3 arguments. The third argument should "
                        "be a comma-delimited list of File IDs")

        return file_data_models
# Example #30
    def test_get_not_really_csv_content(self):
        """One approval with a single attached file yields two pseudo-CSV rows."""
        self.load_example_data()
        self.create_reference_document()

        # A workflow with one uploaded IRB document ...
        workflow = self.create_workflow('empty_workflow')
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png",
                                      content_type="text",
                                      binary_data=b'5678',
                                      irb_doc_code="AD_CoCAppr")
        # ... and a single approval request for it.
        ApprovalService.add_approval(study_id=workflow.study_id,
                                     workflow_id=workflow.id,
                                     approver_uid="dhf8r")

        rows = ApprovalService.get_not_really_csv_content()
        self.assertEqual(2, len(rows))