def get_files(attachments, study_id):
        files = []
        codes = None
        if isinstance(attachments, str):
            codes = [attachments]
        elif isinstance(attachments, list):
            codes = attachments

        if codes is not None:
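            # For each code, confirm it is a recognized document type, then
            # gather matching files from every workflow in this study.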
            for code in codes:
                if DocumentService.is_allowed_document(code):
                    workflows = session.query(WorkflowModel).filter(
                        WorkflowModel.study_id == study_id).all()
                    for workflow in workflows:
                        workflow_files = session.query(FileModel).\
                            filter(FileModel.workflow_id == workflow.id).\
                            filter(FileModel.irb_doc_code == code).all()
                        for file in workflow_files:
                            files.append({
                                'id': file.id,
                                'name': file.name,
                                'type': CONTENT_TYPES[file.type.value]
                            })
                else:
                    raise ApiError(
                        code='bad_doc_code',
                        message=f'The doc_code {code} is not valid.')
        else:
            raise ApiError(
                code='bad_argument_type',
                message='The attachments argument must be a string or list of strings')

        return files
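
A minimal usage sketch for get_files above, assuming a bound SQLAlchemy session and a study whose workflows already hold files with these doc codes; the codes and study id shown are illustrative only:

# A single doc code can be passed as a string ...
files_for_one = get_files('Study_App_Doc', study_id=1)
# ... or several codes as a list; each entry in the result is a dict
# with 'id', 'name', and 'type' keys.
files_for_many = get_files(['Study_App_Doc', 'Study_Protocol_Document'], study_id=1)
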
    def test_delete_study_with_workflow_and_status_etc(self):
        self.load_example_data()
        workflow = session.query(WorkflowModel).first()
        stats1 = StudyEvent(
            study_id=workflow.study_id,
            status=StudyStatus.in_progress,
            comment='Some study status change event',
            event_type=StudyEventType.user,
            user_uid=self.users[0]['uid'],
        )
        LdapService.user_info('dhf8r') # Assure that there is a dhf8r in ldap for StudyAssociated.

        email = EmailModel(subject="x", study_id=workflow.study_id)
        associate = StudyAssociated(study_id=workflow.study_id, uid=self.users[0]['uid'])
        event = StudyEvent(study_id=workflow.study_id)
        session.add_all([email, associate, event])


        stats2 = TaskEventModel(study_id=workflow.study_id, workflow_id=workflow.id, user_uid=self.users[0]['uid'])
        session.add_all([stats1, stats2])
        session.commit()
        rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)
        del_study = session.query(StudyModel).filter(StudyModel.id == workflow.study_id).first()
        self.assertIsNone(del_study)
Example #3
    def test_embedded_template(self):
        workflow = self.create_workflow('docx_embedded')
        workflow_api = self.get_workflow_api(workflow)
        task = workflow_api.next_task

        data = {
            'include_me': 'Hello {{ name }}!',
            'name': 'World',
            'file_name': 'simple.docx',
            'irb_doc_code': 'Study_App_Doc'
        }
        self.complete_form(workflow, task, data)

        # Get the file data created for us in the workflow
        file_model = session.query(FileModel).\
            filter(FileModel.workflow_id == workflow.id).\
            filter(FileModel.irb_doc_code == 'Study_App_Doc').\
            first()
        file_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            first()

        # read the data as a word document
        document = docx.Document(BytesIO(file_data_model.data))
        # Make sure 'Hello World!' is there
        self.assertEqual('Hello World!', document.paragraphs[4].text)
    def test_workflow_spec_reorder_up(self):
        self.load_example_data()
        self._load_sample_workflow_specs()

        # Check what order is in the DB
        ordered = session.query(WorkflowSpecModel).\
            filter(WorkflowSpecModel.category_id == 0).\
            order_by(WorkflowSpecModel.display_order).\
            all()
        self.assertEqual('test_spec_2', ordered[2].id)

        # Move test_spec_2 up
        rv = self.app.put(
            "/v1.0/workflow-specification/test_spec_2/reorder?direction=up",
            headers=self.logged_in_headers())

        # rv json contains the newly ordered list of specs
        self.assertEqual(1, rv.json[1]['display_order'])
        self.assertEqual('test_spec_2', rv.json[1]['id'])

        # Check what new order is in the DB
        reordered = session.query(WorkflowSpecModel).\
            filter(WorkflowSpecModel.category_id == 0).\
            order_by(WorkflowSpecModel.display_order).\
            all()
        self.assertEqual('test_spec_2', reordered[1].id)
        print('test_workflow_spec_reorder_up')
    def process_workflow_spec(json_file, directory):
        file_path = os.path.join(directory, json_file)

        with open(file_path, 'r') as f_open:
            data = f_open.read()
            data_obj = json.loads(data)
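            # Only create the spec record if it is not already in the database;
            # otherwise return the existing row untouched.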
            workflow_spec_model = session.query(WorkflowSpecModel).\
                filter(WorkflowSpecModel.id == data_obj['id']).\
                first()
            if not workflow_spec_model:
                category_id = None
                if data_obj['category'] is not None:
                    category_id = session.query(
                        WorkflowSpecCategoryModel.id).filter(
                            WorkflowSpecCategoryModel.display_name ==
                            data_obj['category']['display_name']).scalar()
                workflow_spec_model = WorkflowSpecModel(
                    id=data_obj['id'],
                    display_name=data_obj['display_name'],
                    description=data_obj['description'],
                    is_master_spec=data_obj['is_master_spec'],
                    category_id=category_id,
                    display_order=data_obj['display_order'],
                    standalone=data_obj['standalone'],
                    library=data_obj['library'])
                session.add(workflow_spec_model)
                session.commit()

            return workflow_spec_model
Example #6
    def test_non_production_auth_creates_user(self):
        new_uid = self.non_admin_uid  # Assure this user id is in the fake responses from ldap.
        #        self.load_example_data()
        user = session.query(UserModel).filter(
            UserModel.uid == new_uid).first()
        self.assertIsNone(user)

        user_info = {
            'uid': new_uid,
            'first_name': 'Cordi',
            'last_name': 'Nator',
            'email_address': '*****@*****.**'
        }
        redirect_url = 'http://worlds.best.website/admin'
        query_string = self.user_info_to_query_string(user_info, redirect_url)
        url = '/v1.0/login%s' % query_string
        rv_1 = self.app.get(url, follow_redirects=False)
        self.assertEqual(302, rv_1.status_code)
        self.assertTrue(rv_1.location.startswith(redirect_url))

        user = session.query(UserModel).filter(
            UserModel.uid == new_uid).first()
        self.assertIsNotNone(user)
        self.assertIsNotNone(user.ldap_info.display_name)
        self.assertIsNotNone(user.ldap_info.email_address)

        # Hitting the same endpoint again with the same info should not cause an error
        rv_2 = self.app.get(url, follow_redirects=False)
        self.assertEqual(302, rv_2.status_code)
        self.assertTrue(rv_2.location.startswith(redirect_url))
Example #7
 def test_workflow_with_dmn(self):
     self.load_example_data()
     study = session.query(StudyModel).first()
     workflow_spec_model = self.load_test_spec("decision_table")
     files = session.query(FileModel).filter_by(
         workflow_spec_id='decision_table').all()
     self.assertEqual(2, len(files))
     processor = self.get_processor(study, workflow_spec_model)
     self.assertEqual(WorkflowStatus.user_input_required,
                      processor.get_status())
     next_user_tasks = processor.next_user_tasks()
     self.assertEqual(1, len(next_user_tasks))
     task = next_user_tasks[0]
     self.assertEqual("get_num_presents", task.get_name())
     model = {"num_presents": 1}
     if task.data is None:
         task.data = {}
     task.data.update(model)
     processor.complete_task(task)
     processor.do_engine_steps()
     data = processor.get_data()
     self.assertIsNotNone(data)
     self.assertIn("message", data)
     self.assertEqual("Oh, Ginger.", data.get('message'))
     self.assertEqual("End",
                      processor.bpmn_workflow.last_task.task_spec.name)
     self.assertEqual("Oh, Ginger.",
                      processor.bpmn_workflow.last_task.data.get('message'))
 def test_lookup_table_is_not_created_more_than_once(self):
     spec = BaseTest.load_test_spec('enum_options_with_search')
     workflow = self.create_workflow('enum_options_with_search')
     LookupService.lookup(workflow,
                          "Task_Enum_Lookup",
                          "sponsor",
                          "sam",
                          limit=10)
     LookupService.lookup(workflow,
                          "Task_Enum_Lookup",
                          "sponsor",
                          "something",
                          limit=10)
     LookupService.lookup(workflow,
                          "Task_Enum_Lookup",
                          "sponsor",
                          "blah",
                          limit=10)
     lookup_records = session.query(LookupFileModel).all()
     self.assertIsNotNone(lookup_records)
     self.assertEqual(1, len(lookup_records))
     lookup_record = lookup_records[0]
     lookup_data = session.query(LookupDataModel).filter(
         LookupDataModel.lookup_file_model == lookup_record).all()
     self.assertEqual(28, len(lookup_data))
    def test_lookup_fails_for_xls(self):
        BaseTest.load_test_spec('enum_options_with_search')

        # Using an old xls file should raise an error
        file_model_xls = session.query(FileModel).filter(
            FileModel.name == 'sponsors.xls').first()
        file_data_xls = SpecFileService().get_spec_file_data(file_model_xls.id)
        # file_data_model_xls = session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model_xls.id).first()
        with self.assertRaises(ApiError) as ae:
            LookupService.build_lookup_table(file_model_xls.id, 'sponsors.xls',
                                             file_data_xls.data,
                                             'CUSTOMER_NUMBER',
                                             'CUSTOMER_NAME')
        self.assertIn('Error opening excel file', ae.exception.args[0])

        # Using an xlsx file should work
        file_model_xlsx = session.query(FileModel).filter(
            FileModel.name == 'sponsors.xlsx').first()
        file_data_xlsx = SpecFileService().get_spec_file_data(
            file_model_xlsx.id)
        # file_data_model_xlsx = session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model_xlsx.id).first()
        lookup_model = LookupService.build_lookup_table(
            file_model_xlsx.id, 'sponsors.xlsx', file_data_xlsx.data,
            'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
        self.assertEqual(28, len(lookup_model.dependencies))
        self.assertIn('CUSTOMER_NAME',
                      lookup_model.dependencies[0].data.keys())
        self.assertIn('CUSTOMER_NUMBER',
                      lookup_model.dependencies[0].data.keys())
Example #10
    def get_path(self, file_id: int):
        # Returns the path on the file system for the given File id

        # Assure we have a file.
        file_model = session.query(FileModel).filter(
            FileModel.id == file_id).first()
        if not file_model:
            raise ApiError(
                code='model_not_found',
                message=f'No model found for file with file_id: {file_id}')

        # Assure we have a spec.
        spec_model = session.query(WorkflowSpecModel).filter(
            WorkflowSpecModel.id == file_model.workflow_spec_id).first()
        if not spec_model:
            raise ApiError(
                code='spec_not_found',
                message=f'No spec found for file with file_id: '
                f'{file_model.id}, and spec_id: {file_model.workflow_spec_id}')

        # Calculate the path.
        sync_file_root = self.get_sync_file_root()
        category_name = self.get_spec_file_category_name(spec_model)
        return os.path.join(sync_file_root, category_name,
                            spec_model.display_name, file_model.name)
Example #11
 def delete_workflow(workflow):
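     # Delete the workflow's files, dependencies, and task events before
     # removing the workflow row itself.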
     for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
         FileService.delete_file(file.id)
     for dep in workflow.dependencies:
         session.delete(dep)
     session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
     session.query(WorkflowModel).filter_by(id=workflow.id).delete()
Example #12
    def test_updates_to_file_cause_lookup_rebuild(self):
        spec = BaseTest.load_test_spec('enum_options_with_search')
        workflow = self.create_workflow('enum_options_with_search')
        file_model = session.query(FileModel).filter(
            FileModel.name == "sponsors.xls").first()
        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        self.assertIsNotNone(lookup_records)
        self.assertEqual(1, len(lookup_records))
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(28, len(lookup_data))

        # Update the workflow specification file.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data',
                                 'enum_options_with_search',
                                 'sponsors_modified.xls')
        file = open(file_path, 'rb')
        FileService.update_file(file_model, file.read(), CONTENT_TYPES['xls'])
        file.close()

        # restart the workflow, so it can pick up the changes.
        WorkflowProcessor(workflow, soft_reset=True)

        LookupService.lookup(workflow, "sponsor", "sam", limit=10)
        lookup_records = session.query(LookupFileModel).all()
        lookup_record = lookup_records[0]
        lookup_data = session.query(LookupDataModel).filter(
            LookupDataModel.lookup_file_model == lookup_record).all()
        self.assertEqual(4, len(lookup_data))
Example #13
    def delete_spec_file(file_id):
        """This should remove the record in the file table, and both files on the filesystem."""
        sync_file_root = SpecFileService.get_sync_file_root()
        file_model = session.query(FileModel).filter(
            FileModel.id == file_id).first()
        workflow_spec_id = file_model.workflow_spec_id
        workflow_spec_model = session.query(WorkflowSpecModel).filter(
            WorkflowSpecModel.id == workflow_spec_id).first()
        category_name = SpecFileService.get_spec_file_category_name(
            workflow_spec_model)
        file_model_name = file_model.name
        spec_directory_path = os.path.join(sync_file_root, category_name,
                                           workflow_spec_model.display_name)
        file_path = os.path.join(spec_directory_path, file_model_name)
        json_file_path = os.path.join(spec_directory_path,
                                      f'{file_model_name}.json')

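        # Remove the spec file and its companion .json from disk and drop the
        # database record; if the record is still referenced, archive it instead.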
        try:
            os.remove(file_path)
            os.remove(json_file_path)
            session.delete(file_model)
            session.commit()
        except IntegrityError as ie:
            session.rollback()
            file_model = session.query(FileModel).filter_by(id=file_id).first()
            file_model.archived = True
            session.commit()
            app.logger.info(
                "Failed to delete file %i due to %s, so archiving it instead."
                % (file_id, str(ie)))
Example #14
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        # fixme: using task_id is confusing, this is actually the name of the task_spec

        # make sure we have a task_id
        if 'task_id' in kwargs:
            task_spec_name = kwargs['task_id']
        elif len(args) == 1:
            task_spec_name = args[0]
        else:
            raise ApiError(code='missing_task_id',
                           message='The delete_task_data script requires a task_id, the ID of the task used to upload the file(s).')

        # delete task events
        session.query(TaskEventModel).filter(TaskEventModel.workflow_id == workflow_id).filter(
            TaskEventModel.study_id == study_id).filter(TaskEventModel.task_name == task_spec_name).filter_by(
            action=WorkflowService.TASK_ACTION_COMPLETE).delete()

        files_to_delete = session.query(FileModel). \
            filter(FileModel.workflow_id == workflow_id). \
            filter(FileModel.task_spec == task_spec_name).all()

        # delete files
        for file in files_to_delete:
            FileService().delete_file(file.id)

            # delete the data store
            session.query(DataStoreModel). \
                filter(DataStoreModel.file_id == file.id).delete()
Example #15
    def do_task(self, task, study_id, workflow_id, *args, **kwargs):

        if 'reset_id' in kwargs.keys():
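            # Look up the spec named by reset_id, find the matching workflow
            # for this study, and reset it without clearing data or files.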
            reset_id = kwargs['reset_id']
            workflow_spec: WorkflowSpecModel = session.query(
                WorkflowSpecModel).filter_by(id=reset_id).first()
            if workflow_spec:
                workflow_model: WorkflowModel = session.query(
                    WorkflowModel).filter_by(workflow_spec_id=workflow_spec.id,
                                             study_id=study_id).first()
                if workflow_model:
                    workflow_processor = WorkflowProcessor.reset(
                        workflow_model, clear_data=False, delete_files=False)
                    return workflow_processor
                else:
                    raise ApiError(code='missing_workflow_model',
                                   message=f'No WorkflowModel returned. '
                                           f'workflow_spec_id: {workflow_spec.id}, '
                                           f'study_id: {study_id}')
            else:
                raise ApiError(code='missing_workflow_spec',
                               message=f'No WorkflowSpecModel returned for '
                                       f'id: {reset_id}')
        else:
            raise ApiError(code='missing_reset_id',
                           message='Resetting a workflow requires a reset_id keyword argument')
Example #16
def delete_workflow_specification(spec_id):
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Specification ID.')

    spec: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()

    if spec is None:
        raise ApiError(
            'unknown_spec',
            'The Workflow Specification "' + spec_id + '" is not recognized.')

    category_id = spec.category_id

    # Delete all workflow models related to this specification
    WorkflowService.delete_workflow_spec_workflow_models(spec_id)

    # Delete all files related to this specification
    WorkflowService.delete_workflow_spec_files(spec_id)

    # Delete all events related to this specification
    WorkflowService.delete_workflow_spec_task_events(spec_id)

    # .delete() doesn't cascade here, so grab the record and delete it explicitly.
    workflow_spec = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()
    session.delete(workflow_spec)
    session.commit()

    # Reorder the remaining specs
    WorkflowService.cleanup_workflow_spec_display_order(category_id)
Example #17
def get_task_events(action=None, workflow=None, study=None):
    """Provides a way to see a history of what has happened, or get a list of tasks that need your attention."""
    user = UserService.current_user(allow_admin_impersonate=True)
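    # Restrict the query to the current user's studies, or to events the user triggered directly.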
    studies = session.query(StudyModel).filter(StudyModel.user_uid == user.uid)
    studyids = [s.id for s in studies]
    query = session.query(TaskEventModel).filter((TaskEventModel.study_id.in_(studyids)) | \
                                                 (TaskEventModel.user_uid==user.uid))
    if action:
        query = query.filter(TaskEventModel.action == action)
    if workflow:
        query = query.filter(TaskEventModel.workflow_id == workflow)
    if study:
        query = query.filter(TaskEventModel.study_id == study)
    events = query.all()

    # Turn the database records into something a little richer for the UI to use.
    task_events = []
    for event in events:
        study = session.query(StudyModel).filter(
            StudyModel.id == event.study_id).first()
        workflow = session.query(WorkflowModel).filter(
            WorkflowModel.id == event.workflow_id).first()
        workflow_meta = WorkflowMetadata.from_workflow(workflow)
        if study and study.status in [
                StudyStatus.open_for_enrollment, StudyStatus.in_progress
        ]:
            task_events.append(TaskEvent(event, study, workflow_meta))
    return TaskEventSchema(many=True).dump(task_events)
Example #18
    def test_update_workflow_specification(self):
        self.load_example_data()

        category_id = 99
        category = WorkflowSpecCategoryModel(id=category_id,
                                             name='trap',
                                             display_name="It's a trap!",
                                             display_order=0)
        session.add(category)
        session.commit()

        db_spec_before: WorkflowSpecModel = session.query(
            WorkflowSpecModel).first()
        spec_id = db_spec_before.id
        self.assertNotEqual(db_spec_before.category_id, category_id)

        db_spec_before.category_id = category_id
        rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id,
                          content_type="application/json",
                          headers=self.logged_in_headers(),
                          data=json.dumps(
                              WorkflowSpecModelSchema().dump(db_spec_before)))
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
        self.assertEqual(db_spec_before, api_spec)

        db_spec_after: WorkflowSpecModel = session.query(
            WorkflowSpecModel).filter_by(id=spec_id).first()
        self.assertIsNotNone(db_spec_after.category_id)
        self.assertIsNotNone(db_spec_after.category)
        self.assertEqual(db_spec_after.category.display_name,
                         category.display_name)
        self.assertEqual(db_spec_after.category.display_order,
                         category.display_order)
Example #19
    def test_all_approvals(self):
        self._add_lots_of_random_approvals()

        not_canceled = session.query(ApprovalModel).filter(
            ApprovalModel.status != 'CANCELED').all()
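        # Collect the distinct study ids that still have non-canceled approvals.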
        not_canceled_study_ids = []
        for a in not_canceled:
            if a.study_id not in not_canceled_study_ids:
                not_canceled_study_ids.append(a.study_id)

        rv_all = self.app.get('/v1.0/all_approvals?status=false',
                              headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(
            len(all_data), len(not_canceled_study_ids),
            'Should return all non-canceled approvals, grouped by study')

        all_approvals = session.query(ApprovalModel).all()
        all_approvals_study_ids = []
        for a in all_approvals:
            if a.study_id not in all_approvals_study_ids:
                all_approvals_study_ids.append(a.study_id)

        rv_all = self.app.get('/v1.0/all_approvals?status=true',
                              headers=self.logged_in_headers())
        self.assert_success(rv_all)
        all_data = json.loads(rv_all.get_data(as_text=True))
        self.assertEqual(len(all_data), len(all_approvals_study_ids),
                         'Should return all approvals, grouped by study')
Example #20
    def test_change_primary_bpmn(self):
        self.load_example_data()
        spec = session.query(WorkflowSpecModel).first()
        data = {}
        data['file'] = (io.BytesIO(self.minimal_bpmn("abcdef")),
                        'my_new_file.bpmn')

        # Add a new BPMN file to the specification
        rv = self.app.post('/v1.0/file?workflow_spec_id=%s' % spec.id,
                           data=data,
                           follow_redirects=True,
                           content_type='multipart/form-data',
                           headers=self.logged_in_headers())
        self.assert_success(rv)
        self.assertIsNotNone(rv.get_data())
        json_data = json.loads(rv.get_data(as_text=True))
        file = FileModelSchema().load(json_data, session=session)

        # Delete the primary BPMN file for the workflow.
        orig_model = session.query(FileModel).\
            filter(FileModel.primary == True).\
            filter(FileModel.workflow_spec_id == spec.id).first()
        rv = self.app.delete('/v1.0/file?file_id=%s' % orig_model.id,
                             headers=self.logged_in_headers())

        # Set that new file to be the primary BPMN, assure it has a primary_process_id
        file.primary = True
        rv = self.app.put('/v1.0/file/%i' % file.id,
                          content_type="application/json",
                          data=json.dumps(FileModelSchema().dump(file)),
                          headers=self.logged_in_headers())
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        self.assertTrue(json_data['primary'])
        self.assertIsNotNone(json_data['primary_process_id'])
Example #21
def drop_workflow_spec_library(spec_id, library_id):
    validate_spec_and_library(spec_id, library_id)
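    # Remove the link to this library, then return the libraries still attached to the spec.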
    session.query(WorkflowLibraryModel).filter_by(
        workflow_spec_id=spec_id, library_spec_id=library_id).delete()
    session.commit()
    libraries = session.query(
        WorkflowLibraryModel).filter_by(workflow_spec_id=spec_id).all()
    return WorkflowLibraryModelSchema(many=True).dump(libraries)
Example #22
 def test_get_spec_files(self):
     self.load_example_data()
     spec = session.query(WorkflowSpecModel.id).first()
     spec_files = SpecFileService().get_spec_files(spec.id)
     workflow = session.query(WorkflowModel).first()
     processor = WorkflowProcessor(workflow)
     self.assertIsInstance(processor, WorkflowProcessor)
     print('test_get_spec_files')
 def test_delete_datastore(self):
     self.load_example_data()
     new_study = self.add_test_study_data()
     oldid = new_study['id']
     new_study = session.query(DataStoreModel).filter_by(id=new_study["id"]).first()
     rv = self.app.delete('/v1.0/datastore/%i' % new_study.id, headers=self.logged_in_headers())
     self.assert_success(rv)
     study_response = session.query(DataStoreModel).filter_by(id=oldid).first()
     self.assertIsNone(study_response)
Example #24
 def test_get_file(self):
     self.load_example_data()
     spec = session.query(WorkflowSpecModel).first()
     file = session.query(FileModel).filter_by(
         workflow_spec_id=spec.id).first()
     rv = self.app.get('/v1.0/file/%i/data' % file.id,
                       headers=self.logged_in_headers())
     self.assert_success(rv)
     self.assertEqual("text/xml; charset=utf-8", rv.content_type)
     self.assertTrue(rv.content_length > 1)
    def test_workflow_spec_reorder_bad_order(self):
        self.load_example_data()
        self._load_sample_workflow_specs()
        ordered = session.query(WorkflowSpecModel).\
            filter(WorkflowSpecModel.category_id == 0).\
            order_by(WorkflowSpecModel.display_order).\
            all()

        # Set bad display_orders
        spec_model = ordered[0]
        spec_model.display_order = 1
        session.add(spec_model)
        spec_model = ordered[1]
        spec_model.display_order = 1
        session.add(spec_model)
        spec_model = ordered[2]
        spec_model.display_order = 1
        session.add(spec_model)
        session.commit()

        bad_orders = session.query(WorkflowSpecModel).\
            filter(WorkflowSpecModel.category_id == 0).\
            order_by(WorkflowSpecModel.display_order).\
            all()
        # Postgres does not guarantee an order when multiple specs share
        # display_order == 1, but in practice the result comes back as
        # test_spec_1, random_fact, test_spec_2, test_spec_3
        self.assertEqual(1, bad_orders[0].display_order)
        self.assertEqual('test_spec_1', bad_orders[0].id)
        self.assertEqual(1, bad_orders[1].display_order)
        self.assertEqual('random_fact', bad_orders[1].id)
        self.assertEqual(1, bad_orders[2].display_order)
        self.assertEqual('test_spec_2', bad_orders[2].id)
        self.assertEqual(3, bad_orders[3].display_order)
        self.assertEqual('test_spec_3', bad_orders[3].id)

        # Move test_spec_2 up
        # This should cause a cleanup of the bad display_order numbers
        rv = self.app.put(
            "/v1.0/workflow-specification/test_spec_2/reorder?direction=up",
            headers=self.logged_in_headers())

        # After moving 2 up, the order should be
        # test_spec_1, test_spec_2, random_fact, test_spec_3
        # Make sure we have good display_order numbers too
        self.assertEqual('test_spec_1', rv.json[0]['id'])
        self.assertEqual(0, rv.json[0]['display_order'])
        self.assertEqual('test_spec_2', rv.json[1]['id'])
        self.assertEqual(1, rv.json[1]['display_order'])
        self.assertEqual('random_fact', rv.json[2]['id'])
        self.assertEqual(2, rv.json[2]['display_order'])
        self.assertEqual('test_spec_3', rv.json[3]['id'])
        self.assertEqual(3, rv.json[3]['display_order'])
    def put_study_on_hold(self, study_id):
        study = session.query(StudyModel).filter_by(id=study_id).first()

        study_schema = StudySchema().dump(study)
        study_schema['status'] = 'hold'
        study_schema['comment'] = 'This is my hold comment'

        self.update_study_status(study, study_schema)

        study_result = session.query(StudyModel).filter(StudyModel.id == study_id).first()
        return study_result
Example #27
 def test_get_file_contains_data_store_elements(self):
     self.load_example_data()
     spec = session.query(WorkflowSpecModel).first()
     file = session.query(FileModel).filter_by(
         workflow_spec_id=spec.id).first()
     ds = DataStoreModel(key="my_key", value="my_value", file_id=file.id)
     db.session.add(ds)
     rv = self.app.get('/v1.0/file/%i' % file.id,
                       headers=self.logged_in_headers())
     self.assert_success(rv)
     json_data = json.loads(rv.get_data(as_text=True))
     self.assertEqual("my_value", json_data['data_store']['my_key'])
    def test_waiting_task_error(self):

        workflow = self.create_workflow('raise_error')
        workflow.status = WorkflowStatus.waiting
        session.commit()

        status_before = session.query(WorkflowModel.status).filter(WorkflowModel.id == workflow.id).scalar()
        WorkflowService.do_waiting()
        status_after = session.query(WorkflowModel.status).filter(WorkflowModel.id == workflow.id).scalar()

        self.assertEqual('waiting', status_before.value)
        self.assertEqual('erroring', status_after.value)
 def validate_workflow(self, workflow_name):
     spec_model = self.load_test_spec(workflow_name)
     total_workflows = session.query(WorkflowModel).count()
     rv = self.app.get('/v1.0/workflow-specification/%s/validate' %
                       spec_model.id,
                       headers=self.logged_in_headers())
     self.assert_success(rv)
     total_workflows_after = session.query(WorkflowModel).count()
     self.assertEqual(total_workflows, total_workflows_after,
                      "No rogue workflow exists after validation.")
     json_data = json.loads(rv.get_data(as_text=True))
     return ApiErrorSchema(many=True).load(json_data)
Example #30
 def test_lookup_returns_good_error_on_bad_field(self):
     spec = BaseTest.load_test_spec('enum_options_with_search')
     workflow = self.create_workflow('enum_options_with_search')
     file_model = session.query(FileModel).filter(
         FileModel.name == "customer_list.xls").first()
     file_data_model = session.query(FileDataModel).filter(
         FileDataModel.file_model == file_model).first()
     with self.assertRaises(ApiError):
         LookupService.lookup(workflow,
                              "not_the_right_field",
                              "sam",
                              limit=10)