def synch_with_protocol_builder_if_enabled(user):
    """Assures that the studies we have locally for the given user are
    in sync with the studies available in protocol builder."""
    if ProtocolBuilderService.is_enabled():
        app.logger.info("The Protocol Builder is enabled. app.config['PB_ENABLED'] = " +
                        str(app.config['PB_ENABLED']))

        # Get studies matching this user from Protocol Builder
        pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)

        # Get studies from the database
        db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()

        # Update all studies from the protocol builder, create new studies as needed.
        # Further assures that every active study (that does exist in the protocol builder)
        # has a reference to every available workflow (though some may not have started yet)
        for pb_study in pb_studies:
            db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
            if not db_study:
                db_study = StudyModel(id=pb_study.STUDYID)
                session.add(db_study)
                db_studies.append(db_study)
            db_study.update_from_protocol_builder(pb_study)
            StudyService._add_all_workflow_specs_to_study(db_study)

        # Mark studies as inactive that are no longer in Protocol Builder
        for study in db_studies:
            pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
            if not pb_study:
                study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED

        db.session.commit()
def add_study(body):
    """Or any study-like object. Body should include a title and a primary_investigator_id."""
    if 'primary_investigator_id' not in body:
        raise ApiError("missing_pi", "Can't create a new study without a Primary Investigator.")
    if 'title' not in body:
        raise ApiError("missing_title", "Can't create a new study without a title.")

    study_model = StudyModel(user_uid=UserService.current_user().uid,
                             title=body['title'],
                             primary_investigator_id=body['primary_investigator_id'],
                             last_updated=datetime.utcnow(),
                             status=StudyStatus.in_progress)
    session.add(study_model)
    StudyService.add_study_update_event(study_model,
                                        status=StudyStatus.in_progress,
                                        event_type=StudyEventType.user,
                                        user_uid=g.user.uid)

    errors = StudyService._add_all_workflow_specs_to_study(study_model)
    session.commit()

    study = StudyService().get_study(study_model.id, do_status=True)
    study_data = StudySchema().dump(study)
    study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
    return study_data
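# Hedged usage sketch, not part of the source: add_study appears to back the study API that the
# tests in this file hit at /v1.0/study, so creating a study from a test would presumably POST a
# JSON body with the two required keys. The POST route and the BaseTest helpers used below
# (self.app, logged_in_headers, assert_success) are assumptions carried over from the
# surrounding test code, not confirmed by this excerpt.
def create_study_via_api(self):
    import json
    body = {"title": "Beer consumption in the bipedal software engineer",
            "primary_investigator_id": "lb3dp"}
    rv = self.app.post('/v1.0/study',
                       data=json.dumps(body),
                       content_type="application/json",
                       headers=self.logged_in_headers())
    self.assert_success(rv)
    # add_study returns a StudySchema dump with an extra "errors" list appended.
    return json.loads(rv.get_data(as_text=True))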
def make_test_workflow(spec_id):
    user = db.session.query(UserModel).filter_by(uid="test").first()
    if not user:
        db.session.add(UserModel(uid="test"))
    study = db.session.query(StudyModel).filter_by(user_uid="test").first()
    if not study:
        # Keep a reference to the new study so the workflow below is attached to it.
        study = StudyModel(user_uid="test", title="test")
        db.session.add(study)
        db.session.commit()
    workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
                                   workflow_spec_id=spec_id,
                                   last_updated=datetime.now(),
                                   study=study)
    return workflow_model
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
    """use_crc_data will cause this to load the mammoth collection of documents
    we built up developing crc, use_rrt_data will do the same for the rrt project,
    otherwise it depends on a small setup for running tests."""
    from example_data import ExampleDataLoader
    ExampleDataLoader.clean_db()

    # If in production mode, only add the first user.
    if app.config['PRODUCTION']:
        ldap_info = LdapService.user_info(self.users[0]['uid'])
        session.add(UserModel(uid=self.users[0]['uid'], ldap_info=ldap_info))
    else:
        for user_json in self.users:
            ldap_info = LdapService.user_info(user_json['uid'])
            session.add(UserModel(uid=user_json['uid'], ldap_info=ldap_info))

    if use_crc_data:
        ExampleDataLoader().load_all()
    elif use_rrt_data:
        ExampleDataLoader().load_rrt()
    else:
        ExampleDataLoader().load_test_data()

    session.commit()

    for study_json in self.studies:
        study_model = StudyModel(**study_json)
        session.add(study_model)
        StudyService._add_all_workflow_specs_to_study(study_model)
        session.commit()
        update_seq = "ALTER SEQUENCE %s RESTART WITH %s" % (
            StudyModel.__tablename__ + '_id_seq', study_model.id + 1)
        print("Update Sequence. " + update_seq)
        session.execute(update_seq)
        session.flush()

    specs = session.query(WorkflowSpecModel).all()
    self.assertIsNotNone(specs)

    for spec in specs:
        files = session.query(FileModel).filter_by(workflow_spec_id=spec.id).all()
        self.assertIsNotNone(files)
        self.assertGreater(len(files), 0)
        for file in files:
            # file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
            file_data = SpecFileService().get_spec_file_data(file.id).data
            self.assertIsNotNone(file_data)
            self.assertGreater(len(file_data), 0)
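# A minimal, self-contained sketch of the sequence-restart trick used above: after inserting
# rows with explicit primary keys, the Postgres sequence has to be bumped past them or the next
# auto-generated id will collide with a fixture row. The connection string, sequence name, and
# id value below are illustrative assumptions, not values from the source.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://localhost/example_db")  # assumed dev database
highest_manual_id = 54321                                    # e.g. the largest fixture study id
with engine.begin() as conn:
    # Bind parameters are not allowed in DDL, so the value is interpolated as a literal.
    conn.execute(text("ALTER SEQUENCE study_id_seq RESTART WITH %d" % (highest_manual_id + 1)))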
def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer",
                 primary_investigator_id="lb3dp"):
    study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
    if study is None:
        user = self.create_user(uid=uid)
        study = StudyModel(title=title,
                           status=StudyStatus.in_progress,
                           user_uid=user.uid,
                           primary_investigator_id=primary_investigator_id)
        session.add(study)
        session.commit()
    return study
def create_user_with_study_and_workflow(self):
    # Clear it all out.
    from example_data import ExampleDataLoader
    ExampleDataLoader.clean_db()

    # Assure some basic models are in place. This is a damn mess; our database models need an
    # overhaul to make this easier - better relationship modeling is now critical.
    self.load_test_spec("top_level_workflow", master_spec=True)
    user = db.session.query(UserModel).filter(UserModel.uid == "dhf8r").first()
    if not user:
        user = UserModel(uid="dhf8r", email_address="*****@*****.**",
                         display_name="Stayathome Smellalots")
        db.session.add(user)
        db.session.commit()
    else:
        for study in db.session.query(StudyModel).all():
            StudyService().delete_study(study.id)

    study = StudyModel(title="My title",
                       protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
                       user_uid=user.uid)
    db.session.add(study)

    cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0)
    db.session.add(cat)
    db.session.commit()

    self.assertIsNotNone(cat.id)
    self.load_test_spec("random_fact", category_id=cat.id)

    self.assertIsNotNone(study.id)
    workflow = WorkflowModel(workflow_spec_id="random_fact",
                             study_id=study.id,
                             status=WorkflowStatus.not_started,
                             last_updated=datetime.now())
    db.session.add(workflow)
    db.session.commit()

    # Assure there is a master specification, one standard spec, and lookup tables.
    ExampleDataLoader().load_reference_documents()
    return user
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
    """use_crc_data will cause this to load the mammoth collection of documents
    we built up developing crc, use_rrt_data will do the same for the rrt project,
    otherwise it depends on a small setup for running tests."""
    from example_data import ExampleDataLoader
    ExampleDataLoader.clean_db()

    if use_crc_data:
        ExampleDataLoader().load_all()
    elif use_rrt_data:
        ExampleDataLoader().load_rrt()
    else:
        ExampleDataLoader().load_test_data()

    for user_json in self.users:
        db.session.add(UserModel(**user_json))
    db.session.commit()

    for study_json in self.studies:
        study_model = StudyModel(**study_json)
        db.session.add(study_model)
        StudyService._add_all_workflow_specs_to_study(study_model)
        db.session.execute(Sequence(StudyModel.__tablename__ + '_id_seq'))
        db.session.commit()
        db.session.flush()

    specs = session.query(WorkflowSpecModel).all()
    self.assertIsNotNone(specs)

    for spec in specs:
        files = session.query(FileModel).filter_by(workflow_spec_id=spec.id).all()
        self.assertIsNotNone(files)
        self.assertGreater(len(files), 0)

    for spec in specs:
        files = session.query(FileModel).filter_by(workflow_spec_id=spec.id).all()
        self.assertIsNotNone(files)
        self.assertGreater(len(files), 0)
        for file in files:
            file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
            self.assertIsNotNone(file_data)
            self.assertGreater(len(file_data), 0)
def add_study(body):
    """Or any study-like object. Body should include a title and a primary_investigator_id."""
    if 'primary_investigator_id' not in body:
        raise ApiError("missing_pi", "Can't create a new study without a Primary Investigator.")
    if 'title' not in body:
        raise ApiError("missing_title", "Can't create a new study without a title.")

    study_model = StudyModel(user_uid=g.user.uid,
                             title=body['title'],
                             primary_investigator_id=body['primary_investigator_id'],
                             last_updated=datetime.now(),
                             protocol_builder_status=ProtocolBuilderStatus.ACTIVE)
    session.add(study_model)

    errors = StudyService._add_all_workflow_specs_to_study(study_model)
    session.commit()

    study = StudyService().get_study(study_model.id)
    study_data = StudySchema().dump(study)
    study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
    return study_data
def synch_with_protocol_builder_if_enabled(user):
    """Assures that the studies we have locally for the given user are
    in sync with the studies available in protocol builder."""
    if ProtocolBuilderService.is_enabled():
        app.logger.info("The Protocol Builder is enabled. app.config['PB_ENABLED'] = " +
                        str(app.config['PB_ENABLED']))

        # Get studies matching this user from Protocol Builder
        pb_studies: List[ProtocolBuilderCreatorStudy] = ProtocolBuilderService.get_studies(user.uid)

        # Get studies from the database
        db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()

        # Update all studies from the protocol builder, create new studies as needed.
        # Further assures that every active study (that does exist in the protocol builder)
        # has a reference to every available workflow (though some may not have started yet)
        for pb_study in pb_studies:
            new_status = None
            new_progress_status = None
            db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
            if not db_study:
                db_study = StudyModel(id=pb_study.STUDYID)
                db_study.status = None  # Force a new status to be recorded below.
                new_status = StudyStatus.in_progress
                new_progress_status = ProgressStatus.in_progress
                session.add(db_study)
                db_studies.append(db_study)

            db_study.update_from_protocol_builder(pb_study, user.uid)
            StudyService._add_all_workflow_specs_to_study(db_study)

            # If there is a new automatic status change and there isn't a manual change in place, record it.
            if new_status and db_study.status != StudyStatus.hold:
                db_study.status = new_status
                # Make sure status is `in_progress` before processing the new automatic progress_status.
                if new_progress_status and db_study.status == StudyStatus.in_progress:
                    db_study.progress_status = new_progress_status
                StudyService.add_study_update_event(db_study,
                                                    status=new_status,
                                                    event_type=StudyEventType.automatic)

        # Mark studies as inactive that are no longer in Protocol Builder
        for study in db_studies:
            pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
            if not pb_study and study.status != StudyStatus.abandoned:
                study.status = StudyStatus.abandoned
                StudyService.add_study_update_event(study,
                                                    status=StudyStatus.abandoned,
                                                    event_type=StudyEventType.automatic)

        db.session.commit()
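# A self-contained sketch of the matching logic used above, for illustration only: plain
# dataclasses stand in for ProtocolBuilderCreatorStudy and StudyModel, and plain strings stand
# in for StudyStatus, so this runs without the crc app or a database.
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class RemoteStudy:           # stand-in for a Protocol Builder study
    STUDYID: int

@dataclass
class LocalStudy:            # stand-in for StudyModel
    id: int
    status: str = "in_progress"

def sync_local_with_remote(remote: List[RemoteStudy], local: List[LocalStudy]) -> List[LocalStudy]:
    # Create a local study for anything that exists remotely but not locally.
    for r in remote:
        match: Optional[LocalStudy] = next((s for s in local if s.id == r.STUDYID), None)
        if match is None:
            local.append(LocalStudy(id=r.STUDYID))
    # Abandon any local study that no longer exists remotely.
    for s in local:
        if not any(r.STUDYID == s.id for r in remote):
            s.status = "abandoned"
    return local

# Study 2 disappears from the remote list (abandoned); study 3 is new (created in progress).
print(sync_local_with_remote([RemoteStudy(1), RemoteStudy(3)],
                             [LocalStudy(1), LocalStudy(2)]))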
def test_get_all_studies(self, mock_studies, mock_details, mock_docs, mock_investigators):
    # Enable the protocol builder for these tests, as the master_workflow and other workflows
    # depend on using the PB for data.
    app.config['PB_ENABLED'] = True
    self.load_example_data()
    s = StudyModel(
        id=54321,  # This matches one of the ids from the study_details_json data.
        title='The impact of pandemics on dog owner sanity after 12 days',
        user_uid='dhf8r',
    )
    session.add(s)
    session.commit()

    num_db_studies_before = session.query(StudyModel).count()

    # Mock Protocol Builder responses
    studies_response = self.protocol_builder_response('user_studies.json')
    mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)

    details_response = self.protocol_builder_response('study_details.json')
    mock_details.return_value = json.loads(details_response)

    docs_response = self.protocol_builder_response('required_docs.json')
    mock_docs.return_value = json.loads(docs_response)

    investigators_response = self.protocol_builder_response('investigators.json')
    mock_investigators.return_value = json.loads(investigators_response)

    # Make the api call to get all studies
    api_response = self.app.get('/v1.0/study',
                                headers=self.logged_in_headers(),
                                content_type="application/json")
    self.assert_success(api_response)
    json_data = json.loads(api_response.get_data(as_text=True))

    num_incomplete = 0
    num_abandoned = 0
    num_active = 0
    num_open = 0

    for study in json_data:
        if study['protocol_builder_status'] == 'ABANDONED':  # One study does not exist in user_studies.json
            num_abandoned += 1
        if study['protocol_builder_status'] == 'ACTIVE':  # One study is marked complete without HSR Number
            num_active += 1
        if study['protocol_builder_status'] == 'OPEN':  # One study is marked complete and has an HSR Number
            num_open += 1

    db_studies_after = session.query(StudyModel).all()
    num_db_studies_after = len(db_studies_after)
    self.assertGreater(num_db_studies_after, num_db_studies_before)
    self.assertEqual(num_abandoned, 1)
    self.assertEqual(num_open, 1)
    self.assertEqual(num_active, 2)
    self.assertEqual(num_incomplete, 0)
    self.assertEqual(len(json_data), num_db_studies_after)
    self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after)
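# The four mock_* arguments above are injected by stacked @patch decorators that are not shown
# in this excerpt. A hedged sketch of the usual unittest.mock wiring follows; the patch target
# paths are assumptions about where ProtocolBuilderService lives, not taken from the source.
# Note that decorators apply bottom-up, so the bottom-most patch becomes the first mock
# argument (mock_studies).
from unittest.mock import patch

@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')   # mock_investigators
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')   # mock_docs
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')   # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')         # mock_studies
def test_get_all_studies(self, mock_studies, mock_details, mock_docs, mock_investigators):
    ...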
def test_get_all_studies(self, mock_studies, mock_details, mock_docs, mock_investigators):
    # Enable the protocol builder for these tests, as the master_workflow and other workflows
    # depend on using the PB for data.
    app.config['PB_ENABLED'] = True
    self.load_example_data()
    with session.no_autoflush:
        s = StudyModel(
            id=54321,  # This matches one of the ids from the study_details_json data.
            title='The impact of pandemics on dog owner sanity after 12 days',
            user_uid='dhf8r',
        )
        session.add(s)
        session.commit()

        num_db_studies_before = session.query(StudyModel).count()

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderCreatorStudySchema(many=True).loads(studies_response)
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        # Make the api call to get all studies
        api_response = self.app.get('/v1.0/study',
                                    headers=self.logged_in_headers(),
                                    content_type="application/json")
        self.assert_success(api_response)
        json_data = json.loads(api_response.get_data(as_text=True))

        num_abandoned = 0
        num_in_progress = 0
        num_open = 0

        for study in json_data:
            if study['status'] == 'abandoned':  # One study does not exist in user_studies.json
                num_abandoned += 1
            if study['status'] == 'in_progress':  # One study is marked complete without HSR Number
                num_in_progress += 1
            if study['status'] == 'open_for_enrollment':  # Currently, we don't automatically set studies to open for enrollment
                num_open += 1

        db_studies_after = session.query(StudyModel).all()
        num_db_studies_after = len(db_studies_after)
        self.assertGreater(num_db_studies_after, num_db_studies_before)
        self.assertEqual(num_abandoned, 1)
        self.assertEqual(num_open, 0)  # Currently, we don't automatically set studies to open for enrollment
        self.assertEqual(num_in_progress, 2)
        self.assertEqual(len(json_data), num_db_studies_after)
        # The sum below is off, since we don't automatically set studies to Open for Enrollment.
        # Leaving the test here because we will need it again
        # when we implement a new way to set Open for Enrollment.
        # self.assertEqual(num_open + num_in_progress + num_abandoned, num_db_studies_after)

        # Automatic events check
        in_progress_events = session.query(StudyEvent).filter_by(status=StudyStatus.in_progress)
        self.assertEqual(in_progress_events.count(), 1)  # 1 study is in progress
        abandoned_events = session.query(StudyEvent).filter_by(status=StudyStatus.abandoned)
        self.assertEqual(abandoned_events.count(), 1)  # 1 study has been abandoned