def test_pd_whitelist(self):
    """Any pd keys other than those whitelisted should not be returned."""
    pid = 'Participant_0123456789ABCDEF'
    context_params = {
        'participant_id': pid,
        'program_label': 'demo-program',
        'project_id': 'Project_12345678',
        'code': 'trout viper',
        'cohort_label': '2017_spring',
        'project_cohort_id': 'ProjectCohort_12345678',
        'survey_id': 'Survey_12345678',
    }
    portal_pd = [
        {
            'key': 'link',  # whitelisted
            'value': '',
        },
        {
            'key': 'not_whitelisted',
            'value': 'secret',
        },
    ]
    portal_pd = [
        ParticipantData.create(**dict(pd, **context_params))
        for pd in portal_pd
    ]
    ParticipantData.put_multi(portal_pd)

    results = ParticipantData.get_by_participant(
        pid, 'ProjectCohort_12345678')
    self.assertEqual(len(results), 1)
    self.assertNotEqual(results[0].value, 'secret')

def test_nobody_done(self):
    """If no one finished the survey, there's no count for progress '100'."""
    survey = Survey.create([], ordinal=1, program_label=self.program_label)
    survey.put()
    kwargs = {
        'key': 'progress',
        'program_label': self.program_label,
        'project_id': 'Project_12345678',
        'cohort_label': '2017_spring',
        'project_cohort_id': 'ProjectCohort_12345678',
        'code': 'trout viper',
        'survey_id': survey.uid,
        'survey_ordinal': survey.ordinal,
    }
    pd = [
        ParticipantData.create(participant_id='Participant_unfinished1',
                               value=1, **kwargs),
        ParticipantData.create(participant_id='Participant_unfinished2',
                               value=1, **kwargs),
    ]
    ParticipantData.put_multi(pd)

    result = ParticipantData.participation(survey_id=pd[0].survey_id)
    expected = [
        {'value': '1', 'n': 2, 'survey_ordinal': 1},
    ]
    self.assertEqual(result, expected)

def test_get_survey_participation(self):
    """Get count of participants at each progress marker."""
    pds = self.mock_one_finished_one_unfinished(1)
    survey_id = pds[0].survey_id

    start = datetime.date.today()
    end = start + datetime.timedelta(days=1)
    result = ParticipantData.participation(survey_id=survey_id, start=start,
                                           end=end)
    expected = [
        {'value': '1', 'n': 1, 'survey_ordinal': 1},
        {'value': '100', 'n': 1, 'survey_ordinal': 1},
    ]
    self.assertEqual(result, expected)

    # The same result should also now be available in memcache, so if we
    # clear the db the result should be the same.
    ParticipantData.delete_multi(pds)
    result = ParticipantData.participation(survey_id=survey_id, start=start,
                                           end=end)
    self.assertEqual(result, expected)

def test_put_insert(self):
    """put() a new uid: succeeds."""
    params = dict(self.context_params, value='1')
    pd = ParticipantData.create(**params)
    pd.put()

    # Db shows saved values, and db-provided defaults like timestamps
    # are present in the saved object.
    fetched = ParticipantData.get_by_id(pd.uid)
    self.assertEqual(pd.to_dict(), fetched.to_dict())

def test_put_insert_duplicate(self):
    """put() a new uid but matching an index: raises."""
    params = dict(self.context_params, value='1')
    pd = ParticipantData.create(**params)
    pd.put()

    dupe_params = dict(self.context_params, value='2')
    dupe_pd = ParticipantData.create(**dupe_params)
    with self.assertRaises(IntegrityError):
        dupe_pd.put()

def test_put_update(self):
    """put() an existing uid: succeeds."""
    params = dict(self.context_params, value='1')
    pd = ParticipantData.create(**params)
    pd.put()

    pd.value = '2'
    pd.put()

    # Db shows updated values.
    fetched = ParticipantData.get_by_id(pd.uid)
    self.assertEqual(pd.to_dict(), fetched.to_dict())

def test_get_project_cohort_participation(self):
    """Get stats for all surveys in the project cohort."""
    pds1 = self.mock_one_finished_one_unfinished(1)
    pds2 = self.mock_one_finished_one_unfinished(2)
    project_cohort_id = pds1[0].project_cohort_id
    ProjectCohort(
        id=project_cohort_id,
        program_label=self.program_label,
    ).put()

    start = datetime.date.today()
    end = start + datetime.timedelta(days=1)
    expected = [
        {'value': '1', 'n': 1, 'survey_ordinal': 1},
        {'value': '100', 'n': 1, 'survey_ordinal': 1},
        {'value': '1', 'n': 1, 'survey_ordinal': 2},
        {'value': '100', 'n': 1, 'survey_ordinal': 2},
    ]
    result = ParticipantData.participation(
        project_cohort_id=project_cohort_id, start=start, end=end)
    self.assertEqual(result, expected)

    # The same result should also now be available in memcache, so if we
    # clear the db the result should be the same.
    ParticipantData.delete_multi(pds1 + pds2)
    result = ParticipantData.participation(
        project_cohort_id=project_cohort_id, start=start, end=end)
    self.assertEqual(result, expected)

    # It should also work if some other kwargs are set with value None.
    result = ParticipantData.participation(
        project_cohort_id=project_cohort_id, start=start, end=end,
        cohort_label=None)
    self.assertEqual(result, expected)

def test_put_for_index_insert(self):
    """put_for_index() a new uid: succeeds."""
    params = dict(self.context_params, value='1')
    pd = ParticipantData.create(**params)
    synced_pd = ParticipantData.put_for_index(pd, 'participant-survey-key')

    # Returns same uid.
    self.assertEqual(pd.uid, synced_pd.uid)

    # Db shows values.
    fetched = ParticipantData.get_by_id(pd.uid)
    self.assertEqual(synced_pd.to_dict(), fetched.to_dict())

def test_batch_participation(self):
    user = User.create(email='*****@*****.**')
    user.put()

    pc_kwargs = {
        'program_label': self.program_label,
        'cohort_label': self.cohort_label,
    }
    pcs = [
        ProjectCohort.create(**pc_kwargs),
        ProjectCohort.create(**pc_kwargs),
    ]
    ndb.put_multi(pcs)

    all_pds = []
    for pc in pcs:
        pds = mock_one_finished_one_unfinished(
            1,
            'Participant_unfinished',
            'Participant_finished',
            pc_id=pc.uid,
            code=pc.code,
            program_label=self.program_label,
            cohort_label=self.cohort_label,
        )
        all_pds += pds

    # Forbidden without allowed endpoints.
    pc_ids = [pc.uid for pc in pcs]
    self.testapp.get(
        '/api/project_cohorts/participation?uid={}&uid={}'.format(*pc_ids),
        headers=jwt_headers(user),
        status=403,
    )

    # Running various queries works as expected.
    self.batch_participation(user, pcs)

    # Simulate a new pd being written to the first pc by clearing that
    # memcache key. The server should fall back to sql and still give the
    # same results.
    id_key = ParticipantData.participation_by_pc_cache_key(pcs[0].uid)
    code_key = ParticipantData.participation_by_pc_cache_key(pcs[0].code)
    self.assertIsNotNone(memcache.get(id_key))
    self.assertIsNotNone(memcache.get(code_key))
    memcache.delete(id_key)
    memcache.delete(code_key)
    self.batch_participation(user, pcs)

    # Now with everything cached, clearing the db and running the same
    # queries again should have the same result.
    ParticipantData.delete_multi(all_pds)
    self.batch_participation(user, pcs)

def test_put_update_duplicate(self):
    """put() an existing uid but matching an index: raises."""
    params1 = dict(self.context_params, value='1', survey_id='Survey_1')
    pd1 = ParticipantData.create(**params1)
    pd1.put()

    params2 = dict(self.context_params, value='1', survey_id='Survey_2')
    pd2 = ParticipantData.create(**params2)
    pd2.put()

    with self.assertRaises(IntegrityError):
        # Now changing 1 so that it collides with 2.
        pd1.survey_id = 'Survey_2'
        pd1.put()

def test_update_cross_site_pd_with_descriptor(self):
    """Tests two ways: as a param, and as a compound survey id."""
    pc, survey, participant = self.create_pd_context()
    survey_descriptor = 'cycle-1'
    compound_id = '{}:{}'.format(survey.uid, survey_descriptor)

    # As a param.
    url = (
        '/api/participants/{participant_id}/data/cross_site.gif?'
        'survey_id={survey_id}&survey_descriptor={survey_descriptor}&'
        'key=progress&value={value}'
    ).format(
        participant_id=participant.uid,
        survey_id=survey.uid,
        survey_descriptor=survey_descriptor,
        value='1',
    )
    self.testapp.get(url)

    # As a compound id.
    url = (
        '/api/participants/{participant_id}/data/cross_site.gif?'
        'survey_id={survey_id}&key=progress&value={value}'
    ).format(
        participant_id=participant.uid,
        survey_id=compound_id,
        value='100',
    )
    self.testapp.get(url)

    pds = ParticipantData.get_by_participant(participant.uid,
                                             survey.project_cohort_id)
    self.assertEqual(len(pds), 1)
    self.assertEqual(pds[0].survey_id, compound_id)
    self.assertEqual(pds[0].value, '100')

def set_up(self):
    # Let ConsistencyTestCase set up the datastore testing stub.
    super(TestApiParticipation, self).set_up()

    application = webapp2.WSGIApplication(
        api_routes,
        config={
            'webapp2_extras.sessions': {'secret_key': self.cookie_key},
        },
        debug=True,
    )
    self.testapp = webtest.TestApp(application)

    # Successful download of completion ids triggers a notification, which
    # requires a cohort name.
    Program.mock_program_config(
        self.program_label,
        {'cohorts': {self.cohort_label: {'name': self.cohort_label}}},
    )

    with mysql_connection.connect() as sql:
        sql.reset({
            'participant': Participant.get_table_definition(),
            'participant_data': ParticipantData.get_table_definition(),
        })

def test_portal_pd_includes_testing(self):
    """A testing user should still have a normal portal experience."""
    # A testing participant should have some pd.
    pid = self.test_create_portal_pd(testing=True)
    results = ParticipantData.get_by_participant(
        pid, 'ProjectCohort_12345678')
    self.assertGreater(len(results), 0)

def test_write_cross_site_pd(self, testing=False):
    pc, survey, participant = self.create_pd_context()

    url = (
        '/api/participants/{participant_id}/data/cross_site.gif?'
        'survey_id={survey_id}&key=progress&value=1{testing_param}'
    ).format(
        participant_id=participant.uid,
        survey_id=survey.uid,
        testing_param='&testing=true' if testing else '',
    )
    self.testapp.get(url)

    # The response is not designed to be useful (it's a gif), so check the
    # db to ensure the pd was written.
    result = ParticipantData.get_by_participant(participant.uid,
                                                survey.project_cohort_id)
    pd = result[0]
    self.assertEqual(pd.participant_id, participant.uid)
    self.assertEqual(pd.project_cohort_id, pc.uid)
    self.assertEqual(pd.code, pc.code)
    self.assertEqual(pd.survey_id, survey.uid)
    self.assertEqual(pd.key, 'progress')
    self.assertEqual(pd.value, '1')
    self.assertEqual(pd.testing, testing)

def test_get_cohort_participation(self):
    """Get stats for all surveys in a cohort."""
    pc_id1 = 'ProjectCohort_one'
    pc_id2 = 'ProjectCohort_two'
    pds11 = self.mock_one_finished_one_unfinished(1, pc_id=pc_id1)
    pds12 = self.mock_one_finished_one_unfinished(2, pc_id=pc_id1)
    pds21 = self.mock_one_finished_one_unfinished(1, pc_id=pc_id2)
    pds22 = self.mock_one_finished_one_unfinished(2, pc_id=pc_id2)

    expected = [
        {'value': '1', 'n': 2, 'survey_ordinal': 1},
        {'value': '100', 'n': 2, 'survey_ordinal': 1},
        {'value': '1', 'n': 2, 'survey_ordinal': 2},
        {'value': '100', 'n': 2, 'survey_ordinal': 2},
    ]
    stats = ParticipantData.participation(program_label=self.program_label,
                                          cohort_label=self.cohort_label)
    self.assertEqual(stats, expected)

def test_cache_truncation(self):
    big_cache_value = {
        '2019-01-01T00:00:00{}'.format(x): 'foo' for x in range(1001)
    }
    truncated = ParticipantData.truncate_cached(big_cache_value)
    self.assertEqual(len(truncated), 100)

def test_get_portal_pd(self):
    """The same query made by the portal client, not scoped to pc."""
    # A new participant should have no pd.
    new_pid = 'Participant_new'
    results = ParticipantData.get_by_participant(new_pid)
    self.assertEqual(len(results), 0)

    # An existing participant should have some pd.
    pid = self.test_create_portal_pd()
    results = ParticipantData.get_by_participant(pid)
    self.assertGreater(len(results), 0)

    # Some returned pd should be from other pcs.
    self.assertFalse(
        all(pd.project_cohort_id == 'ProjectCohort_12345678'
            for pd in results))

def test_update_cross_site_pd(self):
    pc, survey, participant = self.create_pd_context()
    user = User.create(
        email='*****@*****.**',
        owned_organizations=[pc.organization_id],
    )
    user.put()

    def write_value(value):
        url = (
            '/api/participants/{participant_id}/data/cross_site.gif?'
            'survey_id={survey_id}&key=progress&value={value}'
        ).format(
            participant_id=participant.uid,
            survey_id=survey.uid,
            value=value,
        )
        self.testapp.get(url)

    # Query for participation between each write to make sure there are
    # no errors while clearing and re-populating the participation cache.
    self.query_pc_participation(user, pc)
    write_value('1')
    self.query_pc_participation(user, pc)
    write_value('100')

    pd = ParticipantData.get_by_participant(participant.uid,
                                            survey.project_cohort_id)[0]
    self.assertEqual(pd.value, '100')

def test_put_for_index_insert_duplicate(self):
    """put_for_index() a new uid but matching an index: succeeds."""
    params = dict(self.context_params, value='1')
    pd = ParticipantData.create(**params)
    pd.put()

    dupe_params = dict(self.context_params, value='2')
    dupe_pd = ParticipantData.create(**dupe_params)
    synced_pd = ParticipantData.put_for_index(dupe_pd,
                                              'participant-survey-key')

    # Returns original uid, not the new one.
    self.assertEqual(synced_pd.uid, pd.uid)

    # Db shows values.
    fetched = ParticipantData.get_by_id(pd.uid)
    self.assertEqual(synced_pd.to_dict(), fetched.to_dict())

def test_get_by_project_cohort(self):
    """Retrieve pd for a participant, for a given pc."""
    # A new participant should have no pd.
    new_pid = 'Participant_new'
    results = ParticipantData.get_by_participant(new_pid,
                                                 'ProjectCohort_12345678')
    self.assertEqual(len(results), 0)

    # An existing participant should have some pd.
    pid = self.test_create_portal_pd()
    results = ParticipantData.get_by_participant(pid,
                                                 'ProjectCohort_12345678')
    self.assertGreater(len(results), 0)

    # All returned pd should match the project cohort id.
    self.assertTrue(
        all(pd.project_cohort_id == 'ProjectCohort_12345678'
            for pd in results))

def set_up(self):
    """Clear relevant tables from testing SQL database."""
    # Let ConsistencyTestCase set up the datastore testing stub.
    super(TestParticipantData, self).set_up()

    with mysql_connection.connect() as sql:
        sql.reset({
            'participant_data': ParticipantData.get_table_definition(),
        })

def test_completion_ids_exclude_testing(self):
    """Don't count testing pd as that participant being done."""
    pid = self.test_create_portal_pd(testing=True)
    participant = Participant.create(
        id=SqlModel.convert_uid(pid),
        name='Pascal',
        organization_id='Organization_PERTS',
    )
    participant.put()

    results = ParticipantData.completion_ids(
        project_cohort_id='ProjectCohort_12345678')
    self.assertEqual(results, [])

def test_whitelist(self):
    """Certain pd values should be readable; others shouldn't."""
    project_cohort, survey, participant = self.create_pd_context()
    keys = (
        'progress',
        'link',
        'condition',
        'ep_assent',
        'last_login',
        'saw_baseline',
        'saw_demographics',
        'saw_validation',
        'secret',  # NOT on whitelist; should remain secret
    )
    pds = [
        ParticipantData.create(
            key=k,
            value='foo',
            participant_id=participant.uid,
            program_label=self.program_label,
            cohort_label=self.cohort_label,
            project_cohort_id=project_cohort.uid,
            code=project_cohort.code,
            survey_id=survey.uid,
            survey_ordinal=survey.ordinal,
        )
        for k in keys
    ]
    ParticipantData.put_multi(pds)

    url = '/api/participants/{}/data?project_cohort_id={}'.format(
        participant.uid, project_cohort.uid)
    result = self.testapp.get(url)
    result_dict = json.loads(result.body)

    self.assertEqual(len(result_dict), len(keys) - 1)
    secret_pd = [pd for pd in result_dict if pd['key'] == 'secret']
    self.assertEqual(len(secret_pd), 0)

def test_project_cohort_participation(self):
    pid = self.test_create_portal_pd()
    results = ParticipantData.participation(
        project_cohort_id='ProjectCohort_12345678')
    expected = [
        {'survey_ordinal': 1, 'value': '100', 'n': 1},
        {'survey_ordinal': 2, 'value': '33', 'n': 1},
    ]
    self.assertEqual(results, expected)

def test_put_multi(self):
    """If no indexes collide: succeeds, otherwise: raises."""
    params1 = dict(self.context_params, value='1', survey_id='Survey_1')
    pd1 = ParticipantData.create(**params1)
    params2 = dict(self.context_params, value='1', survey_id='Survey_2')
    pd2 = ParticipantData.create(**params2)

    affected_rows = ParticipantData.put_multi([pd1, pd2])
    self.assertEqual(affected_rows, 2)
    self.assertIsNotNone(ParticipantData.get_by_id(pd1.uid))
    self.assertIsNotNone(ParticipantData.get_by_id(pd2.uid))

    pd3 = ParticipantData.create(**params1)
    with self.assertRaises(IntegrityError):
        ParticipantData.put_multi([pd3])

def test_downgrade_progress_fails_cross_site(self):
    pc, survey, participant = self.create_pd_context()

    def write_value(value):
        url = (
            '/api/participants/{participant_id}/data/cross_site.gif?'
            'survey_id={survey_id}&key=progress&value={value}'
        ).format(
            participant_id=participant.uid,
            survey_id=survey.uid,
            value=value,
        )
        self.testapp.get(url)

    write_value('100')
    write_value('1')  # should silently fail

    # Recorded pd is still 100.
    pd = ParticipantData.get_by_participant(participant.uid,
                                            survey.project_cohort_id)[0]
    self.assertEqual(pd.value, '100')

def test_get_survey_completion_ids(self):
    participants = {p.uid: p for p in self.mock_participants()}
    pds = self.mock_one_finished_one_unfinished(1)

    result = ParticipantData.completion_ids(survey_id=pds[0].survey_id)
    expected = [
        {'token': 'unfinished', 'percent_progress': '1', 'module': 1},
        {'token': 'finished', 'percent_progress': '100', 'module': 1},
    ]
    self.assertEqual(result, expected)

def set_up(self):
    # Let ConsistencyTestCase set up the datastore testing stub.
    super(TestApiParticipant, self).set_up()

    with mysql_connection.connect() as sql:
        sql.reset({
            'participant': Participant.get_table_definition(),
            'participant_data': ParticipantData.get_table_definition(),
        })

    application = webapp2.WSGIApplication(
        api_routes,
        config={
            'webapp2_extras.sessions': {'secret_key': self.cookie_key},
        },
        debug=True,
    )
    self.testapp = webtest.TestApp(application)

def test_update_local_pd(self):
    pc, survey, participant = self.create_pd_context()

    def write_value(value):
        return self.testapp.post_json(
            '/api/participants/{participant_id}/data/{key}'.format(
                participant_id=participant.uid,
                key='progress',
            ),
            {'value': value, 'survey_id': survey.uid},
        )

    original_id = json.loads(write_value('1').body)['uid']
    updated_id = json.loads(write_value('100').body)['uid']
    self.assertEqual(original_id, updated_id)

    pd = ParticipantData.get_by_id(original_id)
    self.assertEqual(pd.value, '100')

def test_update_local_pd_with_descriptor(self):
    """Uses a descriptor two ways: in a param, and in the survey id."""
    pc, survey, participant = self.create_pd_context()
    survey_descriptor = 'cycle-1'

    # Using the param.
    response = self.testapp.post_json(
        '/api/participants/{participant_id}/data/{key}'.format(
            participant_id=participant.uid,
            key='progress',
        ),
        {
            'value': '1',
            'survey_id': survey.uid,
            'survey_descriptor': survey_descriptor,
        },
    )
    original_id = json.loads(response.body)['uid']

    # With the descriptor combined in the id.
    response = self.testapp.post_json(
        '/api/participants/{participant_id}/data/{key}'.format(
            participant_id=participant.uid,
            key='progress',
        ),
        {
            'value': '100',
            'survey_id': '{}:{}'.format(survey.uid, survey_descriptor),
        },
    )
    updated_id = json.loads(response.body)['uid']
    self.assertEqual(original_id, updated_id)

    pd = ParticipantData.get_by_id(original_id)
    self.assertEqual(pd.value, '100')
    self.assertEqual(pd.survey_id,
                     '{}:{}'.format(survey.uid, survey_descriptor))