def test_rserve_skips_existing(self):
    """get_report_parents omits parents that already have a report for the
    week, unless the `force` flag is true.

    Builds one org, one team, and one classroom that should receive
    reports, plus one of each that already has a report dated this week.
    """
    program = Program.create(
        name="The Engagement Project",
        label="ep19",
        preview_url='foo.com',
    )
    # Reports are keyed by an ISO date string for the week.
    week = util.datelike_to_iso_string(datetime.date.today())

    org = Organization.create(name="Organization", captain_id="User_cap",
                              program_id=program.uid)
    org_to_skip = Organization.create(name="Organization",
                                      captain_id="User_cap",
                                      program_id=program.uid)
    Organization.put_multi([org, org_to_skip])

    team = Team.create(name="Team", captain_id="User_cap",
                       program_id=program.uid)
    team_to_skip = Team.create(name="Team", captain_id="User_cap",
                               program_id=program.uid)
    Team.put_multi([team, team_to_skip])

    cl = Classroom.create(name="Classroom", team_id=team.uid, code="foo",
                          contact_id="User_contact")
    cl_to_skip = Classroom.create(name="Classroom", team_id=team.uid,
                                  code="foo", contact_id="User_contact")
    Classroom.put_multi([cl, cl_to_skip])

    # Give each "_to_skip" parent a report issued this week so the cron
    # query should pass over them.
    Report.put_multi([
        Report.create(parent_id=org_to_skip.uid, filename="foo",
                      issue_date=week),
        Report.create(parent_id=team_to_skip.uid, filename="foo",
                      issue_date=week),
        Report.create(parent_id=cl_to_skip.uid, filename="foo",
                      issue_date=week),
    ])

    # Skips all the parents who have reports already this week.
    orgs, teams, classes = cron_rserve.get_report_parents(
        program, week, False)
    self.assertEqual(len(orgs), 1)
    self.assertEqual(len(teams), 1)
    self.assertEqual(len(classes), 1)

    # ...unless you force it, then they're all there.
    orgs, teams, classes = cron_rserve.get_report_parents(
        program, week, True)
    self.assertEqual(len(orgs), 2)
    self.assertEqual(len(teams), 2)
    self.assertEqual(len(classes), 2)
def test_digest_user(self):
    """Digest.process_user builds digests and emails for one user.

    This is the same thing that /task/digest_user_notifications does.
    """
    user1, user2, notes = self.create_many()

    # A 24-hour window centered on the present.
    window_start = datetime.datetime.now() - datetime.timedelta(hours=12)
    window_end = datetime.datetime.now() + datetime.timedelta(hours=12)

    digests, emails, smss = Digest.process_user(
        user1,
        util.datelike_to_iso_string(window_start),
        util.datelike_to_iso_string(window_end),
    )

    self.assertEqual(len(digests), 2)
    self.assertEqual(len(emails), 2)
    # @todo: once sms is built, test this
    # self.assertEqual(len(smss), 2)

    # Each digest body is a non-empty string.
    for digest in digests:
        self.assertGreater(len(digest.body), 0)
def get(self):
    """Queue a digest-notification task for every user with notifications.

    Takes optional query string params `start` and `end` (ISO datetime
    strings). When absent, defaults to the 24 hours ending at the most
    recent midnight, pacific standard time.

    Writes a JSON body: {'user_ids': [...]}.
    """
    start = self.request.get('start', None)
    end = self.request.get('end', None)

    # What time period is of interest?
    if start and end:
        # Make sure inputs are formatted correctly; strptime raises
        # ValueError on a bad format, which surfaces as a server error.
        datetime.datetime.strptime(start, config.iso_datetime_format)
        datetime.datetime.strptime(end, config.iso_datetime_format)
        if start > end:
            raise Exception("DigestNotifications requires end > start.")
        logging.info("Received custom start and end times: {} - {}".format(
            start, end))
    else:
        # most recent midnight, pacific standard time (stored as hour 8,
        # presumably because server datetimes are UTC — confirm)
        today = datetime.date.today()
        end = datetime.datetime(today.year, today.month, today.day, 8)
        start = end - datetime.timedelta(hours=24)
        # from here forward we'll work in ISO 8601 strings.
        start = util.datelike_to_iso_string(start)
        end = util.datelike_to_iso_string(end)
        logging.info("Using default start and end times: {} - {}".format(
            start, end))

    user_ids = Notification.users_with_notifications(start, end)

    # Launch a task to process each user. This way we can handle arbitrary
    # growth in the number of users without worrying about time or memory
    # limits, assuming number of notifications per user is fairly constant.
    # N.B. `user_id`, not `id`, to avoid shadowing the builtin.
    for user_id in user_ids:
        taskqueue.add(
            url='/task/{}/digest_notifications'.format(user_id),
            params={
                'start': start,
                'end': end
            },
            queue_name='default',
        )
    logging.info(user_ids)

    self.response.write(json.dumps({'user_ids': user_ids}))
def test_precheck_jwt_used(self):
    """Re-using a token yields 410 plus a 'used <timestamp>' body."""
    token = self.test_new_success()

    # should use the token
    response = self.testapp.get(
        '/api/auth_tokens/{}/user'.format(token),
        status=410,
    )

    # Grab the timestamp from when the server saw the token; it should have
    # only one entry.
    jtis = memcache.get('jwt_jtis')
    expiration = list(jtis.values())[0]

    expected_body = 'used ' + util.datelike_to_iso_string(expiration)
    self.assertEqual(json.loads(response.body), expected_body)
def get_participation_for_cycle(self, classrooms, cycle):
    """Fetch participation data from Neptune for one cycle's date window.

    Args:
        classrooms: iterable of classrooms; each `url_code` identifies a
            project cohort on the Neptune side.
        cycle: must have `start_date` and `end_date` for a query window.

    Returns:
        dict parsed from Neptune's JSON response, or {} when the cycle
        has no start or end date.

    Raises:
        Exception: if the fetch returns nothing or a non-200 status.
    """
    if not cycle.start_date or not cycle.end_date:
        return {}

    # Sign a JWT so Neptune accepts this server-to-server request on
    # behalf of a synthetic 'triton' user.
    handler = TeamsClassrooms()
    user = User.create(id='triton', email='')
    jwt = handler.classroom_participation_jwt(user, classrooms)

    if util.is_localhost():
        protocol = 'http'
        neptune_domain = 'localhost:8080'
    else:
        protocol = 'https'
        neptune_domain = os.environ['NEPTUNE_DOMAIN']

    # Widen the cycle's dates to whole days: midnight at the start of the
    # first day through the last microsecond of the last day.
    start_datetime = datetime.datetime.combine(
        cycle.start_date, datetime.datetime.min.time())
    end_datetime = datetime.datetime.combine(cycle.end_date,
                                             datetime.datetime.max.time())
    url = (
        '{protocol}://{domain}/api/project_cohorts/participation?{ids}&start={start_date}&end={end_date}'
        .format(
            protocol=protocol,
            domain=neptune_domain,
            ids='&'.join(['uid={}'.format(c.url_code) for c in classrooms]),
            start_date=util.datelike_to_iso_string(start_datetime),
            end_date=util.datelike_to_iso_string(end_datetime),
        ))
    result = urlfetch.fetch(
        url=url,
        method=urlfetch.GET,
        headers={'Authorization': 'Bearer {}'.format(jwt)})

    if not result or result.status_code != 200:
        raise Exception("Failed to get participation {}".format(result))

    return json.loads(result.content)
def get(self, date_str=None):
    """Queue one participation-update task per team.

    Args:
        date_str: optional ISO date string (per config.iso_date_format);
            falls back to today when missing or malformed.

    Writes a JSON body: {'team_ids': [...]}.
    """
    try:
        # Make sure this is a valid date.
        datetime.datetime.strptime(date_str, config.iso_date_format)
    except (TypeError, ValueError):
        # strptime raises TypeError when date_str is None (no date given)
        # and ValueError when it's malformed; either way, use today.
        date_str = util.datelike_to_iso_string(datetime.date.today())

    # Launch a separate task to get participation for each team, to make
    # sure we can scale memory and cpu time easily.
    teams = Team.get(n=float('inf'))
    for t in teams:
        taskqueue.add(
            url='/task/{}/team_participation/{}'.format(t.uid, date_str),
            queue_name='default',
        )

    team_ids = [t.uid for t in teams]
    logging.info("Started tasks for {} teams".format(len(team_ids)))
    self.response.write(json.dumps({'team_ids': team_ids}))
def test_check_roster_cycle_data(self):
    """The roster-check endpoint includes cycle data only when some cycle
    is current (i.e. today falls between its start and end dates)."""
    team = Team.create(name='foo', captain_id="User_cap",
                       program_id=self.program.uid)
    classroom = Classroom.create(
        name="CompSci 101",
        team_id=team.uid,
        contact_id="User_contact",
        code="foo bar",
        num_students=1,
    )
    ppt = Participant.create(team_id=team.uid,
                             classroom_ids=[classroom.uid],
                             student_id='STUDENTID001')
    today = datetime.date.today()
    cycle1 = Cycle.create(
        team_id=team.uid,
        ordinal=1,
        # schedule to not be current (starts and ends in the past)
        start_date=today - datetime.timedelta(days=3),
        end_date=today - datetime.timedelta(days=2),
    )
    cycle1.put()
    team.put()
    classroom.put()
    ppt.put()

    # Without a current cycle, no cycle data
    response = self.testapp.get(
        '/api/codes/{}/participants/{}'.format(classroom.url_code,
                                               ppt.student_id),
        status=200,
    )
    self.assertEqual(
        json.loads(response.body),
        {
            'uid': ppt.uid,
            'team_id': ppt.team_id
        },
    )

    # Add a new cycle that is current.
    cycle2 = Cycle.create(
        team_id=team.uid,
        ordinal=2,
        # schedule to be current (starts yesterday, ends tomorrow)
        start_date=today - datetime.timedelta(days=1),
        end_date=today + datetime.timedelta(days=1),
    )
    cycle2.put()

    # Cycle data present.
    response = self.testapp.get(
        '/api/codes/{}/participants/{}'.format(classroom.url_code,
                                               ppt.student_id),
        status=200,
    )
    expected = {
        'uid': ppt.uid,
        'team_id': ppt.team_id,
        'cycle': {
            'uid': cycle2.uid,
            'team_id': cycle2.team_id,
            'ordinal': cycle2.ordinal,
            'start_date': util.datelike_to_iso_string(cycle2.start_date),
            'end_date': util.datelike_to_iso_string(cycle2.end_date),
        }
    }
    self.assertEqual(json.loads(response.body), expected)
def serialize(d):
    """ISO-format a date-like value; falsy values pass through as None."""
    if not d:
        return None
    return util.datelike_to_iso_string(d)
def exp_to_string(exp):
    """Render an expiration value as an ISO 8601 string."""
    exp_datetime = exp_to_datetime(exp)
    return util.datelike_to_iso_string(exp_datetime)
def get(self, script):
    """Describe for RServe all the classrooms and teams that we'd like
    reports on, including what URLs to post the report data back to.

    Args:
        script: short program key ('ep', 'beleset', 'cset', or 'mset');
            a KeyError for anything else.

    Query string params: `week` (ISO date), `force`, `really_send`, and
    repeated `ru` reporting-unit ids used as a whitelist.
    """
    week = self.request.get('week', None)
    if not week:
        # Default to this week's report date: today if it's a Monday,
        # otherwise the next Monday.
        monday = datetime.date.today()
        while monday.weekday() != 0:
            monday = monday + datetime.timedelta(days=1)
        week = util.datelike_to_iso_string(monday)
    should_force = self.request.get('force', 'false') == 'true'
    really_send = self.request.get('really_send', 'true') == 'true'

    script_to_label = {
        'ep': 'ep19',
        'beleset': 'beleset19',
        'cset': 'cset19',
        'mset': 'mset19',
    }
    program = Program.get_by_label(script_to_label[script])

    # Manual calls to this handler may specify reporting unit ids.
    # We'll use them as a whitelist to filter what we query.
    reporting_unit_ids = self.request.get_all('ru') or []

    # What reporting units should get reports this week? If should_force is
    # true, this function will return all possible units for the program.
    # Otherwise it will ignore units that already have reports.
    orgs, teams, classrooms = cron_rserve.get_report_parents(
        program, week, should_force)

    payload = cron_rserve.build_payload(
        orgs,
        teams,
        classrooms,
        cron_rserve.get_secrets(self.request),
        ru_whitelist=reporting_unit_ids,
    )

    fetch_params = cron_rserve.get_fetch_params(script, payload)

    if not really_send:
        logging.info("really_send is false, not contacting RServe.")
        # More convenient to see payload parsed rather than dumpsed.
        fetch_params['payload'] = payload
        self.write(fetch_params)
        return

    try:
        result = urlfetch.fetch(**fetch_params)
    except urlfetch.DeadlineExceededError as e:
        logging.warning("RServe took a long time to reply (caught a "
                        "DeadlineExceededError). Exiting without checking "
                        "results. Original error message follows.")
        logging.warning(e.message)
        return

    if not result:
        raise Exception("No response from RServe.")
    if result.status_code >= 300:
        # App Engine will consider this cron job to have failed, and will
        # follow any retry instructions in cron.yaml.
        raise Exception(
            "Non-successful response from RServe: {} {}".format(
                result.status_code, result.content))

    logging.info("response status: {}".format(result.status_code))
    try:
        json.loads(result.content)  # ok, it's valid
    except ValueError:
        # Not JSON; just log it as plain text.
        logging.info(result.content)
    else:
        # Valid JSON: log a truncated view only, so large report payloads
        # don't flood the logs. (Previously the full content was logged
        # again right after the truncated version, defeating the point.)
        logging.info(util.truncate_json(result.content))
def test_completion_anonymous_allowed(self):
    """Completion endpoint returns only pd modified within [start, end].

    NOTE(review): despite the name, this request is authenticated via
    login_headers — confirm whether the anonymous case is covered
    elsewhere or the name is stale.
    """
    org_id = 'Organization_foo'
    pc = ProjectCohort.create(
        organization_id=org_id,
        program_label=self.program_label,
        cohort_label=self.cohort_label,
    )
    pc.put()

    today = datetime.datetime.now()
    yesterday = today - datetime.timedelta(days=1)
    tomorrow = today + datetime.timedelta(days=1)

    # Shared fields for both participant data rows.
    pd_params = {
        'key': 'progress',
        'value': '100',
        'program_label': self.program_label,
        'cohort_label': self.cohort_label,
        'project_cohort_id': pc.uid,
        'code': pc.code,
        'survey_id': 'Survey_foo',
        'survey_ordinal': 1,
    }
    # This row is modified yesterday, so it falls outside the query window.
    old_pd = ParticipantData.create(
        created=yesterday.strftime(config.sql_datetime_format),
        modified=yesterday.strftime(config.sql_datetime_format),
        participant_id='Participant_foo',
        **pd_params)
    # Use a lower-level interface so we can set the modified time.
    row = ParticipantData.coerce_row_dict(old_pd.to_dict())
    with mysql_connection.connect() as sql:
        sql.insert_or_update(ParticipantData.table, row)

    # This row is modified now, so it should be returned.
    current_pd = ParticipantData.create(participant_id='Participant_bar',
                                        **pd_params)
    current_pd.put()

    user = User.create(
        email='*****@*****.**',
        owned_organizations=[org_id],
    )
    user.put()

    result = self.testapp.get(
        '/api/project_cohorts/{}/completion'.format(pc.uid),
        params={
            'start': util.datelike_to_iso_string(today),
            'end': util.datelike_to_iso_string(tomorrow),
        },
        headers=login_headers(user.uid),
        # Authenticated, has permission: 200.
        status=200,
    )
    # Only the current row appears; the old one is filtered by the window.
    expected = [{
        'value': '100',
        'survey_ordinal': 1,
        'participant_id': current_pd.participant_id,
    }]
    self.assertEqual(json.loads(result.body), expected)
def test_checklist_nudge(self):
    """queue_checklist_nudge emails only PCs in a not-yet-open cohort of a
    program that has a matching mandrill template."""
    # A cohort open date 30 days out, i.e. still upcoming.
    month_from_now = util.datelike_to_iso_string(datetime.date.today() +
                                                 datetime.timedelta(
                                                     days=30))
    Program.mock_program_config(
        'p1',
        {
            'cohorts': {
                '2019': {
                    'label': '2019',
                    'open_date': '2019-06-01'
                },
                '2020': {
                    'label': '2020',
                    'open_date': month_from_now
                },
            },
            'project_tasklist_template': []
        },
    )
    # p2 has no cohorts, so none of its PCs can match.
    Program.mock_program_config(
        'p2',
        {
            'cohorts': {},
            'project_tasklist_template': []
        },
    )
    templates = [
        self.create_mandrill_template('p1-{}'.format(
            auto_prompt.CHECKLIST_NUDGE_SUFFIX)),
    ]

    # Case 1: 2020 PCs gets prompt
    current_pc1 = ProjectCohort.create(
        program_label='p1',
        organization_id='Organization_foo',
        project_id='Project_current1',
        cohort_label='2020',
    )
    current_pc1.put()
    current_pc2 = ProjectCohort.create(
        program_label='p1',
        organization_id='Organization_bar',
        project_id='Project_current2',
        cohort_label='2020',
    )
    current_pc2.put()

    # Case 2: 2019 PC does not
    old_pc = ProjectCohort.create(
        program_label='p1',
        cohort_label='2019',
        project_id='Project_old',
    )
    old_pc.put()

    # Case 3: PC in other program does not
    other_pc = ProjectCohort.create(
        program_label='p2',
        cohort_label='2020',
        project_id='Project_other',
    )
    other_pc.put()

    # Some tasks are created on put. We're not interested in these.
    creation_tasks = self.taskqueue_stub.get_filtered_tasks()

    auto_prompt.queue_checklist_nudge(templates)

    tasks = self.taskqueue_stub.get_filtered_tasks()
    num_new_tasks = len(tasks) - len(creation_tasks)

    # Only the 2 2020 pcs in the right program should have a task queued.
    self.assertEqual(num_new_tasks, 2)
    expected_url1 = '/task/email_project/Project_current1/p1-checklist-nudge'
    self.assertIn(expected_url1, [t.url for t in tasks])
    expected_url2 = '/task/email_project/Project_current2/p1-checklist-nudge'
    self.assertIn(expected_url2, [t.url for t in tasks])

    Program.reset_mocks()