def queue_checklist_nudge(templates):
    """Queue reminder emails for orgs whose cohort opens in 30 days.

    Checks whether today is exactly one month (30 days) before the program
    opens for student participation for each cohort year. For every matching
    cohort, finds all registered project cohorts and queues a reminder-email
    task per project (complete checklist, participation code, etc.).

    Args:
        templates: passed through to programs_with_template() to select
            programs that have a CHECKLIST_NUDGE_SUFFIX email template.

    Returns:
        list of queued taskqueue task objects.
    """
    tasks = []
    # Cohort open_date is compared as an ISO date string, so normalize
    # "30 days from today" into the same format.
    month_from_now = util.datelike_to_iso_string(
        datetime.date.today() + datetime.timedelta(days=30))
    to_prompt = programs_with_template(templates, CHECKLIST_NUDGE_SUFFIX)
    for program in to_prompt:
        for cohort in program['cohorts'].values():
            if cohort['open_date'] == month_from_now:
                # This whole cohort needs a nudge. Get all the project
                # cohorts. (Removed an unused `project_ids = set()` local.)
                pcs = ProjectCohort.get(
                    program_label=program['label'],
                    cohort_label=cohort['label'],
                    projection=['project_id'],
                    n=float('inf'),
                )
                for pc in pcs:
                    url = '/task/email_project/{}/{}'.format(
                        pc.project_id,
                        get_slug(program['label'], CHECKLIST_NUDGE_SUFFIX))
                    task = taskqueue.add(url=url)
                    tasks.append(task)
    return tasks
def get(self):
    """Build and POST a CG (Growth Mindset for College) payload to RServe.

    Gathers all open project cohorts for the current 'cg17' cohort, matches
    their monitor checkpoints and report tasks, assembles reporting units
    plus required secrets, and POSTs the JSON payload to the RServe
    /api/scripts/cg endpoint. Logs the response; returns nothing.
    """
    program_label = 'cg17'
    cohort = Program.get_current_cohort(program_label)
    url = '{protocol}://{domain}/api/scripts/cg'.format(
        protocol='http' if util.is_localhost() else 'https',
        domain=('localhost:9080' if util.is_localhost()
                else os.environ['RSERVE_DOMAIN']),
    )

    # Look up all the valid project cohorts.
    pc_ids = [
        pc_key.id() for pc_key in ProjectCohort.get(
            program_label=program_label,
            cohort_label=cohort['label'],
            status='open',
            keys_only=True,
            n=float('inf'),
        )
    ]

    # To match up the right report tasks, we'll need tasks and checkpoints.
    checkpoints = [
        c for c in Checkpoint.get(
            label='cg17_survey__monitor_1',
            cohort_label=cohort['label'],
            n=float('inf'),
        )
        if c.project_cohort_id in pc_ids
    ]
    checkpoint_ids = [c.uid for c in checkpoints]
    tasks = [
        t for t in Task.get(
            label='cg17_survey__report_1',
            n=float('inf'),
        )
        if t.checkpoint_id in checkpoint_ids
    ]

    # NOTE: tasks could alternately be found by fetching Surveys for these
    # project cohorts and matching Task.key.parent() against the survey
    # keys, which may or may not be more efficient. Current assumption is
    # that for large result sets, SQL-backed checkpoints are faster.

    payload = {
        'reporting_units': [
            self.build_reporting_unit(uid, checkpoints, tasks)
            for uid in pc_ids
        ],
    }
    secrets = ('neptune_sql_credentials', 'big_pipe_credentials',
               'qualtrics_credentials')
    for s in secrets:
        payload[s] = SecretValue.get(s, None)

    result = urlfetch.fetch(
        url=url,
        payload=json.dumps(payload),
        method=urlfetch.POST,
        headers={
            'Authorization': 'Bearer ' + rserve_jwt(),
            'Content-Type': 'application/json',
        },
    )

    # Bug fix: the original dereferenced result.status_code in the error
    # log even when `result` was falsy, which would raise AttributeError
    # on exactly the branch meant to handle that case.
    if not result:
        logging.error("No response from RServe.")
    elif result.status_code >= 300:
        logging.error("Non-successful response from RServe: {} {}".format(
            result.status_code, result.content))
    else:
        logging.info("response status: {}".format(result.status_code))
        try:
            json.loads(result.content)  # ok, it's valid
            # logging.info(util.truncate_json(result.content))
            logging.info(result.content)
        except ValueError:
            # Narrowed from a bare except: json.loads raises ValueError
            # (JSONDecodeError is a subclass). Just log as text.
            logging.info(result.content)