async def request_access_token(
    session: ClientSession,
    url: str,
    creds: ServiceAccountCredentials,
    now: Optional[DateTime] = None,
) -> AccessToken:
    """
    Exchange a signed service-account JWT assertion for an OAuth2 access
    token via a POST to ``url``.

    Args:
        session: aiohttp client session used for the HTTP request.
        url: token endpoint to POST the assertion to.
        creds: service-account credentials supplying issuer, audience and
            the RSA private key used to sign the assertion.
        now: clock override for testing; defaults to the current UTC time.

    Returns:
        The structured :class:`AccessToken` parsed from the JSON response.

    Raises:
        HttpError: if the endpoint responds with a non-200 status.
    """
    issued_at = now or DateTime.utcnow()
    # Token assertion valid for one hour from issuance.
    claims = {
        "iss": creds.client_email,
        "scope": SCOPE_ANDROIDPUBLISHER,
        "aud": creds.token_uri,
        "iat": int(issued_at.timestamp()),
        "exp": int(issued_at.add(hours=1).timestamp()),
    }
    assertion = encode(
        claims, creds.private_key, algorithm="RS256"
    ).decode("utf8")
    form = {"grant_type": ACCESS_TOKEN_GRANT_TYPE, "assertion": assertion}
    async with session.post(url, data=form) as resp:
        payload = await resp.read()
        if resp.status != 200:
            raise HttpError(resp.status, payload)
        return converter.structure(loads(payload), AccessToken)
def reindex_everything(session: SqlASession) -> None:
    """
    Drops and regenerates the contents of all server index tables.

    Args:
        session: an SQLAlchemy Session
    """
    indexed_at = Pendulum.utcnow()
    # A single timestamp is stamped onto every rebuilt index row.
    log.info("Reindexing database; indexed_at_utc = {}", indexed_at)
    PatientIdNumIndexEntry.rebuild_idnum_index(session, indexed_at)
    TaskIndexEntry.rebuild_entire_task_index(session, indexed_at)
def _skip_to_latest(self, earliest: Optional[DateTime]) -> DateTime:
    """Bound the earliest time a run can be scheduled.

    We move start_date up until one period before now, so the current
    time is AFTER the period end and the job can be created. This
    differs slightly from the cron version at terminal values.
    """
    candidate = self._get_prev(DateTime.utcnow())
    # Never schedule before the caller-supplied lower bound, if any.
    if earliest is None:
        return candidate
    return max(candidate, earliest)
def get_server_device(cls, dbsession: SqlASession) -> "Device":
    """
    Return the special device meaning "the server", creating it if it
    doesn't already exist.
    """
    server_device = cls.get_device_by_name(dbsession, DEVICE_NAME_FOR_SERVER)
    if server_device is not None:
        # Already registered; nothing to do.
        return server_device
    # First use: register the server's own pseudo-device.
    server_device = Device()
    server_device.name = DEVICE_NAME_FOR_SERVER
    server_device.friendly_name = "CamCOPS server"
    server_device.registered_by_user = User.get_system_user(dbsession)
    server_device.when_registered_utc = Pendulum.utcnow()
    server_device.camcops_version = CAMCOPS_SERVER_VERSION
    dbsession.add(server_device)
    return server_device
def reindex_everything(
    session: SqlASession, skip_tasks_with_missing_tables: bool = False
) -> None:
    """
    Drops and regenerates the contents of all server index tables.

    Args:
        session: an SQLAlchemy Session
        skip_tasks_with_missing_tables: should we skip over tasks if their
            tables are not in the database? (This is so we can rebuild an
            index from a database upgrade, but not crash because newer
            tasks haven't had their tables created yet.)
    """
    indexed_at = Pendulum.utcnow()
    # A single timestamp is stamped onto every rebuilt index row.
    log.info("Reindexing database; indexed_at_utc = {}", indexed_at)
    PatientIdNumIndexEntry.rebuild_idnum_index(session, indexed_at)
    TaskIndexEntry.rebuild_entire_task_index(
        session,
        indexed_at,
        skip_tasks_with_missing_tables=skip_tasks_with_missing_tables,
    )
def _skip_to_latest(self, earliest: Optional[DateTime]) -> DateTime:
    """Bound the earliest time a run can be scheduled.

    We move start_date up until one period before now, so the current
    time is AFTER the period end and the job can be created. This
    differs slightly from the delta version at terminal values.

    If the next schedule should start *right now*, we want the data
    interval that starts now, not the one that ends now.
    """
    now = DateTime.utcnow()
    previous_start = self._get_prev(now)
    following_start = self._get_next(previous_start)
    if following_start == now:
        # Current time sits exactly on an interval boundary.
        new_start = previous_start
    elif following_start > now:
        # Current time falls strictly between two boundaries.
        new_start = self._get_prev(previous_start)
    else:
        raise AssertionError("next schedule shouldn't be earlier")
    # Never schedule before the caller-supplied lower bound, if any.
    return new_start if earliest is None else max(new_start, earliest)
def next_dagrun_info(
    self,
    *,
    last_automated_data_interval: DataInterval | None,
    restriction: TimeRestriction,
) -> DagRunInfo | None:
    """
    Compute when the next DAG run should happen, honouring the
    catchup/earliest/latest restriction; returns ``None`` when no
    further run is permitted.
    """
    if restriction.catchup:
        if last_automated_data_interval is not None:
            # Continue immediately after the last automated interval.
            next_start_time = self._get_next(
                last_automated_data_interval.end)
        elif restriction.earliest is not None:
            next_start_time = self._align_to_next(restriction.earliest)
        else:
            # Catchup requested but nowhere to start from.
            return None
    else:
        # No catchup: schedule from "now" (or from `earliest` if that is
        # still in the future).
        current_time = DateTime.utcnow()
        lower_bound = restriction.earliest
        if lower_bound is not None and current_time < lower_bound:
            next_start_time = self._align_to_next(lower_bound)
        else:
            next_start_time = self._align_to_next(current_time)
    if restriction.latest is not None and restriction.latest < next_start_time:
        # Past the end of the allowed window.
        return None
    return DagRunInfo.interval(
        next_start_time - self._interval, next_start_time)
def get_now_utc_pendulum() -> DateTime:
    """
    Get the time now in the UTC timezone, as a :class:`pendulum.DateTime`.
    """
    # utcnow() then in_tz() ensures the result carries the explicit UTC
    # timezone object from get_tz_utc().
    return DateTime.utcnow().in_tz(get_tz_utc())
def test_returns_task_schedules(self) -> None:
    """
    Integration test: a client polling GET_TASK_SCHEDULES for a patient
    registered on two schedules receives both, with per-item due dates,
    settings, completeness and anonymity flags correctly populated.
    """
    from pendulum import DateTime as Pendulum, Duration, local, parse
    from camcops_server.cc_modules.cc_taskindex import (
        PatientIdNumIndexEntry,
        TaskIndexEntry,
    )
    from camcops_server.cc_modules.cc_taskschedule import (
        PatientTaskSchedule,
        TaskSchedule,
        TaskScheduleItem,
    )
    from camcops_server.tasks.bmi import Bmi

    # Two schedules in the test group; only the first is named.
    schedule1 = TaskSchedule()
    schedule1.group_id = self.group.id
    schedule1.name = "Test 1"
    self.dbsession.add(schedule1)
    schedule2 = TaskSchedule()
    schedule2.group_id = self.group.id
    self.dbsession.add(schedule2)
    self.dbsession.commit()

    # Four items on schedule1: phq9/bmi due in week 1, phq9/gmcpq due
    # about a month later.
    item1 = TaskScheduleItem()
    item1.schedule_id = schedule1.id
    item1.task_table_name = "phq9"
    item1.due_from = Duration(days=0)
    item1.due_by = Duration(days=7)
    self.dbsession.add(item1)
    item2 = TaskScheduleItem()
    item2.schedule_id = schedule1.id
    item2.task_table_name = "bmi"
    item2.due_from = Duration(days=0)
    item2.due_by = Duration(days=8)
    self.dbsession.add(item2)
    item3 = TaskScheduleItem()
    item3.schedule_id = schedule1.id
    item3.task_table_name = "phq9"
    item3.due_from = Duration(days=30)
    item3.due_by = Duration(days=37)
    self.dbsession.add(item3)
    item4 = TaskScheduleItem()
    item4.schedule_id = schedule1.id
    item4.task_table_name = "gmcpq"
    item4.due_from = Duration(days=30)
    item4.due_by = Duration(days=38)
    self.dbsession.add(item4)
    self.dbsession.commit()

    # A device-side patient plus a server-side patient sharing the same
    # NHS number, so the server patient matches the uploaded task below.
    patient = self.create_patient()
    idnum = self.create_patient_idnum(
        patient_id=patient.id,
        which_idnum=self.nhs_iddef.which_idnum,
        idnum_value=TEST_NHS_NUMBER,
    )
    PatientIdNumIndexEntry.index_idnum(idnum, self.dbsession)
    server_patient = self.create_patient(as_server_patient=True)
    _ = self.create_patient_idnum(
        patient_id=server_patient.id,
        which_idnum=self.nhs_iddef.which_idnum,
        idnum_value=TEST_NHS_NUMBER,
        as_server_patient=True,
    )

    # Register the server patient on both schedules; schedule 1 carries
    # per-task settings and a fixed start date.
    schedule_1 = PatientTaskSchedule()
    schedule_1.patient_pk = server_patient.pk
    schedule_1.schedule_id = schedule1.id
    schedule_1.settings = {
        "bmi": {"bmi_key": "bmi_value"},
        "phq9": {"phq9_key": "phq9_value"},
    }
    schedule_1.start_datetime = local(2020, 7, 31)
    self.dbsession.add(schedule_1)
    schedule_2 = PatientTaskSchedule()
    schedule_2.patient_pk = server_patient.pk
    schedule_2.schedule_id = schedule2.id
    self.dbsession.add(schedule_2)

    # A completed BMI task created within item2's due window, so the
    # corresponding schedule entry should be reported complete.
    bmi = Bmi()
    self.apply_standard_task_fields(bmi)
    bmi.id = 1
    bmi.height_m = 1.83
    bmi.mass_kg = 67.57
    bmi.patient_id = patient.id
    bmi.when_created = local(2020, 8, 1)
    self.dbsession.add(bmi)
    self.dbsession.commit()

    self.assertTrue(bmi.is_complete())

    TaskIndexEntry.index_task(
        bmi, self.dbsession, indexed_at_utc=Pendulum.utcnow()
    )
    self.dbsession.commit()

    proquint = server_patient.uuid_as_proquint

    # For type checker
    assert proquint is not None
    assert self.other_device.name is not None

    # Simulate the tablet's GET_TASK_SCHEDULES API call.
    self.req.fake_request_post_from_dict(
        {
            TabletParam.CAMCOPS_VERSION: MINIMUM_TABLET_VERSION,
            TabletParam.DEVICE: self.other_device.name,
            TabletParam.OPERATION: Operations.GET_TASK_SCHEDULES,
            TabletParam.PATIENT_PROQUINT: proquint,
        }
    )

    response = client_api(self.req)
    reply_dict = get_reply_dict_from_response(response)

    self.assertEqual(
        reply_dict[TabletParam.SUCCESS], SUCCESS_CODE, msg=reply_dict
    )
    task_schedules = json.loads(reply_dict[TabletParam.TASK_SCHEDULES])
    self.assertEqual(len(task_schedules), 2)

    s = task_schedules[0]
    self.assertEqual(s[TabletParam.TASK_SCHEDULE_NAME], "Test 1")

    schedule_items = s[TabletParam.TASK_SCHEDULE_ITEMS]
    self.assertEqual(len(schedule_items), 4)

    # Item 1: first phq9; due dates are start_datetime + item durations.
    phq9_1_sched = schedule_items[0]
    self.assertEqual(phq9_1_sched[TabletParam.TABLE], "phq9")
    self.assertEqual(
        phq9_1_sched[TabletParam.SETTINGS], {"phq9_key": "phq9_value"}
    )
    self.assertEqual(
        parse(phq9_1_sched[TabletParam.DUE_FROM]), local(2020, 7, 31)
    )
    self.assertEqual(
        parse(phq9_1_sched[TabletParam.DUE_BY]), local(2020, 8, 7)
    )
    self.assertFalse(phq9_1_sched[TabletParam.COMPLETE])
    self.assertFalse(phq9_1_sched[TabletParam.ANONYMOUS])

    # Item 2: bmi — marked complete because of the task uploaded above.
    bmi_sched = schedule_items[1]
    self.assertEqual(bmi_sched[TabletParam.TABLE], "bmi")
    self.assertEqual(
        bmi_sched[TabletParam.SETTINGS], {"bmi_key": "bmi_value"}
    )
    self.assertEqual(
        parse(bmi_sched[TabletParam.DUE_FROM]), local(2020, 7, 31)
    )
    self.assertEqual(
        parse(bmi_sched[TabletParam.DUE_BY]), local(2020, 8, 8)
    )
    self.assertTrue(bmi_sched[TabletParam.COMPLETE])
    self.assertFalse(bmi_sched[TabletParam.ANONYMOUS])

    # Item 3: second phq9, a month later, not yet complete.
    phq9_2_sched = schedule_items[2]
    self.assertEqual(phq9_2_sched[TabletParam.TABLE], "phq9")
    self.assertEqual(
        phq9_2_sched[TabletParam.SETTINGS], {"phq9_key": "phq9_value"}
    )
    self.assertEqual(
        parse(phq9_2_sched[TabletParam.DUE_FROM]), local(2020, 8, 30)
    )
    self.assertEqual(
        parse(phq9_2_sched[TabletParam.DUE_BY]), local(2020, 9, 6)
    )
    self.assertFalse(phq9_2_sched[TabletParam.COMPLETE])
    self.assertFalse(phq9_2_sched[TabletParam.ANONYMOUS])

    # GMCPQ
    gmcpq_sched = schedule_items[3]
    self.assertTrue(gmcpq_sched[TabletParam.ANONYMOUS])
def __call__(self, environ: TYPE_WSGI_ENVIRON,
             start_response: TYPE_WSGI_START_RESPONSE) \
        -> TYPE_WSGI_APP_RESULT:
    """
    WSGI middleware entry point: logs request details — and, depending on
    configuration, the response status and elapsed time — around a call
    to the wrapped application.
    """
    query_string = environ.get(WsgiEnvVar.QUERY_STRING, "")
    try:
        # https://stackoverflow.com/questions/7835030/obtaining-client-ip-address-from-a-wsgi-app-using-eventlet  # noqa
        # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For  # noqa
        forwarded_for = " [forwarded for {}]".format(
            environ[WsgiEnvVar.HTTP_X_FORWARDED_FOR])
    except KeyError:
        # No X-Forwarded-For header; omit the "forwarded for" suffix.
        forwarded_for = ""
    # One-line summary of the request, reused in all log messages below.
    request_details = (
        '{remote}{fwd}: "{method} {path}{qmark}{query} {proto}"'.format(
            remote=environ.get(WsgiEnvVar.REMOTE_ADDR, ""),
            fwd=forwarded_for,
            method=environ.get(WsgiEnvVar.REQUEST_METHOD, ""),
            path=environ.get(WsgiEnvVar.PATH_INFO, ""),
            qmark="?" if query_string else "",
            query=query_string,
            proto=environ.get(WsgiEnvVar.SERVER_PROTOCOL, ""),
        ))
    msg_parts = []  # type: List[str]
    if self.show_request_immediately:
        # Log the request up front; the finally block will then log
        # "Response to" rather than repeating "Request from".
        msg_parts.append("Request from")
        msg_parts.append(request_details)
        self.log(" ".join(msg_parts))
        msg_parts.clear()
    # NOTE(review): despite the Optional[int] type comment, this captures
    # whatever the WSGI status value is (typically a string such as
    # "200 OK") — confirm intended type.
    captured_status = None  # type: Optional[int]

    def custom_start_response(status: TYPE_WSGI_STATUS,
                              headers: TYPE_WSGI_RESPONSE_HEADERS,
                              exc_info: TYPE_WSGI_EXC_INFO = None) \
            -> TYPE_WSGI_START_RESP_RESULT:
        # Record the response status, then delegate to the real
        # start_response unchanged.
        nonlocal captured_status
        captured_status = status
        return start_response(status, headers, exc_info)

    # noinspection PyBroadException
    try:
        if self.show_timing:
            t1 = Pendulum.utcnow()
        result = self.app(environ, custom_start_response)
        return result
    except Exception:
        # Flag the failure in the log line, but let the exception
        # propagate to the server.
        msg_parts.append("[RAISED EXCEPTION]")
        raise
    finally:
        # Runs on both success and failure: assemble and emit the
        # response-side log message.
        if self.show_request_immediately:
            msg_parts.append("Response to")
        else:
            msg_parts.append("Request from")
        msg_parts.append(request_details)
        if self.show_timing:
            t2 = Pendulum.utcnow()
        if self.show_response:
            if captured_status is not None:
                msg_parts.append(f"-> {captured_status}")
            else:
                # start_response was never called (e.g. app raised early).
                msg_parts.append("[no response status]")
        if self.show_timing:
            # noinspection PyUnboundLocalVariable
            time_taken_s = (t2 - t1).total_seconds()
            msg_parts.append(f"[{time_taken_s} s]")
        if msg_parts:
            self.log(" ".join(msg_parts))