def result(self, test_id, status):
    """ Tester got a result, which should be STATUS_INFECTED or STATUS_HEALTHY

    Derives the provider-side daily key for this test, then reports the result to
    the server along with:
      - replaces: the proof Alice will have derived her UpdateTokens from
      - id: the id Alice will be watching for
      - update_tokens: pre-computed tokens covering `length` future data points

    :param test_id: key into self.tests identifying the test being resulted
    :param status:  STATUS_INFECTED or STATUS_HEALTHY
    :return: whatever self.server.result(...) returns
    """
    test = self.tests[test_id]
    provider_daily = get_provider_daily(self.provider_id, test_id, test.get('pin'))
    # This is the replaces value Alice will have derived UpdateTokens from
    provider_proof = get_id_proof(provider_daily)
    # This is the id that Alice will be watching for
    id_for_provider = get_next_id(provider_daily, 0)
    test['status'] = status
    length = 256  # How many points to account for - adjust this with experience
    test['seed'] = new_seed()
    update_tokens = [
        get_update_token(get_replacement_token(test['seed'], n))
        for n in range(length)
    ]
    # BUG FIX: the message previously contained a literal "{proof}" that was never
    # substituted, so the user would have been told to quote the string "{proof}".
    # Substitute the actual provider proof the contact tracer needs.
    message = (
        "Please call 0412-345-6789 to speak to a contact tracer and quote {proof}"
        .format(proof=provider_proof)
        if status == STATUS_INFECTED else None
    )
    return self.server.result(replaces=provider_proof, status=status,
                              update_tokens=update_tokens, id=id_for_provider,
                              message=message)
def _next_update_token(self):
    """Return the next unused update_token for this object.

    The token is derived deterministically from self.seed at index self.length;
    self.length is then advanced so the next call yields a fresh token.
    """
    replacement = get_replacement_token(self.seed, self.length)
    self.length = self.length + 1
    return get_update_token(replacement)
def status_data_points(self, data, args):
    """Walk the update-token chain for `data['seed']`, collecting the file paths
    of any spatial (location) or contact records indexed under each token.

    Scanning stops after self.max_missing_updates CONSECUTIVE tokens that match
    neither index. Returns a dict whose 'locations' and 'contact_ids' values are
    zero-argument callables that lazily load the matching blobs.
    """
    seed = data.get('seed')
    location_paths = []
    contact_paths = []
    misses = 0
    idx = 0
    while misses < self.max_missing_updates:
        token = get_update_token(get_replacement_token(seed, idx))
        spatial_path = self.spatial_dict.update_index.get(token)
        if spatial_path:
            location_paths.append(spatial_path)
            misses = 0
        else:
            contact_path = self.contact_dict.update_index.get(token)
            if contact_path:
                contact_paths.append(contact_path)
                misses = 0
            else:
                misses += 1
        idx += 1

    # TODO-MITRA should use file-paths so dnt have to go back into data
    def load_locations():
        return list(self.spatial_dict.get_blob_from_file_paths(location_paths))

    def load_contact_ids():
        return list(self.contact_dict.get_blob_from_file_paths(contact_paths))

    result = {}
    result['locations'] = load_locations
    result['contact_ids'] = load_contact_ids
    return result
def _update_or_result(self, length=0, floating_seconds_and_serial_number=(0, 0), update_tokens=None, max_missing_updates=None, replaces=None, status=None, message=None, **kwargs):
    """ max_missing_updates is the number of CONSECUTIVE missing data points to store updates to, i.e. once we see this big a gap we stop saving them (they slow down calculations significantly) """
    # Unpack the (timestamp, serial) pair used to stamp each stored update.
    floating_seconds, serial_number = floating_seconds_and_serial_number
    # Default: tolerate as many consecutive misses as there are data points.
    if max_missing_updates is None:
        max_missing_updates = length
    if not update_tokens:
        update_tokens = []
    consecutive_missed_updates = 0
    if length:
        for i in range(length):
            # Derive the replacement token chain from `replaces` and the
            # update_token that an earlier data point would be stored under.
            rt = get_replacement_token(replaces, i)
            ut = get_update_token(rt)
            updates = {
                'replaces': rt,
                'status': status,
                # NOTE(review): indexes update_tokens[i] without a length check —
                # assumes callers pass either no tokens or at least `length` of
                # them; an IndexError is possible otherwise. TODO confirm caller contract.
                'update_token': update_tokens[i],
                'message': message,
            }  # SEE-OTHER-ADD-FIELDS
            # If some of the update_tokens are not found, it might be a sync issue,
            # hold the update tokens till sync comes in
            new_floating_seconds_and_serial_number = (floating_seconds, serial_number)
            # NOTE(review): the success/failure sense of self._update is not
            # visible here — as written, a falsy return resets the miss counter
            # and a truthy return counts as a miss; verify against _update's
            # return convention.
            if not self._update(ut, updates, new_floating_seconds_and_serial_number):
                consecutive_missed_updates = 0
            else:
                consecutive_missed_updates += 1
                # Only hold tokens while the gap is still small enough to matter.
                if consecutive_missed_updates <= max_missing_updates:
                    logger.info(
                        "Holding update tokens for later {update_token}:{updates}",
                        update_token=ut, updates=str(updates))
                    self.unused_update_tokens.insert(
                        ut, updates, new_floating_seconds_and_serial_number)
            # Each data point consumes one serial number, stored or held.
            serial_number += 1
    return serial_number
def _status(self, endpoint_name, seed, contacts, locations, **kwargs):
    """POST a status payload to `endpoint_name` on the server.

    When both a seed and a `replaces` kwarg are supplied, pre-computes
    `length` update_tokens from the seed. contacts and locations should
    already carry their own update_tokens if that behaviour is wanted.
    A `current_time` kwarg, if present, is forwarded via the
    X-Testing-Time header. Returns the requests.Response.
    """
    # logger.info('before %s call' % endpoint_name)
    payload = {}
    if seed and kwargs.get('replaces'):
        payload['update_tokens'] = [
            get_update_token(get_replacement_token(seed, n))
            for n in range(kwargs.get('length'))
        ]
    if contacts:
        payload['contact_ids'] = contacts
    if locations:
        payload['locations'] = locations
    headers = {}
    testing_time = kwargs.get('current_time')
    if testing_time:
        headers['X-Testing-Time'] = str(testing_time)
    payload.update(kwargs)
    logger.info("Sending %s: %s" % (endpoint_name, str(payload)))
    response = requests.post(self.url + endpoint_name, json=payload, headers=headers)
    # logger.info('after %s call' % endpoint_name)
    return response
def poll(self):
    """ Perform a regular poll of the server with /status/scan, and process any results. """
    json_data = self.server.scan_status_json(
        contact_prefixes=self._prefixes(),
        locations=[self._box()] if len(self.locations) else [],
        since=self.since)
    logging.info("%s: poll result: %s" % (self.name, str(json_data)))

    # Record when data is updated till, for our next request
    self.since = json_data.get('until')

    # Record any ids in the poll that match one we have used (id = {id, last_used})
    # Note that this can include a test result which might be STATUS_HEALTHY
    matched_ids = [
        i for i in json_data['contact_ids'] if i.get('id') in self.map_ids_used()
    ]
    for id_obj in matched_ids:
        id_obj['received_at'] = current_time()
        # Scan for a test result and flag the id we will record (via its 'test' field) so that it can effect score calculations
        if self.pending_test:
            if id_obj['id'] == self.pending_test['id']:
                id_obj['test'] = self.pending_test  # Save the test (includes test_id and pin)
                # Time of test -> old daily_ids (t-1day) -> ids used on those -> remove id_alerts; location_alerts dated < t-1day ignore
                ignore_alerts_before = self.pending_test["time"] - self.on_test_ignore_before
                self.pending_test = None  # Clear pending test
                # Clear out older alerts.
                # BUG FIX: the original loop assigned BOTH filtered lists to
                # self.id_alerts, so location_alerts was never cleared and
                # id_alerts was clobbered with filtered location alerts; it also
                # KEPT alerts received before the cutoff instead of dropping
                # them, contradicting the "ignore ... before" intent above.
                self.id_alerts = [
                    alert_obj for alert_obj in self.id_alerts
                    if alert_obj['received_at'] >= ignore_alerts_before
                ]
                self.location_alerts = [
                    alert_obj for alert_obj in self.location_alerts
                    if alert_obj['received_at'] >= ignore_alerts_before
                ]
                self.local_status = STATUS_HEALTHY  # Note this is correct even if the test is INFECTED, as its the infected test that counts, and there is no LOCAL status event any more
    self.id_alerts.extend(
        matched_ids
    )  # Add the new ones after we have cleared out alerts no longer valid

    # Filter incoming location updates for those close to where we have been,
    # but exclude any of our own (based on matching update_token)
    existing_location_update_tokens = [
        loc.get('update_token') for loc in self.locations
    ]
    matching_locations = [
        loc for loc in json_data.get('locations', [])
        if self._location_match(loc)
        and loc.get('update_token') not in existing_location_update_tokens
    ]
    for loc in matching_locations:
        loc['received_at'] = current_time()
    self.location_alerts.extend(matching_locations)

    # Look for any updated data points
    # Find the replaces tokens for both ids and locations - these are the locations this data point replaces
    # Note that by checking all id_alerts we also handle any received out of order (replace received before original)
    id_replaces = [
        i.get('replaces') for i in self.id_alerts if i.get('replaces')
    ]
    location_replaces = [
        loc.get('replaces') for loc in self.location_alerts if loc.get('replaces')
    ]
    # Find update_tokens that have been replaced
    id_update_tokens = [get_update_token(rt) for rt in id_replaces]
    location_update_tokens = [
        get_update_token(rt) for rt in location_replaces
    ]
    # Mark any ids or locations that have been replaced
    for i in self.id_alerts:
        if i.get('update_token') in id_update_tokens:
            i['replaced'] = True
    for loc in self.location_alerts:
        if loc.get('update_token') in location_update_tokens:
            loc['replaced'] = True
    # Recalculate our own status based on the current set of location and id alerts and our local_status
    # Note that if it has changed it may trigger a notify_status which can cause a /status/send or /status/update
    self._recalculate_status()