def clear_threadlocal():
    """
    Clear the thread-local logging context.

    :rtype: None
    """
    clear_threadlocal()
def _cleanup_threadlocals(threadlocals):
    for key in list(threadlocals.__dict__.keys()):
        try:
            delattr(threadlocals, key)
        except AttributeError:
            pass

    # for the structlog logger:
    clear_threadlocal()
def test_merge_works_without_bind(self):
    """
    merge_threadlocal returns values as normal even when there have been
    no previous calls to bind_threadlocal.
    """
    clear_threadlocal()

    assert {"b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_clear(self):
    """
    The thread-local context can be cleared, causing any previously bound
    variables to not be included in merge_threadlocal's result.
    """
    bind_threadlocal(a=1)
    clear_threadlocal()

    assert {"b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_bind_and_merge(self):
    """
    Binding a variable causes it to be included in the result of
    merge_threadlocal.
    """
    clear_threadlocal()
    bind_threadlocal(a=1)

    assert {"a": 1, "b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_multiple_binds(self):
    """
    Multiple calls to bind_threadlocal accumulate values instead of
    replacing them.
    """
    clear_threadlocal()
    bind_threadlocal(a=1, b=2)
    bind_threadlocal(c=3)

    assert {"a": 1, "b": 2, "c": 3} == merge_threadlocal(None, None, {"b": 2})
def test_unbind_threadlocal(self):
    """
    Unbinding from the thread-local context works for keys that exist and
    does not raise an error for keys that do not exist.
    """
    clear_threadlocal()
    bind_threadlocal(a=234, b=34)

    assert {"a": 234, "b": 34} == merge_threadlocal_context(None, None, {})

    unbind_threadlocal("a")

    assert {"b": 34} == merge_threadlocal_context(None, None, {})

    unbind_threadlocal("non-existing-key")

    assert {"b": 34} == merge_threadlocal_context(None, None, {})
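# For context (not part of the source above): the tests call merge_threadlocal
# directly with (None, None, event_dict); in normal use it is installed as a
# structlog processor so thread-locally bound values flow into every log call.
# Minimal sketch, assuming a structlog release that still ships the
# structlog.threadlocal module; the request_id key is purely illustrative.
import structlog
from structlog.threadlocal import bind_threadlocal, clear_threadlocal, merge_threadlocal

structlog.configure(
    processors=[
        merge_threadlocal,                       # merge bound context into each event dict
        structlog.processors.KeyValueRenderer(),
    ]
)

log = structlog.get_logger()

clear_threadlocal()                     # start each unit of work from an empty context
bind_threadlocal(request_id="abc123")   # illustrative value
log.info("handling request")            # rendered with request_id='abc123'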
def _init_threadlocals(filename, settings, threadlocals):
    threadlocals.settings = settings

    local_types = find_local_types(filename)
    threadlocals.signatures = local_types.signatures
    threadlocals.import_strategist = ImportStrategist(local_types)
    threadlocals.strategy_to_names = {}

    # per-file counters
    threadlocals.docstring_count = 0
    threadlocals.typed_docstring_count = 0
    threadlocals.comment_count = 0
    threadlocals.warning_count = 0
    threadlocals.error_count = 0

    # for the structlog logger (it manages its own threadlocals):
    clear_threadlocal()
    bind_threadlocal(filename=filename)
def __init__(
    self,
    fips,
    window_size=14,
    kernel_std=5,
    r_list=np.linspace(0, 10, 501),
    process_sigma=0.05,
    ref_date=datetime(year=2020, month=1, day=1),
    confidence_intervals=(0.68, 0.95),
    min_cases=5,
    min_deaths=5,
    include_testing_correction=True,
):
    # Param generation used for Xcor in align_time_series has some stochastic FFT elements.
    np.random.seed(InferRtConstants.RNG_SEED)

    self.fips = fips
    self.r_list = r_list
    self.window_size = window_size
    self.kernel_std = kernel_std
    self.process_sigma = process_sigma
    self.ref_date = ref_date
    self.confidence_intervals = confidence_intervals
    self.min_cases = min_cases
    self.min_deaths = min_deaths
    self.include_testing_correction = include_testing_correction

    if len(fips) == 2:  # State FIPS are 2 digits
        self.agg_level = AggregationLevel.STATE
        self.state_obj = us.states.lookup(self.fips)
        self.state = self.state_obj.name

        (
            self.times,
            self.observed_new_cases,
            self.observed_new_deaths,
        ) = load_data.load_new_case_data_by_state(
            self.state,
            self.ref_date,
            include_testing_correction=self.include_testing_correction,
        )
        (
            self.hospital_times,
            self.hospitalizations,
            self.hospitalization_data_type,
        ) = load_data.load_hospitalization_data_by_state(
            state=self.state_obj.abbr, t0=self.ref_date
        )
        self.display_name = self.state
    else:
        self.agg_level = AggregationLevel.COUNTY
        self.geo_metadata = (
            load_data.load_county_metadata().set_index("fips").loc[fips].to_dict()
        )
        self.state = self.geo_metadata["state"]
        self.state_obj = us.states.lookup(self.state)
        self.county = self.geo_metadata["county"]
        if self.county:
            self.display_name = self.county + ", " + self.state
        else:
            self.display_name = self.state

        (
            self.times,
            self.observed_new_cases,
            self.observed_new_deaths,
        ) = load_data.load_new_case_data_by_fips(
            self.fips,
            t0=self.ref_date,
            include_testing_correction=self.include_testing_correction,
        )
        (
            self.hospital_times,
            self.hospitalizations,
            self.hospitalization_data_type,
        ) = load_data.load_hospitalization_data(self.fips, t0=self.ref_date)

    clear_threadlocal()
    bind_threadlocal(Rt_Inference_Target=self.display_name)
    log.info("Running")

    self.case_dates = [ref_date + timedelta(days=int(t)) for t in self.times]
    if self.hospitalization_data_type:
        self.hospital_dates = [
            ref_date + timedelta(days=int(t)) for t in self.hospital_times
        ]

    self.default_parameters = ParameterEnsembleGenerator(
        fips=self.fips, N_samples=500, t_list=np.linspace(0, 365, 366)
    ).get_average_seir_parameters()

    # Serial period = Incubation + 0.5 * Infections
    self.serial_period = (
        1 / self.default_parameters["sigma"]
        + 0.5 * 1 / self.default_parameters["delta"]
    )

    # If we only receive current hospitalizations, we need to account for
    # the outflow to reconstruct new admissions.
    if (
        self.hospitalization_data_type
        is load_data.HospitalizationDataType.CURRENT_HOSPITALIZATIONS
    ):
        los_general = self.default_parameters["hospitalization_length_of_stay_general"]
        los_icu = self.default_parameters["hospitalization_length_of_stay_icu"]
        hosp_rate_general = self.default_parameters["hospitalization_rate_general"]
        hosp_rate_icu = self.default_parameters["hospitalization_rate_icu"]
        icu_rate = hosp_rate_icu / hosp_rate_general
        flow_out_of_hosp = self.hospitalizations[:-1] * (
            (1 - icu_rate) / los_general + icu_rate / los_icu
        )
        # We are attempting to reconstruct the cumulative hospitalizations.
        self.hospitalizations = np.diff(self.hospitalizations) + flow_out_of_hosp
        self.hospital_dates = self.hospital_dates[1:]
        self.hospital_times = self.hospital_times[1:]

    self.log_likelihood = None
def run(self):
    clear_threadlocal()
    bind_threadlocal(slot=self._slot, cell_id=self._cell_infoset.fetch('.id'))
    workflow_log = self._workflow_log

    log.info('launching workflow')
    workflow_log.append(
        dict(action='lvc recovery', event='start', ts=time.time()))
    lvc_outcome = low_voltage_recovery(self._sess, self._slot, self._queue)
    workflow_log.append(
        dict(action='lvc recovery', event='end', outcome=lvc_outcome,
             ts=time.time()))

    if lvc_outcome['ok']:
        # Take the results from the LVC (if there are any)
        workflow_log.main_event['results'].update(
            lvc_outcome.get('results', {}))

        workflow_log.append(
            dict(action='capacity measure', event='start', ts=time.time()))
        mcap_outcome = measure_capacity(self._sess, self._slot, self._queue)
        workflow_log.append(
            dict(action='capacity measure', event='end', outcome=mcap_outcome,
                 ts=time.time()))

        if mcap_outcome['ok']:
            # Take the results from the capacity measurement (if there are any)
            workflow_log.main_event['results'].update(
                mcap_outcome.get('results', {}))
        else:
            log.warning('failed capacity measurement', outcome=mcap_outcome)
            self._cell_infoset.put('.props.tags.workflow_failure', True)
            self._cell_infoset.put(
                '.props.workflow_failure_outcome',
                dict(state_text=mcap_outcome['state_text'],
                     status_text=mcap_outcome['status_text']))
            status_text = mcap_outcome['status_text']
            if status_text in (StatusStrings.HOT_CHARGED,
                               StatusStrings.HOT_DISCHARGED):
                self._cell_infoset.put('.props.tags.excessive_heat', True)
    else:
        log.warning('failed low voltage recovery attempt', outcome=lvc_outcome)
        self._cell_infoset.put('.props.tags.workflow_failure', True)
        self._cell_infoset.put(
            '.props.workflow_failure_outcome',
            dict(state_text=lvc_outcome['state_text'],
                 status_text=lvc_outcome['status_text']))
        self._cell_infoset.put('.props.tags.precharge_fail', True)

    log.info('workflow finished')