def latest_growth_rate(self) -> float:
    # check if None; this should really only happen on initialization.
    if self._latest_growth_rate is None:
        self.logger.debug("Waiting for OD and growth rate data to arrive")
        if not is_pio_job_running("od_reading", "growth_rate_calculating"):
            raise exc.JobRequiredError(
                "`od_reading` and `growth_rate_calculating` should be running."
            )

    # check that readings aren't too stale
    if (time.time() - self.most_stale_time) > 5 * 60:
        raise exc.JobRequiredError(
            "readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?"
        )

    return cast(float, self._latest_growth_rate)
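# A minimal sketch of how `_latest_growth_rate` and `most_stale_time` could be
# kept up to date, assuming the job subscribes to the growth rate topic over
# MQTT. This callback is illustrative only: the name, payload shape, and
# timestamp bookkeeping are assumptions, not this module's actual code.
def _example_set_growth_rate(self, message) -> None:
    payload = json.loads(message.payload)
    self._latest_growth_rate = float(payload["growth_rate"])
    # `most_stale_time` should reflect the *oldest* of the OD and growth-rate
    # arrival times, so that either stream going quiet trips the 5-minute
    # staleness check above; for brevity this sketch tracks only one stream.
    self.most_stale_time = time.time()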
def get_precomputed_values(self) -> tuple:
    if self.ignore_cache:
        if not is_pio_job_running("od_reading"):
            self.logger.error("OD reading should be running. Stopping.")
            raise exc.JobRequiredError("OD reading should be running. Stopping.")

        self.logger.info("Computing OD normalization metrics. This may take a few minutes.")
        od_normalization_factors, od_variances = od_normalization(
            unit=self.unit, experiment=self.experiment
        )
        self.logger.info("Completed OD normalization metrics.")
        initial_growth_rate = 0.0
        initial_od = 1.0
    else:
        od_normalization_factors = self.get_od_normalization_from_cache()
        od_variances = self.get_od_variances_from_cache()
        initial_growth_rate = self.get_growth_rate_from_cache()
        initial_od = self.get_previous_od_from_cache()

    initial_acc = 0
    od_blank = self.get_od_blank_from_cache()

    # What if od_blank is close to, or above, od_normalization_factors?
    # That means the inoculant had near-zero impact on the turbidity, i.e.
    # the culture is very dilute, so we should not use od_blank.
    for channel in od_normalization_factors.keys():
        if od_normalization_factors[channel] * 0.95 < od_blank[channel]:
            self.logger.debug(
                "Resetting od_blank because it is too close to current observations."
            )
            od_blank[channel] = od_normalization_factors[channel] * 0.95

    return (
        initial_growth_rate,
        initial_od,
        od_normalization_factors,
        od_variances,
        od_blank,
        initial_acc,
    )
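# Hedged sketch of two of the cache getters referenced above. The cache names
# and JSON encoding mirror what od_normalization() (below) writes; the real
# method bodies may differ.
def get_od_normalization_from_cache(self) -> dict:
    with local_persistant_storage("od_normalization_mean") as cache:
        return json.loads(cache[self.experiment])

def get_od_variances_from_cache(self) -> dict:
    with local_persistant_storage("od_normalization_variance") as cache:
        return json.loads(cache[self.experiment])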
def od_normalization(
    unit: str, experiment: str, n_samples: int = 35
) -> tuple[dict[PdChannel, float], dict[PdChannel, float]]:
    """
    Sample `n_samples` raw OD readings from MQTT, compute the per-channel mean
    and variance, cache them for later jobs, and return (means, variances).
    """
    from statistics import mean, variance

    action_name = "od_normalization"
    logger = create_logger(action_name)
    logger.debug("Starting OD normalization.")

    with publish_ready_to_disconnected_state(unit, experiment, action_name):
        # od_reading must be running, unless we are in a test environment.
        if not is_pio_job_running("od_reading") and not is_testing_env():
            logger.error("OD Reading should be running. Run OD Reading first. Exiting.")
            raise exc.JobRequiredError(
                "OD Reading should be running. Run OD Reading first. Exiting."
            )

        # TODO: write tests for this
        def yield_from_mqtt() -> Generator[dict, None, None]:
            while True:
                msg = pubsub.subscribe(
                    f"pioreactor/{unit}/{experiment}/od_reading/od_raw_batched",
                    allow_retained=False,
                )
                if msg is None:
                    continue
                yield json.loads(msg.payload)

        signal = yield_from_mqtt()
        readings = defaultdict(list)

        for count, batched_reading in enumerate(signal, start=1):
            for sensor, reading in batched_reading["od_raw"].items():
                readings[sensor].append(reading["voltage"])

            # use true division: `count // n_samples` would report 0% progress
            # until the very last sample.
            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                count / n_samples * 100,
            )
            logger.debug(f"Progress: {count / n_samples:.0%}")
            if count == n_samples:
                break

        variances = {}
        means = {}
        autocorrelations = {}  # lag 1

        for sensor, od_reading_series in readings.items():
            # detrend the series before computing its variance, see issue #206
            variances[sensor] = variance(
                residuals_of_simple_linear_regression(list(range(n_samples)), od_reading_series)
            )
            means[sensor] = mean(od_reading_series)
            autocorrelations[sensor] = correlation(od_reading_series[:-1], od_reading_series[1:])

        with local_persistant_storage("od_normalization_mean") as cache:
            cache[experiment] = json.dumps(means)

        with local_persistant_storage("od_normalization_variance") as cache:
            cache[experiment] = json.dumps(variances)

        logger.debug(f"measured mean: {means}")
        logger.debug(f"measured variances: {variances}")
        logger.debug(f"measured autocorrelations: {autocorrelations}")
        logger.debug("OD normalization finished.")

        if config.getboolean(
            "data_sharing_with_pioreactor",
            "send_od_statistics_to_Pioreactor",
            fallback=False,
        ):
            add_on = {
                "ir_intensity": config["od_config"]["ir_intensity"],
            }

            # note: on Python 3.9+, `variances | add_on` could be used instead.
            pubsub.publish_to_pioreactor_cloud(
                "od_normalization_variance",
                json={**variances, **add_on},
            )
            pubsub.publish_to_pioreactor_cloud(
                "od_normalization_mean",
                json={**means, **add_on},
            )

        return means, variances
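# Hedged sketches of the two statistical helpers used above. The real
# implementations live elsewhere in the codebase; these stand-ins only show the
# intent: detrend with an OLS fit before taking the variance (a slow drift in
# the signal would otherwise inflate it, see issue #206), and use the Pearson
# correlation of the series against its one-step shift for lag-1 autocorrelation.
from math import sqrt
from statistics import mean as _mean

def residuals_of_simple_linear_regression(x: list[float], y: list[float]) -> list[float]:
    # fit y = a + b*x by ordinary least squares, then return the residuals y - ŷ
    x_bar, y_bar = _mean(x), _mean(y)
    b = sum((xi - x_bar) * (yi - y_bar) for xi, yi in zip(x, y)) / sum(
        (xi - x_bar) ** 2 for xi in x
    )
    a = y_bar - b * x_bar
    return [yi - (a + b * xi) for xi, yi in zip(x, y)]

def correlation(x: list[float], y: list[float]) -> float:
    # Pearson correlation coefficient of two equal-length series
    x_bar, y_bar = _mean(x), _mean(y)
    cov = sum((xi - x_bar) * (yi - y_bar) for xi, yi in zip(x, y))
    return cov / sqrt(
        sum((xi - x_bar) ** 2 for xi in x) * sum((yi - y_bar) ** 2 for yi in y)
    )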