class AltMediaCalculator(BackgroundSubJob):
    """
    Computes the fraction of the vial volume that is alt-media vs. regular media.
    We also publish this periodically so the UI chart has no gaps.
    """

    def __init__(self, unit=None, experiment=None, **kwargs) -> None:
        super(AltMediaCalculator, self).__init__(job_name=JOB_NAME, unit=unit, experiment=experiment)
        self.latest_alt_media_fraction = self.get_initial_alt_media_fraction()

        # publish often to fill in gaps in the UI chart.
        self.publish_periodically_thread = RepeatedTimer(
            5 * 60, self.publish_latest_alt_media_fraction, job_name=self.job_name
        )
        self.publish_periodically_thread.start()

        self.start_passive_listeners()

    def on_disconnect(self):
        self.publish_periodically_thread.cancel()

    def on_dosing_event(self, message):
        payload = json.loads(message.payload)
        volume, event = float(payload["volume_change"]), payload["event"]

        if event == "add_media":
            self.update_alt_media_fraction(volume, 0)
        elif event == "add_alt_media":
            self.update_alt_media_fraction(0, volume)
        elif event == "remove_waste":
            pass
        else:
            raise ValueError("Unknown event type")

    def publish_latest_alt_media_fraction(self):
        self.publish(
            f"pioreactor/{self.unit}/{self.experiment}/{JOB_NAME}/alt_media_fraction",
            self.latest_alt_media_fraction,
            retain=True,
            qos=QOS.EXACTLY_ONCE,
        )

    def update_alt_media_fraction(self, media_delta, alt_media_delta):
        total_delta = media_delta + alt_media_delta

        # current mL
        alt_media_ml = VIAL_VOLUME * self.latest_alt_media_fraction
        media_ml = VIAL_VOLUME * (1 - self.latest_alt_media_fraction)

        # remove
        alt_media_ml = alt_media_ml * (1 - total_delta / VIAL_VOLUME)
        media_ml = media_ml * (1 - total_delta / VIAL_VOLUME)

        # add (alt) media
        alt_media_ml = alt_media_ml + alt_media_delta
        media_ml = media_ml + media_delta

        self.latest_alt_media_fraction = alt_media_ml / VIAL_VOLUME
        self.publish_latest_alt_media_fraction()

        return self.latest_alt_media_fraction

    def get_initial_alt_media_fraction(self):
        message = subscribe(
            f"pioreactor/{self.unit}/{self.experiment}/{self.job_name}/alt_media_fraction",
            timeout=2,
        )
        if message:
            return float(message.payload)
        else:
            return 0

    def start_passive_listeners(self) -> None:
        self.subscribe_and_callback(
            self.on_dosing_event,
            f"pioreactor/{self.unit}/{self.experiment}/dosing_events",
            qos=QOS.EXACTLY_ONCE,
        )
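# A quick hand-check of the dilution arithmetic in `update_alt_media_fraction` above.
# This is an illustrative sketch only: it assumes a hypothetical 14 mL vial volume and
# stands apart from the job class (no MQTT, no RepeatedTimer machinery).
def _example_alt_media_fraction_update(
    current_fraction: float, media_delta: float, alt_media_delta: float, vial_volume: float = 14.0
) -> float:
    # volumes implied by the current fraction
    alt_media_ml = vial_volume * current_fraction
    media_ml = vial_volume * (1 - current_fraction)

    # the dosed volume displaces an equal volume of the well-mixed contents,
    # so both components shrink proportionally...
    total_delta = media_delta + alt_media_delta
    alt_media_ml *= 1 - total_delta / vial_volume
    media_ml *= 1 - total_delta / vial_volume

    # ...and then the freshly added volume is counted back in
    alt_media_ml += alt_media_delta
    media_ml += media_delta
    return alt_media_ml / vial_volume


# starting from 0% alt-media, a single 1 mL alt-media dose gives 1/14 ≈ 0.0714
assert abs(_example_alt_media_fraction_update(0.0, 0.0, 1.0) - 1 / 14) < 1e-9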
class Stirrer(BackgroundJob):
    """
    Parameters
    ------------
    target_rpm: float
        Send a message to "pioreactor/{unit}/{experiment}/stirring/target_rpm/set" to change the stirring speed.
    rpm_calculator: RpmCalculator
        See RpmCalculator and examples below.

    Notes
    -------
    To create a feedback loop between the duty-cycle level and the RPM, we set up a polling algorithm.
    We set up an edge detector on the hall-sensor pin and count the number of pulses in N seconds.
    We convert this count to RPM, and then use a PID system to update the amount of duty cycle to apply.

    We perform the above every N seconds. That is, there is a PID controller that checks every N seconds
    and nudges the duty cycle to match the requested RPM.

    Examples
    ---------

    > st = Stirrer(500, unit, experiment)
    > st.start_stirring()
    """

    published_settings = {
        "target_rpm": {"datatype": "json", "settable": True, "unit": "RPM"},
        "measured_rpm": {"datatype": "json", "settable": False, "unit": "RPM"},
        "duty_cycle": {"datatype": "float", "settable": True, "unit": "%"},
    }

    _previous_duty_cycle: float = 0
    duty_cycle: float = config.getfloat(
        "stirring", "initial_duty_cycle", fallback=60.0
    )  # only used if calibration isn't defined.
    _measured_rpm: Optional[float] = None

    def __init__(
        self,
        target_rpm: float,
        unit: str,
        experiment: str,
        rpm_calculator: Optional[RpmCalculator],
        hertz: float = 150,
    ) -> None:
        super(Stirrer, self).__init__(job_name="stirring", unit=unit, experiment=experiment)
        self.logger.debug(f"Starting stirring with initial {target_rpm} RPM.")
        self.rpm_calculator = rpm_calculator

        if not hardware.is_HAT_present():
            self.logger.error("Pioreactor HAT must be present.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")

        if (self.rpm_calculator is not None) and not hardware.is_heating_pcb_present():
            self.logger.error("Heating PCB must be present to measure RPM.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Heating PCB must be present to measure RPM.")

        pin = hardware.PWM_TO_PIN[config.get("PWM_reverse", "stirring")]
        self.pwm = PWM(pin, hertz)
        self.pwm.lock()

        self.rpm_to_dc_lookup = self.initialize_rpm_to_dc_lookup()
        self.target_rpm = target_rpm
        self.duty_cycle = self.rpm_to_dc_lookup(self.target_rpm)

        # set up PID
        self.pid = PID(
            Kp=config.getfloat("stirring.pid", "Kp"),
            Ki=config.getfloat("stirring.pid", "Ki"),
            Kd=config.getfloat("stirring.pid", "Kd"),
            setpoint=self.target_rpm,
            unit=self.unit,
            experiment=self.experiment,
            job_name=self.job_name,
            target_name="rpm",
            output_limits=(-20, 20),  # avoid whiplashing
        )

        # set up thread to periodically check the rpm
        self.rpm_check_repeated_thread = RepeatedTimer(
            17,  # 17 and 5 are coprime
            self.poll_and_update_dc,
            job_name=self.job_name,
            run_immediately=True,
            run_after=5,
            poll_for_seconds=4,  # technically should be a function of the RPM: lower RPM, longer to get sufficient data.
        )

    def initialize_rpm_to_dc_lookup(self) -> Callable:
        if self.rpm_calculator is None:
            # if we can't track RPM, no point in adjusting DC
            return lambda rpm: self.duty_cycle

        with local_persistant_storage("stirring_calibration") as cache:
            if "linear_v1" in cache:
                parameters = json.loads(cache["linear_v1"])
                coef = parameters["rpm_coef"]
                intercept = parameters["intercept"]
                # we scale this by 90% to make sure the PID + prediction doesn't overshoot,
                # better to be conservative here.
                # equivalent to a weighted average: 0.1 * current + 0.9 * predicted
                return lambda rpm: self.duty_cycle - 0.90 * (
                    self.duty_cycle - (coef * rpm + intercept)
                )
            else:
                return lambda rpm: self.duty_cycle

    def on_disconnected(self) -> None:
        with suppress(AttributeError):
            self.rpm_check_repeated_thread.cancel()

        with suppress(AttributeError):
            self.stop_stirring()
            self.pwm.cleanup()

        with suppress(AttributeError):
            if self.rpm_calculator:
                self.rpm_calculator.cleanup()

    def start_stirring(self) -> None:
        self.pwm.start(100)  # get momentum to start
        sleep(0.25)
        self.set_duty_cycle(self.duty_cycle)
        sleep(0.75)
        self.rpm_check_repeated_thread.start()  # .start is idempotent

    def poll(self, poll_for_seconds: float) -> Optional[float]:
        """
        Returns an RPM, or None if not measuring RPM.
        """
        if self.rpm_calculator is None:
            return None

        recent_rpm = self.rpm_calculator(poll_for_seconds)
        if recent_rpm == 0:
            # TODO: attempt to restart stirring
            self.publish(
                f"pioreactor/{self.unit}/{self.experiment}/monitor/flicker_led_with_error_code",
                error_codes.STIRRING_FAILED_ERROR_CODE,
            )
            self.logger.warning("Stirring RPM is 0 - has it failed?")

        if self._measured_rpm is not None:
            # use a simple EMA; alpha chosen arbitrarily, but should be a function of delta time.
            self._measured_rpm = 0.025 * self._measured_rpm + 0.975 * recent_rpm
        else:
            self._measured_rpm = recent_rpm

        self.measured_rpm = {"timestamp": current_utc_time(), "rpm": self._measured_rpm}
        return self._measured_rpm

    def poll_and_update_dc(self, poll_for_seconds: float) -> None:
        self.poll(poll_for_seconds)

        if self._measured_rpm is None:
            return

        result = self.pid.update(self._measured_rpm, dt=1)
        self.set_duty_cycle(self.duty_cycle + result)

    def stop_stirring(self) -> None:
        # if the user unpauses, we want to go back to their previous value, and not the default.
        self.set_duty_cycle(0)

    def on_ready_to_sleeping(self) -> None:
        self.rpm_check_repeated_thread.pause()
        self.stop_stirring()

    def on_sleeping_to_ready(self) -> None:
        self.duty_cycle = self._previous_duty_cycle
        self.rpm_check_repeated_thread.unpause()
        self.start_stirring()

    def set_duty_cycle(self, value: float) -> None:
        self._previous_duty_cycle = self.duty_cycle
        self.duty_cycle = clamp(0, round(float(value), 5), 100)
        self.pwm.change_duty_cycle(self.duty_cycle)

    def set_target_rpm(self, value: float) -> None:
        self.target_rpm = float(value)
        self.set_duty_cycle(self.rpm_to_dc_lookup(self.target_rpm))
        self.pid.set_setpoint(self.target_rpm)

    def block_until_rpm_is_close_to_target(self, abs_tolerance: float = 15) -> None:
        """
        This function blocks until the stirring is "close enough" to the target RPM.
        """
        if self.rpm_calculator is None:
            # can't block if we aren't recording the RPM
            return

        while (self._measured_rpm is not None) and abs(self._measured_rpm - self.target_rpm) > abs_tolerance:
            sleep(0.25)
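# A stripped-down sketch of the polling feedback loop described in Stirrer's Notes:
# measure the RPM over a short window, ask the controller for a bounded correction,
# and nudge the duty cycle. The bare proportional controller and the toy motor
# response below are made up for illustration; the real job uses the project's PID
# class with output_limits=(-20, 20).
def _example_stirring_feedback_step(
    duty_cycle: float, measured_rpm: float, target_rpm: float, kp: float = 0.05, max_step: float = 20.0
) -> float:
    error = target_rpm - measured_rpm
    step = max(-max_step, min(max_step, kp * error))  # bound the correction, like output_limits
    return max(0.0, min(100.0, duty_cycle + step))  # duty cycle must stay within [0, 100] %


def _example_spin_up() -> None:
    dc, rpm = 60.0, 420.0  # arbitrary starting point below a 500 RPM target
    for _ in range(5):
        dc = _example_stirring_feedback_step(dc, rpm, target_rpm=500.0)
        rpm += 2.0 * (dc - 60.0)  # toy motor model: RPM rises with duty cycle, purely illustrative
        print(f"duty cycle {dc:.1f}% -> measured {rpm:.0f} RPM")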
class TemperatureController(BackgroundJob):
    """
    This job publishes to

        pioreactor/<unit>/<experiment>/temperature_control/temperature

    the following:

        {
            "temperature": <float>,
            "timestamp": <ISO 8601 timestamp>
        }

    If you have your own thermocouple, you can publish to this topic, with the same schema,
    and all should just work™️. You'll need to provide your own feedback loops, however.

    Parameters
    ------------
    eval_and_publish_immediately: bool, default True
        evaluate and publish the temperature once the class is created (in the background)
        TODO: do I need this still?
    """

    MAX_TEMP_TO_REDUCE_HEATING = 60.0  # ~PLA glass transition temp
    MAX_TEMP_TO_DISABLE_HEATING = 62.0
    MAX_TEMP_TO_SHUTDOWN = 64.0

    automations = {}  # type: ignore

    published_settings = {
        "automation": {"datatype": "json", "settable": True},
        "automation_name": {"datatype": "string", "settable": False},
        "temperature": {"datatype": "json", "settable": False, "unit": "℃"},
        "heater_duty_cycle": {"datatype": "float", "settable": False, "unit": "%"},
    }
    temperature: Optional[dict[str, Any]] = None

    def __init__(
        self,
        automation_name: str,
        unit: str,
        experiment: str,
        eval_and_publish_immediately: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(job_name="temperature_control", unit=unit, experiment=experiment)

        if not is_HAT_present():
            self.logger.error("Pioreactor HAT must be present.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")

        if not is_heating_pcb_present():
            self.logger.error("Heating PCB must be attached to Pioreactor HAT.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Heating PCB must be attached to Pioreactor HAT.")

        if is_testing_env():
            self.logger.debug("TMP1075 not available; using MockTMP1075")
            from pioreactor.utils.mock import MockTMP1075 as TMP1075
        else:
            from TMP1075 import TMP1075  # type: ignore

        self.pwm = self.setup_pwm()
        self.update_heater(0)

        self.tmp_driver = TMP1075()
        self.read_external_temperature_timer = RepeatedTimer(
            45, self.read_external_temperature, run_immediately=False
        )
        self.read_external_temperature_timer.start()

        self.publish_temperature_timer = RepeatedTimer(
            4 * 60,
            self.evaluate_and_publish_temperature,
            run_immediately=eval_and_publish_immediately,
            run_after=60,
        )
        self.publish_temperature_timer.start()

        self.automation = AutomationDict(automation_name=automation_name, **kwargs)

        try:
            automation_class = self.automations[self.automation["automation_name"]]
        except KeyError:
            raise KeyError(
                f"Unable to find automation {self.automation['automation_name']}. Available automations are {list(self.automations.keys())}"
            )

        self.logger.info(f"Starting {self.automation}.")
        try:
            self.automation_job = automation_class(
                unit=self.unit, experiment=self.experiment, parent=self, **kwargs
            )
        except Exception as e:
            self.logger.error(e)
            self.logger.debug(e, exc_info=True)
            self.set_state(self.DISCONNECTED)
            raise e
        self.automation_name = self.automation["automation_name"]

        self.temperature = {
            "temperature": self.read_external_temperature(),
            "timestamp": current_utc_time(),
        }

    def turn_off_heater(self) -> None:
        self._update_heater(0)
        self.pwm.stop()
        self.pwm.cleanup()
        # we re-instantiate it as some other process may have messed with the channel.
        self.pwm = self.setup_pwm()
        self._update_heater(0)
        self.pwm.stop()

    def update_heater(self, new_duty_cycle: float) -> bool:
        """
        Update the heater's duty cycle. This function checks for the PWM lock, and will not
        update if the PWM is locked.

        Returns True if the update was made (i.e. no lock), else returns False.
        """
        if not self.pwm.is_locked():
            self._update_heater(new_duty_cycle)
            return True
        else:
            return False

    def update_heater_with_delta(self, delta_duty_cycle: float) -> bool:
        """
        Update the heater's duty cycle by `delta_duty_cycle`. This function checks for the
        PWM lock, and will not update if the PWM is locked.

        Returns True if the update was made (i.e. no lock), else returns False.
        """
        return self.update_heater(self.heater_duty_cycle + delta_duty_cycle)

    def read_external_temperature(self) -> float:
        """
        Read the current temperature from our sensor, in Celsius.
        """
        try:
            # checking the temp is fast, so do it twice to reduce variance.
            pcb_temp = 0.5 * (self.tmp_driver.get_temperature() + self.tmp_driver.get_temperature())
        except OSError:
            # could not find temp driver on i2c
            self.logger.error(
                "Is the Heating PCB attached to the Pioreactor HAT? Unable to find I²C for temperature driver."
            )
            raise exc.HardwareNotFoundError(
                "Is the Heating PCB attached to the Pioreactor HAT? Unable to find I²C for temperature driver."
            )

        self._check_if_exceeds_max_temp(pcb_temp)
        return pcb_temp

    ##### internal and private methods ########

    def set_automation(self, new_temperature_automation_json) -> None:
        # TODO: this needs a better rollback. Ex: in except, something like
        # self.automation_job.set_state("init")
        # self.automation_job.set_state("ready")
        # OR should just bail...
        algo_metadata = AutomationDict(**loads(new_temperature_automation_json))

        try:
            self.automation_job.set_state("disconnected")
        except AttributeError:
            # sometimes the user will change the job too fast, before the automation job is created;
            # let's protect against that.
            sleep(1)
            self.set_automation(new_temperature_automation_json)

        # reset heater back to 0.
        self._update_heater(0)

        try:
            self.logger.info(f"Starting {algo_metadata}.")
            self.automation_job = self.automations[algo_metadata["automation_name"]](
                unit=self.unit, experiment=self.experiment, parent=self, **algo_metadata
            )
            self.automation = algo_metadata
            self.automation_name = algo_metadata["automation_name"]

        except KeyError:
            self.logger.debug(
                f"Unable to find automation {algo_metadata['automation_name']}. Available automations are {list(self.automations.keys())}",
                exc_info=True,
            )
            self.logger.warning(
                f"Unable to find automation {algo_metadata['automation_name']}. Available automations are {list(self.automations.keys())}"
            )
        except Exception as e:
            self.logger.debug(f"Change failed because of {str(e)}", exc_info=True)
            self.logger.warning(f"Change failed because of {str(e)}")

    def _update_heater(self, new_duty_cycle: float) -> None:
        self.heater_duty_cycle = round(float(new_duty_cycle), 5)
        self.pwm.change_duty_cycle(self.heater_duty_cycle)

    def _check_if_exceeds_max_temp(self, temp: float) -> None:
        if temp > self.MAX_TEMP_TO_SHUTDOWN:
            self.logger.error(
                f"Temperature of heating surface has exceeded {self.MAX_TEMP_TO_SHUTDOWN}℃ - currently {temp} ℃. This is beyond our recommendations. Shutting down Raspberry Pi to prevent further problems. Take caution when touching the heating surface and wetware."
            )

            from subprocess import call

            call("sudo shutdown --poweroff", shell=True)

        elif temp > self.MAX_TEMP_TO_DISABLE_HEATING:
            self.publish(
                f"pioreactor/{self.unit}/{self.experiment}/monitor/flicker_led_with_error_code",
                error_codes.PCB_TEMPERATURE_TOO_HIGH,
            )
            self.logger.warning(
                f"Temperature of heating surface has exceeded {self.MAX_TEMP_TO_DISABLE_HEATING}℃ - currently {temp} ℃. This is beyond our recommendations. The heating PWM channel will be forced to 0. Take caution when touching the heating surface and wetware."
            )
            self._update_heater(0)

        elif temp > self.MAX_TEMP_TO_REDUCE_HEATING:
            self.publish(
                f"pioreactor/{self.unit}/{self.experiment}/monitor/flicker_led_with_error_code",
                error_codes.PCB_TEMPERATURE_TOO_HIGH,
            )
            self.logger.debug(
                f"Temperature of heating surface has exceeded {self.MAX_TEMP_TO_REDUCE_HEATING}℃ - currently {temp} ℃. This is close to our maximum recommended value. The heating PWM channel will be reduced to 90% of its current value. Take caution when touching the heating surface and wetware."
            )
            self._update_heater(self.heater_duty_cycle * 0.9)

    def on_sleeping(self) -> None:
        self.automation_job.set_state(self.SLEEPING)

    def on_sleeping_to_ready(self) -> None:
        self.automation_job.set_state(self.READY)

    def on_disconnected(self) -> None:
        try:
            self.automation_job.set_state(self.DISCONNECTED)
        except AttributeError:
            # if disconnect is called right after starting, the automation job isn't instantiated yet
            pass

        try:
            self.read_external_temperature_timer.cancel()
            self.publish_temperature_timer.cancel()
        except AttributeError:
            pass

        try:
            self._update_heater(0)
            self.pwm.stop()
            self.pwm.cleanup()
        except AttributeError:
            pass

    def setup_pwm(self) -> PWM:
        hertz = 1
        pin = PWM_TO_PIN[config.get("PWM_reverse", "heating")]
        pwm = PWM(pin, hertz)
        pwm.start(0)
        return pwm

    def evaluate_and_publish_temperature(self) -> None:
        """
        1. lock the PWM and turn off the heater
        2. start recording temperatures from the sensor
        3. after collecting M samples, pass them to a model to approximate the temperature
        4. assign the temperature and publish to ../temperature
        5. return the heater to its previous DC value and unlock the PWM
        """
        assert not self.pwm.is_locked(), "PWM is locked - it shouldn't be though!"
        with self.pwm.lock_temporarily():

            previous_heater_dc = self.heater_duty_cycle
            self._update_heater(0)

            # we pause heating for (N_sample_points * time_between_samples) seconds
            N_sample_points = 30
            time_between_samples = 5

            features = {}
            features["prev_temp"] = self.temperature["temperature"] if self.temperature else None
            features["previous_heater_dc"] = previous_heater_dc

            time_series_of_temp = []
            for i in range(N_sample_points):
                time_series_of_temp.append(self.read_external_temperature())
                sleep(time_between_samples)

                if self.state != self.READY:
                    # if our state changes in this loop, exit.
                    return

            features["time_series_of_temp"] = time_series_of_temp

            self.logger.debug(features)

            # update heater first, before publishing the temperature. Why? A downstream process
            # might listen for the updated temperature and update the heater (pid_stable),
            # and if we update here too late, we may overwrite their changes.
            # We also want to remove the lock first, so close this context early.
            self._update_heater(previous_heater_dc)

        try:
            approximated_temperature = self.approximate_temperature(features)
        except Exception as e:
            self.logger.debug(e, exc_info=True)
            self.logger.error(e)
            return  # without an estimate, keep the previously published temperature

        self.temperature = {
            "temperature": approximated_temperature,
            "timestamp": current_utc_time(),
        }

    def approximate_temperature(self, features: dict[str, Any]) -> float:
        """
        models

            temp = b * exp(p * t) + c * exp(q * t) + ROOM_TEMP

        Reference
        -------------
        https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
        pages 71 - 72

        It may be possible to determine whether the vial is in place using the heat-loss coefficient.
        Quick look: when the vial is in, the coefficient is ~ -0.008; when it's not, ~ -0.028.
        """

        if features["previous_heater_dc"] == 0:
            return features["time_series_of_temp"][-1]

        import numpy as np
        from numpy import exp

        ROOM_TEMP = 10.0  # ??

        times_series = features["time_series_of_temp"]

        n = len(times_series)
        y = np.array(times_series) - ROOM_TEMP
        x = np.arange(n)  # scaled by factor of 1/10 seconds

        S = np.zeros(n)
        SS = np.zeros(n)
        for i in range(1, n):
            S[i] = S[i - 1] + 0.5 * (y[i - 1] + y[i]) * (x[i] - x[i - 1])
            SS[i] = SS[i - 1] + 0.5 * (S[i - 1] + S[i]) * (x[i] - x[i - 1])

        # first regression
        M1 = np.array(
            [
                [(SS**2).sum(), (SS * S).sum(), (SS * x).sum(), (SS).sum()],
                [(SS * S).sum(), (S**2).sum(), (S * x).sum(), (S).sum()],
                [(SS * x).sum(), (S * x).sum(), (x**2).sum(), (x).sum()],
                [(SS).sum(), (S).sum(), (x).sum(), n],
            ]
        )
        Y1 = np.array([(y * SS).sum(), (y * S).sum(), (y * x).sum(), y.sum()])

        try:
            A, B, _, _ = np.linalg.solve(M1, Y1)
        except np.linalg.LinAlgError:
            self.logger.error("Error in first regression.")
            self.logger.debug(f"x={x}")
            self.logger.debug(f"y={y}")
            return features["prev_temp"]

        if (B**2 + 4 * A) < 0:
            # something went wrong in the data collection - the data doesn't look enough like a sum of two exponentials
            self.logger.error(f"Error in regression: {(B ** 2 + 4 * A)=} < 0")
            self.logger.debug(f"x={x}")
            self.logger.debug(f"y={y}")
            return features["prev_temp"]

        p = 0.5 * (B + np.sqrt(B**2 + 4 * A))
        q = 0.5 * (B - np.sqrt(B**2 + 4 * A))

        # second regression
        M2 = np.array(
            [
                [exp(2 * p * x).sum(), exp((p + q) * x).sum()],
                [exp((q + p) * x).sum(), exp(2 * q * x).sum()],
            ]
        )
        Y2 = np.array([(y * exp(p * x)).sum(), (y * exp(q * x)).sum()])

        try:
            b, c = np.linalg.solve(M2, Y2)
        except np.linalg.LinAlgError:
            self.logger.error("Error in second regression.")
            self.logger.debug(f"x={x}")
            self.logger.debug(f"y={y}")
            return features["prev_temp"]

        if abs(p) < abs(q):
            # since the regression can have identifiability problems, we use our domain knowledge
            # to choose the pair that has the lower heat-transfer coefficient.
            alpha, beta = b, p
        else:
            alpha, beta = c, q

        self.logger.debug(f"{b=}, {c=}, {p=}, {q=}")

        temp_at_start_of_obs = ROOM_TEMP + alpha * exp(beta * 0)
        temp_at_end_of_obs = ROOM_TEMP + alpha * exp(beta * n)

        # the estimate is weighted because I trust the predicted temperature at the start of the
        # observation window more than the predicted temperature at the end.
        return 2 / 3 * temp_at_start_of_obs + 1 / 3 * temp_at_end_of_obs