def __init__(self, max_od: float, **kwargs) -> None:
    super(TrackOD, self).__init__(**kwargs)
    assert max_od is not None, "max_od should be set"
    self.max_od = max_od
    self.white_light = config.get("leds", "white_light")  # TODO: update to new led config.ini syntax
    self.set_led_intensity(self.white_light, 0)
def setup_pwm(self) -> PWM:
    hertz = 1
    pin = PWM_TO_PIN[config.get("PWM_reverse", "heating")]
    pwm = PWM(pin, hertz)
    pwm.start(0)
    return pwm
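# For context, a sketch of the config lookup used by setup_pwm() above, assuming the
# usual Pioreactor config.ini layout (illustrative values, not authoritative): a [PWM]
# section maps PWM channels to roles, and the "PWM_reverse" view inverts that mapping,
# so config.get("PWM_reverse", "heating") returns the channel, e.g. "5", which
# PWM_TO_PIN then translates to the GPIO pin driving that channel.
#
#   [PWM]
#   1=stirring
#   2=media
#   3=alt_media
#   4=waste
#   5=heating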
def __init__(self, duty_cycle: float = 100.0, hz: float = 150.0, **kwargs) -> None:
    super(ContinuousCycle, self).__init__(**kwargs)
    pin = PWM_TO_PIN[config.get("PWM_reverse", "media")]
    self.pwm = PWM(pin, hz)
    self.duty_cycle = duty_cycle
def stop_ir_led(self):
    if self.fake_data:
        return
    ir_channel = config.get("leds", "ir_led")
    led_intensity(ir_channel, intensity=0, unit=self.unit, experiment=self.experiment)
def __init__(
    self,
    target_rpm: float,
    unit: str,
    experiment: str,
    rpm_calculator: Optional[RpmCalculator],
    hertz: float = 150,
) -> None:
    super(Stirrer, self).__init__(job_name="stirring", unit=unit, experiment=experiment)
    self.logger.debug(f"Starting stirring with initial {target_rpm} RPM.")
    self.rpm_calculator = rpm_calculator

    if not hardware.is_HAT_present():
        self.logger.error("Pioreactor HAT must be present.")
        self.set_state(self.DISCONNECTED)
        raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")

    if (self.rpm_calculator is not None) and not hardware.is_heating_pcb_present():
        self.logger.error("Heating PCB must be present to measure RPM.")
        self.set_state(self.DISCONNECTED)
        raise exc.HardwareNotFoundError("Heating PCB must be present to measure RPM.")

    pin = hardware.PWM_TO_PIN[config.get("PWM_reverse", "stirring")]
    self.pwm = PWM(pin, hertz)
    self.pwm.lock()

    self.rpm_to_dc_lookup = self.initialize_rpm_to_dc_lookup()
    self.target_rpm = target_rpm
    self.duty_cycle = self.rpm_to_dc_lookup(self.target_rpm)

    # set up PID
    self.pid = PID(
        Kp=config.getfloat("stirring.pid", "Kp"),
        Ki=config.getfloat("stirring.pid", "Ki"),
        Kd=config.getfloat("stirring.pid", "Kd"),
        setpoint=self.target_rpm,
        unit=self.unit,
        experiment=self.experiment,
        job_name=self.job_name,
        target_name="rpm",
        output_limits=(-20, 20),  # avoid whiplashing
    )

    # set up thread to periodically check the rpm
    self.rpm_check_repeated_thread = RepeatedTimer(
        17,  # 17 and 5 are coprime
        self.poll_and_update_dc,
        job_name=self.job_name,
        run_immediately=True,
        run_after=5,
        poll_for_seconds=4,  # technically should be a function of the RPM: lower RPM, longer to get sufficient data.
    )
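# A minimal usage sketch for the Stirrer job above, assuming a unit with the HAT
# attached. Passing rpm_calculator=None skips RPM feedback: the duty cycle is set once
# from the lookup table and the PID never corrects it. start_stirring() and
# block_until_disconnected() are assumed from the surrounding BackgroundJob API.
from pioreactor.whoami import get_latest_experiment_name, get_unit_name

stirrer = Stirrer(
    target_rpm=500,
    unit=get_unit_name(),
    experiment=get_latest_experiment_name(),
    rpm_calculator=None,
)
stirrer.start_stirring()
stirrer.block_until_disconnected()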
def get_ir_channel_from_configuration(self) -> pt.LedChannel:
    try:
        return cast(pt.LedChannel, config.get("leds_reverse", IR_keyword))
    except Exception:
        self.logger.error(
            """`leds` section must contain an `IR` value. Ex:

            [leds]
            A=IR

            """
        )
        raise KeyError("`IR` value not found in section.")
def backup_database(output):
    """
    This action will create a backup of the SQLite3 database into the specified output. It
    then will try to scp the backup to any available worker Pioreactors as a further backup.

    A cronjob is set up as well to run this action every 12 hours.
    """
    import sqlite3
    from sh import scp, ErrorReturnCode

    def progress(status, remaining, total):
        logger.debug(f"Copied {total-remaining} of {total} pages.")

    logger.debug(f"Starting backup of database to {output}")

    con = sqlite3.connect(config.get("storage", "database"))
    bck = sqlite3.connect(output)

    with bck:
        con.backup(bck, pages=-1, progress=progress)

    bck.close()
    con.close()

    logger.debug(f"Completed backup of database to {output}. Attempting distributed backup...")

    n_backups = 2
    backups_complete = 0
    available_workers = get_active_workers_in_inventory()

    while (backups_complete < n_backups) and (len(available_workers) > 0):
        backup_unit = available_workers.pop()
        if backup_unit == get_unit_name():
            continue

        try:
            scp(output, f"{backup_unit}:{output}")
        except ErrorReturnCode:
            logger.debug(
                f"Unable to backup database to {backup_unit}. Is it online?",
                exc_info=True,
            )
            logger.warning(f"Unable to backup database to {backup_unit}.")
        else:
            logger.debug(f"Backed up database to {backup_unit}:{output}.")
            backups_complete += 1

    return
def start_ir_led(self):
    ir_channel = config.get("leds", "ir_led")
    r = led_intensity(
        ir_channel,
        intensity=100,
        source_of_event=self.job_name,
        unit=self.unit,
        experiment=self.experiment,
    )
    if not r:
        raise ValueError("IR LED could not be started. Stopping OD reading.")

    time.sleep(0.25)  # give LED a moment to get to max value
    return
def __init__(self, **kwargs) -> None:
    super(FlashUV, self).__init__(**kwargs)
    self.uv_led = config.get("leds_reverse", "uv")
    self.set_led_intensity(self.uv_led, 0)
def backup_database(output_file: str) -> None:
    """
    This action will create a backup of the SQLite3 database into the specified output. It
    then will try to copy the backup to any available worker Pioreactors as a further backup.

    This job actually consumes _a lot_ of resources, and I've seen the LED output drop
    due to this running. See issue #81. For now, we will skip the backup if `od_reading`
    is running.

    Elsewhere, a cronjob is set up as well to run this action every N days.
    """
    import sqlite3
    from sh import ErrorReturnCode, rsync  # type: ignore

    unit = get_unit_name()
    experiment = UNIVERSAL_EXPERIMENT

    with publish_ready_to_disconnected_state(unit, experiment, "backup_database"):
        logger = create_logger("backup_database", experiment=experiment, unit=unit)

        if is_pio_job_running("od_reading"):
            logger.warning("Won't run if OD Reading is running. Exiting")
            return

        def progress(status: int, remaining: int, total: int) -> None:
            logger.debug(f"Copied {total-remaining} of {total} SQLite3 pages.")

        logger.debug(f"Starting backup of database to {output_file}")
        sleep(1)  # pause a second so the log entry above gets recorded into the DB.

        con = sqlite3.connect(config.get("storage", "database"))
        bck = sqlite3.connect(output_file)

        with bck:
            con.backup(bck, pages=-1, progress=progress)

        bck.close()
        con.close()

        with local_persistant_storage("database_backups") as cache:
            cache["latest_backup_timestamp"] = current_utc_time()

        logger.info("Completed backup of database.")

        n_backups = config.getint("number_of_backup_replicates_to_workers", fallback=2)
        backups_complete = 0
        available_workers = list(get_active_workers_in_inventory())

        while (backups_complete < n_backups) and (len(available_workers) > 0):
            backup_unit = available_workers.pop()
            if backup_unit == get_unit_name():
                continue

            try:
                rsync(
                    "-hz",
                    "--partial",
                    "--inplace",
                    output_file,
                    f"{backup_unit}:{output_file}",
                )
            except ErrorReturnCode:
                logger.debug(
                    f"Unable to backup database to {backup_unit}. Is it online?",
                    exc_info=True,
                )
                logger.warning(f"Unable to backup database to {backup_unit}. Is it online?")
            else:
                logger.debug(f"Backed up database to {backup_unit}:{output_file}.")
                backups_complete += 1

        return
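# A hedged usage sketch for the action above; the output path is illustrative, and the
# `pio run` invocation assumes the standard Pioreactor CLI wiring for this action:
#
#   pio run backup_database --output /home/pi/.pioreactor/storage/pioreactor.sqlite.backup
#
backup_database("/home/pi/.pioreactor/storage/pioreactor.sqlite.backup")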
return ODReader(
    channel_angle_map,
    interval=sampling_rate,
    unit=unit,
    experiment=experiment,
    adc_reader=ADCReader(channels=channels, fake_data=fake_data, interval=sampling_rate),
    ir_led_reference_tracker=ir_led_reference_tracker,
)


@click.command(name="od_reading")
@click.option(
    "--od-angle-channel1",
    default=config.get("od_config.photodiode_channel", "1", fallback=None),
    type=click.STRING,
    show_default=True,
    help="specify the angle(s) between the IR LED(s) and the PD in channel 1, separated by commas. Don't specify if channel is empty.",
)
@click.option(
    "--od-angle-channel2",
    default=config.get("od_config.photodiode_channel", "2", fallback=None),
    type=click.STRING,
    show_default=True,
    help="specify the angle(s) between the IR LED(s) and the PD in channel 2, separated by commas. Don't specify if channel is empty.",
)
@click.option("--fake-data", is_flag=True, help="produce fake data (for testing)")
def click_od_reading(
    od_angle_channel1: pt.PdAngle, od_angle_channel2: pt.PdAngle, fake_data: bool
):
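# An illustrative invocation of the two-channel CLI defined above, assuming the
# standard `pio run` entrypoint; the angles are examples, not defaults. Pass
# --fake-data to generate synthetic readings when no hardware is attached:
#
#   pio run od_reading --od-angle-channel1 90 --od-angle-channel2 135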
ODReader(
    channel_label_map,
    sampling_rate=sampling_rate,
    unit=unit,
    experiment=experiment,
    fake_data=fake_data,
)

signal.pause()


@click.command(name="od_reading")
@click.option(
    "--od-angle-channel",
    multiple=True,
    default=config.get("od_config.photodiode_channel", "od_angle_channel").split("|"),
    type=click.STRING,
    show_default=True,
    help="""
pair of angle,channel for optical density reading. Can be invoked multiple times. Ex:

--od-angle-channel 135,0 --od-angle-channel 90,1 --od-angle-channel 45,3

""",
)
@click.option("--fake-data", is_flag=True, help="produce fake data (for testing)")
def click_od_reading(od_angle_channel, fake_data):
    """
    Start the optical density reading job
    """
def __init__(self, **kwargs):
    super(FlashUV, self).__init__(**kwargs)
    self.uv_led = config.get("leds", "uv380")
    self.set_led_intensity(self.uv_led, 0)
def __init__(self, **kwargs):
    super(TrackOD, self).__init__(**kwargs)
    self.white_light = config.get("leds", "white_light")
    # set luminosity to 10% initially
    self.set_led_intensity(self.white_light, 0.1)
def pump(
    unit: str,
    experiment: str,
    pump_name: str,
    ml: Optional[float] = None,
    duration: Optional[float] = None,
    source_of_event: Optional[str] = None,
    calibration: Optional[dict] = None,
    continuously: bool = False,
):
    """
    Parameters
    ------------
    pump_name: one of "media", "alt_media", "waste"
    calibration: specify a calibration for the dosing. Should be a dict
        with fields "duration_", "hz", "dc", and "bias_"

    Returns
    -----------
    Amount of volume passed (approximate in some cases)
    """
    action_name = {
        "media": "add_media",
        "alt_media": "add_alt_media",
        "waste": "remove_waste",
    }[pump_name]
    logger = create_logger(action_name)

    with utils.publish_ready_to_disconnected_state(
        unit, experiment, action_name
    ) as exit_event:

        assert (
            (ml is not None) or (duration is not None) or continuously
        ), "either ml, duration, or continuously must be set"
        assert not ((ml is not None) and (duration is not None)), "Only select ml or duration"

        if calibration is None:
            with utils.local_persistant_storage("pump_calibration") as cache:
                try:
                    calibration = loads(cache[f"{pump_name}_ml_calibration"])
                except KeyError:
                    logger.error("Calibration not defined. Run pump calibration first.")
                    return 0.0

        try:
            GPIO_PIN = PWM_TO_PIN[config.get("PWM_reverse", pump_name)]
        except NoOptionError:
            logger.error(f"Add `{pump_name}` to the `PWM` section of config_{unit}.ini.")
            return 0.0

        if ml is not None:
            assert ml >= 0, "ml should be greater than or equal to 0"
            duration = utils.pump_ml_to_duration(
                ml, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(ml, 2)}mL")
        elif duration is not None:
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(duration, 2)}s")
        elif continuously:
            duration = 600
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info("Running pump continuously.")

        assert isinstance(ml, (float, int))
        assert isinstance(duration, (float, int))
        assert duration >= 0, "duration should be greater than or equal to 0"
        if duration == 0:
            return 0.0

        # publish this first, as downstream jobs need to know about it.
        json_output = dumps(
            {
                "volume_change": ml,
                "event": action_name,
                "source_of_event": source_of_event,
                "timestamp": current_utc_time(),
            }
        )
        publish(
            f"pioreactor/{unit}/{experiment}/dosing_events",
            json_output,
            qos=QOS.EXACTLY_ONCE,
        )

        try:
            pwm = PWM(GPIO_PIN, calibration["hz"])
            pwm.lock()

            with catchtime() as delta_time:
                pwm.start(calibration["dc"])

            pump_start_time = time.time()
            exit_event.wait(max(0, duration - delta_time()))

            if continuously:
                while not exit_event.wait(duration):
                    publish(
                        f"pioreactor/{unit}/{experiment}/dosing_events",
                        json_output,
                        qos=QOS.EXACTLY_ONCE,
                    )

        except SystemExit:
            # a SigInt, SigKill occurred
            pass
        except Exception as e:
            # some other unexpected error
            logger.debug(e, exc_info=True)
            logger.error(e)
        finally:
            pwm.stop()
            pwm.cleanup()
            if continuously:
                logger.info(f"Stopping {pump_name} pump.")

            if exit_event.is_set():
                # ended early for some reason
                shortened_duration = time.time() - pump_start_time
                ml = utils.pump_duration_to_ml(
                    shortened_duration, calibration["duration_"], calibration["bias_"]
                )

        return ml
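# A minimal usage sketch for pump() above, assuming a media pump calibration was
# already saved by the pump calibration routine; values are illustrative.
from pioreactor.whoami import get_latest_experiment_name, get_unit_name

ml_moved = pump(
    unit=get_unit_name(),
    experiment=get_latest_experiment_name(),
    pump_name="media",
    ml=1.0,  # dose 1 mL; run time is derived from the saved calibration
    source_of_event="manual",
)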