Example #1
 def __setattr__(self, name, value) -> None:
     super(LEDAutomation, self).__setattr__(name, value)
     if name in self.published_settings and name != "state":
         # a published setting changed: close out the previous settings
         # interval, publish it, then open a new (open-ended) interval.
         self._latest_settings_ended_at = current_utc_time()
         self._send_details_to_mqtt()
         self._latest_settings_started_at = current_utc_time()
         self._latest_settings_ended_at = None
Example #2
 def __setattr__(self, name, value) -> None:
     super(DosingAutomation, self).__setattr__(name, value)
     if name in self.published_settings and name not in [
             "state",
             "alt_media_fraction",
             "media_throughput",
             "alt_media_throughput",
     ]:
         self._latest_settings_ended_at = current_utc_time()
         self._send_details_to_mqtt()
         self._latest_settings_started_at = current_utc_time()
         self._latest_settings_ended_at = None
Example #3
    def poll(self, poll_for_seconds: float) -> Optional[float]:
        """
        Returns an RPM, or None if not measuring RPM.
        """
        if self.rpm_calculator is None:
            return None

        recent_rpm = self.rpm_calculator(poll_for_seconds)
        if recent_rpm == 0:
            # TODO: attempt to restart stirring
            self.publish(
                f"pioreactor/{self.unit}/{self.experiment}/monitor/flicker_led_with_error_code",
                error_codes.STIRRING_FAILED_ERROR_CODE,
            )
            self.logger.warning("Stirring RPM is 0 - has it failed?")

        if self._measured_rpm is not None:
            # use a simple EMA, alpha chosen arbitrarily, but should be a function of delta time.
            self._measured_rpm = 0.025 * self._measured_rpm + 0.975 * recent_rpm
        else:
            self._measured_rpm = recent_rpm

        self.measured_rpm = {
            "timestamp": current_utc_time(),
            "rpm": self._measured_rpm
        }
        return self._measured_rpm
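
The comment above notes the EMA weight should really be a function of the elapsed time between polls. A minimal sketch of a time-aware smoothing factor, assuming exponential decay with a hypothetical time constant tau (not part of the original code):

import math

def ema_alpha(delta_seconds: float, tau: float = 60.0) -> float:
    # weight given to the *previous* measurement; decays toward 0 as the
    # gap between polls grows, so stale estimates are discounted faster.
    return math.exp(-delta_seconds / tau)

# inside poll(), this would replace the fixed 0.025/0.975 weights:
#   alpha = ema_alpha(poll_for_seconds)
#   self._measured_rpm = alpha * self._measured_rpm + (1 - alpha) * recent_rpm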
Example #4
    def record_and_publish_from_adc(self) -> None:

        if self.first_od_obs_time is None:
            self.first_od_obs_time = time()

        pre_duration = 0.01  # turn on LED prior to taking snapshot and wait

        # we put a soft lock on the LED channels - it's up to the
        # other jobs to make sure they check the locks.
        with change_leds_intensities_temporarily(
            ALL_LED_CHANNELS,
            [0.0, 0.0, 0.0, 0.0],
            unit=self.unit,
            experiment=self.experiment,
            source_of_event=self.job_name,
            pubsub_client=self.pub_client,
            verbose=False,
        ):
            with lock_leds_temporarily(self.non_ir_led_channels):

                self.start_ir_led()
                sleep(pre_duration)

                timestamp_of_readings = current_utc_time()
                batched_readings = self.adc_reader.take_reading()

        self.latest_reading = batched_readings
        self.ir_led_reference_tracker.update(batched_readings)

        self.publish_single(batched_readings, timestamp_of_readings)
        self.publish_batch(batched_readings, timestamp_of_readings)
Example #5
def publish_to_pioreactor_cloud(endpoint: str, data=None, json=None):
    """
    Parameters
    ------------
    endpoint: the endpoint to send the data to
    data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body.
    json: (optional) json data to send in the body.

    """
    from pioreactor.mureq import post
    from pioreactor.whoami import get_uuid, is_testing_env
    from pioreactor.utils.timing import current_utc_time

    if is_testing_env():
        return

    if json is not None:
        json["rpi_uuid"] = get_uuid()
        json["timestamp"] = current_utc_time()

    headers = {"Content-type": "application/json", "Accept": "text/plain"}
    try:
        post(
            f"https://cloud.pioreactor.com/{endpoint}",
            data=data,
            json=json,
            headers=headers,
        )
    except Exception:
        pass
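
A minimal usage sketch of the function above; the endpoint name and payload are hypothetical:

publish_to_pioreactor_cloud(
    "calibrations",  # hypothetical endpoint name
    json={"event": "stirring_calibration_complete"},
)
# rpi_uuid and timestamp are appended to the json payload automatically,
# and any network error is swallowed silently.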
Example #6
    def on_disconnected(self) -> None:
        self._latest_settings_ended_at = current_utc_time()
        self._send_details_to_mqtt()

        with suppress(AttributeError):
            self.run_thread.join()

        for channel in self.edited_channels:
            led_intensity(channel, 0, unit=self.unit, experiment=self.experiment)
Example #7
 def parse_kalman_filter_outputs(topic, payload) -> dict:
     metadata, _ = m2db.produce_metadata(topic)
     payload = json.loads(payload)
     return {
         "experiment": metadata.experiment,
         "pioreactor_unit": metadata.pioreactor_unit,
         "timestamp": current_utc_time(),
         "state": json.dumps(payload["state"]),
         "covariance_matrix": json.dumps(payload["covariance_matrix"]),
     }
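
For illustration, a sketch of the expected input and output shapes; the topic and numbers below are made up:

# assuming a payload like
#   {"state": [0.01, 1.05], "covariance_matrix": [[0.1, 0.0], [0.0, 0.1]]}
# arriving on a topic such as
#   pioreactor/pio1/my_exp/growth_rate_calculating/kalman_filter_outputs
# the function returns a row-ready dict:
#   {"experiment": "my_exp", "pioreactor_unit": "pio1", "timestamp": "...",
#    "state": "[0.01, 1.05]", "covariance_matrix": "[[0.1, 0.0], [0.0, 0.1]]"}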
Example #8
    def evaluate_and_publish_temperature(self) -> None:
        """
        1. lock PWM and turn off heater
        2. start recording temperatures from the sensor
        3. After collected M samples, pass to a model to approx temp
        4. assign temp to publish to ../temperature
        5. return heater to previous DC value and unlock heater
        """
        assert not self.pwm.is_locked(), "PWM is locked - it shouldn't be though!"
        with self.pwm.lock_temporarily():

            previous_heater_dc = self.heater_duty_cycle
            self._update_heater(0)

            # we pause heating for (N_sample_points * time_between_samples) seconds
            N_sample_points = 30
            time_between_samples = 5

            features = {}
            features["prev_temp"] = (self.temperature["temperature"]
                                     if self.temperature else None)
            features["previous_heater_dc"] = previous_heater_dc

            time_series_of_temp = []
            for i in range(N_sample_points):
                time_series_of_temp.append(self.read_external_temperature())
                sleep(time_between_samples)

                if self.state != self.READY:
                    # if our state changes in this loop, exit.
                    return

            features["time_series_of_temp"] = time_series_of_temp

            self.logger.debug(features)

            # update heater first, before publishing the temperature. Why? A downstream process
            # might listen for the updating temperature, and update the heater (pid_stable),
            # and if we update here too late, we may overwrite their changes.
            # We also want to remove the lock first, so close this context early.
            self._update_heater(previous_heater_dc)

        try:
            approximated_temperature = self.approximate_temperature(features)
        except Exception as e:
            self.logger.debug(e, exc_info=True)
            self.logger.error(e)
            return  # without a temperature estimate, there is nothing to publish.

        self.temperature = {
            "temperature": approximated_temperature,
            "timestamp": current_utc_time(),
        }
Example #9
    def json_record(self, message: str, extra: dict, record: logging.LogRecord) -> dict:
        extra["message"] = message

        # Include builtins
        extra["level"] = record.levelname
        extra["task"] = record.name
        extra["timestamp"] = current_utc_time()
        extra["rpi_uuid"] = get_uuid()

        if record.exc_info:
            extra["message"] += "\n" + self.formatException(record.exc_info)

        return extra
Example #10
def save_config_files_to_db(units: tuple[str, ...], shared: bool,
                            specific: bool) -> None:
    import sqlite3

    conn = sqlite3.connect(config["storage"]["database"])
    cur = conn.cursor()

    timestamp = current_utc_time()
    sql = "INSERT INTO config_files(timestamp,filename,data) VALUES(?,?,?)"

    if specific:
        for unit in units:
            with open(f"/home/pi/.pioreactor/config_{unit}.ini") as f:
                cur.execute(sql, (timestamp, f"config_{unit}.ini", f.read()))

    if shared:
        with open("/home/pi/.pioreactor/config.ini") as f:
            cur.execute(sql, (timestamp, "config.ini", f.read()))

    conn.commit()
    conn.close()
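
The INSERT above assumes a config_files table already exists. A plausible schema (an assumption, not taken from the source) could be created with:

cur.execute(
    "CREATE TABLE IF NOT EXISTS config_files (timestamp TEXT, filename TEXT, data TEXT)"
)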
Example #11
def backup_database(output_file: str) -> None:
    """
    This action will create a backup of the SQLite3 database into specified output. It then
    will try to copy the backup to any available worker Pioreactors as a further backup.

    This job actually consumes _a lot_ of resources, and I've seen the LED output
    drop due to this running. See issue #81. For now, we will skip the backup if `od_reading` is running.

    Elsewhere, a cronjob is set up as well to run this action every N days.
    """

    import sqlite3
    from sh import ErrorReturnCode, rsync  # type: ignore

    unit = get_unit_name()
    experiment = UNIVERSAL_EXPERIMENT

    with publish_ready_to_disconnected_state(unit, experiment,
                                             "backup_database"):

        logger = create_logger("backup_database",
                               experiment=experiment,
                               unit=unit)

        if is_pio_job_running("od_reading"):
            logger.warning("Won't run if OD Reading is running. Exiting")
            return

        def progress(status: int, remaining: int, total: int) -> None:
            logger.debug(f"Copied {total-remaining} of {total} SQLite3 pages.")
            logger.debug(f"Writing to local backup {output_file}.")

        logger.debug(f"Starting backup of database to {output_file}")
        sleep(1)  # pause a second so the log entry above gets recorded into the DB.

        con = sqlite3.connect(config.get("storage", "database"))
        bck = sqlite3.connect(output_file)

        with bck:
            con.backup(bck, pages=-1, progress=progress)

        bck.close()
        con.close()

        with local_persistant_storage("database_backups") as cache:
            cache["latest_backup_timestamp"] = current_utc_time()

        logger.info("Completed backup of database.")

        n_backups = config.getint(
            "storage", "number_of_backup_replicates_to_workers", fallback=2
        )  # section name is an assumption; ConfigParser.getint requires a section and an option.
        backups_complete = 0
        available_workers = list(get_active_workers_in_inventory())

        while (backups_complete < n_backups) and (len(available_workers) > 0):
            backup_unit = available_workers.pop()
            if backup_unit == get_unit_name():
                continue

            try:
                rsync(
                    "-hz",
                    "--partial",
                    "--inplace",
                    output_file,
                    f"{backup_unit}:{output_file}",
                )
            except ErrorReturnCode:
                logger.debug(
                    f"Unable to backup database to {backup_unit}. Is it online?",
                    exc_info=True,
                )
                logger.warning(
                    f"Unable to backup database to {backup_unit}. Is it online?"
                )
            else:
                logger.debug(
                    f"Backed up database to {backup_unit}:{output_file}.")
                backups_complete += 1

        return
Example #12
def stirring_calibration(min_dc: int, max_dc: int) -> None:

    unit = get_unit_name()
    experiment = get_latest_testing_experiment_name()
    action_name = "stirring_calibration"
    logger = create_logger(action_name)

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        logger.info("Starting stirring calibration.")

        if is_pio_job_running("stirring"):
            logger.error(
                "Make sure Stirring job is off before running stirring calibration. Exiting."
            )
            return

        measured_rpms = []

        # go up and down to observe any hysteresis.
        dcs = (list(range(max_dc, min_dc, -3)) +
               list(range(min_dc, max_dc, 4)) +
               list(range(max_dc, min_dc, -5)))

        with stirring.RpmFromFrequency() as rpm_calc, stirring.Stirrer(
                target_rpm=0,
                unit=unit,
                experiment=experiment,
                rpm_calculator=None,
        ) as st:

            st.duty_cycle = dcs[0]
            st.start_stirring()
            time.sleep(8)
            n_samples = len(dcs)

            for count, dc in enumerate(dcs, start=1):
                st.set_duty_cycle(dc)
                time.sleep(8)
                rpm = rpm_calc(4)
                measured_rpms.append(rpm)
                logger.debug(f"Detected {rpm=} RPM @ {dc=}%")

                # log progress
                publish(
                    f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                    count / n_samples * 100,
                )
                logger.debug(f"Progress: {count/n_samples:.0%}")

        publish_to_pioreactor_cloud(action_name,
                                    json=dict(zip(dcs, measured_rpms)))
        logger.debug(list(zip(dcs, measured_rpms)))

        # drop any 0 RPM readings - the DC was too low to spin the stirrer.
        try:
            filtered_dcs, filtered_measured_rpms = zip(
                *filter(lambda d: d[1] > 0, zip(dcs, measured_rpms)))
        except ValueError:
            # the above can fail if all measured rpms are 0
            logger.error("No RPMs were measured. Is the stirring spinning?")
            return

        # since in practice, we want a look up from RPM -> required DC, we
        # set x=measure_rpms, y=dcs
        (rpm_coef, rpm_coef_std), (intercept, intercept_std) = simple_linear_regression(
            filtered_measured_rpms, filtered_dcs
        )
        logger.debug(
            f"{rpm_coef=}, {rpm_coef_std=}, {intercept=}, {intercept_std=}")

        if rpm_coef <= 0:
            logger.warning(
                "Something went wrong - detected negative correlation between RPM and stirring."
            )
            return

        if intercept <= 0:
            logger.warning(
                "Something went wrong - the intercept should be greater than 0."
            )
            return

        with local_persistant_storage(action_name) as cache:
            cache["linear_v1"] = json.dumps({
                "rpm_coef": rpm_coef,
                "intercept": intercept,
                "timestamp": current_utc_time(),
            })
            cache["stirring_calibration_data"] = json.dumps({
                "timestamp":
                current_utc_time(),
                "data": {
                    "dcs": dcs,
                    "measured_rpms": measured_rpms
                },
            })
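
Since the regression is fit with x=measured RPMs and y=duty cycles, the cached coefficients give the duty cycle required for a target RPM directly. A minimal lookup sketch, assuming the linear_v1 cache entry written above; the target RPM is hypothetical:

import json
from pioreactor import utils

with utils.local_persistant_storage("stirring_calibration") as cache:
    cal = json.loads(cache["linear_v1"])

target_rpm = 500  # hypothetical target
required_dc = cal["rpm_coef"] * target_rpm + cal["intercept"]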
Example #13
class TemperatureAutomation(BackgroundSubJob):
    """
    This is the super class that Temperature automations inherit from.
    The `execute` function, which is what subclasses will define, is updated every time a new temperature is recorded to MQTT.
    Temperatures are updated every 10 minutes.

    To change setting over MQTT:

    `pioreactor/<unit>/<experiment>/temperature_automation/<setting>/set` value

    """

    _latest_growth_rate: Optional[float] = None
    _latest_od: Optional[float] = None
    previous_od: Optional[float] = None
    previous_growth_rate: Optional[float] = None
    latest_od_timestamp: float = 0
    latest_growth_rate_timestamp: float = 0

    latest_temperature = None
    previous_temperature = None

    _latest_settings_started_at = current_utc_time()
    _latest_settings_ended_at = None
    automation_name = "temperature_automation_base"  # is overwritten in subclasses

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

        # this registers all subclasses of TemperatureAutomation back to TemperatureController, so the subclass
        # can be invoked in TemperatureController.
        if hasattr(cls, "automation_name") and cls.automation_name is not None:
            TemperatureController.automations[cls.automation_name] = cls

    def __init__(self, unit: str, experiment: str,
                 parent: TemperatureController, **kwargs) -> None:
        super(TemperatureAutomation,
              self).__init__(job_name="temperature_automation",
                             unit=unit,
                             experiment=experiment)

        self.temperature_control_parent = parent

        self.start_passive_listeners()

    def update_heater(self, new_duty_cycle: float) -> bool:
        """
        Update heater's duty cycle. This function checks for a lock on the PWM, and will not
        update if the PWM is locked.

        Returns true if the update was made (eg: no lock), else returns false
        """
        return self.temperature_control_parent.update_heater(new_duty_cycle)

    def is_heater_pwm_locked(self) -> bool:
        return self.temperature_control_parent.pwm.is_locked()

    def update_heater_with_delta(self, delta_duty_cycle: float) -> bool:
        """
        Update heater's duty cycle by value `delta_duty_cycle`. This function checks for a lock on the PWM, and will not
        update if the PWM is locked.

        Returns true if the update was made (eg: no lock), else returns false
        """
        return self.temperature_control_parent.update_heater_with_delta(
            delta_duty_cycle)

    def execute(self):
        raise NotImplementedError

    @property
    def most_stale_time(self) -> float:
        return min(self.latest_od_timestamp, self.latest_growth_rate_timestamp)

    @property
    def latest_growth_rate(self) -> float:
        # check if None
        if self._latest_growth_rate is None:
            # this should really only happen on the initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                "readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?"
            )

        return cast(float, self._latest_growth_rate)

    @property
    def latest_od(self) -> float:
        # check if None
        if self._latest_od is None:
            # this should really only happen on the initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                "readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?"
            )

        return cast(float, self._latest_od)

    ########## Private & internal methods

    def on_disconnected(self) -> None:
        self._latest_settings_ended_at = current_utc_time()
        self._send_details_to_mqtt()

    def __setattr__(self, name, value) -> None:
        super(TemperatureAutomation, self).__setattr__(name, value)
        if name in self.published_settings and name != "state":
            self._latest_settings_ended_at = current_utc_time()
            self._send_details_to_mqtt()
            self._latest_settings_started_at = current_utc_time()
            self._latest_settings_ended_at = None

    def _set_growth_rate(self, message) -> None:
        if not message.payload:
            return

        self.previous_growth_rate = self._latest_growth_rate
        self._latest_growth_rate = float(
            json.loads(message.payload)["growth_rate"])
        # record when the reading arrived; without this, most_stale_time stays 0
        # and the staleness checks above would always raise.
        self.latest_growth_rate_timestamp = time.time()

    def _set_temperature(self, message) -> None:
        if not message.payload:
            return

        self.previous_temperature = self.latest_temperature
        self.latest_temperature = float(
            json.loads(message.payload)["temperature"])

        if self.state == self.READY or self.state == self.INIT:
            self.execute()

    def _set_OD(self, message) -> None:
        if not message.payload:
            return

        self.previous_od = self._latest_od
        self._latest_od = float(json.loads(message.payload)["od_filtered"])
        self.latest_od_timestamp = time.time()

    def _send_details_to_mqtt(self) -> None:
        self.publish(
            f"pioreactor/{self.unit}/{self.experiment}/{self.job_name}/temperature_automation_settings",
            json.dumps({
                "pioreactor_unit": self.unit,
                "experiment": self.experiment,
                "started_at": self._latest_settings_started_at,
                "ended_at": self._latest_settings_ended_at,
                "automation": self.automation_name,
                "settings": json.dumps({
                    attr: getattr(self, attr, None)
                    for attr in self.published_settings if attr != "state"
                }),
            }),
            qos=QOS.EXACTLY_ONCE,
        )

    def start_passive_listeners(self) -> None:
        self.subscribe_and_callback(
            self._set_growth_rate,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/growth_rate",
            allow_retained=False,
        )

        self.subscribe_and_callback(
            self._set_temperature,
            f"pioreactor/{self.unit}/{self.experiment}/temperature_control/temperature",
            allow_retained=False,  # only use fresh data from Temp Control.
        )

        self.subscribe_and_callback(
            self._set_OD,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/od_filtered",
            allow_retained=False,
        )
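
A minimal sketch of changing a published setting over MQTT, following the topic pattern in the class docstring; the unit, experiment, and setting names here are hypothetical:

from pioreactor import pubsub

pubsub.publish(
    "pioreactor/pio1/my_experiment/temperature_automation/target_temperature/set",
    30.0,
)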
Example #14
 def on_disconnected(self) -> None:
     self._latest_settings_ended_at = current_utc_time()
     self._send_details_to_mqtt()
Example #15
def od_blank(
    od_angle_channel1,
    od_angle_channel2,
    n_samples: int = 30,
):
    """
    Compute the sample average of the photodiodes attached.

    Note that because of the sensitivity of the growth rate (and normalized OD) to the starting values,
    we need a very accurate estimate of these statistics.

    """
    from statistics import mean, variance

    action_name = "od_blank"
    logger = create_logger(action_name)
    unit = get_unit_name()
    experiment = get_latest_experiment_name()
    testing_experiment = get_latest_testing_experiment_name()
    logger.info("Starting reading of blank OD. This will take a few minutes.")

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        # running this will mess with OD Reading - best to just not let it happen.
        if (is_pio_job_running("od_reading")
                # but if test mode, ignore
                and not is_testing_env()):
            logger.error(
                "od_reading should not be running. Stop od_reading first. Exiting."
            )
            return

        # turn on stirring if not already on
        if not is_pio_job_running("stirring"):
            # start stirring
            st = start_stirring(
                target_rpm=config.getint("stirring", "target_rpm"),
                unit=unit,
                experiment=testing_experiment,
            )
            st.block_until_rpm_is_close_to_target()
        else:
            # TODO: it could be paused, we should make sure it's running
            ...

        sampling_rate = 1 / config.getfloat("od_config", "samples_per_second")

        # start od_reading
        start_od_reading(
            od_angle_channel1,
            od_angle_channel2,
            sampling_rate=sampling_rate,
            unit=unit,
            experiment=testing_experiment,
            fake_data=is_testing_env(),
        )

        def yield_from_mqtt():
            while True:
                msg = pubsub.subscribe(
                    f"pioreactor/{unit}/{testing_experiment}/od_reading/od_raw_batched"
                )
                yield json.loads(msg.payload)

        signal = yield_from_mqtt()
        readings = defaultdict(list)

        for count, batched_reading in enumerate(signal, start=1):
            for (channel, reading) in batched_reading["od_raw"].items():
                readings[channel].append(reading["voltage"])

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                count / n_samples * 100,
            )
            logger.debug(f"Progress: {count/n_samples:.0%}")
            if count == n_samples:
                break

        means = {}
        variances = {}
        autocorrelations = {}  # lag 1

        for channel, od_reading_series in readings.items():
            # measure the mean and publish. The mean will be used to normalize the readings in downstream jobs
            means[channel] = mean(od_reading_series)
            variances[channel] = variance(od_reading_series)
            autocorrelations[channel] = correlation(od_reading_series[:-1],
                                                    od_reading_series[1:])

            # warn users that a blank is 0 - maybe this should be an error instead? TODO: link this to a docs page.
            if means[channel] == 0.0:
                logger.warning(
                    f"OD reading for PD Channel {channel} is 0.0 - that shouldn't be. Is there a loose connection, or an extra channel in the configuration's [od_config.photodiode_channel] section?"
                )

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/od_blank/{channel}",
                json.dumps({
                    "timestamp": current_utc_time(),
                    "od_reading_v": means[channel]
                }),
            )

        # store locally as the source of truth.
        with local_persistant_storage(action_name) as cache:
            cache[experiment] = json.dumps(means)

        # publish to UI and database
        pubsub.publish(
            f"pioreactor/{unit}/{experiment}/{action_name}/mean",
            json.dumps(means),
            qos=pubsub.QOS.AT_LEAST_ONCE,
            retain=True,
        )

        if config.getboolean(
                "data_sharing_with_pioreactor",
                "send_od_statistics_to_Pioreactor",
                fallback=False,
        ):
            to_share = {"mean": means, "variance": variances}
            to_share["ir_intensity"] = config["od_config"]["ir_intensity"]
            to_share["od_angle_channel1"] = od_angle_channel1
            to_share["od_angle_channel2"] = od_angle_channel2
            pubsub.publish_to_pioreactor_cloud("od_blank_mean", json=to_share)

        logger.debug(f"measured mean: {means}")
        logger.debug(f"measured variances: {variances}")
        logger.debug(f"measured autocorrelations: {autocorrelations}")
        logger.debug("OD normalization finished.")

        return means
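
Downstream jobs can read the cached blank means back out of local storage. A minimal sketch, assuming the same cache name and experiment key written above:

from pioreactor import utils

with utils.local_persistant_storage("od_blank") as cache:
    if experiment in cache:
        blank_means = json.loads(cache[experiment])  # e.g. {"1": 0.001, "2": 0.0012}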
Example #16
def pump_calibration(min_duration: float, max_duration: float) -> None:

    unit = get_unit_name()
    experiment = get_latest_experiment_name()

    logger = create_logger("pump_calibration",
                           unit=unit,
                           experiment=experiment)
    logger.info("Starting pump calibration.")

    with publish_ready_to_disconnected_state(unit, experiment,
                                             "pump_calibration"):

        click.clear()
        click.echo()
        pump_name, execute_pump = which_pump_are_you_calibrating()

        hz, dc = choose_settings()

        setup(pump_name, execute_pump, hz, dc)
        durations, volumes = run_tests(execute_pump, hz, dc, min_duration,
                                       max_duration)

        (slope, std_slope), (bias, std_bias) = simple_linear_regression_with_forced_nil_intercept(
            durations, volumes
        )

        # check parameters for problems
        if slope < 0:
            logger.warning(
                "Slope is negative - you probably want to rerun this calibration..."
            )
        if slope / std_slope < 5.0:
            logger.warning(
                "Too much uncertainty in slope - you probably want to rerun this calibration..."
            )

        # save to cache
        with local_persistant_storage("pump_calibration") as cache:
            cache[f"{pump_name}_ml_calibration"] = json.dumps({
                "duration_":
                slope,
                "hz":
                hz,
                "dc":
                dc,
                "bias_":
                bias,
                "timestamp":
                current_utc_time(),
            })
            cache[f"{pump_name}_calibration_data"] = json.dumps({
                "timestamp":
                current_utc_time(),
                "data": {
                    "durations": durations,
                    "volumes": volumes
                },
            })

        logger.debug(
            f"slope={slope:0.2f} ± {std_slope:0.2f}, bias={bias:0.2f} ± {std_bias:0.2f}"
        )

        logger.debug(
            f"Calibration is best for volumes between {(slope * min_duration + bias):0.1f}mL to {(slope * max_duration + bias):0.1f}mL, but will be okay for slightly outside this range too."
        )
        logger.info("Finished pump calibration.")
Example #17
def led_intensity(
    channels: LedChannel | list[LedChannel],
    intensities: float | list[float],
    unit: str,
    experiment: str,
    verbose: bool = True,
    source_of_event: Optional[str] = None,
    pubsub_client: Optional[Client] = None,
) -> bool:
    """

    Parameters
    ------------
    channels: an LED channel or list of LED channels
    intensities: float or list of floats
        a value between 0 and 100 to set each LED channel to.
    verbose: bool
        if True, log the change, and send an event to the led_event table & MQTT. This is False
        in the od_reading job, so as to not create spam.
    pubsub_client:
        provide an MQTT paho client to use for publishing.

    Returns
    --------
    bool representing whether all LED channel intensities were successfully changed


    State is also updated in

    pioreactor/<unit>/<experiment>/led/<channel>/intensity   <intensity>

    and

    pioreactor/<unit>/<experiment>/leds/intensity    {'A': intensityA, 'B': intensityB, ...}

    """
    logger = create_logger("led_intensity", experiment=experiment, unit=unit)
    updated_successfully = True
    if not is_testing_env():
        from DAC43608 import DAC43608
    else:
        logger.debug("DAC43608 not available; using MockDAC43608")
        from pioreactor.utils.mock import MockDAC43608 as DAC43608  # type: ignore

    if pubsub_client is None:
        pubsub_client = create_client()

    channels, intensities = _list(channels), _list(intensities)

    if len(channels) != len(intensities):
        raise ValueError("channels must be the same length as intensities")

    # any locked channels?
    for channel in channels:
        if is_led_channel_locked(channel):
            updated_successfully = False
            logger.warning(
                f"Unable to update channel {channel} due to a lock on it. Please try again."
            )

    # remove locked channels:
    try:
        channels, intensities = zip(  # type: ignore
            *[
                (c, i)
                for c, i in zip(channels, intensities)
                if not is_led_channel_locked(c)
            ]
        )
    except ValueError:
        # if the only channel being updated is locked, the resulting error is a ValueError: not enough values to unpack (expected 2, got 0)
        return updated_successfully

    for channel, intensity in zip(channels, intensities):
        try:
            assert (
                0.0 <= intensity <= 100.0
            ), "intensity should be between 0 and 100, inclusive"
            assert (
                channel in ALL_LED_CHANNELS
            ), f"saw incorrect channel {channel}, not in {ALL_LED_CHANNELS}"
            intensity = float(intensity)

            dac = DAC43608()
            dac.power_up(getattr(dac, channel))
            dac.set_intensity_to(getattr(dac, channel), intensity / 100.0)

            if intensity == 0:
                # setting to 0 doesn't fully remove the current, there is some residual current. We turn off
                # the channel to guarantee no output.
                dac.power_down(getattr(dac, channel))

            pubsub_client.publish(
                f"pioreactor/{unit}/{experiment}/led/{channel}/intensity",
                intensity,
                qos=QOS.AT_MOST_ONCE,
                retain=True,
            )

        except ValueError as e:
            logger.debug(e, exc_info=True)
            logger.error(
                "Unable to find I²C for LED driver. Is the Pioreactor HAT attached to the Raspberry Pi? Is I²C enabled on the Raspberry Pi?"
            )
            updated_successfully = False
            return updated_successfully

    new_state, old_state = _update_current_state(channels, intensities)

    pubsub_client.publish(
        f"pioreactor/{unit}/{experiment}/leds/intensity",
        dumps(new_state),
        qos=QOS.AT_MOST_ONCE,
        retain=True,
    )

    if verbose:
        for channel, intensity in zip(channels, intensities):
            event = {
                "channel": channel,
                "intensity": intensity,
                "source_of_event": source_of_event,
                "timestamp": current_utc_time(),
            }

            pubsub_client.publish(
                f"pioreactor/{unit}/{experiment}/led_events",
                dumps(event),
                qos=QOS.AT_MOST_ONCE,
                retain=False,
            )

            logger.info(
                f"Updated LED {channel} from {old_state[channel]:0.3g}% to {new_state[channel]:0.3g}%."
            )

    return updated_successfully
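
A minimal usage sketch of led_intensity; the channel, values, and names below are hypothetical:

ok = led_intensity(
    channels=["B"],
    intensities=[50.0],
    unit="pio1",
    experiment="my_experiment",
    source_of_event="manual",
)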
Example #18
def pump(
    unit: str,
    experiment: str,
    pump_name: str,
    ml: Optional[float] = None,
    duration: Optional[float] = None,
    source_of_event: Optional[str] = None,
    calibration: Optional[dict] = None,
    continuously: bool = False,
):

    """

    Parameters
    ------------
    pump_name: one of "media", "alt_media", "waste"
    calibration:
        specify a calibration for the dosing. Should be a dict
        with fields "duration_", "hz", "dc", and "bias_"

    Returns
    -----------
    Amount of volume passed (approximate in some cases)


    """
    action_name = {
        "media": "add_media",
        "alt_media": "add_alt_media",
        "waste": "remove_waste",
    }[pump_name]
    logger = create_logger(action_name)
    with utils.publish_ready_to_disconnected_state(
        unit, experiment, action_name
    ) as exit_event:
        assert (
            (ml is not None) or (duration is not None) or continuously
        ), "either ml or duration must be set"
        assert not (
            (ml is not None) and (duration is not None)
        ), "Only select ml or duration"

        if calibration is None:
            with utils.local_persistant_storage("pump_calibration") as cache:
                try:
                    calibration = loads(cache[f"{pump_name}_ml_calibration"])
                except KeyError:
                    logger.error("Calibration not defined. Run pump calibration first.")
                    return 0.0

        try:
            GPIO_PIN = PWM_TO_PIN[config.get("PWM_reverse", pump_name)]
        except NoOptionError:
            logger.error(f"Add `{pump_name}` to `PWM` section to config_{unit}.ini.")
            return 0.0

        if ml is not None:
            assert ml >= 0, "ml should be greater than or equal to 0"
            duration = utils.pump_ml_to_duration(
                ml, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(ml, 2)}mL")
        elif duration is not None:
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(duration, 2)}s")
        elif continuously:
            duration = 600
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info("Running pump continuously.")

        assert isinstance(ml, (float, int))
        assert isinstance(duration, (float, int))
        assert duration >= 0, "duration should be greater than or equal to 0"
        if duration == 0:
            return 0.0

        # publish this first, as downstream jobs need to know about it.
        json_output = dumps(
            {
                "volume_change": ml,
                "event": action_name,
                "source_of_event": source_of_event,
                "timestamp": current_utc_time(),
            }
        )
        publish(
            f"pioreactor/{unit}/{experiment}/dosing_events",
            json_output,
            qos=QOS.EXACTLY_ONCE,
        )

        try:

            pwm = PWM(GPIO_PIN, calibration["hz"])
            pwm.lock()

            with catchtime() as delta_time:
                pwm.start(calibration["dc"])
                pump_start_time = time.time()

            exit_event.wait(max(0, duration - delta_time()))

            if continuously:
                while not exit_event.wait(duration):
                    publish(
                        f"pioreactor/{unit}/{experiment}/dosing_events",
                        json_output,
                        qos=QOS.EXACTLY_ONCE,
                    )

        except SystemExit:
            # a SigInt, SigKill occurred
            pass
        except Exception as e:
            # some other unexpected error
            logger.debug(e, exc_info=True)
            logger.error(e)

        finally:
            pwm.stop()
            pwm.cleanup()
            if continuously:
                logger.info(f"Stopping {pump_name} pump.")

            if exit_event.is_set():
                # ended early for some reason
                shortened_duration = time.time() - pump_start_time
                ml = utils.pump_duration_to_ml(
                    shortened_duration, calibration["duration_"], calibration["bias_"]
                )
        return ml
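
A minimal usage sketch of pump(); the names and volume are hypothetical:

ml_moved = pump(
    unit="pio1",
    experiment="my_experiment",
    pump_name="media",
    ml=1.0,
    source_of_event="manual",
)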
Example #19
class DosingAutomation(BackgroundSubJob):
    """
    This is the super class that automations inherit from. The `run` function will
    execute every `duration` minutes (selected at the start of the program). If `duration` is left
    as None, manually call `run`. This calls the `execute` function, which is what subclasses will define.

    To change setting over MQTT:

    `pioreactor/<unit>/<experiment>/dosing_automation/<setting>/set` value

    """

    automation_name = "dosing_automation_base"  # is overwritten in subclasses
    published_settings: dict[str, PublishableSetting] = {}

    _latest_growth_rate: Optional[float] = None
    _latest_od: Optional[float] = None
    previous_od: Optional[float] = None
    previous_growth_rate: Optional[float] = None

    latest_event: Optional[events.Event] = None
    _latest_settings_started_at: str = current_utc_time()
    _latest_settings_ended_at: Optional[str] = None
    _latest_run_at: Optional[float] = None
    run_thread: RepeatedTimer | Thread
    duration: float | None

    # overwrite to use your own dosing programs.
    # interface must look like types.DosingProgram
    add_media_to_bioreactor: DosingProgram = partial(add_media,
                                                     duration=None,
                                                     calibration=None,
                                                     continuously=False)
    remove_waste_from_bioreactor: DosingProgram = partial(remove_waste,
                                                          duration=None,
                                                          calibration=None)
    add_alt_media_to_bioreactor: DosingProgram = partial(add_alt_media,
                                                         duration=None,
                                                         calibration=None)

    # dosing metrics that are available, and published to MQTT
    alt_media_fraction: float = 0  # fraction of the vial that is alt-media (vs regular media).
    media_throughput: float = 0  # amount of media that has been expelled
    alt_media_throughput: float = 0  # amount of alt-media that has been expelled

    # next two are seconds-since-unix-epoch
    latest_od_at: float = 0
    latest_growth_rate_at: float = 0

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

        # this registers all subclasses of DosingAutomation back to DosingController, so the subclass
        # can be invoked in DosingController.
        if hasattr(cls, "automation_name"):
            DosingController.automations[cls.automation_name] = cls

    def __init__(
        self,
        unit: str,
        experiment: str,
        duration: Optional[float] = None,
        skip_first_run: bool = False,
        **kwargs,
    ) -> None:
        super(DosingAutomation, self).__init__(job_name="dosing_automation",
                                               unit=unit,
                                               experiment=experiment)
        self.skip_first_run = skip_first_run

        self._alt_media_fraction_calculator = self._init_alt_media_fraction_calculator()
        self._volume_throughput_calculator = self._init_volume_throughput_calculator()

        # we republish metadata to broker since we edited it above - this is techdebt
        self.publish_settings_to_broker()

        self.set_duration(duration)

        self.start_passive_listeners()

    def set_duration(self, duration: Optional[float]) -> None:
        if duration:
            self.duration = float(duration)

            with suppress(AttributeError):
                self.run_thread.cancel()  # type: ignore

            if self._latest_run_at:
                # what's the correct logic when changing from duration N and duration M?
                # - N=20, and it's been 5m since the last run (or initialization). I change to M=30, I should wait M-5 minutes.
                # - N=60, and it's been 50m since last run. I change to M=30, I should run immediately.
                run_after = max(0, (self.duration * 60) -
                                (time.time() - self._latest_run_at))
            else:
                # there is a race condition here: self.run() will run immediately (see run_immediately), but the state of the job is not READY, since
                # set_duration is run in the __init__ (hence the job is INIT). So we wait 2 seconds for the __init__ to finish, and then run.
                run_after = 2

            self.run_thread = RepeatedTimer(
                self.duration * 60,
                self.run,
                job_name=self.job_name,
                run_immediately=(not self.skip_first_run)
                or (self._latest_run_at is not None),
                run_after=run_after,
            ).start()

        else:
            self.duration = None
            self.run_thread = Thread(target=self.run, daemon=True)
            self.run_thread.start()

    def run(self) -> Optional[events.Event]:
        event: Optional[events.Event]

        if self.state == self.DISCONNECTED:
            # NOOP
            # we ended early.
            return None

        elif self.state != self.READY:
            # wait a minute, and if not unpaused, just move on.
            time_waited = 0
            sleep_for = 5

            while self.state != self.READY:
                time.sleep(sleep_for)
                time_waited += sleep_for

                if time_waited > 60:
                    return None

            else:
                return self.run()

        else:
            try:
                event = self.execute()
            except Exception as e:
                self.logger.debug(e, exc_info=True)
                self.logger.error(e)
                event = events.ErrorOccurred()

        if event:
            self.logger.info(str(event))

        self.latest_event = event
        self._latest_run_at = time.time()
        return event

    def execute(self) -> Optional[events.Event]:
        # should be defined in subclass
        return events.NoEvent()

    def wait_until_not_sleeping(self) -> bool:
        while self.state == self.SLEEPING:
            brief_pause()
        return True

    def execute_io_action(self,
                          alt_media_ml: float = 0,
                          media_ml: float = 0,
                          waste_ml: float = 0) -> SummableList:
        """
        This function recursively reduces the amount to add so that
        we don't end up adding 5ml, and then removing 5ml (this could cause
        overflow). We also want sufficient time to mix, and this procedure will
        slow dosing down.
        """
        volumes_moved = SummableList([0.0, 0.0, 0.0])

        max_ = 0.36  # arbitrary
        if alt_media_ml > max_:
            volumes_moved += self.execute_io_action(
                alt_media_ml=alt_media_ml / 2,
                media_ml=media_ml,
                waste_ml=media_ml + alt_media_ml / 2,
            )
            volumes_moved += self.execute_io_action(
                alt_media_ml=alt_media_ml / 2, media_ml=0, waste_ml=alt_media_ml / 2
            )
        elif media_ml > max_:
            volumes_moved += self.execute_io_action(
                alt_media_ml=0, media_ml=media_ml / 2, waste_ml=media_ml / 2
            )
            volumes_moved += self.execute_io_action(
                alt_media_ml=alt_media_ml,
                media_ml=media_ml / 2,
                waste_ml=alt_media_ml + media_ml / 2,
            )
        else:
            source_of_event = f"{self.job_name}:{self.automation_name}"

            if (media_ml > 0 and (self.state in [self.READY, self.SLEEPING])
                    and self.wait_until_not_sleeping()):
                media_moved = self.add_media_to_bioreactor(
                    ml=media_ml,
                    source_of_event=source_of_event,
                    unit=self.unit,
                    experiment=self.experiment,
                )
                volumes_moved[0] += media_moved
                brief_pause()

            if (
                    alt_media_ml > 0
                    and (self.state in [self.READY, self.SLEEPING])
                    and self.wait_until_not_sleeping()
            ):  # always check that we are still in a valid state, as state can change between pump runs.
                alt_media_moved = self.add_alt_media_to_bioreactor(
                    ml=alt_media_ml,
                    source_of_event=source_of_event,
                    unit=self.unit,
                    experiment=self.experiment,
                )
                volumes_moved[1] += alt_media_moved
                # allow time for the addition to mix, and reduce the step response that can cause ringing in the output V.
                brief_pause()

            # remove waste last.
            if (waste_ml > 0 and (self.state in [self.READY, self.SLEEPING])
                    and self.wait_until_not_sleeping()):
                waste_moved = self.remove_waste_from_bioreactor(
                    ml=waste_ml,
                    source_of_event=source_of_event,
                    unit=self.unit,
                    experiment=self.experiment,
                )
                volumes_moved[2] += waste_moved
                # run remove_waste for an additional few seconds to keep volume constant (determined by the length of the waste tube)
                self.remove_waste_from_bioreactor(
                    ml=waste_ml * 2,
                    source_of_event=source_of_event,
                    unit=self.unit,
                    experiment=self.experiment,
                )
                brief_pause()

        return volumes_moved

    @property
    def most_stale_time(self) -> float:
        return min(self.latest_od_at, self.latest_growth_rate_at)

    @property
    def latest_growth_rate(self) -> float:
        # check if None
        if self._latest_growth_rate is None:
            # this should really only happen on the initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                f"readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?. Last reading occurred at {self.most_stale_time}, current time is {time.time()}."
            )

        return cast(float, self._latest_growth_rate)

    @property
    def latest_od(self) -> float:
        # check if None
        if self._latest_od is None:
            # this should really only happen on the initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                f"readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?. Last reading occurred at {self.most_stale_time}, current time is {time.time()}."
            )

        return cast(float, self._latest_od)

    ########## Private & internal methods

    def on_disconnected(self) -> None:
        self._latest_settings_ended_at = current_utc_time()
        self._send_details_to_mqtt()

        with suppress(AttributeError):
            self.run_thread.join()

    def __setattr__(self, name, value) -> None:
        super(DosingAutomation, self).__setattr__(name, value)
        if name in self.published_settings and name not in [
                "state",
                "alt_media_fraction",
                "media_throughput",
                "alt_media_throughput",
        ]:
            self._latest_settings_ended_at = current_utc_time()
            self._send_details_to_mqtt()
            self._latest_settings_started_at = current_utc_time()
            self._latest_settings_ended_at = None

    def _set_growth_rate(self, message) -> None:
        self.previous_growth_rate = self._latest_growth_rate
        self._latest_growth_rate = float(
            json.loads(message.payload)["growth_rate"])
        self.latest_growth_rate_at = time.time()

    def _set_OD(self, message) -> None:
        self.previous_od = self._latest_od
        self._latest_od = float(json.loads(message.payload)["od_filtered"])
        self.latest_od_at = time.time()

    def _send_details_to_mqtt(self) -> None:
        self.publish(
            f"pioreactor/{self.unit}/{self.experiment}/{self.job_name}/dosing_automation_settings",
            json.dumps({
                "pioreactor_unit": self.unit,
                "experiment": self.experiment,
                "started_at": self._latest_settings_started_at,
                "ended_at": self._latest_settings_ended_at,
                "automation": self.automation_name,
                "settings": json.dumps({
                    attr: getattr(self, attr, None)
                    for attr in self.published_settings if attr not in [
                        "state",
                        "alt_media_fraction",
                        "media_throughput",
                        "alt_media_throughput",
                    ]
                }),
            }),
            qos=QOS.EXACTLY_ONCE,
        )

    def _update_dosing_metrics(self, message) -> None:

        self._update_alt_media_fraction(message)
        self._update_throughput(message)

    def _update_alt_media_fraction(self, message) -> None:
        self.alt_media_fraction = self._alt_media_fraction_calculator.update(
            json.loads(message.payload), self.alt_media_fraction)
        # add to cache
        with local_persistant_storage("alt_media_fraction") as cache:
            cache[self.experiment] = str(self.alt_media_fraction)

    def _update_throughput(self, message) -> None:
        payload = json.loads(message.payload)
        (
            self.media_throughput,
            self.alt_media_throughput,
        ) = self._volume_throughput_calculator.update(
            payload, self.media_throughput, self.alt_media_throughput)

        # add to cache
        with local_persistant_storage("alt_media_throughput") as cache:
            cache[self.experiment] = str(self.alt_media_throughput)

        with local_persistant_storage("media_throughput") as cache:
            cache[self.experiment] = str(self.media_throughput)

    def _init_alt_media_fraction_calculator(self) -> AltMediaCalculator:
        self.published_settings["alt_media_fraction"] = {
            "datatype": "float",
            "settable": True,
        }

        with local_persistant_storage("alt_media_fraction") as cache:
            self.alt_media_fraction = float(cache.get(self.experiment, 0.0))
            return AltMediaCalculator()

    def _init_volume_throughput_calculator(self) -> ThroughputCalculator:
        self.published_settings["alt_media_throughput"] = {
            "datatype": "float",
            "settable": True,
            "unit": "mL",
            "persist": True,
        }
        self.published_settings["media_throughput"] = {
            "datatype": "float",
            "settable": True,
            "unit": "mL",
            "persist": True,
        }

        with local_persistant_storage("alt_media_throughput") as cache:
            self.alt_media_throughput = float(cache.get(self.experiment, 0.0))

        with local_persistant_storage("media_throughput") as cache:
            self.media_throughput = float(cache.get(self.experiment, 0.0))

        return ThroughputCalculator()

    def start_passive_listeners(self) -> None:
        self.subscribe_and_callback(
            self._set_OD,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/od_filtered",
        )
        self.subscribe_and_callback(
            self._set_growth_rate,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/growth_rate",
        )
        self.subscribe_and_callback(
            self._update_dosing_metrics,
            f"pioreactor/{self.unit}/{self.experiment}/dosing_events",
        )
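
A minimal subclass sketch under the API shown above; the automation name and dosing logic are hypothetical:

class ConstantDilutionSketch(DosingAutomation):
    automation_name = "constant_dilution_sketch"  # hypothetical

    def execute(self):
        # dose a fixed volume each cycle, removing the same volume (plus extra) as waste.
        self.execute_io_action(media_ml=0.5, waste_ml=0.5)
        return events.NoEvent()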
Example #20
    def on_disconnected(self) -> None:
        self._latest_settings_ended_at = current_utc_time()
        self._send_details_to_mqtt()

        with suppress(AttributeError):
            self.run_thread.join()
Example #21
class LEDAutomation(BackgroundSubJob):
    """
    This is the super class that LED automations inherit from. The `run` function will
    execute every `duration` minutes (selected at the start of the program), and call the `execute` function
    which is what subclasses define.

    To change setting over MQTT:

    `pioreactor/<unit>/<experiment>/led_automation/<setting>/set` value

    """

    automation_name = "led_automation_base"  # is overwritten in subclasses

    published_settings = {"duration": {"datatype": "float", "settable": True}}

    _latest_growth_rate: Optional[float] = None
    _latest_od: Optional[float] = None
    previous_od: Optional[float] = None
    previous_growth_rate: Optional[float] = None

    _latest_settings_started_at: str = current_utc_time()
    _latest_settings_ended_at: Optional[str] = None
    _latest_run_at: Optional[float] = None

    latest_event: Optional[events.Event] = None
    run_thread: RepeatedTimer | Thread

    # next two are seconds-since-unix-epoch
    latest_od_at: float = 0
    latest_growth_rate_at: float = 0

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)

        # this registers all subclasses of LEDAutomation back to LEDController, so the
        # subclass can be looked up by name and invoked in LEDController.
        if hasattr(cls, "automation_name"):
            LEDController.automations[cls.automation_name] = cls

    def __init__(
        self,
        duration: float,
        skip_first_run: bool = False,
        unit: Optional[str] = None,
        experiment: Optional[str] = None,
        **kwargs,
    ) -> None:
        super(LEDAutomation, self).__init__(
            job_name="led_automation", unit=unit, experiment=experiment
        )

        self.skip_first_run = skip_first_run
        self.edited_channels: set[LedChannel] = set()

        self.set_duration(duration)
        self.start_passive_listeners()

        self.logger.info(f"Starting {self.automation_name} LED automation.")

    def set_duration(self, duration: float) -> None:
        self.duration = float(duration)
        if self._latest_run_at:
            # what's the correct logic when changing from duration N to duration M?
            # - N=20, and it's been 5m since the last run (or initialization). I change to M=30: I should wait M-5 minutes.
            # - N=60, and it's been 50m since the last run. I change to M=30: I should run immediately.
            run_after = max(0, (self.duration * 60) - (time.time() - self._latest_run_at))
        else:
            # there is a race condition here: self.run() would run immediately (see run_immediately), but the
            # job's state is not yet READY, since set_duration is called in __init__ (the job is still in INIT).
            # So we wait 2 seconds for __init__ to finish, and then run.
            run_after = 2
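        # Worked example of the arithmetic above (illustrative): with N=60 min and 50 min
        # elapsed, switching to M=30 gives run_after = max(0, 30*60 - 50*60) = 0, i.e. run
        # immediately; with N=20 and 5 min elapsed, M=30 gives max(0, 30*60 - 5*60) = 1500 s,
        # i.e. wait the remaining 25 minutes.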

        self.run_thread = RepeatedTimer(
            self.duration * 60,  # RepeatedTimer uses seconds
            self.run,
            job_name=self.job_name,
            run_immediately=(not self.skip_first_run)
            or (self._latest_run_at is not None),
            run_after=run_after,
        ).start()

    def run(self) -> Optional[events.Event]:
        # TODO: this should be close to or equal to the function in DosingAutomation
        event: Optional[events.Event]
        if self.state == self.DISCONNECTED:
            # NOOP
            # we ended early.
            return None

        elif self.state != self.READY:
            # wait up to a minute; if the job is still not READY by then, just move on.

            time_waited = 0
            sleep_for = 5

            while self.state != self.READY:
                time.sleep(sleep_for)
                time_waited += sleep_for

                if time_waited > 60:
                    return None

            else:
                # while-else: runs when the loop exits normally, i.e. the job became READY again.
                return self.run()

        else:
            try:
                event = self.execute()
            except Exception as e:
                self.logger.debug(e, exc_info=True)
                self.logger.error(e)
                event = events.ErrorOccurred()

        if event:
            self.logger.info(str(event))

        self.latest_event = event
        self._latest_run_at = time.time()
        return event

    def execute(self) -> Optional[events.Event]:
        # subclasses override this to define the automation's behaviour
        pass

    @property
    def most_stale_time(self) -> float:
        return min(self.latest_od_at, self.latest_growth_rate_at)
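        # e.g. (illustrative) if OD arrived 30 s ago and growth rate 400 s ago, this returns
        # the older timestamp, so time.time() - most_stale_time = 400 s, which exceeds the
        # 5-minute staleness limit enforced in latest_od / latest_growth_rate below.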

    def set_led_intensity(self, channel: LedChannel, intensity: float) -> bool:
        """
        This first checks the lock on the LED channel, and will wait a few seconds for it to clear,
        and error out if it waits too long.

        Parameters
        ------------

        Channel:
            The LED channel to modify.
        Intensity: float
            A float between 0-100, inclusive.

        """
        for _ in range(12):
            success = led_intensity(
                channel,
                intensity,
                unit=self.unit,
                experiment=self.experiment,
                pubsub_client=self.pub_client,
                source_of_event=self.job_name,
            )

            if success:
                self.edited_channels.add(channel)
                return True

            time.sleep(0.1)

        self.logger.warning(
            f"Unable to update channel {channel}: the channel's lock did not clear in time."
        )
        return False

    ########## Private & internal methods

    def on_disconnected(self) -> None:
        self._latest_settings_ended_at = current_utc_time()
        self._send_details_to_mqtt()

        with suppress(AttributeError):
            self.run_thread.join()

        for channel in self.edited_channels:
            led_intensity(channel, 0, unit=self.unit, experiment=self.experiment)

    @property
    def latest_growth_rate(self) -> float:
        # check if None
        if self._latest_growth_rate is None:
            # this should really only happen at initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                "readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?"
            )

        return cast(float, self._latest_growth_rate)

    @property
    def latest_od(self) -> float:
        # check if None
        if self._latest_od is None:
            # this should really only happen at initialization.
            self.logger.debug("Waiting for OD and growth rate data to arrive")
            if not is_pio_job_running("od_reading", "growth_rate_calculating"):
                raise exc.JobRequiredError(
                    "`od_reading` and `growth_rate_calculating` should be running."
                )

        # check most stale time
        if (time.time() - self.most_stale_time) > 5 * 60:
            raise exc.JobRequiredError(
                "readings are too stale (over 5 minutes old) - are `od_reading` and `growth_rate_calculating` running?"
            )

        return cast(float, self._latest_od)

    def __setattr__(self, name, value) -> None:
        super(LEDAutomation, self).__setattr__(name, value)
        if name in self.published_settings and name != "state":
            self._latest_settings_ended_at = current_utc_time()
            self._send_details_to_mqtt()
            self._latest_settings_started_at = current_utc_time()
            self._latest_settings_ended_at = None

    def _set_growth_rate(self, message) -> None:
        self.previous_growth_rate = self._latest_growth_rate
        self._latest_growth_rate = float(json.loads(message.payload)["growth_rate"])
        self.latest_growth_rate_at = time.time()

    def _set_OD(self, message) -> None:
        self.previous_od = self._latest_od
        self._latest_od = float(json.loads(message.payload)["od_filtered"])
        self.latest_od_at = time.time()

    def _send_details_to_mqtt(self) -> None:
        self.publish(
            f"pioreactor/{self.unit}/{self.experiment}/{self.job_name}/led_automation_settings",
            json.dumps(
                {
                    "pioreactor_unit": self.unit,
                    "experiment": self.experiment,
                    "started_at": self._latest_settings_started_at,
                    "ended_at": self._latest_settings_ended_at,
                    "automation": self.automation_name,
                    "settings": json.dumps(
                        {
                            attr: getattr(self, attr, None)
                            for attr in self.published_settings
                            if attr != "state"
                        }
                    ),
                }
            ),
            qos=QOS.EXACTLY_ONCE,
        )
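        # The published message is nested JSON: the outer object's "settings" value is itself
        # a JSON-encoded string, e.g. (illustrative values):
        # {"pioreactor_unit": "pio01", "experiment": "exp1", "started_at": "...", "ended_at": null,
        #  "automation": "led_automation_base", "settings": "{\"duration\": 30.0}"}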

    def start_passive_listeners(self) -> None:
        self.subscribe_and_callback(
            self._set_OD,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/od_filtered",
        )
        self.subscribe_and_callback(
            self._set_growth_rate,
            f"pioreactor/{self.unit}/{self.experiment}/growth_rate_calculating/growth_rate",
        )
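
The class above is only the base; concrete automations subclass it and override `execute`. Below is a minimal sketch of such a subclass — the name `ExampleLightDark`, the channel "B", the OD threshold, and the `light_intensity` setting are all invented for illustration:

class ExampleLightDark(LEDAutomation):
    # hypothetical subclass, for illustration only; registered in
    # LEDController.automations automatically via __init_subclass__ above.
    automation_name = "example_light_dark"
    published_settings = {
        "duration": {"datatype": "float", "settable": True},
        "light_intensity": {"datatype": "float", "settable": True},
    }

    def __init__(self, light_intensity: float = 50.0, **kwargs) -> None:
        super().__init__(**kwargs)
        self.light_intensity = float(light_intensity)

    def execute(self) -> Optional[events.Event]:
        # latest_od raises JobRequiredError if readings are absent or stale
        if self.latest_od > 1.0:
            # set_led_intensity retries briefly if the channel is locked
            self.set_led_intensity("B", 0.0)
        else:
            self.set_led_intensity("B", self.light_intensity)
        return None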
Example #22
    def __init__(
        self,
        automation_name: str,
        unit: str,
        experiment: str,
        eval_and_publish_immediately: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(job_name="temperature_control",
                         unit=unit,
                         experiment=experiment)

        if not is_HAT_present():
            self.logger.error("Pioreactor HAT must be present.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")

        if not is_heating_pcb_present():
            self.logger.error("Heating PCB must be attached to Pioreactor HAT")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError(
                "Heating PCB must be attached to Pioreactor HAT")

        if is_testing_env():
            self.logger.debug("TMP1075 not available; using MockTMP1075")
            from pioreactor.utils.mock import MockTMP1075 as TMP1075
        else:
            from TMP1075 import TMP1075  # type: ignore

        self.pwm = self.setup_pwm()
        self.update_heater(0)

        self.tmp_driver = TMP1075()
        self.read_external_temperature_timer = RepeatedTimer(
            45, self.read_external_temperature, run_immediately=False)
        self.read_external_temperature_timer.start()

        self.publish_temperature_timer = RepeatedTimer(
            4 * 60,
            self.evaluate_and_publish_temperature,
            run_immediately=eval_and_publish_immediately,
            run_after=60,
        )
        self.publish_temperature_timer.start()

        self.automation = AutomationDict(automation_name=automation_name,
                                         **kwargs)

        try:
            automation_class = self.automations[
                self.automation["automation_name"]]
        except KeyError:
            raise KeyError(
                f"Unable to find automation {self.automation['automation_name']}. Available automations are {list(self.automations.keys())}"
            )

        self.logger.info(f"Starting {self.automation}.")
        try:
            self.automation_job = automation_class(unit=self.unit,
                                                   experiment=self.experiment,
                                                   parent=self,
                                                   **kwargs)
        except Exception as e:
            self.logger.error(e)
            self.logger.debug(e, exc_info=True)
            self.set_state(self.DISCONNECTED)
            raise e
        self.automation_name = self.automation["automation_name"]

        self.temperature = {
            "temperature": self.read_external_temperature(),
            "timestamp": current_utc_time(),
        }
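
Both examples above lean on a RepeatedTimer utility whose implementation isn't shown in these snippets. As a rough behavioural sketch (an assumption, not the actual implementation): it waits `run_after` seconds, optionally fires once immediately, then invokes the callback every `interval` seconds on a daemon thread, with `start` returning the timer so calls can be chained as in `RepeatedTimer(...).start()`:

import threading


class RepeatedTimerSketch:
    # simplified stand-in for the RepeatedTimer used above, for illustration only
    def __init__(self, interval, function, job_name=None, run_immediately=False, run_after=0):
        self.interval = interval
        self.function = function
        self.run_immediately = run_immediately
        self.run_after = run_after
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._loop, daemon=True)

    def start(self):
        self._thread.start()
        return self  # enables the chained `.start()` pattern seen above

    def _loop(self):
        # initial delay; exit early if cancelled during the wait
        if self._stop.wait(self.run_after):
            return
        if self.run_immediately:
            self.function()
        while not self._stop.wait(self.interval):
            self.function()

    def cancel(self):
        self._stop.set()

    def join(self):
        # as used in on_disconnected above: stop the loop and wait for the thread
        self.cancel()
        self._thread.join()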