Example #1
    def __init__(self, duty_cycle, unit, experiment, hertz=50):
        super(Stirrer, self).__init__(job_name=JOB_NAME,
                                      unit=unit,
                                      experiment=experiment)

        self.hertz = hertz
        self.pin = PWM_TO_PIN[config.getint("PWM", "stirring")]

        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, 0)
        self.pwm = GPIO.PWM(self.pin, self.hertz)
        self.set_duty_cycle(duty_cycle)
        self.start_stirring()
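The constructor above is standard RPi.GPIO software PWM plus the project's config lookup. A minimal standalone sketch of the same idea, with a hypothetical BCM pin in place of the real PWM_TO_PIN mapping:

import time
import RPi.GPIO as GPIO

PIN = 17  # hypothetical BCM pin; the real one comes from PWM_TO_PIN and config

GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN, GPIO.OUT)
GPIO.output(PIN, 0)

pwm = GPIO.PWM(PIN, 50)  # 50 Hz software PWM, matching the default `hertz` above
pwm.start(80)            # spin up at 80% duty cycle
time.sleep(5)
pwm.ChangeDutyCycle(40)  # change speed without restarting the PWM
time.sleep(5)
pwm.stop()
GPIO.cleanup(PIN)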
Example #2
    def __setattr__(self, name, value):
        if name == "state":
            if value != self.READY:
                try:
                    self.stop_stirring()
                except AttributeError:
                    pass
            elif (value == self.READY) and (self.state == self.SLEEPING):
                self.duty_cycle = config.getint("stirring",
                                                f"duty_cycle_{self.unit}",
                                                fallback=0)
                self.start_stirring()
        super(Stirrer, self).__setattr__(name, value)
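Because the hook runs on every attribute assignment, pausing and resuming reduce to plain state assignments. A rough sketch of the intended behaviour, assuming `stirrer` is a connected Stirrer instance and READY/SLEEPING are the usual BackgroundJob state constants:

stirrer.state = stirrer.SLEEPING  # not READY, so the hook calls stop_stirring()
# ... later ...
stirrer.state = stirrer.READY     # SLEEPING -> READY: duty cycle is re-read from config
                                  # and start_stirring() runs before the new state is stored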
Example #3
def backup_database(output_file: str) -> None:
    """
    This action will create a backup of the SQLite3 database at the specified output file. It
    will then try to copy the backup to any available worker Pioreactors as a further backup.

    This job actually consumes _a lot_ of resources, and I've seen the LED output
    drop while it's running. See issue #81. For now, we will skip the backup if `od_reading` is running.

    Elsewhere, a cronjob is also set up to run this action every N days.
    """

    import sqlite3
    from sh import ErrorReturnCode, rsync  # type: ignore

    unit = get_unit_name()
    experiment = UNIVERSAL_EXPERIMENT

    with publish_ready_to_disconnected_state(unit, experiment,
                                             "backup_database"):

        logger = create_logger("backup_database",
                               experiment=experiment,
                               unit=unit)

        if is_pio_job_running("od_reading"):
            logger.warning("Won't run if OD Reading is running. Exiting")
            return

        def progress(status: int, remaining: int, total: int) -> None:
            logger.debug(f"Copied {total-remaining} of {total} SQLite3 pages.")
            logger.debug(f"Writing to local backup {output_file}.")

        logger.debug(f"Starting backup of database to {output_file}")
        sleep(1)  # pause a second so the log entry above gets recorded into the DB.

        con = sqlite3.connect(config.get("storage", "database"))
        bck = sqlite3.connect(output_file)

        with bck:
            con.backup(bck, pages=-1, progress=progress)

        bck.close()
        con.close()

        with local_persistant_storage("database_backups") as cache:
            cache["latest_backup_timestamp"] = current_utc_time()

        logger.info("Completed backup of database.")

        # configparser's getint() needs a (section, option) pair; the [storage]
        # section is assumed here, matching the database path lookup above.
        n_backups = config.getint("storage",
                                  "number_of_backup_replicates_to_workers",
                                  fallback=2)
        backups_complete = 0
        available_workers = list(get_active_workers_in_inventory())

        while (backups_complete < n_backups) and (len(available_workers) > 0):
            backup_unit = available_workers.pop()
            if backup_unit == get_unit_name():
                continue

            try:
                rsync(
                    "-hz",
                    "--partial",
                    "--inplace",
                    output_file,
                    f"{backup_unit}:{output_file}",
                )
            except ErrorReturnCode:
                logger.debug(
                    f"Unable to backup database to {backup_unit}. Is it online?",
                    exc_info=True,
                )
                logger.warning(
                    f"Unable to backup database to {backup_unit}. Is it online?"
                )
            else:
                logger.debug(
                    f"Backed up database to {backup_unit}:{output_file}.")
                backups_complete += 1

        return
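The copy itself is the standard-library sqlite3 online backup API, independent of anything Pioreactor-specific. A minimal sketch with hypothetical file paths:

import sqlite3

def progress(status: int, remaining: int, total: int) -> None:
    print(f"Copied {total - remaining} of {total} pages.")

src = sqlite3.connect("pioreactor.sqlite")         # hypothetical source path
dst = sqlite3.connect("pioreactor.sqlite.backup")  # hypothetical backup path

with dst:
    # pages=-1 copies the whole database in one step; a positive value copies in
    # chunks and calls `progress` between steps.
    src.backup(dst, pages=-1, progress=progress)

dst.close()
src.close()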
Example #4
    try:
        stirrer = Stirrer(duty_cycle, unit=unit, experiment=experiment)
        stirrer.start_stirring()

        if duration is None:
            signal.pause()
        else:
            time.sleep(duration)

    except Exception as e:
        GPIO.cleanup()
        logger.error(f"failed with {str(e)}")
        raise e

    return


@click.command(name="stirring")
@click.option(
    "--duty-cycle",
    default=config.getint("stirring", f"duty_cycle_{unit}", fallback=0),
    help="set the duty cycle",
    show_default=True,
    type=click.IntRange(0, 100, clamp=True),
)
def click_stirring(duty_cycle):
    """
    Start the stirring of the Pioreactor.
    """
    stirring(duty_cycle=duty_cycle)
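Since click_stirring is an ordinary click command, its option parsing can be exercised directly with click's test runner. A small sketch (actually invoking the command would block on signal.pause() when no duration is given, so only --help is exercised here):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(click_stirring, ["--help"])
assert result.exit_code == 0
assert "--duty-cycle" in result.output

# clamp=True on the IntRange means an out-of-range value like 150 would be
# clipped to 100 rather than rejected.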
Example #5
def od_blank(
    od_angle_channel1,
    od_angle_channel2,
    n_samples: int = 30,
):
    """
    Compute the sample average of the photodiodes attached.

    Note that because of the sensitivity of the growth rate (and normalized OD) to the starting values,
    we need a very accurate estimate of these statistics.

    """
    from statistics import mean, variance, correlation  # correlation (Python 3.10+) is used for the lag-1 autocorrelation below

    action_name = "od_blank"
    logger = create_logger(action_name)
    unit = get_unit_name()
    experiment = get_latest_experiment_name()
    testing_experiment = get_latest_testing_experiment_name()
    logger.info("Starting reading of blank OD. This will take a few minutes.")

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        # running this will mess with OD Reading - best to just not let it happen.
        if (is_pio_job_running("od_reading")
                # but if test mode, ignore
                and not is_testing_env()):
            logger.error(
                "od_reading should not be running. Stop od_reading first. Exiting."
            )
            return

        # turn on stirring if not already on
        if not is_pio_job_running("stirring"):
            # start stirring
            st = start_stirring(
                target_rpm=config.getint("stirring", "target_rpm"),
                unit=unit,
                experiment=testing_experiment,
            )
            st.block_until_rpm_is_close_to_target()
        else:
            # TODO: it could be paused, we should make sure it's running
            ...

        sampling_rate = 1 / config.getfloat("od_config", "samples_per_second")

        # start od_reading
        start_od_reading(
            od_angle_channel1,
            od_angle_channel2,
            sampling_rate=sampling_rate,
            unit=unit,
            experiment=testing_experiment,
            fake_data=is_testing_env(),
        )

        def yield_from_mqtt():
            while True:
                msg = pubsub.subscribe(
                    f"pioreactor/{unit}/{testing_experiment}/od_reading/od_raw_batched"
                )
                yield json.loads(msg.payload)

        signal = yield_from_mqtt()
        readings = defaultdict(list)

        for count, batched_reading in enumerate(signal, start=1):
            for (channel, reading) in batched_reading["od_raw"].items():
                readings[channel].append(reading["voltage"])

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                round(count / n_samples * 100),
            )
            logger.debug(f"Progress: {count/n_samples:.0%}")
            if count == n_samples:
                break

        means = {}
        variances = {}
        autocorrelations = {}  # lag 1

        for channel, od_reading_series in readings.items():
            # measure the mean and publish. The mean will be used to normalize the readings in downstream jobs
            means[channel] = mean(od_reading_series)
            variances[channel] = variance(od_reading_series)
            autocorrelations[channel] = correlation(od_reading_series[:-1],
                                                    od_reading_series[1:])

            # warn users that a blank is 0 - maybe this should be an error instead? TODO: link this to a docs page.
            if means[channel] == 0.0:
                logger.warning(
                    f"OD reading for PD Channel {channel} is 0.0 - that shouldn't be. Is there a loose connection, or an extra channel in the configuration's [od_config.photodiode_channel] section?"
                )

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/od_blank/{channel}",
                json.dumps({
                    "timestamp": current_utc_time(),
                    "od_reading_v": means[channel]
                }),
            )

        # store locally as the source of truth.
        with local_persistant_storage(action_name) as cache:
            cache[experiment] = json.dumps(means)

        # publish to UI and database
        pubsub.publish(
            f"pioreactor/{unit}/{experiment}/{action_name}/mean",
            json.dumps(means),
            qos=pubsub.QOS.AT_LEAST_ONCE,
            retain=True,
        )

        if config.getboolean(
                "data_sharing_with_pioreactor",
                "send_od_statistics_to_Pioreactor",
                fallback=False,
        ):
            to_share = {"mean": means, "variance": variances}
            to_share["ir_intensity"] = config["od_config"]["ir_intensity"]
            to_share["od_angle_channel1"] = od_angle_channel1
            to_share["od_angle_channel2"] = od_angle_channel2
            pubsub.publish_to_pioreactor_cloud("od_blank_mean", json=to_share)

        logger.debug(f"measured mean: {means}")
        logger.debug(f"measured variances: {variances}")
        logger.debug(f"measured autocorrelations: {autocorrelations}")
        logger.debug("OD normalization finished.")

        return means
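The per-channel statistics at the end are a few lines of the statistics module. A sketch with made-up voltage readings (statistics.correlation requires Python 3.10+):

from statistics import mean, variance, correlation

series = [0.112, 0.115, 0.113, 0.118, 0.116, 0.114]  # made-up photodiode voltages

blank_mean = mean(series)
blank_variance = variance(series)
# lag-1 autocorrelation: correlate the series against itself shifted by one sample
lag1_autocorrelation = correlation(series[:-1], series[1:])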
Example #6
def add_media(
    ml=None,
    duration=None,
    duty_cycle=33,
    source_of_event=None,
    unit=None,
    experiment=None,
):
    assert 0 <= duty_cycle <= 100
    assert (ml is not None) or (duration is not None)
    assert not ((ml is not None) and
                (duration is not None)), "Only select ml or duration"

    hz = 100

    try:
        config["pump_calibration"][f"media_ml_calibration_{unit}"]
    except KeyError:
        logger.error(
            f"Calibration not defined. Add `pump_calibration` section to config_{unit}.ini."
        )

    if ml is not None:
        user_submitted_ml = True
        assert ml >= 0
        duration = pump_ml_to_duration(
            ml,
            duty_cycle,
            **loads(
                config["pump_calibration"][f"media_ml_calibration_{unit}"]),
        )
    elif duration is not None:
        user_submitted_ml = False
        ml = pump_duration_to_ml(
            duration,
            duty_cycle,
            **loads(
                config["pump_calibration"][f"media_ml_calibration_{unit}"]),
        )
    assert duration >= 0

    publish(
        f"pioreactor/{unit}/{experiment}/dosing_events",
        dumps({
            "volume_change": ml,
            "event": "add_media",
            "source_of_event": source_of_event,
        }),
        qos=QOS.EXACTLY_ONCE,
    )

    if user_submitted_ml:
        logger.info(f"add media: {round(ml,2)}mL")
    else:
        logger.info(f"add media: {round(duration,2)}s")

    try:
        MEDIA_PIN = PWM_TO_PIN[config.getint("PWM", "media")]
        GPIO.setup(MEDIA_PIN, GPIO.OUT)
        GPIO.output(MEDIA_PIN, 0)
        pwm = GPIO.PWM(MEDIA_PIN, hz)

        pwm.start(duty_cycle)
        time.sleep(duration)
        pwm.stop()

        GPIO.output(MEDIA_PIN, 0)

    except Exception as e:
        logger.error(e, exc_info=True)
        raise e
    finally:
        clean_up_gpio()
    return
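A usage sketch for the function above, assuming `unit` and `experiment` are already defined (e.g. via get_unit_name() and get_latest_experiment_name()); exactly one of ml or duration may be given, and the other is derived from the pump calibration in config:

add_media(ml=1.5, source_of_event="manual", unit=unit, experiment=experiment)

# or, equivalently, dose by time at the default 33% duty cycle:
add_media(duration=10, source_of_event="manual", unit=unit, experiment=experiment)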
Example #7
def clean_up_gpio():
    GPIO.cleanup(PWM_TO_PIN[config.getint("PWM", "media")])
Example #8
class Stirrer(BackgroundJob):
    """
    Parameters
    ------------

    target_rpm: float
        Send message to "pioreactor/{unit}/{experiment}/stirring/target_rpm/set" to change the stirring speed.
    rpm_calculator: RpmCalculator
        See RpmCalculator and examples below.

    Notes
    -------

    To create a feedback loop between the duty-cycle level and the RPM, we set up a polling algorithm. We set up
    an edge detector on the hall sensor pin, and count the number of pulses in N seconds. We convert this count to RPM, and
    then use a PID controller to update the duty cycle to apply.

    We perform the above every N seconds. That is, a PID controller checks every N seconds and nudges the duty cycle
    toward the requested RPM.


    Examples
    ---------

    > st = Stirrer(500, unit, experiment)
    > st.start_stirring()
    """

    published_settings = {
        "target_rpm": {
            "datatype": "json",
            "settable": True,
            "unit": "RPM"
        },
        "measured_rpm": {
            "datatype": "json",
            "settable": False,
            "unit": "RPM"
        },
        "duty_cycle": {
            "datatype": "float",
            "settable": True,
            "unit": "%"
        },
    }
    _previous_duty_cycle: float = 0
    duty_cycle: float = config.getint(
        "stirring", "initial_duty_cycle",
        fallback=60.0)  # only used if calibration isn't defined.
    _measured_rpm: Optional[float] = None

    def __init__(
        self,
        target_rpm: float,
        unit: str,
        experiment: str,
        rpm_calculator: Optional[RpmCalculator],
        hertz: float = 150,
    ) -> None:
        super(Stirrer, self).__init__(job_name="stirring",
                                      unit=unit,
                                      experiment=experiment)
        self.logger.debug(f"Starting stirring with initial {target_rpm} RPM.")
        self.rpm_calculator = rpm_calculator

        if not hardware.is_HAT_present():
            self.logger.error("Pioreactor HAT must be present.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")

        if (self.rpm_calculator
                is not None) and not hardware.is_heating_pcb_present():
            self.logger.error("Heating PCB must be present to measure RPM.")
            self.set_state(self.DISCONNECTED)
            raise exc.HardwareNotFoundError(
                "Heating PCB must be present to measure RPM.")

        pin = hardware.PWM_TO_PIN[config.get("PWM_reverse", "stirring")]
        self.pwm = PWM(pin, hertz)
        self.pwm.lock()

        self.rpm_to_dc_lookup = self.initialize_rpm_to_dc_lookup()
        self.target_rpm = target_rpm
        self.duty_cycle = self.rpm_to_dc_lookup(self.target_rpm)

        # set up PID
        self.pid = PID(
            Kp=config.getfloat("stirring.pid", "Kp"),
            Ki=config.getfloat("stirring.pid", "Ki"),
            Kd=config.getfloat("stirring.pid", "Kd"),
            setpoint=self.target_rpm,
            unit=self.unit,
            experiment=self.experiment,
            job_name=self.job_name,
            target_name="rpm",
            output_limits=(-20, 20),  # avoid whiplashing
        )

        # set up thread to periodically check the rpm
        self.rpm_check_repeated_thread = RepeatedTimer(
            17,  # 17 and 5 are coprime
            self.poll_and_update_dc,
            job_name=self.job_name,
            run_immediately=True,
            run_after=5,
            poll_for_seconds=4,  # technically should be a function of the RPM: lower RPM, longer to get sufficient data.
        )

    def initialize_rpm_to_dc_lookup(self) -> Callable:
        if self.rpm_calculator is None:
            # if we can't track RPM, no point in adjusting DC
            return lambda rpm: self.duty_cycle

        with local_persistant_storage("stirring_calibration") as cache:

            if "linear_v1" in cache:
                parameters = json.loads(cache["linear_v1"])
                coef = parameters["rpm_coef"]
                intercept = parameters["intercept"]
                # we scale this by 90% to make sure the PID + prediction doesn't overshoot,
                # better to be conservative here.
                # equivalent to a weighted average: 0.1 * current + 0.9 * predicted
                return lambda rpm: self.duty_cycle - 0.90 * (
                    self.duty_cycle - (coef * rpm + intercept))
            else:
                return lambda rpm: self.duty_cycle

    def on_disconnected(self) -> None:

        with suppress(AttributeError):
            self.rpm_check_repeated_thread.cancel()

        with suppress(AttributeError):
            self.stop_stirring()
            self.pwm.cleanup()

        with suppress(AttributeError):
            if self.rpm_calculator:
                self.rpm_calculator.cleanup()

    def start_stirring(self) -> None:
        self.pwm.start(100)  # get momentum to start
        sleep(0.25)
        self.set_duty_cycle(self.duty_cycle)
        sleep(0.75)
        self.rpm_check_repeated_thread.start()  # .start is idempotent

    def poll(self, poll_for_seconds: float) -> Optional[float]:
        """
        Returns an RPM, or None if not measuring RPM.
        """
        if self.rpm_calculator is None:
            return None

        recent_rpm = self.rpm_calculator(poll_for_seconds)
        if recent_rpm == 0:
            # TODO: attempt to restart stirring
            self.publish(
                f"pioreactor/{self.unit}/{self.experiment}/monitor/flicker_led_with_error_code",
                error_codes.STIRRING_FAILED_ERROR_CODE,
            )
            self.logger.warning("Stirring RPM is 0 - has it failed?")

        if self._measured_rpm is not None:
            # use a simple EMA, alpha chosen arbitrarily, but should be a function of delta time.
            self._measured_rpm = 0.025 * self._measured_rpm + 0.975 * recent_rpm
        else:
            self._measured_rpm = recent_rpm

        self.measured_rpm = {
            "timestamp": current_utc_time(),
            "rpm": self._measured_rpm
        }
        return self._measured_rpm

    def poll_and_update_dc(self, poll_for_seconds: float) -> None:
        self.poll(poll_for_seconds)

        if self._measured_rpm is None:
            return

        result = self.pid.update(self._measured_rpm, dt=1)
        self.set_duty_cycle(self.duty_cycle + result)

    def stop_stirring(self) -> None:
        # if the user unpauses, we want to go back to their previous value, and not the default.
        self.set_duty_cycle(0)

    def on_ready_to_sleeping(self) -> None:
        self.rpm_check_repeated_thread.pause()
        self.stop_stirring()

    def on_sleeping_to_ready(self) -> None:
        self.duty_cycle = self._previous_duty_cycle
        self.rpm_check_repeated_thread.unpause()
        self.start_stirring()

    def set_duty_cycle(self, value: float) -> None:
        self._previous_duty_cycle = self.duty_cycle
        self.duty_cycle = clamp(0, round(float(value), 5), 100)
        self.pwm.change_duty_cycle(self.duty_cycle)

    def set_target_rpm(self, value: float) -> None:
        self.target_rpm = float(value)
        self.set_duty_cycle(self.rpm_to_dc_lookup(self.target_rpm))
        self.pid.set_setpoint(self.target_rpm)

    def block_until_rpm_is_close_to_target(self,
                                           abs_tolerance: float = 15) -> None:
        """
        This function blocks until the stirring is "close enough" to the target RPM.

        """
        if self.rpm_calculator is None:
            # can't block if we aren't recording the RPM
            return

        while (self._measured_rpm is not None
               ) and abs(self._measured_rpm - self.target_rpm) > abs_tolerance:
            sleep(0.25)
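Putting it together, a hypothetical usage sketch of this class; the RpmCalculator constructor and the helper imports are assumptions, not shown in the snippet above:

unit = get_unit_name()
experiment = get_latest_experiment_name()

st = Stirrer(target_rpm=400, unit=unit, experiment=experiment,
             rpm_calculator=RpmCalculator())
st.start_stirring()
st.block_until_rpm_is_close_to_target(abs_tolerance=20)

# nudge the setpoint later; both the PID setpoint and the calibration-based
# feed-forward duty cycle are updated
st.set_target_rpm(600)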