Example #1
def publish_multiple(
    list_of_topic_message_tuples, hostname=leader_hostname, retries=10, **mqtt_kwargs
):
    """
    list_of_topic_message_tuples is a list of tuples of the form ("<topic>", "<payload>", qos, retain)

    """
    for retry_count in range(retries):
        try:
            mqtt_publish.multiple(
                list_of_topic_message_tuples, hostname=hostname, **mqtt_kwargs
            )
            return
        except (ConnectionRefusedError, socket.gaierror, OSError, socket.timeout):
            # possible that leader is down/restarting, keep trying, but log to local machine.
            from pioreactor.logging import create_logger

            logger = create_logger("pubsub.publish_multiple", to_mqtt=False)
            logger.debug(
                f"Attempt {retry_count}: Unable to connect to host: {hostname}",
                exc_info=True,
            )
            time.sleep(5 * retry_count)  # linear backoff

    else:

        logger = create_logger("pubsub.publish_multiple", to_mqtt=False)
        logger.error(f"Unable to connect to host: {hostname}. Exiting.")
        raise ConnectionRefusedError(f"Unable to connect to host: {hostname}.")
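
A minimal usage sketch of the tuple layout described in the docstring, assuming this helper is importable from pioreactor.pubsub (the logger names above suggest it lives there); the topics and payloads here are invented:

from pioreactor.pubsub import publish_multiple

publish_multiple(
    [
        # each entry follows the documented ("<topic>", "<payload>", qos, retain) form
        ("pioreactor/worker1/exp1/stirring/target_rpm", "500", 0, False),
        ("pioreactor/worker1/exp1/stirring/$state", "ready", 1, True),
    ]
)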
Example #2
    def get(self, section: str, option: str, *args, **kwargs):  # type: ignore
        try:
            return super().get(section, option, *args, **kwargs)
        except (configparser.NoSectionError, configparser.NoOptionError):
            from pioreactor.logging import create_logger

            create_logger("read config").error(
                f"""No found in configuration: '{section}.{option}'. Are you missing the following in your config?

[{section}]
{option}=some value

""")
            raise
Example #3
def update(ui: bool, app: bool, dev: bool) -> None:
    import subprocess
    from json import loads
    from pioreactor.mureq import get

    logger = create_logger(
        "update", unit=get_unit_name(), experiment=UNIVERSAL_EXPERIMENT
    )

    if (not app) and (not ui):
        click.echo("Nothing to do. Specify either --app or --ui.")

    if app:

        if not dev:
            latest_release_metadata = loads(
                get(
                    "https://api.github.com/repos/pioreactor/pioreactor/releases/latest"
                ).body
            )
            latest_release_version = latest_release_metadata["name"]
            url_to_get_whl = f"https://github.com/Pioreactor/pioreactor/releases/download/{latest_release_version}/pioreactor-{latest_release_version}-py3-none-any.whl"

            command = f'sudo pip3 install "pioreactor @ {url_to_get_whl}"'
        else:
            latest_release_version = "master"
            command = "sudo pip3 install -U --force-reinstall https://github.com/pioreactor/pioreactor/archive/master.zip"

        p = subprocess.run(
            command,
            shell=True,
            universal_newlines=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
        )
        if p.returncode == 0:
            logger.info(f"Updated Pioreactor to version {latest_release_version}.")
        else:
            logger.error(p.stderr)

    if ui and am_I_leader():
        cd = "cd ~/pioreactorui/backend"
        gitp = "git pull origin master"
        npm_install = "npm install"
        setup = "pm2 restart ui"
        unedit_edited_files = "git checkout ."  # TODO: why do I do this. Can I be more specific than `.`? This blocks edits to the contrib folder from sticking around.
        command = " && ".join([cd, gitp, setup, npm_install, unedit_edited_files])
        p = subprocess.run(
            command,
            shell=True,
            universal_newlines=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
        )
        if p.returncode == 0:
            logger.info("Updated PioreactorUI to latest version.")
        else:
            logger.error(p.stderr)
Example #4
def export_experiment_data(experiment: str, output: str, tables: list) -> None:
    """
    If an experiment is provided, only its rows are exported; otherwise the entire table is exported.

    """
    import sqlite3
    import zipfile
    import csv

    logger = create_logger("export_experiment_data")
    logger.info(
        f"Starting export of table{'s' if len(tables) > 1 else ''}: {', '.join(tables)}."
    )

    time = datetime.now().strftime("%Y%m%d%H%M%S")  # %M, not %m, for minutes
    zf = zipfile.ZipFile(output, mode="w", compression=zipfile.ZIP_DEFLATED)
    con = sqlite3.connect(config["storage"]["database"])

    for table in tables:
        cursor = con.cursor()

        # so apparently, you can't parameterize the table name in python's sqlite3, so I
        # have to use string formatting (SQL-injection vector), but first check that the table exists (else fail)
        if not exists_table(cursor, table):
            raise ValueError(f"Table {table} does not exist.")

        timestamp_to_localtimestamp_clause = generate_timestamp_to_localtimestamp_clause(
            cursor, table)
        order_by = filter_to_timestamp_columns(get_column_names(
            cursor, table)).pop()  # just take first...

        if experiment is None:
            # like table names, column names can't be bound as parameters, so order_by
            # is interpolated directly - it comes from the table's own column list above.
            query = f"SELECT {timestamp_to_localtimestamp_clause} * from {table} ORDER BY {order_by}"
            cursor.execute(query)
            _filename = f"{table}-{time}.dump.csv".replace(" ", "_")

        else:
            query = f"SELECT {timestamp_to_localtimestamp_clause} * from {table} WHERE experiment=:experiment ORDER BY {order_by}"
            cursor.execute(query, {"experiment": experiment})
            _filename = f"{experiment}-{table}-{time}.dump.csv".replace(" ", "_")

        path_to_file = os.path.join(os.path.dirname(output), _filename)
        with open(path_to_file, "w") as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=",")
            csv_writer.writerow([i[0] for i in cursor.description])
            csv_writer.writerows(cursor)

        zf.write(path_to_file, arcname=_filename)

    con.close()
    zf.close()

    logger.info("Finished export.")
    return
Example #5
def kill(job: str, units: tuple[str, ...], all_jobs: bool, y: bool) -> None:
    """
    Send a SIGTERM signal to JOB. JOB can be any Pioreactor job name, like "stirring".
    Example:

    > pios kill stirring


    Multiple jobs are accepted:

    > pios kill stirring dosing_control


    Kill all worker jobs (i.e. this excludes leader jobs like watchdog). Ignores `job` argument.

    > pios kill --all


    """
    from sh import ssh  # type: ignore

    if not y:
        confirm = input(
            f"Confirm killing {str(job) if (not all_jobs) else 'all jobs'} on {units}? Y/n: "
        ).strip()
        if confirm != "Y":
            return

    command = f"pio kill {' '.join(job)}"
    command += "--all-jobs" if all_jobs else ""

    logger = create_logger("CLI",
                           unit=get_unit_name(),
                           experiment=get_latest_experiment_name())

    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}.")
        try:
            ssh(unit, command)
            if all_jobs:  # tech debt
                ssh(
                    unit,
                    "pio run led_intensity --intensity 0 --channel A --channel B --channel C --channel D --no-log",
                )
            return True

        except Exception as e:
            logger.debug(e, exc_info=True)
            logger.error(f"Unable to connect to unit {unit}.")
            return False

    units = universal_identifier_to_all_active_workers(units)
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
Example #6
 def _thread_function(unit: str) -> bool:
     click.echo(f"Executing `{core_command}` on {unit}.")
     try:
         ssh(unit, command)
         return True
     except Exception as e:
         logger = create_logger("CLI",
                                unit=get_unit_name(),
                                experiment=get_latest_experiment_name())
         logger.debug(e, exc_info=True)
         logger.error(f"Unable to connect to unit {unit}.")
         return False
Example #7
        def _callback(client: Client, userdata: dict, message):
            try:

                if not allow_retained and message.retain:
                    return

                return actual_callback(message)

            except Exception as e:
                from pioreactor.logging import create_logger

                logger = create_logger(userdata.get("job_name", "pioreactor"))
                logger.error(e, exc_info=True)
                raise e
Example #8
def publish(
    topic: str, message, hostname: str = leader_hostname, retries: int = 10, **mqtt_kwargs
):

    for retry_count in range(retries):
        try:
            mqtt_publish.single(topic, payload=message, hostname=hostname, **mqtt_kwargs)
            return
        except (ConnectionRefusedError, socket.gaierror, OSError, socket.timeout):
            # possible that leader is down/restarting, keep trying, but log to local machine.
            from pioreactor.logging import create_logger

            logger = create_logger("pubsub.publish", to_mqtt=False)
            logger.debug(
                f"Attempt {retry_count}: Unable to connect to host: {hostname}",
                exc_info=True,
            )
            time.sleep(5 * retry_count)  # linear backoff

    else:

        logger = create_logger("pubsub.publish", to_mqtt=False)
        logger.error(f"Unable to connect to host: {hostname}.")
        raise ConnectionRefusedError(f"Unable to connect to host: {hostname}.")
Example #9
def uninstall_plugin(name_of_plugin):

    logger = create_logger("uninstall_plugin", experiment=UNIVERSAL_EXPERIMENT)

    result = subprocess.run(
        [
            "bash",
            "/usr/local/bin/uninstall_pioreactor_plugin.sh",
            quote(name_of_plugin),
        ],
        capture_output=True,  # capture output so the debug logging below isn't None
        text=True,
    )

    if result.returncode == 0:
        logger.info(f"Successfully uninstalled plugin {name_of_plugin}.")
    else:
        logger.error(f"Failed to uninstall plugin {name_of_plugin}. See logs.")
        logger.debug(result.stdout)
        logger.debug(result.stderr)
Example #10
def get_latest_experiment_name() -> str:

    if os.environ.get("EXPERIMENT") is not None:
        return os.environ["EXPERIMENT"]
    elif is_testing_env():
        return "_testing_experiment"

    from pioreactor.pubsub import subscribe

    mqtt_msg = subscribe("pioreactor/latest_experiment", timeout=2)
    if mqtt_msg:
        return mqtt_msg.payload.decode()
    else:
        from pioreactor.logging import create_logger

        logger = create_logger("pioreactor", experiment=UNIVERSAL_EXPERIMENT)
        logger.info(
            "No experiment running. Try creating a new experiment first.")
        return NO_EXPERIMENT
Example #11
    def __init__(self,
                 pin: GpioPin,
                 hz: float,
                 always_use_software: bool = False) -> None:
        self.logger = create_logger("PWM")
        self.pin = pin
        self.hz = hz

        if self.is_locked():
            self.logger.debug(
                f"GPIO-{self.pin} is currently locked but a task is overwriting it. Either too many jobs are trying to access this pin, or a job didn't clean up properly."
            )

        gpio_helpers.set_gpio_availability(
            self.pin, gpio_helpers.GPIO_states.GPIO_UNAVAILABLE)

        if (not always_use_software) and (pin in self.HARDWARE_PWM_CHANNELS):

            self.pwm = HardwarePWM(self.HARDWARE_PWM_CHANNELS[self.pin],
                                   self.hz)

        else:

            import RPi.GPIO as GPIO  # type: ignore

            GPIO.setmode(GPIO.BCM)
            GPIO.setup(self.pin, GPIO.OUT, initial=GPIO.LOW)

            if self.hz > 2000:
                self.logger.warning(
                    "Setting a PWM to a very high frequency with software. Did you mean to use a hardware PWM?"
                )

            self.pwm = GPIO.PWM(self.pin, self.hz)

        with local_intermittent_storage("pwm_hz") as cache:
            cache[str(self.pin)] = str(self.hz)

        self.logger.debug(
            f"Initialized PWM-{self.pin} with {'hardware' if self.using_hardware else 'software'}, initial frequency is {self.hz}hz."
        )
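
A sketch of how this class is typically driven, following the pump pattern in Example #21 below; the import path, pin, and frequency here are assumptions:

from pioreactor.utils.pwm import PWM  # assumed import path

pwm = PWM(pin=12, hz=200.0)  # made-up GPIO pin and frequency
pwm.lock()
try:
    pwm.start(66)  # duty cycle in percent, as in the calibration "dc" values used elsewhere
    # ... run for however long is needed ...
finally:
    pwm.stop()
    pwm.cleanup()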
Example #12
    def add_pioreactor(new_name: str) -> None:
        """
        Add a new pioreactor worker to the cluster. The pioreactor should already have the worker image installed and be turned on.

        """
        # TODO: move this to its own file
        import socket
        import subprocess

        logger = create_logger(
            "add_pioreactor", unit=get_unit_name(), experiment=UNIVERSAL_EXPERIMENT
        )
        logger.info(f"Adding new pioreactor {new_name} to cluster.")

        # TODO: check that new_name isn't already taken on the network

        # wait for the new Raspberry Pi (new_name) to appear on the network
        checks, max_checks = 0, 20
        while not networking.is_hostname_on_network(new_name):
            checks += 1
            try:
                socket.gethostbyname(new_name)
            except socket.gaierror:
                sleep(3)
                click.echo(f"`{new_name}` not found on network - checking again.")
                if checks >= max_checks:
                    logger.error(
                        f"`{new_name}` not found on network after {max_checks} seconds. Check that you provided the right WiFi credentials to the network, and that the Raspberry Pi is turned on."
                    )
                    sys.exit(1)

        res = subprocess.call(
            [
                "bash /usr/local/bin//add_new_pioreactor_worker_from_leader.sh %s"
                % (new_name)
            ],
            shell=True,
        )
        if res == 0:
            logger.info(f"New pioreactor {new_name} successfully added to cluster.")
Example #13
def update(units: tuple[str, ...], dev: bool) -> None:
    """
    Pulls and installs the latest code
    """
    import paramiko  # type: ignore

    logger = create_logger("update",
                           unit=get_unit_name(),
                           experiment=get_latest_experiment_name())

    if dev:
        command = "pio update --app --dev"
    else:
        command = "pio update --app"

    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}...")
        try:

            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="******", compress=True)

                (stdin, stdout, stderr) = client.exec_command(command)
                for line in stderr.readlines():
                    pass

            return True

        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    units = universal_identifier_to_all_active_workers(units)
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
Example #14
def sync_configs(units: tuple[str, ...], shared: bool, specific: bool) -> None:
    """
    Deploys the shared config.ini and worker specific config.inis to the workers.

    If neither `--shared` nor `--specific` is specified, both are set to true.
    """
    import paramiko

    logger = create_logger("sync_configs",
                           unit=get_unit_name(),
                           experiment=get_latest_experiment_name())
    units = universal_identifier_to_all_active_workers(units)

    if not shared and not specific:
        shared = specific = True

    def _thread_function(unit: str) -> bool:
        logger.debug(f"Syncing configs on {unit}...")
        try:
            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="******", compress=True)

                with client.open_sftp() as ftp_client:
                    sync_config_files(ftp_client, unit, shared, specific)

            return True
        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    # save config.inis to database
    save_config_files_to_db(units, shared, specific)

    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
Example #15
def start_led_control(
    automation_name: str,
    duration: Optional[float] = None,
    skip_first_run=False,
    unit: Optional[str] = None,
    experiment: Optional[str] = None,
    **kwargs,
) -> LEDController:
    try:
        return LEDController(
            automation_name=automation_name,
            unit=unit or get_unit_name(),
            experiment=experiment or get_latest_experiment_name(),
            skip_first_run=skip_first_run,
            duration=duration,
            **kwargs,
        )

    except Exception as e:
        logger = create_logger("led_automation")
        logger.error(e)
        logger.debug(e, exc_info=True)
        raise e
Example #16
def uninstall_plugin(plugin: str, units: tuple[str, ...]) -> None:
    """
    Uninstalls a plugin from worker and leader
    """
    import paramiko

    logger = create_logger("uninstall_plugin",
                           unit=get_unit_name(),
                           experiment=get_latest_experiment_name())

    command = f"pio uninstall-plugin {plugin}"

    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}...")
        try:

            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="******", compress=True)

                (stdin, stdout, stderr) = client.exec_command(command)
                for line in stderr.readlines():
                    pass

            return True

        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    units = add_leader(universal_identifier_to_all_active_workers(units))
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
Example #17
def start_dosing_control(
    automation_name: str,
    duration: Optional[float] = None,
    skip_first_run: bool = False,
    unit: Optional[str] = None,
    experiment: Optional[str] = None,
    **kwargs,
) -> DosingController:
    unit = unit or get_unit_name()
    experiment = experiment or get_latest_experiment_name()

    try:

        kwargs["duration"] = duration
        kwargs["unit"] = unit
        kwargs["experiment"] = experiment
        kwargs["skip_first_run"] = skip_first_run
        return DosingController(automation_name, **kwargs)  # noqa: F841

    except Exception as e:
        logger = create_logger("dosing_automation")
        logger.error(e)
        logger.debug(e, exc_info=True)
        raise e
Example #18
def click_self_test(k: str) -> int:
    """
    Test the input/output in the Pioreactor
    """
    import sys

    unit = get_unit_name()
    testing_experiment = get_latest_testing_experiment_name()
    experiment = get_latest_experiment_name()
    logger = create_logger("self_test", unit=unit, experiment=experiment)

    with publish_ready_to_disconnected_state(unit, testing_experiment, "self_test"):

        if is_pio_job_running("od_reading", "temperature_automation", "stirring"):
            logger.error(
                "Make sure Optical Density, Temperature Automation, and Stirring are off before running a self test. Exiting."
            )
            return 1

        functions_to_test = [
            (name, f)
            for (name, f) in vars(sys.modules[__name__]).items()
            if name.startswith("test_")
        ]  # automagically finds the test_ functions.
        if k:
            functions_to_test = [
                (name, f) for (name, f) in functions_to_test if (k in name)
            ]

        # clear the mqtt cache
        for name, _ in functions_to_test:
            publish(
                f"pioreactor/{unit}/{testing_experiment}/self_test/{name}",
                None,
                retain=True,
            )

        count_tested: int = 0
        count_passed: int = 0
        for name, test in functions_to_test:

            try:
                test(logger, unit, testing_experiment)
            except Exception:
                import traceback

                traceback.print_exc()

                res = False
            else:
                res = True

            logger.debug(f"{name}: {'✅' if res else '❌'}")

            count_tested += 1
            count_passed += res

            publish(
                f"pioreactor/{unit}/{testing_experiment}/self_test/{name}",
                int(res),
                retain=True,
            )

        publish(
            f"pioreactor/{unit}/{testing_experiment}/self_test/all_tests_passed",
            int(count_passed == count_tested),
            retain=True,
        )

        if count_passed == count_tested:
            logger.info("All tests passed ✅")
        else:
            logger.info(
                f"{count_tested-count_passed} failed test{'s' if (count_tested-count_passed) > 1 else ''}."
            )

        return int(count_passed != count_tested)
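
Because each result is published with retain=True, the aggregate flag can be read back later; a sketch using the subscribe helper from Example #22 (unit and experiment values are placeholders):

testing_experiment = "..."  # whatever get_latest_testing_experiment_name() returns
msg = subscribe(f"pioreactor/worker1/{testing_experiment}/self_test/all_tests_passed", timeout=2)
if msg is not None:
    print("all tests passed" if msg.payload.decode() == "1" else "some tests failed")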
Example #19
def led_intensity(
    channels: LedChannel | list[LedChannel],
    intensities: float | list[float],
    unit: str,
    experiment: str,
    verbose: bool = True,
    source_of_event: Optional[str] = None,
    pubsub_client: Optional[Client] = None,
) -> bool:
    """

    Parameters
    ------------
    channels: an LED channel or a list of channels
    intensities: float or list of floats
        a value between 0 and 100 to set the LED channel(s) to.
    verbose: bool
        if True, log the change, and send an event to the led_event table & MQTT. This is False
        in the od_reading job, so as to not create spam.
    pubsub_client:
        provide an MQTT paho client to use for publishing.

    Returns
    --------
    bool representing whether all LED channel intensities were successfully changed


    State is also updated in

    pioreactor/<unit>/<experiment>/led/<channel>/intensity   <intensity>

    and

    pioreactor/<unit>/<experiment>/leds/intensity    {'A': intensityA, 'B': intensityB, ...}

    """
    logger = create_logger("led_intensity", experiment=experiment, unit=unit)
    updated_successfully = True
    if not is_testing_env():
        from DAC43608 import DAC43608
    else:
        logger.debug("DAC43608 not available; using MockDAC43608")
        from pioreactor.utils.mock import MockDAC43608 as DAC43608  # type: ignore

    if pubsub_client is None:
        pubsub_client = create_client()

    channels, intensities = _list(channels), _list(intensities)

    if len(channels) != len(intensities):
        raise ValueError("channels must be the same length as intensities")

    # any locked channels?
    for channel in channels:
        if is_led_channel_locked(channel):
            updated_successfully = False
            logger.warning(
                f"Unable to update channel {channel} due to a lock on it. Please try again."
            )

    # remove locked channels:
    try:
        channels, intensities = zip(  # type: ignore
            *[
                (c, i)
                for c, i in zip(channels, intensities)
                if not is_led_channel_locked(c)
            ]
        )
    except ValueError:
        # if the only channel being updated is locked, the resulting error is a ValueError: not enough values to unpack (expected 2, got 0)
        return updated_successfully

    for channel, intensity in zip(channels, intensities):
        try:
            assert (
                0.0 <= intensity <= 100.0
            ), "intensity should be between 0 and 100, inclusive"
            assert (
                channel in ALL_LED_CHANNELS
            ), f"saw incorrect channel {channel}, not in {ALL_LED_CHANNELS}"
            intensity = float(intensity)

            dac = DAC43608()
            dac.power_up(getattr(dac, channel))
            dac.set_intensity_to(getattr(dac, channel), intensity / 100.0)

            if intensity == 0:
                # setting to 0 doesn't fully remove the current, there is some residual current. We turn off
                # the channel to guarantee no output.
                dac.power_down(getattr(dac, channel))

            pubsub_client.publish(
                f"pioreactor/{unit}/{experiment}/led/{channel}/intensity",
                intensity,
                qos=QOS.AT_MOST_ONCE,
                retain=True,
            )

        except ValueError as e:
            logger.debug(e, exc_info=True)
            logger.error(
                "Unable to find I²C for LED driver. Is the Pioreactor HAT attached to the Raspberry Pi? Is I²C enabled on the Raspberry Pi?"
            )
            updated_successfully = False
            return updated_successfully

    new_state, old_state = _update_current_state(channels, intensities)

    pubsub_client.publish(
        f"pioreactor/{unit}/{experiment}/leds/intensity",
        dumps(new_state),
        qos=QOS.AT_MOST_ONCE,
        retain=True,
    )

    if verbose:
        for channel, intensity in zip(channels, intensities):
            event = {
                "channel": channel,
                "intensity": intensity,
                "source_of_event": source_of_event,
                "timestamp": current_utc_time(),
            }

            pubsub_client.publish(
                f"pioreactor/{unit}/{experiment}/led_events",
                dumps(event),
                qos=QOS.AT_MOST_ONCE,
                retain=False,
            )

            logger.info(
                f"Updated LED {channel} from {old_state[channel]:0.3g}% to {new_state[channel]:0.3g}%."
            )

    return updated_successfully
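
A minimal call sketch based on the signature and docstring above; the unit and experiment strings are placeholders:

ok = led_intensity(
    channels=["B", "C"],
    intensities=[50.0, 0.0],  # percent, between 0 and 100
    unit="worker1",
    experiment="my_experiment",
    source_of_event="manual",
)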
Example #20
    def on_connect(client: Client, userdata, flags, rc: int, properties=None):
        if rc > 1:
            from pioreactor.logging import create_logger

            logger = create_logger("pubsub.create_client", to_mqtt=False)
            logger.error(f"Connection failed with error code {rc=}: {connack_string(rc)}")
Example #21
def pump(
    unit: str,
    experiment: str,
    pump_name: str,
    ml: Optional[float] = None,
    duration: Optional[float] = None,
    source_of_event: Optional[str] = None,
    calibration: Optional[dict] = None,
    continuously: bool = False,
):

    """

    Parameters
    ------------
    pump_name: one of "media", "alt_media", "waste"
    calibration:
        specify a calibration for the dosing. Should be a dict
        with fields "duration_", "hz", "dc", and "bias_"

    Returns
    -----------
    Amount of volume passed (approximate in some cases)


    """
    action_name = {
        "media": "add_media",
        "alt_media": "add_alt_media",
        "waste": "remove_waste",
    }[pump_name]
    logger = create_logger(action_name)
    with utils.publish_ready_to_disconnected_state(
        unit, experiment, action_name
    ) as exit_event:
        assert (
            (ml is not None) or (duration is not None) or continuously
        ), "either ml or duration must be set"
        assert not (
            (ml is not None) and (duration is not None)
        ), "Only select ml or duration"

        if calibration is None:
            with utils.local_persistant_storage("pump_calibration") as cache:
                try:
                    calibration = loads(cache[f"{pump_name}_ml_calibration"])
                except KeyError:
                    logger.error("Calibration not defined. Run pump calibration first.")
                    return 0.0

        try:
            GPIO_PIN = PWM_TO_PIN[config.get("PWM_reverse", pump_name)]
        except NoOptionError:
            logger.error(f"Add `{pump_name}` to `PWM` section to config_{unit}.ini.")
            return 0.0

        if ml is not None:
            assert ml >= 0, "ml should be greater than or equal to 0"
            duration = utils.pump_ml_to_duration(
                ml, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(ml, 2)}mL")
        elif duration is not None:
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info(f"{round(duration, 2)}s")
        elif continuously:
            duration = 600
            ml = utils.pump_duration_to_ml(
                duration, calibration["duration_"], calibration["bias_"]
            )
            logger.info("Running pump continuously.")

        assert isinstance(ml, (float, int))
        assert isinstance(duration, (float, int))
        assert duration >= 0, "duration should be greater than or equal to 0"
        if duration == 0:
            return 0.0

        # publish this first, as downstream jobs need to know about it.
        json_output = dumps(
            {
                "volume_change": ml,
                "event": action_name,
                "source_of_event": source_of_event,
                "timestamp": current_utc_time(),
            }
        )
        publish(
            f"pioreactor/{unit}/{experiment}/dosing_events",
            json_output,
            qos=QOS.EXACTLY_ONCE,
        )

        try:

            pwm = PWM(GPIO_PIN, calibration["hz"])
            pwm.lock()

            with catchtime() as delta_time:
                pwm.start(calibration["dc"])
                pump_start_time = time.time()

            exit_event.wait(max(0, duration - delta_time()))

            if continuously:
                while not exit_event.wait(duration):
                    publish(
                        f"pioreactor/{unit}/{experiment}/dosing_events",
                        json_output,
                        qos=QOS.EXACTLY_ONCE,
                    )

        except SystemExit:
            # a SigInt, SigKill occurred
            pass
        except Exception as e:
            # some other unexpected error
            logger.debug(e, exc_info=True)
            logger.error(e)

        finally:
            pwm.stop()
            pwm.cleanup()
            if continuously:
                logger.info(f"Stopping {pump_name} pump.")

            if exit_event.is_set():
                # ended early for some reason
                shortened_duration = time.time() - pump_start_time
                ml = utils.pump_duration_to_ml(
                    shortened_duration, calibration["duration_"], calibration["bias_"]
                )
        return ml
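
A call sketch based on the signature above; unit and experiment are placeholders, and the volume is arbitrary:

ml_dosed = pump(
    unit="worker1",
    experiment="my_experiment",
    pump_name="media",  # one of "media", "alt_media", "waste"
    ml=1.5,  # alternatively pass duration=..., or continuously=True
    source_of_event="manual",
)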
Example #22
def subscribe(
    topics: str | list[str],
    hostname=leader_hostname,
    retries: int = 10,
    timeout: Optional[float] = None,
    allow_retained: bool = True,
    **mqtt_kwargs,
) -> Optional[MQTTMessage]:
    """
    Modeled closely after the paho version, this also includes some try/excepts and
    a timeout. Note that this _does_ disconnect after receiving a single message.

    A failure case occurs if this is called in a thread (eg: a callback) and is waiting
    indefinitely for a message. The parent job may not exit properly.

    """

    retry_count = 1
    for retry_count in range(retries):
        try:

            lock: Optional[threading.Lock]

            def on_connect(client, userdata, flags, rc):
                client.subscribe(userdata["topics"])
                return

            def on_message(client, userdata, message: MQTTMessage):
                if not allow_retained and message.retain:
                    return

                userdata["messages"] = message
                client.disconnect()

                if userdata["lock"]:
                    userdata["lock"].release()

                return

            if timeout:
                lock = threading.Lock()
            else:
                lock = None

            topics = [topics] if isinstance(topics, str) else topics
            userdata: dict[str, Any] = {
                "topics": [(topic, mqtt_kwargs.pop("qos", 0)) for topic in topics],
                "messages": None,
                "lock": lock,
            }

            client = Client(userdata=userdata)
            client.on_connect = on_connect
            client.on_message = on_message
            client.connect(hostname)  # respect the hostname argument rather than always using leader_hostname

            if timeout is None:
                client.loop_forever()
            else:
                assert lock is not None
                lock.acquire()
                client.loop_start()
                lock.acquire(timeout=timeout)
                client.loop_stop()
                client.disconnect()

            return userdata["messages"]

        except (ConnectionRefusedError, socket.gaierror, OSError, socket.timeout):
            from pioreactor.logging import create_logger

            logger = create_logger("pubsub.subscribe", to_mqtt=False)
            logger.debug(
                f"Attempt {retry_count}: Unable to connect to host: {hostname}",
                exc_info=True,
            )

            time.sleep(5 * retry_count)  # linear backoff

    else:
        logger = create_logger("pubsub.subscribe", to_mqtt=False)
        logger.error(f"Unable to connect to host: {hostname}. Exiting.")
        raise ConnectionRefusedError(f"Unable to connect to host: {hostname}.")
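
A usage sketch mirroring Example #10 above: fetch a single (possibly retained) message, or None on timeout.

msg = subscribe("pioreactor/latest_experiment", timeout=2)
if msg is not None:
    print(msg.payload.decode())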
Example #23
def backup_database(output_file: str) -> None:
    """
    This action will create a backup of the SQLite3 database into the specified output. It then
    will try to copy the backup to any available worker Pioreactors as a further backup.

    This job actually consumes _a lot_ of resources, and I've seen the LED output
    drop due to this running. See issue #81. For now, we will skip the backup if `od_reading` is running.

    Elsewhere, a cronjob is also set up to run this action every N days.
    """

    import sqlite3
    from sh import ErrorReturnCode, rsync  # type: ignore

    unit = get_unit_name()
    experiment = UNIVERSAL_EXPERIMENT

    with publish_ready_to_disconnected_state(unit, experiment,
                                             "backup_database"):

        logger = create_logger("backup_database",
                               experiment=experiment,
                               unit=unit)

        if is_pio_job_running("od_reading"):
            logger.warning("Won't run if OD Reading is running. Exiting")
            return

        def progress(status: int, remaining: int, total: int) -> None:
            logger.debug(f"Copied {total-remaining} of {total} SQLite3 pages.")
            logger.debug(f"Writing to local backup {output_file}.")

        logger.debug(f"Starting backup of database to {output_file}")
        sleep(
            1
        )  # pause a second so the log entry above gets recorded into the DB.

        con = sqlite3.connect(config.get("storage", "database"))
        bck = sqlite3.connect(output_file)

        with bck:
            con.backup(bck, pages=-1, progress=progress)

        bck.close()
        con.close()

        with local_persistant_storage("database_backups") as cache:
            cache["latest_backup_timestamp"] = current_utc_time()

        logger.info("Completed backup of database.")

        # ConfigParser.getint needs (section, option); the [storage] section is assumed here.
        n_backups = config.getint(
            "storage", "number_of_backup_replicates_to_workers", fallback=2
        )
        backups_complete = 0
        available_workers = list(get_active_workers_in_inventory())

        while (backups_complete < n_backups) and (len(available_workers) > 0):
            backup_unit = available_workers.pop()
            if backup_unit == get_unit_name():
                continue

            try:
                rsync(
                    "-hz",
                    "--partial",
                    "--inplace",
                    output_file,
                    f"{backup_unit}:{output_file}",
                )
            except ErrorReturnCode:
                logger.debug(
                    f"Unable to backup database to {backup_unit}. Is it online?",
                    exc_info=True,
                )
                logger.warning(
                    f"Unable to backup database to {backup_unit}. Is it online?"
                )
            else:
                logger.debug(
                    f"Backed up database to {backup_unit}:{output_file}.")
                backups_complete += 1

        return
Example #24
def pump_calibration(min_duration: float, max_duration: float) -> None:

    unit = get_unit_name()
    experiment = get_latest_experiment_name()

    logger = create_logger("pump_calibration",
                           unit=unit,
                           experiment=experiment)
    logger.info("Starting pump calibration.")

    with publish_ready_to_disconnected_state(unit, experiment,
                                             "pump_calibration"):

        click.clear()
        click.echo()
        pump_name, execute_pump = which_pump_are_you_calibrating()

        hz, dc = choose_settings()

        setup(pump_name, execute_pump, hz, dc)
        durations, volumes = run_tests(execute_pump, hz, dc, min_duration,
                                       max_duration)

        (slope, std_slope), (bias, std_bias) = simple_linear_regression_with_forced_nil_intercept(
            durations, volumes
        )

        # check parameters for problems
        if slope < 0:
            logger.warning(
                "Slope is negative - you probably want to rerun this calibration..."
            )
        if slope / std_slope < 5.0:
            logger.warning(
                "Too much uncertainty in slope - you probably want to rerun this calibration..."
            )

        # save to cache
        with local_persistant_storage("pump_calibration") as cache:
            cache[f"{pump_name}_ml_calibration"] = json.dumps({
                "duration_":
                slope,
                "hz":
                hz,
                "dc":
                dc,
                "bias_":
                bias,
                "timestamp":
                current_utc_time(),
            })
            cache[f"{pump_name}_calibration_data"] = json.dumps({
                "timestamp":
                current_utc_time(),
                "data": {
                    "durations": durations,
                    "volumes": volumes
                },
            })

        logger.debug(
            f"slope={slope:0.2f} ± {std_slope:0.2f}, bias={bias:0.2f} ± {std_bias:0.2f}"
        )

        logger.debug(
            f"Calibration is best for volumes between {(slope * min_duration + bias):0.1f}mL to {(slope * max_duration + bias):0.1f}mL, but will be okay for slightly outside this range too."
        )
        logger.info("Finished pump calibration.")
Example #25
            else:
                try:
                    ip = socket.gethostbyname(hostname)
                except OSError:
                    ip = "Unknown"

            # get state
            result = subscribe(
                f"pioreactor/{hostname}/{UNIVERSAL_EXPERIMENT}/monitor/$state", timeout=1
            )
            if result:
                state = result.payload.decode()
            else:
                state = "Unknown"

            state = click.style(f"{state:15s}", fg="green" if state == "ready" else "red")

            # is reachable?
            reachable = networking.is_reachable(hostname)

            click.echo(
                f"{hostname:20s} {('Y' if hostname==get_leader_hostname() else 'N'):15s} {ip:20s} {state} {(  click.style('Y', fg='green') if reachable else click.style('N', fg='red') ):10s}"
            )


if not am_I_leader() and not am_I_active_worker():
    logger = create_logger("CLI", unit=get_unit_name(), experiment=UNIVERSAL_EXPERIMENT)
    logger.info(
        f"Running `pio` on a non-active Pioreactor. Do you need to change `{get_unit_name()}` in `network.inventory` section in `config.ini`?"
    )
Example #26
def od_blank(
    od_angle_channel1,
    od_angle_channel2,
    n_samples: int = 30,
):
    """
    Compute the sample average of the photodiodes attached.

    Note that because of the sensitivity of the growth rate (and normalized OD) to the starting values,
    we need a very accurate estimate of these statistics.

    """
    from statistics import mean, variance

    action_name = "od_blank"
    logger = create_logger(action_name)
    unit = get_unit_name()
    experiment = get_latest_experiment_name()
    testing_experiment = get_latest_testing_experiment_name()
    logger.info("Starting reading of blank OD. This will take a few minutes.")

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        # running this will mess with OD Reading - best to just not let it happen.
        if (is_pio_job_running("od_reading")
                # but if test mode, ignore
                and not is_testing_env()):
            logger.error(
                "od_reading should not be running. Stop od_reading first. Exiting."
            )
            return

        # turn on stirring if not already on
        if not is_pio_job_running("stirring"):
            # start stirring
            st = start_stirring(
                target_rpm=config.getint("stirring", "target_rpm"),
                unit=unit,
                experiment=testing_experiment,
            )
            st.block_until_rpm_is_close_to_target()
        else:
            # TODO: it could be paused, we should make sure it's running
            ...

        sampling_rate = 1 / config.getfloat("od_config", "samples_per_second")

        # start od_reading
        start_od_reading(
            od_angle_channel1,
            od_angle_channel2,
            sampling_rate=sampling_rate,
            unit=unit,
            experiment=testing_experiment,
            fake_data=is_testing_env(),
        )

        def yield_from_mqtt():
            while True:
                msg = pubsub.subscribe(
                    f"pioreactor/{unit}/{testing_experiment}/od_reading/od_raw_batched"
                )
                yield json.loads(msg.payload)

        signal = yield_from_mqtt()
        readings = defaultdict(list)

        for count, batched_reading in enumerate(signal, start=1):
            for (channel, reading) in batched_reading["od_raw"].items():
                readings[channel].append(reading["voltage"])

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                count / n_samples * 100,  # true division, so progress is reported incrementally
            )
            logger.debug(f"Progress: {count/n_samples:.0%}")
            if count == n_samples:
                break

        means = {}
        variances = {}
        autocorrelations = {}  # lag 1

        for channel, od_reading_series in readings.items():
            # measure the mean and publish. The mean will be used to normalize the readings in downstream jobs
            means[channel] = mean(od_reading_series)
            variances[channel] = variance(od_reading_series)
            autocorrelations[channel] = correlation(od_reading_series[:-1],
                                                    od_reading_series[1:])

            # warn users that a blank is 0 - maybe this should be an error instead? TODO: link this to a docs page.
            if means[channel] == 0.0:
                logger.warning(
                    f"OD reading for PD Channel {channel} is 0.0 - that shouldn't be. Is there a loose connection, or an extra channel in the configuration's [od_config.photodiode_channel] section?"
                )

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/od_blank/{channel}",
                json.dumps({
                    "timestamp": current_utc_time(),
                    "od_reading_v": means[channel]
                }),
            )

        # store locally as the source of truth.
        with local_persistant_storage(action_name) as cache:
            cache[experiment] = json.dumps(means)

        # publish to UI and database
        pubsub.publish(
            f"pioreactor/{unit}/{experiment}/{action_name}/mean",
            json.dumps(means),
            qos=pubsub.QOS.AT_LEAST_ONCE,
            retain=True,
        )

        if config.getboolean(
                "data_sharing_with_pioreactor",
                "send_od_statistics_to_Pioreactor",
                fallback=False,
        ):
            to_share = {"mean": means, "variance": variances}
            to_share["ir_intensity"] = config["od_config"]["ir_intensity"]
            to_share["od_angle_channel1"] = od_angle_channel1
            to_share["od_angle_channel2"] = od_angle_channel2
            pubsub.publish_to_pioreactor_cloud("od_blank_mean", json=to_share)

        logger.debug(f"measured mean: {means}")
        logger.debug(f"measured variances: {variances}")
        logger.debug(f"measured autocorrelations: {autocorrelations}")
        logger.debug("OD normalization finished.")

        return means
Example #27
def stirring_calibration(min_dc: int, max_dc: int) -> None:

    unit = get_unit_name()
    experiment = get_latest_testing_experiment_name()
    action_name = "stirring_calibration"
    logger = create_logger(action_name)

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        logger.info("Starting stirring calibration.")

        if is_pio_job_running("stirring"):
            logger.error(
                "Make sure Stirring job is off before running stirring calibration. Exiting."
            )
            return

        measured_rpms = []

        # go up and down to observe any hystersis.
        dcs = (list(range(max_dc, min_dc, -3)) +
               list(range(min_dc, max_dc, 4)) +
               list(range(max_dc, min_dc, -5)))

        with stirring.RpmFromFrequency() as rpm_calc, stirring.Stirrer(
                target_rpm=0,
                unit=unit,
                experiment=experiment,
                rpm_calculator=None,
        ) as st:

            st.duty_cycle = dcs[0]
            st.start_stirring()
            time.sleep(8)
            n_samples = len(dcs)

            for count, dc in enumerate(dcs, start=1):
                st.set_duty_cycle(dc)
                time.sleep(8)
                rpm = rpm_calc(4)
                measured_rpms.append(rpm)
                logger.debug(f"Detected {rpm=} RPM @ {dc=}%")

                # log progress
                publish(
                    f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                    count / n_samples * 100,
                )
                logger.debug(f"Progress: {count/n_samples:.0%}")

        publish_to_pioreactor_cloud(action_name,
                                    json=dict(zip(dcs, measured_rpms)))
        logger.debug(list(zip(dcs, measured_rpms)))

        # drop any 0 in RPM, too little DC
        try:
            filtered_dcs, filtered_measured_rpms = zip(
                *filter(lambda d: d[1] > 0, zip(dcs, measured_rpms)))
        except ValueError:
            # the above can fail if all measured rpms are 0
            logger.error("No RPMs were measured. Is the stirring spinning?")
            return

        # since in practice, we want a look up from RPM -> required DC, we
        # set x=measure_rpms, y=dcs
        (rpm_coef, rpm_coef_std), (intercept, intercept_std) = simple_linear_regression(
            filtered_measured_rpms, filtered_dcs
        )
        logger.debug(f"{rpm_coef=}, {rpm_coef_std=}, {intercept=}, {intercept_std=}")

        if rpm_coef <= 0:
            logger.warning(
                "Something went wrong - detected negative correlation between RPM and stirring."
            )
            return

        if intercept <= 0:
            logger.warning(
                "Something went wrong - the intercept should be greater than 0."
            )
            return

        with local_persistant_storage(action_name) as cache:
            cache["linear_v1"] = json.dumps({
                "rpm_coef": rpm_coef,
                "intercept": intercept,
                "timestamp": current_utc_time(),
            })
            cache["stirring_calibration_data"] = json.dumps({
                "timestamp":
                current_utc_time(),
                "data": {
                    "dcs": dcs,
                    "measured_rpms": measured_rpms
                },
            })
Example #28
def od_normalization(
    unit: str,
    experiment: str,
    n_samples: int = 35
) -> tuple[dict[PdChannel, float], dict[PdChannel, float]]:
    from statistics import mean, variance

    action_name = "od_normalization"
    logger = create_logger(action_name)
    logger.debug("Starting OD normalization.")

    with publish_ready_to_disconnected_state(unit, experiment, action_name):

        if (not (is_pio_job_running("od_reading"))
                # but if test mode, ignore
                and not is_testing_env()):
            logger.error(
                " OD Reading should be running. Run OD Reading first. Exiting."
            )
            raise exc.JobRequiredError(
                "OD Reading should be running. Run OD Reading first. Exiting.")

        # TODO: write tests for this
        def yield_from_mqtt() -> Generator[dict, None, None]:
            while True:
                msg = pubsub.subscribe(
                    f"pioreactor/{unit}/{experiment}/od_reading/od_raw_batched",
                    allow_retained=False,
                )
                if msg is None:
                    continue

                yield json.loads(msg.payload)

        signal = yield_from_mqtt()
        readings = defaultdict(list)

        for count, batched_reading in enumerate(signal, start=1):
            for (sensor, reading) in batched_reading["od_raw"].items():
                readings[sensor].append(reading["voltage"])

            pubsub.publish(
                f"pioreactor/{unit}/{experiment}/{action_name}/percent_progress",
                count / n_samples * 100,  # true division, so progress is reported incrementally
            )
            logger.debug(f"Progress: {count/n_samples:.0%}")
            if count == n_samples:
                break

        variances = {}
        means = {}
        autocorrelations = {}  # lag 1

        for sensor, od_reading_series in readings.items():
            variances[sensor] = variance(
                residuals_of_simple_linear_regression(list(range(n_samples)), od_reading_series)
            )  # see issue #206
            means[sensor] = mean(od_reading_series)
            autocorrelations[sensor] = correlation(od_reading_series[:-1],
                                                   od_reading_series[1:])

        with local_persistant_storage("od_normalization_mean") as cache:
            cache[experiment] = json.dumps(means)

        with local_persistant_storage("od_normalization_variance") as cache:
            cache[experiment] = json.dumps(variances)

        logger.debug(f"measured mean: {means}")
        logger.debug(f"measured variances: {variances}")
        logger.debug(f"measured autocorrelations: {autocorrelations}")
        logger.debug("OD normalization finished.")

        if config.getboolean(
                "data_sharing_with_pioreactor",
                "send_od_statistics_to_Pioreactor",
                fallback=False,
        ):

            add_on = {
                "ir_intensity": config["od_config"]["ir_intensity"],
            }

            pubsub.publish_to_pioreactor_cloud(
                "od_normalization_variance",
                json={
                    **variances,
                    **add_on,
                },  # TODO: this syntax changed in a recent python version...
            )
            pubsub.publish_to_pioreactor_cloud(
                "od_normalization_mean",
                json={
                    **means,
                    **add_on
                },
            )

        return means, variances
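
The per-experiment statistics persisted above can be read back by downstream jobs; a sketch mirroring the writes (local_persistant_storage is assumed imported as in the example):

import json

with local_persistant_storage("od_normalization_mean") as cache:
    means = json.loads(cache["my_experiment"])  # keyed by experiment name, as written above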