def backup_database(output):
    """
    Create a backup of the SQLite3 database at the path given by ``output``.

    After the local backup completes, best-effort scp the backup file to up
    to two available worker Pioreactors as a further backup. A cronjob is
    set up as well to run this action every 12 hours.

    Parameters
    ----------
    output : str
        Filesystem path to write the backup database file to.
    """
    import sqlite3
    from contextlib import closing
    from sh import scp, ErrorReturnCode

    def progress(status, remaining, total):
        # sqlite3 backup progress callback, invoked after each batch of pages.
        logger.debug(f"Copied {total-remaining} of {total} pages.")

    logger.debug(f"Starting backup of database to {output}")

    # `closing` guarantees both connections are released even if the backup
    # raises part-way; the previous code leaked them on error.
    with closing(sqlite3.connect(config.get("storage", "database"))) as con:
        with closing(sqlite3.connect(output)) as bck:
            # `with bck:` commits the backup transaction on success.
            with bck:
                con.backup(bck, pages=-1, progress=progress)

    logger.debug(
        f"Completed backup of database to {output}. Attempting distributed backup..."
    )

    # Best-effort replication: copy the backup to up to `n_backups` workers.
    n_backups = 2
    backups_complete = 0
    # list(...) so .pop() works even if the inventory helper returns a tuple
    # (the typed variants elsewhere in this file annotate it as tuple[str, ...]).
    available_workers = list(get_active_workers_in_inventory())

    while (backups_complete < n_backups) and (len(available_workers) > 0):
        backup_unit = available_workers.pop()
        if backup_unit == get_unit_name():
            # Don't scp the backup onto the machine it already lives on.
            continue

        try:
            scp(output, f"{backup_unit}:{output}")
        except ErrorReturnCode:
            logger.debug(
                f"Unable to backup database to {backup_unit}. Is it online?",
                exc_info=True,
            )
            logger.warning(f"Unable to backup database to {backup_unit}.")
        else:
            logger.debug(f"Backed up database to {backup_unit}:{output}.")
            backups_complete += 1

    return
def pios():
    """
    Command each of the worker Pioreactors with the `pios` command.

    See full documentation here: https://github.com/Pioreactor/pioreactor/wiki/Command-line-interface#leader-commands

    Report errors or feedback here: https://github.com/Pioreactor/pioreactor/issues
    """
    import sys

    # Guard: only the leader may broadcast commands to workers.
    # Errors go to stderr and exit non-zero (the newer variant of this
    # command in this file already does so); exiting 0 previously made
    # failures indistinguishable from success in scripts.
    if not am_I_leader():
        print("workers cannot run `pios` commands. Try `pio` instead.", file=sys.stderr)
        sys.exit(1)

    # Guard: there must be at least one active worker to command.
    if len(get_active_workers_in_inventory()) == 0:
        print("No active workers. See `inventory` section in config.ini.", file=sys.stderr)
        sys.exit(1)
def pios() -> None:
    """
    Command each of the worker Pioreactors with the `pios` command.

    See full documentation here: https://docs.pioreactor.com/user_guide/Advanced/Command%20line%20interface#leader-only-commands-to-control-workers

    Report errors or feedback here: https://github.com/Pioreactor/pioreactor/issues
    """
    import sys

    # Only the leader can fan commands out to workers.
    if not am_I_leader():
        click.echo("workers cannot run `pios` commands. Try `pio` instead.", err=True)
        sys.exit(1)

    # There must be at least one active worker to command.
    workers = get_active_workers_in_inventory()
    if not workers:
        click.echo(
            "No active workers. See `network.inventory` section in config.ini.", err=True
        )
        sys.exit(1)
def backup_database(output_file: str) -> None:
    """
    Create a backup of the SQLite3 database into ``output_file``, then try to
    copy the backup to any available worker Pioreactors as a further backup.

    This job actually consumes _a lot_ of resources, and I've seen the LED
    output drop due to this running. See issue #81. For now, we will skip the
    backup if `od_reading` is running.

    Elsewhere, a cronjob is set up as well to run this action every N days.
    """
    import sqlite3
    from sh import ErrorReturnCode, rsync  # type: ignore

    unit = get_unit_name()
    experiment = UNIVERSAL_EXPERIMENT

    # Announce this action over MQTT while it runs, and clean up the
    # announcement on exit (success or failure).
    with publish_ready_to_disconnected_state(unit, experiment, "backup_database"):

        logger = create_logger("backup_database", experiment=experiment, unit=unit)

        # Skip entirely while OD readings are being taken: the backup's
        # resource usage has been observed to disturb the LED output (#81).
        if is_pio_job_running("od_reading"):
            logger.warning("Won't run if OD Reading is running. Exiting")
            return

        def progress(status: int, remaining: int, total: int) -> None:
            # sqlite3 backup progress callback, invoked per batch of pages.
            logger.debug(f"Copied {total-remaining} of {total} SQLite3 pages.")
            logger.debug(f"Writing to local backup {output_file}.")

        logger.debug(f"Starting backup of database to {output_file}")
        sleep(
            1
        )  # pause a second so the log entry above gets recorded into the DB.

        con = sqlite3.connect(config.get("storage", "database"))
        bck = sqlite3.connect(output_file)

        # `with bck:` commits the backup transaction on success.
        with bck:
            con.backup(bck, pages=-1, progress=progress)

        bck.close()
        con.close()

        # Record when the last successful local backup happened.
        with local_persistant_storage("database_backups") as cache:
            cache["latest_backup_timestamp"] = current_utc_time()

        logger.info("Completed backup of database.")

        # NOTE(review): getint is called here without an explicit section
        # argument — presumably the project's config wrapper supports a
        # flat lookup; confirm against pioreactor.config.
        n_backups = config.getint("number_of_backup_replicates_to_workers", fallback=2)
        backups_complete = 0
        available_workers = list(get_active_workers_in_inventory())

        # Best-effort replication of the backup file to up to n_backups
        # other online workers.
        while (backups_complete < n_backups) and (len(available_workers) > 0):
            backup_unit = available_workers.pop()
            if backup_unit == get_unit_name():
                # don't rsync the backup onto the machine it already lives on
                continue

            try:
                rsync(
                    "-hz",
                    "--partial",
                    "--inplace",
                    output_file,
                    f"{backup_unit}:{output_file}",
                )
            except ErrorReturnCode:
                # Failure to reach one worker is non-fatal; log and move on.
                logger.debug(
                    f"Unable to backup database to {backup_unit}. Is it online?",
                    exc_info=True,
                )
                logger.warning(
                    f"Unable to backup database to {backup_unit}. Is it online?"
                )
            else:
                logger.debug(f"Backed up database to {backup_unit}:{output_file}.")
                backups_complete += 1

        return
def universal_identifier_to_all_active_workers(
    units: tuple[str, ...]
) -> tuple[str, ...]:
    """
    Expand the universal identifier into the tuple of all active workers;
    any other tuple of unit names is returned unchanged.
    """
    if units != (UNIVERSAL_IDENTIFIER,):
        return units
    return get_active_workers_in_inventory()
def universal_identifier_to_all_units(units):
    """
    Expand the universal identifier into the collection of active workers;
    any other collection of unit names is returned unchanged.

    NOTE(review): despite the name, this expands to *active workers* only —
    confirm whether inactive units / the leader should be included.
    """
    is_universal = units == (UNIVERSAL_IDENTIFIER,)
    return get_active_workers_in_inventory() if is_universal else units
def am_I_active_worker() -> bool:
    """Return True when this unit's name appears in the active-worker inventory."""
    from pioreactor.config import get_active_workers_in_inventory

    active_workers = get_active_workers_in_inventory()
    return get_unit_name() in active_workers