import os

from systemd.daemon import booted


def test_booted():
    if os.path.exists('/run/systemd/system'):
        # assume we are running under systemd
        assert booted()
    else:
        # don't assume anything
        assert booted() in {False, True}
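# The test above leans on the documented behavior of sd_booted(): systemd
# creates the directory /run/systemd/system early during boot, and booted()
# reports whether it exists. A minimal pure-Python equivalent, for
# illustration only (the name booted_fallback is ours, not the library's):
import os


def booted_fallback():
    """Roughly what systemd.daemon.booted() checks under the hood."""
    return os.path.isdir('/run/systemd/system')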
def serve(self):
    if sd is not None and sd.booted():
        sd.notify("READY=1")
    return self.httpd.serve_forever()
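# A minimal sketch of how the module-level `sd` handle used above is
# typically obtained: import systemd.daemon when available, otherwise fall
# back to None so the notify call is skipped on non-systemd hosts. The
# alias `sd` matches the snippet; the try/except guard is an assumption.
try:
    from systemd import daemon as sd
except ImportError:
    sd = None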
# It's time to set up the controller!
sys.stdout.write(
    f"{settings['radio']} Controller {bluetooth_mac} on {settings['uart_name']} "
    f"at {int(settings['uart_baud'])/1000}k\n"
)
sys.stdout.flush()
sleep(1)

# Put together the command string and start btattach.
#   -P bcm      = Select the Broadcom protocol.
#   -S <baud>   = Set the baud rate for our chipset and UART.
#   -B <device> = Attach a primary controller to this serial device.
btattach_cmd = [
    btattach_bin,
    '-P', 'bcm',
    '-S', settings['uart_baud'],
    '-B', settings['uart_dev'],
]
btattach = subprocess.Popen(
    btattach_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)

# Wait for the firmware upload to complete and the HCI device to attach.
for _ in range(10):
    if hci.is_symlink():
        break
    sleep(1)

if not hci.is_symlink():
    sys.stdout.write(f"Failed to attach {hci.name}. Aborting!\n")
    sys.stdout.flush()
    btattach.terminate()
    sys.exit(1)

# Log our status, notify systemd and exit; leave btattach running to
# manage the HCI port.
sys.stdout.write(f"Raspberry Pi {rpi.model} Bluetooth on {hci.name} is ready.\n")
sys.stdout.flush()
if daemon.booted():
    daemon.notify("READY=1")
    daemon.notify(f"STATUS=Raspberry Pi {rpi.model} Bluetooth on {hci.name} is ready.")
sys.exit(0)
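# For context, a hedged sketch of the module-level names the script above
# relies on; only pathlib, shutil, subprocess and systemd.daemon are real
# APIs here, and every concrete value is an assumption. `rpi` and
# `bluetooth_mac` (hardware detection) are left out, as they aren't
# recoverable from this excerpt.
import shutil
import subprocess
import sys
from pathlib import Path
from time import sleep

from systemd import daemon

btattach_bin = shutil.which('btattach')    # locate the btattach binary on PATH
hci = Path('/sys/class/bluetooth/hci0')    # appears as a sysfs symlink once the HCI attaches
settings = {                               # hypothetical example values for a Pi 3
    'radio': 'BCM43438',
    'uart_name': 'ttyAMA0',
    'uart_dev': '/dev/ttyAMA0',
    'uart_baud': '3000000',
}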
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        "--config",
        default="/etc/dbusers.yaml",
        help="Path to YAML config file, default - /etc/dbusers.yaml",
    )
    argparser.add_argument("--debug", help="Turn on debug logging", action="store_true")
    argparser.add_argument(
        "--account-type",
        choices=["tool", "user"],
        help="""
        Type of accounts to harvest|delete, not useful for maintain
        default - tool
        """,
        default="tool",
    )
    argparser.add_argument(
        "action",
        choices=["harvest", "harvest-replicas", "maintain", "delete"],
        help="""
        What action to take.

        harvest: Collect information about all existing users from
        replica.my.cnf files and accounts already created in legacy
        databases, and put them into the account database. Runs as a
        one shot script.

        harvest-replicas: Collect information about all existing users'
        account status on the database replicas and set the status to
        absent or present in the account host metadata tables.

        maintain: Runs as a daemon that watches for new tools and tool
        users being created, creates accounts for them in all the
        labsdbs, maintains state in the account database, and writes
        out replica.my.cnf files.

        delete: Deletes a given user. Provide a username like
        tools.admin or user shellname, not a mysql user name.
        """,
    )
    argparser.add_argument(
        "extra_args",
        nargs="?",
        help="""
        Optional argument used when more info needs to be passed in.
        Currently used with `delete` to pass in a username.
        """,
    )
    args = argparser.parse_args()

    log_lvl = logging.DEBUG if args.debug else logging.INFO
    if daemon.booted():
        logging.basicConfig(
            format="%(message)s",
            level=log_lvl,
            handlers=[journal.JournalHandler()],
        )
    else:
        logging.basicConfig(format="%(message)s", level=log_lvl)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    if args.action == "harvest":
        harvest_cnf_files(config, args.account_type)
        harvest_replica_accts(config)
    elif args.action == "harvest-replicas":
        harvest_replica_accts(config)
    elif args.action == "maintain":
        while True:
            # Check if we're the primary NFS server.
            # If we aren't, just loop lamely, not exit. This allows this script
            # to run continuously on both labstores, making for easier
            # monitoring given our puppet situation and also easy failover. When
            # NFS primaries are switched, nothing new needs to be done to
            # switch this over.
            if is_active_nfs(config):
                populate_new_accounts(config, "tool")
                populate_new_accounts(config, "user")
                create_accounts(config)
            time.sleep(60)
    elif args.action == "delete":
        if args.extra_args is None:
            logging.error("Need to provide username to delete")
            sys.exit(1)
        delete_account(config, args.extra_args, args.account_type)
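# Hypothetical sketch of the is_active_nfs() helper the maintain loop above
# calls: the real implementation isn't part of this excerpt, but the loop
# only needs a boolean "am I the primary NFS server?" answer. One way to get
# that is to check whether the cluster service IP from the config is bound
# to a local interface. The 'nfs_cluster_ip' key and this mechanism are
# assumptions, not the project's actual code.
import socket


def is_active_nfs(config):
    """Return True if this host currently holds the NFS cluster IP."""
    cluster_ip = config.get('nfs_cluster_ip')
    if cluster_ip is None:
        return False
    try:
        # Binding succeeds only if the address is assigned to this host.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.bind((cluster_ip, 0))
        return True
    except OSError:
        return False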
default="active", help="The state units will transition to") return parser def main(): observer = log.PythonLoggingObserver(loggerName="") observer.start() parser = build_arg_parser() args = parser.parse_args() if args.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) for notifier in get_enabled_notifiers(args.notifier): notifier.handle_arguments(args) if not args.test: reactor.callWhenRunning(partial(setup, args)) else: test(args) systemd_status("Discovering units") reactor.run() if __name__ == "__main__": if not booted(): exit("This system doesn't run systemd!") main()
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        '--config',
        default='/etc/dbusers.yaml',
        help='Path to YAML config file, default - /etc/dbusers.yaml'
    )
    argparser.add_argument(
        '--debug',
        help='Turn on debug logging',
        action='store_true'
    )
    argparser.add_argument(
        '--account-type',
        choices=['tool', 'user'],
        help="""
        Type of accounts to harvest|delete, not useful for maintain
        default - tool
        """,
        default='tool'
    )
    argparser.add_argument(
        'action',
        choices=['harvest', 'maintain', 'delete'],
        help="""
        What action to take.

        harvest: Collect information about all existing users from
        replica.my.cnf files and accounts already created in legacy
        databases, and put them into the account database. Runs as a
        one shot script.

        maintain: Runs as a daemon that watches for new tools and tool
        users being created, creates accounts for them in all the
        labsdbs, maintains state in the account database, and writes
        out replica.my.cnf files.

        delete: Deletes a given user. Provide a username like
        tools.admin or user shellname, not a mysql user name.
        """
    )
    argparser.add_argument(
        'extra_args',
        nargs='?',
        help="""
        Optional argument used when more info needs to be passed in.
        Currently used with `delete` to pass in a username.
        """
    )
    args = argparser.parse_args()

    log_lvl = logging.DEBUG if args.debug else logging.INFO
    if daemon.booted():
        logging.basicConfig(format='%(message)s', level=log_lvl,
                            handlers=[journal.JournalHandler()])
    else:
        logging.basicConfig(format='%(message)s', level=log_lvl)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    if args.action == 'harvest':
        harvest_cnf_files(config, args.account_type)
        harvest_replica_accts(config)
    elif args.action == 'maintain':
        while True:
            # Check if we're the primary NFS server.
            # If we aren't, just loop lamely, not exit. This allows this script to
            # run continuously on both labstores, making for easier monitoring given
            # our puppet situation and also easy failover. When NFS primaries are
            # switched, nothing new needs to be done to switch this over.
            if is_active_nfs(config):
                populate_new_accounts(config, 'tool')
                populate_new_accounts(config, 'user')
                create_accounts(config)
            time.sleep(60)
    elif args.action == 'delete':
        if args.extra_args is None:
            logging.error('Need to provide username to delete')
            sys.exit(1)
        delete_account(config, args.extra_args, args.account_type)
def load_config(config_dir):
    config = {}
    config_d = os.path.join(config_dir, "config.d")
    config_files = [
        os.path.join(config_d, fn)
        for fn in os.listdir(config_d)
        if (
            fn.endswith(".json")
            or (fn.endswith(".yaml") and not isinstance(yaml, ImportError))
        )
        and os.path.isfile(os.path.join(config_d, fn))
        and os.access(os.path.join(config_d, fn), os.R_OK)
    ]
    config_files.sort()
    for file in config_files:
        config = dict_merge(config, config_load_file(file))

    required_keys = ["name", "secret", "api_url", "volumes"]
    # XXX legacy
    if "api_auth" not in config:
        required_keys += ["api_auth_name", "api_auth_secret"]
    for k in required_keys:
        if k not in config:
            raise Exception("Incomplete config")

    if "accept_new_high_water_pct" not in config:
        config["accept_new_high_water_pct"] = 80
    # Iterate over a copy of the keys so volumes without a path can be
    # deleted without mutating the dict mid-iteration.
    for volume_name in list(config["volumes"]):
        if "path" not in config["volumes"][volume_name]:
            del config["volumes"][volume_name]
            continue
        if "accept_new" not in config["volumes"][volume_name]:
            config["volumes"][volume_name]["accept_new"] = True
        if "accept_new_high_water_pct" not in config["volumes"][volume_name]:
            config["volumes"][volume_name]["accept_new_high_water_pct"] = config[
                "accept_new_high_water_pct"
            ]
    if len(config["volumes"]) == 0:
        raise Exception("Incomplete config")

    if "log_file" not in config:
        if (not isinstance(systemd_daemon, ImportError)) and systemd_daemon.booted():
            config["log_file"] = "systemd"
        else:
            config["log_file"] = "syslog"
    if "lock_dir" not in config:
        for dir in ("/run/lock", "/var/lock", "/run", "/var/run", "/tmp"):
            if os.path.exists(dir):
                config["lock_dir"] = dir
                break
    if "var_dir" not in config:
        config["var_dir"] = "/var/lib/turku-storage"
    if "snapshot_mode" not in config:
        config["snapshot_mode"] = "link-dest"
    if "preserve_hard_links" not in config:
        config["preserve_hard_links"] = False
    if "ssh_ping_host" not in config:
        config["ssh_ping_host"] = socket.getfqdn()
    if "ssh_ping_port" not in config:
        config["ssh_ping_port"] = 22
    if "ssh_ping_user" not in config:
        config["ssh_ping_user"] = "******"
    if "ssh_ping_host_keys" not in config:
        config["ssh_ping_host_keys"] = []
        keys_glob = "/etc/ssh/ssh_host_*_key.pub"
        if "ssh_ping_host_keys_glob" in config:
            keys_glob = config["ssh_ping_host_keys_glob"]
        for pubkey in glob.glob(keys_glob):
            with open(pubkey) as f:
                config["ssh_ping_host_keys"].append(f.read().rstrip())
    if "authorized_keys_file" not in config:
        config["authorized_keys_file"] = os.path.expanduser(
            "~{}/.ssh/authorized_keys".format(config["ssh_ping_user"])
        )
    if "authorized_keys_user" not in config:
        config["authorized_keys_user"] = config["ssh_ping_user"]
    if "authorized_keys_command" not in config:
        config["authorized_keys_command"] = "turku-storage-ping"
    if "timezone" not in config:
        config["timezone"] = "UTC"
    if config["timezone"]:
        os.environ["TZ"] = config["timezone"]
        time.tzset()
    return config
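# dict_merge() and config_load_file() are referenced above but not defined
# in this excerpt. A minimal sketch of a recursive dict_merge, under the
# assumption that later config files override earlier ones and that nested
# dicts are merged key by key rather than replaced wholesale:
import copy


def dict_merge(base, overlay):
    """Return a copy of `base` with `overlay` merged in recursively."""
    merged = copy.deepcopy(base)
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = dict_merge(merged[key], value)
        else:
            merged[key] = copy.deepcopy(value)
    return merged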