@command.wrap
def gen_local_upstream_user():
    "generate a fake local user CA and sample user for testing"
    # The sample user's identity is formed as <user>@<domain>, so an empty
    # email domain would produce a meaningless certificate -- refuse early.
    cfg = configuration.get_config()
    if cfg.user_grant_email_domain == "":
        command.fail(
            "user-grant-email-domain not populated when trying to generate local fake upstream certificate"
        )

    ca_key, ca_cert = get_upstream_cert_paths()
    user_key, user_cert = get_local_grant_user_paths()

    # Never clobber credentials that already exist; the operator must remove
    # them explicitly before regenerating.
    if os.path.exists(ca_key) or os.path.exists(ca_cert):
        command.fail("upstream certificate authority already exists; not generating")
    if os.path.exists(user_key) or os.path.exists(user_cert):
        command.fail("locally-generated user already exists; not generating")

    print("warning: user-grant-upstream.key and local-grant-user.key will not be encrypted; you should not use these "
          "as long-term keys or commit them into your project repository. this feature is only intended for temporary "
          "cluster testing.")

    # keygenupstream writes both the CA keypair and a sample user certificate
    # valid for 24 hours.
    subprocess.check_call([
        "keygenupstream",
        ca_key,
        ca_cert,
        "%s@%s" % (UPSTREAM_USER_NAME, cfg.user_grant_email_domain),
        user_key,
        user_cert,
        "24h",
    ])


main_command = command.Mux("commands about cluster authorities", {
    "gen": generate,
    "genupstream": gen_local_upstream_user,
})
"%s.%s" % (node.hostname, config.external_domain), str(node.ip) ]: subprocess.check_call(["ssh-keygen", "-f", known_hosts, "-R", remove], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) keys = hostkeys_by_fingerprint(node, fingerprints) with open(known_hosts, "a") as f: for key in keys: f.write("%s.%s %s\n" % (node.hostname, config.external_domain, key)) @command.wrap def pull_supervisor_key_from(source_file): "update ~/.ssh/known_hosts file with the supervisor host keys, based on their known hashes" pull_supervisor_key( util.readfile(source_file).decode().strip().split("\n")) etcdctl_command = dispatch_etcdctl kubectl_command = dispatch_kubectl foreach_command = ssh_foreach main_command = command.Mux( "commands about establishing access to a cluster", { "ssh": access_ssh, "update-known-hosts": update_known_hosts, "pull-supervisor-key": pull_supervisor_key_from, })
with gzip.open(os.path.join(cddir, "initrd.gz"), "ab") as f: f.write( subprocess.check_output( ["cpio", "--create", "--format=newc"], input="".join("%s\n" % filename for filename in inclusion).encode(), cwd=d)) files_for_md5sum = subprocess.check_output( ["find", ".", "-follow", "-type", "f", "-print0"], cwd=cddir).decode().split("\0") files_for_md5sum = [x for x in files_for_md5sum if x] md5s = subprocess.check_output(["md5sum", "--"] + files_for_md5sum, cwd=cddir) util.writefile(os.path.join(cddir, "md5sum.txt"), md5s) temp_iso = os.path.join(d, "temp.iso") subprocess.check_call([ "xorriso", "-as", "mkisofs", "-quiet", "-o", temp_iso, "-r", "-J", "-c", "boot.cat", "-b", "isolinux.bin", "-no-emul-boot", "-boot-load-size", "4", "-boot-info-table", cddir ]) subprocess.check_call(["isohybrid", "-h", "64", "-s", "32", temp_iso]) util.copy(temp_iso, iso_image) main_command = command.Mux("commands about building installation ISOs", { "gen": gen_iso, "passphrases": list_passphrases, })
"install and update packages on a node" config = configuration.get_config() for node in config.nodes: setup.ssh_cmd(ops, "update apt repositories on @HOST", node, "apt-get", "update") setup.ssh_cmd(ops, "upgrade packages on @HOST", node, "apt-get", "dist-upgrade", "-y") @command.wrapop def infra_sync(ops: command.Operations, node_name: str) -> None: "synchronize the filesystem to disk on a node" node = configuration.get_config().get_node(node_name) setup.ssh_cmd(ops, "synchronize operations on @HOST", node, "sync") @command.wrapop def infra_sync_supervisor(ops: command.Operations) -> None: "synchronize the filesystem to disk on the supervisor" infra_sync(ops, configuration.get_config().keyserver.hostname) main_command = command.Mux( "commands about maintaining the infrastructure of a cluster", { "admit": infra_admit, "admit-all": infra_admit_all, "install-packages": infra_install_packages, "sync": infra_sync, "sync-supervisor": infra_sync_supervisor, })
"hyperkube", "kubectl", "--kubeconfig", kubeconfig_path, "-o", "json", "get", "rolebindings", "auto-grant-" + authority.UPSTREAM_USER_NAME ]).decode()) if rolebinding.get("roleRef", {}).get("name") != "admin": command.fail("rolebinding for user was not admin in %s" % repr(rolebinding)) print("autogenerated rolebinding for user", repr(authority.UPSTREAM_USER_NAME), "passed basic check!") main_command = command.Mux( "commands about verifying the state of a cluster", { "keystatics": check_keystatics, "keygateway": check_keygateway, "supervisor-accessible": check_supervisor_accessible, "online": check_online, "ssh-with-certs": check_ssh_with_certs, "supervisor-certs": check_certs_on_supervisor, "systemd": check_systemd_services, "etcd": check_etcd_health, "kubernetes-init": check_kube_init, "kubernetes": check_kube_health, "pull": check_pull, "flannel": check_flannel, "dns-addon": check_dns, "user-grant": check_user_grant, })
ssh_cmd(ops, "update package of OCIs on @HOST", node, "apt-get", "install", "-y", "homeworld-oci-pack") ssh_cmd(ops, "upgrade apt packages on @HOST", node, "apt-get", "upgrade", "-y") ssh_cmd(ops, "re-push OCIs to registry on @HOST", node, "/usr/lib/homeworld/push-ocis.sh") @command.wrapop def setup_prometheus(ops: command.Operations) -> None: "bring up the supervisor node prometheus instance" config = configuration.get_config() for node in config.nodes: if node.kind != "supervisor": continue ssh_upload_bytes(ops, "upload prometheus config to @HOST", node, configuration.get_prometheus_yaml().encode(), "/etc/prometheus.yaml") ssh_cmd(ops, "enable prometheus on @HOST", node, "systemctl", "enable", "prometheus") ssh_cmd(ops, "restart prometheus on @HOST", node, "systemctl", "restart", "prometheus") main_command = command.Mux("commands about setting up a cluster", { "keyserver": setup_keyserver, "self-admit": admit_keyserver, "keygateway": setup_keygateway, "update-keygateway": update_keygateway, "supervisor-ssh": setup_supervisor_ssh, "dns-bootstrap": setup_dns_bootstrap, "stop-dns-bootstrap": teardown_dns_bootstrap, "bootstrap-registry": setup_bootstrap_registry, "update-registry": update_registry, "prometheus": setup_prometheus, })
return opener def get_keyurl_data(path): config = configuration.get_config() keyserver_hostname = config.keyserver.hostname url = "https://%s.%s:20557/%s" % (keyserver_hostname, config.external_domain, path.lstrip("/")) try: with get_verified_keyserver_opener().open(url) as req: if req.code != 200: command.fail("request failed: %s" % req.read().decode()) return req.read().decode() except urllib.error.HTTPError as e: if e.code == 400: command.fail("request failed: 400 " + e.msg + " (possibly an auth error?)") elif e.code == 404: command.fail("path not found: 404 " + e.msg) else: raise e @command.wrap def query_keyurl(path): "request data from unprotected URLs on keyserver" print(get_keyurl_data(path)) main_command = command.Mux("commands about querying the state of a cluster", { "keyurl": query_keyurl, })
with ops.context("termination", TerminationContext()) as tc: with ops.context("debug shell", DebugContext(True)): ops.add_subcommand(auto_launch_supervisor, tc, config.keyserver, debug_qemu=debug_qemu) other_nodes = [ n for n in config.nodes if n != config.keyserver ] ops.add_subcommand(auto_launch_nodes, tc, other_nodes, debug_qemu=debug_qemu) main_command = command.Mux( "commands to run local testing VMs", { "net": command.Mux( "commands to control the state of the local testing network", { "up": net_up, "down": net_down, }), "auto": command.SeqMux( "commands to perform large-scale operations automatically", { "install": auto_install, "launch": auto_launch, }), })
"SERVIP_DNS": config.service_dns, # TODO: stop allowing use of just a single apiserver "SOME_APISERVER": [node for node in config.nodes if node.kind == "master"][0].ip, } if extra_kvs: kvs.update(extra_kvs) return kvs def get_single_kube_spec(path: str, extra_kvs: dict = None) -> str: templ = resource.get(path).decode() return template.yaml_template(templ, get_kube_spec_vars(extra_kvs)) main_command = command.Mux( "commands about cluster configuration", { "populate": populate, "edit": edit, "show": command.Mux( "commands about showing different aspects of the configuration", { "cluster.conf": print_cluster_conf, "kubeconfig": print_local_kubeconfig, "prometheus.yaml": print_prometheus_yaml, }), })
key_path = os.path.join(dir, "gen.key") cert_path = os.path.join(dir, "gen.pem") util.writefile(ca_key, authority.get_decrypted_by_filename("./clusterca.key")) pem = authority.get_pubkey_by_filename("./clusterca.pem") util.writefile(ca_pem, pem) os.chmod(ca_key, 0o600) subprocess.check_call([ "keylocalcert", ca_key, ca_pem, name, "4h", key_path, cert_path, name, "" ]) import_https(name, key_path, cert_path) print("generated local-only https cert!") keytab_command = command.Mux( "commands about keytabs granted by external sources", { "import": import_keytab, "rotate": rotate_keytab, "delold": delold_keytab, "list": list_keytabs, "export": export_keytab, }) https_command = command.Mux( "commands about HTTPS certs granted by external sources", { "import": import_https, "export": export_https, "genlocal": gen_local_https_cert, })
# Subcommand modules wired into the top-level dispatcher below.
import infra
import keys
import seq
import deploy
import metadata
import virt

# Top-level command multiplexer: maps each subcommand name to the Mux (or
# other command object) exported by the module implementing it. Note that
# keys contributes two entry points (keytab, https) and access contributes
# four (access, etcdctl, kubectl, foreach).
main_command = command.Mux("invoke a top-level command", {
    "iso": iso.main_command,
    "config": configuration.main_command,
    "authority": authority.main_command,
    "keytab": keys.keytab_command,
    "https": keys.https_command,
    "setup": setup.main_command,
    "query": query.main_command,
    "verify": verify.main_command,
    "access": access.main_command,
    "etcdctl": access.etcdctl_command,
    "kubectl": access.kubectl_command,
    "foreach": access.foreach_command,
    "infra": infra.main_command,
    "seq": seq.main_command,
    "deploy": deploy.main_command,
    "virt": virt.main_command,
    "version": metadata.version_command,
})

if __name__ == "__main__":
    # Delegate argument parsing and dispatch to the command framework and
    # propagate its exit status to the shell.
    sys.exit(command.main_invoke(main_command))
skey64, scert64 = base64.b64encode(skey), base64.b64encode(scert) ikey = authority.get_decrypted_by_filename("./kubernetes.key") icert = authority.get_pubkey_by_filename("./kubernetes.pem") ikey64, icert64 = base64.b64encode(ikey), base64.b64encode(icert) _, upstream_cert_path = authority.get_upstream_cert_paths() if not os.path.exists(upstream_cert_path): command.fail( "user-grant-upstream.pem not found in homeworld directory") upstream_cert = util.readfile(upstream_cert_path).decode() launch_spec("//user-grant:kubernetes.yaml", { "SERVER_KEY_BASE64": skey64.decode(), "SERVER_CERT_BASE64": scert64.decode(), "ISSUER_KEY_BASE64": ikey64.decode(), "ISSUER_CERT_BASE64": icert64.decode(), "EMAIL_DOMAIN": config.user_grant_email_domain, "UPSTREAM_CERTIFICATE": upstream_cert, }, export=export) main_command = command.Mux( "commands to deploy systems onto the kubernetes cluster", { "flannel": launch_flannel, "flannel-monitor": launch_flannel_monitor, "dns-addon": launch_dns_addon, "dns-monitor": launch_dns_monitor, "user-grant": launch_user_grant, })