def logs(
    services: List[str] = typer.Argument(
        None,
        help="Services to be inspected",
        shell_complete=Application.autocomplete_service,
    ),
    follow: bool = typer.Option(
        False,
        "--follow",
        "-f",
        help="Follow logs",
        show_default=False,
    ),
    tail: int = typer.Option(
        # Fix: the option is annotated as int, so the default must be an int
        # (the previous default was the string "500")
        500,
        "--tail",
        "-t",
        help="Number of lines to show",
    ),
) -> None:
    """Show the logs of the requested services (compose engine).

    With --follow the command keeps streaming until interrupted; a
    KeyboardInterrupt is swallowed and logged instead of raising.
    """
    Application.print_command(
        Application.serialize_parameter("--follow", follow, IF=follow),
        Application.serialize_parameter("--tail", tail, IF=tail),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    # controller_init normalizes the requested services list
    services = Application.data.services

    docker = Docker()
    try:
        docker.compose.logs(services, follow=follow, tail=tail)
    except KeyboardInterrupt:  # pragma: no cover
        log.info("Stopped by keyboard")
def interfaces(
    service: ServiceTypes = typer.Argument(
        ...,
        help="Service name",
    ),
    detach: bool = typer.Option(
        False,
        "--detach",
        help="Detached mode to run the container in background",
        show_default=False,
    ),
    port: Optional[int] = typer.Option(
        None,
        "--port",
        "-p",
        help="port to be associated to the current service interface",
    ),
) -> None:
    """Deprecated command kept only to point users at its replacements."""
    Application.print_command(
        Application.serialize_parameter("--detach", detach, IF=detach),
        Application.serialize_parameter("--port", port, IF=port),
        Application.serialize_parameter("", service),
    )

    if service.value == "sqlalchemy":
        # Deprecated since 1.2: sqlalchemy was replaced by adminer
        log.warning("Deprecated interface sqlalchemy, use adminer instead")
        return None

    # Deprecated since 2.1: the whole command was superseded by `rapydo run`
    print_and_exit("Interfaces command is replaced by rapydo run {}", service)
def tuning(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    # Fix: these options default to None, so the annotation must be Optional[int]
    # (consistent with the `port` option of the interfaces command)
    cpu: Optional[int] = typer.Option(None, "--cpu", help="Force the amount of cpus", min=1),
    ram: Optional[int] = typer.Option(None, "--ram", help="Force the amount of ram", min=1),
) -> None:
    """Print suggested tuning settings for a service.

    CPU count and RAM default to the host's detected resources when not
    forced via --cpu / --ram; the actual suggestions are delegated to the
    service-specific tuning module.
    """
    Application.print_command(
        Application.serialize_parameter("--cpu", cpu, IF=cpu),
        Application.serialize_parameter("--ram", ram, IF=ram),
        Application.serialize_parameter("", service),
    )

    Application.get_controller().controller_init()

    if not cpu:
        cpu = os.cpu_count() or 1

    if not ram:
        # Total physical memory in bytes
        ram = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")

    log.info("Number of CPU(s): {}", cpu)
    log.info("Amount of RAM: {}", system.bytes_to_str(ram))

    log.info("Suggested settings:")

    module = TUNING_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    module.tuning(ram, cpu)
def scale(
    scaling: str = typer.Argument(..., help="scale SERVICE to NUM_REPLICA"),
    wait: bool = typer.Option(
        False,
        "--wait",
        help="Wait service convergence",
        show_default=False,
    ),
) -> None:
    """Scale a service to a number of replicas (swarm engine).

    ``scaling`` is either ``SERVICE=NUM`` or a bare service name; in the
    latter case the replica count is read from the project configuration
    (``DEFAULT_SCALE_<SERVICE>`` env variable, defaulting to "1").
    """
    Application.print_command(
        Application.serialize_parameter("--wait", wait, IF=wait),
        Application.serialize_parameter("", scaling),
    )

    Application.get_controller().controller_init()

    options = scaling.split("=")
    if len(options) == 2:
        service, nreplicas = options
    else:
        # No explicit replica count: fall back to the configured default
        scale_var = f"DEFAULT_SCALE_{scaling.upper()}"
        nreplicas = glom(Configuration.specs, f"variables.env.{scale_var}", default="1")
        service = scaling

    docker = Docker()
    service_name = docker.get_service(service)
    scales: Dict[Union[str, Service], int] = {}
    try:
        scales[service_name] = int(nreplicas)
    except ValueError:
        print_and_exit("Invalid number of replicas: {}", nreplicas)

    # Stop core services non compatible with scale with 2+ instances
    if scales[service_name] >= 2:
        core_services = list(Application.data.base_services.keys())
        if service in core_services and service not in supported_services:
            print_and_exit(
                "Service {} is not guaranteed to support the scale, "
                "can't accept the request",
                service,
            )

    docker.registry.ping()

    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    try:
        docker.client.service.scale(scales, detach=not wait)
    # Can happens in case of scale before start
    except NoSuchService:
        print_and_exit(
            "No such service: {}, have you started your stack?", service_name
        )
def pull(
    services: List[str] = typer.Argument(
        None,
        help="Services to be pulled",
        shell_complete=Application.autocomplete_service,
    ),
    include_all: bool = typer.Option(
        False,
        "--all",
        help="Include both core and custom images",
        show_default=False,
    ),
    quiet: bool = typer.Option(
        False,
        "--quiet",
        help="Pull without printing progress information",
        show_default=False,
    ),
) -> None:
    """Collect the set of docker images used by the requested services.

    Base (core) images are always collected; custom images are only
    added when --all is given or when the service has no build section.
    NOTE(review): this block appears truncated here — the actual pull of
    the collected `images` set is not visible in this chunk.
    """
    Application.print_command(
        Application.serialize_parameter("--all", include_all, IF=include_all),
        Application.serialize_parameter("--quiet", quiet, IF=quiet),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    docker = Docker()
    if Configuration.swarm_mode:
        # Swarm deployments pull through the local registry
        docker.registry.ping()
        docker.registry.login()

    image: str = ""
    images: Set[str] = set()

    for service in Application.data.active_services:
        # Skip services not explicitly requested (when a filter was given)
        if Application.data.services and service not in Application.data.services:
            continue

        if base_image := glom(Application.data.base_services, f"{service}.image", default=""):
            images.add(base_image)

        image = glom(Application.data.compose_config, f"{service}.image", default="")
        # include custom services without a build to base images
        build = glom(Application.data.compose_config, f"{service}.build", default="")
        if image and (include_all or not build):
            images.add(image)
def update(
    ignore_submodules: List[str] = typer.Option(
        [],
        "--ignore-submodule",
        "-i",
        help="Ignore a submodule",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    """Pull git submodules and refresh the project configuration."""
    Application.print_command(
        Application.serialize_parameter("--ignore-submodule", ignore_submodules),
    )

    Application.get_controller().controller_init()

    Application.git_update(ignore_submodules)

    # Git pulls may have changed the specs: read everything again
    Application.get_controller().read_specs(read_extended=True)
    Application.get_controller().make_env()

    # Rebuild the compose configuration and re-validate it
    _, compose_config = Application.get_controller().get_compose_configuration()
    active = services.find_active(compose_config)
    Application.get_controller().check_placeholders_and_passwords(compose_config, active)

    log.info("All updated")
def scale(
    scaling: str = typer.Argument(..., help="scale SERVICE to NUM_REPLICA")
) -> None:
    """Scale a compose service to the requested number of replicas.

    Accepts either ``SERVICE=NUM`` or a bare service name; in the latter
    case the count comes from the ``DEFAULT_SCALE_<SERVICE>`` env var.
    """
    Application.print_command(Application.serialize_parameter("", scaling))

    Application.get_controller().controller_init()

    parts = scaling.split("=")
    if len(parts) == 2:
        service, nreplicas = parts
    else:
        # Bare service name: read the replica count from the configuration
        service = scaling
        scale_var = f"DEFAULT_SCALE_{scaling.upper()}"
        nreplicas = glom(Configuration.specs, f"variables.env.{scale_var}", default="1")

    # Config values may be non-string; only validate string inputs here
    if isinstance(nreplicas, str) and not nreplicas.isnumeric():
        print_and_exit("Invalid number of replicas: {}", nreplicas)

    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    Docker().compose.start_containers([service], scales={service: int(nreplicas)})
def add(
    element_type: ElementTypes = typer.Argument(
        ..., help="Type of element to be created"),
    name: str = typer.Argument(..., help="Name to be assigned to the new element"),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force files overwriting",
        show_default=False,
    ),
    add_tests: bool = typer.Option(
        False,
        "--add-tests",
        help="Add tests files",
        show_default=False,
    ),
) -> None:
    """Scaffold a new project element (endpoint, task, ...) by name."""
    Application.print_command(
        Application.serialize_parameter("--add-tests", add_tests, IF=add_tests),
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("", element_type),
        Application.serialize_parameter("", name),
    )

    Application.get_controller().controller_init()

    auth_service = glom(
        Configuration.specs, "variables.env.AUTH_SERVICE", default="NO_AUTHENTICATION"
    )

    # Each element type maps to its own creator function
    creator = get_function(element_type)
    creator(
        Application.project_scaffold,
        name,
        Application.data.services,
        auth_service,
        force,
        add_tests,
    )
def logs(
    services: List[str] = typer.Argument(
        None,
        help="Services to be inspected",
        shell_complete=Application.autocomplete_service,
    ),
    follow: bool = typer.Option(
        False,
        "--follow",
        "-f",
        help="Follow logs",
        show_default=False,
    ),
    tail: int = typer.Option(
        # Fix: the option is annotated as int, so the default must be an int
        # (the previous default was the string "500")
        500,
        "--tail",
        "-t",
        help="Number of lines to show",
    ),
) -> None:
    """Show the logs of the requested services (swarm engine).

    --follow is only supported for a single service; frontend logs are
    requested with timestamps enabled.
    """
    Application.print_command(
        Application.serialize_parameter("--follow", follow, IF=follow),
        Application.serialize_parameter("--tail", tail),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    if follow and len(Application.data.services) > 1:
        print_and_exit("Follow flag is not supported on multiple services")

    # Fix: Docker() is loop-invariant, build the client once instead of
    # once per service
    docker = Docker()
    for service in Application.data.services:
        # Only the frontend logs are printed with timestamps
        timestamps = service == "frontend"

        try:
            docker.swarm.logs(service, follow, tail, timestamps)
        except KeyboardInterrupt:  # pragma: no cover
            log.info("Stopped by keyboard")
        # Blank separator between the logs of different services
        print("")
def list_cmd(
    element_type: ElementTypes = typer.Argument(
        ..., help="Type of element to be listed"),
) -> None:
    """Print a table of env variables, active services or git submodules.

    The selected element type fills ``table``/``headers``; the result is
    rendered with tabulate at the end.
    """
    Application.print_command(Application.serialize_parameter(
        "", element_type))

    Application.get_controller().controller_init()
    table: List[List[str]] = []

    if element_type == ElementTypes.env:
        log.info("List env variables:\n")
        headers = ["Key", "Value"]
        env = read_env()
        for var in sorted(env):
            # Normalize missing/None values to empty strings for display
            val = env.get(var) or ""
            table.append([var, val])

    if element_type == ElementTypes.services:
        log.info("List of active services:\n")
        headers = ["Name", "Image", "Status", "Path"]

        docker = Docker()
        services_status = docker.get_services_status(Configuration.project)
        for name, service in Application.data.compose_config.items():
            if name in Application.data.active_services:
                image = service.image
                build = service.build
                status = services_status.get(name, "N/A")

                if build:
                    # Show the build context relative to the current directory
                    build_path = str(build.context.relative_to(os.getcwd()))
                else:
                    build_path = ""

                table.append([name, image, status, build_path])

    if element_type == ElementTypes.submodules:
        log.info("List of submodules:\n")
        headers = ["Repo", "Branch", "Path"]
        for name in Application.gits:
            repo = Application.gits.get(name)
            if repo and repo.working_dir:
                branch = git.get_active_branch(repo) or "N/A"
                path = str(repo.working_dir).replace(os.getcwd(), "")
                # to be replaced with str.removeprefix
                if path.startswith("/"):
                    path = path[1:]

                table.append([name, branch, path])

    print("")
    print(tabulate(table, tablefmt=TABLE_FORMAT, headers=headers))
def stop(services: List[str] = typer.Argument(
    None,
    help="Services to be stopped",
    shell_complete=Application.autocomplete_service,
)) -> None:
    """Stop the running containers without removing them."""
    Application.print_command(Application.serialize_parameter("", services))

    Application.get_controller().controller_init(services)

    Docker().client.compose.stop(Application.data.services)

    log.info("Stack stopped")
def install(
    version: str = typer.Argument("auto", help="Version to be installed"),
    editable: bool = typer.Option(
        True,
        "--no-editable",
        help="Disable editable mode",
        show_default=False,
    ),
) -> None:
    """Install a rapydo-controller version or one of the docker tools."""
    Application.print_command(
        Application.serialize_parameter("--no-editable", not editable, IF=not editable),
        Application.serialize_parameter("", version),
    )

    # Tool installations do not require the controller to be initialized
    tool_installers = {
        "docker": Packages.install_docker,
        "compose": Packages.install_compose,
        "buildx": Packages.install_buildx,
    }
    if installer := tool_installers.get(version):
        installer()
        return None

    Application.get_controller().controller_init()

    if version == "auto":
        # Default to the version pinned by the current project
        version = Configuration.rapydo_version

    log.info("Detected version {} to be installed", version)

    if editable:
        install_controller_from_folder(version)
    else:
        install_controller_from_git(version)
def status(
    services: List[str] = typer.Argument(
        None,
        help="Services to be inspected",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Print the current status of the requested services."""
    Application.print_command(Application.serialize_parameter("", services))

    Application.get_controller().controller_init(services)

    Docker().status(Application.data.services)
def start(
    services: List[str] = typer.Argument(
        None,
        help="Services to be started",
        shell_complete=Application.autocomplete_service,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Force containers restart",
        show_default=False,
    ),
) -> None:
    """Start the stack, via swarm deploy or compose up depending on the mode."""
    # Fix: echo the --force flag too, consistently with every other command
    # in this file (it was accepted but never printed)
    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    docker = Docker()

    if Configuration.swarm_mode:
        docker.registry.ping()

    verify_available_images(
        Application.data.services,
        Application.data.compose_config,
        Application.data.base_services,
    )

    if Configuration.swarm_mode:
        docker.compose.dump_config(Application.data.services)
        docker.swarm.deploy()

        if force:
            # Deploy alone does not restart already-running replicas:
            # force an update of each service
            for service in Application.data.services:
                docker.client.service.update(
                    f"{Configuration.project}_{service}", detach=True, force=True)

        wait_stack_deploy(docker)
    else:
        docker.compose.start_containers(Application.data.services, force=force)

    log.info("Stack started")
def upgrade(
    path: Path = typer.Option(
        ...,
        "--path",
        help="path of file to be upgraded",
        show_default=False,
    ),
) -> None:
    """Recreate a single project file from the current template."""
    Application.print_command(
        Application.serialize_parameter("--path", path, IF=path),
    )

    Application.get_controller().controller_init()

    specs = Configuration.specs
    frontend = glom(specs, "variables.env.FRONTEND_FRAMEWORK", default=NO_FRONTEND)
    auth = glom(specs, "variables.env.AUTH_SERVICE", default="NO_AUTHENTICATION")
    extend = glom(specs, "variables.env.EXTENDED_PROJECT", default=None)
    if extend == EXTENDED_PROJECT_DISABLED:
        extend = None

    # path is a required option, so both flags are effectively always True
    force = auto = path is not None

    create_project(
        project_name=Configuration.project,
        auth=auth,
        frontend=frontend,
        services=Application.data.services,
        extend=extend,
        force_current=True,
        force=force,
        auto=auto,
        add_optionals=True,
        path=path,
    )
def join(
    manager: bool = typer.Option(
        False, "--manager", show_default=False, help="join new node with manager role"
    )
) -> None:
    """Print the `docker swarm join` command for a new worker or manager node."""
    Application.print_command(
        Application.serialize_parameter("--manager", manager, IF=manager),
    )

    Application.get_controller().controller_init()
    docker = Docker()

    # Find a ready, active manager node to extract the swarm address from
    manager_address = "N/A"
    for node in docker.client.node.list():
        is_active_manager = (
            node.spec.role == "manager"
            and node.status.state == "ready"
            and node.spec.availability == "active"
        )
        if is_active_manager and node.manager_status:
            manager_address = node.manager_status.addr

    node_kind = "manager" if manager else "worker"
    log.info(f"To add a {node_kind} to this swarm, run the following command:")
    token = docker.swarm.get_token(node_kind)

    print("")
    print(f"docker swarm join --token {token} {manager_address}")
    print("")
def shell(
    service: str = typer.Argument(
        ..., help="Service name", shell_complete=Application.autocomplete_service),
    command: str = typer.Argument(
        "bash", help="UNIX command to be executed on selected running service"),
    user: Optional[str] = typer.Option(
        None,
        "--user",
        "-u",
        help="User existing in selected service",
        show_default=False,
    ),
    default_command: bool = typer.Option(
        False,
        "--default-command",
        "--default",
        help="Execute the default command configured for the container",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help=
        "Disable pseudo-tty allocation (useful for non-interactive script)",
        show_default=False,
    ),
    replica: int = typer.Option(
        1,
        "--replica",
        "--slot",
        help="Execute the command on the specified replica",
        show_default=False,
    ),
    broadcast: bool = typer.Option(
        False,
        "--broadcast",
        help="Execute the command on all the replicas",
        show_default=False,
    ),
) -> None:
    """Execute a command inside a running service container.

    With --broadcast the command is executed on every replica; otherwise
    it targets the replica selected with --replica (default: the first).
    """
    Application.print_command(
        Application.serialize_parameter("--user", user, IF=user),
        Application.serialize_parameter("--default", default_command,
                                        IF=default_command),
        Application.serialize_parameter("", service),
        Application.serialize_parameter("", command),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    if replica > 1 and broadcast:
        print_and_exit("--replica and --broadcast options are not compatible")

    Application.get_controller().controller_init()

    docker = Docker()

    if not user:
        # Fall back to the per-service default user
        user = services.get_default_user(service)

    if default_command:
        command = services.get_default_command(service)

    log.debug("Requested command: {} with user: {}", command, user or "default")

    if broadcast:
        containers = docker.get_containers(service)
        if not containers:
            print_and_exit("No running container found for {} service", service)

        docker.exec_command(containers, user=user, command=command)
    else:
        container = docker.get_container(service, slot=replica)

        if not container:
            if replica != 1:
                print_and_exit("Replica number {} not found for {} service",
                               str(replica), service)
            print_and_exit("No running container found for {} service", service)

        docker.exec_command(container, user=user, command=command)
def backup(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the backup procedure",
        show_default=False,
    ),
    max_backups: int = typer.Option(
        0,
        "--max",
        help=
        "Maximum number of backups, older exceeding this number will be removed",
        show_default=False,
    ),
    dry_run: bool = typer.Option(
        False,
        "--dry-run",
        help="Do not perform any backup or delete backup files",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help=
        "Service to be restarted once completed the backup (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Execute the backup procedure of a service.

    Rotates old backup files when --max is given, delegates the actual
    backup to the service-specific module, then optionally restarts the
    requested services.
    """
    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--max", max_backups, IF=max_backups),
        Application.serialize_parameter("--dry-run", dry_run, IF=dry_run),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
    )

    if dry_run:
        log.warning("Dry run mode is enabled")

    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()
    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    backup_dir.mkdir(parents=True, exist_ok=True)

    if max_backups > 0:
        backups = list(backup_dir.glob(get_date_pattern()))
        if max_backups >= len(backups):
            log.debug("Found {} backup files, maximum not reached", len(backups))
        else:
            # Delete the oldest files exceeding the requested maximum
            # (sorted() works because filenames embed the timestamp)
            for f in sorted(backups)[:-max_backups]:
                if not dry_run:
                    f.unlink()
                log.warning(
                    "{} deleted because exceeding the max number of backup files ({})",
                    f.name,
                    max_backups,
                )

    module = BACKUP_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    module.backup(container=container, now=now, force=force, dry_run=dry_run)

    if restart and not dry_run:
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
def build(
    services: List[str] = typer.Argument(
        None,
        help="Services to be built",
        shell_complete=Application.autocomplete_service,
    ),
    core: bool = typer.Option(
        False,
        "--core",
        help="Include core images to the build list",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="remove the cache to force the build",
        show_default=False,
    ),
) -> bool:
    """Build the custom (and optionally core) docker images via buildx bake.

    NOTE(review): the block appears truncated here — the declared bool
    return and the bake of the collected custom `images` are not visible
    in this chunk.
    """
    Application.print_command(
        Application.serialize_parameter("--core", core, IF=core),
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    docker = Docker()

    # buildx is a hard requirement for the build command
    if docker.client.buildx.is_installed():
        v = docker.client.buildx.version()
        log.debug("docker buildx is installed: {}", v)
    else:  # pragma: no cover
        print_and_exit(
            "A mandatory dependency is missing: docker buildx not found"
            "\nInstallation guide: https://github.com/docker/buildx#binary-release"
            "\nor try the automated installation with {command}",
            command=RED("rapydo install buildx"),
        )

    if Configuration.swarm_mode:
        docker.registry.ping()
        docker.registry.login()

    images: Set[str] = set()
    if core:
        log.debug("Forcing rebuild of core builds")
        # Create merged compose file with core files only
        docker = Docker(compose_files=Application.data.base_files)
        docker.compose.dump_config(Application.data.services, set_registry=False)
        log.debug("Compose configuration dumped on {}", COMPOSE_FILE)

        docker.client.buildx.bake(
            targets=Application.data.services,
            files=[COMPOSE_FILE],
            pull=True,
            load=True,
            # --force disables the build cache
            cache=not force,
        )
        log.info("Core images built")
        if Configuration.swarm_mode:
            log.warning(
                "Local registry push is not implemented yet for core images")

    # Back to the full (core + custom) compose configuration
    docker = Docker()
    docker.compose.dump_config(Application.data.services, set_registry=False)
    log.debug("Compose configuration dumped on {}", COMPOSE_FILE)

    core_builds = find_templates_build(Application.data.base_services)
    all_builds = find_templates_build(Application.data.compose_config)

    services_with_custom_builds: List[str] = []
    for image, build in all_builds.items():
        if image not in core_builds:
            # this is used to validate the target Dockerfile:
            if p := build.get("path"):
                get_dockerfile_base_image(p, core_builds)
            services_with_custom_builds.extend(build["services"])
            images.add(image)
def test(
    test: str = typer.Argument(None, help="Name of the test to be executed"),
    swarm_mode: bool = typer.Option(
        False,
        "--swarm",
        help="Execute the test in swarm mode",
        show_default=False,
    ),
    no_remove: bool = typer.Option(
        False,
        "--no-rm",
        help="Do not remove the container",
        show_default=False,
    ),
    # I have no need to test a command to locally execute tests
    # and I would like to prevent any recursive test execution!
) -> None:  # pragma: no cover
    """Execute one of the controller's test suites inside a privileged container.

    Without a test name, lists the available tests and exits. Otherwise it
    pulls the controller image, mounts the installed controller sources at
    /code and runs pytest on the selected test file.
    """
    Application.print_command(
        Application.serialize_parameter("--swarm", swarm_mode, IF=swarm_mode),
        Application.serialize_parameter("--no-rm", no_remove, IF=no_remove),
        Application.serialize_parameter("", test),
    )

    controller_path = Packages.get_installation_path("rapydo")

    # Can't really happen...
    if not controller_path:  # pragma: no cover
        print_and_exit("Controller path not found")

    if not test:
        # No test selected: print the available ones and stop
        log.info("Choose a test to be executed:")
        for f in sorted(controller_path.joinpath("tests").glob("test_*.py")):
            test_name = f.with_suffix("").name.replace("test_", "")
            print(f" - {test_name}")
        return None

    test_file = Path("tests", f"test_{test}.py")

    if not controller_path.joinpath(test_file).exists():
        print_and_exit("Invalid test name {}", test)

    image_name = f"rapydo/controller:{__version__}"
    container_name = "controller"

    docker.image.pull(image_name)

    # Remove any leftover container from a previous run
    if docker.container.exists(container_name):
        docker.container.remove(container_name, force=True, volumes=True)

    docker.container.run(
        image_name,
        detach=True,
        privileged=True,
        remove=True,
        volumes=[(controller_path, "/code")],
        name=container_name,
        envs={
            "TESTING": "1",
            "SWARM_MODE": "1" if swarm_mode else "0",
        },
    )

    # The tests need a running syslog inside the container
    docker.container.execute(
        container_name,
        command="syslogd",
        interactive=False,
        tty=False,
        stream=False,
        detach=True,
    )

    # Wait few seconds to let the docker daemon to start
    log.info("Waiting for docker daemon to start...")
    time.sleep(3)

    command = ["py.test", "-s", "-x", f"/code/{test_file}"]
    log.info("Executing command: {}", " ".join(command))

    try:
        docker.container.execute(
            container_name,
            command=command,
            workdir="/tmp",
            interactive=True,
            tty=True,
            stream=False,
            detach=False,
        )
    except DockerException as e:
        log.error(e)

    # Do not remove the container to let for some debugging
    if not no_remove:
        docker.container.remove(container_name, force=True, volumes=True)
        log.info("Test container ({}) removed", container_name)
def reload(
    services: List[str] = typer.Argument(
        None,
        help="Services to be reloaded",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Reload the requested running services.

    Each service is probed for a /usr/local/bin/reload script and, when
    present, the script is executed on all of its containers. The frontend
    in production mode is a special case and is restarted in compose mode.
    """
    Application.print_command(Application.serialize_parameter("", services))

    Application.get_controller().controller_init(services)

    docker = Docker()
    running_services = docker.get_running_services()

    if "frontend" in services and len(services) > 1:
        print_and_exit("Can't reload frontend and other services at once")

    reloaded = 0
    for service in Application.data.services:

        # Special case: frontend in production mode
        if Configuration.production and service == "frontend":
            # Only consider it if explicitly requested in input
            if "frontend" not in services:
                log.debug(
                    "Can't reload the frontend if not explicitly requested")
            else:
                log.info("Reloading frontend...")
                # The frontend build stucks in swarm mode... let's start the container
                # always in compose mode when using the reload comand
                Configuration.FORCE_COMPOSE_ENGINE = True
                Application.get_controller().controller_init([service])
                docker = Docker()
                docker.compose.start_containers([service], force=True)
                reloaded += 1
            continue

        if service not in running_services:
            continue

        containers = docker.get_containers(service)
        if not containers:
            log.warning("Can't find any container for {}", service)
            continue

        try:
            # get the first container from the containers dict
            container = containers.get(list(containers.keys())[0])

            # Just added for typing purpose
            if not container:  # pragma: no cover
                log.warning("Can't find any container for {}", service)
                continue

            # Probe for the reload script before executing it
            # NOTE(review): user was redacted to "******" in this chunk
            output = docker.exec_command(
                container,
                user="******",
                command="ls /usr/local/bin/reload",
                force_output_return=True,
            )

            # this is to consume the iterator and raise the exception with exit code
            if output:
                [_ for _ in output]

        except DockerException as e:
            # fail2ban fails with code 1
            if "It returned with code 1" in str(e):
                log.warning("Service {} does not support the reload command",
                            service)
                continue
            # backend fails with code 2
            if "It returned with code 2" in str(e):
                log.warning("Service {} does not support the reload command",
                            service)
                continue
            raise

        docker.exec_command(containers, user="******",
                            command="/usr/local/bin/reload")
        reloaded += 1

    if reloaded == 0:
        log.info("No service reloaded")
    else:
        log.info("Services reloaded")
def ssl(
    volatile: bool = typer.Option(
        False,
        "--volatile",
        help="Create a volatile proxy service to request the certificate",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (e.g. to execute from a cronjob)",
        show_default=False,
    ),
    chain_file: Optional[Path] = typer.Option(
        None,
        "--chain-file",
        help="Path to existing chain file (.pem format)",
        show_default=False,
    ),
    key_file: Optional[Path] = typer.Option(
        None,
        "--key-file",
        help="Path to existing key file (.pem format)",
        show_default=False,
    ),
) -> None:
    """Issue or install the SSL certificate on the proxy service.

    Without --chain-file/--key-file the certificate is requested by
    running updatecertificates inside the proxy (or a volatile proxy with
    --volatile); with both files the manual copy commands are printed
    instead. Dependent services (rabbit, swaggerui) are then refreshed.
    """
    Application.print_command(
        Application.serialize_parameter("--volatile", volatile, IF=volatile),
        Application.serialize_parameter("--chain-file", chain_file, IF=chain_file),
        Application.serialize_parameter("--key-file", key_file, IF=key_file),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    Application.get_controller().controller_init()

    # Both files must be provided together and must exist
    if chain_file is not None or key_file is not None:
        if chain_file is None:
            print_and_exit("Invalid chain file (you provided none)")
        elif not chain_file.exists():
            print_and_exit("Invalid chain file (you provided {})", chain_file)

        if key_file is None:
            print_and_exit("Invalid key file (you provided none)")
        elif not key_file.exists():
            print_and_exit("Invalid key file (you provided {})", key_file)

    service = "proxy"

    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    if chain_file is not None and key_file is not None:
        log.info("Unable to automatically perform the requested operation")
        log.info("You can execute the following commands by your-self:")

        c = f"{Configuration.project}_{service}_1"
        letsencrypt_path = "/etc/letsencrypt/real"
        print("")
        print(f"docker cp {chain_file} {c}:{letsencrypt_path}/fullchain1.pem")
        print(f"docker cp {key_file} {c}:{letsencrypt_path}/privkey1.pem")
        print(f"rapydo shell {service} 'nginx -s reload'")
        print("")

        return

    docker = Docker()

    command = f"/bin/bash updatecertificates {Configuration.hostname}"

    if volatile:
        docker.compose.create_volatile_container(service,
                                                 command=command,
                                                 publish=[(443, 443),
                                                          (80, 80)])
    else:
        container = docker.get_container(service)
        if not container:
            print_and_exit(
                "The proxy is not running, start your stack or try with {command}",
                command=RED("rapydo ssl --volatile"),
            )
        # NOTE(review): user was redacted to "******" in this chunk
        docker.exec_command(container, user="******", command=command)

    container = docker.get_container("neo4j")
    if container:
        # This is not true!! A full restart is needed
        # log.info("Neo4j is running, but it will reload the certificate by itself")
        # But not implemented yet...
        log.info(
            "Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.")

    containers = docker.get_containers("rabbit")
    if containers:
        log.info(
            "RabbitMQ is running, executing command to refresh the certificate"
        )
        # Please note that Erland is able to automatically reload the certificate
        # But RabbitMQ does not. Probably in the future releases this command will
        # No longer be required. To test it after the creation of the new cert:
        #   echo -n | openssl s_client -showcerts -connect hostname:5671
        # Please note that this command can fail if RabbitMQ is still starting
        docker.exec_command(containers,
                            user="******",
                            command="/usr/local/bin/reload_certificate")

    containers = docker.get_containers("swaggerui")
    if containers:  # pragma: no cover
        log.info(
            "SwaggerUI is running, executing command to refresh the certificate"
        )
        docker.exec_command(containers, user="******", command="nginx -s reload")

    log.info("New certificate successfully enabled")
def remove(
    services: List[str] = typer.Argument(
        None,
        help="Services to be removed",
        shell_complete=Application.autocomplete_service,
    ),
    rm_all: bool = typer.Option(
        False,
        "--all",
        help="Also remove persistent data stored in docker volumes",
        show_default=False,
    ),
) -> None:
    """Stop and remove the requested services (optionally with named volumes)."""
    Application.print_command(
        Application.serialize_parameter("--all", rm_all, IF=rm_all),
        Application.serialize_parameter("", services),
    )

    # Extra services (registry, adminer, swaggerui) are standalone
    # containers, not part of the compose stack: handle them separately
    extras_to_remove: List[str] = []
    for extra in (REGISTRY, "adminer", "swaggerui"):
        if services and extra in services:
            # services is a tuple, even if defined as List[str] ...
            services = list(services)
            services.remove(extra)
            extras_to_remove.append(extra)

    Application.get_controller().controller_init(services)

    docker = Docker()

    for extra_service in extras_to_remove:
        if not docker.client.container.exists(extra_service):
            log.error("Service {} is not running", extra_service)
            continue
        docker.client.container.remove(extra_service, force=True)
        log.info("Service {} removed", extra_service)

    # Nothing more to do
    if not services:
        return

    all_services = Application.data.services == Application.data.active_services

    if all_services and rm_all:
        # Networks are not removed, but based on docker compose down --help they should
        # Also docker-compose down removes network from what I remember
        # Should be reported as bug? If corrected a specific check in test_remove.py
        # will start to fail
        docker.client.compose.down(
            remove_orphans=False,
            remove_images="local",
            # Remove named volumes declared in the volumes section of the
            # Compose file and anonymous volumes attached to containers.
            volumes=rm_all,
        )
    else:
        # Important note: volumes=True only destroy anonymous volumes,
        # not named volumes like down should do
        docker.client.compose.rm(Application.data.services, stop=True, volumes=rm_all)

    log.info("Stack removed")
def create(
    project_name: str = typer.Argument(..., help="Name of your project"),
    auth: AuthTypes = typer.Option(..., "--auth", help="Auth service to enable"),
    frontend: FrontendTypes = typer.Option(
        ..., "--frontend", help="Frontend framework to enable"
    ),
    extend: str = typer.Option(None, "--extend", help="Extend from another project"),
    services: List[ServiceTypes] = typer.Option(
        [],
        "--service",
        "-s",
        help="Service to be enabled (multiple is enabled)",
        shell_complete=Application.autocomplete_service,
    ),
    origin_url: Optional[str] = typer.Option(
        None, "--origin-url", help="Set the git origin url for the project"
    ),
    envs: List[str] = typer.Option(
        None,
        "--env",
        "-e",
        # Fixed typo: was "Command separated list ..."
        help="Comma separated list of ENV=VALUE to be added in project_configuration",
    ),
    force_current: bool = typer.Option(
        False,
        "--current",
        help="Force creation in current folder",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force files overwriting",
        show_default=False,
    ),
    auto: bool = typer.Option(
        True,
        "--no-auto",
        help="Disable automatic project creation",
        show_default=False,
    ),
    add_optionals: bool = typer.Option(
        False,
        "--add-optionals",
        help="Include all optionals files (html templates and customizers)",
        show_default=False,
    ),
) -> None:
    """Create a new project scaffold.

    Validates the optional --extend target, generates the project files via
    create_project and finally sets up the local git repository (init + origin
    remote when --origin-url is given).
    """
    Application.print_command(
        Application.serialize_parameter("--auth", auth),
        Application.serialize_parameter("--frontend", frontend),
        Application.serialize_parameter("--extend", extend, IF=extend),
        Application.serialize_parameter("--service", services),
        Application.serialize_parameter("--origin-url", origin_url, IF=origin_url),
        Application.serialize_parameter("--env", envs),
        Application.serialize_parameter("--current", force_current, IF=force_current),
        Application.serialize_parameter("--force", force, IF=force),
        # NOTE(review): the flag is spelled --no-auto but is echoed as --auto —
        # confirm whether this asymmetry is intentional.
        Application.serialize_parameter("--auto", auto, IF=auto),
        Application.serialize_parameter("--add-optionals", add_optionals),
        Application.serialize_parameter("", project_name),
    )

    Application.get_controller().controller_init()

    # A project can only extend a different, existing project.
    if extend is not None:
        if project_name == extend:
            print_and_exit("A project cannot extend itself")
        if not PROJECT_DIR.joinpath(extend).is_dir():
            print_and_exit("Invalid extend value: project {} not found", extend)

    # create_project expects plain strings, not ServiceTypes enum members.
    services_list: List[str] = [service.value for service in services]
    create_project(
        project_name=project_name,
        auth=auth.value,
        frontend=frontend.value,
        services=services_list,
        extend=extend,
        envs=envs,
        auto=auto,
        force=force,
        force_current=force_current,
        add_optionals=add_optionals,
    )

    log.info("Project {} successfully created", project_name)

    # Ensure a git repository exists and an origin remote is configured.
    git_repo = git.get_repo(".")
    if git_repo is None:
        git_repo = git.init(".")

    print("\nYou can now init and start the project:\n")
    current_origin = git.get_origin(git_repo)

    if current_origin is None:
        if origin_url is None:  # pragma: no cover
            print("git remote add origin https://your_remote_git/your_project.git")
        else:
            git_repo.create_remote("origin", origin_url)

    print("rapydo init")
    print("rapydo pull")
    print("rapydo start")
def restore(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    backup_file: Optional[str] = typer.Argument(
        None,
        help="Specify the backup to be restored",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the backup procedure",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help="Service to be restarted once completed the restore (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Restore a backup of the given service.

    Without a backup file, the available backups for the service are listed and
    the command returns. Otherwise the service-specific restore module is
    invoked and, if requested, the listed services are restarted afterwards
    (with a 20 second grace period).
    """
    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
        Application.serialize_parameter("", backup_file),
    )
    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()
    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    if not backup_dir.exists():
        print_and_exit(
            "No backup found, the following folder does not exist: {}", backup_dir
        )

    # Each service has a dedicated restore module declaring the expected
    # backup-file extension and the restore procedure itself.
    module = RESTORE_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    expected_ext = module.EXPECTED_EXT

    if backup_file is None:
        # List mode: show the available backups and exit.
        files = backup_dir.iterdir()

        filtered_files = [d.name for d in files if d.name.endswith(expected_ext)]
        filtered_files.sort()

        # Idiomatic emptiness check (was: `if not len(filtered_files)`)
        if not filtered_files:
            print_and_exit("No backup found, {} is empty", backup_dir)

        log.info("Please specify one of the following backup:")
        for f in filtered_files:
            print(f"- {f}")

        return

    backup_host_path = backup_dir.joinpath(backup_file)
    if not backup_host_path.exists():
        print_and_exit("Invalid backup file, {} does not exist", backup_host_path)

    module.restore(container=container, backup_file=backup_file, force=force)

    if restart:
        # Give the restored service time to settle before restarting dependents.
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
def run(
    service: str = typer.Argument(
        ...,
        help="Service name",
        shell_complete=Application.autocomplete_allservice,
    ),
    pull: bool = typer.Option(
        False,
        "--pull",
        help="Pull the image before starting the container",
        show_default=False,
    ),
    debug: bool = typer.Option(
        False,
        "--debug",
        help="Start the container in debug mode",
        show_default=False,
    ),
    command: str = typer.Option(
        None,
        "--command",
        help="UNIX command to be executed in the container",
        show_default=False,
    ),
    user: str = typer.Option(
        None,
        "--user",
        "-u",
        help="User existing in selected service",
        show_default=False,
    ),
    first_port: Optional[int] = typer.Option(
        None,
        "--port",
        "-p",
        help="port to be associated to the current service interface",
    ),
    detach: Optional[bool] = typer.Option(
        None,
        "--detach",
        help="Start the container in detach mode (default for non-interfaces)",
        show_default=False,
    ),
) -> None:
    """Run a single service as a volatile container (compose engine forced).

    Covers both the old `volatile` command (--debug) and the old `registry`
    command (service == REGISTRY), plus the web interfaces (swaggerui,
    adminer), for which the access URL is printed.
    """
    # NOTE(review): --detach is accepted but not echoed by print_command below —
    # confirm whether it should be serialized like the other options.
    Application.print_command(
        Application.serialize_parameter("--pull", pull, IF=pull),
        Application.serialize_parameter("--debug", debug, IF=debug),
        Application.serialize_parameter("--command", command, IF=command),
        Application.serialize_parameter("--user", user, IF=user),
        Application.serialize_parameter("--port", first_port, IF=first_port),
        Application.serialize_parameter("", service),
    )

    # Volatile containers are always handled through the compose engine,
    # even when the project is deployed in swarm mode.
    Configuration.FORCE_COMPOSE_ENGINE = True

    Application.get_controller().controller_init()

    Application.get_controller().check_placeholders_and_passwords(
        Application.data.compose_config, [service]
    )

    if service == REGISTRY and not Configuration.swarm_mode:
        print_and_exit("Can't start the registry in compose mode")

    docker = Docker()
    if Configuration.swarm_mode:
        if service != REGISTRY:
            # Any other service requires a reachable registry.
            docker.registry.ping()
        else:
            # Starting the registry itself: refuse if already running and clean
            # up any stale (stopped) registry container.
            if docker.registry.ping(do_exit=False):
                registry = docker.registry.get_host()
                print_and_exit("The registry is already running at {}", registry)

            if docker.client.container.exists("registry"):
                log.debug("The registry container is already existing, removing")
                docker.client.container.remove("registry", force=True)

    # --user and --command only make sense together with --debug.
    if not debug:
        if user:
            print_and_exit("Can't specify a user if debug mode is OFF")
        if command:
            print_and_exit("Can't specify a command if debug mode is OFF")

    if user:
        log.warning(
            "Please remember that users in volatile containers are not mapped on"
            " current uid and gid. You should not write or modify files on volumes"
            " to prevent permissions errors"
        )

    if pull:
        log.info("Pulling image for {}...", service)
        docker.client.compose.pull([service])
    else:
        verify_available_images(
            [service],
            Application.data.compose_config,
            Application.data.base_services,
            is_run_command=True,
        )

    # This is equivalent to the old volatile command
    if debug:
        if not command:
            command = "bash"

        log.info("Starting {}...", service)
        docker.compose.create_volatile_container(
            service,
            command=command,
            user=user,
            # if None the wrapper will automatically switch the default ones
            # How to prevent ports on volatile containers?
            # publish=None,
        )
        log.info("Service {} removed", service)
        return None

    # This is equivalent to the old registry command
    if service == REGISTRY:
        # @ symbol in secrets is not working
        # https://github.com/bitnami/charts/issues/1954
        # Other symbols like # and " also lead to configuration errors
        os.environ["REGISTRY_HTTP_SECRET"] = password(
            param_not_used="", length=96  # , symbols="%*,-.=?[]^_~"
        )

    publish_ports = get_publish_ports(service, first_port)

    # Interfaces stay attached by default so their logs are visible;
    # everything else detaches.
    if detach is None:
        if service == "swaggerui" or service == "adminer":
            detach = False
        else:
            detach = True

    log.info("Running {}...", service)

    if service == "swaggerui":
        if Configuration.production:
            prot = "https"
        else:
            prot = "http"
        port = publish_ports[0][0] if publish_ports else first_port
        log.info(
            "You can access SwaggerUI web page here: {}\n",
            f"{prot}://{Configuration.hostname}:{port}",
        )

    if service == "adminer":
        if Configuration.production:
            prot = "https"
        else:
            prot = "http"
        port = publish_ports[0][0] if publish_ports else first_port
        log.info(
            "You can access Adminer interface on: {}\n",
            f"{prot}://{Configuration.hostname}:{port}",
        )

    docker.compose.create_volatile_container(
        service, detach=detach, publish=publish_ports
    )
def check(
    no_git: bool = typer.Option(
        False,
        "--no-git",
        "-s",
        help="Skip checks on git commits",
        show_default=False,
    ),
    no_builds: bool = typer.Option(
        False,
        "--no-builds",
        help="Skip check on docker builds",
        show_default=False,
    ),
    ignore_submodules: List[str] = typer.Option(
        [],
        "--ignore-submodule",
        "-i",
        help="Ignore submodule",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    """Verify the health of the project.

    Runs (unless skipped) git checks, docker build freshness checks, template
    file drift detection, compose/buildx version verification and password
    expiration warnings.
    """
    Application.print_command(
        Application.serialize_parameter("--no-git", no_git, IF=no_git),
        Application.serialize_parameter("--no-builds", no_builds, IF=no_builds),
        Application.serialize_parameter("--ignore-submodule", ignore_submodules),
    )
    Application.get_controller().controller_init()

    docker = Docker()
    if Configuration.swarm_mode:
        log.debug("Swarm is correctly initialized")
        docker.swarm.check_resources()

    if no_git:
        log.info("Skipping git checks")
    else:
        log.info("Checking git (skip with --no-git)")
        Application.git_checks(ignore_submodules)

    if no_builds:
        log.info("Skipping builds checks")
    else:
        log.info("Checking builds (skip with --no-builds)")

        # Collect all image tags currently available in the local docker engine.
        dimages: List[str] = []
        for img in docker.client.images():
            if img.repo_tags:
                for i in img.repo_tags:
                    dimages.append(i)

        all_builds = find_templates_build(Application.data.compose_config)
        core_builds = find_templates_build(Application.data.base_services)
        overriding_builds = find_templates_override(
            Application.data.compose_config, core_builds
        )

        for image_tag, build in all_builds.items():
            services = build["services"]
            # Only check builds backing at least one active service.
            if not any(x in Application.data.active_services for x in services):
                continue

            if image_tag not in dimages:
                # Core images are pulled, custom images are built locally.
                if image_tag in core_builds:
                    log.warning(
                        "Missing {} image, execute {command}",
                        image_tag,
                        command=RED("rapydo pull"),
                    )
                else:
                    log.warning(
                        "Missing {} image, execute {command}",
                        image_tag,
                        command=RED("rapydo build"),
                    )
                continue

            image_creation = get_image_creation(image_tag)
            # Check if some recent commit modified the Dockerfile
            d1, d2 = build_is_obsolete(image_creation, build.get("path"))
            if d1 and d2:
                tmp_from_image = overriding_builds.get(image_tag)
                # This is the case of a build not overriding a core image,
                # e.g nifi or geoserver. In that case from_image is faked to image_tag
                # just to make print_obsolete to print 'build' instead of 'pull'
                if not tmp_from_image and image_tag not in core_builds:
                    tmp_from_image = image_tag

                print_obsolete(image_tag, d1, d2, build.get("service"), tmp_from_image)

            # if FROM image is newer, this build should be re-built
            elif image_tag in overriding_builds:
                from_img = overriding_builds.get(image_tag, "")
                from_build: Optional[TemplateInfo] = core_builds.get(from_img)

                if not from_build:  # pragma: no cover
                    log.critical("Malformed {} image, from build is missing", image_tag)
                    continue

                # Verify if template build exists
                if from_img not in dimages:  # pragma: no cover
                    # Fixed: the message had a third "{}" placeholder without a
                    # matching argument, which raised IndexError at format time.
                    log.warning(
                        "Missing template build for {} ({})",
                        from_build.get("services"),
                        from_img,
                    )

                from_timestamp = get_image_creation(from_img)
                # Verify if template build is obsolete or not
                d1, d2 = build_is_obsolete(from_timestamp, from_build.get("path"))
                if d1 and d2:  # pragma: no cover
                    print_obsolete(from_img, d1, d2, from_build.get("service"))

                if from_timestamp > image_creation:
                    b = image_creation.strftime(DATE_FORMAT)
                    c = from_timestamp.strftime(DATE_FORMAT)
                    print_obsolete(image_tag, b, c, build.get("service"), from_img)

    templating = Templating()
    for filename in Application.project_scaffold.fixed_files:
        if templating.file_changed(str(filename)):
            # Fixed: the hint now names the changed file instead of the
            # literal placeholder text that previously ended up in the message.
            log.warning(
                "{} changed, please execute {command}",
                filename,
                command=RED(f"rapydo upgrade --path {filename}"),
            )

    compose_version = "Unknown"
    buildx_version = "Unknown"
    m = re.search(
        r"^Docker Compose version (v[0-9]+\.[0-9]+\.[0-9]+)$",
        docker.client.compose.version(),
    )
    if m:
        compose_version = m.group(1)

    m = re.search(
        r"^github.com/docker/buildx (v[0-9]+\.[0-9]+\.[0-9]+) .*$",
        docker.client.buildx.version(),
    )
    if m:
        buildx_version = m.group(1)

    if compose_version == COMPOSE_VERSION:
        log.info("Compose is installed with version {}", COMPOSE_VERSION)
    else:  # pragma: no cover
        cmd = RED("rapydo install compose")
        fix_hint = f"You can update it with {cmd}"
        log.warning(
            "Compose is installed with version {}, expected version is {}.\n{}",
            compose_version,
            COMPOSE_VERSION,
            fix_hint,
        )

    if buildx_version == BUILDX_VERSION:
        log.info("Buildx is installed with version {}", BUILDX_VERSION)
    else:  # pragma: no cover
        cmd = RED("rapydo install buildx")
        fix_hint = f"You can update it with {cmd}"
        log.warning(
            "Buildx is installed with version {}, expected version is {}.\n{}",
            buildx_version,
            BUILDX_VERSION,
            fix_hint,
        )

    for expired_passwords in get_expired_passwords():
        log.warning(
            "{} is expired on {}",
            expired_passwords[0],
            expired_passwords[1].strftime("%Y-%m-%d"),
        )

    log.info("Checks completed")
def init(
    create_projectrc: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Overwrite initialization files if already exist",
        show_default=False,
    ),
    submodules_path: Path = typer.Option(
        None,
        "--submodules-path",
        help="Link all submodules in an existing folder instead of download them",
    ),
) -> None:
    """Initialize the current project.

    Creates data folders/files, (re)generates the .projectrc, sets up git
    submodules (optionally linked from an existing local folder), builds the
    environment/compose configuration and, in swarm mode, initializes the
    swarm. A stale Angular yarn.lock is removed if present.
    """
    Application.print_command(
        Application.serialize_parameter("--force", create_projectrc, IF=create_projectrc),
        Application.serialize_parameter(
            "--submodules-path", submodules_path, IF=submodules_path
        ),
    )
    Application.get_controller().controller_init()

    # Ensure all expected data folders and files exist.
    for p in Application.project_scaffold.data_folders:
        if not p.exists():
            p.mkdir(parents=True, exist_ok=True)

    for p in Application.project_scaffold.data_files:
        if not p.exists():
            p.touch()

    # First run (no .projectrc and no host configuration): force its creation.
    if not Configuration.projectrc and not Configuration.host_configuration:
        create_projectrc = True

    # We have to create the .projectrc twice
    # One generic here with main options and another after the complete
    # conf reading to set services variables
    if create_projectrc:
        Application.get_controller().create_projectrc()
        Application.get_controller().read_specs(read_extended=False)

    if submodules_path is not None:
        if not submodules_path.exists():
            print_and_exit("Local path not found: {}", submodules_path)

    Application.git_submodules(from_path=submodules_path)

    Application.get_controller().read_specs(read_extended=True)
    Application.get_controller().make_env()

    # Compose services and variables
    Application.get_controller().get_compose_configuration()

    # We have to create the .projectrc twice
    # One generic with main options and another here
    # when services are available to set specific configurations
    if create_projectrc:
        Application.get_controller().create_projectrc()
        Application.get_controller().read_specs(read_extended=True)
        Application.get_controller().make_env()

    if Configuration.swarm_mode:
        # Swarm checks are skipped here because the swarm may not exist yet.
        docker = Docker(verify_swarm=False)
        if not docker.swarm.get_token():
            docker.swarm.init()
            log.info("Swarm is now initialized")
        else:
            log.debug("Swarm is already initialized")

    if Configuration.frontend == ANGULAR:
        # A leftover yarn.lock would pin outdated frontend dependencies.
        yarn_lock = DATA_DIR.joinpath(Configuration.project, "frontend", "yarn.lock")
        if yarn_lock.exists():
            yarn_lock.unlink()
            log.info("Yarn lock file deleted")

    log.info("Project initialized")
def remove(
    services: List[str] = typer.Argument(
        None,
        help="Services to be removed",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Stop and remove services of the current stack (swarm flavor).

    When every active service is selected the whole swarm stack is removed
    (waiting for the stack network to disappear, since `docker stack rm` is
    async); otherwise the selected services are scaled to zero replicas.
    Extra services (registry, adminer, swaggerui) are plain containers and
    are removed individually.
    """
    Application.print_command(Application.serialize_parameter("", services))

    # Split out services that live outside the swarm stack.
    remove_extras: List[str] = []
    for extra in (
        REGISTRY,
        "adminer",
        "swaggerui",
    ):
        if services and extra in services:
            # services is a tuple, even if defined as List[str] ...
            services = list(services)
            services.pop(services.index(extra))
            remove_extras.append(extra)

    Application.get_controller().controller_init(services)

    docker = Docker()
    if remove_extras:
        for extra_service in remove_extras:
            if not docker.client.container.exists(extra_service):
                # Best-effort: report and keep removing the other extras.
                log.error("Service {} is not running", extra_service)
                continue

            docker.client.container.remove(extra_service, force=True)
            log.info("Service {} removed", extra_service)

        # Nothing more to do
        if not services:
            return

    # True when the user asked for every active service (i.e. the whole stack).
    all_services = Application.data.services == Application.data.active_services

    if all_services:
        docker.swarm.remove()
        # This is needed because docker stack remove does not support a --wait flag
        # To make the remove command sync and chainable with a start command
        engine = Application.env.get("DEPLOY_ENGINE", "swarm")
        network_name = f"{Configuration.project}_{engine}_default"
        wait_network_removal(docker, network_name)
        log.info("Stack removed")
    else:
        if not docker.swarm.stack_is_running():
            print_and_exit(
                "Stack {} is not running, deploy it with {command}",
                Configuration.project,
                command=RED("rapydo start"),
            )

        # Scale every selected service down to zero replicas instead of
        # removing the whole stack.
        scales: Dict[Union[str, Service], int] = {}
        for service in Application.data.services:
            service_name = Docker.get_service(service)
            scales[service_name] = 0

        docker.client.service.scale(scales, detach=False)
        log.info("Services removed")
def images(
    remove_images: List[str] = typer.Option(
        [],
        "--rm",
        "--remove",
        help="Remove the specified image(s)",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    """List (and optionally delete) the images stored in the local registry.

    Talks to the Docker Registry HTTP API v2: walks the catalog, fetches
    tags and manifests (schema v1 is queried too, as it is the only schema
    exposing the creation date), prints a table and, for images selected via
    --rm (by short id or repository:tag), deletes the manifests and runs the
    registry garbage collector.
    """
    Application.print_command(
        Application.serialize_parameter("--remove", remove_images, IF=remove_images),
    )
    Application.get_controller().controller_init()

    # The registry uses a self-signed certificate; silence urllib3 warnings.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # type: ignore
    # https://docs.docker.com/registry/spec/api/

    docker = Docker()

    docker.registry.ping()

    registry = docker.registry.get_host()
    host = f"https://{registry}"

    # Docker Registry API Reference
    # https://docs.docker.com/registry/spec/api/

    # Retrieve a sorted, json list of repositories available in the registry
    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    # Each entry: (digest, repository, tag, size in bytes, creation date).
    images: List[Tuple[str, str, str, int, Optional[datetime]]] = []

    # NOTE(review): the default here is an (empty) dict although the value is
    # expected to be a list; iteration works either way — confirm intent.
    for repository in catalog.get("repositories", {}):
        # Fetch the tags under the repository identified by <name>
        r = docker.registry.send_request(f"{host}/v2/{repository}/tags/list")

        # tags can be None if all the tags of a repository have deleted
        # this or ensure that every None will be converted in an empty dictionary
        tags = r.json().get("tags") or {}

        for tag in tags:
            # Fetch the manifest identified by name and reference
            r = docker.registry.send_request(f"{host}/v2/{repository}/manifests/{tag}")
            manifest = r.json()

            # Total size = sum of all layer sizes.
            size = 0
            for layer in manifest.get("layers", []):
                size += layer.get("size", 0)

            headers = r.headers

            _id = cast(str, headers.get("Docker-Content-Digest", "N/A"))

            # Creation date is only available on schema version 1 :\
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}", version="1"
            )
            manifest = r.json()
            layers = manifest.get("history", [])

            created: Optional[datetime] = None
            if len(layers) > 0:
                first_layer = json.loads(layers[0].get("v1Compatibility", {}))
                creation_date = first_layer.get("created", "N/A")
                if creation_date != "N/A":
                    # Keep only "YYYY-MM-DDTHH:MM:SS", dropping fractional
                    # seconds and timezone suffix.
                    creation_date = creation_date[0:19]
                    created = datetime.strptime(creation_date, "%Y-%m-%dT%H:%M:%S")

            images.append((_id, cast(str, repository), cast(str, tag), size, created))

    if not images:
        log.warning("This registry contains no images")
    else:
        log.info("This registry contains {} image(s):", len(images))

        # (repository, digest-without-sha256-prefix, tag) of images to delete.
        images_to_be_removed: List[Tuple[str, str, str]] = []
        table: List[List[str]] = []
        for img in images:
            digest = img[0]
            # to be replaced with removeprefix starting from py39
            if digest.startswith("sha256:"):
                digest = digest[7:]
            _id = digest[0:12]

            repository = img[1]
            tag = img[2]
            SIZE = system.bytes_to_str(img[3])
            d = img[4]

            # Images can be selected for removal by short id or repository:tag.
            to_be_removed = (
                _id in remove_images or f"{repository}:{tag}" in remove_images
            )
            creation_date = d.strftime("%Y-%m-%d %H:%M:%S") if d else "N/A"

            image_line: List[str] = []

            if to_be_removed:
                image_line.append(RED(repository))
                image_line.append(RED(tag))
                image_line.append(RED(_id))
                image_line.append(RED(creation_date))
                image_line.append(RED(SIZE))
                # NOTE(review): this assignment is never read afterwards —
                # it looks like it was meant to replace the date column with
                # "DELETING ..." before appending; confirm intent.
                creation_date = "DELETING ..."
                images_to_be_removed.append((repository, digest, tag))
            else:
                image_line.append(repository)
                image_line.append(tag)
                image_line.append(_id)
                image_line.append(creation_date)
                image_line.append(SIZE)

            table.append(image_line)

        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=["REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"],
            )
        )

        if len(remove_images) != len(images_to_be_removed):
            log.error(
                "Some of the images that you specified are not found in this registry"
            )

        # DELETE /v2/<name>/manifests/<reference>
        for image in images_to_be_removed:
            repository = image[0]
            reference = image[1]  # digest without sha256:
            tag = image[2]
            # For deletes reference must be a digest or the delete will fail
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/sha256:{reference}", method="DELETE"
            )

            log.info("Image {}:{} deleted from {}", repository, tag, host)

        if images_to_be_removed:
            log.info("Executing registry garbage collector...")
            command = "/bin/registry garbage-collect -m /etc/docker/registry/config.yml"
            docker.exec_command("registry", user="******", command=command)
            log.info("Registry garbage collector successfully executed")

            # A restart is needed to prevent clashes beetween gc and cache
            # https://gist.github.com/jaytaylor/86d5efaddda926a25fa68c263830dac1#gistcomment-3653760
            # The garbage collector doesn't communicate with the cache, or unlink layers
            # from the repository so if you immediately try to repush a layer that was
            # just deleted, the registry will find it for stat calls, but actually
            # serving the blob will fail.
            docker.client.container.restart("registry")
            log.info("Registry restarted to clean the layers cache")