def print_obsolete(
    image: str,
    date1: str,
    date2: str,
    service: Optional[str],
    from_img: Optional[str] = None,
) -> None:
    if service:
        if from_img:
            log.warning(
                """Obsolete image {}: built on {} FROM {} that changed on {}
Update it with: {command}""",
                image,
                date1,
                from_img,
                date2,
                command=RED(f"rapydo build {service}"),
            )
        else:
            log.warning(
                """Obsolete image {}: built on {} but changed on {}
Update it with: {command}""",
                image,
                date1,
                date2,
                command=RED(f"rapydo pull {service}"),
            )
def verify_available_images(
    services: List[str],
    compose_config: ComposeServices,
    base_services: ComposeServices,
    is_run_command: bool = False,
) -> None:
    docker = Docker()

    # All template builds (core only)
    templates = find_templates_build(base_services, include_image=True)
    clean_core_services = get_non_redundant_services(templates, services)

    for service in sorted(clean_core_services):
        for image, data in templates.items():
            data_services = data["services"]
            if data["service"] != service and service not in data_services:
                continue

            if Configuration.swarm_mode and not is_run_command:
                image_exists = docker.registry.verify_image(image)
            else:
                image_exists = docker.client.image.exists(image)

            if not image_exists:
                if is_run_command:
                    print_and_exit(
                        "Missing {} image, add {opt} option", image, opt=RED("--pull")
                    )
                else:
                    print_and_exit(
                        "Missing {} image, execute {command}",
                        image,
                        command=RED(f"rapydo pull {service}"),
                    )

    # All builds used for the current configuration (core + custom)
    builds = find_templates_build(compose_config, include_image=True)
    clean_services = get_non_redundant_services(builds, services)

    for service in clean_services:
        for image, data in builds.items():
            data_services = data["services"]
            if data["service"] != service and service not in data_services:
                continue

            if Configuration.swarm_mode and not is_run_command:
                image_exists = docker.registry.verify_image(image)
            else:
                image_exists = docker.client.image.exists(image)

            if not image_exists:
                action = "build" if data["path"] else "pull"
                print_and_exit(
                    "Missing {} image, execute {command}",
                    image,
                    command=RED(f"rapydo {action} {service}"),
                )
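# Usage note (a sketch, not part of the original code): callers pass the list of
# services to check together with the current compose configuration. For example,
# the ssl command further below verifies the proxy image before using it:
#
#     verify_available_images(
#         ["proxy"],
#         Application.data.compose_config,
#         Application.data.base_services,
#     )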
def ping(self, do_exit: bool = True) -> bool:
    registry_host = Application.env["REGISTRY_HOST"]
    registry_port = int(Application.env.get("REGISTRY_PORT", "5000") or "5000")

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        try:
            result = sock.connect_ex((registry_host, registry_port))
        except socket.gaierror:
            # The error is not important, let's use a generic -1
            # result = errno.ESRCH
            result = -1

        if result == 0:
            return True

    if do_exit:
        print_and_exit(
            "Registry {} not reachable. You can start it with {command}",
            self.get_host(),
            command=RED("rapydo run registry"),
        )

    return False
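# Typical guard pattern (a sketch based on the build command further below): in
# swarm mode the registry is pinged before logging in, and ping() exits with a
# "rapydo run registry" hint when it is unreachable.
#
#     docker = Docker()
#     if Configuration.swarm_mode:
#         docker.registry.ping()
#         docker.registry.login()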
def version() -> None:
    Application.print_command()
    Application.get_controller().controller_init()

    # Check if the rapydo version is compatible with the version required by the project
    if __version__ == Configuration.rapydo_version:
        c = colors.GREEN  # Light Green
    else:
        c = colors.RED

    cv = f"{c}{__version__}{colors.RESET}"
    pv = f"{c}{Configuration.version}{colors.RESET}"
    rv = f"{c}{Configuration.rapydo_version}{colors.RESET}"
    print(f"\nrapydo: {cv}\t{Configuration.project}: {pv}\trequired rapydo: {rv}")

    if __version__ != Configuration.rapydo_version:
        cver = Version(__version__)
        rver = Version(Configuration.rapydo_version)
        updown = "upgrade" if cver < rver else "downgrade"
        rv = Configuration.rapydo_version
        command = RED(f"rapydo install {Configuration.rapydo_version}")

        print(
            f"""
This project is not compatible with rapydo version {__version__}
Please {updown} rapydo to version {rv} or modify this project

{command}"""
        )
def check_installed_software() -> None:
    log.debug(
        "python version: {}.{}.{}",
        sys.version_info.major,
        sys.version_info.minor,
        sys.version_info.micro,
    )

    # 17.05 added support for multi-stage builds
    # https://docs.docker.com/compose/compose-file/compose-file-v3/#compose-and-docker-compatibility-matrix
    # 18.09.2 fixed the CVE-2019-5736 vulnerability
    # 20.10.0 introduced copy --chmod and improved logging
    Packages.check_program(
        "docker", min_version="20.10.0", min_recommended_version="20.10.0"
    )

    if docker.compose.is_installed():
        # Verifying the version on every command is too slow: nearly half a second,
        # sometimes a couple of seconds!
        # v = docker.compose.version()
        # log.debug("docker compose is installed: {}", v)
        log.debug("docker compose is installed")
    else:  # pragma: no cover
        print_and_exit(
            "A mandatory dependency is missing: docker compose not found"
            "\nInstallation guide: "
            "https://docs.docker.com/compose/cli-command/#installing-compose-v2"
            "\nor try the automated installation with {command}",
            command=RED("rapydo install compose"),
        )
def restart(
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Force services restart",
        show_default=False,
    ),
) -> None:
    # Deprecated since 2.2
    print_and_exit(
        "This command is no longer available"
        "\nIf you want to reload your services, use {}"
        "\nIf you want to recreate your containers, use {}",
        RED("rapydo reload"),
        RED("rapydo start --force"),
    )
def __init__(self, docker: Docker, check_initialization: bool = True):
    self.docker_wrapper = docker
    self.docker = self.docker_wrapper.client

    if check_initialization and not self.get_token():
        print_and_exit(
            "Swarm is not initialized, please execute {command}",
            command=RED("rapydo init"),
        )
def check_program(
    program: str,
    min_version: Optional[str] = None,
    max_version: Optional[str] = None,
    min_recommended_version: Optional[str] = None,
) -> str:
    """
    Verify if a binary exists and (optionally) its version
    """

    found_version = Packages.get_bin_version(program)
    if found_version is None:
        hints = ""
        if program == "docker":  # pragma: no cover
            install_cmd = RED("rapydo install docker")
            hints = "\n\nTo install docker visit: https://get.docker.com"
            hints += f" or execute {install_cmd}"

        print_and_exit(
            "A mandatory dependency is missing: {} not found{}", program, hints
        )

    v = Version(found_version)
    if min_version is not None:
        if Version(min_version) > v:
            print_and_exit(
                "Minimum supported version for {} is {}, found {}",
                program,
                min_version,
                found_version,
            )

    if min_recommended_version is not None:
        if Version(min_recommended_version) > v:
            log.warning(
                "Minimum recommended version for {} is {}, found {}",
                program,
                min_recommended_version,
                found_version,
            )

    if max_version is not None:
        if Version(max_version) < v:
            print_and_exit(
                "Maximum supported version for {} is {}, found {}",
                program,
                max_version,
                found_version,
            )

    log.debug("{} version: {}", program, found_version)
    return found_version
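# Usage sketch (hypothetical program and version; only the signature above is taken
# from the code): the return value is the detected version string.
#
#     git_version = Packages.check_program("git", min_version="2.20.0")
#     log.debug("git is installed: {}", git_version)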
def compare_repository(gitobj: Repo, branch: str, online_url: str) -> bool:
    # origin = gitobj.remote()
    # url = list(origin.urls).pop(0)
    url = gitobj.remotes.origin.url

    if online_url != url:  # pragma: no cover
        local_url = urlparse(url)
        expected_url = urlparse(online_url)

        # Remove the username from the URL, if any
        # i.e. [email protected] becomes github.com
        local_netloc = local_url.netloc.split("@").pop()
        expected_netloc = expected_url.netloc.split("@").pop()

        if local_url.scheme != expected_url.scheme:
            url_match = False
        elif local_netloc != expected_netloc:
            url_match = False
        elif local_url.path != expected_url.path:
            url_match = False
        else:
            url_match = True

        if not url_match:
            print_and_exit(
                """Unmatched local remote
Found: {}\nExpected: {}
Suggestion: remove {} and execute the init command
""",
                url,
                online_url,
                str(gitobj.working_dir or "N/A"),
            )

    active_branch = get_active_branch(gitobj)

    if active_branch and active_branch != branch and gitobj.working_dir:
        print_and_exit(
            "{}: wrong branch {}, expected {}. You can fix it with {command}",
            Path(gitobj.working_dir).stem,
            active_branch,
            branch,
            command=RED("rapydo init"),
        )
    return True
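# Illustration of the netloc comparison above (standard library only; the URL is a
# made-up example): urlparse keeps any username inside the netloc, and
# split("@").pop() drops it so that authenticated and anonymous remotes compare equal.
#
#     from urllib.parse import urlparse
#     urlparse("https://[email protected]/org/repo").netloc                   # '[email protected]'
#     urlparse("https://[email protected]/org/repo").netloc.split("@").pop()  # 'github.com'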
def remove(
    services: List[str] = typer.Argument(
        None,
        help="Services to be removed",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    Application.print_command(Application.serialize_parameter("", services))

    remove_extras: List[str] = []
    for extra in (
        REGISTRY,
        "adminer",
        "swaggerui",
    ):
        if services and extra in services:
            # services is a tuple, even if defined as List[str] ...
            services = list(services)
            services.pop(services.index(extra))
            remove_extras.append(extra)

    Application.get_controller().controller_init(services)

    docker = Docker()

    if remove_extras:
        for extra_service in remove_extras:
            if not docker.client.container.exists(extra_service):
                log.error("Service {} is not running", extra_service)
                continue

            docker.client.container.remove(extra_service, force=True)
            log.info("Service {} removed", extra_service)

        # Nothing more to do
        if not services:
            return

    all_services = Application.data.services == Application.data.active_services

    if all_services:
        docker.swarm.remove()
        # This is needed because docker stack remove does not support a --wait flag
        # To make the remove command sync and chainable with a start command
        engine = Application.env.get("DEPLOY_ENGINE", "swarm")
        network_name = f"{Configuration.project}_{engine}_default"
        wait_network_removal(docker, network_name)
        log.info("Stack removed")
    else:
        if not docker.swarm.stack_is_running():
            print_and_exit(
                "Stack {} is not running, deploy it with {command}",
                Configuration.project,
                command=RED("rapydo start"),
            )

        scales: Dict[Union[str, Service], int] = {}
        for service in Application.data.services:
            service_name = Docker.get_service(service)
            scales[service_name] = 0

        docker.client.service.scale(scales, detach=False)
        log.info("Services removed")
def check(
    no_git: bool = typer.Option(
        False,
        "--no-git",
        "-s",
        help="Skip checks on git commits",
        show_default=False,
    ),
    no_builds: bool = typer.Option(
        False,
        "--no-builds",
        help="Skip check on docker builds",
        show_default=False,
    ),
    ignore_submodules: List[str] = typer.Option(
        [],
        "--ignore-submodule",
        "-i",
        help="Ignore submodule",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--no-git", no_git, IF=no_git),
        Application.serialize_parameter("--no-builds", no_builds, IF=no_builds),
        Application.serialize_parameter("--ignore-submodule", ignore_submodules),
    )

    Application.get_controller().controller_init()

    docker = Docker()
    if Configuration.swarm_mode:
        log.debug("Swarm is correctly initialized")
        docker.swarm.check_resources()

    if no_git:
        log.info("Skipping git checks")
    else:
        log.info("Checking git (skip with --no-git)")
        Application.git_checks(ignore_submodules)

    if no_builds:
        log.info("Skipping builds checks")
    else:
        log.info("Checking builds (skip with --no-builds)")

        dimages: List[str] = []
        for img in docker.client.images():
            if img.repo_tags:
                for i in img.repo_tags:
                    dimages.append(i)

        all_builds = find_templates_build(Application.data.compose_config)
        core_builds = find_templates_build(Application.data.base_services)
        overriding_builds = find_templates_override(
            Application.data.compose_config, core_builds
        )

        for image_tag, build in all_builds.items():
            services = build["services"]
            if not any(x in Application.data.active_services for x in services):
                continue

            if image_tag not in dimages:
                if image_tag in core_builds:
                    log.warning(
                        "Missing {} image, execute {command}",
                        image_tag,
                        command=RED("rapydo pull"),
                    )
                else:
                    log.warning(
                        "Missing {} image, execute {command}",
                        image_tag,
                        command=RED("rapydo build"),
                    )
                continue

            image_creation = get_image_creation(image_tag)
            # Check if some recent commit modified the Dockerfile
            d1, d2 = build_is_obsolete(image_creation, build.get("path"))
            if d1 and d2:
                tmp_from_image = overriding_builds.get(image_tag)
                # This is the case of a build not overriding a core image,
                # e.g. nifi or geoserver. In that case from_image is faked to image_tag
                # just to make print_obsolete print 'build' instead of 'pull'
                if not tmp_from_image and image_tag not in core_builds:
                    tmp_from_image = image_tag

                print_obsolete(image_tag, d1, d2, build.get("service"), tmp_from_image)

            # if the FROM image is newer, this build should be re-built
            elif image_tag in overriding_builds:
                from_img = overriding_builds.get(image_tag, "")
                from_build: Optional[TemplateInfo] = core_builds.get(from_img)

                if not from_build:  # pragma: no cover
                    log.critical("Malformed {} image, from build is missing", image_tag)
                    continue

                # Verify if the template build exists
                if from_img not in dimages:  # pragma: no cover
                    log.warning(
                        "Missing template build for {} ({})",
                        from_build.get("services"),
                        from_img,
                    )

                from_timestamp = get_image_creation(from_img)
                # Verify if the template build is obsolete or not
                d1, d2 = build_is_obsolete(from_timestamp, from_build.get("path"))
                if d1 and d2:  # pragma: no cover
                    print_obsolete(from_img, d1, d2, from_build.get("service"))

                if from_timestamp > image_creation:
                    b = image_creation.strftime(DATE_FORMAT)
                    c = from_timestamp.strftime(DATE_FORMAT)
                    print_obsolete(image_tag, b, c, build.get("service"), from_img)

    templating = Templating()
    for filename in Application.project_scaffold.fixed_files:
        if templating.file_changed(str(filename)):
            log.warning(
                "{} changed, please execute {command}",
                filename,
                command=RED(f"rapydo upgrade --path {filename}"),
            )

    compose_version = "Unknown"
    buildx_version = "Unknown"

    m = re.search(
        r"^Docker Compose version (v[0-9]+\.[0-9]+\.[0-9]+)$",
        docker.client.compose.version(),
    )
    if m:
        compose_version = m.group(1)

    m = re.search(
        r"^github.com/docker/buildx (v[0-9]+\.[0-9]+\.[0-9]+) .*$",
        docker.client.buildx.version(),
    )
    if m:
        buildx_version = m.group(1)

    if compose_version == COMPOSE_VERSION:
        log.info("Compose is installed with version {}", COMPOSE_VERSION)
    else:  # pragma: no cover
        cmd = RED("rapydo install compose")
        fix_hint = f"You can update it with {cmd}"
        log.warning(
            "Compose is installed with version {}, expected version is {}.\n{}",
            compose_version,
            COMPOSE_VERSION,
            fix_hint,
        )

    if buildx_version == BUILDX_VERSION:
        log.info("Buildx is installed with version {}", BUILDX_VERSION)
    else:  # pragma: no cover
        cmd = RED("rapydo install buildx")
        fix_hint = f"You can update it with {cmd}"
        log.warning(
            "Buildx is installed with version {}, expected version is {}.\n{}",
            buildx_version,
            BUILDX_VERSION,
            fix_hint,
        )

    for expired_passwords in get_expired_passwords():
        log.warning(
            "{} is expired on {}",
            expired_passwords[0],
            expired_passwords[1].strftime("%Y-%m-%d"),
        )

    log.info("Checks completed")
def build(
    services: List[str] = typer.Argument(
        None,
        help="Services to be built",
        shell_complete=Application.autocomplete_service,
    ),
    core: bool = typer.Option(
        False,
        "--core",
        help="Include core images in the build list",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="remove the cache to force the build",
        show_default=False,
    ),
) -> bool:
    Application.print_command(
        Application.serialize_parameter("--core", core, IF=core),
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("", services),
    )

    Application.get_controller().controller_init(services)

    docker = Docker()

    if docker.client.buildx.is_installed():
        v = docker.client.buildx.version()
        log.debug("docker buildx is installed: {}", v)
    else:  # pragma: no cover
        print_and_exit(
            "A mandatory dependency is missing: docker buildx not found"
            "\nInstallation guide: https://github.com/docker/buildx#binary-release"
            "\nor try the automated installation with {command}",
            command=RED("rapydo install buildx"),
        )

    if Configuration.swarm_mode:
        docker.registry.ping()
        docker.registry.login()

    images: Set[str] = set()
    if core:
        log.debug("Forcing rebuild of core builds")
        # Create a merged compose file with core files only
        docker = Docker(compose_files=Application.data.base_files)
        docker.compose.dump_config(Application.data.services, set_registry=False)
        log.debug("Compose configuration dumped on {}", COMPOSE_FILE)

        docker.client.buildx.bake(
            targets=Application.data.services,
            files=[COMPOSE_FILE],
            pull=True,
            load=True,
            cache=not force,
        )
        log.info("Core images built")
        if Configuration.swarm_mode:
            log.warning("Local registry push is not implemented yet for core images")

    docker = Docker()
    docker.compose.dump_config(Application.data.services, set_registry=False)
    log.debug("Compose configuration dumped on {}", COMPOSE_FILE)

    core_builds = find_templates_build(Application.data.base_services)
    all_builds = find_templates_build(Application.data.compose_config)

    services_with_custom_builds: List[str] = []
    for image, build in all_builds.items():
        if image not in core_builds:
            # this is used to validate the target Dockerfile:
            if p := build.get("path"):
                get_dockerfile_base_image(p, core_builds)

            services_with_custom_builds.extend(build["services"])
            images.add(image)
def ssl(
    volatile: bool = typer.Option(
        False,
        "--volatile",
        help="Create a volatile proxy service to request the certificate",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (e.g. to execute from a cronjob)",
        show_default=False,
    ),
    chain_file: Optional[Path] = typer.Option(
        None,
        "--chain-file",
        help="Path to existing chain file (.pem format)",
        show_default=False,
    ),
    key_file: Optional[Path] = typer.Option(
        None,
        "--key-file",
        help="Path to existing key file (.pem format)",
        show_default=False,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--volatile", volatile, IF=volatile),
        Application.serialize_parameter("--chain-file", chain_file, IF=chain_file),
        Application.serialize_parameter("--key-file", key_file, IF=key_file),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    Application.get_controller().controller_init()

    if chain_file is not None or key_file is not None:
        if chain_file is None:
            print_and_exit("Invalid chain file (you provided none)")
        elif not chain_file.exists():
            print_and_exit("Invalid chain file (you provided {})", chain_file)

        if key_file is None:
            print_and_exit("Invalid key file (you provided none)")
        elif not key_file.exists():
            print_and_exit("Invalid key file (you provided {})", key_file)

    service = "proxy"

    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    if chain_file is not None and key_file is not None:
        log.info("Unable to automatically perform the requested operation")
        log.info("You can execute the following commands by yourself:")

        c = f"{Configuration.project}_{service}_1"
        letsencrypt_path = "/etc/letsencrypt/real"

        print("")
        print(f"docker cp {chain_file} {c}:{letsencrypt_path}/fullchain1.pem")
        print(f"docker cp {key_file} {c}:{letsencrypt_path}/privkey1.pem")
        print(f"rapydo shell {service} 'nginx -s reload'")
        print("")

        return

    docker = Docker()

    command = f"/bin/bash updatecertificates {Configuration.hostname}"

    if volatile:
        docker.compose.create_volatile_container(
            service, command=command, publish=[(443, 443), (80, 80)]
        )
    else:
        container = docker.get_container(service)
        if not container:
            print_and_exit(
                "The proxy is not running, start your stack or try with {command}",
                command=RED("rapydo ssl --volatile"),
            )
        docker.exec_command(container, user="******", command=command)

    container = docker.get_container("neo4j")
    if container:
        # This is not true!! A full restart is needed
        # log.info("Neo4j is running, but it will reload the certificate by itself")
        # But it is not implemented yet...
        log.info("Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.")

    containers = docker.get_containers("rabbit")
    if containers:
        log.info("RabbitMQ is running, executing command to refresh the certificate")
        # Please note that Erlang is able to automatically reload the certificate,
        # but RabbitMQ is not. Probably in a future release this command will
        # no longer be required. To test it after the creation of the new cert:
        # echo -n | openssl s_client -showcerts -connect hostname:5671
        # Please note that this command can fail if RabbitMQ is still starting
        docker.exec_command(
            containers, user="******", command="/usr/local/bin/reload_certificate"
        )

    containers = docker.get_containers("swaggerui")
    if containers:  # pragma: no cover
        log.info("SwaggerUI is running, executing command to refresh the certificate")
        docker.exec_command(containers, user="******", command="nginx -s reload")

    log.info("New certificate successfully enabled")
def images(
    remove_images: List[str] = typer.Option(
        [],
        "--rm",
        "--remove",
        help="Remove the specified image(s)",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--remove", remove_images, IF=remove_images),
    )
    Application.get_controller().controller_init()

    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # type: ignore
    # https://docs.docker.com/registry/spec/api/

    docker = Docker()
    docker.registry.ping()

    registry = docker.registry.get_host()
    host = f"https://{registry}"

    # Docker Registry API Reference
    # https://docs.docker.com/registry/spec/api/

    # Retrieve a sorted, json list of repositories available in the registry
    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    images: List[Tuple[str, str, str, int, Optional[datetime]]] = []
    for repository in catalog.get("repositories", {}):
        # Fetch the tags under the repository identified by <name>
        r = docker.registry.send_request(f"{host}/v2/{repository}/tags/list")

        # tags can be None if all the tags of a repository have been deleted;
        # the "or" ensures that every None is converted into an empty dictionary
        tags = r.json().get("tags") or {}

        for tag in tags:
            # Fetch the manifest identified by name and reference
            r = docker.registry.send_request(f"{host}/v2/{repository}/manifests/{tag}")
            manifest = r.json()
            size = 0
            for layer in manifest.get("layers", []):
                size += layer.get("size", 0)

            headers = r.headers
            _id = cast(str, headers.get("Docker-Content-Digest", "N/A"))

            # Creation date is only available on schema version 1 :\
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}", version="1"
            )
            manifest = r.json()
            layers = manifest.get("history", [])

            created: Optional[datetime] = None
            if len(layers) > 0:
                first_layer = json.loads(layers[0].get("v1Compatibility", {}))
                creation_date = first_layer.get("created", "N/A")
                if creation_date != "N/A":
                    creation_date = creation_date[0:19]
                    created = datetime.strptime(creation_date, "%Y-%m-%dT%H:%M:%S")

            images.append((_id, cast(str, repository), cast(str, tag), size, created))

    if not images:
        log.warning("This registry contains no images")
    else:
        log.info("This registry contains {} image(s):", len(images))

        images_to_be_removed: List[Tuple[str, str, str]] = []
        table: List[List[str]] = []
        for img in images:

            digest = img[0]
            # to be replaced with removeprefix starting from py39
            if digest.startswith("sha256:"):
                digest = digest[7:]
            _id = digest[0:12]

            repository = img[1]
            tag = img[2]
            SIZE = system.bytes_to_str(img[3])
            d = img[4]

            to_be_removed = (
                _id in remove_images or f"{repository}:{tag}" in remove_images
            )
            creation_date = d.strftime("%Y-%m-%d %H:%M:%S") if d else "N/A"

            image_line: List[str] = []

            if to_be_removed:
                image_line.append(RED(repository))
                image_line.append(RED(tag))
                image_line.append(RED(_id))
                image_line.append(RED(creation_date))
                image_line.append(RED(SIZE))
                creation_date = "DELETING ..."
                images_to_be_removed.append((repository, digest, tag))
            else:
                image_line.append(repository)
                image_line.append(tag)
                image_line.append(_id)
                image_line.append(creation_date)
                image_line.append(SIZE)

            table.append(image_line)

        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=["REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"],
            )
        )

        if len(remove_images) != len(images_to_be_removed):
            log.error(
                "Some of the images that you specified are not found in this registry"
            )

        # DELETE /v2/<name>/manifests/<reference>
        for image in images_to_be_removed:
            repository = image[0]
            reference = image[1]  # digest without sha256:
            tag = image[2]
            # For deletes, reference must be a digest or the delete will fail
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/sha256:{reference}", method="DELETE"
            )

            log.info("Image {}:{} deleted from {}", repository, tag, host)

        if images_to_be_removed:
            log.info("Executing registry garbage collector...")
            command = "/bin/registry garbage-collect -m /etc/docker/registry/config.yml"
            docker.exec_command("registry", user="******", command=command)
            log.info("Registry garbage collector successfully executed")

            # A restart is needed to prevent clashes between the gc and the cache
            # https://gist.github.com/jaytaylor/86d5efaddda926a25fa68c263830dac1#gistcomment-3653760
            # The garbage collector doesn't communicate with the cache, or unlink layers
            # from the repository, so if you immediately try to repush a layer that was
            # just deleted, the registry will find it for stat calls, but actually
            # serving the blob will fail.
            docker.client.container.restart("registry")
            log.info("Registry restarted to clean the layers cache")
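# For reference, these are the raw Docker Registry v2 API endpoints used above
# (https://docs.docker.com/registry/spec/api/); <registry>, <name>, <tag> and
# <digest> are placeholders:
#
#     GET    https://<registry>/v2/_catalog
#     GET    https://<registry>/v2/<name>/tags/list
#     GET    https://<registry>/v2/<name>/manifests/<tag>
#            (the digest is returned in the Docker-Content-Digest response header)
#     DELETE https://<registry>/v2/<name>/manifests/sha256:<digest>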
def password(
    service: SupportedServices = typer.Argument(None, help="Service name"),
    show: bool = typer.Option(
        False,
        "--show",
        help="Show the current password(s)",
        show_default=False,
    ),
    random: bool = typer.Option(
        False,
        "--random",
        help="Generate a random password",
        show_default=False,
    ),
    new_password: str = typer.Option(
        None,
        "--password",
        help="Force the given password",
        show_default=False,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--show", show, IF=show),
        Application.serialize_parameter("--random", random, IF=random),
        Application.serialize_parameter("--password", new_password, IF=new_password),
        Application.serialize_parameter("", service),
    )

    Application.get_controller().controller_init()

    # No service specified, only a summary will be reported
    if not service:
        if random:
            print_and_exit("--random flag is not supported without a service")

        if new_password:
            print_and_exit("--password option is not supported without a service")

        MIN_PASSWORD_SCORE = int(
            Application.env.get("MIN_PASSWORD_SCORE", 2)  # type: ignore
        )

        last_updates = parse_projectrc()
        now = datetime.now()

        table: List[List[str]] = []
        for s in PASSWORD_MODULES:
            # This should never happen and can't be (easily) tested
            if s not in Application.data.base_services:  # pragma: no cover
                print_and_exit("Command misconfiguration, unknown {} service", s)

            if s != REGISTRY and s not in Application.data.active_services:
                continue

            if s == REGISTRY and not Configuration.swarm_mode:
                continue

            module = PASSWORD_MODULES.get(s)

            if not module:  # pragma: no cover
                print_and_exit(f"{s} misconfiguration, module not found")

            for variable in module.PASSWORD_VARIABLES:

                password = Application.env.get(variable)

                if password == PLACEHOLDER:
                    score = None
                else:
                    result = zxcvbn(password)
                    score = result["score"]

                if variable in last_updates:
                    change_date = last_updates.get(variable, datetime.fromtimestamp(0))
                    expiration_date = change_date + timedelta(days=PASSWORD_EXPIRATION)
                    expired = now > expiration_date
                    last_change = change_date.strftime("%Y-%m-%d")
                else:
                    expired = True
                    last_change = "N/A"

                pass_line: List[str] = []

                pass_line.append(s)
                pass_line.append(variable)

                if expired:
                    pass_line.append(RED(last_change))
                else:
                    pass_line.append(GREEN(last_change))

                if score is None:
                    pass_line.append(RED("NOT SET"))
                elif score < MIN_PASSWORD_SCORE:
                    pass_line.append(RED(score))
                else:
                    pass_line.append(GREEN(score))

                if show:
                    pass_line.append(str(password))

                table.append(pass_line)

        headers = ["SERVICE", "VARIABLE", "LAST CHANGE", "STRENGTH"]
        if show:
            headers.append("PASSWORD")

        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=headers,
            )
        )

    # A service was specified, its password will be updated
    else:
        module = PASSWORD_MODULES.get(service.value)

        if not module:  # pragma: no cover
            print_and_exit(f"{service.value} misconfiguration, module not found")

        if random:
            new_password = get_strong_password()
        elif not new_password:
            print_and_exit("Please specify one of the --random and --password options")

        docker = Docker()

        variables = module.PASSWORD_VARIABLES
        old_password = Application.env.get(variables[0])
        new_variables = {variable: new_password for variable in variables}

        # Some services can only be updated if already running,
        # others can be updated even if offline,
        # but in every case if the stack is running it has to be restarted

        if service.value == REGISTRY:
            is_running = docker.registry.ping(do_exit=False)
            container: Optional[Tuple[str, str]] = ("registry", "")
        else:
            container = docker.get_container(service.value)
            is_running = container is not None

        is_running_needed = module.IS_RUNNING_NEEDED

        log.info("Changing password for {}...", service.value)

        if is_running_needed and (not is_running or not container):
            print_and_exit(
                "Can't update {} because it is not running. Please start your stack",
                service.value,
            )

        update_projectrc(new_variables)

        if container:
            module.password(container, old_password, new_password)

        if is_running:
            log.info("{} was running, restarting services...", service.value)

            Application.get_controller().check_placeholders_and_passwords(
                Application.data.compose_config, Application.data.services
            )
            if service.value == REGISTRY:
                port = cast(int, Application.env["REGISTRY_PORT"])

                docker.client.container.remove(REGISTRY, force=True)
                docker.compose.create_volatile_container(
                    REGISTRY, detach=True, publish=[(port, port)]
                )
            elif Configuration.swarm_mode:
                docker.compose.dump_config(Application.data.services)
                docker.swarm.deploy()
            else:
                docker.compose.start_containers(Application.data.services)
        else:
            log.info("{} was not running, restart is not needed", service.value)

        log.info(
            "The password of {} has been changed. "
            "Please find the new password in your .projectrc file as the {} variable",
            service.value,
            variables[0],
        )
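# Note on the strength column above (a sketch, assuming the zxcvbn-python package
# imported by this module): zxcvbn() returns a dict whose "score" field is an
# integer from 0 (very weak) to 4 (very strong); values below MIN_PASSWORD_SCORE
# (default 2) are rendered in red.
#
#     from zxcvbn import zxcvbn
#     result = zxcvbn("correct horse battery staple")
#     print(result["score"])  # an int in the 0-4 range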