def tuning(ram: int, cpu: int) -> None:
    """Print neo4j memory-tuning recommendations via ``neo4j-admin memrec``.

    ram: total memory (in bytes) to pass to memrec
    cpu: currently unused — kept for interface compatibility with the
         other services' tuning commands (TODO confirm)
    """
    verify_available_images(
        [SERVICE_NAME],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()
    memrec_command = f"neo4j-admin memrec --memory {ram}"

    # Prefer the running service container; otherwise execute the command
    # in a throw-away (volatile) container
    neo4j_container = docker.get_container(SERVICE_NAME)
    if not neo4j_container:
        docker.compose.create_volatile_container(SERVICE_NAME, command=memrec_command)
    else:
        docker.exec_command(neo4j_container, user="******", command=memrec_command)

    # NOTE: never allocate more than 31g of heap: doing so disables pointer
    # compression ("compressed oops") in the JVM and makes less effective
    # use of the heap
    log.info("Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE")
    log.info("Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE")
    log.info(
        "Keep enough free memory for lucene indexes "
        "(check size reported in the output, if any)"
    )
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None:
    """Change the RabbitMQ password of the configured user.

    The username comes from the RABBITMQ_USER environment variable, not
    from the CLI; old_password is unused because rabbitmqctl does not
    require it (kept for signature compatibility with the other services).
    """
    rabbit_user = Application.env.get("RABBITMQ_USER")
    change_command = f'rabbitmqctl change_password "{rabbit_user}" "{new_password}"'
    Docker().exec_command(
        container,
        user=services.get_default_user(SERVICE_NAME),
        command=change_command,
    )
def backup(container: Optional[Tuple[str, str]], now: datetime, force: bool, dry_run: bool) -> None:
    """Back up mariadb into /backup/<SERVICE_NAME>/<now>.tar.gz.

    Pipeline (each step skipped when dry_run is True): mariabackup --backup
    into a /tmp folder, mariabackup --prepare on that folder, tar+gzip it,
    verify the archive integrity, then move it into the /backup volume.

    container: the running service container; aborts if None
    now: timestamp used to name the backup folder/file
    force: unused here — presumably consumed by the caller (TODO confirm)
    dry_run: when True only the log messages are emitted
    """
    if not container:
        print_and_exit(
            "The backup procedure requires {} running, please start your stack",
            SERVICE_NAME,
        )

    docker = Docker()

    log.info("Starting backup on {}...", SERVICE_NAME)

    tmp_backup_path = f"/tmp/{now}"
    # Root credentials are read from the container's environment at exec time
    command = f"sh -c 'mariabackup --backup --target-dir={tmp_backup_path} "
    command += '-uroot -p"$MYSQL_ROOT_PASSWORD"\''

    # Creating backup on a tmp folder as mysql user
    if not dry_run:
        docker.exec_command(container, user="******", command=command)

    # Prepare (apply the redo log to) the backup so it becomes consistent —
    # note: this runs mariabackup --prepare, not a second backup
    if not dry_run:
        log.info("Executing mariabackup...")
        docker.exec_command(
            container,
            user="******",
            command=f"sh -c 'mariabackup --prepare --target-dir={tmp_backup_path}'",
        )

    # Compress the prepared data folder. Used -C to skip the /tmp from folders paths
    if not dry_run:
        log.info("Compressing the backup file...")
        docker.exec_command(
            container,
            user="******",
            command=f"tar -zcf {tmp_backup_path}.tar.gz -C /tmp {now}",
        )

    # Verify the gz integrity
    if not dry_run:
        log.info("Verifying the integrity of the backup file...")
        docker.exec_command(
            container, user="******", command=f"gzip -t {tmp_backup_path}.tar.gz"
        )

    # Move the backup from /tmp to /backup (as root user)
    backup_path = f"/backup/{SERVICE_NAME}/{now}.tar.gz"
    if not dry_run:
        docker.exec_command(
            container,
            user="******",
            command=f"mv {tmp_backup_path}.tar.gz {backup_path}",
        )

    log.info("Backup completed: data{}", backup_path)
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None:
    """Change the neo4j password of the current user via cypher-shell.

    Unlike most services here, neo4j requires the old password to authorize
    the change (ALTER CURRENT USER ... FROM ... TO ...).
    """
    docker = Docker()
    # The statement is passed verbatim to cypher-shell; passwords are
    # interpolated into the query, so they must not contain single quotes
    # (NOTE(review): not validated here — confirm upstream sanitization)
    docker.exec_command(
        container,
        user=services.get_default_user(SERVICE_NAME),
        command=f"""bin/cypher-shell \"
            ALTER CURRENT USER SET PASSWORD FROM '{old_password}' TO '{new_password}';
        \"""",
    )
def restore(
    container: Optional[Tuple[str, str]], backup_file: str, force: bool
) -> None:
    """Restore a gzipped pg_dumpall dump into the running postgres service.

    container: the running service container; aborts if None
    backup_file: file name inside /backup/<SERVICE_NAME>/ (a .gz sql dump)
    force: unused here — presumably consumed by the caller (TODO confirm)
    """
    if not container:
        print_and_exit(
            "The restore procedure requires {} running, please start your stack",
            SERVICE_NAME,
        )

    docker_cli = Docker()

    log.info("Starting restore on {}...", SERVICE_NAME)

    # Archive location in the backup volume and the extracted dump path
    backup_path = f"/backup/{SERVICE_NAME}/{backup_file}"
    dump_file = backup_file.replace(".gz", "")
    dump_path = f"/tmp/{dump_file}"

    # Stage the archive in /tmp and extract it, keeping the original (-k)
    copy_command = f"cp {backup_path} /tmp/"
    docker_cli.exec_command(container, user="******", command=copy_command)
    gunzip_command = f"gunzip -kf /tmp/{backup_file}"
    docker_cli.exec_command(container, user="******", command=gunzip_command)

    # Executed as root: hand the dump over to the postgres user
    chown_command = f"chown postgres {dump_path}"
    docker_cli.exec_command(container, user="******", command=chown_command)

    # The dump was produced by pg_dumpall, so plain psql can replay it
    psql_command = f"psql -U sqluser -f {dump_path} postgres"
    docker_cli.exec_command(container, user="******", command=psql_command)

    log.info("Restore from data{} completed", backup_path)
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None:
    """Change the backend password by re-running ``restapi init``.

    old_password is unused: restapi init overwrites the credential directly
    (parameter kept for signature compatibility with the other services).

    Fix: the original instantiated Docker() twice in a row; the redundant
    duplicate assignment has been removed.
    """
    # restapi init need the env variable to be updated but can't be done after
    # the restart because it often fails because unable to re-connect to
    # services in a short time and some long sleep would be needed
    # => applied a workaround to be able to execute it before the restart
    docker = Docker()
    docker.exec_command(
        container,
        user=services.get_default_user(SERVICE_NAME),
        command=f"""/bin/bash -c '
            AUTH_DEFAULT_PASSWORD=\"{new_password}\"
            restapi init --force-user
        '
        """,
    )
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None:
    """Change the mysql/mariadb password of the application user.

    Connects as root (password from MYSQL_ROOT_PASSWORD) and issues
    ALTER USER for the ALCHEMY_USER account; old_password is therefore
    not needed (kept for signature compatibility).
    """
    # https://dev.mysql.com/doc/refman/8.0/en/set-password.html
    docker = Docker()

    # User, root password and database name are read from the project env
    user = Application.env.get("ALCHEMY_USER")
    pwd = Application.env.get("MYSQL_ROOT_PASSWORD")
    db = Application.env.get("ALCHEMY_DB")

    # NOTE(review): values are interpolated into the SQL text — assumes they
    # contain no quotes; confirm upstream validation
    docker.exec_command(
        container,
        user=services.get_default_user(SERVICE_NAME),
        command=f"""
            mysql -uroot -p\"{pwd}\" -D\"{db}\" -e
            "ALTER USER '{user}'@'%' IDENTIFIED BY '{new_password}';"
        """,
    )
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None: docker = Docker() # Interactively: # \password username # Non interactively: # https://ubiq.co/database-blog/how-to-change-user-password-in-postgresql user = Application.env.get("ALCHEMY_USER") db = Application.env.get("ALCHEMY_DB") docker.exec_command( container, user=services.get_default_user(SERVICE_NAME), command=f""" psql -U {user} -d {db} -c \" ALTER USER {user} WITH PASSWORD \'{new_password}\'; \" """, )
def backup(
    container: Optional[Tuple[str, str]], now: datetime, force: bool, dry_run: bool
) -> None:
    """Back up postgres into /backup/<SERVICE_NAME>/<now>.sql.gz.

    Pipeline (all skipped when dry_run is True): pg_dumpall into /tmp,
    gzip -9 the dump, verify the archive, then move it (as root) into
    the /backup volume.
    """
    if not container:
        print_and_exit(
            "The backup procedure requires {} running, please start your stack",
            SERVICE_NAME,
        )

    docker_cli = Docker()

    log.info("Starting backup on {}...", SERVICE_NAME)

    # This double step (dump in /tmp, then move) is required because the
    # postgres user is uid 70 and is not remapped to the host uid like the
    # other services
    tmp_backup_path = f"/tmp/{now}.sql"
    backup_path = f"/backup/{SERVICE_NAME}/{now}.sql.gz"

    # dry_run is constant, so a single guard is equivalent to one per step
    if not dry_run:
        # Creating backup on a tmp folder as postgres user
        log.info("Executing pg_dumpall...")
        docker_cli.exec_command(
            container,
            user="******",
            command=f"pg_dumpall --clean -U sqluser -f {tmp_backup_path}",
        )

        # Compress the sql with best compression ratio
        log.info("Compressing the backup file...")
        docker_cli.exec_command(
            container, user="******", command=f"gzip -9 {tmp_backup_path}"
        )

        # Verify the gz integrity
        log.info("Verifying the integrity of the backup file...")
        docker_cli.exec_command(
            container, user="******", command=f"gzip -t {tmp_backup_path}.gz"
        )

        # Move the backup from /tmp to /backup (as root user)
        docker_cli.exec_command(
            container, user="******", command=f"mv {tmp_backup_path}.gz {backup_path}"
        )

    log.info("Backup completed: data{}", backup_path)
def backup(
    container: Optional[Tuple[str, str]], now: datetime, force: bool, dry_run: bool
) -> None:
    """Back up redis data files into /backup/<SERVICE_NAME>/<now>.tar.gz.

    Unlike the database backups, this works with or without a running
    container: commands run in the service container when available,
    otherwise in a one-shot (volatile) container.
    """
    docker_cli = Docker()

    log.info("Starting backup on {}...", SERVICE_NAME)

    backup_path = f"/backup/{SERVICE_NAME}/{now}.tar.gz"

    def execute(cmd: str) -> None:
        # Run in the running container if any, else spawn a volatile one
        if container:
            docker_cli.exec_command(container, user="******", command=cmd)
        else:
            docker_cli.compose.create_volatile_container(SERVICE_NAME, command=cmd)

    # If running, ask redis to synchronize the database to disk first
    if container:
        docker_cli.exec_command(
            container,
            user="******",
            command="sh -c 'redis-cli --pass \"$REDIS_PASSWORD\" save'",
        )

    if not dry_run:
        log.info("Compressing the data files...")
        execute(f"tar -zcf {backup_path} -C /data dump.rdb appendonly.aof")

    # Verify the gz integrity
    if not dry_run:
        log.info("Verifying the integrity of the backup file...")
        execute(f"gzip -t {backup_path}")

    log.info("Backup completed: data{}", backup_path)
def reload(docker: Docker, services: List[str]) -> None:
    """Execute the reload script in every container of each given service."""
    reload_command = "/usr/local/bin/reload"
    for service_name in services:
        service_containers = docker.get_containers(service_name)
        docker.exec_command(
            service_containers, user="******", command=reload_command
        )
def ssl(
    volatile: bool = typer.Option(
        False,
        "--volatile",
        help="Create a volatile proxy service to request the certificate",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (e.g. to execute from a cronjob)",
        show_default=False,
    ),
    chain_file: Optional[Path] = typer.Option(
        None,
        "--chain-file",
        help="Path to existing chain file (.pem format)",
        show_default=False,
    ),
    key_file: Optional[Path] = typer.Option(
        None,
        "--key-file",
        help="Path to existing key file (.pem format)",
        show_default=False,
    ),
) -> None:
    """Issue/renew the SSL certificate through the proxy container.

    Three modes:
    - default: run updatecertificates inside the running proxy container;
    - --volatile: run it in a one-shot proxy container publishing 80/443;
    - --chain-file/--key-file: do not run anything, just print the manual
      docker cp commands to install an existing certificate.
    Afterwards, services known to cache the certificate (rabbit, swaggerui)
    are asked to refresh it; neo4j would need a full restart (not implemented).
    """
    Application.print_command(
        Application.serialize_parameter("--volatile", volatile, IF=volatile),
        Application.serialize_parameter("--chain-file", chain_file, IF=chain_file),
        Application.serialize_parameter("--key-file", key_file, IF=key_file),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    Application.get_controller().controller_init()

    # Both files must be provided together and must exist on disk
    if chain_file is not None or key_file is not None:
        if chain_file is None:
            print_and_exit("Invalid chain file (you provided none)")
        elif not chain_file.exists():
            print_and_exit("Invalid chain file (you provided {})", chain_file)

        if key_file is None:
            print_and_exit("Invalid key file (you provided none)")
        elif not key_file.exists():
            print_and_exit("Invalid key file (you provided {})", key_file)

    service = "proxy"
    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    # Manual mode: print the commands for the operator and stop here
    if chain_file is not None and key_file is not None:
        log.info("Unable to automatically perform the requested operation")
        log.info("You can execute the following commands by your-self:")

        c = f"{Configuration.project}_{service}_1"
        letsencrypt_path = "/etc/letsencrypt/real"

        print("")
        print(f"docker cp {chain_file} {c}:{letsencrypt_path}/fullchain1.pem")
        print(f"docker cp {key_file} {c}:{letsencrypt_path}/privkey1.pem")
        print(f"rapydo shell {service} 'nginx -s reload'")
        print("")

        return

    docker = Docker()

    command = f"/bin/bash updatecertificates {Configuration.hostname}"

    if volatile:
        # One-shot proxy container; 80/443 must be published for the ACME challenge
        docker.compose.create_volatile_container(
            service, command=command, publish=[(443, 443), (80, 80)]
        )
    else:
        container = docker.get_container(service)
        if not container:
            print_and_exit(
                "The proxy is not running, start your stack or try with {command}",
                command=RED("rapydo ssl --volatile"),
            )
        docker.exec_command(container, user="******", command=command)

    container = docker.get_container("neo4j")
    if container:
        # This is not true!! A full restart is needed
        # log.info("Neo4j is running, but it will reload the certificate by itself")
        # But not implemented yet...
        log.info("Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.")

    containers = docker.get_containers("rabbit")
    if containers:
        log.info("RabbitMQ is running, executing command to refresh the certificate")
        # Please note that Erlang is able to automatically reload the certificate
        # But RabbitMQ does not. Probably in the future releases this command will
        # No longer be required. To test it after the creation of the new cert:
        # echo -n | openssl s_client -showcerts -connect hostname:5671
        # Please note that this command can fail if RabbitMQ is still starting
        docker.exec_command(
            containers, user="******", command="/usr/local/bin/reload_certificate"
        )

    containers = docker.get_containers("swaggerui")
    if containers:  # pragma: no cover
        log.info("SwaggerUI is running, executing command to refresh the certificate")
        docker.exec_command(containers, user="******", command="nginx -s reload")

    log.info("New certificate successfully enabled")
def images(
    remove_images: List[str] = typer.Option(
        [],
        "--rm",
        "--remove",
        help="Remove the specified image(s)",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    """List (and optionally delete) images stored in the local Docker registry.

    Walks the registry catalog via the Registry HTTP API v2, prints a table of
    repository/tag/id/created/size, then deletes any image matched by --rm
    (either by short id or repository:tag), runs the registry garbage
    collector and restarts the registry container to flush its layer cache.
    """
    Application.print_command(
        Application.serialize_parameter("--remove", remove_images, IF=remove_images),
    )
    Application.get_controller().controller_init()

    # The registry uses a self-signed certificate: silence urllib3's warning
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # type: ignore

    # https://docs.docker.com/registry/spec/api/
    docker = Docker()
    docker.registry.ping()

    registry = docker.registry.get_host()
    host = f"https://{registry}"

    # Docker Registry API Reference
    # https://docs.docker.com/registry/spec/api/

    # Retrieve a sorted, json list of repositories available in the registry
    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    # Each entry: (digest, repository, tag, total size in bytes, created date)
    images: List[Tuple[str, str, str, int, Optional[datetime]]] = []
    for repository in catalog.get("repositories", {}):

        # Fetch the tags under the repository identified by <name>
        r = docker.registry.send_request(f"{host}/v2/{repository}/tags/list")

        # tags can be None if all the tags of a repository have deleted
        # this or ensure that every None will be converted in an empty dictionary
        tags = r.json().get("tags") or {}

        for tag in tags:

            # Fetch the manifest identified by name and reference
            r = docker.registry.send_request(f"{host}/v2/{repository}/manifests/{tag}")
            manifest = r.json()
            # Image size = sum of all layer sizes
            size = 0
            for layer in manifest.get("layers", []):
                size += layer.get("size", 0)

            headers = r.headers
            _id = cast(str, headers.get("Docker-Content-Digest", "N/A"))

            # Creation date is only available on schema version 1 :\
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}", version="1"
            )
            manifest = r.json()
            layers = manifest.get("history", [])

            created: Optional[datetime] = None
            if len(layers) > 0:
                # v1Compatibility is a JSON string embedded in the manifest
                first_layer = json.loads(layers[0].get("v1Compatibility", {}))
                creation_date = first_layer.get("created", "N/A")
                if creation_date != "N/A":
                    # Truncate fractional seconds/timezone before parsing
                    creation_date = creation_date[0:19]
                    created = datetime.strptime(creation_date, "%Y-%m-%dT%H:%M:%S")

            images.append((_id, cast(str, repository), cast(str, tag), size, created))

    if not images:
        log.warning("This registry contains no images")
    else:
        log.info("This registry contains {} image(s):", len(images))

        images_to_be_removed: List[Tuple[str, str, str]] = []
        table: List[List[str]] = []
        for img in images:

            digest = img[0]
            # to be replaced with removeprefix starting from py39
            if digest.startswith("sha256:"):
                digest = digest[7:]
            _id = digest[0:12]

            repository = img[1]
            tag = img[2]
            SIZE = system.bytes_to_str(img[3])
            d = img[4]

            # An image is selected for removal by short id or repository:tag
            to_be_removed = (
                _id in remove_images or f"{repository}:{tag}" in remove_images
            )
            creation_date = d.strftime("%Y-%m-%d %H:%M:%S") if d else "N/A"

            image_line: List[str] = []

            if to_be_removed:
                image_line.append(RED(repository))
                image_line.append(RED(tag))
                image_line.append(RED(_id))
                image_line.append(RED(creation_date))
                image_line.append(RED(SIZE))
                creation_date = "DELETING ..."
                images_to_be_removed.append((repository, digest, tag))
            else:
                image_line.append(repository)
                image_line.append(tag)
                image_line.append(_id)
                image_line.append(creation_date)
                image_line.append(SIZE)

            table.append(image_line)

        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=["REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"],
            )
        )

        if len(remove_images) != len(images_to_be_removed):
            log.error(
                "Some of the images that you specified are not found in this registry"
            )

        # DELETE /v2/<name>/manifests/<reference>
        for image in images_to_be_removed:
            repository = image[0]
            reference = image[1]  # digest without sha256:
            tag = image[2]
            # For deletes reference must be a digest or the delete will fail
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/sha256:{reference}", method="DELETE"
            )

            log.info("Image {}:{} deleted from {}", repository, tag, host)

        if images_to_be_removed:
            log.info("Executing registry garbage collector...")
            command = "/bin/registry garbage-collect -m /etc/docker/registry/config.yml"
            docker.exec_command("registry", user="******", command=command)
            log.info("Registry garbage collector successfully executed")

            # A restart is needed to prevent clashes between gc and cache
            # https://gist.github.com/jaytaylor/86d5efaddda926a25fa68c263830dac1#gistcomment-3653760
            # The garbage collector doesn't communicate with the cache, or unlink layers
            # from the repository so if you immediately try to repush a layer that was
            # just deleted, the registry will find it for stat calls, but actually
            # serving the blob will fail.
            docker.client.container.restart("registry")
            log.info("Registry restarted to clean the layers cache")
def reload(
    services: List[str] = typer.Argument(
        None,
        help="Services to be reloaded",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Reload the requested services without restarting their containers.

    For each running service a probe checks that /usr/local/bin/reload
    exists inside the container; services lacking it are skipped with a
    warning. The production frontend is a special case: it is rebuilt by
    restarting its container in compose mode.
    """
    Application.print_command(Application.serialize_parameter("", services))

    Application.get_controller().controller_init(services)

    docker = Docker()
    running_services = docker.get_running_services()

    # Frontend reload forces compose mode, so it can't be mixed with others
    if "frontend" in services and len(services) > 1:
        print_and_exit("Can't reload frontend and other services at once")

    reloaded = 0
    for service in Application.data.services:

        # Special case: frontend in production mode
        if Configuration.production and service == "frontend":
            # Only consider it if explicitly requested in input
            if "frontend" not in services:
                log.debug("Can't reload the frontend if not explicitly requested")
            else:
                log.info("Reloading frontend...")
                # The frontend build stucks in swarm mode... let's start the container
                # always in compose mode when using the reload command
                Configuration.FORCE_COMPOSE_ENGINE = True
                Application.get_controller().controller_init([service])
                docker = Docker()
                docker.compose.start_containers([service], force=True)
                reloaded += 1
            continue

        if service not in running_services:
            continue

        containers = docker.get_containers(service)
        if not containers:
            log.warning("Can't find any container for {}", service)
            continue

        try:
            # get the first container from the containers dict
            container = containers.get(list(containers.keys())[0])

            # Just added for typing purpose
            if not container:  # pragma: no cover
                log.warning("Can't find any container for {}", service)
                continue

            # Probe for the reload script; the exception (if any) is raised
            # lazily while consuming the returned iterator below
            output = docker.exec_command(
                container,
                user="******",
                command="ls /usr/local/bin/reload",
                force_output_return=True,
            )

            # this is to consume the iterator and raise the exception with exit code
            if output:
                [_ for _ in output]
        except DockerException as e:
            # fail2ban fails with code 1
            if "It returned with code 1" in str(e):
                log.warning("Service {} does not support the reload command", service)
                continue
            # backend fails with code 2
            if "It returned with code 2" in str(e):
                log.warning("Service {} does not support the reload command", service)
                continue
            raise

        docker.exec_command(containers, user="******", command="/usr/local/bin/reload")
        reloaded += 1

    if reloaded == 0:
        log.info("No service reloaded")
    else:
        log.info("Services reloaded")
def shell(
    service: str = typer.Argument(
        ..., help="Service name", shell_complete=Application.autocomplete_service
    ),
    command: str = typer.Argument(
        "bash", help="UNIX command to be executed on selected running service"
    ),
    user: Optional[str] = typer.Option(
        None,
        "--user",
        "-u",
        help="User existing in selected service",
        show_default=False,
    ),
    default_command: bool = typer.Option(
        False,
        "--default-command",
        "--default",
        help="Execute the default command configured for the container",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (useful for non-interactive script)",
        show_default=False,
    ),
    replica: int = typer.Option(
        1,
        "--replica",
        "--slot",
        help="Execute the command on the specified replica",
        show_default=False,
    ),
    broadcast: bool = typer.Option(
        False,
        "--broadcast",
        help="Execute the command on all the replicas",
        show_default=False,
    ),
) -> None:
    """Open a shell (or run a command) inside a running service container.

    Either targets one replica (--replica N, default 1) or every replica
    (--broadcast); the two options are mutually exclusive. The user and
    command default to the service's configured defaults when not given.
    """
    Application.print_command(
        Application.serialize_parameter("--user", user, IF=user),
        Application.serialize_parameter("--default", default_command, IF=default_command),
        Application.serialize_parameter("", service),
        Application.serialize_parameter("", command),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    if replica > 1 and broadcast:
        print_and_exit("--replica and --broadcast options are not compatible")

    Application.get_controller().controller_init()
    docker = Docker()

    # Fall back to the service defaults when user/command are not provided
    if not user:
        user = services.get_default_user(service)

    if default_command:
        command = services.get_default_command(service)

    log.debug("Requested command: {} with user: {}", command, user or "default")

    if broadcast:
        containers = docker.get_containers(service)
        if not containers:
            print_and_exit("No running container found for {} service", service)

        docker.exec_command(containers, user=user, command=command)
    else:
        container = docker.get_container(service, slot=replica)

        if not container:
            if replica != 1:
                print_and_exit(
                    "Replica number {} not found for {} service", str(replica), service
                )
            print_and_exit("No running container found for {} service", service)

        docker.exec_command(container, user=user, command=command)