Example no. 1
def test_reload_prod(capfd: Capture, faker: Faker) -> None:
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="angular",
    )

    init_project(capfd, " --prod ", "--force")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    time.sleep(5)

    exec_command(capfd, "reload backend", "Reloading gunicorn (PID #")

    exec_command(
        capfd,
        "reload",
        "Can't reload the frontend if not explicitly requested",
        "Services reloaded",
    )

    docker = Docker()
    container = docker.get_container("frontend")
    assert container is not None

    docker.client.container.stop(container[0])
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    container = docker.get_container("frontend")

    if Configuration.swarm_mode:
        # The frontend reload is always executed in compose mode
        # => the container retrieved from docker.get_container in swarm mode is None
        assert container is None
        # Let's retrieve the container name in compose mode
        # (a standalone sketch of this toggle follows this example):

        Configuration.swarm_mode = False
        docker = Docker()
        container = docker.get_container("frontend")

        # Let's restore the docker client
        Configuration.swarm_mode = True
        docker = Docker()

    assert container is not None

    docker.client.container.remove(container[0], force=True)
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    exec_command(
        capfd,
        "reload frontend backend",
        "Can't reload frontend and other services at once",
    )
    exec_command(capfd, "remove", "Stack removed")
Example no. 2
def tuning(ram: int, cpu: int) -> None:

    verify_available_images(
        [SERVICE_NAME],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()

    container = docker.get_container(SERVICE_NAME)

    command = f"neo4j-admin memrec --memory {ram}"

    if container:
        docker.exec_command(container, user="******", command=command)
    else:
        docker.compose.create_volatile_container(SERVICE_NAME, command=command)

    # output = temporary_stream.getvalue().split("\\")
    # print(output)
    # Don't allocate more than 31g of heap,
    # since this will disable pointer compression, also known as "compressed oops",
    # in the JVM and make less effective use of the heap.
    # heap = min(ram * 0.4, 31 * GB)
    # print(f"NEO4J_HEAP_SIZE: {bytes_to_str(heap)}")
    # print(f"NEO4J_PAGECACHE_SIZE: {bytes_to_str(ram * 0.3)}")
    log.info("Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE")
    log.info("Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE")
    log.info("Keep enough free memory for lucene indexes "
             "(check size reported in the output, if any)")
Example no. 3
def test_base(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "reload")

    project_name = random_project_name(faker)

    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["fail2ban"],
    )
    init_project(capfd)

    exec_command(capfd, "reload", "No service reloaded")
    exec_command(capfd, "reload backend", "No service reloaded")
    exec_command(capfd, "reload invalid", "No such service: invalid")
    exec_command(capfd, "reload backend invalid", "No such service: invalid")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    exec_command(capfd, "reload backend", "Reloading Flask...")

    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            f"{project_name}_backend scaled to 2",
            "Service converged",
        )
    else:

        service = "fail2ban"
        exec_command(
            capfd,
            "scale fail2ban=2",
            "Scaling services: fail2ban=2...",
            "Services scaled: fail2ban=2",
        )

    time.sleep(4)

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    exec_command(
        capfd,
        f"reload {service}",
        f"Executing command on {container1[0]}",
        f"Executing command on {container2[0]}",
    )

    exec_command(capfd, "shell backend -u root 'rm /usr/local/bin/reload'")

    exec_command(
        capfd, "reload backend", "Service backend does not support the reload command"
    )

    exec_command(capfd, "remove", "Stack removed")
Example no. 4
def backup(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the backup procedure",
        show_default=False,
    ),
    max_backups: int = typer.Option(
        0,
        "--max",
        help="Maximum number of backups; older backups exceeding this number will be removed",
        show_default=False,
    ),
    dry_run: bool = typer.Option(
        False,
        "--dry-run",
        help="Do not perform any backup or delete backup files",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help="Service to be restarted once the backup is completed (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:

    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--max", max_backups, IF=max_backups),
        Application.serialize_parameter("--dry-run", dry_run, IF=dry_run),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
    )

    if dry_run:
        log.warning("Dry run mode is enabled")

    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()

    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    backup_dir.mkdir(parents=True, exist_ok=True)

    if max_backups > 0:
        backups = list(backup_dir.glob(get_date_pattern()))
        if max_backups >= len(backups):
            log.debug("Found {} backup files, maximum not reached",
                      len(backups))
        else:
            for f in sorted(backups)[:-max_backups]:
                if not dry_run:
                    f.unlink()
                log.warning(
                    "{} deleted because it exceeds the maximum number of backup files ({})",
                    f.name,
                    max_backups,
                )

    module = BACKUP_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
    module.backup(container=container, now=now, force=force, dry_run=dry_run)

    if restart and not dry_run:
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
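The --max handling above keeps only the newest max_backups files by sorting names that begin with a timestamp (the same %Y_%m_%d-%H_%M_%S format used for now). A minimal standalone sketch of that selection rule, with hypothetical file names and extension:

# Hypothetical backup names following the %Y_%m_%d-%H_%M_%S prefix used above
backups = [
    "2023_03_01-10_00_00.sql.gz",
    "2023_01_01-10_00_00.sql.gz",
    "2023_02_01-10_00_00.sql.gz",
]
max_backups = 2

# Lexicographic order equals chronological order for this name format, so
# everything except the newest `max_backups` entries is selected for deletion
to_delete = sorted(backups)[:-max_backups]
assert to_delete == ["2023_01_01-10_00_00.sql.gz"]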
Example no. 5
def ssl(
    volatile: bool = typer.Option(
        False,
        "--volatile",
        help="Create a volatile proxy service to request the certificate",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (e.g. to execute from a cronjob)",
        show_default=False,
    ),
    chain_file: Optional[Path] = typer.Option(
        None,
        "--chain-file",
        help="Path to existing chain file (.pem format)",
        show_default=False,
    ),
    key_file: Optional[Path] = typer.Option(
        None,
        "--key-file",
        help="Path to existing key file (.pem format)",
        show_default=False,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--volatile", volatile, IF=volatile),
        Application.serialize_parameter("--chain-file",
                                        chain_file,
                                        IF=chain_file),
        Application.serialize_parameter("--key-file", key_file, IF=key_file),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    Application.get_controller().controller_init()

    if chain_file is not None or key_file is not None:
        if chain_file is None:
            print_and_exit("Invalid chain file (you provided none)")
        elif not chain_file.exists():
            print_and_exit("Invalid chain file (you provided {})", chain_file)

        if key_file is None:
            print_and_exit("Invalid key file (you provided none)")
        elif not key_file.exists():
            print_and_exit("Invalid key file (you provided {})", key_file)

    service = "proxy"

    verify_available_images(
        [service],
        Application.data.compose_config,
        Application.data.base_services,
    )

    if chain_file is not None and key_file is not None:

        log.info("Unable to automatically perform the requested operation")
        log.info("You can execute the following commands by your-self:")

        c = f"{Configuration.project}_{service}_1"
        letsencrypt_path = "/etc/letsencrypt/real"
        print("")
        print(f"docker cp {chain_file} {c}:{letsencrypt_path}/fullchain1.pem")
        print(f"docker cp {key_file} {c}:{letsencrypt_path}/privkey1.pem")
        print(f"rapydo shell {service} 'nginx -s reload'")
        print("")

        return

    docker = Docker()
    command = f"/bin/bash updatecertificates {Configuration.hostname}"

    if volatile:
        docker.compose.create_volatile_container(
            service, command=command, publish=[(443, 443), (80, 80)]
        )
    else:
        container = docker.get_container(service)
        if not container:
            print_and_exit(
                "The proxy is not running, start your stack or try with {command}",
                command=RED("rapydo ssl --volatile"),
            )
        docker.exec_command(container, user="******", command=command)

    container = docker.get_container("neo4j")
    if container:
        # This is not true!! A full restart is needed
        # log.info("Neo4j is running, but it will reload the certificate by itself")
        # But not implemented yet...
        log.info(
            "Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.")

    containers = docker.get_containers("rabbit")
    if containers:
        log.info(
            "RabbitMQ is running, executing command to refresh the certificate"
        )
        # Please note that Erlang is able to automatically reload the certificate,
        # but RabbitMQ does not. Probably in future releases this command will
        # no longer be required. To test it after the creation of the new cert:
        #   echo -n | openssl s_client -showcerts -connect hostname:5671
        # Please note that this command can fail if RabbitMQ is still starting
        docker.exec_command(containers,
                            user="******",
                            command="/usr/local/bin/reload_certificate")

    containers = docker.get_containers("swaggerui")
    if containers:  # pragma: no cover
        log.info(
            "SwaggerUI is running, executing command to refresh the certificate"
        )
        docker.exec_command(containers, user="******", command="nginx -s reload")

    log.info("New certificate successfully enabled")
Example no. 6
def restore(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    backup_file: Optional[str] = typer.Argument(
        None,
        help="Specify the backup to be restored",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the backup procedure",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help="Service to be restarted once the restore is completed (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:

    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
        Application.serialize_parameter("", backup_file),
    )
    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()

    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    if not backup_dir.exists():
        print_and_exit(
            "No backup found, the following folder does not exist: {}",
            backup_dir)

    module = RESTORE_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    expected_ext = module.EXPECTED_EXT

    if backup_file is None:
        files = backup_dir.iterdir()

        filtered_files = [
            d.name for d in files if d.name.endswith(expected_ext)
        ]
        filtered_files.sort()

        if not filtered_files:
            print_and_exit("No backup found, {} is empty", backup_dir)

        log.info("Please specify one of the following backup:")
        for f in filtered_files:
            print(f"- {f}")

        return

    backup_host_path = backup_dir.joinpath(backup_file)
    if not backup_host_path.exists():
        print_and_exit("Invalid backup file, {} does not exist",
                       backup_host_path)

    module.restore(container=container, backup_file=backup_file, force=force)

    if restart:
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
Example no. 7
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "shell backend ls")

    create_project(
        capfd=capfd,
        name="first",
        auth="no",
        frontend="angular",
        services=["redis", "fail2ban"],
    )
    init_project(capfd)

    start_registry(capfd)

    pull_images(capfd)
    start_project(capfd)

    exec_command(
        capfd, "shell invalid", "No running container found for invalid service"
    )

    exec_command(
        capfd,
        "shell --no-tty backend invalid",
        "--no-tty option is deprecated, you can stop using it",
    )

    exec_command(
        capfd,
        "shell backend invalid",
        "The command execution was terminated by command cannot be invoked. "
        "Exit code is 126",
    )

    exec_command(
        capfd,
        'shell backend "bash invalid"',
        "The command execution was terminated by command not found. "
        "Exit code is 127",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "backend-server",
    )

    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(2)
    exec_command(
        capfd,
        "shell backend --default-command",
        "Time is up",
    )

    # This can't work on GitHub Actions due to the lack of tty
    # signal.signal(signal.SIGALRM, handler)
    # signal.alarm(2)
    # exec_command(
    #     capfd,
    #     "shell backend",
    #     # "developer@backend-server:[/code]",
    #     "Time is up",
    # )

    # Testing default users. I didn't include all the containers because:
    #   1. this would greatly slow down this test for a very small benefit
    #   2. checking for 'postgres' in the output of shell postgres whoami is trivial
    #      because it is always in the output, due to the echo of the command
    exec_command(
        capfd,
        "shell backend whoami",
        "developer",
    )

    exec_command(
        capfd,
        "shell frontend whoami",
        "node",
    )

    # Added because fail2ban is deployed in global mode, so the container name is
    # different and this can make the command fail
    # (as happened before the introduction of this test)
    exec_command(
        capfd,
        "shell fail2ban whoami",
        "root",
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "Requested command: hostname with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 1 --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 2 --default",
        "Requested command: restapi launch with user: developer",
        "Replica number 2 not found for backend service",
    )

    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            "first_backend scaled to 2",
            "Service converged",
        )
    else:

        service = "redis"
        exec_command(
            capfd,
            "scale redis=2",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    string1 = faker.pystr(min_chars=30, max_chars=30)
    string2 = faker.pystr(min_chars=30, max_chars=30)

    docker.client.container.execute(
        container1[0],
        command=["touch", f"/tmp/{string1}"],
        tty=False,
        detach=False,
    )

    docker.client.container.execute(
        container2[0],
        command=["touch", f"/tmp/{string2}"],
        tty=False,
        detach=False,
    )

    exec_command(capfd, f"shell {service} --replica 1 'ls /tmp/'", string1)

    exec_command(capfd, f"shell {service} --replica 2 'ls /tmp/'", string2)

    exec_command(
        capfd,
        f"shell {service} mycommand --replica 2 --broadcast",
        "--replica and --broadcast options are not compatible",
    )

    exec_command(
        capfd,
        f"shell {service} --broadcast 'ls /tmp/'",
        string1,
        string2,
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        f"shell {service} mycommand --broadcast",
        f"No running container found for {service} service",
    )
Example no. 8
def test_all(capfd: Capture) -> None:

    execute_outside(capfd, "start")
    if not Configuration.swarm_mode:
        execute_outside(capfd, "stop")

    project_name = "first"
    create_project(
        capfd=capfd,
        name=project_name,
        auth="neo4j",
        frontend="angular",
    )

    init_project(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    exec_command(
        capfd,
        "start backend invalid",
        "No such service: invalid",
    )

    exec_command(
        capfd,
        "start backend",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    docker = Docker()

    if Configuration.swarm_mode:

        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )

        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Once a stack has been started in swarm mode, it's not possible
        # to re-deploy another stack
        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running",
        #     f"Stop it with {colors.RED}rapydo remove{colors.RESET} "
        #     "if you want to start a new stack",
        # )

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )

        # In swarm mode a new stack replaces the previous one
        # => only neo4j is expected to be running
        assert docker.get_container("backend") is None
        assert docker.get_container("neo4j") is not None

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        # Deploy the full stack
        exec_command(
            capfd,
            "start",
            "Stack started",
        )

        # Now both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # ############################
        # Verify bind volumes checks #
        # ############################

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        data_folder = DATA_DIR.joinpath(project_name)
        karma_folder = data_folder.joinpath("karma")

        # Delete data/project_name/karma and it will be recreated
        assert karma_folder.exists()
        shutil.rmtree(karma_folder)
        assert not karma_folder.exists()

        # Set the data folder read-only
        data_folder.chmod(0o550)

        # The missing folder can't be recreated because permission is denied
        exec_command(
            capfd,
            "start frontend",
            "A bind folder is missing and can't be automatically created: ",
            f"/data/{project_name}/karma",
        )
        assert not karma_folder.exists()

        # Restore RW permissions
        data_folder.chmod(0o770)

        exec_command(
            capfd,
            "start frontend",
            "A bind folder was missing and was automatically created: ",
            f"/data/{project_name}/karma",
            "Stack started",
        )
        assert karma_folder.exists()
    else:

        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )

        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )

        # In compose mode additional stacks are aggregated
        # => both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running.",
        # )
        exec_command(
            capfd,
            "start",
            "Stack started",
        )
Example no. 9
def shell(
    service: str = typer.Argument(
        ...,
        help="Service name",
        shell_complete=Application.autocomplete_service),
    command: str = typer.Argument(
        "bash",
        help="UNIX command to be executed on selected running service"),
    user: Optional[str] = typer.Option(
        None,
        "--user",
        "-u",
        help="User existing in selected service",
        show_default=False,
    ),
    default_command: bool = typer.Option(
        False,
        "--default-command",
        "--default",
        help="Execute the default command configured for the container",
        show_default=False,
    ),
    no_tty: bool = typer.Option(
        False,
        "--no-tty",
        help="Disable pseudo-tty allocation (useful for non-interactive scripts)",
        show_default=False,
    ),
    replica: int = typer.Option(
        1,
        "--replica",
        "--slot",
        help="Execute the command on the specified replica",
        show_default=False,
    ),
    broadcast: bool = typer.Option(
        False,
        "--broadcast",
        help="Execute the command on all the replicas",
        show_default=False,
    ),
) -> None:

    Application.print_command(
        Application.serialize_parameter("--user", user, IF=user),
        Application.serialize_parameter("--default",
                                        default_command,
                                        IF=default_command),
        Application.serialize_parameter("", service),
        Application.serialize_parameter("", command),
    )

    if no_tty:
        log.warning("--no-tty option is deprecated, you can stop using it")

    if replica > 1 and broadcast:
        print_and_exit("--replica and --broadcast options are not compatible")
    Application.get_controller().controller_init()

    docker = Docker()

    if not user:
        user = services.get_default_user(service)

    if default_command:
        command = services.get_default_command(service)

    log.debug("Requested command: {} with user: {}", command, user
              or "default")
    if broadcast:
        containers = docker.get_containers(service)
        if not containers:
            print_and_exit("No running container found for {} service",
                           service)

        docker.exec_command(containers, user=user, command=command)
    else:
        container = docker.get_container(service, slot=replica)

        if not container:
            if replica != 1:
                print_and_exit("Replica number {} not found for {} service",
                               str(replica), service)
            print_and_exit("No running container found for {} service",
                           service)

        docker.exec_command(container, user=user, command=command)
Example no. 10
def password(
    service: SupportedServices = typer.Argument(None, help="Service name"),
    show: bool = typer.Option(
        False,
        "--show",
        help="Show the current password(s)",
        show_default=False,
    ),
    random: bool = typer.Option(
        False,
        "--random",
        help="Generate a random password",
        show_default=False,
    ),
    new_password: str = typer.Option(
        None,
        "--password",
        help="Force the given password",
        show_default=False,
    ),
) -> None:

    Application.print_command(
        Application.serialize_parameter("--show", show, IF=show),
        Application.serialize_parameter("--random", random, IF=random),
        Application.serialize_parameter("--password",
                                        new_password,
                                        IF=new_password),
        Application.serialize_parameter("", service),
    )

    Application.get_controller().controller_init()

    # No service specified, only a summary will be reported
    if not service:

        if random:
            print_and_exit("--random flag is not supported without a service")

        if new_password:
            print_and_exit(
                "--password option is not supported without a service")

        MIN_PASSWORD_SCORE = int(
            Application.env.get("MIN_PASSWORD_SCORE", 2)  # type: ignore
        )

        last_updates = parse_projectrc()
        now = datetime.now()

        table: List[List[str]] = []
        for s in PASSWORD_MODULES:
            # This should never happen and can't be (easily) tested
            if s not in Application.data.base_services:  # pragma: no cover
                print_and_exit("Command misconfiguration, unknown {} service", s)

            if s != REGISTRY and s not in Application.data.active_services:
                continue

            if s == REGISTRY and not Configuration.swarm_mode:
                continue

            module = PASSWORD_MODULES.get(s)

            if not module:  # pragma: no cover
                print_and_exit(f"{s} misconfiguration, module not found")

            for variable in module.PASSWORD_VARIABLES:

                password = Application.env.get(variable)

                if password == PLACEHOLDER:
                    score = None
                else:
                    result = zxcvbn(password)
                    score = result["score"]

                if variable in last_updates:
                    change_date = last_updates.get(variable,
                                                   datetime.fromtimestamp(0))
                    expiration_date = change_date + timedelta(
                        days=PASSWORD_EXPIRATION)
                    expired = now > expiration_date
                    last_change = change_date.strftime("%Y-%m-%d")
                else:
                    expired = True
                    last_change = "N/A"

                pass_line: List[str] = []

                pass_line.append(s)
                pass_line.append(variable)

                if expired:
                    pass_line.append(RED(last_change))
                else:
                    pass_line.append(GREEN(last_change))

                if score is None:
                    pass_line.append(RED("NOT SET"))
                elif score < MIN_PASSWORD_SCORE:
                    pass_line.append(RED(score))
                else:
                    pass_line.append(GREEN(score))

                if show:
                    pass_line.append(str(password))

                table.append(pass_line)

        headers = ["SERVICE", "VARIABLE", "LAST CHANGE", "STRENGTH"]
        if show:
            headers.append("PASSWORD")

        print("")
        print(tabulate(
            table,
            tablefmt=TABLE_FORMAT,
            headers=headers,
        ))

    # In this case a specific service has been requested and will be updated
    else:

        module = PASSWORD_MODULES.get(service.value)

        if not module:  # pragma: no cover
            print_and_exit(
                f"{service.value} misconfiguration, module not found")

        if random:
            new_password = get_strong_password()
        elif not new_password:
            print_and_exit(
                "Please specify either the --random or the --password option")

        docker = Docker()

        variables = module.PASSWORD_VARIABLES
        old_password = Application.env.get(variables[0])
        new_variables = {variable: new_password for variable in variables}

        # Some services can only be updated if already running,
        # others can be updated even if offline,
        # but in any case, if the stack is running, it has to be restarted

        if service.value == REGISTRY:
            is_running = docker.registry.ping(do_exit=False)
            container: Optional[Tuple[str, str]] = ("registry", "")
        else:
            container = docker.get_container(service.value)
            is_running = container is not None

        is_running_needed = module.IS_RUNNING_NEEDED

        log.info("Changing password for {}...", service.value)

        if is_running_needed and (not is_running or not container):
            print_and_exit(
                "Can't update {} because it is not running. Please start your stack",
                service.value,
            )

        update_projectrc(new_variables)

        if container:
            module.password(container, old_password, new_password)

        if is_running:
            log.info("{} was running, restarting services...", service.value)

            Application.get_controller().check_placeholders_and_passwords(
                Application.data.compose_config, Application.data.services)
            if service.value == REGISTRY:
                port = cast(int, Application.env["REGISTRY_PORT"])

                docker.client.container.remove(REGISTRY, force=True)

                docker.compose.create_volatile_container(
                    REGISTRY, detach=True, publish=[(port, port)]
                )
            elif Configuration.swarm_mode:

                docker.compose.dump_config(Application.data.services)
                docker.swarm.deploy()

            else:
                docker.compose.start_containers(Application.data.services)
        else:
            log.info("{} was not running, restart is not needed",
                     service.value)

        log.info(
            "The password of {} has been changed. "
            "Please find the new password into your .projectrc file as {} variable",
            service.value,
            variables[0],
        )
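The summary branch of the password command above combines two per-variable checks: a strength score from zxcvbn (0 to 4, compared against MIN_PASSWORD_SCORE) and an expiration date computed as the last change date plus PASSWORD_EXPIRATION days. A minimal standalone sketch of those two checks; the constant values are illustrative assumptions, not the controller defaults:

from datetime import datetime, timedelta
from typing import Tuple

from zxcvbn import zxcvbn  # same library used by the command above

PASSWORD_EXPIRATION = 90  # days, illustrative value
MIN_PASSWORD_SCORE = 2  # illustrative value


def password_status(password: str, change_date: datetime) -> Tuple[bool, bool]:
    # Returns (strong_enough, expired) following the rules of the summary above
    score = zxcvbn(password)["score"]  # 0 = weakest, 4 = strongest
    expiration_date = change_date + timedelta(days=PASSWORD_EXPIRATION)
    expired = datetime.now() > expiration_date
    return score >= MIN_PASSWORD_SCORE, expired


# Example:
# password_status("D6wnUhM8qeWKdcBTvHdy", datetime(2023, 1, 1))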