Esempio n. 1
0
def test_reload_prod(capfd: Capture, faker: Faker) -> None:
    """Verify the ``reload`` command on a --prod project with an angular frontend.

    Covers: backend (gunicorn) reload, the implicit-frontend refusal of a bare
    ``reload``, frontend reload with a stopped and with a removed container
    (including the swarm-mode fallback to compose mode), and the refusal to
    reload frontend together with other services.
    """
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="angular",
    )

    init_project(capfd, " --prod ", "--force")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    # Give the stack a moment to settle before issuing reload commands
    time.sleep(5)

    exec_command(capfd, "reload backend", "Reloading gunicorn (PID #")

    # A bare "reload" must NOT touch the frontend unless explicitly requested
    exec_command(
        capfd,
        "reload",
        "Can't reload the frontend if not explicitly requested",
        "Services reloaded",
    )

    docker = Docker()
    container = docker.get_container("frontend")
    assert container is not None

    # Reload must also work when the frontend container is stopped
    docker.client.container.stop(container[0])
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    container = docker.get_container("frontend")

    if Configuration.swarm_mode:
        # frontend reload is always executed in compose mode
        # => the container retrieved from docker.get_container in swarm mode is None
        assert container is None
        # Let's retrieve the container name in compose mode:

        Configuration.swarm_mode = False
        docker = Docker()
        container = docker.get_container("frontend")

        # Let's restore the docker client
        Configuration.swarm_mode = True
        docker = Docker()

    assert container is not None

    # Reload must also work when the frontend container has been removed
    docker.client.container.remove(container[0], force=True)
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    # frontend can only be reloaded alone
    exec_command(
        capfd,
        "reload frontend backend",
        "Can't reload frontend and other services at once",
    )
    exec_command(capfd, "remove", "Stack removed")
Esempio n. 2
0
def test_base(capfd: Capture) -> None:
    """Verify the ``update`` command on a fresh project.

    After a successful update, dirty the "do" submodule with one untracked
    file and one unstaged change, and verify that the update is refused with
    a report of both kinds of dirty state.
    """

    execute_outside(capfd, "update")

    create_project(
        capfd=capfd,
        name="third",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    # Skipping main because we are on a fake git repository
    exec_command(
        capfd,
        "update -i main",
        "All updated",
    )

    # Create an untracked file; Path.touch() closes the handle deterministically
    # (a bare open(...).close() leaves an unnamed handle to the GC on some runtimes)
    Path("submodules/do/temp.file").touch()
    # ... and an unstaged modification to a tracked file
    with open("submodules/do/setup.py", "a") as f:
        f.write("# added from tests\n")

    exec_command(
        capfd,
        "update -i main",
        "Unable to update do repo, you have unstaged files",
        "Untracked files:",
        "submodules/do/temp.file",
        "Changes not staged for commit:",
        "submodules/do/setup.py",
        "Can't continue with updates",
    )
Esempio n. 3
0
def test_reload_dev(capfd: Capture, faker: Faker) -> None:
    """Verify the ``reload`` command on a dev-mode, backend-only project."""
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    pull_images(capfd)

    start_project(capfd)

    # Give the stack a moment to settle before reloading
    time.sleep(5)

    # For each support service verify:
    #   1) a start line in the logs
    #   2) the container is not re-created after the command
    #   3) the start line in the logs is printed again
    #   4) some more deep check based on the service?
    #      For example API is loading a change in the code?
    exec_command(capfd, "reload backend", "Reloading Flask...")

    exec_command(capfd, "remove", "Stack removed")

    if Configuration.swarm_mode:
        exec_command(capfd, "remove registry", "Service registry removed")
Esempio n. 4
0
def test_git(capfd: Capture, faker: Faker) -> None:
    """Verify the git helper module: get_repo, branch handling and origins."""

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
    )
    init_project(capfd)

    assert git.get_repo("does/not/exist") is None
    do_repo = git.get_repo("submodules/do")
    assert do_repo is not None
    # A None repo is handled gracefully by all helpers
    assert git.get_active_branch(None) is None
    assert git.get_active_branch(do_repo) == __version__
    assert not git.switch_branch(None, branch_name="0.7.3")
    # Same branch => no change => return True
    assert git.switch_branch(do_repo, branch_name=__version__)
    # Unknown branch => switch fails
    assert not git.switch_branch(do_repo, branch_name="XYZ")

    # Switch to an old release branch and back to the current version
    assert git.switch_branch(do_repo, branch_name="0.7.3")
    assert git.get_active_branch(do_repo) == "0.7.3"
    assert git.switch_branch(do_repo, branch_name=__version__)
    assert git.get_active_branch(do_repo) == __version__

    assert git.get_origin(None) is None

    r = git.get_repo(".")
    assert git.get_origin(r) == "https://your_remote_git/your_project.git"

    # Create an invalid repo (i.e. without any remote)
    r = git.init("../justatest")
    assert git.get_origin(r) is None
Esempio n. 5
0
def test_autocomplete(capfd: Capture, faker: Faker) -> None:
    """Verify shell-autocompletion callbacks for services, all-services and
    submodules, both inside a project and after leaving it (.rapydo removed)."""

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
    )
    init_project(capfd)

    app = Application()

    # Enabled services: empty prefix lists all, unknown prefix lists none
    values = app.autocomplete_service(None, None, "")  # type: ignore
    assert len(values) > 0
    assert "backend" in values
    values = app.autocomplete_service(None, None, "invalid")  # type: ignore
    assert len(values) == 0
    values = app.autocomplete_service(None, None, "b")  # type: ignore
    assert len(values) >= 1
    assert "backend" in values

    # All services (enabled or not) follow the same prefix-filtering rules
    values = app.autocomplete_allservice(None, None, "")  # type: ignore
    assert len(values) > 0
    assert "backend" in values
    values = app.autocomplete_allservice(None, None, "invalid")  # type: ignore
    assert len(values) == 0
    values = app.autocomplete_allservice(None, None, "b")  # type: ignore
    assert len(values) >= 1
    assert "backend" in values
    values = app.autocomplete_allservice(None, None, "c")  # type: ignore
    assert len(values) >= 1
    assert "backend" not in values

    # Submodule names
    values = app.autocomplete_submodule(None, None, "")  # type: ignore
    assert len(values) > 0
    assert "main" in values
    values = app.autocomplete_submodule(None, None, "invalid")  # type: ignore
    assert len(values) == 0
    values = app.autocomplete_submodule(None, None, "m")  # type: ignore
    assert len(values) >= 1
    assert "main" in values
    values = app.autocomplete_submodule(None, None, "d")  # type: ignore
    assert len(values) >= 1
    assert "main" not in values

    # Without the .rapydo marker file every autocompletion is empty
    os.unlink(".rapydo")
    values = app.autocomplete_service(None, None, "")  # type: ignore
    assert len(values) == 0
    values = app.autocomplete_allservice(None, None, "")  # type: ignore
    assert len(values) == 0
    values = app.autocomplete_submodule(None, None, "")  # type: ignore
    assert len(values) == 0
Esempio n. 6
0
def test_cronjobs(capfd: Capture, faker: Faker) -> None:
    """Verify crontab setup in the backend container.

    With CRONTAB_ENABLE=1 but no .cron file the setup is skipped; after a
    cron file is added and the stack force-restarted, the cronjob has to be
    installed (verified via the container logs).
    """

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd, "-e CRONTAB_ENABLE=1")
    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")

    exec_command(
        capfd,
        "logs --tail 50 backend",
        # Logs are not prefixed because only one service is shown
        "Found no cronjob to be enabled, skipping crontab setup",
        "Testing mode",
    )

    # Add a cron file; it will be picked up at the next (re)start
    with open(f"projects/{project}/backend/cron/hello-world.cron", "w+") as f:
        f.write("* * * * * echo 'Hello world' >> /var/log/cron.log 2>&1\n")
        f.write("\n")

    exec_command(
        capfd,
        "-e CRONTAB_ENABLE=1 start --force",
        "Stack started",
    )

    if Configuration.swarm_mode:
        # Swarm services need some extra time to converge after a restart
        time.sleep(10)

    exec_command(
        capfd,
        "logs --tail 50 backend",
        # Logs are not prefixed because only one service is shown
        # "Testing mode",
        "Enabling cron...",
        "Cron enabled",
        # this is the output of crontab -l that verifies the cronjob installation
        "* * * * * echo 'Hello world'",
    )
Esempio n. 7
0
def test_dump(capfd: Capture, faker: Faker) -> None:
    """Verify the ``dump`` command creates the docker-compose.yml config dump."""

    execute_outside(capfd, "dump")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    exec_command(
        capfd,
        "dump",
        "Config dump: docker-compose.yml",
    )
Esempio n. 8
0
def test_all(capfd: Capture) -> None:
    """Verify ``start`` restart semantics.

    A second plain ``start`` must not recreate unchanged containers, while
    ``start --force`` must recreate them (verified via container start dates).
    """

    # "restart" was removed from the CLI
    exec_command(capfd, "restart", "This command is no longer available")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    start_date1 = get_container_start_date(capfd, "backend")
    exec_command(
        capfd,
        "start",
        "Stack started",
    )

    start_date2 = get_container_start_date(capfd, "backend")

    # The service is not restarted because its definition is unchanged
    assert start_date1 == start_date2

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "remove backend",
            "first_backend scaled to 0",
            "verify: Service converged",
            "Services removed",
        )

    exec_command(
        capfd,
        "start --force",
        "Stack started",
    )

    start_date3 = get_container_start_date(capfd, "backend")

    # --force always recreates the container
    assert start_date2 != start_date3
Esempio n. 9
0
def test_password(capfd: Capture, faker: Faker) -> None:
    """Verify that ``password <service>`` requires --random or --password."""

    execute_outside(capfd, "password")

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )

    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "password backend",
        "Please specify one between --random and --password options",
    )
Esempio n. 10
0
def test_join(capfd: Capture) -> None:
    """Verify the ``join`` command emits worker and manager swarm-join tokens.

    Only meaningful in swarm mode; silently returns otherwise.
    """

    if not Configuration.swarm_mode:
        return None

    execute_outside(capfd, "join")

    create_project(capfd=capfd, name="myname", auth="postgres", frontend="no")
    init_project(capfd)

    exec_command(
        capfd,
        "join",
        "To add a worker to this swarm, run the following command:",
        "docker swarm join --token ",
    )

    exec_command(
        capfd,
        "join --manager",
        "To add a manager to this swarm, run the following command:",
        "docker swarm join --token ",
    )
Esempio n. 11
0
def test_base(capfd: Capture, faker: Faker) -> None:
    """Verify base CLI behavior: --version, invalid options, working-dir
    detection, remote-host errors, service selection syntax and custom
    project commands."""

    execute_outside(capfd, "version")

    exec_command(
        capfd,
        "--version",
        f"rapydo version: {__version__}",
    )

    project = random_project_name(faker)

    exec_command(
        capfd,
        f"--invalid-option create {project}",
        "Error: No such option: --invalid-option",
    )

    exec_command(capfd, "rapydo", "Usage")

    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    exec_command(
        capfd,
        "version",
        f"rapydo: {colors.GREEN}{__version__}",
        f"required rapydo: {colors.GREEN}{__version__}",
    )

    folder = os.getcwd()
    # Tests from a subfolder
    os.chdir("projects")
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "You are not in the main folder, please change your working dir",
        "Found a valid parent folder:",
        "Suggested command: cd ..",
    )

    # One level deeper: the suggested cd must go up two levels
    os.chdir(project)
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "You are not in the main folder, please change your working dir",
        "Found a valid parent folder:",
        "Suggested command: cd ../..",
    )

    # Tests from outside the folder
    os.chdir(tempfile.gettempdir())
    exec_command(
        capfd,
        "check -i main",
        "You are not in a git repository",
        "Please note that this command only works from inside a rapydo-like repository",
        "Verify that you are in the right folder, now you are in:",
    )

    os.chdir(folder)

    exec_command(
        capfd,
        "--remote invalid check -i main --no-git",
        "Could not resolve hostname invalid: ",
    )

    exec_command(
        capfd,
        "--remote invalid@invalid check -i main --no-git",
        # Temporary failure in name resolution depends by the OS
        # on alpine che message is: Name does not resolve
        # "Could not resolve hostname invalid: Temporary failure in name resolution",
        "Could not resolve hostname invalid: ",
    )

    exec_command(
        capfd,
        "-s backend check -i main --no-git --no-builds",
        # warnings are not catched !?
        # "-s is replaced by rapydo <command> service",
    )

    exec_command(
        capfd,
        "start backend",
        "Enabled services: backend",
    )

    exec_command(
        capfd,
        "start backend postgres",
        "Enabled services: backend, postgres",
    )

    # A leading underscore excludes the service from the selection
    exec_command(
        capfd,
        "start backend postgres _backend",
        "Enabled services: postgres",
    )

    exec_command(
        capfd,
        "start backend postgres _invalid",
        "No such service: invalid",
    )

    exec_command(
        capfd,
        "-e ACTIVATE_FAIL2BAN start fail2ban",
        "Invalid enviroment, missing value in ACTIVATE_FAIL2BAN",
    )

    # Install a project-level custom command and verify it is discovered
    Path(PROJECT_DIR, project, "commands").mkdir(exist_ok=True)
    with open(f"projects/{project}/commands/custom.py", "w+") as f:
        f.write("""
from controller.app import Application
from controller import log

@Application.app.command(help="This is a custom command")
def custom() -> None:
    Application.print_command()
    log.info("Hello from custom command!")
""")

    exec_command(
        capfd,
        "custom",
        "Hello from custom command!",
    )
Esempio n. 12
0
def test_base(capfd: Capture) -> None:
    """Verify the ``check`` command end to end.

    Covers: wrong submodule branch, missing data/builds/.gitignore, custom
    build images, invalid stack, obsolete folders, project selection
    (multiple / zero / invalid / reserved names), dirty submodules,
    .gitattributes drift, --prod variable checks, password-score checks and
    (in swarm mode) swarm initialization and resource-limit warnings.
    """

    execute_outside(capfd, "check")

    create_project(
        capfd=capfd,
        name="third",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    repo = git.get_repo("submodules/http-api")
    git.switch_branch(repo, "0.7.6")
    exec_command(
        capfd,
        "check -i main",
        f"http-api: wrong branch 0.7.6, expected {__version__}",
        f"You can fix it with {colors.RED}rapydo init{colors.RESET}",
    )
    init_project(capfd)

    with TemporaryRemovePath(DATA_DIR):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Folder not found: data",
            "Please note that this command only works from inside a rapydo-like repo",
            "Verify that you are in the right folder, now you are in: ",
        )

    with TemporaryRemovePath(Path("projects/third/builds")):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Project third is invalid: required folder not found projects/third/builds",
        )

    with TemporaryRemovePath(Path(".gitignore")):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Project third is invalid: required file not found .gitignore",
        )

    # Add a custom image to extend base backend image:
    with open("projects/third/confs/commons.yml", "a") as f:
        f.write(
            """
services:
  backend:
    build: ${PROJECT_DIR}/builds/backend
    image: third/backend:${RAPYDO_VERSION}

    """
        )

    os.makedirs("projects/third/builds/backend")
    with open("projects/third/builds/backend/Dockerfile", "w+") as f:
        f.write(
            f"""
FROM rapydo/backend:{__version__}
RUN mkdir xyz
"""
        )

    # Skipping main because we are on a fake git repository
    exec_command(
        capfd,
        "check -i main",
        f" image, execute {colors.RED}rapydo pull",
        f" image, execute {colors.RED}rapydo build",
        f"Compose is installed with version {COMPOSE_VERSION}",
        f"Buildx is installed with version {BUILDX_VERSION}",
        "Checks completed",
    )

    exec_command(
        capfd,
        "--stack invalid check -i main",
        "Failed to read projects/third/confs/invalid.yml: File does not exist",
    )

    os.mkdir("submodules/rapydo-confs")
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Project third contains an obsolete file or folder: submodules/rapydo-confs",
    )

    shutil.rmtree("submodules/rapydo-confs")

    # Test selection with two projects
    create_project(
        capfd=capfd,
        name="justanother",
        auth="postgres",
        frontend="no",
    )

    os.remove(".projectrc")

    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Multiple projects found, please use --project to specify one of the following",
    )

    # Test with zero projects
    with TemporaryRemovePath(Path("projects")):
        os.mkdir("projects")
        # in this case SystemExit is raised in the command init...
        with pytest.raises(SystemExit):
            exec_command(
                capfd,
                "check -i main --no-git --no-builds",
                "No project found (is projects folder empty?)",
            )
        shutil.rmtree("projects")

    exec_command(
        capfd,
        "-p third check -i main --no-git --no-builds",
        "Checks completed",
    )

    # Numbers are not allowed as first characters
    pname = "2invalidcharacter"
    os.makedirs(f"projects/{pname}")
    exec_command(
        capfd,
        f"-p {pname} check -i main --no-git --no-builds",
        "Wrong project name, found invalid characters: 2",
    )
    shutil.rmtree(f"projects/{pname}")

    invalid_characters = {
        "_": "_",
        "-": "-",
        "C": "C",
        # Invalid characters in output are ordered
        # Numbers are allowed if not leading
        "_C-2": "-C_",
    }
    # Check invalid and reserved project names
    for invalid_key, invalid_value in invalid_characters.items():
        pname = f"invalid{invalid_key}character"
        os.makedirs(f"projects/{pname}")
        exec_command(
            capfd,
            f"-p {pname} check -i main --no-git --no-builds",
            f"Wrong project name, found invalid characters: {invalid_value}",
        )
        shutil.rmtree(f"projects/{pname}")

    os.makedirs("projects/celery")
    exec_command(
        capfd,
        "-p celery check -i main --no-git --no-builds",
        "You selected a reserved name, invalid project name: celery",
    )
    shutil.rmtree("projects/celery")

    exec_command(
        capfd,
        "-p fourth check -i main --no-git --no-builds",
        "Wrong project fourth",
        "Select one of the following: ",
    )

    # Test init of data folders
    shutil.rmtree(LOGS_FOLDER)
    assert not LOGS_FOLDER.is_dir()
    # Let's restore .projectrc and data/logs
    init_project(capfd, "--project third")

    assert LOGS_FOLDER.is_dir()
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Checks completed",
    )

    # Test dirty repo
    # (context manager instead of the previous manual open/write/close)
    with open("submodules/do/new_file", "wt+") as fin:
        fin.write("xyz")

    exec_command(
        capfd,
        "check -i main",
        "You have unstaged files on do",
        "Untracked files:",
        "submodules/do/new_file",
    )

    with open(".gitattributes", "a") as a_file:
        a_file.write("\n")
        a_file.write("# new line")

    exec_command(
        capfd,
        "check -i main",
        ".gitattributes changed, "
        f"please execute {colors.RED}rapydo upgrade --path .gitattributes",
    )

    exec_command(
        capfd,
        "--prod check -i main --no-git --no-builds",
        "The following variables are missing in your configuration",
        "You can fix this error by updating your .projectrc file",
    )

    # Default ALCHEMY_PASSWORD has as score of 2
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=3 check -i main --no-git --no-builds",
        "The password used in ALCHEMY_PASSWORD is weak",
    )
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=4 check -i main --no-git --no-builds",
        "The password used in ALCHEMY_PASSWORD is very weak",
    )
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=4 -e AUTH_DEFAULT_PASSWORD=x check -i main --no-git --no-builds",
        "The password used in AUTH_DEFAULT_PASSWORD is extremely weak",
    )

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    exec_command(
        capfd,
        "--prod check -i main --no-git --no-builds",
        "Checks completed",
    )

    if Configuration.swarm_mode:
        # Skipping main because we are on a fake git repository
        exec_command(
            capfd,
            "check -i main",
            "Swarm is correctly initialized",
            "Checks completed",
        )

        docker = Docker()
        docker.client.swarm.leave(force=True)

        exec_command(
            capfd,
            "check -i main",
            f"Swarm is not initialized, please execute {colors.RED}rapydo init",
        )
        exec_command(
            capfd,
            "init",
            "Swarm is now initialized",
            "Project initialized",
        )
        exec_command(
            capfd,
            "check -i main",
            "Swarm is correctly initialized",
            "Checks completed",
        )

        check = "check -i main --no-git --no-builds"

        exec_command(
            capfd,
            f"-e ASSIGNED_MEMORY_BACKEND=50G {check}",
            "Your deployment requires 50GB of RAM but your nodes only have",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e ASSIGNED_CPU_BACKEND=50.0 {check}",
            "Your deployment requires ",
            " cpus but your nodes only have ",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e DEFAULT_SCALE_BACKEND=55 -e ASSIGNED_MEMORY_BACKEND=1G {check}",
            "Your deployment requires 55GB of RAM but your nodes only have",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e DEFAULT_SCALE_BACKEND=50 -e ASSIGNED_CPU_BACKEND=1.0 {check}",
            "Your deployment requires ",
            " cpus but your nodes only have ",
            # The error does not halt the checks execution
            "Checks completed",
        )
Esempio n. 13
0
def test_password_mysql(capfd: Capture, faker: Faker) -> None:
    """Verify password rotation for the mariadb service.

    Covers: refusal while the service is down, --random rotation (with
    backend+mariadb restart), --password with an explicit value, --show,
    and the expiration warning via freeze_time.
    """

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="mysql",
        frontend="no",
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password mariadb --random",
        "Can't update mariadb because it is not running. Please start your stack",
    )

    # No password set yet => N/A shown in red
    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.RED}N/A",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "sqlalchemy")

    backend_start_date = get_container_start_date(capfd, "backend")
    mariadb_start_date = get_container_start_date(capfd, "mariadb")
    mariadb_pass1 = get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password mariadb --random",
        "mariadb was running, restarting services...",
        "The password of mariadb has been changed. ",
        "Please find the new password into your .projectrc file as "
        "ALCHEMY_PASSWORD variable",
    )

    mariadb_pass2 = get_variable_from_projectrc("ALCHEMY_PASSWORD")
    assert mariadb_pass1 != mariadb_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    mariadb_start_date2 = get_container_start_date(capfd,
                                                   "mariadb",
                                                   wait=False)

    # Verify that both backend and mariadb are restarted
    assert backend_start_date2 != backend_start_date
    assert mariadb_start_date2 != mariadb_start_date

    service_verify(capfd, "sqlalchemy")

    # After the change the password date is today, shown in green
    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.GREEN}{today}",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password mariadb --password {mypassword}",
        "The password of mariadb has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "sqlalchemy")

    # Jump past the expiration window and verify the warnings
    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"mariadb    ALCHEMY_PASSWORD       {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"ALCHEMY_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Esempio n. 14
0
def test_password_flower(capfd: Capture, faker: Faker) -> None:
    """Verify password rotation for the flower service.

    Covers: --random rotation both before the stack is running (no restart
    needed) and while running (restart), --password with an explicit value,
    --show, and the expiration warning via freeze_time.
    """

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["flower"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    # No password set yet => N/A shown in red
    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.RED}N/A",
    )

    flower_pass1 = get_variable_from_projectrc("FLOWER_PASSWORD")
    exec_command(
        capfd,
        "password flower --random",
        "flower was not running, restart is not needed",
        "The password of flower has been changed. ",
        "Please find the new password into your .projectrc file as "
        "FLOWER_PASSWORD variable",
    )
    flower_pass2 = get_variable_from_projectrc("FLOWER_PASSWORD")
    assert flower_pass1 != flower_pass2

    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.GREEN}{today}",
    )

    pull_images(capfd)
    start_project(capfd)

    flower_start_date = get_container_start_date(capfd, "flower", wait=True)

    # With the service running, a rotation also restarts it
    exec_command(
        capfd,
        "password flower --random",
        "flower was running, restarting services...",
        "The password of flower has been changed. ",
        "Please find the new password into your .projectrc file as "
        "FLOWER_PASSWORD variable",
    )

    flower_pass3 = get_variable_from_projectrc("FLOWER_PASSWORD")
    assert flower_pass2 != flower_pass3

    flower_start_date2 = get_container_start_date(capfd, "flower", wait=True)

    assert flower_start_date2 != flower_start_date

    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password flower --password {mypassword}",
        "The password of flower has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("FLOWER_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    # Jump past the expiration window and verify the warnings
    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"flower     FLOWER_PASSWORD        {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"FLOWER_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Esempio n. 15
0
def test_password_redis(capfd: Capture, faker: Faker) -> None:
    """Verify password rotation for the redis service.

    Covers: --random rotation before and during execution (with backend+redis
    restart), --password with an explicit value, --show, and the expiration
    warning via freeze_time.
    """

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["redis"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    # No password set yet => N/A shown in red
    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.RED}N/A",
    )

    redis_pass1 = get_variable_from_projectrc("REDIS_PASSWORD")
    exec_command(
        capfd,
        "password redis --random",
        "redis was not running, restart is not needed",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )
    redis_pass2 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass1 != redis_pass2

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    backend_start_date = get_container_start_date(capfd, "backend")
    redis_start_date = get_container_start_date(capfd, "redis")

    # With the service running, a rotation also restarts dependent services
    exec_command(
        capfd,
        "password redis --random",
        "redis was running, restarting services...",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )

    redis_pass3 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass2 != redis_pass3

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    redis_start_date2 = get_container_start_date(capfd, "redis", wait=False)

    # Verify that both backend and redis are restarted
    assert backend_start_date2 != backend_start_date
    assert redis_start_date2 != redis_start_date

    service_verify(capfd, "redis")

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password redis --password {mypassword}",
        "The password of redis has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("REDIS_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "redis")

    # Jump past the expiration window and verify the warnings
    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"redis      REDIS_PASSWORD         {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"REDIS_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Esempio n. 16
0
def _replace_in_config(pconf: str, old: str, new: str) -> None:
    """Rewrite the project configuration file, replacing *old* with *new*.

    Helper for test_install: used to change the pinned `rapydo: "..."`
    version in project_configuration.yaml. Uses pathlib read/write so the
    file handle is never left open (the original used manual open/close).
    """
    config = Path(pconf)
    config.write_text(config.read_text().replace(old, new))


def test_install(capfd: Capture, faker: Faker) -> None:
    """Verify the `install` command and the rapydo version-compatibility checks.

    Covers: update refusal when the controller is installed outside the
    project, install of specific/invalid versions, docker/compose/buildx
    helpers, re-install after a branch switch, install from a symlinked
    submodules folder, and the version mismatch error messages.
    """

    execute_outside(capfd, "install")

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    # Initially the controller is installed from pip
    exec_command(
        capfd,
        "update -i main",
        "Controller not updated because it is installed outside this project",
        "Installation path is ",
        ", the current folder is ",
        "All updated",
    )

    # With the do submodule temporarily removed, install must ask for init
    with TemporaryRemovePath(SUBMODULES_DIR.joinpath("do")):
        exec_command(
            capfd,
            "install",
            "missing as submodules/do. You should init your project",
        )

    exec_command(capfd, "install 100.0", "Invalid version")

    exec_command(
        capfd,
        "install docker",
        "Docker current version:",
        "Docker installed version:",
    )
    exec_command(capfd, "install compose", "Docker compose is installed")
    exec_command(
        capfd,
        "install buildx",
        "Docker buildx current version:",
        "Docker buildx installed version:",
    )

    exec_command(capfd, "install auto")

    # Switch the controller submodule to an old branch: install must restore it
    r = git.get_repo("submodules/do")
    git.switch_branch(r, "0.7.6")

    exec_command(
        capfd,
        "install",
        f"Controller repository switched to {__version__}",
    )

    # Here the controller is installed in editable mode from the correct submodules
    # folder (this is exactly the default normal condition)
    exec_command(
        capfd,
        "update -i main",
        # Controller installed from {} and updated
        "Controller installed from ",
        " and updated",
        "All updated",
    )

    # Install the controller from a linked folder to verify that the post-update checks
    # are able to correctly resolve symlinks
    # ###########################################################
    # Copied from test_init_check_update.py from here...
    SUBMODULES_DIR.rename("submodules.bak")
    SUBMODULES_DIR.mkdir()

    # This is to re-fill the submodules folder,
    # these folder will be removed by the next init
    exec_command(capfd, "init", "Project initialized")

    modules_path = Path("submodules.bak").resolve()

    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )
    # ... to here
    # ###########################################################
    exec_command(
        capfd,
        "update -i main",
        # Controller installed from {} and updated
        "Controller installed from ",
        " and updated",
        "All updated",
    )

    # This test will change the required version
    pconf = f"projects/{project}/project_configuration.yaml"

    # Downgrade the pinned version: the installed rapydo is now too new
    _replace_in_config(pconf, f'rapydo: "{__version__}"', 'rapydo: "0.7.6"')

    exec_command(
        capfd,
        "version",
        f"This project is not compatible with rapydo version {__version__}",
        "Please downgrade rapydo to version 0.7.6 or modify this project",
    )

    # Bump the pinned version: the installed rapydo is now too old
    _replace_in_config(pconf, 'rapydo: "0.7.6"', 'rapydo: "99.99.99"')

    exec_command(
        capfd,
        "version",
        f"This project is not compatible with rapydo version {__version__}",
        "Please upgrade rapydo to version 99.99.99 or modify this project",
    )

    exec_command(capfd, "install --no-editable 0.8")

    exec_command(capfd, "install --no-editable")

    exec_command(capfd, "install")
# Esempio n. 17
def test_docker_registry(capfd: Capture, faker: Faker) -> None:
    """Verify the local Docker registry lifecycle.

    In compose mode `run registry` must refuse to start. In swarm mode the
    test starts the registry, pulls images into it, lists and removes them,
    and cross-checks the registry state through direct HTTP calls to the
    Docker Registry v2 API (/v2/_catalog and /v2/<repo>/tags/list).
    """

    execute_outside(capfd, "run registry")
    if Configuration.swarm_mode:
        execute_outside(capfd, "images")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)

    if not Configuration.swarm_mode:
        # Compose mode: registry is unsupported, nothing more to verify
        exec_command(
            capfd,
            "run registry",
            "Can't start the registry in compose mode",
        )

        return None

    # Registry not running yet: every registry-dependent command must fail
    exec_command(
        capfd,
        "pull backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "build backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "start backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "images",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    # Legacy command name must point to the replacement
    exec_command(
        capfd,
        "registry",
        "Registry command is replaced by rapydo run registry",
    )

    img = f"rapydo/registry:{__version__}"
    exec_command(
        capfd,
        "run registry",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    exec_command(
        capfd,
        "run registry --pull",
        "Running registry...",
    )

    # Give the registry container a moment to accept connections
    time.sleep(2)

    exec_command(
        capfd,
        "images",
        "This registry contains no images",
    )

    exec_command(
        capfd,
        "pull backend",
        "Base images pulled from docker hub and pushed into the local registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 1 image(s):",
        "rapydo/backend",
    )

    exec_command(
        capfd,
        "pull rabbit",
        "Base images pulled from docker hub and pushed into the local registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 2 image(s):",
        "rapydo/backend",
        "rapydo/rabbitmq",
    )

    # Re-running on the same port is a no-op
    exec_command(
        capfd,
        "run registry",
        "The registry is already running at 127.0.0.1:5000",
    )

    # Changing the port forces the existing container to be replaced
    exec_command(
        capfd,
        "-e REGISTRY_PORT=5001 run registry",
        "The registry container is already existing, removing",
    )

    exec_command(
        capfd,
        "images --remove invalid",
        "Some of the images that you specified are not found in this registry",
    )

    # Copied from images.py
    docker = Docker()
    registry = docker.registry.get_host()
    host = f"https://{registry}"
    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    assert "repositories" in catalog
    assert "rapydo/backend" in catalog["repositories"]

    r = docker.registry.send_request(f"{host}/v2/rapydo/backend/tags/list")

    tags_list = r.json()

    assert "name" in tags_list
    assert tags_list["name"] == "rapydo/backend"
    assert "tags" in tags_list
    assert __version__ in tags_list["tags"]

    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        f"Image rapydo/backend:{__version__} deleted from ",
        "Executing registry garbage collector...",
        "Registry garbage collector successfully executed",
        "Registry restarted to clean the layers cache",
    )

    # Wait for the restarted registry before querying it again
    time.sleep(1)

    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    assert "repositories" in catalog
    # After the delete the repository is still in the catalog but with no tag associated
    assert "rapydo/backend" in catalog["repositories"]

    r = docker.registry.send_request(f"{host}/v2/rapydo/backend/tags/list")

    tags_list = r.json()

    assert "name" in tags_list
    assert tags_list["name"] == "rapydo/backend"
    assert "tags" in tags_list
    # No tags associated to this repository
    assert tags_list["tags"] is None

    # Removing the already-deleted tag must now fail
    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        "Some of the images that you specified are not found in this registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 1 image(s):",
        "rapydo/rabbitmq",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        "Some of the images that you specified are not found in this registry",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/rabbitmq:{__version__}",
        f"Image rapydo/rabbitmq:{__version__} deleted from ",
        "Executing registry garbage collector...",
        "Registry garbage collector successfully executed",
        "Registry restarted to clean the layers cache",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/rabbitmq:{__version__}",
        "This registry contains no images",
    )
# Esempio n. 18
def test_all(capfd: Capture, faker: Faker) -> None:
    """Exercise the `list` command for env, submodules and services."""

    execute_outside(capfd, "list env")
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
        services=["redis"],
        extra="--env CUSTOMVAR1=mycustomvalue --env CUSTOMVAR2=mycustomvalue",
    )
    init_project(capfd)

    # `list` checks that do not require a running stack, as command -> expected
    list_checks = {
        "list": (
            "Missing argument 'ELEMENT_TYPE:{env|services|submodules}'. Choose from:",
        ),
        "list invalid": (
            "Invalid value for",
            "'invalid' is not one of 'env', 'services', 'submodules'",
        ),
        "list env": (
            "List env variables:",
            "ACTIVATE_ALCHEMY",
            "CUSTOMVAR1",
            "CUSTOMVAR2",
            "mycustomvalue",
        ),
        "list submodules": ("List of submodules:",),
    }
    for command, expected in list_checks.items():
        exec_command(capfd, command, *expected)

    # Stack is down: services are listed but reported as N/A
    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "N/A",
    )

    start_registry(capfd)

    pull_images(capfd)

    start_project(capfd)

    # Stack is up: the same services are now reported as running
    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "running",
    )
# Esempio n. 19
def test_init(capfd: Capture, faker: Faker) -> None:
    """Verify the `init` command: swarm setup, submodules and .projectrc.

    Covers swarm initialization (explicit and auto-detected manager IP),
    branch restore of submodules, re-init from a symlinked submodules
    folder, invalid submodule paths, and --prod .projectrc generation.

    Filesystem operations use pathlib for consistency with the other tests
    in this file (e.g. SUBMODULES_DIR.rename in test_install).
    """

    execute_outside(capfd, "init")
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )

    # Before init the submodules are missing and check must complain
    exec_command(
        capfd,
        "check -i main",
        "Repo https://github.com/rapydo/http-api.git missing as submodules/http-api.",
        "You should init your project",
    )

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "-e HEALTHCHECK_INTERVAL=1s -e SWARM_MANAGER_ADDRESS=127.0.0.1 init",
            "docker compose is installed",
            "Initializing Swarm with manager IP 127.0.0.1",
            "Swarm is now initialized",
            "Project initialized",
        )

        docker = Docker()
        docker.client.swarm.leave(force=True)
        # With an empty SWARM_MANAGER_ADDRESS the local IP is auto-detected
        local_ip = system.get_local_ip(production=False)
        exec_command(
            capfd,
            "-e HEALTHCHECK_INTERVAL=1s -e SWARM_MANAGER_ADDRESS= init",
            "docker compose is installed",
            "Swarm is now initialized",
            f"Initializing Swarm with manager IP {local_ip}",
            "Project initialized",
        )

        exec_command(
            capfd,
            "init",
            "Swarm is already initialized",
            "Project initialized",
        )

    else:
        init_project(capfd)

    # Switch a submodule to an old branch: init must restore the right one
    repo = git.get_repo("submodules/http-api")
    git.switch_branch(repo, "0.7.6")

    exec_command(
        capfd,
        "init",
        f"Switched http-api branch from 0.7.6 to {__version__}",
        f"build-templates already set on branch {__version__}",
        f"do already set on branch {__version__}",
    )

    Path("submodules").rename("submodules.bak")
    Path("submodules").mkdir()

    # This is to re-fill the submodules folder,
    # these folder will be removed by the next init
    exec_command(capfd, "init", "Project initialized")

    modules_path = Path("submodules.bak").resolve()

    with TemporaryRemovePath(Path("submodules.bak/do")):
        exec_command(
            capfd,
            f"init --submodules-path {modules_path}",
            "Submodule do not found in ",
        )
    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )

    # --submodules-path links the submodules instead of cloning them
    assert Path("submodules/do").is_symlink()
    assert not Path("submodules.bak/do").is_symlink()

    # Init again, this time in submodules there are links...
    # and will be removed as well as the folders
    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )

    exec_command(
        capfd,
        "init --submodules-path invalid/path",
        "Local path not found: invalid/path",
    )

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    exec_command(
        capfd,
        "--prod -e MYVAR=MYVAL init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    # The -e variable must have been persisted into the generated .projectrc
    lines = [line.strip() for line in Path(".projectrc").read_text().splitlines()]
    assert "MYVAR: MYVAL" in lines
# Esempio n. 20
def test_add(capfd: Capture) -> None:
    """Verify the `add` scaffolding command and the `upgrade` command.

    For each element type (endpoint, task, component, service,
    integration_test, workflow) the test checks: creation with --add-tests,
    refusal to overwrite an existing element, and re-creation with --force.
    """

    execute_outside(capfd, "add endpoint x")
    execute_outside(capfd, "upgrade --path x")
    create_project(
        capfd=capfd,
        name="second",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    # Backend endpoint scaffold
    path = Path("projects/second/backend/endpoints/xyz.py")
    test_path = Path("projects/second/backend/tests/test_endpoints_xyz.py")
    assert not path.exists()
    assert not test_path.exists()
    exec_command(
        capfd,
        "add endpoint xyz --add-tests",
        f"Endpoint created: {path}",
        f"Tests scaffold created: {test_path}",
    )
    exec_command(
        capfd,
        "add endpoint xyz",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force endpoint xyz",
        f"Endpoint created: {path}",
    )
    assert path.is_file()
    assert test_path.is_file()

    # Celery task scaffold (tests scaffold not supported for tasks)
    path = Path("projects/second/backend/tasks/xyz.py")
    assert not path.exists()
    exec_command(
        capfd,
        "add task xyz --add-tests",
        f"Task created: {path}",
        "Tests for tasks not implemented yet",
    )
    exec_command(
        capfd,
        "add task xyz",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force task xyz",
        f"Task created: {path}",
    )
    assert path.is_file()

    # Angular component scaffold (also registers it in the module file)
    path = Path("projects/second/frontend/app/components/xyz")
    test_path = Path("projects/second/frontend/app/components/xyz/xyz.spec.ts")
    assert not path.exists()
    assert not path.joinpath("xyz.ts").exists()
    assert not path.joinpath("xyz.html").exists()
    exec_command(
        capfd,
        "add component xyz --add-tests",
        "Added import { XyzComponent } from '@app/components/xyz/xyz'; to module ",
        "Added XyzComponent to module declarations",
        f"Component created: {path}",
        f"Tests scaffold created: {test_path}",
    )

    assert path.is_dir()
    assert path.joinpath("xyz.ts").is_file()
    assert path.joinpath("xyz.html").is_file()
    exec_command(
        capfd,
        "add component xyz",
        f"{path}/xyz.ts already exists",
    )
    exec_command(
        capfd,
        "add --force component xyz",
        f"Component created: {path}",
    )
    # Remove the folder but keep the module registration: re-adding must
    # detect the import is already included
    shutil.rmtree(path)
    exec_command(
        capfd,
        "add component xyz",
        "Import already included in module file",
        "Added XyzComponent to module declarations",
        f"Component created: {path}",
    )

    exec_command(
        capfd,
        "add component sink",
        "Added route to module declarations",
        "Added SinkComponent to module declarations",
    )

    # Angular service scaffold
    path = Path("projects/second/frontend/app/services")
    assert not path.exists()
    assert not path.joinpath("xyz.ts").exists()
    exec_command(
        capfd,
        "add service xyz --add-tests",
        "Added import { XyzService } from '@app/services/xyz'; to module file",
        "Added XyzService to module declarations",
        f"Service created: {path}",
        "Tests for services not implemented yet",
    )
    assert path.is_dir()
    assert path.joinpath("xyz.ts").is_file()
    exec_command(
        capfd,
        "add service xyz",
        f"{path}/xyz.ts already exists",
    )
    exec_command(
        capfd,
        "add --force service xyz",
        f"Service created: {path}",
    )
    path.joinpath("xyz.ts").unlink()
    exec_command(
        capfd,
        "add service xyz",
        "Import already included in module file",
        "Added XyzService to module declarations",
        f"Service created: {path}",
    )

    # Cypress integration test scaffold; the route is slugified to a filename
    path = Path(
        "projects/second/frontend/integration/app_mypath_my_id.spec.ts")
    assert not path.exists()
    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id --add-tests",
        "Add integration_test does not support --add-tests flag",
    )

    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id",
        f"Integration test created: {path}",
    )
    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id",
        f"{path} already exists",
    )
    # Here a little variant, by adding a leading /
    exec_command(
        capfd,
        "add --force integration_test /app/mypath/:my_id",
        f"Integration test created: {path}",
    )
    assert path.is_file()

    # GitHub Actions workflow scaffold
    path = Path(".github/workflows/github_actions-backend.yml")
    assert not path.exists()

    exec_command(
        capfd,
        "add workflow unexpectedname",
        "Invalid workflow name, expected: backend, frontend, cypress, mypy",
    )

    exec_command(
        capfd,
        "add workflow backend --add-tests",
        "Add workflow does not support --add-tests flag",
    )

    exec_command(
        capfd,
        "add workflow backend",
        f"GitHub Actions workflow created: {path}",
    )

    exec_command(
        capfd,
        "add workflow backend",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force workflow backend",
        f"GitHub Actions workflow created: {path}",
    )
    assert path.is_file()

    # Unknown element type is rejected by the CLI parser
    exec_command(
        capfd,
        "add abc xyz",
        "Invalid value for",
        "'abc' is not one of 'endpoint', 'task', 'component', 'service', ",
    )

    exec_command(capfd, "upgrade")
    exec_command(capfd, "upgrade --path invalid",
                 "Invalid path, cannot upgrade invalid")
    exec_command(capfd, "upgrade --path .gitignore")
# Esempio n. 21
def test_remove(capfd: Capture) -> None:
    """Verify the `remove` command in both swarm and compose mode.

    Checks single-service vs full-stack removal, the effect on networks,
    named and unnamed volumes (--all), registry removal (swarm only), and
    removal of detached interface containers (adminer, swaggerui).
    """

    execute_outside(capfd, "remove")

    create_project(
        capfd=capfd,
        name="rem",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd, " -e HEALTHCHECK_INTERVAL=20s ")

    start_registry(capfd)

    pull_images(capfd)

    if Configuration.swarm_mode:
        # In swarm mode single service remove is not permitted if nothing is running
        exec_command(
            capfd,
            "remove postgres",
            f"Stack rem is not running, deploy it with {colors.RED}rapydo start",
        )

    # Even if nothing is running, remove is permitted both on Compose and Swarm
    exec_command(capfd, "remove", "Stack removed")

    # Expected container sets; swarm uses `_` separators, compose uses `-`
    NONE: List[str] = []
    if Configuration.swarm_mode:
        BACKEND_ONLY = ["rem_backend"]
        ALL = ["rem_backend", "rem_postgres"]
    else:
        BACKEND_ONLY = ["rem-backend"]
        ALL = ["rem-backend", "rem-postgres"]

    assert get_containers() == NONE

    start_project(capfd)

    if Configuration.swarm_mode:
        NETWORK_NAME = "rem_swarm_default"
    else:
        NETWORK_NAME = "rem_compose_default"

    assert get_containers() == ALL

    # Baseline volume counts, compared after each removal below
    NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

    if Configuration.swarm_mode:
        # In swarm mode remove single service is equivalent to scale 0
        exec_command(
            capfd,
            "remove postgres",
            "rem_postgres scaled to 0",
            "verify: Service converged",
            "Services removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Single service remove does not remove the network
        assert NETWORK_NAME in get_networks()
        # Single service remove also remove unnamed volumes
        time.sleep(2)
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u

        exec_command(
            capfd,
            "start",
            "Stack started",
        )

        time.sleep(2)

        assert get_containers() == ALL

        NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services also drop the network
        assert NETWORK_NAME not in get_networks()
        # Removal of all services also remove unnamed volumes
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u
    else:

        exec_command(
            capfd,
            "remove postgres",
            "Stack removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Single service remove does not remove the network
        assert NETWORK_NAME in get_networks()
        # Removal of all services does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services also drop the network
        # assert NETWORK_NAME not in get_networks()

        # Networks are not removed, but based on docker compose down --help they should
        # Also docker-compose down removes network from what I remember
        # Should be reported as bug? If corrected this check will start to fail
        assert NETWORK_NAME in get_networks()

        # Removal of all services does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

        start_project(capfd)

        assert get_containers() == ALL

        exec_command(
            capfd,
            "remove --all postgres",
            "Stack removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Removal of all services with --all flag remove unnamed volumes
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        # This locally works... but not on GA ... mistery
        # assert UNNAMED_VOLUMES_NUM > u

        # New counts, after single service --all has removed some unnamed volume
        NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

        exec_command(capfd, "remove --all", "Stack removed")

        assert get_containers() == NONE
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM > n
        assert UNNAMED_VOLUMES_NUM > u

    if Configuration.swarm_mode:
        # Remove the registry
        exec_command(
            capfd,
            "remove registry",
            "Service registry removed",
        )

        # Verify that the registry is no longer running
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "remove registry",
            "Service registry is not running",
        )

        # Mix both registry and normal services
        exec_command(
            capfd,
            "remove registry postgres",
            # Registry is already removed, can't remove it again
            # But this is enough to confirm that registry and services can be mixed up
            "Service registry is not running",
            # The main stack is already removed, can't remove postgres
            # But this is enough to confirm that registry and services can be mixed up
            "Stack rem is not running, deploy it with",
        )

        start_registry(capfd)

    # Detached interface containers are removed by name, like services
    exec_command(
        capfd,
        "run --detach --pull --port 7777 adminer",
        "You can access Adminer interface",
    )
    exec_command(
        capfd,
        "run --detach --pull --port 8888 swaggerui",
        "You can access SwaggerUI web page",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer is not running",
        "Service swaggerui is not running",
    )

    assert get_containers() == NONE
    # Verify that removal of interfaces does not stop the main stack, if not requested
    exec_command(capfd, "start backend", "Stack started")
    time.sleep(2)
    assert get_containers() == BACKEND_ONLY
    exec_command(capfd, "remove adminer", "Service adminer is not running")
    assert get_containers() == BACKEND_ONLY

    exec_command(capfd, "remove", "Stack removed")
# Esempio n. 22
def test_tuning(capfd: Capture, faker: Faker) -> None:
    """Verify the `tuning` command for neo4j, backend and postgres."""

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        services=["postgres"],
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    # Without the image, tuning must suggest to pull it first
    exec_command(
        capfd,
        "tuning neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)

    # Same expected output with the neo4j container both OFF and ON
    neo4j_expected = (
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE",
        "Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE",
        "Memory settings recommendation from neo4j-admin memrec:",
        "Based on the above, the following memory settings are recommended:",
        "dbms.memory.heap.initial_size=",
        "dbms.memory.heap.max_size=",
        "dbms.memory.pagecache.size=",
        "Total size of lucene indexes in all databases:",
        "Total size of data and native indexes in all databases:",
    )

    # Tuning command with neo4j container OFF
    exec_command(capfd, "tuning neo4j", *neo4j_expected)

    start_project(capfd)

    service_verify(capfd, "neo4j")
    service_verify(capfd, "sqlalchemy")

    exec_command(
        capfd,
        "tuning backend",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "GUNICORN_MAX_NUM_WORKERS",
    )

    # Tuning command with neo4j container ON
    exec_command(capfd, "tuning neo4j", *neo4j_expected)

    exec_command(
        capfd,
        "tuning postgres",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "POSTGRES_SHARED_BUFFERS",
        "POSTGRES_EFFECTIVE_CACHE_SIZE",
        "POSTGRES_MAINTENANCE_WORK_MEM",
        "POSTGRES_MAX_WORKER_PROCESSES",
    )
# Esempio n. 23
def test_debug_run(capfd: Capture, faker: Faker) -> None:
    """Verify `run --debug` on the backend (commands, users, error cases)."""

    execute_outside(capfd, "run backend")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    # Legacy command name must point to the replacement
    exec_command(
        capfd,
        "volatile backend",
        "Volatile command is replaced by rapydo run --debug backend",
    )

    backend_image = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "run --debug backend",
        f"Missing {backend_image} image, add {colors.RED}--pull{colors.RESET} option",
    )

    pull_images(capfd)

    # NOTE: a check that --debug fails while the stack holds port 8080
    # ("Bind for 0.0.0.0:8080 failed") was previously here but is disabled.

    # --command and --user are only accepted in debug mode
    exec_command(
        capfd,
        "run backend --command hostname",
        "Can't specify a command if debug mode is OFF",
    )
    exec_command(
        capfd,
        "run backend --command hostname --user developer",
        "Can't specify a user if debug mode is OFF",
    )

    exec_command(capfd, "run --debug backend --command hostname", "backend-server")

    exec_command(capfd, "run --debug backend --command whoami", "root")

    # Switching user works but warns that container users are not mapped
    exec_command(
        capfd,
        "run --debug backend -u developer --command whoami",
        "Please remember that users in volatile containers are not mapped on current ",
        "developer",
    )

    # Unknown user is rejected by the docker daemon itself
    exec_command(
        capfd,
        "run --debug backend -u invalid --command whoami",
        "Error response from daemon:",
        "unable to find user invalid:",
        "no matching entries in passwd file",
    )
# Esempio n. 24
def test_interfaces(capfd: Capture, faker: Faker) -> None:
    """Test the Adminer and SwaggerUI interface containers via ``rapydo run``.

    Covers: deprecation of the old ``interfaces`` command, invalid service
    and port validation, default and custom ports, and http vs https URLs
    in production mode.
    """

    # Commands executed outside a project folder must fail with a clear error
    execute_outside(capfd, "run adminer")

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    # Legacy interface names and the interfaces command itself are deprecated
    exec_command(
        capfd,
        "interfaces sqlalchemy",
        "Deprecated interface sqlalchemy, use adminer instead",
    )

    exec_command(
        capfd,
        "interfaces adminer",
        "Interfaces command is replaced by rapydo run adminer",
    )

    # Unknown services and invalid port values are rejected
    exec_command(
        capfd,
        "run invalid",
        "Services misconfiguration, can't find invalid",
    )

    exec_command(
        capfd,
        "run adminer --port XYZ",
        "Invalid value for '--port' / '-p': 'XYZ' is not a valid integer",
    )

    # Before pulling, run must fail and suggest the --pull option
    img = f"rapydo/adminer:{__version__}"
    exec_command(
        capfd,
        "run adminer",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    # Launch Adminer UI with default port
    exec_command(
        capfd,
        "run adminer --pull --detach",
        "Pulling image for adminer...",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:7777",
    )

    # Admin or SwaggerUI does not start? You can debug with:
    # from python_on_whales import docker
    # assert docker.logs("adminer", tail=10) == "debug"

    exec_command(
        capfd,
        "remove adminer",
        "Service adminer removed",
    )

    # Launch Adminer UI with custom port
    exec_command(
        capfd,
        "run adminer --port 3333 --detach",
        # "Pulling adminer",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:3333",
    )

    # Launch Swagger UI with default port
    exec_command(
        capfd,
        "run swaggerui --pull --detach",
        "Pulling image for swaggerui...",
        "You can access SwaggerUI web page here: http://localhost:7777",
    )

    exec_command(
        capfd,
        "remove swaggerui",
        "Service swaggerui removed",
    )

    # Launch Swagger UI with custom port
    exec_command(
        capfd,
        "run swaggerui --port 4444 --detach",
        "You can access SwaggerUI web page here: http://localhost:4444",
    )

    # This fails if the interfaces are non running, i.e. in case of a post-start crash
    # Introduced after a BUG due to the tty setting in volatile container
    # that made run interfaces fail on GA
    exec_command(
        capfd,
        "remove adminer swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    # Test Swagger UI and Admin in production mode
    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    # In production mode the interfaces must be exposed over https
    exec_command(
        capfd,
        "--prod run swaggerui --port 5555 --detach",
        "You can access SwaggerUI web page here: https://localhost:5555",
    )

    exec_command(
        capfd,
        "--prod run adminer --port 6666 --detach",
        "You can access Adminer interface on: https://localhost:6666",
    )
# Esempio n. 25 (scraped sample separator; score: 0)
def test_all(capfd: Capture) -> None:
    """Test the ``rapydo logs`` command (service filters, --tail, --follow).

    NOTE(review): other functions named ``test_all`` appear later in this
    file; duplicate test names in one module shadow each other under pytest —
    presumably these samples originate from separate files, TODO confirm.
    """

    # Commands executed outside a project folder must fail with a clear error
    execute_outside(capfd, "logs backend")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    start_registry(capfd)

    pull_images(capfd)
    start_project(capfd)

    # Invalid services are refused
    exec_command(
        capfd,
        "logs --tail 1 invalid",
        "No such service: invalid",
    )

    now = datetime.now()

    # --follow blocks forever: schedule a SIGALRM in 5s that raises a
    # (mocked) KeyboardInterrupt to break out of the log stream
    signal.signal(signal.SIGALRM, mock_KeyboardInterrupt)
    signal.alarm(5)
    # Here using main services option
    exec_command(
        capfd,
        "logs --tail 10 --follow backend",
        "REST API backend server is ready to be launched",
    )
    end = datetime.now()

    # The follow command must have run until the alarm fired (~5s)
    assert (end - now).seconds >= 4
    signal.alarm(0)

    exec_command(
        capfd,
        "logs backend",
        "REST API backend server is ready to be launched",
    )

    # Verify the enabled-services banner for every filter combination
    exec_command(
        capfd,
        "logs --tail 1",
        "Enabled services: backend, frontend, postgres",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend",
        "Enabled services: backend",
    )

    exec_command(
        capfd,
        "logs --tail 1 frontend",
        "Enabled services: frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend frontend",
        "Enabled services: backend, frontend",
    )

    # Service order on the command line does not matter
    exec_command(
        capfd,
        "logs --tail 1 frontend backend",
        "Enabled services: backend, frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend invalid",
        "No such service: invalid",
    )

    # Backend logs are never timestamped
    exec_command(
        capfd,
        "logs --tail 20 backend",
        # Logs are not prefixed because only one service is shown
        "Testing mode",
    )

    # Debug code... no logs in swarm mode for frontend, even after a wait 20...
    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "logs --tail 10 frontend",
        )
    else:
        timestamp = now.strftime("%Y-%m-%dT")
        # Frontend logs are always timestamped
        exec_command(
            capfd,
            "logs --tail 10 frontend",
            # Logs are not prefixed because only one service is shown
            f"{timestamp}",
        )

    # Follow flag is not supported in swarm mode with multiple services
    if Configuration.swarm_mode:
        # Multiple services are not supported in swarm mode
        exec_command(
            capfd,
            "logs --follow",
            "Follow flag is not supported on multiple services",
        )

        exec_command(
            capfd,
            "logs --follow backend frontend",
            "Follow flag is not supported on multiple services",
        )
# Esempio n. 26 (scraped sample separator; score: 0)
def test_all(capfd: Capture, faker: Faker) -> None:
    """Test the ``rapydo pull`` and ``rapydo build`` commands.

    Covers: registry checks in swarm mode, missing/invalid Dockerfiles for
    custom builds, rebuilds of core images making custom images obsolete,
    and pull of extra base images defined in the custom configuration.
    """

    # Commands executed outside a project folder must fail with a clear error
    execute_outside(capfd, "pull")
    execute_outside(capfd, "build")

    # Two projects: "testbuild" gets the custom image, project2 is used later
    # to rebuild the core image without touching testbuild's custom one
    project2 = random_project_name(faker)
    create_project(
        capfd=capfd,
        name="testbuild",
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    create_project(
        capfd=capfd,
        name=project2,
        auth="no",
        frontend="no",
        services=["rabbit"],
    )

    if Configuration.swarm_mode:

        # Both commands require a reachable local registry in swarm mode
        exec_command(
            capfd,
            "pull",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "build",
            "docker buildx is installed",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    # Before pulling, start must fail and suggest the pull command
    image = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "start",
        f"Missing {image} image, execute {colors.RED}rapydo pull backend",
    )

    # Deactivated or unknown services can't be pulled
    exec_command(
        capfd,
        "-e ACTIVATE_RABBIT=0 pull --quiet rabbit",
        "No such service: rabbit",
    )

    exec_command(
        capfd,
        "pull --quiet proxy",
        "No such service: proxy",
    )

    exec_command(
        capfd,
        "pull --quiet",
        "Base images pulled from docker hub",
    )

    # Basic pull
    exec_command(
        capfd,
        "pull xxx",
        "No such service: xxx",
    )

    # --all is useless here... added just to include the parameter in some tests.
    # A true test on such parameter would be quite complex...
    exec_command(
        capfd,
        "pull --all --quiet backend",
        "Images pulled from docker hub",
    )

    # Add a custom image to extend base rabbit image:
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
services:
  rabbit:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}

    """)

    # Missing folder
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found",
    )

    os.makedirs("projects/testbuild/builds/rabbit")

    # Missing Dockerfile
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found: ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Empty Dockerfile
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        pass
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Missing base image
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("RUN ls")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Invalid RAPyDo template
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM rapydo/invalid")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Unable to find rapydo/invalid in this project",
        "Please inspect the FROM image in",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # BUG FIX: the original f-string was "testbuild/rabbit:${__version__}",
    # leaving a literal "$" in the tag; the variable is now also used in the
    # expected message (mirroring the rapydo/backend check above)
    image = f"testbuild/rabbit:{__version__}"
    exec_command(
        capfd,
        "start",
        f"Missing {image} image, execute {colors.RED}rapydo build rabbit",
    )

    # Not a RAPyDo child but build is possibile
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM ubuntu")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Custom images built",
    )

    # A valid Dockerfile extending the core rabbitmq image
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write(f"""
FROM rapydo/rabbitmq:{__version__}
# Just a simple command to differentiate from the parent
RUN mkdir xyz
""")

    # Commit the Dockerfile so that the build date can be compared with git
    r = Repo(".")
    r.git.add("-A")
    r.git.commit("-a", "-m", "'fake'")

    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        f"naming to docker.io/testbuild/rabbit:{__version__}",
        "Custom images built",
    )

    # An uncommitted file in the build folder prevents commit-history checks
    test_file = Path("projects/testbuild/builds/rabbit/test")
    with open(test_file, "w+") as f:
        f.write("test")

    exec_command(
        capfd,
        "check -i main --no-git",
        "Can't retrieve a commit history for ",
        "Checks completed",
    )

    test_file.unlink()

    exec_command(
        capfd,
        f"-e ACTIVATE_RABBIT=0 -p {project2} build --core rabbit",
        "No such service: rabbit",
    )

    # Rebuild core rabbit image => custom rabbit is now obsolete
    # Please note the use of the project 2.
    # This way we prevent to rebuilt the custom image of testbuild
    # This simulate a pull updating a core image making the custom image obsolete

    if Configuration.swarm_mode:
        swarm_push_warn = "Local registry push is not implemented yet for core images"
    else:
        swarm_push_warn = ""

    exec_command(
        capfd,
        f"-p {project2} build --core rabbit",
        "Core images built",
        swarm_push_warn,
        "No custom images to build",
    )
    exec_command(
        capfd,
        "check -i main --no-git",
        f"Obsolete image testbuild/rabbit:{__version__}",
        "built on ",
        " that changed on ",
        f"Update it with: {colors.RED}rapydo build rabbit",
    )

    # Add a second service with the same image to test redundant builds
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
  rabbit2:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}

    """)

    # Touch the core backend template so that rapydo/backend becomes obsolete
    # (context manager replaces the previous open/close pair)
    with open("submodules/build-templates/backend/Dockerfile", "a") as fin:
        fin.write("xyz")
    r = Repo("submodules/build-templates")
    r.git.commit("-a", "-m", "'fake'")
    exec_command(
        capfd,
        "check -i main",
        f"Obsolete image rapydo/backend:{__version__}",
        "built on ",
        " but changed on ",
        f"Update it with: {colors.RED}rapydo pull backend",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Add a third service without a build to verify that pull includes it
    # to be the base image even if defined in custom part
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
  rabbit3:
    image: alpine:latest
    environment:
      ACTIVATE: 1
    """)

    exec_command(
        capfd,
        "pull --quiet rabbit3",
        "Base images pulled from docker hub",
    )

    # Now this should fail because pull does not include custom services
    exec_command(
        capfd,
        "start rabbit3",
        "Stack started",
    )
# Esempio n. 27 (scraped sample separator; score: 0)
def test_base(capfd: Capture, faker: Faker) -> None:
    """Test the ``rapydo reload`` command, including multi-replica reloads.

    Covers: reload on a stopped stack, invalid services, reload of a scaled
    service (backend in swarm mode, fail2ban in compose mode), and the error
    when a service image lacks the reload script.
    """

    # Commands executed outside a project folder must fail with a clear error
    execute_outside(capfd, "reload")

    project_name = random_project_name(faker)

    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["fail2ban"],
    )
    init_project(capfd)

    # Nothing is running yet => nothing can be reloaded
    exec_command(capfd, "reload", "No service reloaded")
    exec_command(capfd, "reload backend", "No service reloaded")
    exec_command(capfd, "reload invalid", "No such service: invalid")
    exec_command(capfd, "reload backend invalid", "No such service: invalid")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    exec_command(capfd, "reload backend", "Reloading Flask...")

    # Scale a service to two replicas; the scalable service differs by mode
    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            f"{project_name}_backend scaled to 2",
            "Service converged",
        )
    else:

        service = "fail2ban"
        exec_command(
            capfd,
            "scale fail2ban=2",
            "Scaling services: fail2ban=2...",
            "Services scaled: fail2ban=2",
        )

    # Give the new replica some time to come up
    time.sleep(4)

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    # Reload must be executed on every replica of the service
    exec_command(
        capfd,
        f"reload {service}",
        f"Executing command on {container1[0]}",
        f"Executing command on {container2[0]}",
    )

    # Remove the reload helper script to verify the unsupported-service error
    exec_command(capfd, "shell backend -u root 'rm /usr/local/bin/reload'")

    exec_command(
        capfd, "reload backend", "Service backend does not support the reload command"
    )

    exec_command(capfd, "remove", "Stack removed")
# Esempio n. 28 (scraped sample separator; score: 0)
def test_all(capfd: Capture, faker: Faker) -> None:
    """Test ``rapydo backup rabbit`` and ``rapydo restore rabbit``.

    Covers: missing image errors, --force / --restart flows, backup
    retention (--max, --dry-run, date-pattern filtering), restore argument
    validation, and a full backup -> delete queue -> restore round trip.
    """

    # Commands executed outside a project folder must fail with a clear error
    execute_outside(capfd, "backup rabbit")
    execute_outside(capfd, "restore rabbit")

    backup_folder = BACKUP_DIR.joinpath("rabbit")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    start_registry(capfd)

    # Both commands require the rabbit image to be pulled first
    exec_command(
        capfd,
        "backup rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )
    exec_command(
        capfd,
        "restore rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "rabbitmq")

    # Just some delay extra delay, rabbit is a slow starter
    time.sleep(5)

    # NOTE: q = rabbitmq.__name__ is just to have a fixed name to be used to test the
    # queue without the need to introdure further nested " or '
    query_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance();print(q, r.queue_exists(q));'\""
    create_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.create_queue(q);'\""
    delete_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.delete_queue(q);'\""

    # The queue does not exist yet
    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    exec_command(
        capfd,
        create_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")

    # Backup command: refused without --force while rabbit is running
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup rabbit --force --restart backend",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted rabbit
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # With the stack down no --force is needed
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup rabbit --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignore because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(capfd, "start backend rabbit")

    # Just some delay extra delay, rabbit is a slow starter
    if Configuration.swarm_mode:
        time.sleep(20)
    else:
        time.sleep(10)

    # Drop the queue so that the restore can be verified to bring it back
    exec_command(
        capfd,
        delete_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    # Restore command: without arguments it lists the available backups
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        ".tar.gz",
    )

    exec_command(
        capfd,
        "restore rabbit invalid",
        "Invalid backup file, data/backup/rabbit/invalid does not exist",
    )

    # Error paths: missing backup dir, missing rabbit dir, empty dir,
    # files with the wrong extension
    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, the following folder "
            "does not exist: data/backup/rabbit",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore rabbit",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/rabbit")

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/rabbit/test.gz")
        os.remove("data/backup/rabbit/test.tar.gz")

    # Test restore on rabbit (required rabbit to be down)
    files = os.listdir("data/backup/rabbit")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    # Most recent backup (file names sort chronologically)
    rabbit_dump_file = files[-1]

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")
    # 4) verify data match again point 1 (restore completed)
    # postponed because rabbit needs time to start...

    # Like backup, restore on a running rabbit requires --force
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "RabbitMQ is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file} --force --restart backend",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait rabbit to completely startup
    service_verify(capfd, "rabbitmq")

    # The restored dump must contain the queue created at the beginning
    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")
# Esempio n. 29 (scraped sample separator; score: 0)
def test_password_backend(capfd: Capture, faker: Faker) -> None:
    """Test ``rapydo password backend`` (random and explicit passwords).

    Covers: errors while the stack is down, the N/A status in the password
    report, that changing the password restarts the backend and the new
    credentials work, and expiration reporting via freeze_time.
    """

    def _verify_backend_login() -> None:
        # Helper: wait for the backend to boot, then verify that a login with
        # the credentials currently stored in .projectrc succeeds.
        # Extracted because the original repeated this block three times.
        wait_until(capfd, "logs backend --tail 10", "Boot completed")
        # in dev mode Flask loads the app two times... A "boot completed" only
        # states that the app is loaded at least once, and the second time will
        # success for sure. But can't say if now flask is really ready or still
        # loading the second time. Added a sleep to wait for the second load
        time.sleep(2)

        r = requests.post(
            "http://127.0.0.1:8080/auth/login",
            json={
                "username": get_variable_from_projectrc("AUTH_DEFAULT_USERNAME"),
                "password": get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD"),
            },
        )
        exec_command(capfd, "logs backend --tail 10")
        assert r.status_code == 200

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )

    init_project(capfd, "-e API_AUTOSTART=1")

    # Let's simplify this task by removing task history
    # Otherwise the wait_until very usual fails due to the logs or previous tasks
    if Configuration.swarm_mode:
        docker.swarm.update(task_history_limit=0)

    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    # Password can't be changed while the stack is down
    exec_command(
        capfd,
        "password backend --random",
        "Can't update backend because it is not running. Please start your stack",
    )

    # No password was ever set => reported as N/A (in red)
    exec_command(
        capfd,
        "password",
        f"backend    AUTH_DEFAULT_PASSWORD  {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "logs backend --tail 10")
    _verify_backend_login()

    backend_start_date = get_container_start_date(capfd, "backend")
    backend_pass1 = get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")

    exec_command(
        capfd,
        "password backend --random",
        "backend was running, restarting services...",
        "The password of backend has been changed. ",
        "Please find the new password into your .projectrc file as "
        "AUTH_DEFAULT_PASSWORD variable",
    )

    # The random password must differ from the previous one
    backend_pass2 = get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")
    assert backend_pass1 != backend_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)

    # Verify that backend is restarted
    assert backend_start_date2 != backend_start_date

    # This is needed to wait for the service rolling update
    if Configuration.swarm_mode:
        time.sleep(5)

    # Login must work with the new random password
    _verify_backend_login()

    # The password is now set today => reported in green
    exec_command(
        capfd,
        "password",
        f"backend    AUTH_DEFAULT_PASSWORD  {colors.GREEN}{today}",
    )

    # Change the password to an explicit value
    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password backend --password {mypassword}",
        "The password of backend has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    # This is needed to wait for the service rolling update
    if Configuration.swarm_mode:
        time.sleep(5)

    # Login must also work with the explicit password
    _verify_backend_login()

    # Jump beyond the expiration threshold and verify the expired report
    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"backend    AUTH_DEFAULT_PASSWORD  {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"AUTH_DEFAULT_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
# Esempio n. 30 (scraped sample separator; score: 0)
def test_all(capfd: Capture, faker: Faker) -> None:
    """End-to-end test of the backup/restore commands for postgres.

    Covers: commands rejected outside a project folder, missing-image hints,
    backup creation, ``--max`` retention (with and without ``--dry-run``),
    retention ignoring files that do not match the date pattern, error paths
    for missing/invalid backups, and a full restore cycle verified through
    data changes in the database.
    """

    # Both commands must be rejected when executed outside a project folder
    execute_outside(capfd, "backup postgres")
    execute_outside(capfd, "restore postgres")

    backup_folder = BACKUP_DIR.joinpath("postgres")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    # Without pulled images, both commands must fail with a hint to pull
    exec_command(
        capfd,
        "backup postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )
    exec_command(
        capfd,
        "restore postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize postgres
    exec_command(capfd, "shell backend 'restapi init'")

    # Verify the initialization.
    # NOTE: psql deliberately lacks the closing single quote of the shell
    # command; the f-strings below append it after the SQL (the trailing \')
    psql = "shell postgres 'psql -U sqluser -d SQL_API -c"
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    # First backup (also the baseline for the retention tests below)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Test backup retention: --max 999 is above the current count (2),
    # so nothing should be marked for deletion
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    # (the count is still 2 on the second invocation)
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # --max 1 is below the current count, so deletions are announced...
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    # (...but nothing was actually deleted, so the same message repeats)
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion (no --dry-run this time, so it really happens)
    exec_command(
        capfd,
        "backup postgres --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2:
    # one new backup was created, two old ones were deleted)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    # (each file below is malformed in a different way: missing fields,
    # out-of-range date/time components, or wrong extension)
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching
        # the expected date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Unknown service names are rejected by the CLI option validation
    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Backup requires the postgres container to be running
    exec_command(
        capfd,
        "backup postgres",
        "The backup procedure requires postgres running, please start your stack",
    )

    # Restore without arguments lists the available backups
    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        ".sql.gz",
    )
    exec_command(
        capfd,
        "restore postgres invalid",
        "Invalid backup file, data/backup/postgres/invalid does not exist",
    )

    # Error path: the whole backup dir is missing
    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore postgres",
            "No backup found, the following folder "
            "does not exist: data/backup/postgres",
        )

    # Error paths: the postgres subfolder is missing, then empty,
    # then contains a single (fake) backup
    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore postgres",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/postgres")

        exec_command(
            capfd,
            "restore postgres",
            "No backup found, data/backup/postgres is empty",
        )

        open("data/backup/postgres/test.sql.gz", "a").close()

        exec_command(
            capfd,
            "restore postgres",
            "Please specify one of the following backup:",
            "test.sql.gz",
        )

        os.remove("data/backup/postgres/test.sql.gz")

    # Pick the most recent real dump (file names sort chronologically)
    files = os.listdir("data/backup/postgres")
    files = [f for f in files if f.endswith(".sql.gz")]
    files.sort()
    postgres_dump_file = files[-1]

    # Postgres restore not allowed if container is not running
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "The restore procedure requires postgres running, please start your stack",
    )

    exec_command(capfd, "start", "Stack started")

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    # 2) Modify the data
    exec_command(
        capfd,
        f'{psql} "update role SET description=name"\'',
    )
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | normal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "Starting restore on postgres...",
        "CREATE DATABASE",
        "ALTER DATABASE",
        f"Restore from data/backup/postgres/{postgres_dump_file} completed",
    )

    # 4) verify data match again point 1 (restore completed)
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )